Mirror of https://github.com/XRPLF/clio.git (synced 2026-01-21 15:15:29 +00:00)

Compare commits (3 commits; author and date columns were empty in the capture):

- c59fcf343f
- c35649eb6e
- 4da4b49eda

@@ -50,7 +50,7 @@ runs:
  - uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
  with:
  cache-image: false
- - uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
+ - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

  - uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
  id: meta

.github/actions/cmake/action.yml (17 changed lines)

@@ -37,10 +37,6 @@ inputs:
  description: Whether to generate Debian package
  required: true
  default: "false"
- version:
- description: Version of the clio_server binary
- required: false
- default: ""

  runs:
  using: composite
@@ -61,19 +57,6 @@ runs:
  STATIC: "${{ inputs.static == 'true' && 'ON' || 'OFF' }}"
  TIME_TRACE: "${{ inputs.time_trace == 'true' && 'ON' || 'OFF' }}"
  PACKAGE: "${{ inputs.package == 'true' && 'ON' || 'OFF' }}"
- # GitHub creates a merge commit for a PR
- # https://www.kenmuse.com/blog/the-many-shas-of-a-github-pull-request/
- #
- # We:
- # - explicitly provide branch name
- # - use `github.head_ref` to get the SHA of last commit in the PR branch
- #
- # This way it works both for PRs and pushes to branches.
- GITHUB_BRANCH_NAME: "${{ github.head_ref || github.ref_name }}"
- GITHUB_HEAD_SHA: "${{ github.event.pull_request.head.sha || github.sha }}"
- #
- # If tag is being pushed, or it's a nightly release, we use that version.
- FORCE_CLIO_VERSION: ${{ inputs.version }}
  run: |
  cmake \
  -B "${BUILD_DIR}" \

.github/scripts/conan/generate_matrix.py (2 changed lines)

@@ -4,7 +4,7 @@ import json

  LINUX_OS = ["heavy", "heavy-arm64"]
  LINUX_CONTAINERS = [
- '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+ '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }'
  ]
  LINUX_COMPILERS = ["gcc", "clang"]

.github/workflows/build.yml (7 changed lines)

@@ -23,7 +23,6 @@ on:
  - "cmake/**"
  - "src/**"
  - "tests/**"
- - "benchmarks/**"

  - docs/config-description.md
  workflow_dispatch:
@@ -50,7 +49,7 @@ jobs:
  build_type: [Release, Debug]
  container:
  [
- '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }',
+ '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }',
  ]
  static: [true]

@@ -80,7 +79,7 @@ jobs:
  uses: ./.github/workflows/reusable-build.yml
  with:
  runs_on: heavy
- container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+ container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }'
  conan_profile: gcc
  build_type: Debug
  download_ccache: true
@@ -98,7 +97,7 @@ jobs:
  needs: build-and-test
  runs-on: heavy
  container:
- image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+ image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f

  steps:
  - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

.github/workflows/check-libxrpl.yml (8 changed lines)

@@ -21,7 +21,7 @@ jobs:
  name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}`
  runs-on: heavy
  container:
- image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+ image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f

  steps:
  - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
@@ -29,9 +29,9 @@ jobs:
  fetch-depth: 0

  - name: Prepare runner
- uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
+ uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6
  with:
- enable_ccache: false
+ disable_ccache: true

  - name: Update libXRPL version requirement
  run: |
@@ -69,7 +69,7 @@ jobs:
  needs: build
  runs-on: heavy
  container:
- image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+ image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f

  steps:
  - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0

.github/workflows/clang-tidy.yml (6 changed lines)

@@ -31,7 +31,7 @@ jobs:
  if: github.event_name != 'push' || contains(github.event.head_commit.message, 'clang-tidy auto fixes')
  runs-on: heavy
  container:
- image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+ image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f

  permissions:
  contents: write
@@ -44,9 +44,9 @@ jobs:
  fetch-depth: 0

  - name: Prepare runner
- uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
+ uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6
  with:
- enable_ccache: false
+ disable_ccache: true

  - name: Run conan
  uses: ./.github/actions/conan

.github/workflows/docs.yml (6 changed lines)

@@ -18,7 +18,7 @@ jobs:
  build:
  runs-on: ubuntu-latest
  container:
- image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+ image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f

  steps:
  - name: Checkout
@@ -27,9 +27,9 @@ jobs:
  lfs: true

  - name: Prepare runner
- uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
+ uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6
  with:
- enable_ccache: false
+ disable_ccache: true

  - name: Create build directory
  run: mkdir build_docs

.github/workflows/nightly.yml (38 changed lines)

@@ -28,20 +28,8 @@ defaults:
  shell: bash

  jobs:
- get_date:
- name: Get Date
- runs-on: ubuntu-latest
- outputs:
- date: ${{ steps.get_date.outputs.date }}
- steps:
- - name: Get current date
- id: get_date
- run: |
- echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT
-
  build-and-test:
  name: Build and Test
- needs: get_date

  strategy:
  fail-fast: false
@@ -55,17 +43,17 @@ jobs:
  conan_profile: gcc
  build_type: Release
  static: true
- container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+ container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }'
  - os: heavy
  conan_profile: gcc
  build_type: Debug
  static: true
- container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+ container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }'
  - os: heavy
  conan_profile: gcc.ubsan
  build_type: Release
  static: false
- container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+ container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }'

  uses: ./.github/workflows/reusable-build-test.yml
  with:
@@ -79,16 +67,14 @@ jobs:
  upload_clio_server: true
  download_ccache: false
  upload_ccache: false
- version: nightly-${{ needs.get_date.outputs.date }}

  package:
  name: Build debian package
- needs: get_date

  uses: ./.github/workflows/reusable-build.yml
  with:
  runs_on: heavy
- container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+ container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }'
  conan_profile: gcc
  build_type: Release
  download_ccache: false
@@ -97,13 +83,11 @@ jobs:
  static: true
  upload_clio_server: false
  package: true
- version: nightly-${{ needs.get_date.outputs.date }}
  targets: package
  analyze_build_time: false

  analyze_build_time:
  name: Analyze Build Time
- needs: get_date

  strategy:
  fail-fast: false
@@ -111,7 +95,7 @@ jobs:
  include:
  - os: heavy
  conan_profile: clang
- container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+ container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }'
  static: true
  - os: macos15
  conan_profile: apple-clang
@@ -130,7 +114,17 @@ jobs:
  upload_clio_server: false
  targets: all
  analyze_build_time: true
- version: nightly-${{ needs.get_date.outputs.date }}
+
+ get_date:
+ name: Get Date
+ runs-on: ubuntu-latest
+ outputs:
+ date: ${{ steps.get_date.outputs.date }}
+ steps:
+ - name: Get current date
+ id: get_date
+ run: |
+ echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT

  nightly_release:
  needs: [build-and-test, package, get_date]

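The relocated `get_date` job boils down to a single shell command; a minimal stand-alone sketch of what it emits, outside of GitHub Actions (illustration only):

```sh
# Sketch: the YYYYMMDD stamp that get_date exposes as a step output and that
# the nightly release job consumes through `needs.get_date`.
date_stamp="$(date +'%Y%m%d')"
echo "date=${date_stamp}" >> "${GITHUB_OUTPUT:-/dev/stdout}"
```
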
.github/workflows/pre-commit-autoupdate.yml (2 changed lines)

@@ -12,7 +12,7 @@ on:

  jobs:
  auto-update:
- uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@ad4ab1ae5a54a4bab0e87294c31fc0729f788b2b
+ uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@afbcbdafbe0ce5439492fb87eda6441371086386
  with:
  sign_commit: true
  committer: "Clio CI <skuznetsov@ripple.com>"

.github/workflows/pre-commit.yml (4 changed lines)

@@ -8,7 +8,7 @@ on:

  jobs:
  run-hooks:
- uses: XRPLF/actions/.github/workflows/pre-commit.yml@01163508e81d7dd63d4601d4090b297a260b18c2
+ uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a
  with:
  runs_on: heavy
- container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:14342e087ceb8b593027198bf9ef06a43833c696" }'
+ container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:067449c3f8ae6755ea84752ea2962b589fe56c8f" }'

.github/workflows/release.yml (7 changed lines)

@@ -29,7 +29,7 @@ jobs:
  conan_profile: gcc
  build_type: Release
  static: true
- container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+ container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }'

  uses: ./.github/workflows/reusable-build-test.yml
  with:
@@ -43,7 +43,7 @@ jobs:
  upload_clio_server: true
  download_ccache: false
  upload_ccache: false
- version: ${{ github.event_name == 'push' && github.ref_name || '' }}
+ expected_version: ${{ github.event_name == 'push' && github.ref_name || '' }}

  package:
  name: Build debian package
@@ -51,7 +51,7 @@ jobs:
  uses: ./.github/workflows/reusable-build.yml
  with:
  runs_on: heavy
- container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+ container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }'
  conan_profile: gcc
  build_type: Release
  download_ccache: false
@@ -60,7 +60,6 @@ jobs:
  static: true
  upload_clio_server: false
  package: true
- version: ${{ github.event_name == 'push' && github.ref_name || '' }}
  targets: package
  analyze_build_time: false

.github/workflows/reusable-build-test.yml (14 changed lines)

@@ -63,18 +63,18 @@ on:
  type: string
  default: all

+ expected_version:
+ description: Expected version of the clio_server binary
+ required: false
+ type: string
+ default: ""
+
  package:
  description: Whether to generate Debian package
  required: false
  type: boolean
  default: false

- version:
- description: Version of the clio_server binary
- required: false
- type: string
- default: ""
-
  jobs:
  build:
  uses: ./.github/workflows/reusable-build.yml
@@ -90,8 +90,8 @@ jobs:
  upload_clio_server: ${{ inputs.upload_clio_server }}
  targets: ${{ inputs.targets }}
  analyze_build_time: false
+ expected_version: ${{ inputs.expected_version }}
  package: ${{ inputs.package }}
- version: ${{ inputs.version }}

  test:
  needs: build

.github/workflows/reusable-build.yml (44 changed lines)

@@ -60,17 +60,17 @@ on:
  required: true
  type: boolean

+ expected_version:
+ description: Expected version of the clio_server binary
+ required: false
+ type: string
+ default: ""
+
  package:
  description: Whether to generate Debian package
  required: false
  type: boolean

- version:
- description: Version of the clio_server binary
- required: false
- type: string
- default: ""
-
  secrets:
  CODECOV_TOKEN:
  required: false
@@ -93,11 +93,15 @@ jobs:
  - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
  with:
  fetch-depth: 0
+ # We need to fetch tags to have correct version in the release
+ # The workaround is based on https://github.com/actions/checkout/issues/1467
+ fetch-tags: true
+ ref: ${{ github.ref }}

  - name: Prepare runner
- uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
+ uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6
  with:
- enable_ccache: ${{ inputs.download_ccache }}
+ disable_ccache: ${{ !inputs.download_ccache }}

  - name: Setup conan on macOS
  if: ${{ runner.os == 'macOS' }}
@@ -113,7 +117,7 @@ jobs:

  - name: Restore ccache cache
  if: ${{ inputs.download_ccache && github.ref != 'refs/heads/develop' }}
- uses: actions/cache/restore@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
+ uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
  with:
  path: ${{ env.CCACHE_DIR }}
  key: ${{ steps.cache_key.outputs.key }}
@@ -135,7 +139,6 @@ jobs:
  static: ${{ inputs.static }}
  time_trace: ${{ inputs.analyze_build_time }}
  package: ${{ inputs.package }}
- version: ${{ inputs.version }}

  - name: Build Clio
  uses: ./.github/actions/build-clio
@@ -159,12 +162,12 @@ jobs:
  - name: Show ccache's statistics and zero it
  if: ${{ inputs.download_ccache }}
  run: |
- ccache --show-stats -vv
+ ccache --show-stats
  ccache --zero-stats

  - name: Save ccache cache
  if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
- uses: actions/cache/save@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
+ uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
  with:
  path: ${{ env.CCACHE_DIR }}
  key: ${{ steps.cache_key.outputs.key }}
@@ -215,20 +218,15 @@ jobs:
  if: ${{ inputs.code_coverage }}
  uses: ./.github/actions/code-coverage

- - name: Verify version is expected
- if: ${{ inputs.version != '' }}
+ - name: Verify expected version
+ if: ${{ inputs.expected_version != '' }}
  env:
- INPUT_VERSION: ${{ inputs.version }}
- BUILD_TYPE: ${{ inputs.build_type }}
+ INPUT_EXPECTED_VERSION: ${{ inputs.expected_version }}
  run: |
  set -e
- EXPECTED_VERSION="clio-${INPUT_VERSION}"
+ EXPECTED_VERSION="clio-${INPUT_EXPECTED_VERSION}"
- if [[ "${BUILD_TYPE}" == "Debug" ]]; then
- EXPECTED_VERSION="${EXPECTED_VERSION}+DEBUG"
- fi
-
- actual_version=$(./build/clio_server --version | head -n 1)
- if [[ "${actual_version}" != "${EXPECTED_VERSION}" ]]; then
+ actual_version=$(./build/clio_server --version)
+ if [[ "$actual_version" != "$EXPECTED_VERSION" ]]; then
  echo "Expected version '${EXPECTED_VERSION}', but got '${actual_version}'"
  exit 1
  fi

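For context, the renamed "Verify expected version" step is a plain string comparison against the binary's `--version` output; the old variant additionally appended `+DEBUG` for Debug builds and trimmed to the first output line. A rough local re-run might look like this (the expected value is a made-up example, not taken from the diff, and a built `./build/clio_server` is assumed):

```sh
# Hypothetical local equivalent of the "Verify expected version" step.
EXPECTED_VERSION="clio-2.0.0"   # example value only

actual_version=$(./build/clio_server --version)
if [[ "${actual_version}" != "${EXPECTED_VERSION}" ]]; then
    echo "Expected version '${EXPECTED_VERSION}', but got '${actual_version}'"
    exit 1
fi
```
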
.github/workflows/reusable-release.yml (9 changed lines)

@@ -46,7 +46,7 @@ jobs:
  release:
  runs-on: heavy
  container:
- image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+ image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f
  env:
  GH_REPO: ${{ github.repository }}
  GH_TOKEN: ${{ github.token }}
@@ -60,9 +60,9 @@ jobs:
  fetch-depth: 0

  - name: Prepare runner
- uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
+ uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6
  with:
- enable_ccache: false
+ disable_ccache: true

  - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
  with:
@@ -91,7 +91,8 @@ jobs:
  LAST_TAG="$(gh release view --json tagName -q .tagName --repo XRPLF/clio)"
  LAST_TAG_COMMIT="$(git rev-parse $LAST_TAG)"
  BASE_COMMIT="$(git merge-base HEAD $LAST_TAG_COMMIT)"
- git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b|-rc" >> "${RUNNER_TEMP}/release_notes.md"
+ git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b|-rc"
+ cat CHANGELOG.md >> "${RUNNER_TEMP}/release_notes.md"

  - name: Upload release notes
  uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0

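Read together with the changelog-configuration hunk later in this diff (which adds `output = "CHANGELOG.md"`), the new step lets git-cliff write to its configured output file and then appends that file to the notes artifact. A hedged sketch of the resulting flow, assuming git-cliff and gh are installed and configured as above:

```sh
# Sketch of the updated release-notes flow.
LAST_TAG="$(gh release view --json tagName -q .tagName --repo XRPLF/clio)"
LAST_TAG_COMMIT="$(git rev-parse "${LAST_TAG}")"
BASE_COMMIT="$(git merge-base HEAD "${LAST_TAG_COMMIT}")"

# With `output = "CHANGELOG.md"` configured, git-cliff writes the file itself
# instead of relying on stdout redirection.
git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b|-rc"
cat CHANGELOG.md >> "${RUNNER_TEMP:-/tmp}/release_notes.md"
```
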
.github/workflows/reusable-test.yml (18 changed lines)

@@ -126,17 +126,11 @@ jobs:
  if: ${{ runner.os == 'macOS' }}
  uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf

- - name: Delete and start colima (macOS)
- # This is a temporary workaround for colima issues on macOS runners
+ - name: Spin up scylladb
  if: ${{ runner.os == 'macOS' }}
+ timeout-minutes: 3
  run: |
- colima delete --force
- colima start
-
- - name: Spin up scylladb (macOS)
- if: ${{ runner.os == 'macOS' }}
- timeout-minutes: 1
- run: |
+ docker rm --force scylladb || true
  docker run \
  --detach \
  --name scylladb \
@@ -148,12 +142,8 @@ jobs:
  --memory 16G \
  scylladb/scylla

- - name: Wait for scylladb container to be healthy (macOS)
- if: ${{ runner.os == 'macOS' }}
- timeout-minutes: 1
- run: |
  until [ "$(docker inspect -f '{{.State.Health.Status}}' scylladb)" == "healthy" ]; do
- sleep 1
+ sleep 5
  done

  - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0

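The readiness check that survives this refactor simply polls Docker's health status for the container. A stand-alone sketch, with the caveat that the workflow's actual `docker run` passes more flags than this hunk shows, so the run line here is an assumption:

```sh
# Remove any stale container, start ScyllaDB, then wait until Docker reports it healthy.
docker rm --force scylladb || true
docker run --detach --name scylladb --memory 16G scylladb/scylla

until [ "$(docker inspect -f '{{.State.Health.Status}}' scylladb)" == "healthy" ]; do
    sleep 5
done
```
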
.github/workflows/sanitizers.yml (2 changed lines)

@@ -44,7 +44,7 @@ jobs:
  uses: ./.github/workflows/reusable-build-test.yml
  with:
  runs_on: heavy
- container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+ container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }'
  download_ccache: false
  upload_ccache: false
  conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}

.github/workflows/update-docker-ci.yml (4 changed lines)

@@ -141,7 +141,7 @@ jobs:
  files: "docker/compilers/gcc/**"

  - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
+ uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

  - name: Login to GitHub Container Registry
  if: ${{ github.event_name != 'pull_request' }}
@@ -290,7 +290,7 @@ jobs:
  files: "docker/tools/**"

  - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
+ uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

  - name: Login to GitHub Container Registry
  if: ${{ github.event_name != 'pull_request' }}

.github/workflows/upload-conan-deps.yml (4 changed lines)

@@ -78,9 +78,9 @@ jobs:
  - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

  - name: Prepare runner
- uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
+ uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6
  with:
- enable_ccache: false
+ disable_ccache: true

  - name: Setup conan on macOS
  if: ${{ runner.os == 'macOS' }}

.gitignore (1 changed line)

@@ -4,7 +4,6 @@
  .build
  .cache
  .vscode
- .zed
  .python-version
  .DS_Store
  .sanitizer-report

@@ -29,12 +29,12 @@ repos:

  # Autoformat: YAML, JSON, Markdown, etc.
  - repo: https://github.com/rbubley/mirrors-prettier
- rev: 14abee445aea04b39069c19b4bd54efff6775819 # frozen: v3.7.4
+ rev: 3c603eae8faac85303ae675fd33325cff699a797 # frozen: v3.7.3
  hooks:
  - id: prettier

  - repo: https://github.com/igorshubovych/markdownlint-cli
- rev: 76b3d32d3f4b965e1d6425253c59407420ae2c43 # frozen: v0.47.0
+ rev: c8fd5003603dd6f12447314ecd935ba87c09aff5 # frozen: v0.46.0
  hooks:
  - id: markdownlint-fix
  exclude: LICENSE.md
@@ -59,7 +59,7 @@ repos:
  ]

  - repo: https://github.com/psf/black-pre-commit-mirror
- rev: 831207fd435b47aeffdf6af853097e64322b4d44 # frozen: 25.12.0
+ rev: 2892f1f81088477370d4fbc56545c05d33d2493f # frozen: 25.11.0
  hooks:
  - id: black

@@ -94,7 +94,7 @@ repos:
  language: script

  - repo: https://github.com/pre-commit/mirrors-clang-format
- rev: 75ca4ad908dc4a99f57921f29b7e6c1521e10b26 # frozen: v21.1.8
+ rev: 4c26f99731e7c22a047c35224150ee9e43d7c03e # frozen: v21.1.6
  hooks:
  - id: clang-format
  args: [--style=file]

@@ -16,5 +16,5 @@ target_sources(
  include(deps/gbench)

  target_include_directories(clio_benchmark PRIVATE .)
- target_link_libraries(clio_benchmark PRIVATE clio_rpc clio_util benchmark::benchmark_main spdlog::spdlog)
+ target_link_libraries(clio_benchmark PUBLIC clio_util clio_rpc benchmark::benchmark_main spdlog::spdlog)
  set_target_properties(clio_benchmark PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})

@@ -28,17 +28,19 @@
  #include "util/prometheus/Prometheus.hpp"

  #include <benchmark/benchmark.h>
+ #include <boost/asio.hpp>
+ #include <boost/asio/spawn.hpp>
  #include <boost/asio/steady_timer.hpp>
+ #include <boost/asio/thread_pool.hpp>
+ #include <boost/json.hpp>
+ #include <boost/json/object.hpp>

- #include <algorithm>
  #include <atomic>
  #include <cassert>
  #include <chrono>
  #include <cstddef>
  #include <cstdint>
  #include <mutex>
- #include <thread>
- #include <vector>

  using namespace rpc;
  using namespace util::config;
@@ -78,56 +80,36 @@ benchmarkWorkQueue(benchmark::State& state)
  {
  init();

- auto const wqThreads = static_cast<uint32_t>(state.range(0));
- auto const maxQueueSize = static_cast<uint32_t>(state.range(1));
- auto const clientThreads = static_cast<uint32_t>(state.range(2));
- auto const itemsPerClient = static_cast<uint32_t>(state.range(3));
- auto const clientProcessingMs = static_cast<uint32_t>(state.range(4));
+ auto const total = static_cast<size_t>(state.range(0));
+ auto const numThreads = static_cast<uint32_t>(state.range(1));
+ auto const maxSize = static_cast<uint32_t>(state.range(2));
+ auto const delayMs = static_cast<uint32_t>(state.range(3));

  for (auto _ : state) {
  std::atomic_size_t totalExecuted = 0uz;
  std::atomic_size_t totalQueued = 0uz;

  state.PauseTiming();
- WorkQueue queue(wqThreads, maxQueueSize);
+ WorkQueue queue(numThreads, maxSize);
  state.ResumeTiming();

- std::vector<std::thread> threads;
- threads.reserve(clientThreads);
-
- for (auto t = 0uz; t < clientThreads; ++t) {
- threads.emplace_back([&] {
- for (auto i = 0uz; i < itemsPerClient; ++i) {
- totalQueued += static_cast<std::size_t>(queue.postCoro(
- [&clientProcessingMs, &totalExecuted](auto yield) {
- ++totalExecuted;
-
- boost::asio::steady_timer timer(
- yield.get_executor(), std::chrono::milliseconds{clientProcessingMs}
- );
- timer.async_wait(yield);
-
- std::this_thread::sleep_for(std::chrono::microseconds{10});
- },
- /* isWhiteListed = */ false
- ));
- }
- });
+ for (auto i = 0uz; i < total; ++i) {
+ totalQueued += static_cast<std::size_t>(queue.postCoro(
+ [&delayMs, &totalExecuted](auto yield) {
+ ++totalExecuted;
+
+ boost::asio::steady_timer timer(yield.get_executor(), std::chrono::milliseconds{delayMs});
+ timer.async_wait(yield);
+ },
+ /* isWhiteListed = */ false
+ ));
  }

- for (auto& t : threads)
- t.join();
-
  queue.stop();

  ASSERT(totalExecuted == totalQueued, "Totals don't match");
- ASSERT(totalQueued <= itemsPerClient * clientThreads, "Queued more than requested");
- if (maxQueueSize == 0) {
- ASSERT(totalQueued == itemsPerClient * clientThreads, "Queued exactly the expected amount");
- } else {
- ASSERT(totalQueued >= std::min(maxQueueSize, itemsPerClient * clientThreads), "Queued less than expected");
- }
+ ASSERT(totalQueued <= total, "Queued more than requested");
+ ASSERT(totalQueued >= maxSize, "Queued less than maxSize");
  }
  }

@@ -141,5 +123,5 @@ benchmarkWorkQueue(benchmark::State& state)
  */
  // TODO: figure out what happens on 1 thread
  BENCHMARK(benchmarkWorkQueue)
- ->ArgsProduct({{2, 4, 8, 16}, {0, 5'000}, {4, 8, 16}, {1'000, 10'000}, {10, 100, 250}})
+ ->ArgsProduct({{1'000, 10'000, 100'000}, {2, 4, 8}, {0, 5'000}, {10, 100, 250}})
  ->Unit(benchmark::kMillisecond);

@@ -49,6 +49,8 @@ postprocessors = [
  ]
  # render body even when there are no releases to process
  # render_always = true
+ # output file path
+ output = "CHANGELOG.md"

  [git]
  # parse the commits based on https://www.conventionalcommits.org

@@ -1,42 +1,42 @@
  find_package(Git REQUIRED)

- if (DEFINED ENV{GITHUB_BRANCH_NAME})
- set(GIT_BUILD_BRANCH $ENV{GITHUB_BRANCH_NAME})
- set(GIT_COMMIT_HASH $ENV{GITHUB_HEAD_SHA})
- else ()
- set(GIT_COMMAND branch --show-current)
- execute_process(
- COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_BUILD_BRANCH
- OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
- )
-
- set(GIT_COMMAND rev-parse HEAD)
- execute_process(
- COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMIT_HASH
- OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
- )
- endif ()
-
+ set(GIT_COMMAND describe --tags --exact-match)
  execute_process(
- COMMAND date +%Y%m%d%H%M%S WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BUILD_DATE
- OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
+ COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+ OUTPUT_VARIABLE TAG
+ RESULT_VARIABLE RC
+ ERROR_VARIABLE ERR
+ OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_STRIP_TRAILING_WHITESPACE
  )

- message(STATUS "Git branch: ${GIT_BUILD_BRANCH}")
- message(STATUS "Git commit hash: ${GIT_COMMIT_HASH}")
- message(STATUS "Build date: ${BUILD_DATE}")
- if (DEFINED ENV{FORCE_CLIO_VERSION} AND NOT "$ENV{FORCE_CLIO_VERSION}" STREQUAL "")
- message(STATUS "Using explicitly provided '${FORCE_CLIO_VERSION}' as Clio version")
-
- set(CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}")
- set(DOC_CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}")
+ if (RC EQUAL 0)
+ message(STATUS "Found tag '${TAG}' in git. Will use it as Clio version")
+ set(CLIO_VERSION "${TAG}")
+ set(DOC_CLIO_VERSION "${TAG}")
  else ()
- message(STATUS "Using 'YYYYMMDDHMS-<branch>-<git short rev>' as Clio version")
-
- string(SUBSTRING ${GIT_COMMIT_HASH} 0 7 GIT_COMMIT_HASH_SHORT)
-
- set(CLIO_VERSION "${BUILD_DATE}-${GIT_BUILD_BRANCH}-${GIT_COMMIT_HASH_SHORT}")
+ message(STATUS "Error finding tag in git: ${ERR}")
+ message(STATUS "Will use 'YYYYMMDDHMS-<branch>-<git-rev>' as Clio version")
+
+ set(GIT_COMMAND show -s --date=format:%Y%m%d%H%M%S --format=%cd)
+ execute_process(
+ COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE DATE
+ OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
+ )
+
+ set(GIT_COMMAND branch --show-current)
+ execute_process(
+ COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BRANCH
+ OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
+ )
+
+ set(GIT_COMMAND rev-parse --short HEAD)
+ execute_process(
+ COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE REV
+ OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
+ )
+
+ set(CLIO_VERSION "${DATE}-${BRANCH}-${REV}")
  set(DOC_CLIO_VERSION "develop")
  endif ()

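The new CMake logic above is easiest to read as a shell pipeline: if HEAD sits exactly on a tag, the tag becomes the version; otherwise the version is assembled from the commit date, branch, and short revision. An equivalent sketch in plain git commands (illustration only, not part of the change itself):

```sh
# Mirror of the CMake version detection: a tag at HEAD wins, otherwise fall
# back to <commit-date>-<branch>-<short-rev>.
if TAG="$(git describe --tags --exact-match 2>/dev/null)"; then
    echo "CLIO_VERSION=${TAG}"
else
    DATE="$(git show -s --date=format:'%Y%m%d%H%M%S' --format=%cd)"
    BRANCH="$(git branch --show-current)"
    REV="$(git rev-parse --short HEAD)"
    echo "CLIO_VERSION=${DATE}-${BRANCH}-${REV}"
fi
```
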
conan.lock (41 changed lines)

@@ -1,42 +1,42 @@
  {
  "version": "0.5",
  "requires": [
- "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1765850150.075",
+ "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
- "xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1765850149.987",
+ "xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
  "xrpl/3.0.0#534d3f65a336109eee929b88962bae4e%1765375071.547",
- "sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1765850149.926",
+ "sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
- "spdlog/1.17.0#bcbaaf7147bda6ad24ffbd1ac3d7142c%1767636069.964",
+ "spdlog/1.16.0#942c2c39562ae25ba575d9c8e2bdf3b6%1763984117.108",
- "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1765850149.46",
+ "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
- "re2/20230301#ca3b241baec15bd31ea9187150e0b333%1765850148.103",
+ "re2/20230301#ca3b241baec15bd31ea9187150e0b333%1764175362.029",
  "rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1754325007.656",
  "protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88%1764175361.456",
  "openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729",
- "nudb/2.0.9#fb8dfd1a5557f5e0528114c2da17721e%1765850143.957",
+ "nudb/2.0.9#fb8dfd1a5557f5e0528114c2da17721e%1763150366.909",
  "minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1754325004.374",
- "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1765850143.914",
+ "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
  "libuv/1.46.0#dc28c1f653fa197f00db5b577a6f6011%1754325003.592",
- "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1765842973.492",
+ "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
- "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1765842973.03",
+ "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03",
- "libarchive/3.8.1#ffee18995c706e02bf96e7a2f7042e0d%1765850144.736",
+ "libarchive/3.8.1#ffee18995c706e02bf96e7a2f7042e0d%1764175360.142",
  "http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1754325001.385",
- "gtest/1.17.0#5224b3b3ff3b4ce1133cbdd27d53ee7d%1755784855.585",
+ "gtest/1.14.0#f8f0757a574a8dd747d16af62d6eb1b7%1754325000.842",
  "grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
  "fmt/12.1.0#50abab23274d56bb8f42c94b3b9a40c7%1763984116.926",
  "doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819",
- "date/3.0.4#862e11e80030356b53c2c38599ceb32b%1765850143.772",
+ "date/3.0.4#862e11e80030356b53c2c38599ceb32b%1763584497.32",
  "cassandra-cpp-driver/2.17.0#bd3934138689482102c265d01288a316%1764175359.611",
- "c-ares/1.34.5#5581c2b62a608b40bb85d965ab3ec7c8%1765850144.336",
+ "c-ares/1.34.5#5581c2b62a608b40bb85d965ab3ec7c8%1764175359.429",
- "bzip2/1.0.8#c470882369c2d95c5c77e970c0c7e321%1765850143.837",
+ "bzip2/1.0.8#c470882369c2d95c5c77e970c0c7e321%1764175359.429",
  "boost/1.83.0#91d8b1572534d2c334d6790e3c34d0c1%1764175359.61",
  "benchmark/1.9.4#ce4403f7a24d3e1f907cd9da4b678be4%1754578869.672",
  "abseil/20230802.1#90ba607d4ee8fb5fb157c3db540671fc%1764175359.429"
  ],
  "build_requires": [
- "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1765850150.075",
+ "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
  "protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88%1764175361.456",
- "cmake/4.2.0#ae0a44f44a1ef9ab68fd4b3e9a1f8671%1765850153.937",
+ "cmake/4.2.0#ae0a44f44a1ef9ab68fd4b3e9a1f8671%1764175359.44",
- "cmake/3.31.10#313d16a1aa16bbdb2ca0792467214b76%1765850153.479",
+ "cmake/3.31.10#313d16a1aa16bbdb2ca0792467214b76%1764175359.429",
- "b2/5.3.3#107c15377719889654eb9a162a673975%1765850144.355"
+ "b2/5.3.3#107c15377719889654eb9a162a673975%1756234226.28"
  ],
  "python_requires": [],
  "overrides": {
@@ -53,6 +53,9 @@
  ],
  "sqlite3/3.44.2": [
  "sqlite3/3.49.1"
+ ],
+ "fmt/12.0.0": [
+ "fmt/12.1.0"
  ]
  },
  "config_requires": []

conanfile.py (20 changed lines)

@@ -14,37 +14,37 @@ class ClioConan(ConanFile):
  requires = [
  "boost/1.83.0",
  "cassandra-cpp-driver/2.17.0",
- "fmt/12.1.0",
- "grpc/1.50.1",
- "libbacktrace/cci.20210118",
- "openssl/1.1.1w",
  "protobuf/3.21.12",
- "spdlog/1.17.0",
+ "grpc/1.50.1",
+ "openssl/1.1.1w",
  "xrpl/3.0.0",
  "zlib/1.3.1",
+ "libbacktrace/cci.20210118",
+ "spdlog/1.16.0",
  ]

  default_options = {
+ "xrpl/*:tests": False,
+ "xrpl/*:rocksdb": False,
  "cassandra-cpp-driver/*:shared": False,
  "date/*:header_only": True,
- "grpc/*:secure": True,
  "grpc/*:shared": False,
- "gtest/*:no_main": True,
+ "grpc/*:secure": True,
  "libpq/*:shared": False,
  "lz4/*:shared": False,
  "openssl/*:shared": False,
  "protobuf/*:shared": False,
  "protobuf/*:with_zlib": True,
  "snappy/*:shared": False,
- "xrpl/*:rocksdb": False,
- "xrpl/*:tests": False,
+ "gtest/*:no_main": True,
  }

  exports_sources = ("CMakeLists.txt", "cmake/*", "src/*")

  def requirements(self):
- self.requires("gtest/1.17.0")
+ self.requires("gtest/1.14.0")
  self.requires("benchmark/1.9.4")
+ self.requires("fmt/12.1.0", force=True)

  def configure(self):
  if self.settings.compiler == "apple-clang":

@@ -54,7 +54,7 @@ RUN pip install -q --no-cache-dir \
  # lxml 6.0.0 is not compatible with our image
  'lxml<6.0.0' \
  cmake \
- conan==2.24.0 \
+ conan==2.22.1 \
  gcovr \
  # We're adding pre-commit to this image as well,
  # because clang-tidy workflow requires it

@@ -5,15 +5,15 @@ It is used in [Clio Github Actions](https://github.com/XRPLF/clio/actions) but c

  The image is based on Ubuntu 20.04 and contains:

- - ccache 4.12.2
+ - ccache 4.12.1
  - Clang 19
  - ClangBuildAnalyzer 1.6.0
- - Conan 2.24.0
+ - Conan 2.22.1
- - Doxygen 1.16.1
+ - Doxygen 1.15.0
  - GCC 15.2.0
- - GDB 17.1
+ - GDB 16.3
- - gh 2.83.2
+ - gh 2.82.1
- - git-cliff 2.11.0
+ - git-cliff 2.10.1
  - mold 2.40.4
  - Ninja 1.13.2
  - Python 3.8

@@ -1,6 +1,6 @@
  services:
  clio_develop:
- image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+ image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f
  volumes:
  - clio_develop_conan_data:/root/.conan2/p
  - clio_develop_ccache:/root/.ccache

@@ -42,7 +42,7 @@ RUN wget --progress=dot:giga "https://github.com/rui314/mold/archive/refs/tags/v
  && ninja install \
  && rm -rf /tmp/* /var/tmp/*

- ARG CCACHE_VERSION=4.12.2
+ ARG CCACHE_VERSION=4.12.1
  RUN wget --progress=dot:giga "https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}.tar.gz" \
  && tar xf "ccache-${CCACHE_VERSION}.tar.gz" \
  && cd "ccache-${CCACHE_VERSION}" \
@@ -59,7 +59,7 @@ RUN apt-get update \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/*

- ARG DOXYGEN_VERSION=1.16.1
+ ARG DOXYGEN_VERSION=1.15.0
  RUN wget --progress=dot:giga "https://github.com/doxygen/doxygen/releases/download/Release_${DOXYGEN_VERSION//./_}/doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
  && tar xf "doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
  && cd "doxygen-${DOXYGEN_VERSION}" \
@@ -79,13 +79,13 @@ RUN wget --progress=dot:giga "https://github.com/aras-p/ClangBuildAnalyzer/archi
  && ninja install \
  && rm -rf /tmp/* /var/tmp/*

- ARG GIT_CLIFF_VERSION=2.11.0
+ ARG GIT_CLIFF_VERSION=2.10.1
  RUN wget --progress=dot:giga "https://github.com/orhun/git-cliff/releases/download/v${GIT_CLIFF_VERSION}/git-cliff-${GIT_CLIFF_VERSION}-x86_64-unknown-linux-musl.tar.gz" \
  && tar xf git-cliff-${GIT_CLIFF_VERSION}-x86_64-unknown-linux-musl.tar.gz \
  && mv git-cliff-${GIT_CLIFF_VERSION}/git-cliff /usr/local/bin/git-cliff \
  && rm -rf /tmp/* /var/tmp/*

- ARG GH_VERSION=2.83.2
+ ARG GH_VERSION=2.82.1
  RUN wget --progress=dot:giga "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz" \
  && tar xf gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \
  && mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/local/bin/gh \
@@ -100,7 +100,7 @@ RUN apt-get update \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/*

- ARG GDB_VERSION=17.1
+ ARG GDB_VERSION=16.3
  RUN wget --progress=dot:giga "https://sourceware.org/pub/gdb/releases/gdb-${GDB_VERSION}.tar.gz" \
  && tar xf "gdb-${GDB_VERSION}.tar.gz" \
  && cd "gdb-${GDB_VERSION}" \

@@ -175,7 +175,7 @@ Open the `index.html` file in your browser to see the documentation pages.
  It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine.

  ```sh
- docker run -it ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+ docker run -it ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f
  git clone https://github.com/XRPLF/clio
  cd clio
  ```

@@ -457,14 +457,6 @@ This document provides a list of all available Clio configuration properties in
  - **Constraints**: None
  - **Description**: Max allowed difference between the latest sequence in DB and in cache file. If the cache file is too old (contains too low latest sequence) Clio will reject using it.

- ### cache.file.async_save
-
- - **Required**: True
- - **Type**: boolean
- - **Default value**: `False`
- - **Constraints**: None
- - **Description**: When false, Clio waits for cache saving to finish before shutting down. When true, cache saving runs in parallel with other shutdown operations.
-
  ### log.channels.[].channel

  - **Required**: False

@@ -77,10 +77,7 @@ CliArgs::parse(int argc, char const* argv[])
  }

  if (parsed.contains("version")) {
- std::cout << util::build::getClioFullVersionString() << '\n'
- << "Git commit hash: " << util::build::getGitCommitHash() << '\n'
- << "Git build branch: " << util::build::getGitBuildBranch() << '\n'
- << "Build date: " << util::build::getBuildDate() << '\n';
+ std::cout << util::build::getClioFullVersionString() << '\n';
  return Action{Action::Exit{EXIT_SUCCESS}};
  }

@@ -29,8 +29,6 @@
 #include "etl/ETLService.hpp"
 #include "etl/LoadBalancer.hpp"
 #include "etl/NetworkValidatedLedgers.hpp"
-#include "etl/SystemState.hpp"
-#include "etl/WriterState.hpp"
 #include "feed/SubscriptionManager.hpp"
 #include "migration/MigrationInspectorFactory.hpp"
 #include "rpc/Counters.hpp"
@@ -123,11 +121,7 @@ ClioApplication::run(bool const useNgWebServer)
 // Interface to the database
 auto backend = data::makeBackend(config_, cache);
 
-auto systemState = etl::SystemState::makeSystemState(config_);
-
-cluster::ClusterCommunicationService clusterCommunicationService{
-backend, std::make_unique<etl::WriterState>(systemState)
-};
+cluster::ClusterCommunicationService clusterCommunicationService{backend};
 clusterCommunicationService.run();
 
 auto const amendmentCenter = std::make_shared<data::AmendmentCenter const>(backend);
@@ -157,9 +151,7 @@ ClioApplication::run(bool const useNgWebServer)
 );
 
 // ETL is responsible for writing and publishing to streams. In read-only mode, ETL only publishes
-auto etl = etl::ETLService::makeETLService(
-config_, std::move(systemState), ctx, backend, subscriptions, balancer, ledgers
-);
+auto etl = etl::ETLService::makeETLService(config_, ctx, backend, subscriptions, balancer, ledgers);
 
 auto workQueue = rpc::WorkQueue::makeWorkQueue(config_);
 auto counters = rpc::Counters::makeCounters(workQueue);
@@ -205,16 +197,7 @@ ClioApplication::run(bool const useNgWebServer)
 }
 
 appStopper_.setOnStop(
-Stopper::makeOnStopCallback(
-httpServer.value(),
-*balancer,
-*etl,
-*subscriptions,
-*backend,
-cacheSaver,
-clusterCommunicationService,
-ioc
-)
+Stopper::makeOnStopCallback(httpServer.value(), *balancer, *etl, *subscriptions, *backend, cacheSaver, ioc)
 );
 
 // Blocks until stopped.
@@ -230,9 +213,7 @@ ClioApplication::run(bool const useNgWebServer)
 
 auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler, cache);
 appStopper_.setOnStop(
-Stopper::makeOnStopCallback(
-*httpServer, *balancer, *etl, *subscriptions, *backend, cacheSaver, clusterCommunicationService, ioc
-)
+Stopper::makeOnStopCallback(*httpServer, *balancer, *etl, *subscriptions, *backend, cacheSaver, ioc)
 );
 
 // Blocks until stopped.
@@ -19,7 +19,6 @@
 
 #pragma once
 
-#include "cluster/Concepts.hpp"
 #include "data/BackendInterface.hpp"
 #include "data/LedgerCacheSaver.hpp"
 #include "etl/ETLServiceInterface.hpp"
@@ -83,14 +82,10 @@ public:
 * @param subscriptions The subscription manager to stop.
 * @param backend The backend to stop.
 * @param cacheSaver The ledger cache saver
-* @param clusterCommunicationService The cluster communication service to stop.
 * @param ioc The io_context to stop.
 * @return The callback to be called on application stop.
 */
-template <
-web::SomeServer ServerType,
-data::SomeLedgerCacheSaver LedgerCacheSaverType,
-cluster::SomeClusterCommunicationService ClusterCommunicationServiceType>
+template <web::SomeServer ServerType, data::SomeLedgerCacheSaver LedgerCacheSaverType>
 static std::function<void(boost::asio::yield_context)>
 makeOnStopCallback(
 ServerType& server,
@@ -99,7 +94,6 @@ public:
 feed::SubscriptionManagerInterface& subscriptions,
 data::BackendInterface& backend,
 LedgerCacheSaverType& cacheSaver,
-ClusterCommunicationServiceType& clusterCommunicationService,
 boost::asio::io_context& ioc
 )
 {
@@ -117,8 +111,6 @@ public:
 });
 coroutineGroup.asyncWait(yield);
 
-clusterCommunicationService.stop();
-
 etl.stop();
 LOG(util::LogService::info()) << "ETL stopped";
 
@@ -1,141 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-This file is part of clio: https://github.com/XRPLF/clio
-Copyright (c) 2025, the clio developers.
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#include "cluster/Backend.hpp"
-
-#include "cluster/ClioNode.hpp"
-#include "data/BackendInterface.hpp"
-#include "etl/WriterState.hpp"
-
-#include <boost/asio/bind_cancellation_slot.hpp>
-#include <boost/asio/cancellation_type.hpp>
-#include <boost/asio/error.hpp>
-#include <boost/asio/execution_context.hpp>
-#include <boost/asio/executor.hpp>
-#include <boost/asio/spawn.hpp>
-#include <boost/asio/steady_timer.hpp>
-#include <boost/asio/thread_pool.hpp>
-#include <boost/asio/use_future.hpp>
-#include <boost/json/parse.hpp>
-#include <boost/json/serialize.hpp>
-#include <boost/json/value.hpp>
-#include <boost/json/value_from.hpp>
-#include <boost/json/value_to.hpp>
-#include <boost/uuid/random_generator.hpp>
-#include <boost/uuid/uuid.hpp>
-#include <fmt/format.h>
-
-#include <chrono>
-#include <memory>
-#include <utility>
-#include <vector>
-
-namespace cluster {
-
-Backend::Backend(
-boost::asio::thread_pool& ctx,
-std::shared_ptr<data::BackendInterface> backend,
-std::unique_ptr<etl::WriterStateInterface const> writerState,
-std::chrono::steady_clock::duration readInterval,
-std::chrono::steady_clock::duration writeInterval
-)
-: backend_(std::move(backend))
-, writerState_(std::move(writerState))
-, readerTask_(readInterval, ctx)
-, writerTask_(writeInterval, ctx)
-, selfUuid_(std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator{}()))
-{
-}
-
-void
-Backend::run()
-{
-readerTask_.run([this](boost::asio::yield_context yield) {
-auto clusterData = doRead(yield);
-onNewState_(selfUuid_, std::make_shared<ClusterData>(std::move(clusterData)));
-});
-
-writerTask_.run([this]() { doWrite(); });
-}
-
-Backend::~Backend()
-{
-stop();
-}
-
-void
-Backend::stop()
-{
-readerTask_.stop();
-writerTask_.stop();
-}
-
-ClioNode::CUuid
-Backend::selfId() const
-{
-return selfUuid_;
-}
-
-Backend::ClusterData
-Backend::doRead(boost::asio::yield_context yield)
-{
-BackendInterface::ClioNodesDataFetchResult expectedResult;
-try {
-expectedResult = backend_->fetchClioNodesData(yield);
-} catch (...) {
-expectedResult = std::unexpected{"Failed to fetch Clio nodes data"};
-}
-
-if (!expectedResult.has_value()) {
-return std::unexpected{std::move(expectedResult).error()};
-}
-
-std::vector<ClioNode> otherNodesData;
-for (auto const& [uuid, nodeDataStr] : expectedResult.value()) {
-if (uuid == *selfUuid_) {
-continue;
-}
-
-boost::system::error_code errorCode;
-auto const json = boost::json::parse(nodeDataStr, errorCode);
-if (errorCode.failed()) {
-return std::unexpected{fmt::format("Error parsing json from DB: {}", nodeDataStr)};
-}
-
-auto expectedNodeData = boost::json::try_value_to<ClioNode>(json);
-if (expectedNodeData.has_error()) {
-return std::unexpected{fmt::format("Error converting json to ClioNode: {}", nodeDataStr)};
-}
-*expectedNodeData->uuid = uuid;
-otherNodesData.push_back(std::move(expectedNodeData).value());
-}
-otherNodesData.push_back(ClioNode::from(selfUuid_, *writerState_));
-return otherNodesData;
-}
-
-void
-Backend::doWrite()
-{
-auto const selfData = ClioNode::from(selfUuid_, *writerState_);
-boost::json::value jsonValue{};
-boost::json::value_from(selfData, jsonValue);
-backend_->writeNodeMessage(*selfData.uuid, boost::json::serialize(jsonValue.as_object()));
-}
-
-} // namespace cluster
@@ -1,147 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-This file is part of clio: https://github.com/XRPLF/clio
-Copyright (c) 2025, the clio developers.
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#pragma once
-
-#include "cluster/ClioNode.hpp"
-#include "cluster/impl/RepeatedTask.hpp"
-#include "data/BackendInterface.hpp"
-#include "etl/WriterState.hpp"
-#include "util/log/Logger.hpp"
-
-#include <boost/asio/any_io_executor.hpp>
-#include <boost/asio/cancellation_signal.hpp>
-#include <boost/asio/execution_context.hpp>
-#include <boost/asio/executor.hpp>
-#include <boost/asio/spawn.hpp>
-#include <boost/asio/strand.hpp>
-#include <boost/asio/thread_pool.hpp>
-#include <boost/signals2/connection.hpp>
-#include <boost/signals2/signal.hpp>
-#include <boost/signals2/variadic_signal.hpp>
-#include <boost/uuid/uuid.hpp>
-
-#include <chrono>
-#include <concepts>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace cluster {
-
-/**
-* @brief Backend communication handler for cluster state synchronization.
-*
-* This class manages reading and writing cluster state information to/from the backend database.
-* It periodically reads the state of other nodes in the cluster and writes the current node's state,
-* enabling cluster-wide coordination and awareness.
-*/
-class Backend {
-public:
-/** @brief Type representing cluster data result - either a vector of nodes or an error message */
-using ClusterData = std::expected<std::vector<ClioNode>, std::string>;
-
-private:
-util::Logger log_{"ClusterCommunication"};
-
-std::shared_ptr<data::BackendInterface> backend_;
-std::unique_ptr<etl::WriterStateInterface const> writerState_;
-
-impl::RepeatedTask<boost::asio::thread_pool> readerTask_;
-impl::RepeatedTask<boost::asio::thread_pool> writerTask_;
-
-ClioNode::Uuid selfUuid_;
-
-boost::signals2::signal<void(ClioNode::CUuid, std::shared_ptr<ClusterData const>)> onNewState_;
-
-public:
-/**
-* @brief Construct a Backend communication handler.
-*
-* @param ctx The execution context for asynchronous operations
-* @param backend Interface to the backend database
-* @param writerState State indicating whether this node is writing to the database
-* @param readInterval How often to read cluster state from the backend
-* @param writeInterval How often to write this node's state to the backend
-*/
-Backend(
-boost::asio::thread_pool& ctx,
-std::shared_ptr<data::BackendInterface> backend,
-std::unique_ptr<etl::WriterStateInterface const> writerState,
-std::chrono::steady_clock::duration readInterval,
-std::chrono::steady_clock::duration writeInterval
-);
-
-~Backend();
-
-Backend(Backend&&) = delete;
-Backend&
-operator=(Backend&&) = delete;
-Backend(Backend const&) = delete;
-Backend&
-operator=(Backend const&) = delete;
-
-/**
-* @brief Start the backend read and write tasks.
-*
-* Begins periodic reading of cluster state from the backend and writing of this node's state.
-*/
-void
-run();
-
-/**
-* @brief Stop the backend read and write tasks.
-*
-* Stops all periodic tasks and waits for them to complete.
-*/
-void
-stop();
-
-/**
-* @brief Subscribe to new cluster state notifications.
-*
-* @tparam S Callable type accepting (ClioNode::cUUID, ClusterData)
-* @param s Subscriber callback to be invoked when new cluster state is available
-* @return A connection object that can be used to unsubscribe
-*/
-template <typename S>
-requires std::invocable<S, ClioNode::CUuid, std::shared_ptr<ClusterData const>>
-boost::signals2::connection
-subscribeToNewState(S&& s)
-{
-return onNewState_.connect(s);
-}
-
-/**
-* @brief Get the UUID of this node in the cluster.
-*
-* @return The UUID of this node.
-*/
-ClioNode::CUuid
-selfId() const;
-
-private:
-ClusterData
-doRead(boost::asio::yield_context yield);
-
-void
-doWrite();
-};
-
-} // namespace cluster
@@ -1,7 +1,5 @@
 add_library(clio_cluster)
 
-target_sources(
-clio_cluster PRIVATE Backend.cpp ClioNode.cpp ClusterCommunicationService.cpp Metrics.cpp WriterDecider.cpp
-)
+target_sources(clio_cluster PRIVATE ClioNode.cpp ClusterCommunicationService.cpp)
 
 target_link_libraries(clio_cluster PRIVATE clio_util clio_data)
@@ -19,7 +19,6 @@
 
 #include "cluster/ClioNode.hpp"
 
-#include "etl/WriterState.hpp"
 #include "util/TimeUtils.hpp"
 
 #include <boost/json/conversion.hpp>
@@ -27,72 +26,39 @@
 #include <boost/json/value.hpp>
 #include <boost/uuid/uuid.hpp>
 
-#include <chrono>
-#include <cstdint>
 #include <memory>
 #include <stdexcept>
 #include <string>
 #include <string_view>
-#include <utility>
 
 namespace cluster {
 
 namespace {
 
-struct JsonFields {
+struct Fields {
 static constexpr std::string_view const kUPDATE_TIME = "update_time";
-static constexpr std::string_view const kDB_ROLE = "db_role";
 };
 
 } // namespace
 
-ClioNode
-ClioNode::from(ClioNode::Uuid uuid, etl::WriterStateInterface const& writerState)
-{
-auto const dbRole = [&writerState]() {
-if (writerState.isReadOnly()) {
-return ClioNode::DbRole::ReadOnly;
-}
-if (writerState.isFallback()) {
-return ClioNode::DbRole::Fallback;
-}
-if (writerState.isLoadingCache()) {
-return ClioNode::DbRole::LoadingCache;
-}
-
-return writerState.isWriting() ? ClioNode::DbRole::Writer : ClioNode::DbRole::NotWriter;
-}();
-return ClioNode{.uuid = std::move(uuid), .updateTime = std::chrono::system_clock::now(), .dbRole = dbRole};
-}
-
 void
 tag_invoke(boost::json::value_from_tag, boost::json::value& jv, ClioNode const& node)
 {
 jv = {
-{JsonFields::kUPDATE_TIME, util::systemTpToUtcStr(node.updateTime, ClioNode::kTIME_FORMAT)},
-{JsonFields::kDB_ROLE, static_cast<int64_t>(node.dbRole)}
+{Fields::kUPDATE_TIME, util::systemTpToUtcStr(node.updateTime, ClioNode::kTIME_FORMAT)},
 };
 }
 
 ClioNode
 tag_invoke(boost::json::value_to_tag<ClioNode>, boost::json::value const& jv)
 {
-auto const& updateTimeStr = jv.as_object().at(JsonFields::kUPDATE_TIME).as_string();
+auto const& updateTimeStr = jv.as_object().at(Fields::kUPDATE_TIME).as_string();
 auto const updateTime = util::systemTpFromUtcStr(std::string(updateTimeStr), ClioNode::kTIME_FORMAT);
 if (!updateTime.has_value()) {
 throw std::runtime_error("Failed to parse update time");
 }
 
-auto const dbRoleValue = jv.as_object().at(JsonFields::kDB_ROLE).as_int64();
-if (dbRoleValue > static_cast<int64_t>(ClioNode::DbRole::MAX))
-throw std::runtime_error("Invalid db_role value");
-
-return ClioNode{
-// Json data doesn't contain uuid so leaving it empty here. It will be filled outside of this parsing
-.uuid = std::make_shared<boost::uuids::uuid>(),
-.updateTime = updateTime.value(),
-.dbRole = static_cast<ClioNode::DbRole>(dbRoleValue)
-};
+return ClioNode{.uuid = std::make_shared<boost::uuids::uuid>(), .updateTime = updateTime.value()};
 }
 
 } // namespace cluster
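
The `tag_invoke` overloads in the ClioNode.cpp hunk above are Boost.JSON's customization points for converting a type to and from `boost::json::value`. The following standalone sketch illustrates the same pattern with a hypothetical `Sample` type (it is not part of clio; all names and fields here are illustrative assumptions, and the translation unit must be linked against Boost.JSON):

```cpp
#include <boost/json.hpp>

#include <iostream>
#include <string>

struct Sample {
    std::string name;
    int count = 0;
};

// Serialization customization point, mirroring the value_from_tag overload in the diff above.
void tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Sample const& s)
{
    jv = {{"name", s.name}, {"count", s.count}};
}

// Deserialization customization point, mirroring the value_to_tag overload.
Sample tag_invoke(boost::json::value_to_tag<Sample>, boost::json::value const& jv)
{
    auto const& obj = jv.as_object();
    return Sample{
        boost::json::value_to<std::string>(obj.at("name")),
        static_cast<int>(obj.at("count").as_int64())
    };
}

int main()
{
    Sample const original{"node-a", 3};

    auto const jv = boost::json::value_from(original);         // picks up the value_from_tag overload
    std::cout << boost::json::serialize(jv) << '\n';

    auto const restored = boost::json::value_to<Sample>(jv);    // picks up the value_to_tag overload
    std::cout << restored.name << ' ' << restored.count << '\n';
}
```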
@@ -19,8 +19,6 @@
 
 #pragma once
 
-#include "etl/WriterState.hpp"
-
 #include <boost/json/conversion.hpp>
 #include <boost/json/value.hpp>
 #include <boost/uuid/uuid.hpp>
@@ -39,37 +37,16 @@ struct ClioNode {
 */
 static constexpr char const* kTIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ";
 
-/**
-* @brief Database role of a node in the cluster.
-*
-* Roles are used to coordinate which node writes to the database:
-* - ReadOnly: Node is configured to never write (strict read-only mode)
-* - NotWriter: Node can write but is currently not the designated writer
-* - Writer: Node is actively writing to the database
-* - Fallback: Node is using the fallback writer decision mechanism
-*
-* When any node in the cluster is in Fallback mode, the entire cluster switches
-* from the cluster communication mechanism to the slower but more reliable
-* database-based conflict detection mechanism.
-*/
-enum class DbRole { ReadOnly = 0, LoadingCache = 1, NotWriter = 2, Writer = 3, Fallback = 4, MAX = 4 };
+// enum class WriterRole {
+// ReadOnly,
+// NotWriter,
+// Writer
+// };
 
-using Uuid = std::shared_ptr<boost::uuids::uuid>;
-using CUuid = std::shared_ptr<boost::uuids::uuid const>;
-
-Uuid uuid; ///< The UUID of the node.
+std::shared_ptr<boost::uuids::uuid> uuid; ///< The UUID of the node.
 std::chrono::system_clock::time_point updateTime; ///< The time the data about the node was last updated.
-DbRole dbRole; ///< The database role of the node
 
-/**
-* @brief Create a ClioNode from writer state.
-*
-* @param uuid The UUID of the node
-* @param writerState The writer state to determine the node's database role
-* @return A ClioNode with the current time and role derived from writerState
-*/
-static ClioNode
-from(Uuid uuid, etl::WriterStateInterface const& writerState);
+// WriterRole writerRole;
 };
 
 void
@@ -19,8 +19,11 @@
 
 #include "cluster/ClusterCommunicationService.hpp"
 
+#include "cluster/ClioNode.hpp"
 #include "data/BackendInterface.hpp"
-#include "etl/WriterState.hpp"
+#include "util/Assert.hpp"
+#include "util/Spawn.hpp"
+#include "util/log/Logger.hpp"
 
 #include <boost/asio/bind_cancellation_slot.hpp>
 #include <boost/asio/cancellation_type.hpp>
@@ -38,32 +41,76 @@
 
 #include <chrono>
 #include <ctime>
+#include <latch>
 #include <memory>
+#include <string>
 #include <utility>
+#include <vector>
 
+namespace {
+constexpr auto kTOTAL_WORKERS = 2uz; // 1 reading and 1 writing worker (coroutines)
+} // namespace
+
 namespace cluster {
 
 ClusterCommunicationService::ClusterCommunicationService(
 std::shared_ptr<data::BackendInterface> backend,
-std::unique_ptr<etl::WriterStateInterface> writerState,
 std::chrono::steady_clock::duration readInterval,
 std::chrono::steady_clock::duration writeInterval
 )
-: backend_(ctx_, std::move(backend), writerState->clone(), readInterval, writeInterval)
-, writerDecider_(ctx_, std::move(writerState))
+: backend_(std::move(backend))
+, readInterval_(readInterval)
+, writeInterval_(writeInterval)
+, finishedCountdown_(kTOTAL_WORKERS)
+, selfData_{ClioNode{
+.uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator{}()),
+.updateTime = std::chrono::system_clock::time_point{}
+}}
 {
+nodesInClusterMetric_.set(1); // The node always sees itself
+isHealthy_ = true;
 }
 
 void
 ClusterCommunicationService::run()
 {
-backend_.subscribeToNewState([this](auto&&... args) {
-metrics_.onNewState(std::forward<decltype(args)>(args)...);
+ASSERT(not running_ and not stopped_, "Can only be ran once");
+running_ = true;
 
+util::spawn(strand_, [this](boost::asio::yield_context yield) {
+boost::asio::steady_timer timer(yield.get_executor());
+boost::system::error_code ec;
+
+while (running_) {
+timer.expires_after(readInterval_);
+auto token = cancelSignal_.slot();
+timer.async_wait(boost::asio::bind_cancellation_slot(token, yield[ec]));
+
+if (ec == boost::asio::error::operation_aborted or not running_)
+break;
+
+doRead(yield);
+}
+
+finishedCountdown_.count_down(1);
 });
-backend_.subscribeToNewState([this](auto&&... args) {
-writerDecider_.onNewState(std::forward<decltype(args)>(args)...);
+
+util::spawn(strand_, [this](boost::asio::yield_context yield) {
+boost::asio::steady_timer timer(yield.get_executor());
+boost::system::error_code ec;
+
+while (running_) {
+doWrite();
+timer.expires_after(writeInterval_);
+auto token = cancelSignal_.slot();
+timer.async_wait(boost::asio::bind_cancellation_slot(token, yield[ec]));
+
+if (ec == boost::asio::error::operation_aborted or not running_)
+break;
+}
+
+finishedCountdown_.count_down(1);
 });
-backend_.run();
 }
 
 ClusterCommunicationService::~ClusterCommunicationService()
@@ -74,7 +121,107 @@ ClusterCommunicationService::~ClusterCommunicationService()
 void
 ClusterCommunicationService::stop()
 {
-backend_.stop();
+if (stopped_)
+return;
+
+stopped_ = true;
+
+// for ASAN to see through concurrency correctly we need to exit all coroutines before joining the ctx
+running_ = false;
+
+// cancelSignal_ is not thread safe so we execute emit on the same strand
+boost::asio::spawn(
+strand_, [this](auto&&) { cancelSignal_.emit(boost::asio::cancellation_type::all); }, boost::asio::use_future
+)
+.wait();
+finishedCountdown_.wait();
+
+ctx_.join();
+}
+
+std::shared_ptr<boost::uuids::uuid>
+ClusterCommunicationService::selfUuid() const
+{
+// Uuid never changes so it is safe to copy it without using strand_
+return selfData_.uuid;
+}
+
+ClioNode
+ClusterCommunicationService::selfData() const
+{
+ClioNode result{};
+util::spawn(strand_, [this, &result](boost::asio::yield_context) { result = selfData_; });
+return result;
+}
+
+std::expected<std::vector<ClioNode>, std::string>
+ClusterCommunicationService::clusterData() const
+{
+if (not isHealthy_) {
+return std::unexpected{"Service is not healthy"};
+}
+std::vector<ClioNode> result;
+util::spawn(strand_, [this, &result](boost::asio::yield_context) {
+result = otherNodesData_;
+result.push_back(selfData_);
+});
+return result;
+}
+
+void
+ClusterCommunicationService::doRead(boost::asio::yield_context yield)
+{
+otherNodesData_.clear();
+
+BackendInterface::ClioNodesDataFetchResult expectedResult;
+try {
+expectedResult = backend_->fetchClioNodesData(yield);
+} catch (...) {
+expectedResult = std::unexpected{"Failed to fecth Clio nodes data"};
+}
+
+if (!expectedResult.has_value()) {
+LOG(log_.error()) << "Failed to fetch nodes data";
+isHealthy_ = false;
+return;
+}
+
+// Create a new vector here to not have partially parsed data in otherNodesData_
+std::vector<ClioNode> otherNodesData;
+for (auto const& [uuid, nodeDataStr] : expectedResult.value()) {
+if (uuid == *selfData_.uuid) {
+continue;
+}
+
+boost::system::error_code errorCode;
+auto const json = boost::json::parse(nodeDataStr, errorCode);
+if (errorCode.failed()) {
+LOG(log_.error()) << "Error parsing json from DB: " << nodeDataStr;
+isHealthy_ = false;
+return;
+}
+
+auto expectedNodeData = boost::json::try_value_to<ClioNode>(json);
+if (expectedNodeData.has_error()) {
+LOG(log_.error()) << "Error converting json to ClioNode: " << json;
+isHealthy_ = false;
+return;
+}
+*expectedNodeData->uuid = uuid;
+otherNodesData.push_back(std::move(expectedNodeData).value());
+}
+otherNodesData_ = std::move(otherNodesData);
+nodesInClusterMetric_.set(otherNodesData_.size() + 1);
+isHealthy_ = true;
+}
+
+void
+ClusterCommunicationService::doWrite()
+{
+selfData_.updateTime = std::chrono::system_clock::now();
+boost::json::value jsonValue{};
+boost::json::value_from(selfData_, jsonValue);
+backend_->writeNodeMessage(*selfData_.uuid, boost::json::serialize(jsonValue.as_object()));
 }
 
 } // namespace cluster
@@ -19,12 +19,13 @@
 
 #pragma once
 
-#include "cluster/Backend.hpp"
-#include "cluster/Concepts.hpp"
-#include "cluster/Metrics.hpp"
-#include "cluster/WriterDecider.hpp"
+#include "cluster/ClioNode.hpp"
+#include "cluster/ClusterCommunicationServiceInterface.hpp"
 #include "data/BackendInterface.hpp"
-#include "etl/WriterState.hpp"
+#include "util/log/Logger.hpp"
+#include "util/prometheus/Bool.hpp"
+#include "util/prometheus/Gauge.hpp"
+#include "util/prometheus/Prometheus.hpp"
 
 #include <boost/asio/cancellation_signal.hpp>
 #include <boost/asio/spawn.hpp>
@@ -32,49 +33,67 @@
 #include <boost/asio/thread_pool.hpp>
 #include <boost/uuid/uuid.hpp>
 
+#include <atomic>
 #include <chrono>
+#include <latch>
 #include <memory>
+#include <string>
+#include <vector>
 
 namespace cluster {
 
 /**
 * @brief Service to post and read messages to/from the cluster. It uses a backend to communicate with the cluster.
 */
-class ClusterCommunicationService : public ClusterCommunicationServiceTag {
+class ClusterCommunicationService : public ClusterCommunicationServiceInterface {
+util::prometheus::GaugeInt& nodesInClusterMetric_ = PrometheusService::gaugeInt(
+"cluster_nodes_total_number",
+{},
+"Total number of nodes this node can detect in the cluster."
+);
+util::prometheus::Bool isHealthy_ = PrometheusService::boolMetric(
+"cluster_communication_is_healthy",
+{},
+"Whether cluster communication service is operating healthy (1 - healthy, 0 - we have a problem)"
+);
+
 // TODO: Use util::async::CoroExecutionContext after https://github.com/XRPLF/clio/issues/1973 is implemented
 boost::asio::thread_pool ctx_{1};
-Backend backend_;
-Metrics metrics_;
-WriterDecider writerDecider_;
+boost::asio::strand<boost::asio::thread_pool::executor_type> strand_ = boost::asio::make_strand(ctx_);
+util::Logger log_{"ClusterCommunication"};
 
+std::shared_ptr<data::BackendInterface> backend_;
+
+std::chrono::steady_clock::duration readInterval_;
+std::chrono::steady_clock::duration writeInterval_;
+
+boost::asio::cancellation_signal cancelSignal_;
+std::latch finishedCountdown_;
+std::atomic_bool running_ = false;
+bool stopped_ = false;
+
+ClioNode selfData_;
+std::vector<ClioNode> otherNodesData_;
+
 public:
-static constexpr std::chrono::milliseconds kDEFAULT_READ_INTERVAL{1000};
-static constexpr std::chrono::milliseconds kDEFAULT_WRITE_INTERVAL{1000};
+static constexpr std::chrono::milliseconds kDEFAULT_READ_INTERVAL{2100};
+static constexpr std::chrono::milliseconds kDEFAULT_WRITE_INTERVAL{1200};
 
 /**
 * @brief Construct a new Cluster Communication Service object.
 *
 * @param backend The backend to use for communication.
-* @param writerState The state showing whether clio is writing to the database.
 * @param readInterval The interval to read messages from the cluster.
 * @param writeInterval The interval to write messages to the cluster.
 */
 ClusterCommunicationService(
 std::shared_ptr<data::BackendInterface> backend,
-std::unique_ptr<etl::WriterStateInterface> writerState,
 std::chrono::steady_clock::duration readInterval = kDEFAULT_READ_INTERVAL,
 std::chrono::steady_clock::duration writeInterval = kDEFAULT_WRITE_INTERVAL
 );
 
 ~ClusterCommunicationService() override;
 
-ClusterCommunicationService(ClusterCommunicationService&&) = delete;
-ClusterCommunicationService(ClusterCommunicationService const&) = delete;
-ClusterCommunicationService&
-operator=(ClusterCommunicationService&&) = delete;
-ClusterCommunicationService&
-operator=(ClusterCommunicationService const&) = delete;
-
 /**
 * @brief Start the service.
 */
@@ -86,6 +105,44 @@ public:
 */
 void
 stop();
 
+ClusterCommunicationService(ClusterCommunicationService&&) = delete;
+ClusterCommunicationService(ClusterCommunicationService const&) = delete;
+ClusterCommunicationService&
+operator=(ClusterCommunicationService&&) = delete;
+ClusterCommunicationService&
+operator=(ClusterCommunicationService const&) = delete;
+
+/**
+* @brief Get the UUID of the current node.
+*
+* @return The UUID of the current node.
+*/
+std::shared_ptr<boost::uuids::uuid>
+selfUuid() const;
+
+/**
+* @brief Get the data of the current node.
+*
+* @return The data of the current node.
+*/
+ClioNode
+selfData() const override;
+
+/**
+* @brief Get the data of all nodes in the cluster (including self).
+*
+* @return The data of all nodes in the cluster or error if the service is not healthy.
+*/
+std::expected<std::vector<ClioNode>, std::string>
+clusterData() const override;
+
+private:
+void
+doRead(boost::asio::yield_context yield);
+
+void
+doWrite();
 };
 
 } // namespace cluster
@@ -17,31 +17,38 @@
 */
 //==============================================================================
 
-#include "cluster/Metrics.hpp"
+#pragma once
 
-#include "cluster/Backend.hpp"
 #include "cluster/ClioNode.hpp"
 
-#include <memory>
+#include <expected>
+#include <string>
+#include <vector>
 
 namespace cluster {
 
-Metrics::Metrics()
-{
-nodesInClusterMetric_.set(1); // The node always sees itself
-isHealthy_ = true;
-}
+/**
+* @brief Interface for the cluster communication service.
+*/
+class ClusterCommunicationServiceInterface {
+public:
+virtual ~ClusterCommunicationServiceInterface() = default;
 
-void
-Metrics::onNewState(ClioNode::CUuid, std::shared_ptr<Backend::ClusterData const> clusterData)
-{
-if (clusterData->has_value()) {
-isHealthy_ = true;
-nodesInClusterMetric_.set(clusterData->value().size());
-} else {
-isHealthy_ = false;
-nodesInClusterMetric_.set(1);
-}
-}
+/**
+* @brief Get the data of the current node.
+*
+* @return The data of the current node.
+*/
+[[nodiscard]] virtual ClioNode
+selfData() const = 0;
+
+/**
+* @brief Get the data of all nodes in the cluster (including self).
+*
+* @return The data of all nodes in the cluster or error if the service is not healthy.
+*/
+[[nodiscard]] virtual std::expected<std::vector<ClioNode>, std::string>
+clusterData() const = 0;
+};
 
 } // namespace cluster
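
The new `ClusterCommunicationServiceInterface` above reports cluster membership through `std::expected`. Below is a minimal standalone sketch (not project code) of how a caller typically consumes such a result; the `Node` stand-in type and the sample values are assumptions for illustration only and require a C++23 compiler:

```cpp
#include <expected>
#include <iostream>
#include <string>
#include <vector>

struct Node {};  // stand-in for cluster::ClioNode (assumption for this sketch)
using ClusterData = std::expected<std::vector<Node>, std::string>;

// Print either the cluster size or the error carried by the expected value.
void report(ClusterData const& data)
{
    if (data.has_value())
        std::cout << "cluster size: " << data->size() << '\n';
    else
        std::cout << "cluster data unavailable: " << data.error() << '\n';
}

int main()
{
    report(ClusterData{std::vector<Node>(3)});            // healthy path: prints the node count
    report(std::unexpected<std::string>{"not healthy"});  // error path: prints the error string
}
```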
@@ -1,39 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-This file is part of clio: https://github.com/XRPLF/clio
-Copyright (c) 2025, the clio developers.
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#pragma once
-
-#include <concepts>
-
-namespace cluster {
-
-/**
-* @brief Tag type for cluster communication service implementations.
-*
-* This tag is used to identify types that implement cluster communication functionality.
-* Types should inherit from this tag to be recognized as cluster communication services.
-*/
-struct ClusterCommunicationServiceTag {
-virtual ~ClusterCommunicationServiceTag() = default;
-};
-
-template <typename T>
-concept SomeClusterCommunicationService = std::derived_from<T, ClusterCommunicationServiceTag>;
-
-} // namespace cluster
@@ -1,76 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-This file is part of clio: https://github.com/XRPLF/clio
-Copyright (c) 2025, the clio developers.
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#pragma once
-
-#include "cluster/Backend.hpp"
-#include "cluster/ClioNode.hpp"
-#include "util/prometheus/Bool.hpp"
-#include "util/prometheus/Gauge.hpp"
-#include "util/prometheus/Prometheus.hpp"
-
-#include <memory>
-
-namespace cluster {
-
-/**
-* @brief Manages Prometheus metrics for cluster communication and node tracking.
-*
-* This class tracks cluster-related metrics including:
-* - Total number of nodes detected in the cluster
-* - Health status of cluster communication
-*/
-class Metrics {
-/** @brief Gauge tracking the total number of nodes visible in the cluster */
-util::prometheus::GaugeInt& nodesInClusterMetric_ = PrometheusService::gaugeInt(
-"cluster_nodes_total_number",
-{},
-"Total number of nodes this node can detect in the cluster."
-);
-
-/** @brief Boolean metric indicating whether cluster communication is healthy */
-util::prometheus::Bool isHealthy_ = PrometheusService::boolMetric(
-"cluster_communication_is_healthy",
-{},
-"Whether cluster communication service is operating healthy (1 - healthy, 0 - we have a problem)"
-);
-
-public:
-/**
-* @brief Constructs a Metrics instance and initializes metrics.
-*
-* Sets the initial node count to 1 (self) and marks communication as healthy.
-*/
-Metrics();
-
-/**
-* @brief Updates metrics based on new cluster state.
-*
-* This callback is invoked when cluster state changes. It updates:
-* - Health status based on whether cluster data is available
-* - Node count to reflect the current cluster size
-*
-* @param uuid The UUID of the node (unused in current implementation)
-* @param clusterData Shared pointer to the current cluster data; may be empty if communication failed
-*/
-void
-onNewState(ClioNode::CUuid uuid, std::shared_ptr<Backend::ClusterData const> clusterData);
-};
-
-} // namespace cluster
@@ -1,98 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-This file is part of clio: https://github.com/XRPLF/clio
-Copyright (c) 2025, the clio developers.
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#include "cluster/WriterDecider.hpp"
-
-#include "cluster/Backend.hpp"
-#include "cluster/ClioNode.hpp"
-#include "etl/WriterState.hpp"
-#include "util/Assert.hpp"
-#include "util/Spawn.hpp"
-
-#include <boost/asio/thread_pool.hpp>
-
-#include <algorithm>
-#include <memory>
-#include <utility>
-#include <vector>
-
-namespace cluster {
-
-WriterDecider::WriterDecider(boost::asio::thread_pool& ctx, std::unique_ptr<etl::WriterStateInterface> writerState)
-: ctx_(ctx), writerState_(std::move(writerState))
-{
-}
-
-void
-WriterDecider::onNewState(ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData)
-{
-if (not clusterData->has_value())
-return;
-
-util::spawn(
-ctx_,
-[writerState = writerState_->clone(),
-selfId = std::move(selfId),
-clusterData = clusterData->value()](auto&&) mutable {
-auto const selfData =
-std::ranges::find_if(clusterData, [&selfId](ClioNode const& node) { return node.uuid == selfId; });
-ASSERT(selfData != clusterData.end(), "Self data should always be in the cluster data");
-
-if (selfData->dbRole == ClioNode::DbRole::Fallback) {
-return;
-}
-
-if (selfData->dbRole == ClioNode::DbRole::ReadOnly) {
-writerState->giveUpWriting();
-return;
-}
-
-// If any node in the cluster is in Fallback mode, the entire cluster must switch
-// to the fallback writer decision mechanism for consistency
-if (std::ranges::any_of(clusterData, [](ClioNode const& node) {
-return node.dbRole == ClioNode::DbRole::Fallback;
-})) {
-writerState->setWriterDecidingFallback();
-return;
-}
-
-// We are not ReadOnly and there is no Fallback in the cluster
-std::ranges::sort(clusterData, [](ClioNode const& lhs, ClioNode const& rhs) {
-return *lhs.uuid < *rhs.uuid;
-});
-
-auto const it = std::ranges::find_if(clusterData, [](ClioNode const& node) {
-return node.dbRole == ClioNode::DbRole::NotWriter or node.dbRole == ClioNode::DbRole::Writer;
-});
-
-if (it == clusterData.end()) {
-// No writer nodes in the cluster yet
-return;
-}
-
-if (*it->uuid == *selfId) {
-writerState->startWriting();
-} else {
-writerState->giveUpWriting();
-}
-}
-);
-}
-
-} // namespace cluster
@@ -1,75 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-This file is part of clio: https://github.com/XRPLF/clio
-Copyright (c) 2025, the clio developers.
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#pragma once
-
-#include "cluster/Backend.hpp"
-#include "cluster/ClioNode.hpp"
-#include "etl/WriterState.hpp"
-
-#include <boost/asio/thread_pool.hpp>
-
-#include <memory>
-
-namespace cluster {
-
-/**
-* @brief Decides which node in the cluster should be the writer based on cluster state.
-*
-* This class monitors cluster state changes and determines whether the current node
-* should act as the writer to the database. The decision is made by:
-* 1. Sorting all nodes by UUID for deterministic ordering
-* 2. Selecting the first node that is allowed to write (not ReadOnly)
-* 3. Activating writing on this node if it's the current node, otherwise deactivating
-*
-* This ensures only one node in the cluster actively writes to the database at a time.
-*/
-class WriterDecider {
-/** @brief Thread pool for spawning asynchronous tasks */
-boost::asio::thread_pool& ctx_;
-
-/** @brief Interface for controlling the writer state of this node */
-std::unique_ptr<etl::WriterStateInterface> writerState_;
-
-public:
-/**
-* @brief Constructs a WriterDecider.
-*
-* @param ctx Thread pool for executing asynchronous operations
-* @param writerState Writer state interface for controlling write operations
-*/
-WriterDecider(boost::asio::thread_pool& ctx, std::unique_ptr<etl::WriterStateInterface> writerState);
-
-/**
-* @brief Handles cluster state changes and decides whether this node should be the writer.
-*
-* This method is called when cluster state changes. It asynchronously:
-* - Sorts all nodes by UUID to establish a deterministic order
-* - Identifies the first node allowed to write (not ReadOnly)
-* - Activates writing if this node is selected, otherwise deactivates writing
-* - Logs a warning if no nodes in the cluster are allowed to write
-*
-* @param selfId The UUID of the current node
-* @param clusterData Shared pointer to current cluster data; may be empty if communication failed
-*/
-void
-onNewState(ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData);
-};
-
-} // namespace cluster
@@ -1,104 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of clio: https://github.com/XRPLF/clio
-    Copyright (c) 2025, the clio developers.
-
-    Permission to use, copy, modify, and distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#pragma once
-
-#include "util/Assert.hpp"
-#include "util/Spawn.hpp"
-
-#include <boost/asio/bind_cancellation_slot.hpp>
-#include <boost/asio/cancellation_signal.hpp>
-#include <boost/asio/cancellation_type.hpp>
-#include <boost/asio/error.hpp>
-#include <boost/asio/executor.hpp>
-#include <boost/asio/spawn.hpp>
-#include <boost/asio/steady_timer.hpp>
-#include <boost/asio/strand.hpp>
-#include <boost/asio/use_future.hpp>
-
-#include <atomic>
-#include <chrono>
-#include <concepts>
-#include <semaphore>
-
-namespace cluster::impl {
-
-// TODO: Try to replace util::Repeat by this. https://github.com/XRPLF/clio/issues/2926
-template <typename Context>
-class RepeatedTask {
-    std::chrono::steady_clock::duration interval_;
-    boost::asio::strand<typename Context::executor_type> strand_;
-
-    enum class State { Running, Stopped };
-    std::atomic<State> state_ = State::Stopped;
-
-    std::binary_semaphore semaphore_{0};
-    boost::asio::steady_timer timer_;
-
-public:
-    RepeatedTask(std::chrono::steady_clock::duration interval, Context& ctx)
-        : interval_(interval), strand_(boost::asio::make_strand(ctx)), timer_(strand_)
-    {
-    }
-
-    ~RepeatedTask()
-    {
-        stop();
-    }
-
-    template <typename Fn>
-        requires std::invocable<Fn, boost::asio::yield_context> or std::invocable<Fn>
-    void
-    run(Fn&& f)
-    {
-        ASSERT(state_ == State::Stopped, "Can only be ran once");
-        state_ = State::Running;
-        util::spawn(strand_, [this, f = std::forward<Fn>(f)](boost::asio::yield_context yield) {
-            boost::system::error_code ec;
-
-            while (state_ == State::Running) {
-                timer_.expires_after(interval_);
-                timer_.async_wait(yield[ec]);
-
-                if (ec or state_ != State::Running)
-                    break;
-
-                if constexpr (std::invocable<decltype(f), boost::asio::yield_context>) {
-                    f(yield);
-                } else {
-                    f();
-                }
-            }
-
-            semaphore_.release();
-        });
-    }
-
-    void
-    stop()
-    {
-        if (auto expected = State::Running; not state_.compare_exchange_strong(expected, State::Stopped))
-            return;  // Already stopped or not started
-
-        boost::asio::spawn(strand_, [this](auto&&) { timer_.cancel(); }, boost::asio::use_future).wait();
-        semaphore_.acquire();
-    }
-};
-
-} // namespace cluster::impl
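The deleted RepeatedTask above re-arms a steady_timer on a strand and re-invokes a callable until stop() cancels the timer and waits on the semaphore. A brief usage sketch based only on the interface shown in the removed file; the pool size, interval and task body are illustrative.

    // Usage sketch for the RepeatedTask shown above (illustrative values).
    #include <boost/asio/thread_pool.hpp>
    #include <chrono>

    void
    exampleRepeatedTaskUsage()
    {
        boost::asio::thread_pool pool{1};
        cluster::impl::RepeatedTask task{std::chrono::seconds{1}, pool};

        // The callable may also take a boost::asio::yield_context, as the requires-clause allows.
        task.run([] { /* periodic work, executed roughly once per second */ });

        // stop() cancels the timer and blocks until the loop has finished;
        // the destructor calls it too, so this is optional.
        task.stop();
        pool.join();
    }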
@@ -146,12 +146,9 @@ AmendmentCenter::isEnabled(AmendmentKey const& key, uint32_t seq) const
 bool
 AmendmentCenter::isEnabled(boost::asio::yield_context yield, AmendmentKey const& key, uint32_t seq) const
 {
-    try {
-        if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments)
-            return lookupAmendment(all_, *listAmendments, key);
-    } catch (std::runtime_error const&) {
-        return false;  // Some old ledger does not contain Amendments ledger object so do best we can for now
-    }
+    if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments)
+        return lookupAmendment(all_, *listAmendments, key);

     return false;
 }

@@ -160,19 +157,13 @@ AmendmentCenter::isEnabled(boost::asio::yield_context yield, std::vector<Amendme
 {
     namespace rg = std::ranges;

-    try {
-        if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments) {
-            std::vector<bool> out;
-            rg::transform(keys, std::back_inserter(out), [this, &listAmendments](auto const& key) {
-                return lookupAmendment(all_, *listAmendments, key);
-            });
+    if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments) {
+        std::vector<bool> out;
+        rg::transform(keys, std::back_inserter(out), [this, &listAmendments](auto const& key) {
+            return lookupAmendment(all_, *listAmendments, key);
+        });

         return out;
-        }
-    } catch (std::runtime_error const&) {
-        return std::vector<bool>(
-            keys.size(), false
-        );  // Some old ledger does not contain Amendments ledger object so do best we can for now
     }

     return std::vector<bool>(keys.size(), false);
@@ -30,9 +30,7 @@
 namespace data {

 LedgerCacheSaver::LedgerCacheSaver(util::config::ClioConfigDefinition const& config, LedgerCacheInterface const& cache)
-    : cacheFilePath_(config.maybeValue<std::string>("cache.file.path"))
-    , cache_(cache)
-    , isAsync_(config.get<bool>("cache.file.async_save"))
+    : cacheFilePath_(config.maybeValue<std::string>("cache.file.path")), cache_(cache)
 {
 }

@@ -58,9 +56,6 @@ LedgerCacheSaver::save()
             LOG(util::LogService::error()) << "Error saving LedgerCache to file: " << success.error();
         }
     });
-    if (not isAsync_) {
-        waitToFinish();
-    }
 }

 void

@@ -53,7 +53,6 @@ class LedgerCacheSaver {
     std::optional<std::string> cacheFilePath_;
    std::reference_wrapper<LedgerCacheInterface const> cache_;
    std::optional<std::thread> savingThread_;
-   bool isAsync_;

 public:
    /**
@@ -11,7 +11,6 @@ target_sources(
     NetworkValidatedLedgers.cpp
     NFTHelpers.cpp
     Source.cpp
-    WriterState.cpp
     impl/AmendmentBlockHandler.cpp
     impl/AsyncGrpcCall.cpp
     impl/Extraction.cpp
@@ -78,7 +78,6 @@ namespace etl {
 std::shared_ptr<ETLServiceInterface>
 ETLService::makeETLService(
     util::config::ClioConfigDefinition const& config,
-    std::shared_ptr<SystemState> state,
     util::async::AnyExecutionContext ctx,
     std::shared_ptr<BackendInterface> backend,
     std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,

@@ -88,6 +87,9 @@ ETLService::makeETLService(
 {
     std::shared_ptr<ETLServiceInterface> ret;

+    auto state = std::make_shared<SystemState>();
+    state->isStrictReadonly = config.get<bool>("read_only");
+
     auto fetcher = std::make_shared<impl::LedgerFetcher>(backend, balancer);
     auto extractor = std::make_shared<impl::Extractor>(fetcher);
     auto publisher = std::make_shared<impl::LedgerPublisher>(ctx, backend, subscriptions, *state);

@@ -171,7 +173,6 @@ ETLService::ETLService(
     , state_(std::move(state))
     , startSequence_(config.get().maybeValue<uint32_t>("start_sequence"))
     , finishSequence_(config.get().maybeValue<uint32_t>("finish_sequence"))
-    , writeCommandStrand_(ctx_.makeStrand())
 {
     ASSERT(not state_->isWriting, "ETL should never start in writer mode");

@@ -212,13 +213,14 @@ ETLService::run()
         return;
     }

-    auto const nextSequence = syncCacheWithDb();
+    auto nextSequence = rng->maxSequence + 1;
+    if (backend_->cache().latestLedgerSequence() != 0) {
+        nextSequence = backend_->cache().latestLedgerSequence();
+    }

     LOG(log_.debug()) << "Database is populated. Starting monitor loop. sequence = " << nextSequence;

     startMonitor(nextSequence);

-    state_->isLoadingCache = false;
-
     // If we are a writer as the result of loading the initial ledger - start loading
     if (state_->isWriting)
         startLoading(nextSequence);

@@ -230,13 +232,6 @@ ETLService::stop()
 {
     LOG(log_.info()) << "Stop called";

-    systemStateWriteCommandSubscription_.disconnect();
-    auto count = runningWriteCommandHandlers_.load();
-    while (count != 0) {
-        runningWriteCommandHandlers_.wait(count);  // Blocks until value changes
-        count = runningWriteCommandHandlers_.load();
-    }
-
     if (mainLoop_)
         mainLoop_->wait();
     if (taskMan_)

@@ -348,77 +343,35 @@ ETLService::loadInitialLedgerIfNeeded()
     return rng;
 }

-uint32_t
-ETLService::syncCacheWithDb()
-{
-    auto rng = backend_->hardFetchLedgerRangeNoThrow();
-
-    while (not backend_->cache().isDisabled() and rng->maxSequence > backend_->cache().latestLedgerSequence()) {
-        LOG(log_.info()) << "Syncing cache with DB. DB latest seq: " << rng->maxSequence
-                         << ". Cache latest seq: " << backend_->cache().latestLedgerSequence();
-        for (auto seq = backend_->cache().latestLedgerSequence(); seq <= rng->maxSequence; ++seq) {
-            LOG(log_.info()) << "ETLService (via syncCacheWithDb) got new seq from db: " << seq;
-            updateCache(seq);
-        }
-        rng = backend_->hardFetchLedgerRangeNoThrow();
-    }
-    return rng->maxSequence + 1;
-}
-
-void
-ETLService::updateCache(uint32_t seq)
-{
-    auto const cacheNeedsUpdate = backend_->cache().latestLedgerSequence() < seq;
-    auto const backendRange = backend_->fetchLedgerRange();
-    auto const backendNeedsUpdate = backendRange.has_value() and backendRange->maxSequence < seq;
-
-    if (cacheNeedsUpdate) {
-        auto const diff = data::synchronousAndRetryOnTimeout([this, seq](auto yield) {
-            return backend_->fetchLedgerDiff(seq, yield);
-        });
-        cacheUpdater_->update(seq, diff);
-    }
-
-    if (backendNeedsUpdate)
-        backend_->updateRange(seq);
-
-    publisher_->publish(seq, {});
-}
-
 void
 ETLService::startMonitor(uint32_t seq)
 {
     monitor_ = monitorProvider_->make(ctx_, backend_, ledgers_, seq);

-    systemStateWriteCommandSubscription_ =
-        state_->writeCommandSignal.connect([this](SystemState::WriteCommand command) {
-            ++runningWriteCommandHandlers_;
-            writeCommandStrand_.submit([this, command]() {
-                switch (command) {
-                    case etl::SystemState::WriteCommand::StartWriting:
-                        attemptTakeoverWriter();
-                        break;
-                    case etl::SystemState::WriteCommand::StopWriting:
-                        giveUpWriter();
-                        break;
-                }
-                --runningWriteCommandHandlers_;
-                runningWriteCommandHandlers_.notify_one();
-            });
-        });
-
     monitorNewSeqSubscription_ = monitor_->subscribeToNewSequence([this](uint32_t seq) {
         LOG(log_.info()) << "ETLService (via Monitor) got new seq from db: " << seq;
-        updateCache(seq);
+        if (state_->writeConflict) {
+            LOG(log_.info()) << "Got a write conflict; Giving up writer seat immediately";
+            giveUpWriter();
+        }
+
+        if (not state_->isWriting) {
+            auto const diff = data::synchronousAndRetryOnTimeout([this, seq](auto yield) {
+                return backend_->fetchLedgerDiff(seq, yield);
+            });
+
+            cacheUpdater_->update(seq, diff);
+            backend_->updateRange(seq);
+        }
+
+        publisher_->publish(seq, {});
     });

     monitorDbStalledSubscription_ = monitor_->subscribeToDbStalled([this]() {
         LOG(log_.warn()) << "ETLService received DbStalled signal from Monitor";
-        // Database stall detected - no writer has been active for 10 seconds
-        // This triggers the fallback mechanism and attempts to become the writer
         if (not state_->isStrictReadonly and not state_->isWriting)
-            state_->writeCommandSignal(SystemState::WriteCommand::StartWriting);
-        state_->isWriterDecidingFallback = true;
+            attemptTakeoverWriter();
     });

     monitor_->run();

@@ -441,13 +394,6 @@ ETLService::attemptTakeoverWriter()
     auto rng = backend_->hardFetchLedgerRangeNoThrow();
     ASSERT(rng.has_value(), "Ledger range can't be null");

-    if (backend_->cache().latestLedgerSequence() != rng->maxSequence) {
-        LOG(log_.info()) << "Wanted to take over the ETL writer seat but LedgerCache is outdated";
-        // Give ETL time to update LedgerCache. This method will be called because ClusterCommunication will likely to
-        // continue sending StartWriting signal every 1 second
-        return;
-    }
-
     state_->isWriting = true;  // switch to writer
     LOG(log_.info()) << "Taking over the ETL writer seat";
     startLoading(rng->maxSequence + 1);

@@ -458,7 +404,7 @@ ETLService::giveUpWriter()
 {
     ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
     state_->isWriting = false;
-    LOG(log_.info()) << "Giving up writer seat";
+    state_->writeConflict = false;
     taskMan_ = nullptr;
 }
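For readability, the new-sequence handler added in startMonitor above can be summarised as: a detected write conflict makes the node give up the writer seat, a node that is not writing replays the ledger diff into its cache and advances the stored range, and every node then publishes the sequence. A condensed, self-contained restatement follows; the callback struct stands in for the ETLService members and helpers and is not part of the diff.

    // Condensed restatement of the handler added above; the callbacks are
    // stand-ins for ETLService members, so treat this as a sketch only.
    #include <cstdint>
    #include <functional>

    struct NewSeqCallbacks {
        std::function<bool()> writeConflict;      // reads state_->writeConflict
        std::function<bool()> isWriting;          // reads state_->isWriting
        std::function<void()> giveUpWriter;       // releases the writer seat
        std::function<void(uint32_t)> syncCache;  // fetch diff, update cache, update range
        std::function<void(uint32_t)> publish;    // publisher_->publish(seq, {})
    };

    inline void
    onNewSequenceFromDb(uint32_t seq, NewSeqCallbacks const& cb)
    {
        if (cb.writeConflict())
            cb.giveUpWriter();  // another node committed first

        if (not cb.isWriting())
            cb.syncCache(seq);  // readers replay the ledger diff into their cache

        cb.publish(seq);
    }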
@@ -52,7 +52,6 @@
 #include "feed/SubscriptionManagerInterface.hpp"
 #include "util/async/AnyExecutionContext.hpp"
 #include "util/async/AnyOperation.hpp"
-#include "util/async/AnyStrand.hpp"
 #include "util/config/ConfigDefinition.hpp"
 #include "util/log/Logger.hpp"

@@ -70,12 +69,12 @@
 #include <xrpl/protocol/TxFormats.h>
 #include <xrpl/protocol/TxMeta.h>

-#include <atomic>
 #include <cstddef>
 #include <cstdint>
 #include <functional>
 #include <memory>
 #include <optional>
+#include <string>

 namespace etl {

@@ -118,9 +117,6 @@ class ETLService : public ETLServiceInterface {

     boost::signals2::scoped_connection monitorNewSeqSubscription_;
     boost::signals2::scoped_connection monitorDbStalledSubscription_;
-    boost::signals2::scoped_connection systemStateWriteCommandSubscription_;
-    util::async::AnyStrand writeCommandStrand_;
-    std::atomic<size_t> runningWriteCommandHandlers_{0};

     std::optional<util::async::AnyOperation<void>> mainLoop_;

@@ -131,7 +127,6 @@ public:
     * Creates and runs the ETL service.
     *
     * @param config The configuration to use
-    * @param state The system state tracking object
     * @param ctx Execution context for asynchronous operations
     * @param backend BackendInterface implementation
     * @param subscriptions Subscription manager

@@ -142,7 +137,6 @@ public:
    static std::shared_ptr<ETLServiceInterface>
    makeETLService(
        util::config::ClioConfigDefinition const& config,
-       std::shared_ptr<SystemState> state,
        util::async::AnyExecutionContext ctx,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,

@@ -166,7 +160,7 @@ public:
     * @param initialLoadObserver The observer for initial data loading
     * @param taskManagerProvider The provider of the task manager instance
     * @param monitorProvider The provider of the monitor instance
-    * @param state The system state tracking object
+    * @param state System state tracking object
     */
    ETLService(
        util::async::AnyExecutionContext ctx,

@@ -212,12 +206,6 @@ private:
    std::optional<data::LedgerRange>
    loadInitialLedgerIfNeeded();

-   [[nodiscard]] uint32_t
-   syncCacheWithDb();
-
-   void
-   updateCache(uint32_t seq);
-
    void
    startMonitor(uint32_t seq);
@@ -19,16 +19,11 @@

 #pragma once

-#include "util/config/ConfigDefinition.hpp"
-#include "util/log/Logger.hpp"
 #include "util/prometheus/Bool.hpp"
 #include "util/prometheus/Label.hpp"
 #include "util/prometheus/Prometheus.hpp"

-#include <boost/signals2/signal.hpp>
-#include <boost/signals2/variadic_signal.hpp>
+#include <atomic>

-#include <memory>
-
 namespace etl {

@@ -36,25 +31,6 @@ namespace etl {
  * @brief Represents the state of the ETL subsystem.
  */
 struct SystemState {
-    SystemState()
-    {
-        isLoadingCache = true;
-    }
-
-    /**
-     * @brief Factory method to create a SystemState instance.
-     *
-     * @param config The configuration to use for initializing the system state
-     * @return A shared pointer to the newly created SystemState
-     */
-    static std::shared_ptr<SystemState>
-    makeSystemState(util::config::ClioConfigDefinition const& config)
-    {
-        auto state = std::make_shared<SystemState>();
-        state->isStrictReadonly = config.get<bool>("read_only");
-        return state;
-    }
-
     /**
      * @brief Whether the process is in strict read-only mode.
      *

@@ -74,31 +50,8 @@ struct SystemState {
         "Whether the process is writing to the database"
     );

-    /** @brief Whether the process is still loading cache after startup. */
-    util::prometheus::Bool isLoadingCache = PrometheusService::boolMetric(
-        "etl_loading_cache",
-        util::prometheus::Labels{},
-        "Whether etl is loading cache after clio startup"
-    );
-
-    /**
-     * @brief Commands for controlling the ETL writer state.
-     *
-     * These commands are emitted via writeCommandSignal to coordinate writer state transitions across components.
-     */
-    enum class WriteCommand {
-        StartWriting, /**< Request to attempt taking over as the ETL writer */
-        StopWriting   /**< Request to give up the ETL writer role (e.g., due to write conflict) */
-    };
-
-    /**
-     * @brief Signal for coordinating ETL writer state transitions.
-     *
-     * This signal allows components to request changes to the writer state without direct coupling.
-     * - Emitted with StartWriting when database stalls and node should attempt to become writer
-     * - Emitted with StopWriting when write conflicts are detected
-     */
-    boost::signals2::signal<void(WriteCommand)> writeCommandSignal;
+    std::atomic_bool isStopping = false;    /**< @brief Whether the software is stopping. */
+    std::atomic_bool writeConflict = false; /**< @brief Whether a write conflict was detected. */

     /**
      * @brief Whether clio detected an amendment block.

@@ -124,24 +77,6 @@ struct SystemState {
         util::prometheus::Labels{},
         "Whether clio detected a corruption that needs manual attention"
     );
-
-    /**
-     * @brief Whether the cluster is using the fallback writer decision mechanism.
-     *
-     * The fallback mechanism is triggered when:
-     * - The database stalls for 10 seconds (detected by Monitor), indicating no active writer
-     * - A write conflict is detected, indicating multiple nodes attempting to write simultaneously
-     *
-     * When fallback mode is active, the cluster stops using the cluster communication mechanism
-     * (TTL-based role announcements) and relies on the slower but more reliable database-based
-     * conflict detection. This flag propagates across the cluster - if any node enters fallback
-     * mode, all nodes in the cluster will switch to fallback mode.
-     */
-    util::prometheus::Bool isWriterDecidingFallback = PrometheusService::boolMetric(
-        "etl_writing_deciding_fallback",
-        util::prometheus::Labels{},
-        "Whether the cluster is using the fallback writer decision mechanism"
-    );
 };

 } // namespace etl
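With the signal removed, writer coordination goes back to plain flags: prometheus-backed booleans for externally visible state and std::atomic_bool for purely internal ones. A minimal sketch of how such flags are typically read and written; the field names follow the struct above, but the Prometheus wrapper is replaced with std::atomic_bool, so this is illustrative rather than the real type.

    // Sketch only: plain std::atomic_bool stands in for util::prometheus::Bool.
    #include <atomic>

    struct MiniSystemState {
        std::atomic_bool isStrictReadonly = false;
        std::atomic_bool isWriting = false;
        std::atomic_bool isStopping = false;     // set once when shutdown begins
        std::atomic_bool writeConflict = false;  // set by the loader, cleared when the writer seat is released
    };

    inline bool
    shouldAttemptTakeover(MiniSystemState const& s)
    {
        // Mirrors the DbStalled handler above: only a node that is neither
        // strictly read-only nor already writing tries to take the writer seat.
        return not s.isStrictReadonly and not s.isWriting;
    }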
@@ -1,88 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of clio: https://github.com/XRPLF/clio
|
|
||||||
Copyright (c) 2025, the clio developers.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#include "etl/WriterState.hpp"
|
|
||||||
|
|
||||||
#include "etl/SystemState.hpp"
|
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
#include <utility>
|
|
||||||
|
|
||||||
namespace etl {
|
|
||||||
|
|
||||||
WriterState::WriterState(std::shared_ptr<SystemState> state) : systemState_(std::move(state))
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
bool
|
|
||||||
WriterState::isReadOnly() const
|
|
||||||
{
|
|
||||||
return systemState_->isStrictReadonly;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool
|
|
||||||
WriterState::isWriting() const
|
|
||||||
{
|
|
||||||
return systemState_->isWriting;
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
WriterState::startWriting()
|
|
||||||
{
|
|
||||||
if (isWriting())
|
|
||||||
return;
|
|
||||||
|
|
||||||
systemState_->writeCommandSignal(SystemState::WriteCommand::StartWriting);
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
WriterState::giveUpWriting()
|
|
||||||
{
|
|
||||||
if (not isWriting())
|
|
||||||
return;
|
|
||||||
|
|
||||||
systemState_->writeCommandSignal(SystemState::WriteCommand::StopWriting);
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
WriterState::setWriterDecidingFallback()
|
|
||||||
{
|
|
||||||
systemState_->isWriterDecidingFallback = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool
|
|
||||||
WriterState::isFallback() const
|
|
||||||
{
|
|
||||||
return systemState_->isWriterDecidingFallback;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool
|
|
||||||
WriterState::isLoadingCache() const
|
|
||||||
{
|
|
||||||
return systemState_->isLoadingCache;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::unique_ptr<WriterStateInterface>
|
|
||||||
WriterState::clone() const
|
|
||||||
{
|
|
||||||
auto c = WriterState(*this);
|
|
||||||
return std::make_unique<WriterState>(std::move(c));
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace etl
|
|
||||||
@@ -1,193 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of clio: https://github.com/XRPLF/clio
|
|
||||||
Copyright (c) 2025, the clio developers.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "etl/SystemState.hpp"
|
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
|
|
||||||
namespace etl {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Interface for managing writer state in the ETL subsystem.
|
|
||||||
*
|
|
||||||
* This interface provides methods to query and control whether the ETL process
|
|
||||||
* is actively writing to the database. Implementations should coordinate with
|
|
||||||
* the ETL system state to manage write responsibilities.
|
|
||||||
*/
|
|
||||||
class WriterStateInterface {
|
|
||||||
public:
|
|
||||||
virtual ~WriterStateInterface() = default;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Check if the ETL process is in strict read-only mode.
|
|
||||||
* @return true if the process is in strict read-only mode, false otherwise
|
|
||||||
*/
|
|
||||||
[[nodiscard]] virtual bool
|
|
||||||
isReadOnly() const = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Check if the ETL process is currently writing to the database.
|
|
||||||
* @return true if the process is writing, false otherwise
|
|
||||||
*/
|
|
||||||
[[nodiscard]] virtual bool
|
|
||||||
isWriting() const = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Request to start writing to the database.
|
|
||||||
*
|
|
||||||
* This method signals that the process should take over writing responsibilities.
|
|
||||||
* The actual transition to writing state may not be immediate.
|
|
||||||
*/
|
|
||||||
virtual void
|
|
||||||
startWriting() = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Request to stop writing to the database.
|
|
||||||
*
|
|
||||||
* This method signals that the process should give up writing responsibilities.
|
|
||||||
* The actual transition from writing state may not be immediate.
|
|
||||||
*/
|
|
||||||
virtual void
|
|
||||||
giveUpWriting() = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Check if the cluster is using the fallback writer decision mechanism.
|
|
||||||
*
|
|
||||||
* @return true if the cluster has switched to fallback mode, false otherwise
|
|
||||||
*/
|
|
||||||
[[nodiscard]] virtual bool
|
|
||||||
isFallback() const = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Switch the cluster to the fallback writer decision mechanism.
|
|
||||||
*
|
|
||||||
* This method is called when the cluster needs to transition from the cluster
|
|
||||||
* communication mechanism to the slower but more reliable fallback mechanism.
|
|
||||||
* Once set, this flag propagates to all nodes in the cluster through the
|
|
||||||
* ClioNode DbRole::Fallback state.
|
|
||||||
*/
|
|
||||||
virtual void
|
|
||||||
setWriterDecidingFallback() = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Whether clio is still loading cache after startup.
|
|
||||||
*
|
|
||||||
* @return true if clio is still loading cache, false otherwise.
|
|
||||||
*/
|
|
||||||
[[nodiscard]] virtual bool
|
|
||||||
isLoadingCache() const = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Create a clone of this writer state.
|
|
||||||
*
|
|
||||||
* Creates a new instance of the writer state with the same underlying system state.
|
|
||||||
* This is used when spawning operations that need their own writer state instance
|
|
||||||
* while sharing the same system state.
|
|
||||||
*
|
|
||||||
* @return A unique pointer to the cloned writer state.
|
|
||||||
*/
|
|
||||||
[[nodiscard]] virtual std::unique_ptr<WriterStateInterface>
|
|
||||||
clone() const = 0;
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Implementation of WriterStateInterface that manages ETL writer state.
|
|
||||||
*
|
|
||||||
* This class coordinates with SystemState to manage whether the ETL process
|
|
||||||
* is actively writing to the database. It provides methods to query the current
|
|
||||||
* writing state and request transitions between writing and non-writing states.
|
|
||||||
*/
|
|
||||||
class WriterState : public WriterStateInterface {
|
|
||||||
private:
|
|
||||||
std::shared_ptr<SystemState> systemState_; /**< @brief Shared system state for ETL coordination */
|
|
||||||
|
|
||||||
public:
|
|
||||||
/**
|
|
||||||
* @brief Construct a WriterState with the given system state.
|
|
||||||
* @param state Shared pointer to the system state for coordination
|
|
||||||
*/
|
|
||||||
WriterState(std::shared_ptr<SystemState> state);
|
|
||||||
|
|
||||||
bool
|
|
||||||
isReadOnly() const override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Check if the ETL process is currently writing to the database.
|
|
||||||
* @return true if the process is writing, false otherwise
|
|
||||||
*/
|
|
||||||
bool
|
|
||||||
isWriting() const override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Request to start writing to the database.
|
|
||||||
*
|
|
||||||
* If already writing, this method does nothing. Otherwise, it sets the
|
|
||||||
* shouldTakeoverWriting flag in the system state to signal the request.
|
|
||||||
*/
|
|
||||||
void
|
|
||||||
startWriting() override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Request to stop writing to the database.
|
|
||||||
*
|
|
||||||
* If not currently writing, this method does nothing. Otherwise, it sets the
|
|
||||||
* shouldGiveUpWriter flag in the system state to signal the request.
|
|
||||||
*/
|
|
||||||
void
|
|
||||||
giveUpWriting() override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Switch the cluster to the fallback writer decision mechanism.
|
|
||||||
*
|
|
||||||
* Sets the isWriterDecidingFallback flag in the system state, which will be
|
|
||||||
* propagated to other nodes in the cluster through the ClioNode DbRole::Fallback state.
|
|
||||||
*/
|
|
||||||
void
|
|
||||||
setWriterDecidingFallback() override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Check if the cluster is using the fallback writer decision mechanism.
|
|
||||||
*
|
|
||||||
* @return true if the cluster has switched to fallback mode, false otherwise
|
|
||||||
*/
|
|
||||||
bool
|
|
||||||
isFallback() const override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Whether clio is still loading cache after startup.
|
|
||||||
*
|
|
||||||
* @return true if clio is still loading cache, false otherwise.
|
|
||||||
*/
|
|
||||||
bool
|
|
||||||
isLoadingCache() const override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Create a clone of this writer state.
|
|
||||||
*
|
|
||||||
* Creates a new WriterState instance sharing the same system state.
|
|
||||||
*
|
|
||||||
* @return A unique pointer to the cloned writer state.
|
|
||||||
*/
|
|
||||||
std::unique_ptr<WriterStateInterface>
|
|
||||||
clone() const override;
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace etl
|
|
||||||
@@ -45,7 +45,6 @@
 #include <xrpl/protocol/Serializer.h>

 #include <algorithm>
-#include <atomic>
 #include <chrono>
 #include <cstddef>
 #include <cstdint>

@@ -77,8 +76,6 @@ class LedgerPublisher : public LedgerPublisherInterface {

     util::async::AnyStrand publishStrand_;

-    std::atomic_bool stop_{false};
-
     std::shared_ptr<BackendInterface> backend_;
     std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions_;
     std::reference_wrapper<SystemState const> state_;  // shared state for ETL

@@ -128,7 +125,7 @@ public:
    {
        LOG(log_.info()) << "Attempting to publish ledger = " << ledgerSequence;
        size_t numAttempts = 0;
-       while (not stop_) {
+       while (not state_.get().isStopping) {
            auto range = backend_->hardFetchLedgerRangeNoThrow();

            if (!range || range->maxSequence < ledgerSequence) {

@@ -261,18 +258,6 @@ public:
        return *lastPublishedSequence_.lock();
    }

-   /**
-    * @brief Stops publishing
-    *
-    * @note This is a basic implementation to satisfy tests. This will be improved in
-    * https://github.com/XRPLF/clio/issues/2833
-    */
-   void
-   stop()
-   {
-       stop_ = true;
-   }
-
 private:
    void
    setLastClose(std::chrono::time_point<ripple::NetClock> lastCloseTime)

@@ -75,10 +75,7 @@ Loader::load(model::LedgerData const& data)
                      << "; took " << duration << "ms";

    if (not success) {
-       // Write conflict detected - another node wrote to the database
-       // This triggers the fallback mechanism and stops this node from writing
-       state_->writeCommandSignal(SystemState::WriteCommand::StopWriting);
-       state_->isWriterDecidingFallback = true;
+       state_->writeConflict = true;
        LOG(log_.warn()) << "Another node wrote a ledger into the DB - we have a write conflict";
        return std::unexpected(LoaderError::WriteConflict);
    }
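The Loader change above treats a failed ledger write as evidence that another node committed the same sequence first: it records the conflict on the shared state and aborts its own write. A hedged, self-contained sketch of that check; Backend, LedgerData and writeLedger() are invented stand-ins, not clio's actual backend API.

    // Sketch of the write-conflict handling shown in Loader::load above.
    // Backend, LedgerData and writeLedger() are hypothetical stand-ins.
    #include <atomic>
    #include <expected>

    enum class LoaderError { WriteConflict };

    struct LedgerData {};

    struct Backend {
        bool
        writeLedger(LedgerData const&)
        {
            return true;  // pretend the conditional write succeeded
        }
    };

    struct SystemStateFlags {
        std::atomic_bool writeConflict = false;
    };

    inline std::expected<void, LoaderError>
    commitLedger(Backend& backend, LedgerData const& data, SystemStateFlags& state)
    {
        // A failed write means another node already committed this sequence.
        if (not backend.writeLedger(data)) {
            state.writeConflict = true;  // the monitor handler reacts by giving up the writer seat
            return std::unexpected{LoaderError::WriteConflict};
        }
        return {};
    }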
@@ -25,7 +25,9 @@
|
|||||||
#include "util/prometheus/Label.hpp"
|
#include "util/prometheus/Label.hpp"
|
||||||
#include "util/prometheus/Prometheus.hpp"
|
#include "util/prometheus/Prometheus.hpp"
|
||||||
|
|
||||||
|
#include <boost/asio/post.hpp>
|
||||||
#include <boost/asio/spawn.hpp>
|
#include <boost/asio/spawn.hpp>
|
||||||
|
#include <boost/asio/strand.hpp>
|
||||||
#include <boost/json/object.hpp>
|
#include <boost/json/object.hpp>
|
||||||
|
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
@@ -37,27 +39,6 @@
|
|||||||
|
|
||||||
namespace rpc {
|
namespace rpc {
|
||||||
|
|
||||||
void
|
|
||||||
WorkQueue::OneTimeCallable::setCallable(std::function<void()> func)
|
|
||||||
{
|
|
||||||
func_ = std::move(func);
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
WorkQueue::OneTimeCallable::operator()()
|
|
||||||
{
|
|
||||||
if (not called_) {
|
|
||||||
func_();
|
|
||||||
called_ = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
WorkQueue::OneTimeCallable::
|
|
||||||
operator bool() const
|
|
||||||
{
|
|
||||||
return func_.operator bool();
|
|
||||||
}
|
|
||||||
|
|
||||||
WorkQueue::WorkQueue(DontStartProcessingTag, std::uint32_t numWorkers, uint32_t maxSize)
|
WorkQueue::WorkQueue(DontStartProcessingTag, std::uint32_t numWorkers, uint32_t maxSize)
|
||||||
: queued_{PrometheusService::counterInt(
|
: queued_{PrometheusService::counterInt(
|
||||||
"work_queue_queued_total_number",
|
"work_queue_queued_total_number",
|
||||||
@@ -75,6 +56,8 @@ WorkQueue::WorkQueue(DontStartProcessingTag, std::uint32_t numWorkers, uint32_t
|
|||||||
"The current number of tasks in the queue"
|
"The current number of tasks in the queue"
|
||||||
)}
|
)}
|
||||||
, ioc_{numWorkers}
|
, ioc_{numWorkers}
|
||||||
|
, strand_{ioc_.get_executor()}
|
||||||
|
, waitTimer_(ioc_)
|
||||||
{
|
{
|
||||||
if (maxSize != 0)
|
if (maxSize != 0)
|
||||||
maxSize_ = maxSize;
|
maxSize_ = maxSize;
|
||||||
@@ -94,14 +77,12 @@ WorkQueue::~WorkQueue()
|
|||||||
void
|
void
|
||||||
WorkQueue::startProcessing()
|
WorkQueue::startProcessing()
|
||||||
{
|
{
|
||||||
ASSERT(not processingStarted_, "Attempt to start processing work queue more than once");
|
util::spawn(strand_, [this](auto yield) {
|
||||||
processingStarted_ = true;
|
ASSERT(not hasDispatcher_, "Dispatcher already running");
|
||||||
|
|
||||||
// Spawn workers for all tasks that were queued before processing started
|
hasDispatcher_ = true;
|
||||||
auto const numTasks = size();
|
dispatcherLoop(yield);
|
||||||
for (auto i = 0uz; i < numTasks; ++i) {
|
});
|
||||||
util::spawn(ioc_, [this](auto yield) { executeTask(yield); });
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool
|
bool
|
||||||
@@ -117,28 +98,93 @@ WorkQueue::postCoro(TaskType func, bool isWhiteListed, Priority priority)
         return false;
     }

+    ++curSize_.get();
+    auto needsWakeup = false;
+
     {
-        auto state = queueState_.lock();
+        auto state = dispatcherState_.lock();

+        needsWakeup = std::exchange(state->isIdle, false);
+
         state->push(priority, std::move(func));
     }

-    ++curSize_.get();
-
-    if (not processingStarted_)
-        return true;
-
-    util::spawn(ioc_, [this](auto yield) { executeTask(yield); });
+    if (needsWakeup)
+        boost::asio::post(strand_, [this] { waitTimer_.cancel(); });

     return true;
 }

+void
+WorkQueue::dispatcherLoop(boost::asio::yield_context yield)
+{
+    LOG(log_.info()) << "WorkQueue dispatcher starting";
+
+    // all ongoing tasks must be completed before stopping fully
+    while (not stopping_ or size() > 0) {
+        std::optional<TaskType> task;
+
+        {
+            auto state = dispatcherState_.lock();
+
+            if (state->empty()) {
+                state->isIdle = true;
+            } else {
+                task = state->popNext();
+            }
+        }
+
+        if (not stopping_ and not task.has_value()) {
+            waitTimer_.expires_at(std::chrono::steady_clock::time_point::max());
+            boost::system::error_code ec;
+            waitTimer_.async_wait(yield[ec]);
+        } else if (task.has_value()) {
+            util::spawn(
+                ioc_,
+                [this, spawnedAt = std::chrono::system_clock::now(), task = std::move(*task)](auto yield) mutable {
+                    auto const takenAt = std::chrono::system_clock::now();
+                    auto const waited =
+                        std::chrono::duration_cast<std::chrono::microseconds>(takenAt - spawnedAt).count();
+
+                    ++queued_.get();
+                    durationUs_.get() += waited;
+                    LOG(log_.info()) << "WorkQueue wait time: " << waited << ", queue size: " << size();
+
+                    task(yield);
+
+                    --curSize_.get();
+                }
+            );
+        }
+    }
+
+    LOG(log_.info()) << "WorkQueue dispatcher shutdown requested - time to execute onTasksComplete";
+
+    {
+        auto onTasksComplete = onQueueEmpty_.lock();
+        ASSERT(onTasksComplete->operator bool(), "onTasksComplete must be set when stopping is true.");
+        onTasksComplete->operator()();
+    }
+
+    LOG(log_.info()) << "WorkQueue dispatcher finished";
+}
+
 void
 WorkQueue::requestStop(std::function<void()> onQueueEmpty)
 {
     auto handler = onQueueEmpty_.lock();
-    handler->setCallable(std::move(onQueueEmpty));
+    *handler = std::move(onQueueEmpty);

     stopping_ = true;
+    auto needsWakeup = false;
+
+    {
+        auto state = dispatcherState_.lock();
+        needsWakeup = std::exchange(state->isIdle, false);
+    }
+
+    if (needsWakeup)
+        boost::asio::post(strand_, [this] { waitTimer_.cancel(); });
 }

 void

@@ -148,12 +194,6 @@ WorkQueue::stop()
     requestStop();

     ioc_.join();
-
-    {
-        auto onTasksComplete = onQueueEmpty_.lock();
-        ASSERT(onTasksComplete->operator bool(), "onTasksComplete must be set when stopping is true.");
-        onTasksComplete->operator()();
-    }
 }

 WorkQueue

@@ -187,29 +227,4 @@ WorkQueue::size() const
     return curSize_.get().value();
 }
-
-void
-WorkQueue::executeTask(boost::asio::yield_context yield)
-{
-    std::optional<TaskWithTimestamp> taskWithTimestamp;
-    {
-        auto state = queueState_.lock();
-        taskWithTimestamp = state->popNext();
-    }
-
-    ASSERT(
-        taskWithTimestamp.has_value(),
-        "Queue should not be empty as we spawn a coro with executeTask for each postCoro."
-    );
-    auto const takenAt = std::chrono::system_clock::now();
-    auto const waited =
-        std::chrono::duration_cast<std::chrono::microseconds>(takenAt - taskWithTimestamp->queuedAt).count();
-
-    ++queued_.get();
-    durationUs_.get() += waited;
-    LOG(log_.info()) << "WorkQueue wait time: " << waited << ", queue size: " << size();
-
-    taskWithTimestamp->task(yield);
-    --curSize_.get();
-}
-
 } // namespace rpc
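The new dispatcher above is a single coroutine that parks on a steady_timer armed to time_point::max() whenever the queue is empty; producers flip the isIdle flag under the mutex and then cancel the timer on the dispatcher's strand to wake it. The same park/wake idiom in isolation, as a small standalone Boost.Asio program independent of the WorkQueue types:

    // Standalone illustration of the park/wake idiom used by dispatcherLoop above.
    #include <boost/asio/detached.hpp>
    #include <boost/asio/post.hpp>
    #include <boost/asio/spawn.hpp>
    #include <boost/asio/steady_timer.hpp>
    #include <boost/asio/strand.hpp>
    #include <boost/asio/thread_pool.hpp>

    #include <chrono>
    #include <iostream>

    int
    main()
    {
        boost::asio::thread_pool pool{2};
        auto strand = boost::asio::make_strand(pool.get_executor());
        boost::asio::steady_timer timer{strand};

        // Consumer: parks "forever" until the timer is cancelled.
        boost::asio::spawn(
            strand,
            [&](boost::asio::yield_context yield) {
                timer.expires_at(std::chrono::steady_clock::time_point::max());
                boost::system::error_code ec;
                timer.async_wait(yield[ec]);  // completes with operation_aborted on cancel
                std::cout << "woken: " << ec.message() << '\n';
            },
            boost::asio::detached
        );

        // Producer: wakes the consumer by cancelling the timer on its strand.
        boost::asio::post(strand, [&] { timer.cancel(); });

        pool.join();
    }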
|||||||
@@ -25,12 +25,15 @@
|
|||||||
#include "util/prometheus/Counter.hpp"
|
#include "util/prometheus/Counter.hpp"
|
||||||
#include "util/prometheus/Gauge.hpp"
|
#include "util/prometheus/Gauge.hpp"
|
||||||
|
|
||||||
|
#include <boost/asio.hpp>
|
||||||
#include <boost/asio/spawn.hpp>
|
#include <boost/asio/spawn.hpp>
|
||||||
|
#include <boost/asio/steady_timer.hpp>
|
||||||
|
#include <boost/asio/strand.hpp>
|
||||||
#include <boost/asio/thread_pool.hpp>
|
#include <boost/asio/thread_pool.hpp>
|
||||||
|
#include <boost/json.hpp>
|
||||||
#include <boost/json/object.hpp>
|
#include <boost/json/object.hpp>
|
||||||
|
|
||||||
#include <atomic>
|
#include <atomic>
|
||||||
#include <chrono>
|
|
||||||
#include <cstddef>
|
#include <cstddef>
|
||||||
#include <cstdint>
|
#include <cstdint>
|
||||||
#include <functional>
|
#include <functional>
|
||||||
@@ -61,13 +64,7 @@ struct Reportable {
|
|||||||
*/
|
*/
|
||||||
class WorkQueue : public Reportable {
|
class WorkQueue : public Reportable {
|
||||||
using TaskType = std::function<void(boost::asio::yield_context)>;
|
using TaskType = std::function<void(boost::asio::yield_context)>;
|
||||||
|
using QueueType = std::queue<TaskType>;
|
||||||
struct TaskWithTimestamp {
|
|
||||||
TaskType task;
|
|
||||||
std::chrono::system_clock::time_point queuedAt;
|
|
||||||
};
|
|
||||||
|
|
||||||
using QueueType = std::queue<TaskWithTimestamp>;
|
|
||||||
|
|
||||||
public:
|
public:
|
||||||
/**
|
/**
|
||||||
@@ -79,21 +76,22 @@ public:
|
|||||||
};
|
};
|
||||||
|
|
||||||
private:
|
private:
|
||||||
struct QueueState {
|
struct DispatcherState {
|
||||||
QueueType high;
|
QueueType high;
|
||||||
QueueType normal;
|
QueueType normal;
|
||||||
|
|
||||||
|
bool isIdle = false;
|
||||||
size_t highPriorityCounter = 0;
|
size_t highPriorityCounter = 0;
|
||||||
|
|
||||||
void
|
void
|
||||||
push(Priority priority, TaskType&& task)
|
push(Priority priority, auto&& task)
|
||||||
{
|
{
|
||||||
auto& queue = [this, priority] -> QueueType& {
|
auto& queue = [this, priority] -> QueueType& {
|
||||||
if (priority == Priority::High)
|
if (priority == Priority::High)
|
||||||
return high;
|
return high;
|
||||||
return normal;
|
return normal;
|
||||||
}();
|
}();
|
||||||
queue.push(TaskWithTimestamp{.task = std::move(task), .queuedAt = std::chrono::system_clock::now()});
|
queue.push(std::forward<decltype(task)>(task));
|
||||||
}
|
}
|
||||||
|
|
||||||
[[nodiscard]] bool
|
[[nodiscard]] bool
|
||||||
@@ -102,21 +100,21 @@ private:
|
|||||||
return high.empty() and normal.empty();
|
return high.empty() and normal.empty();
|
||||||
}
|
}
|
||||||
|
|
||||||
[[nodiscard]] std::optional<TaskWithTimestamp>
|
[[nodiscard]] std::optional<TaskType>
|
||||||
popNext()
|
popNext()
|
||||||
{
|
{
|
||||||
if (not high.empty() and (highPriorityCounter < kTAKE_HIGH_PRIO or normal.empty())) {
|
if (not high.empty() and (highPriorityCounter < kTAKE_HIGH_PRIO or normal.empty())) {
|
||||||
auto taskWithTimestamp = std::move(high.front());
|
auto task = std::move(high.front());
|
||||||
high.pop();
|
high.pop();
|
||||||
++highPriorityCounter;
|
++highPriorityCounter;
|
||||||
return taskWithTimestamp;
|
return task;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (not normal.empty()) {
|
if (not normal.empty()) {
|
||||||
auto taskWithTimestamp = std::move(normal.front());
|
auto task = std::move(normal.front());
|
||||||
normal.pop();
|
normal.pop();
|
||||||
highPriorityCounter = 0;
|
highPriorityCounter = 0;
|
||||||
return taskWithTimestamp;
|
return task;
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::nullopt;
|
return std::nullopt;
|
||||||
@@ -135,26 +133,14 @@ private:
|
|||||||
|
|
||||||
util::Logger log_{"RPC"};
|
util::Logger log_{"RPC"};
|
||||||
boost::asio::thread_pool ioc_;
|
boost::asio::thread_pool ioc_;
|
||||||
|
boost::asio::strand<boost::asio::thread_pool::executor_type> strand_;
|
||||||
|
bool hasDispatcher_ = false;
|
||||||
|
|
||||||
std::atomic_bool stopping_;
|
std::atomic_bool stopping_;
|
||||||
std::atomic_bool processingStarted_{false};
|
|
||||||
|
|
||||||
class OneTimeCallable {
|
util::Mutex<std::function<void()>> onQueueEmpty_;
|
||||||
std::function<void()> func_;
|
util::Mutex<DispatcherState> dispatcherState_;
|
||||||
bool called_{false};
|
boost::asio::steady_timer waitTimer_;
|
||||||
|
|
||||||
public:
|
|
||||||
void
|
|
||||||
setCallable(std::function<void()> func);
|
|
||||||
|
|
||||||
void
|
|
||||||
operator()();
|
|
||||||
|
|
||||||
explicit
|
|
||||||
operator bool() const;
|
|
||||||
};
|
|
||||||
util::Mutex<OneTimeCallable> onQueueEmpty_;
|
|
||||||
util::Mutex<QueueState> queueState_;
|
|
||||||
|
|
||||||
public:
|
public:
|
||||||
struct DontStartProcessingTag {};
|
struct DontStartProcessingTag {};
|
||||||
@@ -248,7 +234,7 @@ public:
|
|||||||
|
|
||||||
private:
|
private:
|
||||||
void
|
void
|
||||||
executeTask(boost::asio::yield_context yield);
|
dispatcherLoop(boost::asio::yield_context yield);
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace rpc
|
} // namespace rpc
|
||||||
|
|||||||
@@ -54,10 +54,10 @@
 void
 OnAssert::defaultAction(std::string_view message)
 {
-    if (LogServiceState::initialized() and LogServiceState::hasSinks()) {
+    if (LogServiceState::initialized()) {
         LOG(LogService::fatal()) << message;
     } else {
-        std::cerr << message << std::endl;
+        std::cerr << message;
     }
     std::exit(EXIT_FAILURE);  // std::abort does not flush gcovr output and causes uncovered lines
 }
@@ -1,460 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of clio: https://github.com/XRPLF/clio
-    Copyright (c) 2025, the clio developers.
-
-    Permission to use, copy, modify, and distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#pragma once
-
-#include "util/async/Concepts.hpp"
-
-#include <boost/asio/any_io_executor.hpp>
-#include <boost/asio/experimental/channel.hpp>
-#include <boost/asio/experimental/concurrent_channel.hpp>
-#include <boost/asio/spawn.hpp>
-#include <boost/system/detail/error_code.hpp>
-
-#include <concepts>
-#include <cstddef>
-#include <memory>
-#include <optional>
-#include <type_traits>
-#include <utility>
-
-namespace util {
-
-#ifdef __clang__
-namespace detail {
-// Forward declaration for compile-time check
-template <typename T>
-struct ChannelInstantiated;
-} // namespace detail
-#endif
-
-/**
- * @brief Specifies the producer concurrency model for a Channel.
- */
-enum class ProducerType {
-    Single, /**< Only one Sender can exist (non-copyable). Uses direct Guard ownership for zero overhead. */
-    Multi   /**< Multiple Senders can exist (copyable). Uses shared_ptr<Guard> for shared ownership. */
-};
-
-/**
- * @brief Specifies the consumer concurrency model for a Channel.
- */
-enum class ConsumerType {
-    Single, /**< Only one Receiver can exist (non-copyable). Uses direct Guard ownership for zero overhead. */
-    Multi   /**< Multiple Receivers can exist (copyable). Uses shared_ptr<Guard> for shared ownership. */
-};
-
-/**
- * @brief Represents a go-like channel, a multi-producer (Sender) multi-consumer (Receiver) thread-safe data pipe.
- * @note Use INSTANTIATE_CHANNEL_FOR_CLANG macro when using this class. See docs at the bottom of the file for more
- * details.
- *
- * @tparam T The type of data the channel transfers
- * @tparam P ProducerType::Multi (default) for multi-producer or ProducerType::Single for single-producer
- * @tparam C ConsumerType::Multi (default) for multi-consumer or ConsumerType::Single for single-consumer
- */
-template <typename T, ProducerType P = ProducerType::Multi, ConsumerType C = ConsumerType::Multi>
-class Channel {
-    static constexpr bool kIS_MULTI_PRODUCER = (P == ProducerType::Multi);
-    static constexpr bool kIS_MULTI_CONSUMER = (C == ConsumerType::Multi);
-
-private:
-    class ControlBlock {
-        using InternalChannelType = boost::asio::experimental::concurrent_channel<void(boost::system::error_code, T)>;
-        boost::asio::any_io_executor executor_;
-        InternalChannelType ch_;
-
-    public:
-        template <typename ContextType>
-            requires(not async::SomeExecutionContext<ContextType>)
-        ControlBlock(ContextType&& context, std::size_t capacity)
-            : executor_(context.get_executor()), ch_(context, capacity)
-        {
-        }
-
-        template <async::SomeExecutionContext ContextType>
-        ControlBlock(ContextType&& context, std::size_t capacity)
-            : executor_(context.getExecutor().get_executor()), ch_(context.getExecutor(), capacity)
-        {
-        }
-
-        [[nodiscard]] InternalChannelType&
-        channel()
-        {
-            return ch_;
-        }
-
-        void
-        close()
-        {
-            if (not isClosed()) {
-                ch_.close();
-                // Workaround for Boost bug: close() alone doesn't cancel pending async operations.
-                // We must call cancel() to unblock them. The bug also causes cancel() to return
-                // error_code 0 instead of channel_cancelled, so async operations must check
-                // isClosed() to detect this case.
-                // https://github.com/chriskohlhoff/asio/issues/1575
-                ch_.cancel();
-            }
-        }
-
-        [[nodiscard]] bool
-        isClosed() const
-        {
-            return not ch_.is_open();
-        }
-    };
-
-    /**
-     * @brief This is used to close the channel once either all Senders or all Receivers are destroyed
-     */
-    struct Guard {
-        std::shared_ptr<ControlBlock> shared;
-
-        ~Guard()
-        {
-            shared->close();
-        }
-    };
-
-public:
-    /**
-     * @brief The sending end of a channel.
-     *
-     * Sender is movable. For multi-producer channels, Sender is also copyable.
-     * The channel remains open as long as at least one Sender exists.
-     * When all Sender instances are destroyed, the channel is closed and receivers will receive std::nullopt.
-     */
-    class Sender {
-        std::shared_ptr<ControlBlock> shared_;
-        std::conditional_t<kIS_MULTI_PRODUCER, std::shared_ptr<Guard>, Guard> guard_;
-
-        friend class Channel<T, P, C>;
-
-        /**
-         * @brief Constructs a Sender from a shared control block.
-         * @param shared The shared control block managing the channel state
-         */
-        Sender(std::shared_ptr<ControlBlock> shared)
-            : shared_(shared), guard_([shared = std::move(shared)]() {
-                if constexpr (kIS_MULTI_PRODUCER) {
-                    return std::make_shared<Guard>(std::move(shared));
-                } else {
-                    return Guard{std::move(shared)};
-                }
-            }())
-        {
-        }
-
-    public:
-        Sender(Sender&&) = default;
-        Sender(Sender const&)
-            requires kIS_MULTI_PRODUCER
-        = default;
-        Sender(Sender const&)
-            requires(!kIS_MULTI_PRODUCER)
-        = delete;
-
-        Sender&
-        operator=(Sender&&) = default;
-        Sender&
-        operator=(Sender const&)
-            requires kIS_MULTI_PRODUCER
-        = default;
-        Sender&
-        operator=(Sender const&)
-            requires(!kIS_MULTI_PRODUCER)
-        = delete;
-
-        /**
-         * @brief Asynchronously sends data through the channel using a coroutine.
-         *
-         * Blocks the coroutine until the data is sent or the channel is closed.
-         *
-         * @tparam D The type of data to send (must be convertible to T)
-         * @param data The data to send
-         * @param yield The Boost.Asio yield context for coroutine suspension
-         * @return true if the data was sent successfully, false if the channel is closed
-         */
-        template <typename D>
-        bool
-        asyncSend(D&& data, boost::asio::yield_context yield)
-            requires(std::convertible_to<std::remove_cvref_t<D>, std::remove_cvref_t<T>>)
-        {
-            boost::system::error_code const ecIn;
-            boost::system::error_code ecOut;
-            shared_->channel().async_send(ecIn, std::forward<D>(data), yield[ecOut]);
-
-            // Workaround: asio channels bug returns ec=0 on cancel, check isClosed() instead
-            if (not ecOut and shared_->isClosed())
-                return false;
-
-            return not ecOut;
-        }
-
-        /**
-         * @brief Asynchronously sends data through the channel using a callback.
-         *
-         * The callback is invoked when the send operation completes.
-         *
-         * @tparam D The type of data to send (must be convertible to T)
-         * @param data The data to send
-         * @param fn Callback function invoked with true if successful, false if the channel is closed
-         */
-        template <typename D>
-        void
-        asyncSend(D&& data, std::invocable<bool> auto&& fn)
-            requires(std::convertible_to<std::remove_cvref_t<D>, std::remove_cvref_t<T>>)
-        {
-            boost::system::error_code const ecIn;
-            shared_->channel().async_send(
-                ecIn,
-                std::forward<D>(data),
-                [fn = std::forward<decltype(fn)>(fn), shared = shared_](boost::system::error_code ec) mutable {
-                    // Workaround: asio channels bug returns ec=0 on cancel, check isClosed() instead
-                    if (not ec and shared->isClosed()) {
-                        fn(false);
-                        return;
-                    }
-
-                    fn(not ec);
-                }
-            );
-        }
-
-        /**
-         * @brief Attempts to send data through the channel without blocking.
-         *
-         * @tparam D The type of data to send (must be convertible to T)
-         * @param data The data to send
-         * @return true if the data was sent successfully, false if the channel is full or closed
-         */
-        template <typename D>
-        bool
-        trySend(D&& data)
-            requires(std::convertible_to<std::remove_cvref_t<D>, std::remove_cvref_t<T>>)
-        {
-            boost::system::error_code ec;
-            return shared_->channel().try_send(ec, std::forward<D>(data));
-        }
-    };
-
-    /**
-     * @brief The receiving end of a channel.
-     *
-     * Receiver is movable. For multi-consumer channels, Receiver is also copyable.
-     * Multiple receivers can consume from the same multi-consumer channel concurrently.
-     * When all Receiver instances are destroyed, the channel is closed and senders will fail to send.
-     */
-    class Receiver {
-        std::shared_ptr<ControlBlock> shared_;
-        std::conditional_t<kIS_MULTI_CONSUMER, std::shared_ptr<Guard>, Guard> guard_;
-
-        friend class Channel<T, P, C>;
-
-        /**
-         * @brief Constructs a Receiver from a shared control block.
-         * @param shared The shared control block managing the channel state
-         */
-        Receiver(std::shared_ptr<ControlBlock> shared)
-            : shared_(shared), guard_([shared = std::move(shared)]() {
-                if constexpr (kIS_MULTI_CONSUMER) {
-                    return std::make_shared<Guard>(std::move(shared));
-                } else {
-                    return Guard{std::move(shared)};
-                }
-            }())
-        {
-        }
-
-    public:
-        Receiver(Receiver&&) = default;
-        Receiver(Receiver const&)
-            requires kIS_MULTI_CONSUMER
-        = default;
-        Receiver(Receiver const&)
-            requires(!kIS_MULTI_CONSUMER)
-        = delete;
-
-        Receiver&
-        operator=(Receiver&&) = default;
-        Receiver&
-        operator=(Receiver const&)
-            requires kIS_MULTI_CONSUMER
-        = default;
-        Receiver&
-        operator=(Receiver const&)
-            requires(!kIS_MULTI_CONSUMER)
-        = delete;
-
-        /**
-         * @brief Attempts to receive data from the channel without blocking.
-         *
-         * @return std::optional containing the received value, or std::nullopt if the channel is empty or closed
-         */
-        std::optional<T>
-        tryReceive()
-        {
-            std::optional<T> result;
-            shared_->channel().try_receive([&result](boost::system::error_code ec, auto&& value) {
-                if (not ec)
-                    result = std::forward<decltype(value)>(value);
-            });
-
-            return result;
-        }
-
-        /**
-         * @brief Asynchronously receives data from the channel using a coroutine.
-         *
-         * Blocks the coroutine until data is available or the channel is closed.
-         *
-         * @param yield The Boost.Asio yield context for coroutine suspension
-         * @return std::optional containing the received value, or std::nullopt if the channel is closed
-         */
-        [[nodiscard]] std::optional<T>
-        asyncReceive(boost::asio::yield_context yield)
-        {
-            boost::system::error_code ec;
-            auto value = shared_->channel().async_receive(yield[ec]);
-
-            if (ec)
-                return std::nullopt;
-
-            return value;
-        }
-
-        /**
-         * @brief Asynchronously receives data from the channel using a callback.
-         *
-         * The callback is invoked when data is available or the channel is closed.
-         *
-         * @param fn Callback function invoked with std::optional containing the value, or std::nullopt if closed
-         */
-        void
-        asyncReceive(std::invocable<std::optional<std::remove_cvref_t<T>>> auto&& fn)
-        {
-            shared_->channel().async_receive(
-                [fn = std::forward<decltype(fn)>(fn)](boost::system::error_code ec, T&& value) mutable {
-                    if (ec) {
-                        fn(std::optional<T>(std::nullopt));
-                        return;
-                    }
-
-                    fn(std::make_optional<T>(std::move(value)));
-                }
-            );
-        }
-
-        /**
-         * @brief Checks if the channel is closed.
-         *
-         * A channel is closed when all Sender instances have been destroyed.
-         *
-         * @return true if the channel is closed, false otherwise
-         */
-        [[nodiscard]] bool
-        isClosed() const
-        {
-            return shared_->isClosed();
-        }
-    };
-
-    /**
-     * @brief Factory function to create channel components.
-     * @param context A supported context type (either io_context or thread_pool)
-     * @param capacity Size of the internal buffer on the channel
-     * @return A pair of Sender and Receiver
-     */
-    static std::pair<Sender, Receiver>
-    create(auto&& context, std::size_t capacity)
-    {
-#ifdef __clang__
-        static_assert(
-            util::detail::ChannelInstantiated<T>::value,
-            "When using Channel<T> with Clang, you must add INSTANTIATE_CHANNEL_FOR_CLANG(T) "
-            "to one .cpp file. See documentation at the bottom of Channel.hpp for details."
-        );
-#endif
-        auto shared = std::make_shared<ControlBlock>(std::forward<decltype(context)>(context), capacity);
-        auto sender = Sender{shared};
-        auto receiver = Receiver{std::move(shared)};
-
-        return {std::move(sender), std::move(receiver)};
-    }
-};
-
-} // namespace util
-
-// ================================================================================================
-// Clang/Apple Clang Workaround for Boost.Asio Experimental Channels
-// ================================================================================================
-//
-// IMPORTANT: When using Channel<T> with Clang or Apple Clang, you MUST add the following line
-// to ONE .cpp file that uses Channel<T>:
-//
-//     INSTANTIATE_CHANNEL_FOR_CLANG(YourType)
-//
-// Example:
-//     // In ChannelTests.cpp or any .cpp file that uses Channel<int>:
-//     #include "util/Channel.hpp"
-//     INSTANTIATE_CHANNEL_FOR_CLANG(int)
-//
-// Why this is needed:
-// Boost.Asio's experimental concurrent_channel has a bug where close() doesn't properly cancel
-// pending async operations. When using cancellation signals (which we do in our workaround),
-// Clang generates vtable references for internal cancellation_handler types but Boost.Asio
-// doesn't provide the definitions, causing linker errors:
-//
-//     Undefined symbols for architecture arm64:
-//       "boost::asio::detail::cancellation_handler<...>::call(boost::asio::cancellation_type)"
-//       "boost::asio::detail::cancellation_handler<...>::destroy()"
-//
-// This macro explicitly instantiates the required template specializations.
-//
-// See: https://github.com/chriskohlhoff/asio/issues/1575
-//
-#ifdef __clang__
-
-#include <boost/asio/cancellation_signal.hpp>
-#include <boost/asio/experimental/channel_traits.hpp>
-#include <boost/asio/experimental/detail/channel_service.hpp>
-
-namespace util::detail {
-// Tag type used to verify that INSTANTIATE_CHANNEL_FOR_CLANG was called for a given type
-template <typename T>
-struct ChannelInstantiated : std::false_type {};
-} // namespace util::detail
-
-#define INSTANTIATE_CHANNEL_FOR_CLANG(T) \
-    /* NOLINTNEXTLINE(cppcoreguidelines-virtual-class-destructor) */ \
-    template class boost::asio::detail::cancellation_handler< \
-        boost::asio::experimental::detail::channel_service<boost::asio::detail::posix_mutex>:: \
-            op_cancellation<boost::asio::experimental::channel_traits<>, void(boost::system::error_code, T)>>; \
-    namespace util::detail { \
-    template <> \
-    struct ChannelInstantiated<T> : std::true_type {}; \
-    }
-
-#else
-
-// No workaround needed for non-Clang compilers
-#define INSTANTIATE_CHANNEL_FOR_CLANG(T)
-
-#endif
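For context on the Channel API deleted above: Channel<T>::create() returns a Sender/Receiver pair, and each end is typically driven from a coroutine. The following is a rough usage sketch only, not part of the commits shown here; the util::spawn helper, its header location, and the thread_pool wiring are assumed from the surrounding clio code.

// Hypothetical usage sketch of the removed util::Channel API (assumptions noted above).
#include "util/Channel.hpp"

#include <boost/asio/thread_pool.hpp>

INSTANTIATE_CHANNEL_FOR_CLANG(int) // required once per element type when building with Clang

void
channelUsageSketch()
{
    boost::asio::thread_pool pool{2};

    // Buffered channel of ints with capacity 8; returns the sending and receiving ends.
    auto [sender, receiver] = util::Channel<int>::create(pool, 8);

    // Producer coroutine: asyncSend() returns false once the channel is closed.
    util::spawn(pool, [sender = std::move(sender)](boost::asio::yield_context yield) mutable {
        for (int i = 0; i < 10; ++i)
            sender.asyncSend(i, yield);
    });

    // Consumer coroutine: asyncReceive() yields std::nullopt after all senders are destroyed.
    util::spawn(pool, [receiver = std::move(receiver)](boost::asio::yield_context yield) mutable {
        while (auto value = receiver.asyncReceive(yield)) {
            // consume *value
        }
    });

    pool.join();
}

The loop ends cleanly because destroying the last Sender closes the channel, which the Receiver observes as std::nullopt.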
@@ -22,7 +22,6 @@
 #include <boost/asio/spawn.hpp>
 #include <boost/asio/strand.hpp>

-#include <concepts>
 #include <exception>
 #include <type_traits>


@@ -29,27 +29,6 @@

 namespace util::async {

-/**
- * @brief Tag type for identifying execution context types.
- *
- * Types that inherit from this tag can be detected using the SomeExecutionContext concept.
- * This allows generic code to differentiate between raw Boost.Asio contexts and wrapped execution contexts.
- */
-struct ExecutionContextTag {
-    virtual ~ExecutionContextTag() = default;
-};
-
-/**
- * @brief Concept that identifies types derived from ExecutionContextTag.
- *
- * This concept is used to detect custom execution context wrappers (like BasicExecutionContext)
- * and distinguish them from raw Boost.Asio contexts (io_context, thread_pool, etc.).
- *
- * @tparam T The type to check
- */
-template <typename T>
-concept SomeExecutionContext = std::derived_from<std::remove_cvref_t<T>, ExecutionContextTag>;
-
 /**
  * @brief Specifies the interface for an entity that can be stopped
  */

@@ -129,7 +129,7 @@ template <
     typename DispatcherType,
     typename TimerContextProvider = impl::SelfContextProvider,
     typename ErrorHandlerType = impl::DefaultErrorHandler>
-class BasicExecutionContext : public ExecutionContextTag {
+class BasicExecutionContext {
     ContextType context_;

     /** @cond */

@@ -182,7 +182,7 @@ public:
     /**
      * @brief Stops the underlying thread pool.
      */
-    ~BasicExecutionContext() override
+    ~BasicExecutionContext()
     {
         stop();
     }

@@ -402,20 +402,6 @@ public:
     {
         context_.join();
     }
-
-    /**
-     * @brief Get the underlying executor.
-     *
-     * Provides access to the wrapped executor for cases where the execution context
-     * needs to interact with components that require explicit executor access (like Channel).
-     *
-     * @return Reference to the underlying executor
-     */
-    typename ContextType::Executor&
-    getExecutor()
-    {
-        return context_.getExecutor();
-    }
 };

 /**

@@ -36,26 +36,17 @@ struct SpawnDispatchStrategy {
     {
         auto op = outcome.getOperation();

-        if constexpr (SomeStoppableOutcome<OutcomeType>) {
-            util::spawn(
-                ctx.getExecutor(),
-                [outcome = std::forward<OutcomeType>(outcome), fn = std::forward<FnType>(fn)](auto yield) mutable {
-                    if constexpr (SomeStoppableOutcome<OutcomeType>) {
-                        auto& stopSource = outcome.getStopSource();
-                        std::invoke(std::forward<decltype(fn)>(fn), outcome, stopSource, stopSource[yield]);
-                    } else {
-                        std::invoke(std::forward<decltype(fn)>(fn), outcome);
-                    }
-                }
-            );
-        } else {
-            boost::asio::post(
-                ctx.getExecutor(),
-                [outcome = std::forward<OutcomeType>(outcome), fn = std::forward<FnType>(fn)]() mutable {
-                    std::invoke(std::forward<decltype(fn)>(fn), outcome);
-                }
-            );
-        }
+        util::spawn(
+            ctx.getExecutor(),
+            [outcome = std::forward<OutcomeType>(outcome), fn = std::forward<FnType>(fn)](auto yield) mutable {
+                if constexpr (SomeStoppableOutcome<OutcomeType>) {
+                    auto& stopSource = outcome.getStopSource();
+                    std::invoke(std::forward<decltype(fn)>(fn), outcome, stopSource, stopSource[yield]);
+                } else {
+                    std::invoke(std::forward<decltype(fn)>(fn), outcome);
+                }
+            }
+        );

         return op;
     }

@@ -64,7 +55,7 @@ struct SpawnDispatchStrategy {
     static void
     post(ContextType& ctx, FnType&& fn)
     {
-        boost::asio::post(ctx.getExecutor(), [fn = std::forward<FnType>(fn)]() mutable {
+        util::spawn(ctx.getExecutor(), [fn = std::forward<FnType>(fn)](auto) mutable {
            std::invoke(std::forward<decltype(fn)>(fn));
        });
    }

@@ -26,20 +26,7 @@ namespace util::build {
 #ifndef CLIO_VERSION
 #error "CLIO_VERSION must be defined"
 #endif
-#ifndef GIT_COMMIT_HASH
-#error "GIT_COMMIT_HASH must be defined"
-#endif
-#ifndef GIT_BUILD_BRANCH
-#error "GIT_BUILD_BRANCH must be defined"
-#endif
-#ifndef BUILD_DATE
-#error "BUILD_DATE must be defined"
-#endif

 static constexpr char kVERSION_STRING[] = CLIO_VERSION;
-static constexpr char kGIT_COMMIT_HASH[] = GIT_COMMIT_HASH;
-static constexpr char kGIT_BUILD_BRANCH[] = GIT_BUILD_BRANCH;
-static constexpr char kBUILD_DATE[] = BUILD_DATE;

 std::string const&
 getClioVersionString()

@@ -55,25 +42,4 @@ getClioFullVersionString()
     return value;
 }

-std::string const&
-getGitCommitHash()
-{
-    static std::string const value = kGIT_COMMIT_HASH; // NOLINT(readability-identifier-naming)
-    return value;
-}
-
-std::string const&
-getGitBuildBranch()
-{
-    static std::string const value = kGIT_BUILD_BRANCH; // NOLINT(readability-identifier-naming)
-    return value;
-}
-
-std::string const&
-getBuildDate()
-{
-    static std::string const value = kBUILD_DATE; // NOLINT(readability-identifier-naming)
-    return value;
-}
-
 } // namespace util::build

@@ -29,13 +29,4 @@ getClioVersionString();
 std::string const&
 getClioFullVersionString();

-std::string const&
-getGitCommitHash();
-
-std::string const&
-getGitBuildBranch();
-
-std::string const&
-getBuildDate();
-
 } // namespace util::build

@@ -3,7 +3,4 @@ include(${CMAKE_CURRENT_SOURCE_DIR}/../../../cmake/ClioVersion.cmake)
 add_library(clio_build_version)
 target_sources(clio_build_version PRIVATE Build.cpp)
 target_link_libraries(clio_build_version PUBLIC clio_options)
-target_compile_definitions(
-  clio_build_version PRIVATE CLIO_VERSION="${CLIO_VERSION}" GIT_COMMIT_HASH="${GIT_COMMIT_HASH}"
-  GIT_BUILD_BRANCH="${GIT_BUILD_BRANCH}" BUILD_DATE="${BUILD_DATE}"
-)
+target_compile_definitions(clio_build_version PRIVATE CLIO_VERSION="${CLIO_VERSION}")

@@ -361,7 +361,6 @@ getClioConfig()
         {"cache.load", ConfigValue{ConfigType::String}.defaultValue("async").withConstraint(gValidateLoadMode)},
         {"cache.file.path", ConfigValue{ConfigType::String}.optional()},
         {"cache.file.max_sequence_age", ConfigValue{ConfigType::Integer}.defaultValue(5000)},
-        {"cache.file.async_save", ConfigValue{ConfigType::Boolean}.defaultValue(false)},

         {"log.channels.[].channel",
          Array{ConfigValue{ConfigType::String}.optional().withConstraint(gValidateChannelName)}},

@@ -282,9 +282,6 @@ This document provides a list of all available Clio configuration properties in
     KV{.key = "cache.file.max_sequence_age",
        .value = "Max allowed difference between the latest sequence in DB and in cache file. If the cache file is "
                 "too old (contains too low latest sequence) Clio will reject using it."},
-    KV{.key = "cache.file.async_save",
-       .value = "When false, Clio waits for cache saving to finish before shutting down. When true, "
-                "cache saving runs in parallel with other shutdown operations."},
     KV{.key = "log.channels.[].channel", .value = "The name of the log channel."},
     KV{.key = "log.channels.[].level", .value = "The log level for the specific log channel."},
     KV{.key = "log.level",

@@ -271,12 +271,6 @@ LogServiceState::initialized()
     return initialized_;
 }

-bool
-LogServiceState::hasSinks()
-{
-    return not sinks_.empty();
-}
-
 void
 LogServiceState::reset()
 {

@@ -267,14 +267,6 @@ protected:
     [[nodiscard]] static bool
     initialized();

-    /**
-     * @brief Whether the LogService has any sink. If there is no sink, logger will not log messages anywhere.
-     *
-     * @return true if the LogService has at least one sink
-     */
-    [[nodiscard]] static bool
-    hasSinks();
-
     /**
      * @brief Reset the logging service to uninitialized state.
      */

@@ -25,7 +25,6 @@
 #include "util/config/ConfigDefinition.hpp"

 #include <gmock/gmock.h>
-#include <gtest/gtest.h>

 #include <memory>


@@ -1,40 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of clio: https://github.com/XRPLF/clio
-    Copyright (c) 2025, the clio developers.
-
-    Permission to use, copy, modify, and distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#pragma once
-
-#include "etl/WriterState.hpp"
-
-#include <gmock/gmock.h>
-
-#include <memory>
-
-struct MockWriterStateBase : public etl::WriterStateInterface {
-    MOCK_METHOD(bool, isReadOnly, (), (const, override));
-    MOCK_METHOD(bool, isWriting, (), (const, override));
-    MOCK_METHOD(void, startWriting, (), (override));
-    MOCK_METHOD(void, giveUpWriting, (), (override));
-    MOCK_METHOD(void, setWriterDecidingFallback, (), (override));
-    MOCK_METHOD(bool, isFallback, (), (const, override));
-    MOCK_METHOD(bool, isLoadingCache, (), (const, override));
-    MOCK_METHOD(std::unique_ptr<etl::WriterStateInterface>, clone, (), (const, override));
-};
-
-using MockWriterState = testing::StrictMock<MockWriterStateBase>;
-using NiceMockWriterState = testing::NiceMock<MockWriterStateBase>;

@@ -21,12 +21,8 @@ target_sources(
   data/impl/LedgerCacheFileTests.cpp
   data/impl/OutputFileTests.cpp
   # Cluster
-  cluster/BackendTests.cpp
   cluster/ClioNodeTests.cpp
   cluster/ClusterCommunicationServiceTests.cpp
-  cluster/MetricsTests.cpp
-  cluster/RepeatedTaskTests.cpp
-  cluster/WriterDeciderTests.cpp
   # ETL
   etl/AmendmentBlockHandlerTests.cpp
   etl/CacheLoaderSettingsTests.cpp

@@ -50,9 +46,7 @@ target_sources(
   etl/SchedulingTests.cpp
   etl/SourceImplTests.cpp
   etl/SubscriptionSourceTests.cpp
-  etl/SystemStateTests.cpp
   etl/TaskManagerTests.cpp
-  etl/WriterStateTests.cpp
   etl/ext/CoreTests.cpp
   etl/ext/CacheTests.cpp
   etl/ext/MPTTests.cpp

@@ -173,7 +167,6 @@ target_sources(
   util/AccountUtilsTests.cpp
   util/AssertTests.cpp
   util/BytesConverterTests.cpp
-  util/ChannelTests.cpp
   util/CoroutineTest.cpp
   util/MoveTrackerTests.cpp
   util/ObservableValueTest.cpp

@@ -17,7 +17,6 @@
 */
 //==============================================================================
 #include "app/Stopper.hpp"
-#include "cluster/Concepts.hpp"
 #include "util/AsioContextTestFixture.hpp"
 #include "util/MockBackend.hpp"
 #include "util/MockETLService.hpp"

@@ -88,10 +87,6 @@ struct StopperMakeCallbackTest : util::prometheus::WithPrometheus, SyncAsioConte
         MOCK_METHOD(void, waitToFinish, ());
     };

-    struct MockClusterCommunicationService : cluster::ClusterCommunicationServiceTag {
-        MOCK_METHOD(void, stop, (), ());
-    };
-
 protected:
     testing::StrictMock<ServerMock> serverMock_;
     testing::StrictMock<MockLoadBalancer> loadBalancerMock_;

@@ -99,7 +94,6 @@ protected:
     testing::StrictMock<MockSubscriptionManager> subscriptionManagerMock_;
     testing::StrictMock<MockBackend> backendMock_{util::config::ClioConfigDefinition{}};
     testing::StrictMock<MockLedgerCacheSaver> cacheSaverMock_;
-    testing::StrictMock<MockClusterCommunicationService> clusterCommunicationServiceMock_;
     boost::asio::io_context ioContextToStop_;

     bool

@@ -121,7 +115,6 @@ TEST_F(StopperMakeCallbackTest, makeCallbackTest)
         subscriptionManagerMock_,
         backendMock_,
         cacheSaverMock_,
-        clusterCommunicationServiceMock_,
         ioContextToStop_
     );


@@ -129,9 +122,6 @@ TEST_F(StopperMakeCallbackTest, makeCallbackTest)
     EXPECT_CALL(cacheSaverMock_, save).InSequence(s1).WillOnce([this]() { EXPECT_FALSE(isContextStopped()); });
     EXPECT_CALL(serverMock_, stop).InSequence(s1).WillOnce([this]() { EXPECT_FALSE(isContextStopped()); });
     EXPECT_CALL(loadBalancerMock_, stop).InSequence(s2).WillOnce([this]() { EXPECT_FALSE(isContextStopped()); });
-    EXPECT_CALL(clusterCommunicationServiceMock_, stop).InSequence(s1, s2).WillOnce([this]() {
-        EXPECT_FALSE(isContextStopped());
-    });
     EXPECT_CALL(etlServiceMock_, stop).InSequence(s1, s2).WillOnce([this]() { EXPECT_FALSE(isContextStopped()); });
     EXPECT_CALL(subscriptionManagerMock_, stop).InSequence(s1, s2).WillOnce([this]() {
         EXPECT_FALSE(isContextStopped());

@@ -1,351 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of clio: https://github.com/XRPLF/clio
-    Copyright (c) 2025, the clio developers.
-
-    Permission to use, copy, modify, and distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#include "cluster/Backend.hpp"
-#include "cluster/ClioNode.hpp"
-#include "data/BackendInterface.hpp"
-#include "util/MockBackendTestFixture.hpp"
-#include "util/MockPrometheus.hpp"
-#include "util/MockWriterState.hpp"
-
-#include <boost/asio/thread_pool.hpp>
-#include <boost/json/parse.hpp>
-#include <boost/json/serialize.hpp>
-#include <boost/json/value.hpp>
-#include <boost/json/value_from.hpp>
-#include <boost/json/value_to.hpp>
-#include <boost/uuid/random_generator.hpp>
-#include <boost/uuid/uuid.hpp>
-#include <boost/uuid/uuid_io.hpp>
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-
-#include <chrono>
-#include <memory>
-#include <semaphore>
-#include <stdexcept>
-#include <string>
-#include <thread>
-#include <utility>
-#include <vector>
-
-using namespace cluster;
-
-struct ClusterBackendTest : util::prometheus::WithPrometheus, MockBackendTestStrict {
-    ~ClusterBackendTest() override
-    {
-        ctx.stop();
-        ctx.join();
-    }
-
-    boost::asio::thread_pool ctx;
-    std::unique_ptr<MockWriterState> writerState = std::make_unique<MockWriterState>();
-    MockWriterState& writerStateRef = *writerState;
-    testing::StrictMock<testing::MockFunction<void(ClioNode::CUuid, std::shared_ptr<Backend::ClusterData const>)>>
-        callbackMock;
-    std::binary_semaphore semaphore{0};
-
-    class SemaphoreReleaseGuard {
-        std::binary_semaphore& semaphore_;
-
-    public:
-        SemaphoreReleaseGuard(std::binary_semaphore& s) : semaphore_(s)
-        {
-        }
-        ~SemaphoreReleaseGuard()
-        {
-            semaphore_.release();
-        }
-    };
-};
-
-TEST_F(ClusterBackendTest, SubscribeToNewState)
-{
-    Backend clusterBackend{
-        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
-    };
-
-    clusterBackend.subscribeToNewState(callbackMock.AsStdFunction());
-
-    EXPECT_CALL(*backend_, fetchClioNodesData)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly(testing::Return(BackendInterface::ClioNodesDataFetchResult{}));
-    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
-    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(true));
-    EXPECT_CALL(callbackMock, Call)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly([this](ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData) {
-            SemaphoreReleaseGuard guard{semaphore};
-            ASSERT_TRUE(clusterData->has_value());
-            EXPECT_EQ(clusterData->value().size(), 1);
-            auto const& nodeData = clusterData->value().front();
-            EXPECT_EQ(nodeData.uuid, selfId);
-            EXPECT_EQ(nodeData.dbRole, ClioNode::DbRole::ReadOnly);
-            EXPECT_LE(nodeData.updateTime, std::chrono::system_clock::now());
-        });
-
-    clusterBackend.run();
-    semaphore.acquire();
-}
-
-TEST_F(ClusterBackendTest, Stop)
-{
-    Backend clusterBackend{
-        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
-    };
-
-    EXPECT_CALL(*backend_, fetchClioNodesData)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly(testing::Return(BackendInterface::ClioNodesDataFetchResult{}));
-    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
-    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(true));
-
-    clusterBackend.run();
-    std::this_thread::sleep_for(std::chrono::milliseconds{20});
-    clusterBackend.stop();
-
-    testing::Mock::VerifyAndClearExpectations(&(*backend_));
-    // Wait to make sure there is no new calls of mockDbBackend
-    std::this_thread::sleep_for(std::chrono::milliseconds{20});
-}
-
-TEST_F(ClusterBackendTest, FetchClioNodesDataThrowsException)
-{
-    Backend clusterBackend{
-        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
-    };
-
-    clusterBackend.subscribeToNewState(callbackMock.AsStdFunction());
-
-    EXPECT_CALL(*backend_, fetchClioNodesData)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly(testing::Throw(std::runtime_error("Database connection failed")));
-    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
-    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(true));
-    EXPECT_CALL(callbackMock, Call)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly([this](ClioNode::CUuid, std::shared_ptr<Backend::ClusterData const> clusterData) {
-            SemaphoreReleaseGuard guard{semaphore};
-            ASSERT_FALSE(clusterData->has_value());
-            EXPECT_EQ(clusterData->error(), "Failed to fetch Clio nodes data");
-        });
-
-    clusterBackend.run();
-    semaphore.acquire();
-}
-
-TEST_F(ClusterBackendTest, FetchClioNodesDataReturnsDataWithOtherNodes)
-{
-    Backend clusterBackend{
-        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
-    };
-
-    clusterBackend.subscribeToNewState(callbackMock.AsStdFunction());
-
-    auto const otherUuid = boost::uuids::random_generator{}();
-    auto const otherNodeJson = R"JSON({
-        "db_role": 3,
-        "update_time": "2025-01-15T10:30:00Z"
-    })JSON";
-
-    EXPECT_CALL(*backend_, fetchClioNodesData)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly(
-            testing::Return(
-                BackendInterface::ClioNodesDataFetchResult{
-                    std::vector<std::pair<boost::uuids::uuid, std::string>>{{otherUuid, otherNodeJson}}
-                }
-            )
-        );
-    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
-    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
-    EXPECT_CALL(writerStateRef, isFallback).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
-    EXPECT_CALL(writerStateRef, isLoadingCache).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
-    EXPECT_CALL(writerStateRef, isWriting).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
-    EXPECT_CALL(callbackMock, Call)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly([&](ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData) {
-            SemaphoreReleaseGuard guard{semaphore};
-            ASSERT_TRUE(clusterData->has_value()) << clusterData->error();
-            EXPECT_EQ(clusterData->value().size(), 2);
-            EXPECT_EQ(selfId, clusterBackend.selfId());
-
-            bool foundSelf = false;
-            bool foundOther = false;
-
-            for (auto const& node : clusterData->value()) {
-                if (*node.uuid == *selfId) {
-                    foundSelf = true;
-                    EXPECT_EQ(node.dbRole, ClioNode::DbRole::NotWriter);
-                } else if (*node.uuid == otherUuid) {
-                    foundOther = true;
-                    EXPECT_EQ(node.dbRole, ClioNode::DbRole::Writer);
-                }
-                EXPECT_LE(node.updateTime, std::chrono::system_clock::now());
-            }
-
-            EXPECT_TRUE(foundSelf);
-            EXPECT_TRUE(foundOther);
-        });
-
-    clusterBackend.run();
-    semaphore.acquire();
-}
-
-TEST_F(ClusterBackendTest, FetchClioNodesDataReturnsOnlySelfData)
-{
-    Backend clusterBackend{
-        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
-    };
-
-    clusterBackend.subscribeToNewState(callbackMock.AsStdFunction());
-
-    auto const selfNodeJson = R"JSON({
-        "db_role": 1,
-        "update_time": "2025-01-16T10:30:00Z"
-    })JSON";
-
-    EXPECT_CALL(*backend_, fetchClioNodesData).Times(testing::AtLeast(1)).WillRepeatedly([&]() {
-        return BackendInterface::ClioNodesDataFetchResult{
-            std::vector<std::pair<boost::uuids::uuid, std::string>>{{*clusterBackend.selfId(), selfNodeJson}}
-        };
-    });
-    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
-    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(true));
-    EXPECT_CALL(callbackMock, Call)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly([this](ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData) {
-            SemaphoreReleaseGuard guard{semaphore};
-            ASSERT_TRUE(clusterData->has_value());
-            EXPECT_EQ(clusterData->value().size(), 1);
-            auto const& nodeData = clusterData->value().front();
-            EXPECT_EQ(nodeData.uuid, selfId);
-            EXPECT_EQ(nodeData.dbRole, ClioNode::DbRole::ReadOnly);
-            EXPECT_LE(nodeData.updateTime, std::chrono::system_clock::now());
-        });
-
-    clusterBackend.run();
-    semaphore.acquire();
-}
-
-TEST_F(ClusterBackendTest, FetchClioNodesDataReturnsInvalidJson)
-{
-    Backend clusterBackend{
-        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
-    };
-
-    clusterBackend.subscribeToNewState(callbackMock.AsStdFunction());
-
-    auto const otherUuid = boost::uuids::random_generator{}();
-    auto const invalidJson = "{ invalid json";
-
-    EXPECT_CALL(*backend_, fetchClioNodesData)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly(
-            testing::Return(
-                BackendInterface::ClioNodesDataFetchResult{
-                    std::vector<std::pair<boost::uuids::uuid, std::string>>{{otherUuid, invalidJson}}
-                }
-            )
-        );
-    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
-    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(true));
-    EXPECT_CALL(callbackMock, Call)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly([this, invalidJson](ClioNode::CUuid, std::shared_ptr<Backend::ClusterData const> clusterData) {
-            SemaphoreReleaseGuard guard{semaphore};
-            ASSERT_FALSE(clusterData->has_value());
-            EXPECT_THAT(clusterData->error(), testing::HasSubstr("Error parsing json from DB"));
-            EXPECT_THAT(clusterData->error(), testing::HasSubstr(invalidJson));
-        });
-
-    clusterBackend.run();
-    semaphore.acquire();
-}
-
-TEST_F(ClusterBackendTest, FetchClioNodesDataReturnsValidJsonButCannotConvertToClioNode)
-{
-    Backend clusterBackend{
-        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
-    };
-
-    clusterBackend.subscribeToNewState(callbackMock.AsStdFunction());
-
-    auto const otherUuid = boost::uuids::random_generator{}();
-    // Valid JSON but missing required field 'db_role'
-    auto const validJsonMissingField = R"JSON({
-        "update_time": "2025-01-16T10:30:00Z"
-    })JSON";
-
-    EXPECT_CALL(*backend_, fetchClioNodesData)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly(
-            testing::Return(
-                BackendInterface::ClioNodesDataFetchResult{
-                    std::vector<std::pair<boost::uuids::uuid, std::string>>{{otherUuid, validJsonMissingField}}
-                }
-            )
-        );
-    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
-    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(true));
-    EXPECT_CALL(callbackMock, Call)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly([this](ClioNode::CUuid, std::shared_ptr<Backend::ClusterData const> clusterData) {
-            SemaphoreReleaseGuard guard{semaphore};
-            ASSERT_FALSE(clusterData->has_value());
-            EXPECT_THAT(clusterData->error(), testing::HasSubstr("Error converting json to ClioNode"));
-        });
-
-    clusterBackend.run();
-    semaphore.acquire();
-}
-
-TEST_F(ClusterBackendTest, WriteNodeMessageWritesSelfDataWithRecentTimestampAndDbRole)
-{
-    Backend clusterBackend{
-        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
-    };
-
-    auto const beforeRun = std::chrono::floor<std::chrono::seconds>(std::chrono::system_clock::now());
-
-    EXPECT_CALL(*backend_, fetchClioNodesData)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly(testing::Return(BackendInterface::ClioNodesDataFetchResult{}));
-    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
-    EXPECT_CALL(writerStateRef, isFallback).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
-    EXPECT_CALL(writerStateRef, isLoadingCache).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
-    EXPECT_CALL(writerStateRef, isWriting).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
-    EXPECT_CALL(*backend_, writeNodeMessage)
-        .Times(testing::AtLeast(1))
-        .WillRepeatedly([&](boost::uuids::uuid const& uuid, std::string message) {
-            SemaphoreReleaseGuard guard{semaphore};
-            auto const afterWrite = std::chrono::system_clock::now();
-
-            EXPECT_EQ(uuid, *clusterBackend.selfId());
-            auto const json = boost::json::parse(message);
-            auto const node = boost::json::try_value_to<ClioNode>(json);
-            ASSERT_TRUE(node.has_value());
-            EXPECT_EQ(node->dbRole, ClioNode::DbRole::NotWriter);
-            EXPECT_GE(node->updateTime, beforeRun);
-            EXPECT_LE(node->updateTime, afterWrite);
-        });
-
-    clusterBackend.run();
-    semaphore.acquire();
-}

@@ -18,8 +18,6 @@
 //==============================================================================

 #include "cluster/ClioNode.hpp"
-#include "util/MockWriterState.hpp"
-#include "util/NameGenerator.hpp"
 #include "util/TimeUtils.hpp"

 #include <boost/json/object.hpp>

@@ -28,11 +26,9 @@
 #include <boost/json/value_to.hpp>
 #include <boost/uuid/random_generator.hpp>
 #include <boost/uuid/uuid.hpp>
-#include <gmock/gmock.h>
 #include <gtest/gtest.h>

 #include <chrono>
-#include <cstdint>
 #include <ctime>
 #include <memory>
 #include <stdexcept>

@@ -48,44 +44,44 @@ struct ClioNodeTest : testing::Test {

 TEST_F(ClioNodeTest, Serialization)
 {
+    // Create a ClioNode with test data
     ClioNode const node{
-        .uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()()),
-        .updateTime = updateTime,
-        .dbRole = ClioNode::DbRole::Writer
+        .uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()()), .updateTime = updateTime
     };

+    // Serialize to JSON
     boost::json::value jsonValue;
     EXPECT_NO_THROW(boost::json::value_from(node, jsonValue));

+    // Verify JSON structure
     ASSERT_TRUE(jsonValue.is_object()) << jsonValue;
     auto const& obj = jsonValue.as_object();

+    // Check update_time exists and is a string
     EXPECT_TRUE(obj.contains("update_time"));
     EXPECT_TRUE(obj.at("update_time").is_string());
-
-    EXPECT_TRUE(obj.contains("db_role"));
-    EXPECT_TRUE(obj.at("db_role").is_number());
-    EXPECT_EQ(obj.at("db_role").as_int64(), static_cast<int64_t>(node.dbRole));
 }

 TEST_F(ClioNodeTest, Deserialization)
 {
-    boost::json::value const jsonValue = {{"update_time", updateTimeStr}, {"db_role", 1}};
+    boost::json::value const jsonValue = {{"update_time", updateTimeStr}};

-    ClioNode node{
-        .uuid = std::make_shared<boost::uuids::uuid>(), .updateTime = {}, .dbRole = ClioNode::DbRole::ReadOnly
-    };
-    ASSERT_NO_THROW(node = boost::json::value_to<ClioNode>(jsonValue));
+    // Deserialize to ClioNode
+    ClioNode node{.uuid = std::make_shared<boost::uuids::uuid>(), .updateTime = {}};
+    EXPECT_NO_THROW(node = boost::json::value_to<ClioNode>(jsonValue));

+    // Verify deserialized data
     EXPECT_NE(node.uuid, nullptr);
     EXPECT_EQ(*node.uuid, boost::uuids::uuid{});
     EXPECT_EQ(node.updateTime, updateTime);
-    EXPECT_EQ(node.dbRole, ClioNode::DbRole::LoadingCache);
 }

 TEST_F(ClioNodeTest, DeserializationInvalidTime)
 {
+    // Prepare an invalid time format
     boost::json::value const jsonValue{"update_time", "invalid_format"};

+    // Expect an exception during deserialization
     EXPECT_THROW(boost::json::value_to<ClioNode>(jsonValue), std::runtime_error);
 }

@@ -97,145 +93,3 @@ TEST_F(ClioNodeTest, DeserializationMissingTime)
     // Expect an exception
     EXPECT_THROW(boost::json::value_to<ClioNode>(jsonValue), std::runtime_error);
 }
-
-struct ClioNodeDbRoleTestBundle {
-    std::string testName;
-    ClioNode::DbRole role;
-};
-
-struct ClioNodeDbRoleTest : ClioNodeTest, testing::WithParamInterface<ClioNodeDbRoleTestBundle> {};
-
-INSTANTIATE_TEST_SUITE_P(
-    AllDbRoles,
-    ClioNodeDbRoleTest,
-    testing::Values(
-        ClioNodeDbRoleTestBundle{.testName = "ReadOnly", .role = ClioNode::DbRole::ReadOnly},
-        ClioNodeDbRoleTestBundle{.testName = "LoadingCache", .role = ClioNode::DbRole::LoadingCache},
-        ClioNodeDbRoleTestBundle{.testName = "NotWriter", .role = ClioNode::DbRole::NotWriter},
-        ClioNodeDbRoleTestBundle{.testName = "Writer", .role = ClioNode::DbRole::Writer},
-        ClioNodeDbRoleTestBundle{.testName = "Fallback", .role = ClioNode::DbRole::Fallback}
-    ),
-    tests::util::kNAME_GENERATOR
-);
-
-TEST_P(ClioNodeDbRoleTest, Serialization)
-{
-    auto const param = GetParam();
-    ClioNode const node{
-        .uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()()),
-        .updateTime = updateTime,
-        .dbRole = param.role
-    };
-    auto const jsonValue = boost::json::value_from(node);
-    EXPECT_EQ(jsonValue.as_object().at("db_role").as_int64(), static_cast<int64_t>(param.role));
-}
-
-TEST_P(ClioNodeDbRoleTest, Deserialization)
-{
-    auto const param = GetParam();
-    boost::json::value const jsonValue = {
-        {"update_time", updateTimeStr}, {"db_role", static_cast<int64_t>(param.role)}
-    };
-    auto const node = boost::json::value_to<ClioNode>(jsonValue);
-    EXPECT_EQ(node.dbRole, param.role);
-}
-
-TEST_F(ClioNodeDbRoleTest, DeserializationInvalidDbRole)
-{
-    boost::json::value const jsonValue = {{"update_time", updateTimeStr}, {"db_role", 10}};
-    EXPECT_THROW(boost::json::value_to<ClioNode>(jsonValue), std::runtime_error);
-}
-
-TEST_F(ClioNodeDbRoleTest, DeserializationMissingDbRole)
-{
-    boost::json::value const jsonValue = {{"update_time", updateTimeStr}};
-    EXPECT_THROW(boost::json::value_to<ClioNode>(jsonValue), std::runtime_error);
-}
-
-struct ClioNodeFromTestBundle {
-    std::string testName;
-    bool readOnly;
-    bool fallback;
-    bool loadingCache;
-    bool writing;
-    ClioNode::DbRole expectedRole;
-};
-
-struct ClioNodeFromTest : ClioNodeTest, testing::WithParamInterface<ClioNodeFromTestBundle> {
-    std::shared_ptr<boost::uuids::uuid> uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()());
-
-    MockWriterState writerState;
-};
-
-INSTANTIATE_TEST_SUITE_P(
-    AllWriterStates,
-    ClioNodeFromTest,
-    testing::Values(
-        ClioNodeFromTestBundle{
-            .testName = "ReadOnly",
-            .readOnly = true,
-            .fallback = false,
-            .loadingCache = false,
-            .writing = false,
-            .expectedRole = ClioNode::DbRole::ReadOnly
-        },
-        ClioNodeFromTestBundle{
-            .testName = "Fallback",
-            .readOnly = false,
-            .fallback = true,
-            .loadingCache = false,
-            .writing = false,
-            .expectedRole = ClioNode::DbRole::Fallback
-        },
-        ClioNodeFromTestBundle{
-            .testName = "LoadingCache",
-            .readOnly = false,
-            .fallback = false,
-            .loadingCache = true,
||||||
.writing = false,
|
|
||||||
.expectedRole = ClioNode::DbRole::LoadingCache
|
|
||||||
},
|
|
||||||
ClioNodeFromTestBundle{
|
|
||||||
.testName = "NotWriterNotReadOnly",
|
|
||||||
.readOnly = false,
|
|
||||||
.fallback = false,
|
|
||||||
.loadingCache = false,
|
|
||||||
.writing = false,
|
|
||||||
.expectedRole = ClioNode::DbRole::NotWriter
|
|
||||||
},
|
|
||||||
ClioNodeFromTestBundle{
|
|
||||||
.testName = "Writer",
|
|
||||||
.readOnly = false,
|
|
||||||
.fallback = false,
|
|
||||||
.loadingCache = false,
|
|
||||||
.writing = true,
|
|
||||||
.expectedRole = ClioNode::DbRole::Writer
|
|
||||||
}
|
|
||||||
),
|
|
||||||
tests::util::kNAME_GENERATOR
|
|
||||||
);
|
|
||||||
|
|
||||||
TEST_P(ClioNodeFromTest, FromWriterState)
|
|
||||||
{
|
|
||||||
auto const& param = GetParam();
|
|
||||||
|
|
||||||
EXPECT_CALL(writerState, isReadOnly()).WillOnce(testing::Return(param.readOnly));
|
|
||||||
if (not param.readOnly) {
|
|
||||||
EXPECT_CALL(writerState, isFallback()).WillOnce(testing::Return(param.fallback));
|
|
||||||
if (not param.fallback) {
|
|
||||||
EXPECT_CALL(writerState, isLoadingCache()).WillOnce(testing::Return(param.loadingCache));
|
|
||||||
if (not param.loadingCache) {
|
|
||||||
EXPECT_CALL(writerState, isWriting()).WillOnce(testing::Return(param.writing));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
auto const beforeTime = std::chrono::system_clock::now();
|
|
||||||
auto const node = ClioNode::from(uuid, writerState);
|
|
||||||
auto const afterTime = std::chrono::system_clock::now();
|
|
||||||
|
|
||||||
EXPECT_EQ(node.uuid, uuid);
|
|
||||||
EXPECT_EQ(node.dbRole, param.expectedRole);
|
|
||||||
EXPECT_GE(node.updateTime, beforeTime);
|
|
||||||
EXPECT_LE(node.updateTime, afterTime);
|
|
||||||
}
|
|
||||||
|
|||||||
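For readers unfamiliar with the Boost.JSON customization points these ClioNode tests exercise, the following is a minimal, self-contained sketch of the same round-trip wiring. The Node struct, its single field, and the seconds-since-epoch encoding of update_time are simplified stand-ins chosen for the example; they are not clio's actual ClioNode representation (clio stores the timestamp as a formatted string).

// Illustrative only: a tiny struct hooked into boost::json::value_from / value_to.
#include <boost/json.hpp>
#include <chrono>
#include <stdexcept>

struct Node {
    std::chrono::system_clock::time_point updateTime;
};

// Serialization hook found by ADL when boost::json::value_from(node) is called.
void
tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Node const& node)
{
    auto const secs =
        std::chrono::duration_cast<std::chrono::seconds>(node.updateTime.time_since_epoch()).count();
    jv = {{"update_time", secs}};
}

// Deserialization hook found by ADL when boost::json::value_to<Node>(jv) is called.
Node
tag_invoke(boost::json::value_to_tag<Node>, boost::json::value const& jv)
{
    auto const& obj = jv.as_object();
    if (!obj.contains("update_time"))
        throw std::runtime_error("update_time is required");  // mirrors the "missing time" tests above
    return Node{std::chrono::system_clock::time_point{std::chrono::seconds{obj.at("update_time").as_int64()}}};
}

Throwing from the value_to hook is what lets the tests above assert std::runtime_error on malformed or missing fields.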
@@ -22,197 +22,207 @@
 #include "data/BackendInterface.hpp"
 #include "util/MockBackendTestFixture.hpp"
 #include "util/MockPrometheus.hpp"
-#include "util/MockWriterState.hpp"
+#include "util/TimeUtils.hpp"
+#include "util/prometheus/Bool.hpp"
+#include "util/prometheus/Gauge.hpp"
 #include "util/prometheus/Prometheus.hpp"

-#include <boost/json/object.hpp>
+#include <boost/json/parse.hpp>
 #include <boost/json/serialize.hpp>
+#include <boost/json/string.hpp>
+#include <boost/json/value.hpp>
 #include <boost/json/value_from.hpp>
+#include <boost/uuid/random_generator.hpp>
 #include <boost/uuid/uuid.hpp>
+#include <boost/uuid/uuid_io.hpp>
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>

 #include <algorithm>
-#include <atomic>
 #include <chrono>
-#include <cstdint>
+#include <condition_variable>
 #include <memory>
-#include <semaphore>
+#include <mutex>
 #include <string>
-#include <thread>
 #include <utility>
 #include <vector>

 using namespace cluster;

-struct ClusterCommunicationServiceTest : util::prometheus::WithPrometheus, MockBackendTest {
-    std::unique_ptr<NiceMockWriterState> writerState = std::make_unique<NiceMockWriterState>();
-    NiceMockWriterState& writerStateRef = *writerState;
+namespace {
+std::vector<ClioNode> const kOTHER_NODES_DATA = {
+    ClioNode{
+        .uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()()),
+        .updateTime = util::systemTpFromUtcStr("2015-05-15T12:00:00Z", ClioNode::kTIME_FORMAT).value()
+    },
+    ClioNode{
+        .uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()()),
+        .updateTime = util::systemTpFromUtcStr("2015-05-15T12:00:01Z", ClioNode::kTIME_FORMAT).value()
+    },
+};
+} // namespace

-    static constexpr std::chrono::milliseconds kSHORT_INTERVAL{1};
+struct ClusterCommunicationServiceTest : util::prometheus::WithPrometheus, MockBackendTestStrict {
+    ClusterCommunicationService clusterCommunicationService{
+        backend_,
+        std::chrono::milliseconds{5},
+        std::chrono::milliseconds{9}
+    };

-    static boost::uuids::uuid
-    makeUuid(uint8_t value)
+    util::prometheus::GaugeInt& nodesInClusterMetric = PrometheusService::gaugeInt("cluster_nodes_total_number", {});
+    util::prometheus::Bool isHealthyMetric = PrometheusService::boolMetric("cluster_communication_is_healthy", {});

+    std::mutex mtx;
+    std::condition_variable cv;

+    void
+    notify()
     {
-        boost::uuids::uuid uuid{};
-        std::ranges::fill(uuid, value);
-        return uuid;
+        std::unique_lock const lock{mtx};
+        cv.notify_one();
     }

-    static ClioNode
-    makeNode(boost::uuids::uuid const& uuid, ClioNode::DbRole role)
+    void
+    wait()
     {
-        return ClioNode{
-            .uuid = std::make_shared<boost::uuids::uuid>(uuid),
-            .updateTime = std::chrono::system_clock::now(),
-            .dbRole = role
-        };
-    }
-
-    static std::string
-    nodeToJson(ClioNode const& node)
-    {
-        boost::json::value v = boost::json::value_from(node);
-        return boost::json::serialize(v);
-    }
-
-    ClusterCommunicationServiceTest()
-    {
-        ON_CALL(writerStateRef, clone()).WillByDefault(testing::Invoke([]() {
-            auto state = std::make_unique<NiceMockWriterState>();
-            ON_CALL(*state, isReadOnly()).WillByDefault(testing::Return(false));
-            ON_CALL(*state, isWriting()).WillByDefault(testing::Return(true));
-            return state;
-        }));
-        ON_CALL(writerStateRef, isReadOnly()).WillByDefault(testing::Return(false));
-        ON_CALL(writerStateRef, isWriting()).WillByDefault(testing::Return(true));
-    }
-
-    static bool
-    waitForSignal(std::binary_semaphore& sem, std::chrono::milliseconds timeout = std::chrono::milliseconds{1000})
-    {
-        return sem.try_acquire_for(timeout);
+        std::unique_lock lock{mtx};
+        cv.wait_until(lock, std::chrono::steady_clock::now() + std::chrono::milliseconds{100});
     }
 };

-TEST_F(ClusterCommunicationServiceTest, BackendReadsAndWritesData)
+TEST_F(ClusterCommunicationServiceTest, Write)
 {
-    auto const otherUuid = makeUuid(0x02);
-    std::binary_semaphore fetchSemaphore{0};
-    std::binary_semaphore writeSemaphore{0};
+    auto const selfUuid = *clusterCommunicationService.selfUuid();

-    BackendInterface::ClioNodesDataFetchResult fetchResult{std::vector<std::pair<boost::uuids::uuid, std::string>>{
-        {otherUuid, nodeToJson(makeNode(otherUuid, ClioNode::DbRole::Writer))}
-    }};
+    auto const nowStr = util::systemTpToUtcStr(std::chrono::system_clock::now(), ClioNode::kTIME_FORMAT);
+    auto const nowStrPrefix = nowStr.substr(0, nowStr.size() - 3);

-    ON_CALL(*backend_, fetchClioNodesData).WillByDefault(testing::Invoke([&](auto) {
-        fetchSemaphore.release();
-        return fetchResult;
-    }));
+    EXPECT_CALL(*backend_, writeNodeMessage(selfUuid, testing::_)).WillOnce([&](auto&&, std::string const& jsonStr) {
+        auto const jv = boost::json::parse(jsonStr);
+        ASSERT_TRUE(jv.is_object());
+        auto const& obj = jv.as_object();
+        ASSERT_TRUE(obj.contains("update_time"));
+        ASSERT_TRUE(obj.at("update_time").is_string());
+        EXPECT_THAT(std::string{obj.at("update_time").as_string()}, testing::StartsWith(nowStrPrefix));

-    ON_CALL(*backend_, writeNodeMessage).WillByDefault(testing::Invoke([&](auto, auto) { writeSemaphore.release(); }));
+        notify();
+    });

-    ClusterCommunicationService service{backend_, std::move(writerState), kSHORT_INTERVAL, kSHORT_INTERVAL};
-    service.run();
+    clusterCommunicationService.run();
+    wait();
+    // destructor of clusterCommunicationService calls .stop()

-    EXPECT_TRUE(waitForSignal(fetchSemaphore));
-    EXPECT_TRUE(waitForSignal(writeSemaphore));

-    service.stop();
 }

-TEST_F(ClusterCommunicationServiceTest, MetricsGetsNewStateFromBackend)
+TEST_F(ClusterCommunicationServiceTest, Read_FetchFailed)
 {
-    auto const otherUuid = makeUuid(0x02);
-    std::binary_semaphore writerActionSemaphore{0};
+    EXPECT_TRUE(isHealthyMetric);
+    EXPECT_CALL(*backend_, writeNodeMessage).Times(2).WillOnce([](auto&&, auto&&) {}).WillOnce([this](auto&&, auto&&) {
+        notify();
+    });
+    EXPECT_CALL(*backend_, fetchClioNodesData).WillRepeatedly([](auto&&) { return std::unexpected{"Failed"}; });

-    BackendInterface::ClioNodesDataFetchResult fetchResult{std::vector<std::pair<boost::uuids::uuid, std::string>>{
-        {otherUuid, nodeToJson(makeNode(otherUuid, ClioNode::DbRole::Writer))}
-    }};
+    clusterCommunicationService.run();
+    wait();
+    // call .stop() manually so that workers exit before expectations are called more times than we want
+    clusterCommunicationService.stop();

-    ON_CALL(*backend_, fetchClioNodesData).WillByDefault(testing::Invoke([&](auto) { return fetchResult; }));
+    EXPECT_FALSE(isHealthyMetric);

-    ON_CALL(writerStateRef, clone()).WillByDefault(testing::Invoke([&]() mutable {
-        auto state = std::make_unique<NiceMockWriterState>();
-        ON_CALL(*state, startWriting()).WillByDefault(testing::Invoke([&]() { writerActionSemaphore.release(); }));
-        ON_CALL(*state, giveUpWriting()).WillByDefault(testing::Invoke([&]() { writerActionSemaphore.release(); }));
-        return state;
-    }));
-
-    auto& nodesInClusterMetric = PrometheusService::gaugeInt("cluster_nodes_total_number", {});
-    auto isHealthyMetric = PrometheusService::boolMetric("cluster_communication_is_healthy", {});
-
-    ClusterCommunicationService service{backend_, std::move(writerState), kSHORT_INTERVAL, kSHORT_INTERVAL};
-
-    service.run();
-
-    // WriterDecider is called after metrics are updated so we could use it as a signal to stop
-    EXPECT_TRUE(waitForSignal(writerActionSemaphore));
-
-    service.stop();
-
-    EXPECT_EQ(nodesInClusterMetric.value(), 2);
-    EXPECT_TRUE(static_cast<bool>(isHealthyMetric));
 }

-TEST_F(ClusterCommunicationServiceTest, WriterDeciderCallsWriterStateMethodsAccordingly)
+TEST_F(ClusterCommunicationServiceTest, Read_FetchThrew)
 {
-    auto const smallerUuid = makeUuid(0x00);
-    std::binary_semaphore fetchSemaphore{0};
-    std::binary_semaphore writerActionSemaphore{0};
+    EXPECT_TRUE(isHealthyMetric);
+    EXPECT_CALL(*backend_, writeNodeMessage).Times(2).WillOnce([](auto&&, auto&&) {}).WillOnce([this](auto&&, auto&&) {
+        notify();
+    });
+    EXPECT_CALL(*backend_, fetchClioNodesData).WillRepeatedly(testing::Throw(data::DatabaseTimeout{}));

-    BackendInterface::ClioNodesDataFetchResult fetchResult{std::vector<std::pair<boost::uuids::uuid, std::string>>{
-        {smallerUuid, nodeToJson(makeNode(smallerUuid, ClioNode::DbRole::Writer))}
-    }};
+    clusterCommunicationService.run();
+    wait();
+    clusterCommunicationService.stop();

-    ON_CALL(*backend_, fetchClioNodesData).WillByDefault(testing::Invoke([&](auto) {
-        fetchSemaphore.release();
-        return fetchResult;
-    }));
+    EXPECT_FALSE(isHealthyMetric);
+    EXPECT_FALSE(clusterCommunicationService.clusterData().has_value());

-    ON_CALL(*backend_, writeNodeMessage).WillByDefault(testing::Return());
-
-    ON_CALL(writerStateRef, clone()).WillByDefault(testing::Invoke([&]() mutable {
-        auto state = std::make_unique<NiceMockWriterState>();
-        ON_CALL(*state, startWriting()).WillByDefault(testing::Invoke([&]() { writerActionSemaphore.release(); }));
-        ON_CALL(*state, giveUpWriting()).WillByDefault(testing::Invoke([&]() { writerActionSemaphore.release(); }));
-        return state;
-    }));
-
-    ClusterCommunicationService service{backend_, std::move(writerState), kSHORT_INTERVAL, kSHORT_INTERVAL};
-
-    service.run();
-
-    EXPECT_TRUE(waitForSignal(fetchSemaphore));
-    EXPECT_TRUE(waitForSignal(writerActionSemaphore));
-
-    service.stop();
 }

-TEST_F(ClusterCommunicationServiceTest, StopHaltsBackendOperations)
+TEST_F(ClusterCommunicationServiceTest, Read_GotInvalidJson)
 {
-    std::atomic<int> backendOperationsCount{0};
-    std::binary_semaphore fetchSemaphore{0};
+    EXPECT_TRUE(isHealthyMetric);
+    EXPECT_CALL(*backend_, writeNodeMessage).Times(2).WillOnce([](auto&&, auto&&) {}).WillOnce([this](auto&&, auto&&) {
+        notify();
+    });
+    EXPECT_CALL(*backend_, fetchClioNodesData).WillRepeatedly([](auto&&) {
+        return std::vector<std::pair<boost::uuids::uuid, std::string>>{
+            {boost::uuids::random_generator()(), "invalid json"}
+        };
+    });

-    BackendInterface::ClioNodesDataFetchResult fetchResult{std::vector<std::pair<boost::uuids::uuid, std::string>>{}};
+    clusterCommunicationService.run();
+    wait();
+    clusterCommunicationService.stop();

-    ON_CALL(*backend_, fetchClioNodesData).WillByDefault(testing::Invoke([&](auto) {
-        backendOperationsCount++;
-        fetchSemaphore.release();
-        return fetchResult;
-    }));
-    ON_CALL(*backend_, writeNodeMessage).WillByDefault(testing::Invoke([&](auto&&, auto&&) {
-        backendOperationsCount++;
-    }));
-
-    ClusterCommunicationService service{backend_, std::move(writerState), kSHORT_INTERVAL, kSHORT_INTERVAL};
-
-    service.run();
-    EXPECT_TRUE(waitForSignal(fetchSemaphore));
-    service.stop();
-
-    auto const countAfterStop = backendOperationsCount.load();
-    std::this_thread::sleep_for(std::chrono::milliseconds{50});
-    EXPECT_EQ(backendOperationsCount.load(), countAfterStop);
+    EXPECT_FALSE(isHealthyMetric);
+    EXPECT_FALSE(clusterCommunicationService.clusterData().has_value());
+}
+
+TEST_F(ClusterCommunicationServiceTest, Read_GotInvalidNodeData)
+{
+    EXPECT_TRUE(isHealthyMetric);
+    EXPECT_CALL(*backend_, writeNodeMessage).Times(2).WillOnce([](auto&&, auto&&) {}).WillOnce([this](auto&&, auto&&) {
+        notify();
+    });
+    EXPECT_CALL(*backend_, fetchClioNodesData).WillRepeatedly([](auto&&) {
+        return std::vector<std::pair<boost::uuids::uuid, std::string>>{{boost::uuids::random_generator()(), "{}"}};
+    });
+
+    clusterCommunicationService.run();
+    wait();
+    clusterCommunicationService.stop();
+
+    EXPECT_FALSE(isHealthyMetric);
+    EXPECT_FALSE(clusterCommunicationService.clusterData().has_value());
+}

+TEST_F(ClusterCommunicationServiceTest, Read_Success)
+{
+    EXPECT_TRUE(isHealthyMetric);
+    EXPECT_EQ(nodesInClusterMetric.value(), 1);

+    EXPECT_CALL(*backend_, writeNodeMessage).Times(2).WillOnce([](auto&&, auto&&) {}).WillOnce([this](auto&&, auto&&) {
+        auto const clusterData = clusterCommunicationService.clusterData();
+        ASSERT_TRUE(clusterData.has_value());
+        ASSERT_EQ(clusterData->size(), kOTHER_NODES_DATA.size() + 1);
+        for (auto const& node : kOTHER_NODES_DATA) {
+            auto const it =
+                std::ranges::find_if(*clusterData, [&](ClioNode const& n) { return *(n.uuid) == *(node.uuid); });
+            EXPECT_NE(it, clusterData->cend()) << boost::uuids::to_string(*node.uuid);
+        }
+        auto const selfUuid = clusterCommunicationService.selfUuid();
+        auto const it =
+            std::ranges::find_if(*clusterData, [&selfUuid](ClioNode const& node) { return node.uuid == selfUuid; });
+        EXPECT_NE(it, clusterData->end());

+        notify();
+    });

+    EXPECT_CALL(*backend_, fetchClioNodesData).WillRepeatedly([this](auto&&) {
+        auto const selfUuid = clusterCommunicationService.selfUuid();
+        std::vector<std::pair<boost::uuids::uuid, std::string>> result = {
+            {*selfUuid, R"JSON({"update_time": "2015-05-15:12:00:00"})JSON"},
+        };

+        for (auto const& node : kOTHER_NODES_DATA) {
+            boost::json::value jsonValue;
+            boost::json::value_from(node, jsonValue);
+            result.emplace_back(*node.uuid, boost::json::serialize(jsonValue));
+        }
+        return result;
+    });

+    clusterCommunicationService.run();
+    wait();
+    clusterCommunicationService.stop();

+    EXPECT_TRUE(isHealthyMetric);
+    EXPECT_EQ(nodesInClusterMetric.value(), 3);
 }
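The notify()/wait() pair the new fixture introduces is a common way to hand a signal from a mock callback, which runs on the service's worker, back to the test thread. Below is a minimal standalone sketch of the same idea; the extra fired flag, which keeps a notification that arrives before the wait from being lost, is my addition for robustness and not code from this repository.

// Illustrative helper: bounded wait so a missed notification cannot hang CI.
#include <chrono>
#include <condition_variable>
#include <mutex>

struct TestSync {
    std::mutex mtx;
    std::condition_variable cv;
    bool fired = false;

    void
    notify()
    {
        {
            std::lock_guard const lock{mtx};
            fired = true;  // remember the signal even if wait() has not started yet
        }
        cv.notify_one();
    }

    bool
    wait(std::chrono::milliseconds timeout = std::chrono::milliseconds{100})
    {
        std::unique_lock lock{mtx};
        return cv.wait_for(lock, timeout, [this] { return fired; });
    }
};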
@@ -1,179 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of clio: https://github.com/XRPLF/clio
-    Copyright (c) 2025, the clio developers.
-
-    Permission to use, copy, modify, and distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#include "cluster/Backend.hpp"
-#include "cluster/ClioNode.hpp"
-#include "cluster/Metrics.hpp"
-#include "util/MockPrometheus.hpp"
-#include "util/prometheus/Gauge.hpp"
-
-#include <boost/uuid/random_generator.hpp>
-#include <boost/uuid/uuid.hpp>
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-
-#include <chrono>
-#include <expected>
-#include <memory>
-#include <string>
-#include <vector>
-
-using namespace cluster;
-using namespace util::prometheus;
-using namespace testing;
-
-struct MetricsTest : WithMockPrometheus {
-    std::shared_ptr<boost::uuids::uuid> uuid1 =
-        std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()());
-    std::shared_ptr<boost::uuids::uuid> uuid2 =
-        std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()());
-    std::shared_ptr<boost::uuids::uuid> uuid3 =
-        std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()());
-};
-
-TEST_F(MetricsTest, InitializesMetricsOnConstruction)
-{
-    auto& nodesInClusterMock = makeMock<GaugeInt>("cluster_nodes_total_number", "");
-    auto& isHealthyMock = makeMock<GaugeInt>("cluster_communication_is_healthy", "");
-
-    EXPECT_CALL(nodesInClusterMock, set(1));
-    EXPECT_CALL(isHealthyMock, set(1));
-
-    Metrics metrics;
-}
-
-TEST_F(MetricsTest, OnNewStateWithValidClusterData)
-{
-    auto& nodesInClusterMock = makeMock<GaugeInt>("cluster_nodes_total_number", "");
-    auto& isHealthyMock = makeMock<GaugeInt>("cluster_communication_is_healthy", "");
-
-    EXPECT_CALL(nodesInClusterMock, set(1));
-    EXPECT_CALL(isHealthyMock, set(1));
-
-    Metrics metrics;
-
-    ClioNode node1{.uuid = uuid1, .updateTime = std::chrono::system_clock::now(), .dbRole = ClioNode::DbRole::Writer};
-    ClioNode node2{.uuid = uuid2, .updateTime = std::chrono::system_clock::now(), .dbRole = ClioNode::DbRole::ReadOnly};
-    ClioNode node3{
-        .uuid = uuid3, .updateTime = std::chrono::system_clock::now(), .dbRole = ClioNode::DbRole::NotWriter
-    };
-
-    std::vector<ClioNode> nodes = {node1, node2, node3};
-    Backend::ClusterData clusterData = std::expected<std::vector<ClioNode>, std::string>(nodes);
-    auto sharedClusterData = std::make_shared<Backend::ClusterData>(clusterData);
-
-    EXPECT_CALL(isHealthyMock, set(1));
-    EXPECT_CALL(nodesInClusterMock, set(3));
-
-    metrics.onNewState(uuid1, sharedClusterData);
-}
-
-TEST_F(MetricsTest, OnNewStateWithEmptyClusterData)
-{
-    auto& nodesInClusterMock = makeMock<GaugeInt>("cluster_nodes_total_number", "");
-    auto& isHealthyMock = makeMock<GaugeInt>("cluster_communication_is_healthy", "");
-
-    EXPECT_CALL(nodesInClusterMock, set(1));
-    EXPECT_CALL(isHealthyMock, set(1));
-
-    Metrics metrics;
-
-    std::vector<ClioNode> nodes = {};
-    Backend::ClusterData clusterData = std::expected<std::vector<ClioNode>, std::string>(nodes);
-    auto sharedClusterData = std::make_shared<Backend::ClusterData>(clusterData);
-
-    EXPECT_CALL(isHealthyMock, set(1));
-    EXPECT_CALL(nodesInClusterMock, set(0));
-
-    metrics.onNewState(uuid1, sharedClusterData);
-}
-
-TEST_F(MetricsTest, OnNewStateWithFailedClusterData)
-{
-    auto& nodesInClusterMock = makeMock<GaugeInt>("cluster_nodes_total_number", "");
-    auto& isHealthyMock = makeMock<GaugeInt>("cluster_communication_is_healthy", "");
-
-    EXPECT_CALL(nodesInClusterMock, set(1));
-    EXPECT_CALL(isHealthyMock, set(1));
-
-    Metrics metrics;
-
-    Backend::ClusterData clusterData =
-        std::expected<std::vector<ClioNode>, std::string>(std::unexpected("Connection failed"));
-    auto sharedClusterData = std::make_shared<Backend::ClusterData>(clusterData);
-
-    EXPECT_CALL(isHealthyMock, set(0));
-    EXPECT_CALL(nodesInClusterMock, set(1));
-
-    metrics.onNewState(uuid1, sharedClusterData);
-}
-
-TEST_F(MetricsTest, OnNewStateWithSingleNode)
-{
-    auto& nodesInClusterMock = makeMock<GaugeInt>("cluster_nodes_total_number", "");
-    auto& isHealthyMock = makeMock<GaugeInt>("cluster_communication_is_healthy", "");
-
-    EXPECT_CALL(nodesInClusterMock, set(1));
-    EXPECT_CALL(isHealthyMock, set(1));
-
-    Metrics metrics;
-
-    ClioNode node1{.uuid = uuid1, .updateTime = std::chrono::system_clock::now(), .dbRole = ClioNode::DbRole::Writer};
-
-    std::vector<ClioNode> nodes = {node1};
-    Backend::ClusterData clusterData = std::expected<std::vector<ClioNode>, std::string>(nodes);
-    auto sharedClusterData = std::make_shared<Backend::ClusterData>(clusterData);
-
-    EXPECT_CALL(isHealthyMock, set(1));
-    EXPECT_CALL(nodesInClusterMock, set(1));
-
-    metrics.onNewState(uuid1, sharedClusterData);
-}
-
-TEST_F(MetricsTest, OnNewStateRecoveryFromFailure)
-{
-    auto& nodesInClusterMock = makeMock<GaugeInt>("cluster_nodes_total_number", "");
-    auto& isHealthyMock = makeMock<GaugeInt>("cluster_communication_is_healthy", "");
-
-    EXPECT_CALL(nodesInClusterMock, set(1));
-    EXPECT_CALL(isHealthyMock, set(1));
-
-    Metrics metrics;
-
-    Backend::ClusterData clusterData1 =
-        std::expected<std::vector<ClioNode>, std::string>(std::unexpected("Connection timeout"));
-    auto sharedClusterData1 = std::make_shared<Backend::ClusterData>(clusterData1);
-
-    EXPECT_CALL(isHealthyMock, set(0));
-    EXPECT_CALL(nodesInClusterMock, set(1));
-
-    metrics.onNewState(uuid1, sharedClusterData1);
-
-    ClioNode node1{.uuid = uuid1, .updateTime = std::chrono::system_clock::now(), .dbRole = ClioNode::DbRole::Writer};
-    ClioNode node2{.uuid = uuid2, .updateTime = std::chrono::system_clock::now(), .dbRole = ClioNode::DbRole::ReadOnly};
-
-    std::vector<ClioNode> nodes = {node1, node2};
-    Backend::ClusterData clusterData2 = std::expected<std::vector<ClioNode>, std::string>(nodes);
-    auto sharedClusterData2 = std::make_shared<Backend::ClusterData>(clusterData2);
-
-    EXPECT_CALL(isHealthyMock, set(1));
-    EXPECT_CALL(nodesInClusterMock, set(2));
-
-    metrics.onNewState(uuid2, sharedClusterData2);
-}
@@ -1,223 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of clio: https://github.com/XRPLF/clio
-    Copyright (c) 2025, the clio developers.
-
-    Permission to use, copy, modify, and distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#include "cluster/impl/RepeatedTask.hpp"
-#include "util/AsioContextTestFixture.hpp"
-
-#include <boost/asio/io_context.hpp>
-#include <boost/asio/spawn.hpp>
-#include <boost/asio/steady_timer.hpp>
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-
-#include <atomic>
-#include <chrono>
-#include <semaphore>
-#include <thread>
-
-using namespace cluster::impl;
-using namespace testing;
-
-struct RepeatedTaskTest : AsyncAsioContextTest {
-    static constexpr auto kTIMEOUT = std::chrono::seconds{5};
-};
-
-template <typename MockFunctionType>
-struct RepeatedTaskTypedTest : RepeatedTaskTest {
-    std::atomic_int32_t callCount{0};
-    std::binary_semaphore semaphore{0};
-    testing::StrictMock<MockFunctionType> mockFn;
-
-    void
-    expectCalls(int const expectedCalls)
-    {
-        callCount = 0;
-
-        EXPECT_CALL(mockFn, Call).Times(AtLeast(expectedCalls)).WillRepeatedly([this, expectedCalls](auto&&...) {
-            ++callCount;
-            if (callCount >= expectedCalls) {
-                semaphore.release();
-            }
-        });
-    }
-};
-
-namespace {
-
-using TypesToTest = Types<MockFunction<void()>, MockFunction<void(boost::asio::yield_context)>>;
-
-} // namespace
-
-TYPED_TEST_SUITE(RepeatedTaskTypedTest, TypesToTest);
-
-TYPED_TEST(RepeatedTaskTypedTest, CallsFunctionRepeatedly)
-{
-    RepeatedTask<boost::asio::io_context> task(std::chrono::milliseconds(1), this->ctx_);
-
-    this->expectCalls(3);
-
-    task.run(this->mockFn.AsStdFunction());
-
-    EXPECT_TRUE(this->semaphore.try_acquire_for(TestFixture::kTIMEOUT));
-
-    task.stop();
-}
-
-TYPED_TEST(RepeatedTaskTypedTest, StopsImmediately)
-{
-    auto const interval = std::chrono::seconds(5);
-    RepeatedTask<boost::asio::io_context> task(interval, this->ctx_);
-
-    task.run(this->mockFn.AsStdFunction());
-
-    std::this_thread::sleep_for(std::chrono::milliseconds(5));
-
-    auto start = std::chrono::steady_clock::now();
-    task.stop();
-    EXPECT_LT(std::chrono::steady_clock::now() - start, interval);
-}
-
-TYPED_TEST(RepeatedTaskTypedTest, MultipleStops)
-{
-    RepeatedTask<boost::asio::io_context> task(std::chrono::milliseconds(1), this->ctx_);
-
-    this->expectCalls(3);
-
-    task.run(this->mockFn.AsStdFunction());
-
-    EXPECT_TRUE(this->semaphore.try_acquire_for(TestFixture::kTIMEOUT));
-
-    task.stop();
-    task.stop();
-    task.stop();
-}
-
-TYPED_TEST(RepeatedTaskTypedTest, DestructorStopsTask)
-{
-    this->expectCalls(3);
-
-    {
-        RepeatedTask<boost::asio::io_context> task(std::chrono::milliseconds(1), this->ctx_);
-
-        task.run(this->mockFn.AsStdFunction());
-
-        EXPECT_TRUE(this->semaphore.try_acquire_for(TestFixture::kTIMEOUT));
-
-        // Destructor will call stop()
-    }
-
-    auto const countAfterDestruction = this->callCount.load();
-
-    // Wait a bit - no more calls should happen
-    std::this_thread::sleep_for(std::chrono::milliseconds(10));
-
-    EXPECT_EQ(this->callCount, countAfterDestruction);
-}
-
-TYPED_TEST(RepeatedTaskTypedTest, StopWithoutRunIsNoOp)
-{
-    RepeatedTask<boost::asio::io_context> task(std::chrono::milliseconds(1), this->ctx_);
-
-    // Should not crash or hang
-    task.stop();
-}
-
-TEST_F(RepeatedTaskTest, MultipleTasksRunConcurrently)
-{
-    StrictMock<MockFunction<void()>> mockFn1;
-    StrictMock<MockFunction<void()>> mockFn2;
-
-    RepeatedTask<boost::asio::io_context> task1(std::chrono::milliseconds(1), ctx_);
-    RepeatedTask<boost::asio::io_context> task2(std::chrono::milliseconds(2), ctx_);
-
-    std::atomic_int32_t callCount1{0};
-    std::atomic_int32_t callCount2{0};
-    std::binary_semaphore semaphore1{0};
-    std::binary_semaphore semaphore2{0};
-
-    EXPECT_CALL(mockFn1, Call).Times(AtLeast(10)).WillRepeatedly([&]() {
-        if (++callCount1 >= 10) {
-            semaphore1.release();
-        }
-    });
-
-    EXPECT_CALL(mockFn2, Call).Times(AtLeast(5)).WillRepeatedly([&]() {
-        if (++callCount2 >= 5) {
-            semaphore2.release();
-        }
-    });
-
-    task1.run(mockFn1.AsStdFunction());
-    task2.run(mockFn2.AsStdFunction());
-
-    EXPECT_TRUE(semaphore1.try_acquire_for(kTIMEOUT));
-    EXPECT_TRUE(semaphore2.try_acquire_for(kTIMEOUT));
-
-    task1.stop();
-    task2.stop();
-}
-
-TYPED_TEST(RepeatedTaskTypedTest, TaskStateTransitionsCorrectly)
-{
-    RepeatedTask<boost::asio::io_context> task(std::chrono::milliseconds(1), this->ctx_);
-
-    task.stop(); // Should be no-op
-
-    this->expectCalls(3);
-
-    task.run(this->mockFn.AsStdFunction());
-
-    EXPECT_TRUE(this->semaphore.try_acquire_for(TestFixture::kTIMEOUT));
-
-    task.stop();
-
-    // Stop again should be no-op
-    task.stop();
-}
-
-TEST_F(RepeatedTaskTest, FunctionCanAccessYieldContext)
-{
-    StrictMock<MockFunction<void(boost::asio::yield_context)>> mockFn;
-    std::atomic_bool yieldContextUsed = false;
-    std::binary_semaphore semaphore{0};
-
-    RepeatedTask<boost::asio::io_context> task(std::chrono::milliseconds(1), ctx_);
-
-    EXPECT_CALL(mockFn, Call).Times(AtLeast(1)).WillRepeatedly([&](boost::asio::yield_context yield) {
-        if (yieldContextUsed)
-            return;
-
-        // Use the yield context to verify it's valid
-        boost::asio::steady_timer timer(yield.get_executor());
-        timer.expires_after(std::chrono::milliseconds(1));
-        boost::system::error_code ec;
-        timer.async_wait(yield[ec]);
-        EXPECT_FALSE(ec) << ec.message();
-        yieldContextUsed = true;
-        semaphore.release();
-    });
-
-    task.run(mockFn.AsStdFunction());
-
-    EXPECT_TRUE(semaphore.try_acquire_for(kTIMEOUT));
-
-    task.stop();
-
-    EXPECT_TRUE(yieldContextUsed);
-}
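The deleted RepeatedTask tests pin down three behaviours: the callback fires repeatedly at the configured interval, stop() returns promptly even while the task is sleeping, and the destructor stops the task. The sketch below is a standard-library-only type with the same contract, written for illustration; clio's RepeatedTask is asio-based and lives in cluster/impl, so this is not its implementation.

// Illustrative only: interval callbacks with prompt, idempotent stop().
#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <thread>

class PeriodicCaller {
    std::mutex mtx_;
    std::condition_variable cv_;
    bool stopped_ = false;
    std::thread worker_;

public:
    PeriodicCaller(std::chrono::milliseconds interval, std::function<void()> fn)
        : worker_{[this, interval, fn = std::move(fn)] {
              std::unique_lock lock{mtx_};
              while (!stopped_) {
                  lock.unlock();
                  fn();  // run the user callback outside the lock
                  lock.lock();
                  // sleep for the interval, but wake immediately if stop() is called
                  cv_.wait_for(lock, interval, [this] { return stopped_; });
              }
          }}
    {
    }

    void
    stop()  // safe to call multiple times, and before or after run has ticked
    {
        {
            std::lock_guard const lock{mtx_};
            stopped_ = true;
        }
        cv_.notify_all();
    }

    ~PeriodicCaller()
    {
        stop();  // destructor stops the task, mirroring DestructorStopsTask above
        if (worker_.joinable())
            worker_.join();
    }
};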
@@ -1,314 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of clio: https://github.com/XRPLF/clio
-    Copyright (c) 2025, the clio developers.
-
-    Permission to use, copy, modify, and distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#include "cluster/Backend.hpp"
-#include "cluster/ClioNode.hpp"
-#include "cluster/WriterDecider.hpp"
-#include "util/MockWriterState.hpp"
-
-#include <boost/asio/thread_pool.hpp>
-#include <boost/uuid/uuid.hpp>
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-
-#include <algorithm>
-#include <chrono>
-#include <cstdint>
-#include <memory>
-#include <string>
-#include <utility>
-#include <vector>
-
-using namespace cluster;
-
-enum class ExpectedAction { StartWriting, GiveUpWriting, NoAction, SetFallback };
-
-struct WriterDeciderTestParams {
-    std::string testName;
-    uint8_t selfUuidValue;
-    std::vector<std::pair<uint8_t, ClioNode::DbRole>> nodes;
-    ExpectedAction expectedAction;
-    bool useEmptyClusterData = false;
-};
-
-struct WriterDeciderTest : testing::TestWithParam<WriterDeciderTestParams> {
-    ~WriterDeciderTest() override
-    {
-        ctx.stop();
-        ctx.join();
-    }
-
-    boost::asio::thread_pool ctx{1};
-    std::unique_ptr<MockWriterState> writerState = std::make_unique<MockWriterState>();
-    MockWriterState& writerStateRef = *writerState;
-
-    static ClioNode
-    makeNode(boost::uuids::uuid const& uuid, ClioNode::DbRole role)
-    {
-        return ClioNode{
-            .uuid = std::make_shared<boost::uuids::uuid>(uuid),
-            .updateTime = std::chrono::system_clock::now(),
-            .dbRole = role
-        };
-    }
-
-    static boost::uuids::uuid
-    makeUuid(uint8_t value)
-    {
-        boost::uuids::uuid uuid{};
-        std::ranges::fill(uuid, value);
-        return uuid;
-    }
-};
-
-TEST_P(WriterDeciderTest, WriterSelection)
-{
-    auto const& params = GetParam();
-
-    auto const selfUuid = makeUuid(params.selfUuidValue);
-
-    WriterDecider decider{ctx, std::move(writerState)};
-
-    auto clonedState = std::make_unique<MockWriterState>();
-
-    // Set up expectations based on expected action
-    switch (params.expectedAction) {
-        case ExpectedAction::StartWriting:
-            EXPECT_CALL(*clonedState, startWriting());
-            EXPECT_CALL(writerStateRef, clone()).WillOnce(testing::Return(testing::ByMove(std::move(clonedState))));
-            break;
-        case ExpectedAction::GiveUpWriting:
-            EXPECT_CALL(*clonedState, giveUpWriting());
-            EXPECT_CALL(writerStateRef, clone()).WillOnce(testing::Return(testing::ByMove(std::move(clonedState))));
-            break;
-        case ExpectedAction::SetFallback:
-            EXPECT_CALL(*clonedState, setWriterDecidingFallback());
-            EXPECT_CALL(writerStateRef, clone()).WillOnce(testing::Return(testing::ByMove(std::move(clonedState))));
-            break;
-        case ExpectedAction::NoAction:
-            if (not params.useEmptyClusterData) {
-                // For all-ReadOnly case, we still clone but don't call any action
-                EXPECT_CALL(writerStateRef, clone()).WillOnce(testing::Return(testing::ByMove(std::move(clonedState))));
-            }
-            // For empty cluster data, clone is never called
-            break;
-    }
-
-    std::shared_ptr<Backend::ClusterData> clusterData;
-    ClioNode::CUuid selfIdPtr;
-
-    if (params.useEmptyClusterData) {
-        clusterData = std::make_shared<Backend::ClusterData>(std::unexpected(std::string("Communication failed")));
-        selfIdPtr = std::make_shared<boost::uuids::uuid>(selfUuid);
-    } else {
-        std::vector<ClioNode> nodes;
-        nodes.reserve(params.nodes.size());
-        for (auto const& [uuidValue, role] : params.nodes) {
-            auto node = makeNode(makeUuid(uuidValue), role);
-            if (uuidValue == params.selfUuidValue) {
-                selfIdPtr = node.uuid; // Use the same shared_ptr as in the node
-            }
-            nodes.push_back(std::move(node));
-        }
-        clusterData = std::make_shared<Backend::ClusterData>(std::move(nodes));
-    }
-
-    decider.onNewState(selfIdPtr, clusterData);
-
-    ctx.join();
-}
-
-INSTANTIATE_TEST_SUITE_P(
-    WriterDeciderTests,
-    WriterDeciderTest,
-    testing::Values(
-        WriterDeciderTestParams{
-            .testName = "SelfNodeIsSelectedAsWriter",
-            .selfUuidValue = 0x01,
-            .nodes = {{0x01, ClioNode::DbRole::Writer}, {0x02, ClioNode::DbRole::Writer}},
-            .expectedAction = ExpectedAction::StartWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "OtherNodeIsSelectedAsWriter",
-            .selfUuidValue = 0x02,
-            .nodes = {{0x01, ClioNode::DbRole::Writer}, {0x02, ClioNode::DbRole::Writer}},
-            .expectedAction = ExpectedAction::GiveUpWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "NodesAreSortedByUUID",
-            .selfUuidValue = 0x02,
-            .nodes =
-                {{0x03, ClioNode::DbRole::Writer}, {0x02, ClioNode::DbRole::Writer}, {0x01, ClioNode::DbRole::Writer}},
-            .expectedAction = ExpectedAction::GiveUpWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "FirstNodeAfterReadOnlyIsNotSelf",
-            .selfUuidValue = 0x03,
-            .nodes =
-                {{0x01, ClioNode::DbRole::ReadOnly},
-                 {0x02, ClioNode::DbRole::Writer},
-                 {0x03, ClioNode::DbRole::NotWriter}},
-            .expectedAction = ExpectedAction::GiveUpWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "FirstNodeAfterReadOnlyIsSelf",
-            .selfUuidValue = 0x02,
-            .nodes =
-                {{0x01, ClioNode::DbRole::ReadOnly},
-                 {0x02, ClioNode::DbRole::Writer},
-                 {0x03, ClioNode::DbRole::NotWriter}},
-            .expectedAction = ExpectedAction::StartWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "AllNodesReadOnlyGiveUpWriting",
-            .selfUuidValue = 0x01,
-            .nodes = {{0x01, ClioNode::DbRole::ReadOnly}, {0x02, ClioNode::DbRole::ReadOnly}},
-            .expectedAction = ExpectedAction::GiveUpWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "EmptyClusterDataNoActionTaken",
-            .selfUuidValue = 0x01,
-            .nodes = {},
-            .expectedAction = ExpectedAction::NoAction,
-            .useEmptyClusterData = true
-        },
-        WriterDeciderTestParams{
-            .testName = "SingleNodeClusterSelfIsWriter",
-            .selfUuidValue = 0x01,
-            .nodes = {{0x01, ClioNode::DbRole::Writer}},
-            .expectedAction = ExpectedAction::StartWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "NotWriterRoleIsSelectedWhenNoWriterRole",
-            .selfUuidValue = 0x01,
-            .nodes = {{0x01, ClioNode::DbRole::NotWriter}, {0x02, ClioNode::DbRole::NotWriter}},
-            .expectedAction = ExpectedAction::StartWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "MixedRolesFirstNonReadOnlyIsSelected",
-            .selfUuidValue = 0x03,
-            .nodes =
-                {{0x01, ClioNode::DbRole::ReadOnly},
-                 {0x02, ClioNode::DbRole::Writer},
-                 {0x03, ClioNode::DbRole::NotWriter},
-                 {0x04, ClioNode::DbRole::Writer}},
-            .expectedAction = ExpectedAction::GiveUpWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "ShuffledNodesAreSortedCorrectly",
-            .selfUuidValue = 0x04,
-            .nodes =
-                {{0x04, ClioNode::DbRole::Writer},
-                 {0x01, ClioNode::DbRole::Writer},
-                 {0x03, ClioNode::DbRole::Writer},
-                 {0x02, ClioNode::DbRole::Writer}},
-            .expectedAction = ExpectedAction::GiveUpWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "ShuffledNodesWithReadOnlySelfIsSelected",
-            .selfUuidValue = 0x03,
-            .nodes =
-                {{0x05, ClioNode::DbRole::Writer},
-                 {0x01, ClioNode::DbRole::ReadOnly},
-                 {0x04, ClioNode::DbRole::Writer},
-                 {0x03, ClioNode::DbRole::Writer},
-                 {0x02, ClioNode::DbRole::ReadOnly}},
-            .expectedAction = ExpectedAction::StartWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "SelfIsFallbackNoActionTaken",
-            .selfUuidValue = 0x01,
-            .nodes = {{0x01, ClioNode::DbRole::Fallback}, {0x02, ClioNode::DbRole::Writer}},
-            .expectedAction = ExpectedAction::NoAction
-        },
-        WriterDeciderTestParams{
-            .testName = "OtherNodeIsFallbackSetsFallbackMode",
-            .selfUuidValue = 0x01,
-            .nodes = {{0x01, ClioNode::DbRole::Writer}, {0x02, ClioNode::DbRole::Fallback}},
-            .expectedAction = ExpectedAction::SetFallback
-        },
-        WriterDeciderTestParams{
-            .testName = "SelfIsReadOnlyOthersAreFallbackGiveUpWriting",
-            .selfUuidValue = 0x01,
-            .nodes = {{0x01, ClioNode::DbRole::ReadOnly}, {0x02, ClioNode::DbRole::Fallback}},
-            .expectedAction = ExpectedAction::GiveUpWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "MultipleFallbackNodesSelfNotFallbackSetsFallback",
-            .selfUuidValue = 0x03,
-            .nodes =
-                {{0x01, ClioNode::DbRole::Fallback},
-                 {0x02, ClioNode::DbRole::Fallback},
-                 {0x03, ClioNode::DbRole::Writer}},
-            .expectedAction = ExpectedAction::SetFallback
-        },
-        WriterDeciderTestParams{
-            .testName = "MixedRolesWithOneFallbackSetsFallback",
-            .selfUuidValue = 0x02,
-            .nodes =
-                {{0x01, ClioNode::DbRole::Writer},
-                 {0x02, ClioNode::DbRole::NotWriter},
-                 {0x03, ClioNode::DbRole::Fallback},
-                 {0x04, ClioNode::DbRole::Writer}},
-            .expectedAction = ExpectedAction::SetFallback
-        },
-        WriterDeciderTestParams{
-            .testName = "SelfIsLoadingCacheOtherIsWriter",
-            .selfUuidValue = 0x01,
-            .nodes = {{0x01, ClioNode::DbRole::LoadingCache}, {0x02, ClioNode::DbRole::Writer}},
-            .expectedAction = ExpectedAction::GiveUpWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "OtherNodeIsLoadingCacheSkipToNextWriter",
-            .selfUuidValue = 0x02,
-            .nodes =
-                {{0x01, ClioNode::DbRole::LoadingCache},
-                 {0x02, ClioNode::DbRole::Writer},
-                 {0x03, ClioNode::DbRole::NotWriter}},
-            .expectedAction = ExpectedAction::StartWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "AllNodesLoadingCacheNoActionTaken",
-            .selfUuidValue = 0x01,
-            .nodes = {{0x01, ClioNode::DbRole::LoadingCache}, {0x02, ClioNode::DbRole::LoadingCache}},
-            .expectedAction = ExpectedAction::NoAction
-        },
-        WriterDeciderTestParams{
-            .testName = "MixedWithLoadingCacheReadOnlyFirstNonReadOnlyNonLoadingCacheSelected",
-            .selfUuidValue = 0x03,
-            .nodes =
-                {{0x01, ClioNode::DbRole::ReadOnly},
-                 {0x02, ClioNode::DbRole::LoadingCache},
-                 {0x03, ClioNode::DbRole::Writer},
-                 {0x04, ClioNode::DbRole::NotWriter}},
-            .expectedAction = ExpectedAction::StartWriting
-        },
-        WriterDeciderTestParams{
-            .testName = "LoadingCacheBeforeWriterSkipsLoadingCache",
-            .selfUuidValue = 0x04,
-            .nodes =
-                {{0x01, ClioNode::DbRole::LoadingCache},
-                 {0x02, ClioNode::DbRole::LoadingCache},
-                 {0x03, ClioNode::DbRole::Writer},
-                 {0x04, ClioNode::DbRole::NotWriter}},
-            .expectedAction = ExpectedAction::GiveUpWriting
-        }
-    ),
-    [](testing::TestParamInfo<WriterDeciderTestParams> const& info) { return info.param.testName; }
-);
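The parameter table in the deleted WriterDecider test encodes one core rule: order the nodes by UUID and treat the first node that is neither ReadOnly nor LoadingCache as the writer. The sketch below distills just that selection step; it deliberately ignores the Fallback and failed-fetch branches and uses made-up type names, so it is an illustration of the rule rather than clio's WriterDecider.

// Illustrative only: pick the writer as the lowest-UUID eligible node.
#include <boost/uuid/uuid.hpp>

#include <algorithm>
#include <optional>
#include <vector>

enum class Role { ReadOnly, LoadingCache, NotWriter, Writer };

struct Node {
    boost::uuids::uuid uuid;
    Role role;
};

inline std::optional<boost::uuids::uuid>
pickWriter(std::vector<Node> nodes)
{
    std::ranges::sort(nodes, {}, &Node::uuid);  // UUID order makes the choice deterministic cluster-wide
    auto const it = std::ranges::find_if(nodes, [](Node const& n) {
        return n.role != Role::ReadOnly && n.role != Role::LoadingCache;
    });
    if (it == nodes.end())
        return std::nullopt;  // e.g. every node is read-only or still loading its cache
    return it->uuid;
}

A node that finds its own UUID returned would start writing; any other node would give up writing, matching the StartWriting/GiveUpWriting expectations in the table above.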
@@ -32,8 +32,8 @@
 #include <xrpl/protocol/Indexes.h>

 #include <algorithm>
-#include <functional>
 #include <optional>
+#include <stdexcept>
 #include <string>
 #include <vector>

@@ -104,13 +104,16 @@ TEST_F(AmendmentCenterTest, IsMultipleEnabled)
     });
 }

-TEST_F(AmendmentCenterTest, IsEnabledReturnsFalseWhenAmendmentsLedgerObjectUnavailable)
+TEST_F(AmendmentCenterTest, IsEnabledThrowsWhenUnavailable)
 {
     EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::amendments().key, kSEQ, testing::_))
         .WillOnce(testing::Return(std::nullopt));

     runSpawn([this](auto yield) {
-        EXPECT_NO_THROW(EXPECT_FALSE(amendmentCenter.isEnabled(yield, "irrelevant", kSEQ)));
+        EXPECT_THROW(
+            { [[maybe_unused]] auto const result = amendmentCenter.isEnabled(yield, "irrelevant", kSEQ); },
+            std::runtime_error
+        );
     });
 }

@@ -123,21 +126,6 @@ TEST_F(AmendmentCenterTest, IsEnabledReturnsFalseWhenNoAmendments)
     runSpawn([this](auto yield) { EXPECT_FALSE(amendmentCenter.isEnabled(yield, "irrelevant", kSEQ)); });
 }

-TEST_F(AmendmentCenterTest, IsEnabledReturnsVectorOfFalseWhenAmendmentsLedgerObjectUnavailable)
-{
-    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::amendments().key, kSEQ, testing::_))
-        .WillOnce(testing::Return(std::nullopt));
-
-    runSpawn([this](auto yield) {
-        std::vector<data::AmendmentKey> const keys{"fixUniversalNumber", "ImmediateOfferKilled"};
-        std::vector<bool> vec;
-        EXPECT_NO_THROW(vec = amendmentCenter.isEnabled(yield, keys, kSEQ));
-
-        EXPECT_EQ(vec.size(), keys.size());
-        EXPECT_TRUE(std::ranges::all_of(vec, std::logical_not<>{}));
-    });
-}
-
 TEST_F(AmendmentCenterTest, IsEnabledReturnsVectorOfFalseWhenNoAmendments)
 {
     auto const amendments = createBrokenAmendmentsObject();
@@ -47,23 +47,17 @@ struct LedgerCacheSaverTest : virtual testing::Test {
constexpr static auto kFILE_PATH = "./cache.bin";

static ClioConfigDefinition
-generateConfig(bool cacheFilePathHasValue, bool asyncSave)
+generateConfig(bool cacheFilePathHasValue)
{
auto config = ClioConfigDefinition{{
{"cache.file.path", ConfigValue{ConfigType::String}.optional()},
-{"cache.file.async_save", ConfigValue{ConfigType::Boolean}.defaultValue(false)},
}};

ConfigFileJson jsonFile{boost::json::object{}};
if (cacheFilePathHasValue) {
-auto const jsonObject = boost::json::parse(
-fmt::format(
-R"JSON({{"cache": {{"file": {{"path": "{}", "async_save": {} }} }} }})JSON",
-kFILE_PATH,
-asyncSave
-)
-)
-.as_object();
+auto const jsonObject =
+boost::json::parse(fmt::format(R"JSON({{"cache": {{"file": {{"path": "{}"}}}}}})JSON", kFILE_PATH))
+.as_object();
jsonFile = ConfigFileJson{jsonObject};
}
auto const errors = config.parse(jsonFile);
@@ -74,7 +68,7 @@ struct LedgerCacheSaverTest : virtual testing::Test {

TEST_F(LedgerCacheSaverTest, SaveSuccessfully)
{
-auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true);
+auto const config = generateConfig(true);
LedgerCacheSaver saver{config, cache};

EXPECT_CALL(cache, saveToFile(kFILE_PATH)).WillOnce(testing::Return(std::expected<void, std::string>{}));
@@ -85,7 +79,7 @@ TEST_F(LedgerCacheSaverTest, SaveSuccessfully)

TEST_F(LedgerCacheSaverTest, SaveWithError)
{
-auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true);
+auto const config = generateConfig(true);
LedgerCacheSaver saver{config, cache};

EXPECT_CALL(cache, saveToFile(kFILE_PATH))
@@ -97,7 +91,7 @@ TEST_F(LedgerCacheSaverTest, SaveWithError)

TEST_F(LedgerCacheSaverTest, NoSaveWhenPathNotConfigured)
{
-auto const config = generateConfig(/* cacheFilePathHasValue = */ false, /* asyncSave = */ true);
+auto const config = generateConfig(false);

LedgerCacheSaver saver{config, cache};
saver.save();
@@ -106,7 +100,7 @@ TEST_F(LedgerCacheSaverTest, NoSaveWhenPathNotConfigured)

TEST_F(LedgerCacheSaverTest, DestructorWaitsForCompletion)
{
-auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true);
+auto const config = generateConfig(true);

std::binary_semaphore semaphore{1};
std::atomic_bool saveCompleted{false};
@@ -129,7 +123,7 @@ TEST_F(LedgerCacheSaverTest, DestructorWaitsForCompletion)

TEST_F(LedgerCacheSaverTest, WaitToFinishCanBeCalledMultipleTimes)
{
-auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true);
+auto const config = generateConfig(true);
LedgerCacheSaver saver{config, cache};

EXPECT_CALL(cache, saveToFile(kFILE_PATH));
@@ -141,7 +135,7 @@ TEST_F(LedgerCacheSaverTest, WaitToFinishCanBeCalledMultipleTimes)

TEST_F(LedgerCacheSaverTest, WaitToFinishWithoutSaveIsSafe)
{
-auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true);
+auto const config = generateConfig(true);
LedgerCacheSaver saver{config, cache};
EXPECT_NO_THROW(saver.waitToFinish());
}
@@ -150,61 +144,13 @@ struct LedgerCacheSaverAssertTest : LedgerCacheSaverTest, common::util::WithMock

TEST_F(LedgerCacheSaverAssertTest, MultipleSavesNotAllowed)
{
-auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true);
+auto const config = generateConfig(true);

LedgerCacheSaver saver{config, cache};
-std::binary_semaphore semaphore{0};

-EXPECT_CALL(cache, saveToFile(kFILE_PATH)).WillOnce([&](auto&&) {
-semaphore.acquire();
-return std::expected<void, std::string>{};
-});
+EXPECT_CALL(cache, saveToFile(kFILE_PATH));
saver.save();
EXPECT_CLIO_ASSERT_FAIL({ saver.save(); });
-semaphore.release();
-
saver.waitToFinish();
}
-
-TEST_F(LedgerCacheSaverTest, SyncSaveWaitsForCompletion)
-{
-auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ false);
-
-std::atomic_bool saveCompleted{false};
-
-EXPECT_CALL(cache, saveToFile(kFILE_PATH)).WillOnce([&]() {
-std::this_thread::sleep_for(std::chrono::milliseconds(50));
-saveCompleted = true;
-return std::expected<void, std::string>{};
-});
-
-LedgerCacheSaver saver{config, cache};
-saver.save();
-EXPECT_TRUE(saveCompleted);
-}
-
-TEST_F(LedgerCacheSaverTest, AsyncSaveDoesNotWaitForCompletion)
-{
-auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true);
-
-std::binary_semaphore saveStarted{0};
-std::binary_semaphore continueExecution{0};
-std::atomic_bool saveCompleted{false};
-
-EXPECT_CALL(cache, saveToFile(kFILE_PATH)).WillOnce([&]() {
-saveStarted.release();
-continueExecution.acquire();
-saveCompleted = true;
-return std::expected<void, std::string>{};
-});
-
-LedgerCacheSaver saver{config, cache};
-saver.save();
-
-EXPECT_TRUE(saveStarted.try_acquire_for(std::chrono::seconds{5}));
-EXPECT_FALSE(saveCompleted);
-
-continueExecution.release();
-saver.waitToFinish();
-EXPECT_TRUE(saveCompleted);
-}
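The removed async-save tests above gate the mocked saveToFile callback with std::binary_semaphore so the test can observe the "started but not finished" state deterministically. A minimal standalone sketch of that gating pattern, using only the standard library (illustrative names, not Clio code):

// Sketch: coordinate a background "save" with two semaphores, as in AsyncSaveDoesNotWaitForCompletion.
#include <atomic>
#include <semaphore>
#include <thread>

int main()
{
    std::binary_semaphore saveStarted{0};
    std::binary_semaphore continueExecution{0};
    std::atomic_bool saveCompleted{false};

    // Hypothetical async save: announce start, then wait for permission to finish.
    std::jthread saver{[&] {
        saveStarted.release();
        continueExecution.acquire();
        saveCompleted = true;
    }};

    saveStarted.acquire();                 // the save has started...
    bool const midFlight = !saveCompleted; // ...but has not completed yet
    continueExecution.release();           // allow it to finish
    saver.join();
    return (midFlight && saveCompleted) ? 0 : 1;
}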
@@ -216,10 +216,6 @@ protected:
std::shared_ptr<testing::NiceMock<MockMonitorProvider>> monitorProvider_ =
std::make_shared<testing::NiceMock<MockMonitorProvider>>();
std::shared_ptr<etl::SystemState> systemState_ = std::make_shared<etl::SystemState>();
-testing::StrictMock<testing::MockFunction<void(etl::SystemState::WriteCommand)>> mockWriteSignalCommandCallback_;
-boost::signals2::scoped_connection writeCommandConnection_{
-systemState_->writeCommandSignal.connect(mockWriteSignalCommandCallback_.AsStdFunction())
-};

etl::ETLService service_{
ctx_,
@@ -304,7 +300,6 @@ TEST_F(ETLServiceTests, RunWithEmptyDatabase)
auto mockTaskManager = std::make_unique<testing::NiceMock<MockTaskManager>>();
auto& mockTaskManagerRef = *mockTaskManager;
auto ledgerData = createTestData(kSEQ);
-EXPECT_TRUE(systemState_->isLoadingCache);

testing::Sequence const s;
EXPECT_CALL(*backend_, hardFetchLedgerRange).InSequence(s).WillOnce(testing::Return(std::nullopt));
@@ -313,61 +308,25 @@ TEST_F(ETLServiceTests, RunWithEmptyDatabase)
EXPECT_CALL(*balancer_, loadInitialLedger(kSEQ, testing::_, testing::_))
.WillOnce(testing::Return(std::vector<std::string>{}));
EXPECT_CALL(*loader_, loadInitialLedger).WillOnce(testing::Return(ripple::LedgerHeader{}));
-// In syncCacheWithDb()
-EXPECT_CALL(*backend_, hardFetchLedgerRange).Times(2).InSequence(s).WillRepeatedly([this]() {
-backend_->cache().update({}, kSEQ, false);
-return data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ};
-});
+EXPECT_CALL(*backend_, hardFetchLedgerRange)
+.InSequence(s)
+.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
EXPECT_CALL(mockTaskManagerRef, run);
-EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_)).WillOnce([&](auto&&...) {
-EXPECT_FALSE(systemState_->isLoadingCache);
-return std::unique_ptr<etl::TaskManagerInterface>(mockTaskManager.release());
-});
-EXPECT_CALL(*monitorProvider_, make(testing::_, testing::_, testing::_, kSEQ + 1, testing::_))
-.WillOnce([this](auto, auto, auto, auto, auto) {
-EXPECT_TRUE(systemState_->isLoadingCache);
-return std::make_unique<testing::NiceMock<MockMonitor>>();
-});
+EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
+.WillOnce(testing::Return(std::unique_ptr<etl::TaskManagerInterface>(mockTaskManager.release())));
+EXPECT_CALL(*monitorProvider_, make(testing::_, testing::_, testing::_, testing::_, testing::_))
+.WillOnce([](auto, auto, auto, auto, auto) { return std::make_unique<testing::NiceMock<MockMonitor>>(); });

service_.run();
}

TEST_F(ETLServiceTests, RunWithPopulatedDatabase)
{
-EXPECT_TRUE(systemState_->isLoadingCache);
-backend_->cache().update({}, kSEQ, false);
EXPECT_CALL(*backend_, hardFetchLedgerRange)
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
-EXPECT_CALL(*monitorProvider_, make(testing::_, testing::_, testing::_, kSEQ + 1, testing::_))
-.WillOnce([this](auto, auto, auto, auto, auto) {
-EXPECT_TRUE(systemState_->isLoadingCache);
-return std::make_unique<testing::NiceMock<MockMonitor>>();
-});
-EXPECT_CALL(*ledgers_, getMostRecent()).WillRepeatedly(testing::Return(kSEQ));
-EXPECT_CALL(*cacheLoader_, load(kSEQ));
-
-service_.run();
-}
-
-TEST_F(ETLServiceTests, SyncCacheWithDbBeforeStartingMonitor)
-{
-EXPECT_TRUE(systemState_->isLoadingCache);
-backend_->cache().update({}, kSEQ - 2, false);
-EXPECT_CALL(*backend_, hardFetchLedgerRange)
-.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
-
-EXPECT_CALL(*backend_, fetchLedgerDiff(kSEQ - 1, testing::_));
-EXPECT_CALL(*cacheUpdater_, update(kSEQ - 1, std::vector<data::LedgerObject>()))
-.WillOnce([this](auto const seq, auto&&...) { backend_->cache().update({}, seq, false); });
-EXPECT_CALL(*backend_, fetchLedgerDiff(kSEQ, testing::_));
-EXPECT_CALL(*cacheUpdater_, update(kSEQ, std::vector<data::LedgerObject>()))
-.WillOnce([this](auto const seq, auto&&...) { backend_->cache().update({}, seq, false); });
-
-EXPECT_CALL(*monitorProvider_, make(testing::_, testing::_, testing::_, kSEQ + 1, testing::_))
-.WillOnce([this](auto, auto, auto, auto, auto) {
-EXPECT_TRUE(systemState_->isLoadingCache);
-return std::make_unique<testing::NiceMock<MockMonitor>>();
-});
+EXPECT_CALL(*monitorProvider_, make).WillOnce([](auto, auto, auto, auto, auto) {
+return std::make_unique<testing::NiceMock<MockMonitor>>();
+});
EXPECT_CALL(*ledgers_, getMostRecent()).WillRepeatedly(testing::Return(kSEQ));
EXPECT_CALL(*cacheLoader_, load(kSEQ));

@@ -405,22 +364,19 @@ TEST_F(ETLServiceTests, HandlesWriteConflictInMonitorSubscription)
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
EXPECT_CALL(mockMonitorRef, run);

-// Set cache to be in sync with DB to avoid syncCacheWithDb loop
-backend_->cache().update({}, kSEQ, false);
EXPECT_CALL(*backend_, hardFetchLedgerRange)
-.Times(2)
-.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
+.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
EXPECT_CALL(*cacheLoader_, load(kSEQ));

service_.run();
-writeCommandConnection_.disconnect();
-systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StopWriting);
+systemState_->writeConflict = true;

EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));
ASSERT_TRUE(capturedCallback);
capturedCallback(kSEQ + 1);

+EXPECT_FALSE(systemState_->writeConflict);
EXPECT_FALSE(systemState_->isWriting);
}

@@ -441,11 +397,8 @@ TEST_F(ETLServiceTests, NormalFlowInMonitorSubscription)
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
EXPECT_CALL(mockMonitorRef, run);

-// Set cache to be in sync with DB to avoid syncCacheWithDb loop
-backend_->cache().update({}, kSEQ, false);
EXPECT_CALL(*backend_, hardFetchLedgerRange)
-.Times(2)
-.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
+.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
EXPECT_CALL(*cacheLoader_, load(kSEQ));

@@ -471,19 +424,13 @@ TEST_F(ETLServiceTests, AttemptTakeoverWriter)
return std::move(mockMonitor);
});

-std::function<void(uint32_t)> onNewSeqCallback;
-EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&onNewSeqCallback](auto cb) {
-onNewSeqCallback = std::move(cb);
-return boost::signals2::scoped_connection{};
-});
+EXPECT_CALL(mockMonitorRef, subscribeToNewSequence);
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled).WillOnce([&capturedDbStalledCallback](auto callback) {
capturedDbStalledCallback = callback;
return boost::signals2::scoped_connection{};
});
EXPECT_CALL(mockMonitorRef, run);

-// Set cache to be in sync with DB to avoid syncCacheWithDb loop
-backend_->cache().update({}, kSEQ, false);
EXPECT_CALL(*backend_, hardFetchLedgerRange)
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
@@ -500,14 +447,10 @@ TEST_F(ETLServiceTests, AttemptTakeoverWriter)
EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
.WillOnce(testing::Return(std::move(mockTaskManager)));

-EXPECT_CALL(mockWriteSignalCommandCallback_, Call(etl::SystemState::WriteCommand::StartWriting));

ASSERT_TRUE(capturedDbStalledCallback);
-EXPECT_FALSE(systemState_->isWriting); // will attempt to become writer after new sequence appears but not yet
-EXPECT_FALSE(systemState_->isWriterDecidingFallback);
capturedDbStalledCallback();
-EXPECT_TRUE(systemState_->isWriting); // should attempt to become writer
-EXPECT_TRUE(systemState_->isWriterDecidingFallback); // fallback mode activated
+EXPECT_TRUE(systemState_->isWriting); // should attempt to become writer
}

TEST_F(ETLServiceTests, GiveUpWriterAfterWriteConflict)
@@ -527,25 +470,22 @@ TEST_F(ETLServiceTests, GiveUpWriterAfterWriteConflict)
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
EXPECT_CALL(mockMonitorRef, run);

-// Set cache to be in sync with DB to avoid syncCacheWithDb loop
-backend_->cache().update({}, kSEQ, false);
EXPECT_CALL(*backend_, hardFetchLedgerRange)
-.Times(2)
-.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
+.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
EXPECT_CALL(*cacheLoader_, load(kSEQ));

service_.run();
systemState_->isWriting = true;
-writeCommandConnection_.disconnect();
-systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StopWriting);
+systemState_->writeConflict = true; // got a write conflict along the way

EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));

ASSERT_TRUE(capturedCallback);
capturedCallback(kSEQ + 1);

EXPECT_FALSE(systemState_->isWriting); // gives up writing
+EXPECT_FALSE(systemState_->writeConflict); // and removes write conflict flag
}

TEST_F(ETLServiceTests, CancelledLoadInitialLedger)
@@ -599,327 +539,3 @@ TEST_F(ETLServiceTests, RunStopsIfInitialLoadIsCancelledByBalancer)
EXPECT_FALSE(service_.isAmendmentBlocked());
EXPECT_FALSE(service_.isCorruptionDetected());
}
-
-TEST_F(ETLServiceTests, DbStalledDoesNotTriggerSignalWhenStrictReadonly)
-{
-auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
-auto& mockMonitorRef = *mockMonitor;
-std::function<void()> capturedDbStalledCallback;
-
-EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
-return std::move(mockMonitor);
-});
-EXPECT_CALL(mockMonitorRef, subscribeToNewSequence);
-EXPECT_CALL(mockMonitorRef, subscribeToDbStalled).WillOnce([&capturedDbStalledCallback](auto callback) {
-capturedDbStalledCallback = callback;
-return boost::signals2::scoped_connection{};
-});
-EXPECT_CALL(mockMonitorRef, run);
-
-// Set cache to be in sync with DB to avoid syncCacheWithDb loop
-backend_->cache().update({}, kSEQ, false);
-EXPECT_CALL(*backend_, hardFetchLedgerRange)
-.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
-EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
-EXPECT_CALL(*cacheLoader_, load(kSEQ));
-
-service_.run();
-systemState_->isStrictReadonly = true; // strict readonly mode
-systemState_->isWriting = false;
-
-// No signal should be emitted because node is in strict readonly mode
-// But fallback flag should still be set
-
-ASSERT_TRUE(capturedDbStalledCallback);
-EXPECT_FALSE(systemState_->isWriterDecidingFallback);
-capturedDbStalledCallback();
-EXPECT_TRUE(systemState_->isWriterDecidingFallback); // fallback mode activated even in readonly
-}
-
-TEST_F(ETLServiceTests, DbStalledDoesNotTriggerSignalWhenAlreadyWriting)
-{
-auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
-auto& mockMonitorRef = *mockMonitor;
-std::function<void()> capturedDbStalledCallback;
-
-EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
-return std::move(mockMonitor);
-});
-EXPECT_CALL(mockMonitorRef, subscribeToNewSequence);
-EXPECT_CALL(mockMonitorRef, subscribeToDbStalled).WillOnce([&capturedDbStalledCallback](auto callback) {
-capturedDbStalledCallback = callback;
-return boost::signals2::scoped_connection{};
-});
-EXPECT_CALL(mockMonitorRef, run);
-
-// Set cache to be in sync with DB to avoid syncCacheWithDb loop
-backend_->cache().update({}, kSEQ, false);
-EXPECT_CALL(*backend_, hardFetchLedgerRange)
-.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
-EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
-EXPECT_CALL(*cacheLoader_, load(kSEQ));
-
-service_.run();
-systemState_->isStrictReadonly = false;
-systemState_->isWriting = true; // already writing
-
-// No signal should be emitted because node is already writing
-// But fallback flag should still be set
-
-ASSERT_TRUE(capturedDbStalledCallback);
-EXPECT_FALSE(systemState_->isWriterDecidingFallback);
-capturedDbStalledCallback();
-EXPECT_TRUE(systemState_->isWriterDecidingFallback); // fallback mode activated
-}
-
-TEST_F(ETLServiceTests, CacheUpdatesDependOnActualCacheState_WriterMode)
-{
-auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
-auto& mockMonitorRef = *mockMonitor;
-std::function<void(uint32_t)> capturedCallback;
-
-EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
-return std::move(mockMonitor);
-});
-EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&capturedCallback](auto callback) {
-capturedCallback = callback;
-return boost::signals2::scoped_connection{};
-});
-EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
-EXPECT_CALL(mockMonitorRef, run);
-
-// Set cache to be in sync with DB initially to avoid syncCacheWithDb loop
-backend_->cache().update({}, kSEQ, false);
-EXPECT_CALL(*backend_, hardFetchLedgerRange)
-.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
-EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
-EXPECT_CALL(*cacheLoader_, load(kSEQ));
-
-service_.run();
-systemState_->isWriting = true; // In writer mode
-
-// Simulate cache is behind (e.g., update failed previously)
-// Cache latestLedgerSequence returns kSEQ (behind the new seq kSEQ + 1)
-std::vector<data::LedgerObject> const emptyObjs = {};
-backend_->cache().update(emptyObjs, kSEQ); // Set cache to kSEQ
-
-std::vector<data::LedgerObject> const dummyDiff = {};
-EXPECT_CALL(*backend_, fetchLedgerDiff(kSEQ + 1, testing::_)).WillOnce(testing::Return(dummyDiff));
-
-// Cache should be updated even though we're in writer mode
-EXPECT_CALL(*cacheUpdater_, update(kSEQ + 1, testing::A<std::vector<data::LedgerObject> const&>()));
-
-EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));
-
-ASSERT_TRUE(capturedCallback);
-capturedCallback(kSEQ + 1);
-}
-
-TEST_F(ETLServiceTests, OnlyCacheUpdatesWhenBackendIsCurrent)
-{
-auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
-auto& mockMonitorRef = *mockMonitor;
-std::function<void(uint32_t)> capturedCallback;
-// Set cache to be in sync with DB initially to avoid syncCacheWithDb loop
-backend_->cache().update({}, kSEQ, false);
-
-EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
-return std::move(mockMonitor);
-});
-EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&capturedCallback](auto callback) {
-capturedCallback = callback;
-return boost::signals2::scoped_connection{};
-});
-EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
-EXPECT_CALL(mockMonitorRef, run);
-
-// Set backend range to be at kSEQ + 1 (already current)
-EXPECT_CALL(*backend_, hardFetchLedgerRange)
-.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}))
-.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}))
-.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ + 1}));
-EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
-EXPECT_CALL(*cacheLoader_, load(kSEQ));
-
-service_.run();
-systemState_->isWriting = false;
-
-// Cache is behind (at kSEQ)
-std::vector<data::LedgerObject> const emptyObjs = {};
-backend_->cache().update(emptyObjs, kSEQ);
-
-std::vector<data::LedgerObject> const dummyDiff = {};
-EXPECT_CALL(*backend_, fetchLedgerDiff(kSEQ + 1, testing::_)).WillOnce(testing::Return(dummyDiff));
-EXPECT_CALL(*cacheUpdater_, update(kSEQ + 1, testing::A<std::vector<data::LedgerObject> const&>()));
-
-EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));
-
-ASSERT_TRUE(capturedCallback);
-capturedCallback(kSEQ + 1);
-}
-
-TEST_F(ETLServiceTests, NoUpdatesWhenBothCacheAndBackendAreCurrent)
-{
-auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
-auto& mockMonitorRef = *mockMonitor;
-std::function<void(uint32_t)> capturedCallback;
-// Set cache to be in sync with DB initially to avoid syncCacheWithDb loop
-backend_->cache().update({}, kSEQ, false);
-
-EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
-return std::move(mockMonitor);
-});
-EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&capturedCallback](auto callback) {
-capturedCallback = callback;
-return boost::signals2::scoped_connection{};
-});
-EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
-EXPECT_CALL(mockMonitorRef, run);
-
-// Set backend range to be at kSEQ + 1 (already current)
-EXPECT_CALL(*backend_, hardFetchLedgerRange)
-.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}))
-.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}))
-.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ + 1}));
-EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
-EXPECT_CALL(*cacheLoader_, load(kSEQ));
-
-service_.run();
-
-// Cache is current (at kSEQ + 1)
-std::vector<data::LedgerObject> const emptyObjs = {};
-backend_->cache().update(emptyObjs, kSEQ + 1);
-
-// Neither should be updated
-EXPECT_CALL(*backend_, fetchLedgerDiff).Times(0);
-EXPECT_CALL(*cacheUpdater_, update(testing::_, testing::A<std::vector<data::LedgerObject> const&>())).Times(0);
-
-EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));
-
-ASSERT_TRUE(capturedCallback);
-capturedCallback(kSEQ + 1);
-}
-
-TEST_F(ETLServiceTests, StopWaitsForWriteCommandHandlersToComplete)
-{
-auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
-// Set cache to be in sync with DB to avoid syncCacheWithDb loop
-backend_->cache().update({}, kSEQ, false);
-
-EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
-return std::move(mockMonitor);
-});
-
-EXPECT_CALL(*backend_, hardFetchLedgerRange)
-.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
-EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
-EXPECT_CALL(*cacheLoader_, load(kSEQ));
-
-service_.run();
-systemState_->isStrictReadonly = false;
-
-auto mockTaskManager = std::make_unique<testing::NiceMock<MockTaskManager>>();
-
-EXPECT_CALL(mockWriteSignalCommandCallback_, Call(etl::SystemState::WriteCommand::StartWriting));
-EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
-.WillOnce(testing::Return(std::move(mockTaskManager)));
-
-// Emit a command
-systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StartWriting);
-
-// The test context processes operations synchronously, so the handler should have run
-// Stop should wait for the handler to complete and disconnect the subscription
-service_.stop();
-
-// Verify stop() returned, meaning all handlers completed
-SUCCEED();
-}
-
-TEST_F(ETLServiceTests, WriteConflictIsHandledImmediately_NotDelayed)
-{
-// This test verifies that write conflicts are handled immediately via signal,
-// not delayed until the next sequence notification (the old behavior)
-
-auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
-auto& mockMonitorRef = *mockMonitor;
-std::function<void(uint32_t)> capturedNewSeqCallback;
-
-EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
-return std::move(mockMonitor);
-});
-EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&capturedNewSeqCallback](auto callback) {
-capturedNewSeqCallback = callback;
-return boost::signals2::scoped_connection{};
-});
-EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
-EXPECT_CALL(mockMonitorRef, run);
-
-// Set cache to be in sync with DB to avoid syncCacheWithDb loop
-backend_->cache().update({}, kSEQ, false);
-EXPECT_CALL(*backend_, hardFetchLedgerRange)
-.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
-EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
-EXPECT_CALL(*cacheLoader_, load(kSEQ));
-
-service_.run();
-systemState_->isWriting = true;
-
-// Emit StopWriting signal (simulating write conflict from Loader)
-EXPECT_CALL(mockWriteSignalCommandCallback_, Call(etl::SystemState::WriteCommand::StopWriting));
-systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StopWriting);
-
-// The test context processes operations synchronously, so the handler should have run immediately
-// Verify that isWriting is immediately set to false
-EXPECT_FALSE(systemState_->isWriting);
-}
-
-TEST_F(ETLServiceTests, WriteCommandsAreSerializedOnStrand)
-{
-auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
-
-EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
-return std::move(mockMonitor);
-});
-
-// Set cache to be in sync with DB to avoid syncCacheWithDb loop
-backend_->cache().update({}, kSEQ, false);
-EXPECT_CALL(*backend_, hardFetchLedgerRange)
-.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
-EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
-EXPECT_CALL(*cacheLoader_, load(kSEQ));
-
-service_.run();
-systemState_->isStrictReadonly = false;
-systemState_->isWriting = false;
-
-auto mockTaskManager1 = std::make_unique<testing::NiceMock<MockTaskManager>>();
-auto mockTaskManager2 = std::make_unique<testing::NiceMock<MockTaskManager>>();
-
-// Set up expectations for the sequence of write commands
-// The signals should be processed in order: StartWriting, StopWriting, StartWriting
-{
-testing::InSequence seq;
-
-// First StartWriting
-EXPECT_CALL(mockWriteSignalCommandCallback_, Call(etl::SystemState::WriteCommand::StartWriting));
-EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
-.WillOnce(testing::Return(std::move(mockTaskManager1)));
-
-// Then StopWriting
-EXPECT_CALL(mockWriteSignalCommandCallback_, Call(etl::SystemState::WriteCommand::StopWriting));
-
-// Finally second StartWriting
-EXPECT_CALL(mockWriteSignalCommandCallback_, Call(etl::SystemState::WriteCommand::StartWriting));
-EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
-.WillOnce(testing::Return(std::move(mockTaskManager2)));
-}
-
-// Emit multiple signals rapidly - they should be serialized on the strand
-systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StartWriting);
-systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StopWriting);
-systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StartWriting);
-
-// The test context processes operations synchronously, so all signals should have been processed
-// Final state should be writing (last signal was StartWriting)
-EXPECT_TRUE(systemState_->isWriting);
-}
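The removed ETLService tests above drive write commands through a boost::signals2 signal and rely on scoped_connection for automatic unsubscription. A minimal sketch of that signal/subscription pattern (illustrative names only, not the Clio API):

// Sketch: emit commands through a signal; scoped_connection disconnects at end of scope.
#include <boost/signals2.hpp>
#include <iostream>

enum class WriteCommand { StartWriting, StopWriting };

int main()
{
    boost::signals2::signal<void(WriteCommand)> writeCommandSignal;

    {
        boost::signals2::scoped_connection conn = writeCommandSignal.connect([](WriteCommand cmd) {
            std::cout << (cmd == WriteCommand::StartWriting ? "start\n" : "stop\n");
        });
        writeCommandSignal(WriteCommand::StartWriting); // handler runs
    } // conn goes out of scope, handler is disconnected

    writeCommandSignal(WriteCommand::StopWriting); // no subscribers left; nothing happens
    return 0;
}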
@@ -216,14 +216,15 @@ TEST_F(ETLLedgerPublisherTest, PublishLedgerHeaderCloseTimeGreaterThanNow)
TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqStopIsTrue)
{
auto dummyState = etl::SystemState{};
+dummyState.isStopping = true;
auto publisher = impl::LedgerPublisher(ctx, backend_, mockSubscriptionManagerPtr, dummyState);
-publisher.stop();
EXPECT_FALSE(publisher.publish(kSEQ, {}));
}

TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqMaxAttempt)
{
auto dummyState = etl::SystemState{};
+dummyState.isStopping = false;
auto publisher = impl::LedgerPublisher(ctx, backend_, mockSubscriptionManagerPtr, dummyState);

static constexpr auto kMAX_ATTEMPT = 2;
@@ -237,6 +238,7 @@ TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqMaxAttempt)
TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqStopIsFalse)
{
auto dummyState = etl::SystemState{};
+dummyState.isStopping = false;
auto publisher = impl::LedgerPublisher(ctx, backend_, mockSubscriptionManagerPtr, dummyState);

LedgerRange const range{.minSequence = kSEQ, .maxSequence = kSEQ};
@@ -188,59 +188,3 @@ TEST_F(LoadingAssertTest, LoadInitialLedgerHasDataInDB)

EXPECT_CLIO_ASSERT_FAIL({ [[maybe_unused]] auto unused = loader_.loadInitialLedger(data); });
}
-
-TEST_F(LoadingTests, LoadWriteConflictEmitsStopWritingSignal)
-{
-state_->isWriting = true; // writer is active
-auto const data = createTestData();
-testing::StrictMock<testing::MockFunction<void(etl::SystemState::WriteCommand)>> mockSignalCallback;
-
-auto connection = state_->writeCommandSignal.connect(mockSignalCallback.AsStdFunction());
-
-EXPECT_CALL(*mockRegistryPtr_, dispatch(data));
-EXPECT_CALL(*backend_, doFinishWrites()).WillOnce(testing::Return(false)); // simulate write conflict
-EXPECT_CALL(mockSignalCallback, Call(etl::SystemState::WriteCommand::StopWriting));
-
-EXPECT_FALSE(state_->isWriterDecidingFallback);
-
-auto result = loader_.load(data);
-EXPECT_FALSE(result.has_value());
-EXPECT_EQ(result.error(), etl::LoaderError::WriteConflict);
-EXPECT_TRUE(state_->isWriterDecidingFallback);
-}
-
-TEST_F(LoadingTests, LoadSuccessDoesNotEmitSignal)
-{
-state_->isWriting = true; // writer is active
-auto const data = createTestData();
-testing::StrictMock<testing::MockFunction<void(etl::SystemState::WriteCommand)>> mockSignalCallback;
-
-auto connection = state_->writeCommandSignal.connect(mockSignalCallback.AsStdFunction());
-
-EXPECT_CALL(*mockRegistryPtr_, dispatch(data));
-EXPECT_CALL(*backend_, doFinishWrites()).WillOnce(testing::Return(true)); // success
-// No signal should be emitted on success
-
-EXPECT_FALSE(state_->isWriterDecidingFallback);
-
-auto result = loader_.load(data);
-EXPECT_TRUE(result.has_value());
-EXPECT_FALSE(state_->isWriterDecidingFallback);
-}
-
-TEST_F(LoadingTests, LoadWhenNotWritingDoesNotCheckConflict)
-{
-state_->isWriting = false; // not a writer
-auto const data = createTestData();
-testing::StrictMock<testing::MockFunction<void(etl::SystemState::WriteCommand)>> mockSignalCallback;
-
-auto connection = state_->writeCommandSignal.connect(mockSignalCallback.AsStdFunction());
-
-EXPECT_CALL(*mockRegistryPtr_, dispatch(data));
-// doFinishWrites should not be called when not writing
-EXPECT_CALL(*backend_, doFinishWrites()).Times(0);
-// No signal should be emitted
-
-auto result = loader_.load(data);
-EXPECT_TRUE(result.has_value());
-}
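The removed Loading tests above assert on a load result that either holds a value or carries etl::LoaderError::WriteConflict, i.e. the std::expected error flow. A minimal sketch of that error-propagation style (hypothetical load function, not Clio code):

// Sketch: success vs. write-conflict error through std::expected (C++23).
#include <expected>
#include <iostream>
#include <string>

enum class LoaderError { WriteConflict };

// Hypothetical stand-in for a load step that may hit a write conflict.
std::expected<std::string, LoaderError>
load(bool conflict)
{
    if (conflict)
        return std::unexpected(LoaderError::WriteConflict);
    return "ledger data";
}

int main()
{
    auto ok = load(false);
    auto bad = load(true);
    std::cout << ok.has_value() << ' ' << (bad.error() == LoaderError::WriteConflict) << '\n';
    return 0;
}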
@@ -1,73 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-This file is part of clio: https://github.com/XRPLF/clio
-Copyright (c) 2026, the clio developers.
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#include "etl/SystemState.hpp"
-#include "util/MockPrometheus.hpp"
-#include "util/config/ConfigDefinition.hpp"
-#include "util/config/ConfigFileJson.hpp"
-#include "util/config/ConfigValue.hpp"
-#include "util/config/Types.hpp"
-
-#include <boost/json/object.hpp>
-#include <boost/json/parse.hpp>
-#include <fmt/format.h>
-#include <gtest/gtest.h>
-
-#include <memory>
-
-using namespace etl;
-using namespace util::config;
-
-struct SystemStateTest : util::prometheus::WithPrometheus {};
-
-TEST_F(SystemStateTest, InitialValuesAreCorrect)
-{
-auto state = SystemState{};
-
-EXPECT_FALSE(state.isStrictReadonly);
-EXPECT_FALSE(state.isWriting);
-EXPECT_TRUE(state.isLoadingCache);
-EXPECT_FALSE(state.isAmendmentBlocked);
-EXPECT_FALSE(state.isCorruptionDetected);
-EXPECT_FALSE(state.isWriterDecidingFallback);
-}
-
-struct SystemStateReadOnlyTest : util::prometheus::WithPrometheus, testing::WithParamInterface<bool> {};
-
-TEST_P(SystemStateReadOnlyTest, MakeSystemStateWithReadOnly)
-{
-auto const readOnlyValue = GetParam();
-auto const configJson = boost::json::parse(fmt::format(R"JSON({{"read_only": {}}})JSON", readOnlyValue));
-
-auto config = ClioConfigDefinition{{{"read_only", ConfigValue{ConfigType::Boolean}}}};
-auto const configFile = ConfigFileJson{configJson.as_object()};
-auto const errors = config.parse(configFile);
-ASSERT_FALSE(errors.has_value());
-
-auto state = SystemState::makeSystemState(config);
-
-EXPECT_EQ(state->isStrictReadonly, readOnlyValue);
-EXPECT_FALSE(state->isWriting);
-EXPECT_TRUE(state->isLoadingCache);
-EXPECT_FALSE(state->isAmendmentBlocked);
-EXPECT_FALSE(state->isCorruptionDetected);
-EXPECT_FALSE(state->isWriterDecidingFallback);
-}
-
-INSTANTIATE_TEST_SUITE_P(SystemStateTest, SystemStateReadOnlyTest, testing::Values(true, false));
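The deleted SystemState tests above exercise both boolean values of read_only via GoogleTest's value-parameterized suites. A bare-bones sketch of that TEST_P / INSTANTIATE_TEST_SUITE_P pattern (illustrative names, not Clio code):

// Sketch: one parameterized test instantiated for both boolean values.
#include <gtest/gtest.h>

struct FlagTest : testing::TestWithParam<bool> {};

TEST_P(FlagTest, ParamIsBoolean)
{
    bool const value = GetParam();
    EXPECT_TRUE(value == true || value == false);
}

INSTANTIATE_TEST_SUITE_P(BothValues, FlagTest, testing::Values(true, false));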
@@ -1,162 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-This file is part of clio: https://github.com/XRPLF/clio
-Copyright (c) 2025, the clio developers.
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#include "etl/SystemState.hpp"
-#include "etl/WriterState.hpp"
-#include "util/MockPrometheus.hpp"
-
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-
-#include <memory>
-
-using namespace etl;
-using namespace testing;
-
-struct WriterStateTest : util::prometheus::WithPrometheus {
-std::shared_ptr<SystemState> systemState = std::make_shared<SystemState>();
-StrictMock<MockFunction<void(SystemState::WriteCommand)>> mockWriteCommand;
-WriterState writerState{systemState};
-
-WriterStateTest()
-{
-systemState->writeCommandSignal.connect(mockWriteCommand.AsStdFunction());
-}
-};
-
-TEST_F(WriterStateTest, IsWritingReturnsSystemStateValue)
-{
-systemState->isWriting = false;
-EXPECT_FALSE(writerState.isWriting());
-
-systemState->isWriting = true;
-EXPECT_TRUE(writerState.isWriting());
-}
-
-TEST_F(WriterStateTest, StartWritingEmitsStartWritingCommand)
-{
-systemState->isWriting = false;
-
-EXPECT_CALL(mockWriteCommand, Call(SystemState::WriteCommand::StartWriting));
-
-writerState.startWriting();
-}
-
-TEST_F(WriterStateTest, StartWritingDoesNothingWhenAlreadyWriting)
-{
-systemState->isWriting = true;
-
-// No EXPECT_CALL - StrictMock will fail if any command is emitted
-
-writerState.startWriting();
-}
-
-TEST_F(WriterStateTest, GiveUpWritingEmitsStopWritingCommand)
-{
-systemState->isWriting = true;
-
-EXPECT_CALL(mockWriteCommand, Call(SystemState::WriteCommand::StopWriting));
-
-writerState.giveUpWriting();
-}
-
-TEST_F(WriterStateTest, GiveUpWritingDoesNothingWhenNotWriting)
-{
-systemState->isWriting = false;
-
-// No EXPECT_CALL - StrictMock will fail if any command is emitted
-
-writerState.giveUpWriting();
-}
-
-TEST_F(WriterStateTest, IsFallbackReturnsFalseByDefault)
-{
-EXPECT_FALSE(writerState.isFallback());
-}
-
-TEST_F(WriterStateTest, SetWriterDecidingFallbackSetsFlag)
-{
-EXPECT_FALSE(systemState->isWriterDecidingFallback);
-
-writerState.setWriterDecidingFallback();
-
-EXPECT_TRUE(systemState->isWriterDecidingFallback);
-}
-
-TEST_F(WriterStateTest, IsFallbackReturnsSystemStateValue)
-{
-systemState->isWriterDecidingFallback = false;
-EXPECT_FALSE(writerState.isFallback());
-
-systemState->isWriterDecidingFallback = true;
-EXPECT_TRUE(writerState.isFallback());
-}
-
-TEST_F(WriterStateTest, IsReadOnlyReturnsSystemStateValue)
-{
-systemState->isStrictReadonly = false;
-EXPECT_FALSE(writerState.isReadOnly());
-
-systemState->isStrictReadonly = true;
-EXPECT_TRUE(writerState.isReadOnly());
-}
-
-TEST_F(WriterStateTest, IsLoadingCacheReturnsSystemStateValue)
-{
-systemState->isLoadingCache = false;
-EXPECT_FALSE(writerState.isLoadingCache());
-
-systemState->isLoadingCache = true;
-EXPECT_TRUE(writerState.isLoadingCache());
-}
-
-TEST_F(WriterStateTest, CloneCreatesNewInstanceWithSameSystemState)
-{
-systemState->isWriting = true;
-systemState->isStrictReadonly = true;
-systemState->isLoadingCache = false;
-
-auto cloned = writerState.clone();
-
-ASSERT_NE(cloned.get(), &writerState);
-EXPECT_TRUE(cloned->isWriting());
-EXPECT_TRUE(cloned->isReadOnly());
-EXPECT_FALSE(cloned->isLoadingCache());
-}
-
-TEST_F(WriterStateTest, ClonedInstanceSharesSystemState)
-{
-auto cloned = writerState.clone();
-
-systemState->isWriting = true;
-
-EXPECT_TRUE(writerState.isWriting());
-EXPECT_TRUE(cloned->isWriting());
-
-systemState->isWriting = false;
-
-EXPECT_FALSE(writerState.isWriting());
-EXPECT_FALSE(cloned->isWriting());
-
-EXPECT_FALSE(writerState.isFallback());
-EXPECT_FALSE(cloned->isFallback());
-cloned->setWriterDecidingFallback();
-EXPECT_TRUE(writerState.isFallback());
-EXPECT_TRUE(cloned->isFallback());
-}
356 tests/unit/etlng/LedgerPublisherTests.cpp (Normal file)
@@ -0,0 +1,356 @@
+//------------------------------------------------------------------------------
+/*
+This file is part of clio: https://github.com/XRPLF/clio
+Copyright (c) 2025, the clio developers.
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#include "data/DBHelpers.hpp"
+#include "data/Types.hpp"
+#include "etl/SystemState.hpp"
+#include "util/AsioContextTestFixture.hpp"
+#include "util/MockBackendTestFixture.hpp"
+#include "util/MockPrometheus.hpp"
+#include "util/MockSubscriptionManager.hpp"
+#include "util/TestObject.hpp"
+#include "util/config/ConfigDefinition.hpp"
+
+#include <etlng/impl/LedgerPublisher.hpp>
+#include <fmt/format.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <xrpl/basics/chrono.h>
+#include <xrpl/protocol/Indexes.h>
+#include <xrpl/protocol/LedgerHeader.h>
+
+#include <chrono>
+#include <optional>
+#include <vector>
+
+using namespace testing;
+using namespace etlng;
+using namespace data;
+using namespace std::chrono;
+
+namespace {
+
+constexpr auto kACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn";
+constexpr auto kACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun";
+constexpr auto kLEDGER_HASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652";
+constexpr auto kSEQ = 30;
+constexpr auto kAGE = 800;
+constexpr auto kAMOUNT = 100;
+constexpr auto kFEE = 3;
+constexpr auto kFINAL_BALANCE = 110;
+constexpr auto kFINAL_BALANCE2 = 30;
+
+MATCHER_P(ledgerHeaderMatcher, expectedHeader, "Headers match")
+{
+return arg.seq == expectedHeader.seq && arg.hash == expectedHeader.hash &&
+arg.closeTime == expectedHeader.closeTime;
+}
+
+} // namespace
+
+struct ETLLedgerPublisherNgTest : util::prometheus::WithPrometheus, MockBackendTestStrict, SyncAsioContextTest {
+util::config::ClioConfigDefinition cfg{{}};
+StrictMockSubscriptionManagerSharedPtr mockSubscriptionManagerPtr;
+};
+
+TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderSkipDueToAge)
+{
+// Use kAGE (800) which is > MAX_LEDGER_AGE_SECONDS (600) to test skipping
+auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
+auto dummyState = etl::SystemState{};
+auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
+
+backend_->setRange(kSEQ - 1, kSEQ);
+publisher.publish(dummyLedgerHeader);
+
+// Verify last published sequence is set immediately
+EXPECT_TRUE(publisher.getLastPublishedSequence());
+EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);
+
+// Since age > MAX_LEDGER_AGE_SECONDS, these should not be called
+EXPECT_CALL(*backend_, doFetchLedgerObject).Times(0);
+EXPECT_CALL(*backend_, fetchAllTransactionsInLedger).Times(0);
+EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger).Times(0);
+EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges).Times(0);
+EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction).Times(0);
+
+ctx_.run();
+}
+
+TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderWithinAgeLimit)
+{
+// Use age 0 which is < MAX_LEDGER_AGE_SECONDS to ensure publishing happens
+auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
+auto dummyState = etl::SystemState{};
+auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
+
+backend_->setRange(kSEQ - 1, kSEQ);
+publisher.publish(dummyLedgerHeader);
+
+// Verify last published sequence is set immediately
+EXPECT_TRUE(publisher.getLastPublishedSequence());
+EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);
+
+EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
+.WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
+EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
+.WillOnce(Return(std::vector<TransactionAndMetadata>{}));
+
+EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 0));
+EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);
+
+ctx_.run();
+EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
+}
+
+TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderIsWritingTrue)
+{
+auto dummyState = etl::SystemState{};
+dummyState.isWriting = true;
+auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
+auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
+publisher.publish(dummyLedgerHeader);
+
+EXPECT_TRUE(publisher.getLastPublishedSequence());
+EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);
+
+ctx_.run();
+EXPECT_FALSE(backend_->fetchLedgerRange());
+}
+
+TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderInRange)
+{
+auto dummyState = etl::SystemState{};
+dummyState.isWriting = true;
+
+auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0); // age is 0
+auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
+backend_->setRange(kSEQ - 1, kSEQ);
+
+publisher.publish(dummyLedgerHeader);
+
+EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
+.WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
+
+TransactionAndMetadata t1;
+t1.transaction =
+createPaymentTransactionObject(kACCOUNT, kACCOUNT2, kAMOUNT, kFEE, kSEQ).getSerializer().peekData();
+t1.metadata = createPaymentTransactionMetaObject(kACCOUNT, kACCOUNT2, kFINAL_BALANCE, kFINAL_BALANCE2)
+.getSerializer()
+.peekData();
+t1.ledgerSequence = kSEQ;
+
+EXPECT_CALL(*backend_, fetchAllTransactionsInLedger).WillOnce(Return(std::vector<TransactionAndMetadata>{t1}));
+
+EXPECT_TRUE(publisher.getLastPublishedSequence());
+EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);
+
+EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 1));
+EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);
+// mock 1 transaction
+EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction);
+
+ctx_.run();
+EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
+}
+
+TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderCloseTimeGreaterThanNow)
+{
+auto dummyState = etl::SystemState{};
+dummyState.isWriting = true;
+
+auto dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
+auto const nowPlus10 = system_clock::now() + seconds(10);
+auto const closeTime = duration_cast<seconds>(nowPlus10.time_since_epoch()).count() - kRIPPLE_EPOCH_START;
+dummyLedgerHeader.closeTime = ripple::NetClock::time_point{seconds{closeTime}};
+
+backend_->setRange(kSEQ - 1, kSEQ);
+
+auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
|
||||||
|
publisher.publish(dummyLedgerHeader);
|
||||||
|
|
||||||
|
EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
|
||||||
|
.WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
|
||||||
|
|
||||||
|
TransactionAndMetadata t1;
|
||||||
|
t1.transaction =
|
||||||
|
createPaymentTransactionObject(kACCOUNT, kACCOUNT2, kAMOUNT, kFEE, kSEQ).getSerializer().peekData();
|
||||||
|
t1.metadata = createPaymentTransactionMetaObject(kACCOUNT, kACCOUNT2, kFINAL_BALANCE, kFINAL_BALANCE2)
|
||||||
|
.getSerializer()
|
||||||
|
.peekData();
|
||||||
|
t1.ledgerSequence = kSEQ;
|
||||||
|
|
||||||
|
EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
|
||||||
|
.WillOnce(Return(std::vector<TransactionAndMetadata>{t1}));
|
||||||
|
|
||||||
|
EXPECT_TRUE(publisher.getLastPublishedSequence());
|
||||||
|
EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);
|
||||||
|
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 1));
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction);
|
||||||
|
|
||||||
|
ctx_.run();
|
||||||
|
EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqStopIsTrue)
|
||||||
|
{
|
||||||
|
auto dummyState = etl::SystemState{};
|
||||||
|
dummyState.isStopping = true;
|
||||||
|
auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
|
||||||
|
EXPECT_FALSE(publisher.publish(kSEQ, {}));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqMaxAttempt)
|
||||||
|
{
|
||||||
|
auto dummyState = etl::SystemState{};
|
||||||
|
dummyState.isStopping = false;
|
||||||
|
auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
|
||||||
|
|
||||||
|
static constexpr auto kMAX_ATTEMPT = 2;
|
||||||
|
|
||||||
|
LedgerRange const range{.minSequence = kSEQ - 1, .maxSequence = kSEQ - 1};
|
||||||
|
EXPECT_CALL(*backend_, hardFetchLedgerRange).Times(kMAX_ATTEMPT).WillRepeatedly(Return(range));
|
||||||
|
|
||||||
|
EXPECT_FALSE(publisher.publish(kSEQ, kMAX_ATTEMPT, std::chrono::milliseconds{1}));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqStopIsFalse)
|
||||||
|
{
|
||||||
|
auto dummyState = etl::SystemState{};
|
||||||
|
dummyState.isStopping = false;
|
||||||
|
auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
|
||||||
|
|
||||||
|
LedgerRange const range{.minSequence = kSEQ, .maxSequence = kSEQ};
|
||||||
|
EXPECT_CALL(*backend_, hardFetchLedgerRange).WillOnce(Return(range));
|
||||||
|
|
||||||
|
auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
|
||||||
|
EXPECT_CALL(*backend_, fetchLedgerBySequence(kSEQ, _)).WillOnce(Return(dummyLedgerHeader));
|
||||||
|
|
||||||
|
EXPECT_TRUE(publisher.publish(kSEQ, {}));
|
||||||
|
ctx_.run();
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(ETLLedgerPublisherNgTest, PublishMultipleTxInOrder)
|
||||||
|
{
|
||||||
|
auto dummyState = etl::SystemState{};
|
||||||
|
dummyState.isWriting = true;
|
||||||
|
|
||||||
|
auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0); // age is 0
|
||||||
|
auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
|
||||||
|
backend_->setRange(kSEQ - 1, kSEQ);
|
||||||
|
|
||||||
|
publisher.publish(dummyLedgerHeader);
|
||||||
|
|
||||||
|
EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
|
||||||
|
.WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
|
||||||
|
|
||||||
|
// t1 index > t2 index
|
||||||
|
TransactionAndMetadata t1;
|
||||||
|
t1.transaction =
|
||||||
|
createPaymentTransactionObject(kACCOUNT, kACCOUNT2, kAMOUNT, kFEE, kSEQ).getSerializer().peekData();
|
||||||
|
t1.metadata = createPaymentTransactionMetaObject(kACCOUNT, kACCOUNT2, kFINAL_BALANCE, kFINAL_BALANCE2, 2)
|
||||||
|
.getSerializer()
|
||||||
|
.peekData();
|
||||||
|
t1.ledgerSequence = kSEQ;
|
||||||
|
t1.date = 1;
|
||||||
|
TransactionAndMetadata t2;
|
||||||
|
t2.transaction =
|
||||||
|
createPaymentTransactionObject(kACCOUNT, kACCOUNT2, kAMOUNT, kFEE, kSEQ).getSerializer().peekData();
|
||||||
|
t2.metadata = createPaymentTransactionMetaObject(kACCOUNT, kACCOUNT2, kFINAL_BALANCE, kFINAL_BALANCE2, 1)
|
||||||
|
.getSerializer()
|
||||||
|
.peekData();
|
||||||
|
t2.ledgerSequence = kSEQ;
|
||||||
|
t2.date = 2;
|
||||||
|
|
||||||
|
EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
|
||||||
|
.WillOnce(Return(std::vector<TransactionAndMetadata>{t1, t2}));
|
||||||
|
|
||||||
|
EXPECT_TRUE(publisher.getLastPublishedSequence());
|
||||||
|
EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);
|
||||||
|
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 2));
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);
|
||||||
|
|
||||||
|
Sequence const s;
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction(t2, _)).InSequence(s);
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction(t1, _)).InSequence(s);
|
||||||
|
|
||||||
|
ctx_.run();
|
||||||
|
EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(ETLLedgerPublisherNgTest, PublishVeryOldLedgerShouldSkip)
|
||||||
|
{
|
||||||
|
auto dummyState = etl::SystemState{};
|
||||||
|
dummyState.isWriting = true;
|
||||||
|
|
||||||
|
// Create a ledger header with age (800) greater than MAX_LEDGER_AGE_SECONDS (600)
|
||||||
|
auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 800);
|
||||||
|
auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
|
||||||
|
backend_->setRange(kSEQ - 1, kSEQ);
|
||||||
|
|
||||||
|
publisher.publish(dummyLedgerHeader);
|
||||||
|
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger).Times(0);
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges).Times(0);
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction).Times(0);
|
||||||
|
|
||||||
|
EXPECT_TRUE(publisher.getLastPublishedSequence());
|
||||||
|
EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);
|
||||||
|
|
||||||
|
ctx_.run();
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(ETLLedgerPublisherNgTest, PublishMultipleLedgersInQuickSuccession)
|
||||||
|
{
|
||||||
|
auto dummyState = etl::SystemState{};
|
||||||
|
dummyState.isWriting = true;
|
||||||
|
|
||||||
|
auto const dummyLedgerHeader1 = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
|
||||||
|
auto const dummyLedgerHeader2 = createLedgerHeader(kLEDGER_HASH, kSEQ + 1, 0);
|
||||||
|
auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
|
||||||
|
backend_->setRange(kSEQ - 1, kSEQ + 1);
|
||||||
|
|
||||||
|
// Publish two ledgers in quick succession
|
||||||
|
publisher.publish(dummyLedgerHeader1);
|
||||||
|
publisher.publish(dummyLedgerHeader2);
|
||||||
|
|
||||||
|
EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
|
||||||
|
.WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
|
||||||
|
EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ + 1, _))
|
||||||
|
.WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
|
||||||
|
|
||||||
|
EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
|
||||||
|
.WillOnce(Return(std::vector<TransactionAndMetadata>{}));
|
||||||
|
EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ + 1, _))
|
||||||
|
.WillOnce(Return(std::vector<TransactionAndMetadata>{}));
|
||||||
|
|
||||||
|
Sequence const s;
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(ledgerHeaderMatcher(dummyLedgerHeader1), _, _, _)).InSequence(s);
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges(ledgerHeaderMatcher(dummyLedgerHeader1), _)).InSequence(s);
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(ledgerHeaderMatcher(dummyLedgerHeader2), _, _, _)).InSequence(s);
|
||||||
|
EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges(ledgerHeaderMatcher(dummyLedgerHeader2), _)).InSequence(s);
|
||||||
|
|
||||||
|
EXPECT_TRUE(publisher.getLastPublishedSequence());
|
||||||
|
EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ + 1);
|
||||||
|
|
||||||
|
ctx_.run();
|
||||||
|
}
|
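The publisher tests above hinge on one rule: a ledger older than 600 seconds (the MAX_LEDGER_AGE_SECONDS referenced in the comments) still advances `getLastPublishedSequence()` but is never forwarded to subscribers. A minimal sketch of that gate, using a hypothetical `HeaderInfo` and `kMaxLedgerAge` rather than clio's actual types:

```cpp
// Sketch only: kMaxLedgerAge and HeaderInfo are illustrative stand-ins, not
// clio's actual constants or types.
#include <chrono>
#include <cstdint>

constexpr std::chrono::seconds kMaxLedgerAge{600};

struct HeaderInfo {
    std::uint32_t seq;
    std::chrono::system_clock::time_point closeTime;
};

// True when the ledger is fresh enough to be forwarded to subscribers; the
// skip tests above expect only the last-published sequence to advance when
// this would return false.
inline bool
shouldPublishToSubscribers(HeaderInfo const& header, std::chrono::system_clock::time_point now)
{
    auto const age = std::chrono::duration_cast<std::chrono::seconds>(now - header.closeTime);
    return age <= kMaxLedgerAge;  // a close time in the future yields a negative age, i.e. "fresh"
}
```

The negative-age case is why the CloseTimeGreaterThanNow test still expects a full publication.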
@@ -18,7 +18,6 @@
 //==============================================================================
 
 #include "rpc/WorkQueue.hpp"
-#include "util/MockAssert.hpp"
 #include "util/MockPrometheus.hpp"
 #include "util/config/ConfigDefinition.hpp"
 #include "util/config/ConfigValue.hpp"
@@ -30,12 +29,10 @@
 #include <gtest/gtest.h>
 
 #include <atomic>
-#include <chrono>
 #include <condition_variable>
 #include <cstdint>
 #include <mutex>
 #include <semaphore>
-#include <thread>
 #include <vector>
 
 using namespace util;
@@ -114,32 +111,7 @@ TEST_F(WorkQueueTest, NonWhitelistedPreventSchedulingAtQueueLimitExceeded)
     EXPECT_TRUE(unblocked);
 }
 
-struct WorkQueueDelayedStartTest : WithPrometheus {
-    WorkQueue queue{WorkQueue::kDONT_START_PROCESSING_TAG, /* numWorkers = */ 1, /* maxSize = */ 100};
-};
-
-TEST_F(WorkQueueDelayedStartTest, WaitTimeIncludesDelayBeforeStartProcessing)
-{
-    std::atomic_bool taskExecuted = false;
-
-    ASSERT_TRUE(queue.postCoro(
-        [&taskExecuted](auto /* yield */) { taskExecuted = true; },
-        /* isWhiteListed = */ true
-    ));
-
-    std::this_thread::sleep_for(std::chrono::milliseconds(50));
-    queue.startProcessing();
-    queue.stop();
-
-    EXPECT_TRUE(taskExecuted);
-
-    auto const report = queue.report();
-    auto const durationUs = report.at("queued_duration_us").as_uint64();
-
-    EXPECT_GE(durationUs, 50000u) << "Wait time should include the delay before startProcessing";
-}
-
-struct WorkQueuePriorityTest : WithPrometheus {
+struct WorkQueuePriorityTest : WithPrometheus, virtual ::testing::Test {
     WorkQueue queue{WorkQueue::kDONT_START_PROCESSING_TAG, /* numWorkers = */ 1, /* maxSize = */ 100};
 };
 
@@ -235,7 +207,11 @@ TEST_F(WorkQueueStopTest, CallsOnTasksCompleteWhenStoppingOnLastTask)
     queue.stop();
 }
 
-struct WorkQueueMockPrometheusTest : WithMockPrometheus {};
+struct WorkQueueMockPrometheusTest : WithMockPrometheus, RPCWorkQueueTestBase {
+    WorkQueueMockPrometheusTest() : RPCWorkQueueTestBase(/* workers = */ 1, /*maxQueueSize = */ 2)
+    {
+    }
+};
 
 TEST_F(WorkQueueMockPrometheusTest, postCoroCounters)
 {
@@ -245,40 +221,17 @@ TEST_F(WorkQueueMockPrometheusTest, postCoroCounters)
 
     std::binary_semaphore semaphore{0};
 
-    EXPECT_CALL(curSizeMock, value())
-        .WillOnce(::testing::Return(0)) // in startProcessing
-        .WillOnce(::testing::Return(0)); // first check in postCoro
+    EXPECT_CALL(curSizeMock, value()).WillOnce(::testing::Return(0)).WillRepeatedly(::testing::Return(1));
     EXPECT_CALL(curSizeMock, add(1));
     EXPECT_CALL(queuedMock, add(1));
     EXPECT_CALL(durationMock, add(::testing::Ge(0))).WillOnce([&](auto) {
         EXPECT_CALL(curSizeMock, add(-1));
+        EXPECT_CALL(curSizeMock, value()).WillOnce(::testing::Return(0));
         semaphore.release();
     });
 
-    // Note: the queue is not in the fixture because above expectations must be setup before startProcessing runs
-    WorkQueue queue(/* numWorkers = */ 4, /* maxSize = */ 2);
     auto const res = queue.postCoro([&](auto /* yield */) { semaphore.acquire(); }, /* isWhiteListed = */ false);
 
     ASSERT_TRUE(res);
     queue.stop();
 }
-
-// Note: not using EXPECT_CLIO_ASSERT_FAIL because exception is swallowed by the WQ context
-// TODO [https://github.com/XRPLF/clio/issues/2906]: Enable the test once we figure out a better way to do it without
-// using up >2 minutes of CI time
-struct WorkQueueDeathTest : WorkQueueMockPrometheusTest, common::util::WithMockAssert {};
-TEST_F(WorkQueueDeathTest, DISABLED_ExecuteTaskAssertsWhenQueueIsEmpty)
-{
-    [[maybe_unused]] auto& queuedMock = makeMock<CounterInt>("work_queue_queued_total_number", "");
-    [[maybe_unused]] auto& durationMock = makeMock<CounterInt>("work_queue_cumulative_tasks_duration_us", "");
-    auto& curSizeMock = makeMock<GaugeInt>("work_queue_current_size", "");
-
-    EXPECT_CALL(curSizeMock, value()).WillRepeatedly(::testing::Return(1)); // lie about the size
-    EXPECT_DEATH(
-        {
-            WorkQueue queue(WorkQueue::kDONT_START_PROCESSING_TAG, /* numWorkers = */ 1, /* maxSize = */ 2);
-            queue.startProcessing(); // the actual queue is empty which will lead to assertion failure
-        },
-        ".*"
-    );
-}
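The removed WorkQueueDelayedStartTest above documents how a queue built with kDONT_START_PROCESSING_TAG buffers work until startProcessing() is called, and that the queued-duration counter includes that waiting time. A rough usage sketch, assuming only the WorkQueue calls visible in this diff (postCoro, startProcessing, stop, report) and eliding the test file's using-directives:

```cpp
// Sketch only: assumes the WorkQueue interface exactly as it appears in this
// diff; namespace qualification and fixtures are elided.
#include "rpc/WorkQueue.hpp"

#include <atomic>

void
delayedStartSketch()
{
    // With kDONT_START_PROCESSING_TAG the queue accepts tasks but does not run
    // them until startProcessing() is called.
    WorkQueue queue{WorkQueue::kDONT_START_PROCESSING_TAG, /* numWorkers = */ 1, /* maxSize = */ 100};

    std::atomic_bool done = false;
    [[maybe_unused]] auto const posted =
        queue.postCoro([&done](auto /* yield */) { done = true; }, /* isWhiteListed = */ true);

    queue.startProcessing();  // work queued before this point starts running now
    queue.stop();             // waits for outstanding tasks to finish

    // The removed test asserted that this counter includes the delay spent
    // waiting for startProcessing().
    [[maybe_unused]] auto const queuedUs = queue.report().at("queued_duration_us").as_uint64();
}
```

In the real tests the result of postCoro is asserted, since a full queue can reject non-whitelisted tasks.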
@@ -1,756 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "util/Assert.hpp"
#include "util/Channel.hpp"
#include "util/Mutex.hpp"
#include "util/OverloadSet.hpp"
#include "util/Spawn.hpp"

#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/system/detail/error_code.hpp>
#include <gtest/gtest.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cstddef>
#include <memory>
#include <optional>
#include <semaphore>
#include <string>
#include <utility>
#include <variant>
#include <vector>

using namespace testing;

namespace {

constexpr auto kDEFAULT_THREAD_POOL_SIZE = 4;
constexpr auto kTEST_TIMEOUT = std::chrono::seconds{10};

constexpr auto kNUM_SENDERS = 3uz;
constexpr auto kNUM_RECEIVERS = 3uz;
constexpr auto kVALUES_PER_SENDER = 500uz;
constexpr auto kTOTAL_EXPECTED = kNUM_SENDERS * kVALUES_PER_SENDER;

enum class ContextType { IOContext, ThreadPool };

constexpr int
generateValue(std::size_t senderId, std::size_t i)
{
    return static_cast<int>((senderId * 100) + i);
}

std::vector<int>
generateExpectedValues()
{
    std::vector<int> expectedValues;
    expectedValues.reserve(kTOTAL_EXPECTED);
    for (auto senderId = 0uz; senderId < kNUM_SENDERS; ++senderId) {
        for (auto i = 0uz; i < kVALUES_PER_SENDER; ++i) {
            expectedValues.push_back(generateValue(senderId, i));
        }
    }
    std::ranges::sort(expectedValues);
    return expectedValues;
}

std::vector<int> const kEXPECTED_VALUES = generateExpectedValues();

std::string
contextTypeToString(ContextType type)
{
    return type == ContextType::IOContext ? "IOContext" : "ThreadPool";
}

class ContextWrapper {
public:
    using ContextVariant = std::variant<boost::asio::io_context, boost::asio::thread_pool>;

    explicit ContextWrapper(ContextType type)
        : context_([type] {
            if (type == ContextType::IOContext)
                return ContextVariant(std::in_place_type_t<boost::asio::io_context>());

            if (type == ContextType::ThreadPool)
                return ContextVariant(std::in_place_type_t<boost::asio::thread_pool>(), kDEFAULT_THREAD_POOL_SIZE);

            ASSERT(false, "Unknown new type of context");
            std::unreachable();
        }())
    {
    }

    template <typename Fn>
    void
    withExecutor(Fn&& fn)
    {
        std::visit(std::forward<Fn>(fn), context_);
    }

    void
    run()
    {
        std::visit(
            util::OverloadSet{
                [](boost::asio::io_context& context) { context.run_for(kTEST_TIMEOUT); },
                [](boost::asio::thread_pool& context) { context.join(); },
            },
            context_
        );
    }

private:
    ContextVariant context_;
};

}  // namespace

class ChannelSpawnTest : public TestWithParam<ContextType> {
protected:
    ChannelSpawnTest() : context_(GetParam())
    {
    }

    ContextWrapper context_;
};

class ChannelCallbackTest : public TestWithParam<ContextType> {
protected:
    ChannelCallbackTest() : context_(GetParam())
    {
    }

    ContextWrapper context_;
};

TEST_P(ChannelSpawnTest, MultipleSendersOneReceiver)
{
    context_.withExecutor([this](auto& executor) {
        auto [sender, receiver] = util::Channel<int>::create(executor, 10);
        util::Mutex<std::vector<int>> receivedValues;

        util::spawn(executor, [&receiver, &receivedValues](boost::asio::yield_context yield) mutable {
            while (true) {
                auto value = receiver.asyncReceive(yield);
                if (not value.has_value())
                    break;
                receivedValues.lock()->push_back(*value);
            }
        });

        {
            auto localSender = std::move(sender);
            for (auto senderId = 0uz; senderId < kNUM_SENDERS; ++senderId) {
                util::spawn(executor, [senderCopy = localSender, senderId](boost::asio::yield_context yield) mutable {
                    for (auto i = 0uz; i < kVALUES_PER_SENDER; ++i) {
                        if (not senderCopy.asyncSend(generateValue(senderId, i), yield))
                            break;
                    }
                });
            }
        }

        context_.run();

        EXPECT_EQ(receivedValues.lock()->size(), kTOTAL_EXPECTED);
        std::ranges::sort(receivedValues.lock().get());

        EXPECT_EQ(receivedValues.lock().get(), kEXPECTED_VALUES);
    });
}

TEST_P(ChannelSpawnTest, MultipleSendersMultipleReceivers)
{
    context_.withExecutor([this](auto& executor) {
        auto [sender, receiver] = util::Channel<int>::create(executor, 10);
        util::Mutex<std::vector<int>> receivedValues;
        std::vector receivers(kNUM_RECEIVERS, receiver);

        for (auto receiverId = 0uz; receiverId < kNUM_RECEIVERS; ++receiverId) {
            util::spawn(
                executor,
                [&receiverRef = receivers[receiverId], &receivedValues](boost::asio::yield_context yield) mutable {
                    while (true) {
                        auto value = receiverRef.asyncReceive(yield);
                        if (not value.has_value())
                            break;
                        receivedValues.lock()->push_back(*value);
                    }
                }
            );
        }

        {
            auto localSender = std::move(sender);
            for (auto senderId = 0uz; senderId < kNUM_SENDERS; ++senderId) {
                util::spawn(executor, [senderCopy = localSender, senderId](boost::asio::yield_context yield) mutable {
                    for (auto i = 0uz; i < kVALUES_PER_SENDER; ++i) {
                        auto const value = generateValue(senderId, i);
                        if (not senderCopy.asyncSend(value, yield))
                            break;
                    }
                });
            }
        }

        context_.run();

        EXPECT_EQ(receivedValues.lock()->size(), kTOTAL_EXPECTED);
        std::ranges::sort(receivedValues.lock().get());

        EXPECT_EQ(receivedValues.lock().get(), kEXPECTED_VALUES);
    });
}

TEST_P(ChannelSpawnTest, ChannelClosureScenarios)
{
    context_.withExecutor([this](auto& executor) {
        std::atomic_bool testCompleted{false};

        util::spawn(executor, [&executor, &testCompleted](boost::asio::yield_context yield) mutable {
            auto [sender, receiver] = util::Channel<int>::create(executor, 5);

            EXPECT_FALSE(receiver.isClosed());

            bool const success = sender.asyncSend(42, yield);
            EXPECT_TRUE(success);

            auto value = receiver.asyncReceive(yield);
            EXPECT_TRUE(value.has_value());
            EXPECT_EQ(*value, 42);

            {
                [[maybe_unused]] auto tempSender = std::move(sender);
            }

            EXPECT_TRUE(receiver.isClosed());

            auto closedValue = receiver.asyncReceive(yield);
            EXPECT_FALSE(closedValue.has_value());

            testCompleted = true;
        });

        context_.run();
        EXPECT_TRUE(testCompleted);
    });
}

TEST_P(ChannelSpawnTest, TrySendTryReceiveMethods)
{
    context_.withExecutor([this](auto& executor) {
        std::atomic_bool testCompleted{false};

        util::spawn(executor, [&executor, &testCompleted](boost::asio::yield_context) mutable {
            auto [sender, receiver] = util::Channel<int>::create(executor, 3);

            EXPECT_FALSE(receiver.tryReceive().has_value());

            EXPECT_TRUE(sender.trySend(42));
            EXPECT_TRUE(sender.trySend(43));
            EXPECT_TRUE(sender.trySend(44));
            EXPECT_FALSE(sender.trySend(45)); // channel full

            auto value1 = receiver.tryReceive();
            EXPECT_TRUE(value1.has_value());
            EXPECT_EQ(*value1, 42);

            auto value2 = receiver.tryReceive();
            EXPECT_TRUE(value2.has_value());
            EXPECT_EQ(*value2, 43);

            EXPECT_TRUE(sender.trySend(46));

            auto value3 = receiver.tryReceive();
            EXPECT_TRUE(value3.has_value());
            EXPECT_EQ(*value3, 44);

            auto value4 = receiver.tryReceive();
            EXPECT_TRUE(value4.has_value());
            EXPECT_EQ(*value4, 46);

            EXPECT_FALSE(receiver.tryReceive().has_value());

            testCompleted = true;
        });

        context_.run();
        EXPECT_TRUE(testCompleted);
    });
}

TEST_P(ChannelSpawnTest, TryMethodsWithClosedChannel)
{
    context_.withExecutor([this](auto& executor) {
        std::atomic_bool testCompleted{false};

        util::spawn(executor, [&executor, &testCompleted](boost::asio::yield_context) mutable {
            auto [sender, receiver] = util::Channel<int>::create(executor, 3);

            EXPECT_TRUE(sender.trySend(42));
            EXPECT_TRUE(sender.trySend(43));

            {
                [[maybe_unused]] auto tempSender = std::move(sender);
            }

            EXPECT_TRUE(receiver.isClosed());

            auto value1 = receiver.tryReceive();
            EXPECT_TRUE(value1.has_value());
            EXPECT_EQ(*value1, 42);

            auto value2 = receiver.tryReceive();
            EXPECT_TRUE(value2.has_value());
            EXPECT_EQ(*value2, 43);

            EXPECT_FALSE(receiver.tryReceive().has_value());

            testCompleted = true;
        });

        context_.run();
        EXPECT_TRUE(testCompleted);
    });
}

INSTANTIATE_TEST_SUITE_P(
    SpawnTests,
    ChannelSpawnTest,
    Values(ContextType::IOContext, ContextType::ThreadPool),
    [](TestParamInfo<ContextType> const& info) { return contextTypeToString(info.param); }
);

TEST_P(ChannelCallbackTest, MultipleSendersOneReceiver)
{
    context_.withExecutor([this](auto& executor) {
        auto [sender, receiver] = util::Channel<int>::create(executor, 10);
        util::Mutex<std::vector<int>> receivedValues;

        auto receiveNext = [&receiver, &receivedValues](this auto&& self) -> void {
            if (receivedValues.lock()->size() >= kTOTAL_EXPECTED)
                return;

            receiver.asyncReceive([&receivedValues, self = std::forward<decltype(self)>(self)](auto value) {
                if (value.has_value()) {
                    receivedValues.lock()->push_back(*value);
                    self();
                }
            });
        };

        boost::asio::post(executor, receiveNext);

        {
            auto localSender = std::move(sender);
            for (auto senderId = 0uz; senderId < kNUM_SENDERS; ++senderId) {
                auto senderCopy = localSender;
                boost::asio::post(executor, [senderCopy = std::move(senderCopy), senderId, &executor]() mutable {
                    auto sendNext = [senderCopy = std::move(senderCopy),
                                     senderId,
                                     &executor](this auto&& self, std::size_t i) -> void {
                        if (i >= kVALUES_PER_SENDER)
                            return;

                        senderCopy.asyncSend(
                            generateValue(senderId, i),
                            [self = std::forward<decltype(self)>(self), &executor, i](bool success) mutable {
                                if (success)
                                    boost::asio::post(executor, [self = std::move(self), i]() mutable { self(i + 1); });
                            }
                        );
                    };
                    sendNext(0);
                });
            }
        }

        context_.run();

        EXPECT_EQ(receivedValues.lock()->size(), kTOTAL_EXPECTED);
        std::ranges::sort(receivedValues.lock().get());

        EXPECT_EQ(receivedValues.lock().get(), kEXPECTED_VALUES);
    });
}

TEST_P(ChannelCallbackTest, MultipleSendersMultipleReceivers)
{
    context_.withExecutor([this](auto& executor) {
        auto [sender, receiver] = util::Channel<int>::create(executor, 10);
        util::Mutex<std::vector<int>> receivedValues;
        std::vector receivers(kNUM_RECEIVERS, receiver);

        for (auto receiverId = 0uz; receiverId < kNUM_RECEIVERS; ++receiverId) {
            auto& receiverRef = receivers[receiverId];
            auto receiveNext = [&receiverRef, &receivedValues](this auto&& self) -> void {
                receiverRef.asyncReceive([&receivedValues, self = std::forward<decltype(self)>(self)](auto value) {
                    if (value.has_value()) {
                        receivedValues.lock()->push_back(*value);
                        self();
                    }
                });
            };
            boost::asio::post(executor, receiveNext);
        }

        {
            auto localSender = std::move(sender);
            for (auto senderId = 0uz; senderId < kNUM_SENDERS; ++senderId) {
                auto senderCopy = localSender;
                boost::asio::post(executor, [senderCopy = std::move(senderCopy), senderId, &executor]() mutable {
                    auto sendNext = [senderCopy = std::move(senderCopy),
                                     senderId,
                                     &executor](this auto&& self, std::size_t i) -> void {
                        if (i >= kVALUES_PER_SENDER)
                            return;

                        senderCopy.asyncSend(
                            generateValue(senderId, i),
                            [self = std::forward<decltype(self)>(self), &executor, i](bool success) mutable {
                                if (success)
                                    boost::asio::post(executor, [self = std::move(self), i]() mutable { self(i + 1); });
                            }
                        );
                    };
                    sendNext(0);
                });
            }
        }

        context_.run();

        EXPECT_EQ(receivedValues.lock()->size(), kTOTAL_EXPECTED);
        std::ranges::sort(receivedValues.lock().get());

        EXPECT_EQ(receivedValues.lock().get(), kEXPECTED_VALUES);
    });
}

TEST_P(ChannelCallbackTest, ChannelClosureScenarios)
{
    context_.withExecutor([this](auto& executor) {
        std::atomic_bool testCompleted{false};
        auto [sender, receiver] = util::Channel<int>::create(executor, 5);
        auto receiverPtr = std::make_shared<decltype(receiver)>(std::move(receiver));
        auto senderPtr = std::make_shared<std::optional<decltype(sender)>>(std::move(sender));

        EXPECT_FALSE(receiverPtr->isClosed());

        senderPtr->value().asyncSend(42, [&executor, receiverPtr, senderPtr, &testCompleted](bool success) {
            EXPECT_TRUE(success);

            receiverPtr->asyncReceive([&executor, receiverPtr, senderPtr, &testCompleted](auto value) {
                EXPECT_TRUE(value.has_value());
                EXPECT_EQ(*value, 42);

                boost::asio::post(executor, [&executor, receiverPtr, senderPtr, &testCompleted]() {
                    senderPtr->reset();
                    EXPECT_TRUE(receiverPtr->isClosed());

                    boost::asio::post(executor, [receiverPtr, &testCompleted]() {
                        receiverPtr->asyncReceive([&testCompleted](auto closedValue) {
                            EXPECT_FALSE(closedValue.has_value());
                            testCompleted = true;
                        });
                    });
                });
            });
        });

        context_.run();
        EXPECT_TRUE(testCompleted);
    });
}

TEST_P(ChannelCallbackTest, TrySendTryReceiveMethods)
{
    context_.withExecutor([this](auto& executor) {
        std::atomic_bool testCompleted{false};
        auto [sender, receiver] = util::Channel<int>::create(executor, 2);
        auto receiverPtr = std::make_shared<decltype(receiver)>(std::move(receiver));
        auto senderPtr = std::make_shared<decltype(sender)>(std::move(sender));

        boost::asio::post(executor, [receiverPtr, senderPtr, &testCompleted]() {
            EXPECT_FALSE(receiverPtr->tryReceive().has_value());

            EXPECT_TRUE(senderPtr->trySend(100));
            EXPECT_TRUE(senderPtr->trySend(101));
            EXPECT_FALSE(senderPtr->trySend(102)); // channel full

            auto value1 = receiverPtr->tryReceive();
            EXPECT_TRUE(value1.has_value());
            EXPECT_EQ(*value1, 100);

            EXPECT_TRUE(senderPtr->trySend(103));

            auto value2 = receiverPtr->tryReceive();
            EXPECT_TRUE(value2.has_value());
            EXPECT_EQ(*value2, 101);

            auto value3 = receiverPtr->tryReceive();
            EXPECT_TRUE(value3.has_value());
            EXPECT_EQ(*value3, 103);

            testCompleted = true;
        });

        context_.run();
        EXPECT_TRUE(testCompleted);
    });
}

TEST_P(ChannelCallbackTest, TryMethodsWithClosedChannel)
{
    context_.withExecutor([this](auto& executor) {
        std::atomic_bool testCompleted{false};
        auto [sender, receiver] = util::Channel<int>::create(executor, 3);
        auto receiverPtr = std::make_shared<util::Channel<int>::Receiver>(std::move(receiver));
        auto senderPtr = std::make_shared<std::optional<util::Channel<int>::Sender>>(std::move(sender));

        boost::asio::post(executor, [receiverPtr, senderPtr, &testCompleted]() {
            EXPECT_TRUE(senderPtr->value().trySend(100));
            EXPECT_TRUE(senderPtr->value().trySend(101));

            senderPtr->reset();

            EXPECT_TRUE(receiverPtr->isClosed());

            auto value1 = receiverPtr->tryReceive();
            EXPECT_TRUE(value1.has_value());
            EXPECT_EQ(*value1, 100);

            auto value2 = receiverPtr->tryReceive();
            EXPECT_TRUE(value2.has_value());
            EXPECT_EQ(*value2, 101);

            EXPECT_FALSE(receiverPtr->tryReceive().has_value());

            testCompleted = true;
        });

        context_.run();
        EXPECT_TRUE(testCompleted);
    });
}

INSTANTIATE_TEST_SUITE_P(
    CallbackTests,
    ChannelCallbackTest,
    Values(ContextType::IOContext, ContextType::ThreadPool),
    [](TestParamInfo<ContextType> const& info) { return contextTypeToString(info.param); }
);

TEST(ChannelTest, MultipleSenderCopiesErrorHandling)
{
    boost::asio::io_context executor;
    bool testCompleted = false;

    util::spawn(executor, [&executor, &testCompleted](boost::asio::yield_context yield) mutable {
        auto [sender, receiver] = util::Channel<int>::create(executor, 5);

        bool const success = sender.asyncSend(42, yield);
        EXPECT_TRUE(success);

        auto value = receiver.asyncReceive(yield);
        EXPECT_TRUE(value.has_value());
        EXPECT_EQ(*value, 42);

        auto senderCopy = sender;
        {
            [[maybe_unused]] auto tempSender = std::move(sender);
            // tempSender destroyed here, but senderCopy still exists
        }

        EXPECT_FALSE(receiver.isClosed());

        {
            [[maybe_unused]] auto tempSender = std::move(senderCopy);
            // now all senders are destroyed, channel should close
        }

        EXPECT_TRUE(receiver.isClosed());

        auto closedValue = receiver.asyncReceive(yield);
        EXPECT_FALSE(closedValue.has_value());

        testCompleted = true;
    });

    executor.run_for(kTEST_TIMEOUT);
    EXPECT_TRUE(testCompleted);
}

TEST(ChannelTest, ChannelClosesWhenAllSendersDestroyed)
{
    boost::asio::io_context executor;
    auto [sender, receiver] = util::Channel<int>::create(executor, 5);

    EXPECT_FALSE(receiver.isClosed());

    auto senderCopy = sender;
    {
        [[maybe_unused]] auto temp = std::move(sender);
    }
    EXPECT_FALSE(receiver.isClosed()); // one sender still exists

    {
        [[maybe_unused]] auto temp = std::move(senderCopy);
    }
    EXPECT_TRUE(receiver.isClosed()); // all senders destroyed
}

TEST(ChannelTest, ChannelClosesWhenAllReceiversDestroyed)
{
    boost::asio::io_context executor;
    auto [sender, receiver] = util::Channel<int>::create(executor, 5);

    EXPECT_TRUE(sender.trySend(42));

    auto receiverCopy = receiver;
    {
        [[maybe_unused]] auto temp = std::move(receiver);
    }
    EXPECT_TRUE(sender.trySend(43)); // one receiver still exists, can send

    {
        [[maybe_unused]] auto temp = std::move(receiverCopy);
    }
    EXPECT_FALSE(sender.trySend(44)); // all receivers destroyed, channel closed
}

TEST(ChannelTest, ChannelPreservesOrderFIFO)
{
    boost::asio::io_context executor;
    bool testCompleted = false;
    std::vector<int> const valuesToSend = {42, 7, 99, 13, 5, 88, 21, 3, 67, 54};

    util::spawn(executor, [&executor, &testCompleted, &valuesToSend](boost::asio::yield_context yield) mutable {
        auto [sender, receiver] = util::Channel<int>::create(executor, 5);
        std::vector<int> receivedValues;

        // Spawn a receiver coroutine that collects all values
        util::spawn(executor, [&receiver, &receivedValues](boost::asio::yield_context yield) mutable {
            auto value = receiver.asyncReceive(yield);
            while (value.has_value()) {
                receivedValues.push_back(*value);
                value = receiver.asyncReceive(yield);
            }
        });

        // Send all values
        for (int const value : valuesToSend) {
            EXPECT_TRUE(sender.asyncSend(value, yield));
        }

        // Close sender to signal end of data
        {
            [[maybe_unused]] auto temp = std::move(sender);
        }

        // Give receiver time to process all values
        boost::asio::steady_timer timer(executor, std::chrono::milliseconds{50});
        timer.async_wait(yield);

        // Verify received values match sent values in the same order
        EXPECT_EQ(receivedValues, valuesToSend);

        testCompleted = true;
    });

    executor.run_for(kTEST_TIMEOUT);
    EXPECT_TRUE(testCompleted);
}

TEST(ChannelTest, AsyncReceiveWakesUpWhenSenderDestroyed)
{
    boost::asio::io_context executor;
    bool testCompleted = false;
    auto [sender, receiver] = util::Channel<int>::create(executor, 5);
    auto senderPtr = std::make_shared<decltype(sender)>(std::move(sender));

    util::spawn(
        executor,
        [&receiver, senderPtr = std::move(senderPtr), &testCompleted, &executor](boost::asio::yield_context) mutable {
            // Start receiving - this will block because no data is sent
            auto receiveTask = [&receiver, &testCompleted](boost::asio::yield_context yield) {
                auto const value = receiver.asyncReceive(yield);
                EXPECT_FALSE(value.has_value()); // Should receive nullopt when sender is destroyed
                testCompleted = true;
            };

            util::spawn(executor, receiveTask);

            senderPtr.reset();
        }
    );

    executor.run_for(kTEST_TIMEOUT);
    EXPECT_TRUE(testCompleted);
}

// This test verifies the workaround for a bug in boost::asio::experimental::concurrent_channel where close() does not
// cancel pending async operations. Our Channel wrapper calls cancel() after close() to ensure pending operations are
// unblocked.
// See: https://github.com/chriskohlhoff/asio/issues/1575
TEST(ChannelTest, PendingAsyncSendsAreCancelledOnClose)
{
    boost::asio::thread_pool pool{4};
    static constexpr auto kPENDING_NUM_SENDERS = 10uz;

    // Channel with capacity 0 - all sends will block waiting for a receiver
    auto [sender, receiver] = util::Channel<int>::create(pool, 0);

    std::atomic<std::size_t> completedSends{0};
    std::counting_semaphore<kPENDING_NUM_SENDERS> semaphore{kPENDING_NUM_SENDERS};

    // Spawn multiple senders that will all block (no receiver is consuming)
    for (auto i = 0uz; i < kPENDING_NUM_SENDERS; ++i) {
        util::spawn(
            pool, [senderCopy = sender, i, &completedSends, &semaphore](boost::asio::yield_context yield) mutable {
                semaphore.release(1);
                EXPECT_FALSE(senderCopy.asyncSend(static_cast<int>(i), yield));
                ++completedSends;
            }
        );
    }

    semaphore.acquire();

    // Close the channel by destroying the only receiver we have.
    // Our workaround calls cancel() after close() to unblock pending operations
    {
        [[maybe_unused]] auto r = std::move(receiver);
    }

    // All senders should complete (unblocked by our cancel() workaround)
    pool.join();

    // All sends should have completed (returned false due to closed channel)
    EXPECT_EQ(completedSends, kPENDING_NUM_SENDERS);
}

INSTANTIATE_CHANNEL_FOR_CLANG(int);
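The deleted PendingAsyncSendsAreCancelledOnClose test above protects a workaround for boost::asio::experimental::concurrent_channel, where close() alone may leave pending asynchronous operations hanging (see the asio issue linked in the test comment). A standalone sketch of the same idea written directly against the raw Boost.Asio channel, not clio's util::Channel:

```cpp
// Sketch only: illustrates the close()+cancel() pattern; behaviour of close()
// without cancel() depends on the Boost version in use.
#include <boost/asio/experimental/concurrent_channel.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/system/error_code.hpp>

#include <iostream>

int
main()
{
    boost::asio::io_context ctx;
    using Channel = boost::asio::experimental::concurrent_channel<void(boost::system::error_code, int)>;

    Channel channel{ctx, 0};  // zero capacity: every send waits for a matching receive

    channel.async_send(boost::system::error_code{}, 42, [](boost::system::error_code ec) {
        // With close() alone this handler may never run on affected Boost
        // versions; cancel() completes it with an error so the sender unblocks.
        std::cout << "send completed: " << ec.message() << '\n';
    });

    channel.close();
    channel.cancel();  // the workaround: explicitly cancel pending operations

    ctx.run();
}
```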