Compare commits

..

1 Commits

Author SHA1 Message Date
mathbunnyru
99a33777c9 style: Update pre-commit hooks 2025-07-02 15:24:51 +00:00
58 changed files with 872 additions and 986 deletions

View File

@@ -17,9 +17,6 @@ inputs:
platforms:
description: Platforms to build the image for (e.g. linux/amd64,linux/arm64)
required: true
build_args:
description: List of build-time variables
required: false
dockerhub_repo:
description: DockerHub repository name
@@ -64,4 +61,13 @@ runs:
platforms: ${{ inputs.platforms }}
push: ${{ inputs.push_image == 'true' }}
tags: ${{ steps.meta.outputs.tags }}
build-args: ${{ inputs.build_args }}
- name: Update DockerHub description
if: ${{ inputs.push_image == 'true' && inputs.dockerhub_repo != '' }}
uses: peter-evans/dockerhub-description@432a30c9e07499fd01da9f8a49f0faf9e0ca5b77 # v4.0.2
with:
username: ${{ env.DOCKERHUB_USER }}
password: ${{ env.DOCKERHUB_PW }}
repository: ${{ inputs.dockerhub_repo }}
short-description: ${{ inputs.dockerhub_description }}
readme-filepath: ${{ inputs.directory }}/README.md

View File

@@ -2,9 +2,9 @@ name: Build
on:
push:
branches: [release/*, develop]
branches: [master, release/*, develop]
pull_request:
branches: [release/*, develop]
branches: [master, release/*, develop]
paths:
- .github/workflows/build.yml

View File

@@ -22,11 +22,6 @@ jobs:
with:
lfs: true
- name: Prepare runner
uses: ./.github/actions/prepare_runner
with:
disable_ccache: true
- name: Create build directory
run: mkdir build_docs

View File

@@ -96,7 +96,6 @@ jobs:
uses: ./.github/workflows/release_impl.yml
with:
overwrite_release: true
prerelease: true
title: "Clio development (nightly) build"
version: nightly
header: >

View File

@@ -40,11 +40,8 @@ jobs:
GH_REPO: ${{ github.repository }}
GH_TOKEN: ${{ github.token }}
with:
commit-message: "style: Update pre-commit hooks"
committer: Clio CI <skuznetsov@ripple.com>
branch: update/pre-commit-hooks
branch-suffix: timestamp
delete-branch: true
title: "style: Update pre-commit hooks"
commit-message: "style: Update pre-commit hooks"
body: Update versions of pre-commit hooks to latest version.
reviewers: "godexsoft,kuznetsss,PeterChen13579,mathbunnyru"

View File

@@ -3,7 +3,8 @@ name: Run pre-commit hooks
on:
pull_request:
push:
branches: [develop]
branches:
- develop
workflow_dispatch:
jobs:

View File

@@ -48,7 +48,6 @@ jobs:
uses: ./.github/workflows/release_impl.yml
with:
overwrite_release: false
prerelease: ${{ contains(github.ref_name, '-') }}
title: "${{ github.ref_name}}"
version: "${{ github.ref_name }}"
header: >

View File

@@ -8,11 +8,6 @@ on:
required: true
type: boolean
prerelease:
description: "Create a prerelease"
required: true
type: boolean
title:
description: "Release title"
required: true
@@ -30,12 +25,12 @@ on:
generate_changelog:
description: "Generate changelog"
required: true
required: false
type: boolean
draft:
description: "Create a draft release"
required: true
required: false
type: boolean
jobs:
@@ -114,7 +109,7 @@ jobs:
shell: bash
run: |
gh release create "${{ inputs.version }}" \
${{ inputs.prerelease && '--prerelease' || '' }} \
${{ inputs.overwrite_release && '--prerelease' || '' }} \
--title "${{ inputs.title }}" \
--target "${GITHUB_SHA}" \
${{ inputs.draft && '--draft' || '' }} \

View File

@@ -37,17 +37,12 @@ jobs:
strategy:
fail-fast: false
matrix:
compiler: [gcc, clang]
sanitizer_ext: [.asan, .tsan, .ubsan]
build_type: [Release, Debug]
compiler: ["gcc", "clang"]
sanitizer_ext: [".asan", ".tsan", ".ubsan"]
exclude:
# Currently, clang.tsan unit tests hang
- compiler: clang
sanitizer_ext: .tsan
build_type: Release
- compiler: clang
sanitizer_ext: .tsan
build_type: Debug
uses: ./.github/workflows/build_and_test.yml
with:
@@ -55,7 +50,7 @@ jobs:
container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
disable_cache: true
conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
build_type: ${{ matrix.build_type }}
build_type: Release
static: false
run_unit_tests: true
run_integration_tests: false

View File

@@ -85,7 +85,7 @@ jobs:
if: env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true'
uses: actions/upload-artifact@v4
with:
name: sanitizer_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
name: ${{ inputs.conan_profile }}_report
path: .sanitizer-report/*
include-hidden-files: true

View File

@@ -29,10 +29,6 @@ concurrency:
cancel-in-progress: false
env:
CLANG_MAJOR_VERSION: 19
GCC_MAJOR_VERSION: 12
GCC_VERSION: 12.3.0
GHCR_REPO: ghcr.io/${{ github.repository_owner }}
jobs:
@@ -63,13 +59,10 @@ jobs:
directory: docker/compilers/gcc
tags: |
type=raw,value=amd64-latest
type=raw,value=amd64-${{ env.GCC_MAJOR_VERSION }}
type=raw,value=amd64-${{ env.GCC_VERSION }}
type=raw,value=amd64-12
type=raw,value=amd64-12.3.0
type=raw,value=amd64-${{ github.sha }}
platforms: linux/amd64
build_args: |
GCC_MAJOR_VERSION=${{ env.GCC_MAJOR_VERSION }}
GCC_VERSION=${{ env.GCC_VERSION }}
dockerhub_repo: rippleci/clio_gcc
dockerhub_description: GCC compiler for XRPLF/clio.
@@ -100,13 +93,10 @@ jobs:
directory: docker/compilers/gcc
tags: |
type=raw,value=arm64-latest
type=raw,value=arm64-${{ env.GCC_MAJOR_VERSION }}
type=raw,value=arm64-${{ env.GCC_VERSION }}
type=raw,value=arm64-12
type=raw,value=arm64-12.3.0
type=raw,value=arm64-${{ github.sha }}
platforms: linux/arm64
build_args: |
GCC_MAJOR_VERSION=${{ env.GCC_MAJOR_VERSION }}
GCC_VERSION=${{ env.GCC_VERSION }}
dockerhub_repo: rippleci/clio_gcc
dockerhub_description: GCC compiler for XRPLF/clio.
@@ -152,8 +142,8 @@ jobs:
for image in ${{ env.GHCR_REPO_LC }}/clio-gcc rippleci/clio_gcc; do
docker buildx imagetools create \
-t $image:latest \
-t $image:${{ env.GCC_MAJOR_VERSION }} \
-t $image:${{ env.GCC_VERSION }} \
-t $image:12 \
-t $image:12.3.0 \
-t $image:${{ github.sha }} \
$image:arm64-latest \
$image:amd64-latest
@@ -186,18 +176,15 @@ jobs:
directory: docker/compilers/clang
tags: |
type=raw,value=latest
type=raw,value=${{ env.CLANG_MAJOR_VERSION }}
type=raw,value=16
type=raw,value=${{ github.sha }}
platforms: linux/amd64,linux/arm64
build_args: |
CLANG_MAJOR_VERSION=${{ env.CLANG_MAJOR_VERSION }}
dockerhub_repo: rippleci/clio_clang
dockerhub_description: Clang compiler for XRPLF/clio.
tools-amd64:
name: Build and push tools docker image (amd64)
tools:
name: Build and push tools docker image
runs-on: heavy
needs: [gcc-merge]
steps:
- uses: actions/checkout@v4
@@ -218,85 +205,14 @@ jobs:
push_image: ${{ github.event_name != 'pull_request' }}
directory: docker/tools
tags: |
type=raw,value=amd64-latest
type=raw,value=amd64-${{ github.sha }}
platforms: linux/amd64
build_args: |
GCC_VERSION=${{ env.GCC_VERSION }}
tools-arm64:
name: Build and push tools docker image (arm64)
runs-on: heavy-arm64
needs: [gcc-merge]
steps:
- uses: actions/checkout@v4
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
files: "docker/tools/**"
- uses: ./.github/actions/build_docker_image
if: steps.changed-files.outputs.any_changed == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
images: |
${{ env.GHCR_REPO }}/clio-tools
push_image: ${{ github.event_name != 'pull_request' }}
directory: docker/tools
tags: |
type=raw,value=arm64-latest
type=raw,value=arm64-${{ github.sha }}
platforms: linux/arm64
build_args: |
GCC_VERSION=${{ env.GCC_VERSION }}
tools-merge:
name: Merge and push multi-arch tools docker image
runs-on: heavy
needs: [tools-amd64, tools-arm64]
steps:
- uses: actions/checkout@v4
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
files: "docker/tools/**"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Make GHCR_REPO lowercase
run: |
echo "GHCR_REPO_LC=$(echo ${{env.GHCR_REPO}} | tr '[:upper:]' '[:lower:]')" >> ${GITHUB_ENV}
- name: Create and push multi-arch manifest
if: github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true'
run: |
image=${{ env.GHCR_REPO_LC }}/clio-tools
docker buildx imagetools create \
-t $image:latest \
-t $image:${{ github.sha }} \
$image:arm64-latest \
$image:amd64-latest
type=raw,value=latest
type=raw,value=${{ github.sha }}
platforms: linux/amd64,linux/arm64
ci:
name: Build and push CI docker image
runs-on: heavy
needs: [gcc-merge, clang, tools-merge]
needs: [gcc-merge, clang, tools]
steps:
- uses: actions/checkout@v4
@@ -313,11 +229,8 @@ jobs:
directory: docker/ci
tags: |
type=raw,value=latest
type=raw,value=gcc_${{ env.GCC_MAJOR_VERSION }}_clang_${{ env.CLANG_MAJOR_VERSION }}
type=raw,value=gcc_12_clang_16
type=raw,value=${{ github.sha }}
platforms: linux/amd64,linux/arm64
build_args: |
CLANG_MAJOR_VERSION=${{ env.CLANG_MAJOR_VERSION }}
GCC_VERSION=${{ env.GCC_VERSION }}
dockerhub_repo: rippleci/clio_ci
dockerhub_description: CI image for XRPLF/clio.

View File

@@ -11,7 +11,8 @@ on:
default: false
type: boolean
pull_request:
branches: [develop]
branches:
- develop
paths:
- .github/workflows/upload_conan_deps.yml
@@ -23,7 +24,8 @@ on:
- conanfile.py
- conan.lock
push:
branches: [develop]
branches:
- develop
paths:
- .github/workflows/upload_conan_deps.yml

View File

@@ -69,17 +69,15 @@ endif ()
# Enable selected sanitizer if enabled via `san`
if (san)
set(SUPPORTED_SANITIZERS "address" "thread" "memory" "undefined")
if (NOT san IN_LIST SUPPORTED_SANITIZERS)
list(FIND SUPPORTED_SANITIZERS "${san}" INDEX)
if (INDEX EQUAL -1)
message(FATAL_ERROR "Error: Unsupported sanitizer '${san}'. Supported values are: ${SUPPORTED_SANITIZERS}.")
endif ()
# Sanitizers recommend minimum of -O1 for reasonable performance so we enable it for debug builds
set(SAN_OPTIMIZATION_FLAG "")
if (CMAKE_BUILD_TYPE STREQUAL "Debug")
set(SAN_OPTIMIZATION_FLAG -O1)
endif ()
target_compile_options(clio_options INTERFACE ${SAN_OPTIMIZATION_FLAG} ${SAN_FLAG} -fno-omit-frame-pointer)
target_compile_options(
clio_options INTERFACE # Sanitizers recommend minimum of -O1 for reasonable performance
$<$<CONFIG:Debug>:-O1> ${SAN_FLAG} -fno-omit-frame-pointer
)
target_compile_definitions(
clio_options INTERFACE $<$<STREQUAL:${san},address>:SANITIZER=ASAN> $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
$<$<STREQUAL:${san},memory>:SANITIZER=MSAN> $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>

View File

@@ -4,42 +4,39 @@
find_package(Git REQUIRED)
set(GIT_COMMAND describe --tags --exact-match)
set(GIT_COMMAND rev-parse --short HEAD)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE TAG
RESULT_VARIABLE RC
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE REV
OUTPUT_STRIP_TRAILING_WHITESPACE
)
if (RC EQUAL 0)
# if we are on a tag, use the tag name
set(CLIO_VERSION "${TAG}")
set(DOC_CLIO_VERSION "${TAG}")
else ()
# if not, use YYYYMMDDHMS-<branch>-<git-rev>
set(GIT_COMMAND branch --show-current)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BRANCH
OUTPUT_STRIP_TRAILING_WHITESPACE
)
if (BRANCH STREQUAL "")
set(BRANCH "dev")
endif ()
if (NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name
# YYYYMMDDHMS-<branch>-<git-rev>
set(GIT_COMMAND show -s --date=format:%Y%m%d%H%M%S --format=%cd)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE DATE
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE DATE
OUTPUT_STRIP_TRAILING_WHITESPACE
)
set(GIT_COMMAND branch --show-current)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BRANCH
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
)
set(GIT_COMMAND rev-parse --short HEAD)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE REV
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
)
set(CLIO_VERSION "${DATE}-${BRANCH}-${REV}")
set(DOC_CLIO_VERSION "develop")
else ()
set(GIT_COMMAND describe --tags)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE CLIO_TAG_VERSION
OUTPUT_STRIP_TRAILING_WHITESPACE
)
set(CLIO_VERSION "${CLIO_TAG_VERSION}")
set(DOC_CLIO_VERSION "${CLIO_TAG_VERSION}")
endif ()
if (CMAKE_BUILD_TYPE MATCHES Debug)

View File

@@ -1,46 +1,47 @@
{
"version": "0.5",
"requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1752006674.465",
"xxhash/0.8.2#7856c968c985b2981b707ee8f2413b2b%1752006674.334",
"xrpl/2.5.0#7880d1696f11fceb1d498570f1a184c8%1752006708.218",
"sqlite3/3.47.0#7a0904fd061f5f8a2366c294f9387830%1752006674.338",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1752006674.465",
"re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1752006674.077",
"rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1752006673.227",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1752006673.172",
"openssl/1.1.1v#216374e4fb5b2e0f5ab1fb6f27b5b434%1752006673.069",
"nudb/2.0.8#63990d3e517038e04bf529eb8167f69f%1752006673.862",
"minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1752006672.983",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1752006672.825",
"libuv/1.46.0#78565d142ac7102776256328a26cdf60%1752006672.827",
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1752006672.826",
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1752006672.822",
"libarchive/3.7.6#e0453864b2a4d225f06b3304903cb2b7%1752006672.917",
"http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1752006672.658",
"gtest/1.14.0#f8f0757a574a8dd747d16af62d6eb1b7%1752006671.555",
"grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1752006671.777",
"fmt/11.2.0#579bb2cdf4a7607621beea4eb4651e0f%1752006671.557",
"date/3.0.3#cf28fe9c0aab99fe12da08aa42df65e1%1752006671.553",
"cassandra-cpp-driver/2.17.0#e50919efac8418c26be6671fd702540a%1752006671.654",
"c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1752006671.554",
"bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1752006671.549",
"boost/1.83.0#5bcb2a14a35875e328bf312e080d3562%1752006671.557",
"benchmark/1.8.3#1a2ce62c99e2b3feaa57b1f0c15a8c46%1752006671.408",
"abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1752006671.555"
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1750263732.782",
"xxhash/0.8.2#7856c968c985b2981b707ee8f2413b2b%1750263730.908",
"xrpl/2.5.0#7880d1696f11fceb1d498570f1a184c8%1751035267.743",
"sqlite3/3.47.0#7a0904fd061f5f8a2366c294f9387830%1750263721.79",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1750263717.455",
"re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1750263715.145",
"rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1750263713.526",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1750263698.841",
"openssl/1.1.1v#216374e4fb5b2e0f5ab1fb6f27b5b434%1750263685.885",
"nudb/2.0.8#63990d3e517038e04bf529eb8167f69f%1750263683.814",
"minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1750263681.745",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1750263679.891",
"libuv/1.46.0#78565d142ac7102776256328a26cdf60%1750263677.819",
"libiconv/1.17#1ae2f60ab5d08de1643a22a81b360c59%1750257497.552",
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1750263675.748",
"libarchive/3.7.6#e0453864b2a4d225f06b3304903cb2b7%1750263671.05",
"http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1750263668.751",
"gtest/1.14.0#f8f0757a574a8dd747d16af62d6eb1b7%1750263666.833",
"grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1750263646.614",
"fmt/11.2.0#579bb2cdf4a7607621beea4eb4651e0f%1746298708.362",
"fmt/10.1.1#021e170cf81db57da82b5f737b6906c1%1750263644.741",
"date/3.0.3#cf28fe9c0aab99fe12da08aa42df65e1%1750263643.099",
"cassandra-cpp-driver/2.17.0#e50919efac8418c26be6671fd702540a%1750263632.157",
"c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1750263630.06",
"bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1750263627.95",
"boost/1.83.0#8eb22f36ddfb61f54bbc412c4555bd66%1750263616.444",
"benchmark/1.8.3#1a2ce62c99e2b3feaa57b1f0c15a8c46%1724323740.181",
"abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1750263609.776"
],
"build_requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1752006674.465",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1752006673.172",
"protobuf/3.21.9#64ce20e1d9ea24f3d6c504015d5f6fa8%1752006673.173",
"cmake/3.31.7#57c3e118bcf267552c0ea3f8bee1e7d5%1752006671.64",
"b2/5.3.2#7b5fabfe7088ae933fb3e78302343ea0%1752006671.407"
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1750263732.782",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1750263698.841",
"protobuf/3.21.9#64ce20e1d9ea24f3d6c504015d5f6fa8%1750263690.822",
"cmake/3.31.7#57c3e118bcf267552c0ea3f8bee1e7d5%1749863707.208",
"b2/5.3.2#7b5fabfe7088ae933fb3e78302343ea0%1750263614.565"
],
"python_requires": [],
"overrides": {
"boost/1.83.0": [
null,
"boost/1.83.0#5bcb2a14a35875e328bf312e080d3562"
"boost/1.83.0#8eb22f36ddfb61f54bbc412c4555bd66"
],
"protobuf/3.21.9": [
null,

View File

@@ -1,10 +1,7 @@
ARG CLANG_MAJOR_VERSION=invalid
ARG GCC_VERSION=invalid
FROM ghcr.io/xrplf/clio-gcc:${GCC_VERSION} AS clio-gcc
FROM ghcr.io/xrplf/clio-gcc:12.3.0 AS clio-gcc
FROM ghcr.io/xrplf/clio-tools:latest AS clio-tools
FROM ghcr.io/xrplf/clio-clang:${CLANG_MAJOR_VERSION}
FROM ghcr.io/xrplf/clio-clang:16
ARG DEBIAN_FRONTEND=noninteractive
@@ -82,8 +79,6 @@ RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 100 \
&& update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12 100
COPY --from=clio-tools \
/usr/local/bin/mold \
/usr/local/bin/ld.mold \
/usr/local/bin/ccache \
/usr/local/bin/doxygen \
/usr/local/bin/ClangBuildAnalyzer \

View File

@@ -6,14 +6,13 @@ It is used in [Clio Github Actions](https://github.com/XRPLF/clio/actions) but c
The image is based on Ubuntu 20.04 and contains:
- ccache 4.11.3
- Clang 19
- clang 16.0.6
- ClangBuildAnalyzer 1.6.0
- Conan 2.17.0
- Doxygen 1.12
- GCC 12.3.0
- conan 2.17.0
- doxygen 1.12
- gcc 12.3.0
- gh 2.74
- git-cliff 2.9.1
- mold 2.40.1
- and some other useful tools
Conan is set up to build Clio without any additional steps.

View File

@@ -4,9 +4,8 @@ build_type=Release
compiler=clang
compiler.cppstd=20
compiler.libcxx=libc++
compiler.version=19
compiler.version=16
os=Linux
[conf]
tools.build:compiler_executables={"c": "/usr/bin/clang-19", "cpp": "/usr/bin/clang++-19"}
grpc/1.50.1:tools.build:cxxflags+=["-Wno-missing-template-arg-list-after-template-kw"]
tools.build:compiler_executables={"c": "/usr/bin/clang-16", "cpp": "/usr/bin/clang++-16"}

View File

@@ -8,6 +8,8 @@ SHELL ["/bin/bash", "-c"]
USER root
WORKDIR /root
ARG CLANG_VERSION=16
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
wget \
@@ -16,17 +18,13 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
ARG CLANG_MAJOR_VERSION=invalid
# Bump this version to force rebuild of the image
ARG BUILD_VERSION=0
RUN wget --progress=dot:giga https://apt.llvm.org/llvm.sh \
&& chmod +x llvm.sh \
&& ./llvm.sh ${CLANG_MAJOR_VERSION} \
&& ./llvm.sh ${CLANG_VERSION} \
&& rm -rf llvm.sh \
&& apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
libc++-${CLANG_MAJOR_VERSION}-dev \
libc++abi-${CLANG_MAJOR_VERSION}-dev \
libc++-${CLANG_VERSION}-dev \
libc++abi-${CLANG_VERSION}-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

View File

@@ -1,14 +1,16 @@
ARG UBUNTU_VERSION=20.04
ARG GCC_MAJOR_VERSION=invalid
ARG GCC_MAJOR_VERSION=12
FROM ubuntu:$UBUNTU_VERSION AS build
ARG UBUNTU_VERSION
ARG GCC_MAJOR_VERSION
ARG BUILD_VERSION=7
ARG GCC_MINOR_VERSION=3
ARG GCC_PATCH_VERSION=0
ARG GCC_VERSION=${GCC_MAJOR_VERSION}.${GCC_MINOR_VERSION}.${GCC_PATCH_VERSION}
ARG BUILD_VERSION=6
ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
@@ -25,8 +27,6 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
ARG GCC_VERSION
WORKDIR /
RUN wget --progress=dot:giga https://gcc.gnu.org/pub/gcc/releases/gcc-$GCC_VERSION/gcc-$GCC_VERSION.tar.gz \
&& tar xf gcc-$GCC_VERSION.tar.gz

View File

@@ -1,40 +1,24 @@
ARG GCC_VERSION=invalid
FROM ghcr.io/xrplf/clio-gcc:${GCC_VERSION}
FROM ubuntu:20.04
ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
ARG BUILD_VERSION=1
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
bison \
build-essential \
cmake \
flex \
ninja-build \
python3 \
python3-pip \
software-properties-common \
wget \
&& pip3 install -q --no-cache-dir \
cmake==3.31.6 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /tmp
ARG MOLD_VERSION=2.40.1
RUN wget --progress=dot:giga "https://github.com/rui314/mold/archive/refs/tags/v${MOLD_VERSION}.tar.gz" \
&& tar xf "v${MOLD_VERSION}.tar.gz" \
&& cd "mold-${MOLD_VERSION}" \
&& mkdir build \
&& cd build \
&& cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. \
&& ninja install \
&& rm -rf /tmp/* /var/tmp/*
ARG CCACHE_VERSION=4.11.3
RUN wget --progress=dot:giga "https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}.tar.gz" \
&& tar xf "ccache-${CCACHE_VERSION}.tar.gz" \
@@ -42,7 +26,7 @@ RUN wget --progress=dot:giga "https://github.com/ccache/ccache/releases/download
&& mkdir build \
&& cd build \
&& cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DENABLE_TESTING=False .. \
&& ninja install \
&& cmake --build . --target install \
&& rm -rf /tmp/* /var/tmp/*
ARG DOXYGEN_VERSION=1.12.0
@@ -52,7 +36,7 @@ RUN wget --progress=dot:giga "https://github.com/doxygen/doxygen/releases/downlo
&& mkdir build \
&& cd build \
&& cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. \
&& ninja install \
&& cmake --build . --target install \
&& rm -rf /tmp/* /var/tmp/*
ARG CLANG_BUILD_ANALYZER_VERSION=1.6.0
@@ -62,7 +46,7 @@ RUN wget --progress=dot:giga "https://github.com/aras-p/ClangBuildAnalyzer/archi
&& mkdir build \
&& cd build \
&& cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. \
&& ninja install \
&& cmake --build . --target install \
&& rm -rf /tmp/* /var/tmp/*
ARG GIT_CLIFF_VERSION=2.9.1

View File

@@ -10,5 +10,4 @@ target_link_libraries(
clio_web
clio_rpc
clio_migration
PRIVATE Boost::program_options
)

View File

@@ -97,7 +97,7 @@ HealthCheckHandler::operator()(
boost::asio::yield_context
)
{
static constexpr auto kHEALTH_CHECK_HTML = R"html(
static auto constexpr kHEALTH_CHECK_HTML = R"html(
<!DOCTYPE html>
<html>
<head><title>Test page for Clio</title></head>

View File

@@ -198,6 +198,39 @@ struct MPTHolderData {
ripple::AccountID holder;
};
/**
* @brief Check whether the supplied object is an offer.
*
* @param object The object to check
* @return true if the object is an offer; false otherwise
*/
template <typename T>
inline bool
isOffer(T const& object)
{
static constexpr short kOFFER_OFFSET = 0x006f;
static constexpr short kSHIFT = 8;
short offerBytes = (object[1] << kSHIFT) | object[2];
return offerBytes == kOFFER_OFFSET;
}
/**
* @brief Check whether the supplied hex represents an offer object.
*
* @param object The object to check
* @return true if the object is an offer; false otherwise
*/
template <typename T>
inline bool
isOfferHex(T const& object)
{
auto blob = ripple::strUnHex(4, object.begin(), object.begin() + 4);
if (blob)
return isOffer(*blob);
return false;
}
/**
* @brief Check whether the supplied object is a dir node.
*
@@ -208,10 +241,6 @@ template <typename T>
inline bool
isDirNode(T const& object)
{
static constexpr auto kMIN_SIZE_REQUIRED = 3;
if (std::size(object) < kMIN_SIZE_REQUIRED)
return false;
static constexpr short kDIR_NODE_SPACE_KEY = 0x0064;
short const spaceKey = (object.data()[1] << 8) | object.data()[2];
return spaceKey == kDIR_NODE_SPACE_KEY;
@@ -235,6 +264,23 @@ isBookDir(T const& key, R const& object)
return !sle[~ripple::sfOwner].has_value();
}
/**
* @brief Get the book out of an offer object.
*
* @param offer The offer to get the book for
* @return Book as ripple::uint256
*/
template <typename T>
inline ripple::uint256
getBook(T const& offer)
{
ripple::SerialIter it{offer.data(), offer.size()};
ripple::SLE const sle{it, {}};
ripple::uint256 book = sle.getFieldH256(ripple::sfBookDirectory);
return book;
}
/**
* @brief Get the book base.
*

View File

@@ -6,7 +6,7 @@ To support additional database types, you can create new classes that implement
## Data Model
The data model used by Clio to read and write ledger data is different from what `rippled` uses. `rippled` uses a novel data structure named [_SHAMap_](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored in the root vertices of the tree. Thus, looking up a record located at the leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record.
The data model used by Clio to read and write ledger data is different from what `rippled` uses. `rippled` uses a novel data structure named [_SHAMap_](https://github.com/ripple/rippled/blob/master/src/ripple/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored in the root vertices of the tree. Thus, looking up a record located at the leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record.
`rippled` nodes can also generate a proof-tree by forming a subtree with all the path nodes and their neighbors, which can then be used to prove the existence of the leaf node data to other `rippled` nodes. In short, the main purpose of the SHAMap data structure is to facilitate the fast validation of data integrity between different decentralized `rippled` nodes.

View File

@@ -99,7 +99,7 @@ public:
connect() const;
/**
* @brief Connect to the specified keyspace asynchronously.
* @brief Connect to the the specified keyspace asynchronously.
*
* @param keyspace The keyspace to use
* @return A future
@@ -137,7 +137,7 @@ public:
disconnect() const;
/**
* @brief Reconnect to the specified keyspace asynchronously.
* @brief Reconnect to the the specified keyspace asynchronously.
*
* @param keyspace The keyspace to use
* @return A future

View File

@@ -57,6 +57,7 @@
#include <string>
#include <thread>
#include <utility>
#include <variant>
#include <vector>
using namespace util::config;
@@ -277,46 +278,40 @@ LoadBalancer::forwardToRippled(
return std::unexpected{rpc::ClioError::RpcCommandIsMissing};
auto const cmd = boost::json::value_to<std::string>(request.at("command"));
if (forwardingCache_) {
if (auto cachedResponse = forwardingCache_->get(cmd); cachedResponse) {
forwardingCounters_.cacheHit.get() += 1;
return std::move(cachedResponse).value();
if (forwardingCache_ and forwardingCache_->shouldCache(cmd)) {
bool servedFromCache = true;
auto updater = [this, &request, &clientIp, &servedFromCache, isAdmin](boost::asio::yield_context yield)
-> std::expected<util::ResponseExpirationCache::EntryData, util::ResponseExpirationCache::Error> {
servedFromCache = false;
auto result = forwardToRippledImpl(request, clientIp, isAdmin, yield);
if (result.has_value()) {
return util::ResponseExpirationCache::EntryData{
.lastUpdated = std::chrono::steady_clock::now(), .response = std::move(result).value()
};
}
return std::unexpected{
util::ResponseExpirationCache::Error{.status = rpc::Status{result.error()}, .warnings = {}}
};
};
auto result = forwardingCache_->getOrUpdate(
yield, cmd, std::move(updater), [](util::ResponseExpirationCache::EntryData const& entry) {
return not entry.response.contains("error");
}
);
if (servedFromCache) {
++forwardingCounters_.cacheHit.get();
}
}
forwardingCounters_.cacheMiss.get() += 1;
ASSERT(not sources_.empty(), "ETL sources must be configured to forward requests.");
std::size_t sourceIdx = randomGenerator_->uniform(0ul, sources_.size() - 1);
auto numAttempts = 0u;
auto xUserValue = isAdmin ? kADMIN_FORWARDING_X_USER_VALUE : kUSER_FORWARDING_X_USER_VALUE;
std::optional<boost::json::object> response;
rpc::ClioError error = rpc::ClioError::EtlConnectionError;
while (numAttempts < sources_.size()) {
auto [res, duration] =
util::timed([&]() { return sources_[sourceIdx]->forwardToRippled(request, clientIp, xUserValue, yield); });
if (res) {
forwardingCounters_.successDuration.get() += duration;
response = std::move(res).value();
break;
if (result.has_value()) {
return std::move(result).value();
}
forwardingCounters_.failDuration.get() += duration;
++forwardingCounters_.retries.get();
error = std::max(error, res.error()); // Choose the best result between all sources
sourceIdx = (sourceIdx + 1) % sources_.size();
++numAttempts;
auto const combinedError = result.error().status.code;
ASSERT(std::holds_alternative<rpc::ClioError>(combinedError), "There could be only ClioError here");
return std::unexpected{std::get<rpc::ClioError>(combinedError)};
}
if (response) {
if (forwardingCache_ and not response->contains("error"))
forwardingCache_->put(cmd, *response);
return std::move(response).value();
}
return std::unexpected{error};
return forwardToRippledImpl(request, clientIp, isAdmin, yield);
}
boost::json::value
@@ -407,4 +402,47 @@ LoadBalancer::chooseForwardingSource()
}
}
std::expected<boost::json::object, rpc::CombinedError>
LoadBalancer::forwardToRippledImpl(
boost::json::object const& request,
std::optional<std::string> const& clientIp,
bool const isAdmin,
boost::asio::yield_context yield
)
{
++forwardingCounters_.cacheMiss.get();
ASSERT(not sources_.empty(), "ETL sources must be configured to forward requests.");
std::size_t sourceIdx = randomGenerator_->uniform(0ul, sources_.size() - 1);
auto numAttempts = 0u;
auto xUserValue = isAdmin ? kADMIN_FORWARDING_X_USER_VALUE : kUSER_FORWARDING_X_USER_VALUE;
std::optional<boost::json::object> response;
rpc::ClioError error = rpc::ClioError::EtlConnectionError;
while (numAttempts < sources_.size()) {
auto [res, duration] =
util::timed([&]() { return sources_[sourceIdx]->forwardToRippled(request, clientIp, xUserValue, yield); });
if (res) {
forwardingCounters_.successDuration.get() += duration;
response = std::move(res).value();
break;
}
forwardingCounters_.failDuration.get() += duration;
++forwardingCounters_.retries.get();
error = std::max(error, res.error()); // Choose the best result between all sources
sourceIdx = (sourceIdx + 1) % sources_.size();
++numAttempts;
}
if (response.has_value()) {
return std::move(response).value();
}
return std::unexpected{error};
}
} // namespace etl

View File

@@ -49,7 +49,6 @@
#include <concepts>
#include <cstdint>
#include <expected>
#include <functional>
#include <memory>
#include <optional>
#include <string>
@@ -282,6 +281,14 @@ private:
*/
void
chooseForwardingSource();
std::expected<boost::json::object, rpc::CombinedError>
forwardToRippledImpl(
boost::json::object const& request,
std::optional<std::string> const& clientIp,
bool isAdmin,
boost::asio::yield_context yield
);
};
} // namespace etl

View File

@@ -18,7 +18,6 @@
//==============================================================================
#include "data/DBHelpers.hpp"
#include "util/Assert.hpp"
#include <fmt/format.h>
#include <xrpl/basics/base_uint.h>
@@ -360,18 +359,14 @@ getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
std::vector<NFTsData>
getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string const& blob)
{
// https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0020-non-fungible-tokens#tokenpage-id-format
ASSERT(key.size() == ripple::uint256::size(), "The size of the key (token) is expected to fit uint256 exactly");
auto const sle =
std::vector<NFTsData> nfts;
ripple::STLedgerEntry const sle =
ripple::STLedgerEntry(ripple::SerialIter{blob.data(), blob.size()}, ripple::uint256::fromVoid(key.data()));
if (sle.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE)
return {};
return nfts;
auto const owner = ripple::AccountID::fromVoid(key.data());
std::vector<NFTsData> nfts;
for (ripple::STObject const& node : sle.getFieldArray(ripple::sfNFTokens))
nfts.emplace_back(node.getFieldH256(ripple::sfNFTokenID), seq, owner, node.getFieldVL(ripple::sfURI));

View File

@@ -58,6 +58,7 @@
#include <string>
#include <thread>
#include <utility>
#include <variant>
#include <vector>
using namespace util::config;
@@ -283,46 +284,40 @@ LoadBalancer::forwardToRippled(
return std::unexpected{rpc::ClioError::RpcCommandIsMissing};
auto const cmd = boost::json::value_to<std::string>(request.at("command"));
if (forwardingCache_) {
if (auto cachedResponse = forwardingCache_->get(cmd); cachedResponse) {
forwardingCounters_.cacheHit.get() += 1;
return std::move(cachedResponse).value();
if (forwardingCache_ and forwardingCache_->shouldCache(cmd)) {
bool servedFromCache = true;
auto updater = [this, &request, &clientIp, &servedFromCache, isAdmin](boost::asio::yield_context yield)
-> std::expected<util::ResponseExpirationCache::EntryData, util::ResponseExpirationCache::Error> {
servedFromCache = false;
auto result = forwardToRippledImpl(request, clientIp, isAdmin, yield);
if (result.has_value()) {
return util::ResponseExpirationCache::EntryData{
.lastUpdated = std::chrono::steady_clock::now(), .response = std::move(result).value()
};
}
return std::unexpected{
util::ResponseExpirationCache::Error{.status = rpc::Status{result.error()}, .warnings = {}}
};
};
auto result = forwardingCache_->getOrUpdate(
yield, cmd, std::move(updater), [](util::ResponseExpirationCache::EntryData const& entry) {
return not entry.response.contains("error");
}
);
if (servedFromCache) {
++forwardingCounters_.cacheHit.get();
}
}
forwardingCounters_.cacheMiss.get() += 1;
ASSERT(not sources_.empty(), "ETL sources must be configured to forward requests.");
std::size_t sourceIdx = randomGenerator_->uniform(0ul, sources_.size() - 1);
auto numAttempts = 0u;
auto xUserValue = isAdmin ? kADMIN_FORWARDING_X_USER_VALUE : kUSER_FORWARDING_X_USER_VALUE;
std::optional<boost::json::object> response;
rpc::ClioError error = rpc::ClioError::EtlConnectionError;
while (numAttempts < sources_.size()) {
auto [res, duration] =
util::timed([&]() { return sources_[sourceIdx]->forwardToRippled(request, clientIp, xUserValue, yield); });
if (res) {
forwardingCounters_.successDuration.get() += duration;
response = std::move(res).value();
break;
if (result.has_value()) {
return std::move(result).value();
}
forwardingCounters_.failDuration.get() += duration;
++forwardingCounters_.retries.get();
error = std::max(error, res.error()); // Choose the best result between all sources
sourceIdx = (sourceIdx + 1) % sources_.size();
++numAttempts;
auto const combinedError = result.error().status.code;
ASSERT(std::holds_alternative<rpc::ClioError>(combinedError), "There could be only ClioError here");
return std::unexpected{std::get<rpc::ClioError>(combinedError)};
}
if (response) {
if (forwardingCache_ and not response->contains("error"))
forwardingCache_->put(cmd, *response);
return std::move(response).value();
}
return std::unexpected{error};
return forwardToRippledImpl(request, clientIp, isAdmin, yield);
}
boost::json::value
@@ -413,4 +408,47 @@ LoadBalancer::chooseForwardingSource()
}
}
/**
 * @brief Forward a request to one of the configured rippled sources, retrying each source at most once.
 *
 * Starts from a randomly chosen source and walks the source list round-robin until one succeeds or
 * every source has been attempted. Success/failure durations and retry counts are recorded in
 * forwardingCounters_.
 *
 * @param request The JSON-RPC request to forward
 * @param clientIp The originating client IP, if known (forwarded to the source)
 * @param isAdmin Whether the caller has admin privileges; selects the X-User header value
 * @param yield Asio yield context for coroutine suspension
 * @return The source's response on success, or the best error seen across all sources
 */
std::expected<boost::json::object, rpc::CombinedError>
LoadBalancer::forwardToRippledImpl(
boost::json::object const& request,
std::optional<std::string> const& clientIp,
bool isAdmin,
boost::asio::yield_context yield
)
{
// This path is only reached when the forwarding cache could not serve the request
++forwardingCounters_.cacheMiss.get();
ASSERT(not sources_.empty(), "ETL sources must be configured to forward requests.");
// Random starting index spreads load across sources
std::size_t sourceIdx = randomGenerator_->uniform(0ul, sources_.size() - 1);
auto numAttempts = 0u;
auto xUserValue = isAdmin ? kADMIN_FORWARDING_X_USER_VALUE : kUSER_FORWARDING_X_USER_VALUE;
std::optional<boost::json::object> response;
// Default error if every source fails without reporting anything better
rpc::ClioError error = rpc::ClioError::EtlConnectionError;
// Try each source at most once, moving round-robin from the random start
while (numAttempts < sources_.size()) {
auto [res, duration] =
util::timed([&]() { return sources_[sourceIdx]->forwardToRippled(request, clientIp, xUserValue, yield); });
if (res) {
forwardingCounters_.successDuration.get() += duration;
response = std::move(res).value();
break;
}
forwardingCounters_.failDuration.get() += duration;
++forwardingCounters_.retries.get();
error = std::max(error, res.error()); // Choose the best result between all sources
sourceIdx = (sourceIdx + 1) % sources_.size();
++numAttempts;
}
if (response.has_value()) {
return std::move(response).value();
}
return std::unexpected{error};
}
} // namespace etlng

View File

@@ -282,6 +282,14 @@ private:
*/
void
chooseForwardingSource();
std::expected<boost::json::object, rpc::CombinedError>
forwardToRippledImpl(
boost::json::object const& request,
std::optional<std::string> const& clientIp,
bool isAdmin,
boost::asio::yield_context yield
);
};
} // namespace etlng

View File

@@ -32,7 +32,7 @@ namespace migration {
*/
struct MigrationManagerInterface : virtual public MigrationInspectorInterface {
/**
* @brief Run the migration according to the given migrator's name
* @brief Run the migration according to the given migrator's name
*/
virtual void
runMigration(std::string const&) = 0;

View File

@@ -56,7 +56,7 @@ public:
}
/**
* @brief Run the migration according to the given migrator's name
* @brief Run the migration according to the given migrator's name
*
* @param name The name of the migrator
*/

View File

@@ -2,7 +2,6 @@
add_library(clio_rpc_center)
target_sources(clio_rpc_center PRIVATE RPCCenter.cpp)
target_include_directories(clio_rpc_center PUBLIC "${CMAKE_SOURCE_DIR}/src")
target_link_libraries(clio_rpc_center PUBLIC clio_options)
add_library(clio_rpc)

View File

@@ -157,55 +157,48 @@ public:
return forwardingProxy_.forward(ctx);
}
if (not ctx.isAdmin and responseCache_) {
if (auto res = responseCache_->get(ctx.method); res.has_value())
return Result{std::move(res).value()};
}
if (not ctx.isAdmin and responseCache_ and responseCache_->shouldCache(ctx.method)) {
auto updater = [this, &ctx](boost::asio::yield_context)
-> std::expected<util::ResponseExpirationCache::EntryData, util::ResponseExpirationCache::Error> {
auto result = buildResponseImpl(ctx);
if (backend_->isTooBusy()) {
LOG(log_.error()) << "Database is too busy. Rejecting request";
notifyTooBusy(); // TODO: should we add ctx.method if we have it?
return Result{Status{RippledError::rpcTOO_BUSY}};
}
auto const extracted =
[&result]() -> std::expected<boost::json::object, util::ResponseExpirationCache::Error> {
if (result.response.has_value()) {
return std::move(result.response).value();
}
return std::unexpected{util::ResponseExpirationCache::Error{
.status = std::move(result.response).error(), .warnings = std::move(result.warnings)
}};
}();
auto const method = handlerProvider_->getHandler(ctx.method);
if (!method) {
notifyUnknownCommand();
return Result{Status{RippledError::rpcUNKNOWN_COMMAND}};
}
try {
LOG(perfLog_.debug()) << ctx.tag() << " start executing rpc `" << ctx.method << '`';
auto const context = Context{
.yield = ctx.yield,
.session = ctx.session,
.isAdmin = ctx.isAdmin,
.clientIp = ctx.clientIp,
.apiVersion = ctx.apiVersion
if (extracted.has_value()) {
return util::ResponseExpirationCache::EntryData{
.lastUpdated = std::chrono::steady_clock::now(), .response = std::move(extracted).value()
};
}
return std::unexpected{std::move(extracted).error()};
};
auto v = (*method).process(ctx.params, context);
LOG(perfLog_.debug()) << ctx.tag() << " finish executing rpc `" << ctx.method << '`';
if (not v) {
notifyErrored(ctx.method);
} else if (not ctx.isAdmin and responseCache_) {
responseCache_->put(ctx.method, v.result->as_object());
auto result = responseCache_->getOrUpdate(
ctx.yield,
ctx.method,
std::move(updater),
[&ctx](util::ResponseExpirationCache::EntryData const& entry) {
return not ctx.isAdmin and not entry.response.contains("error");
}
);
if (result.has_value()) {
return Result{std::move(result).value()};
}
return Result{std::move(v)};
} catch (data::DatabaseTimeout const& t) {
LOG(log_.error()) << "Database timeout";
notifyTooBusy();
return Result{Status{RippledError::rpcTOO_BUSY}};
} catch (std::exception const& ex) {
LOG(log_.error()) << ctx.tag() << "Caught exception: " << ex.what();
notifyInternalError();
return Result{Status{RippledError::rpcINTERNAL}};
auto error = std::move(result).error();
Result errorResult{std::move(error.status)};
errorResult.warnings = std::move(error.warnings);
return errorResult;
}
return buildResponseImpl(ctx);
}
/**

View File

@@ -42,15 +42,12 @@ target_sources(
# This must be above the target_link_libraries call otherwise backtrace doesn't work
if ("${san}" STREQUAL "")
target_link_libraries(clio_util PUBLIC Boost::stacktrace_backtrace)
target_link_libraries(clio_util PUBLIC Boost::stacktrace_backtrace dl libbacktrace::libbacktrace)
endif ()
target_link_libraries(
clio_util
PUBLIC Boost::headers
Boost::iostreams
Boost::log
Boost::log_setup
fmt::fmt
openssl::openssl
xrpl::libxrpl

View File

@@ -16,44 +16,31 @@
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "util/ResponseExpirationCache.hpp"
#include "util/Assert.hpp"
#include <boost/asio/spawn.hpp>
#include <boost/json/object.hpp>
#include <chrono>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
namespace util {
void
ResponseExpirationCache::Entry::put(boost::json::object response)
ResponseExpirationCache::ResponseExpirationCache(
std::chrono::steady_clock::duration cacheTimeout,
std::unordered_set<std::string> const& cmds
)
: cacheTimeout_(cacheTimeout)
{
response_ = std::move(response);
lastUpdated_ = std::chrono::steady_clock::now();
}
std::optional<boost::json::object>
ResponseExpirationCache::Entry::get() const
{
return response_;
}
std::chrono::steady_clock::time_point
ResponseExpirationCache::Entry::lastUpdated() const
{
return lastUpdated_;
}
void
ResponseExpirationCache::Entry::invalidate()
{
response_.reset();
for (auto const& command : cmds) {
cache_.emplace(command, std::make_unique<CacheEntry>());
}
}
bool
@@ -62,38 +49,41 @@ ResponseExpirationCache::shouldCache(std::string const& cmd)
return cache_.contains(cmd);
}
std::optional<boost::json::object>
ResponseExpirationCache::get(std::string const& cmd) const
std::expected<boost::json::object, ResponseExpirationCache::Error>
ResponseExpirationCache::getOrUpdate(
boost::asio::yield_context yield,
std::string const& cmd,
Updater updater,
Verifier verifier
)
{
auto it = cache_.find(cmd);
if (it == cache_.end())
return std::nullopt;
ASSERT(it != cache_.end(), "Can't get a value which is not in the cache");
auto const& entry = it->second.lock<std::shared_lock>();
if (std::chrono::steady_clock::now() - entry->lastUpdated() > cacheTimeout_)
return std::nullopt;
auto& entry = it->second;
{
auto result = entry->asyncGet(yield, updater, verifier);
if (not result.has_value()) {
return std::unexpected{std::move(result).error()};
}
if (std::chrono::steady_clock::now() - result->lastUpdated < cacheTimeout_) {
return std::move(result)->response;
}
}
return entry->get();
}
void
ResponseExpirationCache::put(std::string const& cmd, boost::json::object const& response)
{
if (not shouldCache(cmd))
return;
ASSERT(cache_.contains(cmd), "Command is not in the cache: {}", cmd);
auto entry = cache_[cmd].lock<std::unique_lock>();
entry->put(response);
// Force update due to cache timeout
auto result = entry->update(yield, std::move(updater), std::move(verifier));
if (not result.has_value()) {
return std::unexpected{std::move(result).error()};
}
return std::move(result)->response;
}
void
ResponseExpirationCache::invalidate()
{
for (auto& [_, entry] : cache_) {
auto entryLock = entry.lock<std::unique_lock>();
entryLock->invalidate();
entry->invalidate();
}
}

View File

@@ -16,15 +16,18 @@
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "util/Mutex.hpp"
#include "rpc/Errors.hpp"
#include "util/BlockingCache.hpp"
#include <boost/asio/spawn.hpp>
#include <boost/json/array.hpp>
#include <boost/json/object.hpp>
#include <chrono>
#include <optional>
#include <shared_mutex>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
@@ -33,94 +36,89 @@ namespace util {
/**
* @brief Cache of requests' responses with TTL support and configurable cacheable commands
*
* This class implements a time-based expiration cache for RPC responses. It allows
* caching responses for specified commands and automatically invalidates them after
* a configured timeout period. The cache uses BlockingCache internally to handle
* concurrent access and updates.
*/
class ResponseExpirationCache {
public:
/**
* @brief A class to store a cache entry.
* @brief A data structure to store a cache entry with its timestamp
*/
class Entry {
std::chrono::steady_clock::time_point lastUpdated_;
std::optional<boost::json::object> response_;
public:
/**
* @brief Put a response into the cache
*
* @param response The response to store
*/
void
put(boost::json::object response);
/**
* @brief Get the response from the cache
*
* @return The response
*/
std::optional<boost::json::object>
get() const;
/**
* @brief Get the last time the cache was updated
*
* @return The last time the cache was updated
*/
std::chrono::steady_clock::time_point
lastUpdated() const;
/**
* @brief Invalidate the cache entry
*/
void
invalidate();
struct EntryData {
std::chrono::steady_clock::time_point lastUpdated; ///< When the entry was last updated
boost::json::object response; ///< The cached response data
};
std::chrono::steady_clock::duration cacheTimeout_;
std::unordered_map<std::string, util::Mutex<Entry, std::shared_mutex>> cache_;
/**
* @brief A data structure to represent errors that can occur during an update of the cache
*/
struct Error {
rpc::Status status; ///< The status code and message of the error
boost::json::array warnings; ///< Any warnings related to the request
bool
shouldCache(std::string const& cmd);
bool
operator==(Error const&) const = default;
};
using CacheEntry = util::BlockingCache<EntryData, Error>;
private:
std::chrono::steady_clock::duration cacheTimeout_;
std::unordered_map<std::string, std::unique_ptr<CacheEntry>> cache_;
public:
/**
* @brief Construct a new Cache object
* @brief Construct a new ResponseExpirationCache object
*
* @param cacheTimeout The time for cache entries to expire
* @param cmds The commands that should be cached
* @param cacheTimeout The time period after which cached entries expire
* @param cmds The commands that should be cached (requests for other commands won't be cached)
*/
ResponseExpirationCache(
std::chrono::steady_clock::duration cacheTimeout,
std::unordered_set<std::string> const& cmds
)
: cacheTimeout_(cacheTimeout)
{
for (auto const& command : cmds) {
cache_.emplace(command, Entry{});
}
}
);
/**
* @brief Get a response from the cache
* @brief Check if the given command should be cached
*
* @param cmd The command to check
* @return true if the command should be cached, false otherwise
*/
bool
shouldCache(std::string const& cmd);
using Updater = CacheEntry::Updater;
using Verifier = CacheEntry::Verifier;
/**
* @brief Get a cached response or update the cache if necessary
*
* This method returns a cached response if it exists and hasn't expired.
* If the cache entry is expired or doesn't exist, it calls the updater to
* generate a new value. If multiple coroutines request the same entry
* simultaneously, only one updater will be called while others wait.
*
* @note cmd must be one of the commands that are cached. There is an ASSERT() inside the function
*
* @param yield Asio yield context for coroutine suspension
* @param cmd The command to get the response for
* @return The response if it exists or std::nullopt otherwise
* @param updater Function to generate the response if not in cache or expired
* @param verifier Function to validate if a response should be cached
* @return The cached or newly generated response, or an error
*/
[[nodiscard]] std::optional<boost::json::object>
get(std::string const& cmd) const;
/**
* @brief Put a response into the cache if the request should be cached
*
* @param cmd The command to store the response for
* @param response The response to store
*/
void
put(std::string const& cmd, boost::json::object const& response);
[[nodiscard]] std::expected<boost::json::object, Error>
getOrUpdate(boost::asio::yield_context yield, std::string const& cmd, Updater updater, Verifier verifier);
/**
* @brief Invalidate all entries in the cache
*
* This causes all cached entries to be cleared, forcing the next access
* to generate new responses.
*/
void
invalidate();
};
} // namespace util

View File

@@ -62,7 +62,7 @@
namespace web::impl {
static constexpr auto kHEALTH_CHECK_HTML = R"html(
static auto constexpr kHEALTH_CHECK_HTML = R"html(
<!DOCTYPE html>
<html>
<head><title>Test page for Clio</title></head>

View File

@@ -19,10 +19,10 @@
#include "web/ng/SubscriptionContext.hpp"
#include "util/Assert.hpp"
#include "util/Taggable.hpp"
#include "web/SubscriptionContextInterface.hpp"
#include <boost/asio/buffer.hpp>
#include <boost/asio/spawn.hpp>
#include <cstddef>
@@ -50,31 +50,24 @@ SubscriptionContext::SubscriptionContext(
{
}
SubscriptionContext::~SubscriptionContext()
{
ASSERT(disconnected_, "SubscriptionContext must be disconnected before destroying");
}
void
SubscriptionContext::send(std::shared_ptr<std::string> message)
{
if (disconnected_ or gotError_)
if (disconnected_)
return;
if (maxSendQueueSize_.has_value() and tasksGroup_.size() >= *maxSendQueueSize_) {
tasksGroup_.spawn(yield_, [this](boost::asio::yield_context innerYield) {
connection_.get().close(innerYield);
});
gotError_ = true;
disconnected_ = true;
return;
}
tasksGroup_.spawn(yield_, [this, message = std::move(message)](boost::asio::yield_context innerYield) mutable {
auto const maybeError = connection_.get().sendShared(std::move(message), innerYield);
if (maybeError.has_value() and errorHandler_(*maybeError, connection_)) {
tasksGroup_.spawn(yield_, [this, message = std::move(message)](boost::asio::yield_context innerYield) {
auto const maybeError = connection_.get().sendBuffer(boost::asio::buffer(*message), innerYield);
if (maybeError.has_value() and errorHandler_(*maybeError, connection_))
connection_.get().close(innerYield);
gotError_ = true;
}
});
}
@@ -99,8 +92,8 @@ SubscriptionContext::apiSubversion() const
void
SubscriptionContext::disconnect(boost::asio::yield_context yield)
{
disconnected_ = true;
onDisconnect_(this);
disconnected_ = true;
tasksGroup_.asyncWait(yield);
}

View File

@@ -61,7 +61,6 @@ private:
boost::signals2::signal<void(SubscriptionContextInterface*)> onDisconnect_;
std::atomic_bool disconnected_{false};
std::atomic_bool gotError_{false};
/**
* @brief The API version of the web stream client.
@@ -88,8 +87,6 @@ public:
ErrorHandler errorHandler
);
~SubscriptionContext() override;
/**
* @brief Send message to the client
* @note This method does nothing after disconnected() was called.

View File

@@ -26,7 +26,6 @@
#include "web/ng/Request.hpp"
#include "web/ng/Response.hpp"
#include "web/ng/impl/Concepts.hpp"
#include "web/ng/impl/SendingQueue.hpp"
#include "web/ng/impl/WsConnection.hpp"
#include <boost/asio/buffer.hpp>
@@ -76,10 +75,6 @@ class HttpConnection : public UpgradableConnection {
StreamType stream_;
std::optional<boost::beast::http::request<boost::beast::http::string_body>> request_;
std::chrono::steady_clock::duration timeout_{kDEFAULT_TIMEOUT};
using MessageType = boost::beast::http::response<boost::beast::http::string_body>;
SendingQueue<MessageType> sendingQueue_;
bool closed_{false};
public:
@@ -90,12 +85,7 @@ public:
util::TagDecoratorFactory const& tagDecoratorFactory
)
requires IsTcpStream<StreamType>
: UpgradableConnection(std::move(ip), std::move(buffer), tagDecoratorFactory)
, stream_{std::move(socket)}
, sendingQueue_([this](MessageType const& message, auto&& yield) {
boost::beast::get_lowest_layer(stream_).expires_after(timeout_);
boost::beast::http::async_write(stream_, message, yield);
})
: UpgradableConnection(std::move(ip), std::move(buffer), tagDecoratorFactory), stream_{std::move(socket)}
{
}
@@ -109,20 +99,9 @@ public:
requires IsSslTcpStream<StreamType>
: UpgradableConnection(std::move(ip), std::move(buffer), tagDecoratorFactory)
, stream_{std::move(socket), sslCtx}
, sendingQueue_([this](MessageType const& message, auto&& yield) {
boost::beast::get_lowest_layer(stream_).expires_after(timeout_);
boost::beast::http::async_write(stream_, message, yield);
})
{
}
HttpConnection(HttpConnection&& other) = delete;
HttpConnection&
operator=(HttpConnection&& other) = delete;
HttpConnection(HttpConnection const& other) = delete;
HttpConnection&
operator=(HttpConnection const& other) = delete;
std::optional<Error>
sslHandshake(boost::asio::yield_context yield)
requires IsSslTcpStream<StreamType>
@@ -151,7 +130,12 @@ public:
boost::asio::yield_context yield
) override
{
return sendingQueue_.send(std::move(response), yield);
boost::system::error_code error;
boost::beast::get_lowest_layer(stream_).expires_after(timeout_);
boost::beast::http::async_write(stream_, response, yield[error]);
if (error)
return error;
return std::nullopt;
}
void

View File

@@ -1,73 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "web/ng/Error.hpp"
#include <boost/asio/any_io_executor.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/system/detail/error_code.hpp>
#include <functional>
#include <optional>
#include <queue>
namespace web::ng::impl {
/**
 * @brief Serializes sends of messages of type T over a single connection.
 *
 * Messages are queued and written one at a time by the provided Sender callable. If a send from
 * one coroutine is already in progress, other callers only enqueue their message and return
 * immediately; the in-progress caller drains the queue. Once a send error occurs it is stored and
 * every subsequent call fails fast with that error.
 */
template <typename T>
class SendingQueue {
public:
// Callable performing the actual write of one message; errors are reported via yield[error_]
using Sender = std::function<void(T const&, boost::asio::basic_yield_context<boost::asio::any_io_executor>)>;
private:
std::queue<T> queue_;  // messages waiting to be written
Sender sender_;        // performs the actual write
Error error_;          // first error encountered; sticky once set
bool isSending_{false};  // true while some coroutine is draining the queue
public:
/**
 * @brief Construct the queue with the callable that performs the actual write.
 *
 * @param sender Callable invoked for each queued message
 */
SendingQueue(Sender sender) : sender_{std::move(sender)}
{
}
/**
 * @brief Enqueue a message and, if no send is in progress, drain the queue.
 *
 * @param message The message to send
 * @param yield Asio yield context for coroutine suspension
 * @return The stored error if any send failed (now or previously), std::nullopt otherwise
 */
std::optional<Error>
send(T message, boost::asio::yield_context yield)
{
// Fail fast: once a send failed, the connection is considered broken
if (error_)
return error_;
queue_.push(std::move(message));
// Another coroutine is already draining the queue; it will send this message too
if (isSending_)
return std::nullopt;
isSending_ = true;
// Drain the queue; messages enqueued by other coroutines while we yield are picked up here
while (not queue_.empty() and not error_) {
auto const responseToSend = std::move(queue_.front());
queue_.pop();
sender_(responseToSend, yield[error_]);
}
isSending_ = false;
if (error_)
return error_;
return std::nullopt;
}
};
} // namespace web::ng::impl

View File

@@ -19,7 +19,6 @@
#pragma once
#include "util/OverloadSet.hpp"
#include "util/Taggable.hpp"
#include "util/build/Build.hpp"
#include "web/ng/Connection.hpp"
@@ -27,7 +26,6 @@
#include "web/ng/Request.hpp"
#include "web/ng/Response.hpp"
#include "web/ng/impl/Concepts.hpp"
#include "web/ng/impl/SendingQueue.hpp"
#include <boost/asio/buffer.hpp>
#include <boost/asio/ip/tcp.hpp>
@@ -51,7 +49,6 @@
#include <optional>
#include <string>
#include <utility>
#include <variant>
namespace web::ng::impl {
@@ -60,17 +57,13 @@ public:
using Connection::Connection;
virtual std::optional<Error>
sendShared(std::shared_ptr<std::string> message, boost::asio::yield_context yield) = 0;
sendBuffer(boost::asio::const_buffer buffer, boost::asio::yield_context yield) = 0;
};
template <typename StreamType>
class WsConnection : public WsConnectionBase {
boost::beast::websocket::stream<StreamType> stream_;
boost::beast::http::request<boost::beast::http::string_body> initialRequest_;
using MessageType = std::variant<Response, std::shared_ptr<std::string>>;
SendingQueue<MessageType> sendingQueue_;
bool closed_{false};
public:
@@ -84,30 +77,10 @@ public:
: WsConnectionBase(std::move(ip), std::move(buffer), tagDecoratorFactory)
, stream_(std::move(stream))
, initialRequest_(std::move(initialRequest))
, sendingQueue_{[this](MessageType const& message, auto&& yield) {
boost::asio::const_buffer const buffer = std::visit(
util::OverloadSet{
[](Response const& r) -> boost::asio::const_buffer { return r.asWsResponse(); },
[](std::shared_ptr<std::string> const& m) -> boost::asio::const_buffer {
return boost::asio::buffer(*m);
}
},
message
);
stream_.async_write(buffer, yield);
}}
{
setupWsStream();
}
~WsConnection() override = default;
WsConnection(WsConnection&&) = delete;
WsConnection&
operator=(WsConnection&&) = delete;
WsConnection(WsConnection const&) = delete;
WsConnection&
operator=(WsConnection const&) = delete;
std::optional<Error>
performHandshake(boost::asio::yield_context yield)
{
@@ -125,9 +98,16 @@ public:
}
std::optional<Error>
sendShared(std::shared_ptr<std::string> message, boost::asio::yield_context yield) override
sendBuffer(boost::asio::const_buffer buffer, boost::asio::yield_context yield) override
{
return sendingQueue_.send(std::move(message), yield);
boost::beast::websocket::stream_base::timeout timeoutOption{};
stream_.get_option(timeoutOption);
boost::system::error_code error;
stream_.async_write(buffer, yield[error]);
if (error)
return error;
return std::nullopt;
}
void
@@ -143,7 +123,7 @@ public:
std::optional<Error>
send(Response response, boost::asio::yield_context yield) override
{
return sendingQueue_.send(std::move(response), yield);
return sendBuffer(response.asWsResponse(), yield);
}
std::expected<Request, Error>

View File

@@ -148,16 +148,11 @@ createObjectWithTwoNFTs()
auto const nftPage = createNftTokenPage({{kNFT_ID, url1}, {kNFT_ID2, url2}}, std::nullopt);
auto const serializerNftPage = nftPage.getSerializer();
auto const account = getAccountIdWithString(kACCOUNT);
// key is a token made up from owner's account ID followed by unused (in Clio) value described here:
// https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0020-non-fungible-tokens#tokenpage-id-format
auto constexpr kEXTRA_BYTES = "000000000000";
auto const key = std::string(std::begin(account), std::end(account)) + kEXTRA_BYTES;
return {
.key = {},
.keyRaw = key,
.keyRaw = std::string(reinterpret_cast<char const*>(account.data()), ripple::AccountID::size()),
.data = {},
.dataRaw =
std::string(static_cast<char const*>(serializerNftPage.getDataPtr()), serializerNftPage.getDataLength()),

View File

@@ -42,9 +42,4 @@ WithMockAssert::throwOnAssert(std::string_view m)
throw MockAssertException{.message = std::string{m}};
}
WithMockAssertNoThrow::~WithMockAssertNoThrow()
{
::util::impl::OnAssert::resetAction();
}
} // namespace common::util

View File

@@ -19,8 +19,6 @@
#pragma once
#include "util/Assert.hpp" // IWYU pragma: keep
#include <gmock/gmock.h>
#include <gtest/gtest.h>
@@ -43,35 +41,19 @@ private:
throwOnAssert(std::string_view m);
};
class WithMockAssertNoThrow : virtual public testing::Test {
public:
~WithMockAssertNoThrow() override;
};
} // namespace common::util
#define EXPECT_CLIO_ASSERT_FAIL_WITH_MESSAGE(statement, message_regex) \
if (dynamic_cast<common::util::WithMockAssert*>(this) != nullptr) { \
EXPECT_THROW( \
{ \
try { \
statement; \
} catch (common::util::WithMockAssert::MockAssertException const& e) { \
EXPECT_THAT(e.message, testing::ContainsRegex(message_regex)); \
throw; \
} \
}, \
common::util::WithMockAssert::MockAssertException \
); \
} else if (dynamic_cast<common::util::WithMockAssertNoThrow*>(this) != nullptr) { \
testing::StrictMock<testing::MockFunction<void(std::string_view)>> callMock; \
::util::impl::OnAssert::setAction([&callMock](std::string_view m) { callMock.Call(m); }); \
EXPECT_CALL(callMock, Call(testing::ContainsRegex(message_regex))); \
statement; \
::util::impl::OnAssert::resetAction(); \
} else { \
std::cerr << "EXPECT_CLIO_ASSERT_FAIL_WITH_MESSAGE() can be used only inside test body" << std::endl; \
std::terminate(); \
}
#define EXPECT_CLIO_ASSERT_FAIL(statement) EXPECT_THROW(statement, MockAssertException)
#define EXPECT_CLIO_ASSERT_FAIL(statement) EXPECT_CLIO_ASSERT_FAIL_WITH_MESSAGE(statement, ".*")
#define EXPECT_CLIO_ASSERT_FAIL_WITH_MESSAGE(statement, message_regex) \
EXPECT_THROW( \
{ \
try { \
statement; \
} catch (common::util::WithMockAssert::MockAssertException const& e) { \
EXPECT_THAT(e.message, testing::ContainsRegex(message_regex)); \
throw; \
} \
}, \
common::util::WithMockAssert::MockAssertException \
)

View File

@@ -33,7 +33,6 @@
#include <chrono>
#include <memory>
#include <optional>
#include <string>
struct MockWsConnectionImpl : web::ng::impl::WsConnectionBase {
using WsConnectionBase::WsConnectionBase;
@@ -51,12 +50,7 @@ struct MockWsConnectionImpl : web::ng::impl::WsConnectionBase {
MOCK_METHOD(void, close, (boost::asio::yield_context), (override));
using SendBufferReturnType = std::optional<web::ng::Error>;
MOCK_METHOD(
SendBufferReturnType,
sendShared,
(std::shared_ptr<std::string>, boost::asio::yield_context),
(override)
);
MOCK_METHOD(SendBufferReturnType, sendBuffer, (boost::asio::const_buffer, boost::asio::yield_context), (override));
};
using MockWsConnection = testing::NiceMock<MockWsConnectionImpl>;

View File

@@ -21,5 +21,5 @@ target_sources(
target_compile_options(clio_options INTERFACE -gdwarf-4)
target_include_directories(clio_integration_tests PRIVATE .)
target_link_libraries(clio_integration_tests PUBLIC clio_testing_common PRIVATE Boost::program_options)
target_link_libraries(clio_integration_tests PUBLIC clio_testing_common)
set_target_properties(clio_integration_tests PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})

View File

@@ -26,6 +26,7 @@
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/STObject.h>
#include <xrpl/protocol/STTx.h>
@@ -470,19 +471,17 @@ TEST_F(NFTHelpersTest, NFTDataFromLedgerObject)
ripple::Blob const uri1Blob(url1.begin(), url1.end());
ripple::Blob const uri2Blob(url2.begin(), url2.end());
auto const account = getAccountIdWithString(kACCOUNT);
auto const nftPage = createNftTokenPage({{kNFT_ID, url1}, {kNFT_ID2, url2}}, std::nullopt);
auto const serializerNftPage = nftPage.getSerializer();
auto const blob =
std::string(static_cast<char const*>(serializerNftPage.getDataPtr()), serializerNftPage.getDataLength());
// key is a token made up from owner's account ID followed by unused (in Clio) value described here:
// https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0020-non-fungible-tokens#tokenpage-id-format
auto constexpr kEXTRA_BYTES = "000000000000";
auto const key = std::string(std::begin(account), std::end(account)) + kEXTRA_BYTES;
int constexpr kSEQ{5};
auto const account = getAccountIdWithString(kACCOUNT);
uint32_t constexpr kSEQ{5};
auto const nftDatas = etl::getNFTDataFromObj(kSEQ, key, blob);
auto const nftDatas = etl::getNFTDataFromObj(
kSEQ,
std::string(reinterpret_cast<char const*>(account.data()), ripple::AccountID::size()),
std::string(static_cast<char const*>(serializerNftPage.getDataPtr()), serializerNftPage.getDataLength())
);
EXPECT_EQ(nftDatas.size(), 2);
EXPECT_EQ(nftDatas[0].tokenID, ripple::uint256(kNFT_ID));

View File

@@ -222,7 +222,7 @@ TEST_F(BlockingCacheTest, InvalidateWhenStateIsHasValue)
EXPECT_EQ(cache->state(), Cache::State::NoValue);
}
TEST_F(BlockingCacheTest, UpdateFromTwoCoroutinesHappensOnlyOnce)
TEST_F(BlockingCacheTest, UpdateFromTwoCoroutinesHappensOnlyOnes)
{
auto waitingCoroutine = [&](boost::asio::yield_context yield) {
auto result = cache->update(yield, mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());

View File

@@ -17,53 +17,307 @@
*/
//==============================================================================
#include "rpc/Errors.hpp"
#include "util/AsioContextTestFixture.hpp"
#include "util/MockAssert.hpp"
#include "util/ResponseExpirationCache.hpp"
#include <boost/asio/spawn.hpp>
#include <boost/json/object.hpp>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <chrono>
#include <string>
#include <thread>
#include <unordered_set>
using namespace util;
using testing::MockFunction;
using testing::Return;
using testing::StrictMock;
struct ResponseExpirationCacheTests : public ::testing::Test {
protected:
ResponseExpirationCache cache_{std::chrono::seconds{100}, {"key"}};
boost::json::object object_{{"key", "value"}};
struct ResponseExpirationCacheTest : SyncAsioContextTest {
using MockUpdater = StrictMock<MockFunction<
std::expected<ResponseExpirationCache::EntryData, ResponseExpirationCache::Error>(boost::asio::yield_context)>>;
using MockVerifier = StrictMock<MockFunction<bool(ResponseExpirationCache::EntryData const&)>>;
std::string const cmd = "server_info";
boost::json::object const obj = {{"some key", "some value"}};
MockUpdater mockUpdater;
MockVerifier mockVerifier;
};
TEST_F(ResponseExpirationCacheTests, PutAndGetNotExpired)
TEST_F(ResponseExpirationCacheTest, ShouldCacheDeterminesIfCommandIsCacheable)
{
EXPECT_FALSE(cache_.get("key").has_value());
std::unordered_set<std::string> const cmds = {cmd, "account_info"};
ResponseExpirationCache cache{std::chrono::seconds(10), cmds};
cache_.put("key", object_);
auto result = cache_.get("key");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(*result, object_);
result = cache_.get("key2");
ASSERT_FALSE(result.has_value());
for (auto const& c : cmds) {
EXPECT_TRUE(cache.shouldCache(c));
}
cache_.put("key2", object_);
result = cache_.get("key2");
ASSERT_FALSE(result.has_value());
EXPECT_FALSE(cache.shouldCache("account_tx"));
EXPECT_FALSE(cache.shouldCache("ledger"));
EXPECT_FALSE(cache.shouldCache("submit"));
EXPECT_FALSE(cache.shouldCache(""));
}
TEST_F(ResponseExpirationCacheTests, Invalidate)
TEST_F(ResponseExpirationCacheTest, ShouldCacheEmptySetMeansNothingCacheable)
{
cache_.put("key", object_);
cache_.invalidate();
EXPECT_FALSE(cache_.get("key").has_value());
std::unordered_set<std::string> const emptyCmds;
ResponseExpirationCache cache{std::chrono::seconds(10), emptyCmds};
EXPECT_FALSE(cache.shouldCache("server_info"));
EXPECT_FALSE(cache.shouldCache("account_info"));
EXPECT_FALSE(cache.shouldCache("any_command"));
EXPECT_FALSE(cache.shouldCache(""));
}
TEST_F(ResponseExpirationCacheTests, GetExpired)
TEST_F(ResponseExpirationCacheTest, ShouldCacheCaseMatchingIsRequired)
{
ResponseExpirationCache cache{std::chrono::milliseconds{1}, {"key"}};
auto const response = boost::json::object{{"key", "value"}};
std::unordered_set<std::string> const specificCmds = {cmd};
ResponseExpirationCache cache{std::chrono::seconds(10), specificCmds};
cache.put("key", response);
std::this_thread::sleep_for(std::chrono::milliseconds{2});
auto const result = cache.get("key");
EXPECT_FALSE(result);
EXPECT_TRUE(cache.shouldCache(cmd));
EXPECT_FALSE(cache.shouldCache("SERVER_INFO"));
EXPECT_FALSE(cache.shouldCache("Server_Info"));
}
TEST_F(ResponseExpirationCacheTest, GetOrUpdateNoValueInCacheCallsUpdaterAndVerifier)
{
ResponseExpirationCache cache{std::chrono::seconds(10), {cmd}};
runSpawn([&](boost::asio::yield_context yield) {
EXPECT_CALL(mockUpdater, Call)
.WillOnce(Return(
ResponseExpirationCache::EntryData{
.lastUpdated = std::chrono::steady_clock::now(),
.response = obj,
}
));
EXPECT_CALL(mockVerifier, Call).WillOnce(Return(true));
auto result =
cache.getOrUpdate(yield, "server_info", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());
ASSERT_TRUE(result.has_value());
EXPECT_EQ(result.value(), obj);
});
}
TEST_F(ResponseExpirationCacheTest, GetOrUpdateExpiredValueInCacheCallsUpdaterAndVerifier)
{
ResponseExpirationCache cache{std::chrono::milliseconds(1), {cmd}};
runSpawn([&](boost::asio::yield_context yield) {
boost::json::object const expiredObject = {{"some key", "expired value"}};
EXPECT_CALL(mockUpdater, Call)
.WillOnce(Return(
ResponseExpirationCache::EntryData{
.lastUpdated = std::chrono::steady_clock::now(),
.response = expiredObject,
}
));
EXPECT_CALL(mockVerifier, Call).WillOnce(Return(true));
auto result =
cache.getOrUpdate(yield, "server_info", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());
ASSERT_TRUE(result.has_value());
EXPECT_EQ(result.value(), expiredObject);
std::this_thread::sleep_for(std::chrono::milliseconds(2));
EXPECT_CALL(mockUpdater, Call)
.WillOnce(Return(
ResponseExpirationCache::EntryData{.lastUpdated = std::chrono::steady_clock::now(), .response = obj}
));
EXPECT_CALL(mockVerifier, Call).WillOnce(Return(true));
result = cache.getOrUpdate(yield, "server_info", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());
ASSERT_TRUE(result.has_value());
EXPECT_EQ(result.value(), obj);
});
}
TEST_F(ResponseExpirationCacheTest, GetOrUpdateCachedValueNotExpiredDoesNotCallUpdaterOrVerifier)
{
ResponseExpirationCache cache{std::chrono::seconds(10), {cmd}};
runSpawn([&](boost::asio::yield_context yield) {
// First call to populate cache
EXPECT_CALL(mockUpdater, Call)
.WillOnce(Return(
ResponseExpirationCache::EntryData{
.lastUpdated = std::chrono::steady_clock::now(),
.response = obj,
}
));
EXPECT_CALL(mockVerifier, Call).WillOnce(Return(true));
auto result =
cache.getOrUpdate(yield, "server_info", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());
ASSERT_TRUE(result.has_value());
EXPECT_EQ(result.value(), obj);
// Second call should use cached value and not call updater/verifier
result = cache.getOrUpdate(yield, "server_info", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());
ASSERT_TRUE(result.has_value());
EXPECT_EQ(result.value(), obj);
});
}
TEST_F(ResponseExpirationCacheTest, GetOrUpdateHandlesErrorFromUpdater)
{
ResponseExpirationCache cache{std::chrono::seconds(10), {cmd}};
ResponseExpirationCache::Error const error{
.status = rpc::Status{rpc::ClioError::EtlConnectionError}, .warnings = {}
};
runSpawn([&](boost::asio::yield_context yield) {
EXPECT_CALL(mockUpdater, Call).WillOnce(Return(std::unexpected(error)));
auto result =
cache.getOrUpdate(yield, "server_info", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());
ASSERT_FALSE(result.has_value());
EXPECT_EQ(result.error(), error);
});
}
TEST_F(ResponseExpirationCacheTest, GetOrUpdateVerifierRejection)
{
ResponseExpirationCache cache{std::chrono::seconds(10), {cmd}};
runSpawn([&](boost::asio::yield_context yield) {
EXPECT_CALL(mockUpdater, Call)
.WillOnce(Return(
ResponseExpirationCache::EntryData{
.lastUpdated = std::chrono::steady_clock::now(),
.response = obj,
}
));
EXPECT_CALL(mockVerifier, Call).WillOnce(Return(false));
auto result =
cache.getOrUpdate(yield, "server_info", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());
ASSERT_TRUE(result.has_value());
EXPECT_EQ(result.value(), obj);
boost::json::object const anotherObj = {{"some key", "another value"}};
EXPECT_CALL(mockUpdater, Call)
.WillOnce(Return(
ResponseExpirationCache::EntryData{
.lastUpdated = std::chrono::steady_clock::now(),
.response = anotherObj,
}
));
EXPECT_CALL(mockVerifier, Call).WillOnce(Return(true));
result = cache.getOrUpdate(yield, "server_info", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());
ASSERT_TRUE(result.has_value());
EXPECT_EQ(result.value(), anotherObj);
});
}
TEST_F(ResponseExpirationCacheTest, GetOrUpdateMultipleConcurrentUpdates)
{
ResponseExpirationCache cache{std::chrono::seconds(10), {cmd}};
bool waitingCoroutineFinished = false;
auto waitingCoroutine = [&](boost::asio::yield_context yield) {
auto result =
cache.getOrUpdate(yield, "server_info", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());
ASSERT_TRUE(result.has_value());
EXPECT_EQ(result.value(), obj);
waitingCoroutineFinished = true;
};
EXPECT_CALL(mockUpdater, Call)
.WillOnce(
[this, &waitingCoroutine](
boost::asio::yield_context yield
) -> std::expected<ResponseExpirationCache::EntryData, ResponseExpirationCache::Error> {
boost::asio::spawn(yield, waitingCoroutine);
return ResponseExpirationCache::EntryData{
.lastUpdated = std::chrono::steady_clock::now(),
.response = obj,
};
}
);
EXPECT_CALL(mockVerifier, Call).WillOnce(Return(true));
runSpawnWithTimeout(std::chrono::seconds{1}, [&](boost::asio::yield_context yield) {
auto result =
cache.getOrUpdate(yield, "server_info", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());
ASSERT_TRUE(result.has_value());
EXPECT_EQ(result.value(), obj);
ASSERT_FALSE(waitingCoroutineFinished);
});
}
TEST_F(ResponseExpirationCacheTest, InvalidateForcesRefresh)
{
ResponseExpirationCache cache{std::chrono::seconds(10), {cmd}};
runSpawn([&](boost::asio::yield_context yield) {
boost::json::object const oldObject = {{"some key", "old value"}};
EXPECT_CALL(mockUpdater, Call)
.WillOnce(Return(
ResponseExpirationCache::EntryData{
.lastUpdated = std::chrono::steady_clock::now(),
.response = oldObject,
}
));
EXPECT_CALL(mockVerifier, Call).WillOnce(Return(true));
auto result =
cache.getOrUpdate(yield, "server_info", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());
ASSERT_TRUE(result.has_value());
EXPECT_EQ(result.value(), oldObject);
cache.invalidate();
EXPECT_CALL(mockUpdater, Call)
.WillOnce(Return(
ResponseExpirationCache::EntryData{
.lastUpdated = std::chrono::steady_clock::now(),
.response = obj,
}
));
EXPECT_CALL(mockVerifier, Call).WillOnce(Return(true));
result = cache.getOrUpdate(yield, "server_info", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction());
ASSERT_TRUE(result.has_value());
EXPECT_EQ(result.value(), obj);
});
}
struct ResponseExpirationCacheAssertTest : common::util::WithMockAssert, ResponseExpirationCacheTest {};
TEST_F(ResponseExpirationCacheAssertTest, NonCacheableCommandThrowsAssertion)
{
ResponseExpirationCache cache{std::chrono::seconds(10), {cmd}};
ASSERT_FALSE(cache.shouldCache("non_cacheable_command"));
runSpawn([&](boost::asio::yield_context yield) {
EXPECT_CLIO_ASSERT_FAIL({
[[maybe_unused]]
auto const v = cache.getOrUpdate(
yield, "non_cacheable_command", mockUpdater.AsStdFunction(), mockVerifier.AsStdFunction()
);
});
});
}

View File

@@ -246,7 +246,6 @@ TEST_F(ServerHttpTest, ClientDisconnects)
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError->message(); }();
client.disconnect();
server_->stop(yield);
ctx_.stop();
});
@@ -305,7 +304,6 @@ TEST_F(ServerHttpTest, OnConnectCheck)
timer.async_wait(yield[error]);
client.gracefulShutdown();
server_->stop(yield);
ctx_.stop();
});
@@ -364,7 +362,6 @@ TEST_F(ServerHttpTest, OnConnectCheckFailed)
EXPECT_EQ(response->version(), 11);
client.gracefulShutdown();
server_->stop(yield);
ctx_.stop();
});
@@ -418,7 +415,6 @@ TEST_F(ServerHttpTest, OnDisconnectHook)
boost::system::error_code error;
timer.async_wait(yield[error]);
server_->stop(yield);
ctx_.stop();
});
@@ -481,7 +477,6 @@ TEST_P(ServerHttpTest, RequestResponse)
}
client.gracefulShutdown();
server_->stop(yield);
ctx_.stop();
});
@@ -521,7 +516,6 @@ TEST_F(ServerTest, WsClientDisconnects)
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError->message(); }();
client.close();
server_->stop(yield);
ctx_.stop();
});
@@ -552,7 +546,6 @@ TEST_F(ServerTest, WsRequestResponse)
}
client.gracefulClose(yield, std::chrono::milliseconds{100});
server_->stop(yield);
ctx_.stop();
});

View File

@@ -18,7 +18,6 @@
//==============================================================================
#include "util/AsioContextTestFixture.hpp"
#include "util/MockAssert.hpp"
#include "util/Taggable.hpp"
#include "util/config/ConfigDefinition.hpp"
#include "util/config/ConfigValue.hpp"
@@ -29,8 +28,10 @@
#include "web/ng/SubscriptionContext.hpp"
#include "web/ng/impl/MockWsConnection.hpp"
#include <boost/asio/buffer.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/beast/core/buffers_to_string.hpp>
#include <boost/beast/core/flat_buffer.hpp>
#include <boost/system/errc.hpp>
#include <gmock/gmock.h>
@@ -65,8 +66,8 @@ TEST_F(NgSubscriptionContextTests, Send)
auto subscriptionContext = makeSubscriptionContext(yield);
auto const message = std::make_shared<std::string>("some message");
EXPECT_CALL(connection_, sendShared).WillOnce([&message](std::shared_ptr<std::string> sendingMessage, auto&&) {
EXPECT_EQ(sendingMessage, message);
EXPECT_CALL(connection_, sendBuffer).WillOnce([&message](boost::asio::const_buffer buffer, auto&&) {
EXPECT_EQ(boost::beast::buffers_to_string(buffer), *message);
return std::nullopt;
});
subscriptionContext.send(message);
@@ -82,16 +83,16 @@ TEST_F(NgSubscriptionContextTests, SendOrder)
auto const message2 = std::make_shared<std::string>("message2");
testing::Sequence const sequence;
EXPECT_CALL(connection_, sendShared)
EXPECT_CALL(connection_, sendBuffer)
.InSequence(sequence)
.WillOnce([&message1](std::shared_ptr<std::string> sendingMessage, auto&&) {
EXPECT_EQ(sendingMessage, message1);
.WillOnce([&message1](boost::asio::const_buffer buffer, auto&&) {
EXPECT_EQ(boost::beast::buffers_to_string(buffer), *message1);
return std::nullopt;
});
EXPECT_CALL(connection_, sendShared)
EXPECT_CALL(connection_, sendBuffer)
.InSequence(sequence)
.WillOnce([&message2](std::shared_ptr<std::string> sendingMessage, auto&&) {
EXPECT_EQ(sendingMessage, message2);
.WillOnce([&message2](boost::asio::const_buffer buffer, auto&&) {
EXPECT_EQ(boost::beast::buffers_to_string(buffer), *message2);
return std::nullopt;
});
@@ -107,8 +108,8 @@ TEST_F(NgSubscriptionContextTests, SendFailed)
auto subscriptionContext = makeSubscriptionContext(yield);
auto const message = std::make_shared<std::string>("some message");
EXPECT_CALL(connection_, sendShared).WillOnce([&message](std::shared_ptr<std::string> sendingMessage, auto&&) {
EXPECT_EQ(sendingMessage, message);
EXPECT_CALL(connection_, sendBuffer).WillOnce([&message](boost::asio::const_buffer buffer, auto&&) {
EXPECT_EQ(boost::beast::buffers_to_string(buffer), *message);
return boost::system::errc::make_error_code(boost::system::errc::not_supported);
});
EXPECT_CALL(errorHandler_, Call).WillOnce(testing::Return(true));
@@ -124,10 +125,10 @@ TEST_F(NgSubscriptionContextTests, SendTooManySubscriptions)
auto subscriptionContext = makeSubscriptionContext(yield, 1);
auto const message = std::make_shared<std::string>("message1");
EXPECT_CALL(connection_, sendShared)
.WillOnce([&message](std::shared_ptr<std::string> sendingMessage, boost::asio::yield_context innerYield) {
EXPECT_CALL(connection_, sendBuffer)
.WillOnce([&message](boost::asio::const_buffer buffer, boost::asio::yield_context innerYield) {
boost::asio::post(innerYield); // simulate send is slow by switching to another coroutine
EXPECT_EQ(sendingMessage, message);
EXPECT_EQ(boost::beast::buffers_to_string(buffer), *message);
return std::nullopt;
});
EXPECT_CALL(connection_, close);
@@ -167,15 +168,5 @@ TEST_F(NgSubscriptionContextTests, SetApiSubversion)
auto subscriptionContext = makeSubscriptionContext(yield);
subscriptionContext.setApiSubversion(42);
EXPECT_EQ(subscriptionContext.apiSubversion(), 42);
subscriptionContext.disconnect(yield);
});
}
struct NgSubscriptionContextAssertTests : common::util::WithMockAssertNoThrow, NgSubscriptionContextTests {};
TEST_F(NgSubscriptionContextAssertTests, AssertFailsWhenNotDisconnected)
{
runSpawn([&](boost::asio::yield_context yield) {
EXPECT_CLIO_ASSERT_FAIL({ auto subscriptionContext = makeSubscriptionContext(yield); });
});
}

View File

@@ -34,9 +34,11 @@
#include "web/ng/impl/MockHttpConnection.hpp"
#include "web/ng/impl/MockWsConnection.hpp"
#include <boost/asio/buffer.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/beast/core/buffers_to_string.hpp>
#include <boost/beast/core/flat_buffer.hpp>
#include <boost/beast/http/error.hpp>
#include <boost/beast/http/message.hpp>
@@ -275,9 +277,9 @@ TEST_F(ConnectionHandlerSequentialProcessingTest, SendSubscriptionMessage)
EXPECT_CALL(*mockWsConnection, send).WillOnce(Return(std::nullopt));
EXPECT_CALL(*mockWsConnection, sendShared)
.WillOnce([&subscriptionMessage](std::shared_ptr<std::string> sendingMessage, auto&&) {
EXPECT_EQ(*sendingMessage, subscriptionMessage);
EXPECT_CALL(*mockWsConnection, sendBuffer)
.WillOnce([&subscriptionMessage](boost::asio::const_buffer buffer, auto&&) {
EXPECT_EQ(boost::beast::buffers_to_string(buffer), subscriptionMessage);
return std::nullopt;
});

View File

@@ -18,7 +18,6 @@
//==============================================================================
#include "util/AsioContextTestFixture.hpp"
#include "util/CoroutineGroup.hpp"
#include "util/Taggable.hpp"
#include "util/TestHttpClient.hpp"
#include "util/TestHttpServer.hpp"
@@ -30,6 +29,7 @@
#include "web/ng/Response.hpp"
#include "web/ng/impl/HttpConnection.hpp"
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/beast/core/flat_buffer.hpp>
#include <boost/beast/http/field.hpp>
@@ -42,10 +42,8 @@
#include <chrono>
#include <cstddef>
#include <memory>
#include <optional>
#include <ranges>
#include <string>
#include <utility>
using namespace web::ng::impl;
@@ -54,16 +52,16 @@ using namespace util::config;
namespace http = boost::beast::http;
struct HttpConnectionTests : SyncAsioContextTest {
std::unique_ptr<PlainHttpConnection>
PlainHttpConnection
acceptConnection(boost::asio::yield_context yield)
{
auto expectedSocket = httpServer_.accept(yield);
[&]() { ASSERT_TRUE(expectedSocket.has_value()) << expectedSocket.error().message(); }();
auto ip = expectedSocket->remote_endpoint().address().to_string();
auto connection = std::make_unique<PlainHttpConnection>(
PlainHttpConnection connection{
std::move(expectedSocket).value(), std::move(ip), boost::beast::flat_buffer{}, tagDecoratorFactory_
);
connection->setTimeout(std::chrono::milliseconds{100});
};
connection.setTimeout(std::chrono::milliseconds{100});
return connection;
}
@@ -85,7 +83,7 @@ TEST_F(HttpConnectionTests, wasUpgraded)
runSpawn([this](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
EXPECT_FALSE(connection->wasUpgraded());
EXPECT_FALSE(connection.wasUpgraded());
});
}
@@ -104,7 +102,7 @@ TEST_F(HttpConnectionTests, Receive)
runSpawn([this](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
auto expectedRequest = connection->receive(yield);
auto expectedRequest = connection.receive(yield);
ASSERT_TRUE(expectedRequest.has_value()) << expectedRequest.error().message();
ASSERT_TRUE(expectedRequest->isHttp());
@@ -128,8 +126,8 @@ TEST_F(HttpConnectionTests, ReceiveTimeout)
runSpawn([this](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
connection->setTimeout(std::chrono::milliseconds{1});
auto expectedRequest = connection->receive(yield);
connection.setTimeout(std::chrono::milliseconds{1});
auto expectedRequest = connection.receive(yield);
EXPECT_FALSE(expectedRequest.has_value());
});
}
@@ -144,8 +142,8 @@ TEST_F(HttpConnectionTests, ReceiveClientDisconnected)
runSpawn([this](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
connection->setTimeout(std::chrono::milliseconds{1});
auto expectedRequest = connection->receive(yield);
connection.setTimeout(std::chrono::milliseconds{1});
auto expectedRequest = connection.receive(yield);
EXPECT_FALSE(expectedRequest.has_value());
});
}
@@ -172,7 +170,7 @@ TEST_F(HttpConnectionTests, Send)
runSpawn([this, &response](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
auto maybeError = connection->send(response, yield);
auto maybeError = connection.send(response, yield);
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError->message(); }();
});
}
@@ -188,7 +186,7 @@ TEST_F(HttpConnectionTests, SendMultipleTimes)
for ([[maybe_unused]] auto i : std::ranges::iota_view{0, 3}) {
auto const expectedResponse = httpClient_.receive(yield, std::chrono::milliseconds{100});
[&]() { ASSERT_TRUE(expectedResponse.has_value()) << expectedResponse.error().message(); }();
[&]() { ASSERT_TRUE(expectedResponse.has_value()) << maybeError->message(); }();
auto const receivedResponse = expectedResponse.value();
auto const sentResponse = Response{response}.intoHttpResponse();
@@ -203,77 +201,12 @@ TEST_F(HttpConnectionTests, SendMultipleTimes)
auto connection = acceptConnection(yield);
for ([[maybe_unused]] auto i : std::ranges::iota_view{0, 3}) {
auto maybeError = connection->send(response, yield);
auto maybeError = connection.send(response, yield);
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError->message(); }();
}
});
}
TEST_F(HttpConnectionTests, SendMultipleTimesFromMultipleCoroutines)
{
Request const request{request_};
Response const response{http::status::ok, "some response data", request};
boost::asio::spawn(ctx_, [this, response = response](boost::asio::yield_context yield) mutable {
auto const maybeError =
httpClient_.connect("localhost", httpServer_.port(), yield, std::chrono::milliseconds{100});
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError->message(); }();
for ([[maybe_unused]] auto i : std::ranges::iota_view{0, 3}) {
auto const expectedResponse = httpClient_.receive(yield, std::chrono::milliseconds{100});
[&]() { ASSERT_TRUE(expectedResponse.has_value()) << expectedResponse.error().message(); }();
auto const receivedResponse = expectedResponse.value();
auto const sentResponse = Response{response}.intoHttpResponse();
EXPECT_EQ(receivedResponse.result(), sentResponse.result());
EXPECT_EQ(receivedResponse.body(), sentResponse.body());
EXPECT_EQ(receivedResponse.version(), request_.version());
EXPECT_TRUE(receivedResponse.keep_alive());
}
});
runSpawn([this, &response](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
util::CoroutineGroup group{yield};
for ([[maybe_unused]] auto i : std::ranges::iota_view{0, 3}) {
group.spawn(yield, [&response, &connection](boost::asio::yield_context innerYield) {
auto const maybeError = connection->send(response, innerYield);
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError->message(); }();
});
}
group.asyncWait(yield);
});
}
TEST_F(HttpConnectionTests, SendMultipleTimesClientDisconnected)
{
Response const response{http::status::ok, "some response data", Request{request_}};
boost::asio::spawn(ctx_, [this, response = response](boost::asio::yield_context yield) mutable {
auto const maybeError =
httpClient_.connect("localhost", httpServer_.port(), yield, std::chrono::milliseconds{1});
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError->message(); }();
auto const expectedResponse = httpClient_.receive(yield, std::chrono::milliseconds{100});
[&]() { ASSERT_TRUE(expectedResponse.has_value()) << expectedResponse.error().message(); }();
httpClient_.disconnect();
});
runSpawn([this, &response](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
connection->setTimeout(std::chrono::milliseconds{1});
auto maybeError = connection->send(response, yield);
size_t counter{1};
while (not maybeError.has_value() and counter < 100) {
++counter;
maybeError = connection->send(response, yield);
}
// Sending after getting an error should be safe
maybeError = connection->send(response, yield);
EXPECT_TRUE(maybeError.has_value());
EXPECT_LT(counter, 100);
});
}
TEST_F(HttpConnectionTests, SendClientDisconnected)
{
Response const response{http::status::ok, "some response data", Request{request_}};
@@ -284,12 +217,12 @@ TEST_F(HttpConnectionTests, SendClientDisconnected)
});
runSpawn([this, &response](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
connection->setTimeout(std::chrono::milliseconds{1});
auto maybeError = connection->send(response, yield);
connection.setTimeout(std::chrono::milliseconds{1});
auto maybeError = connection.send(response, yield);
size_t counter{1};
while (not maybeError.has_value() and counter < 100) {
++counter;
maybeError = connection->send(response, yield);
maybeError = connection.send(response, yield);
}
EXPECT_TRUE(maybeError.has_value());
EXPECT_LT(counter, 100);
@@ -313,8 +246,8 @@ TEST_F(HttpConnectionTests, Close)
runSpawn([this](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
connection->setTimeout(std::chrono::milliseconds{1});
connection->close(yield);
connection.setTimeout(std::chrono::milliseconds{1});
connection.close(yield);
});
}
@@ -330,7 +263,7 @@ TEST_F(HttpConnectionTests, IsUpgradeRequested_GotHttpRequest)
runSpawn([this](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
auto result = connection->isUpgradeRequested(yield);
auto result = connection.isUpgradeRequested(yield);
[&]() { ASSERT_TRUE(result.has_value()) << result.error().message(); }();
EXPECT_FALSE(result.value());
});
@@ -345,8 +278,8 @@ TEST_F(HttpConnectionTests, IsUpgradeRequested_FailedToFetch)
runSpawn([this](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
connection->setTimeout(std::chrono::milliseconds{1});
auto result = connection->isUpgradeRequested(yield);
connection.setTimeout(std::chrono::milliseconds{1});
auto result = connection.isUpgradeRequested(yield);
EXPECT_FALSE(result.has_value());
});
}
@@ -362,11 +295,11 @@ TEST_F(HttpConnectionTests, Upgrade)
runSpawn([this](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
auto const expectedResult = connection->isUpgradeRequested(yield);
auto const expectedResult = connection.isUpgradeRequested(yield);
[&]() { ASSERT_TRUE(expectedResult.has_value()) << expectedResult.error().message(); }();
[&]() { ASSERT_TRUE(expectedResult.value()); }();
auto expectedWsConnection = connection->upgrade(tagDecoratorFactory_, yield);
auto expectedWsConnection = connection.upgrade(tagDecoratorFactory_, yield);
[&]() { ASSERT_TRUE(expectedWsConnection.has_value()) << expectedWsConnection.error().message(); }();
});
}
@@ -380,7 +313,7 @@ TEST_F(HttpConnectionTests, Ip)
runSpawn([this](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
EXPECT_TRUE(connection->ip() == "127.0.0.1" or connection->ip() == "::1") << connection->ip();
EXPECT_TRUE(connection.ip() == "127.0.0.1" or connection.ip() == "::1") << connection.ip();
});
}
@@ -396,13 +329,13 @@ TEST_F(HttpConnectionTests, isAdminSetAdmin)
runSpawn([&](boost::asio::yield_context yield) {
auto connection = acceptConnection(yield);
EXPECT_FALSE(connection->isAdmin());
EXPECT_FALSE(connection.isAdmin());
connection->setIsAdmin(adminSetter.AsStdFunction());
EXPECT_TRUE(connection->isAdmin());
connection.setIsAdmin(adminSetter.AsStdFunction());
EXPECT_TRUE(connection.isAdmin());
// Setter shouldn't not be called here because isAdmin is already set
connection->setIsAdmin(adminSetter.AsStdFunction());
EXPECT_TRUE(connection->isAdmin());
connection.setIsAdmin(adminSetter.AsStdFunction());
EXPECT_TRUE(connection.isAdmin());
});
}

View File

@@ -153,25 +153,6 @@ TEST_F(WebWsConnectionTests, Send)
});
}
TEST_F(WebWsConnectionTests, SendShared)
{
auto const response = std::make_shared<std::string>("some response");
boost::asio::spawn(ctx_, [this, &response](boost::asio::yield_context yield) {
auto maybeError = wsClient_.connect("localhost", httpServer_.port(), yield, std::chrono::milliseconds{100});
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError.value().message(); }();
auto const expectedMessage = wsClient_.receive(yield, std::chrono::milliseconds{100});
[&]() { ASSERT_TRUE(expectedMessage.has_value()) << expectedMessage.error().message(); }();
EXPECT_EQ(expectedMessage.value(), *response);
});
runSpawn([this, &response](boost::asio::yield_context yield) {
auto wsConnection = acceptConnection(yield);
auto maybeError = wsConnection->sendShared(response, yield);
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError.value().message(); }();
});
}
TEST_F(WebWsConnectionTests, MultipleSend)
{
Response const response{boost::beast::http::status::ok, "some response", request_};
@@ -190,42 +171,13 @@ TEST_F(WebWsConnectionTests, MultipleSend)
runSpawn([this, &response](boost::asio::yield_context yield) {
auto wsConnection = acceptConnection(yield);
for ([[maybe_unused]] auto i : std::ranges::iota_view{0, 3}) {
for ([[maybe_unused]] auto unused : std::ranges::iota_view{0, 3}) {
auto maybeError = wsConnection->send(response, yield);
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError.value().message(); }();
}
});
}
TEST_F(WebWsConnectionTests, MultipleSendFromMultipleCoroutines)
{
Response const response{boost::beast::http::status::ok, "some response", request_};
boost::asio::spawn(ctx_, [this, &response](boost::asio::yield_context yield) {
auto maybeError = wsClient_.connect("localhost", httpServer_.port(), yield, std::chrono::milliseconds{100});
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError.value().message(); }();
for ([[maybe_unused]] auto i : std::ranges::iota_view{0, 3}) {
auto const expectedMessage = wsClient_.receive(yield, std::chrono::milliseconds{100});
[&]() { ASSERT_TRUE(expectedMessage.has_value()) << expectedMessage.error().message(); }();
EXPECT_EQ(expectedMessage.value(), response.message());
}
});
runSpawn([this, &response](boost::asio::yield_context yield) {
auto wsConnection = acceptConnection(yield);
util::CoroutineGroup group{yield};
for ([[maybe_unused]] auto i : std::ranges::iota_view{0, 3}) {
group.spawn(yield, [&wsConnection, &response](boost::asio::yield_context innerYield) {
auto maybeError = wsConnection->send(response, innerYield);
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError.value().message(); }();
});
}
group.asyncWait(yield);
});
}
TEST_F(WebWsConnectionTests, SendFailed)
{
Response const response{boost::beast::http::status::ok, "some response", request_};
@@ -250,36 +202,6 @@ TEST_F(WebWsConnectionTests, SendFailed)
});
}
TEST_F(WebWsConnectionTests, SendFailedSendingFromMultipleCoroutines)
{
Response const response{boost::beast::http::status::ok, "some response", request_};
boost::asio::spawn(ctx_, [this, &response](boost::asio::yield_context yield) {
auto maybeError = wsClient_.connect("localhost", httpServer_.port(), yield, std::chrono::milliseconds{100});
[&]() { ASSERT_FALSE(maybeError.has_value()) << maybeError.value().message(); }();
auto const expectedMessage = wsClient_.receive(yield, std::chrono::milliseconds{100});
[&]() { ASSERT_TRUE(expectedMessage.has_value()) << expectedMessage.error().message(); }();
EXPECT_EQ(expectedMessage.value(), response.message());
wsClient_.close();
});
runSpawn([this, &response](boost::asio::yield_context yield) {
auto wsConnection = acceptConnection(yield);
wsConnection->setTimeout(std::chrono::milliseconds{1});
std::optional<Error> maybeError;
size_t counter = 0;
while (not maybeError.has_value() and counter < 100) {
maybeError = wsConnection->send(response, yield);
++counter;
}
// Sending after getting an error should be safe
maybeError = wsConnection->send(response, yield);
EXPECT_TRUE(maybeError.has_value());
EXPECT_LT(counter, 100);
});
}
TEST_F(WebWsConnectionTests, Receive)
{
boost::asio::spawn(ctx_, [this](boost::asio::yield_context yield) {
@@ -314,7 +236,7 @@ TEST_F(WebWsConnectionTests, MultipleReceive)
runSpawn([this](boost::asio::yield_context yield) {
auto wsConnection = acceptConnection(yield);
for ([[maybe_unused]] auto i : std::ranges::iota_view{0, 3}) {
for ([[maybe_unused]] auto unused : std::ranges::iota_view{0, 3}) {
auto maybeRequest = wsConnection->receive(yield);
[&]() { ASSERT_TRUE(maybeRequest.has_value()) << maybeRequest.error().message(); }();
EXPECT_EQ(maybeRequest->message(), request_.message());

View File

@@ -1,36 +0,0 @@
#!/usr/bin/env python3
import json
import plumbum
from pathlib import Path
# Directory containing this script, resolved to an absolute path.
THIS_DIR = Path(__file__).parent.resolve()
# Repository root, assumed to be one level above this script's directory.
ROOT_DIR = THIS_DIR.parent.resolve()
# Handle to the `conan` executable, located via plumbum's PATH lookup.
CONAN = plumbum.local["conan"]
def get_profiles():
    """Return the Conan profiles parsed from `conan profile list --format=json`."""
    raw_output = CONAN("profile", "list", "--format=json")
    return json.loads(raw_output)
def rebuild():
    """Run `conan install` for every known profile in both Release and Debug.

    Each invocation builds missing packages into a per-profile/per-build-type
    output folder (build_<profile>_<build_type>) with tests and integration
    tests enabled, executing from the repository root.
    """
    profiles = get_profiles()
    for build_type in ("Release", "Debug"):
        for profile in profiles:
            print(f"Rebuilding {profile} with build type {build_type}")
            # Run from the repo root so the relative "." conanfile path resolves.
            with plumbum.local.cwd(ROOT_DIR):
                install_cmd = CONAN[
                    "install", ".",
                    "--build=missing",
                    f"--output-folder=build_{profile}_{build_type}",
                    "-s", f"build_type={build_type}",
                    "-o", "&:tests=True",
                    "-o", "&:integration_tests=True",
                    "--profile:all", profile,
                ]
                # plumbum.FG streams the command's output to the foreground.
                install_cmd & plumbum.FG
# Entry point: rebuild all profile/build-type combinations when run as a script.
if __name__ == "__main__":
    rebuild()