mirror of
https://github.com/Xahau/xahaud.git
synced 2026-02-10 08:52:22 +00:00
Compare commits
5 Commits
coverage
...
subscripti
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7241fed6d0 | ||
|
|
a6dfb40413 | ||
|
|
3e0d6b9cd2 | ||
|
|
a8388e48a4 | ||
|
|
49908096d5 |
@@ -1,6 +0,0 @@
|
||||
coverage:
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
target: 60%
|
||||
threshold: 2%
|
||||
22
.github/actions/xahau-ga-build/action.yml
vendored
22
.github/actions/xahau-ga-build/action.yml
vendored
@@ -2,14 +2,6 @@ name: build
|
||||
description: 'Builds the project with ccache integration'
|
||||
|
||||
inputs:
|
||||
cmake-target:
|
||||
description: 'CMake target to build'
|
||||
required: false
|
||||
default: all
|
||||
cmake-args:
|
||||
description: 'Additional CMake arguments'
|
||||
required: false
|
||||
default: null
|
||||
generator:
|
||||
description: 'CMake generator to use'
|
||||
required: true
|
||||
@@ -28,10 +20,6 @@ inputs:
|
||||
description: 'C++ compiler to use'
|
||||
required: false
|
||||
default: ''
|
||||
gcov:
|
||||
description: 'Gcov to use'
|
||||
required: false
|
||||
default: ''
|
||||
compiler-id:
|
||||
description: 'Unique identifier: compiler-version-stdlib[-gccversion] (e.g. clang-14-libstdcxx-gcc11, gcc-13-libstdcxx)'
|
||||
required: false
|
||||
@@ -134,11 +122,6 @@ runs:
|
||||
export CXX="${{ inputs.cxx }}"
|
||||
fi
|
||||
|
||||
if [ -n "${{ inputs.gcov }}" ]; then
|
||||
ln -sf /usr/bin/${{ inputs.gcov }} /usr/local/bin/gcov
|
||||
export CMAKE_BUILD_PARALLEL_LEVEL=$(nproc)
|
||||
fi
|
||||
|
||||
# Create wrapper toolchain that overlays ccache on top of Conan's toolchain
|
||||
# This enables ccache for the main app build without affecting Conan dependency builds
|
||||
if [ "${{ inputs.ccache_enabled }}" = "true" ]; then
|
||||
@@ -200,8 +183,7 @@ runs:
|
||||
-G "${{ inputs.generator }}" \
|
||||
${CMAKE_CXX_FLAGS:+-DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS"} \
|
||||
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=${TOOLCHAIN_FILE} \
|
||||
-DCMAKE_BUILD_TYPE=${{ inputs.configuration }} \
|
||||
${{ inputs.cmake-args }}
|
||||
-DCMAKE_BUILD_TYPE=${{ inputs.configuration }}
|
||||
|
||||
- name: Show ccache config before build
|
||||
if: inputs.ccache_enabled == 'true'
|
||||
@@ -225,7 +207,7 @@ runs:
|
||||
VERBOSE_FLAG="-- -v"
|
||||
fi
|
||||
|
||||
cmake --build . --config ${{ inputs.configuration }} --parallel $(nproc) ${VERBOSE_FLAG} --target ${{ inputs.cmake-target }}
|
||||
cmake --build . --config ${{ inputs.configuration }} --parallel $(nproc) ${VERBOSE_FLAG}
|
||||
|
||||
- name: Show ccache statistics
|
||||
if: inputs.ccache_enabled == 'true'
|
||||
|
||||
113
.github/workflows/xahau-ga-nix.yml
vendored
113
.github/workflows/xahau-ga-nix.yml
vendored
@@ -47,8 +47,7 @@ jobs:
|
||||
"cxx": "g++-11",
|
||||
"compiler_version": 11,
|
||||
"stdlib": "libstdcxx",
|
||||
"configuration": "Debug",
|
||||
"job_type": "build"
|
||||
"configuration": "Debug"
|
||||
},
|
||||
{
|
||||
"compiler_id": "gcc-13-libstdcxx",
|
||||
@@ -56,19 +55,8 @@ jobs:
|
||||
"cc": "gcc-13",
|
||||
"cxx": "g++-13",
|
||||
"compiler_version": 13,
|
||||
"configuration": "Debug",
|
||||
"job_type": "build"
|
||||
},
|
||||
{
|
||||
"compiler_id": "gcc-13-libstdcxx",
|
||||
"compiler": "gcc",
|
||||
"cc": "gcc-13",
|
||||
"cxx": "g++-13",
|
||||
"gcov": "gcov-13",
|
||||
"compiler_version": 13,
|
||||
"stdlib": "libstdcxx",
|
||||
"configuration": "Debug",
|
||||
"job_type": "coverage"
|
||||
"configuration": "Debug"
|
||||
},
|
||||
{
|
||||
"compiler_id": "clang-14-libstdcxx-gcc11",
|
||||
@@ -78,8 +66,7 @@ jobs:
|
||||
"compiler_version": 14,
|
||||
"stdlib": "libstdcxx",
|
||||
"clang_gcc_toolchain": 11,
|
||||
"configuration": "Debug",
|
||||
"job_type": "build"
|
||||
"configuration": "Debug"
|
||||
},
|
||||
{
|
||||
"compiler_id": "clang-16-libstdcxx-gcc13",
|
||||
@@ -89,8 +76,7 @@ jobs:
|
||||
"compiler_version": 16,
|
||||
"stdlib": "libstdcxx",
|
||||
"clang_gcc_toolchain": 13,
|
||||
"configuration": "Debug",
|
||||
"job_type": "build"
|
||||
"configuration": "Debug"
|
||||
},
|
||||
{
|
||||
"compiler_id": "clang-17-libcxx",
|
||||
@@ -99,8 +85,7 @@ jobs:
|
||||
"cxx": "clang++-17",
|
||||
"compiler_version": 17,
|
||||
"stdlib": "libcxx",
|
||||
"configuration": "Debug",
|
||||
"job_type": "build"
|
||||
"configuration": "Debug"
|
||||
},
|
||||
{
|
||||
# Clang 18 - testing if it's faster than Clang 17 with libc++
|
||||
@@ -111,16 +96,14 @@ jobs:
|
||||
"cxx": "clang++-18",
|
||||
"compiler_version": 18,
|
||||
"stdlib": "libcxx",
|
||||
"configuration": "Debug",
|
||||
"job_type": "build"
|
||||
"configuration": "Debug"
|
||||
}
|
||||
]
|
||||
|
||||
# Minimal matrix for PRs and feature branches
|
||||
minimal_matrix = [
|
||||
full_matrix[1], # gcc-13 (middle-ground gcc)
|
||||
full_matrix[2], # gcc-13 coverage
|
||||
full_matrix[3] # clang-14 (mature, stable clang)
|
||||
full_matrix[2] # clang-14 (mature, stable clang)
|
||||
]
|
||||
|
||||
# Determine which matrix to use based on the target branch
|
||||
@@ -178,21 +161,14 @@ jobs:
|
||||
# Select the appropriate matrix
|
||||
if use_full:
|
||||
if force_full:
|
||||
print(f"Using FULL matrix (7 configs) - forced by [ci-nix-full-matrix] tag")
|
||||
print(f"Using FULL matrix (6 configs) - forced by [ci-nix-full-matrix] tag")
|
||||
else:
|
||||
print(f"Using FULL matrix (7 configs) - targeting main branch")
|
||||
print(f"Using FULL matrix (6 configs) - targeting main branch")
|
||||
matrix = full_matrix
|
||||
else:
|
||||
print(f"Using MINIMAL matrix (3 configs) - feature branch/PR")
|
||||
print(f"Using MINIMAL matrix (2 configs) - feature branch/PR")
|
||||
matrix = minimal_matrix
|
||||
|
||||
# Add runs_on based on job_type
|
||||
for entry in matrix:
|
||||
if entry.get("job_type") == "coverage":
|
||||
entry["runs_on"] = '["self-hosted", "generic", 24.04]'
|
||||
else:
|
||||
entry["runs_on"] = '["self-hosted", "generic", 20.04]'
|
||||
|
||||
|
||||
# Output the matrix as JSON
|
||||
output = json.dumps({"include": matrix})
|
||||
with open(os.environ['GITHUB_OUTPUT'], 'a') as f:
|
||||
@@ -200,7 +176,7 @@ jobs:
|
||||
|
||||
build:
|
||||
needs: matrix-setup
|
||||
runs-on: ${{ fromJSON(matrix.runs_on) }}
|
||||
runs-on: [self-hosted, generic, 20.04]
|
||||
container:
|
||||
image: ubuntu:24.04
|
||||
volumes:
|
||||
@@ -229,7 +205,7 @@ jobs:
|
||||
apt-get install -y software-properties-common
|
||||
add-apt-repository ppa:ubuntu-toolchain-r/test -y
|
||||
apt-get update
|
||||
apt-get install -y git python3 python-is-python3 pipx
|
||||
apt-get install -y python3 python-is-python3 pipx
|
||||
pipx ensurepath
|
||||
apt-get install -y cmake ninja-build ${{ matrix.cc }} ${{ matrix.cxx }} ccache
|
||||
apt-get install -y perl # for openssl build
|
||||
@@ -300,12 +276,6 @@ jobs:
|
||||
pipx install "conan>=2.0,<3"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
|
||||
# Install gcovr for coverage jobs
|
||||
if [ "${{ matrix.job_type }}" = "coverage" ]; then
|
||||
pipx install "gcovr>=7,<9"
|
||||
apt-get install -y lcov
|
||||
fi
|
||||
|
||||
- name: Check environment
|
||||
run: |
|
||||
echo "PATH:"
|
||||
@@ -315,13 +285,6 @@ jobs:
|
||||
which ${{ matrix.cc }} && ${{ matrix.cc }} --version || echo "${{ matrix.cc }} not found"
|
||||
which ${{ matrix.cxx }} && ${{ matrix.cxx }} --version || echo "${{ matrix.cxx }} not found"
|
||||
which ccache && ccache --version || echo "ccache not found"
|
||||
|
||||
# Check gcovr for coverage jobs
|
||||
if [ "${{ matrix.job_type }}" = "coverage" ]; then
|
||||
which gcov && gcov --version || echo "gcov not found"
|
||||
which gcovr && gcovr --version || echo "gcovr not found"
|
||||
fi
|
||||
|
||||
echo "---- Full Environment ----"
|
||||
env
|
||||
|
||||
@@ -349,7 +312,6 @@ jobs:
|
||||
gha_cache_enabled: 'false' # Disable caching for self hosted runner
|
||||
|
||||
- name: Build
|
||||
if: matrix.job_type == 'build'
|
||||
uses: ./.github/actions/xahau-ga-build
|
||||
with:
|
||||
generator: Ninja
|
||||
@@ -364,25 +326,7 @@ jobs:
|
||||
clang_gcc_toolchain: ${{ matrix.clang_gcc_toolchain || '' }}
|
||||
ccache_max_size: '100G'
|
||||
|
||||
- name: Build (Coverage)
|
||||
if: matrix.job_type == 'coverage'
|
||||
uses: ./.github/actions/xahau-ga-build
|
||||
with:
|
||||
generator: Ninja
|
||||
configuration: ${{ matrix.configuration }}
|
||||
build_dir: ${{ env.build_dir }}
|
||||
cc: ${{ matrix.cc }}
|
||||
cxx: ${{ matrix.cxx }}
|
||||
gcov: ${{ matrix.gcov }}
|
||||
compiler-id: ${{ matrix.compiler_id }}
|
||||
cache_version: ${{ env.CACHE_VERSION }}
|
||||
main_branch: ${{ env.MAIN_BRANCH_NAME }}
|
||||
cmake-args: '-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_CXX_FLAGS="-O0" -DCMAKE_C_FLAGS="-O0"'
|
||||
cmake-target: 'coverage'
|
||||
ccache_max_size: '100G'
|
||||
|
||||
- name: Set artifact name
|
||||
if: matrix.job_type == 'build'
|
||||
id: set-artifact-name
|
||||
run: |
|
||||
ARTIFACT_NAME="build-output-nix-${{ github.run_id }}-${{ matrix.compiler }}-${{ matrix.configuration }}"
|
||||
@@ -395,7 +339,6 @@ jobs:
|
||||
ls -la ${{ env.build_dir }} || echo "Build directory not found or empty"
|
||||
|
||||
- name: Run tests
|
||||
if: matrix.job_type == 'build'
|
||||
run: |
|
||||
# Ensure the binary exists before trying to run
|
||||
if [ -f "${{ env.build_dir }}/rippled" ]; then
|
||||
@@ -404,33 +347,3 @@ jobs:
|
||||
echo "Error: rippled executable not found in ${{ env.build_dir }}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Coverage-specific steps
|
||||
- name: Move coverage report
|
||||
if: matrix.job_type == 'coverage'
|
||||
shell: bash
|
||||
run: |
|
||||
mv "${{ env.build_dir }}/coverage.xml" ./
|
||||
|
||||
- name: Archive coverage report
|
||||
if: matrix.job_type == 'coverage'
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage.xml
|
||||
path: coverage.xml
|
||||
retention-days: 30
|
||||
|
||||
- name: Upload coverage report
|
||||
if: matrix.job_type == 'coverage'
|
||||
uses: wandalen/wretry.action/main@v3
|
||||
with:
|
||||
action: codecov/codecov-action@v4.3.0
|
||||
with: |
|
||||
files: coverage.xml
|
||||
fail_ci_if_error: true
|
||||
disable_search: true
|
||||
verbose: true
|
||||
plugin: noop
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
attempt_limit: 5
|
||||
attempt_delay: 210000 # in milliseconds
|
||||
|
||||
64
BUILD.md
64
BUILD.md
@@ -258,72 +258,12 @@ can't build earlier Boost versions.
|
||||
generator. Pass `--help` to see the rest of the command line options.
|
||||
|
||||
|
||||
## Coverage report
|
||||
|
||||
The coverage report is intended for developers using compilers GCC
|
||||
or Clang (including Apple Clang). It is generated by the build target `coverage`,
|
||||
which is only enabled when the `coverage` option is set, e.g. with
|
||||
`--options coverage=True` in `conan` or `-Dcoverage=ON` variable in `cmake`
|
||||
|
||||
Prerequisites for the coverage report:
|
||||
|
||||
- [gcovr tool][gcovr] (can be installed e.g. with [pip][python-pip])
|
||||
- `gcov` for GCC (installed with the compiler by default) or
|
||||
- `llvm-cov` for Clang (installed with the compiler by default)
|
||||
- `Debug` build type
|
||||
|
||||
A coverage report is created when the following steps are completed, in order:
|
||||
|
||||
1. `rippled` binary built with instrumentation data, enabled by the `coverage`
|
||||
option mentioned above
|
||||
2. completed run of unit tests, which populates coverage capture data
|
||||
3. completed run of the `gcovr` tool (which internally invokes either `gcov` or `llvm-cov`)
|
||||
to assemble both instrumentation data and the coverage capture data into a coverage report
|
||||
|
||||
The above steps are automated into a single target `coverage`. The instrumented
|
||||
`rippled` binary can also be used for regular development or testing work, at
|
||||
the cost of extra disk space utilization and a small performance hit
|
||||
(to store coverage capture). In case of a spurious failure of unit tests, it is
|
||||
possible to re-run the `coverage` target without rebuilding the `rippled` binary
|
||||
(since it is simply a dependency of the coverage report target). It is also possible
|
||||
to select only specific tests for the purpose of the coverage report, by setting
|
||||
the `coverage_test` variable in `cmake`
|
||||
|
||||
The default coverage report format is `html-details`, but the user
|
||||
can override it to any of the formats listed in `Builds/CMake/CodeCoverage.cmake`
|
||||
by setting the `coverage_format` variable in `cmake`. It is also possible
|
||||
to generate more than one format at a time by setting the `coverage_extra_args`
|
||||
variable in `cmake`. The specific command line used to run the `gcovr` tool will be
|
||||
displayed if the `CODE_COVERAGE_VERBOSE` variable is set.
|
||||
|
||||
By default, the code coverage tool runs parallel unit tests with `--unittest-jobs`
|
||||
set to the number of available CPU cores. This may cause spurious test
|
||||
errors on Apple. Developers can override the number of unit test jobs with
|
||||
the `coverage_test_parallelism` variable in `cmake`.
|
||||
|
||||
Example use with some cmake variables set:
|
||||
|
||||
```
|
||||
cd .build
|
||||
conan install .. --output-folder . --build missing --settings build_type=Debug
|
||||
cmake -DCMAKE_BUILD_TYPE=Debug -Dcoverage=ON -Dcoverage_test_parallelism=2 -Dcoverage_format=html-details -Dcoverage_extra_args="--json coverage.json" -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake ..
|
||||
cmake --build . --target coverage
|
||||
```
|
||||
|
||||
After the `coverage` target is completed, the generated coverage report will be
|
||||
stored inside the build directory, as either of:
|
||||
|
||||
- file named `coverage.`_extension_ , with a suitable extension for the report format, or
|
||||
- directory named `coverage`, with the `index.html` and other files inside, for the `html-details` or `html-nested` report formats.
|
||||
|
||||
|
||||
## Options
|
||||
|
||||
| Option | Default Value | Description |
|
||||
| --- | ---| ---|
|
||||
| `assert` | OFF | Enable assertions.
|
||||
| `reporting` | OFF | Build the reporting mode feature. |
|
||||
| `coverage` | OFF | Prepare the coverage report. |
|
||||
| `tests` | ON | Build tests. |
|
||||
| `unity` | ON | Configure a unity build. |
|
||||
| `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. |
|
||||
@@ -516,10 +456,6 @@ but it is more convenient to put them in a [profile][profile].
|
||||
|
||||
[1]: https://github.com/conan-io/conan-center-index/issues/13168
|
||||
[5]: https://en.wikipedia.org/wiki/Unity_build
|
||||
[6]: https://github.com/boostorg/beast/issues/2648
|
||||
[7]: https://github.com/boostorg/beast/issues/2661
|
||||
[gcovr]: https://gcovr.com/en/stable/getting-started.html
|
||||
[python-pip]: https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/
|
||||
[build_type]: https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html
|
||||
[runtime]: https://cmake.org/cmake/help/latest/variable/CMAKE_MSVC_RUNTIME_LIBRARY.html
|
||||
[toolchain]: https://cmake.org/cmake/help/latest/manual/cmake-toolchains.7.html
|
||||
|
||||
@@ -1,440 +0,0 @@
|
||||
# Copyright (c) 2012 - 2017, Lars Bilke
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its contributors
|
||||
# may be used to endorse or promote products derived from this software without
|
||||
# specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# CHANGES:
|
||||
#
|
||||
# 2012-01-31, Lars Bilke
|
||||
# - Enable Code Coverage
|
||||
#
|
||||
# 2013-09-17, Joakim Söderberg
|
||||
# - Added support for Clang.
|
||||
# - Some additional usage instructions.
|
||||
#
|
||||
# 2016-02-03, Lars Bilke
|
||||
# - Refactored functions to use named parameters
|
||||
#
|
||||
# 2017-06-02, Lars Bilke
|
||||
# - Merged with modified version from github.com/ufz/ogs
|
||||
#
|
||||
# 2019-05-06, Anatolii Kurotych
|
||||
# - Remove unnecessary --coverage flag
|
||||
#
|
||||
# 2019-12-13, FeRD (Frank Dana)
|
||||
# - Deprecate COVERAGE_LCOVR_EXCLUDES and COVERAGE_GCOVR_EXCLUDES lists in favor
|
||||
# of tool-agnostic COVERAGE_EXCLUDES variable, or EXCLUDE setup arguments.
|
||||
# - CMake 3.4+: All excludes can be specified relative to BASE_DIRECTORY
|
||||
# - All setup functions: accept BASE_DIRECTORY, EXCLUDE list
|
||||
# - Set lcov basedir with -b argument
|
||||
# - Add automatic --demangle-cpp in lcovr, if 'c++filt' is available (can be
|
||||
# overridden with NO_DEMANGLE option in setup_target_for_coverage_lcovr().)
|
||||
# - Delete output dir, .info file on 'make clean'
|
||||
# - Remove Python detection, since version mismatches will break gcovr
|
||||
# - Minor cleanup (lowercase function names, update examples...)
|
||||
#
|
||||
# 2019-12-19, FeRD (Frank Dana)
|
||||
# - Rename Lcov outputs, make filtered file canonical, fix cleanup for targets
|
||||
#
|
||||
# 2020-01-19, Bob Apthorpe
|
||||
# - Added gfortran support
|
||||
#
|
||||
# 2020-02-17, FeRD (Frank Dana)
|
||||
# - Make all add_custom_target()s VERBATIM to auto-escape wildcard characters
|
||||
# in EXCLUDEs, and remove manual escaping from gcovr targets
|
||||
#
|
||||
# 2021-01-19, Robin Mueller
|
||||
# - Add CODE_COVERAGE_VERBOSE option which will allow to print out commands which are run
|
||||
# - Added the option for users to set the GCOVR_ADDITIONAL_ARGS variable to supply additional
|
||||
# flags to the gcovr command
|
||||
#
|
||||
# 2020-05-04, Mihchael Davis
|
||||
# - Add -fprofile-abs-path to make gcno files contain absolute paths
|
||||
# - Fix BASE_DIRECTORY not working when defined
|
||||
# - Change BYPRODUCT from folder to index.html to stop ninja from complaining about double defines
|
||||
#
|
||||
# 2021-05-10, Martin Stump
|
||||
# - Check if the generator is multi-config before warning about non-Debug builds
|
||||
#
|
||||
# 2022-02-22, Marko Wehle
|
||||
# - Change gcovr output from -o <filename> for --xml <filename> and --html <filename> output respectively.
|
||||
# This will allow for Multiple Output Formats at the same time by making use of GCOVR_ADDITIONAL_ARGS, e.g. GCOVR_ADDITIONAL_ARGS "--txt".
|
||||
#
|
||||
# 2022-09-28, Sebastian Mueller
|
||||
# - fix append_coverage_compiler_flags_to_target to correctly add flags
|
||||
# - replace "-fprofile-arcs -ftest-coverage" with "--coverage" (equivalent)
|
||||
#
|
||||
# 2024-01-04, Bronek Kozicki
|
||||
# - remove setup_target_for_coverage_lcov (slow) and setup_target_for_coverage_fastcov (no support for Clang)
|
||||
# - fix Clang support by adding find_program( ... llvm-cov )
|
||||
# - add Apple Clang support by adding execute_process( COMMAND xcrun -f llvm-cov ... )
|
||||
# - add CODE_COVERAGE_GCOV_TOOL to explicitly select gcov tool and disable find_program
|
||||
# - replace both functions setup_target_for_coverage_gcovr_* with a single setup_target_for_coverage_gcovr
|
||||
# - add support for all gcovr output formats
|
||||
#
|
||||
# USAGE:
|
||||
#
|
||||
# 1. Copy this file into your cmake modules path.
|
||||
#
|
||||
# 2. Add the following line to your CMakeLists.txt (best inside an if-condition
|
||||
# using a CMake option() to enable it just optionally):
|
||||
# include(CodeCoverage)
|
||||
#
|
||||
# 3. Append necessary compiler flags for all supported source files:
|
||||
# append_coverage_compiler_flags()
|
||||
# Or for specific target:
|
||||
# append_coverage_compiler_flags_to_target(YOUR_TARGET_NAME)
|
||||
#
|
||||
# 3.a (OPTIONAL) Set appropriate optimization flags, e.g. -O0, -O1 or -Og
|
||||
#
|
||||
# 4. If you need to exclude additional directories from the report, specify them
|
||||
# using full paths in the COVERAGE_EXCLUDES variable before calling
|
||||
# setup_target_for_coverage_*().
|
||||
# Example:
|
||||
# set(COVERAGE_EXCLUDES
|
||||
# '${PROJECT_SOURCE_DIR}/src/dir1/*'
|
||||
# '/path/to/my/src/dir2/*')
|
||||
# Or, use the EXCLUDE argument to setup_target_for_coverage_*().
|
||||
# Example:
|
||||
# setup_target_for_coverage_gcovr(
|
||||
# NAME coverage
|
||||
# EXECUTABLE testrunner
|
||||
# EXCLUDE "${PROJECT_SOURCE_DIR}/src/dir1/*" "/path/to/my/src/dir2/*")
|
||||
#
|
||||
# 4.a NOTE: With CMake 3.4+, COVERAGE_EXCLUDES or EXCLUDE can also be set
|
||||
# relative to the BASE_DIRECTORY (default: PROJECT_SOURCE_DIR)
|
||||
# Example:
|
||||
# set(COVERAGE_EXCLUDES "dir1/*")
|
||||
# setup_target_for_coverage_gcovr(
|
||||
# NAME coverage
|
||||
# EXECUTABLE testrunner
|
||||
# FORMAT html-details
|
||||
# BASE_DIRECTORY "${PROJECT_SOURCE_DIR}/src"
|
||||
# EXCLUDE "dir2/*")
|
||||
#
|
||||
# 4.b If you need to pass specific options to gcovr, specify them in
|
||||
# GCOVR_ADDITIONAL_ARGS variable.
|
||||
# Example:
|
||||
# set (GCOVR_ADDITIONAL_ARGS --exclude-throw-branches --exclude-noncode-lines -s)
|
||||
# setup_target_for_coverage_gcovr(
|
||||
# NAME coverage
|
||||
# EXECUTABLE testrunner
|
||||
# EXCLUDE "src/dir1" "src/dir2")
|
||||
#
|
||||
# 5. Use the functions described below to create a custom make target which
|
||||
# runs your test executable and produces a code coverage report.
|
||||
#
|
||||
# 6. Build a Debug build:
|
||||
# cmake -DCMAKE_BUILD_TYPE=Debug ..
|
||||
# make
|
||||
# make my_coverage_target
|
||||
|
||||
include(CMakeParseArguments)
|
||||
|
||||
option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE)
|
||||
|
||||
# Check prereqs
|
||||
find_program( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test)
|
||||
|
||||
if(DEFINED CODE_COVERAGE_GCOV_TOOL)
|
||||
set(GCOV_TOOL "${CODE_COVERAGE_GCOV_TOOL}")
|
||||
elseif(DEFINED ENV{CODE_COVERAGE_GCOV_TOOL})
|
||||
set(GCOV_TOOL "$ENV{CODE_COVERAGE_GCOV_TOOL}")
|
||||
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
|
||||
if(APPLE)
|
||||
execute_process( COMMAND xcrun -f llvm-cov
|
||||
OUTPUT_VARIABLE LLVMCOV_PATH
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
)
|
||||
else()
|
||||
find_program( LLVMCOV_PATH llvm-cov )
|
||||
endif()
|
||||
if(LLVMCOV_PATH)
|
||||
set(GCOV_TOOL "${LLVMCOV_PATH} gcov")
|
||||
endif()
|
||||
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
|
||||
find_program( GCOV_PATH gcov )
|
||||
set(GCOV_TOOL "${GCOV_PATH}")
|
||||
endif()
|
||||
|
||||
# Check supported compiler (Clang, GNU and Flang)
|
||||
get_property(LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
|
||||
foreach(LANG ${LANGUAGES})
|
||||
if("${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
|
||||
if("${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS 3)
|
||||
message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...")
|
||||
endif()
|
||||
elseif(NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU"
|
||||
AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(LLVM)?[Ff]lang")
|
||||
message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
set(COVERAGE_COMPILER_FLAGS "-g --coverage"
|
||||
CACHE INTERNAL "")
|
||||
if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
|
||||
include(CheckCXXCompilerFlag)
|
||||
check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path)
|
||||
if(HAVE_cxx_fprofile_abs_path)
|
||||
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
|
||||
endif()
|
||||
include(CheckCCompilerFlag)
|
||||
check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path)
|
||||
if(HAVE_c_fprofile_abs_path)
|
||||
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
set(CMAKE_Fortran_FLAGS_COVERAGE
|
||||
${COVERAGE_COMPILER_FLAGS}
|
||||
CACHE STRING "Flags used by the Fortran compiler during coverage builds."
|
||||
FORCE )
|
||||
set(CMAKE_CXX_FLAGS_COVERAGE
|
||||
${COVERAGE_COMPILER_FLAGS}
|
||||
CACHE STRING "Flags used by the C++ compiler during coverage builds."
|
||||
FORCE )
|
||||
set(CMAKE_C_FLAGS_COVERAGE
|
||||
${COVERAGE_COMPILER_FLAGS}
|
||||
CACHE STRING "Flags used by the C compiler during coverage builds."
|
||||
FORCE )
|
||||
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
|
||||
""
|
||||
CACHE STRING "Flags used for linking binaries during coverage builds."
|
||||
FORCE )
|
||||
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
|
||||
""
|
||||
CACHE STRING "Flags used by the shared libraries linker during coverage builds."
|
||||
FORCE )
|
||||
mark_as_advanced(
|
||||
CMAKE_Fortran_FLAGS_COVERAGE
|
||||
CMAKE_CXX_FLAGS_COVERAGE
|
||||
CMAKE_C_FLAGS_COVERAGE
|
||||
CMAKE_EXE_LINKER_FLAGS_COVERAGE
|
||||
CMAKE_SHARED_LINKER_FLAGS_COVERAGE )
|
||||
|
||||
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
||||
if(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
|
||||
message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading")
|
||||
endif() # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)
|
||||
|
||||
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
|
||||
link_libraries(gcov)
|
||||
endif()
|
||||
|
||||
# Defines a target for running and collection code coverage information
|
||||
# Builds dependencies, runs the given executable and outputs reports.
|
||||
# NOTE! The executable should always have a ZERO as exit code otherwise
|
||||
# the coverage generation will not complete.
|
||||
#
|
||||
# setup_target_for_coverage_gcovr(
|
||||
# NAME ctest_coverage # New target name
|
||||
# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR
|
||||
# DEPENDENCIES executable_target # Dependencies to build first
|
||||
# BASE_DIRECTORY "../" # Base directory for report
|
||||
# # (defaults to PROJECT_SOURCE_DIR)
|
||||
# FORMAT "cobertura" # Output format, one of:
|
||||
# # xml cobertura sonarqube json-summary
|
||||
# # json-details coveralls csv txt
|
||||
# # html-single html-nested html-details
|
||||
# # (xml is an alias to cobertura;
|
||||
# # if no format is set, defaults to xml)
|
||||
# EXCLUDE "src/dir1/*" "src/dir2/*" # Patterns to exclude (can be relative
|
||||
# # to BASE_DIRECTORY, with CMake 3.4+)
|
||||
# )
|
||||
# The user can set the variable GCOVR_ADDITIONAL_ARGS to supply additional flags to the
|
||||
# GCVOR command.
|
||||
function(setup_target_for_coverage_gcovr)
|
||||
set(options NONE)
|
||||
set(oneValueArgs BASE_DIRECTORY NAME FORMAT)
|
||||
set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
|
||||
cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
|
||||
|
||||
if(NOT GCOV_TOOL)
|
||||
message(FATAL_ERROR "Could not find gcov or llvm-cov tool! Aborting...")
|
||||
endif()
|
||||
|
||||
if(NOT GCOVR_PATH)
|
||||
message(FATAL_ERROR "Could not find gcovr tool! Aborting...")
|
||||
endif()
|
||||
|
||||
# Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
|
||||
if(DEFINED Coverage_BASE_DIRECTORY)
|
||||
get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
|
||||
else()
|
||||
set(BASEDIR ${PROJECT_SOURCE_DIR})
|
||||
endif()
|
||||
|
||||
if(NOT DEFINED Coverage_FORMAT)
|
||||
set(Coverage_FORMAT xml)
|
||||
endif()
|
||||
|
||||
if("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
|
||||
message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting...")
|
||||
else()
|
||||
if((Coverage_FORMAT STREQUAL "html-details")
|
||||
OR (Coverage_FORMAT STREQUAL "html-nested"))
|
||||
set(GCOVR_OUTPUT_FILE ${PROJECT_BINARY_DIR}/${Coverage_NAME}/index.html)
|
||||
set(GCOVR_CREATE_FOLDER ${PROJECT_BINARY_DIR}/${Coverage_NAME})
|
||||
elseif(Coverage_FORMAT STREQUAL "html-single")
|
||||
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.html)
|
||||
elseif((Coverage_FORMAT STREQUAL "json-summary")
|
||||
OR (Coverage_FORMAT STREQUAL "json-details")
|
||||
OR (Coverage_FORMAT STREQUAL "coveralls"))
|
||||
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.json)
|
||||
elseif(Coverage_FORMAT STREQUAL "txt")
|
||||
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.txt)
|
||||
elseif(Coverage_FORMAT STREQUAL "csv")
|
||||
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.csv)
|
||||
else()
|
||||
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.xml)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if((Coverage_FORMAT STREQUAL "cobertura")
|
||||
OR (Coverage_FORMAT STREQUAL "xml"))
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura "${GCOVR_OUTPUT_FILE}" )
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura-pretty )
|
||||
set(Coverage_FORMAT cobertura) # overwrite xml
|
||||
elseif(Coverage_FORMAT STREQUAL "sonarqube")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --sonarqube "${GCOVR_OUTPUT_FILE}" )
|
||||
elseif(Coverage_FORMAT STREQUAL "json-summary")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary "${GCOVR_OUTPUT_FILE}" )
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary-pretty)
|
||||
elseif(Coverage_FORMAT STREQUAL "json-details")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --json "${GCOVR_OUTPUT_FILE}" )
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --json-pretty)
|
||||
elseif(Coverage_FORMAT STREQUAL "coveralls")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls "${GCOVR_OUTPUT_FILE}" )
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls-pretty)
|
||||
elseif(Coverage_FORMAT STREQUAL "csv")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --csv "${GCOVR_OUTPUT_FILE}" )
|
||||
elseif(Coverage_FORMAT STREQUAL "txt")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --txt "${GCOVR_OUTPUT_FILE}" )
|
||||
elseif(Coverage_FORMAT STREQUAL "html-single")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --html "${GCOVR_OUTPUT_FILE}" )
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --html-self-contained)
|
||||
elseif(Coverage_FORMAT STREQUAL "html-nested")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --html-nested "${GCOVR_OUTPUT_FILE}" )
|
||||
elseif(Coverage_FORMAT STREQUAL "html-details")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --html-details "${GCOVR_OUTPUT_FILE}" )
|
||||
else()
|
||||
message(FATAL_ERROR "Unsupported output style ${Coverage_FORMAT}! Aborting...")
|
||||
endif()
|
||||
|
||||
# Collect excludes (CMake 3.4+: Also compute absolute paths)
|
||||
set(GCOVR_EXCLUDES "")
|
||||
foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES})
|
||||
if(CMAKE_VERSION VERSION_GREATER 3.4)
|
||||
get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR})
|
||||
endif()
|
||||
list(APPEND GCOVR_EXCLUDES "${EXCLUDE}")
|
||||
endforeach()
|
||||
list(REMOVE_DUPLICATES GCOVR_EXCLUDES)
|
||||
|
||||
# Combine excludes to several -e arguments
|
||||
set(GCOVR_EXCLUDE_ARGS "")
|
||||
foreach(EXCLUDE ${GCOVR_EXCLUDES})
|
||||
list(APPEND GCOVR_EXCLUDE_ARGS "-e")
|
||||
list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}")
|
||||
endforeach()
|
||||
|
||||
# Set up commands which will be run to generate coverage data
|
||||
# Run tests
|
||||
set(GCOVR_EXEC_TESTS_CMD
|
||||
${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
|
||||
)
|
||||
|
||||
# Create folder
|
||||
if(DEFINED GCOVR_CREATE_FOLDER)
|
||||
set(GCOVR_FOLDER_CMD
|
||||
${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER})
|
||||
else()
|
||||
set(GCOVR_FOLDER_CMD echo) # dummy
|
||||
endif()
|
||||
|
||||
# Running gcovr
|
||||
set(GCOVR_CMD
|
||||
${GCOVR_PATH}
|
||||
--gcov-executable ${GCOV_TOOL}
|
||||
--gcov-ignore-parse-errors=negative_hits.warn_once_per_file
|
||||
-r ${BASEDIR}
|
||||
${GCOVR_ADDITIONAL_ARGS}
|
||||
${GCOVR_EXCLUDE_ARGS}
|
||||
--object-directory=${PROJECT_BINARY_DIR}
|
||||
)
|
||||
|
||||
if(CODE_COVERAGE_VERBOSE)
|
||||
message(STATUS "Executed command report")
|
||||
|
||||
message(STATUS "Command to run tests: ")
|
||||
string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}")
|
||||
message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")
|
||||
|
||||
if(NOT GCOVR_FOLDER_CMD STREQUAL "echo")
|
||||
message(STATUS "Command to create a folder: ")
|
||||
string(REPLACE ";" " " GCOVR_FOLDER_CMD_SPACED "${GCOVR_FOLDER_CMD}")
|
||||
message(STATUS "${GCOVR_FOLDER_CMD_SPACED}")
|
||||
endif()
|
||||
|
||||
message(STATUS "Command to generate gcovr coverage data: ")
|
||||
string(REPLACE ";" " " GCOVR_CMD_SPACED "${GCOVR_CMD}")
|
||||
message(STATUS "${GCOVR_CMD_SPACED}")
|
||||
endif()
|
||||
|
||||
add_custom_target(${Coverage_NAME}
|
||||
COMMAND ${GCOVR_EXEC_TESTS_CMD}
|
||||
COMMAND ${GCOVR_FOLDER_CMD}
|
||||
COMMAND ${GCOVR_CMD}
|
||||
|
||||
BYPRODUCTS ${GCOVR_OUTPUT_FILE}
|
||||
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
|
||||
DEPENDS ${Coverage_DEPENDENCIES}
|
||||
VERBATIM # Protect arguments to commands
|
||||
COMMENT "Running gcovr to produce code coverage report."
|
||||
)
|
||||
|
||||
# Show info where to find the report
|
||||
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
|
||||
COMMAND ;
|
||||
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
|
||||
)
|
||||
endfunction() # setup_target_for_coverage_gcovr
|
||||
|
||||
function(append_coverage_compiler_flags)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
|
||||
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
|
||||
message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}")
|
||||
endfunction() # append_coverage_compiler_flags
|
||||
|
||||
# Setup coverage for specific library
|
||||
function(append_coverage_compiler_flags_to_target name)
|
||||
separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}")
|
||||
target_compile_options(${name} PRIVATE ${_flag_list})
|
||||
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
|
||||
target_link_libraries(${name} PRIVATE gcov)
|
||||
endif()
|
||||
endfunction()
|
||||
@@ -957,6 +957,7 @@ if (tests)
|
||||
subdir: net
|
||||
#]===============================]
|
||||
src/test/net/DatabaseDownloader_test.cpp
|
||||
src/test/net/HTTPClient_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: nodestore
|
||||
|
||||
@@ -2,37 +2,97 @@
|
||||
coverage report target
|
||||
#]===================================================================]
|
||||
|
||||
if(NOT coverage)
|
||||
message(FATAL_ERROR "Code coverage not enabled! Aborting ...")
|
||||
endif()
|
||||
if (coverage)
|
||||
if (is_clang)
|
||||
if (APPLE)
|
||||
execute_process (COMMAND xcrun -f llvm-profdata
|
||||
OUTPUT_VARIABLE LLVM_PROFDATA
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
else ()
|
||||
find_program (LLVM_PROFDATA llvm-profdata)
|
||||
endif ()
|
||||
if (NOT LLVM_PROFDATA)
|
||||
message (WARNING "unable to find llvm-profdata - skipping coverage_report target")
|
||||
endif ()
|
||||
|
||||
if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
|
||||
message(WARNING "Code coverage on Windows is not supported, ignoring 'coverage' flag")
|
||||
return()
|
||||
endif()
|
||||
if (APPLE)
|
||||
execute_process (COMMAND xcrun -f llvm-cov
|
||||
OUTPUT_VARIABLE LLVM_COV
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
else ()
|
||||
find_program (LLVM_COV llvm-cov)
|
||||
endif ()
|
||||
if (NOT LLVM_COV)
|
||||
message (WARNING "unable to find llvm-cov - skipping coverage_report target")
|
||||
endif ()
|
||||
|
||||
include(CodeCoverage)
|
||||
set (extract_pattern "")
|
||||
if (coverage_core_only)
|
||||
set (extract_pattern "${CMAKE_CURRENT_SOURCE_DIR}/src/ripple/")
|
||||
endif ()
|
||||
|
||||
# The instructions for these commands come from the `CodeCoverage` module,
|
||||
# which was copied from https://github.com/bilke/cmake-modules, commit fb7d2a3,
|
||||
# then locally changed (see CHANGES: section in `CodeCoverage.cmake`)
|
||||
if (LLVM_COV AND LLVM_PROFDATA)
|
||||
add_custom_target (coverage_report
|
||||
USES_TERMINAL
|
||||
COMMAND ${CMAKE_COMMAND} -E echo "Generating coverage - results will be in ${CMAKE_BINARY_DIR}/coverage/index.html."
|
||||
COMMAND ${CMAKE_COMMAND} -E echo "Running rippled tests."
|
||||
COMMAND rippled --unittest$<$<BOOL:${coverage_test}>:=${coverage_test}> --quiet --unittest-log
|
||||
COMMAND ${LLVM_PROFDATA}
|
||||
merge -sparse default.profraw -o rip.profdata
|
||||
COMMAND ${CMAKE_COMMAND} -E echo "Summary of coverage:"
|
||||
COMMAND ${LLVM_COV}
|
||||
report -instr-profile=rip.profdata
|
||||
$<TARGET_FILE:rippled> ${extract_pattern}
|
||||
# generate html report
|
||||
COMMAND ${LLVM_COV}
|
||||
show -format=html -output-dir=${CMAKE_BINARY_DIR}/coverage
|
||||
-instr-profile=rip.profdata
|
||||
$<TARGET_FILE:rippled> ${extract_pattern}
|
||||
BYPRODUCTS coverage/index.html)
|
||||
endif ()
|
||||
elseif (is_gcc)
|
||||
find_program (LCOV lcov)
|
||||
if (NOT LCOV)
|
||||
message (WARNING "unable to find lcov - skipping coverage_report target")
|
||||
endif ()
|
||||
|
||||
set(GCOVR_ADDITIONAL_ARGS ${coverage_extra_args})
|
||||
if(NOT GCOVR_ADDITIONAL_ARGS STREQUAL "")
|
||||
separate_arguments(GCOVR_ADDITIONAL_ARGS)
|
||||
endif()
|
||||
find_program (GENHTML genhtml)
|
||||
if (NOT GENHTML)
|
||||
message (WARNING "unable to find genhtml - skipping coverage_report target")
|
||||
endif ()
|
||||
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS
|
||||
--exclude-throw-branches
|
||||
--exclude-noncode-lines
|
||||
--exclude-unreachable-branches -s
|
||||
-j ${coverage_test_parallelism})
|
||||
set (extract_pattern "*")
|
||||
if (coverage_core_only)
|
||||
set (extract_pattern "*/src/ripple/*")
|
||||
endif ()
|
||||
|
||||
setup_target_for_coverage_gcovr(
|
||||
NAME coverage
|
||||
FORMAT ${coverage_format}
|
||||
EXECUTABLE rippled
|
||||
EXECUTABLE_ARGS --unittest$<$<BOOL:${coverage_test}>:=${coverage_test}> --unittest-jobs ${coverage_test_parallelism} --quiet --unittest-log
|
||||
EXCLUDE "src/test" "${CMAKE_BINARY_DIR}/proto_gen" "${CMAKE_BINARY_DIR}/proto_gen_grpc"
|
||||
DEPENDENCIES rippled
|
||||
)
|
||||
if (LCOV AND GENHTML)
|
||||
add_custom_target (coverage_report
|
||||
USES_TERMINAL
|
||||
COMMAND ${CMAKE_COMMAND} -E echo "Generating coverage- results will be in ${CMAKE_BINARY_DIR}/coverage/index.html."
|
||||
# create baseline info file
|
||||
COMMAND ${LCOV}
|
||||
--no-external -d "${CMAKE_CURRENT_SOURCE_DIR}" -c -d . -i -o baseline.info
|
||||
| grep -v "ignoring data for external file"
|
||||
# run tests
|
||||
COMMAND ${CMAKE_COMMAND} -E echo "Running rippled tests for coverage report."
|
||||
COMMAND rippled --unittest$<$<BOOL:${coverage_test}>:=${coverage_test}> --quiet --unittest-log
|
||||
# Create test coverage data file
|
||||
COMMAND ${LCOV}
|
||||
--no-external -d "${CMAKE_CURRENT_SOURCE_DIR}" -c -d . -o tests.info
|
||||
| grep -v "ignoring data for external file"
|
||||
# Combine baseline and test coverage data
|
||||
COMMAND ${LCOV}
|
||||
-a baseline.info -a tests.info -o lcov-all.info
|
||||
# extract our files
|
||||
COMMAND ${LCOV}
|
||||
-e lcov-all.info "${extract_pattern}" -o lcov.info
|
||||
COMMAND ${CMAKE_COMMAND} -E echo "Summary of coverage:"
|
||||
COMMAND ${LCOV} --summary lcov.info
|
||||
# generate HTML report
|
||||
COMMAND ${GENHTML}
|
||||
-o ${CMAKE_BINARY_DIR}/coverage lcov.info
|
||||
BYPRODUCTS coverage/index.html)
|
||||
endif ()
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
@@ -23,15 +23,15 @@ target_compile_options (opts
|
||||
INTERFACE
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<COMPILE_LANGUAGE:CXX>>:-Wsuggest-override>
|
||||
$<$<BOOL:${perf}>:-fno-omit-frame-pointer>
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${coverage}>>:-g --coverage -fprofile-abs-path>
|
||||
$<$<AND:$<BOOL:${is_clang}>,$<BOOL:${coverage}>>:-g --coverage>
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${coverage}>>:-fprofile-arcs -ftest-coverage>
|
||||
$<$<AND:$<BOOL:${is_clang}>,$<BOOL:${coverage}>>:-fprofile-instr-generate -fcoverage-mapping>
|
||||
$<$<BOOL:${profile}>:-pg>
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
|
||||
|
||||
target_link_libraries (opts
|
||||
INTERFACE
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${coverage}>>:-g --coverage -fprofile-abs-path>
|
||||
$<$<AND:$<BOOL:${is_clang}>,$<BOOL:${coverage}>>:-g --coverage>
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${coverage}>>:-fprofile-arcs -ftest-coverage>
|
||||
$<$<AND:$<BOOL:${is_clang}>,$<BOOL:${coverage}>>:-fprofile-instr-generate -fcoverage-mapping>
|
||||
$<$<BOOL:${profile}>:-pg>
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
|
||||
|
||||
|
||||
@@ -2,8 +2,6 @@
|
||||
convenience variables and sanity checks
|
||||
#]===================================================================]
|
||||
|
||||
include(ProcessorCount)
|
||||
|
||||
if (NOT ep_procs)
|
||||
ProcessorCount(ep_procs)
|
||||
if (ep_procs GREATER 1)
|
||||
|
||||
@@ -2,129 +2,121 @@
|
||||
declare user options/settings
|
||||
#]===================================================================]
|
||||
|
||||
include(ProcessorCount)
|
||||
option (assert "Enables asserts, even in release builds" OFF)
|
||||
|
||||
ProcessorCount(PROCESSOR_COUNT)
|
||||
option (reporting "Build rippled with reporting mode enabled" OFF)
|
||||
|
||||
option(assert "Enables asserts, even in release builds" OFF)
|
||||
option (tests "Build tests" ON)
|
||||
|
||||
option(reporting "Build rippled with reporting mode enabled" OFF)
|
||||
|
||||
option(tests "Build tests" ON)
|
||||
|
||||
option(unity "Creates a build using UNITY support in cmake. This is the default" ON)
|
||||
if(unity)
|
||||
if(NOT is_ci)
|
||||
set(CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
|
||||
endif()
|
||||
endif()
|
||||
if(is_gcc OR is_clang)
|
||||
option(coverage "Generates coverage info." OFF)
|
||||
option(profile "Add profiling flags" OFF)
|
||||
set(coverage_test_parallelism "${PROCESSOR_COUNT}" CACHE STRING
|
||||
"Unit tests parallelism for the purpose of coverage report.")
|
||||
set(coverage_format "html-details" CACHE STRING
|
||||
"Output format of the coverage report.")
|
||||
set(coverage_extra_args "" CACHE STRING
|
||||
"Additional arguments to pass to gcovr.")
|
||||
set(coverage_test "" CACHE STRING
|
||||
option (unity "Creates a build using UNITY support in cmake. This is the default" ON)
|
||||
if (unity)
|
||||
if (NOT is_ci)
|
||||
set (CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
|
||||
endif ()
|
||||
endif ()
|
||||
if (is_gcc OR is_clang)
|
||||
option (coverage "Generates coverage info." OFF)
|
||||
option (profile "Add profiling flags" OFF)
|
||||
set (coverage_test "" CACHE STRING
|
||||
"On gcc & clang, the specific unit test(s) to run for coverage. Default is all tests.")
|
||||
if(coverage_test AND NOT coverage)
|
||||
set(coverage ON CACHE BOOL "gcc/clang only" FORCE)
|
||||
endif()
|
||||
option(wextra "compile with extra gcc/clang warnings enabled" ON)
|
||||
else()
|
||||
set(profile OFF CACHE BOOL "gcc/clang only" FORCE)
|
||||
set(coverage OFF CACHE BOOL "gcc/clang only" FORCE)
|
||||
set(wextra OFF CACHE BOOL "gcc/clang only" FORCE)
|
||||
endif()
|
||||
if(is_linux)
|
||||
option(BUILD_SHARED_LIBS "build shared ripple libraries" OFF)
|
||||
option(static "link protobuf, openssl, libc++, and boost statically" ON)
|
||||
option(perf "Enables flags that assist with perf recording" OFF)
|
||||
option(use_gold "enables detection of gold (binutils) linker" ON)
|
||||
option(use_mold "enables detection of mold (binutils) linker" ON)
|
||||
else()
|
||||
if (coverage_test AND NOT coverage)
|
||||
set (coverage ON CACHE BOOL "gcc/clang only" FORCE)
|
||||
endif ()
|
||||
option (coverage_core_only
|
||||
"Include only src/ripple files when generating coverage report. \
|
||||
Set to OFF to include all sources in coverage report."
|
||||
ON)
|
||||
option (wextra "compile with extra gcc/clang warnings enabled" ON)
|
||||
else ()
|
||||
set (profile OFF CACHE BOOL "gcc/clang only" FORCE)
|
||||
set (coverage OFF CACHE BOOL "gcc/clang only" FORCE)
|
||||
set (wextra OFF CACHE BOOL "gcc/clang only" FORCE)
|
||||
endif ()
|
||||
if (is_linux)
|
||||
option (BUILD_SHARED_LIBS "build shared ripple libraries" OFF)
|
||||
option (static "link protobuf, openssl, libc++, and boost statically" ON)
|
||||
option (perf "Enables flags that assist with perf recording" OFF)
|
||||
option (use_gold "enables detection of gold (binutils) linker" ON)
|
||||
else ()
|
||||
# we are not ready to allow shared-libs on windows because it would require
|
||||
# export declarations. On macos it's more feasible, but static openssl
|
||||
# produces odd linker errors, thus we disable shared lib builds for now.
|
||||
set(BUILD_SHARED_LIBS OFF CACHE BOOL "build shared ripple libraries - OFF for win/macos" FORCE)
|
||||
set(static ON CACHE BOOL "static link, linux only. ON for WIN/macos" FORCE)
|
||||
set(perf OFF CACHE BOOL "perf flags, linux only" FORCE)
|
||||
set(use_gold OFF CACHE BOOL "gold linker, linux only" FORCE)
|
||||
set(use_mold OFF CACHE BOOL "mold linker, linux only" FORCE)
|
||||
endif()
|
||||
if(is_clang)
|
||||
option(use_lld "enables detection of lld linker" ON)
|
||||
else()
|
||||
set(use_lld OFF CACHE BOOL "try lld linker, clang only" FORCE)
|
||||
endif()
|
||||
option(jemalloc "Enables jemalloc for heap profiling" OFF)
|
||||
option(werr "treat warnings as errors" OFF)
|
||||
option(local_protobuf
|
||||
set (BUILD_SHARED_LIBS OFF CACHE BOOL "build shared ripple libraries - OFF for win/macos" FORCE)
|
||||
set (static ON CACHE BOOL "static link, linux only. ON for WIN/macos" FORCE)
|
||||
set (perf OFF CACHE BOOL "perf flags, linux only" FORCE)
|
||||
set (use_gold OFF CACHE BOOL "gold linker, linux only" FORCE)
|
||||
endif ()
|
||||
if (is_clang)
|
||||
option (use_lld "enables detection of lld linker" ON)
|
||||
else ()
|
||||
set (use_lld OFF CACHE BOOL "try lld linker, clang only" FORCE)
|
||||
endif ()
|
||||
option (jemalloc "Enables jemalloc for heap profiling" OFF)
|
||||
option (werr "treat warnings as errors" OFF)
|
||||
option (local_protobuf
|
||||
"Force a local build of protobuf instead of looking for an installed version." OFF)
|
||||
option(local_grpc
|
||||
option (local_grpc
|
||||
"Force a local build of gRPC instead of looking for an installed version." OFF)
|
||||
|
||||
# this one is a string and therefore can't be an option
|
||||
set(san "" CACHE STRING "On gcc & clang, add sanitizer instrumentation")
|
||||
set_property(CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
|
||||
if(san)
|
||||
string(TOLOWER ${san} san)
|
||||
set(SAN_FLAG "-fsanitize=${san}")
|
||||
set(SAN_LIB "")
|
||||
if(is_gcc)
|
||||
if(san STREQUAL "address")
|
||||
set(SAN_LIB "asan")
|
||||
elseif(san STREQUAL "thread")
|
||||
set(SAN_LIB "tsan")
|
||||
elseif(san STREQUAL "memory")
|
||||
set(SAN_LIB "msan")
|
||||
elseif(san STREQUAL "undefined")
|
||||
set(SAN_LIB "ubsan")
|
||||
endif()
|
||||
endif()
|
||||
set(_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
|
||||
set(CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
|
||||
check_cxx_compiler_flag(${SAN_FLAG} COMPILER_SUPPORTS_SAN)
|
||||
set(CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
|
||||
if(NOT COMPILER_SUPPORTS_SAN)
|
||||
message(FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
|
||||
endif()
|
||||
endif()
|
||||
set(container_label "" CACHE STRING "tag to use for package building containers")
|
||||
option(packages_only
|
||||
set (san "" CACHE STRING "On gcc & clang, add sanitizer instrumentation")
|
||||
set_property (CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
|
||||
if (san)
|
||||
string (TOLOWER ${san} san)
|
||||
set (SAN_FLAG "-fsanitize=${san}")
|
||||
set (SAN_LIB "")
|
||||
if (is_gcc)
|
||||
if (san STREQUAL "address")
|
||||
set (SAN_LIB "asan")
|
||||
elseif (san STREQUAL "thread")
|
||||
set (SAN_LIB "tsan")
|
||||
elseif (san STREQUAL "memory")
|
||||
set (SAN_LIB "msan")
|
||||
elseif (san STREQUAL "undefined")
|
||||
set (SAN_LIB "ubsan")
|
||||
endif ()
|
||||
endif ()
|
||||
set (_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
|
||||
set (CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
|
||||
check_cxx_compiler_flag (${SAN_FLAG} COMPILER_SUPPORTS_SAN)
|
||||
set (CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
|
||||
if (NOT COMPILER_SUPPORTS_SAN)
|
||||
message (FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
|
||||
endif ()
|
||||
endif ()
|
||||
set (container_label "" CACHE STRING "tag to use for package building containers")
|
||||
option (packages_only
|
||||
"ONLY generate package building targets. This is special use-case and almost \
|
||||
certainly not what you want. Use with caution as you won't be able to build \
|
||||
any compiled targets locally." OFF)
|
||||
option(have_package_container
|
||||
option (have_package_container
|
||||
"Sometimes you already have the tagged container you want to use for package \
|
||||
building and you don't want docker to rebuild it. This flag will detach the \
|
||||
dependency of the package build from the container build. It's an advanced \
|
||||
use case and most likely you should not be touching this flag." OFF)
|
||||
|
||||
# the remaining options are obscure and rarely used
|
||||
option(beast_no_unit_test_inline
|
||||
option (beast_no_unit_test_inline
|
||||
"Prevents unit test definitions from being inserted into global table"
|
||||
OFF)
|
||||
option(single_io_service_thread
|
||||
option (single_io_service_thread
|
||||
"Restricts the number of threads calling io_service::run to one. \
|
||||
This can be useful when debugging."
|
||||
OFF)
|
||||
option(boost_show_deprecated
|
||||
option (boost_show_deprecated
|
||||
"Allow boost to fail on deprecated usage. Only useful if you're trying\
|
||||
to find deprecated calls."
|
||||
OFF)
|
||||
option(beast_hashers
|
||||
option (beast_hashers
|
||||
"Use local implementations for sha/ripemd hashes (experimental, not recommended)"
|
||||
OFF)
|
||||
|
||||
if(WIN32)
|
||||
option(beast_disable_autolink "Disables autolinking of system libraries on WIN32" OFF)
|
||||
else()
|
||||
set(beast_disable_autolink OFF CACHE BOOL "WIN32 only" FORCE)
|
||||
endif()
|
||||
if(coverage)
|
||||
message(STATUS "coverage build requested - forcing Debug build")
|
||||
set(CMAKE_BUILD_TYPE Debug CACHE STRING "build type" FORCE)
|
||||
endif()
|
||||
if (WIN32)
|
||||
option (beast_disable_autolink "Disables autolinking of system libraries on WIN32" OFF)
|
||||
else ()
|
||||
set (beast_disable_autolink OFF CACHE BOOL "WIN32 only" FORCE)
|
||||
endif ()
|
||||
if (coverage)
|
||||
message (STATUS "coverage build requested - forcing Debug build")
|
||||
set (CMAKE_BUILD_TYPE Debug CACHE STRING "build type" FORCE)
|
||||
endif ()
|
||||
|
||||
@@ -149,6 +149,7 @@ test.ledger > ripple.ledger
|
||||
test.ledger > ripple.protocol
|
||||
test.ledger > test.jtx
|
||||
test.ledger > test.toplevel
|
||||
test.net > ripple.basics
|
||||
test.net > ripple.net
|
||||
test.net > test.jtx
|
||||
test.net > test.toplevel
|
||||
|
||||
@@ -64,6 +64,7 @@ include (CheckCXXCompilerFlag)
|
||||
include (FetchContent)
|
||||
include (ExternalProject)
|
||||
include (CMakeFuncs) # must come *after* ExternalProject b/c it overrides one function in EP
|
||||
include (ProcessorCount)
|
||||
if (target)
|
||||
message (FATAL_ERROR "The target option has been removed - use native cmake options to control build")
|
||||
endif ()
|
||||
@@ -142,14 +143,11 @@ target_link_libraries(ripple_libs INTERFACE
|
||||
SQLite::SQLite3
|
||||
)
|
||||
|
||||
if(coverage)
|
||||
include(RippledCov)
|
||||
endif()
|
||||
|
||||
###
|
||||
|
||||
include(RippledCore)
|
||||
include(RippledInstall)
|
||||
include(RippledCov)
|
||||
include(RippledMultiConfig)
|
||||
include(RippledDocs)
|
||||
include(RippledValidatorKeys)
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
|
||||
#include <ripple/core/JobQueue.h>
|
||||
#include <ripple/net/InfoSub.h>
|
||||
#include <boost/asio/io_service.hpp>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
@@ -39,11 +38,9 @@ protected:
|
||||
explicit RPCSub(InfoSub::Source& source);
|
||||
};
|
||||
|
||||
// VFALCO Why is the io_service needed?
|
||||
std::shared_ptr<RPCSub>
|
||||
make_RPCSub(
|
||||
InfoSub::Source& source,
|
||||
boost::asio::io_service& io_service,
|
||||
JobQueue& jobQueue,
|
||||
std::string const& strUrl,
|
||||
std::string const& strUsername,
|
||||
|
||||
@@ -451,6 +451,12 @@ public:
|
||||
if (mShutdown)
|
||||
{
|
||||
JLOG(j_.trace()) << "Complete.";
|
||||
|
||||
mResponse.commit(bytes_transferred);
|
||||
std::string strBody{
|
||||
{std::istreambuf_iterator<char>(&mResponse)},
|
||||
std::istreambuf_iterator<char>()};
|
||||
invokeComplete(ecResult, mStatus, mBody + strBody);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
@@ -1805,6 +1805,7 @@ rpcClient(
|
||||
}
|
||||
|
||||
{
|
||||
//@@start blocking-request
|
||||
boost::asio::io_service isService;
|
||||
RPCCall::fromNetwork(
|
||||
isService,
|
||||
@@ -1828,6 +1829,7 @@ rpcClient(
|
||||
headers);
|
||||
isService.run(); // This blocks until there are no more
|
||||
// outstanding async calls.
|
||||
//@@end blocking-request
|
||||
}
|
||||
if (jvOutput.isMember("result"))
|
||||
{
|
||||
@@ -1946,6 +1948,7 @@ fromNetwork(
|
||||
// HTTP call?
|
||||
auto constexpr RPC_NOTIFY = 30s;
|
||||
|
||||
//@@start async-request
|
||||
HTTPClient::request(
|
||||
bSSL,
|
||||
io_service,
|
||||
@@ -1970,6 +1973,7 @@ fromNetwork(
|
||||
std::placeholders::_3,
|
||||
j),
|
||||
j);
|
||||
//@@end async-request
|
||||
}
|
||||
|
||||
} // namespace RPCCall
|
||||
|
||||
@@ -33,14 +33,12 @@ class RPCSubImp : public RPCSub
|
||||
public:
|
||||
RPCSubImp(
|
||||
InfoSub::Source& source,
|
||||
boost::asio::io_service& io_service,
|
||||
JobQueue& jobQueue,
|
||||
std::string const& strUrl,
|
||||
std::string const& strUsername,
|
||||
std::string const& strPassword,
|
||||
Logs& logs)
|
||||
: RPCSub(source)
|
||||
, m_io_service(io_service)
|
||||
, m_jobQueue(jobQueue)
|
||||
, mUrl(strUrl)
|
||||
, mSSL(false)
|
||||
@@ -78,14 +76,14 @@ public:
|
||||
{
|
||||
std::lock_guard sl(mLock);
|
||||
|
||||
// Wietse: we're not going to limit this, this is admin-port only, scale
|
||||
// accordingly Dropping events just like this results in inconsistent
|
||||
// data on the receiving end if (mDeque.size() >= eventQueueMax)
|
||||
// {
|
||||
// // Drop the previous event.
|
||||
// JLOG(j_.warn()) << "RPCCall::fromNetwork drop";
|
||||
// mDeque.pop_back();
|
||||
// }
|
||||
if (mDeque.size() >= maxQueueSize)
|
||||
{
|
||||
JLOG(j_.warn())
|
||||
<< "RPCCall::fromNetwork drop: queue full (" << mDeque.size()
|
||||
<< "), seq=" << mSeq << ", endpoint=" << mIp;
|
||||
++mSeq;
|
||||
return;
|
||||
}
|
||||
|
||||
auto jm = broadcast ? j_.debug() : j_.info();
|
||||
JLOG(jm) << "RPCCall::fromNetwork push: " << jvObj;
|
||||
@@ -121,48 +119,49 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
// XXX Could probably create a bunch of send jobs in a single get of the
|
||||
// lock.
|
||||
// Maximum concurrent HTTP deliveries per batch. Bounds file
|
||||
// descriptor usage while still allowing parallel delivery to
|
||||
// capable endpoints. With a 1024 FD process limit shared across
|
||||
// peers, clients, and the node store, 32 per subscriber is a
|
||||
// meaningful but survivable chunk even with multiple subscribers.
|
||||
static constexpr int maxInFlight = 32;
|
||||
|
||||
// Maximum queued events before dropping. At ~5-10KB per event
|
||||
// this is ~80-160MB worst case — trivial memory-wise. The real
|
||||
// purpose is detecting a hopelessly behind endpoint: at 100+
|
||||
// events per ledger (every ~4s), 16384 events is ~10 minutes
|
||||
// of buffer. Consumers detect gaps via the seq field.
|
||||
static constexpr std::size_t maxQueueSize = 16384;
|
||||
|
||||
void
|
||||
sendThread()
|
||||
{
|
||||
Json::Value jvEvent;
|
||||
bool bSend;
|
||||
|
||||
do
|
||||
{
|
||||
// Local io_service per batch — cheap to create (just an
|
||||
// internal event queue, no threads, no syscalls). Using a
|
||||
// local io_service is what makes .run() block until exactly
|
||||
// this batch completes, giving us flow control. Same
|
||||
// pattern used by rpcClient() in RPCCall.cpp for CLI
|
||||
// commands.
|
||||
boost::asio::io_service io_service;
|
||||
int dispatched = 0;
|
||||
|
||||
{
|
||||
// Obtain the lock to manipulate the queue and change sending.
|
||||
std::lock_guard sl(mLock);
|
||||
|
||||
if (mDeque.empty())
|
||||
{
|
||||
mSending = false;
|
||||
bSend = false;
|
||||
}
|
||||
else
|
||||
while (!mDeque.empty() && dispatched < maxInFlight)
|
||||
{
|
||||
auto const [seq, env] = mDeque.front();
|
||||
|
||||
mDeque.pop_front();
|
||||
|
||||
jvEvent = env;
|
||||
Json::Value jvEvent = env;
|
||||
jvEvent["seq"] = seq;
|
||||
|
||||
bSend = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Send outside of the lock.
|
||||
if (bSend)
|
||||
{
|
||||
// XXX Might not need this in a try.
|
||||
try
|
||||
{
|
||||
JLOG(j_.info()) << "RPCCall::fromNetwork: " << mIp;
|
||||
|
||||
RPCCall::fromNetwork(
|
||||
m_io_service,
|
||||
io_service,
|
||||
mIp,
|
||||
mPort,
|
||||
mUsername,
|
||||
@@ -173,21 +172,38 @@ private:
|
||||
mSSL,
|
||||
true,
|
||||
logs_);
|
||||
++dispatched;
|
||||
}
|
||||
|
||||
if (dispatched == 0)
|
||||
mSending = false;
|
||||
}
|
||||
|
||||
bSend = dispatched > 0;
|
||||
|
||||
if (bSend)
|
||||
{
|
||||
try
|
||||
{
|
||||
JLOG(j_.info())
|
||||
<< "RPCCall::fromNetwork: " << mIp << " dispatching "
|
||||
<< dispatched << " events";
|
||||
io_service.run();
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
JLOG(j_.info())
|
||||
JLOG(j_.warn())
|
||||
<< "RPCCall::fromNetwork exception: " << e.what();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
JLOG(j_.warn()) << "RPCCall::fromNetwork unknown exception";
|
||||
}
|
||||
}
|
||||
} while (bSend);
|
||||
}
|
||||
|
||||
private:
|
||||
// Wietse: we're not going to limit this, this is admin-port only, scale
|
||||
// accordingly enum { eventQueueMax = 32 };
|
||||
|
||||
boost::asio::io_service& m_io_service;
|
||||
JobQueue& m_jobQueue;
|
||||
|
||||
std::string mUrl;
|
||||
@@ -217,7 +233,6 @@ RPCSub::RPCSub(InfoSub::Source& source) : InfoSub(source, Consumer())
|
||||
std::shared_ptr<RPCSub>
|
||||
make_RPCSub(
|
||||
InfoSub::Source& source,
|
||||
boost::asio::io_service& io_service,
|
||||
JobQueue& jobQueue,
|
||||
std::string const& strUrl,
|
||||
std::string const& strUsername,
|
||||
@@ -226,7 +241,6 @@ make_RPCSub(
|
||||
{
|
||||
return std::make_shared<RPCSubImp>(
|
||||
std::ref(source),
|
||||
std::ref(io_service),
|
||||
std::ref(jobQueue),
|
||||
strUrl,
|
||||
strUsername,
|
||||
|
||||
@@ -76,7 +76,6 @@ doSubscribe(RPC::JsonContext& context)
|
||||
{
|
||||
auto rspSub = make_RPCSub(
|
||||
context.app.getOPs(),
|
||||
context.app.getIOService(),
|
||||
context.app.getJobQueue(),
|
||||
strUrl,
|
||||
strUsername,
|
||||
|
||||
819
src/test/net/HTTPClient_test.cpp
Normal file
819
src/test/net/HTTPClient_test.cpp
Normal file
@@ -0,0 +1,819 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2024 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/basics/ByteUtilities.h>
|
||||
#include <ripple/net/HTTPClient.h>
|
||||
#include <test/jtx.h>
|
||||
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/asio/ip/tcp.hpp>
|
||||
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <thread>
|
||||
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
|
||||
// Minimal TCP server for testing HTTPClient behavior.
|
||||
// Accepts connections and sends configurable HTTP responses.
|
||||
class MockHTTPServer
|
||||
{
|
||||
boost::asio::io_service ios_;
|
||||
std::unique_ptr<boost::asio::io_service::work> work_;
|
||||
boost::asio::ip::tcp::acceptor acceptor_;
|
||||
std::thread thread_;
|
||||
std::atomic<bool> running_{true};
|
||||
unsigned short port_;
|
||||
|
||||
// Metrics
|
||||
std::atomic<int> activeConnections_{0};
|
||||
std::atomic<int> peakConnections_{0};
|
||||
std::atomic<int> totalAccepted_{0};
|
||||
|
||||
// Configurable behavior
|
||||
std::atomic<int> statusCode_{200};
|
||||
std::atomic<int> delayMs_{0};
|
||||
std::atomic<bool> sendResponse_{true};
|
||||
std::atomic<bool> closeImmediately_{false};
|
||||
std::atomic<bool> noContentLength_{false};
|
||||
|
||||
public:
|
||||
MockHTTPServer()
|
||||
: work_(std::make_unique<boost::asio::io_service::work>(ios_))
|
||||
, acceptor_(
|
||||
ios_,
|
||||
boost::asio::ip::tcp::endpoint(
|
||||
boost::asio::ip::address::from_string("127.0.0.1"),
|
||||
0))
|
||||
{
|
||||
port_ = acceptor_.local_endpoint().port();
|
||||
accept();
|
||||
thread_ = std::thread([this] { ios_.run(); });
|
||||
}
|
||||
|
||||
~MockHTTPServer()
|
||||
{
|
||||
running_ = false;
|
||||
work_.reset(); // Allow io_service to stop.
|
||||
boost::system::error_code ec;
|
||||
acceptor_.close(ec);
|
||||
ios_.stop();
|
||||
if (thread_.joinable())
|
||||
thread_.join();
|
||||
}
|
||||
|
||||
unsigned short
|
||||
port() const
|
||||
{
|
||||
return port_;
|
||||
}
|
||||
int
|
||||
activeConnectionCount() const
|
||||
{
|
||||
return activeConnections_;
|
||||
}
|
||||
int
|
||||
peakConnectionCount() const
|
||||
{
|
||||
return peakConnections_;
|
||||
}
|
||||
int
|
||||
totalAcceptedCount() const
|
||||
{
|
||||
return totalAccepted_;
|
||||
}
|
||||
|
||||
void
|
||||
setStatus(int code)
|
||||
{
|
||||
statusCode_ = code;
|
||||
}
|
||||
void
|
||||
setDelay(int ms)
|
||||
{
|
||||
delayMs_ = ms;
|
||||
}
|
||||
void
|
||||
setSendResponse(bool send)
|
||||
{
|
||||
sendResponse_ = send;
|
||||
}
|
||||
void
|
||||
setCloseImmediately(bool close)
|
||||
{
|
||||
closeImmediately_ = close;
|
||||
}
|
||||
void
|
||||
setNoContentLength(bool noContentLength)
|
||||
{
|
||||
noContentLength_ = noContentLength;
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
accept()
|
||||
{
|
||||
auto sock = std::make_shared<boost::asio::ip::tcp::socket>(ios_);
|
||||
acceptor_.async_accept(*sock, [this, sock](auto ec) {
|
||||
if (!ec && running_)
|
||||
{
|
||||
++totalAccepted_;
|
||||
int current = ++activeConnections_;
|
||||
int prev = peakConnections_.load();
|
||||
while (current > prev &&
|
||||
!peakConnections_.compare_exchange_weak(prev, current))
|
||||
;
|
||||
|
||||
handleConnection(sock);
|
||||
accept();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void
|
||||
handleConnection(std::shared_ptr<boost::asio::ip::tcp::socket> sock)
|
||||
{
|
||||
if (closeImmediately_)
|
||||
{
|
||||
boost::system::error_code ec;
|
||||
sock->shutdown(boost::asio::ip::tcp::socket::shutdown_both, ec);
|
||||
sock->close(ec);
|
||||
--activeConnections_;
|
||||
return;
|
||||
}
|
||||
|
||||
auto buf = std::make_shared<boost::asio::streambuf>();
|
||||
boost::asio::async_read_until(
|
||||
*sock, *buf, "\r\n\r\n", [this, sock, buf](auto ec, size_t) {
|
||||
if (ec)
|
||||
{
|
||||
--activeConnections_;
|
||||
return;
|
||||
}
|
||||
|
||||
if (!sendResponse_)
|
||||
{
|
||||
// Hold connection open without responding.
|
||||
// The socket shared_ptr prevents cleanup.
|
||||
// This simulates a server that accepts but
|
||||
// never responds (e.g., overloaded).
|
||||
return;
|
||||
}
|
||||
|
||||
auto delay = delayMs_.load();
|
||||
if (delay > 0)
|
||||
{
|
||||
auto timer =
|
||||
std::make_shared<boost::asio::steady_timer>(ios_);
|
||||
timer->expires_from_now(std::chrono::milliseconds(delay));
|
||||
timer->async_wait(
|
||||
[this, sock, timer](auto) { sendHTTPResponse(sock); });
|
||||
}
|
||||
else
|
||||
{
|
||||
sendHTTPResponse(sock);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void
|
||||
sendHTTPResponse(std::shared_ptr<boost::asio::ip::tcp::socket> sock)
|
||||
{
|
||||
auto body = std::string("{}");
|
||||
std::string header =
|
||||
"HTTP/1.0 " + std::to_string(statusCode_.load()) + " OK\r\n";
|
||||
if (!noContentLength_)
|
||||
header += "Content-Length: " + std::to_string(body.size()) + "\r\n";
|
||||
header += "\r\n";
|
||||
auto response = std::make_shared<std::string>(header + body);
|
||||
|
||||
boost::asio::async_write(
|
||||
*sock,
|
||||
boost::asio::buffer(*response),
|
||||
[this, sock, response](auto, size_t) {
|
||||
boost::system::error_code ec;
|
||||
sock->shutdown(boost::asio::ip::tcp::socket::shutdown_both, ec);
|
||||
sock->close(ec);
|
||||
--activeConnections_;
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Exercises socket and object lifetime management in HTTPClient /
// HTTPClientImp against the local MockHTTPServer defined above.
// Each test asserts that the completion callback fires and, where
// observable, that the server ends with zero active connections —
// i.e. the client really released its socket (no leaked FDs).
class HTTPClient_test : public beast::unit_test::suite
{
    // Helper: fire an HTTP request and track completion via atomic counter.
    //
    // ios       : io_service the request runs on (caller drives it).
    // host/port : target endpoint (the mock server, or a dead port).
    // completed : incremented each time the completion handler runs.
    // j         : journal for HTTPClient logging.
    // timeout   : request deadline passed through to HTTPClient.
    void
    fireRequest(
        boost::asio::io_service& ios,
        std::string const& host,
        unsigned short port,
        std::atomic<int>& completed,
        beast::Journal& j,
        std::chrono::seconds timeout = std::chrono::seconds{5})
    {
        HTTPClient::request(
            false,  // no SSL
            ios,
            host,
            port,
            // Build a minimal HTTP/1.0 POST with a 2-byte JSON body.
            [](boost::asio::streambuf& sb, std::string const& strHost) {
                std::ostream os(&sb);
                os << "POST / HTTP/1.0\r\n"
                   << "Host: " << strHost << "\r\n"
                   << "Content-Type: application/json\r\n"
                   << "Content-Length: 2\r\n"
                   << "\r\n"
                   << "{}";
            },
            megabytes(1),
            timeout,
            // Completion handler: just count the invocation.
            // NOTE(review): returning false appears to mean "done, no
            // further data wanted" — confirm against HTTPClient's
            // callback contract.
            [&completed](
                const boost::system::error_code&, int, std::string const&) {
                ++completed;
                return false;
            },
            j);
    }

    //--------------------------------------------------------------------------

    void
    testCleanupAfterSuccess()
    {
        testcase("Socket cleanup after successful response");

        // After a successful HTTP request completes, the
        // HTTPClientImp should be destroyed and its socket
        // closed promptly — not held until the deadline fires.

        using namespace jtx;
        Env env{*this};

        MockHTTPServer server;
        server.setStatus(200);

        std::atomic<int> completed{0};
        auto j = env.app().journal("HTTPClient");

        {
            boost::asio::io_service ios;
            fireRequest(ios, "127.0.0.1", server.port(), completed, j);
            ios.run();
        }

        BEAST_EXPECT(completed == 1);
        BEAST_EXPECT(server.totalAcceptedCount() == 1);
        // After io_service.run() returns, the server should
        // see zero active connections — socket was released.
        BEAST_EXPECT(server.activeConnectionCount() == 0);
    }

    void
    testCleanupAfter500()
    {
        testcase("Socket cleanup after HTTP 500");

        // Same as the success case, but the server answers 500;
        // an error status must not leak the socket either.
        using namespace jtx;
        Env env{*this};

        MockHTTPServer server;
        server.setStatus(500);

        std::atomic<int> completed{0};
        auto j = env.app().journal("HTTPClient");

        {
            boost::asio::io_service ios;
            fireRequest(ios, "127.0.0.1", server.port(), completed, j);
            ios.run();
        }

        BEAST_EXPECT(completed == 1);
        BEAST_EXPECT(server.activeConnectionCount() == 0);
    }

    void
    testCleanupAfterConnectionRefused()
    {
        testcase("Socket cleanup after connection refused");

        using namespace jtx;
        Env env{*this};

        // Bind a port, then close it — guarantees nothing is listening.
        // NOTE(review): small race — another process could grab the
        // freed port before fireRequest connects; unlikely in practice.
        boost::asio::io_service tmp;
        boost::asio::ip::tcp::acceptor acc(
            tmp,
            boost::asio::ip::tcp::endpoint(
                boost::asio::ip::address::from_string("127.0.0.1"), 0));
        auto port = acc.local_endpoint().port();
        acc.close();

        std::atomic<int> completed{0};
        auto j = env.app().journal("HTTPClient");

        {
            boost::asio::io_service ios;
            fireRequest(ios, "127.0.0.1", port, completed, j);
            ios.run();
        }

        // Callback should still be invoked (with error).
        BEAST_EXPECT(completed == 1);
    }

    void
    testCleanupAfterTimeout()
    {
        testcase("Socket cleanup after timeout");

        // Server accepts but never responds. HTTPClient should
        // time out, clean up, and invoke the callback.

        using namespace jtx;
        Env env{*this};

        MockHTTPServer server;
        server.setSendResponse(false);  // accept, read, but never respond

        std::atomic<int> completed{0};
        auto j = env.app().journal("HTTPClient");

        {
            boost::asio::io_service ios;
            // Short timeout to keep the test fast.
            fireRequest(
                ios,
                "127.0.0.1",
                server.port(),
                completed,
                j,
                std::chrono::seconds{2});
            ios.run();
        }

        // Callback must be invoked even on timeout.
        BEAST_EXPECT(completed == 1);
    }

    void
    testCleanupAfterServerCloseBeforeResponse()
    {
        testcase("Socket cleanup after server closes before response");

        // Server accepts the connection then immediately closes
        // it without sending anything.

        using namespace jtx;
        Env env{*this};

        MockHTTPServer server;
        server.setCloseImmediately(true);

        std::atomic<int> completed{0};
        auto j = env.app().journal("HTTPClient");

        {
            boost::asio::io_service ios;
            fireRequest(ios, "127.0.0.1", server.port(), completed, j);
            ios.run();
        }

        BEAST_EXPECT(completed == 1);
        BEAST_EXPECT(server.activeConnectionCount() == 0);
    }

    void
    testEOFCompletionCallsCallback()
    {
        testcase("EOF completion invokes callback (handleData bug)");

        // HTTPClientImp::handleData has a code path where
        // mShutdown == eof results in logging "Complete." but
        // never calling invokeComplete(). This means:
        //   - The completion callback is never invoked
        //   - The deadline timer is never cancelled
        //   - The socket is held open until the 30s deadline
        //
        // This test verifies the callback IS invoked after an
        // EOF response. If this test fails (completed == 0 after
        // ios.run()), the handleData EOF bug is confirmed.

        using namespace jtx;
        Env env{*this};

        MockHTTPServer server;
        server.setStatus(200);

        std::atomic<int> completed{0};
        auto j = env.app().journal("HTTPClient");

        {
            boost::asio::io_service ios;
            fireRequest(
                ios,
                "127.0.0.1",
                server.port(),
                completed,
                j,
                std::chrono::seconds{3});
            ios.run();
        }

        // If handleData EOF path doesn't call invokeComplete,
        // the callback won't fire until the deadline (3s) expires,
        // and even then handleDeadline doesn't invoke mComplete.
        // The io_service.run() will still return (deadline fires,
        // handleShutdown runs, all handlers done), but completed
        // will be 0.
        if (completed != 1)
        {
            log << "  BUG CONFIRMED: handleData EOF path does not"
                << " call invokeComplete(). Callback was not invoked."
                << " Socket held open until deadline." << std::endl;
        }
        BEAST_EXPECT(completed == 1);
    }

    void
    testConcurrentRequestCleanup()
    {
        testcase("Concurrent requests all clean up");

        // Fire N requests at once on the same io_service.
        // All should complete and release their sockets.

        using namespace jtx;
        Env env{*this};

        MockHTTPServer server;
        server.setStatus(200);

        static constexpr int N = 50;
        std::atomic<int> completed{0};
        auto j = env.app().journal("HTTPClient");

        {
            boost::asio::io_service ios;
            for (int i = 0; i < N; ++i)
            {
                fireRequest(ios, "127.0.0.1", server.port(), completed, j);
            }
            ios.run();
        }

        BEAST_EXPECT(completed == N);
        // Brief sleep to let server-side shutdown complete.
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        BEAST_EXPECT(server.activeConnectionCount() == 0);

        log << "  Completed: " << completed
            << ", Peak concurrent: " << server.peakConnectionCount()
            << ", Active after: " << server.activeConnectionCount()
            << std::endl;
    }

    void
    testConcurrent500Cleanup()
    {
        testcase("Concurrent 500 requests all clean up");

        // Fire N requests that all get 500 responses. Verify
        // all sockets are released and no FDs leak.

        using namespace jtx;
        Env env{*this};

        MockHTTPServer server;
        server.setStatus(500);

        static constexpr int N = 50;
        std::atomic<int> completed{0};
        auto j = env.app().journal("HTTPClient");

        {
            boost::asio::io_service ios;
            for (int i = 0; i < N; ++i)
            {
                fireRequest(ios, "127.0.0.1", server.port(), completed, j);
            }
            ios.run();
        }

        BEAST_EXPECT(completed == N);
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        BEAST_EXPECT(server.activeConnectionCount() == 0);
    }

    void
    testEOFWithoutContentLength()
    {
        testcase("EOF without Content-Length (handleData EOF path)");

        // When a server sends a response WITHOUT Content-Length,
        // HTTPClientImp reads up to maxResponseSize. The server
        // closes the connection, causing EOF in handleData.
        //
        // In handleData, the EOF path (mShutdown == eof) logs
        // "Complete." but does NOT call invokeComplete(). This
        // means:
        //   - mComplete (callback) is never invoked
        //   - deadline timer is never cancelled
        //   - socket + object held alive until deadline fires
        //
        // This test uses a SHORT deadline to keep it fast. If
        // the callback IS invoked, ios.run() returns quickly.
        // If NOT, ios.run() blocks until the deadline (2s).

        using namespace jtx;
        Env env{*this};

        MockHTTPServer server;
        server.setStatus(200);
        server.setNoContentLength(true);

        std::atomic<int> completed{0};
        auto j = env.app().journal("HTTPClient");

        // Time the run so the log can show whether we returned
        // promptly (callback fired) or only at the deadline.
        auto start = std::chrono::steady_clock::now();
        {
            boost::asio::io_service ios;
            fireRequest(
                ios,
                "127.0.0.1",
                server.port(),
                completed,
                j,
                std::chrono::seconds{2});
            ios.run();
        }
        auto elapsed = std::chrono::steady_clock::now() - start;
        auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed)
                      .count();

        if (completed == 0)
        {
            log << "  BUG CONFIRMED: handleData EOF path does not"
                << " call invokeComplete(). Callback never invoked."
                << " io_service.run() blocked for " << ms << "ms"
                << " (deadline timeout)." << std::endl;
        }
        else
        {
            log << "  Callback invoked in " << ms << "ms." << std::endl;
        }
        // This WILL fail if the EOF bug exists — the callback
        // is only invoked via the deadline timeout path, which
        // does NOT call mComplete.
        BEAST_EXPECT(completed == 1);
    }

    void
    testPersistentIOServiceCleanup()
    {
        testcase("Cleanup on persistent io_service (no destructor mask)");

        // Previous tests destroy the io_service after run(),
        // which releases all pending handlers' shared_ptrs.
        // This masks leaks. Here we use a PERSISTENT io_service
        // (with work guard, running on its own thread) and check
        // that HTTPClientImp objects are destroyed WITHOUT relying
        // on io_service destruction.
        //
        // We track the object's lifetime via the completion
        // callback — if it fires, the async chain completed
        // normally. If it doesn't fire within a reasonable time
        // but the io_service is still running, something is stuck.

        using namespace jtx;
        Env env{*this};

        MockHTTPServer server;
        server.setStatus(200);

        std::atomic<int> completed{0};
        auto j = env.app().journal("HTTPClient");

        // Persistent io_service — stays alive the whole test.
        boost::asio::io_service ios;
        auto work = std::make_unique<boost::asio::io_service::work>(ios);
        std::thread runner([&ios] { ios.run(); });

        // Fire request on the persistent io_service.
        HTTPClient::request(
            false,
            ios,
            "127.0.0.1",
            server.port(),
            [](boost::asio::streambuf& sb, std::string const& strHost) {
                std::ostream os(&sb);
                os << "POST / HTTP/1.0\r\n"
                   << "Host: " << strHost << "\r\n"
                   << "Content-Type: application/json\r\n"
                   << "Content-Length: 2\r\n"
                   << "\r\n"
                   << "{}";
            },
            megabytes(1),
            std::chrono::seconds{5},
            [&completed](
                const boost::system::error_code&, int, std::string const&) {
                ++completed;
                return false;
            },
            j);

        // Wait for completion without destroying io_service.
        auto deadline =
            std::chrono::steady_clock::now() + std::chrono::seconds{5};
        while (completed == 0 && std::chrono::steady_clock::now() < deadline)
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(50));
        }

        BEAST_EXPECT(completed == 1);

        // Give server-side shutdown a moment.
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        BEAST_EXPECT(server.activeConnectionCount() == 0);

        if (server.activeConnectionCount() != 0)
        {
            log << "  BUG: Socket still open on persistent"
                << " io_service. FD leaked." << std::endl;
        }

        // Clean shutdown.
        work.reset();
        ios.stop();
        runner.join();
    }

    void
    testPersistentIOService500Cleanup()
    {
        testcase("500 cleanup on persistent io_service");

        // Same persistent-io_service setup as above, but with N
        // concurrent requests all receiving HTTP 500.
        using namespace jtx;
        Env env{*this};

        MockHTTPServer server;
        server.setStatus(500);

        static constexpr int N = 20;
        std::atomic<int> completed{0};
        auto j = env.app().journal("HTTPClient");

        boost::asio::io_service ios;
        auto work = std::make_unique<boost::asio::io_service::work>(ios);
        std::thread runner([&ios] { ios.run(); });

        for (int i = 0; i < N; ++i)
        {
            HTTPClient::request(
                false,
                ios,
                "127.0.0.1",
                server.port(),
                [](boost::asio::streambuf& sb, std::string const& strHost) {
                    std::ostream os(&sb);
                    os << "POST / HTTP/1.0\r\n"
                       << "Host: " << strHost << "\r\n"
                       << "Content-Type: application/json\r\n"
                       << "Content-Length: 2\r\n"
                       << "\r\n"
                       << "{}";
                },
                megabytes(1),
                std::chrono::seconds{5},
                [&completed](
                    const boost::system::error_code&, int, std::string const&) {
                    ++completed;
                    return false;
                },
                j);
        }

        // Poll for all N completions without tearing down the io_service.
        auto deadline =
            std::chrono::steady_clock::now() + std::chrono::seconds{10};
        while (completed < N && std::chrono::steady_clock::now() < deadline)
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(50));
        }

        BEAST_EXPECT(completed == N);
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        BEAST_EXPECT(server.activeConnectionCount() == 0);

        log << "  Completed: " << completed << "/" << N
            << ", Active connections after: " << server.activeConnectionCount()
            << std::endl;

        work.reset();
        ios.stop();
        runner.join();
    }

    void
    testGetSelfReferenceCleanup()
    {
        testcase("get() shared_from_this cycle releases");

        // HTTPClientImp::get() binds shared_from_this() into
        // mBuild via makeGet. This creates a reference cycle:
        //   object -> mBuild -> shared_ptr<object>
        // The object can only be destroyed if mBuild is cleared.
        // Since mBuild is never explicitly cleared, this may be
        // a permanent FD leak.
        //
        // This test fires a GET request and checks whether the
        // HTTPClientImp is destroyed (and socket closed) after
        // completion.

        using namespace jtx;
        Env env{*this};

        MockHTTPServer server;
        server.setStatus(200);

        std::atomic<int> completed{0};
        auto j = env.app().journal("HTTPClient");

        {
            boost::asio::io_service ios;
            HTTPClient::get(
                false,  // no SSL
                ios,
                "127.0.0.1",
                server.port(),
                "/test",
                megabytes(1),
                std::chrono::seconds{5},
                [&completed](
                    const boost::system::error_code&, int, std::string const&) {
                    ++completed;
                    return false;
                },
                j);
            ios.run();
        }

        BEAST_EXPECT(completed == 1);
        std::this_thread::sleep_for(std::chrono::milliseconds(100));

        // If the get() self-reference cycle leaks, the server
        // will still show an active connection here (the socket
        // in the leaked HTTPClientImp is never closed).
        if (server.activeConnectionCount() != 0)
        {
            log << "  BUG CONFIRMED: get() self-reference cycle"
                << " prevents HTTPClientImp destruction."
                << " Socket FD leaked." << std::endl;
        }
        BEAST_EXPECT(server.activeConnectionCount() == 0);
    }

public:
    void
    run() override
    {
        testCleanupAfterSuccess();
        testCleanupAfter500();
        testCleanupAfterConnectionRefused();
        testCleanupAfterTimeout();
        testCleanupAfterServerCloseBeforeResponse();
        testEOFCompletionCallsCallback();
        testConcurrentRequestCleanup();
        testConcurrent500Cleanup();
        testEOFWithoutContentLength();
        testPersistentIOServiceCleanup();
        testPersistentIOService500Cleanup();
        testGetSelfReferenceCleanup();
    }
};
|
||||
|
||||
// Register the suite with the test framework under "ripple.net.HTTPClient".
BEAST_DEFINE_TESTSUITE(HTTPClient, net, ripple);
|
||||
|
||||
} // namespace test
|
||||
} // namespace ripple
|
||||
Reference in New Issue
Block a user