Compare commits

..

10 Commits

Author SHA1 Message Date
Nicholas Dudfield
a8388e48a4 fix: call invokeComplete on EOF in HTTPClient handleData
When an HTTP response has no Content-Length header, HTTPClient reads
until EOF. The EOF path in handleData logged "Complete." but never
called invokeComplete(), leaving the socket held open for the full
30s deadline timeout and the completion callback never invoked.

This is the likely root cause of webhook delivery permanently stalling
after repeated 500 errors — many web frameworks omit Content-Length on
error responses, triggering this path. Each leaked socket holds an FD
for 30s, eventually exhausting the process FD budget.

Includes HTTPClient_test with 12 test cases covering resource cleanup
across success, error, timeout, connection-refused, concurrent request,
and EOF-without-Content-Length scenarios.
2026-02-10 07:33:31 +07:00
Nicholas Dudfield
49908096d5 fix: bound subscription webhook delivery concurrency and queue size
fromNetwork() is async — it posts handlers to the io_service and
returns immediately. The original sendThread() loop fires all queued
events as concurrent HTTP connections at once. Under sustained load
with a slow/failing endpoint, connections accumulate (each held up to
30s by RPC_NOTIFY timeout), exhausting file descriptors and breaking
all network I/O for the entire process.

Fix: use a local io_service per batch with .run() to block until the
batch completes (same pattern as rpcClient() in RPCCall.cpp). This
bounds concurrent connections to maxInFlight (32) per subscriber while
still allowing parallel delivery.

Also add a queue cap (maxQueueSize = 16384, ~80-160MB) so a hopelessly
behind endpoint doesn't grow the deque indefinitely. Consumers detect
gaps via the existing seq field.

Ref: XRPLF/rippled#6341
2026-02-09 18:31:25 +07:00
tequ
12e1afb694 Enhance dependency export process in GitHub Action to check for existing exports before executing. (#660) 2026-01-28 13:14:40 +10:00
tequ
c355ad9971 update mise-action to use cmake as aqua:Kitware/CMake (#671) 2026-01-27 19:30:50 +10:00
Niq Dudfield
a8d7b2619e fix: restore [ips_fixed] to use addFixedPeer instead of addFallbackStrings (#641) 2026-01-05 13:46:02 +10:00
Niq Dudfield
775fb3a8b2 fix: increment manifest sequence for client code cache invalidation (#631) 2025-12-24 11:16:00 +10:00
Niq Dudfield
5a118a4e2b fix(logs): formatting fixes, color handling, and debug build defaults (#607) 2025-12-17 09:45:41 +10:00
tequ
960f87857e Self hosted macos runner (#652) 2025-12-17 09:43:25 +10:00
tequ
f731bcfeba Increase ccache size from 10G to 100G in release-builder.sh for improved build performance (#643) 2025-12-16 14:45:45 +10:00
tequ
374b361daa Use Self hosted runner (#639) 2025-12-16 14:16:36 +10:00
17 changed files with 1046 additions and 221 deletions

View File

@@ -75,37 +75,17 @@ runs:
SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-')
echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT
- name: Restore ccache directory for main branch
if: inputs.gha_cache_enabled == 'true' && inputs.ccache_enabled == 'true'
id: ccache-restore
uses: ./.github/actions/xahau-ga-cache-restore
with:
path: ~/.ccache-main
key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
restore-keys: |
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
cache-type: ccache-main
- name: Restore ccache directory for current branch
if: inputs.gha_cache_enabled == 'true' && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
id: ccache-restore-current-branch
uses: ./.github/actions/xahau-ga-cache-restore
with:
path: ~/.ccache-current
key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ steps.safe-branch.outputs.name }}
restore-keys: |
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
cache-type: ccache-current
- name: Configure ccache
if: inputs.ccache_enabled == 'true'
shell: bash
run: |
# Create cache directories
mkdir -p ~/.ccache-main ~/.ccache-current
mkdir -p ~/.ccache-cache
# Keep config separate from cache_dir so configs aren't swapped when CCACHE_DIR changes between steps
mkdir -p ~/.config/ccache
export CCACHE_CONFIGPATH="$HOME/.config/ccache/ccache.conf"
echo "CCACHE_CONFIGPATH=$CCACHE_CONFIGPATH" >> $GITHUB_ENV
# Keep config separate from cache_dir so configs aren't swapped when CCACHE_DIR changes between steps
mkdir -p ~/.config/ccache
@@ -116,20 +96,9 @@ runs:
ccache --set-config=max_size=${{ inputs.ccache_max_size }}
ccache --set-config=hash_dir=${{ inputs.ccache_hash_dir }}
ccache --set-config=compiler_check=${{ inputs.ccache_compiler_check }}
# Determine if we're on the main branch
if [ "${{ steps.safe-branch.outputs.name }}" = "${{ inputs.main_branch }}" ]; then
# Main branch: use main branch cache only
ccache --set-config=cache_dir="$HOME/.ccache-main"
echo "CCACHE_DIR=$HOME/.ccache-main" >> $GITHUB_ENV
echo "📦 Main branch: using ~/.ccache-main"
else
# Feature branch: use current branch cache with main as secondary (read-only fallback)
ccache --set-config=cache_dir="$HOME/.ccache-current"
ccache --set-config=secondary_storage="file:$HOME/.ccache-main"
echo "CCACHE_DIR=$HOME/.ccache-current" >> $GITHUB_ENV
echo "📦 Feature branch: using ~/.ccache-current with ~/.ccache-main as secondary"
fi
ccache --set-config=cache_dir="$HOME/.ccache-cache"
echo "CCACHE_DIR=$HOME/.ccache-cache" >> $GITHUB_ENV
echo "📦 using ~/.ccache-cache as ccache cache directory"
# Print config for verification
echo "=== ccache configuration ==="
@@ -244,17 +213,3 @@ runs:
if: inputs.ccache_enabled == 'true'
shell: bash
run: ccache -s
- name: Save ccache directory for main branch
if: success() && inputs.gha_cache_enabled == 'true' && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name == inputs.main_branch
uses: actions/cache/save@v4
with:
path: ~/.ccache-main
key: ${{ steps.ccache-restore.outputs.cache-primary-key }}
- name: Save ccache directory for current branch
if: success() && inputs.gha_cache_enabled == 'true' && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
uses: actions/cache/save@v4
with:
path: ~/.ccache-current
key: ${{ steps.ccache-restore-current-branch.outputs.cache-primary-key }}

View File

@@ -17,10 +17,6 @@ inputs:
description: 'Cache version for invalidation'
required: false
default: '1'
gha_cache_enabled:
description: 'Whether to use actions/cache (disable for self-hosted with volume mounts)'
required: false
default: 'true'
main_branch:
description: 'Main branch name for restore keys'
required: false
@@ -63,18 +59,14 @@ outputs:
runs:
using: 'composite'
steps:
- name: Restore Conan cache
if: inputs.gha_cache_enabled == 'true'
id: cache-restore-conan
uses: ./.github/actions/xahau-ga-cache-restore
with:
path: ~/.conan2
# Note: compiler-id format is compiler-version-stdlib[-gccversion]
key: ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.py') }}-${{ inputs.configuration }}
restore-keys: |
${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.py') }}-
${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
cache-type: Conan
- name: Configure Conan cache paths
if: inputs.os == 'Linux'
shell: bash
run: |
mkdir -p /.conan-cache/conan2 /.conan-cache/conan2_download /.conan-cache/conan2_sources
echo 'core.cache:storage_path=/.conan-cache/conan2' > ~/.conan2/global.conf
echo 'core.download:download_cache=/.conan-cache/conan2_download' >> ~/.conan2/global.conf
echo 'core.sources:download_cache=/.conan-cache/conan2_sources' >> ~/.conan2/global.conf
- name: Configure Conan cache paths
if: inputs.gha_cache_enabled == 'false'
@@ -142,10 +134,17 @@ runs:
- name: Export custom recipes
shell: bash
run: |
conan export external/snappy --version 1.1.10 --user xahaud --channel stable
conan export external/soci --version 4.0.3 --user xahaud --channel stable
conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable
# Export snappy if not already exported
conan list snappy/1.1.10@xahaud/stable 2>/dev/null | (grep -q "not found" && exit 1 || exit 0) || \
conan export external/snappy --version 1.1.10 --user xahaud --channel stable
# Export soci if not already exported
conan list soci/4.0.3@xahaud/stable 2>/dev/null | (grep -q "not found" && exit 1 || exit 0) || \
conan export external/soci --version 4.0.3 --user xahaud --channel stable
# Export wasmedge if not already exported
conan list wasmedge/0.11.2@xahaud/stable 2>/dev/null | (grep -q "not found" && exit 1 || exit 0) || \
conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable
- name: Install dependencies
shell: bash
env:
@@ -161,10 +160,3 @@ runs:
--build missing \
--settings build_type=${{ inputs.configuration }} \
..
- name: Save Conan cache
if: success() && inputs.gha_cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
path: ~/.conan2
key: ${{ steps.cache-restore-conan.outputs.cache-primary-key }}

View File

@@ -20,7 +20,7 @@ jobs:
- Ninja
configuration:
- Debug
runs-on: macos-15
runs-on: [self-hosted, macOS]
env:
build_dir: .build
# Bump this number to invalidate all caches globally.
@@ -30,61 +30,37 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Get commit message
id: get-commit-message
uses: ./.github/actions/xahau-ga-get-commit-message
with:
event-name: ${{ github.event_name }}
head-commit-message: ${{ github.event.head_commit.message }}
pr-head-sha: ${{ github.event.pull_request.head.sha }}
- name: Install Conan
- name: Add Homebrew to PATH
run: |
brew install conan
# Verify Conan 2 is installed
conan --version
echo "/opt/homebrew/bin" >> "$GITHUB_PATH"
echo "/opt/homebrew/sbin" >> "$GITHUB_PATH"
- name: Install Coreutils
run: |
brew install coreutils
echo "Num proc: $(nproc)"
- name: Install Ninja
if: matrix.generator == 'Ninja'
run: brew install ninja
# To isolate environments for each Runner, instead of installing globally with brew,
# use mise to isolate environments for each Runner directory.
- name: Setup toolchain (mise)
uses: jdx/mise-action@v3.6.1
with:
cache: false
install: true
mise_toml: |
[tools]
cmake = "3.23.1"
python = "3.12"
pipx = "latest"
conan = "2"
ninja = "latest"
ccache = "latest"
- name: Install Python
- name: Install tools via mise
run: |
if which python3 > /dev/null 2>&1; then
echo "Python 3 executable exists"
python3 --version
else
brew install python@3.12
fi
# Create 'python' symlink if it doesn't exist (for tools expecting 'python')
if ! which python > /dev/null 2>&1; then
sudo ln -sf $(which python3) /usr/local/bin/python
fi
- name: Install CMake
run: |
# Install CMake 3.x to match local dev environments
# With Conan 2 and the policy args passed to CMake, newer versions
# can have issues with dependencies that require cmake_minimum_required < 3.5
brew uninstall cmake --ignore-dependencies 2>/dev/null || true
# Download and install CMake 3.31.7 directly
curl -L https://github.com/Kitware/CMake/releases/download/v3.31.7/cmake-3.31.7-macos-universal.tar.gz -o cmake.tar.gz
tar -xzf cmake.tar.gz
# Move the entire CMake.app to /Applications
sudo mv cmake-3.31.7-macos-universal/CMake.app /Applications/
echo "/Applications/CMake.app/Contents/bin" >> $GITHUB_PATH
/Applications/CMake.app/Contents/bin/cmake --version
- name: Install ccache
run: brew install ccache
mise install
mise reshim
echo "$HOME/.local/share/mise/shims" >> "$GITHUB_PATH"
- name: Check environment
run: |
@@ -98,6 +74,14 @@ jobs:
echo "---- Full Environment ----"
env
- name: Get commit message
id: get-commit-message
uses: ./.github/actions/xahau-ga-get-commit-message
with:
event-name: ${{ github.event_name }}
head-commit-message: ${{ github.event.head_commit.message }}
pr-head-sha: ${{ github.event.pull_request.head.sha }}
- name: Detect compiler version
id: detect-compiler
run: |
@@ -129,6 +113,7 @@ jobs:
cache_version: ${{ env.CACHE_VERSION }}
main_branch: ${{ env.MAIN_BRANCH_NAME }}
stdlib: libcxx
ccache_max_size: '100G'
- name: Test
run: |

View File

@@ -181,8 +181,7 @@ jobs:
image: ubuntu:24.04
volumes:
- /home/runner/.conan-cache:/.conan-cache
- /home/runner/.ccache-main:/github/home/.ccache-main
- /home/runner/.ccache-current:/github/home/.ccache-current
- /home/runner/.ccache-cache:/github/home/.ccache-cache
defaults:
run:
shell: bash
@@ -325,7 +324,6 @@ jobs:
main_branch: ${{ env.MAIN_BRANCH_NAME }}
stdlib: ${{ matrix.stdlib }}
clang_gcc_toolchain: ${{ matrix.clang_gcc_toolchain || '' }}
gha_cache_enabled: 'false' # Disable caching for self hosted runner
ccache_max_size: '100G'
- name: Set artifact name

View File

@@ -48,13 +48,9 @@ target_sources (xrpl_core PRIVATE
src/ripple/beast/net/impl/IPAddressV6.cpp
src/ripple/beast/net/impl/IPEndpoint.cpp
src/ripple/beast/utility/src/beast_Journal.cpp
src/ripple/beast/utility/src/beast_PropertyStream.cpp)
# Conditionally add enhanced logging source when BEAST_ENHANCED_LOGGING is enabled
if(DEFINED BEAST_ENHANCED_LOGGING AND BEAST_ENHANCED_LOGGING)
target_sources(xrpl_core PRIVATE
src/ripple/beast/utility/src/beast_EnhancedLogging.cpp)
endif()
src/ripple/beast/utility/src/beast_PropertyStream.cpp
# Enhanced logging - compiles to empty when BEAST_ENHANCED_LOGGING is not defined
src/ripple/beast/utility/src/beast_EnhancedLogging.cpp)
#[===============================[
core sources
@@ -162,12 +158,16 @@ target_link_libraries (xrpl_core
date::date
Ripple::opts)
# Link date-tz library when enhanced logging is enabled
if(DEFINED BEAST_ENHANCED_LOGGING AND BEAST_ENHANCED_LOGGING)
if(TARGET date::date-tz)
target_link_libraries(xrpl_core PUBLIC date::date-tz)
endif()
# date-tz for enhanced logging (always linked, code is #ifdef guarded)
if(TARGET date::date-tz)
target_link_libraries(xrpl_core PUBLIC date::date-tz)
endif()
# BEAST_ENHANCED_LOGGING: enable for Debug builds OR when explicitly requested
# Uses generator expression so it works with multi-config generators (Xcode, VS, Ninja Multi-Config)
target_compile_definitions(xrpl_core PUBLIC
$<$<OR:$<CONFIG:Debug>,$<BOOL:${BEAST_ENHANCED_LOGGING}>>:BEAST_ENHANCED_LOGGING=1>
)
#[=================================[
main/core headers installation
#]=================================]
@@ -957,6 +957,7 @@ if (tests)
subdir: net
#]===============================]
src/test/net/DatabaseDownloader_test.cpp
src/test/net/HTTPClient_test.cpp
#[===============================[
test sources:
subdir: nodestore

View File

@@ -37,20 +37,11 @@ endif() #git
set(SOURCE_ROOT_PATH "${CMAKE_CURRENT_SOURCE_DIR}/src/")
add_definitions(-DSOURCE_ROOT_PATH="${SOURCE_ROOT_PATH}")
# BEAST_ENHANCED_LOGGING option - adds file:line numbers and formatting to logs
# Default to ON for Debug builds, OFF for Release
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages" ON)
else()
option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages" OFF)
endif()
if(BEAST_ENHANCED_LOGGING)
add_definitions(-DBEAST_ENHANCED_LOGGING=1)
message(STATUS "Log line numbers enabled")
else()
message(STATUS "Log line numbers disabled")
endif()
# BEAST_ENHANCED_LOGGING - adds file:line numbers and formatting to logs
# Automatically enabled for Debug builds via generator expression
# Can be explicitly controlled with -DBEAST_ENHANCED_LOGGING=ON/OFF
option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages (auto: Debug=ON, Release=OFF)" OFF)
message(STATUS "BEAST_ENHANCED_LOGGING option: ${BEAST_ENHANCED_LOGGING}")
if(thread_safety_analysis)
add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DRIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS)

View File

@@ -192,7 +192,7 @@ ENV PATH=/usr/local/bin:$PATH
# Configure ccache and Conan 2
# NOTE: Using echo commands instead of heredocs because heredocs in Docker RUN commands are finnicky
RUN /hbb_exe/activate-exec bash -c "ccache -M 10G && \
RUN /hbb_exe/activate-exec bash -c "ccache -M 100G && \
ccache -o cache_dir=/cache/ccache && \
ccache -o compiler_check=content && \
mkdir -p ~/.conan2 /cache/conan2 /cache/conan2_download /cache/conan2_sources && \

View File

@@ -471,6 +471,10 @@ ManifestCache::applyManifest(Manifest m)
auto masterKey = m.masterKey;
map_.emplace(std::move(masterKey), std::move(m));
// Increment sequence to invalidate cached manifest messages
seq_++;
return ManifestDisposition::accepted;
}

View File

@@ -360,7 +360,8 @@ Logs::format(
if (!partition.empty())
{
#ifdef BEAST_ENHANCED_LOGGING
output += beast::detail::get_log_highlight_color();
if (beast::detail::should_log_use_colors())
output += beast::detail::get_log_highlight_color();
#endif
output += partition + ":";
}
@@ -392,7 +393,8 @@ Logs::format(
}
#ifdef BEAST_ENHANCED_LOGGING
output += "\033[0m";
if (beast::detail::should_log_use_colors())
output += "\033[0m";
#endif
output += message;

View File

@@ -41,6 +41,14 @@ get_log_highlight_color();
constexpr const char*
strip_source_root(const char* file)
{
// Handle relative paths from build/ directory (common with ccache)
// e.g., "../src/ripple/..." -> "ripple/..."
if (file && file[0] == '.' && file[1] == '.' && file[2] == '/' &&
file[3] == 's' && file[4] == 'r' && file[5] == 'c' && file[6] == '/')
{
return file + 7; // skip "../src/"
}
#ifdef SOURCE_ROOT_PATH
constexpr const char* sourceRoot = SOURCE_ROOT_PATH;
constexpr auto strlen_constexpr = [](const char* s) constexpr

View File

@@ -17,6 +17,8 @@
*/
//==============================================================================
#ifdef BEAST_ENHANCED_LOGGING
#include <ripple/beast/utility/EnhancedLogging.h>
#include <cstdlib>
#include <cstring>
@@ -112,3 +114,5 @@ log_write_location_string(std::ostream& os, const char* file, int line)
} // namespace detail
} // namespace beast
#endif // BEAST_ENHANCED_LOGGING

View File

@@ -155,14 +155,43 @@ Journal::ScopedStream::~ScopedStream()
#ifdef BEAST_ENHANCED_LOGGING
// Add suffix if location is enabled
if (file_ && detail::should_show_location() && !s.empty() && s != "\n")
if (file_ && detail::should_show_location() && !s.empty())
{
std::ostringstream combined;
combined << s;
if (!s.empty() && s.back() != ' ')
combined << " ";
detail::log_write_location_string(combined, file_, line_);
s = combined.str();
// Single optimized scan from the end
size_t const lastNonWhitespace = s.find_last_not_of(" \n\r\t");
// Skip if message is only whitespace (e.g., just "\n" or " \n\n")
if (lastNonWhitespace != std::string::npos)
{
// Count only the trailing newlines (tiny range)
size_t trailingNewlines = 0;
for (size_t i = lastNonWhitespace + 1; i < s.length(); ++i)
{
if (s[i] == '\n')
++trailingNewlines;
}
// Build location string once
std::ostringstream locStream;
detail::log_write_location_string(locStream, file_, line_);
std::string const location = locStream.str();
// Pre-allocate exact size → zero reallocations
size_t const finalSize = lastNonWhitespace + 1 + 1 +
location.length() + trailingNewlines;
std::string result;
result.reserve(finalSize);
// Direct string ops (no ostringstream overhead)
result.append(s, 0, lastNonWhitespace + 1);
result.push_back(' ');
result += location;
if (trailingNewlines > 0)
result.append(trailingNewlines, '\n');
s = std::move(result); // Move, no copy
}
}
#endif

View File

@@ -451,6 +451,12 @@ public:
if (mShutdown)
{
JLOG(j_.trace()) << "Complete.";
mResponse.commit(bytes_transferred);
std::string strBody{
{std::istreambuf_iterator<char>(&mResponse)},
std::istreambuf_iterator<char>()};
invokeComplete(ecResult, mStatus, mBody + strBody);
}
else
{

View File

@@ -1805,6 +1805,7 @@ rpcClient(
}
{
//@@start blocking-request
boost::asio::io_service isService;
RPCCall::fromNetwork(
isService,
@@ -1828,6 +1829,7 @@ rpcClient(
headers);
isService.run(); // This blocks until there are no more
// outstanding async calls.
//@@end blocking-request
}
if (jvOutput.isMember("result"))
{
@@ -1946,6 +1948,7 @@ fromNetwork(
// HTTP call?
auto constexpr RPC_NOTIFY = 30s;
//@@start async-request
HTTPClient::request(
bSSL,
io_service,
@@ -1970,6 +1973,7 @@ fromNetwork(
std::placeholders::_3,
j),
j);
//@@end async-request
}
} // namespace RPCCall

View File

@@ -78,14 +78,12 @@ public:
{
std::lock_guard sl(mLock);
// Wietse: we're not going to limit this, this is admin-port only, scale
// accordingly Dropping events just like this results in inconsistent
// data on the receiving end if (mDeque.size() >= eventQueueMax)
// {
// // Drop the previous event.
// JLOG(j_.warn()) << "RPCCall::fromNetwork drop";
// mDeque.pop_back();
// }
if (mDeque.size() >= maxQueueSize)
{
JLOG(j_.warn()) << "RPCCall::fromNetwork drop: queue full ("
<< mDeque.size() << "), endpoint=" << mIp;
return;
}
auto jm = broadcast ? j_.debug() : j_.info();
JLOG(jm) << "RPCCall::fromNetwork push: " << jvObj;
@@ -121,48 +119,49 @@ public:
}
private:
// XXX Could probably create a bunch of send jobs in a single get of the
// lock.
// Maximum concurrent HTTP deliveries per batch. Bounds file
// descriptor usage while still allowing parallel delivery to
// capable endpoints. With a 1024 FD process limit shared across
// peers, clients, and the node store, 32 per subscriber is a
// meaningful but survivable chunk even with multiple subscribers.
static constexpr int maxInFlight = 32;
// Maximum queued events before dropping. At ~5-10KB per event
// this is ~80-160MB worst case — trivial memory-wise. The real
// purpose is detecting a hopelessly behind endpoint: at 100+
// events per ledger (every ~4s), 16384 events is ~10 minutes
// of buffer. Consumers detect gaps via the seq field.
static constexpr std::size_t maxQueueSize = 16384;
void
sendThread()
{
Json::Value jvEvent;
bool bSend;
do
{
// Local io_service per batch — cheap to create (just an
// internal event queue, no threads, no syscalls). Using a
// local rather than the app's m_io_service is what makes
// .run() block until exactly this batch completes, giving
// us flow control. Same pattern used by rpcClient() in
// RPCCall.cpp for CLI commands.
boost::asio::io_service io_service;
int dispatched = 0;
{
// Obtain the lock to manipulate the queue and change sending.
std::lock_guard sl(mLock);
if (mDeque.empty())
{
mSending = false;
bSend = false;
}
else
while (!mDeque.empty() && dispatched < maxInFlight)
{
auto const [seq, env] = mDeque.front();
mDeque.pop_front();
jvEvent = env;
Json::Value jvEvent = env;
jvEvent["seq"] = seq;
bSend = true;
}
}
// Send outside of the lock.
if (bSend)
{
// XXX Might not need this in a try.
try
{
JLOG(j_.info()) << "RPCCall::fromNetwork: " << mIp;
RPCCall::fromNetwork(
m_io_service,
io_service,
mIp,
mPort,
mUsername,
@@ -173,20 +172,38 @@ private:
mSSL,
true,
logs_);
++dispatched;
}
if (dispatched == 0)
mSending = false;
}
bSend = dispatched > 0;
if (bSend)
{
try
{
JLOG(j_.info())
<< "RPCCall::fromNetwork: " << mIp << " dispatching "
<< dispatched << " events";
io_service.run();
}
catch (const std::exception& e)
{
JLOG(j_.info())
JLOG(j_.warn())
<< "RPCCall::fromNetwork exception: " << e.what();
}
catch (...)
{
JLOG(j_.warn()) << "RPCCall::fromNetwork unknown exception";
}
}
} while (bSend);
}
private:
// Wietse: we're not going to limit this, this is admin-port only, scale
// accordingly enum { eventQueueMax = 32 };
boost::asio::io_service& m_io_service;
JobQueue& m_jobQueue;

View File

@@ -484,44 +484,61 @@ OverlayImpl::start()
m_peerFinder->setConfig(config);
m_peerFinder->start();
auto addIps = [&](std::vector<std::string> bootstrapIps) -> void {
auto addIps = [this](std::vector<std::string> ips, bool fixed) {
beast::Journal const& j = app_.journal("Overlay");
for (auto& ip : bootstrapIps)
for (auto& ip : ips)
{
std::size_t pos = ip.find('#');
if (pos != std::string::npos)
ip.erase(pos);
JLOG(j.trace()) << "Found boostrap IP: " << ip;
JLOG(j.trace())
<< "Found " << (fixed ? "fixed" : "bootstrap") << " IP: " << ip;
}
m_resolver.resolve(
bootstrapIps,
[&](std::string const& name,
ips,
[this, fixed](
std::string const& name,
std::vector<beast::IP::Endpoint> const& addresses) {
std::vector<std::string> ips;
ips.reserve(addresses.size());
beast::Journal const& j = app_.journal("Overlay");
std::string const base("config: ");
std::vector<beast::IP::Endpoint> eps;
eps.reserve(addresses.size());
for (auto const& addr : addresses)
{
std::string addrStr = addr.port() == 0
? to_string(addr.at_port(DEFAULT_PEER_PORT))
: to_string(addr);
JLOG(j.trace()) << "Parsed boostrap IP: " << addrStr;
ips.push_back(addrStr);
auto ep = addr.port() == 0 ? addr.at_port(DEFAULT_PEER_PORT)
: addr;
JLOG(j.trace())
<< "Parsed " << (fixed ? "fixed" : "bootstrap")
<< " IP: " << ep;
eps.push_back(ep);
}
std::string const base("config: ");
if (!ips.empty())
m_peerFinder->addFallbackStrings(base + name, ips);
if (eps.empty())
return;
if (fixed)
{
m_peerFinder->addFixedPeer(base + name, eps);
}
else
{
std::vector<std::string> strs;
strs.reserve(eps.size());
for (auto const& ep : eps)
strs.push_back(to_string(ep));
m_peerFinder->addFallbackStrings(base + name, strs);
}
});
};
if (!app_.config().IPS.empty())
addIps(app_.config().IPS);
addIps(app_.config().IPS, false);
if (!app_.config().IPS_FIXED.empty())
addIps(app_.config().IPS_FIXED);
addIps(app_.config().IPS_FIXED, true);
auto const timer = std::make_shared<Timer>(*this);
std::lock_guard lock(mutex_);

View File

@@ -0,0 +1,812 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2024 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/basics/ByteUtilities.h>
#include <ripple/net/HTTPClient.h>
#include <test/jtx.h>
#include <boost/asio.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <atomic>
#include <chrono>
#include <memory>
#include <string>
#include <thread>
namespace ripple {
namespace test {
// Minimal TCP server for testing HTTPClient behavior.
// Accepts connections and sends configurable HTTP responses.
// In-process TCP server used to observe HTTPClient's socket lifecycle.
// Binds to 127.0.0.1 on an OS-assigned ephemeral port and serves minimal
// HTTP/1.0 responses. Behavior is configurable per test: status code,
// artificial delay, omitting Content-Length, closing immediately after
// accept, or accepting but never responding. All async handlers run on a
// single dedicated thread driving ios_; metrics are atomics so the test
// thread can read them concurrently.
class MockHTTPServer
{
    // Service driven by thread_; every accept/read/write handler runs here.
    boost::asio::io_service ios_;
    // Keeps ios_.run() from returning while no async operation is pending.
    std::unique_ptr<boost::asio::io_service::work> work_;
    boost::asio::ip::tcp::acceptor acceptor_;
    std::thread thread_;
    // Cleared in the destructor so a late accept completion does nothing.
    std::atomic<bool> running_{true};
    // Actual port chosen by the OS (we bind to port 0).
    unsigned short port_;
    // Metrics
    std::atomic<int> activeConnections_{0};  // sockets currently open
    std::atomic<int> peakConnections_{0};    // high-water mark of the above
    std::atomic<int> totalAccepted_{0};      // lifetime accepted count
    // Configurable behavior (set before firing requests)
    std::atomic<int> statusCode_{200};       // HTTP status to respond with
    std::atomic<int> delayMs_{0};            // delay before sending response
    std::atomic<bool> sendResponse_{true};   // false: read request, never reply
    std::atomic<bool> closeImmediately_{false};  // close right after accept
    std::atomic<bool> noContentLength_{false};   // omit Content-Length header

public:
    // Binds the acceptor, records the assigned port, queues the first
    // async accept, then starts the thread that runs the io_service.
    MockHTTPServer()
        : work_(std::make_unique<boost::asio::io_service::work>(ios_))
        , acceptor_(
              ios_,
              boost::asio::ip::tcp::endpoint(
                  boost::asio::ip::address::from_string("127.0.0.1"),
                  0))
    {
        port_ = acceptor_.local_endpoint().port();
        accept();
        thread_ = std::thread([this] { ios_.run(); });
    }

    // Stops accepting, releases the work guard, stops the service, and
    // joins the handler thread. Safe even if handlers are mid-flight.
    ~MockHTTPServer()
    {
        running_ = false;
        work_.reset();  // Allow io_service to stop.
        boost::system::error_code ec;
        acceptor_.close(ec);
        ios_.stop();
        if (thread_.joinable())
            thread_.join();
    }

    // Port the server is listening on (assigned by the OS at bind time).
    unsigned short
    port() const
    {
        return port_;
    }

    // Number of connections currently open on the server side.
    int
    activeConnectionCount() const
    {
        return activeConnections_;
    }

    // Highest number of simultaneously open connections observed.
    int
    peakConnectionCount() const
    {
        return peakConnections_;
    }

    // Total connections ever accepted.
    int
    totalAcceptedCount() const
    {
        return totalAccepted_;
    }

    // HTTP status code to use in subsequent responses.
    void
    setStatus(int code)
    {
        statusCode_ = code;
    }

    // Delay (milliseconds) before sending each response.
    void
    setDelay(int ms)
    {
        delayMs_ = ms;
    }

    // When false, the server reads the request but never responds —
    // simulates an accepted-but-stalled endpoint.
    void
    setSendResponse(bool send)
    {
        sendResponse_ = send;
    }

    // When true, the server closes each connection immediately after
    // accepting it, before reading anything.
    void
    setCloseImmediately(bool close)
    {
        closeImmediately_ = close;
    }

    // When true, responses omit the Content-Length header, forcing the
    // client onto its read-until-EOF path.
    void
    setNoContentLength(bool noContentLength)
    {
        noContentLength_ = noContentLength;
    }

private:
    // Queues one async accept; on completion updates the counters and
    // re-queues itself so the server keeps accepting until shut down.
    void
    accept()
    {
        auto sock = std::make_shared<boost::asio::ip::tcp::socket>(ios_);
        acceptor_.async_accept(*sock, [this, sock](auto ec) {
            if (!ec && running_)
            {
                ++totalAccepted_;
                int current = ++activeConnections_;
                // CAS loop: raise the peak only if we're above it; retry
                // if another handler updated it concurrently.
                int prev = peakConnections_.load();
                while (current > prev &&
                       !peakConnections_.compare_exchange_weak(prev, current))
                    ;
                handleConnection(sock);
                accept();
            }
        });
    }

    // Drives one accepted connection according to the configured flags:
    // close immediately, read headers then stall, or read headers and
    // respond (optionally after a delay).
    void
    handleConnection(std::shared_ptr<boost::asio::ip::tcp::socket> sock)
    {
        if (closeImmediately_)
        {
            boost::system::error_code ec;
            sock->shutdown(boost::asio::ip::tcp::socket::shutdown_both, ec);
            sock->close(ec);
            --activeConnections_;
            return;
        }
        auto buf = std::make_shared<boost::asio::streambuf>();
        // Read through the end of the request headers before acting.
        boost::asio::async_read_until(
            *sock, *buf, "\r\n\r\n", [this, sock, buf](auto ec, size_t) {
                if (ec)
                {
                    --activeConnections_;
                    return;
                }
                if (!sendResponse_)
                {
                    // Hold connection open without responding.
                    // The socket shared_ptr prevents cleanup.
                    // This simulates a server that accepts but
                    // never responds (e.g., overloaded).
                    return;
                }
                auto delay = delayMs_.load();
                if (delay > 0)
                {
                    // Timer keeps itself (and the socket) alive via the
                    // lambda capture until it fires.
                    auto timer =
                        std::make_shared<boost::asio::steady_timer>(ios_);
                    timer->expires_from_now(std::chrono::milliseconds(delay));
                    timer->async_wait(
                        [this, sock, timer](auto) { sendHTTPResponse(sock); });
                }
                else
                {
                    sendHTTPResponse(sock);
                }
            });
    }

    // Writes an HTTP/1.0 response with the configured status code and a
    // fixed "{}" body, then shuts down and closes the socket. The
    // Content-Length header is omitted when noContentLength_ is set.
    void
    sendHTTPResponse(std::shared_ptr<boost::asio::ip::tcp::socket> sock)
    {
        auto body = std::string("{}");
        std::string header =
            "HTTP/1.0 " + std::to_string(statusCode_.load()) + " OK\r\n";
        if (!noContentLength_)
            header += "Content-Length: " + std::to_string(body.size()) + "\r\n";
        header += "\r\n";
        // shared_ptr keeps the buffer alive for the duration of the write.
        auto response = std::make_shared<std::string>(header + body);
        boost::asio::async_write(
            *sock,
            boost::asio::buffer(*response),
            [this, sock, response](auto, size_t) {
                boost::system::error_code ec;
                sock->shutdown(boost::asio::ip::tcp::socket::shutdown_both, ec);
                sock->close(ec);
                --activeConnections_;
            });
    }
};
//------------------------------------------------------------------------------
class HTTPClient_test : public beast::unit_test::suite
{
// Helper: fire an HTTP request and track completion via atomic counter.
// Fire a single plain-HTTP POST at host:port via HTTPClient and bump
// `completed` when the client invokes its completion callback, however
// the request ends (success, error, or timeout).
void
fireRequest(
    boost::asio::io_service& ios,
    std::string const& host,
    unsigned short port,
    std::atomic<int>& completed,
    beast::Journal& j,
    std::chrono::seconds timeout = std::chrono::seconds{5})
{
    // Writes a minimal HTTP/1.0 POST with a two-byte JSON body.
    auto const buildRequest = [](boost::asio::streambuf& sb,
                                 std::string const& strHost) {
        std::ostream os(&sb);
        os << "POST / HTTP/1.0\r\n"
           << "Host: " << strHost << "\r\n"
           << "Content-Type: application/json\r\n"
           << "Content-Length: 2\r\n"
           << "\r\n"
           << "{}";
    };
    // Counts completions; returning false tells the client not to retry.
    auto const onComplete = [&completed](
                                const boost::system::error_code&,
                                int,
                                std::string const&) {
        ++completed;
        return false;
    };
    HTTPClient::request(
        false,  // no SSL
        ios,
        host,
        port,
        buildRequest,
        megabytes(1),
        timeout,
        onComplete,
        j);
}
//--------------------------------------------------------------------------
void
testCleanupAfterSuccess()
{
testcase("Socket cleanup after successful response");
// After a successful HTTP request completes, the
// HTTPClientImp should be destroyed and its socket
// closed promptly — not held until the deadline fires.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(200);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
fireRequest(ios, "127.0.0.1", server.port(), completed, j);
ios.run();
}
BEAST_EXPECT(completed == 1);
BEAST_EXPECT(server.totalAcceptedCount() == 1);
// After io_service.run() returns, the server should
// see zero active connections — socket was released.
BEAST_EXPECT(server.activeConnectionCount() == 0);
}
void
testCleanupAfter500()
{
testcase("Socket cleanup after HTTP 500");
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(500);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
fireRequest(ios, "127.0.0.1", server.port(), completed, j);
ios.run();
}
BEAST_EXPECT(completed == 1);
BEAST_EXPECT(server.activeConnectionCount() == 0);
}
void
testCleanupAfterConnectionRefused()
{
testcase("Socket cleanup after connection refused");
using namespace jtx;
Env env{*this};
// No server listening — connection refused.
// Use a port that's very unlikely to be in use.
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
fireRequest(ios, "127.0.0.1", 19999, completed, j);
ios.run();
}
// Callback should still be invoked (with error).
BEAST_EXPECT(completed == 1);
}
void
testCleanupAfterTimeout()
{
testcase("Socket cleanup after timeout");
// Server accepts but never responds. HTTPClient should
// time out, clean up, and invoke the callback.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setSendResponse(false); // accept, read, but never respond
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
// Short timeout to keep the test fast.
fireRequest(
ios,
"127.0.0.1",
server.port(),
completed,
j,
std::chrono::seconds{2});
ios.run();
}
// Callback must be invoked even on timeout.
BEAST_EXPECT(completed == 1);
}
void
testCleanupAfterServerCloseBeforeResponse()
{
testcase("Socket cleanup after server closes before response");
// Server accepts the connection then immediately closes
// it without sending anything.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setCloseImmediately(true);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
fireRequest(ios, "127.0.0.1", server.port(), completed, j);
ios.run();
}
BEAST_EXPECT(completed == 1);
BEAST_EXPECT(server.activeConnectionCount() == 0);
}
void
testEOFCompletionCallsCallback()
{
testcase("EOF completion invokes callback (handleData bug)");
// HTTPClientImp::handleData has a code path where
// mShutdown == eof results in logging "Complete." but
// never calling invokeComplete(). This means:
// - The completion callback is never invoked
// - The deadline timer is never cancelled
// - The socket is held open until the 30s deadline
//
// This test verifies the callback IS invoked after an
// EOF response. If this test fails (completed == 0 after
// ios.run()), the handleData EOF bug is confirmed.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(200);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
fireRequest(
ios,
"127.0.0.1",
server.port(),
completed,
j,
std::chrono::seconds{3});
ios.run();
}
// If handleData EOF path doesn't call invokeComplete,
// the callback won't fire until the deadline (3s) expires,
// and even then handleDeadline doesn't invoke mComplete.
// The io_service.run() will still return (deadline fires,
// handleShutdown runs, all handlers done), but completed
// will be 0.
if (completed != 1)
{
log << " BUG CONFIRMED: handleData EOF path does not"
<< " call invokeComplete(). Callback was not invoked."
<< " Socket held open until deadline." << std::endl;
}
BEAST_EXPECT(completed == 1);
}
void
testConcurrentRequestCleanup()
{
testcase("Concurrent requests all clean up");
// Fire N requests at once on the same io_service.
// All should complete and release their sockets.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(200);
static constexpr int N = 50;
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
for (int i = 0; i < N; ++i)
{
fireRequest(ios, "127.0.0.1", server.port(), completed, j);
}
ios.run();
}
BEAST_EXPECT(completed == N);
// Brief sleep to let server-side shutdown complete.
std::this_thread::sleep_for(std::chrono::milliseconds(100));
BEAST_EXPECT(server.activeConnectionCount() == 0);
log << " Completed: " << completed
<< ", Peak concurrent: " << server.peakConnectionCount()
<< ", Active after: " << server.activeConnectionCount()
<< std::endl;
}
void
testConcurrent500Cleanup()
{
testcase("Concurrent 500 requests all clean up");
// Fire N requests that all get 500 responses. Verify
// all sockets are released and no FDs leak.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(500);
static constexpr int N = 50;
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
for (int i = 0; i < N; ++i)
{
fireRequest(ios, "127.0.0.1", server.port(), completed, j);
}
ios.run();
}
BEAST_EXPECT(completed == N);
std::this_thread::sleep_for(std::chrono::milliseconds(100));
BEAST_EXPECT(server.activeConnectionCount() == 0);
}
void
testEOFWithoutContentLength()
{
testcase("EOF without Content-Length (handleData EOF path)");
// When a server sends a response WITHOUT Content-Length,
// HTTPClientImp reads up to maxResponseSize. The server
// closes the connection, causing EOF in handleData.
//
// In handleData, the EOF path (mShutdown == eof) logs
// "Complete." but does NOT call invokeComplete(). This
// means:
// - mComplete (callback) is never invoked
// - deadline timer is never cancelled
// - socket + object held alive until deadline fires
//
// This test uses a SHORT deadline to keep it fast. If
// the callback IS invoked, ios.run() returns quickly.
// If NOT, ios.run() blocks until the deadline (2s).
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(200);
server.setNoContentLength(true);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
auto start = std::chrono::steady_clock::now();
{
boost::asio::io_service ios;
fireRequest(
ios,
"127.0.0.1",
server.port(),
completed,
j,
std::chrono::seconds{2});
ios.run();
}
auto elapsed = std::chrono::steady_clock::now() - start;
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed)
.count();
if (completed == 0)
{
log << " BUG CONFIRMED: handleData EOF path does not"
<< " call invokeComplete(). Callback never invoked."
<< " io_service.run() blocked for " << ms << "ms"
<< " (deadline timeout)." << std::endl;
}
else
{
log << " Callback invoked in " << ms << "ms." << std::endl;
}
// This WILL fail if the EOF bug exists — the callback
// is only invoked via the deadline timeout path, which
// does NOT call mComplete.
BEAST_EXPECT(completed == 1);
}
void
testPersistentIOServiceCleanup()
{
testcase("Cleanup on persistent io_service (no destructor mask)");
// Previous tests destroy the io_service after run(),
// which releases all pending handlers' shared_ptrs.
// This masks leaks. Here we use a PERSISTENT io_service
// (with work guard, running on its own thread) and check
// that HTTPClientImp objects are destroyed WITHOUT relying
// on io_service destruction.
//
// We track the object's lifetime via the completion
// callback — if it fires, the async chain completed
// normally. If it doesn't fire within a reasonable time
// but the io_service is still running, something is stuck.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(200);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
// Persistent io_service — stays alive the whole test.
boost::asio::io_service ios;
auto work = std::make_unique<boost::asio::io_service::work>(ios);
std::thread runner([&ios] { ios.run(); });
// Fire request on the persistent io_service.
HTTPClient::request(
false,
ios,
"127.0.0.1",
server.port(),
[](boost::asio::streambuf& sb, std::string const& strHost) {
std::ostream os(&sb);
os << "POST / HTTP/1.0\r\n"
<< "Host: " << strHost << "\r\n"
<< "Content-Type: application/json\r\n"
<< "Content-Length: 2\r\n"
<< "\r\n"
<< "{}";
},
megabytes(1),
std::chrono::seconds{5},
[&completed](
const boost::system::error_code&, int, std::string const&) {
++completed;
return false;
},
j);
// Wait for completion without destroying io_service.
auto deadline =
std::chrono::steady_clock::now() + std::chrono::seconds{5};
while (completed == 0 && std::chrono::steady_clock::now() < deadline)
{
std::this_thread::sleep_for(std::chrono::milliseconds(50));
}
BEAST_EXPECT(completed == 1);
// Give server-side shutdown a moment.
std::this_thread::sleep_for(std::chrono::milliseconds(100));
BEAST_EXPECT(server.activeConnectionCount() == 0);
if (server.activeConnectionCount() != 0)
{
log << " BUG: Socket still open on persistent"
<< " io_service. FD leaked." << std::endl;
}
// Clean shutdown.
work.reset();
ios.stop();
runner.join();
}
void
testPersistentIOService500Cleanup()
{
testcase("500 cleanup on persistent io_service");
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(500);
static constexpr int N = 20;
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
boost::asio::io_service ios;
auto work = std::make_unique<boost::asio::io_service::work>(ios);
std::thread runner([&ios] { ios.run(); });
for (int i = 0; i < N; ++i)
{
HTTPClient::request(
false,
ios,
"127.0.0.1",
server.port(),
[](boost::asio::streambuf& sb, std::string const& strHost) {
std::ostream os(&sb);
os << "POST / HTTP/1.0\r\n"
<< "Host: " << strHost << "\r\n"
<< "Content-Type: application/json\r\n"
<< "Content-Length: 2\r\n"
<< "\r\n"
<< "{}";
},
megabytes(1),
std::chrono::seconds{5},
[&completed](
const boost::system::error_code&, int, std::string const&) {
++completed;
return false;
},
j);
}
auto deadline =
std::chrono::steady_clock::now() + std::chrono::seconds{10};
while (completed < N && std::chrono::steady_clock::now() < deadline)
{
std::this_thread::sleep_for(std::chrono::milliseconds(50));
}
BEAST_EXPECT(completed == N);
std::this_thread::sleep_for(std::chrono::milliseconds(100));
BEAST_EXPECT(server.activeConnectionCount() == 0);
log << " Completed: " << completed << "/" << N
<< ", Active connections after: " << server.activeConnectionCount()
<< std::endl;
work.reset();
ios.stop();
runner.join();
}
void
testGetSelfReferenceCleanup()
{
testcase("get() shared_from_this cycle releases");
// HTTPClientImp::get() binds shared_from_this() into
// mBuild via makeGet. This creates a reference cycle:
// object -> mBuild -> shared_ptr<object>
// The object can only be destroyed if mBuild is cleared.
// Since mBuild is never explicitly cleared, this may be
// a permanent FD leak.
//
// This test fires a GET request and checks whether the
// HTTPClientImp is destroyed (and socket closed) after
// completion.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(200);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
HTTPClient::get(
false, // no SSL
ios,
"127.0.0.1",
server.port(),
"/test",
megabytes(1),
std::chrono::seconds{5},
[&completed](
const boost::system::error_code&, int, std::string const&) {
++completed;
return false;
},
j);
ios.run();
}
BEAST_EXPECT(completed == 1);
std::this_thread::sleep_for(std::chrono::milliseconds(100));
// If the get() self-reference cycle leaks, the server
// will still show an active connection here (the socket
// in the leaked HTTPClientImp is never closed).
if (server.activeConnectionCount() != 0)
{
log << " BUG CONFIRMED: get() self-reference cycle"
<< " prevents HTTPClientImp destruction."
<< " Socket FD leaked." << std::endl;
}
BEAST_EXPECT(server.activeConnectionCount() == 0);
}
public:
void
run() override
{
testCleanupAfterSuccess();
testCleanupAfter500();
testCleanupAfterConnectionRefused();
testCleanupAfterTimeout();
testCleanupAfterServerCloseBeforeResponse();
testEOFCompletionCallsCallback();
testConcurrentRequestCleanup();
testConcurrent500Cleanup();
testEOFWithoutContentLength();
testPersistentIOServiceCleanup();
testPersistentIOService500Cleanup();
testGetSelfReferenceCleanup();
}
};
// Register the suite with the beast unit-test framework as
// ripple.net.HTTPClient (library.package.name).
BEAST_DEFINE_TESTSUITE(HTTPClient, net, ripple);
} // namespace test
} // namespace ripple