Compare commits

...

3 Commits

Author SHA1 Message Date
Nicholas Dudfield
a8388e48a4 fix: call invokeComplete on EOF in HTTPClient handleData
When an HTTP response has no Content-Length header, HTTPClient reads
until EOF. The EOF path in handleData logged "Complete." but never
called invokeComplete(), leaving the socket held open for the full
30s deadline timeout and the completion callback never invoked.

This is the likely root cause of webhook delivery permanently stalling
after repeated 500 errors — many web frameworks omit Content-Length on
error responses, triggering this path. Each leaked socket holds an FD
for 30s, eventually exhausting the process FD budget.

Includes HTTPClient_test with 12 test cases covering resource cleanup
across success, error, timeout, connection-refused, concurrent request,
and EOF-without-Content-Length scenarios.
2026-02-10 07:33:31 +07:00
Nicholas Dudfield
49908096d5 fix: bound subscription webhook delivery concurrency and queue size
fromNetwork() is async — it posts handlers to the io_service and
returns immediately. The original sendThread() loop fires all queued
events as concurrent HTTP connections at once. Under sustained load
with a slow/failing endpoint, connections accumulate (each held up to
30s by RPC_NOTIFY timeout), exhausting file descriptors and breaking
all network I/O for the entire process.

Fix: use a local io_service per batch with .run() to block until the
batch completes (same pattern as rpcClient() in RPCCall.cpp). This
bounds concurrent connections to maxInFlight (32) per subscriber while
still allowing parallel delivery.

Also add a queue cap (maxQueueSize = 16384, ~80-160MB) so a hopelessly
behind endpoint doesn't grow the deque indefinitely. Consumers detect
gaps via the existing seq field.

Ref: XRPLF/rippled#6341
2026-02-09 18:31:25 +07:00
tequ
12e1afb694 Enhance dependency export process in GitHub Action to check for existing exports before executing. (#660) 2026-01-28 13:14:40 +10:00
6 changed files with 888 additions and 41 deletions

View File

@@ -134,10 +134,17 @@ runs:
- name: Export custom recipes
shell: bash
run: |
conan export external/snappy --version 1.1.10 --user xahaud --channel stable
conan export external/soci --version 4.0.3 --user xahaud --channel stable
conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable
# Export snappy if not already exported
conan list snappy/1.1.10@xahaud/stable 2>/dev/null | (grep -q "not found" && exit 1 || exit 0) || \
conan export external/snappy --version 1.1.10 --user xahaud --channel stable
# Export soci if not already exported
conan list soci/4.0.3@xahaud/stable 2>/dev/null | (grep -q "not found" && exit 1 || exit 0) || \
conan export external/soci --version 4.0.3 --user xahaud --channel stable
# Export wasmedge if not already exported
conan list wasmedge/0.11.2@xahaud/stable 2>/dev/null | (grep -q "not found" && exit 1 || exit 0) || \
conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable
- name: Install dependencies
shell: bash
env:

View File

@@ -957,6 +957,7 @@ if (tests)
subdir: net
#]===============================]
src/test/net/DatabaseDownloader_test.cpp
src/test/net/HTTPClient_test.cpp
#[===============================[
test sources:
subdir: nodestore

View File

@@ -451,6 +451,12 @@ public:
if (mShutdown)
{
JLOG(j_.trace()) << "Complete.";
mResponse.commit(bytes_transferred);
std::string strBody{
{std::istreambuf_iterator<char>(&mResponse)},
std::istreambuf_iterator<char>()};
invokeComplete(ecResult, mStatus, mBody + strBody);
}
else
{

View File

@@ -1805,6 +1805,7 @@ rpcClient(
}
{
//@@start blocking-request
boost::asio::io_service isService;
RPCCall::fromNetwork(
isService,
@@ -1828,6 +1829,7 @@ rpcClient(
headers);
isService.run(); // This blocks until there are no more
// outstanding async calls.
//@@end blocking-request
}
if (jvOutput.isMember("result"))
{
@@ -1946,6 +1948,7 @@ fromNetwork(
// HTTP call?
auto constexpr RPC_NOTIFY = 30s;
//@@start async-request
HTTPClient::request(
bSSL,
io_service,
@@ -1970,6 +1973,7 @@ fromNetwork(
std::placeholders::_3,
j),
j);
//@@end async-request
}
} // namespace RPCCall

View File

@@ -78,14 +78,12 @@ public:
{
std::lock_guard sl(mLock);
// Wietse: we're not going to limit this, this is admin-port only, scale
// accordingly Dropping events just like this results in inconsistent
// data on the receiving end if (mDeque.size() >= eventQueueMax)
// {
// // Drop the previous event.
// JLOG(j_.warn()) << "RPCCall::fromNetwork drop";
// mDeque.pop_back();
// }
if (mDeque.size() >= maxQueueSize)
{
JLOG(j_.warn()) << "RPCCall::fromNetwork drop: queue full ("
<< mDeque.size() << "), endpoint=" << mIp;
return;
}
auto jm = broadcast ? j_.debug() : j_.info();
JLOG(jm) << "RPCCall::fromNetwork push: " << jvObj;
@@ -121,48 +119,49 @@ public:
}
private:
// XXX Could probably create a bunch of send jobs in a single get of the
// lock.
// Maximum concurrent HTTP deliveries per batch. Bounds file
// descriptor usage while still allowing parallel delivery to
// capable endpoints. With a 1024 FD process limit shared across
// peers, clients, and the node store, 32 per subscriber is a
// meaningful but survivable chunk even with multiple subscribers.
static constexpr int maxInFlight = 32;
// Maximum queued events before dropping. At ~5-10KB per event
// this is ~80-160MB worst case — trivial memory-wise. The real
// purpose is detecting a hopelessly behind endpoint: at 100+
// events per ledger (every ~4s), 16384 events is ~10 minutes
// of buffer. Consumers detect gaps via the seq field.
static constexpr std::size_t maxQueueSize = 16384;
void
sendThread()
{
Json::Value jvEvent;
bool bSend;
do
{
// Local io_service per batch — cheap to create (just an
// internal event queue, no threads, no syscalls). Using a
// local rather than the app's m_io_service is what makes
// .run() block until exactly this batch completes, giving
// us flow control. Same pattern used by rpcClient() in
// RPCCall.cpp for CLI commands.
boost::asio::io_service io_service;
int dispatched = 0;
{
// Obtain the lock to manipulate the queue and change sending.
std::lock_guard sl(mLock);
if (mDeque.empty())
{
mSending = false;
bSend = false;
}
else
while (!mDeque.empty() && dispatched < maxInFlight)
{
auto const [seq, env] = mDeque.front();
mDeque.pop_front();
jvEvent = env;
Json::Value jvEvent = env;
jvEvent["seq"] = seq;
bSend = true;
}
}
// Send outside of the lock.
if (bSend)
{
// XXX Might not need this in a try.
try
{
JLOG(j_.info()) << "RPCCall::fromNetwork: " << mIp;
RPCCall::fromNetwork(
m_io_service,
io_service,
mIp,
mPort,
mUsername,
@@ -173,20 +172,38 @@ private:
mSSL,
true,
logs_);
++dispatched;
}
if (dispatched == 0)
mSending = false;
}
bSend = dispatched > 0;
if (bSend)
{
try
{
JLOG(j_.info())
<< "RPCCall::fromNetwork: " << mIp << " dispatching "
<< dispatched << " events";
io_service.run();
}
catch (const std::exception& e)
{
JLOG(j_.info())
JLOG(j_.warn())
<< "RPCCall::fromNetwork exception: " << e.what();
}
catch (...)
{
JLOG(j_.warn()) << "RPCCall::fromNetwork unknown exception";
}
}
} while (bSend);
}
private:
// Wietse: we're not going to limit this, this is admin-port only, scale
// accordingly enum { eventQueueMax = 32 };
boost::asio::io_service& m_io_service;
JobQueue& m_jobQueue;

View File

@@ -0,0 +1,812 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2024 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/basics/ByteUtilities.h>
#include <ripple/net/HTTPClient.h>
#include <test/jtx.h>
#include <boost/asio.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <atomic>
#include <chrono>
#include <memory>
#include <string>
#include <thread>
namespace ripple {
namespace test {
// Minimal TCP server for testing HTTPClient behavior.
// Accepts connections and sends configurable HTTP responses.
class MockHTTPServer
{
boost::asio::io_service ios_;
std::unique_ptr<boost::asio::io_service::work> work_;
boost::asio::ip::tcp::acceptor acceptor_;
std::thread thread_;
std::atomic<bool> running_{true};
unsigned short port_;
// Metrics
std::atomic<int> activeConnections_{0};
std::atomic<int> peakConnections_{0};
std::atomic<int> totalAccepted_{0};
// Configurable behavior
std::atomic<int> statusCode_{200};
std::atomic<int> delayMs_{0};
std::atomic<bool> sendResponse_{true};
std::atomic<bool> closeImmediately_{false};
std::atomic<bool> noContentLength_{false};
public:
MockHTTPServer()
: work_(std::make_unique<boost::asio::io_service::work>(ios_))
, acceptor_(
ios_,
boost::asio::ip::tcp::endpoint(
boost::asio::ip::address::from_string("127.0.0.1"),
0))
{
port_ = acceptor_.local_endpoint().port();
accept();
thread_ = std::thread([this] { ios_.run(); });
}
~MockHTTPServer()
{
running_ = false;
work_.reset(); // Allow io_service to stop.
boost::system::error_code ec;
acceptor_.close(ec);
ios_.stop();
if (thread_.joinable())
thread_.join();
}
unsigned short
port() const
{
return port_;
}
int
activeConnectionCount() const
{
return activeConnections_;
}
int
peakConnectionCount() const
{
return peakConnections_;
}
int
totalAcceptedCount() const
{
return totalAccepted_;
}
void
setStatus(int code)
{
statusCode_ = code;
}
void
setDelay(int ms)
{
delayMs_ = ms;
}
void
setSendResponse(bool send)
{
sendResponse_ = send;
}
void
setCloseImmediately(bool close)
{
closeImmediately_ = close;
}
void
setNoContentLength(bool noContentLength)
{
noContentLength_ = noContentLength;
}
private:
void
accept()
{
auto sock = std::make_shared<boost::asio::ip::tcp::socket>(ios_);
acceptor_.async_accept(*sock, [this, sock](auto ec) {
if (!ec && running_)
{
++totalAccepted_;
int current = ++activeConnections_;
int prev = peakConnections_.load();
while (current > prev &&
!peakConnections_.compare_exchange_weak(prev, current))
;
handleConnection(sock);
accept();
}
});
}
void
handleConnection(std::shared_ptr<boost::asio::ip::tcp::socket> sock)
{
if (closeImmediately_)
{
boost::system::error_code ec;
sock->shutdown(boost::asio::ip::tcp::socket::shutdown_both, ec);
sock->close(ec);
--activeConnections_;
return;
}
auto buf = std::make_shared<boost::asio::streambuf>();
boost::asio::async_read_until(
*sock, *buf, "\r\n\r\n", [this, sock, buf](auto ec, size_t) {
if (ec)
{
--activeConnections_;
return;
}
if (!sendResponse_)
{
// Hold connection open without responding.
// The socket shared_ptr prevents cleanup.
// This simulates a server that accepts but
// never responds (e.g., overloaded).
return;
}
auto delay = delayMs_.load();
if (delay > 0)
{
auto timer =
std::make_shared<boost::asio::steady_timer>(ios_);
timer->expires_from_now(std::chrono::milliseconds(delay));
timer->async_wait(
[this, sock, timer](auto) { sendHTTPResponse(sock); });
}
else
{
sendHTTPResponse(sock);
}
});
}
void
sendHTTPResponse(std::shared_ptr<boost::asio::ip::tcp::socket> sock)
{
auto body = std::string("{}");
std::string header =
"HTTP/1.0 " + std::to_string(statusCode_.load()) + " OK\r\n";
if (!noContentLength_)
header += "Content-Length: " + std::to_string(body.size()) + "\r\n";
header += "\r\n";
auto response = std::make_shared<std::string>(header + body);
boost::asio::async_write(
*sock,
boost::asio::buffer(*response),
[this, sock, response](auto, size_t) {
boost::system::error_code ec;
sock->shutdown(boost::asio::ip::tcp::socket::shutdown_both, ec);
sock->close(ec);
--activeConnections_;
});
}
};
//------------------------------------------------------------------------------
class HTTPClient_test : public beast::unit_test::suite
{
// Helper: fire an HTTP request and track completion via atomic counter.
void
fireRequest(
boost::asio::io_service& ios,
std::string const& host,
unsigned short port,
std::atomic<int>& completed,
beast::Journal& j,
std::chrono::seconds timeout = std::chrono::seconds{5})
{
HTTPClient::request(
false, // no SSL
ios,
host,
port,
[](boost::asio::streambuf& sb, std::string const& strHost) {
std::ostream os(&sb);
os << "POST / HTTP/1.0\r\n"
<< "Host: " << strHost << "\r\n"
<< "Content-Type: application/json\r\n"
<< "Content-Length: 2\r\n"
<< "\r\n"
<< "{}";
},
megabytes(1),
timeout,
[&completed](
const boost::system::error_code&, int, std::string const&) {
++completed;
return false;
},
j);
}
//--------------------------------------------------------------------------
void
testCleanupAfterSuccess()
{
testcase("Socket cleanup after successful response");
// After a successful HTTP request completes, the
// HTTPClientImp should be destroyed and its socket
// closed promptly — not held until the deadline fires.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(200);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
fireRequest(ios, "127.0.0.1", server.port(), completed, j);
ios.run();
}
BEAST_EXPECT(completed == 1);
BEAST_EXPECT(server.totalAcceptedCount() == 1);
// After io_service.run() returns, the server should
// see zero active connections — socket was released.
BEAST_EXPECT(server.activeConnectionCount() == 0);
}
void
testCleanupAfter500()
{
testcase("Socket cleanup after HTTP 500");
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(500);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
fireRequest(ios, "127.0.0.1", server.port(), completed, j);
ios.run();
}
BEAST_EXPECT(completed == 1);
BEAST_EXPECT(server.activeConnectionCount() == 0);
}
void
testCleanupAfterConnectionRefused()
{
testcase("Socket cleanup after connection refused");
using namespace jtx;
Env env{*this};
// No server listening — connection refused.
// Use a port that's very unlikely to be in use.
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
fireRequest(ios, "127.0.0.1", 19999, completed, j);
ios.run();
}
// Callback should still be invoked (with error).
BEAST_EXPECT(completed == 1);
}
void
testCleanupAfterTimeout()
{
testcase("Socket cleanup after timeout");
// Server accepts but never responds. HTTPClient should
// time out, clean up, and invoke the callback.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setSendResponse(false); // accept, read, but never respond
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
// Short timeout to keep the test fast.
fireRequest(
ios,
"127.0.0.1",
server.port(),
completed,
j,
std::chrono::seconds{2});
ios.run();
}
// Callback must be invoked even on timeout.
BEAST_EXPECT(completed == 1);
}
void
testCleanupAfterServerCloseBeforeResponse()
{
testcase("Socket cleanup after server closes before response");
// Server accepts the connection then immediately closes
// it without sending anything.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setCloseImmediately(true);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
fireRequest(ios, "127.0.0.1", server.port(), completed, j);
ios.run();
}
BEAST_EXPECT(completed == 1);
BEAST_EXPECT(server.activeConnectionCount() == 0);
}
void
testEOFCompletionCallsCallback()
{
testcase("EOF completion invokes callback (handleData bug)");
// HTTPClientImp::handleData has a code path where
// mShutdown == eof results in logging "Complete." but
// never calling invokeComplete(). This means:
// - The completion callback is never invoked
// - The deadline timer is never cancelled
// - The socket is held open until the 30s deadline
//
// This test verifies the callback IS invoked after an
// EOF response. If this test fails (completed == 0 after
// ios.run()), the handleData EOF bug is confirmed.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(200);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
fireRequest(
ios,
"127.0.0.1",
server.port(),
completed,
j,
std::chrono::seconds{3});
ios.run();
}
// If handleData EOF path doesn't call invokeComplete,
// the callback won't fire until the deadline (3s) expires,
// and even then handleDeadline doesn't invoke mComplete.
// The io_service.run() will still return (deadline fires,
// handleShutdown runs, all handlers done), but completed
// will be 0.
if (completed != 1)
{
log << " BUG CONFIRMED: handleData EOF path does not"
<< " call invokeComplete(). Callback was not invoked."
<< " Socket held open until deadline." << std::endl;
}
BEAST_EXPECT(completed == 1);
}
void
testConcurrentRequestCleanup()
{
testcase("Concurrent requests all clean up");
// Fire N requests at once on the same io_service.
// All should complete and release their sockets.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(200);
static constexpr int N = 50;
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
for (int i = 0; i < N; ++i)
{
fireRequest(ios, "127.0.0.1", server.port(), completed, j);
}
ios.run();
}
BEAST_EXPECT(completed == N);
// Brief sleep to let server-side shutdown complete.
std::this_thread::sleep_for(std::chrono::milliseconds(100));
BEAST_EXPECT(server.activeConnectionCount() == 0);
log << " Completed: " << completed
<< ", Peak concurrent: " << server.peakConnectionCount()
<< ", Active after: " << server.activeConnectionCount()
<< std::endl;
}
void
testConcurrent500Cleanup()
{
testcase("Concurrent 500 requests all clean up");
// Fire N requests that all get 500 responses. Verify
// all sockets are released and no FDs leak.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(500);
static constexpr int N = 50;
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
for (int i = 0; i < N; ++i)
{
fireRequest(ios, "127.0.0.1", server.port(), completed, j);
}
ios.run();
}
BEAST_EXPECT(completed == N);
std::this_thread::sleep_for(std::chrono::milliseconds(100));
BEAST_EXPECT(server.activeConnectionCount() == 0);
}
void
testEOFWithoutContentLength()
{
testcase("EOF without Content-Length (handleData EOF path)");
// When a server sends a response WITHOUT Content-Length,
// HTTPClientImp reads up to maxResponseSize. The server
// closes the connection, causing EOF in handleData.
//
// In handleData, the EOF path (mShutdown == eof) logs
// "Complete." but does NOT call invokeComplete(). This
// means:
// - mComplete (callback) is never invoked
// - deadline timer is never cancelled
// - socket + object held alive until deadline fires
//
// This test uses a SHORT deadline to keep it fast. If
// the callback IS invoked, ios.run() returns quickly.
// If NOT, ios.run() blocks until the deadline (2s).
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(200);
server.setNoContentLength(true);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
auto start = std::chrono::steady_clock::now();
{
boost::asio::io_service ios;
fireRequest(
ios,
"127.0.0.1",
server.port(),
completed,
j,
std::chrono::seconds{2});
ios.run();
}
auto elapsed = std::chrono::steady_clock::now() - start;
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed)
.count();
if (completed == 0)
{
log << " BUG CONFIRMED: handleData EOF path does not"
<< " call invokeComplete(). Callback never invoked."
<< " io_service.run() blocked for " << ms << "ms"
<< " (deadline timeout)." << std::endl;
}
else
{
log << " Callback invoked in " << ms << "ms." << std::endl;
}
// This WILL fail if the EOF bug exists — the callback
// is only invoked via the deadline timeout path, which
// does NOT call mComplete.
BEAST_EXPECT(completed == 1);
}
void
testPersistentIOServiceCleanup()
{
testcase("Cleanup on persistent io_service (no destructor mask)");
// Previous tests destroy the io_service after run(),
// which releases all pending handlers' shared_ptrs.
// This masks leaks. Here we use a PERSISTENT io_service
// (with work guard, running on its own thread) and check
// that HTTPClientImp objects are destroyed WITHOUT relying
// on io_service destruction.
//
// We track the object's lifetime via the completion
// callback — if it fires, the async chain completed
// normally. If it doesn't fire within a reasonable time
// but the io_service is still running, something is stuck.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(200);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
// Persistent io_service — stays alive the whole test.
boost::asio::io_service ios;
auto work = std::make_unique<boost::asio::io_service::work>(ios);
std::thread runner([&ios] { ios.run(); });
// Fire request on the persistent io_service.
HTTPClient::request(
false,
ios,
"127.0.0.1",
server.port(),
[](boost::asio::streambuf& sb, std::string const& strHost) {
std::ostream os(&sb);
os << "POST / HTTP/1.0\r\n"
<< "Host: " << strHost << "\r\n"
<< "Content-Type: application/json\r\n"
<< "Content-Length: 2\r\n"
<< "\r\n"
<< "{}";
},
megabytes(1),
std::chrono::seconds{5},
[&completed](
const boost::system::error_code&, int, std::string const&) {
++completed;
return false;
},
j);
// Wait for completion without destroying io_service.
auto deadline =
std::chrono::steady_clock::now() + std::chrono::seconds{5};
while (completed == 0 && std::chrono::steady_clock::now() < deadline)
{
std::this_thread::sleep_for(std::chrono::milliseconds(50));
}
BEAST_EXPECT(completed == 1);
// Give server-side shutdown a moment.
std::this_thread::sleep_for(std::chrono::milliseconds(100));
BEAST_EXPECT(server.activeConnectionCount() == 0);
if (server.activeConnectionCount() != 0)
{
log << " BUG: Socket still open on persistent"
<< " io_service. FD leaked." << std::endl;
}
// Clean shutdown.
work.reset();
ios.stop();
runner.join();
}
void
testPersistentIOService500Cleanup()
{
testcase("500 cleanup on persistent io_service");
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(500);
static constexpr int N = 20;
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
boost::asio::io_service ios;
auto work = std::make_unique<boost::asio::io_service::work>(ios);
std::thread runner([&ios] { ios.run(); });
for (int i = 0; i < N; ++i)
{
HTTPClient::request(
false,
ios,
"127.0.0.1",
server.port(),
[](boost::asio::streambuf& sb, std::string const& strHost) {
std::ostream os(&sb);
os << "POST / HTTP/1.0\r\n"
<< "Host: " << strHost << "\r\n"
<< "Content-Type: application/json\r\n"
<< "Content-Length: 2\r\n"
<< "\r\n"
<< "{}";
},
megabytes(1),
std::chrono::seconds{5},
[&completed](
const boost::system::error_code&, int, std::string const&) {
++completed;
return false;
},
j);
}
auto deadline =
std::chrono::steady_clock::now() + std::chrono::seconds{10};
while (completed < N && std::chrono::steady_clock::now() < deadline)
{
std::this_thread::sleep_for(std::chrono::milliseconds(50));
}
BEAST_EXPECT(completed == N);
std::this_thread::sleep_for(std::chrono::milliseconds(100));
BEAST_EXPECT(server.activeConnectionCount() == 0);
log << " Completed: " << completed << "/" << N
<< ", Active connections after: " << server.activeConnectionCount()
<< std::endl;
work.reset();
ios.stop();
runner.join();
}
void
testGetSelfReferenceCleanup()
{
testcase("get() shared_from_this cycle releases");
// HTTPClientImp::get() binds shared_from_this() into
// mBuild via makeGet. This creates a reference cycle:
// object -> mBuild -> shared_ptr<object>
// The object can only be destroyed if mBuild is cleared.
// Since mBuild is never explicitly cleared, this may be
// a permanent FD leak.
//
// This test fires a GET request and checks whether the
// HTTPClientImp is destroyed (and socket closed) after
// completion.
using namespace jtx;
Env env{*this};
MockHTTPServer server;
server.setStatus(200);
std::atomic<int> completed{0};
auto j = env.app().journal("HTTPClient");
{
boost::asio::io_service ios;
HTTPClient::get(
false, // no SSL
ios,
"127.0.0.1",
server.port(),
"/test",
megabytes(1),
std::chrono::seconds{5},
[&completed](
const boost::system::error_code&, int, std::string const&) {
++completed;
return false;
},
j);
ios.run();
}
BEAST_EXPECT(completed == 1);
std::this_thread::sleep_for(std::chrono::milliseconds(100));
// If the get() self-reference cycle leaks, the server
// will still show an active connection here (the socket
// in the leaked HTTPClientImp is never closed).
if (server.activeConnectionCount() != 0)
{
log << " BUG CONFIRMED: get() self-reference cycle"
<< " prevents HTTPClientImp destruction."
<< " Socket FD leaked." << std::endl;
}
BEAST_EXPECT(server.activeConnectionCount() == 0);
}
public:
void
run() override
{
testCleanupAfterSuccess();
testCleanupAfter500();
testCleanupAfterConnectionRefused();
testCleanupAfterTimeout();
testCleanupAfterServerCloseBeforeResponse();
testEOFCompletionCallsCallback();
testConcurrentRequestCleanup();
testConcurrent500Cleanup();
testEOFWithoutContentLength();
testPersistentIOServiceCleanup();
testPersistentIOService500Cleanup();
testGetSelfReferenceCleanup();
}
};
// Register the suite with the beast unit-test framework under
// ripple.net.HTTPClient.
BEAST_DEFINE_TESTSUITE(HTTPClient, net, ripple);
} // namespace test
} // namespace ripple