Mirror of https://github.com/XRPLF/clio.git (synced 2025-12-06 17:27:58 +00:00)
@@ -74,6 +74,7 @@ target_sources (clio PRIVATE
    ## Main
    src/main/impl/Build.cpp
    ## Backend
    src/data/BackendCounters.cpp
    src/data/BackendInterface.cpp
    src/data/LedgerCache.cpp
    src/data/cassandra/impl/Future.cpp
@@ -213,6 +214,7 @@ if (tests)
    unittests/rpc/handlers/VersionHandlerTests.cpp
    # Backend
    unittests/data/BackendFactoryTests.cpp
    unittests/data/BackendCountersTests.cpp
    unittests/data/cassandra/BaseTests.cpp
    unittests/data/cassandra/BackendTests.cpp
    unittests/data/cassandra/RetryPolicyTests.cpp
src/data/BackendCounters.cpp (new file, 149 lines)
@@ -0,0 +1,149 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <data/BackendCounters.h>

namespace data {

BackendCounters::PtrType
BackendCounters::make()
{
    struct EnableMakeShared : public BackendCounters
    {
    };
    return std::make_shared<EnableMakeShared>();
}

void
BackendCounters::registerTooBusy()
{
    ++tooBusyCounter_;
}

void
BackendCounters::registerWriteSync()
{
    ++writeSyncCounter_;
}

void
BackendCounters::registerWriteSyncRetry()
{
    ++writeSyncRetryCounter_;
}

void
BackendCounters::registerWriteStarted()
{
    asyncWriteCounters_.registerStarted(1u);
}

void
BackendCounters::registerWriteFinished()
{
    asyncWriteCounters_.registerFinished(1u);
}

void
BackendCounters::registerWriteRetry()
{
    asyncWriteCounters_.registerRetry(1u);
}

void
BackendCounters::registerReadStarted(std::uint64_t const count)
{
    asyncReadCounters_.registerStarted(count);
}

void
BackendCounters::registerReadFinished(std::uint64_t const count)
{
    asyncReadCounters_.registerFinished(count);
}

void
BackendCounters::registerReadRetry(std::uint64_t const count)
{
    asyncReadCounters_.registerRetry(count);
}

void
BackendCounters::registerReadError(std::uint64_t const count)
{
    asyncReadCounters_.registerError(count);
}

boost::json::object
BackendCounters::report() const
{
    boost::json::object result;
    result["too_busy"] = tooBusyCounter_;
    result["write_sync"] = writeSyncCounter_;
    result["write_sync_retry"] = writeSyncRetryCounter_;
    for (auto const& [key, value] : asyncWriteCounters_.report())
        result[key] = value;
    for (auto const& [key, value] : asyncReadCounters_.report())
        result[key] = value;
    return result;
}

BackendCounters::AsyncOperationCounters::AsyncOperationCounters(std::string name) : name_(std::move(name))
{
}

void
BackendCounters::AsyncOperationCounters::registerStarted(std::uint64_t const count)
{
    pendingCounter_ += count;
}

void
BackendCounters::AsyncOperationCounters::registerFinished(std::uint64_t const count)
{
    assert(pendingCounter_ >= count);
    pendingCounter_ -= count;
    completedCounter_ += count;
}

void
BackendCounters::AsyncOperationCounters::registerRetry(std::uint64_t count)
{
    retryCounter_ += count;
}

void
BackendCounters::AsyncOperationCounters::registerError(std::uint64_t count)
{
    assert(pendingCounter_ >= count);
    pendingCounter_ -= count;
    errorCounter_ += count;
}

boost::json::object
BackendCounters::AsyncOperationCounters::report() const
{
    return boost::json::object{
        {name_ + "_pending", pendingCounter_},
        {name_ + "_completed", completedCounter_},
        {name_ + "_retry", retryCounter_},
        {name_ + "_error", errorCounter_}};
}

} // namespace data
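The make() factory above reaches a private constructor through a subclass declared locally inside the member function, which is what allows std::make_shared to be used at all. A minimal standalone sketch of the same idiom, with a hypothetical Widget class that is not part of clio:

#include <cassert>
#include <memory>

class Widget
{
public:
    static std::shared_ptr<Widget>
    make()
    {
        // A class local to a member function has the member function's access
        // rights, so it can reach the private Widget constructor; make_shared
        // then constructs the subclass, which is-a Widget.
        struct EnableMakeShared : public Widget
        {
        };
        return std::make_shared<EnableMakeShared>();
    }

private:
    Widget() = default;  // callers must go through make()
};

int
main()
{
    auto const w = Widget::make();
    assert(w != nullptr);
    return 0;
}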
src/data/BackendCounters.h (new file, 136 lines)
@@ -0,0 +1,136 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <boost/json/object.hpp>

#include <atomic>
#include <memory>
#include <utility>

namespace data {

/**
 * @brief A concept for a class that can be used to count backend operations.
 */
// clang-format off
template <typename T>
concept SomeBackendCounters = requires(T a) {
    typename T::PtrType;
    { a.registerTooBusy() } -> std::same_as<void>;
    { a.registerWriteSync() } -> std::same_as<void>;
    { a.registerWriteSyncRetry() } -> std::same_as<void>;
    { a.registerWriteStarted() } -> std::same_as<void>;
    { a.registerWriteFinished() } -> std::same_as<void>;
    { a.registerWriteRetry() } -> std::same_as<void>;
    { a.registerReadStarted(std::uint64_t{}) } -> std::same_as<void>;
    { a.registerReadFinished(std::uint64_t{}) } -> std::same_as<void>;
    { a.registerReadRetry(std::uint64_t{}) } -> std::same_as<void>;
    { a.registerReadError(std::uint64_t{}) } -> std::same_as<void>;
    { a.report() } -> std::same_as<boost::json::object>;
};
// clang-format on

/**
 * @brief Holds statistics about the backend.
 * @note This class is thread-safe.
 */
class BackendCounters
{
public:
    using PtrType = std::shared_ptr<BackendCounters>;

    static PtrType
    make();

    void
    registerTooBusy();

    void
    registerWriteSync();

    void
    registerWriteSyncRetry();

    void
    registerWriteStarted();

    void
    registerWriteFinished();

    void
    registerWriteRetry();

    void
    registerReadStarted(std::uint64_t count = 1u);

    void
    registerReadFinished(std::uint64_t count = 1u);

    void
    registerReadRetry(std::uint64_t count = 1u);

    void
    registerReadError(std::uint64_t count = 1u);

    boost::json::object
    report() const;

private:
    BackendCounters() = default;

    class AsyncOperationCounters
    {
    public:
        AsyncOperationCounters(std::string name);

        void
        registerStarted(std::uint64_t count);

        void
        registerFinished(std::uint64_t count);

        void
        registerRetry(std::uint64_t count);

        void
        registerError(std::uint64_t count);

        boost::json::object
        report() const;

    private:
        std::string name_;
        std::atomic_uint64_t pendingCounter_ = 0u;
        std::atomic_uint64_t completedCounter_ = 0u;
        std::atomic_uint64_t retryCounter_ = 0u;
        std::atomic_uint64_t errorCounter_ = 0u;
    };

    std::atomic_uint64_t tooBusyCounter_ = 0u;

    std::atomic_uint64_t writeSyncCounter_ = 0u;
    std::atomic_uint64_t writeSyncRetryCounter_ = 0u;

    AsyncOperationCounters asyncWriteCounters_{"write_async"};
    AsyncOperationCounters asyncReadCounters_{"read_async"};
};

} // namespace data
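The SomeBackendCounters concept is what lets DefaultExecutionStrategy, and the unit tests later in this diff, inject an alternative counters implementation. As a sketch of that extension point, a hypothetical no-op implementation that still satisfies the concept could look like this; NullBackendCounters is illustrative and not part of clio:

#include <data/BackendCounters.h>

#include <boost/json/object.hpp>

#include <cstdint>
#include <memory>

// Hypothetical: discards every event but still models data::SomeBackendCounters.
class NullBackendCounters
{
public:
    using PtrType = std::shared_ptr<NullBackendCounters>;

    static PtrType
    make()
    {
        return std::make_shared<NullBackendCounters>();
    }

    void registerTooBusy() {}
    void registerWriteSync() {}
    void registerWriteSyncRetry() {}
    void registerWriteStarted() {}
    void registerWriteFinished() {}
    void registerWriteRetry() {}
    void registerReadStarted(std::uint64_t = 1u) {}
    void registerReadFinished(std::uint64_t = 1u) {}
    void registerReadRetry(std::uint64_t = 1u) {}
    void registerReadError(std::uint64_t = 1u) {}

    boost::json::object
    report() const
    {
        return {};
    }
};

// Both the real counters and the no-op stand-in satisfy the concept.
static_assert(data::SomeBackendCounters<data::BackendCounters>);
static_assert(data::SomeBackendCounters<NullBackendCounters>);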
@@ -553,6 +553,12 @@ public:
    virtual bool
    isTooBusy() const = 0;

    /**
     * @return json object containing backend usage statistics
     */
    virtual boost::json::object
    stats() const = 0;

private:
    virtual void
    doWriteLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob) = 0;
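Because stats() is now part of the BackendInterface contract, callers can collect usage data without knowing which concrete backend is configured. A small illustrative helper; logBackendStats is hypothetical and not part of clio:

#include <data/BackendInterface.h>

#include <boost/json/serialize.hpp>

#include <iostream>
#include <string>

// Hypothetical helper: serializes whatever the configured backend reports.
inline void
logBackendStats(data::BackendInterface const& backend)
{
    std::string const stats = boost::json::serialize(backend.stats());
    std::cout << "backend stats: " << stats << '\n';
}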
@@ -794,6 +794,12 @@ public:
        return executor_.isTooBusy();
    }

    boost::json::object
    stats() const override
    {
        return executor_.stats();
    }

private:
    bool
    executeSyncUpdate(Statement statement)
@@ -22,6 +22,7 @@
#include <data/cassandra/Types.h>

#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>

#include <chrono>
#include <concepts>
@@ -69,6 +70,7 @@ concept SomeExecutionStrategy = requires(
    { a.read(token, statement) } -> std::same_as<ResultOrError>;
    { a.read(token, statements) } -> std::same_as<ResultOrError>;
    { a.readEach(token, statements) } -> std::same_as<std::vector<Result>>;
    { a.stats() } -> std::same_as<boost::json::object>;
};
// clang-format on
@@ -52,12 +52,14 @@ class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<Statemen
{
    using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
    using CallbackType = std::function<void(typename HandleType::ResultOrErrorType)>;
    using RetryCallbackType = std::function<void()>;

    util::Logger log_{"Backend"};

    StatementType data_;
    RetryPolicyType retryPolicy_;
    CallbackType onComplete_;
    RetryCallbackType onRetry_;

    // does not exist during initial construction, hence optional
    std::optional<FutureWithCallbackType> future_;
@@ -68,24 +70,36 @@ public:
     * @brief Create a new instance of the AsyncExecutor and execute it.
     */
    static void
    run(boost::asio::io_context& ioc, HandleType const& handle, StatementType&& data, CallbackType&& onComplete)
    run(boost::asio::io_context& ioc,
        HandleType const& handle,
        StatementType&& data,
        CallbackType&& onComplete,
        RetryCallbackType&& onRetry)
    {
        // this is a helper that allows us to use std::make_shared below
        struct EnableMakeShared : public AsyncExecutor<StatementType, HandleType, RetryPolicyType>
        {
            EnableMakeShared(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete)
                : AsyncExecutor(ioc, std::move(data), std::move(onComplete))
            EnableMakeShared(
                boost::asio::io_context& ioc,
                StatementType&& data,
                CallbackType&& onComplete,
                RetryCallbackType&& onRetry)
                : AsyncExecutor(ioc, std::move(data), std::move(onComplete), std::move(onRetry))
            {
            }
        };

        auto ptr = std::make_shared<EnableMakeShared>(ioc, std::move(data), std::move(onComplete));
        auto ptr = std::make_shared<EnableMakeShared>(ioc, std::move(data), std::move(onComplete), std::move(onRetry));
        ptr->execute(handle);
    }

private:
    AsyncExecutor(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete)
        : data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)}
    AsyncExecutor(
        boost::asio::io_context& ioc,
        StatementType&& data,
        CallbackType&& onComplete,
        RetryCallbackType&& onRetry)
        : data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)}, onRetry_{std::move(onRetry)}
    {
    }

@@ -104,6 +118,7 @@ private:
    {
        if (retryPolicy_.shouldRetry(res.error()))
        {
            onRetry_();
            retryPolicy_.retry([self, &handle]() { self->execute(handle); });
        }
        else
@@ -85,16 +85,16 @@ struct Settings
    uint32_t coreConnectionsPerHost = 1u;

    /** @brief Size of the IO queue */
    std::optional<uint32_t> queueSizeIO;
    std::optional<uint32_t> queueSizeIO{};

    /** @brief SSL certificate */
    std::optional<std::string> certificate; // ssl context
    std::optional<std::string> certificate{}; // ssl context

    /** @brief Username/login */
    std::optional<std::string> username;
    std::optional<std::string> username{};

    /** @brief Password to match the `username` */
    std::optional<std::string> password;
    std::optional<std::string> password{};

    /**
     * @brief Creates a new Settings object as a copy of the current one with overridden contact points.
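For std::optional members the added {} does not change observable behavior (a default-constructed optional is already empty); it gives each member an explicit default member initializer, which documents the "empty by default" intent. The unit tests later in this diff construct Settings with designated initializers, and any member not named there falls back to these defaults. A self-contained illustration with a stand-in struct; DemoSettings is not part of clio:

#include <cassert>
#include <cstdint>
#include <optional>
#include <string>

// Minimal stand-in for the Settings fields touched above (illustrative only).
struct DemoSettings
{
    std::uint32_t coreConnectionsPerHost = 1u;
    std::optional<std::uint32_t> queueSizeIO{};
    std::optional<std::string> username{};
};

int
main()
{
    auto const s = DemoSettings{.coreConnectionsPerHost = 2u};
    assert(!s.queueSizeIO.has_value());  // members left unnamed stay empty
    assert(!s.username.has_value());
    return 0;
}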
@@ -19,6 +19,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <data/BackendCounters.h>
|
||||
#include <data/BackendInterface.h>
|
||||
#include <data/cassandra/Handle.h>
|
||||
#include <data/cassandra/Types.h>
|
||||
#include <data/cassandra/impl/AsyncExecutor.h>
|
||||
@@ -46,7 +48,7 @@ namespace data::cassandra::detail {
|
||||
* Note: A lot of the code that uses yield is repeated below.
|
||||
* This is ok for now because we are hopefully going to be getting rid of it entirely later on.
|
||||
*/
|
||||
template <typename HandleType = Handle>
|
||||
template <typename HandleType = Handle, SomeBackendCounters BackendCountersType = BackendCounters>
|
||||
class DefaultExecutionStrategy
|
||||
{
|
||||
util::Logger log_{"Backend"};
|
||||
@@ -69,6 +71,8 @@ class DefaultExecutionStrategy
|
||||
std::reference_wrapper<HandleType const> handle_;
|
||||
std::thread thread_;
|
||||
|
||||
typename BackendCountersType::PtrType counters_;
|
||||
|
||||
public:
|
||||
using ResultOrErrorType = typename HandleType::ResultOrErrorType;
|
||||
using StatementType = typename HandleType::StatementType;
|
||||
@@ -82,12 +86,16 @@ public:
|
||||
* @param settings The settings to use
|
||||
* @param handle A handle to the cassandra database
|
||||
*/
|
||||
DefaultExecutionStrategy(Settings const& settings, HandleType const& handle)
|
||||
DefaultExecutionStrategy(
|
||||
Settings const& settings,
|
||||
HandleType const& handle,
|
||||
typename BackendCountersType::PtrType counters = BackendCountersType::make())
|
||||
: maxWriteRequestsOutstanding_{settings.maxWriteRequestsOutstanding}
|
||||
, maxReadRequestsOutstanding_{settings.maxReadRequestsOutstanding}
|
||||
, work_{ioc_}
|
||||
, handle_{std::cref(handle)}
|
||||
, thread_{[this]() { ioc_.run(); }}
|
||||
, counters_{std::move(counters)}
|
||||
{
|
||||
LOG(log_.info()) << "Max write requests outstanding is " << maxWriteRequestsOutstanding_
|
||||
<< "; Max read requests outstanding is " << maxReadRequestsOutstanding_;
|
||||
@@ -118,7 +126,10 @@ public:
|
||||
bool
|
||||
isTooBusy() const
|
||||
{
|
||||
return numReadRequestsOutstanding_ >= maxReadRequestsOutstanding_;
|
||||
bool const result = numReadRequestsOutstanding_ >= maxReadRequestsOutstanding_;
|
||||
if (result)
|
||||
counters_->registerTooBusy();
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -129,6 +140,7 @@ public:
|
||||
ResultOrErrorType
|
||||
writeSync(StatementType const& statement)
|
||||
{
|
||||
counters_->registerWriteSync();
|
||||
while (true)
|
||||
{
|
||||
auto res = handle_.get().execute(statement);
|
||||
@@ -137,6 +149,7 @@ public:
|
||||
return res;
|
||||
}
|
||||
|
||||
counters_->registerWriteSyncRetry();
|
||||
LOG(log_.warn()) << "Cassandra sync write error, retrying: " << res.error();
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(5));
|
||||
}
|
||||
@@ -170,9 +183,18 @@ public:
|
||||
auto statement = preparedStatement.bind(std::forward<Args>(args)...);
|
||||
incrementOutstandingRequestCount();
|
||||
|
||||
counters_->registerWriteStarted();
|
||||
// Note: lifetime is controlled by std::shared_from_this internally
|
||||
AsyncExecutor<std::decay_t<decltype(statement)>, HandleType>::run(
|
||||
ioc_, handle_, std::move(statement), [this](auto const&) { decrementOutstandingRequestCount(); });
|
||||
ioc_,
|
||||
handle_,
|
||||
std::move(statement),
|
||||
[this](auto const&) {
|
||||
decrementOutstandingRequestCount();
|
||||
|
||||
counters_->registerWriteFinished();
|
||||
},
|
||||
[this]() { counters_->registerWriteRetry(); });
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -191,9 +213,17 @@ public:
|
||||
|
||||
incrementOutstandingRequestCount();
|
||||
|
||||
counters_->registerWriteStarted();
|
||||
// Note: lifetime is controlled by std::shared_from_this internally
|
||||
AsyncExecutor<std::decay_t<decltype(statements)>, HandleType>::run(
|
||||
ioc_, handle_, std::move(statements), [this](auto const&) { decrementOutstandingRequestCount(); });
|
||||
ioc_,
|
||||
handle_,
|
||||
std::move(statements),
|
||||
[this](auto const&) {
|
||||
decrementOutstandingRequestCount();
|
||||
counters_->registerWriteFinished();
|
||||
},
|
||||
[this]() { counters_->registerWriteRetry(); });
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -229,6 +259,7 @@ public:
|
||||
{
|
||||
auto const numStatements = statements.size();
|
||||
std::optional<FutureWithCallbackType> future;
|
||||
counters_->registerReadStarted(numStatements);
|
||||
|
||||
// todo: perhaps use policy instead
|
||||
while (true)
|
||||
@@ -251,11 +282,21 @@ public:
|
||||
|
||||
if (res)
|
||||
{
|
||||
counters_->registerReadFinished(numStatements);
|
||||
return res;
|
||||
}
|
||||
|
||||
LOG(log_.error()) << "Failed batch read in coroutine: " << res.error();
|
||||
throwErrorIfNeeded(res.error());
|
||||
try
|
||||
{
|
||||
throwErrorIfNeeded(res.error());
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
counters_->registerReadError(numStatements);
|
||||
throw;
|
||||
}
|
||||
counters_->registerReadRetry(numStatements);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -273,6 +314,7 @@ public:
|
||||
read(CompletionTokenType token, StatementType const& statement)
|
||||
{
|
||||
std::optional<FutureWithCallbackType> future;
|
||||
counters_->registerReadStarted();
|
||||
|
||||
// todo: perhaps use policy instead
|
||||
while (true)
|
||||
@@ -293,10 +335,22 @@ public:
|
||||
--numReadRequestsOutstanding_;
|
||||
|
||||
if (res)
|
||||
{
|
||||
counters_->registerReadFinished();
|
||||
return res;
|
||||
}
|
||||
|
||||
LOG(log_.error()) << "Failed read in coroutine: " << res.error();
|
||||
throwErrorIfNeeded(res.error());
|
||||
try
|
||||
{
|
||||
throwErrorIfNeeded(res.error());
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
counters_->registerReadError();
|
||||
throw;
|
||||
}
|
||||
counters_->registerReadRetry();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -314,18 +368,19 @@ public:
|
||||
std::vector<ResultType>
|
||||
readEach(CompletionTokenType token, std::vector<StatementType> const& statements)
|
||||
{
|
||||
std::atomic_bool hadError = false;
|
||||
std::atomic_uint64_t errorsCount = 0u;
|
||||
std::atomic_int numOutstanding = statements.size();
|
||||
numReadRequestsOutstanding_ += statements.size();
|
||||
|
||||
auto futures = std::vector<FutureWithCallbackType>{};
|
||||
futures.reserve(numOutstanding);
|
||||
counters_->registerReadStarted(statements.size());
|
||||
|
||||
auto init = [this, &statements, &futures, &hadError, &numOutstanding]<typename Self>(Self& self) {
|
||||
auto init = [this, &statements, &futures, &errorsCount, &numOutstanding]<typename Self>(Self& self) {
|
||||
auto sself = std::make_shared<Self>(std::move(self));
|
||||
auto executionHandler = [&hadError, &numOutstanding, sself](auto const& res) mutable {
|
||||
auto executionHandler = [&errorsCount, &numOutstanding, sself](auto const& res) mutable {
|
||||
if (not res)
|
||||
hadError = true;
|
||||
++errorsCount;
|
||||
|
||||
// when all async operations complete unblock the result
|
||||
if (--numOutstanding == 0)
|
||||
@@ -348,8 +403,14 @@ public:
|
||||
init, token, boost::asio::get_associated_executor(token));
|
||||
numReadRequestsOutstanding_ -= statements.size();
|
||||
|
||||
if (hadError)
|
||||
if (errorsCount > 0)
|
||||
{
|
||||
assert(errorsCount <= statements.size());
|
||||
counters_->registerReadError(errorsCount);
|
||||
counters_->registerReadFinished(statements.size() - errorsCount);
|
||||
throw DatabaseTimeout{};
|
||||
}
|
||||
counters_->registerReadFinished(statements.size());
|
||||
|
||||
std::vector<ResultType> results;
|
||||
results.reserve(futures.size());
|
||||
@@ -370,6 +431,15 @@ public:
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get statistics about the backend.
|
||||
*/
|
||||
boost::json::object
|
||||
stats() const
|
||||
{
|
||||
return counters_->report();
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
incrementOutstandingRequestCount()
|
||||
|
||||
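The readEach() change above replaces the single hadError flag with an errorsCount, so every statement in a batch is attributed to exactly one outcome: finished, retried, or errored. For a batch of three statements where one fails, the accounting works out as in the unit tests further down; a minimal sketch of the same arithmetic using BackendCounters directly:

#include <data/BackendCounters.h>

#include <boost/json/serialize.hpp>

#include <iostream>

int
main()
{
    auto const counters = data::BackendCounters::make();

    counters->registerReadStarted(3u);   // batch of three statements submitted
    counters->registerReadError(1u);     // one of them failed
    counters->registerReadFinished(2u);  // the other two completed

    // read_async_pending ends up 0, read_async_completed 2, read_async_error 1.
    std::cout << boost::json::serialize(counters->report()) << '\n';
    return 0;
}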
@@ -49,6 +49,8 @@ namespace rpc {
template <typename SubscriptionManagerType, typename LoadBalancerType, typename ETLServiceType, typename CountersType>
class BaseServerInfoHandler
{
    static constexpr auto BACKEND_COUNTERS_KEY = "backend_counters";

    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<SubscriptionManagerType> subscriptions_;
    std::shared_ptr<LoadBalancerType> balancer_;
@@ -56,9 +58,15 @@ class BaseServerInfoHandler
    std::reference_wrapper<CountersType const> counters_;

public:
    struct Input
    {
        bool backendCounters = false;
    };

    struct AdminSection
    {
        boost::json::object counters = {};
        std::optional<boost::json::object> backendCounters = {};
        boost::json::object subscriptions = {};
        boost::json::object etl = {};
    };
@@ -119,8 +127,15 @@ public:
    {
    }

    static RpcSpecConstRef
    spec([[maybe_unused]] uint32_t apiVersion)
    {
        static const RpcSpec rpcSpec = {};
        return rpcSpec;
    }

    Result
    process(Context const& ctx) const
    process(Input input, Context const& ctx) const
    {
        using namespace rpc;
        using namespace std::chrono;
@@ -143,7 +158,13 @@ public:
        output.info.completeLedgers = fmt::format("{}-{}", range->minSequence, range->maxSequence);

        if (ctx.isAdmin)
            output.info.adminSection = {counters_.get().report(), subscriptions_->report(), etl_->getInfo()};
        {
            output.info.adminSection = {
                .counters = counters_.get().report(),
                .backendCounters = input.backendCounters ? std::make_optional(backend_->stats()) : std::nullopt,
                .subscriptions = subscriptions_->report(),
                .etl = etl_->getInfo()};
        }

        auto const serverInfoRippled =
            balancer_->forwardToRippled({{"command", "server_info"}}, ctx.clientIp, ctx.yield);
@@ -223,6 +244,10 @@ private:
        jv.as_object()["etl"] = info.adminSection->etl;
        jv.as_object()[JS(counters)] = info.adminSection->counters;
        jv.as_object()[JS(counters)].as_object()["subscriptions"] = info.adminSection->subscriptions;
        if (info.adminSection->backendCounters.has_value())
        {
            jv.as_object()[BACKEND_COUNTERS_KEY] = *info.adminSection->backendCounters;
        }
    }
}

@@ -250,6 +275,16 @@ private:
            {"successor_hit_rate", cache.successorHitRate},
        };
    }

    friend Input
    tag_invoke(boost::json::value_to_tag<Input>, boost::json::value const& jv)
    {
        auto input = BaseServerInfoHandler::Input{};
        auto const jsonObject = jv.as_object();
        if (jsonObject.contains(BACKEND_COUNTERS_KEY) && jsonObject.at(BACKEND_COUNTERS_KEY).is_bool())
            input.backendCounters = jv.at(BACKEND_COUNTERS_KEY).as_bool();
        return input;
    }
};

/**
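With the Input and tag_invoke plumbing above, a caller opts in to the new section by sending a boolean backend_counters field with the server_info request, and the resulting backend_counters object is attached only for admin connections. A sketch of such a request body, built the same way the unit tests build JSON; the surrounding "command" envelope is the usual clio/rippled WebSocket shape and is shown here as an assumption:

#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>

#include <iostream>

int
main()
{
    // Assumed request envelope; the handler itself only inspects "backend_counters".
    auto const request = boost::json::parse(R"({
        "command": "server_info",
        "backend_counters": true
    })");

    std::cout << boost::json::serialize(request) << '\n';
    return 0;
}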
@@ -274,7 +274,6 @@ TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerAccountProposedT
    CheckSubscriberMessage(dummyTransaction, session);
    auto rawIdle = dynamic_cast<MockSession*>(sessionIdle.get());
    ASSERT_NE(rawIdle, nullptr);
    ASSERT_NE(rawIdle, nullptr);
    EXPECT_EQ("", rawIdle->message);
}
unittests/data/BackendCountersTests.cpp (new file, 184 lines)
@@ -0,0 +1,184 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2023, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <data/BackendCounters.h>
|
||||
|
||||
#include <boost/json/parse.hpp>
|
||||
#include <boost/json/serialize.hpp>
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
using namespace data;
|
||||
|
||||
class BackendCountersTest : public ::testing::Test
|
||||
{
|
||||
protected:
|
||||
static boost::json::object
|
||||
emptyReport()
|
||||
{
|
||||
return boost::json::parse(R"({
|
||||
"too_busy": 0,
|
||||
"write_sync": 0,
|
||||
"write_sync_retry": 0,
|
||||
"write_async_pending": 0,
|
||||
"write_async_completed": 0,
|
||||
"write_async_retry": 0,
|
||||
"write_async_error": 0,
|
||||
"read_async_pending": 0,
|
||||
"read_async_completed": 0,
|
||||
"read_async_retry": 0,
|
||||
"read_async_error": 0
|
||||
})")
|
||||
.as_object();
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(BackendCountersTest, EmptyByDefault)
|
||||
{
|
||||
auto const counters = BackendCounters::make();
|
||||
EXPECT_EQ(counters->report(), emptyReport());
|
||||
}
|
||||
|
||||
TEST_F(BackendCountersTest, RegisterTooBusy)
|
||||
{
|
||||
auto const counters = BackendCounters::make();
|
||||
counters->registerTooBusy();
|
||||
counters->registerTooBusy();
|
||||
counters->registerTooBusy();
|
||||
auto expectedReport = emptyReport();
|
||||
expectedReport["too_busy"] = 3;
|
||||
EXPECT_EQ(counters->report(), expectedReport);
|
||||
}
|
||||
|
||||
TEST_F(BackendCountersTest, RegisterWriteSync)
|
||||
{
|
||||
auto const counters = BackendCounters::make();
|
||||
counters->registerWriteSync();
|
||||
counters->registerWriteSync();
|
||||
auto expectedReport = emptyReport();
|
||||
expectedReport["write_sync"] = 2;
|
||||
EXPECT_EQ(counters->report(), expectedReport);
|
||||
}
|
||||
|
||||
TEST_F(BackendCountersTest, RegisterWriteSyncRetry)
|
||||
{
|
||||
auto const counters = BackendCounters::make();
|
||||
counters->registerWriteSyncRetry();
|
||||
counters->registerWriteSyncRetry();
|
||||
counters->registerWriteSyncRetry();
|
||||
auto expectedReport = emptyReport();
|
||||
expectedReport["write_sync_retry"] = 3;
|
||||
EXPECT_EQ(counters->report(), expectedReport);
|
||||
}
|
||||
|
||||
TEST_F(BackendCountersTest, RegisterWriteStarted)
|
||||
{
|
||||
auto const counters = BackendCounters::make();
|
||||
counters->registerWriteStarted();
|
||||
counters->registerWriteStarted();
|
||||
auto expectedReport = emptyReport();
|
||||
expectedReport["write_async_pending"] = 2;
|
||||
EXPECT_EQ(counters->report(), expectedReport);
|
||||
}
|
||||
|
||||
TEST_F(BackendCountersTest, RegisterWriteFinished)
|
||||
{
|
||||
auto const counters = BackendCounters::make();
|
||||
counters->registerWriteStarted();
|
||||
counters->registerWriteStarted();
|
||||
counters->registerWriteStarted();
|
||||
counters->registerWriteFinished();
|
||||
counters->registerWriteFinished();
|
||||
auto expectedReport = emptyReport();
|
||||
expectedReport["write_async_pending"] = 1;
|
||||
expectedReport["write_async_completed"] = 2;
|
||||
EXPECT_EQ(counters->report(), expectedReport);
|
||||
}
|
||||
|
||||
TEST_F(BackendCountersTest, RegisterWriteRetry)
|
||||
{
|
||||
auto const counters = BackendCounters::make();
|
||||
counters->registerWriteRetry();
|
||||
counters->registerWriteRetry();
|
||||
auto expectedReport = emptyReport();
|
||||
expectedReport["write_async_retry"] = 2;
|
||||
EXPECT_EQ(counters->report(), expectedReport);
|
||||
}
|
||||
|
||||
TEST_F(BackendCountersTest, RegisterReadStarted)
|
||||
{
|
||||
auto const counters = BackendCounters::make();
|
||||
counters->registerReadStarted();
|
||||
counters->registerReadStarted();
|
||||
auto expectedReport = emptyReport();
|
||||
expectedReport["read_async_pending"] = 2;
|
||||
EXPECT_EQ(counters->report(), expectedReport);
|
||||
}
|
||||
|
||||
TEST_F(BackendCountersTest, RegisterReadFinished)
|
||||
{
|
||||
auto const counters = BackendCounters::make();
|
||||
counters->registerReadStarted();
|
||||
counters->registerReadStarted();
|
||||
counters->registerReadStarted();
|
||||
counters->registerReadFinished();
|
||||
counters->registerReadFinished();
|
||||
auto expectedReport = emptyReport();
|
||||
expectedReport["read_async_pending"] = 1;
|
||||
expectedReport["read_async_completed"] = 2;
|
||||
EXPECT_EQ(counters->report(), expectedReport);
|
||||
}
|
||||
|
||||
TEST_F(BackendCountersTest, RegisterReadStartedFinishedWithCounters)
|
||||
{
|
||||
static constexpr auto OPERATIONS_STARTED = 7u;
|
||||
static constexpr auto OPERATIONS_COMPLETED = 4u;
|
||||
auto const counters = BackendCounters::make();
|
||||
counters->registerReadStarted(OPERATIONS_STARTED);
|
||||
counters->registerReadFinished(OPERATIONS_COMPLETED);
|
||||
auto expectedReport = emptyReport();
|
||||
expectedReport["read_async_pending"] = OPERATIONS_STARTED - OPERATIONS_COMPLETED;
|
||||
expectedReport["read_async_completed"] = OPERATIONS_COMPLETED;
|
||||
EXPECT_EQ(counters->report(), expectedReport);
|
||||
}
|
||||
|
||||
TEST_F(BackendCountersTest, RegisterReadRetry)
|
||||
{
|
||||
auto const counters = BackendCounters::make();
|
||||
counters->registerReadRetry();
|
||||
counters->registerReadRetry();
|
||||
auto expectedReport = emptyReport();
|
||||
expectedReport["read_async_retry"] = 2;
|
||||
EXPECT_EQ(counters->report(), expectedReport);
|
||||
}
|
||||
|
||||
TEST_F(BackendCountersTest, RegisterReadError)
|
||||
{
|
||||
static constexpr auto OPERATIONS_STARTED = 7u;
|
||||
static constexpr auto OPERATIONS_ERROR = 2u;
|
||||
static constexpr auto OPERATIONS_COMPLETED = 1u;
|
||||
auto const counters = BackendCounters::make();
|
||||
counters->registerReadStarted(OPERATIONS_STARTED);
|
||||
counters->registerReadError(OPERATIONS_ERROR);
|
||||
counters->registerReadFinished(OPERATIONS_COMPLETED);
|
||||
auto expectedReport = emptyReport();
|
||||
expectedReport["read_async_pending"] = OPERATIONS_STARTED - OPERATIONS_COMPLETED - OPERATIONS_ERROR;
|
||||
expectedReport["read_async_completed"] = OPERATIONS_COMPLETED;
|
||||
expectedReport["read_async_error"] = OPERATIONS_ERROR;
|
||||
EXPECT_EQ(counters->report(), expectedReport);
|
||||
}
|
||||
@@ -31,6 +31,14 @@ using namespace testing;
|
||||
|
||||
class BackendCassandraAsyncExecutorTest : public SyncAsioContextTest
|
||||
{
|
||||
protected:
|
||||
struct CallbackMock
|
||||
{
|
||||
MOCK_METHOD(void, onComplete, (FakeResultOrError));
|
||||
MOCK_METHOD(void, onRetry, ());
|
||||
};
|
||||
CallbackMock callbackMock_;
|
||||
std::function<void()> onRetry_ = [this]() { callbackMock_.onRetry(); };
|
||||
};
|
||||
|
||||
TEST_F(BackendCassandraAsyncExecutorTest, CompletionCalledOnSuccess)
|
||||
@@ -45,16 +53,20 @@ TEST_F(BackendCassandraAsyncExecutorTest, CompletionCalledOnSuccess)
|
||||
EXPECT_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(AtLeast(1));
|
||||
|
||||
auto called = std::atomic_bool{false};
|
||||
auto work = std::optional<boost::asio::io_context::work>{ctx};
|
||||
EXPECT_CALL(callbackMock_, onComplete);
|
||||
|
||||
AsyncExecutor<FakeStatement, MockHandle>::run(ctx, handle, FakeStatement{}, [&called, &work](auto&&) {
|
||||
called = true;
|
||||
work.reset();
|
||||
});
|
||||
AsyncExecutor<FakeStatement, MockHandle>::run(
|
||||
ctx,
|
||||
handle,
|
||||
FakeStatement{},
|
||||
[&work, this](auto resultOrError) {
|
||||
callbackMock_.onComplete(std::move(resultOrError));
|
||||
work.reset();
|
||||
},
|
||||
std::move(onRetry_));
|
||||
|
||||
ctx.run();
|
||||
ASSERT_TRUE(called);
|
||||
}
|
||||
|
||||
TEST_F(BackendCassandraAsyncExecutorTest, ExecutedMultipleTimesByRetryPolicyOnMainThread)
|
||||
@@ -80,17 +92,22 @@ TEST_F(BackendCassandraAsyncExecutorTest, ExecutedMultipleTimesByRetryPolicyOnMa
|
||||
EXPECT_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(3);
|
||||
|
||||
auto called = std::atomic_bool{false};
|
||||
auto work = std::optional<boost::asio::io_context::work>{ctx};
|
||||
EXPECT_CALL(callbackMock_, onComplete);
|
||||
EXPECT_CALL(callbackMock_, onRetry).Times(2);
|
||||
|
||||
AsyncExecutor<FakeStatement, MockHandle>::run(ctx, handle, FakeStatement{}, [&called, &work](auto&&) {
|
||||
called = true;
|
||||
work.reset();
|
||||
});
|
||||
AsyncExecutor<FakeStatement, MockHandle>::run(
|
||||
ctx,
|
||||
handle,
|
||||
FakeStatement{},
|
||||
[this, &work](auto resultOrError) {
|
||||
callbackMock_.onComplete(std::move(resultOrError));
|
||||
work.reset();
|
||||
},
|
||||
std::move(onRetry_));
|
||||
|
||||
ctx.run();
|
||||
ASSERT_TRUE(callCount >= 3);
|
||||
ASSERT_TRUE(called);
|
||||
ASSERT_EQ(callCount, 3);
|
||||
}
|
||||
|
||||
TEST_F(BackendCassandraAsyncExecutorTest, ExecutedMultipleTimesByRetryPolicyOnOtherThread)
|
||||
@@ -120,19 +137,23 @@ TEST_F(BackendCassandraAsyncExecutorTest, ExecutedMultipleTimesByRetryPolicyOnOt
|
||||
EXPECT_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(3);
|
||||
|
||||
auto called = std::atomic_bool{false};
|
||||
auto work2 = std::optional<boost::asio::io_context::work>{ctx};
|
||||
EXPECT_CALL(callbackMock_, onComplete);
|
||||
EXPECT_CALL(callbackMock_, onRetry).Times(2);
|
||||
|
||||
AsyncExecutor<FakeStatement, MockHandle>::run(
|
||||
threadedCtx, handle, FakeStatement{}, [&called, &work, &work2](auto&&) {
|
||||
called = true;
|
||||
threadedCtx,
|
||||
handle,
|
||||
FakeStatement{},
|
||||
[this, &work, &work2](auto resultOrError) {
|
||||
callbackMock_.onComplete(std::move(resultOrError));
|
||||
work.reset();
|
||||
work2.reset();
|
||||
});
|
||||
},
|
||||
std::move(onRetry_));
|
||||
|
||||
ctx.run();
|
||||
ASSERT_TRUE(callCount >= 3);
|
||||
ASSERT_TRUE(called);
|
||||
EXPECT_EQ(callCount, 3);
|
||||
threadedCtx.stop();
|
||||
thread.join();
|
||||
}
|
||||
@@ -151,19 +172,22 @@ TEST_F(BackendCassandraAsyncExecutorTest, CompletionCalledOnFailureAfterRetryCou
|
||||
EXPECT_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(1);
|
||||
|
||||
auto called = std::atomic_bool{false};
|
||||
auto work = std::optional<boost::asio::io_context::work>{ctx};
|
||||
EXPECT_CALL(callbackMock_, onComplete);
|
||||
|
||||
AsyncExecutor<FakeStatement, MockHandle, FakeRetryPolicy>::run(
|
||||
ctx, handle, FakeStatement{}, [&called, &work](auto&& res) {
|
||||
ctx,
|
||||
handle,
|
||||
FakeStatement{},
|
||||
[this, &work](auto res) {
|
||||
EXPECT_FALSE(res);
|
||||
EXPECT_EQ(res.error().code(), CASS_ERROR_LIB_INTERNAL_ERROR);
|
||||
EXPECT_EQ(res.error().message(), "not a timeout");
|
||||
|
||||
called = true;
|
||||
callbackMock_.onComplete(std::move(res));
|
||||
work.reset();
|
||||
});
|
||||
},
|
||||
std::move(onRetry_));
|
||||
|
||||
ctx.run();
|
||||
ASSERT_TRUE(called);
|
||||
}
|
||||
|
||||
@@ -30,20 +30,89 @@ using namespace testing;
|
||||
|
||||
class BackendCassandraExecutionStrategyTest : public SyncAsioContextTest
|
||||
{
|
||||
protected:
|
||||
class MockBackendCounters
|
||||
{
|
||||
public:
|
||||
using PtrType = std::shared_ptr<StrictMock<MockBackendCounters>>;
|
||||
static PtrType
|
||||
make()
|
||||
{
|
||||
return std::make_shared<StrictMock<MockBackendCounters>>();
|
||||
}
|
||||
|
||||
MOCK_METHOD(void, registerTooBusy, (), ());
|
||||
MOCK_METHOD(void, registerWriteSync, (), ());
|
||||
MOCK_METHOD(void, registerWriteSyncRetry, (), ());
|
||||
MOCK_METHOD(void, registerWriteStarted, (), ());
|
||||
MOCK_METHOD(void, registerWriteFinished, (), ());
|
||||
MOCK_METHOD(void, registerWriteRetry, (), ());
|
||||
|
||||
void
|
||||
registerReadStarted(std::uint64_t count = 1)
|
||||
{
|
||||
registerReadStartedImpl(count);
|
||||
}
|
||||
MOCK_METHOD(void, registerReadStartedImpl, (std::uint64_t), ());
|
||||
|
||||
void
|
||||
registerReadFinished(std::uint64_t count = 1)
|
||||
{
|
||||
registerReadFinishedImpl(count);
|
||||
}
|
||||
MOCK_METHOD(void, registerReadFinishedImpl, (std::uint64_t), ());
|
||||
|
||||
void
|
||||
registerReadRetry(std::uint64_t count = 1)
|
||||
{
|
||||
registerReadRetryImpl(count);
|
||||
}
|
||||
MOCK_METHOD(void, registerReadRetryImpl, (std::uint64_t), ());
|
||||
|
||||
void
|
||||
registerReadError(std::uint64_t count = 1)
|
||||
{
|
||||
registerReadErrorImpl(count);
|
||||
}
|
||||
MOCK_METHOD(void, registerReadErrorImpl, (std::uint64_t), ());
|
||||
MOCK_METHOD(boost::json::object, report, (), ());
|
||||
};
|
||||
|
||||
MockHandle handle{};
|
||||
MockBackendCounters::PtrType counters = MockBackendCounters::make();
|
||||
static constexpr auto NUM_STATEMENTS = 3u;
|
||||
|
||||
DefaultExecutionStrategy<MockHandle, MockBackendCounters>
|
||||
makeStrategy(Settings s = {})
|
||||
{
|
||||
return DefaultExecutionStrategy<MockHandle, MockBackendCounters>(s, handle, counters);
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, IsTooBusy)
|
||||
{
|
||||
{
|
||||
auto strat = makeStrategy(Settings{.maxReadRequestsOutstanding = 0});
|
||||
EXPECT_CALL(*counters, registerTooBusy());
|
||||
EXPECT_TRUE(strat.isTooBusy());
|
||||
}
|
||||
auto strat = makeStrategy(Settings{.maxReadRequestsOutstanding = 1});
|
||||
EXPECT_FALSE(strat.isTooBusy());
|
||||
}
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, ReadOneInCoroutineSuccessful)
|
||||
{
|
||||
auto handle = MockHandle{};
|
||||
auto strat = DefaultExecutionStrategy{Settings{}, handle};
|
||||
auto strat = makeStrategy();
|
||||
|
||||
ON_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
ON_CALL(handle, asyncExecute(A<FakeStatement const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.WillByDefault([](auto const& /* statement */, auto&& cb) {
|
||||
cb({}); // pretend we got data
|
||||
return FakeFutureWithCallback{};
|
||||
});
|
||||
EXPECT_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
EXPECT_CALL(handle, asyncExecute(A<FakeStatement const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(1);
|
||||
EXPECT_CALL(*counters, registerReadStartedImpl(1));
|
||||
EXPECT_CALL(*counters, registerReadFinishedImpl(1));
|
||||
|
||||
runSpawn([&strat](boost::asio::yield_context yield) {
|
||||
auto statement = FakeStatement{};
|
||||
@@ -53,17 +122,18 @@ TEST_F(BackendCassandraExecutionStrategyTest, ReadOneInCoroutineSuccessful)
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, ReadOneInCoroutineThrowsOnTimeoutFailure)
|
||||
{
|
||||
auto handle = MockHandle{};
|
||||
auto strat = DefaultExecutionStrategy{Settings{}, handle};
|
||||
auto strat = makeStrategy();
|
||||
|
||||
ON_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
ON_CALL(handle, asyncExecute(A<FakeStatement const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.WillByDefault([](auto const&, auto&& cb) {
|
||||
auto res = FakeResultOrError{CassandraError{"timeout", CASS_ERROR_LIB_REQUEST_TIMED_OUT}};
|
||||
cb(res); // notify that item is ready
|
||||
return FakeFutureWithCallback{res};
|
||||
});
|
||||
EXPECT_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
EXPECT_CALL(handle, asyncExecute(A<FakeStatement const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(1);
|
||||
EXPECT_CALL(*counters, registerReadStartedImpl(1));
|
||||
EXPECT_CALL(*counters, registerReadErrorImpl(1));
|
||||
|
||||
runSpawn([&strat](boost::asio::yield_context yield) {
|
||||
auto statement = FakeStatement{};
|
||||
@@ -73,17 +143,18 @@ TEST_F(BackendCassandraExecutionStrategyTest, ReadOneInCoroutineThrowsOnTimeoutF
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, ReadOneInCoroutineThrowsOnInvalidQueryFailure)
|
||||
{
|
||||
auto handle = MockHandle{};
|
||||
auto strat = DefaultExecutionStrategy{Settings{}, handle};
|
||||
auto strat = makeStrategy();
|
||||
|
||||
ON_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
ON_CALL(handle, asyncExecute(A<FakeStatement const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.WillByDefault([](auto const&, auto&& cb) {
|
||||
auto res = FakeResultOrError{CassandraError{"invalid", CASS_ERROR_SERVER_INVALID_QUERY}};
|
||||
cb(res); // notify that item is ready
|
||||
return FakeFutureWithCallback{res};
|
||||
});
|
||||
EXPECT_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
EXPECT_CALL(handle, asyncExecute(A<FakeStatement const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(1);
|
||||
EXPECT_CALL(*counters, registerReadStartedImpl(1));
|
||||
EXPECT_CALL(*counters, registerReadErrorImpl(1));
|
||||
|
||||
runSpawn([&strat](boost::asio::yield_context yield) {
|
||||
auto statement = FakeStatement{};
|
||||
@@ -93,95 +164,94 @@ TEST_F(BackendCassandraExecutionStrategyTest, ReadOneInCoroutineThrowsOnInvalidQ
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, ReadBatchInCoroutineSuccessful)
|
||||
{
|
||||
auto handle = MockHandle{};
|
||||
auto strat = DefaultExecutionStrategy{Settings{}, handle};
|
||||
auto strat = makeStrategy();
|
||||
|
||||
ON_CALL(
|
||||
handle, asyncExecute(An<std::vector<FakeStatement> const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
ON_CALL(handle, asyncExecute(A<std::vector<FakeStatement> const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.WillByDefault([](auto const& statements, auto&& cb) {
|
||||
EXPECT_EQ(statements.size(), 3);
|
||||
EXPECT_EQ(statements.size(), NUM_STATEMENTS);
|
||||
cb({}); // pretend we got data
|
||||
return FakeFutureWithCallback{};
|
||||
});
|
||||
EXPECT_CALL(
|
||||
handle, asyncExecute(An<std::vector<FakeStatement> const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
handle, asyncExecute(A<std::vector<FakeStatement> const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(1);
|
||||
EXPECT_CALL(*counters, registerReadStartedImpl(NUM_STATEMENTS));
|
||||
EXPECT_CALL(*counters, registerReadFinishedImpl(NUM_STATEMENTS));
|
||||
|
||||
runSpawn([&strat](boost::asio::yield_context yield) {
|
||||
auto statements = std::vector<FakeStatement>(3);
|
||||
auto statements = std::vector<FakeStatement>(NUM_STATEMENTS);
|
||||
strat.read(yield, statements);
|
||||
});
|
||||
}
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, ReadBatchInCoroutineThrowsOnTimeoutFailure)
|
||||
{
|
||||
auto handle = MockHandle{};
|
||||
auto strat = DefaultExecutionStrategy{Settings{}, handle};
|
||||
auto strat = makeStrategy();
|
||||
|
||||
ON_CALL(
|
||||
handle, asyncExecute(An<std::vector<FakeStatement> const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
ON_CALL(handle, asyncExecute(A<std::vector<FakeStatement> const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.WillByDefault([](auto const& statements, auto&& cb) {
|
||||
EXPECT_EQ(statements.size(), 3);
|
||||
EXPECT_EQ(statements.size(), NUM_STATEMENTS);
|
||||
auto res = FakeResultOrError{CassandraError{"timeout", CASS_ERROR_LIB_REQUEST_TIMED_OUT}};
|
||||
cb(res); // notify that item is ready
|
||||
return FakeFutureWithCallback{res};
|
||||
});
|
||||
EXPECT_CALL(
|
||||
handle, asyncExecute(An<std::vector<FakeStatement> const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
handle, asyncExecute(A<std::vector<FakeStatement> const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(1);
|
||||
EXPECT_CALL(*counters, registerReadStartedImpl(NUM_STATEMENTS));
|
||||
EXPECT_CALL(*counters, registerReadErrorImpl(NUM_STATEMENTS));
|
||||
|
||||
runSpawn([&strat](boost::asio::yield_context yield) {
|
||||
auto statements = std::vector<FakeStatement>(3);
|
||||
auto statements = std::vector<FakeStatement>(NUM_STATEMENTS);
|
||||
EXPECT_THROW(strat.read(yield, statements), DatabaseTimeout);
|
||||
});
|
||||
}
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, ReadBatchInCoroutineThrowsOnInvalidQueryFailure)
|
||||
{
|
||||
auto handle = MockHandle{};
|
||||
auto strat = DefaultExecutionStrategy{Settings{}, handle};
|
||||
auto strat = makeStrategy();
|
||||
|
||||
ON_CALL(
|
||||
handle, asyncExecute(An<std::vector<FakeStatement> const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
ON_CALL(handle, asyncExecute(A<std::vector<FakeStatement> const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.WillByDefault([](auto const& statements, auto&& cb) {
|
||||
EXPECT_EQ(statements.size(), 3);
|
||||
EXPECT_EQ(statements.size(), NUM_STATEMENTS);
|
||||
auto res = FakeResultOrError{CassandraError{"invalid", CASS_ERROR_SERVER_INVALID_QUERY}};
|
||||
cb(res); // notify that item is ready
|
||||
return FakeFutureWithCallback{res};
|
||||
});
|
||||
EXPECT_CALL(
|
||||
handle, asyncExecute(An<std::vector<FakeStatement> const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
handle, asyncExecute(A<std::vector<FakeStatement> const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(1);
|
||||
EXPECT_CALL(*counters, registerReadStartedImpl(NUM_STATEMENTS));
|
||||
EXPECT_CALL(*counters, registerReadErrorImpl(NUM_STATEMENTS));
|
||||
|
||||
runSpawn([&strat](boost::asio::yield_context yield) {
|
||||
auto statements = std::vector<FakeStatement>(3);
|
||||
auto statements = std::vector<FakeStatement>(NUM_STATEMENTS);
|
||||
EXPECT_THROW(strat.read(yield, statements), std::runtime_error);
|
||||
});
|
||||
}
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, ReadBatchInCoroutineMarksBusyIfRequestsOutstandingExceeded)
|
||||
{
|
||||
auto handle = MockHandle{};
|
||||
auto settings = Settings{};
|
||||
settings.maxReadRequestsOutstanding = 2;
|
||||
auto strat = DefaultExecutionStrategy{settings, handle};
|
||||
auto strat = makeStrategy(Settings{.maxReadRequestsOutstanding = 2});
|
||||
|
||||
ON_CALL(
|
||||
handle, asyncExecute(An<std::vector<FakeStatement> const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
.WillByDefault([&strat](auto const& statements, auto&& cb) {
|
||||
EXPECT_EQ(statements.size(), 3);
|
||||
ON_CALL(handle, asyncExecute(A<std::vector<FakeStatement> const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.WillByDefault([this, &strat](auto const& statements, auto&& cb) {
|
||||
EXPECT_EQ(statements.size(), NUM_STATEMENTS);
|
||||
EXPECT_CALL(*counters, registerTooBusy());
|
||||
EXPECT_TRUE(strat.isTooBusy()); // 2 was the limit, we sent 3
|
||||
|
||||
cb({}); // notify that item is ready
|
||||
return FakeFutureWithCallback{};
|
||||
});
|
||||
EXPECT_CALL(
|
||||
handle, asyncExecute(An<std::vector<FakeStatement> const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
handle, asyncExecute(A<std::vector<FakeStatement> const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(1);
|
||||
EXPECT_CALL(*counters, registerReadStartedImpl(NUM_STATEMENTS));
|
||||
EXPECT_CALL(*counters, registerReadFinishedImpl(NUM_STATEMENTS));
|
||||
|
||||
runSpawn([&strat](boost::asio::yield_context yield) {
|
||||
EXPECT_FALSE(strat.isTooBusy()); // 2 was the limit, 0 atm
|
||||
auto statements = std::vector<FakeStatement>(3);
|
||||
auto statements = std::vector<FakeStatement>(NUM_STATEMENTS);
|
||||
strat.read(yield, statements);
|
||||
EXPECT_FALSE(strat.isTooBusy()); // after read completes it's 0 again
|
||||
});
|
||||
@@ -189,10 +259,9 @@ TEST_F(BackendCassandraExecutionStrategyTest, ReadBatchInCoroutineMarksBusyIfReq
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, ReadEachInCoroutineSuccessful)
|
||||
{
|
||||
auto handle = MockHandle{};
|
||||
auto strat = DefaultExecutionStrategy{Settings{}, handle};
|
||||
auto strat = makeStrategy();
|
||||
|
||||
ON_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
ON_CALL(handle, asyncExecute(A<FakeStatement const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.WillByDefault([](auto const&, auto&& cb) {
|
||||
cb({}); // pretend we got data
|
||||
return FakeFutureWithCallback{};
|
||||
@@ -200,12 +269,14 @@ TEST_F(BackendCassandraExecutionStrategyTest, ReadEachInCoroutineSuccessful)
|
||||
EXPECT_CALL(
|
||||
handle,
|
||||
asyncExecute(
|
||||
An<FakeStatement const&>(),
|
||||
An<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(3); // once per statement
|
||||
A<FakeStatement const&>(),
|
||||
A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(NUM_STATEMENTS); // once per statement
|
||||
EXPECT_CALL(*counters, registerReadStartedImpl(NUM_STATEMENTS));
|
||||
EXPECT_CALL(*counters, registerReadFinishedImpl(NUM_STATEMENTS));
|
||||
|
||||
runSpawn([&strat](boost::asio::yield_context yield) {
|
||||
auto statements = std::vector<FakeStatement>(3);
|
||||
auto statements = std::vector<FakeStatement>(NUM_STATEMENTS);
|
||||
auto res = strat.readEach(yield, statements);
|
||||
EXPECT_EQ(res.size(), statements.size());
|
||||
});
|
||||
@@ -213,11 +284,10 @@ TEST_F(BackendCassandraExecutionStrategyTest, ReadEachInCoroutineSuccessful)
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, ReadEachInCoroutineThrowsOnFailure)
|
||||
{
|
||||
auto handle = MockHandle{};
|
||||
auto strat = DefaultExecutionStrategy{Settings{}, handle};
|
||||
auto strat = makeStrategy();
|
||||
auto callCount = std::atomic_int{0};
|
||||
|
||||
ON_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
|
||||
ON_CALL(handle, asyncExecute(A<FakeStatement const&>(), A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.WillByDefault([&callCount](auto const&, auto&& cb) {
|
||||
if (callCount == 1)
|
||||
{ // error happens on one of the entries
|
||||
@@ -233,57 +303,59 @@ TEST_F(BackendCassandraExecutionStrategyTest, ReadEachInCoroutineThrowsOnFailure
|
||||
EXPECT_CALL(
|
||||
handle,
|
||||
asyncExecute(
|
||||
An<FakeStatement const&>(),
|
||||
An<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(3); // once per statement
|
||||
A<FakeStatement const&>(),
|
||||
A<std::function<void(FakeResultOrError)>&&>()))
|
||||
.Times(NUM_STATEMENTS); // once per statement
|
||||
EXPECT_CALL(*counters, registerReadStartedImpl(NUM_STATEMENTS));
|
||||
EXPECT_CALL(*counters, registerReadErrorImpl(1));
|
||||
EXPECT_CALL(*counters, registerReadFinishedImpl(2));
|
||||
|
||||
runSpawn([&strat](boost::asio::yield_context yield) {
|
||||
auto statements = std::vector<FakeStatement>(3);
|
||||
auto statements = std::vector<FakeStatement>(NUM_STATEMENTS);
|
||||
EXPECT_THROW(strat.readEach(yield, statements), DatabaseTimeout);
|
||||
});
|
||||
}
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, WriteSyncFirstTrySuccessful)
|
||||
{
|
||||
auto handle = MockHandle{};
|
||||
auto strat = DefaultExecutionStrategy{Settings{}, handle};
|
||||
auto strat = makeStrategy();
|
||||
|
||||
ON_CALL(handle, execute(An<FakeStatement const&>())).WillByDefault([](auto const&) { return FakeResultOrError{}; });
|
||||
ON_CALL(handle, execute(A<FakeStatement const&>())).WillByDefault([](auto const&) { return FakeResultOrError{}; });
|
||||
EXPECT_CALL(handle,
|
||||
execute(An<FakeStatement const&>())).Times(1); // first one will succeed
|
||||
execute(A<FakeStatement const&>())).Times(1); // first one will succeed
|
||||
EXPECT_CALL(*counters, registerWriteSync());
|
||||
|
||||
EXPECT_TRUE(strat.writeSync({}));
|
||||
}
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, WriteSyncRetrySuccessful)
|
||||
{
|
||||
auto handle = MockHandle{};
|
||||
auto strat = DefaultExecutionStrategy{Settings{}, handle};
|
||||
auto strat = makeStrategy();
|
||||
auto callCount = 0;
|
||||
|
||||
ON_CALL(handle, execute(An<FakeStatement const&>())).WillByDefault([&callCount](auto const&) {
|
||||
ON_CALL(handle, execute(A<FakeStatement const&>())).WillByDefault([&callCount](auto const&) {
|
||||
if (callCount++ == 1)
|
||||
return FakeResultOrError{};
|
||||
return FakeResultOrError{CassandraError{"invalid data", CASS_ERROR_LIB_INVALID_DATA}};
|
||||
});
|
||||
EXPECT_CALL(handle,
|
||||
execute(An<FakeStatement const&>())).Times(2); // first one will fail, second will succeed
|
||||
execute(A<FakeStatement const&>())).Times(2); // first one will fail, second will succeed
|
||||
EXPECT_CALL(*counters, registerWriteSyncRetry());
|
||||
EXPECT_CALL(*counters, registerWriteSync());
|
||||
|
||||
EXPECT_TRUE(strat.writeSync({}));
|
||||
}
|
||||
|
||||
TEST_F(BackendCassandraExecutionStrategyTest, WriteMultipleAndCallSyncSucceeds)
{
auto handle = MockHandle{};
auto strat = DefaultExecutionStrategy{Settings{}, handle};
auto totalRequests = 1024u;
auto strat = makeStrategy();
auto const totalRequests = 1024u;
auto callCount = std::atomic_uint{0u};

auto work = std::optional<boost::asio::io_context::work>{ctx};
auto thread = std::thread{[this]() { ctx.run(); }};

ON_CALL(
handle, asyncExecute(An<std::vector<FakeStatement> const&>(), An<std::function<void(FakeResultOrError)>&&>()))
ON_CALL(handle, asyncExecute(A<std::vector<FakeStatement> const&>(), A<std::function<void(FakeResultOrError)>&&>()))
.WillByDefault([this, &callCount](auto const&, auto&& cb) {
// run on thread to emulate concurrency model of real asyncExecute
boost::asio::post(ctx, [&callCount, cb = std::forward<decltype(cb)>(cb)] {
@@ -295,9 +367,11 @@ TEST_F(BackendCassandraExecutionStrategyTest, WriteMultipleAndCallSyncSucceeds)
EXPECT_CALL(
handle,
asyncExecute(
An<std::vector<FakeStatement> const&>(),
An<std::function<void(FakeResultOrError)>&&>()))
A<std::vector<FakeStatement> const&>(),
A<std::function<void(FakeResultOrError)>&&>()))
.Times(totalRequests); // one per write call
EXPECT_CALL(*counters, registerWriteStarted()).Times(totalRequests);
EXPECT_CALL(*counters, registerWriteFinished()).Times(totalRequests);

auto makeStatements = [] { return std::vector<FakeStatement>(16); };
for (auto i = 0u; i < totalRequests; ++i)
@@ -309,3 +383,10 @@ TEST_F(BackendCassandraExecutionStrategyTest, WriteMultipleAndCallSyncSucceeds)
work.reset();
thread.join();
}

TEST_F(BackendCassandraExecutionStrategyTest, StatsCallsCountersReport)
{
auto strat = makeStrategy();
EXPECT_CALL(*counters, report());
strat.stats();
}

@@ -47,6 +47,11 @@ protected:
MockSubscriptionManagerTest::SetUp();
MockETLServiceTest::SetUp();
MockCountersTest::SetUp();

rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
ASSERT_NE(rawBackendPtr, nullptr);
mockBackendPtr->updateRange(10); // min
mockBackendPtr->updateRange(30); // max
}

void
@@ -99,12 +104,18 @@ protected:
}

static void
validateAdminOutput(rpc::ReturnType const& output)
validateAdminOutput(rpc::ReturnType const& output, bool shouldHaveBackendCounters = false)
{
auto const& result = output.value().as_object();
auto const& info = result.at("info").as_object();
EXPECT_TRUE(info.contains("etl"));
EXPECT_TRUE(info.contains("counters"));
if (shouldHaveBackendCounters)
{
ASSERT_TRUE(info.contains("backend_counters")) << boost::json::serialize(info);
EXPECT_TRUE(info.at("backend_counters").is_object());
EXPECT_TRUE(!info.at("backend_counters").as_object().empty());
}
}

static void
@@ -134,18 +145,13 @@ protected:
EXPECT_EQ(cache.at("object_hit_rate").as_double(), 1.0);
|
||||
EXPECT_EQ(cache.at("successor_hit_rate").as_double(), 1.0);
|
||||
}
|
||||
|
||||
MockBackend* rawBackendPtr = nullptr;
|
||||
};
|
||||
|
||||
TEST_F(RPCServerInfoHandlerTest, NoLedgerInfoErrorsOutWithInternal)
|
||||
{
|
||||
MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
|
||||
ASSERT_NE(rawBackendPtr, nullptr);
|
||||
|
||||
mockBackendPtr->updateRange(10); // min
|
||||
mockBackendPtr->updateRange(30); // max
|
||||
|
||||
ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(std::nullopt));
|
||||
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1);
|
||||
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).WillOnce(Return(std::nullopt));
|
||||
|
||||
auto const handler = AnyHandler{TestServerInfoHandler{
|
||||
mockBackendPtr, mockSubscriptionManagerPtr, mockLoadBalancerPtr, mockETLServicePtr, *mockCountersPtr}};
|
||||
@@ -163,17 +169,9 @@ TEST_F(RPCServerInfoHandlerTest, NoLedgerInfoErrorsOutWithInternal)
|
||||
|
||||
TEST_F(RPCServerInfoHandlerTest, NoFeesErrorsOutWithInternal)
|
||||
{
|
||||
MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
|
||||
ASSERT_NE(rawBackendPtr, nullptr);
|
||||
|
||||
mockBackendPtr->updateRange(10); // min
|
||||
mockBackendPtr->updateRange(30); // max
|
||||
|
||||
auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30);
|
||||
ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo));
|
||||
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1);
|
||||
ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::nullopt));
|
||||
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
|
||||
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).WillOnce(Return(ledgerinfo));
|
||||
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).WillOnce(Return(std::nullopt));
|
||||
|
||||
auto const handler = AnyHandler{TestServerInfoHandler{
|
||||
mockBackendPtr, mockSubscriptionManagerPtr, mockLoadBalancerPtr, mockETLServicePtr, *mockCountersPtr}};
|
||||
@@ -191,31 +189,22 @@ TEST_F(RPCServerInfoHandlerTest, NoFeesErrorsOutWithInternal)
|
||||
|
||||
TEST_F(RPCServerInfoHandlerTest, DefaultOutputIsPresent)
{
MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
ASSERT_NE(rawBackendPtr, nullptr);
MockLoadBalancer* rawBalancerPtr = mockLoadBalancerPtr.get();
MockCounters* rawCountersPtr = mockCountersPtr.get();
MockETLService* rawETLServicePtr = mockETLServicePtr.get();

mockBackendPtr->updateRange(10); // min
mockBackendPtr->updateRange(30); // max

auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30, 3); // 3 seconds old
ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo));
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1);
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).WillOnce(Return(ledgerinfo));

auto const feeBlob = CreateFeeSettingBlob(1, 2, 3, 4, 0);
ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(feeBlob));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).WillOnce(Return(feeBlob));

ON_CALL(*rawBalancerPtr, forwardToRippled).WillByDefault(Return(std::nullopt));
EXPECT_CALL(*rawBalancerPtr, forwardToRippled(testing::_, testing::Eq(CLIENTIP), testing::_)).Times(1);
EXPECT_CALL(*rawBalancerPtr, forwardToRippled(testing::_, testing::Eq(CLIENTIP), testing::_))
.WillOnce(Return(std::nullopt));

ON_CALL(*rawCountersPtr, uptime).WillByDefault(Return(std::chrono::seconds{1234}));
EXPECT_CALL(*rawCountersPtr, uptime).Times(1);
EXPECT_CALL(*rawCountersPtr, uptime).WillOnce(Return(std::chrono::seconds{1234}));

ON_CALL(*rawETLServicePtr, isAmendmentBlocked).WillByDefault(Return(false));
EXPECT_CALL(*rawETLServicePtr, isAmendmentBlocked).Times(1);
EXPECT_CALL(*rawETLServicePtr, isAmendmentBlocked).WillOnce(Return(false));

auto const handler = AnyHandler{TestServerInfoHandler{
mockBackendPtr, mockSubscriptionManagerPtr, mockLoadBalancerPtr, mockETLServicePtr, *mockCountersPtr}};
@@ -236,31 +225,22 @@ TEST_F(RPCServerInfoHandlerTest, DefaultOutputIsPresent)

TEST_F(RPCServerInfoHandlerTest, AmendmentBlockedIsPresentIfSet)
{
MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
ASSERT_NE(rawBackendPtr, nullptr);
MockLoadBalancer* rawBalancerPtr = mockLoadBalancerPtr.get();
MockCounters* rawCountersPtr = mockCountersPtr.get();
MockETLService* rawETLServicePtr = mockETLServicePtr.get();

mockBackendPtr->updateRange(10); // min
mockBackendPtr->updateRange(30); // max

auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30, 3); // 3 seconds old
ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo));
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1);
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).WillOnce(Return(ledgerinfo));

auto const feeBlob = CreateFeeSettingBlob(1, 2, 3, 4, 0);
ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(feeBlob));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).WillOnce(Return(feeBlob));

ON_CALL(*rawBalancerPtr, forwardToRippled).WillByDefault(Return(std::nullopt));
EXPECT_CALL(*rawBalancerPtr, forwardToRippled(testing::_, testing::Eq(CLIENTIP), testing::_)).Times(1);
EXPECT_CALL(*rawBalancerPtr, forwardToRippled(testing::_, testing::Eq(CLIENTIP), testing::_))
.WillOnce(Return(std::nullopt));

ON_CALL(*rawCountersPtr, uptime).WillByDefault(Return(std::chrono::seconds{1234}));
EXPECT_CALL(*rawCountersPtr, uptime).Times(1);
EXPECT_CALL(*rawCountersPtr, uptime).WillOnce(Return(std::chrono::seconds{1234}));

ON_CALL(*rawETLServicePtr, isAmendmentBlocked).WillByDefault(Return(true));
EXPECT_CALL(*rawETLServicePtr, isAmendmentBlocked).Times(1);
EXPECT_CALL(*rawETLServicePtr, isAmendmentBlocked).WillOnce(Return(true));

auto const handler = AnyHandler{TestServerInfoHandler{
mockBackendPtr, mockSubscriptionManagerPtr, mockLoadBalancerPtr, mockETLServicePtr, *mockCountersPtr}};
@@ -279,43 +259,30 @@ TEST_F(RPCServerInfoHandlerTest, AmendmentBlockedIsPresentIfSet)

TEST_F(RPCServerInfoHandlerTest, AdminSectionPresentWhenAdminFlagIsSet)
{
MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
ASSERT_NE(rawBackendPtr, nullptr);
MockLoadBalancer* rawBalancerPtr = mockLoadBalancerPtr.get();
MockCounters* rawCountersPtr = mockCountersPtr.get();
MockSubscriptionManager* rawSubscriptionManagerPtr = mockSubscriptionManagerPtr.get();
MockETLService* rawETLServicePtr = mockETLServicePtr.get();

mockBackendPtr->updateRange(10); // min
mockBackendPtr->updateRange(30); // max

auto const empty = json::object{};
auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30, 3); // 3 seconds old
ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo));
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1);
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).WillOnce(Return(ledgerinfo));

auto const feeBlob = CreateFeeSettingBlob(1, 2, 3, 4, 0);
ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(feeBlob));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).WillOnce(Return(feeBlob));

ON_CALL(*rawBalancerPtr, forwardToRippled).WillByDefault(Return(empty));
EXPECT_CALL(*rawBalancerPtr, forwardToRippled).Times(1);
EXPECT_CALL(*rawBalancerPtr, forwardToRippled).WillOnce(Return(empty));

ON_CALL(*rawCountersPtr, uptime).WillByDefault(Return(std::chrono::seconds{1234}));
EXPECT_CALL(*rawCountersPtr, uptime).Times(1);
EXPECT_CALL(*rawCountersPtr, uptime).WillOnce(Return(std::chrono::seconds{1234}));

ON_CALL(*rawETLServicePtr, isAmendmentBlocked).WillByDefault(Return(false));
EXPECT_CALL(*rawETLServicePtr, isAmendmentBlocked).Times(1);
EXPECT_CALL(*rawETLServicePtr, isAmendmentBlocked).WillOnce(Return(false));

// admin calls
ON_CALL(*rawCountersPtr, report).WillByDefault(Return(empty));
EXPECT_CALL(*rawCountersPtr, report).Times(1);
EXPECT_CALL(*rawCountersPtr, report).WillOnce(Return(empty));

ON_CALL(*rawSubscriptionManagerPtr, report).WillByDefault(Return(empty));
EXPECT_CALL(*rawSubscriptionManagerPtr, report).Times(1);
EXPECT_CALL(*rawSubscriptionManagerPtr, report).WillOnce(Return(empty));

ON_CALL(*rawETLServicePtr, getInfo).WillByDefault(Return(empty));
EXPECT_CALL(*rawETLServicePtr, getInfo).Times(1);
EXPECT_CALL(*rawETLServicePtr, getInfo).WillOnce(Return(empty));

auto const handler = AnyHandler{TestServerInfoHandler{
mockBackendPtr, mockSubscriptionManagerPtr, mockLoadBalancerPtr, mockETLServicePtr, *mockCountersPtr}};
@@ -329,32 +296,68 @@ TEST_F(RPCServerInfoHandlerTest, AdminSectionPresentWhenAdminFlagIsSet)
});
}

TEST_F(RPCServerInfoHandlerTest, RippledForwardedValuesPresent)
TEST_F(RPCServerInfoHandlerTest, BackendCountersPresentWhenRequestWithParam)
{
MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
ASSERT_NE(rawBackendPtr, nullptr);
MockLoadBalancer* rawBalancerPtr = mockLoadBalancerPtr.get();
MockCounters* rawCountersPtr = mockCountersPtr.get();
MockSubscriptionManager* rawSubscriptionManagerPtr = mockSubscriptionManagerPtr.get();
MockETLService* rawETLServicePtr = mockETLServicePtr.get();

mockBackendPtr->updateRange(10); // min
mockBackendPtr->updateRange(30); // max
auto const empty = json::object{};
auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30, 3); // 3 seconds old
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).WillOnce(Return(ledgerinfo));

auto const feeBlob = CreateFeeSettingBlob(1, 2, 3, 4, 0);
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).WillOnce(Return(feeBlob));

EXPECT_CALL(*rawBalancerPtr, forwardToRippled).WillOnce(Return(empty));

EXPECT_CALL(*rawCountersPtr, uptime).WillOnce(Return(std::chrono::seconds{1234}));

EXPECT_CALL(*rawETLServicePtr, isAmendmentBlocked).WillOnce(Return(false));

// admin calls
EXPECT_CALL(*rawCountersPtr, report).WillOnce(Return(empty));

EXPECT_CALL(*rawSubscriptionManagerPtr, report).WillOnce(Return(empty));

EXPECT_CALL(*rawETLServicePtr, getInfo).WillOnce(Return(empty));

EXPECT_CALL(*rawBackendPtr, stats).WillOnce(Return(boost::json::object{{"read_cout", 10}, {"write_count", 3}}));

auto const handler = AnyHandler{TestServerInfoHandler{
mockBackendPtr, mockSubscriptionManagerPtr, mockLoadBalancerPtr, mockETLServicePtr, *mockCountersPtr}};

runSpawn([&](auto yield) {
auto const req = json::parse(R"(
{
"backend_counters": true
}
)");
auto const output = handler.process(req, Context{yield, {}, true});

validateNormalOutput(output);
validateAdminOutput(output, true);
});
}

TEST_F(RPCServerInfoHandlerTest, RippledForwardedValuesPresent)
{
MockLoadBalancer* rawBalancerPtr = mockLoadBalancerPtr.get();
MockCounters* rawCountersPtr = mockCountersPtr.get();
MockSubscriptionManager* rawSubscriptionManagerPtr = mockSubscriptionManagerPtr.get();
MockETLService* rawETLServicePtr = mockETLServicePtr.get();

auto const empty = json::object{};
auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30, 3); // 3 seconds old
ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo));
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1);
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).WillOnce(Return(ledgerinfo));

auto const feeBlob = CreateFeeSettingBlob(1, 2, 3, 4, 0);
ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(feeBlob));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).WillOnce(Return(feeBlob));

ON_CALL(*rawCountersPtr, uptime).WillByDefault(Return(std::chrono::seconds{1234}));
EXPECT_CALL(*rawCountersPtr, uptime).Times(1);
EXPECT_CALL(*rawCountersPtr, uptime).WillOnce(Return(std::chrono::seconds{1234}));

ON_CALL(*rawETLServicePtr, isAmendmentBlocked).WillByDefault(Return(false));
EXPECT_CALL(*rawETLServicePtr, isAmendmentBlocked).Times(1);
EXPECT_CALL(*rawETLServicePtr, isAmendmentBlocked).WillOnce(Return(false));

auto const rippledObj = json::parse(R"({
"result": {
@@ -366,18 +369,14 @@ TEST_F(RPCServerInfoHandlerTest, RippledForwardedValuesPresent)
}
}
})");
ON_CALL(*rawBalancerPtr, forwardToRippled).WillByDefault(Return(rippledObj.as_object()));
EXPECT_CALL(*rawBalancerPtr, forwardToRippled).Times(1);
EXPECT_CALL(*rawBalancerPtr, forwardToRippled).WillOnce(Return(rippledObj.as_object()));

// admin calls
ON_CALL(*rawCountersPtr, report).WillByDefault(Return(empty));
EXPECT_CALL(*rawCountersPtr, report).Times(1);
EXPECT_CALL(*rawCountersPtr, report).WillOnce(Return(empty));

ON_CALL(*rawSubscriptionManagerPtr, report).WillByDefault(Return(empty));
EXPECT_CALL(*rawSubscriptionManagerPtr, report).Times(1);
EXPECT_CALL(*rawSubscriptionManagerPtr, report).WillOnce(Return(empty));

ON_CALL(*rawETLServicePtr, getInfo).WillByDefault(Return(empty));
EXPECT_CALL(*rawETLServicePtr, getInfo).Times(1);
EXPECT_CALL(*rawETLServicePtr, getInfo).WillOnce(Return(empty));

auto const handler = AnyHandler{TestServerInfoHandler{
mockBackendPtr, mockSubscriptionManagerPtr, mockLoadBalancerPtr, mockETLServicePtr, *mockCountersPtr}};
@@ -394,48 +393,35 @@ TEST_F(RPCServerInfoHandlerTest, RippledForwardedValuesPresent)

TEST_F(RPCServerInfoHandlerTest, RippledForwardedValuesMissingNoExceptionThrown)
{
MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
ASSERT_NE(rawBackendPtr, nullptr);
MockLoadBalancer* rawBalancerPtr = mockLoadBalancerPtr.get();
MockCounters* rawCountersPtr = mockCountersPtr.get();
MockSubscriptionManager* rawSubscriptionManagerPtr = mockSubscriptionManagerPtr.get();
MockETLService* rawETLServicePtr = mockETLServicePtr.get();

mockBackendPtr->updateRange(10); // min
mockBackendPtr->updateRange(30); // max

auto const empty = json::object{};
auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30, 3); // 3 seconds old
ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo));
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1);
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).WillOnce(Return(ledgerinfo));

auto const feeBlob = CreateFeeSettingBlob(1, 2, 3, 4, 0);
ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(feeBlob));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).WillOnce(Return(feeBlob));

ON_CALL(*rawCountersPtr, uptime).WillByDefault(Return(std::chrono::seconds{1234}));
EXPECT_CALL(*rawCountersPtr, uptime).Times(1);
EXPECT_CALL(*rawCountersPtr, uptime).WillOnce(Return(std::chrono::seconds{1234}));

ON_CALL(*rawETLServicePtr, isAmendmentBlocked).WillByDefault(Return(false));
EXPECT_CALL(*rawETLServicePtr, isAmendmentBlocked).Times(1);
EXPECT_CALL(*rawETLServicePtr, isAmendmentBlocked).WillOnce(Return(false));

auto const rippledObj = json::parse(R"({
"result": {
"info": {}
}
})");
ON_CALL(*rawBalancerPtr, forwardToRippled).WillByDefault(Return(rippledObj.as_object()));
EXPECT_CALL(*rawBalancerPtr, forwardToRippled).Times(1);
EXPECT_CALL(*rawBalancerPtr, forwardToRippled).WillOnce(Return(rippledObj.as_object()));

// admin calls
ON_CALL(*rawCountersPtr, report).WillByDefault(Return(empty));
EXPECT_CALL(*rawCountersPtr, report).Times(1);
EXPECT_CALL(*rawCountersPtr, report).WillOnce(Return(empty));

ON_CALL(*rawSubscriptionManagerPtr, report).WillByDefault(Return(empty));
EXPECT_CALL(*rawSubscriptionManagerPtr, report).Times(1);
EXPECT_CALL(*rawSubscriptionManagerPtr, report).WillOnce(Return(empty));

ON_CALL(*rawETLServicePtr, getInfo).WillByDefault(Return(empty));
EXPECT_CALL(*rawETLServicePtr, getInfo).Times(1);
EXPECT_CALL(*rawETLServicePtr, getInfo).WillOnce(Return(empty));

auto const handler = AnyHandler{TestServerInfoHandler{
mockBackendPtr, mockSubscriptionManagerPtr, mockLoadBalancerPtr, mockETLServicePtr, *mockCountersPtr}};

@@ -146,6 +146,8 @@ struct MockBackend : public BackendInterface

MOCK_METHOD(bool, isTooBusy, (), (const, override));

MOCK_METHOD(boost::json::object, stats, (), (const, override));

MOCK_METHOD(void, doWriteLedgerObject, (std::string&&, std::uint32_t const, std::string&&), (override));

MOCK_METHOD(bool, doFinishWrites, (), (override));