Make database reads async

* yield on db read using asio
* PostgresBackend fetches multiple transactions or objects in parallel
This commit is contained in:
natenichols
2022-02-07 07:00:15 -06:00
committed by CJ Cobb
parent 7c2bef70bc
commit d016253264
50 changed files with 3612 additions and 2593 deletions

View File

@@ -30,7 +30,6 @@ target_sources(clio PRIVATE
## Backend
src/backend/BackendInterface.cpp
src/backend/CassandraBackend.cpp
src/backend/DBHelpers.cpp
src/backend/LayeredCache.cpp
src/backend/Pg.cpp
src/backend/PostgresBackend.cpp

View File

@@ -8,7 +8,7 @@
namespace Backend {
std::shared_ptr<BackendInterface>
make_Backend(boost::json::object const& config)
make_Backend(boost::asio::io_context& ioc, boost::json::object const& config)
{
BOOST_LOG_TRIVIAL(info) << __func__ << ": Constructing BackendInterface";
@@ -27,13 +27,13 @@ make_Backend(boost::json::object const& config)
if (config.contains("online_delete"))
dbConfig.at(type).as_object()["ttl"] =
config.at("online_delete").as_int64() * 4;
backend =
std::make_shared<CassandraBackend>(dbConfig.at(type).as_object());
backend = std::make_shared<CassandraBackend>(
ioc, dbConfig.at(type).as_object());
}
else if (boost::iequals(type, "postgres"))
{
backend =
std::make_shared<PostgresBackend>(dbConfig.at(type).as_object());
backend = std::make_shared<PostgresBackend>(
ioc, dbConfig.at(type).as_object());
}
if (!backend)

View File

@@ -3,7 +3,7 @@
#include <backend/BackendInterface.h>
namespace Backend {
bool
BackendInterface::finishWrites(uint32_t ledgerSequence)
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
{
auto commitRes = doFinishWrites();
if (commitRes)
@@ -15,7 +15,7 @@ BackendInterface::finishWrites(uint32_t ledgerSequence)
void
BackendInterface::writeLedgerObject(
std::string&& key,
uint32_t seq,
std::uint32_t const seq,
std::string&& blob)
{
assert(key.size() == sizeof(ripple::uint256));
@@ -23,17 +23,37 @@ BackendInterface::writeLedgerObject(
doWriteLedgerObject(std::move(key), seq, std::move(blob));
}
std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow(
boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(debug) << __func__;
while (true)
{
try
{
return hardFetchLedgerRange(yield);
}
catch (DatabaseTimeout& t)
{
;
}
}
}
std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow() const
{
BOOST_LOG_TRIVIAL(debug) << __func__;
return retryOnTimeout([&]() { return hardFetchLedgerRange(); });
}
// *** state data methods
std::optional<Blob>
BackendInterface::fetchLedgerObject(
ripple::uint256 const& key,
uint32_t sequence) const
std::uint32_t const sequence,
boost::asio::yield_context& yield) const
{
auto obj = cache_.get(key, sequence);
if (obj)
@@ -46,7 +66,7 @@ BackendInterface::fetchLedgerObject(
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " - cache miss - " << ripple::strHex(key);
auto dbObj = doFetchLedgerObject(key, sequence);
auto dbObj = doFetchLedgerObject(key, sequence, yield);
if (!dbObj)
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " - missed cache and missed in db";
@@ -60,7 +80,8 @@ BackendInterface::fetchLedgerObject(
std::vector<Blob>
BackendInterface::fetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
uint32_t sequence) const
std::uint32_t const sequence,
boost::asio::yield_context& yield) const
{
std::vector<Blob> results;
results.resize(keys.size());
@@ -79,7 +100,7 @@ BackendInterface::fetchLedgerObjects(
if (misses.size())
{
auto objs = doFetchLedgerObjects(misses, sequence);
auto objs = doFetchLedgerObjects(misses, sequence, yield);
for (size_t i = 0, j = 0; i < results.size(); ++i)
{
if (results[i].size() == 0)
@@ -89,13 +110,15 @@ BackendInterface::fetchLedgerObjects(
}
}
}
return results;
}
// Fetches the successor to key/index
std::optional<ripple::uint256>
BackendInterface::fetchSuccessorKey(
ripple::uint256 key,
uint32_t ledgerSequence) const
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
auto succ = cache_.getSuccessor(key, ledgerSequence);
if (succ)
@@ -104,28 +127,32 @@ BackendInterface::fetchSuccessorKey(
else
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " - cache miss - " << ripple::strHex(key);
return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence);
return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}
std::optional<LedgerObject>
BackendInterface::fetchSuccessorObject(
ripple::uint256 key,
uint32_t ledgerSequence) const
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
auto succ = fetchSuccessorKey(key, ledgerSequence);
auto succ = fetchSuccessorKey(key, ledgerSequence, yield);
if (succ)
{
auto obj = fetchLedgerObject(*succ, ledgerSequence);
auto obj = fetchLedgerObject(*succ, ledgerSequence, yield);
assert(obj);
return {{*succ, *obj}};
}
return {};
}
BookOffersPage
BackendInterface::fetchBookOffers(
ripple::uint256 const& book,
uint32_t ledgerSequence,
std::uint32_t limit,
std::optional<ripple::uint256> const& cursor) const
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursor,
boost::asio::yield_context& yield) const
{
// TODO try to speed this up. This can take a few seconds. The goal is
// to get it down to a few hundred milliseconds.
@@ -139,14 +166,14 @@ BackendInterface::fetchBookOffers(
.count();
};
auto begin = std::chrono::system_clock::now();
uint32_t numSucc = 0;
uint32_t numPages = 0;
std::uint32_t numSucc = 0;
std::uint32_t numPages = 0;
long succMillis = 0;
long pageMillis = 0;
while (keys.size() < limit)
{
auto mid1 = std::chrono::system_clock::now();
auto offerDir = fetchSuccessorObject(uTipIndex, ledgerSequence);
auto offerDir = fetchSuccessorObject(uTipIndex, ledgerSequence, yield);
auto mid2 = std::chrono::system_clock::now();
numSucc++;
succMillis += getMillis(mid2 - mid1);
@@ -174,7 +201,8 @@ BackendInterface::fetchBookOffers(
break;
}
auto nextKey = ripple::keylet::page(uTipIndex, next);
auto nextDir = fetchLedgerObject(nextKey.key, ledgerSequence);
auto nextDir =
fetchLedgerObject(nextKey.key, ledgerSequence, yield);
assert(nextDir);
offerDir->blob = *nextDir;
offerDir->key = nextKey.key;
@@ -183,7 +211,7 @@ BackendInterface::fetchBookOffers(
pageMillis += getMillis(mid3 - mid2);
}
auto mid = std::chrono::system_clock::now();
auto objs = fetchLedgerObjects(keys, ledgerSequence);
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < keys.size() && i < limit; ++i)
{
BOOST_LOG_TRIVIAL(debug)
@@ -215,9 +243,10 @@ BackendInterface::fetchBookOffers(
LedgerPage
BackendInterface::fetchLedgerPage(
std::optional<ripple::uint256> const& cursor,
std::uint32_t ledgerSequence,
std::uint32_t limit,
std::uint32_t limitHint) const
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::uint32_t const limitHint,
boost::asio::yield_context& yield) const
{
LedgerPage page;
@@ -226,13 +255,14 @@ BackendInterface::fetchLedgerPage(
{
ripple::uint256 const& curCursor =
keys.size() ? keys.back() : cursor ? *cursor : firstKey;
auto succ = fetchSuccessorKey(curCursor, ledgerSequence);
auto succ = fetchSuccessorKey(curCursor, ledgerSequence, yield);
if (!succ)
break;
keys.push_back(std::move(*succ));
}
auto objects = fetchLedgerObjects(keys, ledgerSequence);
auto objects = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < objects.size(); ++i)
{
assert(objects[i].size());
@@ -240,16 +270,19 @@ BackendInterface::fetchLedgerPage(
}
if (page.objects.size() >= limit)
page.cursor = page.objects.back().key;
return page;
}
std::optional<ripple::Fees>
BackendInterface::fetchFees(std::uint32_t seq) const
BackendInterface::fetchFees(
std::uint32_t const seq,
boost::asio::yield_context& yield) const
{
ripple::Fees fees;
auto key = ripple::keylet::fees().key;
auto bytes = fetchLedgerObject(key, seq);
auto bytes = fetchLedgerObject(key, seq, yield);
if (!bytes)
{

View File

@@ -37,12 +37,35 @@ retryOnTimeout(F func, size_t waitMs = 500)
}
}
// Please note, this function only works w/ non-void return type. Writes are
// synchronous anyway, so only read operations need to be wrapped in a
// coroutine via this helper.
template <class F>
void
synchronous(F&& f)
{
boost::asio::io_context ctx;
std::optional<boost::asio::io_context::work> work;
work.emplace(ctx);
boost::asio::spawn(ctx, [&f, &work](boost::asio::yield_context yield) {
f(yield);
work.reset();
});
ctx.run();
}
class BackendInterface
{
protected:
std::optional<LedgerRange> range;
SimpleCache cache_;
// mutex used for open() and close()
mutable std::mutex mutex_;
public:
BackendInterface(boost::json::object const& config)
{
@@ -72,98 +95,150 @@ public:
}
virtual std::optional<ripple::LedgerInfo>
fetchLedgerBySequence(uint32_t sequence) const = 0;
fetchLedgerBySequence(
std::uint32_t const sequence,
boost::asio::yield_context& yield) const = 0;
virtual std::optional<ripple::LedgerInfo>
fetchLedgerByHash(ripple::uint256 const& hash) const = 0;
fetchLedgerByHash(
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const = 0;
virtual std::optional<uint32_t>
fetchLatestLedgerSequence() const = 0;
virtual std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context& yield) const = 0;
std::optional<LedgerRange>
fetchLedgerRange() const
{
std::lock_guard lk(mutex_);
return range;
}
std::optional<ripple::Fees>
fetchFees(std::uint32_t seq) const;
fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const;
// *** transaction methods
virtual std::optional<TransactionAndMetadata>
fetchTransaction(ripple::uint256 const& hash) const = 0;
fetchTransaction(
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const = 0;
virtual std::vector<TransactionAndMetadata>
fetchTransactions(std::vector<ripple::uint256> const& hashes) const = 0;
fetchTransactions(
std::vector<ripple::uint256> const& hashes,
boost::asio::yield_context& yield) const = 0;
virtual AccountTransactions
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t limit,
bool forward = false,
std::optional<AccountTransactionsCursor> const& cursor = {}) const = 0;
std::uint32_t const limit,
bool forward,
std::optional<AccountTransactionsCursor> const& cursor,
boost::asio::yield_context& yield) const = 0;
virtual std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(uint32_t ledgerSequence) const = 0;
fetchAllTransactionsInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
virtual std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(uint32_t ledgerSequence) const = 0;
fetchAllTransactionHashesInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
// *** state data methods
std::optional<Blob>
fetchLedgerObject(ripple::uint256 const& key, uint32_t sequence) const;
fetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const;
std::vector<Blob>
fetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
uint32_t sequence) const;
std::uint32_t const sequence,
boost::asio::yield_context& yield) const;
virtual std::optional<Blob>
doFetchLedgerObject(ripple::uint256 const& key, uint32_t sequence)
const = 0;
doFetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const = 0;
virtual std::vector<Blob>
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
uint32_t sequence) const = 0;
std::uint32_t const sequence,
boost::asio::yield_context& yield) const = 0;
virtual std::vector<LedgerObject>
fetchLedgerDiff(uint32_t ledgerSequence) const = 0;
fetchLedgerDiff(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
// Fetches a page of ledger objects, ordered by key/index.
// Used by ledger_data
LedgerPage
fetchLedgerPage(
std::optional<ripple::uint256> const& cursor,
std::uint32_t ledgerSequence,
std::uint32_t limit,
std::uint32_t limitHint = 0) const;
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::uint32_t const limitHint,
boost::asio::yield_context& yield) const;
// Fetches the successor to key/index
std::optional<LedgerObject>
fetchSuccessorObject(ripple::uint256 key, uint32_t ledgerSequence) const;
fetchSuccessorObject(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const;
std::optional<ripple::uint256>
fetchSuccessorKey(ripple::uint256 key, uint32_t ledgerSequence) const;
fetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const;
// Fetches the successor to key/index
virtual std::optional<ripple::uint256>
doFetchSuccessorKey(ripple::uint256 key, uint32_t ledgerSequence) const = 0;
doFetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
BookOffersPage
fetchBookOffers(
ripple::uint256 const& book,
uint32_t ledgerSequence,
std::uint32_t limit,
std::optional<ripple::uint256> const& cursor = {}) const;
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursor,
boost::asio::yield_context& yield) const;
std::optional<LedgerRange>
hardFetchLedgerRange() const
{
std::optional<LedgerRange> range = {};
synchronous([&](boost::asio::yield_context yield) {
range = hardFetchLedgerRange(yield);
});
return range;
}
virtual std::optional<LedgerRange>
hardFetchLedgerRange() const = 0;
hardFetchLedgerRange(boost::asio::yield_context& yield) const = 0;
// Doesn't throw DatabaseTimeout. Should be used with care.
std::optional<LedgerRange>
hardFetchLedgerRangeNoThrow() const;
// Doesn't throw DatabaseTimeout. Should be used with care.
std::optional<LedgerRange>
hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const;
void
updateRange(uint32_t newMax)
updateRange(std::uint32_t const newMax)
{
std::lock_guard lk(mutex_);
if (!range)
range = {newMax, newMax};
else
@@ -175,14 +250,17 @@ public:
ripple::LedgerInfo const& ledgerInfo,
std::string&& ledgerHeader) = 0;
void
writeLedgerObject(std::string&& key, uint32_t seq, std::string&& blob);
virtual void
writeLedgerObject(
std::string&& key,
std::uint32_t const seq,
std::string&& blob);
virtual void
writeTransaction(
std::string&& hash,
uint32_t seq,
uint32_t date,
std::uint32_t const seq,
std::uint32_t const date,
std::string&& transaction,
std::string&& metadata) = 0;
@@ -192,20 +270,23 @@ public:
virtual void
writeSuccessor(
std::string&& key,
uint32_t seq,
std::uint32_t const seq,
std::string&& successor) = 0;
// Tell the database we are about to begin writing data for a particular
// ledger.
virtual void
startWrites() = 0;
startWrites() const = 0;
// Tell the database we have finished writing all data for a particular
// ledger
bool
finishWrites(uint32_t ledgerSequence);
finishWrites(std::uint32_t const ledgerSequence);
virtual bool
doOnlineDelete(uint32_t numLedgersToKeep) const = 0;
doOnlineDelete(
std::uint32_t numLedgersToKeep,
boost::asio::yield_context& yield) const = 0;
// Open the database. Set up all of the necessary objects and
// datastructures. After this call completes, the database is ready for
@@ -215,18 +296,18 @@ public:
// Close the database, releasing any resources
virtual void
close() = 0;
close(){};
// *** private helper methods
private:
virtual void
doWriteLedgerObject(
std::string&& key,
uint32_t seq,
std::uint32_t const seq,
std::string&& blob) = 0;
virtual bool
doFinishWrites() = 0;
doFinishWrites() const = 0;
};
} // namespace Backend

View File

@@ -3,6 +3,13 @@
#include <functional>
#include <unordered_map>
namespace Backend {
// Type alias for async completion handlers
using completion_token = boost::asio::yield_context;
using function_type = void(boost::system::error_code);
using result_type = boost::asio::async_result<completion_token, function_type>;
using handler_type = typename result_type::completion_handler_type;
template <class T, class F>
void
processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func)
@@ -50,7 +57,7 @@ struct WriteCallbackData
CassandraBackend const* backend;
T data;
std::function<void(WriteCallbackData<T, B>&, bool)> retry;
uint32_t currentRetries;
std::uint32_t currentRetries;
std::atomic<int> refs = 1;
std::string id;
@@ -95,6 +102,7 @@ struct WriteCallbackData
return id;
}
};
template <class T, class B>
struct BulkWriteCallbackData : public WriteCallbackData<T, B>
{
@@ -162,7 +170,7 @@ makeAndExecuteBulkAsyncWrite(
void
CassandraBackend::doWriteLedgerObject(
std::string&& key,
uint32_t seq,
std::uint32_t const seq,
std::string&& blob)
{
BOOST_LOG_TRIVIAL(trace) << "Writing ledger object to cassandra";
@@ -196,7 +204,7 @@ CassandraBackend::doWriteLedgerObject(
void
CassandraBackend::writeSuccessor(
std::string&& key,
uint32_t seq,
std::uint32_t const seq,
std::string&& successor)
{
BOOST_LOG_TRIVIAL(trace)
@@ -277,8 +285,8 @@ CassandraBackend::writeAccountTransactions(
void
CassandraBackend::writeTransaction(
std::string&& hash,
uint32_t seq,
uint32_t date,
std::uint32_t const seq,
std::uint32_t const date,
std::string&& transaction,
std::string&& metadata)
{
@@ -317,11 +325,12 @@ CassandraBackend::writeTransaction(
}
std::optional<LedgerRange>
CassandraBackend::hardFetchLedgerRange() const
CassandraBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra";
CassandraStatement statement{selectLedgerRange_};
CassandraResult result = executeSyncRead(statement);
CassandraResult result = executeAsyncRead(statement, yield);
if (!result)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows";
@@ -339,26 +348,31 @@ CassandraBackend::hardFetchLedgerRange() const
}
return range;
}
std::vector<TransactionAndMetadata>
CassandraBackend::fetchAllTransactionsInLedger(uint32_t ledgerSequence) const
CassandraBackend::fetchAllTransactionsInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence);
return fetchTransactions(hashes);
auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield);
return fetchTransactions(hashes, yield);
}
template <class Result>
struct ReadCallbackData
{
std::function<void(CassandraResult&)> onSuccess;
using handler_type = typename Result::completion_handler_type;
std::atomic_int& numOutstanding;
std::mutex& mtx;
std::condition_variable& cv;
handler_type handler;
std::function<void(CassandraResult&)> onSuccess;
std::atomic_bool errored = false;
ReadCallbackData(
std::atomic_int& numOutstanding,
std::mutex& m,
std::condition_variable& cv,
handler_type& handler,
std::function<void(CassandraResult&)> onSuccess)
: numOutstanding(numOutstanding), mtx(m), cv(cv), onSuccess(onSuccess)
: numOutstanding(numOutstanding), handler(handler), onSuccess(onSuccess)
{
}
@@ -375,35 +389,55 @@ struct ReadCallbackData
CassandraResult result{cass_future_get_result(fut)};
onSuccess(result);
}
std::lock_guard lck(mtx);
if (--numOutstanding == 0)
cv.notify_one();
resume();
}
void
resume()
{
boost::asio::post(
boost::asio::get_associated_executor(handler),
[handler = std::move(handler)]() mutable {
handler(boost::system::error_code{});
});
}
};
void
processAsyncRead(CassFuture* fut, void* cbData)
{
ReadCallbackData& cb = *static_cast<ReadCallbackData*>(cbData);
ReadCallbackData<result_type>& cb =
*static_cast<ReadCallbackData<result_type>*>(cbData);
cb.finish(fut);
}
std::vector<TransactionAndMetadata>
CassandraBackend::fetchTransactions(
std::vector<ripple::uint256> const& hashes) const
std::vector<ripple::uint256> const& hashes,
boost::asio::yield_context& yield) const
{
if (hashes.size() == 0)
return {};
handler_type handler(std::forward<decltype(yield)>(yield));
result_type result(handler);
std::size_t const numHashes = hashes.size();
std::atomic_int numOutstanding = numHashes;
std::condition_variable cv;
std::mutex mtx;
std::vector<TransactionAndMetadata> results{numHashes};
std::vector<std::shared_ptr<ReadCallbackData>> cbs;
std::vector<std::shared_ptr<ReadCallbackData<result_type>>> cbs;
cbs.reserve(numHashes);
auto start = std::chrono::system_clock::now();
for (std::size_t i = 0; i < hashes.size(); ++i)
{
CassandraStatement statement{selectTransaction_};
statement.bindNextBytes(hashes[i]);
cbs.push_back(std::make_shared<ReadCallbackData>(
numOutstanding, mtx, cv, [i, &results](auto& result) {
cbs.push_back(std::make_shared<ReadCallbackData<result_type>>(
numOutstanding, handler, [i, &results](auto& result) {
if (result.hasResult())
results[i] = {
result.getBytes(),
@@ -411,12 +445,14 @@ CassandraBackend::fetchTransactions(
result.getUInt32(),
result.getUInt32()};
}));
executeAsyncRead(statement, processAsyncRead, *cbs[i]);
}
assert(results.size() == cbs.size());
std::unique_lock<std::mutex> lck(mtx);
cv.wait(lck, [&numOutstanding]() { return numOutstanding == 0; });
// suspend the coroutine until completion handler is called.
result.get();
auto end = std::chrono::system_clock::now();
for (auto const& cb : cbs)
{
@@ -431,14 +467,18 @@ CassandraBackend::fetchTransactions(
<< " milliseconds";
return results;
}
std::vector<ripple::uint256>
CassandraBackend::fetchAllTransactionHashesInLedger(
uint32_t ledgerSequence) const
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
CassandraStatement statement{selectAllTransactionHashesInLedger_};
statement.bindNextInt(ledgerSequence);
auto start = std::chrono::system_clock::now();
CassandraResult result = executeSyncRead(statement);
CassandraResult result = executeAsyncRead(statement, yield);
auto end = std::chrono::system_clock::now();
if (!result)
{
@@ -464,9 +504,10 @@ CassandraBackend::fetchAllTransactionHashesInLedger(
AccountTransactions
CassandraBackend::fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t limit,
bool forward,
std::optional<AccountTransactionsCursor> const& cursorIn) const
std::uint32_t const limit,
bool const forward,
std::optional<AccountTransactionsCursor> const& cursorIn,
boost::asio::yield_context& yield) const
{
auto rng = fetchLedgerRange();
if (!rng)
@@ -494,7 +535,8 @@ CassandraBackend::fetchAccountTransactions(
else
{
int seq = forward ? rng->minSequence : rng->maxSequence;
int placeHolder = forward ? 0 : std::numeric_limits<uint32_t>::max();
int placeHolder =
forward ? 0 : std::numeric_limits<std::uint32_t>::max();
statement.bindNextIntTuple(placeHolder, placeHolder);
BOOST_LOG_TRIVIAL(debug)
@@ -503,7 +545,8 @@ CassandraBackend::fetchAccountTransactions(
}
statement.bindNextUInt(limit);
CassandraResult result = executeSyncRead(statement);
CassandraResult result = executeAsyncRead(statement, yield);
if (!result.hasResult())
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned";
@@ -520,13 +563,16 @@ CassandraBackend::fetchAccountTransactions(
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " setting cursor";
auto [lgrSeq, txnIdx] = result.getInt64Tuple();
cursor = {(uint32_t)lgrSeq, (uint32_t)txnIdx};
cursor = {
static_cast<std::uint32_t>(lgrSeq),
static_cast<std::uint32_t>(txnIdx)};
if (forward)
++cursor->transactionIndex;
}
} while (result.nextRow());
auto txns = fetchTransactions(hashes);
auto txns = fetchTransactions(hashes, yield);
BOOST_LOG_TRIVIAL(debug) << __func__ << "txns = " << txns.size();
if (txns.size() == limit)
@@ -540,13 +586,16 @@ CassandraBackend::fetchAccountTransactions(
std::optional<ripple::uint256>
CassandraBackend::doFetchSuccessorKey(
ripple::uint256 key,
uint32_t ledgerSequence) const
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra";
CassandraStatement statement{selectSuccessor_};
statement.bindNextBytes(key);
statement.bindNextInt(ledgerSequence);
CassandraResult result = executeSyncRead(statement);
CassandraResult result = executeAsyncRead(statement, yield);
if (!result)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows";
@@ -557,16 +606,20 @@ CassandraBackend::doFetchSuccessorKey(
return {};
return next;
}
std::optional<Blob>
CassandraBackend::doFetchLedgerObject(
ripple::uint256 const& key,
uint32_t sequence) const
std::uint32_t const sequence,
boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra";
CassandraStatement statement{selectObject_};
statement.bindNextBytes(key);
statement.bindNextInt(sequence);
CassandraResult result = executeSyncRead(statement);
CassandraResult result = executeAsyncRead(statement, yield);
if (!result)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows";
@@ -581,21 +634,26 @@ CassandraBackend::doFetchLedgerObject(
std::vector<Blob>
CassandraBackend::doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
uint32_t sequence) const
std::uint32_t const sequence,
boost::asio::yield_context& yield) const
{
if (keys.size() == 0)
return {};
handler_type handler(std::forward<decltype(yield)>(yield));
result_type result(handler);
std::size_t const numKeys = keys.size();
BOOST_LOG_TRIVIAL(trace)
<< "Fetching " << numKeys << " records from Cassandra";
std::atomic_int numOutstanding = numKeys;
std::condition_variable cv;
std::mutex mtx;
std::vector<Blob> results{numKeys};
std::vector<std::shared_ptr<ReadCallbackData>> cbs;
std::vector<std::shared_ptr<ReadCallbackData<result_type>>> cbs;
cbs.reserve(numKeys);
for (std::size_t i = 0; i < keys.size(); ++i)
{
cbs.push_back(std::make_shared<ReadCallbackData>(
numOutstanding, mtx, cv, [i, &results](auto& result) {
cbs.push_back(std::make_shared<ReadCallbackData<result_type>>(
numOutstanding, handler, [i, &results](auto& result) {
if (result.hasResult())
results[i] = result.getBytes();
}));
@@ -606,8 +664,9 @@ CassandraBackend::doFetchLedgerObjects(
}
assert(results.size() == cbs.size());
std::unique_lock<std::mutex> lck(mtx);
cv.wait(lck, [&numOutstanding]() { return numOutstanding == 0; });
// suspend the coroutine until completion handler is called.
result.get();
for (auto const& cb : cbs)
{
if (cb->errored)
@@ -618,14 +677,20 @@ CassandraBackend::doFetchLedgerObjects(
<< "Fetched " << numKeys << " records from Cassandra";
return results;
}
std::vector<LedgerObject>
CassandraBackend::fetchLedgerDiff(uint32_t ledgerSequence) const
CassandraBackend::fetchLedgerDiff(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
CassandraStatement statement{selectDiff_};
statement.bindNextInt(ledgerSequence);
auto start = std::chrono::system_clock::now();
CassandraResult result = executeSyncRead(statement);
CassandraResult result = executeAsyncRead(statement, yield);
auto end = std::chrono::system_clock::now();
if (!result)
{
BOOST_LOG_TRIVIAL(error)
@@ -643,7 +708,7 @@ CassandraBackend::fetchLedgerDiff(uint32_t ledgerSequence) const
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
.count()
<< " milliseconds";
auto objs = fetchLedgerObjects(keys, ledgerSequence);
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
std::vector<LedgerObject> results;
std::transform(
keys.begin(),
@@ -657,7 +722,9 @@ CassandraBackend::fetchLedgerDiff(uint32_t ledgerSequence) const
}
bool
CassandraBackend::doOnlineDelete(uint32_t numLedgersToKeep) const
CassandraBackend::doOnlineDelete(
std::uint32_t const numLedgersToKeep,
boost::asio::yield_context& yield) const
{
// calculate TTL
// ledgers close roughly every 4 seconds. We double the TTL so that way
@@ -666,7 +733,7 @@ CassandraBackend::doOnlineDelete(uint32_t numLedgersToKeep) const
auto rng = fetchLedgerRange();
if (!rng)
return false;
uint32_t minLedger = rng->maxSequence - numLedgersToKeep;
std::uint32_t minLedger = rng->maxSequence - numLedgersToKeep;
if (minLedger <= rng->minSequence)
return false;
auto bind = [this](auto& params) {
@@ -680,18 +747,19 @@ CassandraBackend::doOnlineDelete(uint32_t numLedgersToKeep) const
std::condition_variable cv;
std::mutex mtx;
std::vector<std::shared_ptr<BulkWriteCallbackData<
std::tuple<ripple::uint256, uint32_t, Blob>,
std::tuple<ripple::uint256, std::uint32_t, Blob>,
typename std::remove_reference<decltype(bind)>::type>>>
cbs;
uint32_t concurrentLimit = 10;
std::uint32_t concurrentLimit = 10;
std::atomic_int numOutstanding = 0;
// iterate through latest ledger, updating TTL
std::optional<ripple::uint256> cursor;
while (true)
{
auto [objects, curCursor, warning] = retryOnTimeout(
[&]() { return fetchLedgerPage(cursor, minLedger, 256); });
auto [objects, curCursor, warning] = retryOnTimeout([&]() {
return fetchLedgerPage(cursor, minLedger, 256, 0, yield);
});
if (warning)
{
BOOST_LOG_TRIVIAL(warning)
@@ -830,9 +898,7 @@ CassandraBackend::open(bool readOnly)
std::string username = getString("username");
if (username.size())
{
BOOST_LOG_TRIVIAL(debug)
<< "user = " << username.c_str()
<< " password = " << getString("password").c_str();
BOOST_LOG_TRIVIAL(debug) << "user = " << username.c_str();
cass_cluster_set_credentials(
cluster, username.c_str(), getString("password").c_str());
}
@@ -1282,10 +1348,8 @@ CassandraBackend::open(bool readOnly)
setupPreparedStatements = true;
}
work_.emplace(ioContext_);
ioThread_ = std::thread{[this]() { ioContext_.run(); }};
open_ = true;
BOOST_LOG_TRIVIAL(info) << "Opened CassandraBackend successfully";
} // namespace Backend
}
} // namespace Backend

View File

@@ -3,6 +3,8 @@
#include <ripple/basics/base_uint.h>
#include <boost/asio.hpp>
#include <boost/asio/async_result.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/filesystem.hpp>
#include <boost/json.hpp>
#include <boost/log/trivial.hpp>
@@ -97,6 +99,7 @@ public:
curBindingIndex_ = other.curBindingIndex_;
other.curBindingIndex_ = 0;
}
CassandraStatement(CassandraStatement const& other) = delete;
CassStatement*
@@ -125,9 +128,9 @@ public:
}
void
bindNextBytes(const char* data, uint32_t size)
bindNextBytes(const char* data, std::uint32_t const size)
{
bindNextBytes((unsigned char*)data, size);
bindNextBytes((unsigned const char*)(data), size);
}
void
@@ -153,13 +156,13 @@ public:
}
void
bindNextBytes(void const* key, uint32_t size)
bindNextBytes(void const* key, std::uint32_t const size)
{
bindNextBytes(static_cast<const unsigned char*>(key), size);
}
void
bindNextBytes(const unsigned char* data, uint32_t size)
bindNextBytes(const unsigned char* data, std::uint32_t const size)
{
if (!statement_)
throw std::runtime_error(
@@ -181,7 +184,7 @@ public:
}
void
bindNextUInt(uint32_t value)
bindNextUInt(std::uint32_t const value)
{
if (!statement_)
throw std::runtime_error(
@@ -202,9 +205,9 @@ public:
}
void
bindNextInt(uint32_t value)
bindNextInt(std::uint32_t const value)
{
bindNextInt((int64_t)value);
bindNextInt(static_cast<std::int64_t>(value));
}
void
@@ -227,7 +230,7 @@ public:
}
void
bindNextIntTuple(uint32_t first, uint32_t second)
bindNextIntTuple(std::uint32_t const first, std::uint32_t const second)
{
CassTuple* tuple = cass_tuple_new(2);
CassError rc = cass_tuple_set_int64(tuple, 0, first);
@@ -366,26 +369,6 @@ public:
curGetIndex_++;
return {buf, buf + bufSize};
}
/*
uint32_t
getNumBytes()
{
if (!row_)
throw std::runtime_error("CassandraResult::getBytes - no result");
cass_byte_t const* buf;
std::size_t bufSize;
CassError rc = cass_value_get_bytes(
cass_row_get_column(row_, curGetIndex_), &buf, &bufSize);
if (rc != CASS_OK)
{
std::stringstream msg;
msg << "CassandraResult::getBytes - error getting value: " << rc
<< ", " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << msg.str();
throw std::runtime_error(msg.str());
}
return bufSize;
}*/
ripple::uint256
getUInt256()
@@ -428,13 +411,13 @@ public:
return val;
}
uint32_t
std::uint32_t
getUInt32()
{
return (uint32_t)getInt64();
return static_cast<std::uint32_t>(getInt64());
}
std::pair<int64_t, int64_t>
std::pair<std::int64_t, std::int64_t>
getInt64Tuple()
{
if (!row_)
@@ -446,13 +429,13 @@ public:
throw std::runtime_error(
"CassandraResult::getInt64Tuple - failed to iterate tuple");
CassValue const* value = cass_iterator_get_value(tupleIter);
int64_t first;
std::int64_t first;
cass_value_get_int64(value, &first);
if (!cass_iterator_next(tupleIter))
throw std::runtime_error(
"CassandraResult::getInt64Tuple - failed to iterate tuple");
value = cass_iterator_get_value(tupleIter);
int64_t second;
std::int64_t second;
cass_value_get_int64(value, &second);
++curGetIndex_;
return {first, second};
@@ -506,6 +489,52 @@ isTimeout(CassError rc)
return false;
}
template <typename CompletionToken>
CassError
cass_future_error_code(CassFuture* fut, CompletionToken&& token)
{
using function_type = void(boost::system::error_code, CassError);
using result_type =
boost::asio::async_result<CompletionToken, function_type>;
using handler_type = typename result_type::completion_handler_type;
handler_type handler(std::forward<decltype(token)>(token));
result_type result(handler);
struct HandlerWrapper
{
handler_type handler;
HandlerWrapper(handler_type&& handler_) : handler(std::move(handler_))
{
}
};
auto resume = [](CassFuture* fut, void* data) -> void {
HandlerWrapper* hw = (HandlerWrapper*)data;
boost::asio::post(
boost::asio::get_associated_executor(hw->handler),
[fut, hw, handler = std::move(hw->handler)]() mutable {
delete hw;
handler(
boost::system::error_code{}, cass_future_error_code(fut));
});
};
HandlerWrapper* wrapper = new HandlerWrapper(std::move(handler));
cass_future_set_callback(fut, resume, wrapper);
// Suspend the coroutine until completion handler is called.
// The handler will populate rc, the error code describing
// the state of the cassandra future.
auto rc = result.get();
return rc;
}
class CassandraBackend : public BackendInterface
{
private:
@@ -529,9 +558,6 @@ private:
std::atomic<bool> open_{false};
// mutex used for open() and close()
std::mutex mutex_;
std::unique_ptr<CassSession, void (*)(CassSession*)> session_{
nullptr,
[](CassSession* session) {
@@ -571,17 +597,12 @@ private:
CassandraPreparedStatement selectLatestLedger_;
CassandraPreparedStatement selectLedgerRange_;
// io_context used for exponential backoff for write retries
mutable boost::asio::io_context ioContext_;
std::optional<boost::asio::io_context::work> work_;
std::thread ioThread_;
// maximum number of concurrent in flight requests. New requests will wait
// for earlier requests to finish if this limit is exceeded
uint32_t maxRequestsOutstanding = 10000;
std::uint32_t maxRequestsOutstanding = 10000;
// we keep this small because the indexer runs in the background, and we
// don't want the database to be swamped when the indexer is running
uint32_t indexerMaxRequestsOutstanding = 10;
std::uint32_t indexerMaxRequestsOutstanding = 10;
mutable std::atomic_uint32_t numRequestsOutstanding_ = 0;
// mutex and condition_variable to limit the number of concurrent in flight
@@ -594,22 +615,40 @@ private:
mutable std::mutex syncMutex_;
mutable std::condition_variable syncCv_;
// io_context for read/write retries
mutable boost::asio::io_context ioContext_;
std::optional<boost::asio::io_context::work> work_;
std::thread ioThread_;
boost::json::object config_;
mutable uint32_t ledgerSequence_ = 0;
mutable std::uint32_t ledgerSequence_ = 0;
public:
CassandraBackend(boost::json::object const& config)
CassandraBackend(
boost::asio::io_context& ioc,
boost::json::object const& config)
: BackendInterface(config), config_(config)
{
work_.emplace(ioContext_);
ioThread_ = std::thread([this]() { ioContext_.run(); });
}
~CassandraBackend() override
{
work_.reset();
ioThread_.join();
if (open_)
close();
}
boost::asio::io_context&
getIOContext() const
{
return ioContext_;
}
bool
isOpen()
{
@@ -626,23 +665,19 @@ public:
void
close() override
{
{
std::lock_guard<std::mutex> lock(mutex_);
work_.reset();
ioThread_.join();
}
open_ = false;
}
AccountTransactions
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t limit,
std::uint32_t const limit,
bool forward,
std::optional<AccountTransactionsCursor> const& cursor) const override;
std::optional<AccountTransactionsCursor> const& cursor,
boost::asio::yield_context& yield) const override;
bool
doFinishWrites() override
doFinishWrites() const override
{
// wait for all other writes to finish
sync();
@@ -674,12 +709,12 @@ public:
writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& header)
override;
std::optional<uint32_t>
fetchLatestLedgerSequence() const override
std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override
{
BOOST_LOG_TRIVIAL(trace) << __func__;
CassandraStatement statement{selectLatestLedger_};
CassandraResult result = executeSyncRead(statement);
CassandraResult result = executeAsyncRead(statement, yield);
if (!result.hasResult())
{
BOOST_LOG_TRIVIAL(error)
@@ -690,13 +725,14 @@ public:
}
std::optional<ripple::LedgerInfo>
fetchLedgerBySequence(uint32_t sequence) const override
fetchLedgerBySequence(
std::uint32_t const sequence,
boost::asio::yield_context& yield) const override
{
BOOST_LOG_TRIVIAL(trace) << __func__;
CassandraStatement statement{selectLedgerBySeq_};
statement.bindNextInt(sequence);
CassandraResult result = executeSyncRead(statement);
CassandraResult result = executeAsyncRead(statement, yield);
if (!result)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows";
@@ -707,46 +743,57 @@ public:
}
std::optional<ripple::LedgerInfo>
fetchLedgerByHash(ripple::uint256 const& hash) const override
fetchLedgerByHash(
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const override
{
CassandraStatement statement{selectLedgerByHash_};
statement.bindNextBytes(hash);
CassandraResult result = executeSyncRead(statement);
CassandraResult result = executeAsyncRead(statement, yield);
if (!result.hasResult())
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned";
return {};
}
std::uint32_t sequence = result.getInt64();
std::uint32_t const sequence = result.getInt64();
return fetchLedgerBySequence(sequence);
return fetchLedgerBySequence(sequence, yield);
}
std::optional<LedgerRange>
hardFetchLedgerRange() const override;
hardFetchLedgerRange(boost::asio::yield_context& yield) const override;
std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(uint32_t ledgerSequence) const override;
fetchAllTransactionsInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const override;
std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(uint32_t ledgerSequence) const override;
fetchAllTransactionHashesInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const override;
// Synchronously fetch the object with key key, as of ledger with sequence
// sequence
std::optional<Blob>
doFetchLedgerObject(ripple::uint256 const& key, uint32_t sequence)
const override;
doFetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const override;
std::optional<int64_t>
getToken(void const* key) const
getToken(void const* key, boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra";
CassandraStatement statement{getToken_};
statement.bindNextBytes(key, 32);
CassandraResult result = executeSyncRead(statement);
CassandraResult result = executeAsyncRead(statement, yield);
if (!result)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows";
@@ -760,12 +807,15 @@ public:
}
std::optional<TransactionAndMetadata>
fetchTransaction(ripple::uint256 const& hash) const override
fetchTransaction(
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const override
{
BOOST_LOG_TRIVIAL(trace) << __func__;
CassandraStatement statement{selectTransaction_};
statement.bindNextBytes(hash);
CassandraResult result = executeSyncRead(statement);
CassandraResult result = executeAsyncRead(statement, yield);
if (!result)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows";
@@ -777,29 +827,40 @@ public:
result.getUInt32(),
result.getUInt32()}};
}
std::optional<ripple::uint256>
doFetchSuccessorKey(ripple::uint256 key, uint32_t ledgerSequence)
const override;
doFetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const override;
std::vector<TransactionAndMetadata>
fetchTransactions(
std::vector<ripple::uint256> const& hashes) const override;
std::vector<ripple::uint256> const& hashes,
boost::asio::yield_context& yield) const override;
std::vector<Blob>
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
uint32_t sequence) const override;
std::uint32_t const sequence,
boost::asio::yield_context& yield) const override;
std::vector<LedgerObject>
fetchLedgerDiff(uint32_t ledgerSequence) const override;
fetchLedgerDiff(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const override;
void
doWriteLedgerObject(std::string&& key, uint32_t seq, std::string&& blob)
override;
doWriteLedgerObject(
std::string&& key,
std::uint32_t const seq,
std::string&& blob) override;
void
writeSuccessor(std::string&& key, uint32_t seq, std::string&& successor)
override;
writeSuccessor(
std::string&& key,
std::uint32_t const seq,
std::string&& successor) override;
void
writeAccountTransactions(
@@ -808,13 +869,13 @@ public:
void
writeTransaction(
std::string&& hash,
uint32_t seq,
uint32_t date,
std::uint32_t const seq,
std::uint32_t const date,
std::string&& transaction,
std::string&& metadata) override;
void
startWrites() override
startWrites() const override
{
}
@@ -825,14 +886,11 @@ public:
syncCv_.wait(lck, [this]() { return finishedAllRequests(); });
}
bool
doOnlineDelete(uint32_t numLedgersToKeep) const override;
boost::asio::io_context&
getIOContext() const
{
return ioContext_;
}
bool
doOnlineDelete(
std::uint32_t const numLedgersToKeep,
boost::asio::yield_context& yield) const override;
inline void
incremementOutstandingRequestCount() const
@@ -904,8 +962,10 @@ public:
cass_future_set_callback(
fut, callback, static_cast<void*>(&callbackData));
cass_future_free(fut);
}
template <class T, class S>
void
executeAsyncWrite(
@@ -918,6 +978,7 @@ public:
incremementOutstandingRequestCount();
executeAsyncHelper(statement, callback, callbackData);
}
template <class T, class S>
void
executeAsyncRead(
@@ -927,6 +988,7 @@ public:
{
executeAsyncHelper(statement, callback, callbackData);
}
void
executeSyncWrite(CassandraStatement const& statement) const
{
@@ -1003,18 +1065,32 @@ public:
}
CassandraResult
executeSyncRead(CassandraStatement const& statement) const
executeAsyncRead(
CassandraStatement const& statement,
boost::asio::yield_context& yield) const
{
using result = boost::asio::async_result<
boost::asio::yield_context,
void(boost::system::error_code, CassError)>;
CassFuture* fut;
CassError rc;
do
{
fut = cass_session_execute(session_.get(), statement.get());
rc = cass_future_error_code(fut);
boost::system::error_code ec;
rc = cass_future_error_code(fut, yield[ec]);
if (ec)
{
BOOST_LOG_TRIVIAL(error)
<< "Cannot read async cass_future_error_code";
}
if (rc != CASS_OK)
{
std::stringstream ss;
ss << "Cassandra executeSyncRead error";
ss << "Cassandra executeAsyncRead error";
ss << ": " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << ss.str();
}
@@ -1030,6 +1106,8 @@ public:
}
} while (rc != CASS_OK);
// The future should have returned at the earlier cass_future_error_code
// so we can use the sync version of this function.
CassResult const* res = cass_future_get_result(fut);
cass_future_free(fut);
return {res};

View File

@@ -1,151 +0,0 @@
#include <boost/format.hpp>
#include <backend/DBHelpers.h>
#include <memory>
static bool
writeToLedgersDB(ripple::LedgerInfo const& info, PgQuery& pgQuery)
{
BOOST_LOG_TRIVIAL(debug) << __func__;
auto cmd = boost::format(
R"(INSERT INTO ledgers
VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))");
auto ledgerInsert = boost::str(
cmd % info.seq % ripple::strHex(info.hash) %
ripple::strHex(info.parentHash) % info.drops.drops() %
info.closeTime.time_since_epoch().count() %
info.parentCloseTime.time_since_epoch().count() %
info.closeTimeResolution.count() % info.closeFlags %
ripple::strHex(info.accountHash) % ripple::strHex(info.txHash));
BOOST_LOG_TRIVIAL(trace) << __func__ << " : "
<< " : "
<< "query string = " << ledgerInsert;
auto res = pgQuery(ledgerInsert.data());
return res;
}
/*
bool
writeBooks(std::vector<BookDirectoryData> const& bookDirData, PgQuery& pg)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Writing " << bookDirData.size() << "books to Postgres";
try
{
std::stringstream booksCopyBuffer;
for (auto const& data : bookDirData)
{
std::string directoryIndex = ripple::strHex(data.directoryIndex);
std::string bookIndex = ripple::strHex(data.bookIndex);
auto ledgerSeq = data.ledgerSequence;
booksCopyBuffer << "\\\\x" << directoryIndex << '\t'
<< std::to_string(ledgerSeq) << '\t' << "\\\\x"
<< bookIndex << '\n';
}
pg.bulkInsert("books", booksCopyBuffer.str());
BOOST_LOG_TRIVIAL(info) << __func__ << " : "
<< "Successfully inserted books";
return true;
}
catch (std::exception& e)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << "Caught exception inserting books : " << e.what();
assert(false);
return false;
}
}
*/
/*
bool
writeToPostgres(
ripple::LedgerInfo const& info,
std::vector<AccountTransactionsData> const& accountTxData,
std::shared_ptr<PgPool> const& pgPool)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "Beginning write to Postgres";
try
{
// Create a PgQuery object to run multiple commands over the
// same connection in a single transaction block.
PgQuery pg(pgPool);
auto res = pg("BEGIN");
if (!res || res.status() != PGRES_COMMAND_OK)
{
std::stringstream msg;
msg << "bulkWriteToTable : Postgres insert error: " << res.msg();
throw std::runtime_error(msg.str());
}
// Writing to the ledgers db fails if the ledger already
// exists in the db. In this situation, the ETL process has
// detected there is another writer, and falls back to only
// publishing
if (!writeToLedgersDB(info, pg))
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " : "
<< "Failed to write to ledgers database.";
return false;
}
std::stringstream transactionsCopyBuffer;
std::stringstream accountTransactionsCopyBuffer;
for (auto const& data : accountTxData)
{
std::string txHash = ripple::strHex(data.txHash);
std::string nodestoreHash = ripple::strHex(data.nodestoreHash);
auto idx = data.transactionIndex;
auto ledgerSeq = data.ledgerSequence;
transactionsCopyBuffer << std::to_string(ledgerSeq) << '\t'
<< std::to_string(idx) << '\t' << "\\\\x"
<< txHash << '\t' << "\\\\x" << nodestoreHash
<< '\n';
for (auto const& a : data.accounts)
{
std::string acct = ripple::strHex(a);
accountTransactionsCopyBuffer
<< "\\\\x" << acct << '\t' << std::to_string(ledgerSeq)
<< '\t' << std::to_string(idx) << '\n';
}
}
pg.bulkInsert("transactions", transactionsCopyBuffer.str());
pg.bulkInsert(
"account_transactions", accountTransactionsCopyBuffer.str());
res = pg("COMMIT");
if (!res || res.status() != PGRES_COMMAND_OK)
{
std::stringstream msg;
msg << "bulkWriteToTable : Postgres insert error: " << res.msg();
assert(false);
throw std::runtime_error(msg.str());
}
BOOST_LOG_TRIVIAL(info) << __func__ << " : "
<< "Successfully wrote to Postgres";
return true;
}
catch (std::exception& e)
{
BOOST_LOG_TRIVIAL(error)
<< __func__
<< "Caught exception writing to Postgres : " << e.what();
assert(false);
return false;
}
}
*/

View File

@@ -14,8 +14,8 @@
struct AccountTransactionsData
{
boost::container::flat_set<ripple::AccountID> accounts;
uint32_t ledgerSequence;
uint32_t transactionIndex;
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
ripple::uint256 txHash;
AccountTransactionsData(
@@ -39,6 +39,7 @@ isOffer(T const& object)
short offer_bytes = (object[1] << 8) | object[2];
return offer_bytes == 0x006f;
}
template <class T>
inline bool
isOfferHex(T const& object)
@@ -51,6 +52,7 @@ isOfferHex(T const& object)
}
return false;
}
template <class T>
inline bool
isDirNode(T const& object)
@@ -58,6 +60,7 @@ isDirNode(T const& object)
short spaceKey = (object.data()[1] << 8) | object.data()[2];
return spaceKey == 0x0064;
}
template <class T, class R>
inline bool
isBookDir(T const& key, R const& object)
@@ -69,6 +72,7 @@ isBookDir(T const& key, R const& object)
ripple::SerialIter{object.data(), object.size()}, key};
return !sle[~ripple::sfOwner].has_value();
}
template <class T>
inline ripple::uint256
getBook(T const& offer)
@@ -115,11 +119,12 @@ deserializeHeader(ripple::Slice data)
return info;
}
inline std::string
uint256ToString(ripple::uint256 const& uint)
{
return {reinterpret_cast<const char*>(uint.data()), uint.size()};
}
static constexpr uint32_t rippleEpochStart = 946684800;
static constexpr std::uint32_t rippleEpochStart = 946684800;
#endif

View File

@@ -14,6 +14,7 @@
#include <boost/log/trivial.hpp>
#include <algorithm>
#include <array>
#include <backend/BackendInterface.h>
#include <backend/Pg.h>
#include <cassert>
#include <cstdlib>
@@ -61,28 +62,33 @@ PgResult::msg() const
https://www.postgresql.org/docs/10/libpq-connect.html
*/
void
Pg::connect()
Pg::connect(boost::asio::yield_context& yield)
{
std::function<PostgresPollingStatusType(PGconn*)> poller;
if (conn_)
{
// Nothing to do if we already have a good connection.
if (PQstatus(conn_.get()) == CONNECTION_OK)
return;
/* Try resetting connection. */
PQreset(conn_.get());
/* Try resetting connection, or disconnect and retry if that fails.
PQfinish() is synchronous so first try to asynchronously reset. */
if (PQresetStart(conn_.get()))
poller = PQresetPoll;
else
disconnect();
}
else // Make new connection.
if (!conn_)
{
conn_.reset(PQconnectdbParams(
conn_.reset(PQconnectStartParams(
reinterpret_cast<char const* const*>(&config_.keywordsIdx[0]),
reinterpret_cast<char const* const*>(&config_.valuesIdx[0]),
0));
if (!conn_)
throw std::runtime_error("No db connection struct");
poller = PQconnectPoll;
}
/** Results from a synchronous connection attempt can only be either
* CONNECTION_OK or CONNECTION_BAD. */
if (!conn_)
throw std::runtime_error("No db connection object");
if (PQstatus(conn_.get()) == CONNECTION_BAD)
{
std::stringstream ss;
@@ -91,30 +97,182 @@ Pg::connect()
throw std::runtime_error(ss.str());
}
// Log server session console messages.
PQsetNoticeReceiver(conn_.get(), noticeReceiver, nullptr);
try
{
socket_ = getSocket(yield);
/* Asynchronously connecting entails several messages between
* client and server. */
PostgresPollingStatusType poll = PGRES_POLLING_WRITING;
while (poll != PGRES_POLLING_OK)
{
switch (poll)
{
case PGRES_POLLING_FAILED: {
std::stringstream ss;
ss << "DB connection failed";
char* err = PQerrorMessage(conn_.get());
if (err)
ss << ":" << err;
else
ss << '.';
throw std::runtime_error(ss.str());
}
case PGRES_POLLING_READING:
socket_->async_wait(
boost::asio::ip::tcp::socket::wait_read, yield);
break;
case PGRES_POLLING_WRITING:
socket_->async_wait(
boost::asio::ip::tcp::socket::wait_write, yield);
break;
default: {
assert(false);
std::stringstream ss;
ss << "unknown DB polling status: " << poll;
throw std::runtime_error(ss.str());
}
}
poll = poller(conn_.get());
}
}
catch (std::exception const& e)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " "
<< "error, polling connection"
<< "error = " << e.what();
// Sever connection upon any error.
disconnect();
std::stringstream ss;
ss << "polling connection error: " << e.what();
throw std::runtime_error(ss.str());
}
/* Enable asynchronous writes. */
if (PQsetnonblocking(conn_.get(), 1) == -1)
{
std::stringstream ss;
char* err = PQerrorMessage(conn_.get());
if (err)
ss << "Error setting connection to non-blocking: " << err;
else
ss << "Unknown error setting connection to non-blocking";
throw std::runtime_error(ss.str());
}
if (PQstatus(conn_.get()) != CONNECTION_OK)
{
std::stringstream ss;
ss << "bad connection" << std::to_string(PQstatus(conn_.get()));
char* err = PQerrorMessage(conn_.get());
if (err)
ss << ": " << err;
else
ss << '.';
throw std::runtime_error(ss.str());
}
}
inline void
Pg::flush(boost::asio::yield_context& yield)
{
// non-blocking connection requires manually flushing write.
int flushed;
do
{
flushed = PQflush(conn_.get());
if (flushed == 1)
{
socket_->async_wait(
boost::asio::ip::tcp::socket::wait_write, yield);
}
else if (flushed == -1)
{
std::stringstream ss;
ss << "error flushing query " << PQerrorMessage(conn_.get());
throw std::runtime_error(ss.str());
}
} while (flushed);
}
inline PgResult
Pg::waitForStatus(boost::asio::yield_context& yield, ExecStatusType expected)
{
PgResult ret;
while (true)
{
if (PQisBusy(conn_.get()))
{
socket_->async_wait(boost::asio::ip::tcp::socket::wait_read, yield);
}
if (!PQconsumeInput(conn_.get()))
{
std::stringstream ss;
ss << "query consume input error: " << PQerrorMessage(conn_.get());
throw std::runtime_error(ss.str());
}
if (PQisBusy(conn_.get()))
continue;
pg_result_type res{PQgetResult(conn_.get()), [](PGresult* result) {
PQclear(result);
}};
if (!res)
break;
auto status = PQresultStatus(res.get());
ret = PgResult(std::move(res));
if (status == expected)
break;
}
return ret;
}
inline asio_socket_type
Pg::getSocket(boost::asio::yield_context& yield)
{
asio_socket_type s{
new boost::asio::ip::tcp::socket(
boost::asio::get_associated_executor(yield),
boost::asio::ip::tcp::v4(),
PQsocket(conn_.get())),
[](boost::asio::ip::tcp::socket* socket) {
socket->cancel();
socket->release();
delete socket;
}};
return std::move(s);
}
PgResult
Pg::query(char const* command, std::size_t nParams, char const* const* values)
Pg::query(
char const* command,
std::size_t const nParams,
char const* const* values,
boost::asio::yield_context& yield)
{
// The result object must be freed using the libpq API PQclear() call.
pg_result_type ret{nullptr, [](PGresult* result) { PQclear(result); }};
// Connect then submit query.
while (true)
{
{
std::lock_guard<std::mutex> lock(mutex_);
if (stop_)
return PgResult();
}
try
{
connect();
connect(yield);
int sent;
if (nParams)
{
// PQexecParams can process only a single command.
ret.reset(PQexecParams(
sent = PQsendQueryParams(
conn_.get(),
command,
nParams,
@@ -122,29 +280,89 @@ Pg::query(char const* command, std::size_t nParams, char const* const* values)
values,
nullptr,
nullptr,
0));
0);
}
else
{
// PQexec can process multiple commands separated by
// semi-colons. Returns the response from the last
// command processed.
ret.reset(PQexec(conn_.get(), command));
sent = PQsendQuery(conn_.get(), command);
}
if (!ret)
throw std::runtime_error("no result structure returned");
if (!sent)
{
std::stringstream ss;
ss << "Can't send query: " << PQerrorMessage(conn_.get());
throw std::runtime_error(ss.str());
}
flush(yield);
/* Only read response if query was submitted successfully.
Only a single response is expected, but the API requires
responses to be read until nullptr is returned.
It is possible for pending reads on the connection to interfere
with the current query. For simplicity, this implementation
only flushes pending writes and assumes there are no pending reads.
To avoid this, all pending reads from each query must be consumed,
and all connections with any type of error be severed. */
while (true)
{
if (PQisBusy(conn_.get()))
socket_->async_wait(
boost::asio::ip::tcp::socket::wait_read, yield);
if (!PQconsumeInput(conn_.get()))
{
std::stringstream ss;
ss << "query consume input error: "
<< PQerrorMessage(conn_.get());
throw std::runtime_error(ss.str());
}
if (PQisBusy(conn_.get()))
continue;
pg_result_type res{PQgetResult(conn_.get()), [](PGresult* result) {
PQclear(result);
}};
if (!res)
break;
ret.reset(res.release());
// ret is never null in these cases, so need to break.
bool copyStatus = false;
switch (PQresultStatus(ret.get()))
{
case PGRES_COPY_IN:
case PGRES_COPY_OUT:
case PGRES_COPY_BOTH:
copyStatus = true;
break;
default:;
}
if (copyStatus)
break;
}
}
catch (std::exception const& e)
{
// Sever connection and retry until successful.
BOOST_LOG_TRIVIAL(error) << __func__ << " "
<< "error, severing connection "
<< "error = " << e.what();
// Sever connection upon any error.
disconnect();
BOOST_LOG_TRIVIAL(error)
<< "database error, retrying: " << e.what();
std::this_thread::sleep_for(std::chrono::seconds(1));
}
std::stringstream ss;
ss << "query error: " << e.what();
throw std::runtime_error(ss.str());
}
if (!ret)
throw std::runtime_error("no result structure returned");
// Ensure proper query execution.
switch (PQresultStatus(ret.get()))
{
@@ -161,6 +379,7 @@ Pg::query(char const* command, std::size_t nParams, char const* const* values)
<< ", number of tuples: " << PQntuples(ret.get())
<< ", number of fields: " << PQnfields(ret.get());
BOOST_LOG_TRIVIAL(error) << ss.str();
PgResult retRes(ret.get(), conn_.get());
disconnect();
@@ -206,7 +425,7 @@ formatParams(pg_params const& dbParams)
}
PgResult
Pg::query(pg_params const& dbParams)
Pg::query(pg_params const& dbParams, boost::asio::yield_context& yield)
{
char const* const& command = dbParams.first;
auto const formattedParams = formatParams(dbParams);
@@ -215,18 +434,21 @@ Pg::query(pg_params const& dbParams)
formattedParams.size(),
formattedParams.size()
? reinterpret_cast<char const* const*>(&formattedParams[0])
: nullptr);
: nullptr,
yield);
}
void
Pg::bulkInsert(char const* table, std::string const& records)
Pg::bulkInsert(
char const* table,
std::string const& records,
boost::asio::yield_context& yield)
{
// https://www.postgresql.org/docs/12/libpq-copy.html#LIBPQ-COPY-SEND
assert(conn_.get());
auto copyCmd = boost::format(R"(COPY %s FROM stdin)");
auto formattedCmd = boost::str(copyCmd % table);
BOOST_LOG_TRIVIAL(debug) << __func__ << " " << formattedCmd;
auto res = query(formattedCmd.c_str());
auto res = query(formattedCmd.c_str(), yield);
if (!res || res.status() != PGRES_COPY_IN)
{
std::stringstream ss;
@@ -238,7 +460,14 @@ Pg::bulkInsert(char const* table, std::string const& records)
throw std::runtime_error(ss.str());
}
if (PQputCopyData(conn_.get(), records.c_str(), records.size()) == -1)
try
{
while (true)
{
std::int32_t const putCopy =
PQputCopyData(conn_.get(), records.c_str(), records.size());
if (putCopy == -1)
{
std::stringstream ss;
ss << "bulkInsert to " << table
@@ -248,7 +477,34 @@ Pg::bulkInsert(char const* table, std::string const& records)
throw std::runtime_error(ss.str());
}
if (PQputCopyEnd(conn_.get(), nullptr) == -1)
else if (putCopy == 0)
// If the value is zero, wait for write-ready and try again.
socket_->async_wait(
boost::asio::ip::tcp::socket::wait_write, yield);
else
break;
}
flush(yield);
auto copyRes = waitForStatus(yield, PGRES_COPY_IN);
if (!copyRes || copyRes.status() != PGRES_COPY_IN)
{
std::stringstream ss;
ss << "bulkInsert to " << table
<< ". Postgres insert error: " << copyRes.msg();
if (res)
ss << ". CopyPut status not PGRES_COPY_IN: "
<< copyRes.status();
BOOST_LOG_TRIVIAL(error) << __func__ << " " << records;
throw std::runtime_error(ss.str());
}
std::int32_t copyEnd;
do
{
copyEnd = PQputCopyEnd(conn_.get(), nullptr);
if (copyEnd == -1)
{
std::stringstream ss;
ss << "bulkInsert to " << table
@@ -258,21 +514,53 @@ Pg::bulkInsert(char const* table, std::string const& records)
throw std::runtime_error(ss.str());
}
// The result object must be freed using the libpq API PQclear() call.
pg_result_type copyEndResult{
nullptr, [](PGresult* result) { PQclear(result); }};
copyEndResult.reset(PQgetResult(conn_.get()));
ExecStatusType status = PQresultStatus(copyEndResult.get());
if (status != PGRES_COMMAND_OK)
// If the value is zero, wait for write-ready and try again.
if (copyEnd == 0)
socket_->async_wait(
boost::asio::ip::tcp::socket::wait_write, yield);
} while (copyEnd == 0);
flush(yield);
auto endRes = waitForStatus(yield, PGRES_COMMAND_OK);
if (!endRes || endRes.status() != PGRES_COMMAND_OK)
{
std::stringstream ss;
ss << "bulkInsert to " << table
<< ". PQputCopyEnd status not PGRES_COMMAND_OK: " << status
<< " message = " << PQerrorMessage(conn_.get());
disconnect();
<< ". Postgres insert error: " << endRes.msg();
if (res)
ss << ". CopyEnd status not PGRES_COMMAND_OK: "
<< endRes.status();
BOOST_LOG_TRIVIAL(error) << __func__ << " " << records;
throw std::runtime_error(ss.str());
}
pg_result_type finalRes{PQgetResult(conn_.get()), [](PGresult* result) {
PQclear(result);
}};
if (finalRes)
{
std::stringstream ss;
ss << "bulkInsert to " << table
<< ". Postgres insert error: " << res.msg();
if (res)
ss << ". Query status not NULL: " << res.status();
BOOST_LOG_TRIVIAL(error) << __func__ << " " << records;
throw std::runtime_error(ss.str());
}
}
catch (std::exception const& e)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " "
<< "error, bulk insertion"
<< "error = " << e.what();
// Sever connection upon any error.
disconnect();
std::stringstream ss;
ss << "query error: " << e.what();
throw std::runtime_error(ss.str());
}
}
bool
@@ -306,12 +594,21 @@ Pg::clear()
}
} while (res && conn_);
try
{
socket_->cancel();
}
catch (std::exception const& e)
{
}
return conn_ != nullptr;
}
//-----------------------------------------------------------------------------
PgPool::PgPool(boost::json::object const& config)
PgPool::PgPool(boost::asio::io_context& ioc, boost::json::object const& config)
: ioc_(ioc)
{
// Make sure that boost::asio initializes the SSL library.
{
@@ -477,7 +774,7 @@ PgPool::PgPool(boost::json::object const& config)
config_.valuesIdx.push_back(nullptr);
if (config.contains("max_connections"))
config_.max_connections = config.at("max_connections").as_uint64();
config_.max_connections = config.at("max_connections").as_int64();
std::size_t timeout;
if (config.contains("timeout"))
config_.timeout =
@@ -516,32 +813,6 @@ PgPool::onStop()
BOOST_LOG_TRIVIAL(info) << "stopped";
}
void
PgPool::idleSweeper()
{
std::size_t before, after;
{
std::lock_guard<std::mutex> lock(mutex_);
before = idle_.size();
if (config_.timeout != std::chrono::seconds(0))
{
auto const found =
idle_.upper_bound(clock_type::now() - config_.timeout);
for (auto it = idle_.begin(); it != found;)
{
it = idle_.erase(it);
--connections_;
}
}
after = idle_.size();
}
BOOST_LOG_TRIVIAL(info)
<< "Idle sweeper. connections: " << connections_
<< ". checked out: " << connections_ - after
<< ". idle before, after sweep: " << before << ", " << after;
}
std::unique_ptr<Pg>
PgPool::checkout()
{
@@ -563,7 +834,7 @@ PgPool::checkout()
else if (connections_ < config_.max_connections)
{
++connections_;
ret = std::make_unique<Pg>(config_, stop_, mutex_);
ret = std::make_unique<Pg>(config_, ioc_, stop_, mutex_);
}
// Otherwise, wait until a connection becomes available or we stop.
else
@@ -585,6 +856,7 @@ PgPool::checkin(std::unique_ptr<Pg>& pg)
std::lock_guard<std::mutex> lock(mutex_);
if (!stop_ && pg->clear())
{
pg->clear();
idle_.emplace(clock_type::now(), std::move(pg));
}
else
@@ -600,11 +872,11 @@ PgPool::checkin(std::unique_ptr<Pg>& pg)
//-----------------------------------------------------------------------------
std::shared_ptr<PgPool>
make_PgPool(boost::json::object const& config)
make_PgPool(boost::asio::io_context& ioc, boost::json::object const& config)
{
try
{
auto ret = std::make_shared<PgPool>(config);
auto ret = std::make_shared<PgPool>(ioc, config);
ret->setup();
return ret;
}
@@ -612,13 +884,18 @@ make_PgPool(boost::json::object const& config)
{
boost::json::object configCopy = config;
configCopy["database"] = "postgres";
auto ret = std::make_shared<PgPool>(configCopy);
auto ret = std::make_shared<PgPool>(ioc, configCopy);
ret->setup();
PgQuery pgQuery{ret};
Backend::synchronous([&](boost::asio::yield_context yield) {
PgQuery pgQuery(ret);
std::string query = "CREATE DATABASE " +
std::string{config.at("database").as_string().c_str()};
pgQuery(query.c_str());
ret = std::make_shared<PgPool>(config);
pgQuery(query.c_str(), yield);
});
ret = std::make_shared<PgPool>(ioc, config);
ret->setup();
return ret;
}
@@ -767,7 +1044,7 @@ CREATE TABLE IF NOT EXISTS ledgers (
CREATE TABLE IF NOT EXISTS objects (
key bytea NOT NULL,
ledger_seq bigint NOT NULL REFERENCES ledgers ON DELETE CASCADE,
ledger_seq bigint NOT NULL,
object bytea
) PARTITION BY RANGE (ledger_seq);
@@ -1297,8 +1574,8 @@ void
applySchema(
std::shared_ptr<PgPool> const& pool,
char const* schema,
std::uint32_t currentVersion,
std::uint32_t schemaVersion)
std::uint32_t const currentVersion,
std::uint32_t const schemaVersion)
{
if (currentVersion != 0 && schemaVersion != currentVersion + 1)
{
@@ -1310,7 +1587,11 @@ applySchema(
throw std::runtime_error(ss.str());
}
auto res = PgQuery(pool)({schema, {}});
PgResult res;
Backend::synchronous([&](boost::asio::yield_context yield) {
res = PgQuery(pool)(schema, yield);
});
if (!res)
{
std::stringstream ss;
@@ -1320,7 +1601,10 @@ applySchema(
}
auto cmd = boost::format(R"(SELECT set_schema_version(%u, 0))");
res = PgQuery(pool)({boost::str(cmd % schemaVersion).c_str(), {}});
Backend::synchronous([&](boost::asio::yield_context yield) {
res = PgQuery(pool)(boost::str(cmd % schemaVersion).c_str(), yield);
});
if (!res)
{
std::stringstream ss;
@@ -1333,7 +1617,11 @@ applySchema(
void
initAccountTx(std::shared_ptr<PgPool> const& pool)
{
auto res = PgQuery(pool)(accountTxSchema);
PgResult res;
Backend::synchronous([&](boost::asio::yield_context yield) {
res = PgQuery(pool)(accountTxSchema, yield);
});
if (!res)
{
std::stringstream ss;
@@ -1346,7 +1634,11 @@ void
initSchema(std::shared_ptr<PgPool> const& pool)
{
// Figure out what schema version, if any, is already installed.
auto res = PgQuery(pool)({version_query, {}});
PgResult res;
Backend::synchronous([&](boost::asio::yield_context yield) {
res = PgQuery(pool)(version_query, yield);
});
if (!res)
{
std::stringstream ss;
@@ -1370,7 +1662,10 @@ initSchema(std::shared_ptr<PgPool> const& pool)
// This protects against corruption in an aborted install that is
// followed by a fresh installation attempt with a new schema.
auto cmd = boost::format(R"(SELECT set_schema_version(0, %u))");
res = PgQuery(pool)({boost::str(cmd % freshVersion).c_str(), {}});
Backend::synchronous([&](boost::asio::yield_context yield) {
res = PgQuery(pool)(boost::str(cmd % freshVersion).c_str(), yield);
});
if (!res)
{
std::stringstream ss;
@@ -1405,7 +1700,9 @@ initSchema(std::shared_ptr<PgPool> const& pool)
// @return LedgerInfo
std::optional<ripple::LedgerInfo>
getLedger(
std::variant<std::monostate, ripple::uint256, uint32_t> const& whichLedger,
boost::asio::yield_context yield,
std::variant<std::monostate, ripple::uint256, std::uint32_t> const&
whichLedger,
std::shared_ptr<PgPool>& pgPool)
{
ripple::LedgerInfo lgrInfo;
@@ -1414,9 +1711,9 @@ getLedger(
"total_coins, closing_time, prev_closing_time, close_time_res, "
"close_flags, ledger_seq FROM ledgers ";
uint32_t expNumResults = 1;
std::uint32_t expNumResults = 1;
if (auto ledgerSeq = std::get_if<uint32_t>(&whichLedger))
if (auto ledgerSeq = std::get_if<std::uint32_t>(&whichLedger))
{
sql << "WHERE ledger_seq = " + std::to_string(*ledgerSeq);
}
@@ -1432,7 +1729,7 @@ getLedger(
BOOST_LOG_TRIVIAL(trace) << __func__ << " : sql = " << sql.str();
auto res = PgQuery(pgPool)(sql.str().data());
auto res = PgQuery(pgPool)(sql.str().data(), yield);
if (!res)
{
BOOST_LOG_TRIVIAL(error)

View File

@@ -4,6 +4,9 @@
#include <ripple/basics/StringUtilities.h>
#include <ripple/basics/chrono.h>
#include <ripple/ledger/ReadView.h>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/icl/closed_interval.hpp>
#include <boost/json.hpp>
#include <boost/lexical_cast.hpp>
@@ -24,6 +27,9 @@
// These postgres structs must be freed only by the postgres API.
using pg_result_type = std::unique_ptr<PGresult, void (*)(PGresult*)>;
using pg_connection_type = std::unique_ptr<PGconn, void (*)(PGconn*)>;
using asio_socket_type = std::unique_ptr<
boost::asio::ip::tcp::socket,
void (*)(boost::asio::ip::tcp::socket*)>;
/** first: command
* second: parameter values
@@ -46,7 +52,7 @@ using pg_formatted_params = std::vector<char const*>;
struct PgConfig
{
/** Maximum connections allowed to db. */
std::size_t max_connections{std::numeric_limits<std::size_t>::max()};
std::size_t max_connections{1000};
/** Close idle connections past this duration. */
std::chrono::seconds timeout{600};
@@ -255,12 +261,24 @@ class Pg
friend class PgQuery;
PgConfig const& config_;
boost::asio::io_context::strand strand_;
bool& stop_;
std::mutex& mutex_;
asio_socket_type socket_{nullptr, [](boost::asio::ip::tcp::socket*) {}};
// The connection object must be freed using the libpq API PQfinish() call.
pg_connection_type conn_{nullptr, [](PGconn* conn) { PQfinish(conn); }};
inline asio_socket_type
getSocket(boost::asio::yield_context& strand);
inline PgResult
waitForStatus(boost::asio::yield_context& yield, ExecStatusType expected);
inline void
flush(boost::asio::yield_context& yield);
/** Clear results from the connection.
*
* Results from previous commands must be cleared before new commands
@@ -280,13 +298,14 @@ class Pg
* or in an errored state, reconnects to the database.
*/
void
connect();
connect(boost::asio::yield_context& yield);
/** Disconnect from postgres. */
void
disconnect()
{
conn_.reset();
socket_.reset();
}
/** Execute postgres query.
@@ -302,7 +321,11 @@ class Pg
* @return Query result object.
*/
PgResult
query(char const* command, std::size_t nParams, char const* const* values);
query(
char const* command,
std::size_t const nParams,
char const* const* values,
boost::asio::yield_context& yield);
/** Execute postgres query with no parameters.
*
@@ -310,9 +333,9 @@ class Pg
* @return Query result object;
*/
PgResult
query(char const* command)
query(char const* command, boost::asio::yield_context& yield)
{
return query(command, 0, nullptr);
return query(command, 0, nullptr, yield);
}
/** Execute postgres query with parameters.
@@ -321,7 +344,7 @@ class Pg
* @return Query result object.
*/
PgResult
query(pg_params const& dbParams);
query(pg_params const& dbParams, boost::asio::yield_context& yield);
/** Insert multiple records into a table using Postgres' bulk COPY.
*
@@ -331,7 +354,10 @@ class Pg
* @param records Records in the COPY IN format.
*/
void
bulkInsert(char const* table, std::string const& records);
bulkInsert(
char const* table,
std::string const& records,
boost::asio::yield_context& yield);
public:
/** Constructor for Pg class.
@@ -341,8 +367,11 @@ public:
* @param stop Reference to connection pool's stop flag.
* @param mutex Reference to connection pool's mutex.
*/
Pg(PgConfig const& config, bool& stop, std::mutex& mutex)
: config_(config), stop_(stop), mutex_(mutex)
Pg(PgConfig const& config,
boost::asio::io_context& ctx,
bool& stop,
std::mutex& mutex)
: config_(config), strand_(ctx), stop_(stop), mutex_(mutex)
{
}
};
@@ -368,6 +397,7 @@ class PgPool
using clock_type = std::chrono::steady_clock;
boost::asio::io_context& ioc_;
PgConfig config_;
std::mutex mutex_;
std::condition_variable cond_;
@@ -406,13 +436,19 @@ public:
* @param j Logger object.
* @param parent Stoppable parent.
*/
PgPool(boost::json::object const& config);
PgPool(boost::asio::io_context& ioc, boost::json::object const& config);
~PgPool()
{
onStop();
}
PgConfig&
config()
{
return config_;
}
/** Initiate idle connection timer.
*
* The PgPool object needs to be fully constructed to support asynchronous
@@ -424,10 +460,6 @@ public:
/** Prepare for process shutdown. (Stoppable) */
void
onStop();
/** Disconnect idle postgres connections. */
void
idleSweeper();
};
//-----------------------------------------------------------------------------
@@ -467,11 +499,11 @@ public:
* @return Result of query, including errors.
*/
PgResult
operator()(pg_params const& dbParams)
operator()(pg_params const& dbParams, boost::asio::yield_context& yield)
{
if (!pg_) // It means we're stopping. Return empty result.
return PgResult();
return pg_->query(dbParams);
return pg_->query(dbParams, yield);
}
/** Execute postgres query with only command statement.
@@ -480,9 +512,9 @@ public:
* @return Result of query, including errors.
*/
PgResult
operator()(char const* command)
operator()(char const* command, boost::asio::yield_context& yield)
{
return operator()(pg_params{command, {}});
return operator()(pg_params{command, {}}, yield);
}
/** Insert multiple records into a table using Postgres' bulk COPY.
@@ -493,9 +525,12 @@ public:
* @param records Records in the COPY IN format.
*/
void
bulkInsert(char const* table, std::string const& records)
bulkInsert(
char const* table,
std::string const& records,
boost::asio::yield_context& yield)
{
pg_->bulkInsert(table, records);
pg_->bulkInsert(table, records, yield);
}
};
@@ -509,7 +544,7 @@ public:
* @return Postgres connection pool manager
*/
std::shared_ptr<PgPool>
make_PgPool(boost::json::object const& pgConfig);
make_PgPool(boost::asio::io_context& ioc, boost::json::object const& pgConfig);
/** Initialize the Postgres schema.
*
@@ -529,7 +564,8 @@ initAccountTx(std::shared_ptr<PgPool> const& pool);
// @return vector of LedgerInfos
std::optional<ripple::LedgerInfo>
getLedger(
std::variant<std::monostate, ripple::uint256, uint32_t> const& whichLedger,
std::variant<std::monostate, ripple::uint256, std::uint32_t> const&
whichLedger,
std::shared_ptr<PgPool>& pgPool);
#endif // RIPPLE_CORE_PG_H_INCLUDED

View File

@@ -4,9 +4,26 @@
#include <thread>
namespace Backend {
PostgresBackend::PostgresBackend(boost::json::object const& config)
// Type alias for async completion handlers
using completion_token = boost::asio::yield_context;
using function_type = void(boost::system::error_code);
using result_type = boost::asio::async_result<completion_token, function_type>;
using handler_type = typename result_type::completion_handler_type;
struct HandlerWrapper
{
handler_type handler;
HandlerWrapper(handler_type&& handler_) : handler(std::move(handler_))
{
}
};
PostgresBackend::PostgresBackend(
boost::asio::io_context& ioc,
boost::json::object const& config)
: BackendInterface(config)
, pgPool_(make_PgPool(config))
, pgPool_(make_PgPool(ioc, config))
, writeConnection_(pgPool_)
{
if (config.contains("write_interval"))
@@ -19,6 +36,7 @@ PostgresBackend::writeLedger(
ripple::LedgerInfo const& ledgerInfo,
std::string&& ledgerHeader)
{
synchronous([&](boost::asio::yield_context yield) {
auto cmd = boost::format(
R"(INSERT INTO ledgers
VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))");
@@ -32,9 +50,10 @@ PostgresBackend::writeLedger(
ripple::strHex(ledgerInfo.accountHash) %
ripple::strHex(ledgerInfo.txHash));
auto res = writeConnection_(ledgerInsert.data());
auto res = writeConnection_(ledgerInsert.data(), yield);
abortWrite_ = !res;
inProcessLedger = ledgerInfo.seq;
});
}
void
@@ -57,12 +76,14 @@ PostgresBackend::writeAccountTransactions(
}
}
}
void
PostgresBackend::doWriteLedgerObject(
std::string&& key,
uint32_t seq,
std::uint32_t const seq,
std::string&& blob)
{
synchronous([&](boost::asio::yield_context yield) {
if (abortWrite_)
return;
objectsBuffer_ << "\\\\x" << ripple::strHex(key) << '\t'
@@ -76,18 +97,20 @@ PostgresBackend::doWriteLedgerObject(
BOOST_LOG_TRIVIAL(info)
<< __func__ << " Flushing large buffer. num objects = "
<< numRowsInObjectsBuffer_;
writeConnection_.bulkInsert("objects", objectsBuffer_.str());
writeConnection_.bulkInsert("objects", objectsBuffer_.str(), yield);
BOOST_LOG_TRIVIAL(info) << __func__ << " Flushed large buffer";
objectsBuffer_.str("");
}
});
}
void
PostgresBackend::writeSuccessor(
std::string&& key,
uint32_t seq,
std::uint32_t const seq,
std::string&& successor)
{
synchronous([&](boost::asio::yield_context yield) {
if (range)
{
if (successors_.count(key) > 0)
@@ -105,17 +128,19 @@ PostgresBackend::writeSuccessor(
BOOST_LOG_TRIVIAL(info)
<< __func__ << " Flushing large buffer. num successors = "
<< numRowsInSuccessorBuffer_;
writeConnection_.bulkInsert("successor", successorBuffer_.str());
writeConnection_.bulkInsert(
"successor", successorBuffer_.str(), yield);
BOOST_LOG_TRIVIAL(info) << __func__ << " Flushed large buffer";
successorBuffer_.str("");
}
});
}
void
PostgresBackend::writeTransaction(
std::string&& hash,
uint32_t seq,
uint32_t date,
std::uint32_t const seq,
std::uint32_t const date,
std::string&& transaction,
std::string&& metadata)
{
@@ -127,8 +152,8 @@ PostgresBackend::writeTransaction(
<< '\t' << "\\\\x" << ripple::strHex(metadata) << '\n';
}
uint32_t
checkResult(PgResult const& res, uint32_t numFieldsExpected)
std::uint32_t
checkResult(PgResult const& res, std::uint32_t const numFieldsExpected)
{
if (!res)
{
@@ -201,50 +226,62 @@ parseLedgerInfo(PgResult const& res)
info.validated = true;
return info;
}
std::optional<uint32_t>
PostgresBackend::fetchLatestLedgerSequence() const
std::optional<std::uint32_t>
PostgresBackend::fetchLatestLedgerSequence(
boost::asio::yield_context& yield) const
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
auto res = pgQuery(
"SELECT ledger_seq FROM ledgers ORDER BY ledger_seq DESC LIMIT 1");
if (checkResult(res, 1))
pgQuery(set_timeout, yield);
auto const query =
"SELECT ledger_seq FROM ledgers ORDER BY ledger_seq DESC LIMIT 1";
if (auto res = pgQuery(query, yield); checkResult(res, 1))
return res.asBigInt(0, 0);
return {};
}
std::optional<ripple::LedgerInfo>
PostgresBackend::fetchLedgerBySequence(uint32_t sequence) const
PostgresBackend::fetchLedgerBySequence(
std::uint32_t const sequence,
boost::asio::yield_context& yield) const
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
pgQuery(set_timeout, yield);
std::stringstream sql;
sql << "SELECT * FROM ledgers WHERE ledger_seq = "
<< std::to_string(sequence);
auto res = pgQuery(sql.str().data());
if (checkResult(res, 10))
if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 10))
return parseLedgerInfo(res);
return {};
}
std::optional<ripple::LedgerInfo>
PostgresBackend::fetchLedgerByHash(ripple::uint256 const& hash) const
PostgresBackend::fetchLedgerByHash(
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
pgQuery(set_timeout, yield);
std::stringstream sql;
sql << "SELECT * FROM ledgers WHERE ledger_hash = "
<< ripple::to_string(hash);
auto res = pgQuery(sql.str().data());
if (checkResult(res, 10))
sql << "SELECT * FROM ledgers WHERE ledger_hash = \'\\x"
<< ripple::to_string(hash) << "\'";
if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 10))
return parseLedgerInfo(res);
return {};
}
std::optional<LedgerRange>
PostgresBackend::hardFetchLedgerRange() const
PostgresBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const
{
auto range = PgQuery(pgPool_)("SELECT complete_ledgers()");
auto range = PgQuery(pgPool_)("SELECT complete_ledgers()", yield);
if (!range)
return {};
@@ -279,17 +316,19 @@ PostgresBackend::hardFetchLedgerRange() const
std::optional<Blob>
PostgresBackend::doFetchLedgerObject(
ripple::uint256 const& key,
uint32_t sequence) const
std::uint32_t const sequence,
boost::asio::yield_context& yield) const
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
pgQuery(set_timeout, yield);
std::stringstream sql;
sql << "SELECT object FROM objects WHERE key = "
<< "\'\\x" << ripple::strHex(key) << "\'"
<< " AND ledger_seq <= " << std::to_string(sequence)
<< " ORDER BY ledger_seq DESC LIMIT 1";
auto res = pgQuery(sql.str().data());
if (checkResult(res, 1))
if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 1))
{
auto blob = res.asUnHexedBlob(0, 0);
if (blob.size())
@@ -301,16 +340,19 @@ PostgresBackend::doFetchLedgerObject(
// returns a transaction, metadata pair
std::optional<TransactionAndMetadata>
PostgresBackend::fetchTransaction(ripple::uint256 const& hash) const
PostgresBackend::fetchTransaction(
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
pgQuery(set_timeout, yield);
std::stringstream sql;
sql << "SELECT transaction,metadata,ledger_seq,date FROM transactions "
"WHERE hash = "
<< "\'\\x" << ripple::strHex(hash) << "\'";
auto res = pgQuery(sql.str().data());
if (checkResult(res, 4))
if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 4))
{
return {
{res.asUnHexedBlob(0, 0),
@@ -322,15 +364,19 @@ PostgresBackend::fetchTransaction(ripple::uint256 const& hash) const
return {};
}
std::vector<TransactionAndMetadata>
PostgresBackend::fetchAllTransactionsInLedger(uint32_t ledgerSequence) const
PostgresBackend::fetchAllTransactionsInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
pgQuery(set_timeout, yield);
std::stringstream sql;
sql << "SELECT transaction, metadata, ledger_seq,date FROM transactions "
"WHERE "
<< "ledger_seq = " << std::to_string(ledgerSequence);
auto res = pgQuery(sql.str().data());
auto res = pgQuery(sql.str().data(), yield);
if (size_t numRows = checkResult(res, 4))
{
std::vector<TransactionAndMetadata> txns;
@@ -348,14 +394,17 @@ PostgresBackend::fetchAllTransactionsInLedger(uint32_t ledgerSequence) const
}
std::vector<ripple::uint256>
PostgresBackend::fetchAllTransactionHashesInLedger(
uint32_t ledgerSequence) const
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
pgQuery(set_timeout, yield);
std::stringstream sql;
sql << "SELECT hash FROM transactions WHERE "
<< "ledger_seq = " << std::to_string(ledgerSequence);
auto res = pgQuery(sql.str().data());
auto res = pgQuery(sql.str().data(), yield);
if (size_t numRows = checkResult(res, 1))
{
std::vector<ripple::uint256> hashes;
@@ -365,22 +414,26 @@ PostgresBackend::fetchAllTransactionHashesInLedger(
}
return hashes;
}
return {};
}
std::optional<ripple::uint256>
PostgresBackend::doFetchSuccessorKey(
ripple::uint256 key,
uint32_t ledgerSequence) const
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
pgQuery(set_timeout, yield);
std::stringstream sql;
sql << "SELECT next FROM successor WHERE key = "
<< "\'\\x" << ripple::strHex(key) << "\'"
<< " AND ledger_seq <= " << std::to_string(ledgerSequence)
<< " ORDER BY ledger_seq DESC LIMIT 1";
auto res = pgQuery(sql.str().data());
if (checkResult(res, 1))
if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 1))
{
auto next = res.asUInt256(0, 0);
if (next == lastKey)
@@ -393,34 +446,44 @@ PostgresBackend::doFetchSuccessorKey(
std::vector<TransactionAndMetadata>
PostgresBackend::fetchTransactions(
std::vector<ripple::uint256> const& hashes) const
std::vector<ripple::uint256> const& hashes,
boost::asio::yield_context& yield) const
{
if (!hashes.size())
return {};
std::vector<TransactionAndMetadata> results;
constexpr bool doAsync = true;
if (doAsync)
{
results.resize(hashes.size());
handler_type handler(std::forward<decltype(yield)>(yield));
result_type result(handler);
auto hw = new HandlerWrapper(std::move(handler));
auto start = std::chrono::system_clock::now();
auto end = std::chrono::system_clock::now();
auto duration = ((end - start).count()) / 1000000000.0;
results.resize(hashes.size());
std::condition_variable cv;
std::mutex mtx;
std::atomic_uint numRemaining = hashes.size();
for (size_t i = 0; i < hashes.size(); ++i)
{
auto const& hash = hashes[i];
boost::asio::post(
pool_, [this, &hash, &results, &numRemaining, &cv, &mtx, i]() {
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " getting txn = " << i;
boost::asio::spawn(
get_associated_executor(yield),
[this, &hash, &results, hw, &numRemaining, i](
boost::asio::yield_context yield) {
BOOST_LOG_TRIVIAL(trace) << __func__ << " getting txn = " << i;
PgQuery pgQuery(pgPool_);
std::stringstream sql;
sql << "SELECT transaction,metadata,ledger_seq,date FROM "
"transactions "
"WHERE HASH = \'\\x"
<< ripple::strHex(hash) << "\'";
auto res = pgQuery(sql.str().data());
auto res = pgQuery(sql.str().data(), yield);
if (size_t numRows = checkResult(res, 4))
{
results[i] = {
@@ -429,78 +492,61 @@ PostgresBackend::fetchTransactions(
res.asBigInt(0, 2),
res.asBigInt(0, 3)};
}
if (--numRemaining == 0)
{
std::unique_lock lck(mtx);
cv.notify_one();
handler_type h(std::move(hw->handler));
h(boost::system::error_code{});
}
});
}
std::unique_lock lck(mtx);
cv.wait(lck, [&numRemaining]() { return numRemaining == 0; });
// Yields the worker to the io_context until handler is called.
result.get();
delete hw;
auto end2 = std::chrono::system_clock::now();
duration = ((end2 - end).count()) / 1000000000.0;
BOOST_LOG_TRIVIAL(info)
<< __func__ << " fetched " << std::to_string(hashes.size())
<< " transactions with threadpool. took "
<< std::to_string(duration);
}
else
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
std::stringstream sql;
for (size_t i = 0; i < hashes.size(); ++i)
{
auto const& hash = hashes[i];
sql << "SELECT transaction,metadata,ledger_seq,date FROM "
"transactions "
"WHERE HASH = \'\\x"
<< ripple::strHex(hash) << "\'";
if (i + 1 < hashes.size())
sql << " UNION ALL ";
}
auto start = std::chrono::system_clock::now();
auto res = pgQuery(sql.str().data());
auto end = std::chrono::system_clock::now();
auto duration = ((end - start).count()) / 1000000000.0;
BOOST_LOG_TRIVIAL(info)
<< __func__ << " fetched " << std::to_string(hashes.size())
<< " transactions with union all. took "
<< std::to_string(duration);
if (size_t numRows = checkResult(res, 3))
{
for (size_t i = 0; i < numRows; ++i)
results.push_back(
{res.asUnHexedBlob(i, 0),
res.asUnHexedBlob(i, 1),
res.asBigInt(i, 2),
res.asBigInt(i, 3)});
}
}
<< " transactions with threadpool. took " << std::to_string(duration);
return results;
}
std::vector<Blob>
PostgresBackend::doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
uint32_t sequence) const
std::uint32_t const sequence,
boost::asio::yield_context& yield) const
{
if (!keys.size())
return {};
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
pgQuery(set_timeout, yield);
std::vector<Blob> results;
results.resize(keys.size());
std::condition_variable cv;
std::mutex mtx;
handler_type handler(std::forward<decltype(yield)>(yield));
result_type result(handler);
auto hw = new HandlerWrapper(std::move(handler));
std::atomic_uint numRemaining = keys.size();
auto start = std::chrono::system_clock::now();
for (size_t i = 0; i < keys.size(); ++i)
{
auto const& key = keys[i];
boost::asio::post(
pool_,
[this, &key, &results, &numRemaining, &cv, &mtx, i, sequence]() {
boost::asio::spawn(
boost::asio::get_associated_executor(yield),
[this, &key, &results, &numRemaining, hw, i, sequence](
boost::asio::yield_context yield) {
PgQuery pgQuery(pgPool_);
std::stringstream sql;
sql << "SELECT object FROM "
"objects "
@@ -509,37 +555,48 @@ PostgresBackend::doFetchLedgerObjects(
<< " AND ledger_seq <= " << std::to_string(sequence)
<< " ORDER BY ledger_seq DESC LIMIT 1";
auto res = pgQuery(sql.str().data());
auto res = pgQuery(sql.str().data(), yield);
if (size_t numRows = checkResult(res, 1))
{
results[i] = res.asUnHexedBlob();
}
if (--numRemaining == 0)
{
std::unique_lock lck(mtx);
cv.notify_one();
handler_type h(std::move(hw->handler));
h(boost::system::error_code{});
}
});
}
std::unique_lock lck(mtx);
cv.wait(lck, [&numRemaining]() { return numRemaining == 0; });
// Yields the worker to the io_context until handler is called.
result.get();
delete hw;
auto end = std::chrono::system_clock::now();
auto duration = ((end - start).count()) / 1000000000.0;
BOOST_LOG_TRIVIAL(info)
<< __func__ << " fetched " << std::to_string(keys.size())
<< " objects with threadpool. took " << std::to_string(duration);
return results;
}
std::vector<LedgerObject>
PostgresBackend::fetchLedgerDiff(uint32_t ledgerSequence) const
PostgresBackend::fetchLedgerDiff(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
pgQuery(set_timeout, yield);
std::stringstream sql;
sql << "SELECT key,object FROM objects "
"WHERE "
<< "ledger_seq = " << std::to_string(ledgerSequence);
auto res = pgQuery(sql.str().data());
auto res = pgQuery(sql.str().data(), yield);
if (size_t numRows = checkResult(res, 2))
{
std::vector<LedgerObject> objects;
@@ -549,18 +606,20 @@ PostgresBackend::fetchLedgerDiff(uint32_t ledgerSequence) const
}
return objects;
}
return {};
}
AccountTransactions
PostgresBackend::fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t limit,
std::uint32_t const limit,
bool forward,
std::optional<AccountTransactionsCursor> const& cursor) const
std::optional<AccountTransactionsCursor> const& cursor,
boost::asio::yield_context& yield) const
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
pgQuery(set_timeout, yield);
pg_params dbParams;
char const*& command = dbParams.first;
@@ -587,7 +646,7 @@ PostgresBackend::fetchAccountTransactions(
}
auto start = std::chrono::system_clock::now();
auto res = pgQuery(dbParams);
auto res = pgQuery(dbParams, yield);
auto end = std::chrono::system_clock::now();
auto duration = ((end - start).count()) / 1000000000.0;
@@ -595,6 +654,7 @@ PostgresBackend::fetchAccountTransactions(
<< __func__ << " : executed stored_procedure in "
<< std::to_string(duration)
<< " num records = " << std::to_string(checkResult(res, 1));
checkResult(res, 1);
char const* resultStr = res.c_str();
@@ -618,13 +678,13 @@ PostgresBackend::fetchAccountTransactions(
if (responseObj.contains("cursor"))
{
return {
fetchTransactions(hashes),
fetchTransactions(hashes, yield),
{{responseObj.at("cursor").at("ledger_sequence").as_int64(),
responseObj.at("cursor")
.at("transaction_index")
.as_int64()}}};
}
return {fetchTransactions(hashes), {}};
return {fetchTransactions(hashes, yield), {}};
}
return {{}, {}};
} // namespace Backend
@@ -643,38 +703,40 @@ PostgresBackend::close()
}
void
PostgresBackend::startWrites()
PostgresBackend::startWrites() const
{
synchronous([&](boost::asio::yield_context yield) {
numRowsInObjectsBuffer_ = 0;
abortWrite_ = false;
auto res = writeConnection_("BEGIN");
auto res = writeConnection_("BEGIN", yield);
if (!res || res.status() != PGRES_COMMAND_OK)
{
std::stringstream msg;
msg << "Postgres error creating transaction: " << res.msg();
throw std::runtime_error(msg.str());
}
});
}
bool
PostgresBackend::doFinishWrites()
PostgresBackend::doFinishWrites() const
{
synchronous([&](boost::asio::yield_context yield) {
if (!abortWrite_)
{
std::string txStr = transactionsBuffer_.str();
writeConnection_.bulkInsert("transactions", txStr);
writeConnection_.bulkInsert("transactions", txStr, yield);
writeConnection_.bulkInsert(
"account_transactions", accountTxBuffer_.str());
"account_transactions", accountTxBuffer_.str(), yield);
std::string objectsStr = objectsBuffer_.str();
if (objectsStr.size())
writeConnection_.bulkInsert("objects", objectsStr);
writeConnection_.bulkInsert("objects", objectsStr, yield);
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " objects size = " << objectsStr.size()
<< " txns size = " << txStr.size();
std::string successorStr = successorBuffer_.str();
if (successorStr.size())
writeConnection_.bulkInsert("successor", successorStr);
successors_.clear();
writeConnection_.bulkInsert("successor", successorStr, yield);
if (!range)
{
std::stringstream indexCreate;
@@ -683,10 +745,10 @@ PostgresBackend::doFinishWrites()
"WHERE NOT "
"ledger_seq = "
<< std::to_string(inProcessLedger);
writeConnection_(indexCreate.str().data());
writeConnection_(indexCreate.str().data(), yield);
}
}
auto res = writeConnection_("COMMIT");
auto res = writeConnection_("COMMIT", yield);
if (!res || res.status() != PGRES_COMMAND_OK)
{
std::stringstream msg;
@@ -699,29 +761,35 @@ PostgresBackend::doFinishWrites()
objectsBuffer_.clear();
successorBuffer_.str("");
successorBuffer_.clear();
successors_.clear();
accountTxBuffer_.str("");
accountTxBuffer_.clear();
numRowsInObjectsBuffer_ = 0;
});
return !abortWrite_;
}
bool
PostgresBackend::doOnlineDelete(uint32_t numLedgersToKeep) const
PostgresBackend::doOnlineDelete(
std::uint32_t const numLedgersToKeep,
boost::asio::yield_context& yield) const
{
auto rng = fetchLedgerRange();
if (!rng)
return false;
uint32_t minLedger = rng->maxSequence - numLedgersToKeep;
std::uint32_t minLedger = rng->maxSequence - numLedgersToKeep;
if (minLedger <= rng->minSequence)
return false;
uint32_t limit = 2048;
std::uint32_t limit = 2048;
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 0");
pgQuery("SET statement_timeout TO 0", yield);
std::optional<ripple::uint256> cursor;
while (true)
{
auto [objects, curCursor, warning] = retryOnTimeout(
[&]() { return fetchLedgerPage(cursor, minLedger, 256); });
auto [objects, curCursor, warning] = retryOnTimeout([&]() {
return fetchLedgerPage(cursor, minLedger, 256, 0, yield);
});
if (warning)
{
BOOST_LOG_TRIVIAL(warning) << __func__
@@ -739,7 +807,7 @@ PostgresBackend::doOnlineDelete(uint32_t numLedgersToKeep) const
<< std::to_string(minLedger) << '\t' << "\\\\x"
<< ripple::strHex(obj.blob) << '\n';
}
pgQuery.bulkInsert("objects", objectsBuffer.str());
pgQuery.bulkInsert("objects", objectsBuffer.str(), yield);
cursor = curCursor;
if (!cursor)
break;
@@ -749,7 +817,7 @@ PostgresBackend::doOnlineDelete(uint32_t numLedgersToKeep) const
std::stringstream sql;
sql << "DELETE FROM ledgers WHERE ledger_seq < "
<< std::to_string(minLedger);
auto res = pgQuery(sql.str().data());
auto res = pgQuery(sql.str().data(), yield);
if (res.msg() != "ok")
throw std::runtime_error("Error deleting from ledgers table");
}
@@ -757,7 +825,7 @@ PostgresBackend::doOnlineDelete(uint32_t numLedgersToKeep) const
std::stringstream sql;
sql << "DELETE FROM keys WHERE ledger_seq < "
<< std::to_string(minLedger);
auto res = pgQuery(sql.str().data());
auto res = pgQuery(sql.str().data(), yield);
if (res.msg() != "ok")
throw std::runtime_error("Error deleting from keys table");
}
@@ -765,7 +833,7 @@ PostgresBackend::doOnlineDelete(uint32_t numLedgersToKeep) const
std::stringstream sql;
sql << "DELETE FROM books WHERE ledger_seq < "
<< std::to_string(minLedger);
auto res = pgQuery(sql.str().data());
auto res = pgQuery(sql.str().data(), yield);
if (res.msg() != "ok")
throw std::runtime_error("Error deleting from books table");
}

View File

@@ -16,62 +16,84 @@ private:
std::shared_ptr<PgPool> pgPool_;
mutable PgQuery writeConnection_;
mutable bool abortWrite_ = false;
mutable boost::asio::thread_pool pool_{16};
uint32_t writeInterval_ = 1000000;
uint32_t inProcessLedger = 0;
std::unordered_set<std::string> successors_;
std::uint32_t writeInterval_ = 1000000;
std::uint32_t inProcessLedger = 0;
mutable std::unordered_set<std::string> successors_;
const char* const set_timeout = "SET statement_timeout TO 10000";
public:
PostgresBackend(boost::json::object const& config);
PostgresBackend(
boost::asio::io_context& ioc,
boost::json::object const& config);
std::optional<uint32_t>
fetchLatestLedgerSequence() const override;
std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override;
std::optional<ripple::LedgerInfo>
fetchLedgerBySequence(uint32_t sequence) const override;
fetchLedgerBySequence(
std::uint32_t const sequence,
boost::asio::yield_context& yield) const override;
std::optional<ripple::LedgerInfo>
fetchLedgerByHash(ripple::uint256 const& hash) const override;
fetchLedgerByHash(
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const override;
std::optional<Blob>
doFetchLedgerObject(ripple::uint256 const& key, uint32_t sequence)
const override;
doFetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const override;
// returns a transaction, metadata pair
std::optional<TransactionAndMetadata>
fetchTransaction(ripple::uint256 const& hash) const override;
fetchTransaction(
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const override;
std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(uint32_t ledgerSequence) const override;
fetchAllTransactionsInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const override;
std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(uint32_t ledgerSequence) const override;
fetchAllTransactionHashesInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const override;
std::vector<LedgerObject>
fetchLedgerDiff(uint32_t ledgerSequence) const override;
fetchLedgerDiff(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const override;
std::optional<LedgerRange>
hardFetchLedgerRange() const override;
hardFetchLedgerRange(boost::asio::yield_context& yield) const override;
std::optional<ripple::uint256>
doFetchSuccessorKey(ripple::uint256 key, uint32_t ledgerSequence)
const override;
doFetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const override;
std::vector<TransactionAndMetadata>
fetchTransactions(
std::vector<ripple::uint256> const& hashes) const override;
std::vector<ripple::uint256> const& hashes,
boost::asio::yield_context& yield) const override;
std::vector<Blob>
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
uint32_t sequence) const override;
std::uint32_t const sequence,
boost::asio::yield_context& yield) const override;
AccountTransactions
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t limit,
std::uint32_t const limit,
bool forward,
std::optional<AccountTransactionsCursor> const& cursor) const override;
std::optional<AccountTransactionsCursor> const& cursor,
boost::asio::yield_context& yield) const override;
void
writeLedger(
@@ -79,18 +101,22 @@ public:
std::string&& ledgerHeader) override;
void
doWriteLedgerObject(std::string&& key, uint32_t seq, std::string&& blob)
override;
doWriteLedgerObject(
std::string&& key,
std::uint32_t const seq,
std::string&& blob) override;
void
writeSuccessor(std::string&& key, uint32_t seq, std::string&& successor)
override;
writeSuccessor(
std::string&& key,
std::uint32_t const seq,
std::string&& successor) override;
void
writeTransaction(
std::string&& hash,
uint32_t seq,
uint32_t date,
std::uint32_t const seq,
std::uint32_t const date,
std::string&& transaction,
std::string&& metadata) override;
@@ -105,13 +131,15 @@ public:
close() override;
void
startWrites() override;
startWrites() const override;
bool
doFinishWrites() override;
doFinishWrites() const override;
bool
doOnlineDelete(uint32_t numLedgersToKeep) const override;
doOnlineDelete(
std::uint32_t const numLedgersToKeep,
boost::asio::yield_context& yield) const override;
};
} // namespace Backend
#endif

View File

@@ -38,8 +38,8 @@ struct TransactionAndMetadata
{
Blob transaction;
Blob metadata;
uint32_t ledgerSequence;
uint32_t date;
std::uint32_t ledgerSequence;
std::uint32_t date;
bool
operator==(const TransactionAndMetadata& other) const
{
@@ -50,8 +50,8 @@ struct TransactionAndMetadata
struct AccountTransactionsCursor
{
uint32_t ledgerSequence;
uint32_t transactionIndex;
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
};
struct AccountTransactions
@@ -62,8 +62,8 @@ struct AccountTransactions
struct LedgerRange
{
uint32_t minSequence;
uint32_t maxSequence;
std::uint32_t minSequence;
std::uint32_t maxSequence;
};
constexpr ripple::uint256 firstKey{
"0000000000000000000000000000000000000000000000000000000000000000"};

View File

@@ -179,4 +179,4 @@ getMarkers(size_t numMarkers)
return markers;
}
#endif
#endif // RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED

View File

@@ -459,21 +459,18 @@ ETLSourceImpl<Derived>::handleMessage()
{
if (response.contains("transaction"))
{
// std::cout << "FORWARDING TX" << std::endl;
subscriptions_->forwardProposedTransaction(response);
}
else if (
response.contains("type") &&
response["type"] == "validationReceived")
{
// std::cout << "FORWARDING VAL" << std::endl;
subscriptions_->forwardValidation(response);
}
else if (
response.contains("type") &&
response["type"] == "manifestReceived")
{
// std::cout << "FORWARDING MAN" << std::endl;
subscriptions_->forwardManifest(response);
}
}
@@ -764,10 +761,13 @@ ETLSourceImpl<Derived>::loadInitialLedger(
{
assert(cur);
if (prev == Backend::firstKey)
{
backend_->writeSuccessor(
uint256ToString(prev),
sequence,
uint256ToString(cur->key));
}
if (isBookDir(cur->key, cur->blob))
{
auto base = getBookBase(cur->key);
@@ -783,6 +783,7 @@ ETLSourceImpl<Derived>::loadInitialLedger(
<< __func__ << " Writing book successor = "
<< ripple::strHex(base) << " - "
<< ripple::strHex(cur->key);
backend_->writeSuccessor(
uint256ToString(base),
sequence,
@@ -796,10 +797,12 @@ ETLSourceImpl<Derived>::loadInitialLedger(
BOOST_LOG_TRIVIAL(info) << __func__ << " Wrote "
<< numWrites << " book successors";
}
backend_->writeSuccessor(
uint256ToString(prev),
sequence,
uint256ToString(Backend::lastKey));
++numWrites;
auto end = std::chrono::system_clock::now();
auto seconds =
@@ -944,14 +947,16 @@ ETLLoadBalancer::fetchLedger(
std::optional<boost::json::object>
ETLLoadBalancer::forwardToRippled(
boost::json::object const& request,
std::string const& clientIp) const
std::string const& clientIp,
boost::asio::yield_context& yield) const
{
srand((unsigned)time(0));
auto sourceIdx = rand() % sources_.size();
auto numAttempts = 0;
while (numAttempts < sources_.size())
{
if (auto res = sources_[sourceIdx]->forwardToRippled(request, clientIp))
if (auto res =
sources_[sourceIdx]->forwardToRippled(request, clientIp, yield))
return res;
sourceIdx = (sourceIdx + 1) % sources_.size();
@@ -964,7 +969,8 @@ template <class Derived>
std::optional<boost::json::object>
ETLSourceImpl<Derived>::forwardToRippled(
boost::json::object const& request,
std::string const& clientIp) const
std::string const& clientIp,
boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(debug) << "Attempting to forward request to tx. "
<< "request = " << boost::json::serialize(request);
@@ -983,21 +989,25 @@ ETLSourceImpl<Derived>::forwardToRippled(
using tcp = boost::asio::ip::tcp; // from
try
{
// The io_context is required for all I/O
net::io_context ioc;
boost::beast::error_code ec;
// These objects perform our I/O
tcp::resolver resolver{ioc};
tcp::resolver resolver{ioc_};
BOOST_LOG_TRIVIAL(debug) << "Creating websocket";
auto ws = std::make_unique<websocket::stream<tcp::socket>>(ioc);
auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(ioc_);
// Look up the domain name
auto const results = resolver.resolve(ip_, wsPort_);
auto const results = resolver.async_resolve(ip_, wsPort_, yield[ec]);
if (ec)
return {};
ws->next_layer().expires_after(std::chrono::seconds(30));
BOOST_LOG_TRIVIAL(debug) << "Connecting websocket";
// Make the connection on the IP address we get from a lookup
net::connect(ws->next_layer(), results.begin(), results.end());
ws->next_layer().async_connect(results, yield[ec]);
if (ec)
return {};
// Set a decorator to change the User-Agent of the handshake
// and to tell rippled to charge the client IP for RPC
@@ -1016,14 +1026,21 @@ ETLSourceImpl<Derived>::forwardToRippled(
BOOST_LOG_TRIVIAL(debug) << "Performing websocket handshake";
// Perform the websocket handshake
ws->handshake(ip_, "/");
ws->async_handshake(ip_, "/", yield[ec]);
if (ec)
return {};
BOOST_LOG_TRIVIAL(debug) << "Sending request";
// Send the message
ws->write(net::buffer(boost::json::serialize(request)));
ws->async_write(
net::buffer(boost::json::serialize(request)), yield[ec]);
if (ec)
return {};
beast::flat_buffer buffer;
ws->read(buffer);
ws->async_read(buffer, yield[ec]);
if (ec)
return {};
auto begin = static_cast<char const*>(buffer.data().data());
auto end = begin + buffer.data().size();

View File

@@ -58,7 +58,8 @@ public:
virtual std::optional<boost::json::object>
forwardToRippled(
boost::json::object const& request,
std::string const& clientIp) const = 0;
std::string const& clientIp,
boost::asio::yield_context& yield) const = 0;
virtual ~ETLSource()
{
@@ -327,7 +328,8 @@ public:
std::optional<boost::json::object>
forwardToRippled(
boost::json::object const& request,
std::string const& clientIp) const override;
std::string const& clientIp,
boost::asio::yield_context& yield) const override;
};
class PlainETLSource : public ETLSourceImpl<PlainETLSource>
@@ -562,7 +564,8 @@ public:
std::optional<boost::json::object>
forwardToRippled(
boost::json::object const& request,
std::string const& clientIp) const;
std::string const& clientIp,
boost::asio::yield_context& yield) const;
private:
/// f is a function that takes an ETLSource as an argument and returns a

View File

@@ -99,9 +99,12 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
auto start = std::chrono::system_clock::now();
backend_->startWrites();
BOOST_LOG_TRIVIAL(debug) << __func__ << " started writes";
backend_->writeLedger(
lgrInfo, std::move(*ledgerData->mutable_ledger_header()));
BOOST_LOG_TRIVIAL(debug) << __func__ << " wrote ledger";
std::vector<AccountTransactionsData> accountTxData =
insertTransactions(lgrInfo, *ledgerData);
@@ -116,10 +119,10 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
BOOST_LOG_TRIVIAL(debug) << __func__ << " loaded initial ledger";
if (!stopping_)
{
backend_->writeAccountTransactions(std::move(accountTxData));
}
backend_->finishWrites(startingSequence);
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug) << "Time to download and store ledger = "
<< ((end - start).count()) / 1000000000.0;
@@ -135,15 +138,38 @@ ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo)
if (!writing_)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - Updating cache";
auto diff = Backend::retryOnTimeout(
[&]() { return backend_->fetchLedgerDiff(lgrInfo.seq); });
std::vector<Backend::LedgerObject> diff;
auto fetchDiffSynchronous = [&]() {
Backend::synchronous([&](boost::asio::yield_context yield) {
diff = backend_->fetchLedgerDiff(lgrInfo.seq, yield);
});
};
Backend::retryOnTimeout(fetchDiffSynchronous);
backend_->cache().update(diff, lgrInfo.seq);
}
backend_->updateRange(lgrInfo.seq);
auto fees = Backend::retryOnTimeout(
[&]() { return backend_->fetchFees(lgrInfo.seq); });
auto transactions = Backend::retryOnTimeout(
[&]() { return backend_->fetchAllTransactionsInLedger(lgrInfo.seq); });
std::optional<ripple::Fees> fees = {};
std::vector<Backend::TransactionAndMetadata> transactions = {};
auto fetchFeesSynchronous = [&]() {
Backend::synchronous([&](boost::asio::yield_context yield) {
fees = backend_->fetchFees(lgrInfo.seq, yield);
});
};
auto fetchTxSynchronous = [&]() {
Backend::synchronous([&](boost::asio::yield_context yield) {
transactions =
backend_->fetchAllTransactionsInLedger(lgrInfo.seq, yield);
});
};
Backend::retryOnTimeout(fetchFeesSynchronous);
Backend::retryOnTimeout(fetchTxSynchronous);
auto ledgerRange = backend_->fetchLedgerRange();
assert(ledgerRange);
@@ -196,9 +222,16 @@ ReportingETL::publishLedger(
}
else
{
auto lgr = Backend::retryOnTimeout([&]() {
return backend_->fetchLedgerBySequence(ledgerSequence);
std::optional<ripple::LedgerInfo> lgr = {};
auto fetchLedgerSynchronous = [&]() {
Backend::synchronous([&](boost::asio::yield_context yield) {
lgr =
backend_->fetchLedgerBySequence(ledgerSequence, yield);
});
};
Backend::retryOnTimeout(fetchLedgerSynchronous);
assert(lgr);
publishLedger(*lgr);
@@ -248,11 +281,14 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Deserialized ledger header. " << detail::toString(lgrInfo);
backend_->startWrites();
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "started writes";
backend_->writeLedger(lgrInfo, std::move(*rawData.mutable_ledger_header()));
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "wrote ledger header";
@@ -265,10 +301,12 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
auto firstBook = std::move(*obj.mutable_first_book());
if (!firstBook.size())
firstBook = uint256ToString(Backend::lastKey);
backend_->writeSuccessor(
std::move(*obj.mutable_book_base()),
lgrInfo.seq,
std::move(firstBook));
BOOST_LOG_TRIVIAL(debug) << __func__ << " writing book successor "
<< ripple::strHex(obj.book_base()) << " - "
<< ripple::strHex(firstBook);
@@ -293,6 +331,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
<< ripple::strHex(obj.key()) << " - "
<< ripple::strHex(*predPtr) << " - "
<< ripple::strHex(*succPtr);
backend_->writeSuccessor(
std::move(*predPtr), lgrInfo.seq, std::move(*succPtr));
}
@@ -303,6 +342,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
<< ripple::strHex(obj.key()) << " - "
<< ripple::strHex(*predPtr) << " - "
<< ripple::strHex(*succPtr);
backend_->writeSuccessor(
std::move(*predPtr),
lgrInfo.seq,
@@ -412,6 +452,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
<< ripple::strHex(obj.key) << " - "
<< ripple::strHex(lb->key) << " - "
<< ripple::strHex(ub->key);
backend_->writeSuccessor(
uint256ToString(lb->key),
lgrInfo.seq,
@@ -427,6 +468,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
uint256ToString(obj.key),
lgrInfo.seq,
uint256ToString(ub->key));
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " writing successor for new object "
<< ripple::strHex(lb->key) << " - "
@@ -443,6 +485,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
uint256ToString(base),
lgrInfo.seq,
uint256ToString(succ->key));
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " Updating book successor "
<< ripple::strHex(base) << " - "
@@ -454,6 +497,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
uint256ToString(base),
lgrInfo.seq,
uint256ToString(Backend::lastKey));
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " Updating book successor "
<< ripple::strHex(base) << " - "
@@ -472,11 +516,15 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
<< __func__ << " : "
<< "Inserted all transactions. Number of transactions = "
<< rawData.transactions_list().transactions_size();
backend_->writeAccountTransactions(std::move(accountTxData));
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "wrote account_tx";
auto start = std::chrono::system_clock::now();
bool success = backend_->finishWrites(lgrInfo.seq);
auto end = std::chrono::system_clock::now();
auto duration = ((end - start).count()) / 1000000000.0;
@@ -685,7 +733,11 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
deleting_ = true;
ioContext_.post([this, &minSequence]() {
BOOST_LOG_TRIVIAL(info) << "Running online delete";
backend_->doOnlineDelete(*onlineDeleteInterval_);
Backend::synchronous([&](boost::asio::yield_context yield) {
backend_->doOnlineDelete(*onlineDeleteInterval_, yield);
});
BOOST_LOG_TRIVIAL(info) << "Finished online delete";
auto rng = backend_->fetchLedgerRange();
minSequence = rng->minSequence;
@@ -729,8 +781,11 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
void
ReportingETL::monitor()
{
std::optional<uint32_t> latestSequence =
backend_->fetchLatestLedgerSequence();
std::optional<uint32_t> latestSequence = {};
Backend::synchronous([&](boost::asio::yield_context yield) {
latestSequence = backend_->fetchLatestLedgerSequence(yield);
});
if (!latestSequence)
{
BOOST_LOG_TRIVIAL(info) << __func__ << " : "

View File

@@ -43,8 +43,8 @@ private:
std::shared_ptr<BackendInterface> backend_;
std::shared_ptr<SubscriptionManager> subscriptions_;
std::shared_ptr<ETLLoadBalancer> loadBalancer_;
std::optional<uint32_t> onlineDeleteInterval_;
uint32_t extractorThreads_ = 1;
std::optional<std::uint32_t> onlineDeleteInterval_;
std::uint32_t extractorThreads_ = 1;
std::thread worker_;
boost::asio::io_context& ioContext_;
@@ -239,7 +239,7 @@ private:
/// This is equivelent to the degree of parallelism during the initial
/// ledger download
/// @return the number of markers
uint32_t
std::uint32_t
getNumMarkers()
{
return numMarkers_;

View File

@@ -1,3 +1,11 @@
#include <grpc/impl/codegen/port_platform.h>
#ifdef GRPC_TSAN_ENABLED
#undef GRPC_TSAN_ENABLED
#endif
#ifdef GRPC_ASAN_ENABLED
#undef GRPC_ASAN_ENABLED
#endif
#include <boost/asio/dispatch.hpp>
#include <boost/asio/strand.hpp>
#include <boost/beast/websocket.hpp>
@@ -195,7 +203,8 @@ main(int argc, char* argv[])
DOSGuard dosGuard{config.value(), ioc};
// Interface to the database
std::shared_ptr<BackendInterface> backend{Backend::make_Backend(*config)};
std::shared_ptr<BackendInterface> backend{
Backend::make_Backend(ioc, *config)};
// Manages clients subscribed to streams
std::shared_ptr<SubscriptionManager> subscriptions{

View File

@@ -1,10 +1,13 @@
#include <boost/asio/spawn.hpp>
#include <etl/ETLSource.h>
#include <rpc/Handlers.h>
#include <unordered_map>
namespace RPC {
std::optional<Context>
make_WsContext(
boost::asio::yield_context& yc,
boost::json::object const& request,
std::shared_ptr<BackendInterface const> const& backend,
std::shared_ptr<SubscriptionManager> const& subscriptions,
@@ -20,6 +23,7 @@ make_WsContext(
std::string command = request.at("command").as_string().c_str();
return Context{
yc,
command,
1,
request,
@@ -34,6 +38,7 @@ make_WsContext(
std::optional<Context>
make_HttpContext(
boost::asio::yield_context& yc,
boost::json::object const& request,
std::shared_ptr<BackendInterface const> const& backend,
std::shared_ptr<SubscriptionManager> const& subscriptions,
@@ -62,6 +67,7 @@ make_HttpContext(
return {};
return Context{
yc,
command,
1,
array.at(0).as_object(),
@@ -169,7 +175,8 @@ buildResponse(Context const& ctx)
boost::json::object toForward = ctx.params;
toForward["command"] = ctx.method;
auto res = ctx.balancer->forwardToRippled(toForward, ctx.clientIp);
auto res =
ctx.balancer->forwardToRippled(toForward, ctx.clientIp, ctx.yield);
ctx.counters.rpcForwarded(ctx.method);

View File

@@ -2,6 +2,7 @@
#define REPORTING_RPC_H_INCLUDED
#include <ripple/protocol/ErrorCodes.h>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <backend/BackendInterface.h>
#include <optional>
@@ -27,6 +28,7 @@ namespace RPC {
struct Context
{
boost::asio::yield_context& yield;
std::string method;
std::uint32_t version;
boost::json::object const& params;
@@ -42,6 +44,7 @@ struct Context
std::string clientIp;
Context(
boost::asio::yield_context& yield_,
std::string const& command_,
std::uint32_t version_,
boost::json::object const& params_,
@@ -52,7 +55,8 @@ struct Context
Backend::LedgerRange const& range_,
Counters& counters_,
std::string const& clientIp_)
: method(command_)
: yield(yield_)
, method(command_)
, version(version_)
, params(params_)
, backend(backend_)
@@ -135,6 +139,7 @@ make_error(Error err);
std::optional<Context>
make_WsContext(
boost::asio::yield_context& yc,
boost::json::object const& request,
std::shared_ptr<BackendInterface const> const& backend,
std::shared_ptr<SubscriptionManager> const& subscriptions,
@@ -146,6 +151,7 @@ make_WsContext(
std::optional<Context>
make_HttpContext(
boost::asio::yield_context& yc,
boost::json::object const& request,
std::shared_ptr<BackendInterface const> const& backend,
std::shared_ptr<SubscriptionManager> const& subscriptions,

View File

@@ -32,7 +32,8 @@ getRequiredBool(boost::json::object const& request, std::string const& field)
else
throw InvalidParamsError("Missing field " + field);
}
std::optional<uint32_t>
std::optional<std::uint32_t>
getUInt(boost::json::object const& request, std::string const& field)
{
if (!request.contains(field))
@@ -44,18 +45,20 @@ getUInt(boost::json::object const& request, std::string const& field)
else
throw InvalidParamsError("Invalid field " + field + ", not uint.");
}
uint32_t
std::uint32_t
getUInt(
boost::json::object const& request,
std::string const& field,
uint32_t dfault)
std::uint32_t const dfault)
{
if (auto res = getUInt(request, field))
return *res;
else
return dfault;
}
uint32_t
std::uint32_t
getRequiredUInt(boost::json::object const& request, std::string const& field)
{
if (auto res = getUInt(request, field))
@@ -63,6 +66,7 @@ getRequiredUInt(boost::json::object const& request, std::string const& field)
else
throw InvalidParamsError("Missing field " + field);
}
std::optional<std::string>
getString(boost::json::object const& request, std::string const& field)
{
@@ -97,7 +101,7 @@ std::optional<ripple::STAmount>
getDeliveredAmount(
std::shared_ptr<ripple::STTx const> const& txn,
std::shared_ptr<ripple::TxMeta const> const& meta,
uint32_t ledgerSequence)
std::uint32_t const ledgerSequence)
{
if (meta->hasDeliveredAmount())
return meta->getDeliveredAmount();
@@ -358,7 +362,7 @@ ledgerInfoFromRequest(Context const& ctx)
if (!ledgerHash.parseHex(hashValue.as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "ledgerHashMalformed"};
lgrInfo = ctx.backend->fetchLedgerByHash(ledgerHash);
lgrInfo = ctx.backend->fetchLedgerByHash(ledgerHash, ctx.yield);
}
else if (!indexValue.is_null())
{
@@ -370,11 +374,12 @@ ledgerInfoFromRequest(Context const& ctx)
else
return Status{Error::rpcINVALID_PARAMS, "ledgerIndexMalformed"};
lgrInfo = ctx.backend->fetchLedgerBySequence(ledgerSequence);
lgrInfo = ctx.backend->fetchLedgerBySequence(ledgerSequence, ctx.yield);
}
else
{
lgrInfo = ctx.backend->fetchLedgerBySequence(ctx.range.maxSequence);
lgrInfo = ctx.backend->fetchLedgerBySequence(
ctx.range.maxSequence, ctx.yield);
}
if (!lgrInfo)
@@ -407,10 +412,11 @@ traverseOwnedNodes(
ripple::AccountID const& accountID,
std::uint32_t sequence,
ripple::uint256 const& cursor,
boost::asio::yield_context& yield,
std::function<bool(ripple::SLE)> atOwnedNode)
{
if (!backend.fetchLedgerObject(
ripple::keylet::account(accountID).key, sequence))
ripple::keylet::account(accountID).key, sequence, yield))
throw AccountNotFoundError(ripple::toBase58(accountID));
auto const rootIndex = ripple::keylet::ownerDir(accountID);
auto currentIndex = rootIndex;
@@ -421,7 +427,8 @@ traverseOwnedNodes(
auto start = std::chrono::system_clock::now();
for (;;)
{
auto ownedNode = backend.fetchLedgerObject(currentIndex.key, sequence);
auto ownedNode =
backend.fetchLedgerObject(currentIndex.key, sequence, yield);
if (!ownedNode)
{
@@ -449,7 +456,7 @@ traverseOwnedNodes(
<< ((end - start).count() / 1000000000.0);
start = std::chrono::system_clock::now();
auto objects = backend.fetchLedgerObjects(keys, sequence);
auto objects = backend.fetchLedgerObjects(keys, sequence, yield);
end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug) << "Time loading owned entries: "
@@ -639,13 +646,14 @@ bool
isGlobalFrozen(
BackendInterface const& backend,
std::uint32_t sequence,
ripple::AccountID const& issuer)
ripple::AccountID const& issuer,
boost::asio::yield_context& yield)
{
if (ripple::isXRP(issuer))
return false;
auto key = ripple::keylet::account(issuer).key;
auto blob = backend.fetchLedgerObject(key, sequence);
auto blob = backend.fetchLedgerObject(key, sequence, yield);
if (!blob)
return false;
@@ -662,13 +670,14 @@ isFrozen(
std::uint32_t sequence,
ripple::AccountID const& account,
ripple::Currency const& currency,
ripple::AccountID const& issuer)
ripple::AccountID const& issuer,
boost::asio::yield_context& yield)
{
if (ripple::isXRP(currency))
return false;
auto key = ripple::keylet::account(issuer).key;
auto blob = backend.fetchLedgerObject(key, sequence);
auto blob = backend.fetchLedgerObject(key, sequence, yield);
if (!blob)
return false;
@@ -682,7 +691,7 @@ isFrozen(
if (issuer != account)
{
key = ripple::keylet::line(account, issuer, currency).key;
blob = backend.fetchLedgerObject(key, sequence);
blob = backend.fetchLedgerObject(key, sequence, yield);
if (!blob)
return false;
@@ -704,10 +713,11 @@ ripple::XRPAmount
xrpLiquid(
BackendInterface const& backend,
std::uint32_t sequence,
ripple::AccountID const& id)
ripple::AccountID const& id,
boost::asio::yield_context& yield)
{
auto key = ripple::keylet::account(id).key;
auto blob = backend.fetchLedgerObject(key, sequence);
auto blob = backend.fetchLedgerObject(key, sequence, yield);
if (!blob)
return beast::zero;
@@ -718,7 +728,7 @@ xrpLiquid(
std::uint32_t const ownerCount = sle.getFieldU32(ripple::sfOwnerCount);
auto const reserve =
backend.fetchFees(sequence)->accountReserve(ownerCount);
backend.fetchFees(sequence, yield)->accountReserve(ownerCount);
auto const balance = sle.getFieldAmount(ripple::sfBalance);
@@ -732,9 +742,10 @@ xrpLiquid(
ripple::STAmount
accountFunds(
BackendInterface const& backend,
uint32_t sequence,
std::uint32_t const sequence,
ripple::STAmount const& amount,
ripple::AccountID const& id)
ripple::AccountID const& id,
boost::asio::yield_context& yield)
{
if (!amount.native() && amount.getIssuer() == id)
{
@@ -743,7 +754,13 @@ accountFunds(
else
{
return accountHolds(
backend, sequence, id, amount.getCurrency(), amount.getIssuer());
backend,
sequence,
id,
amount.getCurrency(),
amount.getIssuer(),
true,
yield);
}
}
@@ -754,16 +771,17 @@ accountHolds(
ripple::AccountID const& account,
ripple::Currency const& currency,
ripple::AccountID const& issuer,
bool zeroIfFrozen)
bool const zeroIfFrozen,
boost::asio::yield_context& yield)
{
ripple::STAmount amount;
if (ripple::isXRP(currency))
{
return {xrpLiquid(backend, sequence, account)};
return {xrpLiquid(backend, sequence, account, yield)};
}
auto key = ripple::keylet::line(account, issuer, currency).key;
auto const blob = backend.fetchLedgerObject(key, sequence);
auto const blob = backend.fetchLedgerObject(key, sequence, yield);
if (!blob)
{
@@ -774,7 +792,8 @@ accountHolds(
ripple::SerialIter it{blob->data(), blob->size()};
ripple::SLE sle{it, key};
if (zeroIfFrozen && isFrozen(backend, sequence, account, currency, issuer))
if (zeroIfFrozen &&
isFrozen(backend, sequence, account, currency, issuer, yield))
{
amount.clear(ripple::Issue(currency, issuer));
}
@@ -796,10 +815,11 @@ ripple::Rate
transferRate(
BackendInterface const& backend,
std::uint32_t sequence,
ripple::AccountID const& issuer)
ripple::AccountID const& issuer,
boost::asio::yield_context& yield)
{
auto key = ripple::keylet::account(issuer).key;
auto blob = backend.fetchLedgerObject(key, sequence);
auto blob = backend.fetchLedgerObject(key, sequence, yield);
if (blob)
{
@@ -819,17 +839,18 @@ postProcessOrderBook(
ripple::Book const& book,
ripple::AccountID const& takerID,
Backend::BackendInterface const& backend,
uint32_t ledgerSequence)
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield)
{
boost::json::array jsonOffers;
std::map<ripple::AccountID, ripple::STAmount> umBalance;
bool globalFreeze =
isGlobalFrozen(backend, ledgerSequence, book.out.account) ||
isGlobalFrozen(backend, ledgerSequence, book.out.account);
isGlobalFrozen(backend, ledgerSequence, book.out.account, yield) ||
isGlobalFrozen(backend, ledgerSequence, book.out.account, yield);
auto rate = transferRate(backend, ledgerSequence, book.out.account);
auto rate = transferRate(backend, ledgerSequence, book.out.account, yield);
for (auto const& obj : offers)
{
@@ -877,7 +898,8 @@ postProcessOrderBook(
uOfferOwnerID,
book.out.currency,
book.out.account,
zeroIfFrozen);
zeroIfFrozen,
yield);
if (saOwnerFunds < beast::zero)
saOwnerFunds.clear();

View File

@@ -64,7 +64,7 @@ generatePubLedgerMessage(
ripple::LedgerInfo const& lgrInfo,
ripple::Fees const& fees,
std::string const& ledgerRange,
uint32_t txnCount);
std::uint32_t txnCount);
std::variant<Status, ripple::LedgerInfo>
ledgerInfoFromRequest(Context const& ctx);
@@ -75,6 +75,7 @@ traverseOwnedNodes(
ripple::AccountID const& accountID,
std::uint32_t sequence,
ripple::uint256 const& cursor,
boost::asio::yield_context& yield,
std::function<bool(ripple::SLE)> atOwnedNode);
std::variant<Status, std::pair<ripple::PublicKey, ripple::SecretKey>>
@@ -90,7 +91,8 @@ bool
isGlobalFrozen(
BackendInterface const& backend,
std::uint32_t seq,
ripple::AccountID const& issuer);
ripple::AccountID const& issuer,
boost::asio::yield_context& yield);
bool
isFrozen(
@@ -98,14 +100,16 @@ isFrozen(
std::uint32_t sequence,
ripple::AccountID const& account,
ripple::Currency const& currency,
ripple::AccountID const& issuer);
ripple::AccountID const& issuer,
boost::asio::yield_context& yield);
ripple::STAmount
accountFunds(
BackendInterface const& backend,
uint32_t sequence,
std::uint32_t sequence,
ripple::STAmount const& amount,
ripple::AccountID const& id);
ripple::AccountID const& id,
boost::asio::yield_context& yield);
ripple::STAmount
accountHolds(
@@ -114,26 +118,31 @@ accountHolds(
ripple::AccountID const& account,
ripple::Currency const& currency,
ripple::AccountID const& issuer,
bool zeroIfFrozen = false);
bool zeroIfFrozen,
boost::asio::yield_context& yield);
ripple::Rate
transferRate(
BackendInterface const& backend,
std::uint32_t sequence,
ripple::AccountID const& issuer);
ripple::AccountID const& issuer,
boost::asio::yield_context& yield);
ripple::XRPAmount
xrpLiquid(
BackendInterface const& backend,
std::uint32_t sequence,
ripple::AccountID const& id);
ripple::AccountID const& id,
boost::asio::yield_context& yield);
boost::json::array
postProcessOrderBook(
std::vector<Backend::LedgerObject> const& offers,
ripple::Book const& book,
ripple::AccountID const& takerID,
Backend::BackendInterface const& backend,
uint32_t ledgerSequence);
std::uint32_t ledgerSequence,
boost::asio::yield_context& yield);
std::variant<Status, ripple::Book>
parseBook(boost::json::object const& request);
@@ -141,16 +150,16 @@ parseBook(boost::json::object const& request);
std::variant<Status, ripple::AccountID>
parseTaker(boost::json::value const& request);
std::optional<uint32_t>
std::optional<std::uint32_t>
getUInt(boost::json::object const& request, std::string const& field);
uint32_t
std::uint32_t
getUInt(
boost::json::object const& request,
std::string const& field,
uint32_t dfault);
std::uint32_t dfault);
uint32_t
std::uint32_t
getRequiredUInt(boost::json::object const& request, std::string const& field);
std::optional<bool>

View File

@@ -122,7 +122,12 @@ doAccountChannels(Context const& context)
};
auto nextCursor = traverseOwnedNodes(
*context.backend, *accountID, lgrInfo.seq, marker, addToResponse);
*context.backend,
*accountID,
lgrInfo.seq,
marker,
context.yield,
addToResponse);
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;

View File

@@ -60,7 +60,12 @@ doAccountCurrencies(Context const& context)
};
traverseOwnedNodes(
*context.backend, *accountID, lgrInfo.seq, beast::zero, addToResponse);
*context.backend,
*accountID,
lgrInfo.seq,
beast::zero,
context.yield,
addToResponse);
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;

View File

@@ -64,7 +64,7 @@ doAccountInfo(Context const& context)
auto start = std::chrono::system_clock::now();
std::optional<std::vector<unsigned char>> dbResponse =
context.backend->fetchLedgerObject(key.key, lgrInfo.seq);
context.backend->fetchLedgerObject(key.key, lgrInfo.seq, context.yield);
auto end = std::chrono::system_clock::now();
auto time =
@@ -103,8 +103,8 @@ doAccountInfo(Context const& context)
// This code will need to be revisited if in the future we
// support multiple SignerLists on one account.
auto const signers =
context.backend->fetchLedgerObject(signersKey.key, lgrInfo.seq);
auto const signers = context.backend->fetchLedgerObject(
signersKey.key, lgrInfo.seq, context.yield);
if (signers)
{
ripple::STLedgerEntry sleSigners{

View File

@@ -166,7 +166,12 @@ doAccountLines(Context const& context)
};
auto nextCursor = traverseOwnedNodes(
*context.backend, *accountID, lgrInfo.seq, cursor, addToResponse);
*context.backend,
*accountID,
lgrInfo.seq,
cursor,
context.yield,
addToResponse);
if (nextCursor)
response["marker"] = ripple::strHex(*nextCursor);

View File

@@ -102,7 +102,12 @@ doAccountObjects(Context const& context)
};
auto nextCursor = traverseOwnedNodes(
*context.backend, *accountID, lgrInfo.seq, cursor, addToResponse);
*context.backend,
*accountID,
lgrInfo.seq,
cursor,
context.yield,
addToResponse);
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;

View File

@@ -128,7 +128,12 @@ doAccountOffers(Context const& context)
};
auto nextCursor = traverseOwnedNodes(
*context.backend, *accountID, lgrInfo.seq, cursor, addToResponse);
*context.backend,
*accountID,
lgrInfo.seq,
cursor,
context.yield,
addToResponse);
if (nextCursor)
response["marker"] = ripple::strHex(*nextCursor);

View File

@@ -54,7 +54,8 @@ doAccountTx(Context const& context)
return Status{
Error::rpcINVALID_PARAMS, "transactionIndexNotInt"};
transactionIndex = value_to<std::uint32_t>(obj.at("seq"));
transactionIndex =
boost::json::value_to<std::uint32_t>(obj.at("seq"));
}
std::optional<std::uint32_t> ledgerIndex = {};
@@ -63,7 +64,8 @@ doAccountTx(Context const& context)
if (!obj.at("ledger").is_int64())
return Status{Error::rpcINVALID_PARAMS, "ledgerIndexNotInt"};
ledgerIndex = value_to<std::uint32_t>(obj.at("ledger"));
ledgerIndex =
boost::json::value_to<std::uint32_t>(obj.at("ledger"));
}
if (!transactionIndex || !ledgerIndex)
@@ -124,7 +126,8 @@ doAccountTx(Context const& context)
if (!request.at("ledger_index").is_int64())
return Status{Error::rpcINVALID_PARAMS, "ledgerIndexNotNumber"};
auto ledgerIndex = value_to<uint32_t>(request.at("ledger_index"));
auto ledgerIndex =
boost::json::value_to<std::uint32_t>(request.at("ledger_index"));
maxIndex = minIndex = ledgerIndex;
}
@@ -139,7 +142,8 @@ doAccountTx(Context const& context)
return RPC::Status{
RPC::Error::rpcINVALID_PARAMS, "ledgerHashMalformed"};
auto lgrInfo = context.backend->fetchLedgerByHash(ledgerHash);
auto lgrInfo =
context.backend->fetchLedgerByHash(ledgerHash, context.yield);
maxIndex = minIndex = lgrInfo->seq;
}
@@ -167,7 +171,7 @@ doAccountTx(Context const& context)
boost::json::array txns;
auto start = std::chrono::system_clock::now();
auto [blobs, retCursor] = context.backend->fetchAccountTransactions(
*accountID, limit, forward, cursor);
*accountID, limit, forward, cursor, context.yield);
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(info) << __func__ << " db fetch took "

View File

@@ -82,8 +82,8 @@ doBookOffers(Context const& context)
}
auto start = std::chrono::system_clock::now();
auto [offers, retCursor, warning] =
context.backend->fetchBookOffers(bookBase, lgrInfo.seq, limit, cursor);
auto [offers, retCursor, warning] = context.backend->fetchBookOffers(
bookBase, lgrInfo.seq, limit, cursor, context.yield);
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(warning)
@@ -93,7 +93,7 @@ doBookOffers(Context const& context)
response["ledger_index"] = lgrInfo.seq;
response["offers"] = postProcessOrderBook(
offers, book, takerID, *context.backend, lgrInfo.seq);
offers, book, takerID, *context.backend, lgrInfo.seq, context.yield);
end = std::chrono::system_clock::now();

View File

@@ -145,8 +145,14 @@ doGatewayBalances(Context const& context)
}
return true;
};
traverseOwnedNodes(
*context.backend, *accountID, lgrInfo.seq, beast::zero, addToResponse);
*context.backend,
*accountID,
lgrInfo.seq,
beast::zero,
context.yield,
addToResponse);
if (!sums.empty())
{

View File

@@ -85,8 +85,8 @@ doLedger(Context const& context)
boost::json::array& jsonTxs = header.at("transactions").as_array();
if (expand)
{
auto txns =
context.backend->fetchAllTransactionsInLedger(lgrInfo.seq);
auto txns = context.backend->fetchAllTransactionsInLedger(
lgrInfo.seq, context.yield);
std::transform(
std::move_iterator(txns.begin()),
@@ -111,8 +111,8 @@ doLedger(Context const& context)
}
else
{
auto hashes =
context.backend->fetchAllTransactionHashesInLedger(lgrInfo.seq);
auto hashes = context.backend->fetchAllTransactionHashesInLedger(
lgrInfo.seq, context.yield);
std::transform(
std::move_iterator(hashes.begin()),
std::move_iterator(hashes.end()),
@@ -128,7 +128,8 @@ doLedger(Context const& context)
{
header["diff"] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonDiff = header.at("diff").as_array();
auto diff = context.backend->fetchLedgerDiff(lgrInfo.seq);
auto diff =
context.backend->fetchLedgerDiff(lgrInfo.seq, context.yield);
for (auto const& obj : diff)
{
boost::json::object entry;

View File

@@ -43,7 +43,7 @@ doLedgerData(Context const& context)
if (!request.at("limit").is_int64())
return Status{Error::rpcINVALID_PARAMS, "limitNotInteger"};
limit = value_to<int>(request.at("limit"));
limit = boost::json::value_to<int>(request.at("limit"));
}
std::optional<ripple::uint256> cursor;
@@ -67,7 +67,8 @@ doLedgerData(Context const& context)
Backend::LedgerPage page;
auto start = std::chrono::system_clock::now();
page = context.backend->fetchLedgerPage(cursor, lgrInfo.seq, limit);
page = context.backend->fetchLedgerPage(
cursor, lgrInfo.seq, limit, 0, context.yield);
auto end = std::chrono::system_clock::now();

View File

@@ -128,7 +128,8 @@ doLedgerEntry(Context const& context)
{
auto directory = request.at("directory").as_object();
std::uint64_t subIndex = directory.contains("sub_index")
? value_to<std::uint64_t>(directory.at("sub_index"))
? boost::json::value_to<std::uint64_t>(
directory.at("sub_index"))
: 0;
if (directory.contains("dir_root"))
@@ -242,7 +243,8 @@ doLedgerEntry(Context const& context)
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
else
{
std::uint32_t seq = value_to<std::uint32_t>(offer.at("seq"));
std::uint32_t seq =
boost::json::value_to<std::uint32_t>(offer.at("seq"));
key = ripple::keylet::offer(*id, seq).key;
}
}
@@ -342,7 +344,8 @@ doLedgerEntry(Context const& context)
}
auto start = std::chrono::system_clock::now();
auto dbResponse = context.backend->fetchLedgerObject(key, lgrInfo.seq);
auto dbResponse =
context.backend->fetchLedgerObject(key, lgrInfo.seq, context.yield);
auto end = std::chrono::system_clock::now();
auto time =
std::chrono::duration_cast<std::chrono::microseconds>(end - start)

View File

@@ -45,14 +45,15 @@ doNoRippleCheck(Context const& context)
return *status;
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
std::optional<ripple::Fees> fees =
includeTxs ? context.backend->fetchFees(lgrInfo.seq) : std::nullopt;
std::optional<ripple::Fees> fees = includeTxs
? context.backend->fetchFees(lgrInfo.seq, context.yield)
: std::nullopt;
boost::json::array transactions;
auto keylet = ripple::keylet::account(*accountID);
auto accountObj =
context.backend->fetchLedgerObject(keylet.key, lgrInfo.seq);
auto accountObj = context.backend->fetchLedgerObject(
keylet.key, lgrInfo.seq, context.yield);
if (!accountObj)
throw AccountNotFoundError(ripple::toBase58(*accountID));
@@ -90,6 +91,7 @@ doNoRippleCheck(Context const& context)
*accountID,
lgrInfo.seq,
{},
context.yield,
[roleGateway,
includeTxs,
&fees,

View File

@@ -30,12 +30,15 @@ doServerInfo(Context const& context)
info["counters"].as_object()["rpc"] = context.counters.report();
}
auto serverInfoRippled =
context.balancer->forwardToRippled(context.params, context.clientIp);
auto serverInfoRippled = context.balancer->forwardToRippled(
context.params, context.clientIp, context.yield);
if (serverInfoRippled && !serverInfoRippled->contains("error"))
response["info"].as_object()["load_factor"] = 1;
auto lgrInfo = context.backend->fetchLedgerBySequence(range->maxSequence);
auto lgrInfo = context.backend->fetchLedgerBySequence(
range->maxSequence, context.yield);
assert(lgrInfo.has_value());
auto age = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now().time_since_epoch())
@@ -46,7 +49,7 @@ doServerInfo(Context const& context)
validatedLgr["age"] = age;
validatedLgr["hash"] = ripple::strHex(lgrInfo->hash);
validatedLgr["seq"] = lgrInfo->seq;
auto fees = context.backend->fetchFees(lgrInfo->seq);
auto fees = context.backend->fetchFees(lgrInfo->seq, context.yield);
assert(fees.has_value());
validatedLgr["base_fee_xrp"] = fees->base.decimalXRP();
validatedLgr["reserve_base_xrp"] = fees->reserve.decimalXRP();

View File

@@ -35,6 +35,7 @@ validateStreams(boost::json::object const& request)
boost::json::object
subscribeToStreams(
boost::asio::yield_context& yield,
boost::json::object const& request,
std::shared_ptr<WsBase> session,
SubscriptionManager& manager)
@@ -47,7 +48,7 @@ subscribeToStreams(
std::string s = stream.as_string().c_str();
if (s == "ledger")
response = manager.subLedger(session);
response = manager.subLedger(yield, session);
else if (s == "transactions")
manager.subTransactions(session);
else if (s == "transactions_proposed")
@@ -207,6 +208,7 @@ unsubscribeToAccountsProposed(
std::variant<Status, std::pair<std::vector<ripple::Book>, boost::json::array>>
validateAndGetBooks(
boost::asio::yield_context& yield,
boost::json::object const& request,
std::shared_ptr<Backend::BackendInterface const> const& backend)
{
@@ -245,23 +247,29 @@ validateAndGetBooks(
takerID = std::get<ripple::AccountID>(parsed);
}
}
auto getOrderBook =
[&snapshot, &backend, &rng, &takerID](auto book) {
auto getOrderBook = [&snapshot, &backend, &rng, &takerID](
auto book,
boost::asio::yield_context& yield) {
auto bookBase = getBookBase(book);
auto [offers, retCursor, warning] =
backend->fetchBookOffers(
bookBase, rng->maxSequence, 200, {});
bookBase, rng->maxSequence, 200, {}, yield);
auto orderBook = postProcessOrderBook(
offers, book, takerID, *backend, rng->maxSequence);
offers,
book,
takerID,
*backend,
rng->maxSequence,
yield);
std::copy(
orderBook.begin(),
orderBook.end(),
std::back_inserter(snapshot));
};
getOrderBook(b);
getOrderBook(b, yield);
if (both)
getOrderBook(ripple::reversed(b));
getOrderBook(ripple::reversed(b), yield);
}
}
}
@@ -322,7 +330,8 @@ doSubscribe(Context const& context)
boost::json::array snapshot;
if (request.contains("books"))
{
auto parsed = validateAndGetBooks(request, context.backend);
auto parsed =
validateAndGetBooks(context.yield, request, context.backend);
if (auto status = std::get_if<Status>(&parsed))
return *status;
auto [bks, snap] =
@@ -335,7 +344,7 @@ doSubscribe(Context const& context)
boost::json::object response;
if (request.contains("streams"))
response = subscribeToStreams(
request, context.session, *context.subscriptions);
context.yield, request, context.session, *context.subscriptions);
if (request.contains("accounts"))
subscribeToAccounts(request, context.session, *context.subscriptions);

View File

@@ -16,7 +16,7 @@ doTransactionEntry(Context const& context)
if (!hash.parseHex(getRequiredString(context.params, "tx_hash")))
return Status{Error::rpcINVALID_PARAMS, "malformedTransaction"};
auto dbResponse = context.backend->fetchTransaction(hash);
auto dbResponse = context.backend->fetchTransaction(hash, context.yield);
// Note: transaction_entry is meant to only search a specified ledger for
// the specified transaction. tx searches the entire range of history. For
// rippled, having two separate commands made sense, as tx would use SQLite

View File

@@ -37,7 +37,7 @@ doTx(Context const& context)
if (!range)
return Status{Error::rpcNOT_READY};
auto dbResponse = context.backend->fetchTransaction(hash);
auto dbResponse = context.backend->fetchTransaction(hash, context.yield);
if (!dbResponse)
return Status{Error::rpcTXN_NOT_FOUND};

View File

@@ -118,17 +118,20 @@ getLedgerPubMessage(
}
boost::json::object
SubscriptionManager::subLedger(std::shared_ptr<WsBase>& session)
SubscriptionManager::subLedger(
boost::asio::yield_context& yield,
std::shared_ptr<WsBase>& session)
{
ledgerSubscribers_.subscribe(session);
auto ledgerRange = backend_->fetchLedgerRange();
assert(ledgerRange);
auto lgrInfo = backend_->fetchLedgerBySequence(ledgerRange->maxSequence);
auto lgrInfo =
backend_->fetchLedgerBySequence(ledgerRange->maxSequence, yield);
assert(lgrInfo);
std::optional<ripple::Fees> fees;
fees = backend_->fetchFees(lgrInfo->seq);
fees = backend_->fetchFees(lgrInfo->seq, yield);
assert(fees);
std::string range = std::to_string(ledgerRange->minSequence) + "-" +
@@ -232,10 +235,15 @@ SubscriptionManager::pubTransaction(
auto amount = tx->getFieldAmount(ripple::sfTakerGets);
if (account != amount.issue().account)
{
auto ownerFunds = Backend::retryOnTimeout([&]() {
return RPC::accountFunds(
*backend_, lgrInfo.seq, amount, account);
ripple::STAmount ownerFunds;
auto fetchFundsSynchronous = [&]() {
Backend::synchronous([&](boost::asio::yield_context yield) {
ownerFunds = RPC::accountFunds(
*backend_, lgrInfo.seq, amount, account, yield);
});
};
Backend::retryOnTimeout(fetchFundsSynchronous);
pubObj["transaction"].as_object()["owner_funds"] =
ownerFunds.getText();

View File

@@ -132,7 +132,9 @@ public:
}
boost::json::object
subLedger(std::shared_ptr<WsBase>& session);
subLedger(
boost::asio::yield_context& yield,
std::shared_ptr<WsBase>& session);
void
pubLedger(

View File

@@ -2,6 +2,7 @@
#define RIPPLE_REPORTING_HTTP_BASE_SESSION_H
#include <boost/asio/dispatch.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/http.hpp>
@@ -59,13 +60,188 @@ httpFail(boost::beast::error_code ec, char const* what)
std::cerr << what << ": " << ec.message() << "\n";
}
// From Boost Beast examples http_server_flex.cpp
template <class Derived>
class HttpBase
{
// Access the derived class, this is part of
// the Curiously Recurring Template Pattern idiom.
Derived&
derived()
{
return static_cast<Derived&>(*this);
}
struct send_lambda
{
HttpBase& self_;
explicit send_lambda(HttpBase& self) : self_(self)
{
}
template <bool isRequest, class Body, class Fields>
void
operator()(http::message<isRequest, Body, Fields>&& msg) const
{
// The lifetime of the message has to extend
// for the duration of the async operation so
// we use a shared_ptr to manage it.
auto sp = std::make_shared<http::message<isRequest, Body, Fields>>(
std::move(msg));
// Store a type-erased version of the shared
// pointer in the class to keep it alive.
self_.res_ = sp;
// Write the response
http::async_write(
self_.derived().stream(),
*sp,
boost::beast::bind_front_handler(
&HttpBase::on_write,
self_.derived().shared_from_this(),
sp->need_eof()));
}
};
boost::asio::io_context& ioc_;
http::request<http::string_body> req_;
std::shared_ptr<void> res_;
std::shared_ptr<BackendInterface const> backend_;
std::shared_ptr<SubscriptionManager> subscriptions_;
std::shared_ptr<ETLLoadBalancer> balancer_;
DOSGuard& dosGuard_;
RPC::Counters& counters_;
send_lambda lambda_;
protected:
boost::beast::flat_buffer buffer_;
public:
HttpBase(
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface const> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<ETLLoadBalancer> balancer,
DOSGuard& dosGuard,
RPC::Counters& counters,
boost::beast::flat_buffer buffer)
: ioc_(ioc)
, backend_(backend)
, subscriptions_(subscriptions)
, balancer_(balancer)
, dosGuard_(dosGuard)
, counters_(counters)
, lambda_(*this)
, buffer_(std::move(buffer))
{
}
void
do_read()
{
// Make the request empty before reading,
// otherwise the operation behavior is undefined.
req_ = {};
// Set the timeout.
boost::beast::get_lowest_layer(derived().stream())
.expires_after(std::chrono::seconds(30));
// Read a request
http::async_read(
derived().stream(),
buffer_,
req_,
boost::beast::bind_front_handler(
&HttpBase::on_read, derived().shared_from_this()));
}
void
on_read(boost::beast::error_code ec, std::size_t bytes_transferred)
{
boost::ignore_unused(bytes_transferred);
// This means they closed the connection
if (ec == http::error::end_of_stream)
return derived().do_close();
if (ec)
return httpFail(ec, "read");
if (boost::beast::websocket::is_upgrade(req_))
{
// Disable the timeout.
// The websocket::stream uses its own timeout settings.
boost::beast::get_lowest_layer(derived().stream()).expires_never();
return make_websocket_session(
ioc_,
derived().release_stream(),
std::move(req_),
std::move(buffer_),
backend_,
subscriptions_,
balancer_,
dosGuard_,
counters_);
}
auto ip = derived().ip();
auto session = derived().shared_from_this();
// Requests are handed using coroutines. Here we spawn a coroutine
// which will asynchronously handle a request.
boost::asio::spawn(
derived().stream().get_executor(),
[this, ip, session](boost::asio::yield_context yield) {
handle_request(
yield,
std::move(req_),
lambda_,
backend_,
balancer_,
dosGuard_,
counters_,
ip,
session);
});
}
void
on_write(
bool close,
boost::beast::error_code ec,
std::size_t bytes_transferred)
{
boost::ignore_unused(bytes_transferred);
if (ec)
return httpFail(ec, "write");
if (close)
{
// This means we should close the connection, usually because
// the response indicated the "Connection: close" semantic.
return derived().do_close();
}
// We're done with the response so delete it
res_ = nullptr;
// Read another request
do_read();
}
};
// This function produces an HTTP response for the given
// request. The type of the response object depends on the
// contents of the request, so the interface requires the
// caller to pass a generic lambda for receiving the response.
template <class Body, class Allocator, class Send>
template <class Body, class Allocator, class Send, class Session>
void
handle_request(
boost::asio::yield_context& yc,
boost::beast::http::
request<Body, boost::beast::http::basic_fields<Allocator>>&& req,
Send&& send,
@@ -73,7 +249,8 @@ handle_request(
std::shared_ptr<ETLLoadBalancer> balancer,
DOSGuard& dosGuard,
RPC::Counters& counters,
std::string const& ip)
std::string const& ip,
std::shared_ptr<Session> http)
{
auto const httpResponse = [&req](
http::status status,
@@ -126,6 +303,13 @@ handle_request(
RPC::make_error(RPC::Error::rpcBAD_SYNTAX))));
}
if (!dosGuard.isOk(ip))
return send(httpResponse(
http::status::ok,
"application/json",
boost::json::serialize(
RPC::make_error(RPC::Error::rpcSLOW_DOWN))));
auto range = backend->fetchLedgerRange();
if (!range)
return send(httpResponse(
@@ -135,7 +319,7 @@ handle_request(
RPC::make_error(RPC::Error::rpcNOT_READY))));
std::optional<RPC::Context> context = RPC::make_HttpContext(
request, backend, nullptr, balancer, *range, counters, ip);
yc, request, backend, nullptr, balancer, *range, counters, ip);
if (!context)
return send(httpResponse(
@@ -176,7 +360,8 @@ handle_request(
responseStr = boost::json::serialize(response);
}
dosGuard.add(ip, responseStr.size());
if (!dosGuard.add(ip, responseStr.size()))
result["warning"] = "Too many requests";
return send(
httpResponse(http::status::ok, "application/json", responseStr));
@@ -192,166 +377,4 @@ handle_request(
}
}
// From Boost Beast examples http_server_flex.cpp
template <class Derived>
class HttpBase
{
// Access the derived class, this is part of
// the Curiously Recurring Template Pattern idiom.
Derived&
derived()
{
return static_cast<Derived&>(*this);
}
struct send_lambda
{
HttpBase& self_;
explicit send_lambda(HttpBase& self) : self_(self)
{
}
template <bool isRequest, class Body, class Fields>
void
operator()(http::message<isRequest, Body, Fields>&& msg) const
{
// The lifetime of the message has to extend
// for the duration of the async operation so
// we use a shared_ptr to manage it.
auto sp = std::make_shared<http::message<isRequest, Body, Fields>>(
std::move(msg));
// Store a type-erased version of the shared
// pointer in the class to keep it alive.
self_.res_ = sp;
// Write the response
http::async_write(
self_.derived().stream(),
*sp,
boost::beast::bind_front_handler(
&HttpBase::on_write,
self_.derived().shared_from_this(),
sp->need_eof()));
}
};
http::request<http::string_body> req_;
std::shared_ptr<void> res_;
std::shared_ptr<BackendInterface const> backend_;
std::shared_ptr<SubscriptionManager> subscriptions_;
std::shared_ptr<ETLLoadBalancer> balancer_;
DOSGuard& dosGuard_;
RPC::Counters& counters_;
send_lambda lambda_;
protected:
boost::beast::flat_buffer buffer_;
public:
HttpBase(
std::shared_ptr<BackendInterface const> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<ETLLoadBalancer> balancer,
DOSGuard& dosGuard,
RPC::Counters& counters,
boost::beast::flat_buffer buffer)
: backend_(backend)
, subscriptions_(subscriptions)
, balancer_(balancer)
, dosGuard_(dosGuard)
, counters_(counters)
, lambda_(*this)
, buffer_(std::move(buffer))
{
}
void
do_read()
{
// Make the request empty before reading,
// otherwise the operation behavior is undefined.
req_ = {};
// Set the timeout.
boost::beast::get_lowest_layer(derived().stream())
.expires_after(std::chrono::seconds(30));
// Read a request
http::async_read(
derived().stream(),
buffer_,
req_,
boost::beast::bind_front_handler(
&HttpBase::on_read, derived().shared_from_this()));
}
void
on_read(boost::beast::error_code ec, std::size_t bytes_transferred)
{
boost::ignore_unused(bytes_transferred);
// This means they closed the connection
if (ec == http::error::end_of_stream)
return derived().do_close();
if (ec)
return httpFail(ec, "read");
if (boost::beast::websocket::is_upgrade(req_))
{
// Disable the timeout.
// The websocket::stream uses its own timeout settings.
boost::beast::get_lowest_layer(derived().stream()).expires_never();
return make_websocket_session(
derived().release_stream(),
std::move(req_),
std::move(buffer_),
backend_,
subscriptions_,
balancer_,
dosGuard_,
counters_);
}
auto ip = derived().ip();
// Send the response
handle_request(
std::move(req_),
lambda_,
backend_,
balancer_,
dosGuard_,
counters_,
ip);
}
void
on_write(
bool close,
boost::beast::error_code ec,
std::size_t bytes_transferred)
{
boost::ignore_unused(bytes_transferred);
if (ec)
return httpFail(ec, "write");
if (close)
{
// This means we should close the connection, usually because
// the response indicated the "Connection: close" semantic.
return derived().do_close();
}
// We're done with the response so delete it
res_ = nullptr;
// Read another request
do_read();
}
};
#endif // RIPPLE_REPORTING_HTTP_BASE_SESSION_H

View File

@@ -17,6 +17,7 @@ class HttpSession : public HttpBase<HttpSession>,
public:
// Take ownership of the socket
explicit HttpSession(
boost::asio::io_context& ioc,
tcp::socket&& socket,
std::shared_ptr<BackendInterface const> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
@@ -25,6 +26,7 @@ public:
RPC::Counters& counters,
boost::beast::flat_buffer buffer)
: HttpBase<HttpSession>(
ioc,
backend,
subscriptions,
balancer,

View File

@@ -21,6 +21,7 @@ class Detector
using std::enable_shared_from_this<
Detector<PlainSession, SslSession>>::shared_from_this;
boost::asio::io_context& ioc_;
boost::beast::tcp_stream stream_;
std::optional<std::reference_wrapper<ssl::context>> ctx_;
std::shared_ptr<BackendInterface const> backend_;
@@ -32,6 +33,7 @@ class Detector
public:
Detector(
boost::asio::io_context& ioc,
tcp::socket&& socket,
std::optional<std::reference_wrapper<ssl::context>> ctx,
std::shared_ptr<BackendInterface const> backend,
@@ -39,7 +41,8 @@ public:
std::shared_ptr<ETLLoadBalancer> balancer,
DOSGuard& dosGuard,
RPC::Counters& counters)
: stream_(std::move(socket))
: ioc_(ioc)
, stream_(std::move(socket))
, ctx_(ctx)
, backend_(backend)
, subscriptions_(subscriptions)
@@ -76,6 +79,7 @@ public:
return httpFail(ec, "ssl not supported by this server");
// Launch SSL session
std::make_shared<SslSession>(
ioc_,
stream_.release_socket(),
*ctx_,
backend_,
@@ -90,6 +94,7 @@ public:
// Launch plain session
std::make_shared<PlainSession>(
ioc_,
stream_.release_socket(),
backend_,
subscriptions_,
@@ -103,6 +108,7 @@ public:
void
make_websocket_session(
boost::asio::io_context& ioc,
boost::beast::tcp_stream stream,
http::request<http::string_body> req,
boost::beast::flat_buffer buffer,
@@ -113,6 +119,7 @@ make_websocket_session(
RPC::Counters& counters)
{
std::make_shared<WsUpgrader>(
ioc,
std::move(stream),
backend,
subscriptions,
@@ -126,6 +133,7 @@ make_websocket_session(
void
make_websocket_session(
boost::asio::io_context& ioc,
boost::beast::ssl_stream<boost::beast::tcp_stream> stream,
http::request<http::string_body> req,
boost::beast::flat_buffer buffer,
@@ -136,6 +144,7 @@ make_websocket_session(
RPC::Counters& counters)
{
std::make_shared<SslWsUpgrader>(
ioc,
std::move(stream),
backend,
subscriptions,
@@ -154,7 +163,7 @@ class Listener
using std::enable_shared_from_this<
Listener<PlainSession, SslSession>>::shared_from_this;
net::io_context& ioc_;
boost::asio::io_context& ioc_;
std::optional<std::reference_wrapper<ssl::context>> ctx_;
tcp::acceptor acceptor_;
std::shared_ptr<BackendInterface const> backend_;
@@ -165,7 +174,7 @@ class Listener
public:
Listener(
net::io_context& ioc,
boost::asio::io_context& ioc,
std::optional<std::reference_wrapper<ssl::context>> ctx,
tcp::endpoint endpoint,
std::shared_ptr<BackendInterface const> backend,
@@ -248,6 +257,7 @@ private:
: std::nullopt;
// Create the detector session and run it
std::make_shared<Detector<PlainSession, SslSession>>(
ioc_,
std::move(socket),
ctxRef,
backend_,

View File

@@ -30,6 +30,7 @@ class PlainWsSession : public WsSession<PlainWsSession>
public:
// Take ownership of the socket
explicit PlainWsSession(
boost::asio::io_context& ioc,
boost::asio::ip::tcp::socket&& socket,
std::shared_ptr<BackendInterface const> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
@@ -38,6 +39,7 @@ public:
RPC::Counters& counters,
boost::beast::flat_buffer&& buffer)
: WsSession(
ioc,
backend,
subscriptions,
balancer,
@@ -70,6 +72,7 @@ public:
class WsUpgrader : public std::enable_shared_from_this<WsUpgrader>
{
boost::asio::io_context& ioc_;
boost::beast::tcp_stream http_;
boost::optional<http::request_parser<http::string_body>> parser_;
boost::beast::flat_buffer buffer_;
@@ -82,6 +85,7 @@ class WsUpgrader : public std::enable_shared_from_this<WsUpgrader>
public:
WsUpgrader(
boost::asio::io_context& ioc,
boost::asio::ip::tcp::socket&& socket,
std::shared_ptr<BackendInterface const> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
@@ -89,7 +93,8 @@ public:
DOSGuard& dosGuard,
RPC::Counters& counters,
boost::beast::flat_buffer&& b)
: http_(std::move(socket))
: ioc_(ioc)
, http_(std::move(socket))
, backend_(backend)
, subscriptions_(subscriptions)
, balancer_(balancer)
@@ -99,6 +104,7 @@ public:
{
}
WsUpgrader(
boost::asio::io_context& ioc,
boost::beast::tcp_stream&& stream,
std::shared_ptr<BackendInterface const> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
@@ -107,7 +113,8 @@ public:
RPC::Counters& counters,
boost::beast::flat_buffer&& b,
http::request<http::string_body> req)
: http_(std::move(stream))
: ioc_(ioc)
, http_(std::move(stream))
, backend_(backend)
, subscriptions_(subscriptions)
, balancer_(balancer)
@@ -161,6 +168,7 @@ private:
boost::beast::get_lowest_layer(http_).expires_never();
std::make_shared<PlainWsSession>(
ioc_,
http_.release_socket(),
backend_,
subscriptions_,

View File

@@ -17,6 +17,7 @@ class SslHttpSession : public HttpBase<SslHttpSession>,
public:
// Take ownership of the socket
explicit SslHttpSession(
boost::asio::io_context& ioc,
tcp::socket&& socket,
ssl::context& ctx,
std::shared_ptr<BackendInterface const> backend,
@@ -26,6 +27,7 @@ public:
RPC::Counters& counters,
boost::beast::flat_buffer buffer)
: HttpBase<SslHttpSession>(
ioc,
backend,
subscriptions,
balancer,

View File

@@ -28,6 +28,7 @@ class SslWsSession : public WsSession<SslWsSession>
public:
// Take ownership of the socket
explicit SslWsSession(
boost::asio::io_context& ioc,
boost::beast::ssl_stream<boost::beast::tcp_stream>&& stream,
std::shared_ptr<BackendInterface const> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
@@ -36,6 +37,7 @@ public:
RPC::Counters& counters,
boost::beast::flat_buffer&& b)
: WsSession(
ioc,
backend,
subscriptions,
balancer,
@@ -66,6 +68,7 @@ public:
class SslWsUpgrader : public std::enable_shared_from_this<SslWsUpgrader>
{
boost::asio::io_context& ioc_;
boost::beast::ssl_stream<boost::beast::tcp_stream> https_;
boost::optional<http::request_parser<http::string_body>> parser_;
boost::beast::flat_buffer buffer_;
@@ -78,6 +81,7 @@ class SslWsUpgrader : public std::enable_shared_from_this<SslWsUpgrader>
public:
SslWsUpgrader(
boost::asio::io_context& ioc,
boost::asio::ip::tcp::socket&& socket,
ssl::context& ctx,
std::shared_ptr<BackendInterface const> backend,
@@ -86,7 +90,8 @@ public:
DOSGuard& dosGuard,
RPC::Counters& counters,
boost::beast::flat_buffer&& b)
: https_(std::move(socket), ctx)
: ioc_(ioc)
, https_(std::move(socket), ctx)
, backend_(backend)
, subscriptions_(subscriptions)
, balancer_(balancer)
@@ -96,6 +101,7 @@ public:
{
}
SslWsUpgrader(
boost::asio::io_context& ioc,
boost::beast::ssl_stream<boost::beast::tcp_stream> stream,
std::shared_ptr<BackendInterface const> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
@@ -104,7 +110,8 @@ public:
RPC::Counters& counters,
boost::beast::flat_buffer&& b,
http::request<http::string_body> req)
: https_(std::move(stream))
: ioc_(ioc)
, https_(std::move(stream))
, backend_(backend)
, subscriptions_(subscriptions)
, balancer_(balancer)
@@ -173,6 +180,7 @@ private:
boost::beast::get_lowest_layer(https_).expires_never();
std::make_shared<SslWsSession>(
ioc_,
std::move(https_),
backend_,
subscriptions_,

View File

@@ -80,6 +80,7 @@ class WsSession : public WsBase,
boost::beast::flat_buffer buffer_;
boost::asio::io_context& ioc_;
std::shared_ptr<BackendInterface const> backend_;
// has to be a weak ptr because SubscriptionManager maintains collections
// of std::shared_ptr<WsBase> objects. If this were shared, there would be
@@ -93,13 +94,15 @@ class WsSession : public WsBase,
public:
explicit WsSession(
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface const> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<ETLLoadBalancer> balancer,
DOSGuard& dosGuard,
RPC::Counters& counters,
boost::beast::flat_buffer&& buffer)
: backend_(backend)
: ioc_(ioc)
, backend_(backend)
, subscriptions_(subscriptions)
, balancer_(balancer)
, dosGuard_(dosGuard)
@@ -205,23 +208,9 @@ public:
}
void
on_read(boost::beast::error_code ec, std::size_t bytes_transferred)
{
boost::ignore_unused(bytes_transferred);
if (ec)
return wsFail(ec, "read");
std::string msg{
static_cast<char const*>(buffer_.data().data()), buffer_.size()};
boost::json::object response;
auto ip = derived().ip();
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " received request from ip = " << ip;
if (!dosGuard_.isOk(ip))
response["error"] = "Too many requests. Slow down";
else
handle_request(std::string const&& msg, boost::asio::yield_context& yc)
{
boost::json::object response = {};
auto sendError = [this](auto error) {
send(boost::json::serialize(RPC::make_error(error)));
};
@@ -237,6 +226,7 @@ public:
return sendError(RPC::Error::rpcNOT_READY);
std::optional<RPC::Context> context = RPC::make_WsContext(
yc,
request,
backend_,
subscriptions_.lock(),
@@ -244,46 +234,43 @@ public:
shared_from_this(),
*range,
counters_,
ip);
derived().ip());
if (!context)
return sendError(RPC::Error::rpcBAD_SYNTAX);
auto id =
request.contains("id") ? request.at("id") : nullptr;
auto id = request.contains("id") ? request.at("id") : nullptr;
response = getDefaultWsResponse(id);
boost::json::object& result =
response["result"].as_object();
boost::json::object& result = response["result"].as_object();
auto start = std::chrono::system_clock::now();
auto v = RPC::buildResponse(*context);
auto end = std::chrono::system_clock::now();
auto us =
std::chrono::duration_cast<std::chrono::microseconds>(
auto us = std::chrono::duration_cast<std::chrono::microseconds>(
end - start);
if (auto status = std::get_if<RPC::Status>(&v))
{
counters_.rpcErrored(context->method);
auto error = RPC::make_error(*status);
if (!id.is_null())
error["id"] = id;
error["request"] = request;
result = error;
response = error;
}
else
{
counters_.rpcComplete(context->method, us);
result = std::get<boost::json::object>(v);
}
}
catch (Backend::DatabaseTimeout const& t)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " Database timeout";
// TODO this should be a diff error code. Rippled probably
// does not have an analagous error code
return sendError(RPC::Error::rpcNOT_READY);
}
}
@@ -294,13 +281,45 @@ public:
return sendError(RPC::Error::rpcINTERNAL);
}
}
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : " << boost::json::serialize(response);
std::string responseStr = boost::json::serialize(response);
dosGuard_.add(derived().ip(), responseStr.size());
send(std::move(responseStr));
}
void
on_read(boost::beast::error_code ec, std::size_t bytes_transferred)
{
boost::ignore_unused(bytes_transferred);
if (ec)
return wsFail(ec, "read");
std::string msg{
static_cast<char const*>(buffer_.data().data()), buffer_.size()};
auto ip = derived().ip();
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " received request from ip = " << ip;
if (!dosGuard_.isOk(ip))
{
boost::json::object response;
response["error"] = "Too many requests. Slow down";
std::string responseStr = boost::json::serialize(response);
BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << responseStr;
dosGuard_.add(ip, responseStr.size());
send(std::move(responseStr));
}
else
{
boost::asio::spawn(
ioc_,
[m = std::move(msg), this](boost::asio::yield_context yc) {
handle_request(std::move(m), yc);
});
}
do_read();
}
};

File diff suppressed because it is too large Load Diff