Implement logging abstraction (#371)

Fixes #290
This commit is contained in:
Alex Kremer
2022-11-17 23:02:16 +01:00
committed by GitHub
parent 4b8dd7b981
commit a47bf2e8fe
38 changed files with 1441 additions and 1003 deletions

View File

@@ -11,6 +11,7 @@ if(NOT googletest_POPULATED)
endif() endif()
target_link_libraries(clio_tests PUBLIC clio gtest_main) target_link_libraries(clio_tests PUBLIC clio gtest_main)
target_include_directories(clio_tests PRIVATE unittests)
enable_testing() enable_testing()

View File

@@ -1 +1 @@
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing -Wall -Werror -Wno-dangling-else") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Werror -Wno-narrowing -Wno-dangling-else -Wno-deprecated-declarations")

View File

@@ -99,10 +99,11 @@ target_sources(clio PRIVATE
src/rpc/handlers/Subscribe.cpp src/rpc/handlers/Subscribe.cpp
# Server # Server
src/rpc/handlers/ServerInfo.cpp src/rpc/handlers/ServerInfo.cpp
# Utility # Utilities
src/rpc/handlers/Random.cpp src/rpc/handlers/Random.cpp
src/util/Taggable.cpp src/config/Config.cpp
src/config/Config.cpp) src/log/Logger.cpp
src/util/Taggable.cpp)
add_executable(clio_server src/main/main.cpp) add_executable(clio_server src/main/main.cpp)
target_link_libraries(clio_server PUBLIC clio) target_link_libraries(clio_server PUBLIC clio)
@@ -110,8 +111,9 @@ target_link_libraries(clio_server PUBLIC clio)
if(BUILD_TESTS) if(BUILD_TESTS)
add_executable(clio_tests add_executable(clio_tests
unittests/RPCErrors.cpp unittests/RPCErrors.cpp
unittests/config.cpp unittests/Backend.cpp
unittests/main.cpp) unittests/Logger.cpp
unittests/Config.cpp)
include(CMake/deps/gtest.cmake) include(CMake/deps/gtest.cmake)
endif() endif()

View File

@@ -1,9 +1,7 @@
{ {
"database": "database": {
{
"type": "cassandra", "type": "cassandra",
"cassandra": "cassandra": {
{
"contact_points": "127.0.0.1", "contact_points": "127.0.0.1",
"port": 9042, "port": 9042,
"keyspace": "clio", "keyspace": "clio",
@@ -14,8 +12,7 @@
"threads": 8 "threads": 8
} }
}, },
"etl_sources": "etl_sources": [
[
{ {
"ip": "127.0.0.1", "ip": "127.0.0.1",
"ws_port": "6006", "ws_port": "6006",
@@ -34,7 +31,34 @@
"ip": "0.0.0.0", "ip": "0.0.0.0",
"port": 51233 "port": 51233
}, },
"log_level":"debug", "log_channels": [
{
"channel": "Backend",
"log_level": "fatal"
},
{
"channel": "WebServer",
"log_level": "info"
},
{
"channel": "Subscriptions",
"log_level": "info"
},
{
"channel": "RPC",
"log_level": "error"
},
{
"channel": "ETL",
"log_level": "debug"
},
{
"channel": "Performance",
"log_level": "trace"
}
],
"log_level": "info",
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%", // This is the default format
"log_to_console": true, "log_to_console": true,
"log_directory": "./clio_log", "log_directory": "./clio_log",
"log_rotation_size": 2048, "log_rotation_size": 2048,

View File

@@ -1,16 +1,19 @@
#ifndef RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED #ifndef RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED
#define RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED #define RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED
#include <boost/algorithm/string.hpp>
#include <backend/BackendInterface.h> #include <backend/BackendInterface.h>
#include <backend/CassandraBackend.h> #include <backend/CassandraBackend.h>
#include <config/Config.h> #include <config/Config.h>
#include <log/Logger.h>
#include <boost/algorithm/string.hpp>
namespace Backend { namespace Backend {
std::shared_ptr<BackendInterface> std::shared_ptr<BackendInterface>
make_Backend(boost::asio::io_context& ioc, clio::Config const& config) make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
{ {
BOOST_LOG_TRIVIAL(info) << __func__ << ": Constructing BackendInterface"; static clio::Logger log{"Backend"};
log.info() << "Constructing BackendInterface";
auto readOnly = config.valueOr("read_only", false); auto readOnly = config.valueOr("read_only", false);
auto type = config.value<std::string>("database.type"); auto type = config.value<std::string>("database.type");
@@ -34,8 +37,7 @@ make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
backend->updateRange(rng->maxSequence); backend->updateRange(rng->maxSequence);
} }
BOOST_LOG_TRIVIAL(info) log.info() << "Constructed BackendInterface Successfully";
<< __func__ << ": Constructed BackendInterface Successfully";
return backend; return backend;
} }

View File

@@ -1,6 +1,15 @@
#include <ripple/protocol/Indexes.h> #include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h> #include <ripple/protocol/STLedgerEntry.h>
#include <backend/BackendInterface.h> #include <backend/BackendInterface.h>
#include <log/Logger.h>
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"Backend"};
} // namespace
namespace Backend { namespace Backend {
bool bool
BackendInterface::finishWrites(std::uint32_t const ledgerSequence) BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
@@ -26,7 +35,7 @@ std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow( BackendInterface::hardFetchLedgerRangeNoThrow(
boost::asio::yield_context& yield) const boost::asio::yield_context& yield) const
{ {
BOOST_LOG_TRIVIAL(trace) << __func__ << "(yield)"; gLog.trace() << "called";
while (true) while (true)
{ {
try try
@@ -43,7 +52,7 @@ BackendInterface::hardFetchLedgerRangeNoThrow(
std::optional<LedgerRange> std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow() const BackendInterface::hardFetchLedgerRangeNoThrow() const
{ {
BOOST_LOG_TRIVIAL(trace) << __func__ << "()"; gLog.trace() << "called";
return retryOnTimeout([&]() { return hardFetchLedgerRange(); }); return retryOnTimeout([&]() { return hardFetchLedgerRange(); });
} }
@@ -57,21 +66,17 @@ BackendInterface::fetchLedgerObject(
auto obj = cache_.get(key, sequence); auto obj = cache_.get(key, sequence);
if (obj) if (obj)
{ {
BOOST_LOG_TRIVIAL(trace) gLog.trace() << "Cache hit - " << ripple::strHex(key);
<< __func__ << " - cache hit - " << ripple::strHex(key);
return *obj; return *obj;
} }
else else
{ {
BOOST_LOG_TRIVIAL(trace) gLog.trace() << "Cache miss - " << ripple::strHex(key);
<< __func__ << " - cache miss - " << ripple::strHex(key);
auto dbObj = doFetchLedgerObject(key, sequence, yield); auto dbObj = doFetchLedgerObject(key, sequence, yield);
if (!dbObj) if (!dbObj)
BOOST_LOG_TRIVIAL(trace) gLog.trace() << "Missed cache and missed in db";
<< __func__ << " - missed cache and missed in db";
else else
BOOST_LOG_TRIVIAL(trace) gLog.trace() << "Missed cache but found in db";
<< __func__ << " - missed cache but found in db";
return dbObj; return dbObj;
} }
} }
@@ -93,8 +98,7 @@ BackendInterface::fetchLedgerObjects(
else else
misses.push_back(keys[i]); misses.push_back(keys[i]);
} }
BOOST_LOG_TRIVIAL(trace) gLog.trace() << "Cache hits = " << keys.size() - misses.size()
<< __func__ << " - cache hits = " << keys.size() - misses.size()
<< " - cache misses = " << misses.size(); << " - cache misses = " << misses.size();
if (misses.size()) if (misses.size())
@@ -121,11 +125,9 @@ BackendInterface::fetchSuccessorKey(
{ {
auto succ = cache_.getSuccessor(key, ledgerSequence); auto succ = cache_.getSuccessor(key, ledgerSequence);
if (succ) if (succ)
BOOST_LOG_TRIVIAL(trace) gLog.trace() << "Cache hit - " << ripple::strHex(key);
<< __func__ << " - cache hit - " << ripple::strHex(key);
else else
BOOST_LOG_TRIVIAL(trace) gLog.trace() << "Cache miss - " << ripple::strHex(key);
<< __func__ << " - cache miss - " << ripple::strHex(key);
return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield); return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
} }
@@ -179,8 +181,8 @@ BackendInterface::fetchBookOffers(
succMillis += getMillis(mid2 - mid1); succMillis += getMillis(mid2 - mid1);
if (!offerDir || offerDir->key >= bookEnd) if (!offerDir || offerDir->key >= bookEnd)
{ {
BOOST_LOG_TRIVIAL(trace) << __func__ << " - offerDir.has_value() " gLog.trace() << "offerDir.has_value() " << offerDir.has_value()
<< offerDir.has_value() << " breaking"; << " breaking";
break; break;
} }
uTipIndex = offerDir->key; uTipIndex = offerDir->key;
@@ -196,8 +198,7 @@ BackendInterface::fetchBookOffers(
auto next = sle.getFieldU64(ripple::sfIndexNext); auto next = sle.getFieldU64(ripple::sfIndexNext);
if (!next) if (!next)
{ {
BOOST_LOG_TRIVIAL(trace) gLog.trace() << "Next is empty. breaking";
<< __func__ << " next is empty. breaking";
break; break;
} }
auto nextKey = ripple::keylet::page(uTipIndex, next); auto nextKey = ripple::keylet::page(uTipIndex, next);
@@ -214,23 +215,21 @@ BackendInterface::fetchBookOffers(
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield); auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < keys.size() && i < limit; ++i) for (size_t i = 0; i < keys.size() && i < limit; ++i)
{ {
BOOST_LOG_TRIVIAL(trace) gLog.trace() << "Key = " << ripple::strHex(keys[i])
<< __func__ << " key = " << ripple::strHex(keys[i])
<< " blob = " << ripple::strHex(objs[i]) << " blob = " << ripple::strHex(objs[i])
<< " ledgerSequence = " << ledgerSequence; << " ledgerSequence = " << ledgerSequence;
assert(objs[i].size()); assert(objs[i].size());
page.offers.push_back({keys[i], objs[i]}); page.offers.push_back({keys[i], objs[i]});
} }
auto end = std::chrono::system_clock::now(); auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug) gLog.debug() << "Fetching " << std::to_string(keys.size())
<< __func__ << " " << " offers took " << std::to_string(getMillis(mid - begin))
<< "Fetching " << std::to_string(keys.size()) << " offers took "
<< std::to_string(getMillis(mid - begin))
<< " milliseconds. Fetching next dir took " << " milliseconds. Fetching next dir took "
<< std::to_string(succMillis) << " milliseonds. Fetched next dir " << std::to_string(succMillis)
<< std::to_string(numSucc) << " times" << " milliseonds. Fetched next dir " << std::to_string(numSucc)
<< " Fetching next page of dir took " << std::to_string(pageMillis) << " times"
<< " milliseconds" << " Fetching next page of dir took "
<< std::to_string(pageMillis) << " milliseconds"
<< ". num pages = " << std::to_string(numPages) << ". num pages = " << std::to_string(numPages)
<< ". Fetching all objects took " << ". Fetching all objects took "
<< std::to_string(getMillis(end - mid)) << std::to_string(getMillis(end - mid))
@@ -274,16 +273,15 @@ BackendInterface::fetchLedgerPage(
page.objects.push_back({std::move(keys[i]), std::move(objects[i])}); page.objects.push_back({std::move(keys[i]), std::move(objects[i])});
else if (!outOfOrder) else if (!outOfOrder)
{ {
BOOST_LOG_TRIVIAL(error) gLog.error()
<< __func__ << "Deleted or non-existent object in successor table. key = "
<< " deleted or non-existent object in successor table. key = "
<< ripple::strHex(keys[i]) << " - seq = " << ledgerSequence; << ripple::strHex(keys[i]) << " - seq = " << ledgerSequence;
std::stringstream msg; std::stringstream msg;
for (size_t j = 0; j < objects.size(); ++j) for (size_t j = 0; j < objects.size(); ++j)
{ {
msg << " - " << ripple::strHex(keys[j]); msg << " - " << ripple::strHex(keys[j]);
} }
BOOST_LOG_TRIVIAL(error) << __func__ << msg.str(); gLog.error() << msg.str();
} }
} }
if (keys.size() && !reachedEnd) if (keys.size() && !reachedEnd)
@@ -304,7 +302,7 @@ BackendInterface::fetchFees(
if (!bytes) if (!bytes)
{ {
BOOST_LOG_TRIVIAL(error) << __func__ << " - could not find fees"; gLog.error() << "Could not find fees";
return {}; return {};
} }

View File

@@ -1,17 +1,15 @@
#ifndef RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED #ifndef RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED
#define RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED #define RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <boost/log/trivial.hpp>
#include <ripple/ledger/ReadView.h> #include <ripple/ledger/ReadView.h>
#include <backend/DBHelpers.h> #include <backend/DBHelpers.h>
#include <backend/SimpleCache.h> #include <backend/SimpleCache.h>
#include <backend/Types.h> #include <backend/Types.h>
#include <config/Config.h> #include <config/Config.h>
#include <log/Logger.h>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <thread> #include <thread>
#include <type_traits> #include <type_traits>
@@ -46,6 +44,8 @@ template <class F>
auto auto
retryOnTimeout(F func, size_t waitMs = 500) retryOnTimeout(F func, size_t waitMs = 500)
{ {
static clio::Logger log{"Backend"};
while (true) while (true)
{ {
try try
@@ -54,8 +54,7 @@ retryOnTimeout(F func, size_t waitMs = 500)
} }
catch (DatabaseTimeout& t) catch (DatabaseTimeout& t)
{ {
BOOST_LOG_TRIVIAL(error) log.error()
<< __func__
<< "Database request timed out. Sleeping and retrying ... "; << "Database request timed out. Sleeping and retrying ... ";
std::this_thread::sleep_for(std::chrono::milliseconds(waitMs)); std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
} }

View File

@@ -1,9 +1,13 @@
#include <ripple/app/tx/impl/details/NFTokenUtils.h> #include <ripple/app/tx/impl/details/NFTokenUtils.h>
#include <backend/CassandraBackend.h> #include <backend/CassandraBackend.h>
#include <backend/DBHelpers.h> #include <backend/DBHelpers.h>
#include <log/Logger.h>
#include <functional> #include <functional>
#include <unordered_map> #include <unordered_map>
using namespace clio;
namespace Backend { namespace Backend {
// Type alias for async completion handlers // Type alias for async completion handlers
@@ -16,6 +20,8 @@ template <class T, class F>
void void
processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func) processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func)
{ {
static clio::Logger log{"Backend"};
CassandraBackend const& backend = *requestParams.backend; CassandraBackend const& backend = *requestParams.backend;
auto rc = cass_future_error_code(fut); auto rc = cass_future_error_code(fut);
if (rc != CASS_OK) if (rc != CASS_OK)
@@ -23,9 +29,9 @@ processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func)
// exponential backoff with a max wait of 2^10 ms (about 1 second) // exponential backoff with a max wait of 2^10 ms (about 1 second)
auto wait = std::chrono::milliseconds( auto wait = std::chrono::milliseconds(
lround(std::pow(2, std::min(10u, requestParams.currentRetries)))); lround(std::pow(2, std::min(10u, requestParams.currentRetries))));
BOOST_LOG_TRIVIAL(error) log.error() << "ERROR!!! Cassandra write error: " << rc << ", "
<< "ERROR!!! Cassandra write error: " << rc << ", " << cass_error_desc(rc)
<< cass_error_desc(rc) << " id= " << requestParams.toString() << " id= " << requestParams.toString()
<< ", current retries " << requestParams.currentRetries << ", current retries " << requestParams.currentRetries
<< ", retrying in " << wait.count() << " milliseconds"; << ", retrying in " << wait.count() << " milliseconds";
++requestParams.currentRetries; ++requestParams.currentRetries;
@@ -40,11 +46,11 @@ processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func)
} }
else else
{ {
BOOST_LOG_TRIVIAL(trace) log.trace() << "Succesfully inserted a record";
<< __func__ << " Succesfully inserted a record";
requestParams.finish(); requestParams.finish();
} }
} }
template <class T> template <class T>
void void
processAsyncWrite(CassFuture* fut, void* cbData) processAsyncWrite(CassFuture* fut, void* cbData)
@@ -155,6 +161,7 @@ makeAndExecuteAsyncWrite(
auto* cb = new WriteCallbackData<T, B>(b, std::move(d), bind, id); auto* cb = new WriteCallbackData<T, B>(b, std::move(d), bind, id);
cb->start(); cb->start();
} }
template <class T, class B> template <class T, class B>
std::shared_ptr<BulkWriteCallbackData<T, B>> std::shared_ptr<BulkWriteCallbackData<T, B>>
makeAndExecuteBulkAsyncWrite( makeAndExecuteBulkAsyncWrite(
@@ -170,13 +177,14 @@ makeAndExecuteBulkAsyncWrite(
cb->start(); cb->start();
return cb; return cb;
} }
void void
CassandraBackend::doWriteLedgerObject( CassandraBackend::doWriteLedgerObject(
std::string&& key, std::string&& key,
std::uint32_t const seq, std::uint32_t const seq,
std::string&& blob) std::string&& blob)
{ {
BOOST_LOG_TRIVIAL(trace) << "Writing ledger object to cassandra"; log_.trace() << "Writing ledger object to cassandra";
if (range) if (range)
makeAndExecuteAsyncWrite( makeAndExecuteAsyncWrite(
this, this,
@@ -204,15 +212,16 @@ CassandraBackend::doWriteLedgerObject(
}, },
"ledger_object"); "ledger_object");
} }
void void
CassandraBackend::writeSuccessor( CassandraBackend::writeSuccessor(
std::string&& key, std::string&& key,
std::uint32_t const seq, std::uint32_t const seq,
std::string&& successor) std::string&& successor)
{ {
BOOST_LOG_TRIVIAL(trace) log_.trace() << "Writing successor. key = " << key.size() << " bytes. "
<< "Writing successor. key = " << key << " seq = " << std::to_string(seq)
<< " seq = " << std::to_string(seq) << " successor = " << successor; << " successor = " << successor.size() << " bytes.";
assert(key.size() != 0); assert(key.size() != 0);
assert(successor.size() != 0); assert(successor.size() != 0);
makeAndExecuteAsyncWrite( makeAndExecuteAsyncWrite(
@@ -319,7 +328,7 @@ CassandraBackend::writeTransaction(
std::string&& transaction, std::string&& transaction,
std::string&& metadata) std::string&& metadata)
{ {
BOOST_LOG_TRIVIAL(trace) << "Writing txn to cassandra"; log_.trace() << "Writing txn to cassandra";
std::string hashCpy = hash; std::string hashCpy = hash;
makeAndExecuteAsyncWrite( makeAndExecuteAsyncWrite(
@@ -393,13 +402,13 @@ CassandraBackend::writeNFTs(std::vector<NFTsData>&& data)
std::optional<LedgerRange> std::optional<LedgerRange>
CassandraBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const CassandraBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const
{ {
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra"; log_.trace() << "Fetching from cassandra";
CassandraStatement statement{selectLedgerRange_}; CassandraStatement statement{selectLedgerRange_};
CassandraResult result = executeAsyncRead(statement, yield); CassandraResult result = executeAsyncRead(statement, yield);
if (!result) if (!result)
{ {
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows"; log_.error() << "No rows";
return {}; return {};
} }
LedgerRange range; LedgerRange range;
@@ -528,9 +537,10 @@ CassandraBackend::fetchTransactions(
throw DatabaseTimeout(); throw DatabaseTimeout();
} }
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Fetched " << numHashes
<< "Fetched " << numHashes << " transactions from Cassandra in " << " transactions from Cassandra in "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start) << std::chrono::duration_cast<std::chrono::milliseconds>(
end - start)
.count() .count()
<< " milliseconds"; << " milliseconds";
return results; return results;
@@ -550,9 +560,7 @@ CassandraBackend::fetchAllTransactionHashesInLedger(
auto end = std::chrono::system_clock::now(); auto end = std::chrono::system_clock::now();
if (!result) if (!result)
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "No rows. Ledger = " << std::to_string(ledgerSequence);
<< __func__
<< " - no rows . ledger = " << std::to_string(ledgerSequence);
return {}; return {};
} }
std::vector<ripple::uint256> hashes; std::vector<ripple::uint256> hashes;
@@ -560,10 +568,10 @@ CassandraBackend::fetchAllTransactionHashesInLedger(
{ {
hashes.push_back(result.getUInt256()); hashes.push_back(result.getUInt256());
} while (result.nextRow()); } while (result.nextRow());
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Fetched " << hashes.size()
<< "Fetched " << hashes.size()
<< " transaction hashes from Cassandra in " << " transaction hashes from Cassandra in "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start) << std::chrono::duration_cast<std::chrono::milliseconds>(
end - start)
.count() .count()
<< " milliseconds"; << " milliseconds";
return hashes; return hashes;
@@ -613,9 +621,9 @@ CassandraBackend::fetchNFTTransactions(
{ {
statement.bindNextIntTuple( statement.bindNextIntTuple(
cursor->ledgerSequence, cursor->transactionIndex); cursor->ledgerSequence, cursor->transactionIndex);
BOOST_LOG_TRIVIAL(debug) << " token_id = " << ripple::strHex(tokenID) log_.debug() << "token_id = " << ripple::strHex(tokenID)
<< " tuple = " << cursor->ledgerSequence << " tuple = " << cursor->ledgerSequence
<< " : " << cursor->transactionIndex; << cursor->transactionIndex;
} }
else else
{ {
@@ -624,9 +632,8 @@ CassandraBackend::fetchNFTTransactions(
forward ? 0 : std::numeric_limits<std::uint32_t>::max(); forward ? 0 : std::numeric_limits<std::uint32_t>::max();
statement.bindNextIntTuple(placeHolder, placeHolder); statement.bindNextIntTuple(placeHolder, placeHolder);
BOOST_LOG_TRIVIAL(debug) log_.debug() << "token_id = " << ripple::strHex(tokenID)
<< " token_id = " << ripple::strHex(tokenID) << " idx = " << seq << " idx = " << seq << " tuple = " << placeHolder;
<< " tuple = " << placeHolder;
} }
statement.bindNextUInt(limit); statement.bindNextUInt(limit);
@@ -635,19 +642,19 @@ CassandraBackend::fetchNFTTransactions(
if (!result.hasResult()) if (!result.hasResult())
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned"; log_.debug() << "No rows returned";
return {}; return {};
} }
std::vector<ripple::uint256> hashes = {}; std::vector<ripple::uint256> hashes = {};
auto numRows = result.numRows(); auto numRows = result.numRows();
BOOST_LOG_TRIVIAL(info) << "num_rows = " << numRows; log_.info() << "num_rows = " << numRows;
do do
{ {
hashes.push_back(result.getUInt256()); hashes.push_back(result.getUInt256());
if (--numRows == 0) if (--numRows == 0)
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " setting cursor"; log_.debug() << "Setting cursor";
auto const [lgrSeq, txnIdx] = result.getInt64Tuple(); auto const [lgrSeq, txnIdx] = result.getInt64Tuple();
cursor = { cursor = {
static_cast<std::uint32_t>(lgrSeq), static_cast<std::uint32_t>(lgrSeq),
@@ -662,11 +669,11 @@ CassandraBackend::fetchNFTTransactions(
} while (result.nextRow()); } while (result.nextRow());
auto txns = fetchTransactions(hashes, yield); auto txns = fetchTransactions(hashes, yield);
BOOST_LOG_TRIVIAL(debug) << __func__ << " txns = " << txns.size(); log_.debug() << "Txns = " << txns.size();
if (txns.size() == limit) if (txns.size() == limit)
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " returning cursor"; log_.debug() << "Returning cursor";
return {txns, cursor}; return {txns, cursor};
} }
@@ -698,9 +705,9 @@ CassandraBackend::fetchAccountTransactions(
{ {
statement.bindNextIntTuple( statement.bindNextIntTuple(
cursor->ledgerSequence, cursor->transactionIndex); cursor->ledgerSequence, cursor->transactionIndex);
BOOST_LOG_TRIVIAL(debug) << " account = " << ripple::strHex(account) log_.debug() << "account = " << ripple::strHex(account)
<< " tuple = " << cursor->ledgerSequence << " tuple = " << cursor->ledgerSequence
<< " : " << cursor->transactionIndex; << cursor->transactionIndex;
} }
else else
{ {
@@ -709,9 +716,8 @@ CassandraBackend::fetchAccountTransactions(
forward ? 0 : std::numeric_limits<std::uint32_t>::max(); forward ? 0 : std::numeric_limits<std::uint32_t>::max();
statement.bindNextIntTuple(placeHolder, placeHolder); statement.bindNextIntTuple(placeHolder, placeHolder);
BOOST_LOG_TRIVIAL(debug) log_.debug() << "account = " << ripple::strHex(account)
<< " account = " << ripple::strHex(account) << " idx = " << seq << " idx = " << seq << " tuple = " << placeHolder;
<< " tuple = " << placeHolder;
} }
statement.bindNextUInt(limit); statement.bindNextUInt(limit);
@@ -719,19 +725,19 @@ CassandraBackend::fetchAccountTransactions(
if (!result.hasResult()) if (!result.hasResult())
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned"; log_.debug() << "No rows returned";
return {}; return {};
} }
std::vector<ripple::uint256> hashes = {}; std::vector<ripple::uint256> hashes = {};
auto numRows = result.numRows(); auto numRows = result.numRows();
BOOST_LOG_TRIVIAL(info) << "num_rows = " << std::to_string(numRows); log_.info() << "num_rows = " << std::to_string(numRows);
do do
{ {
hashes.push_back(result.getUInt256()); hashes.push_back(result.getUInt256());
if (--numRows == 0) if (--numRows == 0)
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " setting cursor"; log_.debug() << "Setting cursor";
auto [lgrSeq, txnIdx] = result.getInt64Tuple(); auto [lgrSeq, txnIdx] = result.getInt64Tuple();
cursor = { cursor = {
static_cast<std::uint32_t>(lgrSeq), static_cast<std::uint32_t>(lgrSeq),
@@ -746,11 +752,11 @@ CassandraBackend::fetchAccountTransactions(
} while (result.nextRow()); } while (result.nextRow());
auto txns = fetchTransactions(hashes, yield); auto txns = fetchTransactions(hashes, yield);
BOOST_LOG_TRIVIAL(debug) << __func__ << "txns = " << txns.size(); log_.debug() << "Txns = " << txns.size();
if (txns.size() == limit) if (txns.size() == limit)
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " returning cursor"; log_.debug() << "Returning cursor";
return {txns, cursor}; return {txns, cursor};
} }
@@ -763,7 +769,7 @@ CassandraBackend::doFetchSuccessorKey(
std::uint32_t const ledgerSequence, std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const boost::asio::yield_context& yield) const
{ {
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra"; log_.trace() << "Fetching from cassandra";
CassandraStatement statement{selectSuccessor_}; CassandraStatement statement{selectSuccessor_};
statement.bindNextBytes(key); statement.bindNextBytes(key);
statement.bindNextInt(ledgerSequence); statement.bindNextInt(ledgerSequence);
@@ -772,7 +778,7 @@ CassandraBackend::doFetchSuccessorKey(
if (!result) if (!result)
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows"; log_.debug() << "No rows";
return {}; return {};
} }
auto next = result.getUInt256(); auto next = result.getUInt256();
@@ -787,7 +793,7 @@ CassandraBackend::doFetchLedgerObject(
std::uint32_t const sequence, std::uint32_t const sequence,
boost::asio::yield_context& yield) const boost::asio::yield_context& yield) const
{ {
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra"; log_.trace() << "Fetching from cassandra";
CassandraStatement statement{selectObject_}; CassandraStatement statement{selectObject_};
statement.bindNextBytes(key); statement.bindNextBytes(key);
statement.bindNextInt(sequence); statement.bindNextInt(sequence);
@@ -796,7 +802,7 @@ CassandraBackend::doFetchLedgerObject(
if (!result) if (!result)
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows"; log_.debug() << "No rows";
return {}; return {};
} }
auto res = result.getBytes(); auto res = result.getBytes();
@@ -820,8 +826,7 @@ CassandraBackend::doFetchLedgerObjects(
result_type result(handler); result_type result(handler);
std::size_t const numKeys = keys.size(); std::size_t const numKeys = keys.size();
BOOST_LOG_TRIVIAL(trace) log_.trace() << "Fetching " << numKeys << " records from Cassandra";
<< "Fetching " << numKeys << " records from Cassandra";
std::atomic_int numOutstanding = numKeys; std::atomic_int numOutstanding = numKeys;
std::vector<Blob> results{numKeys}; std::vector<Blob> results{numKeys};
std::vector<std::shared_ptr<ReadCallbackData<result_type>>> cbs; std::vector<std::shared_ptr<ReadCallbackData<result_type>>> cbs;
@@ -850,8 +855,7 @@ CassandraBackend::doFetchLedgerObjects(
throw DatabaseTimeout(); throw DatabaseTimeout();
} }
BOOST_LOG_TRIVIAL(trace) log_.trace() << "Fetched " << numKeys << " records from Cassandra";
<< "Fetched " << numKeys << " records from Cassandra";
return results; return results;
} }
@@ -870,9 +874,7 @@ CassandraBackend::fetchLedgerDiff(
if (!result) if (!result)
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "No rows. Ledger = " << std::to_string(ledgerSequence);
<< __func__
<< " - no rows . ledger = " << std::to_string(ledgerSequence);
return {}; return {};
} }
std::vector<ripple::uint256> keys; std::vector<ripple::uint256> keys;
@@ -880,9 +882,10 @@ CassandraBackend::fetchLedgerDiff(
{ {
keys.push_back(result.getUInt256()); keys.push_back(result.getUInt256());
} while (result.nextRow()); } while (result.nextRow());
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Fetched " << keys.size()
<< "Fetched " << keys.size() << " diff hashes from Cassandra in " << " diff hashes from Cassandra in "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start) << std::chrono::duration_cast<std::chrono::milliseconds>(
end - start)
.count() .count()
<< " milliseconds"; << " milliseconds";
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield); auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
@@ -951,12 +954,12 @@ CassandraBackend::doOnlineDelete(
cv)); cv));
std::unique_lock<std::mutex> lck(mtx); std::unique_lock<std::mutex> lck(mtx);
BOOST_LOG_TRIVIAL(trace) << __func__ << "Got the mutex"; log_.trace() << "Got the mutex";
cv.wait(lck, [&numOutstanding, concurrentLimit]() { cv.wait(lck, [&numOutstanding, concurrentLimit]() {
return numOutstanding < concurrentLimit; return numOutstanding < concurrentLimit;
}); });
} }
BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched a page"; log_.debug() << "Fetched a page";
cursor = curCursor; cursor = curCursor;
if (!cursor) if (!cursor)
break; break;
@@ -982,11 +985,11 @@ CassandraBackend::open(bool readOnly)
if (open_) if (open_)
{ {
assert(false); assert(false);
BOOST_LOG_TRIVIAL(error) << "database is already open"; log_.error() << "Database is already open";
return; return;
} }
BOOST_LOG_TRIVIAL(info) << "Opening Cassandra Backend"; log_.info() << "Opening Cassandra Backend";
CassCluster* cluster = cass_cluster_new(); CassCluster* cluster = cass_cluster_new();
if (!cluster) if (!cluster)
@@ -1002,7 +1005,7 @@ CassandraBackend::open(bool readOnly)
if (cass_cluster_set_cloud_secure_connection_bundle( if (cass_cluster_set_cloud_secure_connection_bundle(
cluster, secureConnectBundle.c_str()) != CASS_OK) cluster, secureConnectBundle.c_str()) != CASS_OK)
{ {
BOOST_LOG_TRIVIAL(error) << "Unable to configure cloud using the " log_.error() << "Unable to configure cloud using the "
"secure connection bundle: " "secure connection bundle: "
<< secureConnectBundle; << secureConnectBundle;
throw std::runtime_error( throw std::runtime_error(
@@ -1057,7 +1060,7 @@ CassandraBackend::open(bool readOnly)
auto username = config_.maybeValue<std::string>("username"); auto username = config_.maybeValue<std::string>("username");
if (username) if (username)
{ {
BOOST_LOG_TRIVIAL(debug) << "user = " << *username; log_.debug() << "user = " << *username;
auto password = config_.value<std::string>("password"); auto password = config_.value<std::string>("password");
cass_cluster_set_credentials( cass_cluster_set_credentials(
cluster, username->c_str(), password.c_str()); cluster, username->c_str(), password.c_str());
@@ -1080,10 +1083,11 @@ CassandraBackend::open(bool readOnly)
"max_read_requests_outstanding", maxReadRequestsOutstanding); "max_read_requests_outstanding", maxReadRequestsOutstanding);
syncInterval_ = config_.valueOr<int>("sync_interval", syncInterval_); syncInterval_ = config_.valueOr<int>("sync_interval", syncInterval_);
BOOST_LOG_TRIVIAL(info) log_.info() << "Sync interval is " << syncInterval_
<< __func__ << " sync interval is " << syncInterval_ << ". max write requests outstanding is "
<< ". max write requests outstanding is " << maxWriteRequestsOutstanding << maxWriteRequestsOutstanding
<< ". max read requests outstanding is " << maxReadRequestsOutstanding; << ". max read requests outstanding is "
<< maxReadRequestsOutstanding;
cass_cluster_set_request_timeout(cluster, 10000); cass_cluster_set_request_timeout(cluster, 10000);
@@ -1098,7 +1102,7 @@ CassandraBackend::open(bool readOnly)
ss << "nodestore: Error setting Cassandra max core connections per " ss << "nodestore: Error setting Cassandra max core connections per "
"host" "host"
<< ", result: " << rc << ", " << cass_error_desc(rc); << ", result: " << rc << ", " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << ss.str(); log_.error() << ss.str();
throw std::runtime_error(ss.str()); throw std::runtime_error(ss.str());
} }
@@ -1140,8 +1144,7 @@ CassandraBackend::open(bool readOnly)
auto keyspace = config_.valueOr<std::string>("keyspace", ""); auto keyspace = config_.valueOr<std::string>("keyspace", "");
if (keyspace.empty()) if (keyspace.empty())
{ {
BOOST_LOG_TRIVIAL(warning) log_.warn() << "No keyspace specified. Using keyspace clio";
<< "No keyspace specified. Using keyspace clio";
keyspace = "clio"; keyspace = "clio";
} }
@@ -1149,14 +1152,13 @@ CassandraBackend::open(bool readOnly)
auto tablePrefix = config_.valueOr<std::string>("table_prefix", ""); auto tablePrefix = config_.valueOr<std::string>("table_prefix", "");
if (tablePrefix.empty()) if (tablePrefix.empty())
{ {
BOOST_LOG_TRIVIAL(warning) << "Table prefix is empty"; log_.warn() << "Table prefix is empty";
} }
cass_cluster_set_connect_timeout(cluster, 10000); cass_cluster_set_connect_timeout(cluster, 10000);
auto ttl = ttl_ * 2; auto ttl = ttl_ * 2;
BOOST_LOG_TRIVIAL(info) log_.info() << "Setting ttl to " << std::to_string(ttl);
<< __func__ << " setting ttl to " << std::to_string(ttl);
auto executeSimpleStatement = [this](std::string const& query) { auto executeSimpleStatement = [this](std::string const& query) {
CassStatement* statement = makeStatement(query.c_str(), 0); CassStatement* statement = makeStatement(query.c_str(), 0);
@@ -1169,7 +1171,7 @@ CassandraBackend::open(bool readOnly)
std::stringstream ss; std::stringstream ss;
ss << "nodestore: Error executing simple statement: " << rc << ", " ss << "nodestore: Error executing simple statement: " << rc << ", "
<< cass_error_desc(rc) << " - " << query; << cass_error_desc(rc) << " - " << query;
BOOST_LOG_TRIVIAL(error) << ss.str(); log_.error() << ss.str();
return false; return false;
} }
return true; return true;
@@ -1192,7 +1194,7 @@ CassandraBackend::open(bool readOnly)
ss << "nodestore: Error connecting Cassandra session keyspace: " ss << "nodestore: Error connecting Cassandra session keyspace: "
<< rc << ", " << cass_error_desc(rc) << rc << ", " << cass_error_desc(rc)
<< ", trying to create it ourselves"; << ", trying to create it ourselves";
BOOST_LOG_TRIVIAL(error) << ss.str(); log_.error() << ss.str();
// if the keyspace doesn't exist, try to create it // if the keyspace doesn't exist, try to create it
session_.reset(cass_session_new()); session_.reset(cass_session_new());
fut = cass_session_connect(session_.get(), cluster); fut = cass_session_connect(session_.get(), cluster);
@@ -1203,7 +1205,7 @@ CassandraBackend::open(bool readOnly)
std::stringstream ss; std::stringstream ss;
ss << "nodestore: Error connecting Cassandra session at all: " ss << "nodestore: Error connecting Cassandra session at all: "
<< rc << ", " << cass_error_desc(rc); << rc << ", " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << ss.str(); log_.error() << ss.str();
} }
else else
{ {
@@ -1626,6 +1628,6 @@ CassandraBackend::open(bool readOnly)
open_ = true; open_ = true;
BOOST_LOG_TRIVIAL(info) << "Opened CassandraBackend successfully"; log_.info() << "Opened CassandraBackend successfully";
} }
} // namespace Backend } // namespace Backend

View File

@@ -2,16 +2,19 @@
#define RIPPLE_APP_REPORTING_CASSANDRABACKEND_H_INCLUDED #define RIPPLE_APP_REPORTING_CASSANDRABACKEND_H_INCLUDED
#include <ripple/basics/base_uint.h> #include <ripple/basics/base_uint.h>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <log/Logger.h>
#include <cassandra.h>
#include <boost/asio.hpp> #include <boost/asio.hpp>
#include <boost/asio/async_result.hpp> #include <boost/asio/async_result.hpp>
#include <boost/asio/spawn.hpp> #include <boost/asio/spawn.hpp>
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
#include <boost/json.hpp> #include <boost/json.hpp>
#include <boost/log/trivial.hpp>
#include <atomic> #include <atomic>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <cassandra.h>
#include <cstddef> #include <cstddef>
#include <iostream> #include <iostream>
#include <memory> #include <memory>
@@ -25,6 +28,7 @@ namespace Backend {
class CassandraPreparedStatement class CassandraPreparedStatement
{ {
private: private:
clio::Logger log_{"Backend"};
CassPrepared const* prepared_ = nullptr; CassPrepared const* prepared_ = nullptr;
public: public:
@@ -65,7 +69,7 @@ public:
std::stringstream ss; std::stringstream ss;
ss << "nodestore: Error preparing statement : " << rc << ", " ss << "nodestore: Error preparing statement : " << rc << ", "
<< cass_error_desc(rc) << ". query : " << query; << cass_error_desc(rc) << ". query : " << query;
BOOST_LOG_TRIVIAL(error) << ss.str(); log_.error() << ss.str();
} }
cass_future_free(prepareFuture); cass_future_free(prepareFuture);
return rc == CASS_OK; return rc == CASS_OK;
@@ -73,7 +77,7 @@ public:
~CassandraPreparedStatement() ~CassandraPreparedStatement()
{ {
BOOST_LOG_TRIVIAL(trace) << __func__; log_.trace() << "called";
if (prepared_) if (prepared_)
{ {
cass_prepared_free(prepared_); cass_prepared_free(prepared_);
@@ -86,6 +90,7 @@ class CassandraStatement
{ {
CassStatement* statement_ = nullptr; CassStatement* statement_ = nullptr;
size_t curBindingIndex_ = 0; size_t curBindingIndex_ = 0;
clio::Logger log_{"Backend"};
public: public:
CassandraStatement(CassandraPreparedStatement const& prepared) CassandraStatement(CassandraPreparedStatement const& prepared)
@@ -123,7 +128,7 @@ public:
std::stringstream ss; std::stringstream ss;
ss << "Error binding boolean to statement: " << rc << ", " ss << "Error binding boolean to statement: " << rc << ", "
<< cass_error_desc(rc); << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); log_.error() << ss.str();
throw std::runtime_error(ss.str()); throw std::runtime_error(ss.str());
} }
curBindingIndex_++; curBindingIndex_++;
@@ -179,7 +184,7 @@ public:
std::stringstream ss; std::stringstream ss;
ss << "Error binding bytes to statement: " << rc << ", " ss << "Error binding bytes to statement: " << rc << ", "
<< cass_error_desc(rc); << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); log_.error() << ss.str();
throw std::runtime_error(ss.str()); throw std::runtime_error(ss.str());
} }
curBindingIndex_++; curBindingIndex_++;
@@ -191,8 +196,8 @@ public:
if (!statement_) if (!statement_)
throw std::runtime_error( throw std::runtime_error(
"CassandraStatement::bindNextUInt - statement_ is null"); "CassandraStatement::bindNextUInt - statement_ is null");
BOOST_LOG_TRIVIAL(trace) log_.trace() << std::to_string(curBindingIndex_) << " "
<< std::to_string(curBindingIndex_) << " " << std::to_string(value); << std::to_string(value);
CassError rc = CassError rc =
cass_statement_bind_int32(statement_, curBindingIndex_, value); cass_statement_bind_int32(statement_, curBindingIndex_, value);
if (rc != CASS_OK) if (rc != CASS_OK)
@@ -200,7 +205,7 @@ public:
std::stringstream ss; std::stringstream ss;
ss << "Error binding uint to statement: " << rc << ", " ss << "Error binding uint to statement: " << rc << ", "
<< cass_error_desc(rc); << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); log_.error() << ss.str();
throw std::runtime_error(ss.str()); throw std::runtime_error(ss.str());
} }
curBindingIndex_++; curBindingIndex_++;
@@ -225,7 +230,7 @@ public:
std::stringstream ss; std::stringstream ss;
ss << "Error binding int to statement: " << rc << ", " ss << "Error binding int to statement: " << rc << ", "
<< cass_error_desc(rc); << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); log_.error() << ss.str();
throw std::runtime_error(ss.str()); throw std::runtime_error(ss.str());
} }
curBindingIndex_++; curBindingIndex_++;
@@ -241,7 +246,7 @@ public:
std::stringstream ss; std::stringstream ss;
ss << "Error binding int to tuple: " << rc << ", " ss << "Error binding int to tuple: " << rc << ", "
<< cass_error_desc(rc); << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); log_.error() << ss.str();
throw std::runtime_error(ss.str()); throw std::runtime_error(ss.str());
} }
rc = cass_tuple_set_int64(tuple, 1, second); rc = cass_tuple_set_int64(tuple, 1, second);
@@ -250,7 +255,7 @@ public:
std::stringstream ss; std::stringstream ss;
ss << "Error binding int to tuple: " << rc << ", " ss << "Error binding int to tuple: " << rc << ", "
<< cass_error_desc(rc); << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); log_.error() << ss.str();
throw std::runtime_error(ss.str()); throw std::runtime_error(ss.str());
} }
rc = cass_statement_bind_tuple(statement_, curBindingIndex_, tuple); rc = cass_statement_bind_tuple(statement_, curBindingIndex_, tuple);
@@ -259,7 +264,7 @@ public:
std::stringstream ss; std::stringstream ss;
ss << "Error binding tuple to statement: " << rc << ", " ss << "Error binding tuple to statement: " << rc << ", "
<< cass_error_desc(rc); << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); log_.error() << ss.str();
throw std::runtime_error(ss.str()); throw std::runtime_error(ss.str());
} }
cass_tuple_free(tuple); cass_tuple_free(tuple);
@@ -275,6 +280,7 @@ public:
class CassandraResult class CassandraResult
{ {
clio::Logger log_{"Backend"};
CassResult const* result_ = nullptr; CassResult const* result_ = nullptr;
CassRow const* row_ = nullptr; CassRow const* row_ = nullptr;
CassIterator* iter_ = nullptr; CassIterator* iter_ = nullptr;
@@ -365,7 +371,7 @@ public:
std::stringstream msg; std::stringstream msg;
msg << "CassandraResult::getBytes - error getting value: " << rc msg << "CassandraResult::getBytes - error getting value: " << rc
<< ", " << cass_error_desc(rc); << ", " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << msg.str(); log_.error() << msg.str();
throw std::runtime_error(msg.str()); throw std::runtime_error(msg.str());
} }
curGetIndex_++; curGetIndex_++;
@@ -386,7 +392,7 @@ public:
std::stringstream msg; std::stringstream msg;
msg << "CassandraResult::getuint256 - error getting value: " << rc msg << "CassandraResult::getuint256 - error getting value: " << rc
<< ", " << cass_error_desc(rc); << ", " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << msg.str(); log_.error() << msg.str();
throw std::runtime_error(msg.str()); throw std::runtime_error(msg.str());
} }
curGetIndex_++; curGetIndex_++;
@@ -406,7 +412,7 @@ public:
std::stringstream msg; std::stringstream msg;
msg << "CassandraResult::getInt64 - error getting value: " << rc msg << "CassandraResult::getInt64 - error getting value: " << rc
<< ", " << cass_error_desc(rc); << ", " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << msg.str(); log_.error() << msg.str();
throw std::runtime_error(msg.str()); throw std::runtime_error(msg.str());
} }
++curGetIndex_; ++curGetIndex_;
@@ -490,10 +496,9 @@ public:
{ {
if (!row_) if (!row_)
{ {
std::stringstream msg; std::string msg{"No result"};
msg << __func__ << " - no result"; log_.error() << msg;
BOOST_LOG_TRIVIAL(error) << msg.str(); throw std::runtime_error(msg);
throw std::runtime_error(msg.str());
} }
cass_bool_t val; cass_bool_t val;
CassError rc = CassError rc =
@@ -501,9 +506,8 @@ public:
if (rc != CASS_OK) if (rc != CASS_OK)
{ {
std::stringstream msg; std::stringstream msg;
msg << __func__ << " - error getting value: " << rc << ", " msg << "Error getting value: " << rc << ", " << cass_error_desc(rc);
<< cass_error_desc(rc); log_.error() << msg.str();
BOOST_LOG_TRIVIAL(error) << msg.str();
throw std::runtime_error(msg.str()); throw std::runtime_error(msg.str());
} }
++curGetIndex_; ++curGetIndex_;
@@ -518,6 +522,7 @@ public:
cass_iterator_free(iter_); cass_iterator_free(iter_);
} }
}; };
inline bool inline bool
isTimeout(CassError rc) isTimeout(CassError rc)
{ {
@@ -597,6 +602,7 @@ private:
return ret; return ret;
} }
clio::Logger log_{"Backend"};
std::atomic<bool> open_{false}; std::atomic<bool> open_{false};
std::unique_ptr<CassSession, void (*)(CassSession*)> session_{ std::unique_ptr<CassSession, void (*)(CassSession*)> session_{
@@ -751,13 +757,11 @@ public:
statement.bindNextInt(ledgerSequence_ - 1); statement.bindNextInt(ledgerSequence_ - 1);
if (!executeSyncUpdate(statement)) if (!executeSyncUpdate(statement))
{ {
BOOST_LOG_TRIVIAL(warning) log_.warn() << "Update failed for ledger "
<< __func__ << " Update failed for ledger "
<< std::to_string(ledgerSequence_) << ". Returning"; << std::to_string(ledgerSequence_) << ". Returning";
return false; return false;
} }
BOOST_LOG_TRIVIAL(info) << __func__ << " Committed ledger " log_.info() << "Committed ledger " << std::to_string(ledgerSequence_);
<< std::to_string(ledgerSequence_);
return true; return true;
} }
@@ -791,19 +795,17 @@ public:
statement.bindNextInt(lastSync_); statement.bindNextInt(lastSync_);
if (!executeSyncUpdate(statement)) if (!executeSyncUpdate(statement))
{ {
BOOST_LOG_TRIVIAL(warning) log_.warn() << "Update failed for ledger "
<< __func__ << " Update failed for ledger "
<< std::to_string(ledgerSequence_) << ". Returning"; << std::to_string(ledgerSequence_) << ". Returning";
return false; return false;
} }
BOOST_LOG_TRIVIAL(info) << __func__ << " Committed ledger " log_.info() << "Committed ledger "
<< std::to_string(ledgerSequence_); << std::to_string(ledgerSequence_);
lastSync_ = ledgerSequence_; lastSync_ = ledgerSequence_;
} }
else else
{ {
BOOST_LOG_TRIVIAL(info) log_.info() << "Skipping commit. sync interval is "
<< __func__ << " Skipping commit. sync interval is "
<< std::to_string(syncInterval_) << " - last sync is " << std::to_string(syncInterval_) << " - last sync is "
<< std::to_string(lastSync_) << " - ledger sequence is " << std::to_string(lastSync_) << " - ledger sequence is "
<< std::to_string(ledgerSequence_); << std::to_string(ledgerSequence_);
@@ -826,12 +828,12 @@ public:
std::optional<std::uint32_t> std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override
{ {
BOOST_LOG_TRIVIAL(trace) << __func__; log_.trace() << "called";
CassandraStatement statement{selectLatestLedger_}; CassandraStatement statement{selectLatestLedger_};
CassandraResult result = executeAsyncRead(statement, yield); CassandraResult result = executeAsyncRead(statement, yield);
if (!result.hasResult()) if (!result.hasResult())
{ {
BOOST_LOG_TRIVIAL(error) log_.error()
<< "CassandraBackend::fetchLatestLedgerSequence - no rows"; << "CassandraBackend::fetchLatestLedgerSequence - no rows";
return {}; return {};
} }
@@ -843,13 +845,13 @@ public:
std::uint32_t const sequence, std::uint32_t const sequence,
boost::asio::yield_context& yield) const override boost::asio::yield_context& yield) const override
{ {
BOOST_LOG_TRIVIAL(trace) << __func__; log_.trace() << "called";
CassandraStatement statement{selectLedgerBySeq_}; CassandraStatement statement{selectLedgerBySeq_};
statement.bindNextInt(sequence); statement.bindNextInt(sequence);
CassandraResult result = executeAsyncRead(statement, yield); CassandraResult result = executeAsyncRead(statement, yield);
if (!result) if (!result)
{ {
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows"; log_.error() << "No rows";
return {}; return {};
} }
std::vector<unsigned char> header = result.getBytes(); std::vector<unsigned char> header = result.getBytes();
@@ -869,7 +871,7 @@ public:
if (!result.hasResult()) if (!result.hasResult())
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned"; log_.debug() << "No rows returned";
return {}; return {};
} }
@@ -916,7 +918,7 @@ public:
std::optional<int64_t> std::optional<int64_t>
getToken(void const* key, boost::asio::yield_context& yield) const getToken(void const* key, boost::asio::yield_context& yield) const
{ {
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra"; log_.trace() << "Fetching from cassandra";
CassandraStatement statement{getToken_}; CassandraStatement statement{getToken_};
statement.bindNextBytes(key, 32); statement.bindNextBytes(key, 32);
@@ -924,7 +926,7 @@ public:
if (!result) if (!result)
{ {
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows"; log_.error() << "No rows";
return {}; return {};
} }
int64_t token = result.getInt64(); int64_t token = result.getInt64();
@@ -939,14 +941,14 @@ public:
ripple::uint256 const& hash, ripple::uint256 const& hash,
boost::asio::yield_context& yield) const override boost::asio::yield_context& yield) const override
{ {
BOOST_LOG_TRIVIAL(trace) << __func__; log_.trace() << "called";
CassandraStatement statement{selectTransaction_}; CassandraStatement statement{selectTransaction_};
statement.bindNextBytes(hash); statement.bindNextBytes(hash);
CassandraResult result = executeAsyncRead(statement, yield); CassandraResult result = executeAsyncRead(statement, yield);
if (!result) if (!result)
{ {
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows"; log_.error() << "No rows";
return {}; return {};
} }
return { return {
@@ -1036,9 +1038,7 @@ public:
std::unique_lock<std::mutex> lck(throttleMutex_); std::unique_lock<std::mutex> lck(throttleMutex_);
if (!canAddRequest()) if (!canAddRequest())
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Max outstanding requests reached. "
<< __func__ << " : "
<< "Max outstanding requests reached. "
<< "Waiting for other requests to finish"; << "Waiting for other requests to finish";
throttleCv_.wait(lck, [this]() { return canAddRequest(); }); throttleCv_.wait(lck, [this]() { return canAddRequest(); });
} }
@@ -1142,7 +1142,7 @@ public:
ss << "Cassandra sync write error"; ss << "Cassandra sync write error";
ss << ", retrying"; ss << ", retrying";
ss << ": " << cass_error_desc(rc); ss << ": " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(warning) << ss.str(); log_.warn() << ss.str();
std::this_thread::sleep_for(std::chrono::milliseconds(5)); std::this_thread::sleep_for(std::chrono::milliseconds(5));
} }
} while (rc != CASS_OK); } while (rc != CASS_OK);
@@ -1166,7 +1166,7 @@ public:
ss << "Cassandra sync update error"; ss << "Cassandra sync update error";
ss << ", retrying"; ss << ", retrying";
ss << ": " << cass_error_desc(rc); ss << ": " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(warning) << ss.str(); log_.warn() << ss.str();
std::this_thread::sleep_for(std::chrono::milliseconds(5)); std::this_thread::sleep_for(std::chrono::milliseconds(5));
} }
} while (rc != CASS_OK); } while (rc != CASS_OK);
@@ -1176,7 +1176,7 @@ public:
CassRow const* row = cass_result_first_row(res); CassRow const* row = cass_result_first_row(res);
if (!row) if (!row)
{ {
BOOST_LOG_TRIVIAL(error) << "executeSyncUpdate - no rows"; log_.error() << "executeSyncUpdate - no rows";
cass_result_free(res); cass_result_free(res);
return false; return false;
} }
@@ -1185,16 +1185,14 @@ public:
if (rc != CASS_OK) if (rc != CASS_OK)
{ {
cass_result_free(res); cass_result_free(res);
BOOST_LOG_TRIVIAL(error) log_.error() << "executeSyncUpdate - error getting result " << rc
<< "executeSyncUpdate - error getting result " << rc << ", " << ", " << cass_error_desc(rc);
<< cass_error_desc(rc);
return false; return false;
} }
cass_result_free(res); cass_result_free(res);
if (success != cass_true && timedOut) if (success != cass_true && timedOut)
{ {
BOOST_LOG_TRIVIAL(warning) log_.warn() << "Update failed, but timedOut is true";
<< __func__ << " Update failed, but timedOut is true";
// if there was a timeout, the update may have succeeded in the // if there was a timeout, the update may have succeeded in the
// background on the first attempt. To determine if this happened, // background on the first attempt. To determine if this happened,
// we query the range from the db, making sure the range is what // we query the range from the db, making sure the range is what
@@ -1230,15 +1228,14 @@ public:
if (ec) if (ec)
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "Cannot read async cass_future_error_code";
<< "Cannot read async cass_future_error_code";
} }
if (rc != CASS_OK) if (rc != CASS_OK)
{ {
std::stringstream ss; std::stringstream ss;
ss << "Cassandra executeAsyncRead error"; ss << "Cassandra executeAsyncRead error";
ss << ": " << cass_error_desc(rc); ss << ": " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << ss.str(); log_.error() << ss.str();
} }
if (isTimeout(rc)) if (isTimeout(rc))
{ {

View File

@@ -1,6 +1,6 @@
#include <config/Config.h> #include <config/Config.h>
#include <log/Logger.h>
#include <boost/log/trivial.hpp>
#include <fstream> #include <fstream>
namespace clio { namespace clio {
@@ -161,11 +161,11 @@ ConfigReader::open(std::filesystem::path path)
} }
catch (std::exception const& e) catch (std::exception const& e)
{ {
BOOST_LOG_TRIVIAL(error) << "Could not read configuration file from '" LogService::error() << "Could not read configuration file from '"
<< path.string() << "': " << e.what(); << path.string() << "': " << e.what();
} }
BOOST_LOG_TRIVIAL(warning) << "Using empty default configuration"; LogService::warn() << "Using empty default configuration";
return Config{}; return Config{};
} }

View File

@@ -5,18 +5,22 @@
#include <boost/beast/ssl.hpp> #include <boost/beast/ssl.hpp>
#include <boost/json.hpp> #include <boost/json.hpp>
#include <boost/json/src.hpp> #include <boost/json/src.hpp>
#include <boost/log/trivial.hpp>
#include <backend/DBHelpers.h> #include <backend/DBHelpers.h>
#include <etl/ETLSource.h> #include <etl/ETLSource.h>
#include <etl/ProbingETLSource.h> #include <etl/ProbingETLSource.h>
#include <etl/ReportingETL.h> #include <etl/ReportingETL.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h> #include <rpc/RPCHelpers.h>
#include <thread> #include <thread>
using namespace clio;
void void
ForwardCache::freshen() ForwardCache::freshen()
{ {
BOOST_LOG_TRIVIAL(trace) << "Freshening ForwardCache"; log_.trace() << "Freshening ForwardCache";
auto numOutstanding = auto numOutstanding =
std::make_shared<std::atomic_uint>(latestForwarded_.size()); std::make_shared<std::atomic_uint>(latestForwarded_.size());
@@ -127,15 +131,11 @@ ETLSourceImpl<Derived>::reconnect(boost::beast::error_code ec)
if (ec != boost::asio::error::operation_aborted && if (ec != boost::asio::error::operation_aborted &&
ec != boost::asio::error::connection_refused) ec != boost::asio::error::connection_refused)
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "error code = " << ec << " - " << toString();
<< __func__ << " : "
<< "error code = " << ec << " - " << toString();
} }
else else
{ {
BOOST_LOG_TRIVIAL(warning) log_.warn() << "error code = " << ec << " - " << toString();
<< __func__ << " : "
<< "error code = " << ec << " - " << toString();
} }
// exponentially increasing timeouts, with a max of 30 seconds // exponentially increasing timeouts, with a max of 30 seconds
@@ -144,7 +144,7 @@ ETLSourceImpl<Derived>::reconnect(boost::beast::error_code ec)
timer_.expires_after(boost::asio::chrono::seconds(waitTime)); timer_.expires_after(boost::asio::chrono::seconds(waitTime));
timer_.async_wait([this](auto ec) { timer_.async_wait([this](auto ec) {
bool startAgain = (ec != boost::asio::error::operation_aborted); bool startAgain = (ec != boost::asio::error::operation_aborted);
BOOST_LOG_TRIVIAL(trace) << __func__ << " async_wait : ec = " << ec; log_.trace() << "async_wait : ec = " << ec;
derived().close(startAgain); derived().close(startAgain);
}); });
} }
@@ -168,8 +168,8 @@ PlainETLSource::close(bool startAgain)
[this, startAgain](auto ec) { [this, startAgain](auto ec) {
if (ec) if (ec)
{ {
BOOST_LOG_TRIVIAL(error) log_.error()
<< __func__ << " async_close : " << " async_close : "
<< "error code = " << ec << " - " << toString(); << "error code = " << ec << " - " << toString();
} }
closing_ = false; closing_ = false;
@@ -213,8 +213,8 @@ SslETLSource::close(bool startAgain)
[this, startAgain](auto ec) { [this, startAgain](auto ec) {
if (ec) if (ec)
{ {
BOOST_LOG_TRIVIAL(error) log_.error()
<< __func__ << " async_close : " << " async_close : "
<< "error code = " << ec << " - " << toString(); << "error code = " << ec << " - " << toString();
} }
closing_ = false; closing_ = false;
@@ -246,8 +246,7 @@ ETLSourceImpl<Derived>::onResolve(
boost::beast::error_code ec, boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type results) boost::asio::ip::tcp::resolver::results_type results)
{ {
BOOST_LOG_TRIVIAL(trace) log_.trace() << "ec = " << ec << " - " << toString();
<< __func__ << " : ec = " << ec << " - " << toString();
if (ec) if (ec)
{ {
// try again // try again
@@ -269,8 +268,7 @@ PlainETLSource::onConnect(
boost::beast::error_code ec, boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{ {
BOOST_LOG_TRIVIAL(trace) log_.trace() << "ec = " << ec << " - " << toString();
<< __func__ << " : ec = " << ec << " - " << toString();
if (ec) if (ec)
{ {
// start over // start over
@@ -311,8 +309,7 @@ SslETLSource::onConnect(
boost::beast::error_code ec, boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{ {
BOOST_LOG_TRIVIAL(trace) log_.trace() << "ec = " << ec << " - " << toString();
<< __func__ << " : ec = " << ec << " - " << toString();
if (ec) if (ec)
{ {
// start over // start over
@@ -371,8 +368,7 @@ template <class Derived>
void void
ETLSourceImpl<Derived>::onHandshake(boost::beast::error_code ec) ETLSourceImpl<Derived>::onHandshake(boost::beast::error_code ec)
{ {
BOOST_LOG_TRIVIAL(trace) log_.trace() << "ec = " << ec << " - " << toString();
<< __func__ << " : ec = " << ec << " - " << toString();
if (auto action = hooks_.onConnected(ec); if (auto action = hooks_.onConnected(ec);
action == ETLSourceHooks::Action::STOP) action == ETLSourceHooks::Action::STOP)
return; return;
@@ -389,7 +385,7 @@ ETLSourceImpl<Derived>::onHandshake(boost::beast::error_code ec)
{"streams", {"streams",
{"ledger", "manifests", "validations", "transactions_proposed"}}}; {"ledger", "manifests", "validations", "transactions_proposed"}}};
std::string s = boost::json::serialize(jv); std::string s = boost::json::serialize(jv);
BOOST_LOG_TRIVIAL(trace) << "Sending subscribe stream message"; log_.trace() << "Sending subscribe stream message";
derived().ws().set_option( derived().ws().set_option(
boost::beast::websocket::stream_base::decorator( boost::beast::websocket::stream_base::decorator(
@@ -415,8 +411,7 @@ ETLSourceImpl<Derived>::onWrite(
boost::beast::error_code ec, boost::beast::error_code ec,
size_t bytesWritten) size_t bytesWritten)
{ {
BOOST_LOG_TRIVIAL(trace) log_.trace() << "ec = " << ec << " - " << toString();
<< __func__ << " : ec = " << ec << " - " << toString();
if (ec) if (ec)
{ {
// start over // start over
@@ -433,8 +428,7 @@ template <class Derived>
void void
ETLSourceImpl<Derived>::onRead(boost::beast::error_code ec, size_t size) ETLSourceImpl<Derived>::onRead(boost::beast::error_code ec, size_t size)
{ {
BOOST_LOG_TRIVIAL(trace) log_.trace() << "ec = " << ec << " - " << toString();
<< __func__ << " : ec = " << ec << " - " << toString();
// if error or error reading message, start over // if error or error reading message, start over
if (ec) if (ec)
{ {
@@ -446,8 +440,7 @@ ETLSourceImpl<Derived>::onRead(boost::beast::error_code ec, size_t size)
boost::beast::flat_buffer buffer; boost::beast::flat_buffer buffer;
swap(readBuffer_, buffer); swap(readBuffer_, buffer);
BOOST_LOG_TRIVIAL(trace) log_.trace() << "calling async_read - " << toString();
<< __func__ << " : calling async_read - " << toString();
derived().ws().async_read( derived().ws().async_read(
readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); }); readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
} }
@@ -457,7 +450,7 @@ template <class Derived>
bool bool
ETLSourceImpl<Derived>::handleMessage() ETLSourceImpl<Derived>::handleMessage()
{ {
BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << toString(); log_.trace() << toString();
setLastMsgTime(); setLastMsgTime();
connected_ = true; connected_ = true;
@@ -466,9 +459,9 @@ ETLSourceImpl<Derived>::handleMessage()
std::string msg{ std::string msg{
static_cast<char const*>(readBuffer_.data().data()), static_cast<char const*>(readBuffer_.data().data()),
readBuffer_.size()}; readBuffer_.size()};
BOOST_LOG_TRIVIAL(trace) << __func__ << msg; log_.trace() << msg;
boost::json::value raw = boost::json::parse(msg); boost::json::value raw = boost::json::parse(msg);
BOOST_LOG_TRIVIAL(trace) << __func__ << " parsed"; log_.trace() << "parsed";
boost::json::object response = raw.as_object(); boost::json::object response = raw.as_object();
uint32_t ledgerIndex = 0; uint32_t ledgerIndex = 0;
@@ -487,20 +480,16 @@ ETLSourceImpl<Derived>::handleMessage()
setValidatedRange( setValidatedRange(
{validatedLedgers.c_str(), validatedLedgers.size()}); {validatedLedgers.c_str(), validatedLedgers.size()});
} }
BOOST_LOG_TRIVIAL(debug) log_.info() << "Received a message on ledger "
<< __func__ << " : " << " subscription stream. Message : " << response
<< "Received a message on ledger " << " - " << toString();
<< " subscription stream. Message : " << response << " - "
<< toString();
} }
else if ( else if (
response.contains("type") && response["type"] == "ledgerClosed") response.contains("type") && response["type"] == "ledgerClosed")
{ {
BOOST_LOG_TRIVIAL(debug) log_.info() << "Received a message on ledger "
<< __func__ << " : " << " subscription stream. Message : " << response
<< "Received a message on ledger " << " - " << toString();
<< " subscription stream. Message : " << response << " - "
<< toString();
if (response.contains("ledger_index")) if (response.contains("ledger_index"))
{ {
ledgerIndex = response["ledger_index"].as_int64(); ledgerIndex = response["ledger_index"].as_int64();
@@ -539,9 +528,7 @@ ETLSourceImpl<Derived>::handleMessage()
if (ledgerIndex != 0) if (ledgerIndex != 0)
{ {
BOOST_LOG_TRIVIAL(trace) log_.trace() << "Pushing ledger sequence = " << ledgerIndex << " - "
<< __func__ << " : "
<< "Pushing ledger sequence = " << ledgerIndex << " - "
<< toString(); << toString();
networkValidatedLedgers_->push(ledgerIndex); networkValidatedLedgers_->push(ledgerIndex);
} }
@@ -549,13 +536,15 @@ ETLSourceImpl<Derived>::handleMessage()
} }
catch (std::exception const& e) catch (std::exception const& e)
{ {
BOOST_LOG_TRIVIAL(error) << "Exception in handleMessage : " << e.what(); log_.error() << "Exception in handleMessage : " << e.what();
return false; return false;
} }
} }
class AsyncCallData class AsyncCallData
{ {
clio::Logger log_{"ETL"};
std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> cur_; std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> cur_;
std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> next_; std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> next_;
@@ -585,8 +574,8 @@ public:
unsigned char prefix = marker.data()[0]; unsigned char prefix = marker.data()[0];
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Setting up AsyncCallData. marker = "
<< "Setting up AsyncCallData. marker = " << ripple::strHex(marker) << ripple::strHex(marker)
<< " . prefix = " << ripple::strHex(std::string(1, prefix)) << " . prefix = " << ripple::strHex(std::string(1, prefix))
<< " . nextPrefix_ = " << " . nextPrefix_ = "
<< ripple::strHex(std::string(1, nextPrefix_)); << ripple::strHex(std::string(1, nextPrefix_));
@@ -609,25 +598,23 @@ public:
bool abort, bool abort,
bool cacheOnly = false) bool cacheOnly = false)
{ {
BOOST_LOG_TRIVIAL(trace) << "Processing response. " log_.trace() << "Processing response. "
<< "Marker prefix = " << getMarkerPrefix(); << "Marker prefix = " << getMarkerPrefix();
if (abort) if (abort)
{ {
BOOST_LOG_TRIVIAL(error) << "AsyncCallData aborted"; log_.error() << "AsyncCallData aborted";
return CallStatus::ERRORED; return CallStatus::ERRORED;
} }
if (!status_.ok()) if (!status_.ok())
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "AsyncCallData status_ not ok: "
<< "AsyncCallData status_ not ok: "
<< " code = " << status_.error_code() << " code = " << status_.error_code()
<< " message = " << status_.error_message(); << " message = " << status_.error_message();
return CallStatus::ERRORED; return CallStatus::ERRORED;
} }
if (!next_->is_unlimited()) if (!next_->is_unlimited())
{ {
BOOST_LOG_TRIVIAL(warning) log_.warn() << "AsyncCallData is_unlimited is false. Make sure "
<< "AsyncCallData is_unlimited is false. Make sure "
"secure_gateway is set correctly at the ETL source"; "secure_gateway is set correctly at the ETL source";
} }
@@ -651,7 +638,7 @@ public:
call(stub, cq); call(stub, cq);
} }
BOOST_LOG_TRIVIAL(trace) << "Writing objects"; log_.trace() << "Writing objects";
std::vector<Backend::LedgerObject> cacheUpdates; std::vector<Backend::LedgerObject> cacheUpdates;
cacheUpdates.reserve(cur_->ledger_objects().objects_size()); cacheUpdates.reserve(cur_->ledger_objects().objects_size());
for (int i = 0; i < cur_->ledger_objects().objects_size(); ++i) for (int i = 0; i < cur_->ledger_objects().objects_size(); ++i)
@@ -681,7 +668,7 @@ public:
} }
backend.cache().update( backend.cache().update(
cacheUpdates, request_.ledger().sequence(), cacheOnly); cacheUpdates, request_.ledger().sequence(), cacheOnly);
BOOST_LOG_TRIVIAL(trace) << "Wrote objects"; log_.trace() << "Wrote objects";
return more ? CallStatus::MORE : CallStatus::DONE; return more ? CallStatus::MORE : CallStatus::DONE;
} }
@@ -745,7 +732,7 @@ ETLSourceImpl<Derived>::loadInitialLedger(
calls.emplace_back(sequence, markers[i], nextMarker); calls.emplace_back(sequence, markers[i], nextMarker);
} }
BOOST_LOG_TRIVIAL(debug) << "Starting data download for ledger " << sequence log_.debug() << "Starting data download for ledger " << sequence
<< ". Using source = " << toString(); << ". Using source = " << toString();
for (auto& c : calls) for (auto& c : calls)
@@ -764,20 +751,18 @@ ETLSourceImpl<Derived>::loadInitialLedger(
if (!ok) if (!ok)
{ {
BOOST_LOG_TRIVIAL(error) << "loadInitialLedger - ok is false"; log_.error() << "loadInitialLedger - ok is false";
return false; return false;
// handle cancelled // handle cancelled
} }
else else
{ {
BOOST_LOG_TRIVIAL(trace) log_.trace() << "Marker prefix = " << ptr->getMarkerPrefix();
<< "Marker prefix = " << ptr->getMarkerPrefix();
auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly); auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly);
if (result != AsyncCallData::CallStatus::MORE) if (result != AsyncCallData::CallStatus::MORE)
{ {
numFinished++; numFinished++;
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Finished a marker. "
<< "Finished a marker. "
<< "Current number of finished = " << numFinished; << "Current number of finished = " << numFinished;
std::string lastKey = ptr->getLastKey(); std::string lastKey = ptr->getLastKey();
if (lastKey.size()) if (lastKey.size())
@@ -789,15 +774,13 @@ ETLSourceImpl<Derived>::loadInitialLedger(
} }
if (backend_->cache().size() > progress) if (backend_->cache().size() > progress)
{ {
BOOST_LOG_TRIVIAL(info) log_.info() << "Downloaded " << backend_->cache().size()
<< "Downloaded " << backend_->cache().size()
<< " records from rippled"; << " records from rippled";
progress += incr; progress += incr;
} }
} }
} }
BOOST_LOG_TRIVIAL(info) log_.info() << "Finished loadInitialLedger. cache size = "
<< __func__ << " - finished loadInitialLedger. cache size = "
<< backend_->cache().size(); << backend_->cache().size();
size_t numWrites = 0; size_t numWrites = 0;
if (!abort) if (!abort)
@@ -808,9 +791,7 @@ ETLSourceImpl<Derived>::loadInitialLedger(
auto start = std::chrono::system_clock::now(); auto start = std::chrono::system_clock::now();
for (auto& key : edgeKeys) for (auto& key : edgeKeys)
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Writing edge key = " << ripple::strHex(key);
<< __func__
<< " writing edge key = " << ripple::strHex(key);
auto succ = backend_->cache().getSuccessor( auto succ = backend_->cache().getSuccessor(
*ripple::uint256::fromVoidChecked(key), sequence); *ripple::uint256::fromVoidChecked(key), sequence);
if (succ) if (succ)
@@ -840,8 +821,7 @@ ETLSourceImpl<Derived>::loadInitialLedger(
assert(succ); assert(succ);
if (succ->key == cur->key) if (succ->key == cur->key)
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Writing book successor = "
<< __func__ << " Writing book successor = "
<< ripple::strHex(base) << " - " << ripple::strHex(base) << " - "
<< ripple::strHex(cur->key); << ripple::strHex(cur->key);
@@ -855,8 +835,7 @@ ETLSourceImpl<Derived>::loadInitialLedger(
} }
prev = std::move(cur->key); prev = std::move(cur->key);
if (numWrites % 100000 == 0 && numWrites != 0) if (numWrites % 100000 == 0 && numWrites != 0)
BOOST_LOG_TRIVIAL(info) << __func__ << " Wrote " log_.info() << "Wrote " << numWrites << " book successors";
<< numWrites << " book successors";
} }
backend_->writeSuccessor( backend_->writeSuccessor(
@@ -869,9 +848,8 @@ ETLSourceImpl<Derived>::loadInitialLedger(
auto seconds = auto seconds =
std::chrono::duration_cast<std::chrono::seconds>(end - start) std::chrono::duration_cast<std::chrono::seconds>(end - start)
.count(); .count();
BOOST_LOG_TRIVIAL(info) log_.info()
<< __func__ << "Looping through cache and submitting all writes took "
<< " - Looping through cache and submitting all writes took "
<< seconds << seconds
<< " seconds. numWrites = " << std::to_string(numWrites); << " seconds. numWrites = " << std::to_string(numWrites);
} }
@@ -902,8 +880,7 @@ ETLSourceImpl<Derived>::fetchLedger(
grpc::Status status = stub_->GetLedger(&context, request, &response); grpc::Status status = stub_->GetLedger(&context, request, &response);
if (status.ok() && !response.is_unlimited()) if (status.ok() && !response.is_unlimited())
{ {
BOOST_LOG_TRIVIAL(warning) log_.warn() << "ETLSourceImpl::fetchLedger - is_unlimited is "
<< "ETLSourceImpl::fetchLedger - is_unlimited is "
"false. Make sure secure_gateway is set " "false. Make sure secure_gateway is set "
"correctly on the ETL source. source = " "correctly on the ETL source. source = "
<< toString() << " status = " << status.error_message(); << toString() << " status = " << status.error_message();
@@ -951,8 +928,7 @@ ETLLoadBalancer::ETLLoadBalancer(
entry, ioContext, backend, subscriptions, nwvl, *this); entry, ioContext, backend, subscriptions, nwvl, *this);
sources_.push_back(std::move(source)); sources_.push_back(std::move(source));
BOOST_LOG_TRIVIAL(info) << __func__ << " : added etl source - " log_.info() << "Added etl source - " << sources_.back()->toString();
<< sources_.back()->toString();
} }
} }
@@ -965,7 +941,7 @@ ETLLoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly)
source->loadInitialLedger(sequence, downloadRanges_, cacheOnly); source->loadInitialLedger(sequence, downloadRanges_, cacheOnly);
if (!res) if (!res)
{ {
BOOST_LOG_TRIVIAL(error) << "Failed to download initial ledger." log_.error() << "Failed to download initial ledger."
<< " Sequence = " << sequence << " Sequence = " << sequence
<< " source = " << source->toString(); << " source = " << source->toString();
} }
@@ -982,26 +958,24 @@ ETLLoadBalancer::fetchLedger(
{ {
org::xrpl::rpc::v1::GetLedgerResponse response; org::xrpl::rpc::v1::GetLedgerResponse response;
bool success = execute( bool success = execute(
[&response, ledgerSequence, getObjects, getObjectNeighbors]( [&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_](
auto& source) { auto& source) {
auto [status, data] = source->fetchLedger( auto [status, data] = source->fetchLedger(
ledgerSequence, getObjects, getObjectNeighbors); ledgerSequence, getObjects, getObjectNeighbors);
response = std::move(data); response = std::move(data);
if (status.ok() && response.validated()) if (status.ok() && response.validated())
{ {
BOOST_LOG_TRIVIAL(info) log.info() << "Successfully fetched ledger = " << ledgerSequence
<< "Successfully fetched ledger = " << ledgerSequence
<< " from source = " << source->toString(); << " from source = " << source->toString();
return true; return true;
} }
else else
{ {
BOOST_LOG_TRIVIAL(warning) log.warn() << "Error getting ledger = " << ledgerSequence
<< "Error getting ledger = " << ledgerSequence << ", Reply: " << response.DebugString()
<< " Reply : " << response.DebugString() << ", error_code: " << status.error_code()
<< " error_code : " << status.error_code() << ", error_msg: " << status.error_message()
<< " error_msg : " << status.error_message() << ", source = " << source->toString();
<< " source = " << source->toString();
return false; return false;
} }
}, },
@@ -1042,7 +1016,7 @@ ETLSourceImpl<Derived>::forwardToRippled(
{ {
if (auto resp = forwardCache_.get(request); resp) if (auto resp = forwardCache_.get(request); resp)
{ {
BOOST_LOG_TRIVIAL(debug) << "request hit forwardCache"; log_.debug() << "request hit forwardCache";
return resp; return resp;
} }
@@ -1056,14 +1030,13 @@ ETLSourceImpl<Derived>::requestFromRippled(
std::string const& clientIp, std::string const& clientIp,
boost::asio::yield_context& yield) const boost::asio::yield_context& yield) const
{ {
BOOST_LOG_TRIVIAL(trace) << "Attempting to forward request to tx. " log_.trace() << "Attempting to forward request to tx. "
<< "request = " << boost::json::serialize(request); << "request = " << boost::json::serialize(request);
boost::json::object response; boost::json::object response;
if (!connected_) if (!connected_)
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "Attempted to proxy but failed to connect to tx";
<< "Attempted to proxy but failed to connect to tx";
return {}; return {};
} }
namespace beast = boost::beast; // from <boost/beast.hpp> namespace beast = boost::beast; // from <boost/beast.hpp>
@@ -1077,7 +1050,7 @@ ETLSourceImpl<Derived>::requestFromRippled(
// These objects perform our I/O // These objects perform our I/O
tcp::resolver resolver{ioc_}; tcp::resolver resolver{ioc_};
BOOST_LOG_TRIVIAL(trace) << "Creating websocket"; log_.trace() << "Creating websocket";
auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(ioc_); auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(ioc_);
// Look up the domain name // Look up the domain name
@@ -1087,7 +1060,7 @@ ETLSourceImpl<Derived>::requestFromRippled(
ws->next_layer().expires_after(std::chrono::seconds(3)); ws->next_layer().expires_after(std::chrono::seconds(3));
BOOST_LOG_TRIVIAL(trace) << "Connecting websocket"; log_.trace() << "Connecting websocket";
// Make the connection on the IP address we get from a lookup // Make the connection on the IP address we get from a lookup
ws->next_layer().async_connect(results, yield[ec]); ws->next_layer().async_connect(results, yield[ec]);
if (ec) if (ec)
@@ -1106,15 +1079,15 @@ ETLSourceImpl<Derived>::requestFromRippled(
" websocket-client-coro"); " websocket-client-coro");
req.set(http::field::forwarded, "for=" + clientIp); req.set(http::field::forwarded, "for=" + clientIp);
})); }));
BOOST_LOG_TRIVIAL(trace) << "client ip: " << clientIp; log_.trace() << "client ip: " << clientIp;
BOOST_LOG_TRIVIAL(trace) << "Performing websocket handshake"; log_.trace() << "Performing websocket handshake";
// Perform the websocket handshake // Perform the websocket handshake
ws->async_handshake(ip_, "/", yield[ec]); ws->async_handshake(ip_, "/", yield[ec]);
if (ec) if (ec)
return {}; return {};
BOOST_LOG_TRIVIAL(trace) << "Sending request"; log_.trace() << "Sending request";
// Send the message // Send the message
ws->async_write( ws->async_write(
net::buffer(boost::json::serialize(request)), yield[ec]); net::buffer(boost::json::serialize(request)), yield[ec]);
@@ -1132,11 +1105,11 @@ ETLSourceImpl<Derived>::requestFromRippled(
if (!parsed.is_object()) if (!parsed.is_object())
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "Error parsing response: "
<< "Error parsing response: " << std::string{begin, end}; << std::string{begin, end};
return {}; return {};
} }
BOOST_LOG_TRIVIAL(trace) << "Successfully forward request"; log_.trace() << "Successfully forward request";
response = parsed.as_object(); response = parsed.as_object();
@@ -1145,7 +1118,7 @@ ETLSourceImpl<Derived>::requestFromRippled(
} }
catch (std::exception const& e) catch (std::exception const& e)
{ {
BOOST_LOG_TRIVIAL(error) << "Encountered exception : " << e.what(); log_.error() << "Encountered exception : " << e.what();
return {}; return {};
} }
} }
@@ -1162,45 +1135,36 @@ ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence)
{ {
auto& source = sources_[sourceIdx]; auto& source = sources_[sourceIdx];
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Attempting to execute func. ledger sequence = "
<< __func__ << " : "
<< "Attempting to execute func. ledger sequence = "
<< ledgerSequence << " - source = " << source->toString(); << ledgerSequence << " - source = " << source->toString();
if (source->hasLedger(ledgerSequence) || true) if (source->hasLedger(ledgerSequence) || true)
{ {
bool res = f(source); bool res = f(source);
if (res) if (res)
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Successfully executed func at source = "
<< __func__ << " : "
<< "Successfully executed func at source = "
<< source->toString() << source->toString()
<< " - ledger sequence = " << ledgerSequence; << " - ledger sequence = " << ledgerSequence;
break; break;
} }
else else
{ {
BOOST_LOG_TRIVIAL(warning) log_.warn() << "Failed to execute func at source = "
<< __func__ << " : "
<< "Failed to execute func at source = "
<< source->toString() << source->toString()
<< " - ledger sequence = " << ledgerSequence; << " - ledger sequence = " << ledgerSequence;
} }
} }
else else
{ {
BOOST_LOG_TRIVIAL(warning) log_.warn() << "Ledger not present at source = "
<< __func__ << " : " << source->toString()
<< "Ledger not present at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence; << " - ledger sequence = " << ledgerSequence;
} }
sourceIdx = (sourceIdx + 1) % sources_.size(); sourceIdx = (sourceIdx + 1) % sources_.size();
numAttempts++; numAttempts++;
if (numAttempts % sources_.size() == 0) if (numAttempts % sources_.size() == 0)
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "Error executing function "
<< __func__ << " : "
<< "Error executing function "
<< " - ledger sequence = " << ledgerSequence << " - ledger sequence = " << ledgerSequence
<< " - Tried all sources. Sleeping and trying again"; << " - Tried all sources. Sleeping and trying again";
std::this_thread::sleep_for(std::chrono::seconds(2)); std::this_thread::sleep_for(std::chrono::seconds(2));

View File

@@ -1,6 +1,15 @@
#ifndef RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED #ifndef RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED
#define RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED #define RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED
#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <etl/ETLHelpers.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>
#include <boost/algorithm/string.hpp> #include <boost/algorithm/string.hpp>
#include <boost/asio.hpp> #include <boost/asio.hpp>
#include <boost/beast/core.hpp> #include <boost/beast/core.hpp>
@@ -8,14 +17,6 @@
#include <boost/beast/ssl.hpp> #include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp> #include <boost/beast/websocket.hpp>
#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <etl/ETLHelpers.h>
#include <subscriptions/SubscriptionManager.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>
class ETLLoadBalancer; class ETLLoadBalancer;
class ETLSource; class ETLSource;
class ProbingETLSource; class ProbingETLSource;
@@ -32,6 +33,7 @@ class ForwardCache
{ {
using response_type = std::optional<boost::json::object>; using response_type = std::optional<boost::json::object>;
clio::Logger log_{"ETL"};
mutable std::atomic_bool stopping_ = false; mutable std::atomic_bool stopping_ = false;
mutable std::shared_mutex mtx_; mutable std::shared_mutex mtx_;
std::unordered_map<std::string, response_type> latestForwarded_; std::unordered_map<std::string, response_type> latestForwarded_;
@@ -127,6 +129,9 @@ public:
{ {
} }
protected:
clio::Logger log_{"ETL"};
private: private:
friend ForwardCache; friend ForwardCache;
friend ProbingETLSource; friend ProbingETLSource;
@@ -161,7 +166,7 @@ class ETLSourceImpl : public ETLSource
std::vector<std::pair<uint32_t, uint32_t>> validatedLedgers_; std::vector<std::pair<uint32_t, uint32_t>> validatedLedgers_;
std::string validatedLedgersRaw_; std::string validatedLedgersRaw_{"N/A"};
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers_; std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers_;
@@ -218,7 +223,7 @@ protected:
void void
run() override run() override
{ {
BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << toString(); log_.trace() << toString();
auto const host = ip_; auto const host = ip_;
auto const port = wsPort_; auto const port = wsPort_;
@@ -292,13 +297,11 @@ public:
stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub( stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
grpc::CreateCustomChannel( grpc::CreateCustomChannel(
ss.str(), grpc::InsecureChannelCredentials(), chArgs)); ss.str(), grpc::InsecureChannelCredentials(), chArgs));
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Made stub for remote = " << toString();
<< "Made stub for remote = " << toString();
} }
catch (std::exception const& e) catch (std::exception const& e)
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Exception while creating stub = " << e.what()
<< "Exception while creating stub = " << e.what()
<< " . Remote = " << toString(); << " . Remote = " << toString();
} }
} }
@@ -371,7 +374,6 @@ public:
getValidatedRange() const getValidatedRange() const
{ {
std::lock_guard lck(mtx_); std::lock_guard lck(mtx_);
return validatedLedgersRaw_; return validatedLedgersRaw_;
} }
@@ -389,9 +391,8 @@ public:
std::string std::string
toString() const override toString() const override
{ {
return "{ validated_ledger : " + getValidatedRange() + return "{validated_ledger: " + getValidatedRange() + ", ip: " + ip_ +
" , ip : " + ip_ + " , web socket port : " + wsPort_ + ", web socket port: " + wsPort_ + ", grpc port: " + grpcPort_ + "}";
", grpc port : " + grpcPort_ + " }";
} }
boost::json::object boost::json::object
@@ -592,8 +593,8 @@ public:
class ETLLoadBalancer class ETLLoadBalancer
{ {
private: private:
clio::Logger log_{"ETL"};
std::vector<std::unique_ptr<ETLSource>> sources_; std::vector<std::unique_ptr<ETLSource>> sources_;
std::uint32_t downloadRanges_ = 16; std::uint32_t downloadRanges_ = 16;
public: public:

View File

@@ -107,8 +107,7 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
NFTsData(tokenIDResult.front(), *owner, txMeta, false)}; NFTsData(tokenIDResult.front(), *owner, txMeta, false)};
std::stringstream msg; std::stringstream msg;
msg << __func__ << " - unexpected NFTokenMint data in tx " msg << " - unexpected NFTokenMint data in tx " << sttx.getTransactionID();
<< sttx.getTransactionID();
throw std::runtime_error(msg.str()); throw std::runtime_error(msg.str());
} }
@@ -173,7 +172,7 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
} }
std::stringstream msg; std::stringstream msg;
msg << __func__ << " - could not determine owner at burntime for tx " msg << " - could not determine owner at burntime for tx "
<< sttx.getTransactionID(); << sttx.getTransactionID();
throw std::runtime_error(msg.str()); throw std::runtime_error(msg.str());
} }
@@ -198,7 +197,7 @@ getNFTokenAcceptOfferData(
if (affectedBuyOffer == txMeta.getNodes().end()) if (affectedBuyOffer == txMeta.getNodes().end())
{ {
std::stringstream msg; std::stringstream msg;
msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx " msg << " - unexpected NFTokenAcceptOffer data in tx "
<< sttx.getTransactionID(); << sttx.getTransactionID();
throw std::runtime_error(msg.str()); throw std::runtime_error(msg.str());
} }
@@ -228,7 +227,7 @@ getNFTokenAcceptOfferData(
if (affectedSellOffer == txMeta.getNodes().end()) if (affectedSellOffer == txMeta.getNodes().end())
{ {
std::stringstream msg; std::stringstream msg;
msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx " msg << " - unexpected NFTokenAcceptOffer data in tx "
<< sttx.getTransactionID(); << sttx.getTransactionID();
throw std::runtime_error(msg.str()); throw std::runtime_error(msg.str());
} }
@@ -278,7 +277,7 @@ getNFTokenAcceptOfferData(
} }
std::stringstream msg; std::stringstream msg;
msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx " msg << " - unexpected NFTokenAcceptOffer data in tx "
<< sttx.getTransactionID(); << sttx.getTransactionID();
throw std::runtime_error(msg.str()); throw std::runtime_error(msg.str());
} }

View File

@@ -1,4 +1,7 @@
#include <etl/ProbingETLSource.h> #include <etl/ProbingETLSource.h>
#include <log/Logger.h>
using namespace clio;
ProbingETLSource::ProbingETLSource( ProbingETLSource::ProbingETLSource(
clio::Config const& config, clio::Config const& config,
@@ -85,7 +88,8 @@ std::string
ProbingETLSource::toString() const ProbingETLSource::toString() const
{ {
if (!currentSrc_) if (!currentSrc_)
return "{ probing }"; return "{probing... ws: " + plainSrc_->toString() +
", wss: " + sslSrc_->toString() + "}";
return currentSrc_->toString(); return currentSrc_->toString();
} }
@@ -148,8 +152,7 @@ ProbingETLSource::make_SSLHooks() noexcept
{ {
plainSrc_->pause(); plainSrc_->pause();
currentSrc_ = sslSrc_; currentSrc_ = sslSrc_;
BOOST_LOG_TRIVIAL(info) log_.info() << "Selected WSS as the main source: "
<< "Selected WSS as the main source: "
<< currentSrc_->toString(); << currentSrc_->toString();
} }
return ETLSourceHooks::Action::PROCEED; return ETLSourceHooks::Action::PROCEED;
@@ -179,8 +182,7 @@ ProbingETLSource::make_PlainHooks() noexcept
{ {
sslSrc_->pause(); sslSrc_->pause();
currentSrc_ = plainSrc_; currentSrc_ = plainSrc_;
BOOST_LOG_TRIVIAL(info) log_.info() << "Selected Plain WS as the main source: "
<< "Selected Plain WS as the main source: "
<< currentSrc_->toString(); << currentSrc_->toString();
} }
return ETLSourceHooks::Action::PROCEED; return ETLSourceHooks::Action::PROCEED;

View File

@@ -11,6 +11,7 @@
#include <config/Config.h> #include <config/Config.h>
#include <etl/ETLSource.h> #include <etl/ETLSource.h>
#include <log/Logger.h>
/// This ETLSource implementation attempts to connect over both secure websocket /// This ETLSource implementation attempts to connect over both secure websocket
/// and plain websocket. First to connect pauses the other and the probing is /// and plain websocket. First to connect pauses the other and the probing is
@@ -18,6 +19,8 @@
/// connection the probing is kickstarted again. /// connection the probing is kickstarted again.
class ProbingETLSource : public ETLSource class ProbingETLSource : public ETLSource
{ {
clio::Logger log_{"ETL"};
std::mutex mtx_; std::mutex mtx_;
boost::asio::ssl::context sslCtx_; boost::asio::ssl::context sslCtx_;
std::shared_ptr<ETLSource> sslSrc_; std::shared_ptr<ETLSource> sslSrc_;

View File

@@ -1,20 +1,25 @@
#include <ripple/basics/StringUtilities.h> #include <ripple/basics/StringUtilities.h>
#include <ripple/beast/core/CurrentThreadName.h>
#include <backend/DBHelpers.h> #include <backend/DBHelpers.h>
#include <etl/ReportingETL.h> #include <etl/ReportingETL.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>
#include <ripple/beast/core/CurrentThreadName.h>
#include <boost/asio/connect.hpp> #include <boost/asio/connect.hpp>
#include <boost/asio/ip/tcp.hpp> #include <boost/asio/ip/tcp.hpp>
#include <boost/beast/core.hpp> #include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp> #include <boost/beast/websocket.hpp>
#include <cstdlib> #include <cstdlib>
#include <iostream> #include <iostream>
#include <string> #include <string>
#include <subscriptions/SubscriptionManager.h>
#include <thread> #include <thread>
#include <variant> #include <variant>
namespace detail { using namespace clio;
namespace clio::detail {
/// Convenience function for printing out basic ledger info /// Convenience function for printing out basic ledger info
std::string std::string
toString(ripple::LedgerInfo const& info) toString(ripple::LedgerInfo const& info)
@@ -26,7 +31,7 @@ toString(ripple::LedgerInfo const& info)
<< " ParentHash : " << strHex(info.parentHash) << " }"; << " ParentHash : " << strHex(info.parentHash) << " }";
return ss.str(); return ss.str();
} }
} // namespace detail } // namespace clio::detail
FormattedTransactionsData FormattedTransactionsData
ReportingETL::insertTransactions( ReportingETL::insertTransactions(
@@ -43,9 +48,7 @@ ReportingETL::insertTransactions(
ripple::SerialIter it{raw->data(), raw->size()}; ripple::SerialIter it{raw->data(), raw->size()};
ripple::STTx sttx{it}; ripple::STTx sttx{it};
BOOST_LOG_TRIVIAL(trace) log_.trace() << "Inserting transaction = " << sttx.getTransactionID();
<< __func__ << " : "
<< "Inserting transaction = " << sttx.getTransactionID();
ripple::TxMeta txMeta{ ripple::TxMeta txMeta{
sttx.getTransactionID(), ledger.seq, txn.metadata_blob()}; sttx.getTransactionID(), ledger.seq, txn.metadata_blob()};
@@ -97,8 +100,7 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
auto rng = backend_->hardFetchLedgerRangeNoThrow(); auto rng = backend_->hardFetchLedgerRangeNoThrow();
if (rng) if (rng)
{ {
BOOST_LOG_TRIVIAL(fatal) << __func__ << " : " log_.fatal() << "Database is not empty";
<< "Database is not empty";
assert(false); assert(false);
return {}; return {};
} }
@@ -114,23 +116,21 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
ripple::LedgerInfo lgrInfo = ripple::LedgerInfo lgrInfo =
deserializeHeader(ripple::makeSlice(ledgerData->ledger_header())); deserializeHeader(ripple::makeSlice(ledgerData->ledger_header()));
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Deserialized ledger header. " << detail::toString(lgrInfo);
<< __func__ << " : "
<< "Deserialized ledger header. " << detail::toString(lgrInfo);
auto start = std::chrono::system_clock::now(); auto start = std::chrono::system_clock::now();
backend_->startWrites(); backend_->startWrites();
BOOST_LOG_TRIVIAL(debug) << __func__ << " started writes"; log_.debug() << "Started writes";
backend_->writeLedger( backend_->writeLedger(
lgrInfo, std::move(*ledgerData->mutable_ledger_header())); lgrInfo, std::move(*ledgerData->mutable_ledger_header()));
BOOST_LOG_TRIVIAL(debug) << __func__ << " wrote ledger"; log_.debug() << "Wrote ledger";
FormattedTransactionsData insertTxResult = FormattedTransactionsData insertTxResult =
insertTransactions(lgrInfo, *ledgerData); insertTransactions(lgrInfo, *ledgerData);
BOOST_LOG_TRIVIAL(debug) << __func__ << " inserted txns"; log_.debug() << "Inserted txns";
// download the full account state map. This function downloads full ledger // download the full account state map. This function downloads full ledger
// data and pushes the downloaded data into the writeQueue. asyncWriter // data and pushes the downloaded data into the writeQueue. asyncWriter
@@ -138,7 +138,7 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
// Once the below call returns, all data has been pushed into the queue // Once the below call returns, all data has been pushed into the queue
loadBalancer_->loadInitialLedger(startingSequence); loadBalancer_->loadInitialLedger(startingSequence);
BOOST_LOG_TRIVIAL(debug) << __func__ << " loaded initial ledger"; log_.debug() << "Loaded initial ledger";
if (!stopping_) if (!stopping_)
{ {
@@ -150,7 +150,7 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
backend_->finishWrites(startingSequence); backend_->finishWrites(startingSequence);
auto end = std::chrono::system_clock::now(); auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug) << "Time to download and store ledger = " log_.debug() << "Time to download and store ledger = "
<< ((end - start).count()) / 1000000000.0; << ((end - start).count()) / 1000000000.0;
return lgrInfo; return lgrInfo;
} }
@@ -158,12 +158,11 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
void void
ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo) ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo)
{ {
BOOST_LOG_TRIVIAL(debug) log_.info() << "Publishing ledger " << std::to_string(lgrInfo.seq);
<< __func__ << " - Publishing ledger " << std::to_string(lgrInfo.seq);
if (!writing_) if (!writing_)
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " - Updating cache"; log_.info() << "Updating cache";
std::vector<Backend::LedgerObject> diff = std::vector<Backend::LedgerObject> diff =
Backend::synchronousAndRetryOnTimeout([&](auto yield) { Backend::synchronousAndRetryOnTimeout([&](auto yield) {
@@ -205,11 +204,10 @@ ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo)
subscriptions_->pubBookChanges(lgrInfo, transactions); subscriptions_->pubBookChanges(lgrInfo, transactions);
BOOST_LOG_TRIVIAL(info) << __func__ << " - Published ledger " log_.info() << "Published ledger " << std::to_string(lgrInfo.seq);
<< std::to_string(lgrInfo.seq);
} }
else else
BOOST_LOG_TRIVIAL(info) << __func__ << " - Skipping publishing ledger " log_.info() << "Skipping publishing ledger "
<< std::to_string(lgrInfo.seq); << std::to_string(lgrInfo.seq);
setLastPublish(); setLastPublish();
} }
@@ -219,9 +217,7 @@ ReportingETL::publishLedger(
uint32_t ledgerSequence, uint32_t ledgerSequence,
std::optional<uint32_t> maxAttempts) std::optional<uint32_t> maxAttempts)
{ {
BOOST_LOG_TRIVIAL(info) log_.info() << "Attempting to publish ledger = " << ledgerSequence;
<< __func__ << " : "
<< "Attempting to publish ledger = " << ledgerSequence;
size_t numAttempts = 0; size_t numAttempts = 0;
while (!stopping_) while (!stopping_)
{ {
@@ -229,17 +225,15 @@ ReportingETL::publishLedger(
if (!range || range->maxSequence < ledgerSequence) if (!range || range->maxSequence < ledgerSequence)
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " : " log_.debug() << "Trying to publish. Could not find "
<< "Trying to publish. Could not find "
"ledger with sequence = " "ledger with sequence = "
<< ledgerSequence; << ledgerSequence;
// We try maxAttempts times to publish the ledger, waiting one // We try maxAttempts times to publish the ledger, waiting one
// second in between each attempt. // second in between each attempt.
if (maxAttempts && numAttempts >= maxAttempts) if (maxAttempts && numAttempts >= maxAttempts)
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " : " log_.debug() << "Failed to publish ledger after " << numAttempts
<< "Failed to publish ledger after " << " attempts.";
<< numAttempts << " attempts.";
return false; return false;
} }
std::this_thread::sleep_for(std::chrono::seconds(1)); std::this_thread::sleep_for(std::chrono::seconds(1));
@@ -264,25 +258,19 @@ ReportingETL::publishLedger(
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
ReportingETL::fetchLedgerData(uint32_t seq) ReportingETL::fetchLedgerData(uint32_t seq)
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Attempting to fetch ledger with sequence = " << seq;
<< __func__ << " : "
<< "Attempting to fetch ledger with sequence = " << seq;
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> response = std::optional<org::xrpl::rpc::v1::GetLedgerResponse> response =
loadBalancer_->fetchLedger(seq, false, false); loadBalancer_->fetchLedger(seq, false, false);
if (response) if (response)
BOOST_LOG_TRIVIAL(trace) log_.trace() << "GetLedger reply = " << response->DebugString();
<< __func__ << " : "
<< "GetLedger reply = " << response->DebugString();
return response; return response;
} }
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
ReportingETL::fetchLedgerDataAndDiff(uint32_t seq) ReportingETL::fetchLedgerDataAndDiff(uint32_t seq)
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Attempting to fetch ledger with sequence = " << seq;
<< __func__ << " : "
<< "Attempting to fetch ledger with sequence = " << seq;
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> response = std::optional<org::xrpl::rpc::v1::GetLedgerResponse> response =
loadBalancer_->fetchLedger( loadBalancer_->fetchLedger(
@@ -291,45 +279,34 @@ ReportingETL::fetchLedgerDataAndDiff(uint32_t seq)
!backend_->cache().isFull() || !backend_->cache().isFull() ||
backend_->cache().latestLedgerSequence() >= seq); backend_->cache().latestLedgerSequence() >= seq);
if (response) if (response)
BOOST_LOG_TRIVIAL(trace) log_.trace() << "GetLedger reply = " << response->DebugString();
<< __func__ << " : "
<< "GetLedger reply = " << response->DebugString();
return response; return response;
} }
std::pair<ripple::LedgerInfo, bool> std::pair<ripple::LedgerInfo, bool>
ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " : " log_.debug() << "Beginning ledger update";
<< "Beginning ledger update";
ripple::LedgerInfo lgrInfo = ripple::LedgerInfo lgrInfo =
deserializeHeader(ripple::makeSlice(rawData.ledger_header())); deserializeHeader(ripple::makeSlice(rawData.ledger_header()));
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Deserialized ledger header. " << detail::toString(lgrInfo);
<< __func__ << " : "
<< "Deserialized ledger header. " << detail::toString(lgrInfo);
backend_->startWrites(); backend_->startWrites();
log_.debug() << "started writes";
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "started writes";
backend_->writeLedger(lgrInfo, std::move(*rawData.mutable_ledger_header())); backend_->writeLedger(lgrInfo, std::move(*rawData.mutable_ledger_header()));
log_.debug() << "wrote ledger header";
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "wrote ledger header";
// Write successor info, if included from rippled // Write successor info, if included from rippled
if (rawData.object_neighbors_included()) if (rawData.object_neighbors_included())
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " object neighbors included"; log_.debug() << "object neighbors included";
for (auto& obj : *(rawData.mutable_book_successors())) for (auto& obj : *(rawData.mutable_book_successors()))
{ {
auto firstBook = std::move(*obj.mutable_first_book()); auto firstBook = std::move(*obj.mutable_first_book());
if (!firstBook.size()) if (!firstBook.size())
firstBook = uint256ToString(Backend::lastKey); firstBook = uint256ToString(Backend::lastKey);
BOOST_LOG_TRIVIAL(debug) << __func__ << " writing book successor " log_.debug() << "writing book successor "
<< ripple::strHex(obj.book_base()) << " - " << ripple::strHex(obj.book_base()) << " - "
<< ripple::strHex(firstBook); << ripple::strHex(firstBook);
@@ -352,9 +329,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
if (obj.mod_type() == if (obj.mod_type() ==
org::xrpl::rpc::v1::RawLedgerObject::DELETED) org::xrpl::rpc::v1::RawLedgerObject::DELETED)
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Modifying successors for deleted object "
<< __func__
<< " modifying successors for deleted object "
<< ripple::strHex(obj.key()) << " - " << ripple::strHex(obj.key()) << " - "
<< ripple::strHex(*predPtr) << " - " << ripple::strHex(*predPtr) << " - "
<< ripple::strHex(*succPtr); << ripple::strHex(*succPtr);
@@ -364,8 +339,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
} }
else else
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "adding successor for new object "
<< __func__ << " adding successor for new object "
<< ripple::strHex(obj.key()) << " - " << ripple::strHex(obj.key()) << " - "
<< ripple::strHex(*predPtr) << " - " << ripple::strHex(*predPtr) << " - "
<< ripple::strHex(*succPtr); << ripple::strHex(*succPtr);
@@ -381,8 +355,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
} }
} }
else else
BOOST_LOG_TRIVIAL(debug) << __func__ << " object modified " log_.debug() << "object modified " << ripple::strHex(obj.key());
<< ripple::strHex(obj.key());
} }
} }
std::vector<Backend::LedgerObject> cacheUpdates; std::vector<Backend::LedgerObject> cacheUpdates;
@@ -396,15 +369,13 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
assert(key); assert(key);
cacheUpdates.push_back( cacheUpdates.push_back(
{*key, {obj.mutable_data()->begin(), obj.mutable_data()->end()}}); {*key, {obj.mutable_data()->begin(), obj.mutable_data()->end()}});
BOOST_LOG_TRIVIAL(debug) log_.debug() << "key = " << ripple::strHex(*key)
<< __func__ << " key = " << ripple::strHex(*key)
<< " - mod type = " << obj.mod_type(); << " - mod type = " << obj.mod_type();
if (obj.mod_type() != org::xrpl::rpc::v1::RawLedgerObject::MODIFIED && if (obj.mod_type() != org::xrpl::rpc::v1::RawLedgerObject::MODIFIED &&
!rawData.object_neighbors_included()) !rawData.object_neighbors_included())
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "object neighbors not included. using cache";
<< __func__ << " object neighbors not included. using cache";
if (!backend_->cache().isFull() || if (!backend_->cache().isFull() ||
backend_->cache().latestLedgerSequence() != lgrInfo.seq - 1) backend_->cache().latestLedgerSequence() != lgrInfo.seq - 1)
throw std::runtime_error( throw std::runtime_error(
@@ -423,9 +394,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
checkBookBase = isBookDir(*key, *blob); checkBookBase = isBookDir(*key, *blob);
if (checkBookBase) if (checkBookBase)
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Is book dir. key = " << ripple::strHex(*key);
<< __func__
<< " is book dir. key = " << ripple::strHex(*key);
auto bookBase = getBookBase(*key); auto bookBase = getBookBase(*key);
auto oldFirstDir = auto oldFirstDir =
backend_->cache().getSuccessor(bookBase, lgrInfo.seq - 1); backend_->cache().getSuccessor(bookBase, lgrInfo.seq - 1);
@@ -435,8 +404,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
if ((isDeleted && key == oldFirstDir->key) || if ((isDeleted && key == oldFirstDir->key) ||
(!isDeleted && key < oldFirstDir->key)) (!isDeleted && key < oldFirstDir->key))
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug()
<< __func__
<< "Need to recalculate book base successor. base = " << "Need to recalculate book base successor. base = "
<< ripple::strHex(bookBase) << ripple::strHex(bookBase)
<< " - key = " << ripple::strHex(*key) << " - key = " << ripple::strHex(*key)
@@ -458,8 +426,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
// rippled didn't send successor information, so use our cache // rippled didn't send successor information, so use our cache
if (!rawData.object_neighbors_included()) if (!rawData.object_neighbors_included())
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "object neighbors not included. using cache";
<< __func__ << " object neighbors not included. using cache";
if (!backend_->cache().isFull() || if (!backend_->cache().isFull() ||
backend_->cache().latestLedgerSequence() != lgrInfo.seq) backend_->cache().latestLedgerSequence() != lgrInfo.seq)
throw std::runtime_error( throw std::runtime_error(
@@ -477,8 +444,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
ub = {Backend::lastKey, {}}; ub = {Backend::lastKey, {}};
if (obj.blob.size() == 0) if (obj.blob.size() == 0)
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "writing successor for deleted object "
<< __func__ << " writing successor for deleted object "
<< ripple::strHex(obj.key) << " - " << ripple::strHex(obj.key) << " - "
<< ripple::strHex(lb->key) << " - " << ripple::strHex(lb->key) << " - "
<< ripple::strHex(ub->key); << ripple::strHex(ub->key);
@@ -499,8 +465,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
lgrInfo.seq, lgrInfo.seq,
uint256ToString(ub->key)); uint256ToString(ub->key));
BOOST_LOG_TRIVIAL(debug) log_.debug() << "writing successor for new object "
<< __func__ << " writing successor for new object "
<< ripple::strHex(lb->key) << " - " << ripple::strHex(lb->key) << " - "
<< ripple::strHex(obj.key) << " - " << ripple::strHex(obj.key) << " - "
<< ripple::strHex(ub->key); << ripple::strHex(ub->key);
@@ -516,10 +481,9 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
lgrInfo.seq, lgrInfo.seq,
uint256ToString(succ->key)); uint256ToString(succ->key));
BOOST_LOG_TRIVIAL(debug) log_.debug()
<< __func__ << " Updating book successor " << "Updating book successor " << ripple::strHex(base)
<< ripple::strHex(base) << " - " << " - " << ripple::strHex(succ->key);
<< ripple::strHex(succ->key);
} }
else else
{ {
@@ -528,42 +492,33 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
lgrInfo.seq, lgrInfo.seq,
uint256ToString(Backend::lastKey)); uint256ToString(Backend::lastKey));
BOOST_LOG_TRIVIAL(debug) log_.debug()
<< __func__ << " Updating book successor " << "Updating book successor " << ripple::strHex(base)
<< ripple::strHex(base) << " - " << " - " << ripple::strHex(Backend::lastKey);
<< ripple::strHex(Backend::lastKey);
} }
} }
} }
BOOST_LOG_TRIVIAL(debug) log_.debug()
<< __func__ << " : "
<< "Inserted/modified/deleted all objects. Number of objects = " << "Inserted/modified/deleted all objects. Number of objects = "
<< rawData.ledger_objects().objects_size(); << rawData.ledger_objects().objects_size();
FormattedTransactionsData insertTxResult = FormattedTransactionsData insertTxResult =
insertTransactions(lgrInfo, rawData); insertTransactions(lgrInfo, rawData);
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Inserted all transactions. Number of transactions = "
<< __func__ << " : "
<< "Inserted all transactions. Number of transactions = "
<< rawData.transactions_list().transactions_size(); << rawData.transactions_list().transactions_size();
backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData)); backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData));
backend_->writeNFTs(std::move(insertTxResult.nfTokensData)); backend_->writeNFTs(std::move(insertTxResult.nfTokensData));
backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData)); backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData));
BOOST_LOG_TRIVIAL(debug) << __func__ << " : " log_.debug() << "wrote account_tx";
<< "wrote account_tx";
auto start = std::chrono::system_clock::now(); auto start = std::chrono::system_clock::now();
bool success = backend_->finishWrites(lgrInfo.seq); bool success = backend_->finishWrites(lgrInfo.seq);
auto end = std::chrono::system_clock::now(); auto end = std::chrono::system_clock::now();
auto duration = ((end - start).count()) / 1000000000.0; auto duration = ((end - start).count()) / 1000000000.0;
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " finished writes. took " << std::to_string(duration);
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Finished writes. took " << std::to_string(duration);
<< __func__ << " : " log_.debug() << "Finished ledger update. " << detail::toString(lgrInfo);
<< "Finished ledger update. " << detail::toString(lgrInfo);
return {lgrInfo, success}; return {lgrInfo, success};
} }
@@ -596,8 +551,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
* optional, returns. * optional, returns.
*/ */
BOOST_LOG_TRIVIAL(debug) << __func__ << " : " log_.debug() << "Starting etl pipeline";
<< "Starting etl pipeline";
writing_ = true; writing_ = true;
auto rng = backend_->hardFetchLedgerRangeNoThrow(); auto rng = backend_->hardFetchLedgerRangeNoThrow();
@@ -670,8 +624,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
fetchResponse->transactions_list().transactions_size() / fetchResponse->transactions_list().transactions_size() /
time; time;
BOOST_LOG_TRIVIAL(info) log_.info() << "Extract phase time = " << time
<< "Extract phase time = " << time
<< " . Extract phase tps = " << tps << " . Extract phase tps = " << tps
<< " . Avg extract time = " << " . Avg extract time = "
<< totalTime / (currentSequence - startSequence + 1) << totalTime / (currentSequence - startSequence + 1)
@@ -720,7 +673,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
auto duration = ((end - start).count()) / 1000000000.0; auto duration = ((end - start).count()) / 1000000000.0;
if (success) if (success)
BOOST_LOG_TRIVIAL(info) log_.info()
<< "Load phase of etl : " << "Load phase of etl : "
<< "Successfully wrote ledger! Ledger info: " << "Successfully wrote ledger! Ledger info: "
<< detail::toString(lgrInfo) << ". txn count = " << numTxns << detail::toString(lgrInfo) << ". txn count = " << numTxns
@@ -729,7 +682,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
<< ". load txns per second = " << numTxns / duration << ". load txns per second = " << numTxns / duration
<< ". load objs per second = " << numObjects / duration; << ". load objs per second = " << numObjects / duration;
else else
BOOST_LOG_TRIVIAL(error) log_.error()
<< "Error writing ledger. " << detail::toString(lgrInfo); << "Error writing ledger. " << detail::toString(lgrInfo);
// success is false if the ledger was already written // success is false if the ledger was already written
if (success) if (success)
@@ -747,7 +700,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
{ {
deleting_ = true; deleting_ = true;
ioContext_.post([this, &minSequence]() { ioContext_.post([this, &minSequence]() {
BOOST_LOG_TRIVIAL(info) << "Running online delete"; log_.info() << "Running online delete";
Backend::synchronous( Backend::synchronous(
[&](boost::asio::yield_context& yield) { [&](boost::asio::yield_context& yield) {
@@ -755,7 +708,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
*onlineDeleteInterval_, yield); *onlineDeleteInterval_, yield);
}); });
BOOST_LOG_TRIVIAL(info) << "Finished online delete"; log_.info() << "Finished online delete";
auto rng = backend_->fetchLedgerRange(); auto rng = backend_->fetchLedgerRange();
minSequence = rng->minSequence; minSequence = rng->minSequence;
deleting_ = false; deleting_ = false;
@@ -774,13 +727,12 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
for (auto& t : extractors) for (auto& t : extractors)
t.join(); t.join();
auto end = std::chrono::system_clock::now(); auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Extracted and wrote "
<< "Extracted and wrote " << *lastPublishedSequence - startSequence << *lastPublishedSequence - startSequence << " in "
<< " in " << ((end - begin).count()) / 1000000000.0; << ((end - begin).count()) / 1000000000.0;
writing_ = false; writing_ = false;
BOOST_LOG_TRIVIAL(debug) << __func__ << " : " log_.debug() << "Stopping etl pipeline";
<< "Stopping etl pipeline";
return lastPublishedSequence; return lastPublishedSequence;
} }
@@ -801,38 +753,32 @@ ReportingETL::monitor()
auto rng = backend_->hardFetchLedgerRangeNoThrow(); auto rng = backend_->hardFetchLedgerRangeNoThrow();
if (!rng) if (!rng)
{ {
BOOST_LOG_TRIVIAL(info) << __func__ << " : " log_.info() << "Database is empty. Will download a ledger "
<< "Database is empty. Will download a ledger "
"from the network."; "from the network.";
std::optional<ripple::LedgerInfo> ledger; std::optional<ripple::LedgerInfo> ledger;
if (startSequence_) if (startSequence_)
{ {
BOOST_LOG_TRIVIAL(info) log_.info() << "ledger sequence specified in config. "
<< __func__ << " : "
<< "ledger sequence specified in config. "
<< "Will begin ETL process starting with ledger " << "Will begin ETL process starting with ledger "
<< *startSequence_; << *startSequence_;
ledger = loadInitialLedger(*startSequence_); ledger = loadInitialLedger(*startSequence_);
} }
else else
{ {
BOOST_LOG_TRIVIAL(info) log_.info()
<< __func__ << " : "
<< "Waiting for next ledger to be validated by network..."; << "Waiting for next ledger to be validated by network...";
std::optional<uint32_t> mostRecentValidated = std::optional<uint32_t> mostRecentValidated =
networkValidatedLedgers_->getMostRecent(); networkValidatedLedgers_->getMostRecent();
if (mostRecentValidated) if (mostRecentValidated)
{ {
BOOST_LOG_TRIVIAL(info) << __func__ << " : " log_.info() << "Ledger " << *mostRecentValidated
<< "Ledger " << *mostRecentValidated
<< " has been validated. " << " has been validated. "
<< "Downloading..."; << "Downloading...";
ledger = loadInitialLedger(*mostRecentValidated); ledger = loadInitialLedger(*mostRecentValidated);
} }
else else
{ {
BOOST_LOG_TRIVIAL(info) << __func__ << " : " log_.info() << "The wait for the next validated "
<< "The wait for the next validated "
<< "ledger has been aborted. " << "ledger has been aborted. "
<< "Exiting monitor loop"; << "Exiting monitor loop";
return; return;
@@ -842,8 +788,7 @@ ReportingETL::monitor()
rng = backend_->hardFetchLedgerRangeNoThrow(); rng = backend_->hardFetchLedgerRangeNoThrow();
else else
{ {
BOOST_LOG_TRIVIAL(error) log_.error()
<< __func__ << " : "
<< "Failed to load initial ledger. Exiting monitor loop"; << "Failed to load initial ledger. Exiting monitor loop";
return; return;
} }
@@ -852,20 +797,17 @@ ReportingETL::monitor()
{ {
if (startSequence_) if (startSequence_)
{ {
BOOST_LOG_TRIVIAL(warning) log_.warn()
<< "start sequence specified but db is already populated"; << "start sequence specified but db is already populated";
} }
BOOST_LOG_TRIVIAL(info) log_.info()
<< __func__ << " : "
<< "Database already populated. Picking up from the tip of history"; << "Database already populated. Picking up from the tip of history";
loadCache(rng->maxSequence); loadCache(rng->maxSequence);
} }
assert(rng); assert(rng);
uint32_t nextSequence = rng->maxSequence + 1; uint32_t nextSequence = rng->maxSequence + 1;
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Database is populated. "
<< __func__ << " : "
<< "Database is populated. "
<< "Starting monitor loop. sequence = " << nextSequence; << "Starting monitor loop. sequence = " << nextSequence;
while (true) while (true)
{ {
@@ -878,9 +820,7 @@ ReportingETL::monitor()
else if (networkValidatedLedgers_->waitUntilValidatedByNetwork( else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(
nextSequence, 1000)) nextSequence, 1000))
{ {
BOOST_LOG_TRIVIAL(info) log_.info() << "Ledger with sequence = " << nextSequence
<< __func__ << " : "
<< "Ledger with sequence = " << nextSequence
<< " has been validated by the network. " << " has been validated by the network. "
<< "Attempting to find in database and publish"; << "Attempting to find in database and publish";
// Attempt to take over responsibility of ETL writer after 10 failed // Attempt to take over responsibility of ETL writer after 10 failed
@@ -893,17 +833,13 @@ ReportingETL::monitor()
bool success = publishLedger(nextSequence, timeoutSeconds); bool success = publishLedger(nextSequence, timeoutSeconds);
if (!success) if (!success)
{ {
BOOST_LOG_TRIVIAL(warning) log_.warn() << "Failed to publish ledger with sequence = "
<< __func__ << " : "
<< "Failed to publish ledger with sequence = "
<< nextSequence << " . Beginning ETL"; << nextSequence << " . Beginning ETL";
// doContinousETLPipelined returns the most recent sequence // doContinousETLPipelined returns the most recent sequence
// published empty optional if no sequence was published // published empty optional if no sequence was published
std::optional<uint32_t> lastPublished = std::optional<uint32_t> lastPublished =
runETLPipeline(nextSequence, extractorThreads_); runETLPipeline(nextSequence, extractorThreads_);
BOOST_LOG_TRIVIAL(info) log_.info() << "Aborting ETL. Falling back to publishing";
<< __func__ << " : "
<< "Aborting ETL. Falling back to publishing";
// if no ledger was published, don't increment nextSequence // if no ledger was published, don't increment nextSequence
if (lastPublished) if (lastPublished)
nextSequence = *lastPublished + 1; nextSequence = *lastPublished + 1;
@@ -920,8 +856,8 @@ ReportingETL::loadCacheFromClioPeer(
std::string const& port, std::string const& port,
boost::asio::yield_context& yield) boost::asio::yield_context& yield)
{ {
BOOST_LOG_TRIVIAL(info) log_.info() << "Loading cache from peer. ip = " << ip
<< "Loading cache from peer. ip = " << ip << " . port = " << port; << " . port = " << port;
namespace beast = boost::beast; // from <boost/beast.hpp> namespace beast = boost::beast; // from <boost/beast.hpp>
namespace http = beast::http; // from <boost/beast/http.hpp> namespace http = beast::http; // from <boost/beast/http.hpp>
namespace websocket = beast::websocket; // from namespace websocket = beast::websocket; // from
@@ -933,7 +869,7 @@ ReportingETL::loadCacheFromClioPeer(
// These objects perform our I/O // These objects perform our I/O
tcp::resolver resolver{ioContext_}; tcp::resolver resolver{ioContext_};
BOOST_LOG_TRIVIAL(trace) << __func__ << " Creating websocket"; log_.trace() << "Creating websocket";
auto ws = auto ws =
std::make_unique<websocket::stream<beast::tcp_stream>>(ioContext_); std::make_unique<websocket::stream<beast::tcp_stream>>(ioContext_);
@@ -942,14 +878,13 @@ ReportingETL::loadCacheFromClioPeer(
if (ec) if (ec)
return {}; return {};
BOOST_LOG_TRIVIAL(trace) << __func__ << " Connecting websocket"; log_.trace() << "Connecting websocket";
// Make the connection on the IP address we get from a lookup // Make the connection on the IP address we get from a lookup
ws->next_layer().async_connect(results, yield[ec]); ws->next_layer().async_connect(results, yield[ec]);
if (ec) if (ec)
return false; return false;
BOOST_LOG_TRIVIAL(trace) log_.trace() << "Performing websocket handshake";
<< __func__ << " Performing websocket handshake";
// Perform the websocket handshake // Perform the websocket handshake
ws->async_handshake(ip, "/", yield[ec]); ws->async_handshake(ip, "/", yield[ec]);
if (ec) if (ec)
@@ -957,7 +892,7 @@ ReportingETL::loadCacheFromClioPeer(
std::optional<boost::json::value> marker; std::optional<boost::json::value> marker;
BOOST_LOG_TRIVIAL(trace) << __func__ << " Sending request"; log_.trace() << "Sending request";
auto getRequest = [&](auto marker) { auto getRequest = [&](auto marker) {
boost::json::object request = { boost::json::object request = {
{"command", "ledger_data"}, {"command", "ledger_data"},
@@ -981,8 +916,7 @@ ReportingETL::loadCacheFromClioPeer(
yield[ec]); yield[ec]);
if (ec) if (ec)
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "error writing = " << ec.message();
<< __func__ << " error writing = " << ec.message();
return false; return false;
} }
@@ -990,8 +924,7 @@ ReportingETL::loadCacheFromClioPeer(
ws->async_read(buffer, yield[ec]); ws->async_read(buffer, yield[ec]);
if (ec) if (ec)
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "error reading = " << ec.message();
<< __func__ << " error reading = " << ec.message();
return false; return false;
} }
@@ -1000,35 +933,29 @@ ReportingETL::loadCacheFromClioPeer(
if (!parsed.is_object()) if (!parsed.is_object())
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "Error parsing response: " << raw;
<< __func__ << " Error parsing response: " << raw;
return false; return false;
} }
BOOST_LOG_TRIVIAL(trace) log_.trace() << "Successfully parsed response " << parsed;
<< __func__ << " Successfully parsed response " << parsed;
if (auto const& response = parsed.as_object(); if (auto const& response = parsed.as_object();
response.contains("error")) response.contains("error"))
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "Response contains error: " << response;
<< __func__ << " Response contains error: " << response;
auto const& err = response.at("error"); auto const& err = response.at("error");
if (err.is_string() && err.as_string() == "lgrNotFound") if (err.is_string() && err.as_string() == "lgrNotFound")
{ {
++numAttempts; ++numAttempts;
if (numAttempts >= 5) if (numAttempts >= 5)
{ {
BOOST_LOG_TRIVIAL(error) log_.error()
<< __func__
<< " ledger not found at peer after 5 attempts. " << " ledger not found at peer after 5 attempts. "
"peer = " "peer = "
<< ip << " ledger = " << ledgerIndex << ip << " ledger = " << ledgerIndex
<< ". Check your config and the health of the peer"; << ". Check your config and the health of the peer";
return false; return false;
} }
BOOST_LOG_TRIVIAL(warning) log_.warn() << "Ledger not found. ledger = " << ledgerIndex
<< __func__
<< " ledger not found. ledger = " << ledgerIndex
<< ". Sleeping and trying again"; << ". Sleeping and trying again";
std::this_thread::sleep_for(std::chrono::seconds(1)); std::this_thread::sleep_for(std::chrono::seconds(1));
continue; continue;
@@ -1041,8 +968,7 @@ ReportingETL::loadCacheFromClioPeer(
if (!response.contains("cache_full") || if (!response.contains("cache_full") ||
!response.at("cache_full").as_bool()) !response.at("cache_full").as_bool())
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "cache not full for clio node. ip = " << ip;
<< __func__ << " cache not full for clio node. ip = " << ip;
return false; return false;
} }
if (response.contains("marker")) if (response.contains("marker"))
@@ -1063,8 +989,7 @@ ReportingETL::loadCacheFromClioPeer(
if (!stateObject.key.parseHex( if (!stateObject.key.parseHex(
obj.at("index").as_string().c_str())) obj.at("index").as_string().c_str()))
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "failed to parse object id";
<< __func__ << " failed to parse object id";
return false; return false;
} }
boost::algorithm::unhex( boost::algorithm::unhex(
@@ -1075,21 +1000,18 @@ ReportingETL::loadCacheFromClioPeer(
backend_->cache().update(objects, ledgerIndex, true); backend_->cache().update(objects, ledgerIndex, true);
if (marker) if (marker)
BOOST_LOG_TRIVIAL(debug) log_.debug() << "At marker " << *marker;
<< __func__ << " - At marker " << *marker;
} while (marker || !started); } while (marker || !started);
BOOST_LOG_TRIVIAL(info)
<< __func__
<< " Finished downloading ledger from clio node. ip = " << ip;
backend_->cache().setFull();
log_.info() << "Finished downloading ledger from clio node. ip = "
<< ip;
backend_->cache().setFull();
return true; return true;
} }
catch (std::exception const& e) catch (std::exception const& e)
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "Encountered exception : " << e.what()
<< __func__ << " Encountered exception : " << e.what()
<< " - ip = " << ip; << " - ip = " << ip;
return false; return false;
} }
@@ -1101,7 +1023,7 @@ ReportingETL::loadCache(uint32_t seq)
if (cacheLoadStyle_ == CacheLoadStyle::NOT_AT_ALL) if (cacheLoadStyle_ == CacheLoadStyle::NOT_AT_ALL)
{ {
backend_->cache().setDisabled(); backend_->cache().setDisabled();
BOOST_LOG_TRIVIAL(warning) << "Cache is disabled. Not loading"; log_.warn() << "Cache is disabled. Not loading";
return; return;
} }
// sanity check to make sure we are not calling this multiple times // sanity check to make sure we are not calling this multiple times
@@ -1142,12 +1064,11 @@ ReportingETL::loadCache(uint32_t seq)
while (cacheLoadStyle_ == CacheLoadStyle::SYNC && while (cacheLoadStyle_ == CacheLoadStyle::SYNC &&
!backend_->cache().isFull()) !backend_->cache().isFull())
{ {
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Cache not full. Cache size = "
<< "Cache not full. Cache size = " << backend_->cache().size() << backend_->cache().size() << ". Sleeping ...";
<< ". Sleeping ...";
std::this_thread::sleep_for(std::chrono::seconds(10)); std::this_thread::sleep_for(std::chrono::seconds(10));
BOOST_LOG_TRIVIAL(info) log_.info() << "Cache is full. Cache size = "
<< "Cache is full. Cache size = " << backend_->cache().size(); << backend_->cache().size();
} }
} }
@@ -1198,9 +1119,8 @@ ReportingETL::loadCacheFromDb(uint32_t seq)
if (c) if (c)
cursorStr << ripple::strHex(*c) << ", "; cursorStr << ripple::strHex(*c) << ", ";
} }
BOOST_LOG_TRIVIAL(info) log_.info() << "Loading cache. num cursors = " << cursors.size() - 1;
<< "Loading cache. num cursors = " << cursors.size() - 1; log_.trace() << "cursors = " << cursorStr.str();
BOOST_LOG_TRIVIAL(trace) << __func__ << " cursors = " << cursorStr.str();
cacheDownloader_ = std::thread{[this, seq, cursors]() { cacheDownloader_ = std::thread{[this, seq, cursors]() {
auto startTime = std::chrono::system_clock::now(); auto startTime = std::chrono::system_clock::now();
@@ -1221,8 +1141,7 @@ ReportingETL::loadCacheFromDb(uint32_t seq)
std::string cursorStr = cursor.has_value() std::string cursorStr = cursor.has_value()
? ripple::strHex(cursor.value()) ? ripple::strHex(cursor.value())
: ripple::strHex(Backend::firstKey); : ripple::strHex(Backend::firstKey);
BOOST_LOG_TRIVIAL(debug) log_.debug() << "Starting a cursor: " << cursorStr
<< "Starting a cursor: " << cursorStr
<< " markers = " << *markers; << " markers = " << *markers;
while (!stopping_) while (!stopping_)
@@ -1237,7 +1156,7 @@ ReportingETL::loadCacheFromDb(uint32_t seq)
backend_->cache().update(res.objects, seq, true); backend_->cache().update(res.objects, seq, true);
if (!res.cursor || (end && *(res.cursor) > *end)) if (!res.cursor || (end && *(res.cursor) > *end))
break; break;
BOOST_LOG_TRIVIAL(trace) log_.trace()
<< "Loading cache. cache size = " << "Loading cache. cache size = "
<< backend_->cache().size() << " - cursor = " << backend_->cache().size() << " - cursor = "
<< ripple::strHex(res.cursor.value()) << ripple::strHex(res.cursor.value())
@@ -1254,16 +1173,14 @@ ReportingETL::loadCacheFromDb(uint32_t seq)
auto duration = auto duration =
std::chrono::duration_cast<std::chrono::seconds>( std::chrono::duration_cast<std::chrono::seconds>(
endTime - startTime); endTime - startTime);
BOOST_LOG_TRIVIAL(info) log_.info() << "Finished loading cache. cache size = "
<< "Finished loading cache. cache size = "
<< backend_->cache().size() << ". Took " << backend_->cache().size() << ". Took "
<< duration.count() << " seconds"; << duration.count() << " seconds";
backend_->cache().setFull(); backend_->cache().setFull();
} }
else else
{ {
BOOST_LOG_TRIVIAL(info) log_.info() << "Finished a cursor. num remaining = "
<< "Finished a cursor. num remaining = "
<< *numRemaining << " start = " << cursorStr << *numRemaining << " start = " << cursorStr
<< " markers = " << *markers; << " markers = " << *markers;
} }
@@ -1275,7 +1192,7 @@ ReportingETL::loadCacheFromDb(uint32_t seq)
void void
ReportingETL::monitorReadOnly() ReportingETL::monitorReadOnly()
{ {
BOOST_LOG_TRIVIAL(debug) << "Starting reporting in strict read only mode"; log_.debug() << "Starting reporting in strict read only mode";
auto rng = backend_->hardFetchLedgerRangeNoThrow(); auto rng = backend_->hardFetchLedgerRangeNoThrow();
uint32_t latestSequence; uint32_t latestSequence;
if (!rng) if (!rng)

View File

@@ -8,6 +8,7 @@
#include <boost/beast/websocket.hpp> #include <boost/beast/websocket.hpp>
#include <backend/BackendInterface.h> #include <backend/BackendInterface.h>
#include <etl/ETLSource.h> #include <etl/ETLSource.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h> #include <subscriptions/SubscriptionManager.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h" #include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
@@ -55,6 +56,8 @@ class SubscriptionManager;
class ReportingETL class ReportingETL
{ {
private: private:
clio::Logger log_{"ETL"};
std::shared_ptr<BackendInterface> backend_; std::shared_ptr<BackendInterface> backend_;
std::shared_ptr<SubscriptionManager> subscriptions_; std::shared_ptr<SubscriptionManager> subscriptions_;
std::shared_ptr<ETLLoadBalancer> loadBalancer_; std::shared_ptr<ETLLoadBalancer> loadBalancer_;
@@ -300,7 +303,7 @@ private:
void void
run() run()
{ {
BOOST_LOG_TRIVIAL(info) << "Starting reporting etl"; log_.info() << "Starting reporting etl";
stopping_ = false; stopping_ = false;
doWork(); doWork();
@@ -337,8 +340,8 @@ public:
~ReportingETL() ~ReportingETL()
{ {
BOOST_LOG_TRIVIAL(info) << "onStop called"; log_.info() << "onStop called";
BOOST_LOG_TRIVIAL(debug) << "Stopping Reporting ETL"; log_.debug() << "Stopping Reporting ETL";
stopping_ = true; stopping_ = true;
if (worker_.joinable()) if (worker_.joinable())
@@ -346,7 +349,7 @@ public:
if (cacheDownloader_.joinable()) if (cacheDownloader_.joinable())
cacheDownloader_.join(); cacheDownloader_.join();
BOOST_LOG_TRIVIAL(debug) << "Joined ReportingETL worker thread"; log_.debug() << "Joined ReportingETL worker thread";
} }
boost::json::object boost::json::object

191
src/log/Logger.cpp Normal file
View File

@@ -0,0 +1,191 @@
#include <config/Config.h>
#include <log/Logger.h>
#include <algorithm>
#include <array>
#include <filesystem>
namespace clio {
// Out-of-line definitions of the loggers backing LogService's static
// convenience entrypoints: one for the "General" channel, one for "Alert".
Logger LogService::general_log_ = Logger{"General"};
Logger LogService::alert_log_ = Logger{"Alert"};
/**
 * @brief Stream a three-letter label for the given severity.
 *
 * The label table is indexed by the numeric value of @ref Severity;
 * `at()` keeps the bounds check, so an out-of-range enum value throws
 * just like the original lookup would.
 *
 * @param stream The output stream
 * @param sev The severity to print
 * @return The same stream, for chaining
 */
std::ostream&
operator<<(std::ostream& stream, Severity sev)
{
    static constexpr std::array<char const*, 6> kLabels = {
        "TRC",
        "DBG",
        "NFO",
        "WRN",
        "ERR",
        "FTL",
    };
    auto const label = kLabels.at(static_cast<int>(sev));
    stream << label;
    return stream;
}
/**
 * @brief Parse a @ref Severity out of a JSON `log_level` string.
 *
 * Matching is case-insensitive; both "warning" and "warn" map to
 * Severity::WARNING.
 *
 * @param value The JSON value to parse
 * @return The parsed severity
 * @throws std::runtime_error If the value is not a string or does not name
 *         a known severity
 */
Severity
tag_invoke(boost::json::value_to_tag<Severity>, boost::json::value const& value)
{
    if (not value.is_string())
        throw std::runtime_error("`log_level` must be a string");
    auto const& logLevel = value.as_string();

    // Table-driven, case-insensitive lookup of config names to severities
    struct Mapping
    {
        char const* name;
        Severity sev;
    };
    static constexpr Mapping mappings[] = {
        {"trace", Severity::TRACE},
        {"debug", Severity::DEBUG},
        {"info", Severity::INFO},
        {"warning", Severity::WARNING},
        {"warn", Severity::WARNING},
        {"error", Severity::ERROR},
        {"fatal", Severity::FATAL},
    };

    for (auto const& [name, sev] : mappings)
        if (boost::iequals(logLevel, name))
            return sev;

    throw std::runtime_error(
        "Could not parse `log_level`: expected `trace`, `debug`, `info`, "
        "`warning`, `error` or `fatal`");
}
/**
 * @brief Initialize the global logging core from the given configuration.
 *
 * Registers the custom Severity formatter, optionally installs a console
 * sink (`log_to_console`) and a rotating file sink (`log_directory` plus
 * rotation/size settings), and installs a per-channel minimum-severity
 * filter that can be overridden via the `log_channels` array.
 *
 * @param config The parsed clio configuration
 * @throws std::runtime_error If a `log_channels` override names an unknown
 *         channel (config parsing errors also propagate)
 */
void
LogService::init(Config const& config)
{
    // Note: only the namespaces actually used below are aliased; the
    // previously-declared `src` (boost::log::sources) alias was unused.
    namespace keywords = boost::log::keywords;
    namespace sinks = boost::log::sinks;

    boost::log::add_common_attributes();
    boost::log::register_simple_formatter_factory<Severity, char>("Severity");

    auto const defaultFormat =
        "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% "
        "%Message%";
    std::string format =
        config.valueOr<std::string>("log_format", defaultFormat);

    if (config.valueOr("log_to_console", false))
    {
        boost::log::add_console_log(std::cout, keywords::format = format);
    }

    auto logDir = config.maybeValue<std::string>("log_directory");
    if (logDir)
    {
        boost::filesystem::path dirPath{logDir.value()};
        if (!boost::filesystem::exists(dirPath))
            boost::filesystem::create_directories(dirPath);

        // Rotation size and directory cap are configured in megabytes
        auto const rotationSize =
            config.valueOr<uint64_t>("log_rotation_size", 2048u) * 1024u *
            1024u;
        auto const rotationPeriod =
            config.valueOr<uint32_t>("log_rotation_hour_interval", 12u);
        auto const dirSize =
            config.valueOr<uint64_t>("log_directory_max_size", 50u * 1024u) *
            1024u * 1024u;
        auto fileSink = boost::log::add_file_log(
            keywords::file_name = dirPath / "clio.log",
            keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log",
            keywords::auto_flush = true,
            keywords::format = format,
            keywords::open_mode = std::ios_base::app,
            keywords::rotation_size = rotationSize,
            keywords::time_based_rotation =
                sinks::file::rotation_at_time_interval(
                    boost::posix_time::hours(rotationPeriod)));
        fileSink->locked_backend()->set_file_collector(
            sinks::file::make_collector(
                keywords::target = dirPath, keywords::max_size = dirSize));
        fileSink->locked_backend()->scan_for_files();
    }

    // get default severity, can be overridden per channel using
    // the `log_channels` array
    auto defaultSeverity =
        config.valueOr<Severity>("log_level", Severity::INFO);
    static constexpr std::array<const char*, 7> channels = {
        "General",
        "WebServer",
        "Backend",
        "RPC",
        "ETL",
        "Subscriptions",
        "Performance",
    };

    auto core = boost::log::core::get();
    auto min_severity = boost::log::expressions::channel_severity_filter(
        log_channel, log_severity);

    for (auto const& channel : channels)
        min_severity[channel] = defaultSeverity;
    min_severity["Alert"] =
        Severity::WARNING; // Channel for alerts, always warning severity

    for (auto const overrides = config.arrayOr("log_channels", {});
         auto const& cfg : overrides)
    {
        auto name = cfg.valueOrThrow<std::string>(
            "channel", "Channel name is required");

        if (not std::count(std::begin(channels), std::end(channels), name))
            throw std::runtime_error(
                "Can't override settings for log channel " + name +
                ": invalid channel");

        min_severity[name] =
            cfg.valueOr<Severity>("log_level", defaultSeverity);
    }

    core->set_filter(min_severity);
    LogService::info() << "Default log level = " << defaultSeverity;
}
// Factory methods creating a Pump at the respective severity. The call
// site's source location arrives via the defaulted `loc` parameter declared
// in log/Logger.h. (The stray `;` after each function body was removed —
// extra semicolons at namespace scope are non-idiomatic and warn-prone.)
Logger::Pump
Logger::trace(source_location_t const loc) const
{
    return {logger_, Severity::TRACE, loc};
}
Logger::Pump
Logger::debug(source_location_t const loc) const
{
    return {logger_, Severity::DEBUG, loc};
}
Logger::Pump
Logger::info(source_location_t const loc) const
{
    return {logger_, Severity::INFO, loc};
}
Logger::Pump
Logger::warn(source_location_t const loc) const
{
    return {logger_, Severity::WARNING, loc};
}
Logger::Pump
Logger::error(source_location_t const loc) const
{
    return {logger_, Severity::ERROR, loc};
}
Logger::Pump
Logger::fatal(source_location_t const loc) const
{
    return {logger_, Severity::FATAL, loc};
}
/**
 * @brief Shorten a source path to at most `max_depth` trailing components
 * and append the line number (e.g. "src/log/Logger.cpp:42").
 */
std::string
Logger::Pump::pretty_path(source_location_t const& loc, size_t max_depth) const
{
    std::string const fullPath{loc.file_name()};
    auto cut = fullPath.size();

    // Walk backwards over up to `max_depth` '/' separators
    for (size_t depth = 0; depth < max_depth; ++depth)
    {
        cut = fullPath.rfind('/', cut - 1);
        if (cut == std::string::npos || cut == 0)
            break;
    }

    // No separator found at all -> keep the whole path
    auto const start = (cut == std::string::npos) ? size_t{0} : cut + 1;
    return fullPath.substr(start) + ':' + std::to_string(loc.line());
}
} // namespace clio

257
src/log/Logger.h Normal file
View File

@@ -0,0 +1,257 @@
#ifndef RIPPLE_UTIL_LOGGER_H
#define RIPPLE_UTIL_LOGGER_H
#include <boost/algorithm/string/predicate.hpp>
#include <boost/filesystem.hpp>
#include <boost/json.hpp>
#include <boost/log/core/core.hpp>
#include <boost/log/expressions/predicates/channel_severity_filter.hpp>
#include <boost/log/sinks/unlocked_frontend.hpp>
#include <boost/log/sources/record_ostream.hpp>
#include <boost/log/sources/severity_channel_logger.hpp>
#include <boost/log/sources/severity_feature.hpp>
#include <boost/log/sources/severity_logger.hpp>
#include <boost/log/utility/manipulators/add_value.hpp>
#include <boost/log/utility/setup/common_attributes.hpp>
#include <boost/log/utility/setup/console.hpp>
#include <boost/log/utility/setup/file.hpp>
#include <boost/log/utility/setup/formatter_parser.hpp>
// Note: clang still does not provide non-experimental support, gcc does
// TODO: start using <source_location> once clang catches up on c++20
#include <experimental/source_location>
#include <optional>
#include <string>
namespace clio {
class Config;
using source_location_t = std::experimental::source_location;
/**
 * @brief Custom severity levels for @ref Logger.
 *
 * Declaration order defines each level's numeric value (TRACE lowest,
 * FATAL highest); that value indexes the label table used when a Severity
 * is streamed, so keep the two in sync.
 */
enum class Severity {
    TRACE,
    DEBUG,
    INFO,
    WARNING,
    ERROR,
    FATAL,
};
BOOST_LOG_ATTRIBUTE_KEYWORD(log_severity, "Severity", Severity);
BOOST_LOG_ATTRIBUTE_KEYWORD(log_channel, "Channel", std::string);
/**
* @brief Custom labels for @ref Severity in log output.
*
* @param stream std::ostream The output stream
* @param sev Severity The severity to output to the ostream
* @return std::ostream& The same ostream we were given
*/
std::ostream&
operator<<(std::ostream& stream, Severity sev);
/**
* @brief Custom JSON parser for @ref Severity.
*
* @param value The JSON string to parse
* @return Severity The parsed severity
* @throws std::runtime_error Thrown if severity is not in the right format
*/
Severity
tag_invoke(
boost::json::value_to_tag<Severity>,
boost::json::value const& value);
/**
 * @brief A simple thread-safe logger for the channel specified
 * in the constructor.
 *
 * This is cheap to copy and move. Designed to be used as a member variable or
 * otherwise. See @ref LogService::init() for setup of the logging core and
 * severity levels for each channel.
 */
class Logger final
{
    using logger_t =
        boost::log::sources::severity_channel_logger_mt<Severity, std::string>;
    // mutable so the const logging entrypoints (trace()..fatal() below) can
    // open records on it
    mutable logger_t logger_;
    friend class LogService; // to expose the Pump interface
    /**
     * @brief Helper that pumps data into a log record via `operator<<`.
     */
    class Pump final
    {
        using pump_opt_t =
            std::optional<boost::log::aux::record_pump<logger_t>>;
        // The open record; invalid (falsy) if the channel/severity is
        // filtered out, in which case pump_ stays disengaged and all
        // streamed data is discarded.
        boost::log::record rec_;
        pump_opt_t pump_ = std::nullopt;
    public:
        ~Pump() = default;
        // Opens a record at `sev` and, if it passed the filter, attaches a
        // record pump and stores the prettified call-site location.
        Pump(logger_t& logger, Severity sev, source_location_t const& loc)
            : rec_{logger.open_record(boost::log::keywords::severity = sev)}
        {
            if (rec_)
            {
                pump_.emplace(boost::log::aux::make_record_pump(logger, rec_));
                pump_->stream() << boost::log::add_value(
                    "SourceLocation", pretty_path(loc));
            }
        }
        // Non-copyable and non-movable: the engaged pump references rec_,
        // so relocating a Pump would leave it pointing at a dead record.
        Pump(Pump&&) = delete;
        Pump(Pump const&) = delete;
        Pump&
        operator=(Pump const&) = delete;
        Pump&
        operator=(Pump&&) = delete;
        /**
         * @brief Perfectly forwards any incoming data into the underlying
         * boost::log pump if the pump is available. nop otherwise.
         *
         * @tparam T Type of data to pump
         * @param data The data to pump
         * @return Pump& Reference to itself for chaining
         */
        template <typename T>
        [[maybe_unused]] Pump&
        operator<<(T&& data)
        {
            if (pump_)
                pump_->stream() << std::forward<T>(data);
            return *this;
        }
    private:
        // Shortens `loc`'s file path to its last `max_depth` components and
        // appends the line number (defined in Logger.cpp).
        [[nodiscard]] std::string
        pretty_path(source_location_t const& loc, size_t max_depth = 3) const;
    };
public:
    ~Logger() = default;
    /**
     * @brief Construct a new Logger object that produces loglines for the
     * specified channel.
     *
     * See @ref LogService::init() for general setup and configuration of
     * severity levels per channel.
     *
     * @param channel The channel this logger will report into.
     */
    Logger(std::string channel)
        : logger_{boost::log::keywords::channel = channel}
    {
    }
    Logger(Logger const&) = default;
    Logger(Logger&&) = default;
    Logger&
    operator=(Logger const&) = default;
    Logger&
    operator=(Logger&&) = default;
    /*! Interface for logging at @ref Severity::TRACE severity */
    [[nodiscard]] Pump
    trace(source_location_t const loc = source_location_t::current()) const;
    /*! Interface for logging at @ref Severity::DEBUG severity */
    [[nodiscard]] Pump
    debug(source_location_t const loc = source_location_t::current()) const;
    /*! Interface for logging at @ref Severity::INFO severity */
    [[nodiscard]] Pump
    info(source_location_t const loc = source_location_t::current()) const;
    /*! Interface for logging at @ref Severity::WARNING severity */
    [[nodiscard]] Pump
    warn(source_location_t const loc = source_location_t::current()) const;
    /*! Interface for logging at @ref Severity::ERROR severity */
    [[nodiscard]] Pump
    error(source_location_t const loc = source_location_t::current()) const;
    /*! Interface for logging at @ref Severity::FATAL severity */
    [[nodiscard]] Pump
    fatal(source_location_t const loc = source_location_t::current()) const;
};
/**
 * @brief A global logging service.
 *
 * Used to initialize and setup the logging core as well as a globally available
 * entrypoint for logging into the `General` channel as well as raising alerts.
 */
class LogService
{
    static Logger general_log_; /*! Global logger for General channel */
    static Logger alert_log_; /*! Global logger for Alerts channel */
public:
    LogService() = delete;
    /**
     * @brief Global log core initialization from a @ref Config
     */
    static void
    init(Config const& config);
    /*! Globally accessible General logger at @ref Severity::TRACE severity */
    [[nodiscard]] static Logger::Pump
    trace(source_location_t const loc = source_location_t::current())
    {
        return general_log_.trace(loc);
    }
    /*! Globally accessible General logger at @ref Severity::DEBUG severity */
    [[nodiscard]] static Logger::Pump
    debug(source_location_t const loc = source_location_t::current())
    {
        return general_log_.debug(loc);
    }
    /*! Globally accessible General logger at @ref Severity::INFO severity */
    [[nodiscard]] static Logger::Pump
    info(source_location_t const loc = source_location_t::current())
    {
        return general_log_.info(loc);
    }
    /*! Globally accessible General logger at @ref Severity::WARNING severity */
    [[nodiscard]] static Logger::Pump
    warn(source_location_t const loc = source_location_t::current())
    {
        return general_log_.warn(loc);
    }
    /*! Globally accessible General logger at @ref Severity::ERROR severity */
    [[nodiscard]] static Logger::Pump
    error(source_location_t const loc = source_location_t::current())
    {
        return general_log_.error(loc);
    }
    /*! Globally accessible General logger at @ref Severity::FATAL severity */
    [[nodiscard]] static Logger::Pump
    fatal(source_location_t const loc = source_location_t::current())
    {
        return general_log_.fatal(loc);
    }
    /*! Globally accessible Alert logger (always logs at WARNING) */
    [[nodiscard]] static Logger::Pump
    alert(source_location_t const loc = source_location_t::current())
    {
        return alert_log_.warn(loc);
    }
};
}; // namespace clio
#endif // RIPPLE_UTIL_LOGGER_H

View File

@@ -6,26 +6,20 @@
#undef GRPC_ASAN_ENABLED #undef GRPC_ASAN_ENABLED
#endif #endif
#include <backend/BackendFactory.h>
#include <config/Config.h>
#include <etl/ReportingETL.h>
#include <log/Logger.h>
#include <webserver/Listener.h>
#include <boost/asio/dispatch.hpp> #include <boost/asio/dispatch.hpp>
#include <boost/asio/strand.hpp> #include <boost/asio/strand.hpp>
#include <boost/beast/websocket.hpp> #include <boost/beast/websocket.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp> #include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/json.hpp> #include <boost/json.hpp>
#include <boost/log/core.hpp>
#include <boost/log/expressions.hpp>
#include <boost/log/sinks/text_file_backend.hpp>
#include <boost/log/sources/record_ostream.hpp>
#include <boost/log/sources/severity_logger.hpp>
#include <boost/log/support/date_time.hpp>
#include <boost/log/trivial.hpp>
#include <boost/log/utility/setup/common_attributes.hpp>
#include <boost/log/utility/setup/console.hpp>
#include <boost/log/utility/setup/file.hpp>
#include <algorithm> #include <algorithm>
#include <backend/BackendFactory.h>
#include <config/Config.h>
#include <cstdlib> #include <cstdlib>
#include <etl/ReportingETL.h>
#include <fstream> #include <fstream>
#include <functional> #include <functional>
#include <iostream> #include <iostream>
@@ -35,7 +29,6 @@
#include <string> #include <string>
#include <thread> #include <thread>
#include <vector> #include <vector>
#include <webserver/Listener.h>
using namespace clio; using namespace clio;
@@ -80,85 +73,6 @@ parse_certs(Config const& config)
return ctx; return ctx;
} }
void
initLogging(Config const& config)
{
namespace src = boost::log::sources;
namespace keywords = boost::log::keywords;
namespace sinks = boost::log::sinks;
namespace trivial = boost::log::trivial;
boost::log::add_common_attributes();
std::string format = "[%TimeStamp%] [%ThreadID%] [%Severity%] %Message%";
if (config.valueOr<bool>("log_to_console", true))
{
boost::log::add_console_log(std::cout, keywords::format = format);
}
if (auto logDir = config.maybeValue<std::string>("log_directory"); logDir)
{
boost::filesystem::path dirPath{*logDir};
if (!boost::filesystem::exists(dirPath))
boost::filesystem::create_directories(dirPath);
auto const rotationSize =
config.valueOr<int64_t>("log_rotation_size", 2 * 1024) * 1024 *
1024u;
if (rotationSize <= 0)
throw std::runtime_error(
"log rotation size must be greater than 0");
auto const rotationPeriod =
config.valueOr<int64_t>("log_rotation_hour_interval", 12u);
if (rotationPeriod <= 0)
throw std::runtime_error(
"log rotation time interval must be greater than 0");
auto const dirSize =
config.valueOr<int64_t>("log_directory_max_size", 50 * 1024) *
1024 * 1024u;
if (dirSize <= 0)
throw std::runtime_error(
"log rotation directory max size must be greater than 0");
auto fileSink = boost::log::add_file_log(
keywords::file_name = dirPath / "clio.log",
keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log",
keywords::auto_flush = true,
keywords::format = format,
keywords::open_mode = std::ios_base::app,
keywords::rotation_size = rotationSize,
keywords::time_based_rotation =
sinks::file::rotation_at_time_interval(
boost::posix_time::hours(rotationPeriod)));
fileSink->locked_backend()->set_file_collector(
sinks::file::make_collector(
keywords::target = dirPath, keywords::max_size = dirSize));
fileSink->locked_backend()->scan_for_files();
}
auto const logLevel = config.valueOr<std::string>("log_level", "info");
if (boost::iequals(logLevel, "trace"))
boost::log::core::get()->set_filter(
trivial::severity >= trivial::trace);
else if (boost::iequals(logLevel, "debug"))
boost::log::core::get()->set_filter(
trivial::severity >= trivial::debug);
else if (boost::iequals(logLevel, "info"))
boost::log::core::get()->set_filter(trivial::severity >= trivial::info);
else if (
boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn"))
boost::log::core::get()->set_filter(
trivial::severity >= trivial::warning);
else if (boost::iequals(logLevel, "error"))
boost::log::core::get()->set_filter(
trivial::severity >= trivial::error);
else if (boost::iequals(logLevel, "fatal"))
boost::log::core::get()->set_filter(
trivial::severity >= trivial::fatal);
else
{
BOOST_LOG_TRIVIAL(warning) << "Unrecognized log level: " << logLevel
<< ". Setting log level to info";
boost::log::core::get()->set_filter(trivial::severity >= trivial::info);
}
BOOST_LOG_TRIVIAL(info) << "Log level = " << logLevel;
}
void void
start(boost::asio::io_context& ioc, std::uint32_t numThreads) start(boost::asio::io_context& ioc, std::uint32_t numThreads)
{ {
@@ -172,6 +86,7 @@ start(boost::asio::io_context& ioc, std::uint32_t numThreads)
int int
main(int argc, char* argv[]) main(int argc, char* argv[])
try
{ {
// Check command line arguments. // Check command line arguments.
if (argc != 2) if (argc != 2)
@@ -196,10 +111,8 @@ main(int argc, char* argv[])
return EXIT_FAILURE; return EXIT_FAILURE;
} }
initLogging(config); LogService::init(config);
LogService::info() << "Clio version: " << Build::getClioFullVersionString();
BOOST_LOG_TRIVIAL(info)
<< "Clio version: " << Build::getClioFullVersionString();
auto ctx = parse_certs(config); auto ctx = parse_certs(config);
auto ctxRef = ctx auto ctxRef = ctx
@@ -209,10 +122,10 @@ main(int argc, char* argv[])
auto const threads = config.valueOr("io_threads", 2); auto const threads = config.valueOr("io_threads", 2);
if (threads <= 0) if (threads <= 0)
{ {
BOOST_LOG_TRIVIAL(fatal) << "io_threads is less than 0"; LogService::fatal() << "io_threads is less than 0";
return EXIT_FAILURE; return EXIT_FAILURE;
} }
BOOST_LOG_TRIVIAL(info) << "Number of io threads = " << threads; LogService::info() << "Number of io threads = " << threads;
// io context to handle all incoming requests, as well as other things // io context to handle all incoming requests, as well as other things
// This is not the only io context in the application // This is not the only io context in the application
@@ -258,3 +171,7 @@ main(int argc, char* argv[])
return EXIT_SUCCESS; return EXIT_SUCCESS;
} }
catch (std::exception const& e)
{
LogService::fatal() << "Exit on exception: " << e.what();
}

View File

@@ -1,4 +1,5 @@
#include <etl/ETLSource.h> #include <etl/ETLSource.h>
#include <log/Logger.h>
#include <rpc/Handlers.h> #include <rpc/Handlers.h>
#include <rpc/RPCHelpers.h> #include <rpc/RPCHelpers.h>
#include <webserver/HttpBase.h> #include <webserver/HttpBase.h>
@@ -9,9 +10,15 @@
#include <unordered_map> #include <unordered_map>
using namespace std; using namespace std;
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gPerfLog{"Performance"};
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC { namespace RPC {
Context::Context( Context::Context(
boost::asio::yield_context& yield_, boost::asio::yield_context& yield_,
string const& command_, string const& command_,
@@ -40,7 +47,7 @@ Context::Context(
, counters(counters_) , counters(counters_)
, clientIp(clientIp_) , clientIp(clientIp_)
{ {
BOOST_LOG_TRIVIAL(debug) << tag() << "new Context created"; gPerfLog.debug() << tag() << "new Context created";
} }
optional<Context> optional<Context>
@@ -325,8 +332,7 @@ buildResponse(Context const& ctx)
if (ctx.backend->isTooBusy()) if (ctx.backend->isTooBusy())
{ {
BOOST_LOG_TRIVIAL(error) gLog.error() << "Database is too busy. Rejecting request";
<< __func__ << " Database is too busy. Rejecting request";
return Status{RippledError::rpcTOO_BUSY}; return Status{RippledError::rpcTOO_BUSY};
} }
@@ -337,12 +343,10 @@ buildResponse(Context const& ctx)
try try
{ {
BOOST_LOG_TRIVIAL(debug) gPerfLog.debug() << ctx.tag() << " start executing rpc `" << ctx.method
<< ctx.tag() << __func__ << " start executing rpc `" << ctx.method
<< '`'; << '`';
auto v = (*method)(ctx); auto v = (*method)(ctx);
BOOST_LOG_TRIVIAL(debug) gPerfLog.debug() << ctx.tag() << " finish executing rpc `" << ctx.method
<< ctx.tag() << __func__ << " finish executing rpc `" << ctx.method
<< '`'; << '`';
if (auto object = get_if<boost::json::object>(&v); if (auto object = get_if<boost::json::object>(&v);
@@ -363,13 +367,12 @@ buildResponse(Context const& ctx)
} }
catch (Backend::DatabaseTimeout const& t) catch (Backend::DatabaseTimeout const& t)
{ {
BOOST_LOG_TRIVIAL(error) << __func__ << " Database timeout"; gLog.error() << "Database timeout";
return Status{RippledError::rpcTOO_BUSY}; return Status{RippledError::rpcTOO_BUSY};
} }
catch (exception const& err) catch (exception const& err)
{ {
BOOST_LOG_TRIVIAL(error) gLog.error() << ctx.tag() << " caught exception: " << err.what();
<< ctx.tag() << __func__ << " caught exception : " << err.what();
return Status{RippledError::rpcINTERNAL}; return Status{RippledError::rpcINTERNAL};
} }
} }

View File

@@ -1,15 +1,15 @@
#ifndef REPORTING_RPC_H_INCLUDED #ifndef REPORTING_RPC_H_INCLUDED
#define REPORTING_RPC_H_INCLUDED #define REPORTING_RPC_H_INCLUDED
#include <backend/BackendInterface.h>
#include <log/Logger.h>
#include <rpc/Counters.h>
#include <rpc/Errors.h> #include <rpc/Errors.h>
#include <util/Taggable.h>
#include <boost/asio/spawn.hpp> #include <boost/asio/spawn.hpp>
#include <boost/json.hpp> #include <boost/json.hpp>
#include <backend/BackendInterface.h>
#include <rpc/Counters.h>
#include <util/Taggable.h>
#include <optional> #include <optional>
#include <string> #include <string>
#include <variant> #include <variant>
@@ -34,6 +34,7 @@ namespace RPC {
struct Context : public util::Taggable struct Context : public util::Taggable
{ {
clio::Logger perfLog_{"Performance"};
boost::asio::yield_context& yield; boost::asio::yield_context& yield;
std::string method; std::string method;
std::uint32_t version; std::uint32_t version;
@@ -129,6 +130,7 @@ template <class T>
void void
logDuration(Context const& ctx, T const& dur) logDuration(Context const& ctx, T const& dur)
{ {
static clio::Logger log{"RPC"};
std::stringstream ss; std::stringstream ss;
ss << ctx.tag() << "Request processing duration = " ss << ctx.tag() << "Request processing duration = "
<< std::chrono::duration_cast<std::chrono::milliseconds>(dur).count() << std::chrono::duration_cast<std::chrono::milliseconds>(dur).count()
@@ -136,11 +138,11 @@ logDuration(Context const& ctx, T const& dur)
auto seconds = auto seconds =
std::chrono::duration_cast<std::chrono::seconds>(dur).count(); std::chrono::duration_cast<std::chrono::seconds>(dur).count();
if (seconds > 10) if (seconds > 10)
BOOST_LOG_TRIVIAL(error) << ss.str(); log.error() << ss.str();
else if (seconds > 1) else if (seconds > 1)
BOOST_LOG_TRIVIAL(warning) << ss.str(); log.warn() << ss.str();
else else
BOOST_LOG_TRIVIAL(info) << ss.str(); log.info() << ss.str();
} }
} // namespace RPC } // namespace RPC

View File

@@ -1,10 +1,17 @@
#include <ripple/basics/StringUtilities.h>
#include <backend/BackendInterface.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <boost/algorithm/string.hpp> #include <boost/algorithm/string.hpp>
#include <boost/format.hpp> #include <boost/format.hpp>
#include <ripple/basics/StringUtilities.h> using namespace clio;
#include <backend/BackendInterface.h> // local to compilation unit loggers
#include <rpc/RPCHelpers.h> namespace {
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC { namespace RPC {
@@ -412,12 +419,11 @@ deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs)
blobs.metadata.begin(), blobs.metadata.begin(),
blobs.metadata.end(), blobs.metadata.end(),
std::ostream_iterator<unsigned char>(meta)); std::ostream_iterator<unsigned char>(meta));
BOOST_LOG_TRIVIAL(error) gLog.error() << "Failed to deserialize transaction. txn = " << txn.str()
<< __func__ << " - meta = " << meta.str() << " txn length = "
<< " Failed to deserialize transaction. txn = " << txn.str() << std::to_string(blobs.transaction.size())
<< " - meta = " << meta.str() << " meta length = "
<< " txn length = " << std::to_string(blobs.transaction.size()) << std::to_string(blobs.metadata.size());
<< " meta length = " << std::to_string(blobs.metadata.size());
throw e; throw e;
} }
} }
@@ -807,9 +813,9 @@ traverseOwnedNodes(
} }
auto end = std::chrono::system_clock::now(); auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug) gLog.debug() << "Time loading owned directories: "
<< "Time loading owned directories: " << std::chrono::duration_cast<std::chrono::milliseconds>(
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start) end - start)
.count() .count()
<< " milliseconds"; << " milliseconds";
@@ -817,9 +823,9 @@ traverseOwnedNodes(
auto objects = backend.fetchLedgerObjects(keys, sequence, yield); auto objects = backend.fetchLedgerObjects(keys, sequence, yield);
end = std::chrono::system_clock::now(); end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug) gLog.debug() << "Time loading owned entries: "
<< "Time loading owned entries: " << std::chrono::duration_cast<std::chrono::milliseconds>(
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start) end - start)
.count() .count()
<< " milliseconds"; << " milliseconds";
@@ -1341,7 +1347,7 @@ postProcessOrderBook(
} }
catch (std::exception const& e) catch (std::exception const& e)
{ {
BOOST_LOG_TRIVIAL(error) << "caught exception: " << e.what(); gLog.error() << "caught exception: " << e.what();
} }
} }
return jsonOffers; return jsonOffers;
@@ -1676,9 +1682,7 @@ traverseTransactions(
} }
else if (txnPlusMeta.ledgerSequence > maxIndex && !forward) else if (txnPlusMeta.ledgerSequence > maxIndex && !forward)
{ {
BOOST_LOG_TRIVIAL(debug) gLog.debug() << "Skipping over transactions from incomplete ledger";
<< __func__
<< " skipping over transactions from incomplete ledger";
continue; continue;
} }
@@ -1708,8 +1712,7 @@ traverseTransactions(
response[JS(ledger_index_max)] = maxIndex; response[JS(ledger_index_max)] = maxIndex;
response[JS(transactions)] = txns; response[JS(transactions)] = txns;
BOOST_LOG_TRIVIAL(info) gLog.info() << "serialization took "
<< __func__ << " serialization took "
<< std::chrono::duration_cast<std::chrono::milliseconds>( << std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now() - serializationStart) std::chrono::system_clock::now() - serializationStart)
.count() .count()

View File

@@ -1,10 +1,11 @@
#ifndef CLIO_WORK_QUEUE_H #ifndef CLIO_WORK_QUEUE_H
#define CLIO_WORK_QUEUE_H #define CLIO_WORK_QUEUE_H
#include <log/Logger.h>
#include <boost/asio.hpp> #include <boost/asio.hpp>
#include <boost/asio/spawn.hpp> #include <boost/asio/spawn.hpp>
#include <boost/json.hpp> #include <boost/json.hpp>
#include <boost/log/trivial.hpp>
#include <memory> #include <memory>
#include <optional> #include <optional>
@@ -20,6 +21,7 @@ class WorkQueue
std::atomic_uint64_t curSize_ = 0; std::atomic_uint64_t curSize_ = 0;
uint32_t maxSize_ = std::numeric_limits<uint32_t>::max(); uint32_t maxSize_ = std::numeric_limits<uint32_t>::max();
clio::Logger log_{"RPC"};
public: public:
WorkQueue(std::uint32_t numWorkers, uint32_t maxSize = 0); WorkQueue(std::uint32_t numWorkers, uint32_t maxSize = 0);
@@ -30,10 +32,8 @@ public:
{ {
if (curSize_ >= maxSize_ && !isWhiteListed) if (curSize_ >= maxSize_ && !isWhiteListed)
{ {
BOOST_LOG_TRIVIAL(warning) log_.warn() << "Queue is full. rejecting job. current size = "
<< __func__ << curSize_ << " max size = " << maxSize_;
<< " queue is full. rejecting job. current size = " << curSize_
<< " max size = " << maxSize_;
return false; return false;
} }
++curSize_; ++curSize_;
@@ -52,7 +52,7 @@ public:
// durationUs_ // durationUs_
++queued_; ++queued_;
durationUs_ += wait; durationUs_ += wait;
BOOST_LOG_TRIVIAL(debug) << "WorkQueue wait time = " << wait log_.info() << "WorkQueue wait time = " << wait
<< " queue size = " << curSize_; << " queue size = " << curSize_;
f(yield); f(yield);
--curSize_; --curSize_;

View File

@@ -1,5 +1,13 @@
#include <log/Logger.h>
#include <rpc/RPCHelpers.h> #include <rpc/RPCHelpers.h>
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC { namespace RPC {
Result Result
@@ -10,7 +18,8 @@ doAccountTx(Context const& context)
return status; return status;
constexpr std::string_view outerFuncName = __func__; constexpr std::string_view outerFuncName = __func__;
auto const maybeResponse = traverseTransactions( auto const maybeResponse =
traverseTransactions(
context, context,
[&accountID, &outerFuncName]( [&accountID, &outerFuncName](
std::shared_ptr<Backend::BackendInterface const> const& backend, std::shared_ptr<Backend::BackendInterface const> const& backend,
@@ -21,12 +30,13 @@ doAccountTx(Context const& context)
auto const start = std::chrono::system_clock::now(); auto const start = std::chrono::system_clock::now();
auto const txnsAndCursor = backend->fetchAccountTransactions( auto const txnsAndCursor = backend->fetchAccountTransactions(
accountID, limit, forward, cursorIn, yield); accountID, limit, forward, cursorIn, yield);
BOOST_LOG_TRIVIAL(info) gLog.info()
<< outerFuncName << " db fetch took " << outerFuncName << " db fetch took "
<< std::chrono::duration_cast<std::chrono::milliseconds>( << std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now() - start) std::chrono::system_clock::now() - start)
.count() .count()
<< " milliseconds - num blobs = " << txnsAndCursor.txns.size(); << " milliseconds - num blobs = "
<< txnsAndCursor.txns.size();
return txnsAndCursor; return txnsAndCursor;
}); });

View File

@@ -4,12 +4,21 @@
#include <ripple/protocol/Indexes.h> #include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h> #include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h> #include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <rpc/RPCHelpers.h>
#include <backend/BackendInterface.h> #include <backend/BackendInterface.h>
#include <backend/DBHelpers.h> #include <backend/DBHelpers.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <boost/json.hpp>
#include <algorithm>
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC { namespace RPC {
@@ -64,9 +73,9 @@ doBookOffers(Context const& context)
bookBase, lgrInfo.seq, limit, marker, context.yield); bookBase, lgrInfo.seq, limit, marker, context.yield);
auto end = std::chrono::system_clock::now(); auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(warning) gLog.warn() << "Time loading books: "
<< "Time loading books: " << std::chrono::duration_cast<std::chrono::milliseconds>(
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start) end - start)
.count() .count()
<< " milliseconds - request = " << request; << " milliseconds - request = " << request;
@@ -78,9 +87,9 @@ doBookOffers(Context const& context)
auto end2 = std::chrono::system_clock::now(); auto end2 = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(warning) gLog.warn() << "Time transforming to json: "
<< "Time transforming to json: " << std::chrono::duration_cast<std::chrono::milliseconds>(
<< std::chrono::duration_cast<std::chrono::milliseconds>(end2 - end) end2 - end)
.count() .count()
<< " milliseconds - request = " << request; << " milliseconds - request = " << request;

View File

@@ -1,9 +1,11 @@
#include <ripple/app/ledger/LedgerToJson.h> #include <ripple/app/ledger/LedgerToJson.h>
#include <ripple/protocol/STLedgerEntry.h> #include <ripple/protocol/STLedgerEntry.h>
#include <backend/BackendInterface.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <boost/json.hpp> #include <boost/json.hpp>
#include <backend/BackendInterface.h>
#include <rpc/RPCHelpers.h>
// Get state nodes from a ledger // Get state nodes from a ledger
// Inputs: // Inputs:
// limit: integer, maximum number of entries // limit: integer, maximum number of entries
@@ -18,6 +20,13 @@
// //
// //
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC { namespace RPC {
using boost::json::value_to; using boost::json::value_to;
@@ -65,7 +74,7 @@ doLedgerData(Context const& context)
} }
else else
{ {
BOOST_LOG_TRIVIAL(debug) << __func__ << " : parsing marker"; gLog.debug() << "Parsing marker";
marker = ripple::uint256{}; marker = ripple::uint256{};
if (!marker->parseHex(request.at(JS(marker)).as_string().c_str())) if (!marker->parseHex(request.at(JS(marker)).as_string().c_str()))
@@ -168,9 +177,8 @@ doLedgerData(Context const& context)
std::chrono::duration_cast<std::chrono::microseconds>(end - start) std::chrono::duration_cast<std::chrono::microseconds>(end - start)
.count(); .count();
BOOST_LOG_TRIVIAL(debug) gLog.debug() << "Number of results = " << results.size() << " fetched in "
<< __func__ << " number of results = " << results.size() << time << " microseconds";
<< " fetched in " << time << " microseconds";
boost::json::array objects; boost::json::array objects;
objects.reserve(results.size()); objects.reserve(results.size());
for (auto const& [key, object] : results) for (auto const& [key, object] : results)
@@ -194,8 +202,7 @@ doLedgerData(Context const& context)
time = std::chrono::duration_cast<std::chrono::microseconds>(end2 - end) time = std::chrono::duration_cast<std::chrono::microseconds>(end2 - end)
.count(); .count();
BOOST_LOG_TRIVIAL(debug) gLog.debug() << "Number of results = " << results.size()
<< __func__ << " number of results = " << results.size()
<< " serialized in " << time << " microseconds"; << " serialized in " << time << " microseconds";
return response; return response;

View File

@@ -1,5 +1,13 @@
#include <log/Logger.h>
#include <rpc/RPCHelpers.h> #include <rpc/RPCHelpers.h>
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC { namespace RPC {
Result Result
@@ -11,7 +19,8 @@ doNFTHistory(Context const& context)
auto const tokenID = std::get<ripple::uint256>(maybeTokenID); auto const tokenID = std::get<ripple::uint256>(maybeTokenID);
constexpr std::string_view outerFuncName = __func__; constexpr std::string_view outerFuncName = __func__;
auto const maybeResponse = traverseTransactions( auto const maybeResponse =
traverseTransactions(
context, context,
[&tokenID, &outerFuncName]( [&tokenID, &outerFuncName](
std::shared_ptr<Backend::BackendInterface const> const& backend, std::shared_ptr<Backend::BackendInterface const> const& backend,
@@ -23,12 +32,13 @@ doNFTHistory(Context const& context)
auto const start = std::chrono::system_clock::now(); auto const start = std::chrono::system_clock::now();
auto const txnsAndCursor = backend->fetchNFTTransactions( auto const txnsAndCursor = backend->fetchNFTTransactions(
tokenID, limit, forward, cursorIn, yield); tokenID, limit, forward, cursorIn, yield);
BOOST_LOG_TRIVIAL(info) gLog.info()
<< outerFuncName << " db fetch took " << outerFuncName << " db fetch took "
<< std::chrono::duration_cast<std::chrono::milliseconds>( << std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now() - start) std::chrono::system_clock::now() - start)
.count() .count()
<< " milliseconds - num blobs = " << txnsAndCursor.txns.size(); << " milliseconds - num blobs = "
<< txnsAndCursor.txns.size();
return txnsAndCursor; return txnsAndCursor;
}); });

View File

@@ -4,10 +4,12 @@
#include <ripple/protocol/Indexes.h> #include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h> #include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h> #include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <rpc/RPCHelpers.h> #include <rpc/RPCHelpers.h>
#include <boost/json.hpp>
#include <algorithm>
namespace json = boost::json; namespace json = boost::json;
namespace ripple { namespace ripple {

View File

@@ -3,9 +3,11 @@
#include <backend/BackendInterface.h> #include <backend/BackendInterface.h>
#include <config/Config.h> #include <config/Config.h>
#include <memory> #include <log/Logger.h>
#include <subscriptions/Message.h> #include <subscriptions/Message.h>
#include <memory>
class WsBase; class WsBase;
class Subscription class Subscription
@@ -87,6 +89,7 @@ public:
class SubscriptionManager class SubscriptionManager
{ {
using session_ptr = std::shared_ptr<WsBase>; using session_ptr = std::shared_ptr<WsBase>;
clio::Logger log_{"Subscriptions"};
std::vector<std::thread> workers_; std::vector<std::thread> workers_;
boost::asio::io_context ioc_; boost::asio::io_context ioc_;
@@ -134,8 +137,8 @@ public:
// We will eventually want to clamp this to be the number of strands, // We will eventually want to clamp this to be the number of strands,
// since adding more threads than we have strands won't see any // since adding more threads than we have strands won't see any
// performance benefits // performance benefits
BOOST_LOG_TRIVIAL(info) << "Starting subscription manager with " log_.info() << "Starting subscription manager with " << numThreads
<< numThreads << " workers"; << " workers";
workers_.reserve(numThreads); workers_.reserve(numThreads);
for (auto i = numThreads; i > 0; --i) for (auto i = numThreads; i > 0; --i)

View File

@@ -209,7 +209,8 @@ private:
friend Type friend Type
tag_invoke(boost::json::value_to_tag<Type>, boost::json::value const& value) tag_invoke(boost::json::value_to_tag<Type>, boost::json::value const& value)
{ {
assert(value.is_string()); if (not value.is_string())
throw std::runtime_error("`log_tag_style` must be a string");
auto const& style = value.as_string(); auto const& style = value.as_string();
if (boost::iequals(style, "int") || boost::iequals(style, "uint")) if (boost::iequals(style, "int") || boost::iequals(style, "uint"))

View File

@@ -19,6 +19,7 @@
#include <thread> #include <thread>
#include <etl/ReportingETL.h> #include <etl/ReportingETL.h>
#include <log/Logger.h>
#include <main/Build.h> #include <main/Build.h>
#include <rpc/Counters.h> #include <rpc/Counters.h>
#include <rpc/RPC.h> #include <rpc/RPC.h>
@@ -27,6 +28,7 @@
#include <vector> #include <vector>
#include <webserver/DOSGuard.h> #include <webserver/DOSGuard.h>
// TODO: consider removing those - visible to anyone including this header
namespace http = boost::beast::http; namespace http = boost::beast::http;
namespace net = boost::asio; namespace net = boost::asio;
namespace ssl = boost::asio::ssl; namespace ssl = boost::asio::ssl;
@@ -101,6 +103,7 @@ class HttpBase : public util::Taggable
send_lambda lambda_; send_lambda lambda_;
protected: protected:
clio::Logger perfLog_{"Performance"};
boost::beast::flat_buffer buffer_; boost::beast::flat_buffer buffer_;
bool bool
@@ -135,8 +138,7 @@ protected:
if (!ec_ && ec != boost::asio::error::operation_aborted) if (!ec_ && ec != boost::asio::error::operation_aborted)
{ {
ec_ = ec; ec_ = ec;
BOOST_LOG_TRIVIAL(info) perfLog_.info() << tag() << ": " << what << ": " << ec.message();
<< tag() << __func__ << ": " << what << ": " << ec.message();
boost::beast::get_lowest_layer(derived().stream()) boost::beast::get_lowest_layer(derived().stream())
.socket() .socket()
.close(ec); .close(ec);
@@ -168,11 +170,11 @@ public:
, lambda_(*this) , lambda_(*this)
, buffer_(std::move(buffer)) , buffer_(std::move(buffer))
{ {
BOOST_LOG_TRIVIAL(debug) << tag() << "http session created"; perfLog_.debug() << tag() << "http session created";
} }
virtual ~HttpBase() virtual ~HttpBase()
{ {
BOOST_LOG_TRIVIAL(debug) << tag() << "http session closed"; perfLog_.debug() << tag() << "http session closed";
} }
void void
@@ -234,8 +236,7 @@ public:
if (!ip) if (!ip)
return; return;
BOOST_LOG_TRIVIAL(debug) << tag() << "http::" << __func__ perfLog_.debug() << tag() << "Received request from ip = " << *ip
<< " received request from ip = " << *ip
<< " - posting to WorkQueue"; << " - posting to WorkQueue";
auto session = derived().shared_from_this(); auto session = derived().shared_from_this();
@@ -256,7 +257,8 @@ public:
dosGuard_, dosGuard_,
counters_, counters_,
*ip, *ip,
session); session,
perfLog_);
}, },
dosGuard_.isWhiteListed(*ip))) dosGuard_.isWhiteListed(*ip)))
{ {
@@ -321,7 +323,8 @@ handle_request(
DOSGuard& dosGuard, DOSGuard& dosGuard,
RPC::Counters& counters, RPC::Counters& counters,
std::string const& ip, std::string const& ip,
std::shared_ptr<Session> http) std::shared_ptr<Session> http,
clio::Logger& perfLog)
{ {
auto const httpResponse = [&req]( auto const httpResponse = [&req](
http::status status, http::status status,
@@ -356,9 +359,9 @@ handle_request(
try try
{ {
BOOST_LOG_TRIVIAL(debug) perfLog.debug() << http->tag()
<< http->tag() << "http received request from work queue: "
<< "http received request from work queue: " << req.body(); << req.body();
boost::json::object request; boost::json::object request;
std::string responseStr = ""; std::string responseStr = "";
@@ -422,8 +425,8 @@ handle_request(
error["request"] = request; error["request"] = request;
result = error; result = error;
BOOST_LOG_TRIVIAL(debug) << http->tag() << __func__ perfLog.debug()
<< " Encountered error: " << responseStr; << http->tag() << "Encountered error: " << responseStr;
} }
else else
{ {
@@ -457,8 +460,7 @@ handle_request(
} }
catch (std::exception const& e) catch (std::exception const& e)
{ {
BOOST_LOG_TRIVIAL(error) perfLog.error() << http->tag() << "Caught exception : " << e.what();
<< http->tag() << __func__ << " Caught exception : " << e.what();
return send(httpResponse( return send(httpResponse(
http::status::internal_server_error, http::status::internal_server_error,
"application/json", "application/json",

View File

@@ -1,10 +1,7 @@
#ifndef LISTENER_H #ifndef LISTENER_H
#define LISTENER_H #define LISTENER_H
#include <boost/asio/dispatch.hpp> #include <log/Logger.h>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <subscriptions/SubscriptionManager.h> #include <subscriptions/SubscriptionManager.h>
#include <util/Taggable.h> #include <util/Taggable.h>
#include <webserver/HttpSession.h> #include <webserver/HttpSession.h>
@@ -12,6 +9,10 @@
#include <webserver/SslHttpSession.h> #include <webserver/SslHttpSession.h>
#include <webserver/SslWsSession.h> #include <webserver/SslWsSession.h>
#include <boost/asio/dispatch.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <iostream> #include <iostream>
class SubscriptionManager; class SubscriptionManager;
@@ -23,6 +24,7 @@ class Detector
using std::enable_shared_from_this< using std::enable_shared_from_this<
Detector<PlainSession, SslSession>>::shared_from_this; Detector<PlainSession, SslSession>>::shared_from_this;
clio::Logger log_{"WebServer"};
boost::asio::io_context& ioc_; boost::asio::io_context& ioc_;
boost::beast::tcp_stream stream_; boost::beast::tcp_stream stream_;
std::optional<std::reference_wrapper<ssl::context>> ctx_; std::optional<std::reference_wrapper<ssl::context>> ctx_;
@@ -69,8 +71,7 @@ public:
if (ec == net::ssl::error::stream_truncated) if (ec == net::ssl::error::stream_truncated)
return; return;
BOOST_LOG_TRIVIAL(info) log_.info() << "Detector failed (" << message << "): " << ec.message();
<< "Detector failed: " << message << ec.message() << std::endl;
} }
// Launch the detector // Launch the detector
@@ -202,6 +203,7 @@ class Listener
using std::enable_shared_from_this< using std::enable_shared_from_this<
Listener<PlainSession, SslSession>>::shared_from_this; Listener<PlainSession, SslSession>>::shared_from_this;
clio::Logger log_{"WebServer"};
boost::asio::io_context& ioc_; boost::asio::io_context& ioc_;
std::optional<std::reference_wrapper<ssl::context>> ctx_; std::optional<std::reference_wrapper<ssl::context>> ctx_;
tcp::acceptor acceptor_; tcp::acceptor acceptor_;
@@ -255,8 +257,7 @@ public:
acceptor_.bind(endpoint, ec); acceptor_.bind(endpoint, ec);
if (ec) if (ec)
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "Failed to bind to endpoint: " << endpoint
<< "Failed to bind to endpoint: " << endpoint
<< ". message: " << ec.message(); << ". message: " << ec.message();
throw std::runtime_error("Failed to bind to specified endpoint"); throw std::runtime_error("Failed to bind to specified endpoint");
} }
@@ -265,8 +266,7 @@ public:
acceptor_.listen(net::socket_base::max_listen_connections, ec); acceptor_.listen(net::socket_base::max_listen_connections, ec);
if (ec) if (ec)
{ {
BOOST_LOG_TRIVIAL(error) log_.error() << "Failed to listen at endpoint: " << endpoint
<< "Failed to listen at endpoint: " << endpoint
<< ". message: " << ec.message(); << ". message: " << ec.message();
throw std::runtime_error("Failed to listen at specified endpoint"); throw std::runtime_error("Failed to listen at specified endpoint");
} }
@@ -336,6 +336,7 @@ make_HttpServer(
std::shared_ptr<ReportingETL const> etl, std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard) DOSGuard& dosGuard)
{ {
static clio::Logger log{"WebServer"};
if (!config.contains("server")) if (!config.contains("server"))
return nullptr; return nullptr;
@@ -348,7 +349,7 @@ make_HttpServer(
auto const maxQueueSize = auto const maxQueueSize =
serverConfig.valueOr<uint32_t>("max_queue_size", 0); // 0 is no limit serverConfig.valueOr<uint32_t>("max_queue_size", 0); // 0 is no limit
BOOST_LOG_TRIVIAL(info) << __func__ << " Number of workers = " << numThreads log.info() << "Number of workers = " << numThreads
<< ". Max queue size = " << maxQueueSize; << ". Max queue size = " << maxQueueSize;
auto server = std::make_shared<HttpServer>( auto server = std::make_shared<HttpServer>(

View File

@@ -1,15 +1,10 @@
#ifndef RIPPLE_REPORTING_WS_BASE_SESSION_H #ifndef RIPPLE_REPORTING_WS_BASE_SESSION_H
#define RIPPLE_REPORTING_WS_BASE_SESSION_H #define RIPPLE_REPORTING_WS_BASE_SESSION_H
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <iostream>
#include <memory>
#include <backend/BackendInterface.h> #include <backend/BackendInterface.h>
#include <etl/ETLSource.h> #include <etl/ETLSource.h>
#include <etl/ReportingETL.h> #include <etl/ReportingETL.h>
#include <log/Logger.h>
#include <rpc/Counters.h> #include <rpc/Counters.h>
#include <rpc/RPC.h> #include <rpc/RPC.h>
#include <rpc/WorkQueue.h> #include <rpc/WorkQueue.h>
@@ -18,6 +13,13 @@
#include <util/Taggable.h> #include <util/Taggable.h>
#include <webserver/DOSGuard.h> #include <webserver/DOSGuard.h>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <iostream>
#include <memory>
// TODO: Consider removing these. Visible to anyone including this header.
namespace http = boost::beast::http; namespace http = boost::beast::http;
namespace net = boost::asio; namespace net = boost::asio;
namespace ssl = boost::asio::ssl; namespace ssl = boost::asio::ssl;
@@ -27,8 +29,8 @@ using tcp = boost::asio::ip::tcp;
inline void inline void
logError(boost::beast::error_code ec, char const* what) logError(boost::beast::error_code ec, char const* what)
{ {
BOOST_LOG_TRIVIAL(debug) static clio::Logger log{"WebServer"};
<< __func__ << " : " << what << ": " << ec.message() << "\n"; log.debug() << what << ": " << ec.message() << "\n";
} }
inline boost::json::object inline boost::json::object
@@ -48,6 +50,8 @@ getDefaultWsResponse(boost::json::value const& id)
class WsBase : public util::Taggable class WsBase : public util::Taggable
{ {
protected: protected:
clio::Logger log_{"WebServer"};
clio::Logger perfLog_{"Performance"};
boost::system::error_code ec_; boost::system::error_code ec_;
public: public:
@@ -114,8 +118,7 @@ class WsSession : public WsBase,
if (!ec_ && ec != boost::asio::error::operation_aborted) if (!ec_ && ec != boost::asio::error::operation_aborted)
{ {
ec_ = ec; ec_ = ec;
BOOST_LOG_TRIVIAL(info) perfLog_.info() << tag() << ": " << what << ": " << ec.message();
<< tag() << __func__ << ": " << what << ": " << ec.message();
boost::beast::get_lowest_layer(derived().ws()).socket().close(ec); boost::beast::get_lowest_layer(derived().ws()).socket().close(ec);
if (auto manager = subscriptions_.lock(); manager) if (auto manager = subscriptions_.lock(); manager)
@@ -147,11 +150,11 @@ public:
, counters_(counters) , counters_(counters)
, queue_(queue) , queue_(queue)
{ {
BOOST_LOG_TRIVIAL(info) << tag() << "session created"; perfLog_.info() << tag() << "session created";
} }
virtual ~WsSession() virtual ~WsSession()
{ {
BOOST_LOG_TRIVIAL(info) << tag() << "session closed"; perfLog_.info() << tag() << "session closed";
} }
// Access the derived class, this is part of // Access the derived class, this is part of
@@ -244,7 +247,7 @@ public:
if (ec) if (ec)
return wsFail(ec, "accept"); return wsFail(ec, "accept");
BOOST_LOG_TRIVIAL(info) << tag() << "accepting new connection"; perfLog_.info() << tag() << "accepting new connection";
// Read a message // Read a message
do_read(); do_read();
@@ -287,7 +290,7 @@ public:
try try
{ {
BOOST_LOG_TRIVIAL(debug) perfLog_.debug()
<< tag() << "ws received request from work queue : " << request; << tag() << "ws received request from work queue : " << request;
auto range = backend_->fetchLedgerRange(); auto range = backend_->fetchLedgerRange();
@@ -309,8 +312,7 @@ public:
if (!context) if (!context)
{ {
BOOST_LOG_TRIVIAL(warning) perfLog_.warn() << tag() << "Could not create RPC context";
<< tag() << " could not create RPC context";
return sendError(RPC::RippledError::rpcBAD_SYNTAX); return sendError(RPC::RippledError::rpcBAD_SYNTAX);
} }
@@ -344,8 +346,7 @@ public:
} }
catch (std::exception const& e) catch (std::exception const& e)
{ {
BOOST_LOG_TRIVIAL(error) perfLog_.error() << tag() << "Caught exception : " << e.what();
<< tag() << __func__ << " caught exception : " << e.what();
return sendError(RPC::RippledError::rpcINTERNAL); return sendError(RPC::RippledError::rpcINTERNAL);
} }
@@ -385,8 +386,7 @@ public:
if (!ip) if (!ip)
return; return;
BOOST_LOG_TRIVIAL(info) << tag() << "ws::" << __func__ perfLog_.info() << tag() << "Received request from ip = " << *ip;
<< " received request from ip = " << *ip;
auto sendError = [this, ip]( auto sendError = [this, ip](
auto error, auto error,
@@ -399,7 +399,7 @@ public:
e["request"] = request; e["request"] = request;
auto responseStr = boost::json::serialize(e); auto responseStr = boost::json::serialize(e);
BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << responseStr; log_.trace() << responseStr;
dosGuard_.add(*ip, responseStr.size()); dosGuard_.add(*ip, responseStr.size());
send(std::move(responseStr)); send(std::move(responseStr));
}; };
@@ -429,8 +429,7 @@ public:
} }
else else
{ {
BOOST_LOG_TRIVIAL(debug) perfLog_.debug() << tag() << "Adding to work queue";
<< tag() << __func__ << " adding to work queue";
if (!queue_.postCoro( if (!queue_.postCoro(
[shared_this = shared_from_this(), [shared_this = shared_from_this(),

View File

@@ -1,17 +1,23 @@
#include <algorithm> #include <backend/BackendFactory.h>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h> #include <backend/DBHelpers.h>
#include <config/Config.h>
#include <etl/ReportingETL.h> #include <etl/ReportingETL.h>
#include <gtest/gtest.h> #include <log/Logger.h>
#include <rpc/RPCHelpers.h> #include <rpc/RPCHelpers.h>
#include <util/Fixtures.h>
#include <boost/log/core.hpp> #include <boost/log/core.hpp>
#include <boost/log/expressions.hpp> #include <boost/log/expressions.hpp>
#include <boost/log/trivial.hpp> #include <gtest/gtest.h>
#include <backend/BackendFactory.h>
#include <backend/BackendInterface.h>
#include <config/Config.h>
TEST(BackendTest, Basic) #include <algorithm>
class BackendTest : public NoLoggerFixture
{
};
TEST_F(BackendTest, Basic)
{ {
boost::asio::io_context ioc; boost::asio::io_context ioc;
std::optional<boost::asio::io_context::work> work; std::optional<boost::asio::io_context::work> work;
@@ -21,7 +27,7 @@ TEST(BackendTest, Basic)
boost::asio::spawn( boost::asio::spawn(
ioc, [&done, &work, &ioc](boost::asio::yield_context yield) { ioc, [&done, &work, &ioc](boost::asio::yield_context yield) {
boost::log::core::get()->set_filter( boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::warning); clio::log_severity >= clio::Severity::WARNING);
std::string keyspace = "clio_test_" + std::string keyspace = "clio_test_" +
std::to_string(std::chrono::system_clock::now() std::to_string(std::chrono::system_clock::now()
.time_since_epoch() .time_since_epoch()
@@ -41,7 +47,6 @@ TEST(BackendTest, Basic)
std::vector<boost::json::object> configs = {cassandraConfig}; std::vector<boost::json::object> configs = {cassandraConfig};
for (auto& config : configs) for (auto& config : configs)
{ {
std::cout << keyspace << std::endl;
auto backend = Backend::make_Backend(ioc, clio::Config{config}); auto backend = Backend::make_Backend(ioc, clio::Config{config});
std::string rawHeader = std::string rawHeader =
@@ -107,10 +112,8 @@ TEST(BackendTest, Basic)
} }
{ {
std::cout << "fetching ledger by sequence" << std::endl;
auto retLgr = auto retLgr =
backend->fetchLedgerBySequence(lgrInfo.seq, yield); backend->fetchLedgerBySequence(lgrInfo.seq, yield);
std::cout << "fetched ledger by sequence" << std::endl;
ASSERT_TRUE(retLgr.has_value()); ASSERT_TRUE(retLgr.has_value());
EXPECT_EQ(retLgr->seq, lgrInfo.seq); EXPECT_EQ(retLgr->seq, lgrInfo.seq);
EXPECT_EQ( EXPECT_EQ(
@@ -147,10 +150,8 @@ TEST(BackendTest, Basic)
EXPECT_EQ(seq, lgrInfoNext.seq); EXPECT_EQ(seq, lgrInfoNext.seq);
} }
{ {
std::cout << "fetching ledger by sequence" << std::endl;
auto retLgr = auto retLgr =
backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); backend->fetchLedgerBySequence(lgrInfoNext.seq, yield);
std::cout << "fetched ledger by sequence" << std::endl;
EXPECT_TRUE(retLgr.has_value()); EXPECT_TRUE(retLgr.has_value());
EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); EXPECT_EQ(retLgr->seq, lgrInfoNext.seq);
EXPECT_EQ( EXPECT_EQ(
@@ -802,9 +803,6 @@ TEST(BackendTest, Basic)
auto objs, auto objs,
auto accountTx, auto accountTx,
auto state) { auto state) {
std::cout
<< "writing ledger = " << std::to_string(lgrInfo.seq)
<< std::endl;
backend->startWrites(); backend->startWrites();
backend->writeLedger( backend->writeLedger(
@@ -978,11 +976,6 @@ TEST(BackendTest, Basic)
uint32_t limit = 10; uint32_t limit = 10;
page = backend->fetchLedgerPage( page = backend->fetchLedgerPage(
page.cursor, seq, limit, false, yield); page.cursor, seq, limit, false, yield);
std::cout << "fetched a page " << page.objects.size()
<< std::endl;
if (page.cursor)
std::cout << ripple::strHex(*page.cursor)
<< std::endl;
// if (page.cursor) // if (page.cursor)
// EXPECT_EQ(page.objects.size(), limit); // EXPECT_EQ(page.objects.size(), limit);
retObjs.insert( retObjs.insert(
@@ -1007,7 +1000,6 @@ TEST(BackendTest, Basic)
} }
} }
if (found != (obj.second.size() != 0)) if (found != (obj.second.size() != 0))
std::cout << ripple::strHex(obj.first) << std::endl;
ASSERT_EQ(found, obj.second.size() != 0); ASSERT_EQ(found, obj.second.size() != 0);
} }
}; };
@@ -1148,15 +1140,12 @@ TEST(BackendTest, Basic)
for (auto [seq, diff] : state) for (auto [seq, diff] : state)
{ {
std::cout << "flatteneing" << std::endl;
auto flat = flatten(seq); auto flat = flatten(seq);
std::cout << "flattened" << std::endl;
checkLedger( checkLedger(
lgrInfos[seq], lgrInfos[seq],
allTxns[seq], allTxns[seq],
flat, flat,
flattenAccountTx(seq)); flattenAccountTx(seq));
std::cout << "checked" << std::endl;
} }
} }
@@ -1168,11 +1157,11 @@ TEST(BackendTest, Basic)
EXPECT_EQ(done, true); EXPECT_EQ(done, true);
} }
TEST(Backend, cache) TEST_F(BackendTest, cache)
{ {
using namespace Backend; using namespace Backend;
boost::log::core::get()->set_filter( boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::warning); clio::log_severity >= clio::Severity::WARNING);
SimpleCache cache; SimpleCache cache;
ASSERT_FALSE(cache.isFull()); ASSERT_FALSE(cache.isFull());
cache.setFull(); cache.setFull();
@@ -1410,11 +1399,11 @@ TEST(Backend, cache)
} }
} }
TEST(Backend, cacheBackground) TEST_F(BackendTest, cacheBackground)
{ {
using namespace Backend; using namespace Backend;
boost::log::core::get()->set_filter( boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::warning); clio::log_severity >= clio::Severity::WARNING);
SimpleCache cache; SimpleCache cache;
ASSERT_FALSE(cache.isFull()); ASSERT_FALSE(cache.isFull());
ASSERT_EQ(cache.size(), 0); ASSERT_EQ(cache.size(), 0);
@@ -1814,7 +1803,7 @@ TEST(Backend, cacheBackground)
ASSERT_EQ(idx, allObjs.size()); ASSERT_EQ(idx, allObjs.size());
} }
TEST(Backend, cacheIntegration) TEST_F(BackendTest, cacheIntegration)
{ {
boost::asio::io_context ioc; boost::asio::io_context ioc;
std::optional<boost::asio::io_context::work> work; std::optional<boost::asio::io_context::work> work;
@@ -1824,7 +1813,7 @@ TEST(Backend, cacheIntegration)
boost::asio::spawn( boost::asio::spawn(
ioc, [&ioc, &done, &work](boost::asio::yield_context yield) { ioc, [&ioc, &done, &work](boost::asio::yield_context yield) {
boost::log::core::get()->set_filter( boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::warning); clio::log_severity >= clio::Severity::WARNING);
std::string keyspace = "clio_test_" + std::string keyspace = "clio_test_" +
std::to_string(std::chrono::system_clock::now() std::to_string(std::chrono::system_clock::now()
.time_since_epoch() .time_since_epoch()
@@ -1844,7 +1833,6 @@ TEST(Backend, cacheIntegration)
std::vector<boost::json::object> configs = {cassandraConfig}; std::vector<boost::json::object> configs = {cassandraConfig};
for (auto& config : configs) for (auto& config : configs)
{ {
std::cout << keyspace << std::endl;
auto backend = Backend::make_Backend(ioc, clio::Config{config}); auto backend = Backend::make_Backend(ioc, clio::Config{config});
backend->cache().setFull(); backend->cache().setFull();
@@ -1927,10 +1915,8 @@ TEST(Backend, cacheIntegration)
} }
{ {
std::cout << "fetching ledger by sequence" << std::endl;
auto retLgr = auto retLgr =
backend->fetchLedgerBySequence(lgrInfo.seq, yield); backend->fetchLedgerBySequence(lgrInfo.seq, yield);
std::cout << "fetched ledger by sequence" << std::endl;
ASSERT_TRUE(retLgr.has_value()); ASSERT_TRUE(retLgr.has_value());
EXPECT_EQ(retLgr->seq, lgrInfo.seq); EXPECT_EQ(retLgr->seq, lgrInfo.seq);
EXPECT_EQ( EXPECT_EQ(
@@ -1967,10 +1953,8 @@ TEST(Backend, cacheIntegration)
EXPECT_EQ(seq, lgrInfoNext.seq); EXPECT_EQ(seq, lgrInfoNext.seq);
} }
{ {
std::cout << "fetching ledger by sequence" << std::endl;
auto retLgr = auto retLgr =
backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); backend->fetchLedgerBySequence(lgrInfoNext.seq, yield);
std::cout << "fetched ledger by sequence" << std::endl;
EXPECT_TRUE(retLgr.has_value()); EXPECT_TRUE(retLgr.has_value());
EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); EXPECT_EQ(retLgr->seq, lgrInfoNext.seq);
EXPECT_EQ( EXPECT_EQ(
@@ -2226,8 +2210,6 @@ TEST(Backend, cacheIntegration)
return lgrInfo; return lgrInfo;
}; };
auto writeLedger = [&](auto lgrInfo, auto objs, auto state) { auto writeLedger = [&](auto lgrInfo, auto objs, auto state) {
std::cout << "writing ledger = "
<< std::to_string(lgrInfo.seq);
backend->startWrites(); backend->startWrites();
backend->writeLedger( backend->writeLedger(
@@ -2344,11 +2326,6 @@ TEST(Backend, cacheIntegration)
uint32_t limit = 10; uint32_t limit = 10;
page = backend->fetchLedgerPage( page = backend->fetchLedgerPage(
page.cursor, seq, limit, false, yield); page.cursor, seq, limit, false, yield);
std::cout << "fetched a page " << page.objects.size()
<< std::endl;
if (page.cursor)
std::cout << ripple::strHex(*page.cursor)
<< std::endl;
// if (page.cursor) // if (page.cursor)
// EXPECT_EQ(page.objects.size(), limit); // EXPECT_EQ(page.objects.size(), limit);
retObjs.insert( retObjs.insert(
@@ -2372,7 +2349,6 @@ TEST(Backend, cacheIntegration)
} }
} }
if (found != (obj.second.size() != 0)) if (found != (obj.second.size() != 0))
std::cout << ripple::strHex(obj.first) << std::endl;
ASSERT_EQ(found, obj.second.size() != 0); ASSERT_EQ(found, obj.second.size() != 0);
} }
}; };
@@ -2437,11 +2413,8 @@ TEST(Backend, cacheIntegration)
for (auto [seq, diff] : state) for (auto [seq, diff] : state)
{ {
std::cout << "flatteneing" << std::endl;
auto flat = flatten(seq); auto flat = flatten(seq);
std::cout << "flattened" << std::endl;
checkLedger(lgrInfos[seq], flat); checkLedger(lgrInfos[seq], flat);
std::cout << "checked" << std::endl;
} }
} }

View File

@@ -1,9 +1,8 @@
#include <config/Config.h> #include <config/Config.h>
#include <util/Fixtures.h>
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
#include <boost/json/parse.hpp> #include <boost/json/parse.hpp>
#include <boost/log/core.hpp>
#include <gtest/gtest.h>
#include <cstdio> #include <cstdio>
#include <exception> #include <exception>
@@ -34,17 +33,10 @@ constexpr static auto JSONData = R"JSON(
} }
)JSON"; )JSON";
class ConfigTest : public ::testing::Test class ConfigTest : public NoLoggerFixture
{ {
protected: protected:
Config cfg{json::parse(JSONData)}; Config cfg{json::parse(JSONData)};
void
SetUp() override
{
// disable logging in test
core::get()->set_logging_enabled(false);
}
}; };
TEST_F(ConfigTest, SanityCheck) TEST_F(ConfigTest, SanityCheck)

49
unittests/Logger.cpp Normal file
View File

@@ -0,0 +1,49 @@
#include <util/Fixtures.h>
using namespace clio;
// Used as a fixture for tests with enabled logging
class LoggerTest : public LoggerFixture
{
};
// Used as a fixture for tests with disabled logging
class NoLoggerTest : public NoLoggerFixture
{
};
TEST_F(LoggerTest, Basic)
{
Logger log{"General"};
log.info() << "Info line logged";
checkEqual("General:NFO Info line logged");
LogService::debug() << "Debug line with numbers " << 12345;
checkEqual("General:DBG Debug line with numbers 12345");
LogService::warn() << "Warning is logged";
checkEqual("General:WRN Warning is logged");
}
TEST_F(LoggerTest, Filtering)
{
Logger log{"General"};
log.trace() << "Should not be logged";
checkEmpty();
log.warn() << "Warning is logged";
checkEqual("General:WRN Warning is logged");
Logger tlog{"Trace"};
tlog.trace() << "Trace line logged for 'Trace' component";
checkEqual("Trace:TRC Trace line logged for 'Trace' component");
}
TEST_F(NoLoggerTest, Basic)
{
Logger log{"Trace"};
log.trace() << "Nothing";
checkEmpty();
LogService::fatal() << "Still nothing";
checkEmpty();
}

93
unittests/util/Fixtures.h Normal file
View File

@@ -0,0 +1,93 @@
#ifndef CLIO_UNITTEST_FIXTURES_H
#define CLIO_UNITTEST_FIXTURES_H
#include <ios>
#include <mutex>
#include <gtest/gtest.h>
#include <log/Logger.h>
/**
* @brief Fixture with LogService support.
*/
class LoggerFixture : public ::testing::Test
{
/**
* @brief A simple string buffer that can be used to mock std::cout for
* console logging.
*/
class FakeBuffer final : public std::stringbuf
{
public:
std::string
getStrAndReset()
{
auto value = str();
str("");
return value;
}
};
FakeBuffer buffer_;
std::ostream stream_ = std::ostream{&buffer_};
protected:
// Simulates the `LogService::init(config)` call
void
SetUp() override
{
static std::once_flag once_;
std::call_once(once_, [] {
boost::log::add_common_attributes();
boost::log::register_simple_formatter_factory<clio::Severity, char>(
"Severity");
});
namespace src = boost::log::sources;
namespace keywords = boost::log::keywords;
namespace sinks = boost::log::sinks;
namespace expr = boost::log::expressions;
auto core = boost::log::core::get();
core->remove_all_sinks();
boost::log::add_console_log(
stream_, keywords::format = "%Channel%:%Severity% %Message%");
auto min_severity = expr::channel_severity_filter(
clio::log_channel, clio::log_severity);
min_severity["General"] = clio::Severity::DEBUG;
min_severity["Trace"] = clio::Severity::TRACE;
core->set_filter(min_severity);
core->set_logging_enabled(true);
}
void
checkEqual(std::string expected)
{
auto value = buffer_.getStrAndReset();
ASSERT_EQ(value, expected + '\n');
}
void
checkEmpty()
{
ASSERT_TRUE(buffer_.getStrAndReset().empty());
}
};
/**
* @brief Fixture with LogService support but completely disabled logging.
*
* This is meant to be used as a base for other fixtures.
*/
class NoLoggerFixture : public LoggerFixture
{
protected:
void
SetUp() override
{
LoggerFixture::SetUp();
boost::log::core::get()->set_logging_enabled(false);
}
};
#endif // CLIO_UNITTEST_FIXTURES_H