Implement logging abstraction (#371)

Fixes #290
This commit is contained in:
Alex Kremer
2022-11-17 23:02:16 +01:00
committed by GitHub
parent 4b8dd7b981
commit a47bf2e8fe
38 changed files with 1441 additions and 1003 deletions

View File

@@ -11,6 +11,7 @@ if(NOT googletest_POPULATED)
endif()
target_link_libraries(clio_tests PUBLIC clio gtest_main)
target_include_directories(clio_tests PRIVATE unittests)
enable_testing()

View File

@@ -1 +1 @@
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing -Wall -Werror -Wno-dangling-else")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Werror -Wno-narrowing -Wno-dangling-else -Wno-deprecated-declarations")

View File

@@ -99,10 +99,11 @@ target_sources(clio PRIVATE
src/rpc/handlers/Subscribe.cpp
# Server
src/rpc/handlers/ServerInfo.cpp
# Utility
# Utilities
src/rpc/handlers/Random.cpp
src/util/Taggable.cpp
src/config/Config.cpp)
src/config/Config.cpp
src/log/Logger.cpp
src/util/Taggable.cpp)
add_executable(clio_server src/main/main.cpp)
target_link_libraries(clio_server PUBLIC clio)
@@ -110,8 +111,9 @@ target_link_libraries(clio_server PUBLIC clio)
if(BUILD_TESTS)
add_executable(clio_tests
unittests/RPCErrors.cpp
unittests/config.cpp
unittests/main.cpp)
unittests/Backend.cpp
unittests/Logger.cpp
unittests/Config.cpp)
include(CMake/deps/gtest.cmake)
endif()

View File

@@ -1,9 +1,7 @@
{
"database":
{
"database": {
"type": "cassandra",
"cassandra":
{
"cassandra": {
"contact_points": "127.0.0.1",
"port": 9042,
"keyspace": "clio",
@@ -14,8 +12,7 @@
"threads": 8
}
},
"etl_sources":
[
"etl_sources": [
{
"ip": "127.0.0.1",
"ws_port": "6006",
@@ -34,7 +31,34 @@
"ip": "0.0.0.0",
"port": 51233
},
"log_level":"debug",
"log_channels": [
{
"channel": "Backend",
"log_level": "fatal"
},
{
"channel": "WebServer",
"log_level": "info"
},
{
"channel": "Subscriptions",
"log_level": "info"
},
{
"channel": "RPC",
"log_level": "error"
},
{
"channel": "ETL",
"log_level": "debug"
},
{
"channel": "Performance",
"log_level": "trace"
}
],
"log_level": "info",
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%", // This is the default format
"log_to_console": true,
"log_directory": "./clio_log",
"log_rotation_size": 2048,

View File

@@ -1,16 +1,19 @@
#ifndef RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED
#define RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED
#include <boost/algorithm/string.hpp>
#include <backend/BackendInterface.h>
#include <backend/CassandraBackend.h>
#include <config/Config.h>
#include <log/Logger.h>
#include <boost/algorithm/string.hpp>
namespace Backend {
std::shared_ptr<BackendInterface>
make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
{
BOOST_LOG_TRIVIAL(info) << __func__ << ": Constructing BackendInterface";
static clio::Logger log{"Backend"};
log.info() << "Constructing BackendInterface";
auto readOnly = config.valueOr("read_only", false);
auto type = config.value<std::string>("database.type");
@@ -34,8 +37,7 @@ make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
backend->updateRange(rng->maxSequence);
}
BOOST_LOG_TRIVIAL(info)
<< __func__ << ": Constructed BackendInterface Successfully";
log.info() << "Constructed BackendInterface Successfully";
return backend;
}

View File

@@ -1,6 +1,15 @@
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <backend/BackendInterface.h>
#include <log/Logger.h>
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"Backend"};
} // namespace
namespace Backend {
bool
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
@@ -26,7 +35,7 @@ std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow(
boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(trace) << __func__ << "(yield)";
gLog.trace() << "called";
while (true)
{
try
@@ -43,7 +52,7 @@ BackendInterface::hardFetchLedgerRangeNoThrow(
std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow() const
{
BOOST_LOG_TRIVIAL(trace) << __func__ << "()";
gLog.trace() << "called";
return retryOnTimeout([&]() { return hardFetchLedgerRange(); });
}
@@ -57,21 +66,17 @@ BackendInterface::fetchLedgerObject(
auto obj = cache_.get(key, sequence);
if (obj)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - cache hit - " << ripple::strHex(key);
gLog.trace() << "Cache hit - " << ripple::strHex(key);
return *obj;
}
else
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - cache miss - " << ripple::strHex(key);
gLog.trace() << "Cache miss - " << ripple::strHex(key);
auto dbObj = doFetchLedgerObject(key, sequence, yield);
if (!dbObj)
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - missed cache and missed in db";
gLog.trace() << "Missed cache and missed in db";
else
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - missed cache but found in db";
gLog.trace() << "Missed cache but found in db";
return dbObj;
}
}
@@ -93,8 +98,7 @@ BackendInterface::fetchLedgerObjects(
else
misses.push_back(keys[i]);
}
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - cache hits = " << keys.size() - misses.size()
gLog.trace() << "Cache hits = " << keys.size() - misses.size()
<< " - cache misses = " << misses.size();
if (misses.size())
@@ -121,11 +125,9 @@ BackendInterface::fetchSuccessorKey(
{
auto succ = cache_.getSuccessor(key, ledgerSequence);
if (succ)
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - cache hit - " << ripple::strHex(key);
gLog.trace() << "Cache hit - " << ripple::strHex(key);
else
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - cache miss - " << ripple::strHex(key);
gLog.trace() << "Cache miss - " << ripple::strHex(key);
return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}
@@ -179,8 +181,8 @@ BackendInterface::fetchBookOffers(
succMillis += getMillis(mid2 - mid1);
if (!offerDir || offerDir->key >= bookEnd)
{
BOOST_LOG_TRIVIAL(trace) << __func__ << " - offerDir.has_value() "
<< offerDir.has_value() << " breaking";
gLog.trace() << "offerDir.has_value() " << offerDir.has_value()
<< " breaking";
break;
}
uTipIndex = offerDir->key;
@@ -196,8 +198,7 @@ BackendInterface::fetchBookOffers(
auto next = sle.getFieldU64(ripple::sfIndexNext);
if (!next)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " next is empty. breaking";
gLog.trace() << "Next is empty. breaking";
break;
}
auto nextKey = ripple::keylet::page(uTipIndex, next);
@@ -214,23 +215,21 @@ BackendInterface::fetchBookOffers(
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < keys.size() && i < limit; ++i)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " key = " << ripple::strHex(keys[i])
gLog.trace() << "Key = " << ripple::strHex(keys[i])
<< " blob = " << ripple::strHex(objs[i])
<< " ledgerSequence = " << ledgerSequence;
assert(objs[i].size());
page.offers.push_back({keys[i], objs[i]});
}
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " "
<< "Fetching " << std::to_string(keys.size()) << " offers took "
<< std::to_string(getMillis(mid - begin))
gLog.debug() << "Fetching " << std::to_string(keys.size())
<< " offers took " << std::to_string(getMillis(mid - begin))
<< " milliseconds. Fetching next dir took "
<< std::to_string(succMillis) << " milliseonds. Fetched next dir "
<< std::to_string(numSucc) << " times"
<< " Fetching next page of dir took " << std::to_string(pageMillis)
<< " milliseconds"
<< std::to_string(succMillis)
<< " milliseonds. Fetched next dir " << std::to_string(numSucc)
<< " times"
<< " Fetching next page of dir took "
<< std::to_string(pageMillis) << " milliseconds"
<< ". num pages = " << std::to_string(numPages)
<< ". Fetching all objects took "
<< std::to_string(getMillis(end - mid))
@@ -274,16 +273,15 @@ BackendInterface::fetchLedgerPage(
page.objects.push_back({std::move(keys[i]), std::move(objects[i])});
else if (!outOfOrder)
{
BOOST_LOG_TRIVIAL(error)
<< __func__
<< " deleted or non-existent object in successor table. key = "
gLog.error()
<< "Deleted or non-existent object in successor table. key = "
<< ripple::strHex(keys[i]) << " - seq = " << ledgerSequence;
std::stringstream msg;
for (size_t j = 0; j < objects.size(); ++j)
{
msg << " - " << ripple::strHex(keys[j]);
}
BOOST_LOG_TRIVIAL(error) << __func__ << msg.str();
gLog.error() << msg.str();
}
}
if (keys.size() && !reachedEnd)
@@ -304,7 +302,7 @@ BackendInterface::fetchFees(
if (!bytes)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " - could not find fees";
gLog.error() << "Could not find fees";
return {};
}

View File

@@ -1,17 +1,15 @@
#ifndef RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED
#define RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <boost/log/trivial.hpp>
#include <ripple/ledger/ReadView.h>
#include <backend/DBHelpers.h>
#include <backend/SimpleCache.h>
#include <backend/Types.h>
#include <config/Config.h>
#include <log/Logger.h>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <thread>
#include <type_traits>
@@ -46,6 +44,8 @@ template <class F>
auto
retryOnTimeout(F func, size_t waitMs = 500)
{
static clio::Logger log{"Backend"};
while (true)
{
try
@@ -54,8 +54,7 @@ retryOnTimeout(F func, size_t waitMs = 500)
}
catch (DatabaseTimeout& t)
{
BOOST_LOG_TRIVIAL(error)
<< __func__
log.error()
<< "Database request timed out. Sleeping and retrying ... ";
std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
}

View File

@@ -1,9 +1,13 @@
#include <ripple/app/tx/impl/details/NFTokenUtils.h>
#include <backend/CassandraBackend.h>
#include <backend/DBHelpers.h>
#include <log/Logger.h>
#include <functional>
#include <unordered_map>
using namespace clio;
namespace Backend {
// Type alias for async completion handlers
@@ -16,6 +20,8 @@ template <class T, class F>
void
processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func)
{
static clio::Logger log{"Backend"};
CassandraBackend const& backend = *requestParams.backend;
auto rc = cass_future_error_code(fut);
if (rc != CASS_OK)
@@ -23,9 +29,9 @@ processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func)
// exponential backoff with a max wait of 2^10 ms (about 1 second)
auto wait = std::chrono::milliseconds(
lround(std::pow(2, std::min(10u, requestParams.currentRetries))));
BOOST_LOG_TRIVIAL(error)
<< "ERROR!!! Cassandra write error: " << rc << ", "
<< cass_error_desc(rc) << " id= " << requestParams.toString()
log.error() << "ERROR!!! Cassandra write error: " << rc << ", "
<< cass_error_desc(rc)
<< " id= " << requestParams.toString()
<< ", current retries " << requestParams.currentRetries
<< ", retrying in " << wait.count() << " milliseconds";
++requestParams.currentRetries;
@@ -40,11 +46,11 @@ processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func)
}
else
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " Succesfully inserted a record";
log.trace() << "Succesfully inserted a record";
requestParams.finish();
}
}
template <class T>
void
processAsyncWrite(CassFuture* fut, void* cbData)
@@ -155,6 +161,7 @@ makeAndExecuteAsyncWrite(
auto* cb = new WriteCallbackData<T, B>(b, std::move(d), bind, id);
cb->start();
}
template <class T, class B>
std::shared_ptr<BulkWriteCallbackData<T, B>>
makeAndExecuteBulkAsyncWrite(
@@ -170,13 +177,14 @@ makeAndExecuteBulkAsyncWrite(
cb->start();
return cb;
}
void
CassandraBackend::doWriteLedgerObject(
std::string&& key,
std::uint32_t const seq,
std::string&& blob)
{
BOOST_LOG_TRIVIAL(trace) << "Writing ledger object to cassandra";
log_.trace() << "Writing ledger object to cassandra";
if (range)
makeAndExecuteAsyncWrite(
this,
@@ -204,15 +212,16 @@ CassandraBackend::doWriteLedgerObject(
},
"ledger_object");
}
void
CassandraBackend::writeSuccessor(
std::string&& key,
std::uint32_t const seq,
std::string&& successor)
{
BOOST_LOG_TRIVIAL(trace)
<< "Writing successor. key = " << key
<< " seq = " << std::to_string(seq) << " successor = " << successor;
log_.trace() << "Writing successor. key = " << key.size() << " bytes. "
<< " seq = " << std::to_string(seq)
<< " successor = " << successor.size() << " bytes.";
assert(key.size() != 0);
assert(successor.size() != 0);
makeAndExecuteAsyncWrite(
@@ -319,7 +328,7 @@ CassandraBackend::writeTransaction(
std::string&& transaction,
std::string&& metadata)
{
BOOST_LOG_TRIVIAL(trace) << "Writing txn to cassandra";
log_.trace() << "Writing txn to cassandra";
std::string hashCpy = hash;
makeAndExecuteAsyncWrite(
@@ -393,13 +402,13 @@ CassandraBackend::writeNFTs(std::vector<NFTsData>&& data)
std::optional<LedgerRange>
CassandraBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra";
log_.trace() << "Fetching from cassandra";
CassandraStatement statement{selectLedgerRange_};
CassandraResult result = executeAsyncRead(statement, yield);
if (!result)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows";
log_.error() << "No rows";
return {};
}
LedgerRange range;
@@ -528,9 +537,10 @@ CassandraBackend::fetchTransactions(
throw DatabaseTimeout();
}
BOOST_LOG_TRIVIAL(debug)
<< "Fetched " << numHashes << " transactions from Cassandra in "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
log_.debug() << "Fetched " << numHashes
<< " transactions from Cassandra in "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
end - start)
.count()
<< " milliseconds";
return results;
@@ -550,9 +560,7 @@ CassandraBackend::fetchAllTransactionHashesInLedger(
auto end = std::chrono::system_clock::now();
if (!result)
{
BOOST_LOG_TRIVIAL(error)
<< __func__
<< " - no rows . ledger = " << std::to_string(ledgerSequence);
log_.error() << "No rows. Ledger = " << std::to_string(ledgerSequence);
return {};
}
std::vector<ripple::uint256> hashes;
@@ -560,10 +568,10 @@ CassandraBackend::fetchAllTransactionHashesInLedger(
{
hashes.push_back(result.getUInt256());
} while (result.nextRow());
BOOST_LOG_TRIVIAL(debug)
<< "Fetched " << hashes.size()
log_.debug() << "Fetched " << hashes.size()
<< " transaction hashes from Cassandra in "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
<< std::chrono::duration_cast<std::chrono::milliseconds>(
end - start)
.count()
<< " milliseconds";
return hashes;
@@ -613,9 +621,9 @@ CassandraBackend::fetchNFTTransactions(
{
statement.bindNextIntTuple(
cursor->ledgerSequence, cursor->transactionIndex);
BOOST_LOG_TRIVIAL(debug) << " token_id = " << ripple::strHex(tokenID)
log_.debug() << "token_id = " << ripple::strHex(tokenID)
<< " tuple = " << cursor->ledgerSequence
<< " : " << cursor->transactionIndex;
<< cursor->transactionIndex;
}
else
{
@@ -624,9 +632,8 @@ CassandraBackend::fetchNFTTransactions(
forward ? 0 : std::numeric_limits<std::uint32_t>::max();
statement.bindNextIntTuple(placeHolder, placeHolder);
BOOST_LOG_TRIVIAL(debug)
<< " token_id = " << ripple::strHex(tokenID) << " idx = " << seq
<< " tuple = " << placeHolder;
log_.debug() << "token_id = " << ripple::strHex(tokenID)
<< " idx = " << seq << " tuple = " << placeHolder;
}
statement.bindNextUInt(limit);
@@ -635,19 +642,19 @@ CassandraBackend::fetchNFTTransactions(
if (!result.hasResult())
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned";
log_.debug() << "No rows returned";
return {};
}
std::vector<ripple::uint256> hashes = {};
auto numRows = result.numRows();
BOOST_LOG_TRIVIAL(info) << "num_rows = " << numRows;
log_.info() << "num_rows = " << numRows;
do
{
hashes.push_back(result.getUInt256());
if (--numRows == 0)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " setting cursor";
log_.debug() << "Setting cursor";
auto const [lgrSeq, txnIdx] = result.getInt64Tuple();
cursor = {
static_cast<std::uint32_t>(lgrSeq),
@@ -662,11 +669,11 @@ CassandraBackend::fetchNFTTransactions(
} while (result.nextRow());
auto txns = fetchTransactions(hashes, yield);
BOOST_LOG_TRIVIAL(debug) << __func__ << " txns = " << txns.size();
log_.debug() << "Txns = " << txns.size();
if (txns.size() == limit)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " returning cursor";
log_.debug() << "Returning cursor";
return {txns, cursor};
}
@@ -698,9 +705,9 @@ CassandraBackend::fetchAccountTransactions(
{
statement.bindNextIntTuple(
cursor->ledgerSequence, cursor->transactionIndex);
BOOST_LOG_TRIVIAL(debug) << " account = " << ripple::strHex(account)
log_.debug() << "account = " << ripple::strHex(account)
<< " tuple = " << cursor->ledgerSequence
<< " : " << cursor->transactionIndex;
<< cursor->transactionIndex;
}
else
{
@@ -709,9 +716,8 @@ CassandraBackend::fetchAccountTransactions(
forward ? 0 : std::numeric_limits<std::uint32_t>::max();
statement.bindNextIntTuple(placeHolder, placeHolder);
BOOST_LOG_TRIVIAL(debug)
<< " account = " << ripple::strHex(account) << " idx = " << seq
<< " tuple = " << placeHolder;
log_.debug() << "account = " << ripple::strHex(account)
<< " idx = " << seq << " tuple = " << placeHolder;
}
statement.bindNextUInt(limit);
@@ -719,19 +725,19 @@ CassandraBackend::fetchAccountTransactions(
if (!result.hasResult())
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned";
log_.debug() << "No rows returned";
return {};
}
std::vector<ripple::uint256> hashes = {};
auto numRows = result.numRows();
BOOST_LOG_TRIVIAL(info) << "num_rows = " << std::to_string(numRows);
log_.info() << "num_rows = " << std::to_string(numRows);
do
{
hashes.push_back(result.getUInt256());
if (--numRows == 0)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " setting cursor";
log_.debug() << "Setting cursor";
auto [lgrSeq, txnIdx] = result.getInt64Tuple();
cursor = {
static_cast<std::uint32_t>(lgrSeq),
@@ -746,11 +752,11 @@ CassandraBackend::fetchAccountTransactions(
} while (result.nextRow());
auto txns = fetchTransactions(hashes, yield);
BOOST_LOG_TRIVIAL(debug) << __func__ << "txns = " << txns.size();
log_.debug() << "Txns = " << txns.size();
if (txns.size() == limit)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " returning cursor";
log_.debug() << "Returning cursor";
return {txns, cursor};
}
@@ -763,7 +769,7 @@ CassandraBackend::doFetchSuccessorKey(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra";
log_.trace() << "Fetching from cassandra";
CassandraStatement statement{selectSuccessor_};
statement.bindNextBytes(key);
statement.bindNextInt(ledgerSequence);
@@ -772,7 +778,7 @@ CassandraBackend::doFetchSuccessorKey(
if (!result)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows";
log_.debug() << "No rows";
return {};
}
auto next = result.getUInt256();
@@ -787,7 +793,7 @@ CassandraBackend::doFetchLedgerObject(
std::uint32_t const sequence,
boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra";
log_.trace() << "Fetching from cassandra";
CassandraStatement statement{selectObject_};
statement.bindNextBytes(key);
statement.bindNextInt(sequence);
@@ -796,7 +802,7 @@ CassandraBackend::doFetchLedgerObject(
if (!result)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows";
log_.debug() << "No rows";
return {};
}
auto res = result.getBytes();
@@ -820,8 +826,7 @@ CassandraBackend::doFetchLedgerObjects(
result_type result(handler);
std::size_t const numKeys = keys.size();
BOOST_LOG_TRIVIAL(trace)
<< "Fetching " << numKeys << " records from Cassandra";
log_.trace() << "Fetching " << numKeys << " records from Cassandra";
std::atomic_int numOutstanding = numKeys;
std::vector<Blob> results{numKeys};
std::vector<std::shared_ptr<ReadCallbackData<result_type>>> cbs;
@@ -850,8 +855,7 @@ CassandraBackend::doFetchLedgerObjects(
throw DatabaseTimeout();
}
BOOST_LOG_TRIVIAL(trace)
<< "Fetched " << numKeys << " records from Cassandra";
log_.trace() << "Fetched " << numKeys << " records from Cassandra";
return results;
}
@@ -870,9 +874,7 @@ CassandraBackend::fetchLedgerDiff(
if (!result)
{
BOOST_LOG_TRIVIAL(error)
<< __func__
<< " - no rows . ledger = " << std::to_string(ledgerSequence);
log_.error() << "No rows. Ledger = " << std::to_string(ledgerSequence);
return {};
}
std::vector<ripple::uint256> keys;
@@ -880,9 +882,10 @@ CassandraBackend::fetchLedgerDiff(
{
keys.push_back(result.getUInt256());
} while (result.nextRow());
BOOST_LOG_TRIVIAL(debug)
<< "Fetched " << keys.size() << " diff hashes from Cassandra in "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
log_.debug() << "Fetched " << keys.size()
<< " diff hashes from Cassandra in "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
end - start)
.count()
<< " milliseconds";
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
@@ -951,12 +954,12 @@ CassandraBackend::doOnlineDelete(
cv));
std::unique_lock<std::mutex> lck(mtx);
BOOST_LOG_TRIVIAL(trace) << __func__ << "Got the mutex";
log_.trace() << "Got the mutex";
cv.wait(lck, [&numOutstanding, concurrentLimit]() {
return numOutstanding < concurrentLimit;
});
}
BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched a page";
log_.debug() << "Fetched a page";
cursor = curCursor;
if (!cursor)
break;
@@ -982,11 +985,11 @@ CassandraBackend::open(bool readOnly)
if (open_)
{
assert(false);
BOOST_LOG_TRIVIAL(error) << "database is already open";
log_.error() << "Database is already open";
return;
}
BOOST_LOG_TRIVIAL(info) << "Opening Cassandra Backend";
log_.info() << "Opening Cassandra Backend";
CassCluster* cluster = cass_cluster_new();
if (!cluster)
@@ -1002,7 +1005,7 @@ CassandraBackend::open(bool readOnly)
if (cass_cluster_set_cloud_secure_connection_bundle(
cluster, secureConnectBundle.c_str()) != CASS_OK)
{
BOOST_LOG_TRIVIAL(error) << "Unable to configure cloud using the "
log_.error() << "Unable to configure cloud using the "
"secure connection bundle: "
<< secureConnectBundle;
throw std::runtime_error(
@@ -1057,7 +1060,7 @@ CassandraBackend::open(bool readOnly)
auto username = config_.maybeValue<std::string>("username");
if (username)
{
BOOST_LOG_TRIVIAL(debug) << "user = " << *username;
log_.debug() << "user = " << *username;
auto password = config_.value<std::string>("password");
cass_cluster_set_credentials(
cluster, username->c_str(), password.c_str());
@@ -1080,10 +1083,11 @@ CassandraBackend::open(bool readOnly)
"max_read_requests_outstanding", maxReadRequestsOutstanding);
syncInterval_ = config_.valueOr<int>("sync_interval", syncInterval_);
BOOST_LOG_TRIVIAL(info)
<< __func__ << " sync interval is " << syncInterval_
<< ". max write requests outstanding is " << maxWriteRequestsOutstanding
<< ". max read requests outstanding is " << maxReadRequestsOutstanding;
log_.info() << "Sync interval is " << syncInterval_
<< ". max write requests outstanding is "
<< maxWriteRequestsOutstanding
<< ". max read requests outstanding is "
<< maxReadRequestsOutstanding;
cass_cluster_set_request_timeout(cluster, 10000);
@@ -1098,7 +1102,7 @@ CassandraBackend::open(bool readOnly)
ss << "nodestore: Error setting Cassandra max core connections per "
"host"
<< ", result: " << rc << ", " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << ss.str();
log_.error() << ss.str();
throw std::runtime_error(ss.str());
}
@@ -1140,8 +1144,7 @@ CassandraBackend::open(bool readOnly)
auto keyspace = config_.valueOr<std::string>("keyspace", "");
if (keyspace.empty())
{
BOOST_LOG_TRIVIAL(warning)
<< "No keyspace specified. Using keyspace clio";
log_.warn() << "No keyspace specified. Using keyspace clio";
keyspace = "clio";
}
@@ -1149,14 +1152,13 @@ CassandraBackend::open(bool readOnly)
auto tablePrefix = config_.valueOr<std::string>("table_prefix", "");
if (tablePrefix.empty())
{
BOOST_LOG_TRIVIAL(warning) << "Table prefix is empty";
log_.warn() << "Table prefix is empty";
}
cass_cluster_set_connect_timeout(cluster, 10000);
auto ttl = ttl_ * 2;
BOOST_LOG_TRIVIAL(info)
<< __func__ << " setting ttl to " << std::to_string(ttl);
log_.info() << "Setting ttl to " << std::to_string(ttl);
auto executeSimpleStatement = [this](std::string const& query) {
CassStatement* statement = makeStatement(query.c_str(), 0);
@@ -1169,7 +1171,7 @@ CassandraBackend::open(bool readOnly)
std::stringstream ss;
ss << "nodestore: Error executing simple statement: " << rc << ", "
<< cass_error_desc(rc) << " - " << query;
BOOST_LOG_TRIVIAL(error) << ss.str();
log_.error() << ss.str();
return false;
}
return true;
@@ -1192,7 +1194,7 @@ CassandraBackend::open(bool readOnly)
ss << "nodestore: Error connecting Cassandra session keyspace: "
<< rc << ", " << cass_error_desc(rc)
<< ", trying to create it ourselves";
BOOST_LOG_TRIVIAL(error) << ss.str();
log_.error() << ss.str();
// if the keyspace doesn't exist, try to create it
session_.reset(cass_session_new());
fut = cass_session_connect(session_.get(), cluster);
@@ -1203,7 +1205,7 @@ CassandraBackend::open(bool readOnly)
std::stringstream ss;
ss << "nodestore: Error connecting Cassandra session at all: "
<< rc << ", " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << ss.str();
log_.error() << ss.str();
}
else
{
@@ -1626,6 +1628,6 @@ CassandraBackend::open(bool readOnly)
open_ = true;
BOOST_LOG_TRIVIAL(info) << "Opened CassandraBackend successfully";
log_.info() << "Opened CassandraBackend successfully";
}
} // namespace Backend

View File

@@ -2,16 +2,19 @@
#define RIPPLE_APP_REPORTING_CASSANDRABACKEND_H_INCLUDED
#include <ripple/basics/base_uint.h>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <log/Logger.h>
#include <cassandra.h>
#include <boost/asio.hpp>
#include <boost/asio/async_result.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/filesystem.hpp>
#include <boost/json.hpp>
#include <boost/log/trivial.hpp>
#include <atomic>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <cassandra.h>
#include <cstddef>
#include <iostream>
#include <memory>
@@ -25,6 +28,7 @@ namespace Backend {
class CassandraPreparedStatement
{
private:
clio::Logger log_{"Backend"};
CassPrepared const* prepared_ = nullptr;
public:
@@ -65,7 +69,7 @@ public:
std::stringstream ss;
ss << "nodestore: Error preparing statement : " << rc << ", "
<< cass_error_desc(rc) << ". query : " << query;
BOOST_LOG_TRIVIAL(error) << ss.str();
log_.error() << ss.str();
}
cass_future_free(prepareFuture);
return rc == CASS_OK;
@@ -73,7 +77,7 @@ public:
~CassandraPreparedStatement()
{
BOOST_LOG_TRIVIAL(trace) << __func__;
log_.trace() << "called";
if (prepared_)
{
cass_prepared_free(prepared_);
@@ -86,6 +90,7 @@ class CassandraStatement
{
CassStatement* statement_ = nullptr;
size_t curBindingIndex_ = 0;
clio::Logger log_{"Backend"};
public:
CassandraStatement(CassandraPreparedStatement const& prepared)
@@ -123,7 +128,7 @@ public:
std::stringstream ss;
ss << "Error binding boolean to statement: " << rc << ", "
<< cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
log_.error() << ss.str();
throw std::runtime_error(ss.str());
}
curBindingIndex_++;
@@ -179,7 +184,7 @@ public:
std::stringstream ss;
ss << "Error binding bytes to statement: " << rc << ", "
<< cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
log_.error() << ss.str();
throw std::runtime_error(ss.str());
}
curBindingIndex_++;
@@ -191,8 +196,8 @@ public:
if (!statement_)
throw std::runtime_error(
"CassandraStatement::bindNextUInt - statement_ is null");
BOOST_LOG_TRIVIAL(trace)
<< std::to_string(curBindingIndex_) << " " << std::to_string(value);
log_.trace() << std::to_string(curBindingIndex_) << " "
<< std::to_string(value);
CassError rc =
cass_statement_bind_int32(statement_, curBindingIndex_, value);
if (rc != CASS_OK)
@@ -200,7 +205,7 @@ public:
std::stringstream ss;
ss << "Error binding uint to statement: " << rc << ", "
<< cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
log_.error() << ss.str();
throw std::runtime_error(ss.str());
}
curBindingIndex_++;
@@ -225,7 +230,7 @@ public:
std::stringstream ss;
ss << "Error binding int to statement: " << rc << ", "
<< cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
log_.error() << ss.str();
throw std::runtime_error(ss.str());
}
curBindingIndex_++;
@@ -241,7 +246,7 @@ public:
std::stringstream ss;
ss << "Error binding int to tuple: " << rc << ", "
<< cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
log_.error() << ss.str();
throw std::runtime_error(ss.str());
}
rc = cass_tuple_set_int64(tuple, 1, second);
@@ -250,7 +255,7 @@ public:
std::stringstream ss;
ss << "Error binding int to tuple: " << rc << ", "
<< cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
log_.error() << ss.str();
throw std::runtime_error(ss.str());
}
rc = cass_statement_bind_tuple(statement_, curBindingIndex_, tuple);
@@ -259,7 +264,7 @@ public:
std::stringstream ss;
ss << "Error binding tuple to statement: " << rc << ", "
<< cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
log_.error() << ss.str();
throw std::runtime_error(ss.str());
}
cass_tuple_free(tuple);
@@ -275,6 +280,7 @@ public:
class CassandraResult
{
clio::Logger log_{"Backend"};
CassResult const* result_ = nullptr;
CassRow const* row_ = nullptr;
CassIterator* iter_ = nullptr;
@@ -365,7 +371,7 @@ public:
std::stringstream msg;
msg << "CassandraResult::getBytes - error getting value: " << rc
<< ", " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << msg.str();
log_.error() << msg.str();
throw std::runtime_error(msg.str());
}
curGetIndex_++;
@@ -386,7 +392,7 @@ public:
std::stringstream msg;
msg << "CassandraResult::getuint256 - error getting value: " << rc
<< ", " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << msg.str();
log_.error() << msg.str();
throw std::runtime_error(msg.str());
}
curGetIndex_++;
@@ -406,7 +412,7 @@ public:
std::stringstream msg;
msg << "CassandraResult::getInt64 - error getting value: " << rc
<< ", " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << msg.str();
log_.error() << msg.str();
throw std::runtime_error(msg.str());
}
++curGetIndex_;
@@ -490,10 +496,9 @@ public:
{
if (!row_)
{
std::stringstream msg;
msg << __func__ << " - no result";
BOOST_LOG_TRIVIAL(error) << msg.str();
throw std::runtime_error(msg.str());
std::string msg{"No result"};
log_.error() << msg;
throw std::runtime_error(msg);
}
cass_bool_t val;
CassError rc =
@@ -501,9 +506,8 @@ public:
if (rc != CASS_OK)
{
std::stringstream msg;
msg << __func__ << " - error getting value: " << rc << ", "
<< cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << msg.str();
msg << "Error getting value: " << rc << ", " << cass_error_desc(rc);
log_.error() << msg.str();
throw std::runtime_error(msg.str());
}
++curGetIndex_;
@@ -518,6 +522,7 @@ public:
cass_iterator_free(iter_);
}
};
inline bool
isTimeout(CassError rc)
{
@@ -597,6 +602,7 @@ private:
return ret;
}
clio::Logger log_{"Backend"};
std::atomic<bool> open_{false};
std::unique_ptr<CassSession, void (*)(CassSession*)> session_{
@@ -751,13 +757,11 @@ public:
statement.bindNextInt(ledgerSequence_ - 1);
if (!executeSyncUpdate(statement))
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " Update failed for ledger "
log_.warn() << "Update failed for ledger "
<< std::to_string(ledgerSequence_) << ". Returning";
return false;
}
BOOST_LOG_TRIVIAL(info) << __func__ << " Committed ledger "
<< std::to_string(ledgerSequence_);
log_.info() << "Committed ledger " << std::to_string(ledgerSequence_);
return true;
}
@@ -791,19 +795,17 @@ public:
statement.bindNextInt(lastSync_);
if (!executeSyncUpdate(statement))
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " Update failed for ledger "
log_.warn() << "Update failed for ledger "
<< std::to_string(ledgerSequence_) << ". Returning";
return false;
}
BOOST_LOG_TRIVIAL(info) << __func__ << " Committed ledger "
log_.info() << "Committed ledger "
<< std::to_string(ledgerSequence_);
lastSync_ = ledgerSequence_;
}
else
{
BOOST_LOG_TRIVIAL(info)
<< __func__ << " Skipping commit. sync interval is "
log_.info() << "Skipping commit. sync interval is "
<< std::to_string(syncInterval_) << " - last sync is "
<< std::to_string(lastSync_) << " - ledger sequence is "
<< std::to_string(ledgerSequence_);
@@ -826,12 +828,12 @@ public:
std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override
{
BOOST_LOG_TRIVIAL(trace) << __func__;
log_.trace() << "called";
CassandraStatement statement{selectLatestLedger_};
CassandraResult result = executeAsyncRead(statement, yield);
if (!result.hasResult())
{
BOOST_LOG_TRIVIAL(error)
log_.error()
<< "CassandraBackend::fetchLatestLedgerSequence - no rows";
return {};
}
@@ -843,13 +845,13 @@ public:
std::uint32_t const sequence,
boost::asio::yield_context& yield) const override
{
BOOST_LOG_TRIVIAL(trace) << __func__;
log_.trace() << "called";
CassandraStatement statement{selectLedgerBySeq_};
statement.bindNextInt(sequence);
CassandraResult result = executeAsyncRead(statement, yield);
if (!result)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows";
log_.error() << "No rows";
return {};
}
std::vector<unsigned char> header = result.getBytes();
@@ -869,7 +871,7 @@ public:
if (!result.hasResult())
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned";
log_.debug() << "No rows returned";
return {};
}
@@ -916,7 +918,7 @@ public:
std::optional<int64_t>
getToken(void const* key, boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra";
log_.trace() << "Fetching from cassandra";
CassandraStatement statement{getToken_};
statement.bindNextBytes(key, 32);
@@ -924,7 +926,7 @@ public:
if (!result)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows";
log_.error() << "No rows";
return {};
}
int64_t token = result.getInt64();
@@ -939,14 +941,14 @@ public:
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const override
{
BOOST_LOG_TRIVIAL(trace) << __func__;
log_.trace() << "called";
CassandraStatement statement{selectTransaction_};
statement.bindNextBytes(hash);
CassandraResult result = executeAsyncRead(statement, yield);
if (!result)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows";
log_.error() << "No rows";
return {};
}
return {
@@ -1036,9 +1038,7 @@ public:
std::unique_lock<std::mutex> lck(throttleMutex_);
if (!canAddRequest())
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Max outstanding requests reached. "
log_.debug() << "Max outstanding requests reached. "
<< "Waiting for other requests to finish";
throttleCv_.wait(lck, [this]() { return canAddRequest(); });
}
@@ -1142,7 +1142,7 @@ public:
ss << "Cassandra sync write error";
ss << ", retrying";
ss << ": " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(warning) << ss.str();
log_.warn() << ss.str();
std::this_thread::sleep_for(std::chrono::milliseconds(5));
}
} while (rc != CASS_OK);
@@ -1166,7 +1166,7 @@ public:
ss << "Cassandra sync update error";
ss << ", retrying";
ss << ": " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(warning) << ss.str();
log_.warn() << ss.str();
std::this_thread::sleep_for(std::chrono::milliseconds(5));
}
} while (rc != CASS_OK);
@@ -1176,7 +1176,7 @@ public:
CassRow const* row = cass_result_first_row(res);
if (!row)
{
BOOST_LOG_TRIVIAL(error) << "executeSyncUpdate - no rows";
log_.error() << "executeSyncUpdate - no rows";
cass_result_free(res);
return false;
}
@@ -1185,16 +1185,14 @@ public:
if (rc != CASS_OK)
{
cass_result_free(res);
BOOST_LOG_TRIVIAL(error)
<< "executeSyncUpdate - error getting result " << rc << ", "
<< cass_error_desc(rc);
log_.error() << "executeSyncUpdate - error getting result " << rc
<< ", " << cass_error_desc(rc);
return false;
}
cass_result_free(res);
if (success != cass_true && timedOut)
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " Update failed, but timedOut is true";
log_.warn() << "Update failed, but timedOut is true";
// if there was a timeout, the update may have succeeded in the
// background on the first attempt. To determine if this happened,
// we query the range from the db, making sure the range is what
@@ -1230,15 +1228,14 @@ public:
if (ec)
{
BOOST_LOG_TRIVIAL(error)
<< "Cannot read async cass_future_error_code";
log_.error() << "Cannot read async cass_future_error_code";
}
if (rc != CASS_OK)
{
std::stringstream ss;
ss << "Cassandra executeAsyncRead error";
ss << ": " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << ss.str();
log_.error() << ss.str();
}
if (isTimeout(rc))
{

View File

@@ -1,6 +1,6 @@
#include <config/Config.h>
#include <log/Logger.h>
#include <boost/log/trivial.hpp>
#include <fstream>
namespace clio {
@@ -161,11 +161,11 @@ ConfigReader::open(std::filesystem::path path)
}
catch (std::exception const& e)
{
BOOST_LOG_TRIVIAL(error) << "Could not read configuration file from '"
LogService::error() << "Could not read configuration file from '"
<< path.string() << "': " << e.what();
}
BOOST_LOG_TRIVIAL(warning) << "Using empty default configuration";
LogService::warn() << "Using empty default configuration";
return Config{};
}

View File

@@ -5,18 +5,22 @@
#include <boost/beast/ssl.hpp>
#include <boost/json.hpp>
#include <boost/json/src.hpp>
#include <boost/log/trivial.hpp>
#include <backend/DBHelpers.h>
#include <etl/ETLSource.h>
#include <etl/ProbingETLSource.h>
#include <etl/ReportingETL.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <thread>
using namespace clio;
void
ForwardCache::freshen()
{
BOOST_LOG_TRIVIAL(trace) << "Freshening ForwardCache";
log_.trace() << "Freshening ForwardCache";
auto numOutstanding =
std::make_shared<std::atomic_uint>(latestForwarded_.size());
@@ -127,15 +131,11 @@ ETLSourceImpl<Derived>::reconnect(boost::beast::error_code ec)
if (ec != boost::asio::error::operation_aborted &&
ec != boost::asio::error::connection_refused)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " : "
<< "error code = " << ec << " - " << toString();
log_.error() << "error code = " << ec << " - " << toString();
}
else
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " : "
<< "error code = " << ec << " - " << toString();
log_.warn() << "error code = " << ec << " - " << toString();
}
// exponentially increasing timeouts, with a max of 30 seconds
@@ -144,7 +144,7 @@ ETLSourceImpl<Derived>::reconnect(boost::beast::error_code ec)
timer_.expires_after(boost::asio::chrono::seconds(waitTime));
timer_.async_wait([this](auto ec) {
bool startAgain = (ec != boost::asio::error::operation_aborted);
BOOST_LOG_TRIVIAL(trace) << __func__ << " async_wait : ec = " << ec;
log_.trace() << "async_wait : ec = " << ec;
derived().close(startAgain);
});
}
@@ -168,8 +168,8 @@ PlainETLSource::close(bool startAgain)
[this, startAgain](auto ec) {
if (ec)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " async_close : "
log_.error()
<< " async_close : "
<< "error code = " << ec << " - " << toString();
}
closing_ = false;
@@ -213,8 +213,8 @@ SslETLSource::close(bool startAgain)
[this, startAgain](auto ec) {
if (ec)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " async_close : "
log_.error()
<< " async_close : "
<< "error code = " << ec << " - " << toString();
}
closing_ = false;
@@ -246,8 +246,7 @@ ETLSourceImpl<Derived>::onResolve(
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type results)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : ec = " << ec << " - " << toString();
log_.trace() << "ec = " << ec << " - " << toString();
if (ec)
{
// try again
@@ -269,8 +268,7 @@ PlainETLSource::onConnect(
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : ec = " << ec << " - " << toString();
log_.trace() << "ec = " << ec << " - " << toString();
if (ec)
{
// start over
@@ -311,8 +309,7 @@ SslETLSource::onConnect(
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : ec = " << ec << " - " << toString();
log_.trace() << "ec = " << ec << " - " << toString();
if (ec)
{
// start over
@@ -371,8 +368,7 @@ template <class Derived>
void
ETLSourceImpl<Derived>::onHandshake(boost::beast::error_code ec)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : ec = " << ec << " - " << toString();
log_.trace() << "ec = " << ec << " - " << toString();
if (auto action = hooks_.onConnected(ec);
action == ETLSourceHooks::Action::STOP)
return;
@@ -389,7 +385,7 @@ ETLSourceImpl<Derived>::onHandshake(boost::beast::error_code ec)
{"streams",
{"ledger", "manifests", "validations", "transactions_proposed"}}};
std::string s = boost::json::serialize(jv);
BOOST_LOG_TRIVIAL(trace) << "Sending subscribe stream message";
log_.trace() << "Sending subscribe stream message";
derived().ws().set_option(
boost::beast::websocket::stream_base::decorator(
@@ -415,8 +411,7 @@ ETLSourceImpl<Derived>::onWrite(
boost::beast::error_code ec,
size_t bytesWritten)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : ec = " << ec << " - " << toString();
log_.trace() << "ec = " << ec << " - " << toString();
if (ec)
{
// start over
@@ -433,8 +428,7 @@ template <class Derived>
void
ETLSourceImpl<Derived>::onRead(boost::beast::error_code ec, size_t size)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : ec = " << ec << " - " << toString();
log_.trace() << "ec = " << ec << " - " << toString();
// if error or error reading message, start over
if (ec)
{
@@ -446,8 +440,7 @@ ETLSourceImpl<Derived>::onRead(boost::beast::error_code ec, size_t size)
boost::beast::flat_buffer buffer;
swap(readBuffer_, buffer);
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : calling async_read - " << toString();
log_.trace() << "calling async_read - " << toString();
derived().ws().async_read(
readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
}
@@ -457,7 +450,7 @@ template <class Derived>
bool
ETLSourceImpl<Derived>::handleMessage()
{
BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << toString();
log_.trace() << toString();
setLastMsgTime();
connected_ = true;
@@ -466,9 +459,9 @@ ETLSourceImpl<Derived>::handleMessage()
std::string msg{
static_cast<char const*>(readBuffer_.data().data()),
readBuffer_.size()};
BOOST_LOG_TRIVIAL(trace) << __func__ << msg;
log_.trace() << msg;
boost::json::value raw = boost::json::parse(msg);
BOOST_LOG_TRIVIAL(trace) << __func__ << " parsed";
log_.trace() << "parsed";
boost::json::object response = raw.as_object();
uint32_t ledgerIndex = 0;
@@ -487,20 +480,16 @@ ETLSourceImpl<Derived>::handleMessage()
setValidatedRange(
{validatedLedgers.c_str(), validatedLedgers.size()});
}
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Received a message on ledger "
<< " subscription stream. Message : " << response << " - "
<< toString();
log_.info() << "Received a message on ledger "
<< " subscription stream. Message : " << response
<< " - " << toString();
}
else if (
response.contains("type") && response["type"] == "ledgerClosed")
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Received a message on ledger "
<< " subscription stream. Message : " << response << " - "
<< toString();
log_.info() << "Received a message on ledger "
<< " subscription stream. Message : " << response
<< " - " << toString();
if (response.contains("ledger_index"))
{
ledgerIndex = response["ledger_index"].as_int64();
@@ -539,9 +528,7 @@ ETLSourceImpl<Derived>::handleMessage()
if (ledgerIndex != 0)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : "
<< "Pushing ledger sequence = " << ledgerIndex << " - "
log_.trace() << "Pushing ledger sequence = " << ledgerIndex << " - "
<< toString();
networkValidatedLedgers_->push(ledgerIndex);
}
@@ -549,13 +536,15 @@ ETLSourceImpl<Derived>::handleMessage()
}
catch (std::exception const& e)
{
BOOST_LOG_TRIVIAL(error) << "Exception in handleMessage : " << e.what();
log_.error() << "Exception in handleMessage : " << e.what();
return false;
}
}
class AsyncCallData
{
clio::Logger log_{"ETL"};
std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> cur_;
std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> next_;
@@ -585,8 +574,8 @@ public:
unsigned char prefix = marker.data()[0];
BOOST_LOG_TRIVIAL(debug)
<< "Setting up AsyncCallData. marker = " << ripple::strHex(marker)
log_.debug() << "Setting up AsyncCallData. marker = "
<< ripple::strHex(marker)
<< " . prefix = " << ripple::strHex(std::string(1, prefix))
<< " . nextPrefix_ = "
<< ripple::strHex(std::string(1, nextPrefix_));
@@ -609,25 +598,23 @@ public:
bool abort,
bool cacheOnly = false)
{
BOOST_LOG_TRIVIAL(trace) << "Processing response. "
log_.trace() << "Processing response. "
<< "Marker prefix = " << getMarkerPrefix();
if (abort)
{
BOOST_LOG_TRIVIAL(error) << "AsyncCallData aborted";
log_.error() << "AsyncCallData aborted";
return CallStatus::ERRORED;
}
if (!status_.ok())
{
BOOST_LOG_TRIVIAL(error)
<< "AsyncCallData status_ not ok: "
log_.error() << "AsyncCallData status_ not ok: "
<< " code = " << status_.error_code()
<< " message = " << status_.error_message();
return CallStatus::ERRORED;
}
if (!next_->is_unlimited())
{
BOOST_LOG_TRIVIAL(warning)
<< "AsyncCallData is_unlimited is false. Make sure "
log_.warn() << "AsyncCallData is_unlimited is false. Make sure "
"secure_gateway is set correctly at the ETL source";
}
@@ -651,7 +638,7 @@ public:
call(stub, cq);
}
BOOST_LOG_TRIVIAL(trace) << "Writing objects";
log_.trace() << "Writing objects";
std::vector<Backend::LedgerObject> cacheUpdates;
cacheUpdates.reserve(cur_->ledger_objects().objects_size());
for (int i = 0; i < cur_->ledger_objects().objects_size(); ++i)
@@ -681,7 +668,7 @@ public:
}
backend.cache().update(
cacheUpdates, request_.ledger().sequence(), cacheOnly);
BOOST_LOG_TRIVIAL(trace) << "Wrote objects";
log_.trace() << "Wrote objects";
return more ? CallStatus::MORE : CallStatus::DONE;
}
@@ -745,7 +732,7 @@ ETLSourceImpl<Derived>::loadInitialLedger(
calls.emplace_back(sequence, markers[i], nextMarker);
}
BOOST_LOG_TRIVIAL(debug) << "Starting data download for ledger " << sequence
log_.debug() << "Starting data download for ledger " << sequence
<< ". Using source = " << toString();
for (auto& c : calls)
@@ -764,20 +751,18 @@ ETLSourceImpl<Derived>::loadInitialLedger(
if (!ok)
{
BOOST_LOG_TRIVIAL(error) << "loadInitialLedger - ok is false";
log_.error() << "loadInitialLedger - ok is false";
return false;
// handle cancelled
}
else
{
BOOST_LOG_TRIVIAL(trace)
<< "Marker prefix = " << ptr->getMarkerPrefix();
log_.trace() << "Marker prefix = " << ptr->getMarkerPrefix();
auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly);
if (result != AsyncCallData::CallStatus::MORE)
{
numFinished++;
BOOST_LOG_TRIVIAL(debug)
<< "Finished a marker. "
log_.debug() << "Finished a marker. "
<< "Current number of finished = " << numFinished;
std::string lastKey = ptr->getLastKey();
if (lastKey.size())
@@ -789,15 +774,13 @@ ETLSourceImpl<Derived>::loadInitialLedger(
}
if (backend_->cache().size() > progress)
{
BOOST_LOG_TRIVIAL(info)
<< "Downloaded " << backend_->cache().size()
log_.info() << "Downloaded " << backend_->cache().size()
<< " records from rippled";
progress += incr;
}
}
}
BOOST_LOG_TRIVIAL(info)
<< __func__ << " - finished loadInitialLedger. cache size = "
log_.info() << "Finished loadInitialLedger. cache size = "
<< backend_->cache().size();
size_t numWrites = 0;
if (!abort)
@@ -808,9 +791,7 @@ ETLSourceImpl<Derived>::loadInitialLedger(
auto start = std::chrono::system_clock::now();
for (auto& key : edgeKeys)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__
<< " writing edge key = " << ripple::strHex(key);
log_.debug() << "Writing edge key = " << ripple::strHex(key);
auto succ = backend_->cache().getSuccessor(
*ripple::uint256::fromVoidChecked(key), sequence);
if (succ)
@@ -840,8 +821,7 @@ ETLSourceImpl<Derived>::loadInitialLedger(
assert(succ);
if (succ->key == cur->key)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " Writing book successor = "
log_.debug() << "Writing book successor = "
<< ripple::strHex(base) << " - "
<< ripple::strHex(cur->key);
@@ -855,8 +835,7 @@ ETLSourceImpl<Derived>::loadInitialLedger(
}
prev = std::move(cur->key);
if (numWrites % 100000 == 0 && numWrites != 0)
BOOST_LOG_TRIVIAL(info) << __func__ << " Wrote "
<< numWrites << " book successors";
log_.info() << "Wrote " << numWrites << " book successors";
}
backend_->writeSuccessor(
@@ -869,9 +848,8 @@ ETLSourceImpl<Derived>::loadInitialLedger(
auto seconds =
std::chrono::duration_cast<std::chrono::seconds>(end - start)
.count();
BOOST_LOG_TRIVIAL(info)
<< __func__
<< " - Looping through cache and submitting all writes took "
log_.info()
<< "Looping through cache and submitting all writes took "
<< seconds
<< " seconds. numWrites = " << std::to_string(numWrites);
}
@@ -902,8 +880,7 @@ ETLSourceImpl<Derived>::fetchLedger(
grpc::Status status = stub_->GetLedger(&context, request, &response);
if (status.ok() && !response.is_unlimited())
{
BOOST_LOG_TRIVIAL(warning)
<< "ETLSourceImpl::fetchLedger - is_unlimited is "
log_.warn() << "ETLSourceImpl::fetchLedger - is_unlimited is "
"false. Make sure secure_gateway is set "
"correctly on the ETL source. source = "
<< toString() << " status = " << status.error_message();
@@ -951,8 +928,7 @@ ETLLoadBalancer::ETLLoadBalancer(
entry, ioContext, backend, subscriptions, nwvl, *this);
sources_.push_back(std::move(source));
BOOST_LOG_TRIVIAL(info) << __func__ << " : added etl source - "
<< sources_.back()->toString();
log_.info() << "Added etl source - " << sources_.back()->toString();
}
}
@@ -965,7 +941,7 @@ ETLLoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly)
source->loadInitialLedger(sequence, downloadRanges_, cacheOnly);
if (!res)
{
BOOST_LOG_TRIVIAL(error) << "Failed to download initial ledger."
log_.error() << "Failed to download initial ledger."
<< " Sequence = " << sequence
<< " source = " << source->toString();
}
@@ -982,26 +958,24 @@ ETLLoadBalancer::fetchLedger(
{
org::xrpl::rpc::v1::GetLedgerResponse response;
bool success = execute(
[&response, ledgerSequence, getObjects, getObjectNeighbors](
[&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_](
auto& source) {
auto [status, data] = source->fetchLedger(
ledgerSequence, getObjects, getObjectNeighbors);
response = std::move(data);
if (status.ok() && response.validated())
{
BOOST_LOG_TRIVIAL(info)
<< "Successfully fetched ledger = " << ledgerSequence
log.info() << "Successfully fetched ledger = " << ledgerSequence
<< " from source = " << source->toString();
return true;
}
else
{
BOOST_LOG_TRIVIAL(warning)
<< "Error getting ledger = " << ledgerSequence
<< " Reply : " << response.DebugString()
<< " error_code : " << status.error_code()
<< " error_msg : " << status.error_message()
<< " source = " << source->toString();
log.warn() << "Error getting ledger = " << ledgerSequence
<< ", Reply: " << response.DebugString()
<< ", error_code: " << status.error_code()
<< ", error_msg: " << status.error_message()
<< ", source = " << source->toString();
return false;
}
},
@@ -1042,7 +1016,7 @@ ETLSourceImpl<Derived>::forwardToRippled(
{
if (auto resp = forwardCache_.get(request); resp)
{
BOOST_LOG_TRIVIAL(debug) << "request hit forwardCache";
log_.debug() << "request hit forwardCache";
return resp;
}
@@ -1056,14 +1030,13 @@ ETLSourceImpl<Derived>::requestFromRippled(
std::string const& clientIp,
boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(trace) << "Attempting to forward request to tx. "
log_.trace() << "Attempting to forward request to tx. "
<< "request = " << boost::json::serialize(request);
boost::json::object response;
if (!connected_)
{
BOOST_LOG_TRIVIAL(error)
<< "Attempted to proxy but failed to connect to tx";
log_.error() << "Attempted to proxy but failed to connect to tx";
return {};
}
namespace beast = boost::beast; // from <boost/beast.hpp>
@@ -1077,7 +1050,7 @@ ETLSourceImpl<Derived>::requestFromRippled(
// These objects perform our I/O
tcp::resolver resolver{ioc_};
BOOST_LOG_TRIVIAL(trace) << "Creating websocket";
log_.trace() << "Creating websocket";
auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(ioc_);
// Look up the domain name
@@ -1087,7 +1060,7 @@ ETLSourceImpl<Derived>::requestFromRippled(
ws->next_layer().expires_after(std::chrono::seconds(3));
BOOST_LOG_TRIVIAL(trace) << "Connecting websocket";
log_.trace() << "Connecting websocket";
// Make the connection on the IP address we get from a lookup
ws->next_layer().async_connect(results, yield[ec]);
if (ec)
@@ -1106,15 +1079,15 @@ ETLSourceImpl<Derived>::requestFromRippled(
" websocket-client-coro");
req.set(http::field::forwarded, "for=" + clientIp);
}));
BOOST_LOG_TRIVIAL(trace) << "client ip: " << clientIp;
log_.trace() << "client ip: " << clientIp;
BOOST_LOG_TRIVIAL(trace) << "Performing websocket handshake";
log_.trace() << "Performing websocket handshake";
// Perform the websocket handshake
ws->async_handshake(ip_, "/", yield[ec]);
if (ec)
return {};
BOOST_LOG_TRIVIAL(trace) << "Sending request";
log_.trace() << "Sending request";
// Send the message
ws->async_write(
net::buffer(boost::json::serialize(request)), yield[ec]);
@@ -1132,11 +1105,11 @@ ETLSourceImpl<Derived>::requestFromRippled(
if (!parsed.is_object())
{
BOOST_LOG_TRIVIAL(error)
<< "Error parsing response: " << std::string{begin, end};
log_.error() << "Error parsing response: "
<< std::string{begin, end};
return {};
}
BOOST_LOG_TRIVIAL(trace) << "Successfully forward request";
log_.trace() << "Successfully forward request";
response = parsed.as_object();
@@ -1145,7 +1118,7 @@ ETLSourceImpl<Derived>::requestFromRippled(
}
catch (std::exception const& e)
{
BOOST_LOG_TRIVIAL(error) << "Encountered exception : " << e.what();
log_.error() << "Encountered exception : " << e.what();
return {};
}
}
@@ -1162,45 +1135,36 @@ ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence)
{
auto& source = sources_[sourceIdx];
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Attempting to execute func. ledger sequence = "
log_.debug() << "Attempting to execute func. ledger sequence = "
<< ledgerSequence << " - source = " << source->toString();
if (source->hasLedger(ledgerSequence) || true)
{
bool res = f(source);
if (res)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Successfully executed func at source = "
log_.debug() << "Successfully executed func at source = "
<< source->toString()
<< " - ledger sequence = " << ledgerSequence;
break;
}
else
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " : "
<< "Failed to execute func at source = "
log_.warn() << "Failed to execute func at source = "
<< source->toString()
<< " - ledger sequence = " << ledgerSequence;
}
}
else
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " : "
<< "Ledger not present at source = " << source->toString()
log_.warn() << "Ledger not present at source = "
<< source->toString()
<< " - ledger sequence = " << ledgerSequence;
}
sourceIdx = (sourceIdx + 1) % sources_.size();
numAttempts++;
if (numAttempts % sources_.size() == 0)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " : "
<< "Error executing function "
log_.error() << "Error executing function "
<< " - ledger sequence = " << ledgerSequence
<< " - Tried all sources. Sleeping and trying again";
std::this_thread::sleep_for(std::chrono::seconds(2));

View File

@@ -1,6 +1,15 @@
#ifndef RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED
#define RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED
#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <etl/ETLHelpers.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>
#include <boost/algorithm/string.hpp>
#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
@@ -8,14 +17,6 @@
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>
#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <etl/ETLHelpers.h>
#include <subscriptions/SubscriptionManager.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>
class ETLLoadBalancer;
class ETLSource;
class ProbingETLSource;
@@ -32,6 +33,7 @@ class ForwardCache
{
using response_type = std::optional<boost::json::object>;
clio::Logger log_{"ETL"};
mutable std::atomic_bool stopping_ = false;
mutable std::shared_mutex mtx_;
std::unordered_map<std::string, response_type> latestForwarded_;
@@ -127,6 +129,9 @@ public:
{
}
protected:
clio::Logger log_{"ETL"};
private:
friend ForwardCache;
friend ProbingETLSource;
@@ -161,7 +166,7 @@ class ETLSourceImpl : public ETLSource
std::vector<std::pair<uint32_t, uint32_t>> validatedLedgers_;
std::string validatedLedgersRaw_;
std::string validatedLedgersRaw_{"N/A"};
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers_;
@@ -218,7 +223,7 @@ protected:
void
run() override
{
BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << toString();
log_.trace() << toString();
auto const host = ip_;
auto const port = wsPort_;
@@ -292,13 +297,11 @@ public:
stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
grpc::CreateCustomChannel(
ss.str(), grpc::InsecureChannelCredentials(), chArgs));
BOOST_LOG_TRIVIAL(debug)
<< "Made stub for remote = " << toString();
log_.debug() << "Made stub for remote = " << toString();
}
catch (std::exception const& e)
{
BOOST_LOG_TRIVIAL(debug)
<< "Exception while creating stub = " << e.what()
log_.debug() << "Exception while creating stub = " << e.what()
<< " . Remote = " << toString();
}
}
@@ -371,7 +374,6 @@ public:
getValidatedRange() const
{
std::lock_guard lck(mtx_);
return validatedLedgersRaw_;
}
@@ -389,9 +391,8 @@ public:
std::string
toString() const override
{
return "{ validated_ledger : " + getValidatedRange() +
" , ip : " + ip_ + " , web socket port : " + wsPort_ +
", grpc port : " + grpcPort_ + " }";
return "{validated_ledger: " + getValidatedRange() + ", ip: " + ip_ +
", web socket port: " + wsPort_ + ", grpc port: " + grpcPort_ + "}";
}
boost::json::object
@@ -592,8 +593,8 @@ public:
class ETLLoadBalancer
{
private:
clio::Logger log_{"ETL"};
std::vector<std::unique_ptr<ETLSource>> sources_;
std::uint32_t downloadRanges_ = 16;
public:

View File

@@ -107,8 +107,7 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
NFTsData(tokenIDResult.front(), *owner, txMeta, false)};
std::stringstream msg;
msg << __func__ << " - unexpected NFTokenMint data in tx "
<< sttx.getTransactionID();
msg << " - unexpected NFTokenMint data in tx " << sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
@@ -173,7 +172,7 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
}
std::stringstream msg;
msg << __func__ << " - could not determine owner at burntime for tx "
msg << " - could not determine owner at burntime for tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
@@ -198,7 +197,7 @@ getNFTokenAcceptOfferData(
if (affectedBuyOffer == txMeta.getNodes().end())
{
std::stringstream msg;
msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx "
msg << " - unexpected NFTokenAcceptOffer data in tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
@@ -228,7 +227,7 @@ getNFTokenAcceptOfferData(
if (affectedSellOffer == txMeta.getNodes().end())
{
std::stringstream msg;
msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx "
msg << " - unexpected NFTokenAcceptOffer data in tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
@@ -278,7 +277,7 @@ getNFTokenAcceptOfferData(
}
std::stringstream msg;
msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx "
msg << " - unexpected NFTokenAcceptOffer data in tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}

View File

@@ -1,4 +1,7 @@
#include <etl/ProbingETLSource.h>
#include <log/Logger.h>
using namespace clio;
ProbingETLSource::ProbingETLSource(
clio::Config const& config,
@@ -85,7 +88,8 @@ std::string
ProbingETLSource::toString() const
{
if (!currentSrc_)
return "{ probing }";
return "{probing... ws: " + plainSrc_->toString() +
", wss: " + sslSrc_->toString() + "}";
return currentSrc_->toString();
}
@@ -148,8 +152,7 @@ ProbingETLSource::make_SSLHooks() noexcept
{
plainSrc_->pause();
currentSrc_ = sslSrc_;
BOOST_LOG_TRIVIAL(info)
<< "Selected WSS as the main source: "
log_.info() << "Selected WSS as the main source: "
<< currentSrc_->toString();
}
return ETLSourceHooks::Action::PROCEED;
@@ -179,8 +182,7 @@ ProbingETLSource::make_PlainHooks() noexcept
{
sslSrc_->pause();
currentSrc_ = plainSrc_;
BOOST_LOG_TRIVIAL(info)
<< "Selected Plain WS as the main source: "
log_.info() << "Selected Plain WS as the main source: "
<< currentSrc_->toString();
}
return ETLSourceHooks::Action::PROCEED;

View File

@@ -11,6 +11,7 @@
#include <config/Config.h>
#include <etl/ETLSource.h>
#include <log/Logger.h>
/// This ETLSource implementation attempts to connect over both secure websocket
/// and plain websocket. First to connect pauses the other and the probing is
@@ -18,6 +19,8 @@
/// connection the probing is kickstarted again.
class ProbingETLSource : public ETLSource
{
clio::Logger log_{"ETL"};
std::mutex mtx_;
boost::asio::ssl::context sslCtx_;
std::shared_ptr<ETLSource> sslSrc_;

View File

@@ -1,20 +1,25 @@
#include <ripple/basics/StringUtilities.h>
#include <ripple/beast/core/CurrentThreadName.h>
#include <backend/DBHelpers.h>
#include <etl/ReportingETL.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>
#include <ripple/beast/core/CurrentThreadName.h>
#include <boost/asio/connect.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <cstdlib>
#include <iostream>
#include <string>
#include <subscriptions/SubscriptionManager.h>
#include <thread>
#include <variant>
namespace detail {
using namespace clio;
namespace clio::detail {
/// Convenience function for printing out basic ledger info
std::string
toString(ripple::LedgerInfo const& info)
@@ -26,7 +31,7 @@ toString(ripple::LedgerInfo const& info)
<< " ParentHash : " << strHex(info.parentHash) << " }";
return ss.str();
}
} // namespace detail
} // namespace clio::detail
FormattedTransactionsData
ReportingETL::insertTransactions(
@@ -43,9 +48,7 @@ ReportingETL::insertTransactions(
ripple::SerialIter it{raw->data(), raw->size()};
ripple::STTx sttx{it};
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : "
<< "Inserting transaction = " << sttx.getTransactionID();
log_.trace() << "Inserting transaction = " << sttx.getTransactionID();
ripple::TxMeta txMeta{
sttx.getTransactionID(), ledger.seq, txn.metadata_blob()};
@@ -97,8 +100,7 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
auto rng = backend_->hardFetchLedgerRangeNoThrow();
if (rng)
{
BOOST_LOG_TRIVIAL(fatal) << __func__ << " : "
<< "Database is not empty";
log_.fatal() << "Database is not empty";
assert(false);
return {};
}
@@ -114,23 +116,21 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
ripple::LedgerInfo lgrInfo =
deserializeHeader(ripple::makeSlice(ledgerData->ledger_header()));
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Deserialized ledger header. " << detail::toString(lgrInfo);
log_.debug() << "Deserialized ledger header. " << detail::toString(lgrInfo);
auto start = std::chrono::system_clock::now();
backend_->startWrites();
BOOST_LOG_TRIVIAL(debug) << __func__ << " started writes";
log_.debug() << "Started writes";
backend_->writeLedger(
lgrInfo, std::move(*ledgerData->mutable_ledger_header()));
BOOST_LOG_TRIVIAL(debug) << __func__ << " wrote ledger";
log_.debug() << "Wrote ledger";
FormattedTransactionsData insertTxResult =
insertTransactions(lgrInfo, *ledgerData);
BOOST_LOG_TRIVIAL(debug) << __func__ << " inserted txns";
log_.debug() << "Inserted txns";
// download the full account state map. This function downloads full ledger
// data and pushes the downloaded data into the writeQueue. asyncWriter
@@ -138,7 +138,7 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
// Once the below call returns, all data has been pushed into the queue
loadBalancer_->loadInitialLedger(startingSequence);
BOOST_LOG_TRIVIAL(debug) << __func__ << " loaded initial ledger";
log_.debug() << "Loaded initial ledger";
if (!stopping_)
{
@@ -150,7 +150,7 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
backend_->finishWrites(startingSequence);
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug) << "Time to download and store ledger = "
log_.debug() << "Time to download and store ledger = "
<< ((end - start).count()) / 1000000000.0;
return lgrInfo;
}
@@ -158,12 +158,11 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
void
ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " - Publishing ledger " << std::to_string(lgrInfo.seq);
log_.info() << "Publishing ledger " << std::to_string(lgrInfo.seq);
if (!writing_)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - Updating cache";
log_.info() << "Updating cache";
std::vector<Backend::LedgerObject> diff =
Backend::synchronousAndRetryOnTimeout([&](auto yield) {
@@ -205,11 +204,10 @@ ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo)
subscriptions_->pubBookChanges(lgrInfo, transactions);
BOOST_LOG_TRIVIAL(info) << __func__ << " - Published ledger "
<< std::to_string(lgrInfo.seq);
log_.info() << "Published ledger " << std::to_string(lgrInfo.seq);
}
else
BOOST_LOG_TRIVIAL(info) << __func__ << " - Skipping publishing ledger "
log_.info() << "Skipping publishing ledger "
<< std::to_string(lgrInfo.seq);
setLastPublish();
}
@@ -219,9 +217,7 @@ ReportingETL::publishLedger(
uint32_t ledgerSequence,
std::optional<uint32_t> maxAttempts)
{
BOOST_LOG_TRIVIAL(info)
<< __func__ << " : "
<< "Attempting to publish ledger = " << ledgerSequence;
log_.info() << "Attempting to publish ledger = " << ledgerSequence;
size_t numAttempts = 0;
while (!stopping_)
{
@@ -229,17 +225,15 @@ ReportingETL::publishLedger(
if (!range || range->maxSequence < ledgerSequence)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "Trying to publish. Could not find "
log_.debug() << "Trying to publish. Could not find "
"ledger with sequence = "
<< ledgerSequence;
// We try maxAttempts times to publish the ledger, waiting one
// second in between each attempt.
if (maxAttempts && numAttempts >= maxAttempts)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "Failed to publish ledger after "
<< numAttempts << " attempts.";
log_.debug() << "Failed to publish ledger after " << numAttempts
<< " attempts.";
return false;
}
std::this_thread::sleep_for(std::chrono::seconds(1));
@@ -264,25 +258,19 @@ ReportingETL::publishLedger(
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
ReportingETL::fetchLedgerData(uint32_t seq)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Attempting to fetch ledger with sequence = " << seq;
log_.debug() << "Attempting to fetch ledger with sequence = " << seq;
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> response =
loadBalancer_->fetchLedger(seq, false, false);
if (response)
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : "
<< "GetLedger reply = " << response->DebugString();
log_.trace() << "GetLedger reply = " << response->DebugString();
return response;
}
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
ReportingETL::fetchLedgerDataAndDiff(uint32_t seq)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Attempting to fetch ledger with sequence = " << seq;
log_.debug() << "Attempting to fetch ledger with sequence = " << seq;
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> response =
loadBalancer_->fetchLedger(
@@ -291,45 +279,34 @@ ReportingETL::fetchLedgerDataAndDiff(uint32_t seq)
!backend_->cache().isFull() ||
backend_->cache().latestLedgerSequence() >= seq);
if (response)
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : "
<< "GetLedger reply = " << response->DebugString();
log_.trace() << "GetLedger reply = " << response->DebugString();
return response;
}
std::pair<ripple::LedgerInfo, bool>
ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "Beginning ledger update";
log_.debug() << "Beginning ledger update";
ripple::LedgerInfo lgrInfo =
deserializeHeader(ripple::makeSlice(rawData.ledger_header()));
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Deserialized ledger header. " << detail::toString(lgrInfo);
log_.debug() << "Deserialized ledger header. " << detail::toString(lgrInfo);
backend_->startWrites();
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "started writes";
log_.debug() << "started writes";
backend_->writeLedger(lgrInfo, std::move(*rawData.mutable_ledger_header()));
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "wrote ledger header";
log_.debug() << "wrote ledger header";
// Write successor info, if included from rippled
if (rawData.object_neighbors_included())
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " object neighbors included";
log_.debug() << "object neighbors included";
for (auto& obj : *(rawData.mutable_book_successors()))
{
auto firstBook = std::move(*obj.mutable_first_book());
if (!firstBook.size())
firstBook = uint256ToString(Backend::lastKey);
BOOST_LOG_TRIVIAL(debug) << __func__ << " writing book successor "
log_.debug() << "writing book successor "
<< ripple::strHex(obj.book_base()) << " - "
<< ripple::strHex(firstBook);
@@ -352,9 +329,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
if (obj.mod_type() ==
org::xrpl::rpc::v1::RawLedgerObject::DELETED)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__
<< " modifying successors for deleted object "
log_.debug() << "Modifying successors for deleted object "
<< ripple::strHex(obj.key()) << " - "
<< ripple::strHex(*predPtr) << " - "
<< ripple::strHex(*succPtr);
@@ -364,8 +339,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
}
else
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " adding successor for new object "
log_.debug() << "adding successor for new object "
<< ripple::strHex(obj.key()) << " - "
<< ripple::strHex(*predPtr) << " - "
<< ripple::strHex(*succPtr);
@@ -381,8 +355,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
}
}
else
BOOST_LOG_TRIVIAL(debug) << __func__ << " object modified "
<< ripple::strHex(obj.key());
log_.debug() << "object modified " << ripple::strHex(obj.key());
}
}
std::vector<Backend::LedgerObject> cacheUpdates;
@@ -396,15 +369,13 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
assert(key);
cacheUpdates.push_back(
{*key, {obj.mutable_data()->begin(), obj.mutable_data()->end()}});
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " key = " << ripple::strHex(*key)
log_.debug() << "key = " << ripple::strHex(*key)
<< " - mod type = " << obj.mod_type();
if (obj.mod_type() != org::xrpl::rpc::v1::RawLedgerObject::MODIFIED &&
!rawData.object_neighbors_included())
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " object neighbors not included. using cache";
log_.debug() << "object neighbors not included. using cache";
if (!backend_->cache().isFull() ||
backend_->cache().latestLedgerSequence() != lgrInfo.seq - 1)
throw std::runtime_error(
@@ -423,9 +394,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
checkBookBase = isBookDir(*key, *blob);
if (checkBookBase)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__
<< " is book dir. key = " << ripple::strHex(*key);
log_.debug() << "Is book dir. key = " << ripple::strHex(*key);
auto bookBase = getBookBase(*key);
auto oldFirstDir =
backend_->cache().getSuccessor(bookBase, lgrInfo.seq - 1);
@@ -435,8 +404,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
if ((isDeleted && key == oldFirstDir->key) ||
(!isDeleted && key < oldFirstDir->key))
{
BOOST_LOG_TRIVIAL(debug)
<< __func__
log_.debug()
<< "Need to recalculate book base successor. base = "
<< ripple::strHex(bookBase)
<< " - key = " << ripple::strHex(*key)
@@ -458,8 +426,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
// rippled didn't send successor information, so use our cache
if (!rawData.object_neighbors_included())
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " object neighbors not included. using cache";
log_.debug() << "object neighbors not included. using cache";
if (!backend_->cache().isFull() ||
backend_->cache().latestLedgerSequence() != lgrInfo.seq)
throw std::runtime_error(
@@ -477,8 +444,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
ub = {Backend::lastKey, {}};
if (obj.blob.size() == 0)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " writing successor for deleted object "
log_.debug() << "writing successor for deleted object "
<< ripple::strHex(obj.key) << " - "
<< ripple::strHex(lb->key) << " - "
<< ripple::strHex(ub->key);
@@ -499,8 +465,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
lgrInfo.seq,
uint256ToString(ub->key));
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " writing successor for new object "
log_.debug() << "writing successor for new object "
<< ripple::strHex(lb->key) << " - "
<< ripple::strHex(obj.key) << " - "
<< ripple::strHex(ub->key);
@@ -516,10 +481,9 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
lgrInfo.seq,
uint256ToString(succ->key));
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " Updating book successor "
<< ripple::strHex(base) << " - "
<< ripple::strHex(succ->key);
log_.debug()
<< "Updating book successor " << ripple::strHex(base)
<< " - " << ripple::strHex(succ->key);
}
else
{
@@ -528,42 +492,33 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
lgrInfo.seq,
uint256ToString(Backend::lastKey));
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " Updating book successor "
<< ripple::strHex(base) << " - "
<< ripple::strHex(Backend::lastKey);
log_.debug()
<< "Updating book successor " << ripple::strHex(base)
<< " - " << ripple::strHex(Backend::lastKey);
}
}
}
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
log_.debug()
<< "Inserted/modified/deleted all objects. Number of objects = "
<< rawData.ledger_objects().objects_size();
FormattedTransactionsData insertTxResult =
insertTransactions(lgrInfo, rawData);
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Inserted all transactions. Number of transactions = "
log_.debug() << "Inserted all transactions. Number of transactions = "
<< rawData.transactions_list().transactions_size();
backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData));
backend_->writeNFTs(std::move(insertTxResult.nfTokensData));
backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData));
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "wrote account_tx";
log_.debug() << "wrote account_tx";
auto start = std::chrono::system_clock::now();
bool success = backend_->finishWrites(lgrInfo.seq);
auto end = std::chrono::system_clock::now();
auto duration = ((end - start).count()) / 1000000000.0;
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " finished writes. took " << std::to_string(duration);
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Finished ledger update. " << detail::toString(lgrInfo);
log_.debug() << "Finished writes. took " << std::to_string(duration);
log_.debug() << "Finished ledger update. " << detail::toString(lgrInfo);
return {lgrInfo, success};
}
@@ -596,8 +551,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
* optional, returns.
*/
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "Starting etl pipeline";
log_.debug() << "Starting etl pipeline";
writing_ = true;
auto rng = backend_->hardFetchLedgerRangeNoThrow();
@@ -670,8 +624,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
fetchResponse->transactions_list().transactions_size() /
time;
BOOST_LOG_TRIVIAL(info)
<< "Extract phase time = " << time
log_.info() << "Extract phase time = " << time
<< " . Extract phase tps = " << tps
<< " . Avg extract time = "
<< totalTime / (currentSequence - startSequence + 1)
@@ -720,7 +673,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
auto duration = ((end - start).count()) / 1000000000.0;
if (success)
BOOST_LOG_TRIVIAL(info)
log_.info()
<< "Load phase of etl : "
<< "Successfully wrote ledger! Ledger info: "
<< detail::toString(lgrInfo) << ". txn count = " << numTxns
@@ -729,7 +682,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
<< ". load txns per second = " << numTxns / duration
<< ". load objs per second = " << numObjects / duration;
else
BOOST_LOG_TRIVIAL(error)
log_.error()
<< "Error writing ledger. " << detail::toString(lgrInfo);
// success is false if the ledger was already written
if (success)
@@ -747,7 +700,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
{
deleting_ = true;
ioContext_.post([this, &minSequence]() {
BOOST_LOG_TRIVIAL(info) << "Running online delete";
log_.info() << "Running online delete";
Backend::synchronous(
[&](boost::asio::yield_context& yield) {
@@ -755,7 +708,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
*onlineDeleteInterval_, yield);
});
BOOST_LOG_TRIVIAL(info) << "Finished online delete";
log_.info() << "Finished online delete";
auto rng = backend_->fetchLedgerRange();
minSequence = rng->minSequence;
deleting_ = false;
@@ -774,13 +727,12 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
for (auto& t : extractors)
t.join();
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug)
<< "Extracted and wrote " << *lastPublishedSequence - startSequence
<< " in " << ((end - begin).count()) / 1000000000.0;
log_.debug() << "Extracted and wrote "
<< *lastPublishedSequence - startSequence << " in "
<< ((end - begin).count()) / 1000000000.0;
writing_ = false;
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "Stopping etl pipeline";
log_.debug() << "Stopping etl pipeline";
return lastPublishedSequence;
}
@@ -801,38 +753,32 @@ ReportingETL::monitor()
auto rng = backend_->hardFetchLedgerRangeNoThrow();
if (!rng)
{
BOOST_LOG_TRIVIAL(info) << __func__ << " : "
<< "Database is empty. Will download a ledger "
log_.info() << "Database is empty. Will download a ledger "
"from the network.";
std::optional<ripple::LedgerInfo> ledger;
if (startSequence_)
{
BOOST_LOG_TRIVIAL(info)
<< __func__ << " : "
<< "ledger sequence specified in config. "
log_.info() << "ledger sequence specified in config. "
<< "Will begin ETL process starting with ledger "
<< *startSequence_;
ledger = loadInitialLedger(*startSequence_);
}
else
{
BOOST_LOG_TRIVIAL(info)
<< __func__ << " : "
log_.info()
<< "Waiting for next ledger to be validated by network...";
std::optional<uint32_t> mostRecentValidated =
networkValidatedLedgers_->getMostRecent();
if (mostRecentValidated)
{
BOOST_LOG_TRIVIAL(info) << __func__ << " : "
<< "Ledger " << *mostRecentValidated
log_.info() << "Ledger " << *mostRecentValidated
<< " has been validated. "
<< "Downloading...";
ledger = loadInitialLedger(*mostRecentValidated);
}
else
{
BOOST_LOG_TRIVIAL(info) << __func__ << " : "
<< "The wait for the next validated "
log_.info() << "The wait for the next validated "
<< "ledger has been aborted. "
<< "Exiting monitor loop";
return;
@@ -842,8 +788,7 @@ ReportingETL::monitor()
rng = backend_->hardFetchLedgerRangeNoThrow();
else
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " : "
log_.error()
<< "Failed to load initial ledger. Exiting monitor loop";
return;
}
@@ -852,20 +797,17 @@ ReportingETL::monitor()
{
if (startSequence_)
{
BOOST_LOG_TRIVIAL(warning)
log_.warn()
<< "start sequence specified but db is already populated";
}
BOOST_LOG_TRIVIAL(info)
<< __func__ << " : "
log_.info()
<< "Database already populated. Picking up from the tip of history";
loadCache(rng->maxSequence);
}
assert(rng);
uint32_t nextSequence = rng->maxSequence + 1;
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Database is populated. "
log_.debug() << "Database is populated. "
<< "Starting monitor loop. sequence = " << nextSequence;
while (true)
{
@@ -878,9 +820,7 @@ ReportingETL::monitor()
else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(
nextSequence, 1000))
{
BOOST_LOG_TRIVIAL(info)
<< __func__ << " : "
<< "Ledger with sequence = " << nextSequence
log_.info() << "Ledger with sequence = " << nextSequence
<< " has been validated by the network. "
<< "Attempting to find in database and publish";
// Attempt to take over responsibility of ETL writer after 10 failed
@@ -893,17 +833,13 @@ ReportingETL::monitor()
bool success = publishLedger(nextSequence, timeoutSeconds);
if (!success)
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " : "
<< "Failed to publish ledger with sequence = "
log_.warn() << "Failed to publish ledger with sequence = "
<< nextSequence << " . Beginning ETL";
// doContinousETLPipelined returns the most recent sequence
// published empty optional if no sequence was published
std::optional<uint32_t> lastPublished =
runETLPipeline(nextSequence, extractorThreads_);
BOOST_LOG_TRIVIAL(info)
<< __func__ << " : "
<< "Aborting ETL. Falling back to publishing";
log_.info() << "Aborting ETL. Falling back to publishing";
// if no ledger was published, don't increment nextSequence
if (lastPublished)
nextSequence = *lastPublished + 1;
@@ -920,8 +856,8 @@ ReportingETL::loadCacheFromClioPeer(
std::string const& port,
boost::asio::yield_context& yield)
{
BOOST_LOG_TRIVIAL(info)
<< "Loading cache from peer. ip = " << ip << " . port = " << port;
log_.info() << "Loading cache from peer. ip = " << ip
<< " . port = " << port;
namespace beast = boost::beast; // from <boost/beast.hpp>
namespace http = beast::http; // from <boost/beast/http.hpp>
namespace websocket = beast::websocket; // from
@@ -933,7 +869,7 @@ ReportingETL::loadCacheFromClioPeer(
// These objects perform our I/O
tcp::resolver resolver{ioContext_};
BOOST_LOG_TRIVIAL(trace) << __func__ << " Creating websocket";
log_.trace() << "Creating websocket";
auto ws =
std::make_unique<websocket::stream<beast::tcp_stream>>(ioContext_);
@@ -942,14 +878,13 @@ ReportingETL::loadCacheFromClioPeer(
if (ec)
return {};
BOOST_LOG_TRIVIAL(trace) << __func__ << " Connecting websocket";
log_.trace() << "Connecting websocket";
// Make the connection on the IP address we get from a lookup
ws->next_layer().async_connect(results, yield[ec]);
if (ec)
return false;
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " Performing websocket handshake";
log_.trace() << "Performing websocket handshake";
// Perform the websocket handshake
ws->async_handshake(ip, "/", yield[ec]);
if (ec)
@@ -957,7 +892,7 @@ ReportingETL::loadCacheFromClioPeer(
std::optional<boost::json::value> marker;
BOOST_LOG_TRIVIAL(trace) << __func__ << " Sending request";
log_.trace() << "Sending request";
auto getRequest = [&](auto marker) {
boost::json::object request = {
{"command", "ledger_data"},
@@ -981,8 +916,7 @@ ReportingETL::loadCacheFromClioPeer(
yield[ec]);
if (ec)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " error writing = " << ec.message();
log_.error() << "error writing = " << ec.message();
return false;
}
@@ -990,8 +924,7 @@ ReportingETL::loadCacheFromClioPeer(
ws->async_read(buffer, yield[ec]);
if (ec)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " error reading = " << ec.message();
log_.error() << "error reading = " << ec.message();
return false;
}
@@ -1000,35 +933,29 @@ ReportingETL::loadCacheFromClioPeer(
if (!parsed.is_object())
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " Error parsing response: " << raw;
log_.error() << "Error parsing response: " << raw;
return false;
}
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " Successfully parsed response " << parsed;
log_.trace() << "Successfully parsed response " << parsed;
if (auto const& response = parsed.as_object();
response.contains("error"))
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " Response contains error: " << response;
log_.error() << "Response contains error: " << response;
auto const& err = response.at("error");
if (err.is_string() && err.as_string() == "lgrNotFound")
{
++numAttempts;
if (numAttempts >= 5)
{
BOOST_LOG_TRIVIAL(error)
<< __func__
log_.error()
<< " ledger not found at peer after 5 attempts. "
"peer = "
<< ip << " ledger = " << ledgerIndex
<< ". Check your config and the health of the peer";
return false;
}
BOOST_LOG_TRIVIAL(warning)
<< __func__
<< " ledger not found. ledger = " << ledgerIndex
log_.warn() << "Ledger not found. ledger = " << ledgerIndex
<< ". Sleeping and trying again";
std::this_thread::sleep_for(std::chrono::seconds(1));
continue;
@@ -1041,8 +968,7 @@ ReportingETL::loadCacheFromClioPeer(
if (!response.contains("cache_full") ||
!response.at("cache_full").as_bool())
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " cache not full for clio node. ip = " << ip;
log_.error() << "cache not full for clio node. ip = " << ip;
return false;
}
if (response.contains("marker"))
@@ -1063,8 +989,7 @@ ReportingETL::loadCacheFromClioPeer(
if (!stateObject.key.parseHex(
obj.at("index").as_string().c_str()))
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " failed to parse object id";
log_.error() << "failed to parse object id";
return false;
}
boost::algorithm::unhex(
@@ -1075,21 +1000,18 @@ ReportingETL::loadCacheFromClioPeer(
backend_->cache().update(objects, ledgerIndex, true);
if (marker)
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " - At marker " << *marker;
log_.debug() << "At marker " << *marker;
} while (marker || !started);
BOOST_LOG_TRIVIAL(info)
<< __func__
<< " Finished downloading ledger from clio node. ip = " << ip;
backend_->cache().setFull();
log_.info() << "Finished downloading ledger from clio node. ip = "
<< ip;
backend_->cache().setFull();
return true;
}
catch (std::exception const& e)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " Encountered exception : " << e.what()
log_.error() << "Encountered exception : " << e.what()
<< " - ip = " << ip;
return false;
}
@@ -1101,7 +1023,7 @@ ReportingETL::loadCache(uint32_t seq)
if (cacheLoadStyle_ == CacheLoadStyle::NOT_AT_ALL)
{
backend_->cache().setDisabled();
BOOST_LOG_TRIVIAL(warning) << "Cache is disabled. Not loading";
log_.warn() << "Cache is disabled. Not loading";
return;
}
// sanity check to make sure we are not calling this multiple times
@@ -1142,12 +1064,11 @@ ReportingETL::loadCache(uint32_t seq)
while (cacheLoadStyle_ == CacheLoadStyle::SYNC &&
!backend_->cache().isFull())
{
BOOST_LOG_TRIVIAL(debug)
<< "Cache not full. Cache size = " << backend_->cache().size()
<< ". Sleeping ...";
log_.debug() << "Cache not full. Cache size = "
<< backend_->cache().size() << ". Sleeping ...";
std::this_thread::sleep_for(std::chrono::seconds(10));
BOOST_LOG_TRIVIAL(info)
<< "Cache is full. Cache size = " << backend_->cache().size();
log_.info() << "Cache is full. Cache size = "
<< backend_->cache().size();
}
}
@@ -1198,9 +1119,8 @@ ReportingETL::loadCacheFromDb(uint32_t seq)
if (c)
cursorStr << ripple::strHex(*c) << ", ";
}
BOOST_LOG_TRIVIAL(info)
<< "Loading cache. num cursors = " << cursors.size() - 1;
BOOST_LOG_TRIVIAL(trace) << __func__ << " cursors = " << cursorStr.str();
log_.info() << "Loading cache. num cursors = " << cursors.size() - 1;
log_.trace() << "cursors = " << cursorStr.str();
cacheDownloader_ = std::thread{[this, seq, cursors]() {
auto startTime = std::chrono::system_clock::now();
@@ -1221,8 +1141,7 @@ ReportingETL::loadCacheFromDb(uint32_t seq)
std::string cursorStr = cursor.has_value()
? ripple::strHex(cursor.value())
: ripple::strHex(Backend::firstKey);
BOOST_LOG_TRIVIAL(debug)
<< "Starting a cursor: " << cursorStr
log_.debug() << "Starting a cursor: " << cursorStr
<< " markers = " << *markers;
while (!stopping_)
@@ -1237,7 +1156,7 @@ ReportingETL::loadCacheFromDb(uint32_t seq)
backend_->cache().update(res.objects, seq, true);
if (!res.cursor || (end && *(res.cursor) > *end))
break;
BOOST_LOG_TRIVIAL(trace)
log_.trace()
<< "Loading cache. cache size = "
<< backend_->cache().size() << " - cursor = "
<< ripple::strHex(res.cursor.value())
@@ -1254,16 +1173,14 @@ ReportingETL::loadCacheFromDb(uint32_t seq)
auto duration =
std::chrono::duration_cast<std::chrono::seconds>(
endTime - startTime);
BOOST_LOG_TRIVIAL(info)
<< "Finished loading cache. cache size = "
log_.info() << "Finished loading cache. cache size = "
<< backend_->cache().size() << ". Took "
<< duration.count() << " seconds";
backend_->cache().setFull();
}
else
{
BOOST_LOG_TRIVIAL(info)
<< "Finished a cursor. num remaining = "
log_.info() << "Finished a cursor. num remaining = "
<< *numRemaining << " start = " << cursorStr
<< " markers = " << *markers;
}
@@ -1275,7 +1192,7 @@ ReportingETL::loadCacheFromDb(uint32_t seq)
void
ReportingETL::monitorReadOnly()
{
BOOST_LOG_TRIVIAL(debug) << "Starting reporting in strict read only mode";
log_.debug() << "Starting reporting in strict read only mode";
auto rng = backend_->hardFetchLedgerRangeNoThrow();
uint32_t latestSequence;
if (!rng)

View File

@@ -8,6 +8,7 @@
#include <boost/beast/websocket.hpp>
#include <backend/BackendInterface.h>
#include <etl/ETLSource.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
@@ -55,6 +56,8 @@ class SubscriptionManager;
class ReportingETL
{
private:
clio::Logger log_{"ETL"};
std::shared_ptr<BackendInterface> backend_;
std::shared_ptr<SubscriptionManager> subscriptions_;
std::shared_ptr<ETLLoadBalancer> loadBalancer_;
@@ -300,7 +303,7 @@ private:
void
run()
{
BOOST_LOG_TRIVIAL(info) << "Starting reporting etl";
log_.info() << "Starting reporting etl";
stopping_ = false;
doWork();
@@ -337,8 +340,8 @@ public:
~ReportingETL()
{
BOOST_LOG_TRIVIAL(info) << "onStop called";
BOOST_LOG_TRIVIAL(debug) << "Stopping Reporting ETL";
log_.info() << "onStop called";
log_.debug() << "Stopping Reporting ETL";
stopping_ = true;
if (worker_.joinable())
@@ -346,7 +349,7 @@ public:
if (cacheDownloader_.joinable())
cacheDownloader_.join();
BOOST_LOG_TRIVIAL(debug) << "Joined ReportingETL worker thread";
log_.debug() << "Joined ReportingETL worker thread";
}
boost::json::object

191
src/log/Logger.cpp Normal file
View File

@@ -0,0 +1,191 @@
#include <config/Config.h>
#include <log/Logger.h>
#include <algorithm>
#include <array>
#include <filesystem>
namespace clio {
// Static loggers backing the LogService convenience interface; each writes
// to its named channel ("General" / "Alert") through the shared logging core.
Logger LogService::general_log_ = Logger{"General"};
Logger LogService::alert_log_ = Logger{"Alert"};
/**
 * @brief Streams the three-letter label of a @ref Severity.
 *
 * @param stream The output stream to write into
 * @param sev The severity whose label is printed
 * @return std::ostream& The same stream, enabling chaining
 */
std::ostream&
operator<<(std::ostream& stream, Severity sev)
{
    static constexpr std::array<const char*, 6> labels = {
        "TRC", "DBG", "NFO", "WRN", "ERR", "FTL"};

    // at() throws instead of reading out of bounds if sev is ever
    // outside the enum's declared range
    stream << labels.at(static_cast<int>(sev));
    return stream;
}
/**
 * @brief Boost.JSON customization point parsing a @ref Severity.
 *
 * Accepts (case-insensitively) "trace", "debug", "info", "warning"/"warn",
 * "error" or "fatal".
 *
 * @param value The JSON value to parse; must be a string
 * @return Severity The parsed severity level
 * @throws std::runtime_error If the value is not a string or not a level
 */
Severity
tag_invoke(boost::json::value_to_tag<Severity>, boost::json::value const& value)
{
    if (not value.is_string())
        throw std::runtime_error("`log_level` must be a string");
    auto const& logLevel = value.as_string();

    // case-insensitive comparison against a candidate level name
    auto const matches = [&logLevel](char const* name) {
        return boost::iequals(logLevel, name);
    };

    if (matches("trace"))
        return Severity::TRACE;
    if (matches("debug"))
        return Severity::DEBUG;
    if (matches("info"))
        return Severity::INFO;
    if (matches("warning") || matches("warn"))
        return Severity::WARNING;
    if (matches("error"))
        return Severity::ERROR;
    if (matches("fatal"))
        return Severity::FATAL;

    throw std::runtime_error(
        "Could not parse `log_level`: expected `trace`, `debug`, `info`, "
        "`warning`, `error` or `fatal`");
}
/**
 * @brief Initializes the logging core from the given configuration.
 *
 * Sets up the optional console sink (`log_to_console`) and optional rotating
 * file sink (`log_directory`, `log_rotation_size`,
 * `log_rotation_hour_interval`, `log_directory_max_size`), installs a
 * per-channel minimum-severity filter seeded from `log_level` and overridden
 * per entry of the `log_channels` array, and logs the resulting default level.
 *
 * @param config The parsed application configuration
 * @throws std::runtime_error If a `log_channels` entry names an unknown
 * channel or omits the required `channel` key
 */
void
LogService::init(Config const& config)
{
    namespace src = boost::log::sources;
    namespace keywords = boost::log::keywords;
    namespace sinks = boost::log::sinks;
    boost::log::add_common_attributes();
    // allow %Severity% in format strings to use our custom operator<<
    boost::log::register_simple_formatter_factory<Severity, char>("Severity");
    auto const defaultFormat =
        "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% "
        "%Message%";
    std::string format =
        config.valueOr<std::string>("log_format", defaultFormat);
    if (config.valueOr("log_to_console", false))
    {
        boost::log::add_console_log(std::cout, keywords::format = format);
    }
    auto logDir = config.maybeValue<std::string>("log_directory");
    if (logDir)
    {
        boost::filesystem::path dirPath{logDir.value()};
        if (!boost::filesystem::exists(dirPath))
            boost::filesystem::create_directories(dirPath);
        // rotation size configured in MB; converted to bytes here
        auto const rotationSize =
            config.valueOr<uint64_t>("log_rotation_size", 2048u) * 1024u *
            1024u;
        auto const rotationPeriod =
            config.valueOr<uint32_t>("log_rotation_hour_interval", 12u);
        // directory cap configured in MB (default 50 GB); converted to bytes
        auto const dirSize =
            config.valueOr<uint64_t>("log_directory_max_size", 50u * 1024u) *
            1024u * 1024u;
        // rotate on size or time, whichever triggers first; rotated files
        // get a timestamped name
        auto fileSink = boost::log::add_file_log(
            keywords::file_name = dirPath / "clio.log",
            keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log",
            keywords::auto_flush = true,
            keywords::format = format,
            keywords::open_mode = std::ios_base::app,
            keywords::rotation_size = rotationSize,
            keywords::time_based_rotation =
                sinks::file::rotation_at_time_interval(
                    boost::posix_time::hours(rotationPeriod)));
        // the collector enforces the total directory size cap and must scan
        // pre-existing rotated files so they count toward the cap
        fileSink->locked_backend()->set_file_collector(
            sinks::file::make_collector(
                keywords::target = dirPath, keywords::max_size = dirSize));
        fileSink->locked_backend()->scan_for_files();
    }
    // get default severity, can be overridden per channel using
    // the `log_channels` array
    auto defaultSeverity =
        config.valueOr<Severity>("log_level", Severity::INFO);
    // the only channel names accepted in `log_channels` overrides
    static constexpr std::array<const char*, 7> channels = {
        "General",
        "WebServer",
        "Backend",
        "RPC",
        "ETL",
        "Subscriptions",
        "Performance",
    };
    auto core = boost::log::core::get();
    auto min_severity = boost::log::expressions::channel_severity_filter(
        log_channel, log_severity);
    for (auto const& channel : channels)
        min_severity[channel] = defaultSeverity;
    min_severity["Alert"] =
        Severity::WARNING;  // Channel for alerts, always warning severity
    for (auto const overrides = config.arrayOr("log_channels", {});
         auto const& cfg : overrides)
    {
        auto name = cfg.valueOrThrow<std::string>(
            "channel", "Channel name is required");
        if (not std::count(std::begin(channels), std::end(channels), name))
            throw std::runtime_error(
                "Can't override settings for log channel " + name +
                ": invalid channel");
        min_severity[name] =
            cfg.valueOr<Severity>("log_level", defaultSeverity);
    }
    core->set_filter(min_severity);
    LogService::info() << "Default log level = " << defaultSeverity;
}
/** @brief Opens a TRACE-severity record pump on this logger's channel. */
Logger::Pump
Logger::trace(source_location_t const loc) const
{
    return {logger_, Severity::TRACE, loc};
}

/** @brief Opens a DEBUG-severity record pump on this logger's channel. */
Logger::Pump
Logger::debug(source_location_t const loc) const
{
    return {logger_, Severity::DEBUG, loc};
}

/** @brief Opens an INFO-severity record pump on this logger's channel. */
Logger::Pump
Logger::info(source_location_t const loc) const
{
    return {logger_, Severity::INFO, loc};
}

/** @brief Opens a WARNING-severity record pump on this logger's channel. */
Logger::Pump
Logger::warn(source_location_t const loc) const
{
    return {logger_, Severity::WARNING, loc};
}

/** @brief Opens an ERROR-severity record pump on this logger's channel. */
Logger::Pump
Logger::error(source_location_t const loc) const
{
    return {logger_, Severity::ERROR, loc};
}

/** @brief Opens a FATAL-severity record pump on this logger's channel. */
Logger::Pump
Logger::fatal(source_location_t const loc) const
{
    return {logger_, Severity::FATAL, loc};
}
/**
 * @brief Shortens a source location to its last few path components.
 *
 * Walks backwards over up to `max_depth` '/' separators in the file path and
 * returns the remaining suffix followed by ':' and the line number
 * (e.g. "src/log/Logger.cpp:42").
 *
 * @param loc The captured source location
 * @param max_depth How many trailing path components to keep
 * @return std::string The trimmed "path:line" label
 */
std::string
Logger::Pump::pretty_path(source_location_t const& loc, size_t max_depth) const
{
    std::string const full{loc.file_name()};
    auto pos = full.size();
    for (; max_depth > 0; --max_depth)
    {
        pos = full.rfind('/', pos - 1);
        // stop once the whole path is consumed or we hit a leading '/'
        if (pos == std::string::npos || pos == 0)
            break;
    }
    // npos means no separator was found: keep the entire path
    auto const start = (pos == std::string::npos) ? 0 : pos + 1;
    return full.substr(start) + ':' + std::to_string(loc.line());
}
} // namespace clio

257
src/log/Logger.h Normal file
View File

@@ -0,0 +1,257 @@
#ifndef RIPPLE_UTIL_LOGGER_H
#define RIPPLE_UTIL_LOGGER_H
#include <boost/algorithm/string/predicate.hpp>
#include <boost/filesystem.hpp>
#include <boost/json.hpp>
#include <boost/log/core/core.hpp>
#include <boost/log/expressions/predicates/channel_severity_filter.hpp>
#include <boost/log/sinks/unlocked_frontend.hpp>
#include <boost/log/sources/record_ostream.hpp>
#include <boost/log/sources/severity_channel_logger.hpp>
#include <boost/log/sources/severity_feature.hpp>
#include <boost/log/sources/severity_logger.hpp>
#include <boost/log/utility/manipulators/add_value.hpp>
#include <boost/log/utility/setup/common_attributes.hpp>
#include <boost/log/utility/setup/console.hpp>
#include <boost/log/utility/setup/file.hpp>
#include <boost/log/utility/setup/formatter_parser.hpp>
// Note: clang still does not provide non-experimental support, gcc does
// TODO: start using <source_location> once clang catches up on c++20
#include <experimental/source_location>
#include <optional>
#include <string>
namespace clio {
class Config;
using source_location_t = std::experimental::source_location;
/**
 * @brief Custom severity levels for @ref Logger.
 *
 * NOTE: enumerators are declared in ascending order of severity; filtering
 * compares against this ordering.
 */
enum class Severity {
    TRACE,    // most verbose; fine-grained tracing output
    DEBUG,    // diagnostic detail useful during development
    INFO,     // routine operational messages
    WARNING,  // unexpected but recoverable conditions
    ERROR,    // operation failures
    FATAL,    // most severe; unrecoverable failures
};
BOOST_LOG_ATTRIBUTE_KEYWORD(log_severity, "Severity", Severity);
BOOST_LOG_ATTRIBUTE_KEYWORD(log_channel, "Channel", std::string);
/**
* @brief Custom labels for @ref Severity in log output.
*
* @param stream std::ostream The output stream
* @param sev Severity The severity to output to the ostream
* @return std::ostream& The same ostream we were given
*/
std::ostream&
operator<<(std::ostream& stream, Severity sev);
/**
* @brief Custom JSON parser for @ref Severity.
*
* @param value The JSON string to parse
* @return Severity The parsed severity
* @throws std::runtime_error Thrown if severity is not in the right format
*/
Severity
tag_invoke(
boost::json::value_to_tag<Severity>,
boost::json::value const& value);
/**
 * @brief A simple thread-safe logger for the channel specified
 * in the constructor.
 *
 * This is cheap to copy and move. Designed to be used as a member variable or
 * otherwise. See @ref LogService::init() for setup of the logging core and
 * severity levels for each channel.
 */
class Logger final
{
    using logger_t =
        boost::log::sources::severity_channel_logger_mt<Severity, std::string>;

    // mutable so the const logging methods (trace/debug/info/...) below can
    // still open records on the underlying boost::log source
    mutable logger_t logger_;

    friend class LogService;  // to expose the Pump interface

    /**
     * @brief Helper that pumps data into a log record via `operator<<`.
     */
    class Pump final
    {
        using pump_opt_t =
            std::optional<boost::log::aux::record_pump<logger_t>>;

        boost::log::record rec_;
        // only engaged when rec_ was accepted; otherwise operator<< is a nop
        pump_opt_t pump_ = std::nullopt;

    public:
        ~Pump() = default;

        /**
         * @brief Open a record on the given logger and, if accepted, attach a
         * pump annotated with the pretty-printed source location.
         *
         * @param logger The underlying boost::log source to write through
         * @param sev The severity to log at
         * @param loc The source location of the log statement
         */
        Pump(logger_t& logger, Severity sev, source_location_t const& loc)
            : rec_{logger.open_record(boost::log::keywords::severity = sev)}
        {
            // rec_ evaluates to false when the record was rejected (e.g.
            // filtered out); we then leave pump_ disengaged so that all
            // subsequent streaming into this Pump is silently discarded
            if (rec_)
            {
                pump_.emplace(boost::log::aux::make_record_pump(logger, rec_));
                pump_->stream() << boost::log::add_value(
                    "SourceLocation", pretty_path(loc));
            }
        }

        // Neither copyable nor movable: a Pump lives only for the duration of
        // a single log statement
        Pump(Pump&&) = delete;
        Pump(Pump const&) = delete;
        Pump&
        operator=(Pump const&) = delete;
        Pump&
        operator=(Pump&&) = delete;

        /**
         * @brief Perfectly forwards any incoming data into the underlying
         * boost::log pump if the pump is available. nop otherwise.
         *
         * @tparam T Type of data to pump
         * @param data The data to pump
         * @return Pump& Reference to itself for chaining
         */
        template <typename T>
        [[maybe_unused]] Pump&
        operator<<(T&& data)
        {
            if (pump_)
                pump_->stream() << std::forward<T>(data);
            return *this;
        }

    private:
        // Trims the file path down to at most `max_depth` trailing path
        // segments and appends the line number (e.g. "src/log/Logger.cpp:42")
        [[nodiscard]] std::string
        pretty_path(source_location_t const& loc, size_t max_depth = 3) const;
    };

public:
    ~Logger() = default;

    /**
     * @brief Construct a new Logger object that produces loglines for the
     * specified channel.
     *
     * See @ref LogService::init() for general setup and configuration of
     * severity levels per channel.
     *
     * @param channel The channel this logger will report into.
     */
    Logger(std::string channel)
        : logger_{boost::log::keywords::channel = channel}
    {
    }

    Logger(Logger const&) = default;
    Logger(Logger&&) = default;
    Logger&
    operator=(Logger const&) = default;
    Logger&
    operator=(Logger&&) = default;

    /*! Interface for logging at @ref Severity::TRACE severity */
    [[nodiscard]] Pump
    trace(source_location_t const loc = source_location_t::current()) const;

    /*! Interface for logging at @ref Severity::DEBUG severity */
    [[nodiscard]] Pump
    debug(source_location_t const loc = source_location_t::current()) const;

    /*! Interface for logging at @ref Severity::INFO severity */
    [[nodiscard]] Pump
    info(source_location_t const loc = source_location_t::current()) const;

    /*! Interface for logging at @ref Severity::WARNING severity */
    [[nodiscard]] Pump
    warn(source_location_t const loc = source_location_t::current()) const;

    /*! Interface for logging at @ref Severity::ERROR severity */
    [[nodiscard]] Pump
    error(source_location_t const loc = source_location_t::current()) const;

    /*! Interface for logging at @ref Severity::FATAL severity */
    [[nodiscard]] Pump
    fatal(source_location_t const loc = source_location_t::current()) const;
};
/**
 * @brief A global logging service.
 *
 * Used to initialize and setup the logging core as well as a globally available
 * entrypoint for logging into the `General` channel as well as raising alerts.
 */
class LogService
{
    static Logger general_log_; /*! Global logger for General channel */
    static Logger alert_log_;   /*! Global logger for Alerts channel */

public:
    LogService() = delete;  // static-only service; never instantiated

    /**
     * @brief Global log core initialization from a @ref Config
     */
    static void
    init(Config const& config);

    /*! Globally accessible General logger at @ref Severity::TRACE severity */
    [[nodiscard]] static Logger::Pump
    trace(source_location_t const loc = source_location_t::current())
    {
        return general_log_.trace(loc);
    }

    /*! Globally accessible General logger at @ref Severity::DEBUG severity */
    [[nodiscard]] static Logger::Pump
    debug(source_location_t const loc = source_location_t::current())
    {
        return general_log_.debug(loc);
    }

    /*! Globally accessible General logger at @ref Severity::INFO severity */
    [[nodiscard]] static Logger::Pump
    info(source_location_t const loc = source_location_t::current())
    {
        return general_log_.info(loc);
    }

    /*! Globally accessible General logger at @ref Severity::WARNING severity */
    [[nodiscard]] static Logger::Pump
    warn(source_location_t const loc = source_location_t::current())
    {
        return general_log_.warn(loc);
    }

    /*! Globally accessible General logger at @ref Severity::ERROR severity */
    [[nodiscard]] static Logger::Pump
    error(source_location_t const loc = source_location_t::current())
    {
        return general_log_.error(loc);
    }

    /*! Globally accessible General logger at @ref Severity::FATAL severity */
    [[nodiscard]] static Logger::Pump
    fatal(source_location_t const loc = source_location_t::current())
    {
        return general_log_.fatal(loc);
    }

    /*! Globally accessible Alert logger; alerts are logged at WARNING */
    [[nodiscard]] static Logger::Pump
    alert(source_location_t const loc = source_location_t::current())
    {
        return alert_log_.warn(loc);
    }
};
}; // namespace clio
#endif // RIPPLE_UTIL_LOGGER_H

View File

@@ -6,26 +6,20 @@
#undef GRPC_ASAN_ENABLED
#endif
#include <backend/BackendFactory.h>
#include <config/Config.h>
#include <etl/ReportingETL.h>
#include <log/Logger.h>
#include <webserver/Listener.h>
#include <boost/asio/dispatch.hpp>
#include <boost/asio/strand.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/json.hpp>
#include <boost/log/core.hpp>
#include <boost/log/expressions.hpp>
#include <boost/log/sinks/text_file_backend.hpp>
#include <boost/log/sources/record_ostream.hpp>
#include <boost/log/sources/severity_logger.hpp>
#include <boost/log/support/date_time.hpp>
#include <boost/log/trivial.hpp>
#include <boost/log/utility/setup/common_attributes.hpp>
#include <boost/log/utility/setup/console.hpp>
#include <boost/log/utility/setup/file.hpp>
#include <algorithm>
#include <backend/BackendFactory.h>
#include <config/Config.h>
#include <cstdlib>
#include <etl/ReportingETL.h>
#include <fstream>
#include <functional>
#include <iostream>
@@ -35,7 +29,6 @@
#include <string>
#include <thread>
#include <vector>
#include <webserver/Listener.h>
using namespace clio;
@@ -80,85 +73,6 @@ parse_certs(Config const& config)
return ctx;
}
/**
 * @brief Configure the boost::log core from the application config.
 *
 * Sets up an optional console sink, an optional size/time rotating file sink
 * under "log_directory", and a global severity filter from "log_level".
 *
 * @param config The application configuration
 * @throws std::runtime_error If any rotation/size setting is non-positive
 */
void
initLogging(Config const& config)
{
    namespace src = boost::log::sources;  // NOTE(review): appears unused here
    namespace keywords = boost::log::keywords;
    namespace sinks = boost::log::sinks;
    namespace trivial = boost::log::trivial;

    // Registers common attributes (TimeStamp, ThreadID, ...) used in `format`
    boost::log::add_common_attributes();
    std::string format = "[%TimeStamp%] [%ThreadID%] [%Severity%] %Message%";

    // Console sink is enabled by default; can be turned off via config
    if (config.valueOr<bool>("log_to_console", true))
    {
        boost::log::add_console_log(std::cout, keywords::format = format);
    }

    // Rotating file sink, enabled only when "log_directory" is configured
    if (auto logDir = config.maybeValue<std::string>("log_directory"); logDir)
    {
        boost::filesystem::path dirPath{*logDir};
        if (!boost::filesystem::exists(dirPath))
            boost::filesystem::create_directories(dirPath);

        // Configured value is multiplied by 1024 * 1024 -> bytes
        auto const rotationSize =
            config.valueOr<int64_t>("log_rotation_size", 2 * 1024) * 1024 *
            1024u;
        if (rotationSize <= 0)
            throw std::runtime_error(
                "log rotation size must be greater than 0");

        auto const rotationPeriod =
            config.valueOr<int64_t>("log_rotation_hour_interval", 12u);
        if (rotationPeriod <= 0)
            throw std::runtime_error(
                "log rotation time interval must be greater than 0");

        // Total cap on the log directory, same unit conversion as above
        auto const dirSize =
            config.valueOr<int64_t>("log_directory_max_size", 50 * 1024) *
            1024 * 1024u;
        if (dirSize <= 0)
            throw std::runtime_error(
                "log rotation directory max size must be greater than 0");

        // Rotate either when the file grows past rotationSize or every
        // rotationPeriod hours, whichever happens first
        auto fileSink = boost::log::add_file_log(
            keywords::file_name = dirPath / "clio.log",
            keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log",
            keywords::auto_flush = true,
            keywords::format = format,
            keywords::open_mode = std::ios_base::app,
            keywords::rotation_size = rotationSize,
            keywords::time_based_rotation =
                sinks::file::rotation_at_time_interval(
                    boost::posix_time::hours(rotationPeriod)));

        // Collector enforces the directory size cap over rotated files and
        // picks up any files left behind by previous runs
        fileSink->locked_backend()->set_file_collector(
            sinks::file::make_collector(
                keywords::target = dirPath, keywords::max_size = dirSize));
        fileSink->locked_backend()->scan_for_files();
    }

    // Map the configured "log_level" string onto a global severity filter;
    // unrecognized values fall back to info (with a warning logged)
    auto const logLevel = config.valueOr<std::string>("log_level", "info");
    if (boost::iequals(logLevel, "trace"))
        boost::log::core::get()->set_filter(
            trivial::severity >= trivial::trace);
    else if (boost::iequals(logLevel, "debug"))
        boost::log::core::get()->set_filter(
            trivial::severity >= trivial::debug);
    else if (boost::iequals(logLevel, "info"))
        boost::log::core::get()->set_filter(trivial::severity >= trivial::info);
    else if (
        boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn"))
        boost::log::core::get()->set_filter(
            trivial::severity >= trivial::warning);
    else if (boost::iequals(logLevel, "error"))
        boost::log::core::get()->set_filter(
            trivial::severity >= trivial::error);
    else if (boost::iequals(logLevel, "fatal"))
        boost::log::core::get()->set_filter(
            trivial::severity >= trivial::fatal);
    else
    {
        BOOST_LOG_TRIVIAL(warning) << "Unrecognized log level: " << logLevel
                                   << ". Setting log level to info";
        boost::log::core::get()->set_filter(trivial::severity >= trivial::info);
    }
    BOOST_LOG_TRIVIAL(info) << "Log level = " << logLevel;
}
void
start(boost::asio::io_context& ioc, std::uint32_t numThreads)
{
@@ -172,6 +86,7 @@ start(boost::asio::io_context& ioc, std::uint32_t numThreads)
int
main(int argc, char* argv[])
try
{
// Check command line arguments.
if (argc != 2)
@@ -196,10 +111,8 @@ main(int argc, char* argv[])
return EXIT_FAILURE;
}
initLogging(config);
BOOST_LOG_TRIVIAL(info)
<< "Clio version: " << Build::getClioFullVersionString();
LogService::init(config);
LogService::info() << "Clio version: " << Build::getClioFullVersionString();
auto ctx = parse_certs(config);
auto ctxRef = ctx
@@ -209,10 +122,10 @@ main(int argc, char* argv[])
auto const threads = config.valueOr("io_threads", 2);
if (threads <= 0)
{
BOOST_LOG_TRIVIAL(fatal) << "io_threads is less than 0";
LogService::fatal() << "io_threads is less than 0";
return EXIT_FAILURE;
}
BOOST_LOG_TRIVIAL(info) << "Number of io threads = " << threads;
LogService::info() << "Number of io threads = " << threads;
// io context to handle all incoming requests, as well as other things
// This is not the only io context in the application
@@ -258,3 +171,7 @@ main(int argc, char* argv[])
return EXIT_SUCCESS;
}
catch (std::exception const& e)
{
LogService::fatal() << "Exit on exception: " << e.what();
}

View File

@@ -1,4 +1,5 @@
#include <etl/ETLSource.h>
#include <log/Logger.h>
#include <rpc/Handlers.h>
#include <rpc/RPCHelpers.h>
#include <webserver/HttpBase.h>
@@ -9,9 +10,15 @@
#include <unordered_map>
using namespace std;
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gPerfLog{"Performance"};
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC {
Context::Context(
boost::asio::yield_context& yield_,
string const& command_,
@@ -40,7 +47,7 @@ Context::Context(
, counters(counters_)
, clientIp(clientIp_)
{
BOOST_LOG_TRIVIAL(debug) << tag() << "new Context created";
gPerfLog.debug() << tag() << "new Context created";
}
optional<Context>
@@ -325,8 +332,7 @@ buildResponse(Context const& ctx)
if (ctx.backend->isTooBusy())
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " Database is too busy. Rejecting request";
gLog.error() << "Database is too busy. Rejecting request";
return Status{RippledError::rpcTOO_BUSY};
}
@@ -337,12 +343,10 @@ buildResponse(Context const& ctx)
try
{
BOOST_LOG_TRIVIAL(debug)
<< ctx.tag() << __func__ << " start executing rpc `" << ctx.method
gPerfLog.debug() << ctx.tag() << " start executing rpc `" << ctx.method
<< '`';
auto v = (*method)(ctx);
BOOST_LOG_TRIVIAL(debug)
<< ctx.tag() << __func__ << " finish executing rpc `" << ctx.method
gPerfLog.debug() << ctx.tag() << " finish executing rpc `" << ctx.method
<< '`';
if (auto object = get_if<boost::json::object>(&v);
@@ -363,13 +367,12 @@ buildResponse(Context const& ctx)
}
catch (Backend::DatabaseTimeout const& t)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " Database timeout";
gLog.error() << "Database timeout";
return Status{RippledError::rpcTOO_BUSY};
}
catch (exception const& err)
{
BOOST_LOG_TRIVIAL(error)
<< ctx.tag() << __func__ << " caught exception : " << err.what();
gLog.error() << ctx.tag() << " caught exception: " << err.what();
return Status{RippledError::rpcINTERNAL};
}
}

View File

@@ -1,15 +1,15 @@
#ifndef REPORTING_RPC_H_INCLUDED
#define REPORTING_RPC_H_INCLUDED
#include <backend/BackendInterface.h>
#include <log/Logger.h>
#include <rpc/Counters.h>
#include <rpc/Errors.h>
#include <util/Taggable.h>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <backend/BackendInterface.h>
#include <rpc/Counters.h>
#include <util/Taggable.h>
#include <optional>
#include <string>
#include <variant>
@@ -34,6 +34,7 @@ namespace RPC {
struct Context : public util::Taggable
{
clio::Logger perfLog_{"Performance"};
boost::asio::yield_context& yield;
std::string method;
std::uint32_t version;
@@ -129,6 +130,7 @@ template <class T>
void
logDuration(Context const& ctx, T const& dur)
{
static clio::Logger log{"RPC"};
std::stringstream ss;
ss << ctx.tag() << "Request processing duration = "
<< std::chrono::duration_cast<std::chrono::milliseconds>(dur).count()
@@ -136,11 +138,11 @@ logDuration(Context const& ctx, T const& dur)
auto seconds =
std::chrono::duration_cast<std::chrono::seconds>(dur).count();
if (seconds > 10)
BOOST_LOG_TRIVIAL(error) << ss.str();
log.error() << ss.str();
else if (seconds > 1)
BOOST_LOG_TRIVIAL(warning) << ss.str();
log.warn() << ss.str();
else
BOOST_LOG_TRIVIAL(info) << ss.str();
log.info() << ss.str();
}
} // namespace RPC

View File

@@ -1,10 +1,17 @@
#include <ripple/basics/StringUtilities.h>
#include <backend/BackendInterface.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <boost/algorithm/string.hpp>
#include <boost/format.hpp>
#include <ripple/basics/StringUtilities.h>
using namespace clio;
#include <backend/BackendInterface.h>
#include <rpc/RPCHelpers.h>
// local to compilation unit loggers
namespace {
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC {
@@ -412,12 +419,11 @@ deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs)
blobs.metadata.begin(),
blobs.metadata.end(),
std::ostream_iterator<unsigned char>(meta));
BOOST_LOG_TRIVIAL(error)
<< __func__
<< " Failed to deserialize transaction. txn = " << txn.str()
<< " - meta = " << meta.str()
<< " txn length = " << std::to_string(blobs.transaction.size())
<< " meta length = " << std::to_string(blobs.metadata.size());
gLog.error() << "Failed to deserialize transaction. txn = " << txn.str()
<< " - meta = " << meta.str() << " txn length = "
<< std::to_string(blobs.transaction.size())
<< " meta length = "
<< std::to_string(blobs.metadata.size());
throw e;
}
}
@@ -807,9 +813,9 @@ traverseOwnedNodes(
}
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug)
<< "Time loading owned directories: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
gLog.debug() << "Time loading owned directories: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
end - start)
.count()
<< " milliseconds";
@@ -817,9 +823,9 @@ traverseOwnedNodes(
auto objects = backend.fetchLedgerObjects(keys, sequence, yield);
end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug)
<< "Time loading owned entries: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
gLog.debug() << "Time loading owned entries: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
end - start)
.count()
<< " milliseconds";
@@ -1341,7 +1347,7 @@ postProcessOrderBook(
}
catch (std::exception const& e)
{
BOOST_LOG_TRIVIAL(error) << "caught exception: " << e.what();
gLog.error() << "caught exception: " << e.what();
}
}
return jsonOffers;
@@ -1676,9 +1682,7 @@ traverseTransactions(
}
else if (txnPlusMeta.ledgerSequence > maxIndex && !forward)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__
<< " skipping over transactions from incomplete ledger";
gLog.debug() << "Skipping over transactions from incomplete ledger";
continue;
}
@@ -1708,8 +1712,7 @@ traverseTransactions(
response[JS(ledger_index_max)] = maxIndex;
response[JS(transactions)] = txns;
BOOST_LOG_TRIVIAL(info)
<< __func__ << " serialization took "
gLog.info() << "serialization took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now() - serializationStart)
.count()

View File

@@ -1,10 +1,11 @@
#ifndef CLIO_WORK_QUEUE_H
#define CLIO_WORK_QUEUE_H
#include <log/Logger.h>
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <boost/log/trivial.hpp>
#include <memory>
#include <optional>
@@ -20,6 +21,7 @@ class WorkQueue
std::atomic_uint64_t curSize_ = 0;
uint32_t maxSize_ = std::numeric_limits<uint32_t>::max();
clio::Logger log_{"RPC"};
public:
WorkQueue(std::uint32_t numWorkers, uint32_t maxSize = 0);
@@ -30,10 +32,8 @@ public:
{
if (curSize_ >= maxSize_ && !isWhiteListed)
{
BOOST_LOG_TRIVIAL(warning)
<< __func__
<< " queue is full. rejecting job. current size = " << curSize_
<< " max size = " << maxSize_;
log_.warn() << "Queue is full. rejecting job. current size = "
<< curSize_ << " max size = " << maxSize_;
return false;
}
++curSize_;
@@ -52,7 +52,7 @@ public:
// durationUs_
++queued_;
durationUs_ += wait;
BOOST_LOG_TRIVIAL(debug) << "WorkQueue wait time = " << wait
log_.info() << "WorkQueue wait time = " << wait
<< " queue size = " << curSize_;
f(yield);
--curSize_;

View File

@@ -1,5 +1,13 @@
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC {
Result
@@ -10,7 +18,8 @@ doAccountTx(Context const& context)
return status;
constexpr std::string_view outerFuncName = __func__;
auto const maybeResponse = traverseTransactions(
auto const maybeResponse =
traverseTransactions(
context,
[&accountID, &outerFuncName](
std::shared_ptr<Backend::BackendInterface const> const& backend,
@@ -21,12 +30,13 @@ doAccountTx(Context const& context)
auto const start = std::chrono::system_clock::now();
auto const txnsAndCursor = backend->fetchAccountTransactions(
accountID, limit, forward, cursorIn, yield);
BOOST_LOG_TRIVIAL(info)
gLog.info()
<< outerFuncName << " db fetch took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now() - start)
.count()
<< " milliseconds - num blobs = " << txnsAndCursor.txns.size();
<< " milliseconds - num blobs = "
<< txnsAndCursor.txns.size();
return txnsAndCursor;
});

View File

@@ -4,12 +4,21 @@
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <rpc/RPCHelpers.h>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <boost/json.hpp>
#include <algorithm>
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC {
@@ -64,9 +73,9 @@ doBookOffers(Context const& context)
bookBase, lgrInfo.seq, limit, marker, context.yield);
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(warning)
<< "Time loading books: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
gLog.warn() << "Time loading books: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
end - start)
.count()
<< " milliseconds - request = " << request;
@@ -78,9 +87,9 @@ doBookOffers(Context const& context)
auto end2 = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(warning)
<< "Time transforming to json: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end2 - end)
gLog.warn() << "Time transforming to json: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
end2 - end)
.count()
<< " milliseconds - request = " << request;

View File

@@ -1,9 +1,11 @@
#include <ripple/app/ledger/LedgerToJson.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <backend/BackendInterface.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <boost/json.hpp>
#include <backend/BackendInterface.h>
#include <rpc/RPCHelpers.h>
// Get state nodes from a ledger
// Inputs:
// limit: integer, maximum number of entries
@@ -18,6 +20,13 @@
//
//
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC {
using boost::json::value_to;
@@ -65,7 +74,7 @@ doLedgerData(Context const& context)
}
else
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " : parsing marker";
gLog.debug() << "Parsing marker";
marker = ripple::uint256{};
if (!marker->parseHex(request.at(JS(marker)).as_string().c_str()))
@@ -168,9 +177,8 @@ doLedgerData(Context const& context)
std::chrono::duration_cast<std::chrono::microseconds>(end - start)
.count();
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " number of results = " << results.size()
<< " fetched in " << time << " microseconds";
gLog.debug() << "Number of results = " << results.size() << " fetched in "
<< time << " microseconds";
boost::json::array objects;
objects.reserve(results.size());
for (auto const& [key, object] : results)
@@ -194,8 +202,7 @@ doLedgerData(Context const& context)
time = std::chrono::duration_cast<std::chrono::microseconds>(end2 - end)
.count();
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " number of results = " << results.size()
gLog.debug() << "Number of results = " << results.size()
<< " serialized in " << time << " microseconds";
return response;

View File

@@ -1,5 +1,13 @@
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC {
Result
@@ -11,7 +19,8 @@ doNFTHistory(Context const& context)
auto const tokenID = std::get<ripple::uint256>(maybeTokenID);
constexpr std::string_view outerFuncName = __func__;
auto const maybeResponse = traverseTransactions(
auto const maybeResponse =
traverseTransactions(
context,
[&tokenID, &outerFuncName](
std::shared_ptr<Backend::BackendInterface const> const& backend,
@@ -23,12 +32,13 @@ doNFTHistory(Context const& context)
auto const start = std::chrono::system_clock::now();
auto const txnsAndCursor = backend->fetchNFTTransactions(
tokenID, limit, forward, cursorIn, yield);
BOOST_LOG_TRIVIAL(info)
gLog.info()
<< outerFuncName << " db fetch took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now() - start)
.count()
<< " milliseconds - num blobs = " << txnsAndCursor.txns.size();
<< " milliseconds - num blobs = "
<< txnsAndCursor.txns.size();
return txnsAndCursor;
});

View File

@@ -4,10 +4,12 @@
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <rpc/RPCHelpers.h>
#include <boost/json.hpp>
#include <algorithm>
namespace json = boost::json;
namespace ripple {

View File

@@ -3,9 +3,11 @@
#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <memory>
#include <log/Logger.h>
#include <subscriptions/Message.h>
#include <memory>
class WsBase;
class Subscription
@@ -87,6 +89,7 @@ public:
class SubscriptionManager
{
using session_ptr = std::shared_ptr<WsBase>;
clio::Logger log_{"Subscriptions"};
std::vector<std::thread> workers_;
boost::asio::io_context ioc_;
@@ -134,8 +137,8 @@ public:
// We will eventually want to clamp this to be the number of strands,
// since adding more threads than we have strands won't see any
// performance benefits
BOOST_LOG_TRIVIAL(info) << "Starting subscription manager with "
<< numThreads << " workers";
log_.info() << "Starting subscription manager with " << numThreads
<< " workers";
workers_.reserve(numThreads);
for (auto i = numThreads; i > 0; --i)

View File

@@ -209,7 +209,8 @@ private:
friend Type
tag_invoke(boost::json::value_to_tag<Type>, boost::json::value const& value)
{
assert(value.is_string());
if (not value.is_string())
throw std::runtime_error("`log_tag_style` must be a string");
auto const& style = value.as_string();
if (boost::iequals(style, "int") || boost::iequals(style, "uint"))

View File

@@ -19,6 +19,7 @@
#include <thread>
#include <etl/ReportingETL.h>
#include <log/Logger.h>
#include <main/Build.h>
#include <rpc/Counters.h>
#include <rpc/RPC.h>
@@ -27,6 +28,7 @@
#include <vector>
#include <webserver/DOSGuard.h>
// TODO: consider removing those - visible to anyone including this header
namespace http = boost::beast::http;
namespace net = boost::asio;
namespace ssl = boost::asio::ssl;
@@ -101,6 +103,7 @@ class HttpBase : public util::Taggable
send_lambda lambda_;
protected:
clio::Logger perfLog_{"Performance"};
boost::beast::flat_buffer buffer_;
bool
@@ -135,8 +138,7 @@ protected:
if (!ec_ && ec != boost::asio::error::operation_aborted)
{
ec_ = ec;
BOOST_LOG_TRIVIAL(info)
<< tag() << __func__ << ": " << what << ": " << ec.message();
perfLog_.info() << tag() << ": " << what << ": " << ec.message();
boost::beast::get_lowest_layer(derived().stream())
.socket()
.close(ec);
@@ -168,11 +170,11 @@ public:
, lambda_(*this)
, buffer_(std::move(buffer))
{
BOOST_LOG_TRIVIAL(debug) << tag() << "http session created";
perfLog_.debug() << tag() << "http session created";
}
virtual ~HttpBase()
{
BOOST_LOG_TRIVIAL(debug) << tag() << "http session closed";
perfLog_.debug() << tag() << "http session closed";
}
void
@@ -234,8 +236,7 @@ public:
if (!ip)
return;
BOOST_LOG_TRIVIAL(debug) << tag() << "http::" << __func__
<< " received request from ip = " << *ip
perfLog_.debug() << tag() << "Received request from ip = " << *ip
<< " - posting to WorkQueue";
auto session = derived().shared_from_this();
@@ -256,7 +257,8 @@ public:
dosGuard_,
counters_,
*ip,
session);
session,
perfLog_);
},
dosGuard_.isWhiteListed(*ip)))
{
@@ -321,7 +323,8 @@ handle_request(
DOSGuard& dosGuard,
RPC::Counters& counters,
std::string const& ip,
std::shared_ptr<Session> http)
std::shared_ptr<Session> http,
clio::Logger& perfLog)
{
auto const httpResponse = [&req](
http::status status,
@@ -356,9 +359,9 @@ handle_request(
try
{
BOOST_LOG_TRIVIAL(debug)
<< http->tag()
<< "http received request from work queue: " << req.body();
perfLog.debug() << http->tag()
<< "http received request from work queue: "
<< req.body();
boost::json::object request;
std::string responseStr = "";
@@ -422,8 +425,8 @@ handle_request(
error["request"] = request;
result = error;
BOOST_LOG_TRIVIAL(debug) << http->tag() << __func__
<< " Encountered error: " << responseStr;
perfLog.debug()
<< http->tag() << "Encountered error: " << responseStr;
}
else
{
@@ -457,8 +460,7 @@ handle_request(
}
catch (std::exception const& e)
{
BOOST_LOG_TRIVIAL(error)
<< http->tag() << __func__ << " Caught exception : " << e.what();
perfLog.error() << http->tag() << "Caught exception : " << e.what();
return send(httpResponse(
http::status::internal_server_error,
"application/json",

View File

@@ -1,10 +1,7 @@
#ifndef LISTENER_H
#define LISTENER_H
#include <boost/asio/dispatch.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>
#include <util/Taggable.h>
#include <webserver/HttpSession.h>
@@ -12,6 +9,10 @@
#include <webserver/SslHttpSession.h>
#include <webserver/SslWsSession.h>
#include <boost/asio/dispatch.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <iostream>
class SubscriptionManager;
@@ -23,6 +24,7 @@ class Detector
using std::enable_shared_from_this<
Detector<PlainSession, SslSession>>::shared_from_this;
clio::Logger log_{"WebServer"};
boost::asio::io_context& ioc_;
boost::beast::tcp_stream stream_;
std::optional<std::reference_wrapper<ssl::context>> ctx_;
@@ -69,8 +71,7 @@ public:
if (ec == net::ssl::error::stream_truncated)
return;
BOOST_LOG_TRIVIAL(info)
<< "Detector failed: " << message << ec.message() << std::endl;
log_.info() << "Detector failed (" << message << "): " << ec.message();
}
// Launch the detector
@@ -202,6 +203,7 @@ class Listener
using std::enable_shared_from_this<
Listener<PlainSession, SslSession>>::shared_from_this;
clio::Logger log_{"WebServer"};
boost::asio::io_context& ioc_;
std::optional<std::reference_wrapper<ssl::context>> ctx_;
tcp::acceptor acceptor_;
@@ -255,8 +257,7 @@ public:
acceptor_.bind(endpoint, ec);
if (ec)
{
BOOST_LOG_TRIVIAL(error)
<< "Failed to bind to endpoint: " << endpoint
log_.error() << "Failed to bind to endpoint: " << endpoint
<< ". message: " << ec.message();
throw std::runtime_error("Failed to bind to specified endpoint");
}
@@ -265,8 +266,7 @@ public:
acceptor_.listen(net::socket_base::max_listen_connections, ec);
if (ec)
{
BOOST_LOG_TRIVIAL(error)
<< "Failed to listen at endpoint: " << endpoint
log_.error() << "Failed to listen at endpoint: " << endpoint
<< ". message: " << ec.message();
throw std::runtime_error("Failed to listen at specified endpoint");
}
@@ -336,6 +336,7 @@ make_HttpServer(
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard)
{
static clio::Logger log{"WebServer"};
if (!config.contains("server"))
return nullptr;
@@ -348,7 +349,7 @@ make_HttpServer(
auto const maxQueueSize =
serverConfig.valueOr<uint32_t>("max_queue_size", 0); // 0 is no limit
BOOST_LOG_TRIVIAL(info) << __func__ << " Number of workers = " << numThreads
log.info() << "Number of workers = " << numThreads
<< ". Max queue size = " << maxQueueSize;
auto server = std::make_shared<HttpServer>(

View File

@@ -1,15 +1,10 @@
#ifndef RIPPLE_REPORTING_WS_BASE_SESSION_H
#define RIPPLE_REPORTING_WS_BASE_SESSION_H
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <iostream>
#include <memory>
#include <backend/BackendInterface.h>
#include <etl/ETLSource.h>
#include <etl/ReportingETL.h>
#include <log/Logger.h>
#include <rpc/Counters.h>
#include <rpc/RPC.h>
#include <rpc/WorkQueue.h>
@@ -18,6 +13,13 @@
#include <util/Taggable.h>
#include <webserver/DOSGuard.h>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <iostream>
#include <memory>
// TODO: Consider removing these. Visible to anyone including this header.
namespace http = boost::beast::http;
namespace net = boost::asio;
namespace ssl = boost::asio::ssl;
@@ -27,8 +29,8 @@ using tcp = boost::asio::ip::tcp;
inline void
logError(boost::beast::error_code ec, char const* what)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : " << what << ": " << ec.message() << "\n";
static clio::Logger log{"WebServer"};
log.debug() << what << ": " << ec.message() << "\n";
}
inline boost::json::object
@@ -48,6 +50,8 @@ getDefaultWsResponse(boost::json::value const& id)
class WsBase : public util::Taggable
{
protected:
clio::Logger log_{"WebServer"};
clio::Logger perfLog_{"Performance"};
boost::system::error_code ec_;
public:
@@ -114,8 +118,7 @@ class WsSession : public WsBase,
if (!ec_ && ec != boost::asio::error::operation_aborted)
{
ec_ = ec;
BOOST_LOG_TRIVIAL(info)
<< tag() << __func__ << ": " << what << ": " << ec.message();
perfLog_.info() << tag() << ": " << what << ": " << ec.message();
boost::beast::get_lowest_layer(derived().ws()).socket().close(ec);
if (auto manager = subscriptions_.lock(); manager)
@@ -147,11 +150,11 @@ public:
, counters_(counters)
, queue_(queue)
{
BOOST_LOG_TRIVIAL(info) << tag() << "session created";
perfLog_.info() << tag() << "session created";
}
virtual ~WsSession()
{
BOOST_LOG_TRIVIAL(info) << tag() << "session closed";
perfLog_.info() << tag() << "session closed";
}
// Access the derived class, this is part of
@@ -244,7 +247,7 @@ public:
if (ec)
return wsFail(ec, "accept");
BOOST_LOG_TRIVIAL(info) << tag() << "accepting new connection";
perfLog_.info() << tag() << "accepting new connection";
// Read a message
do_read();
@@ -287,7 +290,7 @@ public:
try
{
BOOST_LOG_TRIVIAL(debug)
perfLog_.debug()
<< tag() << "ws received request from work queue : " << request;
auto range = backend_->fetchLedgerRange();
@@ -309,8 +312,7 @@ public:
if (!context)
{
BOOST_LOG_TRIVIAL(warning)
<< tag() << " could not create RPC context";
perfLog_.warn() << tag() << "Could not create RPC context";
return sendError(RPC::RippledError::rpcBAD_SYNTAX);
}
@@ -344,8 +346,7 @@ public:
}
catch (std::exception const& e)
{
BOOST_LOG_TRIVIAL(error)
<< tag() << __func__ << " caught exception : " << e.what();
perfLog_.error() << tag() << "Caught exception : " << e.what();
return sendError(RPC::RippledError::rpcINTERNAL);
}
@@ -385,8 +386,7 @@ public:
if (!ip)
return;
BOOST_LOG_TRIVIAL(info) << tag() << "ws::" << __func__
<< " received request from ip = " << *ip;
perfLog_.info() << tag() << "Received request from ip = " << *ip;
auto sendError = [this, ip](
auto error,
@@ -399,7 +399,7 @@ public:
e["request"] = request;
auto responseStr = boost::json::serialize(e);
BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << responseStr;
log_.trace() << responseStr;
dosGuard_.add(*ip, responseStr.size());
send(std::move(responseStr));
};
@@ -429,8 +429,7 @@ public:
}
else
{
BOOST_LOG_TRIVIAL(debug)
<< tag() << __func__ << " adding to work queue";
perfLog_.debug() << tag() << "Adding to work queue";
if (!queue_.postCoro(
[shared_this = shared_from_this(),

View File

@@ -1,17 +1,23 @@
#include <algorithm>
#include <backend/BackendFactory.h>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <config/Config.h>
#include <etl/ReportingETL.h>
#include <gtest/gtest.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <util/Fixtures.h>
#include <boost/log/core.hpp>
#include <boost/log/expressions.hpp>
#include <boost/log/trivial.hpp>
#include <backend/BackendFactory.h>
#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <gtest/gtest.h>
TEST(BackendTest, Basic)
#include <algorithm>
class BackendTest : public NoLoggerFixture
{
};
TEST_F(BackendTest, Basic)
{
boost::asio::io_context ioc;
std::optional<boost::asio::io_context::work> work;
@@ -21,7 +27,7 @@ TEST(BackendTest, Basic)
boost::asio::spawn(
ioc, [&done, &work, &ioc](boost::asio::yield_context yield) {
boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::warning);
clio::log_severity >= clio::Severity::WARNING);
std::string keyspace = "clio_test_" +
std::to_string(std::chrono::system_clock::now()
.time_since_epoch()
@@ -41,7 +47,6 @@ TEST(BackendTest, Basic)
std::vector<boost::json::object> configs = {cassandraConfig};
for (auto& config : configs)
{
std::cout << keyspace << std::endl;
auto backend = Backend::make_Backend(ioc, clio::Config{config});
std::string rawHeader =
@@ -107,10 +112,8 @@ TEST(BackendTest, Basic)
}
{
std::cout << "fetching ledger by sequence" << std::endl;
auto retLgr =
backend->fetchLedgerBySequence(lgrInfo.seq, yield);
std::cout << "fetched ledger by sequence" << std::endl;
ASSERT_TRUE(retLgr.has_value());
EXPECT_EQ(retLgr->seq, lgrInfo.seq);
EXPECT_EQ(
@@ -147,10 +150,8 @@ TEST(BackendTest, Basic)
EXPECT_EQ(seq, lgrInfoNext.seq);
}
{
std::cout << "fetching ledger by sequence" << std::endl;
auto retLgr =
backend->fetchLedgerBySequence(lgrInfoNext.seq, yield);
std::cout << "fetched ledger by sequence" << std::endl;
EXPECT_TRUE(retLgr.has_value());
EXPECT_EQ(retLgr->seq, lgrInfoNext.seq);
EXPECT_EQ(
@@ -802,9 +803,6 @@ TEST(BackendTest, Basic)
auto objs,
auto accountTx,
auto state) {
std::cout
<< "writing ledger = " << std::to_string(lgrInfo.seq)
<< std::endl;
backend->startWrites();
backend->writeLedger(
@@ -978,11 +976,6 @@ TEST(BackendTest, Basic)
uint32_t limit = 10;
page = backend->fetchLedgerPage(
page.cursor, seq, limit, false, yield);
std::cout << "fetched a page " << page.objects.size()
<< std::endl;
if (page.cursor)
std::cout << ripple::strHex(*page.cursor)
<< std::endl;
// if (page.cursor)
// EXPECT_EQ(page.objects.size(), limit);
retObjs.insert(
@@ -1007,7 +1000,6 @@ TEST(BackendTest, Basic)
}
}
if (found != (obj.second.size() != 0))
std::cout << ripple::strHex(obj.first) << std::endl;
ASSERT_EQ(found, obj.second.size() != 0);
}
};
@@ -1148,15 +1140,12 @@ TEST(BackendTest, Basic)
for (auto [seq, diff] : state)
{
std::cout << "flatteneing" << std::endl;
auto flat = flatten(seq);
std::cout << "flattened" << std::endl;
checkLedger(
lgrInfos[seq],
allTxns[seq],
flat,
flattenAccountTx(seq));
std::cout << "checked" << std::endl;
}
}
@@ -1168,11 +1157,11 @@ TEST(BackendTest, Basic)
EXPECT_EQ(done, true);
}
TEST(Backend, cache)
TEST_F(BackendTest, cache)
{
using namespace Backend;
boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::warning);
clio::log_severity >= clio::Severity::WARNING);
SimpleCache cache;
ASSERT_FALSE(cache.isFull());
cache.setFull();
@@ -1410,11 +1399,11 @@ TEST(Backend, cache)
}
}
TEST(Backend, cacheBackground)
TEST_F(BackendTest, cacheBackground)
{
using namespace Backend;
boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::warning);
clio::log_severity >= clio::Severity::WARNING);
SimpleCache cache;
ASSERT_FALSE(cache.isFull());
ASSERT_EQ(cache.size(), 0);
@@ -1814,7 +1803,7 @@ TEST(Backend, cacheBackground)
ASSERT_EQ(idx, allObjs.size());
}
TEST(Backend, cacheIntegration)
TEST_F(BackendTest, cacheIntegration)
{
boost::asio::io_context ioc;
std::optional<boost::asio::io_context::work> work;
@@ -1824,7 +1813,7 @@ TEST(Backend, cacheIntegration)
boost::asio::spawn(
ioc, [&ioc, &done, &work](boost::asio::yield_context yield) {
boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::warning);
clio::log_severity >= clio::Severity::WARNING);
std::string keyspace = "clio_test_" +
std::to_string(std::chrono::system_clock::now()
.time_since_epoch()
@@ -1844,7 +1833,6 @@ TEST(Backend, cacheIntegration)
std::vector<boost::json::object> configs = {cassandraConfig};
for (auto& config : configs)
{
std::cout << keyspace << std::endl;
auto backend = Backend::make_Backend(ioc, clio::Config{config});
backend->cache().setFull();
@@ -1927,10 +1915,8 @@ TEST(Backend, cacheIntegration)
}
{
std::cout << "fetching ledger by sequence" << std::endl;
auto retLgr =
backend->fetchLedgerBySequence(lgrInfo.seq, yield);
std::cout << "fetched ledger by sequence" << std::endl;
ASSERT_TRUE(retLgr.has_value());
EXPECT_EQ(retLgr->seq, lgrInfo.seq);
EXPECT_EQ(
@@ -1967,10 +1953,8 @@ TEST(Backend, cacheIntegration)
EXPECT_EQ(seq, lgrInfoNext.seq);
}
{
std::cout << "fetching ledger by sequence" << std::endl;
auto retLgr =
backend->fetchLedgerBySequence(lgrInfoNext.seq, yield);
std::cout << "fetched ledger by sequence" << std::endl;
EXPECT_TRUE(retLgr.has_value());
EXPECT_EQ(retLgr->seq, lgrInfoNext.seq);
EXPECT_EQ(
@@ -2226,8 +2210,6 @@ TEST(Backend, cacheIntegration)
return lgrInfo;
};
auto writeLedger = [&](auto lgrInfo, auto objs, auto state) {
std::cout << "writing ledger = "
<< std::to_string(lgrInfo.seq);
backend->startWrites();
backend->writeLedger(
@@ -2344,11 +2326,6 @@ TEST(Backend, cacheIntegration)
uint32_t limit = 10;
page = backend->fetchLedgerPage(
page.cursor, seq, limit, false, yield);
std::cout << "fetched a page " << page.objects.size()
<< std::endl;
if (page.cursor)
std::cout << ripple::strHex(*page.cursor)
<< std::endl;
// if (page.cursor)
// EXPECT_EQ(page.objects.size(), limit);
retObjs.insert(
@@ -2372,7 +2349,6 @@ TEST(Backend, cacheIntegration)
}
}
if (found != (obj.second.size() != 0))
std::cout << ripple::strHex(obj.first) << std::endl;
ASSERT_EQ(found, obj.second.size() != 0);
}
};
@@ -2437,11 +2413,8 @@ TEST(Backend, cacheIntegration)
for (auto [seq, diff] : state)
{
std::cout << "flatteneing" << std::endl;
auto flat = flatten(seq);
std::cout << "flattened" << std::endl;
checkLedger(lgrInfos[seq], flat);
std::cout << "checked" << std::endl;
}
}

View File

@@ -1,9 +1,8 @@
#include <config/Config.h>
#include <util/Fixtures.h>
#include <boost/filesystem.hpp>
#include <boost/json/parse.hpp>
#include <boost/log/core.hpp>
#include <gtest/gtest.h>
#include <cstdio>
#include <exception>
@@ -34,17 +33,10 @@ constexpr static auto JSONData = R"JSON(
}
)JSON";
class ConfigTest : public ::testing::Test
class ConfigTest : public NoLoggerFixture
{
protected:
Config cfg{json::parse(JSONData)};
void
SetUp() override
{
// disable logging in test
core::get()->set_logging_enabled(false);
}
};
TEST_F(ConfigTest, SanityCheck)

49
unittests/Logger.cpp Normal file
View File

@@ -0,0 +1,49 @@
#include <util/Fixtures.h>
using namespace clio;
// Fixture used by tests that exercise real (enabled) log output.
struct LoggerTest : LoggerFixture
{
};
// Fixture used by tests that expect logging to be fully disabled.
struct NoLoggerTest : NoLoggerFixture
{
};
// Verifies basic logging through a named Logger as well as the static
// LogService shortcuts, checking "Channel:Severity Message" formatting
// end to end against the console sink installed by LoggerFixture.
TEST_F(LoggerTest, Basic)
{
    Logger log{"General"};
    log.info() << "Info line logged";
    checkEqual("General:NFO Info line logged");

    // LogService::* shortcuts also emit on the "General" channel
    LogService::debug() << "Debug line with numbers " << 12345;
    checkEqual("General:DBG Debug line with numbers 12345");

    LogService::warn() << "Warning is logged";
    checkEqual("General:WRN Warning is logged");
}
// Verifies the per-channel severity filter configured by LoggerFixture:
// "General" is set to DEBUG (so TRACE is dropped) while the "Trace"
// channel is set to TRACE (so everything passes).
TEST_F(LoggerTest, Filtering)
{
    Logger log{"General"};
    log.trace() << "Should not be logged";
    checkEmpty();  // TRACE is below the DEBUG threshold for "General"
    log.warn() << "Warning is logged";
    checkEqual("General:WRN Warning is logged");

    Logger tlog{"Trace"};
    tlog.trace() << "Trace line logged for 'Trace' component";
    checkEqual("Trace:TRC Trace line logged for 'Trace' component");
}
// With logging globally disabled (NoLoggerFixture), nothing should reach
// the sink -- not even via the highest severity shortcut.
TEST_F(NoLoggerTest, Basic)
{
    Logger log{"Trace"};
    log.trace() << "Nothing";
    checkEmpty();

    LogService::fatal() << "Still nothing";
    checkEmpty();
}

93
unittests/util/Fixtures.h Normal file
View File

@@ -0,0 +1,93 @@
#ifndef CLIO_UNITTEST_FIXTURES_H
#define CLIO_UNITTEST_FIXTURES_H
#include <log/Logger.h>

#include <gtest/gtest.h>

#include <ios>
#include <mutex>
#include <ostream>
#include <sstream>
/**
 * @brief Fixture that captures clio log output for inspection in tests.
 *
 * Simulates what `LogService::init(config)` does at application startup:
 * registers the formatter factory and common attributes once per process,
 * replaces all sinks with a console sink that writes into an in-memory
 * buffer, and installs a per-channel severity filter ("General" >= DEBUG,
 * "Trace" >= TRACE). Tests then use checkEqual()/checkEmpty() to verify
 * exactly what was logged.
 */
class LoggerFixture : public ::testing::Test
{
    /**
     * @brief A simple string buffer that can be used to mock std::cout for
     * console logging.
     */
    class FakeBuffer final : public std::stringbuf
    {
    public:
        /// Return everything written so far and clear the buffer.
        std::string
        getStrAndReset()
        {
            auto value = str();
            str("");
            return value;
        }
    };

    FakeBuffer buffer_;                             // captures formatted log records
    std::ostream stream_ = std::ostream{&buffer_};  // sink target backed by buffer_

protected:
    // Simulates the `LogService::init(config)` call
    void
    SetUp() override
    {
        // One-time, process-wide registration: the formatter factory and
        // common attributes are global state and must not be re-registered
        // for every test.
        static std::once_flag once_;
        std::call_once(once_, [] {
            boost::log::add_common_attributes();
            boost::log::register_simple_formatter_factory<clio::Severity, char>(
                "Severity");
        });

        namespace keywords = boost::log::keywords;
        namespace expr = boost::log::expressions;

        auto core = boost::log::core::get();

        // Start every test from a clean slate and log only into buffer_.
        core->remove_all_sinks();
        boost::log::add_console_log(
            stream_, keywords::format = "%Channel%:%Severity% %Message%");
        auto min_severity = expr::channel_severity_filter(
            clio::log_channel, clio::log_severity);
        min_severity["General"] = clio::Severity::DEBUG;
        min_severity["Trace"] = clio::Severity::TRACE;
        core->set_filter(min_severity);
        core->set_logging_enabled(true);
    }

    /**
     * @brief Assert that exactly `expected` (plus the trailing newline added
     * by the sink) was logged since the last check, resetting the buffer.
     *
     * NOTE(review): an ASSERT_* inside a helper aborts only the helper, not
     * the calling test body (standard gtest behaviour).
     */
    void
    checkEqual(std::string const& expected)
    {
        auto value = buffer_.getStrAndReset();
        ASSERT_EQ(value, expected + '\n');
    }

    /// Assert that nothing was logged since the last check.
    void
    checkEmpty()
    {
        ASSERT_TRUE(buffer_.getStrAndReset().empty());
    }
};
/**
 * @brief Fixture with LogService support but completely disabled logging.
 *
 * This is meant to be used as a base for other fixtures.
 */
class NoLoggerFixture : public LoggerFixture
{
protected:
    // Run the full logging setup from the base fixture first, then disable
    // logging globally so the tests stay silent (checkEmpty() still passes,
    // since nothing reaches the sink).
    void
    SetUp() override
    {
        LoggerFixture::SetUp();
        boost::log::core::get()->set_logging_enabled(false);
    }
};
#endif // CLIO_UNITTEST_FIXTURES_H