From a47bf2e8fe138ff91f3ea3f915c82156d53942ad Mon Sep 17 00:00:00 2001 From: Alex Kremer Date: Thu, 17 Nov 2022 23:02:16 +0100 Subject: [PATCH] Implement logging abstraction (#371) Fixes #290 --- CMake/deps/gtest.cmake | 1 + CMake/settings.cmake | 2 +- CMakeLists.txt | 14 +- example-config.json | 72 +++-- src/backend/BackendFactory.h | 10 +- src/backend/BackendInterface.cpp | 88 +++--- src/backend/BackendInterface.h | 17 +- src/backend/CassandraBackend.cpp | 178 +++++------ src/backend/CassandraBackend.h | 125 ++++---- src/config/Config.cpp | 8 +- src/etl/ETLSource.cpp | 256 +++++++-------- src/etl/ETLSource.h | 41 +-- src/etl/NFTHelpers.cpp | 11 +- src/etl/ProbingETLSource.cpp | 16 +- src/etl/ProbingETLSource.h | 3 + src/etl/ReportingETL.cpp | 397 ++++++++++-------------- src/etl/ReportingETL.h | 11 +- src/log/Logger.cpp | 191 ++++++++++++ src/log/Logger.h | 257 +++++++++++++++ src/main/main.cpp | 115 +------ src/rpc/RPC.cpp | 29 +- src/rpc/RPC.h | 16 +- src/rpc/RPCHelpers.cpp | 61 ++-- src/rpc/WorkQueue.h | 14 +- src/rpc/handlers/AccountTx.cpp | 48 +-- src/rpc/handlers/BookOffers.cpp | 37 ++- src/rpc/handlers/LedgerData.cpp | 25 +- src/rpc/handlers/NFTHistory.cpp | 50 +-- src/rpc/handlers/NFTOffers.cpp | 6 +- src/subscriptions/SubscriptionManager.h | 9 +- src/util/Taggable.h | 3 +- src/webserver/HttpBase.h | 34 +- src/webserver/Listener.h | 29 +- src/webserver/WsBase.h | 45 ++- unittests/{main.cpp => Backend.cpp} | 71 ++--- unittests/{config.cpp => Config.cpp} | 12 +- unittests/Logger.cpp | 49 +++ unittests/util/Fixtures.h | 93 ++++++ 38 files changed, 1441 insertions(+), 1003 deletions(-) create mode 100644 src/log/Logger.cpp create mode 100644 src/log/Logger.h rename unittests/{main.cpp => Backend.cpp} (97%) rename unittests/{config.cpp => Config.cpp} (96%) create mode 100644 unittests/Logger.cpp create mode 100644 unittests/util/Fixtures.h diff --git a/CMake/deps/gtest.cmake b/CMake/deps/gtest.cmake index df5affdf..73d4cbe5 100644 --- a/CMake/deps/gtest.cmake 
+++ b/CMake/deps/gtest.cmake @@ -11,6 +11,7 @@ if(NOT googletest_POPULATED) endif() target_link_libraries(clio_tests PUBLIC clio gtest_main) +target_include_directories(clio_tests PRIVATE unittests) enable_testing() diff --git a/CMake/settings.cmake b/CMake/settings.cmake index fcf42a62..3803f31c 100644 --- a/CMake/settings.cmake +++ b/CMake/settings.cmake @@ -1 +1 @@ -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing -Wall -Werror -Wno-dangling-else") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Werror -Wno-narrowing -Wno-dangling-else -Wno-deprecated-declarations") diff --git a/CMakeLists.txt b/CMakeLists.txt index 85caa8d3..5d6f55d9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -99,19 +99,21 @@ target_sources(clio PRIVATE src/rpc/handlers/Subscribe.cpp # Server src/rpc/handlers/ServerInfo.cpp - # Utility + # Utilities src/rpc/handlers/Random.cpp - src/util/Taggable.cpp - src/config/Config.cpp) + src/config/Config.cpp + src/log/Logger.cpp + src/util/Taggable.cpp) add_executable(clio_server src/main/main.cpp) target_link_libraries(clio_server PUBLIC clio) if(BUILD_TESTS) - add_executable(clio_tests + add_executable(clio_tests unittests/RPCErrors.cpp - unittests/config.cpp - unittests/main.cpp) + unittests/Backend.cpp + unittests/Logger.cpp + unittests/Config.cpp) include(CMake/deps/gtest.cmake) endif() diff --git a/example-config.json b/example-config.json index 6bea69e8..816c26f8 100644 --- a/example-config.json +++ b/example-config.json @@ -1,25 +1,22 @@ { - "database": - { - "type":"cassandra", - "cassandra": - { - "contact_points":"127.0.0.1", - "port":9042, - "keyspace":"clio", - "replication_factor":1, - "table_prefix":"", - "max_write_requests_outstanding":25000, - "max_read_requests_outstanding":30000, - "threads":8 + "database": { + "type": "cassandra", + "cassandra": { + "contact_points": "127.0.0.1", + "port": 9042, + "keyspace": "clio", + "replication_factor": 1, + "table_prefix": "", + "max_write_requests_outstanding": 25000, + 
"max_read_requests_outstanding": 30000, + "threads": 8 } }, - "etl_sources": - [ + "etl_sources": [ { - "ip":"127.0.0.1", - "ws_port":"6006", - "grpc_port":"50051" + "ip": "127.0.0.1", + "ws_port": "6006", + "grpc_port": "50051" } ], "dos_guard": @@ -31,16 +28,43 @@ "peers": [{"ip":"127.0.0.1","port":51234}] }, "server":{ - "ip":"0.0.0.0", - "port":51233 + "ip": "0.0.0.0", + "port": 51233 }, - "log_level":"debug", + "log_channels": [ + { + "channel": "Backend", + "log_level": "fatal" + }, + { + "channel": "WebServer", + "log_level": "info" + }, + { + "channel": "Subscriptions", + "log_level": "info" + }, + { + "channel": "RPC", + "log_level": "error" + }, + { + "channel": "ETL", + "log_level": "debug" + }, + { + "channel": "Performance", + "log_level": "trace" + } + ], + "log_level": "info", + "log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%", // This is the default format "log_to_console": true, - "log_directory":"./clio_log", + "log_directory": "./clio_log", "log_rotation_size": 2048, "log_directory_max_size": 51200, "log_rotation_hour_interval": 12, "log_tag_style": "uint", - "extractor_threads":8, - "read_only":false + "extractor_threads": 8, + "read_only": false } diff --git a/src/backend/BackendFactory.h b/src/backend/BackendFactory.h index e842ce46..427d4c63 100644 --- a/src/backend/BackendFactory.h +++ b/src/backend/BackendFactory.h @@ -1,16 +1,19 @@ #ifndef RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED #define RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED -#include #include #include #include +#include + +#include namespace Backend { std::shared_ptr make_Backend(boost::asio::io_context& ioc, clio::Config const& config) { - BOOST_LOG_TRIVIAL(info) << __func__ << ": Constructing BackendInterface"; + static clio::Logger log{"Backend"}; + log.info() << "Constructing BackendInterface"; auto readOnly = config.valueOr("read_only", false); auto type = config.value("database.type"); @@ -34,8 +37,7 @@ 
make_Backend(boost::asio::io_context& ioc, clio::Config const& config) backend->updateRange(rng->maxSequence); } - BOOST_LOG_TRIVIAL(info) - << __func__ << ": Constructed BackendInterface Successfully"; + log.info() << "Constructed BackendInterface Successfully"; return backend; } diff --git a/src/backend/BackendInterface.cpp b/src/backend/BackendInterface.cpp index 5438fecf..9f8ffee6 100644 --- a/src/backend/BackendInterface.cpp +++ b/src/backend/BackendInterface.cpp @@ -1,6 +1,15 @@ #include #include #include +#include + +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gLog{"Backend"}; +} // namespace + namespace Backend { bool BackendInterface::finishWrites(std::uint32_t const ledgerSequence) @@ -26,7 +35,7 @@ std::optional BackendInterface::hardFetchLedgerRangeNoThrow( boost::asio::yield_context& yield) const { - BOOST_LOG_TRIVIAL(trace) << __func__ << "(yield)"; + gLog.trace() << "called"; while (true) { try @@ -43,7 +52,7 @@ BackendInterface::hardFetchLedgerRangeNoThrow( std::optional BackendInterface::hardFetchLedgerRangeNoThrow() const { - BOOST_LOG_TRIVIAL(trace) << __func__ << "()"; + gLog.trace() << "called"; return retryOnTimeout([&]() { return hardFetchLedgerRange(); }); } @@ -57,21 +66,17 @@ BackendInterface::fetchLedgerObject( auto obj = cache_.get(key, sequence); if (obj) { - BOOST_LOG_TRIVIAL(trace) - << __func__ << " - cache hit - " << ripple::strHex(key); + gLog.trace() << "Cache hit - " << ripple::strHex(key); return *obj; } else { - BOOST_LOG_TRIVIAL(trace) - << __func__ << " - cache miss - " << ripple::strHex(key); + gLog.trace() << "Cache miss - " << ripple::strHex(key); auto dbObj = doFetchLedgerObject(key, sequence, yield); if (!dbObj) - BOOST_LOG_TRIVIAL(trace) - << __func__ << " - missed cache and missed in db"; + gLog.trace() << "Missed cache and missed in db"; else - BOOST_LOG_TRIVIAL(trace) - << __func__ << " - missed cache but found in db"; + gLog.trace() << "Missed cache but found in db"; 
return dbObj; } } @@ -93,9 +98,8 @@ BackendInterface::fetchLedgerObjects( else misses.push_back(keys[i]); } - BOOST_LOG_TRIVIAL(trace) - << __func__ << " - cache hits = " << keys.size() - misses.size() - << " - cache misses = " << misses.size(); + gLog.trace() << "Cache hits = " << keys.size() - misses.size() + << " - cache misses = " << misses.size(); if (misses.size()) { @@ -121,11 +125,9 @@ BackendInterface::fetchSuccessorKey( { auto succ = cache_.getSuccessor(key, ledgerSequence); if (succ) - BOOST_LOG_TRIVIAL(trace) - << __func__ << " - cache hit - " << ripple::strHex(key); + gLog.trace() << "Cache hit - " << ripple::strHex(key); else - BOOST_LOG_TRIVIAL(trace) - << __func__ << " - cache miss - " << ripple::strHex(key); + gLog.trace() << "Cache miss - " << ripple::strHex(key); return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield); } @@ -179,8 +181,8 @@ BackendInterface::fetchBookOffers( succMillis += getMillis(mid2 - mid1); if (!offerDir || offerDir->key >= bookEnd) { - BOOST_LOG_TRIVIAL(trace) << __func__ << " - offerDir.has_value() " - << offerDir.has_value() << " breaking"; + gLog.trace() << "offerDir.has_value() " << offerDir.has_value() + << " breaking"; break; } uTipIndex = offerDir->key; @@ -196,8 +198,7 @@ BackendInterface::fetchBookOffers( auto next = sle.getFieldU64(ripple::sfIndexNext); if (!next) { - BOOST_LOG_TRIVIAL(trace) - << __func__ << " next is empty. breaking"; + gLog.trace() << "Next is empty. 
breaking"; break; } auto nextKey = ripple::keylet::page(uTipIndex, next); @@ -214,29 +215,27 @@ BackendInterface::fetchBookOffers( auto objs = fetchLedgerObjects(keys, ledgerSequence, yield); for (size_t i = 0; i < keys.size() && i < limit; ++i) { - BOOST_LOG_TRIVIAL(trace) - << __func__ << " key = " << ripple::strHex(keys[i]) - << " blob = " << ripple::strHex(objs[i]) - << " ledgerSequence = " << ledgerSequence; + gLog.trace() << "Key = " << ripple::strHex(keys[i]) + << " blob = " << ripple::strHex(objs[i]) + << " ledgerSequence = " << ledgerSequence; assert(objs[i].size()); page.offers.push_back({keys[i], objs[i]}); } auto end = std::chrono::system_clock::now(); - BOOST_LOG_TRIVIAL(debug) - << __func__ << " " - << "Fetching " << std::to_string(keys.size()) << " offers took " - << std::to_string(getMillis(mid - begin)) - << " milliseconds. Fetching next dir took " - << std::to_string(succMillis) << " milliseonds. Fetched next dir " - << std::to_string(numSucc) << " times" - << " Fetching next page of dir took " << std::to_string(pageMillis) - << " milliseconds" - << ". num pages = " << std::to_string(numPages) - << ". Fetching all objects took " - << std::to_string(getMillis(end - mid)) - << " milliseconds. total time = " - << std::to_string(getMillis(end - begin)) << " milliseconds" - << " book = " << ripple::strHex(book); + gLog.debug() << "Fetching " << std::to_string(keys.size()) + << " offers took " << std::to_string(getMillis(mid - begin)) + << " milliseconds. Fetching next dir took " + << std::to_string(succMillis) + << " milliseonds. Fetched next dir " << std::to_string(numSucc) + << " times" + << " Fetching next page of dir took " + << std::to_string(pageMillis) << " milliseconds" + << ". num pages = " << std::to_string(numPages) + << ". Fetching all objects took " + << std::to_string(getMillis(end - mid)) + << " milliseconds. 
total time = " + << std::to_string(getMillis(end - begin)) << " milliseconds" + << " book = " << ripple::strHex(book); return page; } @@ -274,16 +273,15 @@ BackendInterface::fetchLedgerPage( page.objects.push_back({std::move(keys[i]), std::move(objects[i])}); else if (!outOfOrder) { - BOOST_LOG_TRIVIAL(error) - << __func__ - << " deleted or non-existent object in successor table. key = " + gLog.error() + << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i]) << " - seq = " << ledgerSequence; std::stringstream msg; for (size_t j = 0; j < objects.size(); ++j) { msg << " - " << ripple::strHex(keys[j]); } - BOOST_LOG_TRIVIAL(error) << __func__ << msg.str(); + gLog.error() << msg.str(); } } if (keys.size() && !reachedEnd) @@ -304,7 +302,7 @@ BackendInterface::fetchFees( if (!bytes) { - BOOST_LOG_TRIVIAL(error) << __func__ << " - could not find fees"; + gLog.error() << "Could not find fees"; return {}; } diff --git a/src/backend/BackendInterface.h b/src/backend/BackendInterface.h index a26f3e55..1a36b6c4 100644 --- a/src/backend/BackendInterface.h +++ b/src/backend/BackendInterface.h @@ -1,17 +1,15 @@ #ifndef RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED #define RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED -#include -#include -#include - #include - #include #include #include - #include +#include + +#include +#include #include #include @@ -46,6 +44,8 @@ template auto retryOnTimeout(F func, size_t waitMs = 500) { + static clio::Logger log{"Backend"}; + while (true) { try @@ -54,9 +54,8 @@ retryOnTimeout(F func, size_t waitMs = 500) } catch (DatabaseTimeout& t) { - BOOST_LOG_TRIVIAL(error) - << __func__ - << " Database request timed out. Sleeping and retrying ... "; + log.error() + << "Database request timed out. Sleeping and retrying ... 
"; std::this_thread::sleep_for(std::chrono::milliseconds(waitMs)); } } diff --git a/src/backend/CassandraBackend.cpp b/src/backend/CassandraBackend.cpp index d362446e..471f3e01 100644 --- a/src/backend/CassandraBackend.cpp +++ b/src/backend/CassandraBackend.cpp @@ -1,9 +1,13 @@ #include #include #include +#include + #include #include +using namespace clio; + namespace Backend { // Type alias for async completion handlers @@ -16,6 +20,8 @@ template void processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func) { + static clio::Logger log{"Backend"}; + CassandraBackend const& backend = *requestParams.backend; auto rc = cass_future_error_code(fut); if (rc != CASS_OK) @@ -23,11 +29,11 @@ processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func) // exponential backoff with a max wait of 2^10 ms (about 1 second) auto wait = std::chrono::milliseconds( lround(std::pow(2, std::min(10u, requestParams.currentRetries)))); - BOOST_LOG_TRIVIAL(error) - << "ERROR!!! Cassandra write error: " << rc << ", " - << cass_error_desc(rc) << " id= " << requestParams.toString() - << ", current retries " << requestParams.currentRetries - << ", retrying in " << wait.count() << " milliseconds"; + log.error() << "ERROR!!! 
Cassandra write error: " << rc << ", " + << cass_error_desc(rc) + << " id= " << requestParams.toString() + << ", current retries " << requestParams.currentRetries + << ", retrying in " << wait.count() << " milliseconds"; ++requestParams.currentRetries; std::shared_ptr timer = std::make_shared( @@ -40,11 +46,11 @@ processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func) } else { - BOOST_LOG_TRIVIAL(trace) - << __func__ << " Succesfully inserted a record"; + log.trace() << "Succesfully inserted a record"; requestParams.finish(); } } + template void processAsyncWrite(CassFuture* fut, void* cbData) @@ -155,6 +161,7 @@ makeAndExecuteAsyncWrite( auto* cb = new WriteCallbackData(b, std::move(d), bind, id); cb->start(); } + template std::shared_ptr> makeAndExecuteBulkAsyncWrite( @@ -170,13 +177,14 @@ makeAndExecuteBulkAsyncWrite( cb->start(); return cb; } + void CassandraBackend::doWriteLedgerObject( std::string&& key, std::uint32_t const seq, std::string&& blob) { - BOOST_LOG_TRIVIAL(trace) << "Writing ledger object to cassandra"; + log_.trace() << "Writing ledger object to cassandra"; if (range) makeAndExecuteAsyncWrite( this, @@ -204,15 +212,16 @@ CassandraBackend::doWriteLedgerObject( }, "ledger_object"); } + void CassandraBackend::writeSuccessor( std::string&& key, std::uint32_t const seq, std::string&& successor) { - BOOST_LOG_TRIVIAL(trace) - << "Writing successor. key = " << key - << " seq = " << std::to_string(seq) << " successor = " << successor; + log_.trace() << "Writing successor. key = " << key.size() << " bytes. 
" + << " seq = " << std::to_string(seq) + << " successor = " << successor.size() << " bytes."; assert(key.size() != 0); assert(successor.size() != 0); makeAndExecuteAsyncWrite( @@ -319,7 +328,7 @@ CassandraBackend::writeTransaction( std::string&& transaction, std::string&& metadata) { - BOOST_LOG_TRIVIAL(trace) << "Writing txn to cassandra"; + log_.trace() << "Writing txn to cassandra"; std::string hashCpy = hash; makeAndExecuteAsyncWrite( @@ -393,13 +402,13 @@ CassandraBackend::writeNFTs(std::vector&& data) std::optional CassandraBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const { - BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra"; + log_.trace() << "Fetching from cassandra"; CassandraStatement statement{selectLedgerRange_}; CassandraResult result = executeAsyncRead(statement, yield); if (!result) { - BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows"; + log_.error() << "No rows"; return {}; } LedgerRange range; @@ -528,11 +537,12 @@ CassandraBackend::fetchTransactions( throw DatabaseTimeout(); } - BOOST_LOG_TRIVIAL(debug) - << "Fetched " << numHashes << " transactions from Cassandra in " - << std::chrono::duration_cast(end - start) - .count() - << " milliseconds"; + log_.debug() << "Fetched " << numHashes + << " transactions from Cassandra in " + << std::chrono::duration_cast( + end - start) + .count() + << " milliseconds"; return results; } @@ -550,9 +560,7 @@ CassandraBackend::fetchAllTransactionHashesInLedger( auto end = std::chrono::system_clock::now(); if (!result) { - BOOST_LOG_TRIVIAL(error) - << __func__ - << " - no rows . ledger = " << std::to_string(ledgerSequence); + log_.error() << "No rows. 
Ledger = " << std::to_string(ledgerSequence); return {}; } std::vector hashes; @@ -560,12 +568,12 @@ CassandraBackend::fetchAllTransactionHashesInLedger( { hashes.push_back(result.getUInt256()); } while (result.nextRow()); - BOOST_LOG_TRIVIAL(debug) - << "Fetched " << hashes.size() - << " transaction hashes from Cassandra in " - << std::chrono::duration_cast(end - start) - .count() - << " milliseconds"; + log_.debug() << "Fetched " << hashes.size() + << " transaction hashes from Cassandra in " + << std::chrono::duration_cast( + end - start) + .count() + << " milliseconds"; return hashes; } @@ -613,9 +621,9 @@ CassandraBackend::fetchNFTTransactions( { statement.bindNextIntTuple( cursor->ledgerSequence, cursor->transactionIndex); - BOOST_LOG_TRIVIAL(debug) << " token_id = " << ripple::strHex(tokenID) - << " tuple = " << cursor->ledgerSequence - << " : " << cursor->transactionIndex; + log_.debug() << "token_id = " << ripple::strHex(tokenID) + << " tuple = " << cursor->ledgerSequence + << cursor->transactionIndex; } else { @@ -624,9 +632,8 @@ CassandraBackend::fetchNFTTransactions( forward ? 
0 : std::numeric_limits::max(); statement.bindNextIntTuple(placeHolder, placeHolder); - BOOST_LOG_TRIVIAL(debug) - << " token_id = " << ripple::strHex(tokenID) << " idx = " << seq - << " tuple = " << placeHolder; + log_.debug() << "token_id = " << ripple::strHex(tokenID) + << " idx = " << seq << " tuple = " << placeHolder; } statement.bindNextUInt(limit); @@ -635,19 +642,19 @@ CassandraBackend::fetchNFTTransactions( if (!result.hasResult()) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned"; + log_.debug() << "No rows returned"; return {}; } std::vector hashes = {}; auto numRows = result.numRows(); - BOOST_LOG_TRIVIAL(info) << "num_rows = " << numRows; + log_.info() << "num_rows = " << numRows; do { hashes.push_back(result.getUInt256()); if (--numRows == 0) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " setting cursor"; + log_.debug() << "Setting cursor"; auto const [lgrSeq, txnIdx] = result.getInt64Tuple(); cursor = { static_cast(lgrSeq), @@ -662,11 +669,11 @@ CassandraBackend::fetchNFTTransactions( } while (result.nextRow()); auto txns = fetchTransactions(hashes, yield); - BOOST_LOG_TRIVIAL(debug) << __func__ << " txns = " << txns.size(); + log_.debug() << "Txns = " << txns.size(); if (txns.size() == limit) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " returning cursor"; + log_.debug() << "Returning cursor"; return {txns, cursor}; } @@ -698,9 +705,9 @@ CassandraBackend::fetchAccountTransactions( { statement.bindNextIntTuple( cursor->ledgerSequence, cursor->transactionIndex); - BOOST_LOG_TRIVIAL(debug) << " account = " << ripple::strHex(account) - << " tuple = " << cursor->ledgerSequence - << " : " << cursor->transactionIndex; + log_.debug() << "account = " << ripple::strHex(account) + << " tuple = " << cursor->ledgerSequence + << cursor->transactionIndex; } else { @@ -709,9 +716,8 @@ CassandraBackend::fetchAccountTransactions( forward ? 
0 : std::numeric_limits::max(); statement.bindNextIntTuple(placeHolder, placeHolder); - BOOST_LOG_TRIVIAL(debug) - << " account = " << ripple::strHex(account) << " idx = " << seq - << " tuple = " << placeHolder; + log_.debug() << "account = " << ripple::strHex(account) + << " idx = " << seq << " tuple = " << placeHolder; } statement.bindNextUInt(limit); @@ -719,19 +725,19 @@ CassandraBackend::fetchAccountTransactions( if (!result.hasResult()) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned"; + log_.debug() << "No rows returned"; return {}; } std::vector hashes = {}; auto numRows = result.numRows(); - BOOST_LOG_TRIVIAL(info) << "num_rows = " << std::to_string(numRows); + log_.info() << "num_rows = " << std::to_string(numRows); do { hashes.push_back(result.getUInt256()); if (--numRows == 0) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " setting cursor"; + log_.debug() << "Setting cursor"; auto [lgrSeq, txnIdx] = result.getInt64Tuple(); cursor = { static_cast(lgrSeq), @@ -746,11 +752,11 @@ CassandraBackend::fetchAccountTransactions( } while (result.nextRow()); auto txns = fetchTransactions(hashes, yield); - BOOST_LOG_TRIVIAL(debug) << __func__ << "txns = " << txns.size(); + log_.debug() << "Txns = " << txns.size(); if (txns.size() == limit) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " returning cursor"; + log_.debug() << "Returning cursor"; return {txns, cursor}; } @@ -763,7 +769,7 @@ CassandraBackend::doFetchSuccessorKey( std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const { - BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra"; + log_.trace() << "Fetching from cassandra"; CassandraStatement statement{selectSuccessor_}; statement.bindNextBytes(key); statement.bindNextInt(ledgerSequence); @@ -772,7 +778,7 @@ CassandraBackend::doFetchSuccessorKey( if (!result) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows"; + log_.debug() << "No rows"; return {}; } auto next = result.getUInt256(); @@ -787,7 +793,7 @@ 
CassandraBackend::doFetchLedgerObject( std::uint32_t const sequence, boost::asio::yield_context& yield) const { - BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra"; + log_.trace() << "Fetching from cassandra"; CassandraStatement statement{selectObject_}; statement.bindNextBytes(key); statement.bindNextInt(sequence); @@ -796,7 +802,7 @@ CassandraBackend::doFetchLedgerObject( if (!result) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows"; + log_.debug() << "No rows"; return {}; } auto res = result.getBytes(); @@ -820,8 +826,7 @@ CassandraBackend::doFetchLedgerObjects( result_type result(handler); std::size_t const numKeys = keys.size(); - BOOST_LOG_TRIVIAL(trace) - << "Fetching " << numKeys << " records from Cassandra"; + log_.trace() << "Fetching " << numKeys << " records from Cassandra"; std::atomic_int numOutstanding = numKeys; std::vector results{numKeys}; std::vector>> cbs; @@ -850,8 +855,7 @@ CassandraBackend::doFetchLedgerObjects( throw DatabaseTimeout(); } - BOOST_LOG_TRIVIAL(trace) - << "Fetched " << numKeys << " records from Cassandra"; + log_.trace() << "Fetched " << numKeys << " records from Cassandra"; return results; } @@ -870,9 +874,7 @@ CassandraBackend::fetchLedgerDiff( if (!result) { - BOOST_LOG_TRIVIAL(error) - << __func__ - << " - no rows . ledger = " << std::to_string(ledgerSequence); + log_.error() << "No rows. 
Ledger = " << std::to_string(ledgerSequence); return {}; } std::vector keys; @@ -880,11 +882,12 @@ CassandraBackend::fetchLedgerDiff( { keys.push_back(result.getUInt256()); } while (result.nextRow()); - BOOST_LOG_TRIVIAL(debug) - << "Fetched " << keys.size() << " diff hashes from Cassandra in " - << std::chrono::duration_cast(end - start) - .count() - << " milliseconds"; + log_.debug() << "Fetched " << keys.size() + << " diff hashes from Cassandra in " + << std::chrono::duration_cast( + end - start) + .count() + << " milliseconds"; auto objs = fetchLedgerObjects(keys, ledgerSequence, yield); std::vector results; std::transform( @@ -951,12 +954,12 @@ CassandraBackend::doOnlineDelete( cv)); std::unique_lock lck(mtx); - BOOST_LOG_TRIVIAL(trace) << __func__ << "Got the mutex"; + log_.trace() << "Got the mutex"; cv.wait(lck, [&numOutstanding, concurrentLimit]() { return numOutstanding < concurrentLimit; }); } - BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched a page"; + log_.debug() << "Fetched a page"; cursor = curCursor; if (!cursor) break; @@ -982,11 +985,11 @@ CassandraBackend::open(bool readOnly) if (open_) { assert(false); - BOOST_LOG_TRIVIAL(error) << "database is already open"; + log_.error() << "Database is already open"; return; } - BOOST_LOG_TRIVIAL(info) << "Opening Cassandra Backend"; + log_.info() << "Opening Cassandra Backend"; CassCluster* cluster = cass_cluster_new(); if (!cluster) @@ -1002,9 +1005,9 @@ CassandraBackend::open(bool readOnly) if (cass_cluster_set_cloud_secure_connection_bundle( cluster, secureConnectBundle.c_str()) != CASS_OK) { - BOOST_LOG_TRIVIAL(error) << "Unable to configure cloud using the " - "secure connection bundle: " - << secureConnectBundle; + log_.error() << "Unable to configure cloud using the " + "secure connection bundle: " + << secureConnectBundle; throw std::runtime_error( "nodestore: Failed to connect using secure connection " "bundle"); @@ -1057,7 +1060,7 @@ CassandraBackend::open(bool readOnly) auto username = 
config_.maybeValue("username"); if (username) { - BOOST_LOG_TRIVIAL(debug) << "user = " << *username; + log_.debug() << "user = " << *username; auto password = config_.value("password"); cass_cluster_set_credentials( cluster, username->c_str(), password.c_str()); @@ -1080,10 +1083,11 @@ CassandraBackend::open(bool readOnly) "max_read_requests_outstanding", maxReadRequestsOutstanding); syncInterval_ = config_.valueOr("sync_interval", syncInterval_); - BOOST_LOG_TRIVIAL(info) - << __func__ << " sync interval is " << syncInterval_ - << ". max write requests outstanding is " << maxWriteRequestsOutstanding - << ". max read requests outstanding is " << maxReadRequestsOutstanding; + log_.info() << "Sync interval is " << syncInterval_ + << ". max write requests outstanding is " + << maxWriteRequestsOutstanding + << ". max read requests outstanding is " + << maxReadRequestsOutstanding; cass_cluster_set_request_timeout(cluster, 10000); @@ -1098,7 +1102,7 @@ CassandraBackend::open(bool readOnly) ss << "nodestore: Error setting Cassandra max core connections per " "host" << ", result: " << rc << ", " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << ss.str(); + log_.error() << ss.str(); throw std::runtime_error(ss.str()); } @@ -1140,8 +1144,7 @@ CassandraBackend::open(bool readOnly) auto keyspace = config_.valueOr("keyspace", ""); if (keyspace.empty()) { - BOOST_LOG_TRIVIAL(warning) - << "No keyspace specified. Using keyspace clio"; + log_.warn() << "No keyspace specified. 
Using keyspace clio"; keyspace = "clio"; } @@ -1149,14 +1152,13 @@ CassandraBackend::open(bool readOnly) auto tablePrefix = config_.valueOr("table_prefix", ""); if (tablePrefix.empty()) { - BOOST_LOG_TRIVIAL(warning) << "Table prefix is empty"; + log_.warn() << "Table prefix is empty"; } cass_cluster_set_connect_timeout(cluster, 10000); auto ttl = ttl_ * 2; - BOOST_LOG_TRIVIAL(info) - << __func__ << " setting ttl to " << std::to_string(ttl); + log_.info() << "Setting ttl to " << std::to_string(ttl); auto executeSimpleStatement = [this](std::string const& query) { CassStatement* statement = makeStatement(query.c_str(), 0); @@ -1169,7 +1171,7 @@ CassandraBackend::open(bool readOnly) std::stringstream ss; ss << "nodestore: Error executing simple statement: " << rc << ", " << cass_error_desc(rc) << " - " << query; - BOOST_LOG_TRIVIAL(error) << ss.str(); + log_.error() << ss.str(); return false; } return true; @@ -1192,7 +1194,7 @@ CassandraBackend::open(bool readOnly) ss << "nodestore: Error connecting Cassandra session keyspace: " << rc << ", " << cass_error_desc(rc) << ", trying to create it ourselves"; - BOOST_LOG_TRIVIAL(error) << ss.str(); + log_.error() << ss.str(); // if the keyspace doesn't exist, try to create it session_.reset(cass_session_new()); fut = cass_session_connect(session_.get(), cluster); @@ -1203,7 +1205,7 @@ CassandraBackend::open(bool readOnly) std::stringstream ss; ss << "nodestore: Error connecting Cassandra session at all: " << rc << ", " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << ss.str(); + log_.error() << ss.str(); } else { @@ -1626,6 +1628,6 @@ CassandraBackend::open(bool readOnly) open_ = true; - BOOST_LOG_TRIVIAL(info) << "Opened CassandraBackend successfully"; + log_.info() << "Opened CassandraBackend successfully"; } } // namespace Backend diff --git a/src/backend/CassandraBackend.h b/src/backend/CassandraBackend.h index 2dcc4780..085da9b4 100644 --- a/src/backend/CassandraBackend.h +++ b/src/backend/CassandraBackend.h @@ 
-2,16 +2,19 @@ #define RIPPLE_APP_REPORTING_CASSANDRABACKEND_H_INCLUDED #include +#include +#include +#include + +#include + #include #include #include #include #include -#include + #include -#include -#include -#include #include #include #include @@ -25,6 +28,7 @@ namespace Backend { class CassandraPreparedStatement { private: + clio::Logger log_{"Backend"}; CassPrepared const* prepared_ = nullptr; public: @@ -65,7 +69,7 @@ public: std::stringstream ss; ss << "nodestore: Error preparing statement : " << rc << ", " << cass_error_desc(rc) << ". query : " << query; - BOOST_LOG_TRIVIAL(error) << ss.str(); + log_.error() << ss.str(); } cass_future_free(prepareFuture); return rc == CASS_OK; @@ -73,7 +77,7 @@ public: ~CassandraPreparedStatement() { - BOOST_LOG_TRIVIAL(trace) << __func__; + log_.trace() << "called"; if (prepared_) { cass_prepared_free(prepared_); @@ -86,6 +90,7 @@ class CassandraStatement { CassStatement* statement_ = nullptr; size_t curBindingIndex_ = 0; + clio::Logger log_{"Backend"}; public: CassandraStatement(CassandraPreparedStatement const& prepared) @@ -123,7 +128,7 @@ public: std::stringstream ss; ss << "Error binding boolean to statement: " << rc << ", " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); + log_.error() << ss.str(); throw std::runtime_error(ss.str()); } curBindingIndex_++; @@ -179,7 +184,7 @@ public: std::stringstream ss; ss << "Error binding bytes to statement: " << rc << ", " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); + log_.error() << ss.str(); throw std::runtime_error(ss.str()); } curBindingIndex_++; @@ -191,8 +196,8 @@ public: if (!statement_) throw std::runtime_error( "CassandraStatement::bindNextUInt - statement_ is null"); - BOOST_LOG_TRIVIAL(trace) - << std::to_string(curBindingIndex_) << " " << std::to_string(value); + log_.trace() << std::to_string(curBindingIndex_) << " " + << std::to_string(value); CassError rc = 
cass_statement_bind_int32(statement_, curBindingIndex_, value); if (rc != CASS_OK) @@ -200,7 +205,7 @@ public: std::stringstream ss; ss << "Error binding uint to statement: " << rc << ", " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); + log_.error() << ss.str(); throw std::runtime_error(ss.str()); } curBindingIndex_++; @@ -225,7 +230,7 @@ public: std::stringstream ss; ss << "Error binding int to statement: " << rc << ", " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); + log_.error() << ss.str(); throw std::runtime_error(ss.str()); } curBindingIndex_++; @@ -241,7 +246,7 @@ public: std::stringstream ss; ss << "Error binding int to tuple: " << rc << ", " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); + log_.error() << ss.str(); throw std::runtime_error(ss.str()); } rc = cass_tuple_set_int64(tuple, 1, second); @@ -250,7 +255,7 @@ public: std::stringstream ss; ss << "Error binding int to tuple: " << rc << ", " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); + log_.error() << ss.str(); throw std::runtime_error(ss.str()); } rc = cass_statement_bind_tuple(statement_, curBindingIndex_, tuple); @@ -259,7 +264,7 @@ public: std::stringstream ss; ss << "Error binding tuple to statement: " << rc << ", " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str(); + log_.error() << ss.str(); throw std::runtime_error(ss.str()); } cass_tuple_free(tuple); @@ -275,6 +280,7 @@ public: class CassandraResult { + clio::Logger log_{"Backend"}; CassResult const* result_ = nullptr; CassRow const* row_ = nullptr; CassIterator* iter_ = nullptr; @@ -365,7 +371,7 @@ public: std::stringstream msg; msg << "CassandraResult::getBytes - error getting value: " << rc << ", " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << msg.str(); + log_.error() << msg.str(); throw std::runtime_error(msg.str()); } curGetIndex_++; @@ 
-386,7 +392,7 @@ public: std::stringstream msg; msg << "CassandraResult::getuint256 - error getting value: " << rc << ", " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << msg.str(); + log_.error() << msg.str(); throw std::runtime_error(msg.str()); } curGetIndex_++; @@ -406,7 +412,7 @@ public: std::stringstream msg; msg << "CassandraResult::getInt64 - error getting value: " << rc << ", " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << msg.str(); + log_.error() << msg.str(); throw std::runtime_error(msg.str()); } ++curGetIndex_; @@ -490,10 +496,9 @@ public: { if (!row_) { - std::stringstream msg; - msg << __func__ << " - no result"; - BOOST_LOG_TRIVIAL(error) << msg.str(); - throw std::runtime_error(msg.str()); + std::string msg{"No result"}; + log_.error() << msg; + throw std::runtime_error(msg); } cass_bool_t val; CassError rc = @@ -501,9 +506,8 @@ public: if (rc != CASS_OK) { std::stringstream msg; - msg << __func__ << " - error getting value: " << rc << ", " - << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << msg.str(); + msg << "Error getting value: " << rc << ", " << cass_error_desc(rc); + log_.error() << msg.str(); throw std::runtime_error(msg.str()); } ++curGetIndex_; @@ -518,6 +522,7 @@ public: cass_iterator_free(iter_); } }; + inline bool isTimeout(CassError rc) { @@ -597,6 +602,7 @@ private: return ret; } + clio::Logger log_{"Backend"}; std::atomic open_{false}; std::unique_ptr session_{ @@ -751,13 +757,11 @@ public: statement.bindNextInt(ledgerSequence_ - 1); if (!executeSyncUpdate(statement)) { - BOOST_LOG_TRIVIAL(warning) - << __func__ << " Update failed for ledger " - << std::to_string(ledgerSequence_) << ". Returning"; + log_.warn() << "Update failed for ledger " + << std::to_string(ledgerSequence_) << ". 
Returning"; return false; } - BOOST_LOG_TRIVIAL(info) << __func__ << " Committed ledger " - << std::to_string(ledgerSequence_); + log_.info() << "Committed ledger " << std::to_string(ledgerSequence_); return true; } @@ -791,22 +795,20 @@ public: statement.bindNextInt(lastSync_); if (!executeSyncUpdate(statement)) { - BOOST_LOG_TRIVIAL(warning) - << __func__ << " Update failed for ledger " - << std::to_string(ledgerSequence_) << ". Returning"; + log_.warn() << "Update failed for ledger " + << std::to_string(ledgerSequence_) << ". Returning"; return false; } - BOOST_LOG_TRIVIAL(info) << __func__ << " Committed ledger " - << std::to_string(ledgerSequence_); + log_.info() << "Committed ledger " + << std::to_string(ledgerSequence_); lastSync_ = ledgerSequence_; } else { - BOOST_LOG_TRIVIAL(info) - << __func__ << " Skipping commit. sync interval is " - << std::to_string(syncInterval_) << " - last sync is " - << std::to_string(lastSync_) << " - ledger sequence is " - << std::to_string(ledgerSequence_); + log_.info() << "Skipping commit. 
sync interval is " + << std::to_string(syncInterval_) << " - last sync is " + << std::to_string(lastSync_) << " - ledger sequence is " + << std::to_string(ledgerSequence_); } return true; } @@ -826,12 +828,12 @@ public: std::optional fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override { - BOOST_LOG_TRIVIAL(trace) << __func__; + log_.trace() << "called"; CassandraStatement statement{selectLatestLedger_}; CassandraResult result = executeAsyncRead(statement, yield); if (!result.hasResult()) { - BOOST_LOG_TRIVIAL(error) + log_.error() << "CassandraBackend::fetchLatestLedgerSequence - no rows"; return {}; } @@ -843,13 +845,13 @@ public: std::uint32_t const sequence, boost::asio::yield_context& yield) const override { - BOOST_LOG_TRIVIAL(trace) << __func__; + log_.trace() << "called"; CassandraStatement statement{selectLedgerBySeq_}; statement.bindNextInt(sequence); CassandraResult result = executeAsyncRead(statement, yield); if (!result) { - BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows"; + log_.error() << "No rows"; return {}; } std::vector header = result.getBytes(); @@ -869,7 +871,7 @@ public: if (!result.hasResult()) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned"; + log_.debug() << "No rows returned"; return {}; } @@ -916,7 +918,7 @@ public: std::optional getToken(void const* key, boost::asio::yield_context& yield) const { - BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra"; + log_.trace() << "Fetching from cassandra"; CassandraStatement statement{getToken_}; statement.bindNextBytes(key, 32); @@ -924,7 +926,7 @@ public: if (!result) { - BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows"; + log_.error() << "No rows"; return {}; } int64_t token = result.getInt64(); @@ -939,14 +941,14 @@ public: ripple::uint256 const& hash, boost::asio::yield_context& yield) const override { - BOOST_LOG_TRIVIAL(trace) << __func__; + log_.trace() << "called"; CassandraStatement statement{selectTransaction_}; 
statement.bindNextBytes(hash); CassandraResult result = executeAsyncRead(statement, yield); if (!result) { - BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows"; + log_.error() << "No rows"; return {}; } return { @@ -1036,10 +1038,8 @@ public: std::unique_lock lck(throttleMutex_); if (!canAddRequest()) { - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " - << "Max outstanding requests reached. " - << "Waiting for other requests to finish"; + log_.debug() << "Max outstanding requests reached. " + << "Waiting for other requests to finish"; throttleCv_.wait(lck, [this]() { return canAddRequest(); }); } } @@ -1142,7 +1142,7 @@ public: ss << "Cassandra sync write error"; ss << ", retrying"; ss << ": " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(warning) << ss.str(); + log_.warn() << ss.str(); std::this_thread::sleep_for(std::chrono::milliseconds(5)); } } while (rc != CASS_OK); @@ -1166,7 +1166,7 @@ public: ss << "Cassandra sync update error"; ss << ", retrying"; ss << ": " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(warning) << ss.str(); + log_.warn() << ss.str(); std::this_thread::sleep_for(std::chrono::milliseconds(5)); } } while (rc != CASS_OK); @@ -1176,7 +1176,7 @@ public: CassRow const* row = cass_result_first_row(res); if (!row) { - BOOST_LOG_TRIVIAL(error) << "executeSyncUpdate - no rows"; + log_.error() << "executeSyncUpdate - no rows"; cass_result_free(res); return false; } @@ -1185,16 +1185,14 @@ public: if (rc != CASS_OK) { cass_result_free(res); - BOOST_LOG_TRIVIAL(error) - << "executeSyncUpdate - error getting result " << rc << ", " - << cass_error_desc(rc); + log_.error() << "executeSyncUpdate - error getting result " << rc + << ", " << cass_error_desc(rc); return false; } cass_result_free(res); if (success != cass_true && timedOut) { - BOOST_LOG_TRIVIAL(warning) - << __func__ << " Update failed, but timedOut is true"; + log_.warn() << "Update failed, but timedOut is true"; // if there was a timeout, the update may have succeeded in the // background on 
the first attempt. To determine if this happened, // we query the range from the db, making sure the range is what @@ -1230,15 +1228,14 @@ public: if (ec) { - BOOST_LOG_TRIVIAL(error) - << "Cannot read async cass_future_error_code"; + log_.error() << "Cannot read async cass_future_error_code"; } if (rc != CASS_OK) { std::stringstream ss; ss << "Cassandra executeAsyncRead error"; ss << ": " << cass_error_desc(rc); - BOOST_LOG_TRIVIAL(error) << ss.str(); + log_.error() << ss.str(); } if (isTimeout(rc)) { diff --git a/src/config/Config.cpp b/src/config/Config.cpp index 76829108..2facbd2d 100644 --- a/src/config/Config.cpp +++ b/src/config/Config.cpp @@ -1,6 +1,6 @@ #include +#include -#include #include namespace clio { @@ -161,11 +161,11 @@ ConfigReader::open(std::filesystem::path path) } catch (std::exception const& e) { - BOOST_LOG_TRIVIAL(error) << "Could not read configuration file from '" - << path.string() << "': " << e.what(); + LogService::error() << "Could not read configuration file from '" + << path.string() << "': " << e.what(); } - BOOST_LOG_TRIVIAL(warning) << "Using empty default configuration"; + LogService::warn() << "Using empty default configuration"; return Config{}; } diff --git a/src/etl/ETLSource.cpp b/src/etl/ETLSource.cpp index 7e98220e..2f12e919 100644 --- a/src/etl/ETLSource.cpp +++ b/src/etl/ETLSource.cpp @@ -5,18 +5,22 @@ #include #include #include -#include + #include #include #include #include +#include #include + #include +using namespace clio; + void ForwardCache::freshen() { - BOOST_LOG_TRIVIAL(trace) << "Freshening ForwardCache"; + log_.trace() << "Freshening ForwardCache"; auto numOutstanding = std::make_shared(latestForwarded_.size()); @@ -127,15 +131,11 @@ ETLSourceImpl::reconnect(boost::beast::error_code ec) if (ec != boost::asio::error::operation_aborted && ec != boost::asio::error::connection_refused) { - BOOST_LOG_TRIVIAL(error) - << __func__ << " : " - << "error code = " << ec << " - " << toString(); + log_.error() << "error 
code = " << ec << " - " << toString(); } else { - BOOST_LOG_TRIVIAL(warning) - << __func__ << " : " - << "error code = " << ec << " - " << toString(); + log_.warn() << "error code = " << ec << " - " << toString(); } // exponentially increasing timeouts, with a max of 30 seconds @@ -144,7 +144,7 @@ ETLSourceImpl::reconnect(boost::beast::error_code ec) timer_.expires_after(boost::asio::chrono::seconds(waitTime)); timer_.async_wait([this](auto ec) { bool startAgain = (ec != boost::asio::error::operation_aborted); - BOOST_LOG_TRIVIAL(trace) << __func__ << " async_wait : ec = " << ec; + log_.trace() << "async_wait : ec = " << ec; derived().close(startAgain); }); } @@ -168,8 +168,8 @@ PlainETLSource::close(bool startAgain) [this, startAgain](auto ec) { if (ec) { - BOOST_LOG_TRIVIAL(error) - << __func__ << " async_close : " + log_.error() + << " async_close : " << "error code = " << ec << " - " << toString(); } closing_ = false; @@ -213,8 +213,8 @@ SslETLSource::close(bool startAgain) [this, startAgain](auto ec) { if (ec) { - BOOST_LOG_TRIVIAL(error) - << __func__ << " async_close : " + log_.error() + << " async_close : " << "error code = " << ec << " - " << toString(); } closing_ = false; @@ -246,8 +246,7 @@ ETLSourceImpl::onResolve( boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type results) { - BOOST_LOG_TRIVIAL(trace) - << __func__ << " : ec = " << ec << " - " << toString(); + log_.trace() << "ec = " << ec << " - " << toString(); if (ec) { // try again @@ -269,8 +268,7 @@ PlainETLSource::onConnect( boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) { - BOOST_LOG_TRIVIAL(trace) - << __func__ << " : ec = " << ec << " - " << toString(); + log_.trace() << "ec = " << ec << " - " << toString(); if (ec) { // start over @@ -311,8 +309,7 @@ SslETLSource::onConnect( boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) { - BOOST_LOG_TRIVIAL(trace) - << __func__ << 
" : ec = " << ec << " - " << toString(); + log_.trace() << "ec = " << ec << " - " << toString(); if (ec) { // start over @@ -371,8 +368,7 @@ template void ETLSourceImpl::onHandshake(boost::beast::error_code ec) { - BOOST_LOG_TRIVIAL(trace) - << __func__ << " : ec = " << ec << " - " << toString(); + log_.trace() << "ec = " << ec << " - " << toString(); if (auto action = hooks_.onConnected(ec); action == ETLSourceHooks::Action::STOP) return; @@ -389,7 +385,7 @@ ETLSourceImpl::onHandshake(boost::beast::error_code ec) {"streams", {"ledger", "manifests", "validations", "transactions_proposed"}}}; std::string s = boost::json::serialize(jv); - BOOST_LOG_TRIVIAL(trace) << "Sending subscribe stream message"; + log_.trace() << "Sending subscribe stream message"; derived().ws().set_option( boost::beast::websocket::stream_base::decorator( @@ -415,8 +411,7 @@ ETLSourceImpl::onWrite( boost::beast::error_code ec, size_t bytesWritten) { - BOOST_LOG_TRIVIAL(trace) - << __func__ << " : ec = " << ec << " - " << toString(); + log_.trace() << "ec = " << ec << " - " << toString(); if (ec) { // start over @@ -433,8 +428,7 @@ template void ETLSourceImpl::onRead(boost::beast::error_code ec, size_t size) { - BOOST_LOG_TRIVIAL(trace) - << __func__ << " : ec = " << ec << " - " << toString(); + log_.trace() << "ec = " << ec << " - " << toString(); // if error or error reading message, start over if (ec) { @@ -446,8 +440,7 @@ ETLSourceImpl::onRead(boost::beast::error_code ec, size_t size) boost::beast::flat_buffer buffer; swap(readBuffer_, buffer); - BOOST_LOG_TRIVIAL(trace) - << __func__ << " : calling async_read - " << toString(); + log_.trace() << "calling async_read - " << toString(); derived().ws().async_read( readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); }); } @@ -457,7 +450,7 @@ template bool ETLSourceImpl::handleMessage() { - BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << toString(); + log_.trace() << toString(); setLastMsgTime(); connected_ = true; @@ -466,9 +459,9 @@ 
ETLSourceImpl::handleMessage() std::string msg{ static_cast(readBuffer_.data().data()), readBuffer_.size()}; - BOOST_LOG_TRIVIAL(trace) << __func__ << msg; + log_.trace() << msg; boost::json::value raw = boost::json::parse(msg); - BOOST_LOG_TRIVIAL(trace) << __func__ << " parsed"; + log_.trace() << "parsed"; boost::json::object response = raw.as_object(); uint32_t ledgerIndex = 0; @@ -487,20 +480,16 @@ ETLSourceImpl::handleMessage() setValidatedRange( {validatedLedgers.c_str(), validatedLedgers.size()}); } - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " - << "Received a message on ledger " - << " subscription stream. Message : " << response << " - " - << toString(); + log_.info() << "Received a message on ledger " + << " subscription stream. Message : " << response + << " - " << toString(); } else if ( response.contains("type") && response["type"] == "ledgerClosed") { - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " - << "Received a message on ledger " - << " subscription stream. Message : " << response << " - " - << toString(); + log_.info() << "Received a message on ledger " + << " subscription stream. 
Message : " << response + << " - " << toString(); if (response.contains("ledger_index")) { ledgerIndex = response["ledger_index"].as_int64(); @@ -539,23 +528,23 @@ ETLSourceImpl::handleMessage() if (ledgerIndex != 0) { - BOOST_LOG_TRIVIAL(trace) - << __func__ << " : " - << "Pushing ledger sequence = " << ledgerIndex << " - " - << toString(); + log_.trace() << "Pushing ledger sequence = " << ledgerIndex << " - " + << toString(); networkValidatedLedgers_->push(ledgerIndex); } return true; } catch (std::exception const& e) { - BOOST_LOG_TRIVIAL(error) << "Exception in handleMessage : " << e.what(); + log_.error() << "Exception in handleMessage : " << e.what(); return false; } } class AsyncCallData { + clio::Logger log_{"ETL"}; + std::unique_ptr cur_; std::unique_ptr next_; @@ -585,11 +574,11 @@ public: unsigned char prefix = marker.data()[0]; - BOOST_LOG_TRIVIAL(debug) - << "Setting up AsyncCallData. marker = " << ripple::strHex(marker) - << " . prefix = " << ripple::strHex(std::string(1, prefix)) - << " . nextPrefix_ = " - << ripple::strHex(std::string(1, nextPrefix_)); + log_.debug() << "Setting up AsyncCallData. marker = " + << ripple::strHex(marker) + << " . prefix = " << ripple::strHex(std::string(1, prefix)) + << " . nextPrefix_ = " + << ripple::strHex(std::string(1, nextPrefix_)); assert(nextPrefix_ > prefix || nextPrefix_ == 0x00); @@ -609,26 +598,24 @@ public: bool abort, bool cacheOnly = false) { - BOOST_LOG_TRIVIAL(trace) << "Processing response. " - << "Marker prefix = " << getMarkerPrefix(); + log_.trace() << "Processing response. 
" + << "Marker prefix = " << getMarkerPrefix(); if (abort) { - BOOST_LOG_TRIVIAL(error) << "AsyncCallData aborted"; + log_.error() << "AsyncCallData aborted"; return CallStatus::ERRORED; } if (!status_.ok()) { - BOOST_LOG_TRIVIAL(error) - << "AsyncCallData status_ not ok: " - << " code = " << status_.error_code() - << " message = " << status_.error_message(); + log_.error() << "AsyncCallData status_ not ok: " + << " code = " << status_.error_code() + << " message = " << status_.error_message(); return CallStatus::ERRORED; } if (!next_->is_unlimited()) { - BOOST_LOG_TRIVIAL(warning) - << "AsyncCallData is_unlimited is false. Make sure " - "secure_gateway is set correctly at the ETL source"; + log_.warn() << "AsyncCallData is_unlimited is false. Make sure " + "secure_gateway is set correctly at the ETL source"; } std::swap(cur_, next_); @@ -651,7 +638,7 @@ public: call(stub, cq); } - BOOST_LOG_TRIVIAL(trace) << "Writing objects"; + log_.trace() << "Writing objects"; std::vector cacheUpdates; cacheUpdates.reserve(cur_->ledger_objects().objects_size()); for (int i = 0; i < cur_->ledger_objects().objects_size(); ++i) @@ -681,7 +668,7 @@ public: } backend.cache().update( cacheUpdates, request_.ledger().sequence(), cacheOnly); - BOOST_LOG_TRIVIAL(trace) << "Wrote objects"; + log_.trace() << "Wrote objects"; return more ? CallStatus::MORE : CallStatus::DONE; } @@ -745,8 +732,8 @@ ETLSourceImpl::loadInitialLedger( calls.emplace_back(sequence, markers[i], nextMarker); } - BOOST_LOG_TRIVIAL(debug) << "Starting data download for ledger " << sequence - << ". Using source = " << toString(); + log_.debug() << "Starting data download for ledger " << sequence + << ". 
Using source = " << toString(); for (auto& c : calls) c.call(stub_, cq); @@ -764,21 +751,19 @@ ETLSourceImpl::loadInitialLedger( if (!ok) { - BOOST_LOG_TRIVIAL(error) << "loadInitialLedger - ok is false"; + log_.error() << "loadInitialLedger - ok is false"; return false; // handle cancelled } else { - BOOST_LOG_TRIVIAL(trace) - << "Marker prefix = " << ptr->getMarkerPrefix(); + log_.trace() << "Marker prefix = " << ptr->getMarkerPrefix(); auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly); if (result != AsyncCallData::CallStatus::MORE) { numFinished++; - BOOST_LOG_TRIVIAL(debug) - << "Finished a marker. " - << "Current number of finished = " << numFinished; + log_.debug() << "Finished a marker. " + << "Current number of finished = " << numFinished; std::string lastKey = ptr->getLastKey(); if (lastKey.size()) edgeKeys.push_back(ptr->getLastKey()); @@ -789,16 +774,14 @@ ETLSourceImpl::loadInitialLedger( } if (backend_->cache().size() > progress) { - BOOST_LOG_TRIVIAL(info) - << "Downloaded " << backend_->cache().size() - << " records from rippled"; + log_.info() << "Downloaded " << backend_->cache().size() + << " records from rippled"; progress += incr; } } } - BOOST_LOG_TRIVIAL(info) - << __func__ << " - finished loadInitialLedger. cache size = " - << backend_->cache().size(); + log_.info() << "Finished loadInitialLedger. 
cache size = " + << backend_->cache().size(); size_t numWrites = 0; if (!abort) { @@ -808,9 +791,7 @@ ETLSourceImpl::loadInitialLedger( auto start = std::chrono::system_clock::now(); for (auto& key : edgeKeys) { - BOOST_LOG_TRIVIAL(debug) - << __func__ - << " writing edge key = " << ripple::strHex(key); + log_.debug() << "Writing edge key = " << ripple::strHex(key); auto succ = backend_->cache().getSuccessor( *ripple::uint256::fromVoidChecked(key), sequence); if (succ) @@ -840,10 +821,9 @@ ETLSourceImpl::loadInitialLedger( assert(succ); if (succ->key == cur->key) { - BOOST_LOG_TRIVIAL(debug) - << __func__ << " Writing book successor = " - << ripple::strHex(base) << " - " - << ripple::strHex(cur->key); + log_.debug() << "Writing book successor = " + << ripple::strHex(base) << " - " + << ripple::strHex(cur->key); backend_->writeSuccessor( uint256ToString(base), @@ -855,8 +835,7 @@ ETLSourceImpl::loadInitialLedger( } prev = std::move(cur->key); if (numWrites % 100000 == 0 && numWrites != 0) - BOOST_LOG_TRIVIAL(info) << __func__ << " Wrote " - << numWrites << " book successors"; + log_.info() << "Wrote " << numWrites << " book successors"; } backend_->writeSuccessor( @@ -869,9 +848,8 @@ ETLSourceImpl::loadInitialLedger( auto seconds = std::chrono::duration_cast(end - start) .count(); - BOOST_LOG_TRIVIAL(info) - << __func__ - << " - Looping through cache and submitting all writes took " + log_.info() + << "Looping through cache and submitting all writes took " << seconds << " seconds. numWrites = " << std::to_string(numWrites); } @@ -902,11 +880,10 @@ ETLSourceImpl::fetchLedger( grpc::Status status = stub_->GetLedger(&context, request, &response); if (status.ok() && !response.is_unlimited()) { - BOOST_LOG_TRIVIAL(warning) - << "ETLSourceImpl::fetchLedger - is_unlimited is " - "false. Make sure secure_gateway is set " - "correctly on the ETL source. 
source = " - << toString() << " status = " << status.error_message(); + log_.warn() << "ETLSourceImpl::fetchLedger - is_unlimited is " + "false. Make sure secure_gateway is set " + "correctly on the ETL source. source = " + << toString() << " status = " << status.error_message(); } return {status, std::move(response)}; } @@ -951,8 +928,7 @@ ETLLoadBalancer::ETLLoadBalancer( entry, ioContext, backend, subscriptions, nwvl, *this); sources_.push_back(std::move(source)); - BOOST_LOG_TRIVIAL(info) << __func__ << " : added etl source - " - << sources_.back()->toString(); + log_.info() << "Added etl source - " << sources_.back()->toString(); } } @@ -965,9 +941,9 @@ ETLLoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly) source->loadInitialLedger(sequence, downloadRanges_, cacheOnly); if (!res) { - BOOST_LOG_TRIVIAL(error) << "Failed to download initial ledger." - << " Sequence = " << sequence - << " source = " << source->toString(); + log_.error() << "Failed to download initial ledger." 
+ << " Sequence = " << sequence + << " source = " << source->toString(); } return res; }, @@ -982,26 +958,24 @@ ETLLoadBalancer::fetchLedger( { org::xrpl::rpc::v1::GetLedgerResponse response; bool success = execute( - [&response, ledgerSequence, getObjects, getObjectNeighbors]( + [&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_]( auto& source) { auto [status, data] = source->fetchLedger( ledgerSequence, getObjects, getObjectNeighbors); response = std::move(data); if (status.ok() && response.validated()) { - BOOST_LOG_TRIVIAL(info) - << "Successfully fetched ledger = " << ledgerSequence - << " from source = " << source->toString(); + log.info() << "Successfully fetched ledger = " << ledgerSequence + << " from source = " << source->toString(); return true; } else { - BOOST_LOG_TRIVIAL(warning) - << "Error getting ledger = " << ledgerSequence - << " Reply : " << response.DebugString() - << " error_code : " << status.error_code() - << " error_msg : " << status.error_message() - << " source = " << source->toString(); + log.warn() << "Error getting ledger = " << ledgerSequence + << ", Reply: " << response.DebugString() + << ", error_code: " << status.error_code() + << ", error_msg: " << status.error_message() + << ", source = " << source->toString(); return false; } }, @@ -1042,7 +1016,7 @@ ETLSourceImpl::forwardToRippled( { if (auto resp = forwardCache_.get(request); resp) { - BOOST_LOG_TRIVIAL(debug) << "request hit forwardCache"; + log_.debug() << "request hit forwardCache"; return resp; } @@ -1056,14 +1030,13 @@ ETLSourceImpl::requestFromRippled( std::string const& clientIp, boost::asio::yield_context& yield) const { - BOOST_LOG_TRIVIAL(trace) << "Attempting to forward request to tx. " - << "request = " << boost::json::serialize(request); + log_.trace() << "Attempting to forward request to tx. 
" + << "request = " << boost::json::serialize(request); boost::json::object response; if (!connected_) { - BOOST_LOG_TRIVIAL(error) - << "Attempted to proxy but failed to connect to tx"; + log_.error() << "Attempted to proxy but failed to connect to tx"; return {}; } namespace beast = boost::beast; // from @@ -1077,7 +1050,7 @@ ETLSourceImpl::requestFromRippled( // These objects perform our I/O tcp::resolver resolver{ioc_}; - BOOST_LOG_TRIVIAL(trace) << "Creating websocket"; + log_.trace() << "Creating websocket"; auto ws = std::make_unique>(ioc_); // Look up the domain name @@ -1087,7 +1060,7 @@ ETLSourceImpl::requestFromRippled( ws->next_layer().expires_after(std::chrono::seconds(3)); - BOOST_LOG_TRIVIAL(trace) << "Connecting websocket"; + log_.trace() << "Connecting websocket"; // Make the connection on the IP address we get from a lookup ws->next_layer().async_connect(results, yield[ec]); if (ec) @@ -1106,15 +1079,15 @@ ETLSourceImpl::requestFromRippled( " websocket-client-coro"); req.set(http::field::forwarded, "for=" + clientIp); })); - BOOST_LOG_TRIVIAL(trace) << "client ip: " << clientIp; + log_.trace() << "client ip: " << clientIp; - BOOST_LOG_TRIVIAL(trace) << "Performing websocket handshake"; + log_.trace() << "Performing websocket handshake"; // Perform the websocket handshake ws->async_handshake(ip_, "/", yield[ec]); if (ec) return {}; - BOOST_LOG_TRIVIAL(trace) << "Sending request"; + log_.trace() << "Sending request"; // Send the message ws->async_write( net::buffer(boost::json::serialize(request)), yield[ec]); @@ -1132,11 +1105,11 @@ ETLSourceImpl::requestFromRippled( if (!parsed.is_object()) { - BOOST_LOG_TRIVIAL(error) - << "Error parsing response: " << std::string{begin, end}; + log_.error() << "Error parsing response: " + << std::string{begin, end}; return {}; } - BOOST_LOG_TRIVIAL(trace) << "Successfully forward request"; + log_.trace() << "Successfully forward request"; response = parsed.as_object(); @@ -1145,7 +1118,7 @@ 
ETLSourceImpl::requestFromRippled( } catch (std::exception const& e) { - BOOST_LOG_TRIVIAL(error) << "Encountered exception : " << e.what(); + log_.error() << "Encountered exception : " << e.what(); return {}; } } @@ -1162,47 +1135,38 @@ ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence) { auto& source = sources_[sourceIdx]; - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " - << "Attempting to execute func. ledger sequence = " - << ledgerSequence << " - source = " << source->toString(); + log_.debug() << "Attempting to execute func. ledger sequence = " + << ledgerSequence << " - source = " << source->toString(); if (source->hasLedger(ledgerSequence) || true) { bool res = f(source); if (res) { - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " - << "Successfully executed func at source = " - << source->toString() - << " - ledger sequence = " << ledgerSequence; + log_.debug() << "Successfully executed func at source = " + << source->toString() + << " - ledger sequence = " << ledgerSequence; break; } else { - BOOST_LOG_TRIVIAL(warning) - << __func__ << " : " - << "Failed to execute func at source = " - << source->toString() - << " - ledger sequence = " << ledgerSequence; + log_.warn() << "Failed to execute func at source = " + << source->toString() + << " - ledger sequence = " << ledgerSequence; } } else { - BOOST_LOG_TRIVIAL(warning) - << __func__ << " : " - << "Ledger not present at source = " << source->toString() - << " - ledger sequence = " << ledgerSequence; + log_.warn() << "Ledger not present at source = " + << source->toString() + << " - ledger sequence = " << ledgerSequence; } sourceIdx = (sourceIdx + 1) % sources_.size(); numAttempts++; if (numAttempts % sources_.size() == 0) { - BOOST_LOG_TRIVIAL(error) - << __func__ << " : " - << "Error executing function " - << " - ledger sequence = " << ledgerSequence - << " - Tried all sources. 
Sleeping and trying again"; + log_.error() << "Error executing function " + << " - ledger sequence = " << ledgerSequence + << " - Tried all sources. Sleeping and trying again"; std::this_thread::sleep_for(std::chrono::seconds(2)); } } diff --git a/src/etl/ETLSource.h b/src/etl/ETLSource.h index 55cbe653..f877c930 100644 --- a/src/etl/ETLSource.h +++ b/src/etl/ETLSource.h @@ -1,6 +1,15 @@ #ifndef RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED #define RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED +#include +#include +#include +#include +#include + +#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h" +#include + #include #include #include @@ -8,14 +17,6 @@ #include #include -#include -#include -#include -#include - -#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h" -#include - class ETLLoadBalancer; class ETLSource; class ProbingETLSource; @@ -32,6 +33,7 @@ class ForwardCache { using response_type = std::optional; + clio::Logger log_{"ETL"}; mutable std::atomic_bool stopping_ = false; mutable std::shared_mutex mtx_; std::unordered_map latestForwarded_; @@ -127,6 +129,9 @@ public: { } +protected: + clio::Logger log_{"ETL"}; + private: friend ForwardCache; friend ProbingETLSource; @@ -161,7 +166,7 @@ class ETLSourceImpl : public ETLSource std::vector> validatedLedgers_; - std::string validatedLedgersRaw_; + std::string validatedLedgersRaw_{"N/A"}; std::shared_ptr networkValidatedLedgers_; @@ -218,7 +223,7 @@ protected: void run() override { - BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << toString(); + log_.trace() << toString(); auto const host = ip_; auto const port = wsPort_; @@ -292,14 +297,12 @@ public: stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub( grpc::CreateCustomChannel( ss.str(), grpc::InsecureChannelCredentials(), chArgs)); - BOOST_LOG_TRIVIAL(debug) - << "Made stub for remote = " << toString(); + log_.debug() << "Made stub for remote = " << toString(); } catch (std::exception const& e) { - BOOST_LOG_TRIVIAL(debug) - << "Exception while creating stub = " << 
e.what() - << " . Remote = " << toString(); + log_.debug() << "Exception while creating stub = " << e.what() + << " . Remote = " << toString(); } } } @@ -371,7 +374,6 @@ public: getValidatedRange() const { std::lock_guard lck(mtx_); - return validatedLedgersRaw_; } @@ -389,9 +391,8 @@ public: std::string toString() const override { - return "{ validated_ledger : " + getValidatedRange() + - " , ip : " + ip_ + " , web socket port : " + wsPort_ + - ", grpc port : " + grpcPort_ + " }"; + return "{validated_ledger: " + getValidatedRange() + ", ip: " + ip_ + + ", web socket port: " + wsPort_ + ", grpc port: " + grpcPort_ + "}"; } boost::json::object @@ -592,8 +593,8 @@ public: class ETLLoadBalancer { private: + clio::Logger log_{"ETL"}; std::vector> sources_; - std::uint32_t downloadRanges_ = 16; public: diff --git a/src/etl/NFTHelpers.cpp b/src/etl/NFTHelpers.cpp index 00eb58d3..cf154ebc 100644 --- a/src/etl/NFTHelpers.cpp +++ b/src/etl/NFTHelpers.cpp @@ -107,8 +107,7 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) NFTsData(tokenIDResult.front(), *owner, txMeta, false)}; std::stringstream msg; - msg << __func__ << " - unexpected NFTokenMint data in tx " - << sttx.getTransactionID(); + msg << " - unexpected NFTokenMint data in tx " << sttx.getTransactionID(); throw std::runtime_error(msg.str()); } @@ -173,7 +172,7 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) } std::stringstream msg; - msg << __func__ << " - could not determine owner at burntime for tx " + msg << " - could not determine owner at burntime for tx " << sttx.getTransactionID(); throw std::runtime_error(msg.str()); } @@ -198,7 +197,7 @@ getNFTokenAcceptOfferData( if (affectedBuyOffer == txMeta.getNodes().end()) { std::stringstream msg; - msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx " + msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID(); throw std::runtime_error(msg.str()); } @@ -228,7 +227,7 @@ 
getNFTokenAcceptOfferData( if (affectedSellOffer == txMeta.getNodes().end()) { std::stringstream msg; - msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx " + msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID(); throw std::runtime_error(msg.str()); } @@ -278,7 +277,7 @@ getNFTokenAcceptOfferData( } std::stringstream msg; - msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx " + msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID(); throw std::runtime_error(msg.str()); } diff --git a/src/etl/ProbingETLSource.cpp b/src/etl/ProbingETLSource.cpp index 1eac34b1..05ec3b14 100644 --- a/src/etl/ProbingETLSource.cpp +++ b/src/etl/ProbingETLSource.cpp @@ -1,4 +1,7 @@ #include +#include + +using namespace clio; ProbingETLSource::ProbingETLSource( clio::Config const& config, @@ -85,7 +88,8 @@ std::string ProbingETLSource::toString() const { if (!currentSrc_) - return "{ probing }"; + return "{probing... ws: " + plainSrc_->toString() + + ", wss: " + sslSrc_->toString() + "}"; return currentSrc_->toString(); } @@ -148,9 +152,8 @@ ProbingETLSource::make_SSLHooks() noexcept { plainSrc_->pause(); currentSrc_ = sslSrc_; - BOOST_LOG_TRIVIAL(info) - << "Selected WSS as the main source: " - << currentSrc_->toString(); + log_.info() << "Selected WSS as the main source: " + << currentSrc_->toString(); } return ETLSourceHooks::Action::PROCEED; }, @@ -179,9 +182,8 @@ ProbingETLSource::make_PlainHooks() noexcept { sslSrc_->pause(); currentSrc_ = plainSrc_; - BOOST_LOG_TRIVIAL(info) - << "Selected Plain WS as the main source: " - << currentSrc_->toString(); + log_.info() << "Selected Plain WS as the main source: " + << currentSrc_->toString(); } return ETLSourceHooks::Action::PROCEED; }, diff --git a/src/etl/ProbingETLSource.h b/src/etl/ProbingETLSource.h index a39f0ce3..3eaf6a04 100644 --- a/src/etl/ProbingETLSource.h +++ b/src/etl/ProbingETLSource.h @@ -11,6 +11,7 @@ #include #include +#include /// This 
ETLSource implementation attempts to connect over both secure websocket /// and plain websocket. First to connect pauses the other and the probing is @@ -18,6 +19,8 @@ /// connection the probing is kickstarted again. class ProbingETLSource : public ETLSource { + clio::Logger log_{"ETL"}; + std::mutex mtx_; boost::asio::ssl::context sslCtx_; std::shared_ptr sslSrc_; diff --git a/src/etl/ReportingETL.cpp b/src/etl/ReportingETL.cpp index d3027e03..6264803b 100644 --- a/src/etl/ReportingETL.cpp +++ b/src/etl/ReportingETL.cpp @@ -1,20 +1,25 @@ #include +#include + #include #include +#include +#include -#include #include #include #include #include + #include #include #include -#include #include #include -namespace detail { +using namespace clio; + +namespace clio::detail { /// Convenience function for printing out basic ledger info std::string toString(ripple::LedgerInfo const& info) @@ -26,7 +31,7 @@ toString(ripple::LedgerInfo const& info) << " ParentHash : " << strHex(info.parentHash) << " }"; return ss.str(); } -} // namespace detail +} // namespace clio::detail FormattedTransactionsData ReportingETL::insertTransactions( @@ -43,9 +48,7 @@ ReportingETL::insertTransactions( ripple::SerialIter it{raw->data(), raw->size()}; ripple::STTx sttx{it}; - BOOST_LOG_TRIVIAL(trace) - << __func__ << " : " - << "Inserting transaction = " << sttx.getTransactionID(); + log_.trace() << "Inserting transaction = " << sttx.getTransactionID(); ripple::TxMeta txMeta{ sttx.getTransactionID(), ledger.seq, txn.metadata_blob()}; @@ -97,8 +100,7 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence) auto rng = backend_->hardFetchLedgerRangeNoThrow(); if (rng) { - BOOST_LOG_TRIVIAL(fatal) << __func__ << " : " - << "Database is not empty"; + log_.fatal() << "Database is not empty"; assert(false); return {}; } @@ -114,23 +116,21 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence) ripple::LedgerInfo lgrInfo = deserializeHeader(ripple::makeSlice(ledgerData->ledger_header())); - 
BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " - << "Deserialized ledger header. " << detail::toString(lgrInfo); + log_.debug() << "Deserialized ledger header. " << detail::toString(lgrInfo); auto start = std::chrono::system_clock::now(); backend_->startWrites(); - BOOST_LOG_TRIVIAL(debug) << __func__ << " started writes"; + log_.debug() << "Started writes"; backend_->writeLedger( lgrInfo, std::move(*ledgerData->mutable_ledger_header())); - BOOST_LOG_TRIVIAL(debug) << __func__ << " wrote ledger"; + log_.debug() << "Wrote ledger"; FormattedTransactionsData insertTxResult = insertTransactions(lgrInfo, *ledgerData); - BOOST_LOG_TRIVIAL(debug) << __func__ << " inserted txns"; + log_.debug() << "Inserted txns"; // download the full account state map. This function downloads full ledger // data and pushes the downloaded data into the writeQueue. asyncWriter @@ -138,7 +138,7 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence) // Once the below call returns, all data has been pushed into the queue loadBalancer_->loadInitialLedger(startingSequence); - BOOST_LOG_TRIVIAL(debug) << __func__ << " loaded initial ledger"; + log_.debug() << "Loaded initial ledger"; if (!stopping_) { @@ -150,20 +150,19 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence) backend_->finishWrites(startingSequence); auto end = std::chrono::system_clock::now(); - BOOST_LOG_TRIVIAL(debug) << "Time to download and store ledger = " - << ((end - start).count()) / 1000000000.0; + log_.debug() << "Time to download and store ledger = " + << ((end - start).count()) / 1000000000.0; return lgrInfo; } void ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo) { - BOOST_LOG_TRIVIAL(debug) - << __func__ << " - Publishing ledger " << std::to_string(lgrInfo.seq); + log_.info() << "Publishing ledger " << std::to_string(lgrInfo.seq); if (!writing_) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " - Updating cache"; + log_.info() << "Updating cache"; std::vector diff = 
Backend::synchronousAndRetryOnTimeout([&](auto yield) { @@ -205,12 +204,11 @@ ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo) subscriptions_->pubBookChanges(lgrInfo, transactions); - BOOST_LOG_TRIVIAL(info) << __func__ << " - Published ledger " - << std::to_string(lgrInfo.seq); + log_.info() << "Published ledger " << std::to_string(lgrInfo.seq); } else - BOOST_LOG_TRIVIAL(info) << __func__ << " - Skipping publishing ledger " - << std::to_string(lgrInfo.seq); + log_.info() << "Skipping publishing ledger " + << std::to_string(lgrInfo.seq); setLastPublish(); } @@ -219,9 +217,7 @@ ReportingETL::publishLedger( uint32_t ledgerSequence, std::optional maxAttempts) { - BOOST_LOG_TRIVIAL(info) - << __func__ << " : " - << "Attempting to publish ledger = " << ledgerSequence; + log_.info() << "Attempting to publish ledger = " << ledgerSequence; size_t numAttempts = 0; while (!stopping_) { @@ -229,17 +225,15 @@ ReportingETL::publishLedger( if (!range || range->maxSequence < ledgerSequence) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " : " - << "Trying to publish. Could not find " - "ledger with sequence = " - << ledgerSequence; + log_.debug() << "Trying to publish. Could not find " + "ledger with sequence = " + << ledgerSequence; // We try maxAttempts times to publish the ledger, waiting one // second in between each attempt. 
if (maxAttempts && numAttempts >= maxAttempts) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " : " - << "Failed to publish ledger after " - << numAttempts << " attempts."; + log_.debug() << "Failed to publish ledger after " << numAttempts + << " attempts."; return false; } std::this_thread::sleep_for(std::chrono::seconds(1)); @@ -264,25 +258,19 @@ ReportingETL::publishLedger( std::optional ReportingETL::fetchLedgerData(uint32_t seq) { - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " - << "Attempting to fetch ledger with sequence = " << seq; + log_.debug() << "Attempting to fetch ledger with sequence = " << seq; std::optional response = loadBalancer_->fetchLedger(seq, false, false); if (response) - BOOST_LOG_TRIVIAL(trace) - << __func__ << " : " - << "GetLedger reply = " << response->DebugString(); + log_.trace() << "GetLedger reply = " << response->DebugString(); return response; } std::optional ReportingETL::fetchLedgerDataAndDiff(uint32_t seq) { - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " - << "Attempting to fetch ledger with sequence = " << seq; + log_.debug() << "Attempting to fetch ledger with sequence = " << seq; std::optional response = loadBalancer_->fetchLedger( @@ -291,47 +279,36 @@ ReportingETL::fetchLedgerDataAndDiff(uint32_t seq) !backend_->cache().isFull() || backend_->cache().latestLedgerSequence() >= seq); if (response) - BOOST_LOG_TRIVIAL(trace) - << __func__ << " : " - << "GetLedger reply = " << response->DebugString(); + log_.trace() << "GetLedger reply = " << response->DebugString(); return response; } std::pair ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " : " - << "Beginning ledger update"; - + log_.debug() << "Beginning ledger update"; ripple::LedgerInfo lgrInfo = deserializeHeader(ripple::makeSlice(rawData.ledger_header())); - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " - << "Deserialized ledger header. 
" << detail::toString(lgrInfo); - + log_.debug() << "Deserialized ledger header. " << detail::toString(lgrInfo); backend_->startWrites(); - - BOOST_LOG_TRIVIAL(debug) << __func__ << " : " - << "started writes"; + log_.debug() << "started writes"; backend_->writeLedger(lgrInfo, std::move(*rawData.mutable_ledger_header())); - - BOOST_LOG_TRIVIAL(debug) << __func__ << " : " - << "wrote ledger header"; + log_.debug() << "wrote ledger header"; // Write successor info, if included from rippled if (rawData.object_neighbors_included()) { - BOOST_LOG_TRIVIAL(debug) << __func__ << " object neighbors included"; + log_.debug() << "object neighbors included"; for (auto& obj : *(rawData.mutable_book_successors())) { auto firstBook = std::move(*obj.mutable_first_book()); if (!firstBook.size()) firstBook = uint256ToString(Backend::lastKey); - BOOST_LOG_TRIVIAL(debug) << __func__ << " writing book successor " - << ripple::strHex(obj.book_base()) << " - " - << ripple::strHex(firstBook); + log_.debug() << "writing book successor " + << ripple::strHex(obj.book_base()) << " - " + << ripple::strHex(firstBook); backend_->writeSuccessor( std::move(*obj.mutable_book_base()), @@ -352,23 +329,20 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) if (obj.mod_type() == org::xrpl::rpc::v1::RawLedgerObject::DELETED) { - BOOST_LOG_TRIVIAL(debug) - << __func__ - << " modifying successors for deleted object " - << ripple::strHex(obj.key()) << " - " - << ripple::strHex(*predPtr) << " - " - << ripple::strHex(*succPtr); + log_.debug() << "Modifying successors for deleted object " + << ripple::strHex(obj.key()) << " - " + << ripple::strHex(*predPtr) << " - " + << ripple::strHex(*succPtr); backend_->writeSuccessor( std::move(*predPtr), lgrInfo.seq, std::move(*succPtr)); } else { - BOOST_LOG_TRIVIAL(debug) - << __func__ << " adding successor for new object " - << ripple::strHex(obj.key()) << " - " - << ripple::strHex(*predPtr) << " - " - << ripple::strHex(*succPtr); + 
log_.debug() << "adding successor for new object " + << ripple::strHex(obj.key()) << " - " + << ripple::strHex(*predPtr) << " - " + << ripple::strHex(*succPtr); backend_->writeSuccessor( std::move(*predPtr), @@ -381,8 +355,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) } } else - BOOST_LOG_TRIVIAL(debug) << __func__ << " object modified " - << ripple::strHex(obj.key()); + log_.debug() << "object modified " << ripple::strHex(obj.key()); } } std::vector cacheUpdates; @@ -396,15 +369,13 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) assert(key); cacheUpdates.push_back( {*key, {obj.mutable_data()->begin(), obj.mutable_data()->end()}}); - BOOST_LOG_TRIVIAL(debug) - << __func__ << " key = " << ripple::strHex(*key) - << " - mod type = " << obj.mod_type(); + log_.debug() << "key = " << ripple::strHex(*key) + << " - mod type = " << obj.mod_type(); if (obj.mod_type() != org::xrpl::rpc::v1::RawLedgerObject::MODIFIED && !rawData.object_neighbors_included()) { - BOOST_LOG_TRIVIAL(debug) - << __func__ << " object neighbors not included. using cache"; + log_.debug() << "object neighbors not included. using cache"; if (!backend_->cache().isFull() || backend_->cache().latestLedgerSequence() != lgrInfo.seq - 1) throw std::runtime_error( @@ -423,9 +394,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) checkBookBase = isBookDir(*key, *blob); if (checkBookBase) { - BOOST_LOG_TRIVIAL(debug) - << __func__ - << " is book dir. key = " << ripple::strHex(*key); + log_.debug() << "Is book dir. 
key = " << ripple::strHex(*key); auto bookBase = getBookBase(*key); auto oldFirstDir = backend_->cache().getSuccessor(bookBase, lgrInfo.seq - 1); @@ -435,9 +404,8 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) if ((isDeleted && key == oldFirstDir->key) || (!isDeleted && key < oldFirstDir->key)) { - BOOST_LOG_TRIVIAL(debug) - << __func__ - << " Need to recalculate book base successor. base = " + log_.debug() + << "Need to recalculate book base successor. base = " << ripple::strHex(bookBase) << " - key = " << ripple::strHex(*key) << " - isDeleted = " << isDeleted @@ -458,8 +426,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) // rippled didn't send successor information, so use our cache if (!rawData.object_neighbors_included()) { - BOOST_LOG_TRIVIAL(debug) - << __func__ << " object neighbors not included. using cache"; + log_.debug() << "object neighbors not included. using cache"; if (!backend_->cache().isFull() || backend_->cache().latestLedgerSequence() != lgrInfo.seq) throw std::runtime_error( @@ -477,11 +444,10 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) ub = {Backend::lastKey, {}}; if (obj.blob.size() == 0) { - BOOST_LOG_TRIVIAL(debug) - << __func__ << " writing successor for deleted object " - << ripple::strHex(obj.key) << " - " - << ripple::strHex(lb->key) << " - " - << ripple::strHex(ub->key); + log_.debug() << "writing successor for deleted object " + << ripple::strHex(obj.key) << " - " + << ripple::strHex(lb->key) << " - " + << ripple::strHex(ub->key); backend_->writeSuccessor( uint256ToString(lb->key), @@ -499,11 +465,10 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) lgrInfo.seq, uint256ToString(ub->key)); - BOOST_LOG_TRIVIAL(debug) - << __func__ << " writing successor for new object " - << ripple::strHex(lb->key) << " - " - << ripple::strHex(obj.key) << " - " - << ripple::strHex(ub->key); + log_.debug() << "writing 
successor for new object " + << ripple::strHex(lb->key) << " - " + << ripple::strHex(obj.key) << " - " + << ripple::strHex(ub->key); } } for (auto const& base : bookSuccessorsToCalculate) @@ -516,10 +481,9 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) lgrInfo.seq, uint256ToString(succ->key)); - BOOST_LOG_TRIVIAL(debug) - << __func__ << " Updating book successor " - << ripple::strHex(base) << " - " - << ripple::strHex(succ->key); + log_.debug() + << "Updating book successor " << ripple::strHex(base) + << " - " << ripple::strHex(succ->key); } else { @@ -528,42 +492,33 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) lgrInfo.seq, uint256ToString(Backend::lastKey)); - BOOST_LOG_TRIVIAL(debug) - << __func__ << " Updating book successor " - << ripple::strHex(base) << " - " - << ripple::strHex(Backend::lastKey); + log_.debug() + << "Updating book successor " << ripple::strHex(base) + << " - " << ripple::strHex(Backend::lastKey); } } } - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " + log_.debug() << "Inserted/modified/deleted all objects. Number of objects = " << rawData.ledger_objects().objects_size(); FormattedTransactionsData insertTxResult = insertTransactions(lgrInfo, rawData); - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " - << "Inserted all transactions. Number of transactions = " - << rawData.transactions_list().transactions_size(); + log_.debug() << "Inserted all transactions. 
Number of transactions = " + << rawData.transactions_list().transactions_size(); backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData)); backend_->writeNFTs(std::move(insertTxResult.nfTokensData)); backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData)); - BOOST_LOG_TRIVIAL(debug) << __func__ << " : " - << "wrote account_tx"; + log_.debug() << "wrote account_tx"; + auto start = std::chrono::system_clock::now(); - bool success = backend_->finishWrites(lgrInfo.seq); - auto end = std::chrono::system_clock::now(); - auto duration = ((end - start).count()) / 1000000000.0; - BOOST_LOG_TRIVIAL(debug) - << __func__ << " finished writes. took " << std::to_string(duration); - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " - << "Finished ledger update. " << detail::toString(lgrInfo); + log_.debug() << "Finished writes. took " << std::to_string(duration); + log_.debug() << "Finished ledger update. " << detail::toString(lgrInfo); + return {lgrInfo, success}; } @@ -596,8 +551,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) * optional, returns. */ - BOOST_LOG_TRIVIAL(debug) << __func__ << " : " - << "Starting etl pipeline"; + log_.debug() << "Starting etl pipeline"; writing_ = true; auto rng = backend_->hardFetchLedgerRangeNoThrow(); @@ -670,13 +624,12 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) fetchResponse->transactions_list().transactions_size() / time; - BOOST_LOG_TRIVIAL(info) - << "Extract phase time = " << time - << " . Extract phase tps = " << tps - << " . Avg extract time = " - << totalTime / (currentSequence - startSequence + 1) - << " . thread num = " << i - << " . seq = " << currentSequence; + log_.info() << "Extract phase time = " << time + << " . Extract phase tps = " << tps + << " . Avg extract time = " + << totalTime / (currentSequence - startSequence + 1) + << " . thread num = " << i + << " . 
seq = " << currentSequence; transformQueue->push(std::move(fetchResponse)); currentSequence += numExtractors; @@ -720,7 +673,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) auto duration = ((end - start).count()) / 1000000000.0; if (success) - BOOST_LOG_TRIVIAL(info) + log_.info() << "Load phase of etl : " << "Successfully wrote ledger! Ledger info: " << detail::toString(lgrInfo) << ". txn count = " << numTxns @@ -729,7 +682,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) << ". load txns per second = " << numTxns / duration << ". load objs per second = " << numObjects / duration; else - BOOST_LOG_TRIVIAL(error) + log_.error() << "Error writing ledger. " << detail::toString(lgrInfo); // success is false if the ledger was already written if (success) @@ -747,7 +700,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) { deleting_ = true; ioContext_.post([this, &minSequence]() { - BOOST_LOG_TRIVIAL(info) << "Running online delete"; + log_.info() << "Running online delete"; Backend::synchronous( [&](boost::asio::yield_context& yield) { @@ -755,7 +708,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) *onlineDeleteInterval_, yield); }); - BOOST_LOG_TRIVIAL(info) << "Finished online delete"; + log_.info() << "Finished online delete"; auto rng = backend_->fetchLedgerRange(); minSequence = rng->minSequence; deleting_ = false; @@ -774,13 +727,12 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) for (auto& t : extractors) t.join(); auto end = std::chrono::system_clock::now(); - BOOST_LOG_TRIVIAL(debug) - << "Extracted and wrote " << *lastPublishedSequence - startSequence - << " in " << ((end - begin).count()) / 1000000000.0; + log_.debug() << "Extracted and wrote " + << *lastPublishedSequence - startSequence << " in " + << ((end - begin).count()) / 1000000000.0; writing_ = false; - BOOST_LOG_TRIVIAL(debug) << __func__ << " : " - << "Stopping etl 
pipeline"; + log_.debug() << "Stopping etl pipeline"; return lastPublishedSequence; } @@ -801,40 +753,34 @@ ReportingETL::monitor() auto rng = backend_->hardFetchLedgerRangeNoThrow(); if (!rng) { - BOOST_LOG_TRIVIAL(info) << __func__ << " : " - << "Database is empty. Will download a ledger " - "from the network."; + log_.info() << "Database is empty. Will download a ledger " + "from the network."; std::optional ledger; if (startSequence_) { - BOOST_LOG_TRIVIAL(info) - << __func__ << " : " - << "ledger sequence specified in config. " - << "Will begin ETL process starting with ledger " - << *startSequence_; + log_.info() << "ledger sequence specified in config. " + << "Will begin ETL process starting with ledger " + << *startSequence_; ledger = loadInitialLedger(*startSequence_); } else { - BOOST_LOG_TRIVIAL(info) - << __func__ << " : " + log_.info() << "Waiting for next ledger to be validated by network..."; std::optional mostRecentValidated = networkValidatedLedgers_->getMostRecent(); if (mostRecentValidated) { - BOOST_LOG_TRIVIAL(info) << __func__ << " : " - << "Ledger " << *mostRecentValidated - << " has been validated. " - << "Downloading..."; + log_.info() << "Ledger " << *mostRecentValidated + << " has been validated. " + << "Downloading..."; ledger = loadInitialLedger(*mostRecentValidated); } else { - BOOST_LOG_TRIVIAL(info) << __func__ << " : " - << "The wait for the next validated " - << "ledger has been aborted. " - << "Exiting monitor loop"; + log_.info() << "The wait for the next validated " + << "ledger has been aborted. " + << "Exiting monitor loop"; return; } } @@ -842,8 +788,7 @@ ReportingETL::monitor() rng = backend_->hardFetchLedgerRangeNoThrow(); else { - BOOST_LOG_TRIVIAL(error) - << __func__ << " : " + log_.error() << "Failed to load initial ledger. 
Exiting monitor loop"; return; } @@ -852,21 +797,18 @@ ReportingETL::monitor() { if (startSequence_) { - BOOST_LOG_TRIVIAL(warning) + log_.warn() << "start sequence specified but db is already populated"; } - BOOST_LOG_TRIVIAL(info) - << __func__ << " : " + log_.info() << "Database already populated. Picking up from the tip of history"; loadCache(rng->maxSequence); } assert(rng); uint32_t nextSequence = rng->maxSequence + 1; - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " - << "Database is populated. " - << "Starting monitor loop. sequence = " << nextSequence; + log_.debug() << "Database is populated. " + << "Starting monitor loop. sequence = " << nextSequence; while (true) { if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); @@ -878,11 +820,9 @@ ReportingETL::monitor() else if (networkValidatedLedgers_->waitUntilValidatedByNetwork( nextSequence, 1000)) { - BOOST_LOG_TRIVIAL(info) - << __func__ << " : " - << "Ledger with sequence = " << nextSequence - << " has been validated by the network. " - << "Attempting to find in database and publish"; + log_.info() << "Ledger with sequence = " << nextSequence + << " has been validated by the network. " + << "Attempting to find in database and publish"; // Attempt to take over responsibility of ETL writer after 10 failed // attempts to publish the ledger. publishLedger() fails if the // ledger that has been validated by the network is not found in the @@ -893,17 +833,13 @@ ReportingETL::monitor() bool success = publishLedger(nextSequence, timeoutSeconds); if (!success) { - BOOST_LOG_TRIVIAL(warning) - << __func__ << " : " - << "Failed to publish ledger with sequence = " - << nextSequence << " . Beginning ETL"; + log_.warn() << "Failed to publish ledger with sequence = " + << nextSequence << " . 
Beginning ETL"; // doContinousETLPipelined returns the most recent sequence // published empty optional if no sequence was published std::optional lastPublished = runETLPipeline(nextSequence, extractorThreads_); - BOOST_LOG_TRIVIAL(info) - << __func__ << " : " - << "Aborting ETL. Falling back to publishing"; + log_.info() << "Aborting ETL. Falling back to publishing"; // if no ledger was published, don't increment nextSequence if (lastPublished) nextSequence = *lastPublished + 1; @@ -920,8 +856,8 @@ ReportingETL::loadCacheFromClioPeer( std::string const& port, boost::asio::yield_context& yield) { - BOOST_LOG_TRIVIAL(info) - << "Loading cache from peer. ip = " << ip << " . port = " << port; + log_.info() << "Loading cache from peer. ip = " << ip + << " . port = " << port; namespace beast = boost::beast; // from namespace http = beast::http; // from namespace websocket = beast::websocket; // from @@ -933,7 +869,7 @@ ReportingETL::loadCacheFromClioPeer( // These objects perform our I/O tcp::resolver resolver{ioContext_}; - BOOST_LOG_TRIVIAL(trace) << __func__ << " Creating websocket"; + log_.trace() << "Creating websocket"; auto ws = std::make_unique>(ioContext_); @@ -942,14 +878,13 @@ ReportingETL::loadCacheFromClioPeer( if (ec) return {}; - BOOST_LOG_TRIVIAL(trace) << __func__ << " Connecting websocket"; + log_.trace() << "Connecting websocket"; // Make the connection on the IP address we get from a lookup ws->next_layer().async_connect(results, yield[ec]); if (ec) return false; - BOOST_LOG_TRIVIAL(trace) - << __func__ << " Performing websocket handshake"; + log_.trace() << "Performing websocket handshake"; // Perform the websocket handshake ws->async_handshake(ip, "/", yield[ec]); if (ec) @@ -957,7 +892,7 @@ ReportingETL::loadCacheFromClioPeer( std::optional marker; - BOOST_LOG_TRIVIAL(trace) << __func__ << " Sending request"; + log_.trace() << "Sending request"; auto getRequest = [&](auto marker) { boost::json::object request = { {"command", "ledger_data"}, @@ 
-981,8 +916,7 @@ ReportingETL::loadCacheFromClioPeer( yield[ec]); if (ec) { - BOOST_LOG_TRIVIAL(error) - << __func__ << " error writing = " << ec.message(); + log_.error() << "error writing = " << ec.message(); return false; } @@ -990,8 +924,7 @@ ReportingETL::loadCacheFromClioPeer( ws->async_read(buffer, yield[ec]); if (ec) { - BOOST_LOG_TRIVIAL(error) - << __func__ << " error reading = " << ec.message(); + log_.error() << "error reading = " << ec.message(); return false; } @@ -1000,36 +933,30 @@ ReportingETL::loadCacheFromClioPeer( if (!parsed.is_object()) { - BOOST_LOG_TRIVIAL(error) - << __func__ << " Error parsing response: " << raw; + log_.error() << "Error parsing response: " << raw; return false; } - BOOST_LOG_TRIVIAL(trace) - << __func__ << " Successfully parsed response " << parsed; + log_.trace() << "Successfully parsed response " << parsed; if (auto const& response = parsed.as_object(); response.contains("error")) { - BOOST_LOG_TRIVIAL(error) - << __func__ << " Response contains error: " << response; + log_.error() << "Response contains error: " << response; auto const& err = response.at("error"); if (err.is_string() && err.as_string() == "lgrNotFound") { ++numAttempts; if (numAttempts >= 5) { - BOOST_LOG_TRIVIAL(error) - << __func__ + log_.error() << " ledger not found at peer after 5 attempts. " "peer = " << ip << " ledger = " << ledgerIndex << ". Check your config and the health of the peer"; return false; } - BOOST_LOG_TRIVIAL(warning) - << __func__ - << " ledger not found. ledger = " << ledgerIndex - << ". Sleeping and trying again"; + log_.warn() << "Ledger not found. ledger = " << ledgerIndex + << ". Sleeping and trying again"; std::this_thread::sleep_for(std::chrono::seconds(1)); continue; } @@ -1041,8 +968,7 @@ ReportingETL::loadCacheFromClioPeer( if (!response.contains("cache_full") || !response.at("cache_full").as_bool()) { - BOOST_LOG_TRIVIAL(error) - << __func__ << " cache not full for clio node. 
ip = " << ip; + log_.error() << "cache not full for clio node. ip = " << ip; return false; } if (response.contains("marker")) @@ -1063,8 +989,7 @@ ReportingETL::loadCacheFromClioPeer( if (!stateObject.key.parseHex( obj.at("index").as_string().c_str())) { - BOOST_LOG_TRIVIAL(error) - << __func__ << " failed to parse object id"; + log_.error() << "failed to parse object id"; return false; } boost::algorithm::unhex( @@ -1075,22 +1000,19 @@ ReportingETL::loadCacheFromClioPeer( backend_->cache().update(objects, ledgerIndex, true); if (marker) - BOOST_LOG_TRIVIAL(debug) - << __func__ << " - At marker " << *marker; - + log_.debug() << "At marker " << *marker; } while (marker || !started); - BOOST_LOG_TRIVIAL(info) - << __func__ - << " Finished downloading ledger from clio node. ip = " << ip; - backend_->cache().setFull(); + log_.info() << "Finished downloading ledger from clio node. ip = " + << ip; + + backend_->cache().setFull(); return true; } catch (std::exception const& e) { - BOOST_LOG_TRIVIAL(error) - << __func__ << " Encountered exception : " << e.what() - << " - ip = " << ip; + log_.error() << "Encountered exception : " << e.what() + << " - ip = " << ip; return false; } } @@ -1101,7 +1023,7 @@ ReportingETL::loadCache(uint32_t seq) if (cacheLoadStyle_ == CacheLoadStyle::NOT_AT_ALL) { backend_->cache().setDisabled(); - BOOST_LOG_TRIVIAL(warning) << "Cache is disabled. Not loading"; + log_.warn() << "Cache is disabled. Not loading"; return; } // sanity check to make sure we are not calling this multiple times @@ -1142,12 +1064,11 @@ ReportingETL::loadCache(uint32_t seq) while (cacheLoadStyle_ == CacheLoadStyle::SYNC && !backend_->cache().isFull()) { - BOOST_LOG_TRIVIAL(debug) - << "Cache not full. Cache size = " << backend_->cache().size() - << ". Sleeping ..."; + log_.debug() << "Cache not full. Cache size = " + << backend_->cache().size() << ". Sleeping ..."; std::this_thread::sleep_for(std::chrono::seconds(10)); - BOOST_LOG_TRIVIAL(info) - << "Cache is full. 
Cache size = " << backend_->cache().size(); + log_.info() << "Cache is full. Cache size = " + << backend_->cache().size(); } } @@ -1198,9 +1119,8 @@ ReportingETL::loadCacheFromDb(uint32_t seq) if (c) cursorStr << ripple::strHex(*c) << ", "; } - BOOST_LOG_TRIVIAL(info) - << "Loading cache. num cursors = " << cursors.size() - 1; - BOOST_LOG_TRIVIAL(trace) << __func__ << " cursors = " << cursorStr.str(); + log_.info() << "Loading cache. num cursors = " << cursors.size() - 1; + log_.trace() << "cursors = " << cursorStr.str(); cacheDownloader_ = std::thread{[this, seq, cursors]() { auto startTime = std::chrono::system_clock::now(); @@ -1221,9 +1141,8 @@ ReportingETL::loadCacheFromDb(uint32_t seq) std::string cursorStr = cursor.has_value() ? ripple::strHex(cursor.value()) : ripple::strHex(Backend::firstKey); - BOOST_LOG_TRIVIAL(debug) - << "Starting a cursor: " << cursorStr - << " markers = " << *markers; + log_.debug() << "Starting a cursor: " << cursorStr + << " markers = " << *markers; while (!stopping_) { @@ -1237,7 +1156,7 @@ ReportingETL::loadCacheFromDb(uint32_t seq) backend_->cache().update(res.objects, seq, true); if (!res.cursor || (end && *(res.cursor) > *end)) break; - BOOST_LOG_TRIVIAL(trace) + log_.trace() << "Loading cache. cache size = " << backend_->cache().size() << " - cursor = " << ripple::strHex(res.cursor.value()) @@ -1254,18 +1173,16 @@ ReportingETL::loadCacheFromDb(uint32_t seq) auto duration = std::chrono::duration_cast( endTime - startTime); - BOOST_LOG_TRIVIAL(info) - << "Finished loading cache. cache size = " - << backend_->cache().size() << ". Took " - << duration.count() << " seconds"; + log_.info() << "Finished loading cache. cache size = " + << backend_->cache().size() << ". Took " + << duration.count() << " seconds"; backend_->cache().setFull(); } else { - BOOST_LOG_TRIVIAL(info) - << "Finished a cursor. num remaining = " - << *numRemaining << " start = " << cursorStr - << " markers = " << *markers; + log_.info() << "Finished a cursor. 
num remaining = " + << *numRemaining << " start = " << cursorStr + << " markers = " << *markers; } }); } @@ -1275,7 +1192,7 @@ ReportingETL::loadCacheFromDb(uint32_t seq) void ReportingETL::monitorReadOnly() { - BOOST_LOG_TRIVIAL(debug) << "Starting reporting in strict read only mode"; + log_.debug() << "Starting reporting in strict read only mode"; auto rng = backend_->hardFetchLedgerRangeNoThrow(); uint32_t latestSequence; if (!rng) diff --git a/src/etl/ReportingETL.h b/src/etl/ReportingETL.h index acc20ba0..e5e479d1 100644 --- a/src/etl/ReportingETL.h +++ b/src/etl/ReportingETL.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h" @@ -55,6 +56,8 @@ class SubscriptionManager; class ReportingETL { private: + clio::Logger log_{"ETL"}; + std::shared_ptr backend_; std::shared_ptr subscriptions_; std::shared_ptr loadBalancer_; @@ -300,7 +303,7 @@ private: void run() { - BOOST_LOG_TRIVIAL(info) << "Starting reporting etl"; + log_.info() << "Starting reporting etl"; stopping_ = false; doWork(); @@ -337,8 +340,8 @@ public: ~ReportingETL() { - BOOST_LOG_TRIVIAL(info) << "onStop called"; - BOOST_LOG_TRIVIAL(debug) << "Stopping Reporting ETL"; + log_.info() << "onStop called"; + log_.debug() << "Stopping Reporting ETL"; stopping_ = true; if (worker_.joinable()) @@ -346,7 +349,7 @@ public: if (cacheDownloader_.joinable()) cacheDownloader_.join(); - BOOST_LOG_TRIVIAL(debug) << "Joined ReportingETL worker thread"; + log_.debug() << "Joined ReportingETL worker thread"; } boost::json::object diff --git a/src/log/Logger.cpp b/src/log/Logger.cpp new file mode 100644 index 00000000..332df300 --- /dev/null +++ b/src/log/Logger.cpp @@ -0,0 +1,191 @@ +#include +#include + +#include +#include +#include + +namespace clio { + +Logger LogService::general_log_ = Logger{"General"}; +Logger LogService::alert_log_ = Logger{"Alert"}; + +std::ostream& +operator<<(std::ostream& stream, Severity sev) +{ + static constexpr std::array labels 
= { + "TRC", + "DBG", + "NFO", + "WRN", + "ERR", + "FTL", + }; + + return stream << labels.at(static_cast(sev)); +} + +Severity +tag_invoke(boost::json::value_to_tag, boost::json::value const& value) +{ + if (not value.is_string()) + throw std::runtime_error("`log_level` must be a string"); + auto const& logLevel = value.as_string(); + + if (boost::iequals(logLevel, "trace")) + return Severity::TRACE; + else if (boost::iequals(logLevel, "debug")) + return Severity::DEBUG; + else if (boost::iequals(logLevel, "info")) + return Severity::INFO; + else if ( + boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn")) + return Severity::WARNING; + else if (boost::iequals(logLevel, "error")) + return Severity::ERROR; + else if (boost::iequals(logLevel, "fatal")) + return Severity::FATAL; + else + throw std::runtime_error( + "Could not parse `log_level`: expected `trace`, `debug`, `info`, " + "`warning`, `error` or `fatal`"); +} + +void +LogService::init(Config const& config) +{ + namespace src = boost::log::sources; + namespace keywords = boost::log::keywords; + namespace sinks = boost::log::sinks; + + boost::log::add_common_attributes(); + boost::log::register_simple_formatter_factory("Severity"); + auto const defaultFormat = + "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% " + "%Message%"; + std::string format = + config.valueOr("log_format", defaultFormat); + + if (config.valueOr("log_to_console", false)) + { + boost::log::add_console_log(std::cout, keywords::format = format); + } + + auto logDir = config.maybeValue("log_directory"); + if (logDir) + { + boost::filesystem::path dirPath{logDir.value()}; + if (!boost::filesystem::exists(dirPath)) + boost::filesystem::create_directories(dirPath); + auto const rotationSize = + config.valueOr("log_rotation_size", 2048u) * 1024u * + 1024u; + auto const rotationPeriod = + config.valueOr("log_rotation_hour_interval", 12u); + auto const dirSize = + config.valueOr("log_directory_max_size", 50u * 
1024u) * + 1024u * 1024u; + auto fileSink = boost::log::add_file_log( + keywords::file_name = dirPath / "clio.log", + keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log", + keywords::auto_flush = true, + keywords::format = format, + keywords::open_mode = std::ios_base::app, + keywords::rotation_size = rotationSize, + keywords::time_based_rotation = + sinks::file::rotation_at_time_interval( + boost::posix_time::hours(rotationPeriod))); + fileSink->locked_backend()->set_file_collector( + sinks::file::make_collector( + keywords::target = dirPath, keywords::max_size = dirSize)); + fileSink->locked_backend()->scan_for_files(); + } + + // get default severity, can be overridden per channel using + // the `log_channels` array + auto defaultSeverity = + config.valueOr("log_level", Severity::INFO); + static constexpr std::array channels = { + "General", + "WebServer", + "Backend", + "RPC", + "ETL", + "Subscriptions", + "Performance", + }; + + auto core = boost::log::core::get(); + auto min_severity = boost::log::expressions::channel_severity_filter( + log_channel, log_severity); + + for (auto const& channel : channels) + min_severity[channel] = defaultSeverity; + min_severity["Alert"] = + Severity::WARNING; // Channel for alerts, always warning severity + + for (auto const overrides = config.arrayOr("log_channels", {}); + auto const& cfg : overrides) + { + auto name = cfg.valueOrThrow( + "channel", "Channel name is required"); + if (not std::count(std::begin(channels), std::end(channels), name)) + throw std::runtime_error( + "Can't override settings for log channel " + name + + ": invalid channel"); + + min_severity[name] = + cfg.valueOr("log_level", defaultSeverity); + } + + core->set_filter(min_severity); + LogService::info() << "Default log level = " << defaultSeverity; +} + +Logger::Pump +Logger::trace(source_location_t const loc) const +{ + return {logger_, Severity::TRACE, loc}; +}; +Logger::Pump +Logger::debug(source_location_t const loc) const +{ + 
return {logger_, Severity::DEBUG, loc}; +}; +Logger::Pump +Logger::info(source_location_t const loc) const +{ + return {logger_, Severity::INFO, loc}; +}; +Logger::Pump +Logger::warn(source_location_t const loc) const +{ + return {logger_, Severity::WARNING, loc}; +}; +Logger::Pump +Logger::error(source_location_t const loc) const +{ + return {logger_, Severity::ERROR, loc}; +}; +Logger::Pump +Logger::fatal(source_location_t const loc) const +{ + return {logger_, Severity::FATAL, loc}; +}; + +std::string +Logger::Pump::pretty_path(source_location_t const& loc, size_t max_depth) const +{ + auto const file_path = std::string{loc.file_name()}; + auto idx = file_path.size(); + while (max_depth-- > 0) + { + idx = file_path.rfind('/', idx - 1); + if (idx == std::string::npos || idx == 0) + break; + } + return file_path.substr(idx == std::string::npos ? 0 : idx + 1) + ':' + + std::to_string(loc.line()); +} + +} // namespace clio diff --git a/src/log/Logger.h b/src/log/Logger.h new file mode 100644 index 00000000..1b81a824 --- /dev/null +++ b/src/log/Logger.h @@ -0,0 +1,257 @@ +#ifndef RIPPLE_UTIL_LOGGER_H +#define RIPPLE_UTIL_LOGGER_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Note: clang still does not provide non-experimental support, gcc does +// TODO: start using once clang catches up on c++20 +#include + +#include +#include + +namespace clio { + +class Config; +using source_location_t = std::experimental::source_location; + +/** + * @brief Custom severity levels for @ref Logger. + */ +enum class Severity { + TRACE, + DEBUG, + INFO, + WARNING, + ERROR, + FATAL, +}; + +BOOST_LOG_ATTRIBUTE_KEYWORD(log_severity, "Severity", Severity); +BOOST_LOG_ATTRIBUTE_KEYWORD(log_channel, "Channel", std::string); + +/** + * @brief Custom labels for @ref Severity in log output. 
+ * + * @param stream std::ostream The output stream + * @param sev Severity The severity to output to the ostream + * @return std::ostream& The same ostream we were given + */ +std::ostream& +operator<<(std::ostream& stream, Severity sev); + +/** + * @brief Custom JSON parser for @ref Severity. + * + * @param value The JSON string to parse + * @return Severity The parsed severity + * @throws std::runtime_error Thrown if severity is not in the right format + */ +Severity +tag_invoke( + boost::json::value_to_tag, + boost::json::value const& value); + +/** + * @brief A simple thread-safe logger for the channel specified + * in the constructor. + * + * This is cheap to copy and move. Designed to be used as a member variable or + * otherwise. See @ref LogService::init() for setup of the logging core and + * severity levels for each channel. + */ +class Logger final +{ + using logger_t = + boost::log::sources::severity_channel_logger_mt; + mutable logger_t logger_; + + friend class LogService; // to expose the Pump interface + + /** + * @brief Helper that pumps data into a log record via `operator<<`. + */ + class Pump final + { + using pump_opt_t = + std::optional>; + + boost::log::record rec_; + pump_opt_t pump_ = std::nullopt; + + public: + ~Pump() = default; + Pump(logger_t& logger, Severity sev, source_location_t const& loc) + : rec_{logger.open_record(boost::log::keywords::severity = sev)} + { + if (rec_) + { + pump_.emplace(boost::log::aux::make_record_pump(logger, rec_)); + pump_->stream() << boost::log::add_value( + "SourceLocation", pretty_path(loc)); + } + } + + Pump(Pump&&) = delete; + Pump(Pump const&) = delete; + Pump& + operator=(Pump const&) = delete; + Pump& + operator=(Pump&&) = delete; + + /** + * @brief Perfectly forwards any incoming data into the underlying + * boost::log pump if the pump is available. nop otherwise. 
+ * + * @tparam T Type of data to pump + * @param data The data to pump + * @return Pump& Reference to itself for chaining + */ + template + [[maybe_unused]] Pump& + operator<<(T&& data) + { + if (pump_) + pump_->stream() << std::forward(data); + return *this; + } + + private: + [[nodiscard]] std::string + pretty_path(source_location_t const& loc, size_t max_depth = 3) const; + }; + +public: + ~Logger() = default; + /** + * @brief Construct a new Logger object that produces loglines for the + * specified channel. + * + * See @ref LogService::init() for general setup and configuration of + * severity levels per channel. + * + * @param channel The channel this logger will report into. + */ + Logger(std::string channel) + : logger_{boost::log::keywords::channel = channel} + { + } + Logger(Logger const&) = default; + Logger(Logger&&) = default; + Logger& + operator=(Logger const&) = default; + Logger& + operator=(Logger&&) = default; + + /*! Interface for logging at @ref Severity::TRACE severity */ + [[nodiscard]] Pump + trace(source_location_t const loc = source_location_t::current()) const; + + /*! Interface for logging at @ref Severity::DEBUG severity */ + [[nodiscard]] Pump + debug(source_location_t const loc = source_location_t::current()) const; + + /*! Interface for logging at @ref Severity::INFO severity */ + [[nodiscard]] Pump + info(source_location_t const loc = source_location_t::current()) const; + + /*! Interface for logging at @ref Severity::WARNING severity */ + [[nodiscard]] Pump + warn(source_location_t const loc = source_location_t::current()) const; + + /*! Interface for logging at @ref Severity::ERROR severity */ + [[nodiscard]] Pump + error(source_location_t const loc = source_location_t::current()) const; + + /*! Interface for logging at @ref Severity::FATAL severity */ + [[nodiscard]] Pump + fatal(source_location_t const loc = source_location_t::current()) const; +}; + +/** + * @brief A global logging service. 
 + * + * Used to initialize and setup the logging core as well as a globally available + * entrypoint for logging into the `General` channel as well as raising alerts. + */ +class LogService +{ + static Logger general_log_; /*! Global logger for General channel */ + static Logger alert_log_; /*! Global logger for Alerts channel */ + +public: + LogService() = delete; + + /** + * @brief Global log core initialization from a @ref Config + */ + static void + init(Config const& config); + + /*! Globally accessible General logger at @ref Severity::TRACE severity */ + [[nodiscard]] static Logger::Pump + trace(source_location_t const loc = source_location_t::current()) + { + return general_log_.trace(loc); + } + + /*! Globally accessible General logger at @ref Severity::DEBUG severity */ + [[nodiscard]] static Logger::Pump + debug(source_location_t const loc = source_location_t::current()) + { + return general_log_.debug(loc); + } + + /*! Globally accessible General logger at @ref Severity::INFO severity */ + [[nodiscard]] static Logger::Pump + info(source_location_t const loc = source_location_t::current()) + { + return general_log_.info(loc); + } + + /*! Globally accessible General logger at @ref Severity::WARNING severity */ + [[nodiscard]] static Logger::Pump + warn(source_location_t const loc = source_location_t::current()) + { + return general_log_.warn(loc); + } + + /*! Globally accessible General logger at @ref Severity::ERROR severity */ + [[nodiscard]] static Logger::Pump + error(source_location_t const loc = source_location_t::current()) + { + return general_log_.error(loc); + } + + /*! Globally accessible General logger at @ref Severity::FATAL severity */ + [[nodiscard]] static Logger::Pump + fatal(source_location_t const loc = source_location_t::current()) + { + return general_log_.fatal(loc); + } + + /*! 
Globally accesible Alert logger */ + [[nodiscard]] static Logger::Pump + alert(source_location_t const loc = source_location_t::current()) + { + return alert_log_.warn(loc); + } +}; + +}; // namespace clio + +#endif // RIPPLE_UTIL_LOGGER_H diff --git a/src/main/main.cpp b/src/main/main.cpp index 9f8764a0..3e368f83 100644 --- a/src/main/main.cpp +++ b/src/main/main.cpp @@ -6,26 +6,20 @@ #undef GRPC_ASAN_ENABLED #endif +#include +#include +#include +#include +#include + #include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include + #include -#include -#include #include -#include #include #include #include @@ -35,7 +29,6 @@ #include #include #include -#include using namespace clio; @@ -80,85 +73,6 @@ parse_certs(Config const& config) return ctx; } -void -initLogging(Config const& config) -{ - namespace src = boost::log::sources; - namespace keywords = boost::log::keywords; - namespace sinks = boost::log::sinks; - namespace trivial = boost::log::trivial; - boost::log::add_common_attributes(); - std::string format = "[%TimeStamp%] [%ThreadID%] [%Severity%] %Message%"; - if (config.valueOr("log_to_console", true)) - { - boost::log::add_console_log(std::cout, keywords::format = format); - } - - if (auto logDir = config.maybeValue("log_directory"); logDir) - { - boost::filesystem::path dirPath{*logDir}; - if (!boost::filesystem::exists(dirPath)) - boost::filesystem::create_directories(dirPath); - auto const rotationSize = - config.valueOr("log_rotation_size", 2 * 1024) * 1024 * - 1024u; - if (rotationSize <= 0) - throw std::runtime_error( - "log rotation size must be greater than 0"); - auto const rotationPeriod = - config.valueOr("log_rotation_hour_interval", 12u); - if (rotationPeriod <= 0) - throw std::runtime_error( - "log rotation time interval must be greater than 0"); - auto const dirSize = - config.valueOr("log_directory_max_size", 50 * 1024) * - 1024 * 1024u; - if (dirSize <= 0) - 
throw std::runtime_error( - "log rotation directory max size must be greater than 0"); - auto fileSink = boost::log::add_file_log( - keywords::file_name = dirPath / "clio.log", - keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log", - keywords::auto_flush = true, - keywords::format = format, - keywords::open_mode = std::ios_base::app, - keywords::rotation_size = rotationSize, - keywords::time_based_rotation = - sinks::file::rotation_at_time_interval( - boost::posix_time::hours(rotationPeriod))); - fileSink->locked_backend()->set_file_collector( - sinks::file::make_collector( - keywords::target = dirPath, keywords::max_size = dirSize)); - fileSink->locked_backend()->scan_for_files(); - } - auto const logLevel = config.valueOr("log_level", "info"); - if (boost::iequals(logLevel, "trace")) - boost::log::core::get()->set_filter( - trivial::severity >= trivial::trace); - else if (boost::iequals(logLevel, "debug")) - boost::log::core::get()->set_filter( - trivial::severity >= trivial::debug); - else if (boost::iequals(logLevel, "info")) - boost::log::core::get()->set_filter(trivial::severity >= trivial::info); - else if ( - boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn")) - boost::log::core::get()->set_filter( - trivial::severity >= trivial::warning); - else if (boost::iequals(logLevel, "error")) - boost::log::core::get()->set_filter( - trivial::severity >= trivial::error); - else if (boost::iequals(logLevel, "fatal")) - boost::log::core::get()->set_filter( - trivial::severity >= trivial::fatal); - else - { - BOOST_LOG_TRIVIAL(warning) << "Unrecognized log level: " << logLevel - << ". 
Setting log level to info"; - boost::log::core::get()->set_filter(trivial::severity >= trivial::info); - } - BOOST_LOG_TRIVIAL(info) << "Log level = " << logLevel; -} - void start(boost::asio::io_context& ioc, std::uint32_t numThreads) { @@ -172,6 +86,7 @@ start(boost::asio::io_context& ioc, std::uint32_t numThreads) int main(int argc, char* argv[]) +try { // Check command line arguments. if (argc != 2) @@ -196,10 +111,8 @@ main(int argc, char* argv[]) return EXIT_FAILURE; } - initLogging(config); - - BOOST_LOG_TRIVIAL(info) - << "Clio version: " << Build::getClioFullVersionString(); + LogService::init(config); + LogService::info() << "Clio version: " << Build::getClioFullVersionString(); auto ctx = parse_certs(config); auto ctxRef = ctx @@ -209,10 +122,10 @@ main(int argc, char* argv[]) auto const threads = config.valueOr("io_threads", 2); if (threads <= 0) { - BOOST_LOG_TRIVIAL(fatal) << "io_threads is less than 0"; + LogService::fatal() << "io_threads is less than 0"; return EXIT_FAILURE; } - BOOST_LOG_TRIVIAL(info) << "Number of io threads = " << threads; + LogService::info() << "Number of io threads = " << threads; // io context to handle all incoming requests, as well as other things // This is not the only io context in the application @@ -258,3 +171,7 @@ main(int argc, char* argv[]) return EXIT_SUCCESS; } +catch (std::exception const& e) +{ + LogService::fatal() << "Exit on exception: " << e.what(); +} diff --git a/src/rpc/RPC.cpp b/src/rpc/RPC.cpp index 27e1b00e..86ebd39f 100644 --- a/src/rpc/RPC.cpp +++ b/src/rpc/RPC.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -9,9 +10,15 @@ #include using namespace std; +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gPerfLog{"Performance"}; +clio::Logger gLog{"RPC"}; +} // namespace namespace RPC { - Context::Context( boost::asio::yield_context& yield_, string const& command_, @@ -40,7 +47,7 @@ Context::Context( , counters(counters_) , 
clientIp(clientIp_) { - BOOST_LOG_TRIVIAL(debug) << tag() << "new Context created"; + gPerfLog.debug() << tag() << "new Context created"; } optional @@ -325,8 +332,7 @@ buildResponse(Context const& ctx) if (ctx.backend->isTooBusy()) { - BOOST_LOG_TRIVIAL(error) - << __func__ << " Database is too busy. Rejecting request"; + gLog.error() << "Database is too busy. Rejecting request"; return Status{RippledError::rpcTOO_BUSY}; } @@ -337,13 +343,11 @@ buildResponse(Context const& ctx) try { - BOOST_LOG_TRIVIAL(debug) - << ctx.tag() << __func__ << " start executing rpc `" << ctx.method - << '`'; + gPerfLog.debug() << ctx.tag() << " start executing rpc `" << ctx.method + << '`'; auto v = (*method)(ctx); - BOOST_LOG_TRIVIAL(debug) - << ctx.tag() << __func__ << " finish executing rpc `" << ctx.method - << '`'; + gPerfLog.debug() << ctx.tag() << " finish executing rpc `" << ctx.method + << '`'; if (auto object = get_if(&v); object && not shouldSuppressValidatedFlag(ctx)) @@ -363,13 +367,12 @@ buildResponse(Context const& ctx) } catch (Backend::DatabaseTimeout const& t) { - BOOST_LOG_TRIVIAL(error) << __func__ << " Database timeout"; + gLog.error() << "Database timeout"; return Status{RippledError::rpcTOO_BUSY}; } catch (exception const& err) { - BOOST_LOG_TRIVIAL(error) - << ctx.tag() << __func__ << " caught exception : " << err.what(); + gLog.error() << ctx.tag() << " caught exception: " << err.what(); return Status{RippledError::rpcINTERNAL}; } } diff --git a/src/rpc/RPC.h b/src/rpc/RPC.h index fc73084d..636bf6f5 100644 --- a/src/rpc/RPC.h +++ b/src/rpc/RPC.h @@ -1,15 +1,15 @@ #ifndef REPORTING_RPC_H_INCLUDED #define REPORTING_RPC_H_INCLUDED +#include +#include +#include #include +#include #include #include -#include -#include -#include - #include #include #include @@ -34,6 +34,7 @@ namespace RPC { struct Context : public util::Taggable { + clio::Logger perfLog_{"Performance"}; boost::asio::yield_context& yield; std::string method; std::uint32_t version; @@ -129,6 +130,7 @@ 
template void logDuration(Context const& ctx, T const& dur) { + static clio::Logger log{"RPC"}; std::stringstream ss; ss << ctx.tag() << "Request processing duration = " << std::chrono::duration_cast(dur).count() @@ -136,11 +138,11 @@ logDuration(Context const& ctx, T const& dur) auto seconds = std::chrono::duration_cast(dur).count(); if (seconds > 10) - BOOST_LOG_TRIVIAL(error) << ss.str(); + log.error() << ss.str(); else if (seconds > 1) - BOOST_LOG_TRIVIAL(warning) << ss.str(); + log.warn() << ss.str(); else - BOOST_LOG_TRIVIAL(info) << ss.str(); + log.info() << ss.str(); } } // namespace RPC diff --git a/src/rpc/RPCHelpers.cpp b/src/rpc/RPCHelpers.cpp index f9a641b3..10bc8c24 100644 --- a/src/rpc/RPCHelpers.cpp +++ b/src/rpc/RPCHelpers.cpp @@ -1,10 +1,17 @@ +#include +#include +#include +#include + #include #include -#include +using namespace clio; -#include -#include +// local to compilation unit loggers +namespace { +clio::Logger gLog{"RPC"}; +} // namespace namespace RPC { @@ -412,12 +419,11 @@ deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs) blobs.metadata.begin(), blobs.metadata.end(), std::ostream_iterator(meta)); - BOOST_LOG_TRIVIAL(error) - << __func__ - << " Failed to deserialize transaction. txn = " << txn.str() - << " - meta = " << meta.str() - << " txn length = " << std::to_string(blobs.transaction.size()) - << " meta length = " << std::to_string(blobs.metadata.size()); + gLog.error() << "Failed to deserialize transaction. 
txn = " << txn.str() + << " - meta = " << meta.str() << " txn length = " + << std::to_string(blobs.transaction.size()) + << " meta length = " + << std::to_string(blobs.metadata.size()); throw e; } } @@ -807,21 +813,21 @@ traverseOwnedNodes( } auto end = std::chrono::system_clock::now(); - BOOST_LOG_TRIVIAL(debug) - << "Time loading owned directories: " - << std::chrono::duration_cast(end - start) - .count() - << " milliseconds"; + gLog.debug() << "Time loading owned directories: " + << std::chrono::duration_cast( + end - start) + .count() + << " milliseconds"; start = std::chrono::system_clock::now(); auto objects = backend.fetchLedgerObjects(keys, sequence, yield); end = std::chrono::system_clock::now(); - BOOST_LOG_TRIVIAL(debug) - << "Time loading owned entries: " - << std::chrono::duration_cast(end - start) - .count() - << " milliseconds"; + gLog.debug() << "Time loading owned entries: " + << std::chrono::duration_cast( + end - start) + .count() + << " milliseconds"; for (auto i = 0; i < objects.size(); ++i) { @@ -1341,7 +1347,7 @@ postProcessOrderBook( } catch (std::exception const& e) { - BOOST_LOG_TRIVIAL(error) << "caught exception: " << e.what(); + gLog.error() << "caught exception: " << e.what(); } } return jsonOffers; @@ -1676,9 +1682,7 @@ traverseTransactions( } else if (txnPlusMeta.ledgerSequence > maxIndex && !forward) { - BOOST_LOG_TRIVIAL(debug) - << __func__ - << " skipping over transactions from incomplete ledger"; + gLog.debug() << "Skipping over transactions from incomplete ledger"; continue; } @@ -1708,12 +1712,11 @@ traverseTransactions( response[JS(ledger_index_max)] = maxIndex; response[JS(transactions)] = txns; - BOOST_LOG_TRIVIAL(info) - << __func__ << " serialization took " - << std::chrono::duration_cast( - std::chrono::system_clock::now() - serializationStart) - .count() - << " milliseconds"; + gLog.info() << "serialization took " + << std::chrono::duration_cast( + std::chrono::system_clock::now() - serializationStart) + .count() + << " 
milliseconds"; return response; } diff --git a/src/rpc/WorkQueue.h b/src/rpc/WorkQueue.h index 3a666d27..c17634b8 100644 --- a/src/rpc/WorkQueue.h +++ b/src/rpc/WorkQueue.h @@ -1,10 +1,11 @@ #ifndef CLIO_WORK_QUEUE_H #define CLIO_WORK_QUEUE_H +#include + #include #include #include -#include #include #include @@ -20,6 +21,7 @@ class WorkQueue std::atomic_uint64_t curSize_ = 0; uint32_t maxSize_ = std::numeric_limits::max(); + clio::Logger log_{"RPC"}; public: WorkQueue(std::uint32_t numWorkers, uint32_t maxSize = 0); @@ -30,10 +32,8 @@ public: { if (curSize_ >= maxSize_ && !isWhiteListed) { - BOOST_LOG_TRIVIAL(warning) - << __func__ - << " queue is full. rejecting job. current size = " << curSize_ - << " max size = " << maxSize_; + log_.warn() << "Queue is full. rejecting job. current size = " + << curSize_ << " max size = " << maxSize_; return false; } ++curSize_; @@ -52,8 +52,8 @@ public: // durationUs_ ++queued_; durationUs_ += wait; - BOOST_LOG_TRIVIAL(debug) << "WorkQueue wait time = " << wait - << " queue size = " << curSize_; + log_.info() << "WorkQueue wait time = " << wait + << " queue size = " << curSize_; f(yield); --curSize_; }); diff --git a/src/rpc/handlers/AccountTx.cpp b/src/rpc/handlers/AccountTx.cpp index 8fdd3da2..79d78f04 100644 --- a/src/rpc/handlers/AccountTx.cpp +++ b/src/rpc/handlers/AccountTx.cpp @@ -1,5 +1,13 @@ +#include #include +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gLog{"RPC"}; +} // namespace + namespace RPC { Result @@ -10,25 +18,27 @@ doAccountTx(Context const& context) return status; constexpr std::string_view outerFuncName = __func__; - auto const maybeResponse = traverseTransactions( - context, - [&accountID, &outerFuncName]( - std::shared_ptr const& backend, - std::uint32_t const limit, - bool const forward, - std::optional const& cursorIn, - boost::asio::yield_context& yield) { - auto const start = std::chrono::system_clock::now(); - auto const txnsAndCursor = 
backend->fetchAccountTransactions( - accountID, limit, forward, cursorIn, yield); - BOOST_LOG_TRIVIAL(info) - << outerFuncName << " db fetch took " - << std::chrono::duration_cast( - std::chrono::system_clock::now() - start) - .count() - << " milliseconds - num blobs = " << txnsAndCursor.txns.size(); - return txnsAndCursor; - }); + auto const maybeResponse = + traverseTransactions( + context, + [&accountID, &outerFuncName]( + std::shared_ptr const& backend, + std::uint32_t const limit, + bool const forward, + std::optional const& cursorIn, + boost::asio::yield_context& yield) { + auto const start = std::chrono::system_clock::now(); + auto const txnsAndCursor = backend->fetchAccountTransactions( + accountID, limit, forward, cursorIn, yield); + gLog.info() + << outerFuncName << " db fetch took " + << std::chrono::duration_cast( + std::chrono::system_clock::now() - start) + .count() + << " milliseconds - num blobs = " + << txnsAndCursor.txns.size(); + return txnsAndCursor; + }); if (auto const status = std::get_if(&maybeResponse); status) return *status; diff --git a/src/rpc/handlers/BookOffers.cpp b/src/rpc/handlers/BookOffers.cpp index 3de025f8..0bdf79b7 100644 --- a/src/rpc/handlers/BookOffers.cpp +++ b/src/rpc/handlers/BookOffers.cpp @@ -4,12 +4,21 @@ #include #include #include -#include -#include -#include - #include #include +#include +#include + +#include + +#include + +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gLog{"RPC"}; +} // namespace namespace RPC { @@ -64,11 +73,11 @@ doBookOffers(Context const& context) bookBase, lgrInfo.seq, limit, marker, context.yield); auto end = std::chrono::system_clock::now(); - BOOST_LOG_TRIVIAL(warning) - << "Time loading books: " - << std::chrono::duration_cast(end - start) - .count() - << " milliseconds - request = " << request; + gLog.warn() << "Time loading books: " + << std::chrono::duration_cast( + end - start) + .count() + << " milliseconds - request = " << request; 
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); response[JS(ledger_index)] = lgrInfo.seq; @@ -78,11 +87,11 @@ doBookOffers(Context const& context) auto end2 = std::chrono::system_clock::now(); - BOOST_LOG_TRIVIAL(warning) - << "Time transforming to json: " - << std::chrono::duration_cast(end2 - end) - .count() - << " milliseconds - request = " << request; + gLog.warn() << "Time transforming to json: " + << std::chrono::duration_cast( + end2 - end) + .count() + << " milliseconds - request = " << request; if (retMarker) response["marker"] = ripple::strHex(*retMarker); diff --git a/src/rpc/handlers/LedgerData.cpp b/src/rpc/handlers/LedgerData.cpp index 335ec5e4..039974ec 100644 --- a/src/rpc/handlers/LedgerData.cpp +++ b/src/rpc/handlers/LedgerData.cpp @@ -1,9 +1,11 @@ #include #include +#include +#include +#include + #include -#include -#include // Get state nodes from a ledger // Inputs: // limit: integer, maximum number of entries @@ -18,6 +20,13 @@ // // +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gLog{"RPC"}; +} // namespace + namespace RPC { using boost::json::value_to; @@ -65,7 +74,7 @@ doLedgerData(Context const& context) } else { - BOOST_LOG_TRIVIAL(debug) << __func__ << " : parsing marker"; + gLog.debug() << "Parsing marker"; marker = ripple::uint256{}; if (!marker->parseHex(request.at(JS(marker)).as_string().c_str())) @@ -168,9 +177,8 @@ doLedgerData(Context const& context) std::chrono::duration_cast(end - start) .count(); - BOOST_LOG_TRIVIAL(debug) - << __func__ << " number of results = " << results.size() - << " fetched in " << time << " microseconds"; + gLog.debug() << "Number of results = " << results.size() << " fetched in " + << time << " microseconds"; boost::json::array objects; objects.reserve(results.size()); for (auto const& [key, object] : results) @@ -194,9 +202,8 @@ doLedgerData(Context const& context) time = std::chrono::duration_cast(end2 - end) .count(); - BOOST_LOG_TRIVIAL(debug) - << 
__func__ << " number of results = " << results.size() - << " serialized in " << time << " microseconds"; + gLog.debug() << "Number of results = " << results.size() + << " serialized in " << time << " microseconds"; return response; } diff --git a/src/rpc/handlers/NFTHistory.cpp b/src/rpc/handlers/NFTHistory.cpp index 2ca31217..fda8d0cb 100644 --- a/src/rpc/handlers/NFTHistory.cpp +++ b/src/rpc/handlers/NFTHistory.cpp @@ -1,5 +1,13 @@ +#include #include +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gLog{"RPC"}; +} // namespace + namespace RPC { Result @@ -11,26 +19,28 @@ doNFTHistory(Context const& context) auto const tokenID = std::get(maybeTokenID); constexpr std::string_view outerFuncName = __func__; - auto const maybeResponse = traverseTransactions( - context, - [&tokenID, &outerFuncName]( - std::shared_ptr const& backend, - std::uint32_t const limit, - bool const forward, - std::optional const& cursorIn, - boost::asio::yield_context& yield) - -> Backend::TransactionsAndCursor { - auto const start = std::chrono::system_clock::now(); - auto const txnsAndCursor = backend->fetchNFTTransactions( - tokenID, limit, forward, cursorIn, yield); - BOOST_LOG_TRIVIAL(info) - << outerFuncName << " db fetch took " - << std::chrono::duration_cast( - std::chrono::system_clock::now() - start) - .count() - << " milliseconds - num blobs = " << txnsAndCursor.txns.size(); - return txnsAndCursor; - }); + auto const maybeResponse = + traverseTransactions( + context, + [&tokenID, &outerFuncName]( + std::shared_ptr const& backend, + std::uint32_t const limit, + bool const forward, + std::optional const& cursorIn, + boost::asio::yield_context& yield) + -> Backend::TransactionsAndCursor { + auto const start = std::chrono::system_clock::now(); + auto const txnsAndCursor = backend->fetchNFTTransactions( + tokenID, limit, forward, cursorIn, yield); + gLog.info() + << outerFuncName << " db fetch took " + << std::chrono::duration_cast( + 
std::chrono::system_clock::now() - start) + .count() + << " milliseconds - num blobs = " + << txnsAndCursor.txns.size(); + return txnsAndCursor; + }); if (auto const status = std::get_if(&maybeResponse); status) return *status; diff --git a/src/rpc/handlers/NFTOffers.cpp b/src/rpc/handlers/NFTOffers.cpp index 08c9a940..e26002e0 100644 --- a/src/rpc/handlers/NFTOffers.cpp +++ b/src/rpc/handlers/NFTOffers.cpp @@ -4,10 +4,12 @@ #include #include #include -#include -#include #include +#include + +#include + namespace json = boost::json; namespace ripple { diff --git a/src/subscriptions/SubscriptionManager.h b/src/subscriptions/SubscriptionManager.h index c00ce2ba..01de599b 100644 --- a/src/subscriptions/SubscriptionManager.h +++ b/src/subscriptions/SubscriptionManager.h @@ -3,9 +3,11 @@ #include #include -#include +#include #include +#include + class WsBase; class Subscription @@ -87,6 +89,7 @@ public: class SubscriptionManager { using session_ptr = std::shared_ptr; + clio::Logger log_{"Subscriptions"}; std::vector workers_; boost::asio::io_context ioc_; @@ -134,8 +137,8 @@ public: // We will eventually want to clamp this to be the number of strands, // since adding more threads than we have strands won't see any // performance benefits - BOOST_LOG_TRIVIAL(info) << "Starting subscription manager with " - << numThreads << " workers"; + log_.info() << "Starting subscription manager with " << numThreads + << " workers"; workers_.reserve(numThreads); for (auto i = numThreads; i > 0; --i) diff --git a/src/util/Taggable.h b/src/util/Taggable.h index f63ac350..4eaf4c6f 100644 --- a/src/util/Taggable.h +++ b/src/util/Taggable.h @@ -209,7 +209,8 @@ private: friend Type tag_invoke(boost::json::value_to_tag, boost::json::value const& value) { - assert(value.is_string()); + if (not value.is_string()) + throw std::runtime_error("`log_tag_style` must be a string"); auto const& style = value.as_string(); if (boost::iequals(style, "int") || boost::iequals(style, "uint")) diff --git 
a/src/webserver/HttpBase.h b/src/webserver/HttpBase.h index 9dd1285a..4bb6a39a 100644 --- a/src/webserver/HttpBase.h +++ b/src/webserver/HttpBase.h @@ -19,6 +19,7 @@ #include #include +#include #include
#include #include @@ -27,6 +28,7 @@ #include #include +// TODO: consider removing those - visible to anyone including this header namespace http = boost::beast::http; namespace net = boost::asio; namespace ssl = boost::asio::ssl; @@ -101,6 +103,7 @@ class HttpBase : public util::Taggable send_lambda lambda_; protected: + clio::Logger perfLog_{"Performance"}; boost::beast::flat_buffer buffer_; bool @@ -135,8 +138,7 @@ protected: if (!ec_ && ec != boost::asio::error::operation_aborted) { ec_ = ec; - BOOST_LOG_TRIVIAL(info) - << tag() << __func__ << ": " << what << ": " << ec.message(); + perfLog_.info() << tag() << ": " << what << ": " << ec.message(); boost::beast::get_lowest_layer(derived().stream()) .socket() .close(ec); @@ -168,11 +170,11 @@ public: , lambda_(*this) , buffer_(std::move(buffer)) { - BOOST_LOG_TRIVIAL(debug) << tag() << "http session created"; + perfLog_.debug() << tag() << "http session created"; } virtual ~HttpBase() { - BOOST_LOG_TRIVIAL(debug) << tag() << "http session closed"; + perfLog_.debug() << tag() << "http session closed"; } void @@ -234,9 +236,8 @@ public: if (!ip) return; - BOOST_LOG_TRIVIAL(debug) << tag() << "http::" << __func__ - << " received request from ip = " << *ip - << " - posting to WorkQueue"; + perfLog_.debug() << tag() << "Received request from ip = " << *ip + << " - posting to WorkQueue"; auto session = derived().shared_from_this(); @@ -256,7 +257,8 @@ public: dosGuard_, counters_, *ip, - session); + session, + perfLog_); }, dosGuard_.isWhiteListed(*ip))) { @@ -321,7 +323,8 @@ handle_request( DOSGuard& dosGuard, RPC::Counters& counters, std::string const& ip, - std::shared_ptr http) + std::shared_ptr http, + clio::Logger& perfLog) { auto const httpResponse = [&req]( http::status status, @@ -356,9 +359,9 @@ handle_request( try { - BOOST_LOG_TRIVIAL(debug) - << http->tag() - << "http received request from work queue: " << req.body(); + perfLog.debug() << http->tag() + << "http received request from work queue: " + << 
req.body(); boost::json::object request; std::string responseStr = ""; @@ -422,8 +425,8 @@ handle_request( error["request"] = request; result = error; - BOOST_LOG_TRIVIAL(debug) << http->tag() << __func__ - << " Encountered error: " << responseStr; + perfLog.debug() + << http->tag() << "Encountered error: " << responseStr; } else { @@ -457,8 +460,7 @@ handle_request( } catch (std::exception const& e) { - BOOST_LOG_TRIVIAL(error) - << http->tag() << __func__ << " Caught exception : " << e.what(); + perfLog.error() << http->tag() << "Caught exception : " << e.what(); return send(httpResponse( http::status::internal_server_error, "application/json", diff --git a/src/webserver/Listener.h b/src/webserver/Listener.h index 0767274b..272fdb9e 100644 --- a/src/webserver/Listener.h +++ b/src/webserver/Listener.h @@ -1,10 +1,7 @@ #ifndef LISTENER_H #define LISTENER_H -#include -#include -#include - +#include #include #include #include @@ -12,6 +9,10 @@ #include #include +#include +#include +#include + #include class SubscriptionManager; @@ -23,6 +24,7 @@ class Detector using std::enable_shared_from_this< Detector>::shared_from_this; + clio::Logger log_{"WebServer"}; boost::asio::io_context& ioc_; boost::beast::tcp_stream stream_; std::optional> ctx_; @@ -69,8 +71,7 @@ public: if (ec == net::ssl::error::stream_truncated) return; - BOOST_LOG_TRIVIAL(info) - << "Detector failed: " << message << ec.message() << std::endl; + log_.info() << "Detector failed (" << message << "): " << ec.message(); } // Launch the detector @@ -202,6 +203,7 @@ class Listener using std::enable_shared_from_this< Listener>::shared_from_this; + clio::Logger log_{"WebServer"}; boost::asio::io_context& ioc_; std::optional> ctx_; tcp::acceptor acceptor_; @@ -255,9 +257,8 @@ public: acceptor_.bind(endpoint, ec); if (ec) { - BOOST_LOG_TRIVIAL(error) - << "Failed to bind to endpoint: " << endpoint - << ". message: " << ec.message(); + log_.error() << "Failed to bind to endpoint: " << endpoint + << ". 
message: " << ec.message(); throw std::runtime_error("Failed to bind to specified endpoint"); } @@ -265,9 +266,8 @@ public: acceptor_.listen(net::socket_base::max_listen_connections, ec); if (ec) { - BOOST_LOG_TRIVIAL(error) - << "Failed to listen at endpoint: " << endpoint - << ". message: " << ec.message(); + log_.error() << "Failed to listen at endpoint: " << endpoint + << ". message: " << ec.message(); throw std::runtime_error("Failed to listen at specified endpoint"); } } @@ -336,6 +336,7 @@ make_HttpServer( std::shared_ptr etl, DOSGuard& dosGuard) { + static clio::Logger log{"WebServer"}; if (!config.contains("server")) return nullptr; @@ -348,8 +349,8 @@ make_HttpServer( auto const maxQueueSize = serverConfig.valueOr("max_queue_size", 0); // 0 is no limit - BOOST_LOG_TRIVIAL(info) << __func__ << " Number of workers = " << numThreads - << ". Max queue size = " << maxQueueSize; + log.info() << "Number of workers = " << numThreads + << ". Max queue size = " << maxQueueSize; auto server = std::make_shared( ioc, diff --git a/src/webserver/WsBase.h b/src/webserver/WsBase.h index 6e902299..250dc0e2 100644 --- a/src/webserver/WsBase.h +++ b/src/webserver/WsBase.h @@ -1,15 +1,10 @@ #ifndef RIPPLE_REPORTING_WS_BASE_SESSION_H #define RIPPLE_REPORTING_WS_BASE_SESSION_H -#include -#include - -#include -#include - #include #include #include +#include #include #include #include @@ -18,6 +13,13 @@ #include #include +#include +#include + +#include +#include + +// TODO: Consider removing these. Visible to anyone including this header. 
namespace http = boost::beast::http; namespace net = boost::asio; namespace ssl = boost::asio::ssl; @@ -27,8 +29,8 @@ using tcp = boost::asio::ip::tcp; inline void logError(boost::beast::error_code ec, char const* what) { - BOOST_LOG_TRIVIAL(debug) - << __func__ << " : " << what << ": " << ec.message() << "\n"; + static clio::Logger log{"WebServer"}; + log.debug() << what << ": " << ec.message() << "\n"; } inline boost::json::object @@ -48,6 +50,8 @@ getDefaultWsResponse(boost::json::value const& id) class WsBase : public util::Taggable { protected: + clio::Logger log_{"WebServer"}; + clio::Logger perfLog_{"Performance"}; boost::system::error_code ec_; public: @@ -114,8 +118,7 @@ class WsSession : public WsBase, if (!ec_ && ec != boost::asio::error::operation_aborted) { ec_ = ec; - BOOST_LOG_TRIVIAL(info) - << tag() << __func__ << ": " << what << ": " << ec.message(); + perfLog_.info() << tag() << ": " << what << ": " << ec.message(); boost::beast::get_lowest_layer(derived().ws()).socket().close(ec); if (auto manager = subscriptions_.lock(); manager) @@ -147,11 +150,11 @@ public: , counters_(counters) , queue_(queue) { - BOOST_LOG_TRIVIAL(info) << tag() << "session created"; + perfLog_.info() << tag() << "session created"; } virtual ~WsSession() { - BOOST_LOG_TRIVIAL(info) << tag() << "session closed"; + perfLog_.info() << tag() << "session closed"; } // Access the derived class, this is part of @@ -244,7 +247,7 @@ public: if (ec) return wsFail(ec, "accept"); - BOOST_LOG_TRIVIAL(info) << tag() << "accepting new connection"; + perfLog_.info() << tag() << "accepting new connection"; // Read a message do_read(); @@ -287,7 +290,7 @@ public: try { - BOOST_LOG_TRIVIAL(debug) + perfLog_.debug() << tag() << "ws received request from work queue : " << request; auto range = backend_->fetchLedgerRange(); @@ -309,8 +312,7 @@ public: if (!context) { - BOOST_LOG_TRIVIAL(warning) - << tag() << " could not create RPC context"; + perfLog_.warn() << tag() << "Could not create RPC 
context"; return sendError(RPC::RippledError::rpcBAD_SYNTAX); } @@ -344,8 +346,7 @@ public: } catch (std::exception const& e) { - BOOST_LOG_TRIVIAL(error) - << tag() << __func__ << " caught exception : " << e.what(); + perfLog_.error() << tag() << "Caught exception : " << e.what(); return sendError(RPC::RippledError::rpcINTERNAL); } @@ -385,8 +386,7 @@ public: if (!ip) return; - BOOST_LOG_TRIVIAL(info) << tag() << "ws::" << __func__ - << " received request from ip = " << *ip; + perfLog_.info() << tag() << "Received request from ip = " << *ip; auto sendError = [this, ip]( auto error, @@ -399,7 +399,7 @@ public: e["request"] = request; auto responseStr = boost::json::serialize(e); - BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << responseStr; + log_.trace() << responseStr; dosGuard_.add(*ip, responseStr.size()); send(std::move(responseStr)); }; @@ -429,8 +429,7 @@ public: } else { - BOOST_LOG_TRIVIAL(debug) - << tag() << __func__ << " adding to work queue"; + perfLog_.debug() << tag() << "Adding to work queue"; if (!queue_.postCoro( [shared_this = shared_from_this(), diff --git a/unittests/main.cpp b/unittests/Backend.cpp similarity index 97% rename from unittests/main.cpp rename to unittests/Backend.cpp index f3d073ad..c5b1ab20 100644 --- a/unittests/main.cpp +++ b/unittests/Backend.cpp @@ -1,17 +1,23 @@ -#include +#include +#include #include +#include #include -#include +#include #include +#include #include #include -#include -#include -#include -#include +#include -TEST(BackendTest, Basic) +#include + +class BackendTest : public NoLoggerFixture +{ +}; + +TEST_F(BackendTest, Basic) { boost::asio::io_context ioc; std::optional work; @@ -21,7 +27,7 @@ TEST(BackendTest, Basic) boost::asio::spawn( ioc, [&done, &work, &ioc](boost::asio::yield_context yield) { boost::log::core::get()->set_filter( - boost::log::trivial::severity >= boost::log::trivial::warning); + clio::log_severity >= clio::Severity::WARNING); std::string keyspace = "clio_test_" + 
std::to_string(std::chrono::system_clock::now() .time_since_epoch() @@ -41,7 +47,6 @@ TEST(BackendTest, Basic) std::vector configs = {cassandraConfig}; for (auto& config : configs) { - std::cout << keyspace << std::endl; auto backend = Backend::make_Backend(ioc, clio::Config{config}); std::string rawHeader = @@ -107,10 +112,8 @@ TEST(BackendTest, Basic) } { - std::cout << "fetching ledger by sequence" << std::endl; auto retLgr = backend->fetchLedgerBySequence(lgrInfo.seq, yield); - std::cout << "fetched ledger by sequence" << std::endl; ASSERT_TRUE(retLgr.has_value()); EXPECT_EQ(retLgr->seq, lgrInfo.seq); EXPECT_EQ( @@ -147,10 +150,8 @@ TEST(BackendTest, Basic) EXPECT_EQ(seq, lgrInfoNext.seq); } { - std::cout << "fetching ledger by sequence" << std::endl; auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - std::cout << "fetched ledger by sequence" << std::endl; EXPECT_TRUE(retLgr.has_value()); EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); EXPECT_EQ( @@ -802,9 +803,6 @@ TEST(BackendTest, Basic) auto objs, auto accountTx, auto state) { - std::cout - << "writing ledger = " << std::to_string(lgrInfo.seq) - << std::endl; backend->startWrites(); backend->writeLedger( @@ -978,11 +976,6 @@ TEST(BackendTest, Basic) uint32_t limit = 10; page = backend->fetchLedgerPage( page.cursor, seq, limit, false, yield); - std::cout << "fetched a page " << page.objects.size() - << std::endl; - if (page.cursor) - std::cout << ripple::strHex(*page.cursor) - << std::endl; // if (page.cursor) // EXPECT_EQ(page.objects.size(), limit); retObjs.insert( @@ -1007,8 +1000,7 @@ TEST(BackendTest, Basic) } } if (found != (obj.second.size() != 0)) - std::cout << ripple::strHex(obj.first) << std::endl; - ASSERT_EQ(found, obj.second.size() != 0); + ASSERT_EQ(found, obj.second.size() != 0); } }; @@ -1148,15 +1140,12 @@ TEST(BackendTest, Basic) for (auto [seq, diff] : state) { - std::cout << "flatteneing" << std::endl; auto flat = flatten(seq); - std::cout << "flattened" << std::endl; 
checkLedger( lgrInfos[seq], allTxns[seq], flat, flattenAccountTx(seq)); - std::cout << "checked" << std::endl; } } @@ -1168,11 +1157,11 @@ TEST(BackendTest, Basic) EXPECT_EQ(done, true); } -TEST(Backend, cache) +TEST_F(BackendTest, cache) { using namespace Backend; boost::log::core::get()->set_filter( - boost::log::trivial::severity >= boost::log::trivial::warning); + clio::log_severity >= clio::Severity::WARNING); SimpleCache cache; ASSERT_FALSE(cache.isFull()); cache.setFull(); @@ -1410,11 +1399,11 @@ TEST(Backend, cache) } } -TEST(Backend, cacheBackground) +TEST_F(BackendTest, cacheBackground) { using namespace Backend; boost::log::core::get()->set_filter( - boost::log::trivial::severity >= boost::log::trivial::warning); + clio::log_severity >= clio::Severity::WARNING); SimpleCache cache; ASSERT_FALSE(cache.isFull()); ASSERT_EQ(cache.size(), 0); @@ -1814,7 +1803,7 @@ TEST(Backend, cacheBackground) ASSERT_EQ(idx, allObjs.size()); } -TEST(Backend, cacheIntegration) +TEST_F(BackendTest, cacheIntegration) { boost::asio::io_context ioc; std::optional work; @@ -1824,7 +1813,7 @@ TEST(Backend, cacheIntegration) boost::asio::spawn( ioc, [&ioc, &done, &work](boost::asio::yield_context yield) { boost::log::core::get()->set_filter( - boost::log::trivial::severity >= boost::log::trivial::warning); + clio::log_severity >= clio::Severity::WARNING); std::string keyspace = "clio_test_" + std::to_string(std::chrono::system_clock::now() .time_since_epoch() @@ -1844,7 +1833,6 @@ TEST(Backend, cacheIntegration) std::vector configs = {cassandraConfig}; for (auto& config : configs) { - std::cout << keyspace << std::endl; auto backend = Backend::make_Backend(ioc, clio::Config{config}); backend->cache().setFull(); @@ -1927,10 +1915,8 @@ TEST(Backend, cacheIntegration) } { - std::cout << "fetching ledger by sequence" << std::endl; auto retLgr = backend->fetchLedgerBySequence(lgrInfo.seq, yield); - std::cout << "fetched ledger by sequence" << std::endl; ASSERT_TRUE(retLgr.has_value()); 
EXPECT_EQ(retLgr->seq, lgrInfo.seq); EXPECT_EQ( @@ -1967,10 +1953,8 @@ TEST(Backend, cacheIntegration) EXPECT_EQ(seq, lgrInfoNext.seq); } { - std::cout << "fetching ledger by sequence" << std::endl; auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - std::cout << "fetched ledger by sequence" << std::endl; EXPECT_TRUE(retLgr.has_value()); EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); EXPECT_EQ( @@ -2226,8 +2210,6 @@ TEST(Backend, cacheIntegration) return lgrInfo; }; auto writeLedger = [&](auto lgrInfo, auto objs, auto state) { - std::cout << "writing ledger = " - << std::to_string(lgrInfo.seq); backend->startWrites(); backend->writeLedger( @@ -2344,11 +2326,6 @@ TEST(Backend, cacheIntegration) uint32_t limit = 10; page = backend->fetchLedgerPage( page.cursor, seq, limit, false, yield); - std::cout << "fetched a page " << page.objects.size() - << std::endl; - if (page.cursor) - std::cout << ripple::strHex(*page.cursor) - << std::endl; // if (page.cursor) // EXPECT_EQ(page.objects.size(), limit); retObjs.insert( @@ -2372,8 +2349,7 @@ TEST(Backend, cacheIntegration) } } if (found != (obj.second.size() != 0)) - std::cout << ripple::strHex(obj.first) << std::endl; - ASSERT_EQ(found, obj.second.size() != 0); + ASSERT_EQ(found, obj.second.size() != 0); } }; @@ -2437,11 +2413,8 @@ TEST(Backend, cacheIntegration) for (auto [seq, diff] : state) { - std::cout << "flatteneing" << std::endl; auto flat = flatten(seq); - std::cout << "flattened" << std::endl; checkLedger(lgrInfos[seq], flat); - std::cout << "checked" << std::endl; } } diff --git a/unittests/config.cpp b/unittests/Config.cpp similarity index 96% rename from unittests/config.cpp rename to unittests/Config.cpp index 731a1cfd..e3aaebe3 100644 --- a/unittests/config.cpp +++ b/unittests/Config.cpp @@ -1,9 +1,8 @@ #include +#include #include #include -#include -#include #include #include @@ -34,17 +33,10 @@ constexpr static auto JSONData = R"JSON( } )JSON"; -class ConfigTest : public ::testing::Test +class 
ConfigTest : public NoLoggerFixture { protected: Config cfg{json::parse(JSONData)}; - - void - SetUp() override - { - // disable logging in test - core::get()->set_logging_enabled(false); - } }; TEST_F(ConfigTest, SanityCheck) diff --git a/unittests/Logger.cpp b/unittests/Logger.cpp new file mode 100644 index 00000000..9042c910 --- /dev/null +++ b/unittests/Logger.cpp @@ -0,0 +1,49 @@ +#include +using namespace clio; + +// Used as a fixture for tests with enabled logging +class LoggerTest : public LoggerFixture +{ +}; + +// Used as a fixture for tests with disabled logging +class NoLoggerTest : public NoLoggerFixture +{ +}; + +TEST_F(LoggerTest, Basic) +{ + Logger log{"General"}; + log.info() << "Info line logged"; + checkEqual("General:NFO Info line logged"); + + LogService::debug() << "Debug line with numbers " << 12345; + checkEqual("General:DBG Debug line with numbers 12345"); + + LogService::warn() << "Warning is logged"; + checkEqual("General:WRN Warning is logged"); +} + +TEST_F(LoggerTest, Filtering) +{ + Logger log{"General"}; + log.trace() << "Should not be logged"; + checkEmpty(); + + log.warn() << "Warning is logged"; + checkEqual("General:WRN Warning is logged"); + + Logger tlog{"Trace"}; + tlog.trace() << "Trace line logged for 'Trace' component"; + checkEqual("Trace:TRC Trace line logged for 'Trace' component"); +} + +TEST_F(NoLoggerTest, Basic) +{ + Logger log{"Trace"}; + log.trace() << "Nothing"; + checkEmpty(); + + LogService::fatal() << "Still nothing"; + checkEmpty(); +} diff --git a/unittests/util/Fixtures.h b/unittests/util/Fixtures.h new file mode 100644 index 00000000..649e22f9 --- /dev/null +++ b/unittests/util/Fixtures.h @@ -0,0 +1,93 @@ +#ifndef CLIO_UNITTEST_FIXTURES_H +#define CLIO_UNITTEST_FIXTURES_H + +#include +#include + +#include +#include + +/** + * @brief Fixture with LogService support. 
+ */ +class LoggerFixture : public ::testing::Test +{ + /** + * @brief A simple string buffer that can be used to mock std::cout for + * console logging. + */ + class FakeBuffer final : public std::stringbuf + { + public: + std::string + getStrAndReset() + { + auto value = str(); + str(""); + return value; + } + }; + + FakeBuffer buffer_; + std::ostream stream_ = std::ostream{&buffer_}; + +protected: + // Simulates the `LogService::init(config)` call + void + SetUp() override + { + static std::once_flag once_; + std::call_once(once_, [] { + boost::log::add_common_attributes(); + boost::log::register_simple_formatter_factory( + "Severity"); + }); + + namespace src = boost::log::sources; + namespace keywords = boost::log::keywords; + namespace sinks = boost::log::sinks; + namespace expr = boost::log::expressions; + auto core = boost::log::core::get(); + + core->remove_all_sinks(); + boost::log::add_console_log( + stream_, keywords::format = "%Channel%:%Severity% %Message%"); + auto min_severity = expr::channel_severity_filter( + clio::log_channel, clio::log_severity); + min_severity["General"] = clio::Severity::DEBUG; + min_severity["Trace"] = clio::Severity::TRACE; + core->set_filter(min_severity); + core->set_logging_enabled(true); + } + + void + checkEqual(std::string expected) + { + auto value = buffer_.getStrAndReset(); + ASSERT_EQ(value, expected + '\n'); + } + + void + checkEmpty() + { + ASSERT_TRUE(buffer_.getStrAndReset().empty()); + } +}; + +/** + * @brief Fixture with LogService support but completely disabled logging. + * + * This is meant to be used as a base for other fixtures. + */ +class NoLoggerFixture : public LoggerFixture +{ +protected: + void + SetUp() override + { + LoggerFixture::SetUp(); + boost::log::core::get()->set_logging_enabled(false); + } +}; + +#endif // CLIO_UNITTEST_FIXTURES_H