From aa64bb7b6b9ac2eb69625d00a434ae1353e74ba5 Mon Sep 17 00:00:00 2001
From: Peter Chen <34582813+PeterChen13579@users.noreply.github.com>
Date: Wed, 8 Oct 2025 14:58:05 -0400
Subject: [PATCH 1/3] refactor: Keyspace comments (#2684)

Co-authored-by: Ayaz Salikhov
---
 src/data/BackendFactory.hpp                   |  3 +-
 src/data/CassandraBackend.hpp                 |  5 ++-
 src/data/KeyspaceBackend.hpp                  | 41 ++++++++++---------
 src/data/cassandra/CassandraBackendFamily.hpp | 26 ++++++------
 src/data/cassandra/SettingsProvider.cpp       |  2 +-
 src/data/cassandra/impl/Cluster.cpp           |  2 +-
 src/data/cassandra/impl/Cluster.hpp           | 24 ++++-------
 .../data/cassandra/BackendTests.cpp           |  6 ++-
 .../CassandraMigrationManagerTests.cpp        |  7 ++--
 9 files changed, 58 insertions(+), 58 deletions(-)

diff --git a/src/data/BackendFactory.hpp b/src/data/BackendFactory.hpp
index 3c7710ec..fe395895 100644
--- a/src/data/BackendFactory.hpp
+++ b/src/data/BackendFactory.hpp
@@ -46,6 +46,7 @@ namespace data {
 inline std::shared_ptr<BackendInterface>
 makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface& cache)
 {
+    using namespace cassandra::impl;
     static util::Logger const log{"Backend"};  // NOLINT(readability-identifier-naming)
     LOG(log.info()) << "Constructing BackendInterface";
 
@@ -56,7 +57,7 @@ makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheI
 
     if (boost::iequals(type, "cassandra")) {
         auto const cfg = config.getObject("database." + type);
-        if (cfg.getValueView("provider").asString() == toString(cassandra::impl::Provider::Keyspace)) {
+        if (providerFromString(cfg.getValueView("provider").asString()) == Provider::Keyspace) {
             backend = std::make_shared<data::cassandra::KeyspaceBackend>(
                 data::cassandra::SettingsProvider{cfg}, cache, readOnly
             );
diff --git a/src/data/CassandraBackend.hpp b/src/data/CassandraBackend.hpp
index d7de8b40..e9b716b6 100644
--- a/src/data/CassandraBackend.hpp
+++ b/src/data/CassandraBackend.hpp
@@ -189,10 +189,11 @@ public:
         auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);
 
         for (auto i = 0u; i < nftIDs.size(); i++) {
-            if (auto const maybeRow = nftInfos[i].template get<std::uint32_t, ripple::AccountID, bool>(); maybeRow) {
+            if (auto const maybeRow = nftInfos[i].template get<std::uint32_t, ripple::AccountID, bool>();
+                maybeRow.has_value()) {
                 auto [seq, owner, isBurned] = *maybeRow;
                 NFT nft(nftIDs[i], seq, owner, isBurned);
-                if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
+                if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri.has_value())
                     nft.uri = *maybeUri;
                 ret.nfts.push_back(nft);
             }
diff --git a/src/data/KeyspaceBackend.hpp b/src/data/KeyspaceBackend.hpp
index 4358a71f..eaf74dd9 100644
--- a/src/data/KeyspaceBackend.hpp
+++ b/src/data/KeyspaceBackend.hpp
@@ -57,9 +57,9 @@ namespace data::cassandra {
 /**
  * @brief Implements @ref CassandraBackendFamily for Keyspace
  *
- * @tparam SettingsProviderType The settings provider type to use
- * @tparam ExecutionStrategyType The execution strategy type to use
- * @tparam FetchLedgerCacheType The ledger header cache type to use
+ * @tparam SettingsProviderType The settings provider type
+ * @tparam ExecutionStrategyType The execution strategy type
+ * @tparam FetchLedgerCacheType The ledger header cache type
  */
 template <
     SomeSettingsProvider SettingsProviderType,
@@ -101,9 +101,9 @@ public:
         // !range_.has_value() means the table 'ledger_range' is not populated;
         // This would be the first write to the table.
        // In this case, insert both min_sequence/max_sequence range into the table.
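        // (ledger_range keeps one row per is_latest_ledger flag: the false row tracks the minimum sequence, the true row the maximum.)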
-        if (not(range_.has_value())) {
-            executor_.writeSync(schema_->insertLedgerRange, false, ledgerSequence_);
-            executor_.writeSync(schema_->insertLedgerRange, true, ledgerSequence_);
+        if (not range_.has_value()) {
+            executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger = */ false, ledgerSequence_);
+            executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger = */ true, ledgerSequence_);
         }
 
         if (not this->executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
@@ -130,7 +130,7 @@ public:
             // Keyspace and ScyllaDB uses the same logic for taxon-filtered queries
             nftIDs = fetchNFTIDsByTaxon(issuer, *taxon, limit, cursorIn, yield);
         } else {
-            // --- Amazon Keyspaces Workflow for non-taxon queries ---
+            // Amazon Keyspaces Workflow for non-taxon queries
             auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
             auto const startTokenID = cursorIn.value_or(ripple::uint256(0));
 
@@ -140,8 +140,8 @@ public:
             firstQuery.bindAt(3, Limit{limit});
 
             auto const firstRes = executor_.read(yield, firstQuery);
-            if (firstRes) {
-                for (auto const [nftID] : extract<ripple::uint256>(firstRes.value()))
+            if (firstRes.has_value()) {
+                for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
                     nftIDs.push_back(nftID);
             }
 
@@ -152,8 +152,8 @@ public:
                 secondQuery.bindAt(2, Limit{remainingLimit});
 
                 auto const secondRes = executor_.read(yield, secondQuery);
-                if (secondRes) {
-                    for (auto const [nftID] : extract<ripple::uint256>(secondRes.value()))
+                if (secondRes.has_value()) {
+                    for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
                         nftIDs.push_back(nftID);
                 }
             }
@@ -163,7 +163,7 @@ public:
 
     /**
      * @brief (Unsupported in Keyspaces) Fetches account root object indexes by page.
-     * * @note Loading the cache by enumerating all accounts is currently unsupported by the AWS Keyspaces backend.
+     * @note Loading the cache by enumerating all accounts is currently unsupported by the AWS Keyspaces backend.
      *       This function's logic relies on "PER PARTITION LIMIT 1", which Keyspaces does not support, and there is
      *       no efficient alternative. This is acceptable as the cache is primarily loaded via diffs. Calling this
      *       function will throw an exception.
@@ -203,8 +203,8 @@ private:
         statement.bindAt(3, Limit{limit});
 
         auto const res = executor_.read(yield, statement);
-        if (res && res.value().hasRows()) {
-            for (auto const [nftID] : extract<ripple::uint256>(res.value()))
+        if (res.has_value() && res->hasRows()) {
+            for (auto const [nftID] : extract<ripple::uint256>(*res))
                 nftIDs.push_back(nftID);
         }
         return nftIDs;
@@ -229,8 +229,8 @@ private:
         firstQuery.bindAt(3, Limit{limit});
 
         auto const firstRes = executor_.read(yield, firstQuery);
-        if (firstRes) {
-            for (auto const [nftID] : extract<ripple::uint256>(firstRes.value()))
+        if (firstRes.has_value()) {
+            for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
                 nftIDs.push_back(nftID);
         }
 
@@ -241,8 +241,8 @@ private:
             secondQuery.bindAt(2, Limit{remainingLimit});
 
             auto const secondRes = executor_.read(yield, secondQuery);
-            if (secondRes) {
-                for (auto const [nftID] : extract<ripple::uint256>(secondRes.value()))
+            if (secondRes.has_value()) {
+                for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
                     nftIDs.push_back(nftID);
             }
         }
@@ -291,10 +291,11 @@ private:
         // Combine the results into final NFT objects.
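        // nftInfos and nftUris come from readEach calls whose statements were built from nftIDs in order, so index i refers to the same token in all three containers.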
         for (auto i = 0u; i < nftIDs.size(); ++i) {
-            if (auto const maybeRow = nftInfos[i].template get<std::uint32_t, ripple::AccountID, bool>(); maybeRow) {
+            if (auto const maybeRow = nftInfos[i].template get<std::uint32_t, ripple::AccountID, bool>();
+                maybeRow.has_value()) {
                 auto [seq, owner, isBurned] = *maybeRow;
                 NFT nft(nftIDs[i], seq, owner, isBurned);
-                if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
+                if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri.has_value())
                     nft.uri = *maybeUri;
                 ret.nfts.push_back(nft);
             }
diff --git a/src/data/cassandra/CassandraBackendFamily.hpp b/src/data/cassandra/CassandraBackendFamily.hpp
index aec50fdf..ad917e12 100644
--- a/src/data/cassandra/CassandraBackendFamily.hpp
+++ b/src/data/cassandra/CassandraBackendFamily.hpp
@@ -70,10 +70,10 @@ namespace data::cassandra {
  *
  * Note: This is a safer and more correct rewrite of the original implementation of the backend.
  *
- * @tparam SettingsProviderType The settings provider type to use
- * @tparam ExecutionStrategyType The execution strategy type to use
- * @tparam SchemaType The Schema type to use
- * @tparam FetchLedgerCacheType The ledger header cache type to use
+ * @tparam SettingsProviderType The settings provider type
+ * @tparam ExecutionStrategyType The execution strategy type
+ * @tparam SchemaType The Schema type
+ * @tparam FetchLedgerCacheType The ledger header cache type
  */
 template <
     SomeSettingsProvider SettingsProviderType,
@@ -100,8 +100,8 @@ public:
     /**
      * @brief Create a new cassandra/scylla backend instance.
      *
-     * @param settingsProvider The settings provider to use
-     * @param cache The ledger cache to use
+     * @param settingsProvider The settings provider
+     * @param cache The ledger cache
      * @param readOnly Whether the database should be in readonly mode
      */
     CassandraBackendFamily(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
@@ -111,18 +111,18 @@ public:
         , handle_{settingsProvider_.getSettings()}
         , executor_{settingsProvider_.getSettings(), handle_}
     {
-        if (auto const res = handle_.connect(); not res)
+        if (auto const res = handle_.connect(); not res.has_value())
             throw std::runtime_error("Could not connect to database: " + res.error());
 
         if (not readOnly) {
-            if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
+            if (auto const res = handle_.execute(schema_.createKeyspace); not res.has_value()) {
                 // on datastax, creation of keyspaces can be configured to only be done thru the admin
                 // interface. this does not mean that the keyspace does not already exist tho.
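                // hence an UNAUTHORIZED error alone is not treated as fatal; a keyspace that is truly missing will still fail when the schema is created below.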
                if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
                    throw std::runtime_error("Could not create keyspace: " + res.error());
            }
 
-            if (auto const res = handle_.executeEach(schema_.createSchema); not res)
+            if (auto const res = handle_.executeEach(schema_.createSchema); not res.has_value())
                throw std::runtime_error("Could not create schema: " + res.error());
        }
 
@@ -233,10 +233,10 @@ public:
     std::optional<std::uint32_t>
     fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
     {
-        if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res) {
-            if (auto const& result = res.value(); result) {
-                if (auto const maybeValue = result.template get<std::uint32_t>(); maybeValue)
-                    return maybeValue;
+        if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res.has_value()) {
+            if (auto const& rows = *res; rows) {
+                if (auto const maybeRow = rows.template get<std::uint32_t>(); maybeRow.has_value())
+                    return maybeRow;
 
                 LOG(log_.error()) << "Could not fetch latest ledger - no rows";
                 return std::nullopt;
diff --git a/src/data/cassandra/SettingsProvider.cpp b/src/data/cassandra/SettingsProvider.cpp
index 889633e4..4ce67c7f 100644
--- a/src/data/cassandra/SettingsProvider.cpp
+++ b/src/data/cassandra/SettingsProvider.cpp
@@ -97,7 +97,7 @@ SettingsProvider::parseSettings() const
     settings.coreConnectionsPerHost = config_.get<uint32_t>("core_connections_per_host");
     settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
     settings.writeBatchSize = config_.get<std::size_t>("write_batch_size");
-    settings.provider = config_.get<std::string>("provider");
+    settings.provider = impl::providerFromString(config_.get<std::string>("provider"));
 
     if (config_.getValueView("connect_timeout").hasValue()) {
         auto const connectTimeoutSecond = config_.get<uint32_t>("connect_timeout");
diff --git a/src/data/cassandra/impl/Cluster.cpp b/src/data/cassandra/impl/Cluster.cpp
index 7dcf7e9a..5664e7c1 100644
--- a/src/data/cassandra/impl/Cluster.cpp
+++ b/src/data/cassandra/impl/Cluster.cpp
@@ -61,7 +61,7 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), k
     cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());
 
     // TODO: AWS keyspace reads should be local_one to save cost
-    if (settings.provider == toString(cassandra::impl::Provider::Keyspace)) {
+    if (settings.provider == cassandra::impl::Provider::Keyspace) {
         if (auto const rc = cass_cluster_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM); rc != CASS_OK) {
             throw std::runtime_error(fmt::format("Error setting keyspace consistency: {}", cass_error_desc(rc)));
         }
diff --git a/src/data/cassandra/impl/Cluster.hpp b/src/data/cassandra/impl/Cluster.hpp
index bd137650..3af21df9 100644
--- a/src/data/cassandra/impl/Cluster.hpp
+++ b/src/data/cassandra/impl/Cluster.hpp
@@ -20,6 +20,7 @@
 #pragma once
 
 #include "data/cassandra/impl/ManagedObject.hpp"
+#include "util/Assert.hpp"
 #include "util/log/Logger.hpp"
 
 #include
@@ -31,29 +32,22 @@
 #include
 #include
 #include
-#include <utility>
 #include
 
 namespace data::cassandra::impl {
 
-namespace {
-
 enum class Provider { Cassandra, Keyspace };
 
-inline std::string
-toString(Provider provider)
+inline Provider
+providerFromString(std::string const& provider)
 {
-    switch (provider) {
-        case Provider::Cassandra:
-            return "cassandra";
-        case Provider::Keyspace:
-            return "aws_keyspace";
-    }
-    std::unreachable();
+    ASSERT(
+        provider == "cassandra" || provider == "aws_keyspace",
+        "Provider type must be one of 'cassandra' or 'aws_keyspace'"
+    );
+    return provider == "cassandra" ? Provider::Cassandra : Provider::Keyspace;
 }
 
-} // namespace
-
 // TODO: move Settings to public interface, not impl
 
 /**
@@ -109,7 +103,7 @@ struct Settings {
     std::size_t writeBatchSize = kDEFAULT_BATCH_SIZE;
 
     /** @brief Provider to know if we are using scylladb or keyspace */
-    std::string provider = toString(kDEFAULT_PROVIDER);
+    Provider provider = kDEFAULT_PROVIDER;
 
     /** @brief Size of the IO queue */
     std::optional<uint32_t> queueSizeIO = std::nullopt;  // NOLINT(readability-redundant-member-init)
diff --git a/tests/integration/data/cassandra/BackendTests.cpp b/tests/integration/data/cassandra/BackendTests.cpp
index 3e5a6ac6..04909575 100644
--- a/tests/integration/data/cassandra/BackendTests.cpp
+++ b/tests/integration/data/cassandra/BackendTests.cpp
@@ -85,15 +85,17 @@ using namespace data::cassandra;
 
 class BackendCassandraTestBase : public SyncAsioContextTest, public WithPrometheus {
 protected:
+    static constexpr auto kCASSANDRA = "cassandra";
+
     ClioConfigDefinition cfg_{
-        {"database.type", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
+        {"database.type", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
         {"database.cassandra.contact_points",
         ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendHost)},
         {"database.cassandra.secure_connect_bundle", ConfigValue{ConfigType::String}.optional()},
         {"database.cassandra.port", ConfigValue{ConfigType::Integer}.optional()},
         {"database.cassandra.keyspace",
         ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendKeyspace)},
-        {"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
+        {"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
         {"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
         {"database.cassandra.table_prefix", ConfigValue{ConfigType::String}.optional()},
         {"database.cassandra.max_write_requests_outstanding", ConfigValue{ConfigType::Integer}.defaultValue(10'000)},
diff --git a/tests/integration/migration/cassandra/CassandraMigrationManagerTests.cpp b/tests/integration/migration/cassandra/CassandraMigrationManagerTests.cpp
index 8b0325a2..482639e3 100644
--- a/tests/integration/migration/cassandra/CassandraMigrationManagerTests.cpp
+++ b/tests/integration/migration/cassandra/CassandraMigrationManagerTests.cpp
@@ -95,14 +95,15 @@ class MigrationCassandraSimpleTest : public WithPrometheus {
     }
 
 protected:
-    ClioConfigDefinition cfg_{
+    static constexpr auto kCASSANDRA = "cassandra";
 
-        {{"database.type", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
+    ClioConfigDefinition cfg_{
+        {{"database.type", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
         {"database.cassandra.contact_points",
         ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendHost)},
         {"database.cassandra.keyspace",
         ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendKeyspace)},
-        {"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
+        {"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
         {"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
         {"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
         {"database.cassandra.connect_timeout", ConfigValue{ConfigType::Integer}.defaultValue(2)},

From b4fb3e42b88548c819f87f89b69565fb03a46e82 Mon Sep 17 00:00:00 2001
From: Ayaz Salikhov
Date: Thu, 9 Oct 2025 14:48:29 +0100
Subject: [PATCH 2/3] chore: Publish RCs as non-draft (#2685)

---
 .github/workflows/release.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 9cb099ca..42bc9108 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -51,9 +51,9 @@ jobs:
         with:
           overwrite_release: false
           prerelease: ${{ contains(github.ref_name, '-') }}
-          title: "${{ github.ref_name}}"
+          title: "${{ github.ref_name }}"
           version: "${{ github.ref_name }}"
           header: >
            ${{ contains(github.ref_name, '-') && '> **Note:** Please remember that this is a release candidate and it is not recommended for production use.' || '' }}
           generate_changelog: ${{ !contains(github.ref_name, '-') }}
-          draft: true
+          draft: ${{ !contains(github.ref_name, '-') }}

From dabaa5bf800f6df1bb0a19b35a7915172337f5b6 Mon Sep 17 00:00:00 2001
From: Ayaz Salikhov
Date: Thu, 9 Oct 2025 16:51:55 +0100
Subject: [PATCH 3/3] fix: Drop dynamic loggers to fix memory leak (#2686)

---
 src/util/config/ConfigConstraints.hpp       | 20 ++++++------
 src/util/log/Logger.cpp                     | 31 ++++++++++++++-----
 src/util/log/Logger.hpp                     | 11 +++----
 tests/common/util/LoggerFixtures.cpp        |  2 +-
 tests/unit/util/config/ConfigValueTests.cpp |  5 ++--
 tests/unit/util/log/LogServiceInitTests.cpp |  2 +-
 tests/unit/util/log/LoggerTests.cpp         | 33 +++++++++++++++++++++
 7 files changed, 78 insertions(+), 26 deletions(-)

diff --git a/src/util/config/ConfigConstraints.hpp b/src/util/config/ConfigConstraints.hpp
index 3ee89ee9..211c4ca9 100644
--- a/src/util/config/ConfigConstraints.hpp
+++ b/src/util/config/ConfigConstraints.hpp
@@ -45,7 +45,7 @@ class ConfigValue;
 /**
  * @brief specific values that are accepted for logger levels in config.
  */
-static constexpr std::array<char const*, 6> kLOG_LEVELS = {
+static constexpr std::array<std::string_view, 6> kLOG_LEVELS = {
     "trace",
     "debug",
     "info",
     "warning",
     "error",
     "fatal",
 };
 
 /**
  * @brief specific values that are accepted for logger tag style in config.
  */
-static constexpr std::array<char const*, 5> kLOG_TAGS = {
+static constexpr std::array<std::string_view, 5> kLOG_TAGS = {
     "int",
     "uint",
     "null",
     "none",
     "uuid",
 };
 
 /**
  * @brief specific values that are accepted for cache loading in config.
  */
-static constexpr std::array<char const*, 3> kLOAD_CACHE_MODE = {
+static constexpr std::array<std::string_view, 3> kLOAD_CACHE_MODE = {
     "sync",
     "async",
     "none",
 };
 
 /**
  * @brief specific values that are accepted for database type in config.
  */
-static constexpr std::array<char const*, 1> kDATABASE_TYPE = {"cassandra"};
+static constexpr std::array<std::string_view, 1> kDATABASE_TYPE = {"cassandra"};
 
 /**
  * @brief specific values that are accepted for server's processing_policy in config.
  */
-static constexpr std::array<char const*, 2> kPROCESSING_POLICY = {"parallel", "sequent"};
+static constexpr std::array<std::string_view, 2> kPROCESSING_POLICY = {"parallel", "sequent"};
 
 /**
  * @brief specific values that are accepted for database provider in config.
  */
-static constexpr std::array<char const*, 2> kPROVIDER = {"cassandra", "aws_keyspace"};
+static constexpr std::array<std::string_view, 2> kPROVIDER = {"cassandra", "aws_keyspace"};
 
 /**
  * @brief An interface to enforce constraints on certain values within ClioConfigDefinition.
@@ -123,7 +123,7 @@ protected:
      */
     template <typename Value, std::size_t ArrSize>
     constexpr std::string
-    makeErrorMsg(std::string_view key, Value const& value, std::array<char const*, ArrSize> arr) const
+    makeErrorMsg(std::string_view key, Value const& value, std::array<std::string_view, ArrSize> arr) const
     {
         // Extract the value from the variant
         auto const valueStr = std::visit([](auto const& v) { return fmt::format("{}", v); }, value);
@@ -271,7 +271,7 @@ public:
      * @param key The key of the ConfigValue that has this constraint
      * @param arr The value that has this constraint must be of the values in arr
      */
-    constexpr OneOf(std::string_view key, std::array<char const*, ArrSize> arr) : key_{key}, arr_{arr}
+    constexpr OneOf(std::string_view key, std::array<std::string_view, ArrSize> arr) : key_{key}, arr_{arr}
     {
     }
@@ -318,7 +318,7 @@ private:
     print(std::ostream& stream) const override
     {
         std::string valuesStream;
-        std::ranges::for_each(arr_, [&valuesStream](std::string const& elem) {
+        std::ranges::for_each(arr_, [&valuesStream](std::string_view elem) {
             valuesStream += fmt::format(" `{}`,", elem);
         });
         // replace the last "," with "."
     }
 
     std::string_view key_;
-    std::array<char const*, ArrSize> arr_;
+    std::array<std::string_view, ArrSize> arr_;
 };
 
 /**
diff --git a/src/util/log/Logger.cpp b/src/util/log/Logger.cpp
index 512842ad..5a3c2f77 100644
--- a/src/util/log/Logger.cpp
+++ b/src/util/log/Logger.cpp
@@ -220,10 +220,10 @@ LogService::createFileSink(FileLoggingParams const& params, std::string const& f
  * @param defaultSeverity The default severity level to use if not overridden.
  * @return A map of channel names to their minimum severity levels, or an error message if parsing fails.
  */
-static std::expected<std::unordered_map<std::string, Severity>, std::string>
+static std::expected<std::unordered_map<std::string_view, Severity>, std::string>
 getMinSeverity(config::ClioConfigDefinition const& config, Severity defaultSeverity)
 {
-    std::unordered_map<std::string, Severity> minSeverity;
+    std::unordered_map<std::string_view, Severity> minSeverity;
     for (auto const& channel : Logger::kCHANNELS)
         minSeverity[channel] = defaultSeverity;
 
@@ -284,13 +284,15 @@ LogServiceState::reset()
 }
 
 std::shared_ptr<spdlog::logger>
-LogServiceState::registerLogger(std::string const& channel, std::optional<Severity> severity)
+LogServiceState::registerLogger(std::string_view channel, std::optional<Severity> severity)
 {
     if (not initialized_) {
         throw std::logic_error("LogService is not initialized");
     }
 
-    std::shared_ptr<spdlog::logger> existingLogger = spdlog::get(channel);
+    std::string const channelStr{channel};
+
+    std::shared_ptr<spdlog::logger> existingLogger = spdlog::get(channelStr);
     if (existingLogger != nullptr) {
         if (severity.has_value())
             existingLogger->set_level(toSpdlogLevel(*severity));
@@ -300,10 +302,10 @@
     std::shared_ptr<spdlog::logger> logger;
     if (isAsync_) {
         logger = std::make_shared<spdlog::async_logger>(
-            channel, sinks_.begin(), sinks_.end(), spdlog::thread_pool(), spdlog::async_overflow_policy::block
+            channelStr, sinks_.begin(), sinks_.end(), spdlog::thread_pool(), spdlog::async_overflow_policy::block
         );
     } else {
-        logger = std::make_shared<spdlog::logger>(channel, sinks_.begin(), sinks_.end());
+        logger = std::make_shared<spdlog::logger>(channelStr, sinks_.begin(), sinks_.end());
     }
 
     logger->set_level(toSpdlogLevel(severity.value_or(defaultSeverity_)));
@@ -427,10 +429,25 @@ LogServiceState::replaceSinks(std::vector<std::shared_ptr<spdlog::sinks::sink>>
     spdlog::apply_all([](std::shared_ptr<spdlog::logger> logger) { logger->sinks() = sinks_; });
 }
 
-Logger::Logger(std::string channel) : logger_(LogServiceState::registerLogger(channel))
+Logger::Logger(std::string_view const channel) : logger_(LogServiceState::registerLogger(channel))
 {
 }
 
+Logger::~Logger()
+{
+    // One reference is held by logger_ and the other by spdlog registry
+    static constexpr size_t kLAST_LOGGER_REF_COUNT = 2;
+
+    if (logger_ == nullptr) {
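+        // A moved-from Logger holds no spdlog handle, so there is nothing to drop.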
+        return;
+    }
+
+    bool const isDynamic = !std::ranges::contains(kCHANNELS, logger_->name());
+    if (isDynamic && logger_.use_count() == kLAST_LOGGER_REF_COUNT) {
+        spdlog::drop(logger_->name());
+    }
+}
+
 Logger::Pump::Pump(std::shared_ptr<spdlog::logger> logger, Severity sev, SourceLocationType const& loc)
     : logger_(std::move(logger))
     , severity_(sev)
diff --git a/src/util/log/Logger.hpp b/src/util/log/Logger.hpp
index 7ba17b71..37062589 100644
--- a/src/util/log/Logger.hpp
+++ b/src/util/log/Logger.hpp
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include <string_view>
 #include
 
 // We forward declare spdlog::logger and spdlog::sinks::sink
@@ -91,7 +92,7 @@ enum class Severity {
  * otherwise. See @ref LogService::init() for setup of the logging core and
  * severity levels for each channel.
  */
-class Logger final {
+class Logger {
     std::shared_ptr<spdlog::logger> logger_;
 
     friend class LogService;  // to expose the Pump interface
@@ -145,7 +146,7 @@ class Logger final {
     };
 
 public:
-    static constexpr std::array kCHANNELS = {
+    static constexpr std::array kCHANNELS = {
         "General",
         "WebServer",
         "Backend",
@@ -165,10 +166,10 @@ public:
     *
     * @param channel The channel this logger will report into.
     */
-    Logger(std::string channel);
+    Logger(std::string_view const channel);
 
     Logger(Logger const&) = default;
-    ~Logger() = default;
+    ~Logger();
 
     Logger(Logger&&) = default;
     Logger&
@@ -291,7 +292,7 @@ protected:
     * @return Shared pointer to the registered spdlog logger
     */
    static std::shared_ptr<spdlog::logger>
-   registerLogger(std::string const& channel, std::optional<Severity> severity = std::nullopt);
+   registerLogger(std::string_view channel, std::optional<Severity> severity = std::nullopt);
 
 protected:
    static bool isAsync_;  // NOLINT(readability-identifier-naming)
diff --git a/tests/common/util/LoggerFixtures.cpp b/tests/common/util/LoggerFixtures.cpp
index 23a8710e..50cd3b12 100644
--- a/tests/common/util/LoggerFixtures.cpp
+++ b/tests/common/util/LoggerFixtures.cpp
@@ -35,7 +35,7 @@ LoggerFixture::init()
 {
     util::LogServiceState::init(false, util::Severity::FTL, {});
 
-    std::ranges::for_each(util::Logger::kCHANNELS, [](char const* channel) {
+    std::ranges::for_each(util::Logger::kCHANNELS, [](std::string_view const channel) {
         util::LogService::registerLogger(channel);
     });
 
diff --git a/tests/unit/util/config/ConfigValueTests.cpp b/tests/unit/util/config/ConfigValueTests.cpp
index 492466ca..84963963 100644
--- a/tests/unit/util/config/ConfigValueTests.cpp
+++ b/tests/unit/util/config/ConfigValueTests.cpp
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include <string_view>
 
 using namespace util::config;
 
@@ -164,7 +165,7 @@ TEST_F(ConstraintTest, SetValuesOnPortConstraint)
 
 TEST_F(ConstraintTest, OneOfConstraintOneValue)
 {
-    std::array<char const*, 1> const arr = {"tracer"};
+    std::array<std::string_view, 1> const arr = {"tracer"};
     auto const databaseConstraint{OneOf{"database.type", arr}};
     EXPECT_FALSE(databaseConstraint.checkConstraint("tracer").has_value());
 
@@ -180,7 +181,7 @@ TEST_F(ConstraintTest, OneOfConstraintOneValue)
 
 TEST_F(ConstraintTest, OneOfConstraint)
 {
-    std::array<char const*, 3> const arr = {"123", "trace", "haha"};
+    std::array<std::string_view, 3> const arr = {"123", "trace", "haha"};
     auto const oneOfCons{OneOf{"log.level", arr}};
     EXPECT_FALSE(oneOfCons.checkConstraint("trace").has_value());
 
diff --git a/tests/unit/util/log/LogServiceInitTests.cpp b/tests/unit/util/log/LogServiceInitTests.cpp
index c2587bc8..33d08f4f 100644
--- a/tests/unit/util/log/LogServiceInitTests.cpp
+++ b/tests/unit/util/log/LogServiceInitTests.cpp
@@ -101,7 +101,7 @@ TEST_F(LogServiceInitTests, DefaultLogLevel)
     EXPECT_TRUE(LogService::init(config_));
     std::string const logString = "some log";
log"; - for (auto const& channel : Logger::kCHANNELS) { + for (std::string_view const channel : Logger::kCHANNELS) { Logger const log{channel}; log.trace() << logString; auto loggerStr = getLoggerString(); diff --git a/tests/unit/util/log/LoggerTests.cpp b/tests/unit/util/log/LoggerTests.cpp index b964198b..754e774b 100644 --- a/tests/unit/util/log/LoggerTests.cpp +++ b/tests/unit/util/log/LoggerTests.cpp @@ -21,11 +21,23 @@ #include "util/log/Logger.hpp" #include +#include +#include #include #include using namespace util; +namespace { +size_t +loggersNum() +{ + size_t counter = 0; + spdlog::apply_all([&counter](std::shared_ptr) { ++counter; }); + return counter; +} +} // namespace + // Used as a fixture for tests with enabled logging class LoggerTest : public LoggerFixture {}; @@ -71,3 +83,24 @@ TEST_F(LoggerTest, LOGMacro) EXPECT_TRUE(computeCalled); } #endif + +TEST_F(LoggerTest, ManyDynamicLoggers) +{ + static constexpr size_t kNUM_LOGGERS = 10'000; + + auto initialLoggers = loggersNum(); + + for (size_t i = 0; i < kNUM_LOGGERS; ++i) { + std::string const loggerName = "DynamicLogger" + std::to_string(i); + + Logger log{loggerName}; + log.info() << "Logger number " << i; + ASSERT_EQ(getLoggerString(), "inf:" + loggerName + " - Logger number " + std::to_string(i) + "\n"); + + Logger copy = log; + copy.info() << "Copy of logger number " << i; + ASSERT_EQ(getLoggerString(), "inf:" + loggerName + " - Copy of logger number " + std::to_string(i) + "\n"); + } + + ASSERT_EQ(loggersNum(), initialLoggers); +}