Compare commits

...

15 Commits

Author | SHA1 | Message | Date
Alex Kremer | d053a1f8c8 | Merge branch 'develop' into release/2.6.0 | 2025-10-20 14:05:37 +01:00
github-actions[bot] | 2de49b4d33 | style: clang-tidy auto fixes (#2706) | 2025-10-20 10:41:59 +01:00
Ayaz Salikhov | 3de2bf2910 | ci: Update pre-commit workflow to latest version (#2702) | 2025-10-18 07:55:14 +01:00
Peter Chen | 7538efb01e | fix: Add mpt_issuance_id to meta of MPTIssuanceCreate (#2701) | 2025-10-17 09:58:38 -04:00
    fixes #2332
Alex Kremer | 685f611434 | chore: Disable flaky DisconnectClientOnInactivity (#2699) | 2025-10-16 18:55:58 +01:00
Ayaz Salikhov | 2528dee6b6 | chore: Use pre-commit image with libatomic (#2698) | 2025-10-16 13:03:52 +01:00
Ayaz Salikhov | b2be4b51d1 | ci: Add libatomic as dependency for pre-commit image (#2697) | 2025-10-16 12:02:14 +01:00
Alex Kremer | b4e40558c9 | fix: Address AmendmentBlockHandler flakiness in old ETL tests (#2694) | 2025-10-15 17:15:12 +01:00
    Co-authored-by: Ayaz Salikhov <mathbunnyru@users.noreply.github.com>
emrearıyürek | b361e3a108 | feat: Support new types in ledger_entry (#2654) | 2025-10-14 17:37:14 +01:00
    Co-authored-by: Ayaz Salikhov <mathbunnyru@users.noreply.github.com>
Ayaz Salikhov | a4b47da57a | docs: Update doxygen-awesome-css to 2.4.1 (#2690) | 2025-10-13 18:55:21 +01:00
github-actions[bot] | 2ed1a45ef1 | style: clang-tidy auto fixes (#2688) | 2025-10-10 10:45:47 +01:00
    Co-authored-by: godexsoft <385326+godexsoft@users.noreply.github.com>
Alex Kremer | c12601c0f5 | Merge branch 'develop' into release/2.6.0 | 2025-10-09 16:58:32 +01:00
Ayaz Salikhov | dabaa5bf80 | fix: Drop dynamic loggers to fix memory leak (#2686) | 2025-10-09 16:51:55 +01:00
Ayaz Salikhov | b4fb3e42b8 | chore: Publish RCs as non-draft (#2685) | 2025-10-09 14:48:29 +01:00
Peter Chen | aa64bb7b6b | refactor: Keyspace comments (#2684) | 2025-10-08 19:58:05 +01:00
    Co-authored-by: Ayaz Salikhov <mathbunnyru@users.noreply.github.com>
40 changed files with 236 additions and 142 deletions

View File

@@ -4,7 +4,7 @@ import json
LINUX_OS = ["heavy", "heavy-arm64"]
LINUX_CONTAINERS = [
'{ "image": "ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e" }'
'{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
]
LINUX_COMPILERS = ["gcc", "clang"]

View File

@@ -45,7 +45,7 @@ jobs:
build_type: [Release, Debug]
container:
[
'{ "image": "ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e" }',
'{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }',
]
static: [true]
@@ -75,7 +75,7 @@ jobs:
uses: ./.github/workflows/reusable-build.yml
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
conan_profile: gcc
build_type: Debug
download_ccache: true
@@ -94,7 +94,7 @@ jobs:
uses: ./.github/workflows/reusable-build.yml
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
conan_profile: gcc
build_type: Release
download_ccache: true
@@ -111,7 +111,7 @@ jobs:
needs: build-and-test
runs-on: heavy
container:
-image: ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e
+image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
steps:
- uses: actions/checkout@v4

View File

@@ -17,7 +17,7 @@ jobs:
name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}`
runs-on: heavy
container:
-image: ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e
+image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
steps:
- uses: actions/checkout@v4
@@ -67,7 +67,7 @@ jobs:
needs: build
runs-on: heavy
container:
-image: ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e
+image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
steps:
- uses: actions/download-artifact@v5

View File

@@ -27,7 +27,7 @@ jobs:
if: github.event_name != 'push' || contains(github.event.head_commit.message, 'clang-tidy auto fixes')
runs-on: heavy
container:
-image: ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e
+image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
permissions:
contents: write

View File

@@ -14,7 +14,7 @@ jobs:
build:
runs-on: ubuntu-latest
container:
-image: ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e
+image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
steps:
- name: Checkout

View File

@@ -39,17 +39,17 @@ jobs:
conan_profile: gcc
build_type: Release
static: true
container: '{ "image": "ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
- os: heavy
conan_profile: gcc
build_type: Debug
static: true
container: '{ "image": "ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
- os: heavy
conan_profile: gcc.ubsan
build_type: Release
static: false
container: '{ "image": "ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
uses: ./.github/workflows/reusable-build-test.yml
with:
@@ -73,7 +73,7 @@ jobs:
include:
- os: heavy
conan_profile: clang
container: '{ "image": "ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
static: true
- os: macos15
conan_profile: apple-clang

View File

@@ -8,7 +8,7 @@ on:
jobs:
run-hooks:
-uses: XRPLF/actions/.github/workflows/pre-commit.yml@afbcbdafbe0ce5439492fb87eda6441371086386
+uses: XRPLF/actions/.github/workflows/pre-commit.yml@a8d7472b450eb53a1e5228f64552e5974457a21a
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:213752862ca95ecadeb59a6176c3db91a7864b3e" }'
container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'

View File

@@ -29,7 +29,7 @@ jobs:
conan_profile: gcc
build_type: Release
static: true
container: '{ "image": "ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
uses: ./.github/workflows/reusable-build-test.yml
with:
@@ -51,9 +51,9 @@ jobs:
with:
overwrite_release: false
prerelease: ${{ contains(github.ref_name, '-') }}
-title: "${{ github.ref_name}}"
+title: "${{ github.ref_name }}"
version: "${{ github.ref_name }}"
header: >
${{ contains(github.ref_name, '-') && '> **Note:** Please remember that this is a release candidate and it is not recommended for production use.' || '' }}
generate_changelog: ${{ !contains(github.ref_name, '-') }}
-draft: true
+draft: ${{ !contains(github.ref_name, '-') }}

View File

@@ -42,7 +42,7 @@ jobs:
release:
runs-on: heavy
container:
-image: ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e
+image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
env:
GH_REPO: ${{ github.repository }}
GH_TOKEN: ${{ github.token }}

View File

@@ -44,7 +44,7 @@ jobs:
uses: ./.github/workflows/reusable-build-test.yml
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
download_ccache: false
upload_ccache: false
conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}

View File

@@ -1,6 +1,6 @@
services:
clio_develop:
-image: ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e
+image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
volumes:
- clio_develop_conan_data:/root/.conan2/p
- clio_develop_ccache:/root/.ccache

View File

@@ -17,6 +17,7 @@ RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
curl \
git \
+libatomic1 \
software-properties-common \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

View File

@@ -191,7 +191,7 @@ Open the `index.html` file in your browser to see the documentation pages.
It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine.
```sh
-docker run -it ghcr.io/xrplf/clio-ci:213752862ca95ecadeb59a6176c3db91a7864b3e
+docker run -it ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
git clone https://github.com/XRPLF/clio
cd clio
```

View File

@@ -951,7 +951,7 @@ span.arrowhead {
border-color: var(--primary-color);
}
-#nav-tree ul li:first-child > div > a {
+#nav-tree-contents > ul > li:first-child > div > a {
opacity: 0;
pointer-events: none;
}

View File

@@ -46,6 +46,7 @@ namespace data {
inline std::shared_ptr<BackendInterface>
makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface& cache)
{
+using namespace cassandra::impl;
static util::Logger const log{"Backend"}; // NOLINT(readability-identifier-naming)
LOG(log.info()) << "Constructing BackendInterface";
@@ -56,7 +57,7 @@ makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheI
if (boost::iequals(type, "cassandra")) {
auto const cfg = config.getObject("database." + type);
-if (cfg.getValueView("provider").asString() == toString(cassandra::impl::Provider::Keyspace)) {
+if (providerFromString(cfg.getValueView("provider").asString()) == Provider::Keyspace) {
backend = std::make_shared<data::cassandra::KeyspaceBackend>(
data::cassandra::SettingsProvider{cfg}, cache, readOnly
);

View File

@@ -189,10 +189,11 @@ public:
auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);
for (auto i = 0u; i < nftIDs.size(); i++) {
-if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
+if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>();
+maybeRow.has_value()) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
-if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
+if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri.has_value())
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}

View File

@@ -57,9 +57,9 @@ namespace data::cassandra {
/**
* @brief Implements @ref CassandraBackendFamily for Keyspace
*
-* @tparam SettingsProviderType The settings provider type to use
-* @tparam ExecutionStrategyType The execution strategy type to use
-* @tparam FetchLedgerCacheType The ledger header cache type to use
+* @tparam SettingsProviderType The settings provider type
+* @tparam ExecutionStrategyType The execution strategy type
+* @tparam FetchLedgerCacheType The ledger header cache type
*/
template <
SomeSettingsProvider SettingsProviderType,
@@ -101,9 +101,9 @@ public:
// !range_.has_value() means the table 'ledger_range' is not populated;
// This would be the first write to the table.
// In this case, insert both min_sequence/max_sequence range into the table.
-if (not(range_.has_value())) {
-executor_.writeSync(schema_->insertLedgerRange, false, ledgerSequence_);
-executor_.writeSync(schema_->insertLedgerRange, true, ledgerSequence_);
+if (not range_.has_value()) {
+executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger =*/false, ledgerSequence_);
+executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger =*/true, ledgerSequence_);
}
if (not this->executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
@@ -130,7 +130,7 @@ public:
// Keyspace and ScyllaDB uses the same logic for taxon-filtered queries
nftIDs = fetchNFTIDsByTaxon(issuer, *taxon, limit, cursorIn, yield);
} else {
-// --- Amazon Keyspaces Workflow for non-taxon queries ---
+// Amazon Keyspaces Workflow for non-taxon queries
auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
auto const startTokenID = cursorIn.value_or(ripple::uint256(0));
@@ -140,8 +140,8 @@ public:
firstQuery.bindAt(3, Limit{limit});
auto const firstRes = executor_.read(yield, firstQuery);
-if (firstRes) {
-for (auto const [nftID] : extract<ripple::uint256>(firstRes.value()))
+if (firstRes.has_value()) {
+for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
nftIDs.push_back(nftID);
}
@@ -152,8 +152,8 @@ public:
secondQuery.bindAt(2, Limit{remainingLimit});
auto const secondRes = executor_.read(yield, secondQuery);
-if (secondRes) {
-for (auto const [nftID] : extract<ripple::uint256>(secondRes.value()))
+if (secondRes.has_value()) {
+for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
nftIDs.push_back(nftID);
}
}
@@ -163,7 +163,7 @@ public:
/**
* @brief (Unsupported in Keyspaces) Fetches account root object indexes by page.
-* * @note Loading the cache by enumerating all accounts is currently unsupported by the AWS Keyspaces backend.
+* @note Loading the cache by enumerating all accounts is currently unsupported by the AWS Keyspaces backend.
* This function's logic relies on "PER PARTITION LIMIT 1", which Keyspaces does not support, and there is
* no efficient alternative. This is acceptable as the cache is primarily loaded via diffs. Calling this
* function will throw an exception.
@@ -203,8 +203,8 @@ private:
statement.bindAt(3, Limit{limit});
auto const res = executor_.read(yield, statement);
-if (res && res.value().hasRows()) {
-for (auto const [nftID] : extract<ripple::uint256>(res.value()))
+if (res.has_value() && res->hasRows()) {
+for (auto const [nftID] : extract<ripple::uint256>(*res))
nftIDs.push_back(nftID);
}
return nftIDs;
@@ -229,8 +229,8 @@ private:
firstQuery.bindAt(3, Limit{limit});
auto const firstRes = executor_.read(yield, firstQuery);
-if (firstRes) {
-for (auto const [nftID] : extract<ripple::uint256>(firstRes.value()))
+if (firstRes.has_value()) {
+for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
nftIDs.push_back(nftID);
}
@@ -241,8 +241,8 @@ private:
secondQuery.bindAt(2, Limit{remainingLimit});
auto const secondRes = executor_.read(yield, secondQuery);
-if (secondRes) {
-for (auto const [nftID] : extract<ripple::uint256>(secondRes.value()))
+if (secondRes.has_value()) {
+for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
nftIDs.push_back(nftID);
}
}
@@ -291,10 +291,11 @@ private:
// Combine the results into final NFT objects.
for (auto i = 0u; i < nftIDs.size(); ++i) {
-if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
+if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>();
+maybeRow.has_value()) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
-if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
+if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri.has_value())
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}
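Both taxon-filtered paths in the hunks above share the same two-step pagination shape. The following condensed sketch is illustrative only: `executor`, `firstQuery`/`secondQuery`, `extract`, and `Limit` stand in for the clio internals used in the diff, and the function body is not the real code.

```cpp
// Sketch of the two-query Keyspaces pagination shown above.
std::vector<ripple::uint256> nftIDs;

// 1) Finish the taxon the cursor points into: token IDs strictly greater
//    than the cursor, bounded by the full page limit.
if (auto const firstRes = executor.read(yield, firstQuery); firstRes.has_value()) {
    for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
        nftIDs.push_back(nftID);
}

// 2) Only if the page is not full yet, continue from the following taxons,
//    asking just for the remainder.
if (auto const remainingLimit = limit - nftIDs.size(); remainingLimit > 0) {
    secondQuery.bindAt(2, Limit{remainingLimit});
    if (auto const secondRes = executor.read(yield, secondQuery); secondRes.has_value()) {
        for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
            nftIDs.push_back(nftID);
    }
}
```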

View File

@@ -70,10 +70,10 @@ namespace data::cassandra {
*
* Note: This is a safer and more correct rewrite of the original implementation of the backend.
*
-* @tparam SettingsProviderType The settings provider type to use
-* @tparam ExecutionStrategyType The execution strategy type to use
-* @tparam SchemaType The Schema type to use
-* @tparam FetchLedgerCacheType The ledger header cache type to use
+* @tparam SettingsProviderType The settings provider type
+* @tparam ExecutionStrategyType The execution strategy type
+* @tparam SchemaType The Schema type
+* @tparam FetchLedgerCacheType The ledger header cache type
*/
template <
SomeSettingsProvider SettingsProviderType,
@@ -100,8 +100,8 @@ public:
/**
* @brief Create a new cassandra/scylla backend instance.
*
-* @param settingsProvider The settings provider to use
-* @param cache The ledger cache to use
+* @param settingsProvider The settings provider
+* @param cache The ledger cache
* @param readOnly Whether the database should be in readonly mode
*/
CassandraBackendFamily(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
@@ -111,18 +111,18 @@ public:
, handle_{settingsProvider_.getSettings()}
, executor_{settingsProvider_.getSettings(), handle_}
{
-if (auto const res = handle_.connect(); not res)
+if (auto const res = handle_.connect(); not res.has_value())
throw std::runtime_error("Could not connect to database: " + res.error());
if (not readOnly) {
-if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
+if (auto const res = handle_.execute(schema_.createKeyspace); not res.has_value()) {
// on datastax, creation of keyspaces can be configured to only be done thru the admin
// interface. this does not mean that the keyspace does not already exist tho.
if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
throw std::runtime_error("Could not create keyspace: " + res.error());
}
-if (auto const res = handle_.executeEach(schema_.createSchema); not res)
+if (auto const res = handle_.executeEach(schema_.createSchema); not res.has_value())
throw std::runtime_error("Could not create schema: " + res.error());
}
@@ -233,10 +233,10 @@ public:
std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
{
-if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res) {
-if (auto const& result = res.value(); result) {
-if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
-return maybeValue;
+if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res.has_value()) {
+if (auto const& rows = *res; rows) {
+if (auto const maybeRow = rows.template get<uint32_t>(); maybeRow.has_value())
+return maybeRow;
LOG(log_.error()) << "Could not fetch latest ledger - no rows";
return std::nullopt;

View File

@@ -97,7 +97,7 @@ SettingsProvider::parseSettings() const
settings.coreConnectionsPerHost = config_.get<uint32_t>("core_connections_per_host");
settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
settings.writeBatchSize = config_.get<std::size_t>("write_batch_size");
-settings.provider = config_.get<std::string>("provider");
+settings.provider = impl::providerFromString(config_.get<std::string>("provider"));
if (config_.getValueView("connect_timeout").hasValue()) {
auto const connectTimeoutSecond = config_.get<uint32_t>("connect_timeout");

View File

@@ -61,7 +61,7 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), k
cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());
// TODO: AWS keyspace reads should be local_one to save cost
-if (settings.provider == toString(cassandra::impl::Provider::Keyspace)) {
+if (settings.provider == cassandra::impl::Provider::Keyspace) {
if (auto const rc = cass_cluster_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Error setting keyspace consistency: {}", cass_error_desc(rc)));
}

View File

@@ -20,6 +20,7 @@
#pragma once
#include "data/cassandra/impl/ManagedObject.hpp"
#include "util/Assert.hpp"
#include "util/log/Logger.hpp"
#include <cassandra.h>
@@ -31,29 +32,22 @@
#include <string>
#include <string_view>
#include <thread>
-#include <utility>
#include <variant>
namespace data::cassandra::impl {
namespace {
enum class Provider { Cassandra, Keyspace };
-inline std::string
-toString(Provider provider)
+inline Provider
+providerFromString(std::string const& provider)
{
-switch (provider) {
-case Provider::Cassandra:
-return "cassandra";
-case Provider::Keyspace:
-return "aws_keyspace";
-}
-std::unreachable();
+ASSERT(
+provider == "cassandra" || provider == "aws_keyspace",
+"Provider type must be one of 'cassandra' or 'aws_keyspace'"
+);
+return provider == "cassandra" ? Provider::Cassandra : Provider::Keyspace;
}
} // namespace
// TODO: move Settings to public interface, not impl
/**
@@ -109,7 +103,7 @@ struct Settings {
std::size_t writeBatchSize = kDEFAULT_BATCH_SIZE;
/** @brief Provider to know if we are using scylladb or keyspace */
-std::string provider = toString(kDEFAULT_PROVIDER);
+Provider provider = kDEFAULT_PROVIDER;
/** @brief Size of the IO queue */
std::optional<uint32_t> queueSizeIO = std::nullopt; // NOLINT(readability-redundant-member-init)
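Together with the SettingsProvider.cpp and Cluster.cpp hunks earlier in this diff, the provider now travels end to end as an enum rather than a string. Below is a minimal self-contained sketch of that flow; `Settings` is reduced to one field and `needsLocalQuorum` is a hypothetical helper, not code from the diff.

```cpp
#include <cassert>
#include <string>

// Sketch only; mirrors the shape of data::cassandra::impl after this change.
enum class Provider { Cassandra, Keyspace };

inline Provider
providerFromString(std::string const& provider)
{
    // The real code uses clio's ASSERT macro here.
    assert(provider == "cassandra" || provider == "aws_keyspace");
    return provider == "cassandra" ? Provider::Cassandra : Provider::Keyspace;
}

struct Settings {
    Provider provider = Provider::Cassandra;  // previously a std::string
};

inline bool
needsLocalQuorum(Settings const& settings)
{
    // Enum comparison replaces the old settings.provider == toString(...) check.
    return settings.provider == Provider::Keyspace;
}
```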

View File

@@ -209,8 +209,9 @@ TransactionFeed::pub(
rpc::insertDeliveredAmount(pubObj[JS(meta)].as_object(), tx, meta, txMeta.date);
auto& txnPubobj = pubObj[txKey].as_object();
+auto& metaPubobj = pubObj[JS(meta)].as_object();
rpc::insertDeliverMaxAlias(txnPubobj, version);
-rpc::insertMPTIssuanceID(txnPubobj, meta);
+rpc::insertMPTIssuanceID(txnPubobj, tx, metaPubobj, meta);
Json::Value nftJson;
ripple::RPC::insertNFTSyntheticInJson(nftJson, tx, *meta);

View File

@@ -34,7 +34,6 @@
#include "web/Context.hpp"
#include <boost/algorithm/string/case_conv.hpp>
#include <boost/algorithm/string/predicate.hpp>
-#include <boost/asio/spawn.hpp>
#include <boost/format/format_fwd.hpp>
#include <boost/format/free_funcs.hpp>
@@ -258,7 +257,7 @@ toExpandedJson(
auto metaJson = toJson(*meta);
insertDeliveredAmount(metaJson, txn, meta, blobs.date);
insertDeliverMaxAlias(txnJson, apiVersion);
-insertMPTIssuanceID(txnJson, meta);
+insertMPTIssuanceID(txnJson, txn, metaJson, meta);
if (nftEnabled == NFTokenjson::ENABLE) {
Json::Value nftJson;
@@ -343,36 +342,41 @@ getMPTIssuanceID(std::shared_ptr<ripple::TxMeta const> const& meta)
/**
* @brief Check if transaction has a new MPToken created
*
-* @param txnJson The transaction Json
-* @param meta The metadata
+* @param txn The transaction object
+* @param meta The metadata object
* @return true if the transaction can have a mpt_issuance_id
*/
static bool
-canHaveMPTIssuanceID(boost::json::object const& txnJson, std::shared_ptr<ripple::TxMeta const> const& meta)
+canHaveMPTIssuanceID(std::shared_ptr<ripple::STTx const> const& txn, std::shared_ptr<ripple::TxMeta const> const& meta)
{
-if (txnJson.at(JS(TransactionType)).is_string() and
-not boost::iequals(txnJson.at(JS(TransactionType)).as_string(), JS(MPTokenIssuanceCreate)))
+if (txn->getTxnType() != ripple::ttMPTOKEN_ISSUANCE_CREATE)
return false;
-if (meta->getResultTER() != ripple::tesSUCCESS)
-return false;
-return true;
+return (meta->getResultTER() == ripple::tesSUCCESS);
}
bool
-insertMPTIssuanceID(boost::json::object& txnJson, std::shared_ptr<ripple::TxMeta const> const& meta)
+insertMPTIssuanceID(
+boost::json::object& txnJson,
+std::shared_ptr<ripple::STTx const> const& txn,
+boost::json::object& metaJson,
+std::shared_ptr<ripple::TxMeta const> const& meta
+)
{
-if (!canHaveMPTIssuanceID(txnJson, meta))
-return false;
-if (txnJson.contains(JS(TransactionType)) && txnJson.at(JS(TransactionType)).is_string() and
-txnJson.at(JS(TransactionType)).as_string() == JS(MPTokenIssuanceCreate))
+if (!canHaveMPTIssuanceID(txn, meta))
return false;
auto const id = getMPTIssuanceID(meta);
ASSERT(id.has_value(), "MPTIssuanceID must have value");
-txnJson[JS(mpt_issuance_id)] = ripple::to_string(*id);
+// For mpttokenissuance create, add mpt_issuance_id to metajson
+// Otherwise, add it to txn json
+if (txnJson.contains(JS(TransactionType)) && txnJson.at(JS(TransactionType)).is_string() and
+txnJson.at(JS(TransactionType)).as_string() == JS(MPTokenIssuanceCreate)) {
+metaJson[JS(mpt_issuance_id)] = ripple::to_string(*id);
+} else {
+txnJson[JS(mpt_issuance_id)] = ripple::to_string(*id);
+}
return true;
}

View File

@@ -201,15 +201,23 @@ insertDeliveredAmount(
/**
* @brief Add "mpt_issuance_id" into various MPTToken transaction json.
-* @note We exclude "mpt_issuance_id" for MPTokenIssuanceCreate only. The reason is because the mpt_issuance_id
-* is generated only after one submits MPTokenIssuanceCreate, so theres no way to know what the id is. (rippled)
+* @note We add "mpt_issuance_id" into the meta part of MPTokenIssuanceCreate only. The reason is because the
+* mpt_issuance_id is generated only after one submits MPTokenIssuanceCreate, so theres no way to know what the id is.
+* (rippled)
*
* @param txnJson The transaction Json object
+* @param txn The txn object
+* @param metaJson The metadata Json object
* @param meta The metadata object
-* @return true if the "mpt_issuance_id" is added to the txnJson JSON object
+* @return true if the "mpt_issuance_id" is added to either txnJson or metaJson object
*/
bool
-insertMPTIssuanceID(boost::json::object& txnJson, std::shared_ptr<ripple::TxMeta const> const& meta);
+insertMPTIssuanceID(
+boost::json::object& txnJson,
+std::shared_ptr<ripple::STTx const> const& txn,
+boost::json::object& metaJson,
+std::shared_ptr<ripple::TxMeta const> const& meta
+);
/**
* @brief Convert STBase object to JSON

View File

@@ -333,7 +333,13 @@ tag_invoke(boost::json::value_to_tag<LedgerEntryHandler::Input>, boost::json::va
{JS(mptoken), ripple::ltMPTOKEN},
{JS(permissioned_domain), ripple::ltPERMISSIONED_DOMAIN},
{JS(vault), ripple::ltVAULT},
-{JS(delegate), ripple::ltDELEGATE}
+{JS(delegate), ripple::ltDELEGATE},
+{JS(amendments), ripple::ltAMENDMENTS},
+{JS(fee), ripple::ltFEE_SETTINGS},
+{JS(hashes), ripple::ltLEDGER_HASHES},
+{JS(nft_offer), ripple::ltNFTOKEN_OFFER},
+{JS(nunl), ripple::ltNEGATIVE_UNL},
+{JS(signer_list), ripple::ltSIGNER_LIST}
};
auto const parseBridgeFromJson = [](boost::json::value const& bridgeJson) {

View File

@@ -428,6 +428,12 @@ public:
validation::CustomValidators::accountBase58Validator, Status(ClioError::RpcMalformedAddress)
}}
}}},
+{JS(amendments), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
+{JS(fee), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
+{JS(hashes), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
+{JS(nft_offer), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
+{JS(nunl), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
+{JS(signer_list), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
{JS(ledger), check::Deprecated{}},
{"include_deleted", validation::Type<bool>{}},
};
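With the validators above, each newly supported field accepts the target object's ledger index as a hex string (that is what kMALFORMED_REQUEST_HEX_STRING_VALIDATOR enforces). A hypothetical request body, with the shape and the index value invented for illustration:

```cpp
#include <boost/json.hpp>

// Sketch: building a ledger_entry request for one of the newly added types.
boost::json::object const request{
    {"command", "ledger_entry"},
    {"nft_offer", "0B9F2C1E64A813F9A54207EBE2BF1B9A30BAD8B6A4EE8D1BC09E2895AFB3E008"},
    {"ledger_index", "validated"},
};
```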

View File

@@ -45,7 +45,7 @@ class ConfigValue;
/**
* @brief specific values that are accepted for logger levels in config.
*/
-static constexpr std::array<char const*, 6> kLOG_LEVELS = {
+static constexpr std::array<std::string_view, 6> kLOG_LEVELS = {
"trace",
"debug",
"info",
@@ -57,7 +57,7 @@ static constexpr std::array<char const*, 6> kLOG_LEVELS = {
/**
* @brief specific values that are accepted for logger tag style in config.
*/
-static constexpr std::array<char const*, 5> kLOG_TAGS = {
+static constexpr std::array<std::string_view, 5> kLOG_TAGS = {
"int",
"uint",
"null",
@@ -68,7 +68,7 @@ static constexpr std::array<char const*, 5> kLOG_TAGS = {
/**
* @brief specific values that are accepted for cache loading in config.
*/
-static constexpr std::array<char const*, 3> kLOAD_CACHE_MODE = {
+static constexpr std::array<std::string_view, 3> kLOAD_CACHE_MODE = {
"sync",
"async",
"none",
@@ -77,17 +77,17 @@ static constexpr std::array<char const*, 3> kLOAD_CACHE_MODE = {
/**
* @brief specific values that are accepted for database type in config.
*/
-static constexpr std::array<char const*, 1> kDATABASE_TYPE = {"cassandra"};
+static constexpr std::array<std::string_view, 1> kDATABASE_TYPE = {"cassandra"};
/**
* @brief specific values that are accepted for server's processing_policy in config.
*/
-static constexpr std::array<char const*, 2> kPROCESSING_POLICY = {"parallel", "sequent"};
+static constexpr std::array<std::string_view, 2> kPROCESSING_POLICY = {"parallel", "sequent"};
/**
* @brief specific values that are accepted for database provider in config.
*/
-static constexpr std::array<char const*, 2> kPROVIDER = {"cassandra", "aws_keyspace"};
+static constexpr std::array<std::string_view, 2> kPROVIDER = {"cassandra", "aws_keyspace"};
/**
* @brief An interface to enforce constraints on certain values within ClioConfigDefinition.
@@ -123,7 +123,7 @@ protected:
*/
template <std::size_t ArrSize>
constexpr std::string
-makeErrorMsg(std::string_view key, Value const& value, std::array<char const*, ArrSize> arr) const
+makeErrorMsg(std::string_view key, Value const& value, std::array<std::string_view, ArrSize> arr) const
{
// Extract the value from the variant
auto const valueStr = std::visit([](auto const& v) { return fmt::format("{}", v); }, value);
@@ -271,7 +271,7 @@ public:
* @param key The key of the ConfigValue that has this constraint
* @param arr The value that has this constraint must be of the values in arr
*/
-constexpr OneOf(std::string_view key, std::array<char const*, ArrSize> arr) : key_{key}, arr_{arr}
+constexpr OneOf(std::string_view key, std::array<std::string_view, ArrSize> arr) : key_{key}, arr_{arr}
{
}
@@ -318,7 +318,7 @@ private:
print(std::ostream& stream) const override
{
std::string valuesStream;
-std::ranges::for_each(arr_, [&valuesStream](std::string const& elem) {
+std::ranges::for_each(arr_, [&valuesStream](std::string_view elem) {
valuesStream += fmt::format(" `{}`,", elem);
});
// replace the last "," with "."
@@ -327,7 +327,7 @@ private:
}
std::string_view key_;
-std::array<char const*, ArrSize> arr_;
+std::array<std::string_view, ArrSize> arr_;
};
/**

View File

@@ -220,10 +220,10 @@ LogService::createFileSink(FileLoggingParams const& params, std::string const& f
* @param defaultSeverity The default severity level to use if not overridden.
* @return A map of channel names to their minimum severity levels, or an error message if parsing fails.
*/
-static std::expected<std::unordered_map<std::string, Severity>, std::string>
+static std::expected<std::unordered_map<std::string_view, Severity>, std::string>
getMinSeverity(config::ClioConfigDefinition const& config, Severity defaultSeverity)
{
-std::unordered_map<std::string, Severity> minSeverity;
+std::unordered_map<std::string_view, Severity> minSeverity;
for (auto const& channel : Logger::kCHANNELS)
minSeverity[channel] = defaultSeverity;
@@ -284,13 +284,15 @@ LogServiceState::reset()
}
std::shared_ptr<spdlog::logger>
-LogServiceState::registerLogger(std::string const& channel, std::optional<Severity> severity)
+LogServiceState::registerLogger(std::string_view channel, std::optional<Severity> severity)
{
if (not initialized_) {
throw std::logic_error("LogService is not initialized");
}
-std::shared_ptr<spdlog::logger> existingLogger = spdlog::get(channel);
+std::string const channelStr{channel};
+std::shared_ptr<spdlog::logger> existingLogger = spdlog::get(channelStr);
if (existingLogger != nullptr) {
if (severity.has_value())
existingLogger->set_level(toSpdlogLevel(*severity));
@@ -300,10 +302,10 @@ LogServiceState::registerLogger(std::string const& channel, std::optional<Severi
std::shared_ptr<spdlog::logger> logger;
if (isAsync_) {
logger = std::make_shared<spdlog::async_logger>(
-channel, sinks_.begin(), sinks_.end(), spdlog::thread_pool(), spdlog::async_overflow_policy::block
+channelStr, sinks_.begin(), sinks_.end(), spdlog::thread_pool(), spdlog::async_overflow_policy::block
);
} else {
-logger = std::make_shared<spdlog::logger>(channel, sinks_.begin(), sinks_.end());
+logger = std::make_shared<spdlog::logger>(channelStr, sinks_.begin(), sinks_.end());
}
logger->set_level(toSpdlogLevel(severity.value_or(defaultSeverity_)));
@@ -427,10 +429,25 @@ LogServiceState::replaceSinks(std::vector<std::shared_ptr<spdlog::sinks::sink>>
spdlog::apply_all([](std::shared_ptr<spdlog::logger> logger) { logger->sinks() = sinks_; });
}
-Logger::Logger(std::string channel) : logger_(LogServiceState::registerLogger(channel))
+Logger::Logger(std::string_view const channel) : logger_(LogServiceState::registerLogger(channel))
{
}
+Logger::~Logger()
+{
+// One reference is held by logger_ and the other by spdlog registry
+static constexpr size_t kLAST_LOGGER_REF_COUNT = 2;
+if (logger_ == nullptr) {
+return;
+}
+bool const isDynamic = !std::ranges::contains(kCHANNELS, logger_->name());
+if (isDynamic && logger_.use_count() == kLAST_LOGGER_REF_COUNT) {
+spdlog::drop(logger_->name());
+}
+}
Logger::Pump::Pump(std::shared_ptr<spdlog::logger> logger, Severity sev, SourceLocationType const& loc)
: logger_(std::move(logger))
, severity_(sev)
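The destructor added above is the actual leak fix from #2686: loggers whose channel is not in Logger::kCHANNELS are dropped from the spdlog registry once the last externally held reference goes away. A minimal usage sketch, with a hypothetical channel name:

```cpp
{
    util::Logger const log{"Session-42"};  // dynamically named, registered on first use
    LOG(log.info()) << "handling request";
}   // ~Logger(): "Session-42" is not in kCHANNELS and use_count() == 2
    // (logger_ plus the registry entry), so spdlog::drop() unregisters it
```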

View File

@@ -29,6 +29,7 @@
#include <optional>
#include <sstream>
#include <string>
+#include <string_view>
#include <vector>
// We forward declare spdlog::logger and spdlog::sinks::sink
@@ -91,7 +92,7 @@ enum class Severity {
* otherwise. See @ref LogService::init() for setup of the logging core and
* severity levels for each channel.
*/
-class Logger final {
+class Logger {
std::shared_ptr<spdlog::logger> logger_;
friend class LogService; // to expose the Pump interface
@@ -145,7 +146,7 @@ class Logger final {
};
public:
-static constexpr std::array<char const*, 8> kCHANNELS = {
+static constexpr std::array<std::string_view, 8> kCHANNELS = {
"General",
"WebServer",
"Backend",
@@ -165,10 +166,10 @@ public:
*
* @param channel The channel this logger will report into.
*/
-Logger(std::string channel);
+Logger(std::string_view const channel);
Logger(Logger const&) = default;
-~Logger() = default;
+~Logger();
Logger(Logger&&) = default;
Logger&
@@ -291,7 +292,7 @@ protected:
* @return Shared pointer to the registered spdlog logger
*/
static std::shared_ptr<spdlog::logger>
-registerLogger(std::string const& channel, std::optional<Severity> severity = std::nullopt);
+registerLogger(std::string_view channel, std::optional<Severity> severity = std::nullopt);
protected:
static bool isAsync_; // NOLINT(readability-identifier-naming)

View File

@@ -29,13 +29,14 @@
#include <algorithm>
#include <memory>
+#include <string_view>
void
LoggerFixture::init()
{
util::LogServiceState::init(false, util::Severity::FTL, {});
-std::ranges::for_each(util::Logger::kCHANNELS, [](char const* channel) {
+std::ranges::for_each(util::Logger::kCHANNELS, [](std::string_view const channel) {
util::LogService::registerLogger(channel);
});

View File

@@ -85,15 +85,17 @@ using namespace data::cassandra;
class BackendCassandraTestBase : public SyncAsioContextTest, public WithPrometheus {
protected:
+static constexpr auto kCASSANDRA = "cassandra";
ClioConfigDefinition cfg_{
{"database.type", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
{"database.type", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
{"database.cassandra.contact_points",
ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendHost)},
{"database.cassandra.secure_connect_bundle", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.port", ConfigValue{ConfigType::Integer}.optional()},
{"database.cassandra.keyspace",
ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendKeyspace)},
{"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
{"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
{"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
{"database.cassandra.table_prefix", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.max_write_requests_outstanding", ConfigValue{ConfigType::Integer}.defaultValue(10'000)},

View File

@@ -95,14 +95,15 @@ class MigrationCassandraSimpleTest : public WithPrometheus {
}
protected:
-ClioConfigDefinition cfg_{
-{{"database.type", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
+static constexpr auto kCASSANDRA = "cassandra";
+ClioConfigDefinition cfg_{
+{{"database.type", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
{"database.cassandra.contact_points",
ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendHost)},
{"database.cassandra.keyspace",
ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendKeyspace)},
{"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
{"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
{"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
{"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
{"database.cassandra.connect_timeout", ConfigValue{ConfigType::Integer}.defaultValue(2)},

View File

@@ -36,6 +36,9 @@ struct AmendmentBlockHandlerTest : util::prometheus::WithPrometheus, SyncAsioCon
etl::SystemState state;
};
+// Note: This test can be flaky due to the way it was written (depends on time)
+// Since the old ETL is going to be replaced by ETLng all tests including this one will be deleted anyway so the fix for
+// flakiness is to increase the context runtime to 50ms until then (to not waste time).
TEST_F(AmendmentBlockHandlerTest, CallToNotifyAmendmentBlockedSetsStateAndRepeatedlyCallsAction)
{
AmendmentBlockHandler handler{ctx_, state, std::chrono::nanoseconds{1}, actionMock.AsStdFunction()};
@@ -45,12 +48,7 @@ TEST_F(AmendmentBlockHandlerTest, CallToNotifyAmendmentBlockedSetsStateAndRepeat
handler.notifyAmendmentBlocked();
EXPECT_TRUE(state.isAmendmentBlocked);
-// Code runs significantly slower when assertions are enabled
-#ifdef _GLIBCXX_ASSERTIONS
-runContextFor(std::chrono::milliseconds{10});
-#else
-runContextFor(std::chrono::milliseconds{1});
-#endif
+runContextFor(std::chrono::milliseconds{50});
}
struct DefaultAmendmentBlockActionTest : LoggerFixture {};

View File

@@ -1282,7 +1282,8 @@ TEST_F(FeedTransactionTest, PublishesMPTokenIssuanceCreateTx)
}
],
"TransactionIndex": 0,
"TransactionResult": "tesSUCCESS"
"TransactionResult": "tesSUCCESS",
"mpt_issuance_id": "000000014B4E9C06F24296074F7BC48F92A97916C6DC5EA9"
},
"ctid": "C000002100000000",
"type": "transaction",

View File

@@ -1625,7 +1625,8 @@ TEST_F(RPCAccountTxHandlerTest, MPTTxs_API_v2)
}}
],
"TransactionIndex": 0,
"TransactionResult": "tesSUCCESS"
"TransactionResult": "tesSUCCESS",
"mpt_issuance_id": "000000014B4E9C06F24296074F7BC48F92A97916C6DC5EA9"
}},
"hash": "A52221F4003C281D3C83F501F418B55A1F9DC1C6A129EF13E1A8F0E5C008DAE3",
"ledger_index": 11,

View File

@@ -2311,11 +2311,23 @@ struct IndexTest : public HandlerBaseTest, public WithParamInterface<std::string
};
};
-// content of index, payment_channel, nft_page and check fields is ledger index.
+// content of index, amendments, check, fee, hashes, nft_offer, nunl, nft_page, payment_channel, signer_list fields is
+// ledger index.
INSTANTIATE_TEST_CASE_P(
RPCLedgerEntryGroup3,
IndexTest,
Values("index", "nft_page", "payment_channel", "check"),
Values(
"index",
"amendments",
"check",
"fee",
"hashes",
"nft_offer",
"nunl",
"nft_page",
"payment_channel",
"signer_list"
),
IndexTest::NameGenerator{}
);

View File

@@ -31,6 +31,7 @@
#include <optional>
#include <ostream>
#include <string>
+#include <string_view>
using namespace util::config;
@@ -164,7 +165,7 @@ TEST_F(ConstraintTest, SetValuesOnPortConstraint)
TEST_F(ConstraintTest, OneOfConstraintOneValue)
{
-std::array<char const*, 1> const arr = {"tracer"};
+std::array<std::string_view, 1> const arr = {"tracer"};
auto const databaseConstraint{OneOf{"database.type", arr}};
EXPECT_FALSE(databaseConstraint.checkConstraint("tracer").has_value());
@@ -180,7 +181,7 @@ TEST_F(ConstraintTest, OneOfConstraintOneValue)
TEST_F(ConstraintTest, OneOfConstraint)
{
-std::array<char const*, 3> const arr = {"123", "trace", "haha"};
+std::array<std::string_view, 3> const arr = {"123", "trace", "haha"};
auto const oneOfCons{OneOf{"log.level", arr}};
EXPECT_FALSE(oneOfCons.checkConstraint("trace").has_value());

View File

@@ -101,7 +101,7 @@ TEST_F(LogServiceInitTests, DefaultLogLevel)
EXPECT_TRUE(LogService::init(config_));
std::string const logString = "some log";
-for (auto const& channel : Logger::kCHANNELS) {
+for (std::string_view const channel : Logger::kCHANNELS) {
Logger const log{channel};
log.trace() << logString;
auto loggerStr = getLoggerString();

View File

@@ -21,11 +21,24 @@
#include "util/log/Logger.hpp"
#include <gtest/gtest.h>
+#include <spdlog/logger.h>
+#include <spdlog/spdlog.h>
+#include <cstddef>
+#include <memory>
#include <string>
using namespace util;
+namespace {
+size_t
+loggersNum()
+{
+size_t counter = 0;
+spdlog::apply_all([&counter](std::shared_ptr<spdlog::logger>) { ++counter; });
+return counter;
+}
+} // namespace
// Used as a fixture for tests with enabled logging
class LoggerTest : public LoggerFixture {};
@@ -71,3 +84,24 @@ TEST_F(LoggerTest, LOGMacro)
EXPECT_TRUE(computeCalled);
}
#endif
+TEST_F(LoggerTest, ManyDynamicLoggers)
+{
+static constexpr size_t kNUM_LOGGERS = 10'000;
+auto initialLoggers = loggersNum();
+for (size_t i = 0; i < kNUM_LOGGERS; ++i) {
+std::string const loggerName = "DynamicLogger" + std::to_string(i);
+Logger const log{loggerName};
+log.info() << "Logger number " << i;
+ASSERT_EQ(getLoggerString(), "inf:" + loggerName + " - Logger number " + std::to_string(i) + "\n");
+Logger const copy = log;
+copy.info() << "Copy of logger number " << i;
+ASSERT_EQ(getLoggerString(), "inf:" + loggerName + " - Copy of logger number " + std::to_string(i) + "\n");
+}
+ASSERT_EQ(loggersNum(), initialLoggers);
+}

View File

@@ -108,7 +108,9 @@ TEST_F(WebWsConnectionTests, WasUpgraded)
});
}
-TEST_F(WebWsConnectionTests, DisconnectClientOnInactivity)
+// This test is either flaky or incorrect
+// see https://github.com/XRPLF/clio/issues/2700
+TEST_F(WebWsConnectionTests, DISABLED_DisconnectClientOnInactivity)
{
boost::asio::io_context clientCtx;
auto work = boost::asio::make_work_guard(clientCtx);