Compare commits

..

16 Commits

Author SHA1 Message Date
Peter Chen
7eaf0005e4 Merge tag 'experiment' into experimental_keyspace
test keyspace
2025-09-16 09:08:58 -07:00
Peter Chen
497721ee7c change connections per host 2025-09-15 10:06:12 -07:00
Peter Chen
26530108e3 fix comments 2025-09-15 08:55:14 -07:00
Alex Kremer
fc88abdaeb Merge branch 'develop' into SupportKeyspace 2025-09-15 14:59:54 +01:00
Ayaz Salikhov
8bc36c2c0b Merge branch 'develop' into SupportKeyspace 2025-09-04 18:23:37 +01:00
Peter Chen
4e9558f76b Merge branch 'SupportKeyspace' into experimental_keyspace 2025-09-04 08:44:50 -07:00
Ayaz Salikhov
84db880ce7 Merge branch 'develop' into SupportKeyspace 2025-08-28 15:11:58 +01:00
Peter Chen
f88ce31363 let scylladb use same schema to update ledger range as before 2025-08-27 14:34:34 -07:00
Peter Chen
e03f5e46c0 Merge branch 'develop' into SupportKeyspace 2025-08-24 18:58:38 -04:00
Peter Chen
30da8d8f63 fix val 2025-08-24 18:58:07 -04:00
Peter Chen
8f6bec2e25 merge develop 2025-08-21 12:37:19 -04:00
Peter Chen
0d9a83fd4d fix comments 2025-08-20 12:29:01 -04:00
Peter Chen
47c2af0421 Merge branch 'develop' into SupportKeyspace 2025-08-19 16:37:08 -04:00
Peter Chen
c3e04426d3 fix bug and comments 2025-08-19 16:19:44 -04:00
Peter Chen
d598396445 remove unnecessary comment 2025-08-18 16:33:12 -04:00
Peter Chen
bbd2884e3b feat: Support Keyspace 2025-08-18 15:38:56 -04:00
118 changed files with 555 additions and 1059 deletions

View File

@@ -62,8 +62,6 @@ jobs:
container: ${{ matrix.container }}
conan_profile: ${{ matrix.conan_profile }}
build_type: ${{ matrix.build_type }}
download_ccache: true
upload_ccache: true
static: ${{ matrix.static }}
run_unit_tests: true
run_integration_tests: false
@@ -78,8 +76,7 @@ jobs:
container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
conan_profile: gcc
build_type: Debug
download_ccache: true
upload_ccache: false
disable_cache: false
code_coverage: true
static: true
upload_clio_server: false
@@ -97,8 +94,7 @@ jobs:
container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
conan_profile: gcc
build_type: Release
download_ccache: true
upload_ccache: false
disable_cache: false
code_coverage: false
static: true
upload_clio_server: false

View File

@@ -23,14 +23,8 @@ on:
required: true
type: string
download_ccache:
description: Whether to download ccache from the cache
required: false
type: boolean
default: true
upload_ccache:
description: Whether to upload ccache to the cache
disable_cache:
description: Whether ccache should be disabled
required: false
type: boolean
default: false
@@ -83,8 +77,7 @@ jobs:
container: ${{ inputs.container }}
conan_profile: ${{ inputs.conan_profile }}
build_type: ${{ inputs.build_type }}
download_ccache: ${{ inputs.download_ccache }}
upload_ccache: ${{ inputs.upload_ccache }}
disable_cache: ${{ inputs.disable_cache }}
code_coverage: false
static: ${{ inputs.static }}
upload_clio_server: ${{ inputs.upload_clio_server }}

View File

@@ -23,17 +23,10 @@ on:
required: true
type: string
download_ccache:
description: Whether to download ccache from the cache
disable_cache:
description: Whether ccache should be disabled
required: false
type: boolean
default: true
upload_ccache:
description: Whether to upload ccache to the cache
required: false
type: boolean
default: false
code_coverage:
description: Whether to enable code coverage
@@ -97,15 +90,15 @@ jobs:
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
with:
disable_ccache: ${{ !inputs.download_ccache }}
disable_ccache: ${{ inputs.disable_cache }}
- name: Setup conan on macOS
if: ${{ runner.os == 'macOS' }}
if: runner.os == 'macOS'
shell: bash
run: ./.github/scripts/conan/init.sh
- name: Restore cache
if: ${{ inputs.download_ccache }}
if: ${{ !inputs.disable_cache }}
uses: ./.github/actions/restore_cache
id: restore_cache
with:
@@ -151,7 +144,7 @@ jobs:
path: build_time_report.txt
- name: Show ccache's statistics
if: ${{ inputs.download_ccache }}
if: ${{ !inputs.disable_cache }}
shell: bash
id: ccache_stats
run: |
@@ -169,7 +162,7 @@ jobs:
run: strip build/clio_integration_tests
- name: Upload clio_server
if: ${{ inputs.upload_clio_server && !inputs.code_coverage && !inputs.analyze_build_time }}
if: inputs.upload_clio_server && !inputs.code_coverage && !inputs.analyze_build_time
uses: actions/upload-artifact@v4
with:
name: clio_server_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
@@ -190,14 +183,14 @@ jobs:
path: build/clio_integration_tests
- name: Upload Clio Linux package
if: ${{ inputs.package }}
if: inputs.package
uses: actions/upload-artifact@v4
with:
name: clio_deb_package_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/*.deb
- name: Save cache
if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
if: ${{ !inputs.disable_cache && github.ref == 'refs/heads/develop' }}
uses: ./.github/actions/save_cache
with:
conan_profile: ${{ inputs.conan_profile }}

View File

@@ -1,8 +1,6 @@
name: Clang-tidy check
on:
push:
branches: [develop]
schedule:
- cron: "0 9 * * 1-5"
workflow_dispatch:
@@ -24,7 +22,6 @@ env:
jobs:
clang_tidy:
if: github.event_name != 'push' || contains(github.event.head_commit.message, 'clang-tidy auto fixes')
runs-on: heavy
container:
image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d

View File

@@ -0,0 +1,30 @@
name: Restart clang-tidy workflow
on:
push:
branches: [develop]
workflow_dispatch:
jobs:
restart_clang_tidy:
runs-on: ubuntu-latest
permissions:
actions: write
steps:
- uses: actions/checkout@v4
- name: Check last commit matches clang-tidy auto fixes
id: check
shell: bash
run: |
passed=$(if [[ "$(git log -1 --pretty=format:%s | grep 'style: clang-tidy auto fixes')" ]]; then echo 'true' ; else echo 'false' ; fi)
echo "passed=\"$passed\"" >> $GITHUB_OUTPUT
- name: Run clang-tidy workflow
if: ${{ contains(steps.check.outputs.passed, 'true') }}
shell: bash
env:
GH_TOKEN: ${{ github.token }}
GH_REPO: ${{ github.repository }}
run: gh workflow run clang-tidy.yml

View File

@@ -61,8 +61,7 @@ jobs:
run_unit_tests: true
run_integration_tests: true
upload_clio_server: true
download_ccache: false
upload_ccache: false
disable_cache: true
analyze_build_time:
name: Analyze Build Time
@@ -85,8 +84,7 @@ jobs:
container: ${{ matrix.container }}
conan_profile: ${{ matrix.conan_profile }}
build_type: Release
download_ccache: false
upload_ccache: false
disable_cache: true
code_coverage: false
static: ${{ matrix.static }}
upload_clio_server: false

View File

@@ -41,8 +41,7 @@ jobs:
run_unit_tests: true
run_integration_tests: true
upload_clio_server: true
download_ccache: false
upload_ccache: false
disable_cache: true
expected_version: ${{ github.event_name == 'push' && github.ref_name || '' }}
release:

View File

@@ -45,8 +45,7 @@ jobs:
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
download_ccache: false
upload_ccache: false
disable_cache: true
conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
build_type: ${{ matrix.build_type }}
static: false

View File

@@ -39,11 +39,11 @@ jobs:
runs-on: ${{ inputs.runs_on }}
container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}
if: ${{ inputs.run_unit_tests }}
if: inputs.run_unit_tests
env:
# TODO: remove completely when we have fixed all currently existing issues with sanitizers
SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') || inputs.conan_profile == 'clang.asan' || (inputs.conan_profile == 'gcc.asan' && inputs.build_type == 'Release') }}
SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.asan') || endsWith(inputs.conan_profile, '.tsan') }}
steps:
- name: Cleanup workspace
@@ -63,15 +63,15 @@ jobs:
run: chmod +x ./clio_tests
- name: Run clio_tests (regular)
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'false' }}
if: env.SANITIZER_IGNORE_ERRORS == 'false'
run: ./clio_tests
- name: Run clio_tests (sanitizer errors ignored)
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
if: env.SANITIZER_IGNORE_ERRORS == 'true'
run: ./.github/scripts/execute-tests-under-sanitizer ./clio_tests
- name: Check for sanitizer report
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
if: env.SANITIZER_IGNORE_ERRORS == 'true'
shell: bash
id: check_report
run: |
@@ -82,7 +82,7 @@ jobs:
fi
- name: Upload sanitizer report
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
if: env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true'
uses: actions/upload-artifact@v4
with:
name: sanitizer_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
@@ -90,7 +90,7 @@ jobs:
include-hidden-files: true
- name: Create an issue
if: ${{ false && env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
if: false && env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true'
uses: ./.github/actions/create_issue
env:
GH_TOKEN: ${{ github.token }}
@@ -108,7 +108,7 @@ jobs:
runs-on: ${{ inputs.runs_on }}
container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}
if: ${{ inputs.run_integration_tests }}
if: inputs.run_integration_tests
services:
scylladb:

View File

@@ -61,7 +61,7 @@ jobs:
files: "docker/compilers/gcc/**"
- uses: ./.github/actions/build_docker_image
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
if: steps.changed-files.outputs.any_changed == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
@@ -99,7 +99,7 @@ jobs:
files: "docker/compilers/gcc/**"
- uses: ./.github/actions/build_docker_image
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
if: steps.changed-files.outputs.any_changed == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
@@ -140,7 +140,7 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
if: ${{ github.event_name != 'pull_request' }}
if: github.event_name != 'pull_request'
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
with:
registry: ghcr.io
@@ -148,14 +148,14 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to DockerHub
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request'
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_PW }}
- name: Create and push multi-arch manifest
if: ${{ github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true' }}
if: github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true'
run: |
push_image() {
image=$1
@@ -188,7 +188,7 @@ jobs:
files: "docker/compilers/clang/**"
- uses: ./.github/actions/build_docker_image
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
if: steps.changed-files.outputs.any_changed == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
@@ -224,7 +224,7 @@ jobs:
files: "docker/tools/**"
- uses: ./.github/actions/build_docker_image
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
if: steps.changed-files.outputs.any_changed == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -255,7 +255,7 @@ jobs:
files: "docker/tools/**"
- uses: ./.github/actions/build_docker_image
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
if: steps.changed-files.outputs.any_changed == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -289,7 +289,7 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
if: ${{ github.event_name != 'pull_request' }}
if: github.event_name != 'pull_request'
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
with:
registry: ghcr.io
@@ -297,7 +297,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create and push multi-arch manifest
if: ${{ github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true' }}
if: github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true'
run: |
image=${{ needs.repo.outputs.GHCR_REPO }}/clio-tools
docker buildx imagetools create \

View File

@@ -77,7 +77,7 @@ jobs:
disable_ccache: true
- name: Setup conan on macOS
if: ${{ runner.os == 'macOS' }}
if: runner.os == 'macOS'
shell: bash
run: ./.github/scripts/conan/init.sh
@@ -94,9 +94,9 @@ jobs:
build_type: ${{ matrix.build_type }}
- name: Login to Conan
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request'
run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} xrplf ${{ secrets.CONAN_USERNAME }}
- name: Upload Conan packages
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }}
if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule'
run: conan upload "*" -r=xrplf --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}

View File

@@ -3,7 +3,7 @@
"requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
"xrpl/2.6.1-rc1#114893d73104b89b8112619796cfacc0%1758209579.8651578",
"xrpl/2.6.0#57b93b5a6c99dc8511fccb3bb5390352%1756820296.642",
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
"spdlog/1.15.3#3ca0e9e6b83af4d0151e26541d140c86%1754401846.61",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
@@ -41,12 +41,15 @@
"overrides": {
"boost/1.83.0": [
null,
"boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368"
"boost/1.83.0"
],
"protobuf/3.21.12": [
null,
"protobuf/3.21.12"
],
"boost/1.86.0": [
"boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368"
],
"lz4/1.9.4": [
"lz4/1.10.0"
],

View File

@@ -18,7 +18,7 @@ class ClioConan(ConanFile):
'protobuf/3.21.12',
'grpc/1.50.1',
'openssl/1.1.1w',
'xrpl/2.6.1-rc1',
'xrpl/2.6.0',
'zlib/1.3.1',
'libbacktrace/cci.20210118',
'spdlog/1.15.3',

View File

@@ -89,6 +89,14 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: Represents the number of threads that will be used for database operations.
### database.cassandra.provider
- **Required**: True
- **Type**: string
- **Default value**: `cassandra`
- **Constraints**: The value must be one of the following: `cassandra`, `aws_keyspace`.
- **Description**: The specific database backend provider we are using.
### database.cassandra.core_connections_per_host
- **Required**: True

View File

@@ -189,7 +189,6 @@ ClioApplication::run(bool const useNgWebServer)
httpServer->onGet("/metrics", MetricsHandler{adminVerifier});
httpServer->onGet("/health", HealthCheckHandler{});
httpServer->onGet("/cache_state", CacheStateHandler{cache});
auto requestHandler = RequestHandler{adminVerifier, handler};
httpServer->onPost("/", requestHandler);
httpServer->onWs(std::move(requestHandler));
@@ -215,7 +214,7 @@ ClioApplication::run(bool const useNgWebServer)
// Init the web server
auto handler = std::make_shared<web::RPCServerHandler<RPCEngineType>>(config_, backend, rpcEngine, etl, dosGuard);
auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler, cache);
auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler);
// Blocks until stopped.
// When stopped, shared_ptrs fall out of scope

View File

@@ -120,34 +120,4 @@ HealthCheckHandler::operator()(
return web::ng::Response{boost::beast::http::status::ok, kHEALTH_CHECK_HTML, request};
}
web::ng::Response
CacheStateHandler::operator()(
web::ng::Request const& request,
web::ng::ConnectionMetadata&,
web::SubscriptionContextPtr,
boost::asio::yield_context
)
{
static constexpr auto kCACHE_CHECK_LOADED_HTML = R"html(
<!DOCTYPE html>
<html>
<head><title>Cache state</title></head>
<body><h1>Cache state</h1><p>Cache is fully loaded</p></body>
</html>
)html";
static constexpr auto kCACHE_CHECK_NOT_LOADED_HTML = R"html(
<!DOCTYPE html>
<html>
<head><title>Cache state</title></head>
<body><h1>Cache state</h1><p>Cache is not yet loaded</p></body>
</html>
)html";
if (cache_.get().isFull())
return web::ng::Response{boost::beast::http::status::ok, kCACHE_CHECK_LOADED_HTML, request};
return web::ng::Response{boost::beast::http::status::service_unavailable, kCACHE_CHECK_NOT_LOADED_HTML, request};
}
} // namespace app

View File

@@ -19,7 +19,6 @@
#pragma once
#include "data/LedgerCacheInterface.hpp"
#include "rpc/Errors.hpp"
#include "util/log/Logger.hpp"
#include "web/AdminVerificationStrategy.hpp"
@@ -164,37 +163,6 @@ public:
);
};
/**
* @brief A function object that handles the cache state check endpoint.
*/
class CacheStateHandler {
std::reference_wrapper<data::LedgerCacheInterface const> cache_;
public:
/**
* @brief Construct a new CacheStateHandler object.
*
* @param cache The ledger cache to use.
*/
CacheStateHandler(data::LedgerCacheInterface const& cache) : cache_{cache}
{
}
/**
* @brief The call of the function object.
*
* @param request The request to handle.
* @return The response to the request
*/
web::ng::Response
operator()(
web::ng::Request const& request,
web::ng::ConnectionMetadata&,
web::SubscriptionContextPtr,
boost::asio::yield_context
);
};
/**
* @brief A function object that handles the websocket endpoint.
*

View File

@@ -225,8 +225,11 @@ public:
{
waitForWritesToFinish();
if (!range_) {
executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
// !range_.has_value() means the table 'ledger_range' is not yet populated; this would be the first
// write to the table. In this case, insert both the min_sequence and max_sequence rows into the table.
if (!range_.has_value()) {
executor_.writeSync(schema_->insertLedgerRange, false, ledgerSequence_);
executor_.writeSync(schema_->insertLedgerRange, true, ledgerSequence_);
}
if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
@@ -513,80 +516,14 @@ public:
boost::asio::yield_context yield
) const override
{
NFTsAndCursor ret;
Statement const idQueryStatement = [&taxon, &issuer, &cursorIn, &limit, this]() {
if (taxon.has_value()) {
auto r = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
r.bindAt(1, *taxon);
r.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
r.bindAt(3, Limit{limit});
return r;
}
auto r = schema_->selectNFTIDsByIssuer.bind(issuer);
r.bindAt(
1,
std::make_tuple(
cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0,
cursorIn.value_or(ripple::uint256(0))
)
);
r.bindAt(2, Limit{limit});
return r;
}();
// Query for all the NFTs issued by the account, potentially filtered by the taxon
auto const res = executor_.read(yield, idQueryStatement);
auto const& idQueryResults = res.value();
if (not idQueryResults.hasRows()) {
LOG(log_.debug()) << "No rows returned";
return {};
}
std::vector<ripple::uint256> nftIDs;
for (auto const [nftID] : extract<ripple::uint256>(idQueryResults))
nftIDs.push_back(nftID);
if (nftIDs.empty())
return ret;
if (nftIDs.size() == limit)
ret.cursor = nftIDs.back();
std::vector<Statement> selectNFTStatements;
selectNFTStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTStatements), [&](auto const& nftID) {
return schema_->selectNFT.bind(nftID, ledgerSequence);
}
);
auto const nftInfos = executor_.readEach(yield, selectNFTStatements);
std::vector<Statement> selectNFTURIStatements;
selectNFTURIStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTURIStatements), [&](auto const& nftID) {
return schema_->selectNFTURI.bind(nftID, ledgerSequence);
}
);
auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);
for (auto i = 0u; i < nftIDs.size(); i++) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}
if (taxon.has_value()) {
nftIDs = fetchNFTIDsByTaxon(issuer, *taxon, limit, cursorIn, yield);
} else {
nftIDs = fetchNFTIDsWithoutTaxon(issuer, limit, cursorIn, yield);
}
return ret;
return populateNFTsAndCreateCursor(nftIDs, ledgerSequence, limit, yield);
}
MPTHoldersAndCursor
@@ -803,8 +740,9 @@ public:
std::optional<ripple::AccountID> lastItem;
while (liveAccounts.size() < number) {
Statement const statement = lastItem ? schema_->selectAccountFromToken.bind(*lastItem, Limit{pageSize})
: schema_->selectAccountFromBeginning.bind(Limit{pageSize});
Statement const statement = lastItem
? schema_->selectAccountFromTokenScylla->bind(*lastItem, Limit{pageSize})
: schema_->selectAccountFromBeginningScylla->bind(Limit{pageSize});
auto const res = executor_.read(yield, statement);
if (res) {
@@ -1116,6 +1054,139 @@ private:
return true;
}
std::vector<ripple::uint256>
fetchNFTIDsByTaxon(
ripple::AccountID const& issuer,
std::uint32_t const taxon,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursorIn,
boost::asio::yield_context yield
) const
{
std::vector<ripple::uint256> nftIDs;
Statement statement = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
statement.bindAt(1, taxon);
statement.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
statement.bindAt(3, Limit{limit});
auto const res = executor_.read(yield, statement);
if (res && res.value().hasRows()) {
for (auto const [nftID] : extract<ripple::uint256>(res.value()))
nftIDs.push_back(nftID);
}
return nftIDs;
}
std::vector<ripple::uint256>
fetchNFTIDsWithoutTaxon(
ripple::AccountID const& issuer,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursorIn,
boost::asio::yield_context yield
) const
{
std::vector<ripple::uint256> nftIDs;
if (settingsProvider_.getSettings().provider == "aws_keyspace") {
// --- Amazon Keyspaces Workflow ---
auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
auto const startTokenID = cursorIn.value_or(ripple::uint256(0));
Statement firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
firstQuery.bindAt(1, startTaxon);
firstQuery.bindAt(2, startTokenID);
firstQuery.bindAt(3, Limit{limit});
auto const firstRes = executor_.read(yield, firstQuery);
if (firstRes) {
for (auto const [nftID] : extract<ripple::uint256>(firstRes.value()))
nftIDs.push_back(nftID);
}
if (nftIDs.size() < limit) {
auto const remainingLimit = limit - nftIDs.size();
Statement secondQuery = schema_->selectNFTsAfterTaxonKeyspaces->bind(issuer);
secondQuery.bindAt(1, startTaxon);
secondQuery.bindAt(2, Limit{remainingLimit});
auto const secondRes = executor_.read(yield, secondQuery);
if (secondRes) {
for (auto const [nftID] : extract<ripple::uint256>(secondRes.value()))
nftIDs.push_back(nftID);
}
}
} else if (settingsProvider_.getSettings().provider == "scylladb") {
auto r = schema_->selectNFTsByIssuerScylla->bind(issuer);
r.bindAt(
1,
std::make_tuple(
cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0,
cursorIn.value_or(ripple::uint256(0))
)
);
r.bindAt(2, Limit{limit});
auto const res = executor_.read(yield, r);
if (res && res.value().hasRows()) {
for (auto const [nftID] : extract<ripple::uint256>(res.value()))
nftIDs.push_back(nftID);
}
}
return nftIDs;
}
/**
* @brief Takes a list of NFT IDs, fetches their full data, and assembles the final result with a cursor.
*/
NFTsAndCursor
populateNFTsAndCreateCursor(
std::vector<ripple::uint256> const& nftIDs,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
boost::asio::yield_context yield
) const
{
if (nftIDs.empty()) {
LOG(log_.debug()) << "No rows returned";
return {};
}
NFTsAndCursor ret;
if (nftIDs.size() == limit)
ret.cursor = nftIDs.back();
// Prepare and execute queries to fetch NFT info and URIs in parallel.
std::vector<Statement> selectNFTStatements;
selectNFTStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTStatements), [&](auto const& nftID) {
return schema_->selectNFT.bind(nftID, ledgerSequence);
}
);
std::vector<Statement> selectNFTURIStatements;
selectNFTURIStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTURIStatements), [&](auto const& nftID) {
return schema_->selectNFTURI.bind(nftID, ledgerSequence);
}
);
auto const nftInfos = executor_.readEach(yield, selectNFTStatements);
auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);
// Combine the results into final NFT objects.
for (auto i = 0u; i < nftIDs.size(); ++i) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}
}
return ret;
}
};
using CassandraBackend = BasicCassandraBackend<SettingsProvider, impl::DefaultExecutionStrategy<>>;

View File

@@ -28,6 +28,7 @@
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <vector>
@@ -347,6 +348,86 @@ public:
Statements(SettingsProviderType const& settingsProvider, Handle const& handle)
: settingsProvider_{settingsProvider}, handle_{std::cref(handle)}
{
// initialize scylladb supported queries
if (settingsProvider_.get().getSettings().provider == "scylladb") {
selectAccountFromBeginningScylla = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > 0
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
)
);
}();
selectAccountFromTokenScylla = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > token(?)
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
)
);
}();
selectNFTsByIssuerScylla = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND (taxon, token_id) > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
)
);
}();
updateLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
UPDATE {}
SET sequence = ?
WHERE is_latest = ?
IF sequence IN (?, null)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
);
}();
// AWS_keyspace supported queries
} else if (settingsProvider_.get().getSettings().provider == "aws_keyspace") {
selectNFTsAfterTaxonKeyspaces = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND taxon > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
)
);
}();
}
}
//
@@ -526,6 +607,17 @@ public:
// Update (and "delete") queries
//
PreparedStatement insertLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
INSERT INTO {} (is_latest, sequence) VALUES (?, ?) IF NOT EXISTS
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
);
}();
PreparedStatement updateLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
@@ -533,7 +625,7 @@ public:
UPDATE {}
SET sequence = ?
WHERE is_latest = ?
IF sequence IN (?, null)
IF sequence = ?
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
@@ -654,6 +746,10 @@ public:
);
}();
/*
Currently, these two SELECT statements are not used.
If we ever use them, we will need to change the PER PARTITION LIMIT clause to support Keyspaces.
PreparedStatement selectLedgerPageKeys = [this]() {
return handle_.get().prepare(
fmt::format(
@@ -687,6 +783,7 @@ public:
)
);
}();
*/
PreparedStatement getToken = [this]() {
return handle_.get().prepare(
@@ -717,36 +814,6 @@ public:
);
}();
PreparedStatement selectAccountFromBeginning = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > 0
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
)
);
}();
PreparedStatement selectAccountFromToken = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > token(?)
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
)
);
}();
PreparedStatement selectAccountTxForward = [this]() {
return handle_.get().prepare(
fmt::format(
@@ -827,22 +894,6 @@ public:
);
}();
PreparedStatement selectNFTIDsByIssuer = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND (taxon, token_id) > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
)
);
}();
PreparedStatement selectNFTIDsByIssuerTaxon = [this]() {
return handle_.get().prepare(
fmt::format(
@@ -953,6 +1004,15 @@ public:
)
);
}();
// For ScyllaDB / Cassandra ONLY
std::optional<PreparedStatement> selectAccountFromBeginningScylla;
std::optional<PreparedStatement> selectAccountFromTokenScylla;
std::optional<PreparedStatement> selectNFTsByIssuerScylla;
// For AWS Keyspaces ONLY
// NOTE: AWS Keyspaces is not able to load the cache with accounts
std::optional<PreparedStatement> selectNFTsAfterTaxonKeyspaces;
};
/**

View File

@@ -97,6 +97,7 @@ SettingsProvider::parseSettings() const
settings.coreConnectionsPerHost = config_.get<uint32_t>("core_connections_per_host");
settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
settings.writeBatchSize = config_.get<std::size_t>("write_batch_size");
settings.provider = config_.get<std::string>("provider");
if (config_.getValueView("connect_timeout").hasValue()) {
auto const connectTimeoutSecond = config_.get<uint32_t>("connect_timeout");

View File

@@ -25,8 +25,8 @@
#include <utility>
namespace data::cassandra {
namespace impl {
namespace impl {
struct Settings;
class Session;
class Cluster;
@@ -36,7 +36,6 @@ struct Result;
class Statement;
class PreparedStatement;
struct Batch;
} // namespace impl
using Settings = impl::Settings;

View File

@@ -36,9 +36,18 @@ constexpr auto kBATCH_DELETER = [](CassBatch* ptr) { cass_batch_free(ptr); };
namespace data::cassandra::impl {
// TODO: Use an appropriate value instead of CASS_BATCH_TYPE_LOGGED for different use cases
/*
* There are two main types of Cassandra statement batches:
* LOGGED: Ensures all updates in the batch succeed together, or none do.
* Use this for critical, related changes (e.g., for the same user), but it is slower.
*
* UNLOGGED: For performance. Sends many separate updates in one network trip to be fast.
* Use this for bulk-loading unrelated data, but be aware there is NO all-or-nothing guarantee.
*
* More info here: https://docs.datastax.com/en/developer/cpp-driver-dse/1.10/features/basics/batches/index.html
*/
Batch::Batch(std::vector<Statement> const& statements)
: ManagedObject{cass_batch_new(CASS_BATCH_TYPE_LOGGED), kBATCH_DELETER}
: ManagedObject{cass_batch_new(CASS_BATCH_TYPE_UNLOGGED), kBATCH_DELETER}
{
cass_batch_set_is_idempotent(*this, cass_true);

View File

@@ -60,6 +60,13 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), k
cass_cluster_set_connect_timeout(*this, settings.connectionTimeout.count());
cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());
// TODO: AWS keyspace reads should be local_one to save cost
if (settings.provider == "aws_keyspace") {
if (auto const rc = cass_cluster_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Error setting cassandra consistency: {}", cass_error_desc(rc)));
}
}
if (auto const rc = cass_cluster_set_core_connections_per_host(*this, settings.coreConnectionsPerHost);
rc != CASS_OK) {
throw std::runtime_error(fmt::format("Could not set core connections per host: {}", cass_error_desc(rc)));

View File

@@ -45,6 +45,7 @@ struct Settings {
static constexpr uint32_t kDEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING = 10'000;
static constexpr uint32_t kDEFAULT_MAX_READ_REQUESTS_OUTSTANDING = 100'000;
static constexpr std::size_t kDEFAULT_BATCH_SIZE = 20;
static constexpr std::string kDEFAULT_PROVIDER = "cassandra";
/**
* @brief Represents the configuration of contact points for cassandra.
@@ -83,11 +84,14 @@ struct Settings {
uint32_t maxReadRequestsOutstanding = kDEFAULT_MAX_READ_REQUESTS_OUTSTANDING;
/** @brief The number of connection per host to always have active */
uint32_t coreConnectionsPerHost = 1u;
uint32_t coreConnectionsPerHost = 3u;
/** @brief Size of batches when writing */
std::size_t writeBatchSize = kDEFAULT_BATCH_SIZE;
/** @brief Provider to know if we are using scylladb or keyspace */
std::string provider = kDEFAULT_PROVIDER;
/** @brief Size of the IO queue */
std::optional<uint32_t> queueSizeIO = std::nullopt; // NOLINT(readability-redundant-member-init)

View File

@@ -58,14 +58,14 @@ public:
explicit Statement(std::string_view query, Args&&... args)
: ManagedObject{cass_statement_new_n(query.data(), query.size(), sizeof...(args)), kDELETER}
{
cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM);
cass_statement_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM);
cass_statement_set_is_idempotent(*this, cass_true);
bind<Args...>(std::forward<Args>(args)...);
}
/* implicit */ Statement(CassStatement* ptr) : ManagedObject{ptr, kDELETER}
{
cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM);
cass_statement_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM);
cass_statement_set_is_idempotent(*this, cass_true);
}

View File

@@ -22,7 +22,6 @@
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "rpc/JS.hpp"
#include "util/JsonUtils.hpp"
#include "util/Retry.hpp"
#include "util/Spawn.hpp"
#include "util/log/Logger.hpp"
@@ -216,7 +215,7 @@ SubscriptionSource::handleMessage(std::string const& message)
if (object.contains(JS(result))) {
auto const& result = object.at(JS(result)).as_object();
if (result.contains(JS(ledger_index)))
ledgerIndex = util::integralValueAs<uint32_t>(result.at(JS(ledger_index)));
ledgerIndex = result.at(JS(ledger_index)).as_int64();
if (result.contains(JS(validated_ledgers))) {
auto validatedLedgers = boost::json::value_to<std::string>(result.at(JS(validated_ledgers)));
@@ -228,7 +227,7 @@ SubscriptionSource::handleMessage(std::string const& message)
LOG(log_.debug()) << "Received a message of type 'ledgerClosed' on ledger subscription stream. Message: "
<< object;
if (object.contains(JS(ledger_index))) {
ledgerIndex = util::integralValueAs<uint32_t>(object.at(JS(ledger_index)));
ledgerIndex = object.at(JS(ledger_index)).as_int64();
}
if (object.contains(JS(validated_ledgers))) {
auto validatedLedgers = boost::json::value_to<std::string>(object.at(JS(validated_ledgers)));

View File

@@ -26,7 +26,6 @@
#include "rpc/JS.hpp"
#include "rpc/RPCHelpers.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio/spawn.hpp>
@@ -210,7 +209,6 @@ TransactionFeed::pub(
auto& txnPubobj = pubObj[txKey].as_object();
rpc::insertDeliverMaxAlias(txnPubobj, version);
rpc::insertMPTIssuanceID(txnPubobj, meta);
Json::Value nftJson;
ripple::RPC::insertNFTSyntheticInJson(nftJson, tx, *meta);
@@ -224,9 +222,8 @@ TransactionFeed::pub(
auto const& metaObj = pubObj[JS(meta)];
ASSERT(metaObj.is_object(), "meta must be an obj in rippled and clio");
if (metaObj.as_object().contains("TransactionIndex") && metaObj.as_object().at("TransactionIndex").is_int64()) {
if (auto const& ctid = rpc::encodeCTID(
lgrInfo.seq, util::integralValueAs<uint16_t>(metaObj.as_object().at("TransactionIndex")), networkID
);
if (auto const& ctid =
rpc::encodeCTID(lgrInfo.seq, metaObj.as_object().at("TransactionIndex").as_int64(), networkID);
ctid)
pubObj[JS(ctid)] = ctid.value();
}

View File

@@ -28,13 +28,11 @@
#include "rpc/common/Types.hpp"
#include "util/AccountUtils.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include "util/Profiler.hpp"
#include "util/log/Logger.hpp"
#include "web/Context.hpp"
#include <boost/algorithm/string/case_conv.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/format/format_fwd.hpp>
#include <boost/format/free_funcs.hpp>
@@ -174,7 +172,10 @@ canHaveDeliveredAmount(
if (tt != ripple::ttPAYMENT && tt != ripple::ttCHECK_CASH && tt != ripple::ttACCOUNT_DELETE)
return false;
return meta->getResultTER() == ripple::tesSUCCESS;
if (meta->getResultTER() != ripple::tesSUCCESS)
return false;
return true;
}
std::optional<ripple::AccountID>
@@ -258,7 +259,6 @@ toExpandedJson(
auto metaJson = toJson(*meta);
insertDeliveredAmount(metaJson, txn, meta, blobs.date);
insertDeliverMaxAlias(txnJson, apiVersion);
insertMPTIssuanceID(txnJson, meta);
if (nftEnabled == NFTokenjson::ENABLE) {
Json::Value nftJson;
@@ -317,66 +317,6 @@ insertDeliveredAmount(
return false;
}
/**
 * @brief Extract the synthetic MPT issuance ID from transaction metadata.
 *
 * Scans the metadata's affected nodes for a newly created MPTokenIssuance
 * ledger entry and rebuilds its ID from the Sequence and Issuer recorded in
 * the created node's NewFields.
 *
 * @param meta The transaction metadata
 * @return The mpt_issuance_id or std::nullopt if not available
 */
static std::optional<ripple::uint192>
getMPTIssuanceID(std::shared_ptr<ripple::TxMeta const> const& meta)
{
    ripple::TxMeta const& transactionMeta = *meta;
    for (ripple::STObject const& node : transactionMeta.getNodes()) {
        // Only a CreatedNode of type MPTOKEN_ISSUANCE carries the fields
        // needed to reconstruct the issuance ID; skip every other node.
        if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltMPTOKEN_ISSUANCE ||
            node.getFName() != ripple::sfCreatedNode)
            continue;
        auto const& mptNode = node.peekAtField(ripple::sfNewFields).downcast<ripple::STObject>();
        return ripple::makeMptID(mptNode[ripple::sfSequence], mptNode[ripple::sfIssuer]);
    }
    // No created MPTokenIssuance node found in this transaction's metadata.
    return {};
}
/**
 * @brief Check whether a transaction is eligible for the synthetic "mpt_issuance_id" field.
 *
 * A new MPToken issuance only comes into existence via a successful
 * MPTokenIssuanceCreate transaction, so any other (string) transaction type
 * is rejected up front, as is any transaction that did not succeed.
 *
 * @param txnJson The transaction Json
 * @param meta The metadata
 * @return true if the transaction can have a mpt_issuance_id
 */
static bool
canHaveMPTIssuanceID(boost::json::object const& txnJson, std::shared_ptr<ripple::TxMeta const> const& meta)
{
    // Guard the lookup: object::at() throws when the key is absent, and the
    // caller only checks contains(TransactionType) *after* invoking us, so a
    // missing key is a reachable state. A missing/non-string type falls
    // through to the result-code check, matching the original non-string path.
    if (txnJson.contains(JS(TransactionType)) and txnJson.at(JS(TransactionType)).is_string() and
        not boost::iequals(txnJson.at(JS(TransactionType)).as_string(), JS(MPTokenIssuanceCreate)))
        return false;

    // The issuance node exists only if the transaction actually succeeded.
    return meta->getResultTER() == ripple::tesSUCCESS;
}
bool
insertMPTIssuanceID(boost::json::object& txnJson, std::shared_ptr<ripple::TxMeta const> const& meta)
{
    // Bail out unless this transaction type can carry the synthetic field and
    // the transaction succeeded.
    if (!canHaveMPTIssuanceID(txnJson, meta))
        return false;

    // NOTE(review): this guard returns false for an exact-case
    // "MPTokenIssuanceCreate", yet canHaveMPTIssuanceID above only admits
    // MPTokenIssuanceCreate (compared case-insensitively) or a non-string
    // type — the two checks appear to contradict each other, leaving only
    // differently-cased or non-string TransactionType values to proceed.
    // Confirm which behavior is actually intended before relying on this.
    if (txnJson.contains(JS(TransactionType)) && txnJson.at(JS(TransactionType)).is_string() and
        txnJson.at(JS(TransactionType)).as_string() == JS(MPTokenIssuanceCreate))
        return false;

    // The guards above imply metadata must contain a created issuance node,
    // so a missing ID here is a programming error, not an input error.
    auto const id = getMPTIssuanceID(meta);
    ASSERT(id.has_value(), "MPTIssuanceID must have value");
    txnJson[JS(mpt_issuance_id)] = ripple::to_string(*id);
    return true;
}
void
insertDeliverMaxAlias(boost::json::object& txJson, std::uint32_t const apiVersion)
{
@@ -493,8 +433,8 @@ ledgerHeaderFromRequest(std::shared_ptr<data::BackendInterface const> const& bac
} else {
ledgerSequence = parseStringAsUInt(stringIndex);
}
} else if (indexValue.is_int64() or indexValue.is_uint64()) {
ledgerSequence = util::integralValueAs<uint32_t>(indexValue);
} else if (indexValue.is_int64()) {
ledgerSequence = indexValue.as_int64();
}
} else {
ledgerSequence = ctx.range.maxSequence;

View File

@@ -199,18 +199,6 @@ insertDeliveredAmount(
uint32_t date
);
/**
* @brief Add "mpt_issuance_id" into various MPTToken transaction json.
* @note We exclude "mpt_issuance_id" for MPTokenIssuanceCreate only. The reason is because the mpt_issuance_id
* is generated only after one submits MPTokenIssuanceCreate, so theres no way to know what the id is. (rippled)
*
* @param txnJson The transaction Json object
* @param meta The metadata object
* @return true if the "mpt_issuance_id" is added to the txnJson JSON object
*/
bool
insertMPTIssuanceID(boost::json::object& txnJson, std::shared_ptr<ripple::TxMeta const> const& meta);
/**
* @brief Convert STBase object to JSON
*

View File

@@ -23,49 +23,10 @@
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <concepts>
#include <cstdint>
#include <limits>
#include <string>
#include <type_traits>
namespace rpc::validation {
namespace impl {
/**
 * @brief Clamp a JSON integer from above into the range of an unsigned target type.
 *
 * If the stored uint64/int64 payload exceeds Expected's maximum, the value is
 * replaced by that maximum; every other payload is left untouched. Negative
 * int64 values are not clamped here — for unsigned targets they are rejected
 * earlier by the type check.
 */
template <std::unsigned_integral Expected>
void
clampAs(boost::json::value& value)
{
    constexpr auto kCeiling = std::numeric_limits<Expected>::max();
    if (value.is_uint64() and value.as_uint64() > static_cast<uint64_t>(kCeiling)) {
        value = kCeiling;
    } else if (value.is_int64() and value.as_int64() > static_cast<int64_t>(kCeiling)) {
        value = kCeiling;
    }
}
/**
 * @brief Clamp a JSON integer into the representable range of a signed target type.
 *
 * uint64 payloads are clamped from above only (they cannot be below the
 * minimum); int64 payloads are clamped into [min, max] of Expected. Values
 * already in range are left untouched.
 */
template <std::signed_integral Expected>
void
clampAs(boost::json::value& value)
{
    constexpr auto kFloor = std::numeric_limits<Expected>::min();
    constexpr auto kCeiling = std::numeric_limits<Expected>::max();
    if (value.is_uint64()) {
        if (value.as_uint64() > static_cast<uint64_t>(kCeiling))
            value = kCeiling;
    } else if (value.is_int64()) {
        auto const raw = value.as_int64();
        if (raw > static_cast<int64_t>(kCeiling)) {
            value = kCeiling;
        } else if (raw < static_cast<int64_t>(kFloor)) {
            value = kFloor;
        }
    }
}
} // namespace impl
/**
* @brief Check that the type is the same as what was expected.
@@ -75,7 +36,7 @@ clampAs(boost::json::value& value)
* @return true if convertible; false otherwise
*/
template <typename Expected>
[[nodiscard]] bool
[[nodiscard]] static bool
checkType(boost::json::value const& value)
{
auto hasError = false;
@@ -97,7 +58,7 @@ checkType(boost::json::value const& value)
} else if constexpr (std::is_convertible_v<Expected, uint64_t> or std::is_convertible_v<Expected, int64_t>) {
if (not value.is_int64() && not value.is_uint64())
hasError = true;
// if the type specified is unsigned, it should not be negative
// if the type specified is unsigned, it cannot be negative
if constexpr (std::is_unsigned_v<Expected>) {
if (value.is_int64() and value.as_int64() < 0)
hasError = true;
@@ -107,28 +68,4 @@ checkType(boost::json::value const& value)
return not hasError;
}
/**
 * @brief Check that the type is the same as what was expected optionally clamping it into range.
 *
 * Integral (non-bool) targets are clamped into the range representable by
 * Expected so that downstream validators (Min, Max and friends) do not hit
 * Boost.Json's "not exact" conversion error when the value does not fit in
 * the specified type.
 *
 * @tparam Expected The expected type that value should be convertible to
 * @param value The json value to check the type of
 * @return true if convertible; false otherwise
 */
template <typename Expected>
[[nodiscard]] bool
checkTypeAndClamp(boost::json::value& value)
{
    auto const typeMatches = checkType<Expected>(value);
    if constexpr (std::is_integral_v<Expected> and not std::is_same_v<Expected, bool>) {
        // Only mutate the value once the basic type check has succeeded.
        if (typeMatches)
            impl::clampAs<Expected>(value);
    }
    return typeMatches;
}
} // namespace rpc::validation

View File

@@ -142,21 +142,19 @@ template <typename... Types>
struct Type final {
/**
* @brief Verify that the JSON value is (one) of specified type(s).
* @note The value itself can only change for integral types and only if the value is outside of the range of the
* expected integer type (see checkTypeAndClamp).
*
* @param value The JSON value representing the outer object
* @param key The key used to retrieve the tested value from the outer object
* @return `RippledError::rpcINVALID_PARAMS` if validation failed; otherwise no error is returned
*/
[[nodiscard]] MaybeError
verify(boost::json::value& value, std::string_view key) const
verify(boost::json::value const& value, std::string_view key) const
{
if (not value.is_object() or not value.as_object().contains(key))
return {}; // ignore. If field is supposed to exist, let 'required' fail instead
auto& res = value.as_object().at(key);
auto const convertible = (checkTypeAndClamp<Types>(res) || ...);
auto const& res = value.as_object().at(key);
auto const convertible = (checkType<Types>(res) || ...);
if (not convertible)
return Error{Status{RippledError::rpcINVALID_PARAMS}};

View File

@@ -19,7 +19,6 @@
#include "rpc/common/impl/APIVersionParser.hpp"
#include "util/JsonUtils.hpp"
#include "util/config/ObjectView.hpp"
#include "util/log/Logger.hpp"
@@ -63,7 +62,7 @@ ProductionAPIVersionParser::parse(boost::json::object const& request) const
if (!request.at("api_version").is_int64())
return Error{"API version must be an integer"};
auto const version = util::integralValueAs<uint32_t>(request.at("api_version"));
auto const version = request.at("api_version").as_int64();
if (version > maxVersion_)
return Error{fmt::format("Requested API version is higher than maximum supported ({})", maxVersion_)};

View File

@@ -29,7 +29,6 @@
#include "rpc/common/Types.hpp"
#include "rpc/common/Validators.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/array.hpp>
#include <boost/json/conversion.hpp>
@@ -318,7 +317,7 @@ tag_invoke(boost::json::value_to_tag<AMMInfoHandler::Input>, boost::json::value
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
@@ -42,7 +41,6 @@
#include <xrpl/protocol/jss.h>
#include <xrpl/protocol/tokens.h>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
@@ -143,7 +141,7 @@ tag_invoke(boost::json::value_to_tag<AccountChannelsHandler::Input>, boost::json
input.account = boost::json::value_to<std::string>(jv.at(JS(account)));
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jv.at(JS(limit)));
input.limit = jv.at(JS(limit)).as_int64();
if (jsonObject.contains(JS(marker)))
input.marker = boost::json::value_to<std::string>(jv.at(JS(marker)));
@@ -156,7 +154,7 @@ tag_invoke(boost::json::value_to_tag<AccountChannelsHandler::Input>, boost::json
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
@@ -130,7 +129,7 @@ tag_invoke(boost::json::value_to_tag<AccountCurrenciesHandler::Input>, boost::js
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}

View File

@@ -26,7 +26,6 @@
#include "rpc/common/JsonBool.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/array.hpp>
#include <boost/json/conversion.hpp>
@@ -43,7 +42,6 @@
#include <xrpl/protocol/jss.h>
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
@@ -206,7 +204,7 @@ tag_invoke(boost::json::value_to_tag<AccountInfoHandler::Input>, boost::json::va
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index)));
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jsonObject.at(JS(ledger_index))));
}

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
@@ -41,7 +40,6 @@
#include <xrpl/protocol/UintTypes.h>
#include <xrpl/protocol/jss.h>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
@@ -201,7 +199,7 @@ tag_invoke(boost::json::value_to_tag<AccountLinesHandler::Input>, boost::json::v
input.account = boost::json::value_to<std::string>(jv.at(JS(account)));
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jv.at(JS(limit)));
input.limit = jv.at(JS(limit)).as_int64();
if (jsonObject.contains(JS(marker)))
input.marker = boost::json::value_to<std::string>(jv.at(JS(marker)));
@@ -217,7 +215,7 @@ tag_invoke(boost::json::value_to_tag<AccountLinesHandler::Input>, boost::json::v
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
@@ -159,14 +158,14 @@ tag_invoke(boost::json::value_to_tag<AccountNFTsHandler::Input>, boost::json::va
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index)));
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jsonObject.at(JS(ledger_index))));
}
}
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jsonObject.at(JS(limit)));
input.limit = jsonObject.at(JS(limit)).as_int64();
if (jsonObject.contains(JS(marker)))
input.marker = boost::json::value_to<std::string>(jsonObject.at(JS(marker)));

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include "util/LedgerUtils.hpp"
#include <boost/json/array.hpp>
@@ -39,7 +38,6 @@
#include <xrpl/protocol/jss.h>
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
@@ -155,7 +153,7 @@ tag_invoke(boost::json::value_to_tag<AccountObjectsHandler::Input>, boost::json:
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}
@@ -167,7 +165,7 @@ tag_invoke(boost::json::value_to_tag<AccountObjectsHandler::Input>, boost::json:
}
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jv.at(JS(limit)));
input.limit = jv.at(JS(limit)).as_int64();
if (jsonObject.contains(JS(marker)))
input.marker = boost::json::value_to<std::string>(jv.at(JS(marker)));

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
@@ -41,7 +40,6 @@
#include <xrpl/protocol/UintTypes.h>
#include <xrpl/protocol/jss.h>
#include <cstdint>
#include <string>
#include <vector>
@@ -171,14 +169,14 @@ tag_invoke(boost::json::value_to_tag<AccountOffersHandler::Input>, boost::json::
}
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index)));
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jsonObject.at(JS(ledger_index))));
}
}
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jsonObject.at(JS(limit)));
input.limit = jsonObject.at(JS(limit)).as_int64();
if (jsonObject.contains(JS(marker)))
input.marker = boost::json::value_to<std::string>(jsonObject.at(JS(marker)));

View File

@@ -167,8 +167,7 @@ AccountTxHandler::process(AccountTxHandler::Input const& input, Context const& c
if (auto const& etlState = etl_->getETLState(); etlState.has_value())
networkID = etlState->networkID;
auto const txnIdx =
util::integralValueAs<uint16_t>(obj[JS(meta)].as_object().at("TransactionIndex"));
auto const txnIdx = obj[JS(meta)].as_object().at("TransactionIndex").as_int64();
if (auto const& ctid = rpc::encodeCTID(txnPlusMeta.ledgerSequence, txnIdx, networkID); ctid)
obj[txKey].as_object()[JS(ctid)] = ctid.value();
}
@@ -246,20 +245,18 @@ tag_invoke(boost::json::value_to_tag<AccountTxHandler::Input>, boost::json::valu
input.account = boost::json::value_to<std::string>(jsonObject.at(JS(account)));
if (jsonObject.contains(JS(ledger_index_min)) &&
util::integralValueAs<int32_t>(jsonObject.at(JS(ledger_index_min))) != -1)
input.ledgerIndexMin = util::integralValueAs<int32_t>(jsonObject.at(JS(ledger_index_min)));
if (jsonObject.contains(JS(ledger_index_min)) && jsonObject.at(JS(ledger_index_min)).as_int64() != -1)
input.ledgerIndexMin = jsonObject.at(JS(ledger_index_min)).as_int64();
if (jsonObject.contains(JS(ledger_index_max)) &&
util::integralValueAs<int32_t>(jsonObject.at(JS(ledger_index_max))) != -1)
input.ledgerIndexMax = util::integralValueAs<int32_t>(jsonObject.at(JS(ledger_index_max)));
if (jsonObject.contains(JS(ledger_index_max)) && jsonObject.at(JS(ledger_index_max)).as_int64() != -1)
input.ledgerIndexMax = jsonObject.at(JS(ledger_index_max)).as_int64();
if (jsonObject.contains(JS(ledger_hash)))
input.ledgerHash = boost::json::value_to<std::string>(jsonObject.at(JS(ledger_hash)));
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index)));
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jsonObject.at(JS(ledger_index))));
} else {
@@ -275,12 +272,12 @@ tag_invoke(boost::json::value_to_tag<AccountTxHandler::Input>, boost::json::valu
input.forward = boost::json::value_to<JsonBool>(jsonObject.at(JS(forward)));
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jsonObject.at(JS(limit)));
input.limit = jsonObject.at(JS(limit)).as_int64();
if (jsonObject.contains(JS(marker))) {
input.marker = AccountTxHandler::Marker{
.ledger = util::integralValueAs<uint32_t>(jsonObject.at(JS(marker)).as_object().at(JS(ledger))),
.seq = util::integralValueAs<uint32_t>(jsonObject.at(JS(marker)).as_object().at(JS(seq)))
.ledger = jsonObject.at(JS(marker)).as_object().at(JS(ledger)).as_int64(),
.seq = jsonObject.at(JS(marker)).as_object().at(JS(seq)).as_int64()
};
}

View File

@@ -25,7 +25,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
@@ -36,7 +35,6 @@
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/jss.h>
#include <cstdint>
#include <string>
#include <vector>
@@ -93,7 +91,7 @@ tag_invoke(boost::json::value_to_tag<BookChangesHandler::Input>, boost::json::va
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}

View File

@@ -23,7 +23,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
@@ -38,7 +37,6 @@
#include <xrpl/protocol/UintTypes.h>
#include <xrpl/protocol/jss.h>
#include <cstdint>
#include <string>
namespace rpc {
@@ -124,7 +122,7 @@ tag_invoke(boost::json::value_to_tag<BookOffersHandler::Input>, boost::json::val
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}
@@ -137,7 +135,7 @@ tag_invoke(boost::json::value_to_tag<BookOffersHandler::Input>, boost::json::val
input.domain = boost::json::value_to<std::string>(jv.at(JS(domain)));
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jv.at(JS(limit)));
input.limit = jv.at(JS(limit)).as_int64();
return input;
}

View File

@@ -25,7 +25,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/array.hpp>
#include <boost/json/conversion.hpp>
@@ -43,7 +42,6 @@
#include <xrpl/protocol/Serializer.h>
#include <xrpl/protocol/jss.h>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
@@ -148,7 +146,7 @@ tag_invoke(boost::json::value_to_tag<DepositAuthorizedHandler::Input>, boost::js
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}

View File

@@ -28,7 +28,6 @@
#include "rpc/common/Types.hpp"
#include "rpc/common/Validators.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
@@ -170,7 +169,7 @@ tag_invoke(boost::json::value_to_tag<FeatureHandler::Input>, boost::json::value
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/array.hpp>
#include <boost/json/conversion.hpp>
@@ -251,7 +250,7 @@ tag_invoke(boost::json::value_to_tag<GatewayBalancesHandler::Input>, boost::json
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}

View File

@@ -25,7 +25,6 @@
#include "rpc/common/Types.hpp"
#include "util/AccountUtils.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/asio/spawn.hpp>
#include <boost/bimap/bimap.hpp>
@@ -265,7 +264,7 @@ tag_invoke(boost::json::value_to_tag<GetAggregatePriceHandler::Input>, boost::js
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}
@@ -285,10 +284,10 @@ tag_invoke(boost::json::value_to_tag<GetAggregatePriceHandler::Input>, boost::js
input.quoteAsset = boost::json::value_to<std::string>(jv.at(JS(quote_asset)));
if (jsonObject.contains(JS(trim)))
input.trim = util::integralValueAs<uint8_t>(jv.at(JS(trim)));
input.trim = jv.at(JS(trim)).as_int64();
if (jsonObject.contains(JS(time_threshold)))
input.timeThreshold = util::integralValueAs<uint32_t>(jv.at(JS(time_threshold)));
input.timeThreshold = jv.at(JS(time_threshold)).as_int64();
return input;
}

View File

@@ -131,8 +131,9 @@ public:
!oracle.as_object().contains(JS(account)))
return Error{Status{RippledError::rpcORACLE_MALFORMED}};
auto maybeError =
validation::Type<std::uint32_t, std::string>{}.verify(oracle, JS(oracle_document_id));
auto maybeError = validation::Type<std::uint32_t, std::string>{}.verify(
oracle.as_object(), JS(oracle_document_id)
);
if (!maybeError)
return maybeError;

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/array.hpp>
#include <boost/json/conversion.hpp>
@@ -43,7 +42,6 @@
#include <xrpl/protocol/jss.h>
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
@@ -211,7 +209,7 @@ tag_invoke(boost::json::value_to_tag<LedgerHandler::Input>, boost::json::value c
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}

View File

@@ -25,7 +25,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include "util/LedgerUtils.hpp"
#include "util/log/Logger.hpp"
@@ -45,7 +44,6 @@
#include <algorithm>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
@@ -194,7 +192,7 @@ tag_invoke(boost::json::value_to_tag<LedgerDataHandler::Input>, boost::json::val
}
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jsonObject.at(JS(limit)));
input.limit = jsonObject.at(JS(limit)).as_int64();
if (jsonObject.contains("out_of_order"))
input.outOfOrder = jsonObject.at("out_of_order").as_bool();
@@ -203,7 +201,7 @@ tag_invoke(boost::json::value_to_tag<LedgerDataHandler::Input>, boost::json::val
if (jsonObject.at(JS(marker)).is_string()) {
input.marker = ripple::uint256{boost::json::value_to<std::string>(jsonObject.at(JS(marker))).data()};
} else {
input.diffMarker = util::integralValueAs<uint32_t>(jsonObject.at(JS(marker)));
input.diffMarker = jsonObject.at(JS(marker)).as_int64();
}
}
@@ -212,7 +210,7 @@ tag_invoke(boost::json::value_to_tag<LedgerDataHandler::Input>, boost::json::val
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index)));
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jsonObject.at(JS(ledger_index))));
}

View File

@@ -26,7 +26,6 @@
#include "rpc/common/Types.hpp"
#include "util/AccountUtils.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
@@ -95,7 +94,7 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input const& input, Context cons
auto const id = util::parseBase58Wrapper<ripple::AccountID>(
boost::json::value_to<std::string>(input.escrow->at(JS(owner)))
);
key = ripple::keylet::escrow(*id, util::integralValueAs<uint32_t>(input.escrow->at(JS(seq)))).key;
key = ripple::keylet::escrow(*id, input.escrow->at(JS(seq)).as_int64()).key;
} else if (input.depositPreauth) {
auto const owner = util::parseBase58Wrapper<ripple::AccountID>(
boost::json::value_to<std::string>(input.depositPreauth->at(JS(owner)))
@@ -129,7 +128,7 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input const& input, Context cons
boost::json::value_to<std::string>(input.ticket->at(JS(account)))
);
key = ripple::getTicketIndex(*id, util::integralValueAs<uint32_t>(input.ticket->at(JS(ticket_seq))));
key = ripple::getTicketIndex(*id, input.ticket->at(JS(ticket_seq)).as_int64());
} else if (input.amm) {
auto const getIssuerFromJson = [](auto const& assetJson) {
// the field check has been done in validator
@@ -183,12 +182,12 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input const& input, Context cons
auto const account = ripple::parseBase58<ripple::AccountID>(
boost::json::value_to<std::string>(input.permissionedDomain->at(JS(account)))
);
auto const seq = util::integralValueAs<uint32_t>(input.permissionedDomain->at(JS(seq)));
auto const seq = input.permissionedDomain->at(JS(seq)).as_int64();
key = ripple::keylet::permissionedDomain(*account, seq).key;
} else if (input.vault) {
auto const account =
ripple::parseBase58<ripple::AccountID>(boost::json::value_to<std::string>(input.vault->at(JS(owner))));
auto const seq = util::integralValueAs<uint32_t>(input.vault->at(JS(seq)));
auto const seq = input.vault->at(JS(seq)).as_int64();
key = ripple::keylet::vault(*account, seq).key;
} else if (input.delegate) {
auto const account =
@@ -305,7 +304,7 @@ tag_invoke(boost::json::value_to_tag<LedgerEntryHandler::Input>, boost::json::va
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/array.hpp>
#include <boost/json/conversion.hpp>
@@ -39,7 +38,6 @@
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/STLedgerEntry.h>
#include <cstdint>
#include <optional>
#include <string>
@@ -126,14 +124,14 @@ tag_invoke(boost::json::value_to_tag<MPTHoldersHandler::Input>, boost::json::val
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index)));
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str());
}
}
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jsonObject.at(JS(limit)));
input.limit = jsonObject.at(JS(limit)).as_int64();
if (jsonObject.contains(JS(marker)))
input.marker = jsonObject.at(JS(marker)).as_string().c_str();

View File

@@ -25,7 +25,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include "util/Profiler.hpp"
#include "util/log/Logger.hpp"
@@ -204,20 +203,18 @@ tag_invoke(boost::json::value_to_tag<NFTHistoryHandler::Input>, boost::json::val
input.nftID = boost::json::value_to<std::string>(jsonObject.at(JS(nft_id)));
if (jsonObject.contains(JS(ledger_index_min)) &&
util::integralValueAs<int32_t>(jsonObject.at(JS(ledger_index_min))) != -1)
input.ledgerIndexMin = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index_min)));
if (jsonObject.contains(JS(ledger_index_min)) && jsonObject.at(JS(ledger_index_min)).as_int64() != -1)
input.ledgerIndexMin = jsonObject.at(JS(ledger_index_min)).as_int64();
if (jsonObject.contains(JS(ledger_index_max)) &&
util::integralValueAs<int32_t>(jsonObject.at(JS(ledger_index_max))) != -1)
input.ledgerIndexMax = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index_max)));
if (jsonObject.contains(JS(ledger_index_max)) && jsonObject.at(JS(ledger_index_max)).as_int64() != -1)
input.ledgerIndexMax = jsonObject.at(JS(ledger_index_max)).as_int64();
if (jsonObject.contains(JS(ledger_hash)))
input.ledgerHash = boost::json::value_to<std::string>(jsonObject.at(JS(ledger_hash)));
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index)));
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jsonObject.at(JS(ledger_index))));
}
@@ -230,12 +227,12 @@ tag_invoke(boost::json::value_to_tag<NFTHistoryHandler::Input>, boost::json::val
input.forward = jsonObject.at(JS(forward)).as_bool();
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jsonObject.at(JS(limit)));
input.limit = jsonObject.at(JS(limit)).as_int64();
if (jsonObject.contains(JS(marker))) {
input.marker = NFTHistoryHandler::Marker{
.ledger = util::integralValueAs<uint32_t>(jsonObject.at(JS(marker)).as_object().at(JS(ledger))),
.seq = util::integralValueAs<uint32_t>(jsonObject.at(JS(marker)).as_object().at(JS(seq)))
.ledger = jsonObject.at(JS(marker)).as_object().at(JS(ledger)).as_int64(),
.seq = jsonObject.at(JS(marker)).as_object().at(JS(seq)).as_int64()
};
}

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
@@ -37,7 +36,6 @@
#include <xrpl/protocol/jss.h>
#include <xrpl/protocol/nft.h>
#include <cstdint>
#include <string>
using namespace ripple;
@@ -118,7 +116,7 @@ tag_invoke(boost::json::value_to_tag<NFTInfoHandler::Input>, boost::json::value
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index)));
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jsonObject.at(JS(ledger_index))));
}

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/asio/spawn.hpp>
#include <boost/json/conversion.hpp>
@@ -196,7 +195,7 @@ tag_invoke(boost::json::value_to_tag<NFTOffersHandlerBase::Input>, boost::json::
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index)));
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jsonObject.at(JS(ledger_index))));
}
@@ -206,7 +205,7 @@ tag_invoke(boost::json::value_to_tag<NFTOffersHandlerBase::Input>, boost::json::
input.marker = boost::json::value_to<std::string>(jsonObject.at(JS(marker)));
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jsonObject.at(JS(limit)));
input.limit = jsonObject.at(JS(limit)).as_int64();
return input;
}

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
@@ -38,7 +37,6 @@
#include <xrpl/protocol/jss.h>
#include <xrpl/protocol/nft.h>
#include <cstdint>
#include <optional>
#include <string>
@@ -138,17 +136,17 @@ tag_invoke(boost::json::value_to_tag<NFTsByIssuerHandler::Input>, boost::json::v
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index)));
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jsonObject.at(JS(ledger_index))));
}
}
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jsonObject.at(JS(limit)));
input.limit = jsonObject.at(JS(limit)).as_int64();
if (jsonObject.contains(JS(nft_taxon)))
input.nftTaxon = util::integralValueAs<uint32_t>(jsonObject.at(JS(nft_taxon)));
input.nftTaxon = jsonObject.at(JS(nft_taxon)).as_int64();
if (jsonObject.contains(JS(marker)))
input.marker = boost::json::value_to<std::string>(jsonObject.at(JS(marker)));

View File

@@ -25,7 +25,6 @@
#include "rpc/common/JsonBool.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/array.hpp>
#include <boost/json/conversion.hpp>
@@ -188,7 +187,7 @@ tag_invoke(boost::json::value_to_tag<NoRippleCheckHandler::Input>, boost::json::
input.roleGateway = jsonObject.at(JS(role)).as_string() == "gateway";
if (jsonObject.contains(JS(limit)))
input.limit = util::integralValueAs<uint32_t>(jsonObject.at(JS(limit)));
input.limit = jsonObject.at(JS(limit)).as_int64();
if (jsonObject.contains(JS(transactions)))
input.transactions = boost::json::value_to<JsonBool>(jsonObject.at(JS(transactions)));
@@ -198,7 +197,7 @@ tag_invoke(boost::json::value_to_tag<NoRippleCheckHandler::Input>, boost::json::
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index)));
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jsonObject.at(JS(ledger_index))));
}

View File

@@ -24,7 +24,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
@@ -34,7 +33,6 @@
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/jss.h>
#include <cstdint>
#include <string>
#include <utility>
@@ -112,7 +110,7 @@ tag_invoke(boost::json::value_to_tag<TransactionEntryHandler::Input>, boost::jso
if (jsonObject.contains(JS(ledger_index))) {
if (!jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
}

View File

@@ -332,10 +332,10 @@ private:
input.binary = boost::json::value_to<JsonBool>(jsonObject.at(JS(binary)));
if (jsonObject.contains(JS(min_ledger)))
input.minLedger = util::integralValueAs<uint32_t>(jv.at(JS(min_ledger)));
input.minLedger = jv.at(JS(min_ledger)).as_int64();
if (jsonObject.contains(JS(max_ledger)))
input.maxLedger = util::integralValueAs<uint32_t>(jv.at(JS(max_ledger)));
input.maxLedger = jv.at(JS(max_ledger)).as_int64();
return input;
}

View File

@@ -25,7 +25,6 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
@@ -172,14 +171,14 @@ tag_invoke(boost::json::value_to_tag<VaultInfoHandler::Input>, boost::json::valu
input.owner = jsonObject.at(JS(owner)).as_string();
if (jsonObject.contains(JS(seq)))
input.tnxSequence = util::integralValueAs<uint32_t>(jsonObject.at(JS(seq)));
input.tnxSequence = static_cast<uint32_t>(jsonObject.at(JS(seq)).as_int64());
if (jsonObject.contains(JS(vault_id)))
input.vaultID = jsonObject.at(JS(vault_id)).as_string();
if (jsonObject.contains(JS(ledger_index))) {
if (not jsonObject.at(JS(ledger_index)).is_string()) {
input.ledgerIndex = util::integralValueAs<uint32_t>(jsonObject.at(JS(ledger_index)));
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
} else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
input.ledgerIndex = std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str());
}

View File

@@ -24,8 +24,6 @@
#include <algorithm>
#include <cctype>
#include <concepts>
#include <stdexcept>
#include <string>
/**
@@ -88,26 +86,4 @@ removeSecret(boost::json::object const& object)
return newObject;
}
/**
* @brief Detects the type of number stored in value and casts it back to the requested Type.
* @note This conversion can possibly cause wrapping around or UB. Use with caution.
*
* @tparam Type The type to cast to
* @param value The JSON value to cast
* @return Value casted to the requested type
* @throws logic_error if the underlying number is neither int64 nor uint64
*/
template <std::integral Type>
Type
integralValueAs(boost::json::value const& value)
{
if (value.is_uint64())
return static_cast<Type>(value.as_uint64());
if (value.is_int64())
return static_cast<Type>(value.as_int64());
throw std::logic_error("Value neither uint64 nor int64");
}
} // namespace util

View File

@@ -30,6 +30,7 @@
#include <utility>
namespace util {
namespace impl {
class SignalsHandlerStatic {

View File

@@ -37,10 +37,9 @@
#include <optional>
namespace util {
namespace impl {
class SignalsHandlerStatic;
} // namespace impl
/**

View File

@@ -84,6 +84,11 @@ static constexpr std::array<char const*, 1> kDATABASE_TYPE = {"cassandra"};
*/
static constexpr std::array<char const*, 2> kPROCESSING_POLICY = {"parallel", "sequent"};
/**
* @brief specific values that are accepted for database provider in config.
*/
static constexpr std::array<char const*, 2> kPROVIDER = {"cassandra", "aws_keyspace"};
/**
* @brief An interface to enforce constraints on certain values within ClioConfigDefinition.
*/
@@ -470,6 +475,7 @@ static constinit OneOf gValidateCassandraName{"database.type", kDATABASE_TYPE};
static constinit OneOf gValidateLoadMode{"cache.load", kLOAD_CACHE_MODE};
static constinit OneOf gValidateLogTag{"log.tag_style", kLOG_TAGS};
static constinit OneOf gValidateProcessingPolicy{"server.processing_policy", kPROCESSING_POLICY};
static constinit OneOf gValidateProvider{"database.cassandra.provider", kPROVIDER};
static constinit PositiveDouble gValidatePositiveDouble{};

View File

@@ -285,6 +285,8 @@ getClioConfig()
{"database.cassandra.username", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.password", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.certfile", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.provider",
ConfigValue{ConfigType::String}.defaultValue("cassandra").withConstraint(gValidateProvider)},
{"allow_no_etl", ConfigValue{ConfigType::Boolean}.defaultValue(false)},
{"__ng_etl", ConfigValue{ConfigType::Boolean}.defaultValue(false)},

View File

@@ -173,6 +173,7 @@ This document provides a list of all available Clio configuration properties in
"Maximum number of outstanding read requests. Read requests are API calls that read from the database."},
KV{.key = "database.cassandra.threads",
.value = "Represents the number of threads that will be used for database operations."},
KV{.key = "database.cassandra.provider", .value = "The specific database backend provider we are using."},
KV{.key = "database.cassandra.core_connections_per_host",
.value = "The number of core connections per host for the Cassandra database."},
KV{.key = "database.cassandra.queue_size_io",

View File

@@ -41,7 +41,6 @@
#include <spdlog/spdlog.h>
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <filesystem>

View File

@@ -19,7 +19,6 @@
#pragma once
#include "data/LedgerCacheInterface.hpp"
#include "util/Taggable.hpp"
#include "web/AdminVerificationStrategy.hpp"
#include "web/PlainWsSession.hpp"
@@ -70,7 +69,6 @@ public:
* @param tagFactory A factory that is used to generate tags to track requests and sessions
* @param dosGuard The denial of service guard to use
* @param handler The server handler to use
* @param cache The ledger cache to use
* @param buffer Buffer with initial data received from the peer
* @param maxWsSendingQueueSize The maximum size of the sending queue for websocket
*/
@@ -82,7 +80,6 @@ public:
std::reference_wrapper<util::TagDecoratorFactory const> tagFactory,
std::reference_wrapper<dosguard::DOSGuardInterface> dosGuard,
std::shared_ptr<HandlerType> const& handler,
std::reference_wrapper<data::LedgerCacheInterface const> cache,
boost::beast::flat_buffer buffer,
std::uint32_t maxWsSendingQueueSize
)
@@ -93,7 +90,6 @@ public:
std::move(proxyIpResolver),
dosGuard,
handler,
cache,
std::move(buffer)
)
, stream_(std::move(socket))

View File

@@ -19,7 +19,6 @@
#pragma once
#include "data/LedgerCacheInterface.hpp"
#include "util/Taggable.hpp"
#include "util/log/Logger.hpp"
#include "web/AdminVerificationStrategy.hpp"
@@ -86,7 +85,6 @@ class Detector : public std::enable_shared_from_this<Detector<PlainSessionType,
std::reference_wrapper<util::TagDecoratorFactory const> tagFactory_;
std::reference_wrapper<dosguard::DOSGuardInterface> const dosGuard_;
std::shared_ptr<HandlerType> const handler_;
std::reference_wrapper<data::LedgerCacheInterface const> cache_;
boost::beast::flat_buffer buffer_;
std::shared_ptr<AdminVerificationStrategy> const adminVerification_;
std::uint32_t maxWsSendingQueueSize_;
@@ -101,7 +99,6 @@ public:
* @param tagFactory A factory that is used to generate tags to track requests and sessions
* @param dosGuard The denial of service guard to use
* @param handler The server handler to use
* @param cache The ledger cache to use
* @param adminVerification The admin verification strategy to use
* @param maxWsSendingQueueSize The maximum size of the sending queue for websocket
* @param proxyIpResolver The client ip resolver if a request was forwarded by a proxy
@@ -112,7 +109,6 @@ public:
std::reference_wrapper<util::TagDecoratorFactory const> tagFactory,
std::reference_wrapper<dosguard::DOSGuardInterface> dosGuard,
std::shared_ptr<HandlerType> handler,
std::reference_wrapper<data::LedgerCacheInterface const> cache,
std::shared_ptr<AdminVerificationStrategy> adminVerification,
std::uint32_t maxWsSendingQueueSize,
std::shared_ptr<ProxyIpResolver> proxyIpResolver
@@ -122,7 +118,6 @@ public:
, tagFactory_(std::cref(tagFactory))
, dosGuard_(dosGuard)
, handler_(std::move(handler))
, cache_(cache)
, adminVerification_(std::move(adminVerification))
, maxWsSendingQueueSize_(maxWsSendingQueueSize)
, proxyIpResolver_(std::move(proxyIpResolver))
@@ -184,7 +179,6 @@ public:
tagFactory_,
dosGuard_,
handler_,
cache_,
std::move(buffer_),
maxWsSendingQueueSize_
)
@@ -200,7 +194,6 @@ public:
tagFactory_,
dosGuard_,
handler_,
cache_,
std::move(buffer_),
maxWsSendingQueueSize_
)
@@ -230,7 +223,6 @@ class Server : public std::enable_shared_from_this<Server<PlainSessionType, SslS
util::TagDecoratorFactory tagFactory_;
std::reference_wrapper<dosguard::DOSGuardInterface> dosGuard_;
std::shared_ptr<HandlerType> handler_;
std::reference_wrapper<data::LedgerCacheInterface const> cache_;
tcp::acceptor acceptor_;
std::shared_ptr<AdminVerificationStrategy> adminVerification_;
std::uint32_t maxWsSendingQueueSize_;
@@ -246,7 +238,6 @@ public:
* @param tagFactory A factory that is used to generate tags to track requests and sessions
* @param dosGuard The denial of service guard to use
* @param handler The server handler to use
* @param cache The ledger cache to use
* @param adminVerification The admin verification strategy to use
* @param maxWsSendingQueueSize The maximum size of the sending queue for websocket
* @param proxyIpResolver The client ip resolver if a request was forwarded by a proxy
@@ -258,7 +249,6 @@ public:
util::TagDecoratorFactory tagFactory,
dosguard::DOSGuardInterface& dosGuard,
std::shared_ptr<HandlerType> handler,
std::reference_wrapper<data::LedgerCacheInterface const> cache,
std::shared_ptr<AdminVerificationStrategy> adminVerification,
std::uint32_t maxWsSendingQueueSize,
ProxyIpResolver proxyIpResolver
@@ -268,7 +258,6 @@ public:
, tagFactory_(tagFactory)
, dosGuard_(std::ref(dosGuard))
, handler_(std::move(handler))
, cache_(cache)
, acceptor_(boost::asio::make_strand(ioc))
, adminVerification_(std::move(adminVerification))
, maxWsSendingQueueSize_(maxWsSendingQueueSize)
@@ -331,7 +320,6 @@ private:
std::cref(tagFactory_),
dosGuard_,
handler_,
cache_,
adminVerification_,
maxWsSendingQueueSize_,
proxyIpResolver_
@@ -355,7 +343,6 @@ using HttpServer = Server<HttpSession, SslHttpSession, HandlerType>;
* @param ioc The server will run under this io_context
* @param dosGuard The dos guard to protect the server
* @param handler The handler to process the request
* @param cache The ledger cache to use
* @return The server instance
*/
template <typename HandlerType>
@@ -364,8 +351,7 @@ makeHttpServer(
util::config::ClioConfigDefinition const& config,
boost::asio::io_context& ioc,
dosguard::DOSGuardInterface& dosGuard,
std::shared_ptr<HandlerType> const& handler,
std::reference_wrapper<data::LedgerCacheInterface const> cache
std::shared_ptr<HandlerType> const& handler
)
{
static util::Logger const log{"WebServer"}; // NOLINT(readability-identifier-naming)
@@ -399,7 +385,6 @@ makeHttpServer(
util::TagDecoratorFactory(config),
dosGuard,
handler,
cache,
std::move(expectedAdminVerification).value(),
maxWsSendingQueueSize,
std::move(proxyIpResolver)

View File

@@ -19,7 +19,6 @@
#pragma once
#include "data/LedgerCacheInterface.hpp"
#include "util/Taggable.hpp"
#include "web/AdminVerificationStrategy.hpp"
#include "web/ProxyIpResolver.hpp"
@@ -77,7 +76,6 @@ public:
* @param tagFactory A factory that is used to generate tags to track requests and sessions
* @param dosGuard The denial of service guard to use
* @param handler The server handler to use
* @param cache The ledger cache to use
* @param buffer Buffer with initial data received from the peer
* @param maxWsSendingQueueSize The maximum size of the sending queue for websocket
*/
@@ -90,7 +88,6 @@ public:
std::reference_wrapper<util::TagDecoratorFactory const> tagFactory,
std::reference_wrapper<dosguard::DOSGuardInterface> dosGuard,
std::shared_ptr<HandlerType> const& handler,
std::reference_wrapper<data::LedgerCacheInterface const> cache,
boost::beast::flat_buffer buffer,
std::uint32_t maxWsSendingQueueSize
)
@@ -101,7 +98,6 @@ public:
std::move(proxyIpResolver),
dosGuard,
handler,
cache,
std::move(buffer)
)
, stream_(std::move(socket), ctx)

View File

@@ -19,7 +19,6 @@
#pragma once
#include "data/LedgerCacheInterface.hpp"
#include "rpc/Errors.hpp"
#include "util/Assert.hpp"
#include "util/Taggable.hpp"
@@ -72,22 +71,6 @@ static constexpr auto kHEALTH_CHECK_HTML = R"html(
</html>
)html";
static constexpr auto kCACHE_CHECK_LOADED_HTML = R"html(
<!DOCTYPE html>
<html>
<head><title>Cache state</title></head>
<body><h1>Cache state</h1><p>Cache is fully loaded</p></body>
</html>
)html";
static constexpr auto kCACHE_CHECK_NOT_LOADED_HTML = R"html(
<!DOCTYPE html>
<html>
<head><title>Cache state</title></head>
<body><h1>Cache state</h1><p>Cache is not yet loaded</p></body>
</html>
)html";
using tcp = boost::asio::ip::tcp;
/**
@@ -145,7 +128,6 @@ protected:
http::request<http::string_body> req_;
std::reference_wrapper<dosguard::DOSGuardInterface> dosGuard_;
std::shared_ptr<HandlerType> const handler_;
std::reference_wrapper<data::LedgerCacheInterface const> cache_;
util::Logger log_{"WebServer"};
util::Logger perfLog_{"Performance"};
@@ -187,7 +169,6 @@ public:
std::shared_ptr<ProxyIpResolver> proxyIpResolver,
std::reference_wrapper<dosguard::DOSGuardInterface> dosGuard,
std::shared_ptr<HandlerType> handler,
std::reference_wrapper<data::LedgerCacheInterface const> cache,
boost::beast::flat_buffer buffer
)
: ConnectionBase(tagFactory, ip)
@@ -197,7 +178,6 @@ public:
, buffer_(std::move(buffer))
, dosGuard_(dosGuard)
, handler_(std::move(handler))
, cache_(cache)
{
LOG(perfLog_.debug()) << tag() << "http session created";
dosGuard_.get().increment(ip);
@@ -242,13 +222,6 @@ public:
if (req_.method() == http::verb::get and req_.target() == "/health")
return sender_(httpResponse(http::status::ok, "text/html", kHEALTH_CHECK_HTML));
if (req_.method() == http::verb::get and req_.target() == "/cache_state") {
if (cache_.get().isFull())
return sender_(httpResponse(http::status::ok, "text/html", kCACHE_CHECK_LOADED_HTML));
return sender_(httpResponse(http::status::service_unavailable, "text/html", kCACHE_CHECK_NOT_LOADED_HTML));
}
if (auto resolvedIp = proxyIpResolver_->resolveClientIp(clientIp_, req_); resolvedIp != clientIp_) {
LOG(log_.info()) << tag() << "Detected a forwarded request from proxy. Proxy ip: " << clientIp_
<< ". Resolved client ip: " << resolvedIp;

View File

@@ -46,6 +46,7 @@
#include <openssl/tls1.h>
#include <chrono>
#include <optional>
#include <string>
#include <string_view>
#include <utility>

View File

@@ -1528,66 +1528,6 @@ createMPTIssuanceCreateTxWithMetadata(std::string_view accountId, uint32_t fee,
return ret;
}
ripple::STObject
createMPTokenAuthorizeTx(
std::string_view accountId,
ripple::uint192 const& mptIssuanceID,
uint32_t fee,
uint32_t seq,
std::optional<std::string_view> holder,
std::optional<std::uint32_t> flags
)
{
ripple::STObject tx(ripple::sfTransaction);
tx.setFieldU16(ripple::sfTransactionType, ripple::ttMPTOKEN_AUTHORIZE);
tx.setAccountID(ripple::sfAccount, getAccountIdWithString(accountId));
tx[ripple::sfMPTokenIssuanceID] = mptIssuanceID;
tx.setFieldAmount(ripple::sfFee, ripple::STAmount(fee, false));
tx.setFieldU32(ripple::sfSequence, seq);
tx.setFieldVL(ripple::sfSigningPubKey, kSLICE);
if (holder)
tx.setAccountID(ripple::sfHolder, getAccountIdWithString(*holder));
if (flags)
tx.setFieldU32(ripple::sfFlags, *flags);
return tx;
}
data::TransactionAndMetadata
createMPTokenAuthorizeTxWithMetadata(
std::string_view accountId,
ripple::uint192 const& mptIssuanceID,
uint32_t fee,
uint32_t seq
)
{
ripple::STObject const tx = createMPTokenAuthorizeTx(accountId, mptIssuanceID, fee, seq);
ripple::STObject metaObj(ripple::sfTransactionMetaData);
metaObj.setFieldU8(ripple::sfTransactionResult, ripple::tesSUCCESS);
metaObj.setFieldU32(ripple::sfTransactionIndex, 0);
ripple::STObject finalFields(ripple::sfFinalFields);
finalFields.setFieldU16(ripple::sfLedgerEntryType, ripple::ltMPTOKEN);
finalFields[ripple::sfMPTokenIssuanceID] = mptIssuanceID;
finalFields.setFieldU64(ripple::sfMPTAmount, 0);
ripple::STObject modifiedNode(ripple::sfModifiedNode);
modifiedNode.setFieldU16(ripple::sfLedgerEntryType, ripple::ltMPTOKEN);
modifiedNode.setFieldH256(ripple::sfLedgerIndex, ripple::uint256{});
modifiedNode.emplace_back(std::move(finalFields));
ripple::STArray affectedNodes(ripple::sfAffectedNodes);
affectedNodes.push_back(std::move(modifiedNode));
metaObj.setFieldArray(ripple::sfAffectedNodes, affectedNodes);
data::TransactionAndMetadata ret;
ret.transaction = tx.getSerializer().peekData();
ret.metadata = metaObj.getSerializer().peekData();
return ret;
}
ripple::STObject
createPermissionedDomainObject(
std::string_view accountId,

View File

@@ -462,26 +462,6 @@ createMPTIssuanceCreateTx(std::string_view accountId, uint32_t fee, uint32_t seq
[[nodiscard]] data::TransactionAndMetadata
createMPTIssuanceCreateTxWithMetadata(std::string_view accountId, uint32_t fee, uint32_t seq);
[[nodiscard]]
ripple::STObject
createMPTokenAuthorizeTx(
std::string_view accountId,
ripple::uint192 const& mptIssuanceID,
uint32_t fee,
uint32_t seq,
std::optional<std::string_view> holder = std::nullopt,
std::optional<std::uint32_t> flags = std::nullopt
);
[[nodiscard]]
data::TransactionAndMetadata
createMPTokenAuthorizeTxWithMetadata(
std::string_view accountId,
ripple::uint192 const& mptIssuanceID,
uint32_t fee,
uint32_t seq
);
[[nodiscard]] ripple::STObject
createPermissionedDomainObject(
std::string_view accountId,

View File

@@ -17,7 +17,6 @@
*/
//==============================================================================
#include "util/LoggerFixtures.hpp"
#include "util/TerminationHandler.hpp"
#include <TestGlobals.hpp>
@@ -34,8 +33,6 @@ main(int argc, char* argv[])
{
util::setTerminationHandler();
testing::InitGoogleTest(&argc, argv);
LoggerFixture::init();
TestGlobals::instance().parse(argc, argv);
return RUN_ALL_TESTS();

View File

@@ -44,6 +44,7 @@ using namespace util::config;
struct BackendCassandraFactoryTest : SyncAsioContextTest, util::prometheus::WithPrometheus {
static constexpr auto kKEYSPACE = "factory_test";
static constexpr auto kPROVIDER = "cassandra";
protected:
ClioConfigDefinition cfg_{
@@ -53,6 +54,7 @@ protected:
{"database.cassandra.secure_connect_bundle", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.port", ConfigValue{ConfigType::Integer}.optional()},
{"database.cassandra.keyspace", ConfigValue{ConfigType::String}.defaultValue(kKEYSPACE)},
{"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue(kPROVIDER)},
{"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
{"database.cassandra.table_prefix", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.max_write_requests_outstanding", ConfigValue{ConfigType::Integer}.defaultValue(10'000)},

View File

@@ -93,6 +93,7 @@ protected:
{"database.cassandra.port", ConfigValue{ConfigType::Integer}.optional()},
{"database.cassandra.keyspace",
ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendKeyspace)},
{"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
{"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
{"database.cassandra.table_prefix", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.max_write_requests_outstanding", ConfigValue{ConfigType::Integer}.defaultValue(10'000)},

View File

@@ -19,6 +19,7 @@
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "util/LoggerFixtures.hpp"
#include <TestGlobals.hpp>
#include <cassandra.h>

View File

@@ -33,6 +33,7 @@
#include "migration/impl/MigrationManagerBase.hpp"
#include "migration/impl/MigratorsRegister.hpp"
#include "util/CassandraDBHelper.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockPrometheus.hpp"
#include "util/config/ConfigConstraints.hpp"
#include "util/config/ConfigDefinition.hpp"
@@ -102,6 +103,7 @@ protected:
ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendHost)},
{"database.cassandra.keyspace",
ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendKeyspace)},
{"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
{"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
{"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
{"database.cassandra.connect_timeout", ConfigValue{ConfigType::Integer}.defaultValue(2)},

View File

@@ -20,13 +20,8 @@
#include "util/JsonUtils.hpp"
#include <boost/json/parse.hpp>
#include <boost/json/value.hpp>
#include <gtest/gtest.h>
#include <cstdint>
#include <limits>
#include <stdexcept>
TEST(JsonUtils, RemoveSecrets)
{
auto json = boost::json::parse(R"JSON({
@@ -65,26 +60,3 @@ TEST(JsonUtils, RemoveSecrets)
EXPECT_EQ(json2.at("seed_hex").as_string(), "*");
EXPECT_EQ(json2.at("passphrase").as_string(), "*");
}
TEST(JsonUtils, integralValueAs)
{
auto const expectedResultUint64 = static_cast<uint64_t>(std::numeric_limits<int32_t>::max()) + 1u;
auto const uint64Json = boost::json::value(expectedResultUint64);
EXPECT_EQ(util::integralValueAs<int32_t>(uint64Json), std::numeric_limits<int32_t>::min());
EXPECT_EQ(util::integralValueAs<uint32_t>(uint64Json), expectedResultUint64);
EXPECT_EQ(util::integralValueAs<int64_t>(uint64Json), expectedResultUint64);
EXPECT_EQ(util::integralValueAs<uint64_t>(uint64Json), expectedResultUint64);
auto const expectedResultInt64 = static_cast<int64_t>(std::numeric_limits<int32_t>::max()) + 1u;
auto const int64Json = boost::json::value(expectedResultInt64);
EXPECT_EQ(util::integralValueAs<int32_t>(int64Json), std::numeric_limits<int32_t>::min());
EXPECT_EQ(util::integralValueAs<uint32_t>(int64Json), expectedResultInt64);
EXPECT_EQ(util::integralValueAs<int64_t>(int64Json), expectedResultInt64);
EXPECT_EQ(util::integralValueAs<uint64_t>(int64Json), expectedResultInt64);
auto const doubleJson = boost::json::value(3.14);
EXPECT_THROW(util::integralValueAs<int>(doubleJson), std::logic_error);
auto const stringJson = boost::json::value("not a number");
EXPECT_THROW(util::integralValueAs<int>(stringJson), std::logic_error);
}

View File

@@ -18,6 +18,7 @@
//==============================================================================
#include "app/Stopper.hpp"
#include "util/AsioContextTestFixture.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockBackend.hpp"
#include "util/MockETLService.hpp"
#include "util/MockLoadBalancer.hpp"

View File

@@ -20,7 +20,7 @@
#include "app/WebHandlers.hpp"
#include "rpc/Errors.hpp"
#include "util/AsioContextTestFixture.hpp"
#include "util/MockLedgerCache.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockPrometheus.hpp"
#include "util/Taggable.hpp"
#include "util/config/ConfigDefinition.hpp"
@@ -150,32 +150,6 @@ TEST_F(HealthCheckHandlerTests, Call)
});
}
struct CacheStateHandlerTests : SyncAsioContextTest, WebHandlersTest {
web::ng::Request request{http::request<http::string_body>{http::verb::get, "/", 11}};
MockLedgerCache cache;
CacheStateHandler cacheStateHandler{cache};
};
TEST_F(CacheStateHandlerTests, CallWithCacheLoaded)
{
EXPECT_CALL(cache, isFull()).WillRepeatedly(testing::Return(true));
runSpawn([&](boost::asio::yield_context yield) {
auto response = cacheStateHandler(request, connectionMock, nullptr, yield);
auto const httpResponse = std::move(response).intoHttpResponse();
EXPECT_EQ(httpResponse.result(), boost::beast::http::status::ok);
});
}
TEST_F(CacheStateHandlerTests, CallWithoutCacheLoaded)
{
EXPECT_CALL(cache, isFull()).WillRepeatedly(testing::Return(false));
runSpawn([&](boost::asio::yield_context yield) {
auto response = cacheStateHandler(request, connectionMock, nullptr, yield);
auto const httpResponse = std::move(response).intoHttpResponse();
EXPECT_EQ(httpResponse.result(), boost::beast::http::status::service_unavailable);
});
}
struct RequestHandlerTest : SyncAsioContextTest, WebHandlersTest {
AdminVerificationStrategyStrictMockPtr adminVerifier{
std::make_shared<testing::StrictMock<AdminVerificationStrategyMock>>()

View File

@@ -18,6 +18,7 @@
//==============================================================================
#include "data/LedgerCache.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockPrometheus.hpp"
#include "util/prometheus/Bool.hpp"

View File

@@ -19,6 +19,7 @@
#include "data/cassandra/SettingsProvider.hpp"
#include "data/cassandra/Types.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/TmpFile.hpp"
#include "util/config/ConfigDefinition.hpp"
#include "util/config/ConfigFileJson.hpp"
@@ -58,6 +59,7 @@ getParseSettingsConfig(boost::json::value val)
{"database.cassandra.certificate", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.username", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.password", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
{"database.cassandra.queue_size_io", ConfigValue{ConfigType::Integer}.optional()},
{"database.cassandra.write_batch_size", ConfigValue{ConfigType::Integer}.defaultValue(20)},
{"database.cassandra.connect_timeout", ConfigValue{ConfigType::Integer}.optional()},

View File

@@ -19,6 +19,7 @@
#include "etl/CorruptionDetector.hpp"
#include "etl/SystemState.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockLedgerCache.hpp"
#include "util/MockPrometheus.hpp"

View File

@@ -19,6 +19,7 @@
#include "etl/ETLState.hpp"
#include "rpc/Errors.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockSource.hpp"
#include <boost/json/parse.hpp>

View File

@@ -18,6 +18,7 @@
//==============================================================================
#include "etl/impl/ExtractionDataPipe.hpp"
#include "util/LoggerFixtures.hpp"
#include <gtest/gtest.h>

View File

@@ -20,6 +20,7 @@
#include "etl/SystemState.hpp"
#include "etl/impl/Extractor.hpp"
#include "util/FakeFetchResponse.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockExtractionDataPipe.hpp"
#include "util/MockLedgerFetcher.hpp"
#include "util/MockNetworkValidatedLedgers.hpp"

View File

@@ -18,6 +18,7 @@
//==============================================================================
#include "etl/impl/GrpcSource.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockBackend.hpp"
#include "util/MockPrometheus.hpp"
#include "util/MockXrpLedgerAPIService.hpp"

View File

@@ -19,6 +19,7 @@
#include "data/DBHelpers.hpp"
#include "etl/NFTHelpers.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/TestObject.hpp"
#include <gtest/gtest.h>

View File

@@ -23,6 +23,7 @@
#include "etlng/Models.hpp"
#include "etlng/impl/Extraction.hpp"
#include "util/BinaryTestObject.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockAssert.hpp"
#include "util/TestObject.hpp"

View File

@@ -26,6 +26,7 @@
#include "etlng/impl/GrpcSource.hpp"
#include "util/AsioContextTestFixture.hpp"
#include "util/Assert.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockXrpLedgerAPIService.hpp"
#include "util/Mutex.hpp"
#include "util/TestObject.hpp"

View File

@@ -20,6 +20,7 @@
#include "etl/NetworkValidatedLedgers.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etlng/impl/AmendmentBlockHandler.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/async/context/BasicExecutionContext.hpp"
#include <gmock/gmock.h>

View File

@@ -22,6 +22,7 @@
#include "etlng/MonitorInterface.hpp"
#include "etlng/impl/Registry.hpp"
#include "util/BinaryTestObject.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockPrometheus.hpp"
#include "util/TestObject.hpp"

View File

@@ -21,6 +21,7 @@
#include "etlng/SchedulerInterface.hpp"
#include "etlng/impl/Loading.hpp"
#include "etlng/impl/Scheduling.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockNetworkValidatedLedgers.hpp"
#include <gmock/gmock.h>

View File

@@ -25,6 +25,7 @@
#include "etlng/impl/Loading.hpp"
#include "etlng/impl/TaskManager.hpp"
#include "util/BinaryTestObject.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/TestObject.hpp"
#include "util/async/AnyExecutionContext.hpp"
#include "util/async/context/BasicExecutionContext.hpp"

View File

@@ -21,6 +21,7 @@
#include "etlng/impl/CacheUpdater.hpp"
#include "etlng/impl/ext/Cache.hpp"
#include "util/BinaryTestObject.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockLedgerCache.hpp"
#include "util/MockPrometheus.hpp"
#include "util/TestObject.hpp"

View File

@@ -1247,133 +1247,6 @@ TEST_F(FeedTransactionTest, PubTransactionWithOwnerFundFrozenLPToken)
testFeedPtr->pub(trans1, ledgerHeader, backend_, mockAmendmentCenterPtr_, kNETWORK_ID);
}
TEST_F(FeedTransactionTest, PublishesMPTokenIssuanceCreateTx)
{
constexpr auto kMPTOKEN_ISSUANCE_CREATE_TRAN_V1 =
R"JSON({
"transaction": {
"Account": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn",
"Fee": "12",
"Sequence": 1,
"SigningPubKey": "74657374",
"TransactionType": "MPTokenIssuanceCreate",
"hash": "B565E9E541E9C4615C920807AC8104D26F961424A06F3BB25A083DD47680EF45",
"date": 0
},
"meta": {
"AffectedNodes": [
{
"CreatedNode": {
"LedgerEntryType": "MPTokenIssuance",
"LedgerIndex": "0000000000000000000000000000000000000000000000000000000000000000",
"NewFields": {
"Flags": 0,
"Issuer": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn",
"LedgerEntryType": "MPTokenIssuance",
"MPTokenMetadata": "746573742D6D657461",
"MaximumAmount": "0",
"OutstandingAmount": "0",
"OwnerNode": "0",
"PreviousTxnID": "0000000000000000000000000000000000000000000000000000000000000000",
"PreviousTxnLgrSeq": 0,
"Sequence": 1
}
}
}
],
"TransactionIndex": 0,
"TransactionResult": "tesSUCCESS"
},
"ctid": "C000002100000000",
"type": "transaction",
"validated": true,
"status": "closed",
"ledger_index": 33,
"close_time_iso": "2000-01-01T00:00:00Z",
"ledger_hash": "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652",
"engine_result_code": 0,
"engine_result": "tesSUCCESS",
"engine_result_message": "The transaction was applied. Only final in a validated ledger."
})JSON";
EXPECT_CALL(*mockSessionPtr, onDisconnect);
testFeedPtr->sub(sessionPtr);
EXPECT_EQ(testFeedPtr->transactionSubCount(), 1);
auto const ledgerHeader = createLedgerHeader(kLEDGER_HASH, 33);
auto const trans = createMPTIssuanceCreateTxWithMetadata(kACCOUNT1, 12, 1);
EXPECT_CALL(*mockSessionPtr, apiSubversion).WillOnce(testing::Return(1));
EXPECT_CALL(*mockSessionPtr, send(sharedStringJsonEq(kMPTOKEN_ISSUANCE_CREATE_TRAN_V1)));
testFeedPtr->pub(trans, ledgerHeader, backend_, mockAmendmentCenterPtr_, kNETWORK_ID);
testFeedPtr->unsub(sessionPtr);
EXPECT_EQ(testFeedPtr->transactionSubCount(), 0);
testFeedPtr->pub(trans, ledgerHeader, backend_, mockAmendmentCenterPtr_, kNETWORK_ID);
}
TEST_F(FeedTransactionTest, PublishesMPTokenAuthorizeTx)
{
constexpr auto kMPTOKEN_AUTHORIZE_TRAN_V1 =
R"JSON({
"transaction": {
"Account": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn",
"Fee": "15",
"MPTokenIssuanceID": "000000014B4E9C06F24296074F7BC48F92A97916C6DC5EA9",
"Sequence": 5,
"SigningPubKey": "74657374",
"TransactionType": "MPTokenAuthorize",
"hash": "94ACAB5D571C4A2B8D76979B76E8A82FA91915AEB3FD0A9917223308D5EAE331",
"date": 0
},
"meta": {
"AffectedNodes": [
{
"ModifiedNode": {
"FinalFields": {
"LedgerEntryType": "MPToken",
"MPTAmount": "0",
"MPTokenIssuanceID": "000000014B4E9C06F24296074F7BC48F92A97916C6DC5EA9"
},
"LedgerEntryType": "MPToken",
"LedgerIndex": "0000000000000000000000000000000000000000000000000000000000000000"
}
}
],
"TransactionIndex": 0,
"TransactionResult": "tesSUCCESS"
},
"ctid": "C000002100000000",
"type": "transaction",
"validated": true,
"status": "closed",
"ledger_index": 33,
"close_time_iso": "2000-01-01T00:00:00Z",
"ledger_hash": "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652",
"engine_result_code": 0,
"engine_result": "tesSUCCESS",
"engine_result_message": "The transaction was applied. Only final in a validated ledger."
})JSON";
EXPECT_CALL(*mockSessionPtr, onDisconnect);
testFeedPtr->sub(sessionPtr);
auto const ledgerHeader = createLedgerHeader(kLEDGER_HASH, 33);
// The issuance ID that this transaction is authorizing
auto const mptIssuanceID = ripple::makeMptID(1, getAccountIdWithString(kACCOUNT1));
auto const trans = createMPTokenAuthorizeTxWithMetadata(kACCOUNT1, mptIssuanceID, 15, 5);
EXPECT_CALL(*mockSessionPtr, apiSubversion).WillOnce(testing::Return(1));
EXPECT_CALL(*mockSessionPtr, send(sharedStringJsonEq(kMPTOKEN_AUTHORIZE_TRAN_V1)));
testFeedPtr->pub(trans, ledgerHeader, backend_, mockAmendmentCenterPtr_, kNETWORK_ID);
testFeedPtr->unsub(sessionPtr);
testFeedPtr->pub(trans, ledgerHeader, backend_, mockAmendmentCenterPtr_, kNETWORK_ID);
}
struct TransactionFeedMockPrometheusTest : WithMockPrometheus, SyncExecutionCtxFixture {
protected:
web::SubscriptionContextPtr sessionPtr_ = std::make_shared<MockSession>();

View File

@@ -18,6 +18,7 @@
//==============================================================================
#include "migration/impl/MigrationManagerFactory.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockLedgerCache.hpp"
#include "util/config/ConfigDefinition.hpp"
#include "util/config/ConfigValue.hpp"

View File

@@ -18,6 +18,7 @@
//==============================================================================
#include "migration/cassandra/impl/FullTableScanner.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockAssert.hpp"
#include <boost/asio/spawn.hpp>

Some files were not shown because too many files have changed in this diff Show More