Compare commits


22 Commits

Author SHA1 Message Date
Valon Mamudi
f50a0a9258 Merge branch 'develop' into nudbBlockSize 2025-08-28 00:06:12 +02:00
Valon Mamudi
b4eabf384e removed trailing whitespaces 2025-08-27 23:59:45 +02:00
Ed Hennis
2f155d9273 Merge branch 'develop' into nudbBlockSize 2025-08-26 17:01:43 -04:00
Valon Mamudi
53fcfda242 Remove unnecessary warning text as EXPERIMENTAL tag is self-explanatory 2025-08-12 22:50:52 +02:00
Valon Mamudi
540f0aff7f Apply clang-format to NuDBFactory_test.cpp 2025-08-12 22:32:56 +02:00
Valon Mamudi
20e0fefa2c Enhance nudb_block_size documentation with performance testing recommendation
- Move 'Default is 4096.' to parameter definition line for better organization
- Add performance testing recommendation for non-default block sizes
- Maintain visual spacing in documentation structure
- Net result: +2 content lines with improved readability
2025-08-12 20:22:21 +02:00
Valon Mamudi
0db558cf83 Enhance nudb_block_size documentation with detailed performance characteristics
- Expand 4096 bytes description with memory footprint and latency details
- Add sequential throughput and I/O operation details for 8192-16384 bytes
- Include enterprise environment recommendations for 32768 bytes
- Provide comprehensive guidance for block size selection based on hardware
2025-08-12 20:15:29 +02:00
Valon Mamudi
6385985ee2 Merge branch 'develop' into nudbBlockSize 2025-08-12 17:52:10 +02:00
Valon Mamudi
4d7173b5d9 Mark nudb_block_size as experimental in configuration documentation
Add clear warning that nudb_block_size is an experimental feature that may
affect database performance and stability, as suggested in code review.
2025-08-12 17:47:27 +02:00
Valon Mamudi
b3bc48999b Resolve merge conflict 2025-08-12 17:32:27 +02:00
Ed Hennis
aea76c8693 Some improvements:
- Get the default value from NuDB.
- Throw on error instead of using the default value.
- Check the parsed block size in unit tests.
- Add some more BEAST_EXPECT checks.
2025-08-12 08:25:33 +02:00
Bronek Kozicki
9d20f27a55 Merge branch 'develop' into nudbBlockSize 2025-08-08 16:24:00 +01:00
Valon Mamudi
5026b64180 Merge branch 'develop' into nudbBlockSize 2025-06-25 00:04:52 +02:00
Valon Mamudi
9fd0f72039 Merge branch 'develop' into nudbBlockSize 2025-06-18 01:10:22 +02:00
Valon Mamudi
45f024ddef Merge branch 'develop' into nudbBlockSize 2025-06-10 22:53:13 +02:00
Valon Mamudi
ec25b9d24a format NuDBFactory_test.cpp with clang-format 2025-06-10 22:52:37 +02:00
Valon Mamudi
f8687226ea Add comprehensive test coverage for NuDBFactory block size validation
- Test default block size behavior
- Test valid block sizes (4096, 8192, 16384, 32768)
- Test invalid block size fallback to default
- Test log message verification for valid/invalid values
- Test power of 2 validation logic
- Test configuration parsing edge cases
- Test data persistence across different block sizes
2025-06-10 22:49:25 +02:00
Valon Mamudi
ec530a9b0c Merge branch 'XRPLF:develop' into nudbBlockSize 2025-06-07 00:01:14 +02:00
Valon Mamudi
086b9f62d4 Improve NuDB block size configuration with early return pattern and 32K support
- Implement early return pattern in parseBlockSize function to reduce nesting
- Fix unqualified assignment by using properly scoped const variable
- Decreased maximum block size limit from 64K to 32K (32768 bytes)
- Update configuration documentation to reflect correct 32K maximum
- Add guidance for 32K block size usage in high-performance scenarios
- Apply clang-format fixes to resolve CI formatting checks

This enhances NuDB performance configurability while maintaining code quality
and following modern C++ best practices. The 32K limit reflects the actual
maximum supported by NuDB as confirmed by testing.
2025-06-07 00:00:11 +02:00
Valon Mamudi
1eb4b08592 Merge branch 'develop' into nudbBlockSize 2025-06-03 20:46:54 +02:00
Valon Mamudi
a7c9c69fbd Merge branch 'develop' into nudbBlockSize 2025-06-03 18:03:53 +02:00
Valon Mamudi
6e11a3f1a3 Add configurable NuDB block size feature
- Implement parseBlockSize() function with validation
- Add nudb_block_size configuration parameter
- Support block sizes from 4K to 64K (power of 2)
- Add comprehensive logging and error handling
- Maintain backward compatibility with 4K default
2025-06-03 17:45:35 +02:00
30 changed files with 2578 additions and 2084 deletions

View File

@@ -1,163 +0,0 @@
# This workflow builds the binary from the selected commit (not earlier than 2.5.0 release)
name: Build selected commit
# This workflow can only be triggered manually, by a project maintainer
on:
workflow_dispatch:
inputs:
commit:
description: "Commit to build from."
required: false
type: string
build_container:
description: "Build container image to use"
required: true
type: string
default: ghcr.io/xrplf/ci/debian-bullseye:gcc-12
strip_symbols:
description: "Strip debug symbols"
required: true
type: boolean
default: true
archive_archive:
description: "Archive rippled binary"
required: true
type: boolean
default: false
build_only:
description: "Only build, do not run unit tests"
required: true
type: boolean
default: false
build_type:
description: "Build type (Debug or Release)"
required: true
type: choice
default: Release
options:
- Debug
- Release
cmake_args:
description: "CMake args for build"
required: true
type: string
default: "-Dxrpld=ON -Dtests=ON -Dassert=OFF -Dunity=OFF"
dependencies_force_build:
description: "Force building of all dependencies."
required: false
type: boolean
default: false
env:
CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io
BUILD_DIR: .build
jobs:
build:
runs-on: ["self-hosted", "Linux", "X64", "heavy"]
container: ${{ inputs.build_container }}
steps:
- name: Checkout this workflow
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
sparse-checkout: |
.github
conan
- name: Move workflow files on a side
run: |
mkdir -p ${{ runner.temp }}
mv .github conan ${{ runner.temp }}
rm -rf .git
- name: Checkout the commit to build
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
ref: ${{ inputs.commit }}
- name: Restore workflow files
run: |
rm -rf .github conan
mv ${{ runner.temp }}/.github .
mv ${{ runner.temp }}/conan .
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
with:
disable_ccache: true
- name: Check configuration
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking compiler version.'
${CC} --version
echo 'Checking Conan version.'
conan --version
echo 'Checking Ninja version.'
ninja --version
echo 'Checking nproc version.'
nproc --version
- name: Set up Conan configuration
run: |
echo 'Installing configuration.'
cat conan/global.conf >> $(conan config home)/global.conf
echo 'Conan configuration:'
conan config show '*'
- name: Set up Conan profile
run: |
echo 'Installing profile.'
conan config install conan/profiles/default -tf $(conan config home)/profiles/
echo 'Conan profile:'
conan profile show
- name: Set up Conan remote
shell: bash
run: |
echo "Adding Conan remote '${{ env.CONAN_REMOTE_NAME }}' at ${{ env.CONAN_REMOTE_URL }}."
conan remote add --index 0 --force ${{ env.CONAN_REMOTE_NAME }} ${{ env.CONAN_REMOTE_URL }}
echo 'Listing Conan remotes.'
conan remote list
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ env.BUILD_DIR }}
build_type: ${{ inputs.build_type }}
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
force_build: ${{ inputs.dependencies_force_build }}
force_upload: false
- name: Build and test binary
uses: ./.github/actions/build-test
with:
build_dir: ${{ env.BUILD_DIR }}
build_only: ${{ inputs.build_only }}
build_type: ${{ inputs.build_type }}
cmake_args: ${{ inputs.cmake_args }}
cmake_target: "all"
os: "linux"
- name: Strip symbols
if: ${{ inputs.strip_symbols == 'true' }}
run: |
strip -D --strip-unneeded ${{ env.BUILD_DIR }}/rippled
${{ env.BUILD_DIR }}/rippled --version
- name: Move the binary
run: |
mv ${{ env.BUILD_DIR }}/rippled .
- name: Archive rippled binary
if: ${{ inputs.archive_archive == 'true' }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: rippled
path: ./rippled
retention-days: 90
compression-level: 8
overwrite: true

View File

@@ -101,7 +101,6 @@ jobs:
echo 'CMake arguments: ${{ matrix.cmake_args }}'
echo 'CMake target: ${{ matrix.cmake_target }}'
echo 'Config name: ${{ matrix.config_name }}'
- name: Clean workspace (MacOS)
if: ${{ inputs.os == 'macos' }}
run: |
@@ -112,12 +111,18 @@ jobs:
exit 1
fi
find "${WORKSPACE}" -depth 1 | xargs rm -rfv
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
- name: Set up Python (Windows)
if: ${{ inputs.os == 'windows' }}
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: 3.13
- name: Install build tools (Windows)
if: ${{ inputs.os == 'windows' }}
run: |
echo 'Installing build tools.'
pip install wheel conan
- name: Check configuration (Windows)
if: ${{ inputs.os == 'windows' }}
run: |
@@ -129,6 +134,11 @@ jobs:
echo 'Checking Conan version.'
conan --version
- name: Install build tools (MacOS)
if: ${{ inputs.os == 'macos' }}
run: |
echo 'Installing build tools.'
brew install --quiet cmake conan ninja coreutils
- name: Check configuration (Linux and MacOS)
if: ${{ inputs.os == 'linux' || inputs.os == 'macos' }}
run: |
@@ -152,7 +162,18 @@ jobs:
echo 'Checking nproc version.'
nproc --version
- name: Set up Conan home directory (MacOS)
if: ${{ inputs.os == 'macos' }}
run: |
echo 'Setting up Conan home directory.'
export CONAN_HOME=${{ github.workspace }}/.conan
mkdir -p ${CONAN_HOME}
- name: Set up Conan home directory (Windows)
if: ${{ inputs.os == 'windows' }}
run: |
echo 'Setting up Conan home directory.'
set CONAN_HOME=${{ github.workspace }}\.conan
mkdir -p %CONAN_HOME%
- name: Set up Conan configuration
run: |
echo 'Installing configuration.'
@@ -175,7 +196,6 @@ jobs:
echo 'Listing Conan remotes.'
conan remote list
- name: Build dependencies
uses: ./.github/actions/build-deps
with:

View File

@@ -28,26 +28,30 @@ env:
CONAN_REMOTE_URL: https://conan.ripplex.io
jobs:
# This job determines whether the rest of the workflow should run. It runs
# when the PR is not a draft (which should also cover merge-group) or
# has the 'DraftRunCI' label.
# This job determines whether the workflow should run. It runs when the PR is
# not a draft or has the 'DraftRunCI' label.
should-run:
if: ${{ !github.event.pull_request.draft || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
runs-on: ubuntu-latest
steps:
- name: No-op
run: true
# This job checks whether any files have changed that should cause the next
# jobs to run. We do it this way rather than using `paths` in the `on:`
# section, because all required checks must pass, even for changes that do not
# modify anything that affects those checks. We would therefore like to make
# the checks required only if the job runs, but GitHub does not support that
# directly. By always executing the workflow on new commits and by using the
# changed-files action below, we ensure that Github considers any skipped jobs
# to have passed, and in turn the required checks as well.
any-changed:
needs: should-run
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Determine changed files
# This step checks whether any files have changed that should
# cause the next jobs to run. We do it this way rather than
# using `paths` in the `on:` section, because all required
# checks must pass, even for changes that do not modify anything
# that affects those checks. We would therefore like to make the
# checks required only if the job runs, but GitHub does not
# support that directly. By always executing the workflow on new
# commits and by using the changed-files action below, we ensure
# that Github considers any skipped jobs to have passed, and in
# turn the required checks as well.
id: changes
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
@@ -75,40 +79,24 @@ jobs:
tests/**
CMakeLists.txt
conanfile.py
- name: Check whether to run
# This step determines whether the rest of the workflow should
# run. The rest of the workflow will run if this job runs AND at
# least one of:
# * Any of the files checked in the `changes` step were modified
# * The PR is NOT a draft and is labeled "Ready to merge"
# * The workflow is running from the merge queue
id: go
env:
FILES: ${{ steps.changes.outputs.any_changed }}
DRAFT: ${{ github.event.pull_request.draft }}
READY: ${{ contains(github.event.pull_request.labels.*.name, 'Ready to merge') }}
MERGE: ${{ github.event_name == 'merge_group' }}
run: |
echo "go=${{ (env.DRAFT != 'true' && env.READY == 'true') || env.FILES == 'true' || env.MERGE == 'true' }}" >> "${GITHUB_OUTPUT}"
cat "${GITHUB_OUTPUT}"
outputs:
go: ${{ steps.go.outputs.go == 'true' }}
changed: ${{ steps.changes.outputs.any_changed }}
check-format:
needs: should-run
if: needs.should-run.outputs.go == 'true'
needs: any-changed
if: needs.any-changed.outputs.changed == 'true'
uses: ./.github/workflows/check-format.yml
check-levelization:
needs: should-run
if: needs.should-run.outputs.go == 'true'
needs: any-changed
if: needs.any-changed.outputs.changed == 'true'
uses: ./.github/workflows/check-levelization.yml
# This job works around the limitation that GitHub Actions does not support
# using environment variables as inputs for reusable workflows.
generate-outputs:
needs: should-run
if: needs.should-run.outputs.go == 'true'
needs: any-changed
if: needs.any-changed.outputs.changed == 'true'
runs-on: ubuntu-latest
steps:
- name: No-op
@@ -142,13 +130,3 @@ jobs:
clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
passed:
needs:
- build-test
- check-format
- check-levelization
runs-on: ubuntu-latest
steps:
- name: No-op
run: true

View File

@@ -975,6 +975,47 @@
# number of ledger records online. Must be greater
# than or equal to ledger_history.
#
# Optional keys for NuDB only:
#
# nudb_block_size EXPERIMENTAL: Block size in bytes for NuDB storage.
# Must be a power of 2 between 4096 and 32768. Default is 4096.
#
# This parameter controls the fundamental storage unit
# size for NuDB's internal data structures. The choice
# of block size can significantly impact performance
# depending on your storage hardware and filesystem:
#
# - 4096 bytes: Optimal for most standard SSDs and
# traditional filesystems (ext4, NTFS, HFS+).
# Provides good balance of performance and storage
# efficiency. Recommended for most deployments.
# Minimizes memory footprint and provides consistent
# low-latency access patterns across diverse hardware.
#
# - 8192-16384 bytes: May improve performance on
# high-end NVMe SSDs and copy-on-write filesystems
# like ZFS or Btrfs that benefit from larger block
# alignment. Can reduce metadata overhead for large
# databases. Offers better sequential throughput and
# reduced I/O operations at the cost of higher memory
# usage per operation.
#
# - 32768 bytes (32K): Maximum supported block size
# for high-performance scenarios with very fast
# storage. May increase memory usage and reduce
# efficiency for smaller databases. Best suited for
# enterprise environments with abundant RAM.
#
# Performance testing is recommended before deploying
# any non-default block size in production environments.
#
# Note: This setting cannot be changed after database
# creation without rebuilding the entire database.
# Choose carefully based on your hardware and expected
# database size.
#
# Example: nudb_block_size=4096
#
# These keys modify the behavior of online_delete, and thus are only
# relevant if online_delete is defined and non-zero:
#
@@ -1471,6 +1512,7 @@ secure_gateway = 127.0.0.1
[node_db]
type=NuDB
path=/var/lib/rippled/db/nudb
nudb_block_size=4096
online_delete=512
advisory_delete=0
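
The documented rule (a power of 2 between 4096 and 32768, fixed at database creation) is easy to check up front. Below is a minimal standalone sketch of that constraint; the helper name isValidNuDBBlockSize is illustrative, not part of the codebase:

#include <cstddef>

// Sketch of the documented nudb_block_size constraint: a power of
// two in [4096, 32768]. (x & (x - 1)) == 0 is the usual bit trick
// for "x is a power of two" (for x > 0).
constexpr bool
isValidNuDBBlockSize(std::size_t x)
{
    return x >= 4096 && x <= 32768 && (x & (x - 1)) == 0;
}

static_assert(isValidNuDBBlockSize(4096));   // minimum and default
static_assert(isValidNuDBBlockSize(32768));  // maximum
static_assert(!isValidNuDBBlockSize(5000));  // not a power of two
static_assert(!isValidNuDBBlockSize(65536)); // power of two, too large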

View File

@@ -101,9 +101,6 @@
# 2025-05-12, Jingchen Wu
# - add -fprofile-update=atomic to ensure atomic profile generation
#
# 2025-08-28, Bronek Kozicki
# - fix "At least one COMMAND must be given" CMake warning from policy CMP0175
#
# USAGE:
#
# 1. Copy this file into your cmake modules path.
@@ -449,7 +446,7 @@ function(setup_target_for_coverage_gcovr)
# Show info where to find the report
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
COMMAND echo
COMMAND ;
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
)
endfunction() # setup_target_for_coverage_gcovr

View File

@@ -14,6 +14,12 @@ find_package(Boost 1.82 REQUIRED
add_library(ripple_boost INTERFACE)
add_library(Ripple::boost ALIAS ripple_boost)
if(XCODE)
target_include_directories(ripple_boost BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
target_compile_options(ripple_boost INTERFACE --system-header-prefix="boost/")
else()
target_include_directories(ripple_boost SYSTEM BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
endif()
target_link_libraries(ripple_boost
INTERFACE

View File

@@ -157,12 +157,7 @@ enum error_code_i {
// Pathfinding
rpcDOMAIN_MALFORMED = 97,
// ledger_entry
rpcENTRY_NOT_FOUND = 98,
rpcUNEXPECTED_LEDGER_TYPE = 99,
rpcLAST =
rpcUNEXPECTED_LEDGER_TYPE // rpcLAST should always equal the last code.
rpcLAST = rpcDOMAIN_MALFORMED // rpcLAST should always equal the last code.
};
/** Codes returned in the `warnings` array of certain RPC commands.

View File

@@ -68,13 +68,9 @@ JSS(Flags); // in/out: TransactionSign; field.
JSS(Holder); // field.
JSS(Invalid); //
JSS(Issuer); // in: Credential transactions
JSS(IssuingChainDoor); // field.
JSS(IssuingChainIssue); // field.
JSS(LastLedgerSequence); // in: TransactionSign; field
JSS(LastUpdateTime); // field.
JSS(LimitAmount); // field.
JSS(LockingChainDoor); // field.
JSS(LockingChainIssue); // field.
JSS(NetworkID); // field.
JSS(LPTokenOut); // in: AMM Liquidity Provider deposit tokens
JSS(LPTokenIn); // in: AMM Liquidity Provider withdraw tokens

View File

@@ -24,7 +24,6 @@
#include <xrpl/json/json_value.h>
#include <xrpl/json/json_writer.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <string>
@@ -686,9 +685,7 @@ Value::isConvertibleTo(ValueType other) const
(other == intValue && value_.real_ >= minInt &&
value_.real_ <= maxInt) ||
(other == uintValue && value_.real_ >= 0 &&
value_.real_ <= maxUInt &&
std::fabs(round(value_.real_) - value_.real_) <
std::numeric_limits<double>::epsilon()) ||
value_.real_ <= maxUInt) ||
other == realValue || other == stringValue ||
other == booleanValue;

View File

@@ -36,7 +36,7 @@ namespace BuildInfo {
// and follow the format described at http://semver.org/
//------------------------------------------------------------------------------
// clang-format off
char const* const versionString = "2.6.0"
char const* const versionString = "2.6.0-rc3"
// clang-format on
#if defined(DEBUG) || defined(SANITIZER)

View File

@@ -117,10 +117,7 @@ constexpr static ErrorInfo unorderedErrorInfos[]{
{rpcORACLE_MALFORMED, "oracleMalformed", "Oracle request is malformed.", 400},
{rpcBAD_CREDENTIALS, "badCredentials", "Credentials do not exist, are not accepted, or have expired.", 400},
{rpcTX_SIGNED, "transactionSigned", "Transaction should not be signed.", 400},
{rpcDOMAIN_MALFORMED, "domainMalformed", "Domain is malformed.", 400},
{rpcENTRY_NOT_FOUND, "entryNotFound", "Entry not found.", 400},
{rpcUNEXPECTED_LEDGER_TYPE, "unexpectedLedgerType", "Unexpected ledger type.", 400},
};
{rpcDOMAIN_MALFORMED, "domainMalformed", "Domain is malformed.", 400}};
// clang-format on
// Sort and validate unorderedErrorInfos at compile time. Should be

View File

@@ -27,7 +27,6 @@
#include <xrpl/protocol/STObject.h>
#include <xrpl/protocol/STXChainBridge.h>
#include <xrpl/protocol/Serializer.h>
#include <xrpl/protocol/jss.h>
#include <boost/format/free_funcs.hpp>
@@ -99,10 +98,12 @@ STXChainBridge::STXChainBridge(SField const& name, Json::Value const& v)
};
checkExtra(v);
Json::Value const& lockingChainDoorStr = v[jss::LockingChainDoor];
Json::Value const& lockingChainIssue = v[jss::LockingChainIssue];
Json::Value const& issuingChainDoorStr = v[jss::IssuingChainDoor];
Json::Value const& issuingChainIssue = v[jss::IssuingChainIssue];
Json::Value const& lockingChainDoorStr =
v[sfLockingChainDoor.getJsonName()];
Json::Value const& lockingChainIssue = v[sfLockingChainIssue.getJsonName()];
Json::Value const& issuingChainDoorStr =
v[sfIssuingChainDoor.getJsonName()];
Json::Value const& issuingChainIssue = v[sfIssuingChainIssue.getJsonName()];
if (!lockingChainDoorStr.isString())
{
@@ -160,10 +161,10 @@ Json::Value
STXChainBridge::getJson(JsonOptions jo) const
{
Json::Value v;
v[jss::LockingChainDoor] = lockingChainDoor_.getJson(jo);
v[jss::LockingChainIssue] = lockingChainIssue_.getJson(jo);
v[jss::IssuingChainDoor] = issuingChainDoor_.getJson(jo);
v[jss::IssuingChainIssue] = issuingChainIssue_.getJson(jo);
v[sfLockingChainDoor.getJsonName()] = lockingChainDoor_.getJson(jo);
v[sfLockingChainIssue.getJsonName()] = lockingChainIssue_.getJson(jo);
v[sfIssuingChainDoor.getJsonName()] = issuingChainDoor_.getJson(jo);
v[sfIssuingChainIssue.getJsonName()] = issuingChainIssue_.getJson(jo);
return v;
}

View File

@@ -3028,6 +3028,18 @@ class Vault_test : public beast::unit_test::suite
"malformedRequest");
}
{
testcase("RPC ledger_entry zero seq");
Json::Value jvParams;
jvParams[jss::ledger_index] = jss::validated;
jvParams[jss::vault][jss::owner] = issuer.human();
jvParams[jss::vault][jss::seq] = 0;
auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams));
BEAST_EXPECT(
jvVault[jss::result][jss::error].asString() ==
"malformedRequest");
}
{
testcase("RPC ledger_entry negative seq");
Json::Value jvParams;

View File

@@ -44,10 +44,10 @@ bridge(
Issue const& issuingChainIssue)
{
Json::Value jv;
jv[jss::LockingChainDoor] = lockingChainDoor.human();
jv[jss::LockingChainIssue] = to_json(lockingChainIssue);
jv[jss::IssuingChainDoor] = issuingChainDoor.human();
jv[jss::IssuingChainIssue] = to_json(issuingChainIssue);
jv[sfLockingChainDoor.getJsonName()] = lockingChainDoor.human();
jv[sfLockingChainIssue.getJsonName()] = to_json(lockingChainIssue);
jv[sfIssuingChainDoor.getJsonName()] = issuingChainDoor.human();
jv[sfIssuingChainIssue.getJsonName()] = to_json(issuingChainIssue);
return jv;
}
@@ -60,10 +60,10 @@ bridge_rpc(
Issue const& issuingChainIssue)
{
Json::Value jv;
jv[jss::LockingChainDoor] = lockingChainDoor.human();
jv[jss::LockingChainIssue] = to_json(lockingChainIssue);
jv[jss::IssuingChainDoor] = issuingChainDoor.human();
jv[jss::IssuingChainIssue] = to_json(issuingChainIssue);
jv[sfLockingChainDoor.getJsonName()] = lockingChainDoor.human();
jv[sfLockingChainIssue.getJsonName()] = to_json(lockingChainIssue);
jv[sfIssuingChainDoor.getJsonName()] = issuingChainDoor.human();
jv[sfIssuingChainIssue.getJsonName()] = to_json(issuingChainIssue);
return jv;
}

View File

@@ -0,0 +1,478 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <test/nodestore/TestBase.h>
#include <test/unit_test/SuiteJournal.h>
#include <xrpld/nodestore/DummyScheduler.h>
#include <xrpld/nodestore/Manager.h>
#include <xrpl/basics/BasicConfig.h>
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/utility/temp_dir.h>
#include <memory>
#include <sstream>
namespace ripple {
namespace NodeStore {
class NuDBFactory_test : public TestBase
{
private:
// Helper function to create a Section with specified parameters
Section
createSection(std::string const& path, std::string const& blockSize = "")
{
Section params;
params.set("type", "nudb");
params.set("path", path);
if (!blockSize.empty())
params.set("nudb_block_size", blockSize);
return params;
}
// Helper function to create a backend and test basic functionality
bool
testBackendFunctionality(
Section const& params,
std::size_t expectedBlocksize)
{
try
{
DummyScheduler scheduler;
test::SuiteJournal journal("NuDBFactory_test", *this);
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
if (!BEAST_EXPECT(backend))
return false;
if (!BEAST_EXPECT(backend->getBlockSize() == expectedBlocksize))
return false;
backend->open();
if (!BEAST_EXPECT(backend->isOpen()))
return false;
// Test basic store/fetch functionality
auto batch = createPredictableBatch(10, 12345);
storeBatch(*backend, batch);
Batch copy;
fetchCopyOfBatch(*backend, &copy, batch);
backend->close();
return areBatchesEqual(batch, copy);
}
catch (...)
{
return false;
}
}
// Helper function to test log messages
void
testLogMessage(
Section const& params,
beast::severities::Severity level,
std::string const& expectedMessage)
{
test::StreamSink sink(level);
beast::Journal journal(sink);
DummyScheduler scheduler;
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
std::string logOutput = sink.messages().str();
BEAST_EXPECT(logOutput.find(expectedMessage) != std::string::npos);
}
// Helper function to test power of two validation
void
testPowerOfTwoValidation(std::string const& size, bool shouldWork)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
test::StreamSink sink(beast::severities::kWarning);
beast::Journal journal(sink);
DummyScheduler scheduler;
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
std::string logOutput = sink.messages().str();
bool hasWarning =
logOutput.find("Invalid nudb_block_size") != std::string::npos;
BEAST_EXPECT(hasWarning == !shouldWork);
}
public:
void
testDefaultBlockSize()
{
testcase("Default block size (no nudb_block_size specified)");
beast::temp_dir tempDir;
auto params = createSection(tempDir.path());
// Should work with default 4096 block size
BEAST_EXPECT(testBackendFunctionality(params, 4096));
}
void
testValidBlockSizes()
{
testcase("Valid block sizes");
std::vector<std::size_t> validSizes = {4096, 8192, 16384, 32768};
for (auto const& size : validSizes)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), to_string(size));
BEAST_EXPECT(testBackendFunctionality(params, size));
}
// Empty value is ignored by the config parser, so uses the
// default
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "");
BEAST_EXPECT(testBackendFunctionality(params, 4096));
}
void
testInvalidBlockSizes()
{
testcase("Invalid block sizes");
std::vector<std::string> invalidSizes = {
"2048", // Too small
"1024", // Too small
"65536", // Too large
"131072", // Too large
"5000", // Not power of 2
"6000", // Not power of 2
"10000", // Not power of 2
"0", // Zero
"-1", // Negative
"abc", // Non-numeric
"4k", // Invalid format
"4096.5" // Decimal
};
for (auto const& size : invalidSizes)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
// Fails
BEAST_EXPECT(!testBackendFunctionality(params, 4096));
}
// Test whitespace cases separately since lexical_cast may handle them
std::vector<std::string> whitespaceInvalidSizes = {
"4096 ", // Trailing space - might be handled by lexical_cast
" 4096" // Leading space - might be handled by lexical_cast
};
for (auto const& size : whitespaceInvalidSizes)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
// Fails
BEAST_EXPECT(!testBackendFunctionality(params, 4096));
}
}
void
testLogMessages()
{
testcase("Log message verification");
// Test valid custom block size logging
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "8192");
testLogMessage(
params,
beast::severities::kInfo,
"Using custom NuDB block size: 8192");
}
// Test invalid block size failure
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "5000");
test::StreamSink sink(beast::severities::kWarning);
beast::Journal journal(sink);
DummyScheduler scheduler;
try
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
fail();
}
catch (std::exception const& e)
{
std::string logOutput{e.what()};
BEAST_EXPECT(
logOutput.find("Invalid nudb_block_size: 5000") !=
std::string::npos);
BEAST_EXPECT(
logOutput.find(
"Must be power of 2 between 4096 and 32768") !=
std::string::npos);
}
}
// Test non-numeric value failure
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "invalid");
test::StreamSink sink(beast::severities::kWarning);
beast::Journal journal(sink);
DummyScheduler scheduler;
try
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
fail();
}
catch (std::exception const& e)
{
std::string logOutput{e.what()};
BEAST_EXPECT(
logOutput.find("Invalid nudb_block_size value: invalid") !=
std::string::npos);
}
}
}
void
testPowerOfTwoValidation()
{
testcase("Power of 2 validation logic");
// Test edge cases around valid range
std::vector<std::pair<std::string, bool>> testCases = {
{"4095", false}, // Just below minimum
{"4096", true}, // Minimum valid
{"4097", false}, // Just above minimum, not power of 2
{"8192", true}, // Valid power of 2
{"8193", false}, // Just above valid power of 2
{"16384", true}, // Valid power of 2
{"32768", true}, // Maximum valid
{"32769", false}, // Just above maximum
{"65536", false} // Power of 2 but too large
};
for (auto const& [size, shouldWork] : testCases)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
// We test the validation logic by catching exceptions for invalid
// values
test::StreamSink sink(beast::severities::kWarning);
beast::Journal journal(sink);
DummyScheduler scheduler;
try
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
BEAST_EXPECT(shouldWork);
}
catch (std::exception const& e)
{
std::string logOutput{e.what()};
BEAST_EXPECT(
logOutput.find("Invalid nudb_block_size") !=
std::string::npos);
}
}
}
void
testBothConstructorVariants()
{
testcase("Both constructor variants work with custom block size");
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "16384");
DummyScheduler scheduler;
test::SuiteJournal journal("NuDBFactory_test", *this);
// Test first constructor (without nudb::context)
{
auto backend1 = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
BEAST_EXPECT(backend1 != nullptr);
BEAST_EXPECT(testBackendFunctionality(params, 16384));
}
// Test second constructor (with nudb::context)
// Note: This would require access to nudb::context, which might not be
// easily testable without more complex setup. For now, we test that
// the factory can create backends with the first constructor.
}
void
testConfigurationParsing()
{
testcase("Configuration parsing edge cases");
// Test that whitespace is handled correctly
std::vector<std::string> validFormats = {
"8192" // Basic valid format
};
// Test whitespace handling separately since lexical_cast behavior may
// vary
std::vector<std::string> whitespaceFormats = {
" 8192", // Leading space - may or may not be handled by
// lexical_cast
"8192 " // Trailing space - may or may not be handled by
// lexical_cast
};
// Test basic valid format
for (auto const& format : validFormats)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), format);
test::StreamSink sink(beast::severities::kInfo);
beast::Journal journal(sink);
DummyScheduler scheduler;
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
// Should log success message for valid values
std::string logOutput = sink.messages().str();
bool hasSuccessMessage =
logOutput.find("Using custom NuDB block size") !=
std::string::npos;
BEAST_EXPECT(hasSuccessMessage);
}
// Test whitespace formats - these should work if lexical_cast handles
// them
for (auto const& format : whitespaceFormats)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), format);
// Use a lower threshold to capture both info and warning messages
test::StreamSink sink(beast::severities::kDebug);
beast::Journal journal(sink);
DummyScheduler scheduler;
try
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
fail();
}
catch (...)
{
// Fails
BEAST_EXPECT(!testBackendFunctionality(params, 8192));
}
}
}
void
testDataPersistence()
{
testcase("Data persistence with different block sizes");
std::vector<std::string> blockSizes = {
"4096", "8192", "16384", "32768"};
for (auto const& size : blockSizes)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
DummyScheduler scheduler;
test::SuiteJournal journal("NuDBFactory_test", *this);
// Create test data
auto batch = createPredictableBatch(50, 54321);
// Store data
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
backend->open();
storeBatch(*backend, batch);
backend->close();
}
// Retrieve data in new backend instance
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
backend->open();
Batch copy;
fetchCopyOfBatch(*backend, &copy, batch);
BEAST_EXPECT(areBatchesEqual(batch, copy));
backend->close();
}
}
}
void
run() override
{
testDefaultBlockSize();
testValidBlockSizes();
testInvalidBlockSizes();
testLogMessages();
testPowerOfTwoValidation();
testBothConstructorVariants();
testConfigurationParsing();
testDataPersistence();
}
};
BEAST_DEFINE_TESTSUITE(NuDBFactory, ripple_core, ripple);
} // namespace NodeStore
} // namespace ripple

View File

@@ -183,7 +183,7 @@ private:
boost::asio::ip::make_address("172.1.1." + std::to_string(rid_)));
PublicKey key(std::get<0>(randomKeyPair(KeyType::ed25519)));
auto consumer = overlay.resourceManager().newInboundEndpoint(remote);
auto [slot, _] = overlay.peerFinder().new_inbound_slot(local, remote);
auto slot = overlay.peerFinder().new_inbound_slot(local, remote);
auto const peer = std::make_shared<PeerTest>(
env.app(),
slot,

View File

@@ -20,7 +20,6 @@
#include <test/unit_test/SuiteJournal.h>
#include <xrpld/core/Config.h>
#include <xrpld/peerfinder/PeerfinderManager.h>
#include <xrpld/peerfinder/detail/Logic.h>
#include <xrpl/basics/chrono.h>
@@ -99,7 +98,7 @@ public:
if (!list.empty())
{
BEAST_EXPECT(list.size() == 1);
auto const [slot, _] = logic.new_outbound_slot(list.front());
auto const slot = logic.new_outbound_slot(list.front());
BEAST_EXPECT(logic.onConnected(
slot, beast::IP::Endpoint::from_string("65.0.0.2:5")));
logic.on_closed(slot);
@@ -140,7 +139,7 @@ public:
if (!list.empty())
{
BEAST_EXPECT(list.size() == 1);
auto const [slot, _] = logic.new_outbound_slot(list.front());
auto const slot = logic.new_outbound_slot(list.front());
if (!BEAST_EXPECT(logic.onConnected(
slot, beast::IP::Endpoint::from_string("65.0.0.2:5"))))
return;
@@ -159,7 +158,6 @@ public:
BEAST_EXPECT(n <= (seconds + 59) / 60);
}
// test accepting an incoming slot for an already existing outgoing slot
void
test_duplicateOutIn()
{
@@ -168,6 +166,8 @@ public:
TestChecker checker;
TestStopwatch clock;
Logic<TestChecker> logic(clock, store, checker, journal_);
logic.addFixedPeer(
"test", beast::IP::Endpoint::from_string("65.0.0.1:5"));
{
Config c;
c.autoConnect = false;
@@ -176,24 +176,28 @@ public:
logic.config(c);
}
auto const remote = beast::IP::Endpoint::from_string("65.0.0.1:5");
auto const [slot1, r] = logic.new_outbound_slot(remote);
BEAST_EXPECT(slot1 != nullptr);
BEAST_EXPECT(r == Result::success);
BEAST_EXPECT(logic.connectedAddresses_.count(remote.address()) == 1);
auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024");
auto const [slot2, r2] = logic.new_inbound_slot(local, remote);
BEAST_EXPECT(logic.connectedAddresses_.count(remote.address()) == 1);
BEAST_EXPECT(r2 == Result::duplicatePeer);
if (!BEAST_EXPECT(slot2 == nullptr))
logic.on_closed(slot2);
logic.on_closed(slot1);
auto const list = logic.autoconnect();
if (BEAST_EXPECT(!list.empty()))
{
BEAST_EXPECT(list.size() == 1);
auto const remote = list.front();
auto const slot1 = logic.new_outbound_slot(remote);
if (BEAST_EXPECT(slot1 != nullptr))
{
BEAST_EXPECT(
logic.connectedAddresses_.count(remote.address()) == 1);
auto const local =
beast::IP::Endpoint::from_string("65.0.0.2:1024");
auto const slot2 = logic.new_inbound_slot(local, remote);
BEAST_EXPECT(
logic.connectedAddresses_.count(remote.address()) == 1);
if (!BEAST_EXPECT(slot2 == nullptr))
logic.on_closed(slot2);
logic.on_closed(slot1);
}
}
}
// test establishing outgoing slot for an already existing incoming slot
void
test_duplicateInOut()
{
@@ -202,6 +206,8 @@ public:
TestChecker checker;
TestStopwatch clock;
Logic<TestChecker> logic(clock, store, checker, journal_);
logic.addFixedPeer(
"test", beast::IP::Endpoint::from_string("65.0.0.1:5"));
{
Config c;
c.autoConnect = false;
@@ -210,202 +216,33 @@ public:
logic.config(c);
}
auto const remote = beast::IP::Endpoint::from_string("65.0.0.1:5");
auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024");
auto const [slot1, r] = logic.new_inbound_slot(local, remote);
BEAST_EXPECT(slot1 != nullptr);
BEAST_EXPECT(r == Result::success);
BEAST_EXPECT(logic.connectedAddresses_.count(remote.address()) == 1);
auto const [slot2, r2] = logic.new_outbound_slot(remote);
BEAST_EXPECT(r2 == Result::duplicatePeer);
BEAST_EXPECT(logic.connectedAddresses_.count(remote.address()) == 1);
if (!BEAST_EXPECT(slot2 == nullptr))
logic.on_closed(slot2);
logic.on_closed(slot1);
}
void
test_peerLimitExceeded()
{
testcase("peer limit exceeded");
TestStore store;
TestChecker checker;
TestStopwatch clock;
Logic<TestChecker> logic(clock, store, checker, journal_);
auto const list = logic.autoconnect();
if (BEAST_EXPECT(!list.empty()))
{
Config c;
c.autoConnect = false;
c.listeningPort = 1024;
c.ipLimit = 2;
logic.config(c);
BEAST_EXPECT(list.size() == 1);
auto const remote = list.front();
auto const local =
beast::IP::Endpoint::from_string("65.0.0.2:1024");
auto const slot1 = logic.new_inbound_slot(local, remote);
if (BEAST_EXPECT(slot1 != nullptr))
{
BEAST_EXPECT(
logic.connectedAddresses_.count(remote.address()) == 1);
auto const slot2 = logic.new_outbound_slot(remote);
BEAST_EXPECT(
logic.connectedAddresses_.count(remote.address()) == 1);
if (!BEAST_EXPECT(slot2 == nullptr))
logic.on_closed(slot2);
logic.on_closed(slot1);
}
}
auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024");
auto const [slot, r] = logic.new_inbound_slot(
local, beast::IP::Endpoint::from_string("55.104.0.2:1025"));
BEAST_EXPECT(slot != nullptr);
BEAST_EXPECT(r == Result::success);
auto const [slot1, r1] = logic.new_inbound_slot(
local, beast::IP::Endpoint::from_string("55.104.0.2:1026"));
BEAST_EXPECT(slot1 != nullptr);
BEAST_EXPECT(r1 == Result::success);
auto const [slot2, r2] = logic.new_inbound_slot(
local, beast::IP::Endpoint::from_string("55.104.0.2:1027"));
BEAST_EXPECT(r2 == Result::ipLimitExceeded);
if (!BEAST_EXPECT(slot2 == nullptr))
logic.on_closed(slot2);
logic.on_closed(slot1);
logic.on_closed(slot);
}
void
test_activate_duplicate_peer()
{
testcase("test activate duplicate peer");
TestStore store;
TestChecker checker;
TestStopwatch clock;
Logic<TestChecker> logic(clock, store, checker, journal_);
{
Config c;
c.autoConnect = false;
c.listeningPort = 1024;
c.ipLimit = 2;
logic.config(c);
}
auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024");
PublicKey const pk1(randomKeyPair(KeyType::secp256k1).first);
auto const [slot, rSlot] = logic.new_outbound_slot(
beast::IP::Endpoint::from_string("55.104.0.2:1025"));
BEAST_EXPECT(slot != nullptr);
BEAST_EXPECT(rSlot == Result::success);
auto const [slot2, r2Slot] = logic.new_outbound_slot(
beast::IP::Endpoint::from_string("55.104.0.2:1026"));
BEAST_EXPECT(slot2 != nullptr);
BEAST_EXPECT(r2Slot == Result::success);
BEAST_EXPECT(logic.onConnected(slot, local));
BEAST_EXPECT(logic.onConnected(slot2, local));
BEAST_EXPECT(logic.activate(slot, pk1, false) == Result::success);
// activating a different slot with the same node ID (pk) must fail
BEAST_EXPECT(
logic.activate(slot2, pk1, false) == Result::duplicatePeer);
logic.on_closed(slot);
// accept the same key for a new slot after removing the old slot
BEAST_EXPECT(logic.activate(slot2, pk1, false) == Result::success);
logic.on_closed(slot2);
}
void
test_activate_inbound_disabled()
{
testcase("test activate inbound disabled");
TestStore store;
TestChecker checker;
TestStopwatch clock;
Logic<TestChecker> logic(clock, store, checker, journal_);
{
Config c;
c.autoConnect = false;
c.listeningPort = 1024;
c.ipLimit = 2;
logic.config(c);
}
PublicKey const pk1(randomKeyPair(KeyType::secp256k1).first);
auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024");
auto const [slot, rSlot] = logic.new_inbound_slot(
local, beast::IP::Endpoint::from_string("55.104.0.2:1025"));
BEAST_EXPECT(slot != nullptr);
BEAST_EXPECT(rSlot == Result::success);
BEAST_EXPECT(
logic.activate(slot, pk1, false) == Result::inboundDisabled);
{
Config c;
c.autoConnect = false;
c.listeningPort = 1024;
c.ipLimit = 2;
c.inPeers = 1;
logic.config(c);
}
// new inbound slot must succeed when inbound connections are enabled
BEAST_EXPECT(logic.activate(slot, pk1, false) == Result::success);
// creating a new inbound slot must succeed as IP Limit is not exceeded
auto const [slot2, r2Slot] = logic.new_inbound_slot(
local, beast::IP::Endpoint::from_string("55.104.0.2:1026"));
BEAST_EXPECT(slot2 != nullptr);
BEAST_EXPECT(r2Slot == Result::success);
PublicKey const pk2(randomKeyPair(KeyType::secp256k1).first);
// an inbound slot exceeding inPeers limit must fail
BEAST_EXPECT(logic.activate(slot2, pk2, false) == Result::full);
logic.on_closed(slot2);
logic.on_closed(slot);
}
void
test_addFixedPeer_no_port()
{
testcase("test addFixedPeer no port");
TestStore store;
TestChecker checker;
TestStopwatch clock;
Logic<TestChecker> logic(clock, store, checker, journal_);
try
{
logic.addFixedPeer(
"test", beast::IP::Endpoint::from_string("65.0.0.2"));
fail("invalid endpoint successfully added");
}
catch (std::runtime_error const& e)
{
pass();
}
}
void
test_onConnected_self_connection()
{
testcase("test onConnected self connection");
TestStore store;
TestChecker checker;
TestStopwatch clock;
Logic<TestChecker> logic(clock, store, checker, journal_);
auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1234");
auto const [slot, r] = logic.new_outbound_slot(local);
BEAST_EXPECT(slot != nullptr);
BEAST_EXPECT(r == Result::success);
// Must fail when a slot is to our own IP address
BEAST_EXPECT(!logic.onConnected(slot, local));
logic.on_closed(slot);
}
void
test_config()
{
// if peers_max is configured then peers_in_max and peers_out_max
// are ignored
// if peers_max is configured then peers_in_max and peers_out_max are
// ignored
auto run = [&](std::string const& test,
std::optional<std::uint16_t> maxPeers,
std::optional<std::uint16_t> maxIn,
@@ -445,21 +282,13 @@ public:
Counts counts;
counts.onConfig(config);
BEAST_EXPECT(
counts.out_max() == expectOut && counts.in_max() == expectIn &&
counts.out_max() == expectOut &&
counts.inboundSlots() == expectIn &&
config.ipLimit == expectIpLimit);
TestStore store;
TestChecker checker;
TestStopwatch clock;
Logic<TestChecker> logic(clock, store, checker, journal_);
logic.config(config);
BEAST_EXPECT(logic.config() == config);
};
// if max_peers == 0 => maxPeers = 21,
// else if max_peers < 10 => maxPeers = 10 else maxPeers =
// max_peers
// else if max_peers < 10 => maxPeers = 10 else maxPeers = max_peers
// expectOut => if legacy => max(0.15 * maxPeers, 10),
// if legacy && !wantIncoming => maxPeers else max_out_peers
// expectIn => if legacy && wantIncoming => maxPeers - outPeers
@@ -535,11 +364,6 @@ public:
test_duplicateInOut();
test_config();
test_invalid_config();
test_peerLimitExceeded();
test_activate_duplicate_peer();
test_activate_inbound_disabled();
test_addFixedPeer_no_port();
test_onConnected_self_connection();
}
};

File diff suppressed because it is too large.

View File

@@ -3074,6 +3074,7 @@ rippleUnlockEscrowMPT(
auto const delta = amount.mpt().value();
// Underflow check for subtraction
// LCOV_EXCL_START
if (!canSubtract(STAmount(mptIssue, locked), STAmount(mptIssue, delta)))
{ // LCOV_EXCL_START
JLOG(j.error())

View File

@@ -53,6 +53,14 @@ public:
virtual std::string
getName() = 0;
/** Get the block size for backends that support it
*/
virtual std::optional<std::size_t>
getBlockSize() const
{
return std::nullopt;
}
/** Open the backend.
@param createIfMissing Create the database files if necessary.
This allows the caller to catch exceptions.
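
Since the base-class default returns std::nullopt, a caller that needs a concrete number can fall back to the 4096-byte default. A hedged sketch of that caller-side pattern (the free function effectiveBlockSize is illustrative, not part of the interface):

#include <cstddef>
#include <optional>

// Illustrative handling of Backend::getBlockSize(): backends that
// do not expose a block size report std::nullopt, so value_or()
// supplies the documented 4096-byte default.
std::size_t
effectiveBlockSize(std::optional<std::size_t> reported)
{
    return reported.value_or(4096);
}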

View File

@@ -24,6 +24,7 @@
#include <xrpld/nodestore/detail/codec.h>
#include <xrpl/basics/contract.h>
#include <xrpl/beast/core/LexicalCast.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <boost/filesystem.hpp>
@@ -52,6 +53,7 @@ public:
size_t const keyBytes_;
std::size_t const burstSize_;
std::string const name_;
std::size_t const blockSize_;
nudb::store db_;
std::atomic<bool> deletePath_;
Scheduler& scheduler_;
@@ -66,6 +68,7 @@ public:
, keyBytes_(keyBytes)
, burstSize_(burstSize)
, name_(get(keyValues, "path"))
, blockSize_(parseBlockSize(name_, keyValues, journal))
, deletePath_(false)
, scheduler_(scheduler)
{
@@ -85,6 +88,7 @@ public:
, keyBytes_(keyBytes)
, burstSize_(burstSize)
, name_(get(keyValues, "path"))
, blockSize_(parseBlockSize(name_, keyValues, journal))
, db_(context)
, deletePath_(false)
, scheduler_(scheduler)
@@ -114,6 +118,12 @@ public:
return name_;
}
std::optional<std::size_t>
getBlockSize() const override
{
return blockSize_;
}
void
open(bool createIfMissing, uint64_t appType, uint64_t uid, uint64_t salt)
override
@@ -143,7 +153,7 @@ public:
uid,
salt,
keyBytes_,
nudb::block_size(kp),
blockSize_,
0.50,
ec);
if (ec == nudb::errc::file_exists)
@@ -359,6 +369,56 @@ public:
{
return 3;
}
private:
static std::size_t
parseBlockSize(
std::string const& name,
Section const& keyValues,
beast::Journal journal)
{
using namespace boost::filesystem;
auto const folder = path(name);
auto const kp = (folder / "nudb.key").string();
std::size_t const defaultSize =
nudb::block_size(kp); // Default 4K from NuDB
std::size_t blockSize = defaultSize;
std::string blockSizeStr;
if (!get_if_exists(keyValues, "nudb_block_size", blockSizeStr))
{
return blockSize; // Early return with default
}
try
{
std::size_t const parsedBlockSize =
beast::lexicalCastThrow<std::size_t>(blockSizeStr);
// Validate: must be power of 2 between 4K and 32K
if (parsedBlockSize < 4096 || parsedBlockSize > 32768 ||
(parsedBlockSize & (parsedBlockSize - 1)) != 0)
{
std::stringstream s;
s << "Invalid nudb_block_size: " << parsedBlockSize
<< ". Must be power of 2 between 4096 and 32768.";
Throw<std::runtime_error>(s.str());
}
JLOG(journal.info())
<< "Using custom NuDB block size: " << parsedBlockSize
<< " bytes";
return parsedBlockSize;
}
catch (std::exception const& e)
{
std::stringstream s;
s << "Invalid nudb_block_size value: " << blockSizeStr
<< ". Error: " << e.what();
Throw<std::runtime_error>(s.str());
}
}
};
//------------------------------------------------------------------------------
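
Because parseBlockSize throws on a bad value instead of silently falling back, a misconfigured nudb_block_size prevents the backend from being constructed at all. A standalone sketch of the same parse-and-validate flow, using std::stoull in place of beast::lexicalCastThrow; parseBlockSizeSketch is a hypothetical helper, not the factory's actual entry point:

#include <cstddef>
#include <stdexcept>
#include <string>

// Mirrors the factory's flow: early return when the key is absent,
// strict numeric parse, then the power-of-two range check. Throws
// std::runtime_error on any invalid input, as the factory does.
std::size_t
parseBlockSizeSketch(std::string const& value, std::size_t defaultSize)
{
    if (value.empty())
        return defaultSize; // early return: key not configured

    std::size_t pos = 0;
    unsigned long long parsed = 0;
    try
    {
        parsed = std::stoull(value, &pos);
    }
    catch (std::exception const&)
    {
        throw std::runtime_error("Invalid nudb_block_size value: " + value);
    }
    if (pos != value.size())
        throw std::runtime_error("Invalid nudb_block_size value: " + value);

    if (parsed < 4096 || parsed > 32768 || (parsed & (parsed - 1)) != 0)
        throw std::runtime_error(
            "Invalid nudb_block_size: " + value +
            ". Must be power of 2 between 4096 and 32768.");
    return static_cast<std::size_t>(parsed);
}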

View File

@@ -195,16 +195,14 @@ OverlayImpl::onHandoff(
if (consumer.disconnect(journal))
return handoff;
auto const [slot, result] = m_peerFinder->new_inbound_slot(
auto const slot = m_peerFinder->new_inbound_slot(
beast::IPAddressConversion::from_asio(local_endpoint),
beast::IPAddressConversion::from_asio(remote_endpoint));
if (slot == nullptr)
{
// connection refused either IP limit exceeded or self-connect
// self-connect, close
handoff.moved = false;
JLOG(journal.debug())
<< "Peer " << remote_endpoint << " refused, " << to_string(result);
return handoff;
}
@@ -404,11 +402,10 @@ OverlayImpl::connect(beast::IP::Endpoint const& remote_endpoint)
return;
}
auto const [slot, result] = peerFinder().new_outbound_slot(remote_endpoint);
auto const slot = peerFinder().new_outbound_slot(remote_endpoint);
if (slot == nullptr)
{
JLOG(journal_.debug()) << "Connect: No slot for " << remote_endpoint
<< ": " << to_string(result);
JLOG(journal_.debug()) << "Connect: No slot for " << remote_endpoint;
return;
}

View File

@@ -109,9 +109,6 @@ struct Config
std::uint16_t port,
bool validationPublicKey,
int ipLimit);
friend bool
operator==(Config const& lhs, Config const& rhs);
};
//------------------------------------------------------------------------------
@@ -139,13 +136,7 @@ using Endpoints = std::vector<Endpoint>;
//------------------------------------------------------------------------------
/** Possible results from activating a slot. */
enum class Result {
inboundDisabled,
duplicatePeer,
ipLimitExceeded,
full,
success
};
enum class Result { duplicate, full, success };
/**
* @brief Converts a `Result` enum value to its string representation.
@@ -166,16 +157,12 @@ to_string(Result result) noexcept
{
switch (result)
{
case Result::inboundDisabled:
return "inbound disabled";
case Result::duplicatePeer:
return "peer already connected";
case Result::ipLimitExceeded:
return "ip limit exceeded";
case Result::full:
return "slots full";
case Result::success:
return "success";
case Result::duplicate:
return "duplicate connection";
case Result::full:
return "slots full";
}
return "unknown";
@@ -247,7 +234,7 @@ public:
If nullptr is returned, then the slot could not be assigned.
Usually this is because of a detected self-connection.
*/
virtual std::pair<std::shared_ptr<Slot>, Result>
virtual std::shared_ptr<Slot>
new_inbound_slot(
beast::IP::Endpoint const& local_endpoint,
beast::IP::Endpoint const& remote_endpoint) = 0;
@@ -256,7 +243,7 @@ public:
If nullptr is returned, then the slot could not be assigned.
Usually this is because of a duplicate connection.
*/
virtual std::pair<std::shared_ptr<Slot>, Result>
virtual std::shared_ptr<Slot>
new_outbound_slot(beast::IP::Endpoint const& remote_endpoint) = 0;
/** Called when mtENDPOINTS is received. */

View File

@@ -163,7 +163,7 @@ public:
/** Returns the total number of inbound slots. */
int
in_max() const
inboundSlots() const
{
return m_in_max;
}

View File

@@ -172,7 +172,9 @@ public:
void
addFixedPeer(std::string const& name, beast::IP::Endpoint const& ep)
{
addFixedPeer(name, std::vector<beast::IP::Endpoint>{ep});
std::vector<beast::IP::Endpoint> v;
v.push_back(ep);
addFixedPeer(name, v);
}
void
@@ -259,7 +261,7 @@ public:
//--------------------------------------------------------------------------
std::pair<SlotImp::ptr, Result>
SlotImp::ptr
new_inbound_slot(
beast::IP::Endpoint const& local_endpoint,
beast::IP::Endpoint const& remote_endpoint)
@@ -275,12 +277,12 @@ public:
{
auto const count =
connectedAddresses_.count(remote_endpoint.address());
if (count + 1 > config_.ipLimit)
if (count > config_.ipLimit)
{
JLOG(m_journal.debug())
<< beast::leftw(18) << "Logic dropping inbound "
<< remote_endpoint << " because of ip limits.";
return {SlotImp::ptr(), Result::ipLimitExceeded};
return SlotImp::ptr();
}
}
@@ -290,7 +292,7 @@ public:
JLOG(m_journal.debug())
<< beast::leftw(18) << "Logic dropping " << remote_endpoint
<< " as duplicate incoming";
return {SlotImp::ptr(), Result::duplicatePeer};
return SlotImp::ptr();
}
// Create the slot
@@ -312,11 +314,11 @@ public:
// Update counts
counts_.add(*slot);
return {result.first->second, Result::success};
return result.first->second;
}
// Can't check for self-connect because we don't know the local endpoint
std::pair<SlotImp::ptr, Result>
SlotImp::ptr
new_outbound_slot(beast::IP::Endpoint const& remote_endpoint)
{
JLOG(m_journal.debug())
@@ -330,7 +332,7 @@ public:
JLOG(m_journal.debug())
<< beast::leftw(18) << "Logic dropping " << remote_endpoint
<< " as duplicate connect";
return {SlotImp::ptr(), Result::duplicatePeer};
return SlotImp::ptr();
}
// Create the slot
@@ -351,7 +353,7 @@ public:
// Update counts
counts_.add(*slot);
return {result.first->second, Result::success};
return result.first->second;
}
bool
@@ -415,7 +417,7 @@ public:
// Check for duplicate connection by key
if (keys_.find(key) != keys_.end())
return Result::duplicatePeer;
return Result::duplicate;
// If the peer belongs to a cluster or is reserved,
// update the slot to reflect that.
@@ -428,8 +430,6 @@ public:
{
if (!slot->inbound())
bootcache_.on_success(slot->remote_endpoint());
if (slot->inbound() && counts_.in_max() == 0)
return Result::inboundDisabled;
return Result::full;
}
@@ -651,7 +651,7 @@ public:
// 2. We have slots
// 3. We haven't failed the firewalled test
//
if (config_.wantIncoming && counts_.in_max() > 0)
if (config_.wantIncoming && counts_.inboundSlots() > 0)
{
Endpoint ep;
ep.hops = 0;

View File

@@ -34,17 +34,6 @@ Config::Config()
{
}
bool
operator==(Config const& lhs, Config const& rhs)
{
return lhs.autoConnect == rhs.autoConnect &&
lhs.peerPrivate == rhs.peerPrivate &&
lhs.wantIncoming == rhs.wantIncoming && lhs.inPeers == rhs.inPeers &&
lhs.maxPeers == rhs.maxPeers && lhs.outPeers == rhs.outPeers &&
lhs.features == lhs.features && lhs.ipLimit == rhs.ipLimit &&
lhs.listeningPort == rhs.listeningPort;
}
std::size_t
Config::calcOutPeers() const
{

View File

@@ -125,7 +125,7 @@ public:
//--------------------------------------------------------------------------
std::pair<std::shared_ptr<Slot>, Result>
std::shared_ptr<Slot>
new_inbound_slot(
beast::IP::Endpoint const& local_endpoint,
beast::IP::Endpoint const& remote_endpoint) override
@@ -133,7 +133,7 @@ public:
return m_logic.new_inbound_slot(local_endpoint, remote_endpoint);
}
std::pair<std::shared_ptr<Slot>, Result>
std::shared_ptr<Slot>
new_outbound_slot(beast::IP::Endpoint const& remote_endpoint) override
{
return m_logic.new_outbound_slot(remote_endpoint);

View File

@@ -190,7 +190,7 @@ getAccountObjects(
auto& jvObjects = (jvResult[jss::account_objects] = Json::arrayValue);
// this is a mutable version of limit, used to seamlessly switch
// this is a mutable version of limit, used to seemlessly switch
// to iterating directory entries when nftokenpages are exhausted
uint32_t mlimit = limit;
@@ -373,7 +373,7 @@ ledgerFromRequest(T& ledger, JsonContext& context)
indexValue = legacyLedger;
}
if (!hashValue.isNull())
if (hashValue)
{
if (!hashValue.isString())
return {rpcINVALID_PARAMS, "ledgerHashNotString"};
@@ -384,9 +384,6 @@ ledgerFromRequest(T& ledger, JsonContext& context)
return getLedger(ledger, ledgerHash, context);
}
if (!indexValue.isConvertibleTo(Json::stringValue))
return {rpcINVALID_PARAMS, "ledgerIndexMalformed"};
auto const index = indexValue.asString();
if (index == "current" || index.empty())
@@ -398,11 +395,11 @@ ledgerFromRequest(T& ledger, JsonContext& context)
if (index == "closed")
return getLedger(ledger, LedgerShortcut::CLOSED, context);
std::uint32_t val;
if (!beast::lexicalCastChecked(val, index))
return {rpcINVALID_PARAMS, "ledgerIndexMalformed"};
std::uint32_t iVal;
if (beast::lexicalCastChecked(iVal, index))
return getLedger(ledger, iVal, context);
return getLedger(ledger, val, context);
return {rpcINVALID_PARAMS, "ledgerIndexMalformed"};
}
} // namespace
@@ -589,7 +586,7 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context)
return Status::OK;
}
// Explicit instantiation of above three functions
// Explicit instantiaion of above three functions
template Status
getLedger<>(std::shared_ptr<ReadView const>&, uint32_t, Context&);

File diff suppressed because it is too large.

View File

@@ -1,299 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012-2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpld/rpc/detail/RPCHelpers.h>
#include <xrpl/basics/StringUtilities.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/beast/core/LexicalCast.h>
#include <xrpl/json/json_errors.h>
#include <xrpl/protocol/ErrorCodes.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/RPCErr.h>
#include <xrpl/protocol/STXChainBridge.h>
#include <xrpl/protocol/jss.h>
#include <functional>
namespace ripple {
namespace LedgerEntryHelpers {
Unexpected<Json::Value>
missingFieldError(
Json::StaticString const field,
std::optional<std::string> err = std::nullopt)
{
Json::Value json = Json::objectValue;
auto error = RPC::missing_field_message(std::string(field.c_str()));
json[jss::error] = err.value_or("malformedRequest");
json[jss::error_code] = rpcINVALID_PARAMS;
json[jss::error_message] = std::move(error);
return Unexpected(json);
}
Unexpected<Json::Value>
invalidFieldError(
std::string const& err,
Json::StaticString const field,
std::string const& type)
{
Json::Value json = Json::objectValue;
auto error = RPC::expected_field_message(field, type);
json[jss::error] = err;
json[jss::error_code] = rpcINVALID_PARAMS;
json[jss::error_message] = std::move(error);
return Unexpected(json);
}
Unexpected<Json::Value>
malformedError(std::string const& err, std::string const& message)
{
Json::Value json = Json::objectValue;
json[jss::error] = err;
json[jss::error_code] = rpcINVALID_PARAMS;
json[jss::error_message] = message;
return Unexpected(json);
}
Expected<bool, Json::Value>
hasRequired(
Json::Value const& params,
std::initializer_list<Json::StaticString> fields,
std::optional<std::string> err = std::nullopt)
{
for (auto const field : fields)
{
if (!params.isMember(field) || params[field].isNull())
{
return missingFieldError(field, err);
}
}
return true;
}
template <class T>
std::optional<T>
parse(Json::Value const& param);
template <class T>
Expected<T, Json::Value>
required(
Json::Value const& params,
Json::StaticString const fieldName,
std::string const& err,
std::string const& expectedType)
{
if (!params.isMember(fieldName) || params[fieldName].isNull())
{
return missingFieldError(fieldName);
}
if (auto obj = parse<T>(params[fieldName]))
{
return *obj;
}
return invalidFieldError(err, fieldName, expectedType);
}
template <>
std::optional<AccountID>
parse(Json::Value const& param)
{
if (!param.isString())
return std::nullopt;
auto const account = parseBase58<AccountID>(param.asString());
if (!account || account->isZero())
{
return std::nullopt;
}
return account;
}
Expected<AccountID, Json::Value>
requiredAccountID(
Json::Value const& params,
Json::StaticString const fieldName,
std::string const& err)
{
return required<AccountID>(params, fieldName, err, "AccountID");
}
std::optional<Blob>
parseHexBlob(Json::Value const& param, std::size_t maxLength)
{
if (!param.isString())
return std::nullopt;
auto const blob = strUnHex(param.asString());
if (!blob || blob->empty() || blob->size() > maxLength)
return std::nullopt;
return blob;
}
Expected<Blob, Json::Value>
requiredHexBlob(
Json::Value const& params,
Json::StaticString const fieldName,
std::size_t maxLength,
std::string const& err)
{
if (!params.isMember(fieldName) || params[fieldName].isNull())
{
return missingFieldError(fieldName);
}
if (auto blob = parseHexBlob(params[fieldName], maxLength))
{
return *blob;
}
return invalidFieldError(err, fieldName, "hex string");
}
template <>
std::optional<std::uint32_t>
parse(Json::Value const& param)
{
if (param.isUInt() || (param.isInt() && param.asInt() >= 0))
return param.asUInt();
if (param.isString())
{
std::uint32_t v;
if (beast::lexicalCastChecked(v, param.asString()))
return v;
}
return std::nullopt;
}
Expected<std::uint32_t, Json::Value>
requiredUInt32(
Json::Value const& params,
Json::StaticString const fieldName,
std::string const& err)
{
return required<std::uint32_t>(params, fieldName, err, "number");
}
template <>
std::optional<uint256>
parse(Json::Value const& param)
{
uint256 uNodeIndex;
if (!param.isString() || !uNodeIndex.parseHex(param.asString()))
{
return std::nullopt;
}
return uNodeIndex;
}
Expected<uint256, Json::Value>
requiredUInt256(
Json::Value const& params,
Json::StaticString const fieldName,
std::string const& err)
{
return required<uint256>(params, fieldName, err, "Hash256");
}
template <>
std::optional<uint192>
parse(Json::Value const& param)
{
uint192 field;
if (!param.isString() || !field.parseHex(param.asString()))
{
return std::nullopt;
}
return field;
}
Expected<uint192, Json::Value>
requiredUInt192(
Json::Value const& params,
Json::StaticString const fieldName,
std::string const& err)
{
return required<uint192>(params, fieldName, err, "Hash192");
}
Expected<STXChainBridge, Json::Value>
parseBridgeFields(Json::Value const& params)
{
if (auto const value = hasRequired(
params,
{jss::LockingChainDoor,
jss::LockingChainIssue,
jss::IssuingChainDoor,
jss::IssuingChainIssue});
!value)
{
return Unexpected(value.error());
}
auto const lockingChainDoor = requiredAccountID(
params, jss::LockingChainDoor, "malformedLockingChainDoor");
if (!lockingChainDoor)
{
return Unexpected(lockingChainDoor.error());
}
auto const issuingChainDoor = requiredAccountID(
params, jss::IssuingChainDoor, "malformedIssuingChainDoor");
if (!issuingChainDoor)
{
return Unexpected(issuingChainDoor.error());
}
Issue lockingChainIssue;
try
{
lockingChainIssue = issueFromJson(params[jss::LockingChainIssue]);
}
catch (std::runtime_error const& ex)
{
return invalidFieldError(
"malformedIssue", jss::LockingChainIssue, "Issue");
}
Issue issuingChainIssue;
try
{
issuingChainIssue = issueFromJson(params[jss::IssuingChainIssue]);
}
catch (std::runtime_error const& ex)
{
return invalidFieldError(
"malformedIssue", jss::IssuingChainIssue, "Issue");
}
return STXChainBridge(
*lockingChainDoor,
lockingChainIssue,
*issuingChainDoor,
issuingChainIssue);
}
} // namespace LedgerEntryHelpers
} // namespace ripple