Compare commits

...

31 Commits

Author SHA1 Message Date
cyan317
4f3b6e98ad Subscribe cleanup (#940)
Fix #939
2023-12-19 15:52:12 +00:00
cyan317
27279ceb6d Fix messages pile up (#921)
Fix #924
2023-12-19 15:51:39 +00:00
Alex Kremer
7a1f902f42 Fix http params handling discrepancy (#913)
Fixes #909
2023-10-11 00:40:41 +01:00
Alex Kremer
7e621b2518 Add field name to output of invalidParams for OneOf (#906)
Fixes #901
2023-10-11 00:39:01 +01:00
cyan317
e32e2ebee4 Fix account_tx response when both ledger range and ledger index/hash are specified (#904)
Fix mismatch with rippled
2023-10-11 00:34:01 +01:00
Alex Kremer
7742c4a5e3 Add inLedger to tx and account_tx (#895)
Fixes #890
2023-10-11 00:32:58 +01:00
cyan317
c24c3b536f Fix trans order of subscription transactions stream (#882)
Fix #833
2023-10-11 00:26:06 +01:00
Alex Kremer
4d42f7c4e4 Change consume to full buffer recreate (#899) 2023-10-11 00:10:20 +01:00
cyan317
c634f0f0ba Fix ledger_entry error code (#891)
Fix #896
2023-10-10 23:03:12 +01:00
Alex Kremer
2ef766a740 Fixes broken counters for broken pipe connections (#880)
Fixes #885
2023-10-10 23:03:04 +01:00
Sergey Kuznetsov
69f5025a29 Add compiler flags (#850)
Fixes #435
2023-10-02 16:45:48 +01:00
Sergey Kuznetsov
d1c41a8bb7 Don't use clio for conan cache hash (#879) 2023-10-02 11:43:51 +01:00
Sergey Kuznetsov
207ba51461 Fix CI (#878)
* Put conan-non-prod artifactory first

* Rebuild all conan packages if no cache

* Save cache only if there was no cache found
2023-09-28 16:49:15 +01:00
Sergey Kuznetsov
ebe7688ccb Api v1 bool support (#877)
* Allow not bool for signer_lists

* Allow transactions to be not bool for v1

* Add tests for JsonBool
2023-09-28 12:56:38 +01:00
Sergey Kuznetsov
6d9f8a7ead CI improvements (#867)
* Generate conan profile in CI

* Move linux build into main workflow

* Add saving/restoring conan data

* Move cache to Linux

* Fix error

* Change key to hash from conanfile

* Fix path error

* Populate cache only in develop branch

* Big refactor

- Move duplicated code to actions
- Isolate mac build from home directory
- Separate ccache and conan caches

* Fix errors

* Change ccache cache name and fix errors

* Always populate cache

* Use newer ccache on Linux

* Strip tests

* Better conan hash
2023-09-28 11:36:03 +01:00
Sergey Kuznetsov
6ca777ea96 Account tx v1 api support (#874)
* Don't fail on ledger params for v1

* Different error on invalid ledger indexes for v1

* Allow forward and binary to be not bool for v1

* Minor fixes

* Fix tests

* Don't fail if input ledger index is out of range for v1

* Restore deleted test

* Fix comparison of integers with different signedness

* Updated default api version in README and example config
2023-09-28 11:31:35 +01:00
cyan317
963685dd31 Ledger_entry return invalid parameter error for v1 (#873)
Fixes #875
2023-09-28 09:14:01 +01:00
cyan317
e36545058d Duplicate signer_lists in account_info (#870)
Fix #871
2023-09-25 13:24:16 +01:00
cyan317
44527140f0 Fix inaccurate coverage caused by LOG (#868)
Fix #845
2023-09-21 16:19:53 +01:00
Alex Kremer
0eaaa1fb31 Add workaround for async_compose (#841)
Fixes #840
2023-09-18 18:52:32 +01:00
Alex Kremer
1846f629a5 AccountTx filtering by transaction type (#851)
Fixes #685
2023-09-18 18:52:00 +01:00
Alex Kremer
83af5af3c6 Remove deprecated cassandra options (#852)
Fixes #849
2023-09-18 13:40:38 +01:00
Alex Kremer
418a0ddbf2 Add libxrpl version to server_info output (#854)
Fixes #853
2023-09-18 13:39:01 +01:00
Alex Kremer
6cfbfda014 Repeatedly log on amendment block (#829)
Fixes #364
2023-09-13 13:34:02 +01:00
Alex Kremer
91648f98ad Fix malformed taker error to match rippled (#827)
Fixes #352
2023-09-11 19:39:10 +01:00
Sergey Kuznetsov
71e1637c5f Add options for better clangd support (#836)
Fixes #839
2023-09-11 17:53:30 +01:00
Sergey Kuznetsov
59cd2ce5aa Fix missing lock (#837) 2023-09-11 16:19:57 +01:00
Alex Kremer
d783edd57a Add working dir to git command executions (#828) 2023-09-11 13:40:22 +01:00
cyan317
1ce8a58167 Add number of requests to log (#838) 2023-09-11 12:58:45 +01:00
Peter Chen
92e5c4792b Change boost::json to json in unittests (#831) 2023-09-11 12:39:38 +01:00
Michael Legleux
d7f36733bc Link libstd++ and gcc lib statically (#830) 2023-08-24 12:41:08 +01:00
122 changed files with 3347 additions and 1439 deletions

36
.github/actions/build_clio/action.yml vendored Normal file
View File

@@ -0,0 +1,36 @@
name: Build clio
description: Build clio in build directory
inputs:
conan_profile:
description: Conan profile name
required: true
default: default
conan_cache_hit:
description: Whether conan cache has been downloaded
required: true
runs:
using: composite
steps:
- name: Get number of threads on mac
id: mac_threads
if: ${{ runner.os == 'macOS' }}
shell: bash
run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT
- name: Get number of threads on Linux
id: linux_threads
if: ${{ runner.os == 'Linux' }}
shell: bash
run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT
- name: Build Clio
shell: bash
env:
BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
run: |
mkdir -p build
cd build
threads_num=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}
conan install .. -of . -b $BUILD_OPTION -s build_type=Release -o clio:tests=True --profile ${{ inputs.conan_profile }}
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. -G Ninja
cmake --build . --parallel $threads_num

View File

@@ -1,3 +1,5 @@
name: Check format
description: Check format using clang-format-11
runs:
using: composite
steps:

View File

@@ -0,0 +1,14 @@
name: Git common ancestor
description: Find the closest common commit
outputs:
commit:
description: Hash of commit
value: ${{ steps.find_common_ancestor.outputs.commit }}
runs:
using: composite
steps:
- name: Find common git ancestor
id: find_common_ancestor
shell: bash
run: |
echo "commit=$(git merge-base --fork-point origin/develop)" >> $GITHUB_OUTPUT

View File

@@ -1,20 +0,0 @@
#!/usr/bin/env bash
conan profile new default --detect
conan profile update settings.compiler.cppstd=20 default
conan profile update settings.compiler.libcxx=libstdc++11 default
conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
cd rippled
conan export external/snappy snappy/1.1.10@
conan export external/soci soci/4.0.3@
conan export .
conan install --output-folder build_rippled -install-folder build_rippled --build missing --settings build_type=Release
cmake -B build_rippled -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release
cmake --build build_rippled --target xrpl_core --parallel $(($(nproc) - 2))
cd ..
conan export external/cassandra
conan install . -if build_clio -of build_clio --build missing --settings build_type=Release -o tests=True
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -B build_clio
cmake --build build_clio --parallel $(($(nproc) - 2))

View File

@@ -0,0 +1,50 @@
name: Restore cache
description: Find and restores conan and ccache cache
inputs:
conan_dir:
description: Path to .conan directory
required: true
ccache_dir:
description: Path to .ccache directory
required: true
outputs:
conan_hash:
description: Hash to use as a part of conan cache key
value: ${{ steps.conan_hash.outputs.hash }}
conan_cache_hit:
description: True if conan cache has been downloaded
value: ${{ steps.conan_cache.outputs.cache-hit }}
ccache_cache_hit:
description: True if ccache cache has been downloaded
value: ${{ steps.ccache_cache.outputs.cache-hit }}
runs:
using: composite
steps:
- name: Find common commit
id: git_common_ancestor
uses: ./.github/actions/git_common_ancestor
- name: Calculate conan hash
id: conan_hash
shell: bash
run: |
conan info . -j info.json
packages_info=$(cat info.json | jq '.[] | "\(.display_name): \(.id)"' | grep -v 'clio')
echo "$packages_info"
hash=$(echo "$packages_info" | shasum -a 256 | cut -d ' ' -f 1)
rm info.json
echo "hash=$hash" >> $GITHUB_OUTPUT
- name: Restore conan cache
uses: actions/cache/restore@v3
id: conan_cache
with:
path: ${{ inputs.conan_dir }}/data
key: clio-conan_data-${{ runner.os }}-develop-${{ steps.conan_hash.outputs.hash }}
- name: Restore ccache cache
uses: actions/cache/restore@v3
id: ccache_cache
with:
path: ${{ inputs.ccache_dir }}
key: clio-ccache-${{ runner.os }}-develop-${{ steps.git_common_ancestor.outputs.commit }}

46
.github/actions/save_cache/action.yml vendored Normal file
View File

@@ -0,0 +1,46 @@
name: Save cache
description: Save conan and ccache cache for develop branch
inputs:
conan_dir:
description: Path to .conan directory
required: true
conan_hash:
description: Hash to use as a part of conan cache key
required: true
conan_cache_hit:
description: Whether conan cache has been downloaded
required: true
ccache_dir:
description: Path to .ccache directory
required: true
ccache_cache_hit:
description: Whether conan cache has been downloaded
required: true
runs:
using: composite
steps:
- name: Find common commit
id: git_common_ancestor
uses: ./.github/actions/git_common_ancestor
- name: Cleanup conan directory from extra data
if: ${{ inputs.conan_cache_hit != 'true' }}
shell: bash
run: |
conan remove "*" -s -b -f
- name: Save conan cache
if: ${{ inputs.conan_cache_hit != 'true' }}
uses: actions/cache/save@v3
with:
path: ${{ inputs.conan_dir }}/data
key: clio-conan_data-${{ runner.os }}-develop-${{ inputs.conan_hash }}
- name: Save ccache cache
if: ${{ inputs.ccache_cache_hit != 'true' }}
uses: actions/cache/save@v3
with:
path: ${{ inputs.ccache_dir }}
key: clio-ccache-${{ runner.os }}-develop-${{ steps.git_common_ancestor.outputs.commit }}

55
.github/actions/setup_conan/action.yml vendored Normal file
View File

@@ -0,0 +1,55 @@
name: Setup conan
description: Setup conan profile and artifactory
outputs:
conan_profile:
description: Created conan profile name
value: ${{ steps.conan_export_output.outputs.conan_profile }}
runs:
using: composite
steps:
- name: On mac
if: ${{ runner.os == 'macOS' }}
shell: bash
env:
CONAN_PROFILE: clio_clang_14
id: conan_setup_mac
run: |
echo "Creating $CONAN_PROFILE conan profile";
clang_path="$(brew --prefix llvm@14)/bin/clang"
clang_cxx_path="$(brew --prefix llvm@14)/bin/clang++"
conan profile new $CONAN_PROFILE --detect --force
conan profile update settings.compiler=clang $CONAN_PROFILE
conan profile update settings.compiler.version=14 $CONAN_PROFILE
conan profile update settings.compiler.cppstd=20 $CONAN_PROFILE
conan profile update "conf.tools.build:compiler_executables={\"c\": \"$clang_path\", \"cpp\": \"$clang_cxx_path\"}" $CONAN_PROFILE
conan profile update env.CC="$clang_path" $CONAN_PROFILE
conan profile update env.CXX="$clang_cxx_path" $CONAN_PROFILE
echo "created_conan_profile=$CONAN_PROFILE" >> $GITHUB_OUTPUT
- name: On linux
if: ${{ runner.os == 'Linux' }}
shell: bash
id: conan_setup_linux
run: |
conan profile new default --detect
conan profile update settings.compiler.cppstd=20 default
conan profile update settings.compiler.libcxx=libstdc++11 default
echo "created_conan_profile=default" >> $GITHUB_OUTPUT
- name: Export output variable
shell: bash
id: conan_export_output
run: |
echo "conan_profile=${{ steps.conan_setup_mac.outputs.created_conan_profile || steps.conan_setup_linux.outputs.created_conan_profile }}" >> $GITHUB_OUTPUT
- name: Add conan-non-prod artifactory
shell: bash
run: |
if [[ -z $(conan remote list | grep conan-non-prod) ]]; then
echo "Adding conan-non-prod"
conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
else
echo "Conan-non-prod is available"
fi

View File

@@ -13,103 +13,142 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Run clang-format
uses: ./.github/actions/lint
uses: ./.github/actions/clang_format
build_mac:
name: Build macOS
needs: lint
continue-on-error: true
runs-on: [self-hosted, macOS]
env:
CCACHE_DIR: ${{ github.workspace }}/.ccache
CONAN_USER_HOME: ${{ github.workspace }}
steps:
- uses: actions/checkout@v3
with:
path: clio
fetch-depth: 0
- name: List conan artifactory
- name: Install packages
run: |
conan search
conan remote list
if [[ $(conan remote list |grep conan-non-prod| wc -c) -ne 0 ]]; then
echo "conan-non-prod is available"
else
echo "adding conan-non-prod"
conan remote add conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
fi
brew install llvm@14 pkg-config ninja bison cmake ccache jq
- name: Install dependencies
run: |
brew install llvm@14 pkg-config ninja bison cmake
- name: Setup conan
uses: ./.github/actions/setup_conan
id: conan
- name: Setup environment for llvm-14
run: |
export PATH="/usr/local/opt/llvm@14/bin:$PATH"
export LDFLAGS="-L/usr/local/opt/llvm@14/lib -L/usr/local/opt/llvm@14/lib/c++ -Wl,-rpath,/usr/local/opt/llvm@14/lib/c++"
export CPPFLAGS="-I/usr/local/opt/llvm@14/include"
- name: Restore cache
uses: ./.github/actions/restore_cache
id: restore_cache
with:
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
ccache_dir: ${{ env.CCACHE_DIR }}
- name: Build Clio
run: |
cd clio
mkdir -p build
cd build
conan install .. -of . -b missing -s build_type=Release -o clio:tests=True
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel $(($(sysctl -n hw.logicalcpu) - 2))
uses: ./.github/actions/build_clio
with:
conan_profile: ${{ steps.conan.outputs.conan_profile }}
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
- name: Strip tests
run: strip build/clio_tests
- name: Upload clio_tests
uses: actions/upload-artifact@v3
with:
name: clio_tests_mac
path: ./clio/build/clio_tests
path: build/clio_tests
- name: Save cache
uses: ./.github/actions/save_cache
with:
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
ccache_dir: ${{ env.CCACHE_DIR }}
ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
build_linux:
name: Build linux
needs: lint
continue-on-error: true
runs-on: [self-hosted, Linux]
container:
image: conanio/gcc11:1.60.2
image: conanio/gcc11:1.61.0
options: --user root
env:
CCACHE_DIR: /root/.ccache
CONAN_USER_HOME: /root/
steps:
- name: Get Clio
uses: actions/checkout@v3
- name: Get rippled
uses: actions/checkout@v3
with:
repository: thejohnfreeman/rippled
ref: clio
path: rippled
fetch-depth: 0
- name: Install packages
run: |
apt update -qq
apt install -y jq
- name: Install ccache
run: |
wget https://github.com/ccache/ccache/releases/download/v4.8.3/ccache-4.8.3-linux-x86_64.tar.xz
tar xf ./ccache-4.8.3-linux-x86_64.tar.xz
mv ./ccache-4.8.3-linux-x86_64/ccache /usr/bin/ccache
- name: Fix git permissions
run: git config --global --add safe.directory $PWD
- name: Setup conan
uses: ./.github/actions/setup_conan
- name: Restore cache
uses: ./.github/actions/restore_cache
id: restore_cache
with:
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
ccache_dir: ${{ env.CCACHE_DIR }}
- name: Build Clio
run: |
./.github/actions/linux_build/build.sh
uses: ./.github/actions/build_clio
with:
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
- name: Strip tests
run: strip build/clio_tests
- name: Upload clio_tests
uses: actions/upload-artifact@v3
with:
name: clio_tests_linux
path: ./build_clio/clio_tests
path: build/clio_tests
- name: Save cache
uses: ./.github/actions/save_cache
with:
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
ccache_dir: ${{ env.CCACHE_DIR }}
ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
test_mac:
needs: build_mac
runs-on: [self-hosted, macOS]
steps:
- uses: actions/download-artifact@v3
with:
name: clio_tests_mac
- name: Run clio_tests
run: |
chmod +x ./clio_tests
./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"
- uses: actions/download-artifact@v3
with:
name: clio_tests_mac
- name: Run clio_tests
run: |
chmod +x ./clio_tests
./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"
test_linux:
needs: build_linux
runs-on: [self-hosted, x-heavy]
steps:
- uses: actions/download-artifact@v3
with:
name: clio_tests_linux
- name: Run clio_tests
run: |
chmod +x ./clio_tests
./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"
- uses: actions/download-artifact@v3
with:
name: clio_tests_linux
- name: Run clio_tests
run: |
chmod +x ./clio_tests
./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"

3
.gitignore vendored
View File

@@ -1,6 +1,7 @@
*clio*.log
build*/
/build*/
.build
.cache
.vscode
.python-version
CMakeUserPresets.json

5
CMake/Ccache.cmake Normal file
View File

@@ -0,0 +1,5 @@
find_program (CCACHE_PATH "ccache")
if (CCACHE_PATH)
set (CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PATH}")
message (STATUS "Using ccache: ${CCACHE_PATH}")
endif ()

View File

@@ -5,21 +5,27 @@
find_package (Git REQUIRED)
set (GIT_COMMAND rev-parse --short HEAD)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)
set (GIT_COMMAND branch --show-current)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)
if (BRANCH STREQUAL "")
set (BRANCH "dev")
endif ()
if (NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name YYYYMMDDHMS-<branch>-<git-ref>
if (NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name YYYYMMDDHMS-<branch>-<git-rev>
execute_process (COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
set (VERSION "${DATE}-${BRANCH}-${REV}")
else ()
set (GIT_COMMAND describe --tags)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
set (VERSION "${TAG_VERSION}-${REV}")
endif ()

View File

@@ -1,7 +1,45 @@
target_compile_options (clio PUBLIC
set(COMPILER_FLAGS
-Wall
-Wcast-align
-Wdouble-promotion
-Wextra
-Werror
-Wformat=2
-Wimplicit-fallthrough
-Wmisleading-indentation
-Wno-narrowing
-Wno-deprecated-declarations
-Wno-deprecated-declarations
-Wno-dangling-else
-Wno-unused-but-set-variable)
-Wno-unused-but-set-variable
-Wnon-virtual-dtor
-Wnull-dereference
-Wold-style-cast
-pedantic
-Wpedantic
-Wunused
)
if (is_gcc AND NOT lint)
list(APPEND COMPILER_FLAGS
-Wduplicated-branches
-Wduplicated-cond
-Wlogical-op
-Wuseless-cast
)
endif ()
if (is_clang)
list(APPEND COMPILER_FLAGS
-Wshadow # gcc is to aggressive with shadowing https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78147
)
endif ()
if (is_appleclang)
list(APPEND COMPILER_FLAGS
-Wreorder-init-list
)
endif ()
# See https://github.com/cpp-best-practices/cppbestpractices/blob/master/02-Use_the_Tools_Available.md#gcc--clang for the flags description
target_compile_options (clio PUBLIC ${COMPILER_FLAGS})

View File

@@ -4,17 +4,19 @@ project(clio)
# ========================================================================== #
# Options #
# ========================================================================== #
option (verbose "Verbose build" FALSE)
option (verbose "Verbose build" FALSE)
option (tests "Build tests" FALSE)
option (docs "Generate doxygen docs" FALSE)
option (coverage "Build test coverage report" FALSE)
option (packaging "Create distribution packages" FALSE)
# ========================================================================== #
set (san "" CACHE STRING "Add sanitizer instrumentation")
set (CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
set_property (CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
# ========================================================================== #
# Include required modules
include (CMake/Ccache.cmake)
include (CheckCXXCompilerFlag)
if (verbose)
@@ -135,7 +137,11 @@ target_sources (clio PRIVATE
# Clio server
add_executable (clio_server src/main/Main.cpp)
target_link_libraries (clio_server PUBLIC clio)
target_link_libraries (clio_server PRIVATE clio)
target_link_options(clio_server
PRIVATE
$<$<AND:$<NOT:$<BOOL:${APPLE}>>,$<NOT:$<BOOL:${san}>>>:-static-libstdc++ -static-libgcc>
)
# Unittesting
if (tests)
@@ -158,6 +164,7 @@ if (tests)
unittests/etl/ExtractorTests.cpp
unittests/etl/TransformerTests.cpp
unittests/etl/CacheLoaderTests.cpp
unittests/etl/AmendmentBlockHandlerTests.cpp
# RPC
unittests/rpc/ErrorTests.cpp
unittests/rpc/BaseTests.cpp
@@ -168,6 +175,7 @@ if (tests)
unittests/rpc/ForwardingProxyTests.cpp
unittests/rpc/WorkQueueTests.cpp
unittests/rpc/AmendmentsTests.cpp
unittests/rpc/JsonBoolTests.cpp
## RPC handlers
unittests/rpc/handlers/DefaultProcessorTests.cpp
unittests/rpc/handlers/TestHandlerTests.cpp
@@ -219,7 +227,7 @@ if (tests)
# See https://github.com/google/googletest/issues/3475
gtest_discover_tests (clio_tests DISCOVERY_TIMEOUT 10)
# Fix for dwarf5 bug on ci
# Fix for dwarf5 bug on ci
target_compile_options (clio PUBLIC -gdwarf-4)
target_compile_definitions (${TEST_TARGET} PUBLIC UNITTEST_BUILD)
@@ -229,6 +237,7 @@ if (tests)
# Generate `clio_tests-ccov` if coverage is enabled
# Note: use `make clio_tests-ccov` to generate report
if (coverage)
target_compile_definitions(${TEST_TARGET} PRIVATE COVERAGE_ENABLED)
include (CMake/Coverage.cmake)
add_coverage (${TEST_TARGET})
endif ()

View File

@@ -224,16 +224,16 @@ a database in each region, and the Clio nodes in each region use their region's
This is effectively two systems.
Clio supports API versioning as [described here](https://xrpl.org/request-formatting.html#api-versioning).
It's possible to configure `minimum`, `maximum` and `default` version like so:
It's possible to configure `minimum`, `maximum` and `default` version like so:
```json
"api_version": {
"min": 1,
"max": 2,
"default": 2
"default": 1
}
```
All of the above are optional.
Clio will fallback to hardcoded defaults when not specified in the config file or configured values are outside
All of the above are optional.
Clio will fallback to hardcoded defaults when not specified in the config file or configured values are outside
of the minimum and maximum supported versions hardcoded in `src/rpc/common/APIVersion.h`.
> **Note:** See `example-config.json` for more details.

View File

@@ -24,7 +24,7 @@ class Clio(ConanFile):
'fmt/10.0.0',
'grpc/1.50.1',
'openssl/1.1.1u',
'xrpl/1.12.0-b2',
'xrpl/1.12.0',
]
default_options = {

View File

@@ -16,21 +16,12 @@
//
// Advanced options. USE AT OWN RISK:
// ---
"max_connections_per_host": 1, // Defaults to 2
"core_connections_per_host": 1, // Defaults to 2
"max_concurrent_requests_threshold": 55000 // Defaults to ((max_read + max_write) / core_connections_per_host)
"core_connections_per_host": 1 // Defaults to 1
//
// Below options will use defaults from cassandra driver if left unspecified.
// See https://docs.datastax.com/en/developer/cpp-driver/2.0/api/struct.CassCluster/ for details.
// See https://docs.datastax.com/en/developer/cpp-driver/2.17/api/struct.CassCluster/ for details.
//
// "queue_size_event": 1,
// "queue_size_io": 2,
// "write_bytes_high_water_mark": 3,
// "write_bytes_low_water_mark": 4,
// "pending_requests_high_water_mark": 5,
// "pending_requests_low_water_mark": 6,
// "max_requests_per_flush": 7,
// "max_concurrent_creation": 8
// "queue_size_io": 2
//
// ---
}
@@ -120,8 +111,8 @@
// "ssl_cert_file" : "/full/path/to/cert.file",
// "ssl_key_file" : "/full/path/to/key.file"
"api_version": {
"min": 2,
"max": 2,
"default": 2 // Clio only supports API v2 and newer
"min": 1, // Minimum API version supported (could be 1 or 2)
"max": 2, // Maximum API version supported (could be 1 or 2, but >= min)
"default": 1 // Clio behaves the same as rippled by default
}
}

View File

@@ -93,14 +93,15 @@ synchronous(FnType&& func)
if constexpr (!std::is_same<R, void>::value)
{
R res;
boost::asio::spawn(ctx, [&func, &res](auto yield) { res = func(yield); });
boost::asio::spawn(
ctx, [_ = boost::asio::make_work_guard(ctx), &func, &res](auto yield) { res = func(yield); });
ctx.run();
return res;
}
else
{
boost::asio::spawn(ctx, [&func](auto yield) { func(yield); });
boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func](auto yield) { func(yield); });
ctx.run();
}
}

View File

@@ -649,11 +649,11 @@ public:
return {};
}
std::vector<ripple::uint256> keys;
std::vector<ripple::uint256> resultKeys;
for (auto [key] : extract<ripple::uint256>(results))
keys.push_back(key);
resultKeys.push_back(key);
return keys;
return resultKeys;
});
// one of the above errors must have happened

View File

@@ -40,7 +40,7 @@ struct AccountTransactionsData
std::uint32_t transactionIndex;
ripple::uint256 txHash;
AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash, beast::Journal& j)
AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash)
: accounts(meta.getAffectedAccounts())
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())

View File

@@ -69,7 +69,7 @@ LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
if (!full_)
return {};
std::shared_lock{mtx_};
std::shared_lock lck{mtx_};
successorReqCounter_++;
if (seq != latestSeq_)
return {};
@@ -146,7 +146,7 @@ LedgerCache::getObjectHitRate() const
{
if (!objectReqCounter_)
return 1;
return ((float)objectHitCounter_) / objectReqCounter_;
return static_cast<float>(objectHitCounter_) / objectReqCounter_;
}
float
@@ -154,7 +154,7 @@ LedgerCache::getSuccessorHitRate() const
{
if (!successorReqCounter_)
return 1;
return ((float)successorHitCounter_) / successorReqCounter_;
return static_cast<float>(successorHitCounter_) / successorReqCounter_;
}
} // namespace data

View File

@@ -118,6 +118,8 @@ struct TransactionsCursor
{
}
TransactionsCursor(TransactionsCursor const&) = default;
TransactionsCursor&
operator=(TransactionsCursor const&) = default;

View File

@@ -116,22 +116,10 @@ SettingsProvider::parseSettings() const
config_.valueOr<uint32_t>("max_write_requests_outstanding", settings.maxWriteRequestsOutstanding);
settings.maxReadRequestsOutstanding =
config_.valueOr<uint32_t>("max_read_requests_outstanding", settings.maxReadRequestsOutstanding);
settings.maxConnectionsPerHost =
config_.valueOr<uint32_t>("max_connections_per_host", settings.maxConnectionsPerHost);
settings.coreConnectionsPerHost =
config_.valueOr<uint32_t>("core_connections_per_host", settings.coreConnectionsPerHost);
settings.maxConcurrentRequestsThreshold = config_.valueOr<uint32_t>(
"max_concurrent_requests_threshold",
(settings.maxReadRequestsOutstanding + settings.maxWriteRequestsOutstanding) / settings.coreConnectionsPerHost);
settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
settings.queueSizeEvent = config_.maybeValue<uint32_t>("queue_size_event");
settings.writeBytesHighWatermark = config_.maybeValue<uint32_t>("write_bytes_high_water_mark");
settings.writeBytesLowWatermark = config_.maybeValue<uint32_t>("write_bytes_low_water_mark");
settings.pendingRequestsHighWatermark = config_.maybeValue<uint32_t>("pending_requests_high_water_mark");
settings.pendingRequestsLowWatermark = config_.maybeValue<uint32_t>("pending_requests_low_water_mark");
settings.maxRequestsPerFlush = config_.maybeValue<uint32_t>("max_requests_per_flush");
settings.maxConcurrentCreation = config_.maybeValue<uint32_t>("max_concurrent_creation");
auto const connectTimeoutSecond = config_.maybeValue<uint32_t>("connect_timeout");
if (connectTimeoutSecond)

View File

@@ -64,19 +64,6 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), c
cass_cluster_set_connect_timeout(*this, settings.connectionTimeout.count());
cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());
if (auto const rc =
cass_cluster_set_max_concurrent_requests_threshold(*this, settings.maxConcurrentRequestsThreshold);
rc != CASS_OK)
{
throw std::runtime_error(
fmt::format("Could not set max concurrent requests per host threshold: {}", cass_error_desc(rc)));
}
if (auto const rc = cass_cluster_set_max_connections_per_host(*this, settings.maxConnectionsPerHost); rc != CASS_OK)
{
throw std::runtime_error(fmt::format("Could not set max connections per host: {}", cass_error_desc(rc)));
}
if (auto const rc = cass_cluster_set_core_connections_per_host(*this, settings.coreConnectionsPerHost);
rc != CASS_OK)
{
@@ -90,71 +77,13 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), c
throw std::runtime_error(fmt::format("Could not set queue size for IO per host: {}", cass_error_desc(rc)));
}
auto apply = []<typename ValueType, typename Fn>(
std::optional<ValueType> const& maybeValue, Fn&& fn) requires std::is_object_v<Fn>
{
if (maybeValue)
std::invoke(fn, maybeValue.value());
};
apply(settings.queueSizeEvent, [this](auto value) {
if (auto const rc = cass_cluster_set_queue_size_event(*this, value); rc != CASS_OK)
throw std::runtime_error(
fmt::format("Could not set queue size for events per host: {}", cass_error_desc(rc)));
});
apply(settings.writeBytesHighWatermark, [this](auto value) {
if (auto const rc = cass_cluster_set_write_bytes_high_water_mark(*this, value); rc != CASS_OK)
throw std::runtime_error(fmt::format("Could not set write bytes high water_mark: {}", cass_error_desc(rc)));
});
apply(settings.writeBytesLowWatermark, [this](auto value) {
if (auto const rc = cass_cluster_set_write_bytes_low_water_mark(*this, value); rc != CASS_OK)
throw std::runtime_error(fmt::format("Could not set write bytes low water mark: {}", cass_error_desc(rc)));
});
apply(settings.pendingRequestsHighWatermark, [this](auto value) {
if (auto const rc = cass_cluster_set_pending_requests_high_water_mark(*this, value); rc != CASS_OK)
throw std::runtime_error(
fmt::format("Could not set pending requests high water mark: {}", cass_error_desc(rc)));
});
apply(settings.pendingRequestsLowWatermark, [this](auto value) {
if (auto const rc = cass_cluster_set_pending_requests_low_water_mark(*this, value); rc != CASS_OK)
throw std::runtime_error(
fmt::format("Could not set pending requests low water mark: {}", cass_error_desc(rc)));
});
apply(settings.maxRequestsPerFlush, [this](auto value) {
if (auto const rc = cass_cluster_set_max_requests_per_flush(*this, value); rc != CASS_OK)
throw std::runtime_error(fmt::format("Could not set max requests per flush: {}", cass_error_desc(rc)));
});
apply(settings.maxConcurrentCreation, [this](auto value) {
if (auto const rc = cass_cluster_set_max_concurrent_creation(*this, value); rc != CASS_OK)
throw std::runtime_error(fmt::format("Could not set max concurrent creation: {}", cass_error_desc(rc)));
});
setupConnection(settings);
setupCertificate(settings);
setupCredentials(settings);
auto valueOrDefault = []<typename T>(std::optional<T> const& maybeValue) -> std::string {
return maybeValue ? to_string(*maybeValue) : "default";
};
LOG(log_.info()) << "Threads: " << settings.threads;
LOG(log_.info()) << "Max concurrent requests per host: " << settings.maxConcurrentRequestsThreshold;
LOG(log_.info()) << "Max connections per host: " << settings.maxConnectionsPerHost;
LOG(log_.info()) << "Core connections per host: " << settings.coreConnectionsPerHost;
LOG(log_.info()) << "IO queue size: " << queueSize;
LOG(log_.info()) << "Event queue size: " << valueOrDefault(settings.queueSizeEvent);
LOG(log_.info()) << "Write bytes high watermark: " << valueOrDefault(settings.writeBytesHighWatermark);
LOG(log_.info()) << "Write bytes low watermark: " << valueOrDefault(settings.writeBytesLowWatermark);
LOG(log_.info()) << "Pending requests high watermark: " << valueOrDefault(settings.pendingRequestsHighWatermark);
LOG(log_.info()) << "Pending requests low watermark: " << valueOrDefault(settings.pendingRequestsLowWatermark);
LOG(log_.info()) << "Max requests per flush: " << valueOrDefault(settings.maxRequestsPerFlush);
LOG(log_.info()) << "Max concurrent creation: " << valueOrDefault(settings.maxConcurrentCreation);
}
void

View File

@@ -46,7 +46,7 @@ struct Settings
struct ContactPoints
{
std::string contactPoints = "127.0.0.1"; // defaults to localhost
std::optional<uint16_t> port;
std::optional<uint16_t> port = {};
};
/**
@@ -73,45 +73,17 @@ struct Settings
uint32_t threads = std::thread::hardware_concurrency();
/** @brief The maximum number of outstanding write requests at any given moment */
uint32_t maxWriteRequestsOutstanding = 10'000;
uint32_t maxWriteRequestsOutstanding = 10'000u;
/** @brief The maximum number of outstanding read requests at any given moment */
uint32_t maxReadRequestsOutstanding = 100'000;
/** @brief The maximum number of connections per host */
uint32_t maxConnectionsPerHost = 2u;
uint32_t maxReadRequestsOutstanding = 100'000u;
/** @brief The number of connection per host to always have active */
uint32_t coreConnectionsPerHost = 2u;
/** @brief The maximum concurrent requests per connection; new connections will be created when reached */
uint32_t maxConcurrentRequestsThreshold =
(maxWriteRequestsOutstanding + maxReadRequestsOutstanding) / coreConnectionsPerHost;
/** @brief Size of the event queue */
std::optional<uint32_t> queueSizeEvent;
uint32_t coreConnectionsPerHost = 1u;
/** @brief Size of the IO queue */
std::optional<uint32_t> queueSizeIO;
/** @brief High watermark for bytes written */
std::optional<uint32_t> writeBytesHighWatermark;
/** @brief Low watermark for bytes written */
std::optional<uint32_t> writeBytesLowWatermark;
/** @brief High watermark for pending requests */
std::optional<uint32_t> pendingRequestsHighWatermark;
/** @brief Low watermark for pending requests */
std::optional<uint32_t> pendingRequestsLowWatermark;
/** @brief Maximum number of requests per flush */
std::optional<uint32_t> maxRequestsPerFlush;
/** @brief Maximum number of connections that will be created concurrently */
std::optional<uint32_t> maxConcurrentCreation;
/** @brief SSL certificate */
std::optional<std::string> certificate; // ssl context

View File

@@ -235,21 +235,15 @@ public:
while (true)
{
numReadRequestsOutstanding_ += numStatements;
// TODO: see if we can avoid using shared_ptr for self here
auto init = [this, &statements, &future]<typename Self>(Self& self) {
future.emplace(handle_.get().asyncExecute(
statements, [sself = std::make_shared<Self>(std::move(self))](auto&& res) mutable {
// Note: explicit work below needed on linux/gcc11
auto executor = boost::asio::get_associated_executor(*sself);
boost::asio::post(
executor,
[sself = std::move(sself),
res = std::move(res),
_ = boost::asio::make_work_guard(executor)]() mutable {
sself->complete(std::move(res));
sself.reset();
});
}));
auto sself = std::make_shared<Self>(std::move(self));
future.emplace(handle_.get().asyncExecute(statements, [sself](auto&& res) mutable {
boost::asio::post(
boost::asio::get_associated_executor(*sself),
[sself, res = std::move(res)]() mutable { sself->complete(std::move(res)); });
}));
};
auto res = boost::asio::async_compose<CompletionTokenType, void(ResultOrErrorType)>(
@@ -287,25 +281,21 @@ public:
while (true)
{
++numReadRequestsOutstanding_;
// TODO: see if we can avoid using shared_ptr for self here
auto init = [this, &statement, &future]<typename Self>(Self& self) {
future.emplace(handle_.get().asyncExecute(
statement, [sself = std::make_shared<Self>(std::move(self))](auto&&) mutable {
// Note: explicit work below needed on linux/gcc11
auto executor = boost::asio::get_associated_executor(*sself);
boost::asio::post(
executor, [sself = std::move(sself), _ = boost::asio::make_work_guard(executor)]() mutable {
sself->complete();
sself.reset();
});
}));
auto sself = std::make_shared<Self>(std::move(self));
future.emplace(handle_.get().asyncExecute(statement, [sself](auto&& res) mutable {
boost::asio::post(
boost::asio::get_associated_executor(*sself),
[sself, res = std::move(res)]() mutable { sself->complete(std::move(res)); });
}));
};
boost::asio::async_compose<CompletionTokenType, void()>(
auto res = boost::asio::async_compose<CompletionTokenType, void(ResultOrErrorType)>(
init, token, boost::asio::get_associated_executor(token));
--numReadRequestsOutstanding_;
if (auto res = future->get(); res)
if (res)
{
return res;
}
@@ -339,22 +329,15 @@ public:
futures.reserve(numOutstanding);
auto init = [this, &statements, &futures, &hadError, &numOutstanding]<typename Self>(Self& self) {
auto sself = std::make_shared<Self>(std::move(self)); // TODO: see if we can avoid this
auto executionHandler = [&hadError, &numOutstanding, sself = std::move(sself)](auto const& res) mutable {
auto sself = std::make_shared<Self>(std::move(self));
auto executionHandler = [&hadError, &numOutstanding, sself](auto const& res) mutable {
if (not res)
hadError = true;
// when all async operations complete unblock the result
if (--numOutstanding == 0)
{
// Note: explicit work below needed on linux/gcc11
auto executor = boost::asio::get_associated_executor(*sself);
boost::asio::post(
executor, [sself = std::move(sself), _ = boost::asio::make_work_guard(executor)]() mutable {
sself->complete();
sself.reset();
});
}
boost::asio::get_associated_executor(*sself), [sself]() mutable { sself->complete(); });
};
std::transform(

View File

@@ -73,7 +73,10 @@ void
invokeHelper(CassFuture* ptr, void* cbPtr)
{
// Note: can't use Future{ptr}.get() because double free will occur :/
// Note2: we are moving/copying it locally as a workaround for an issue we are seeing from asio recently.
// stackoverflow.com/questions/77004137/boost-asio-async-compose-gets-stuck-under-load
auto* cb = static_cast<FutureWithCallback::FnType*>(cbPtr);
auto local = std::make_unique<FutureWithCallback::FnType>(std::move(*cb));
if (auto const rc = cass_future_error_code(ptr); rc)
{
auto const errMsg = [&ptr](std::string const& label) {
@@ -82,11 +85,11 @@ invokeHelper(CassFuture* ptr, void* cbPtr)
cass_future_error_message(ptr, &message, &len);
return label + ": " + std::string{message, len};
}("invokeHelper");
(*cb)(Error{CassandraError{errMsg, rc}});
(*local)(Error{CassandraError{errMsg, rc}});
}
else
{
(*cb)(Result{cass_future_get_result(ptr)});
(*local)(Result{cass_future_get_result(ptr)});
}
}

View File

@@ -38,7 +38,7 @@ public:
}
ManagedObject(ManagedObject&&) = default;
operator Managed* const() const
operator Managed*() const
{
return ptr_.get();
}

View File

@@ -47,7 +47,8 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
extractors.push_back(std::make_unique<ExtractorType>(
pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_));
auto transformer = TransformerType{pipe, backend_, ledgerLoader_, ledgerPublisher_, startSequence, state_};
auto transformer =
TransformerType{pipe, backend_, ledgerLoader_, ledgerPublisher_, amendmentBlockHandler_, startSequence, state_};
transformer.waitTillFinished(); // suspend current thread until exit condition is met
pipe.cleanup(); // TODO: this should probably happen automatically using destructor
@@ -110,12 +111,8 @@ ETLService::monitor()
}
catch (std::runtime_error const& e)
{
setAmendmentBlocked();
log_.fatal()
<< "Failed to load initial ledger, Exiting monitor loop: " << e.what()
<< " Possible cause: The ETL node is not compatible with the version of the rippled lib Clio is using.";
return;
LOG(log_.fatal()) << "Failed to load initial ledger: " << e.what();
return amendmentBlockHandler_.onAmendmentBlock();
}
if (ledger)
@@ -145,43 +142,50 @@ ETLService::monitor()
while (true)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence)
nextSequence = publishNextSequence(nextSequence);
}
}
uint32_t
ETLService::publishNextSequence(uint32_t nextSequence)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence)
{
ledgerPublisher_.publish(nextSequence, {});
++nextSequence;
}
else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, 1000))
{
LOG(log_.info()) << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
<< "Attempting to find in database and publish";
// Attempt to take over responsibility of ETL writer after 10 failed
// attempts to publish the ledger. publishLedger() fails if the
// ledger that has been validated by the network is not found in the
// database after the specified number of attempts. publishLedger()
// waits one second between each attempt to read the ledger from the
// database
constexpr size_t timeoutSeconds = 10;
bool success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);
if (!success)
{
LOG(log_.warn()) << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL";
// returns the most recent sequence published empty optional if no sequence was published
std::optional<uint32_t> lastPublished = runETLPipeline(nextSequence, extractorThreads_);
LOG(log_.info()) << "Aborting ETL. Falling back to publishing";
// if no ledger was published, don't increment nextSequence
if (lastPublished)
nextSequence = *lastPublished + 1;
}
else
{
ledgerPublisher_.publish(nextSequence, {});
++nextSequence;
}
else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, 1000))
{
LOG(log_.info()) << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
<< "Attempting to find in database and publish";
// Attempt to take over responsibility of ETL writer after 10 failed
// attempts to publish the ledger. publishLedger() fails if the
// ledger that has been validated by the network is not found in the
// database after the specified number of attempts. publishLedger()
// waits one second between each attempt to read the ledger from the
// database
constexpr size_t timeoutSeconds = 10;
bool success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);
if (!success)
{
LOG(log_.warn()) << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL";
// returns the most recent sequence published empty optional if no sequence was published
std::optional<uint32_t> lastPublished = runETLPipeline(nextSequence, extractorThreads_);
LOG(log_.info()) << "Aborting ETL. Falling back to publishing";
// if no ledger was published, don't increment nextSequence
if (lastPublished)
nextSequence = *lastPublished + 1;
}
else
{
++nextSequence;
}
}
}
return nextSequence;
}
void
@@ -189,21 +193,29 @@ ETLService::monitorReadOnly()
{
LOG(log_.debug()) << "Starting reporting in strict read only mode";
auto rng = backend_->hardFetchLedgerRangeNoThrow();
uint32_t latestSequence;
const auto latestSequenceOpt = [this]() -> std::optional<uint32_t> {
auto rng = backend_->hardFetchLedgerRangeNoThrow();
if (!rng)
{
if (auto net = networkValidatedLedgers_->getMostRecent())
latestSequence = *net;
if (!rng)
{
if (auto net = networkValidatedLedgers_->getMostRecent())
return *net;
else
return std::nullopt;
}
else
return;
}
else
{
return rng->maxSequence;
}
}();
if (!latestSequenceOpt.has_value())
{
latestSequence = rng->maxSequence;
return;
}
uint32_t latestSequence = *latestSequenceOpt;
cacheLoader_.load(latestSequence);
latestSequence++;
@@ -259,6 +271,7 @@ ETLService::ETLService(
, ledgerFetcher_(backend, balancer)
, ledgerLoader_(backend, balancer, ledgerFetcher_, state_)
, ledgerPublisher_(ioc, backend, subscriptions, state_)
, amendmentBlockHandler_(ioc, state_)
{
startSequence_ = config.maybeValue<uint32_t>("start_sequence");
finishSequence_ = config.maybeValue<uint32_t>("finish_sequence");

View File

@@ -24,6 +24,7 @@
#include <etl/LoadBalancer.h>
#include <etl/Source.h>
#include <etl/SystemState.h>
#include <etl/impl/AmendmentBlock.h>
#include <etl/impl/CacheLoader.h>
#include <etl/impl/ExtractionDataPipe.h>
#include <etl/impl/Extractor.h>
@@ -35,6 +36,7 @@
#include <util/log/Logger.h>
#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <boost/asio/steady_timer.hpp>
#include <grpcpp/grpcpp.h>
#include <memory>
@@ -76,7 +78,9 @@ class ETLService
using ExtractorType = etl::detail::Extractor<DataPipeType, NetworkValidatedLedgersType, LedgerFetcherType>;
using LedgerLoaderType = etl::detail::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
using LedgerPublisherType = etl::detail::LedgerPublisher<SubscriptionManagerType>;
using TransformerType = etl::detail::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType>;
using AmendmentBlockHandlerType = etl::detail::AmendmentBlockHandler<>;
using TransformerType =
etl::detail::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType, AmendmentBlockHandlerType>;
util::Logger log_{"ETL"};
@@ -91,6 +95,7 @@ class ETLService
LedgerFetcherType ledgerFetcher_;
LedgerLoaderType ledgerLoader_;
LedgerPublisherType ledgerPublisher_;
AmendmentBlockHandlerType amendmentBlockHandler_;
SystemState state_;
@@ -225,6 +230,15 @@ private:
void
monitor();
/**
* @brief Monitor the network for newly validated ledgers and publish them to the ledgers stream
*
* @param nextSequence the ledger sequence to publish
* @return the next ledger sequence to publish
*/
uint32_t
publishNextSequence(uint32_t nextSequence);
/**
* @brief Monitor the database for newly written ledgers.
*
@@ -267,14 +281,5 @@ private:
*/
void
doWork();
/**
* @brief Sets amendment blocked flag.
*/
void
setAmendmentBlocked()
{
state_.isAmendmentBlocked = true;
}
};
} // namespace etl

View File

@@ -147,7 +147,7 @@ LoadBalancer::forwardToRippled(
std::string const& clientIp,
boost::asio::yield_context yield) const
{
srand((unsigned)time(0));
srand(static_cast<unsigned>(time(0)));
auto sourceIdx = rand() % sources_.size();
auto numAttempts = 0u;
@@ -193,7 +193,7 @@ template <class Func>
bool
LoadBalancer::execute(Func f, uint32_t ledgerSequence)
{
srand((unsigned)time(0));
srand(static_cast<unsigned>(time(0)));
auto sourceIdx = rand() % sources_.size();
auto numAttempts = 0;

View File

@@ -160,7 +160,7 @@ ProbingSource::make_SSLHooks() noexcept
return SourceHooks::Action::PROCEED;
},
// onDisconnected
[this](auto ec) {
[this](auto /* ec */) {
std::lock_guard lck(mtx_);
if (currentSrc_)
{
@@ -189,7 +189,7 @@ ProbingSource::make_PlainHooks() noexcept
return SourceHooks::Action::PROCEED;
},
// onDisconnected
[this](auto ec) {
[this](auto /* ec */) {
std::lock_guard lck(mtx_);
if (currentSrc_)
{

View File

@@ -36,10 +36,18 @@ struct SystemState
*/
bool isReadOnly = false;
std::atomic_bool isWriting = false; /**< @brief Whether the process is writing to the database. */
std::atomic_bool isStopping = false; /**< @brief Whether the software is stopping. */
std::atomic_bool writeConflict = false; /**< @brief Whether a write conflict was detected. */
std::atomic_bool isAmendmentBlocked = false; /**< @brief Whether we detected an amendment block. */
std::atomic_bool isWriting = false; /**< @brief Whether the process is writing to the database. */
std::atomic_bool isStopping = false; /**< @brief Whether the software is stopping. */
std::atomic_bool writeConflict = false; /**< @brief Whether a write conflict was detected. */
/**
* @brief Whether clio detected an amendment block.
*
* Being amendment blocked means that Clio was compiled with libxrpl that does not yet support some field that
* arrived from rippled and therefore can't extract the ledger diff. When this happens, Clio can't proceed with ETL
* and should log this error and only handle RPC requests.
*/
std::atomic_bool isAmendmentBlocked = false;
};
} // namespace etl

View File

@@ -0,0 +1,96 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <etl/SystemState.h>
#include <util/log/Logger.h>
#include <boost/asio/io_context.hpp>
#include <boost/asio/steady_timer.hpp>
#include <chrono>
#include <functional>
namespace etl::detail {
struct AmendmentBlockAction
{
void
operator()()
{
static util::Logger log{"ETL"};
LOG(log.fatal())
<< "Can't process new ledgers: The current ETL source is not compatible with the version of the "
"libxrpl Clio is currently using. Please upgrade Clio to a newer version.";
}
};
template <typename ActionCallableType = AmendmentBlockAction>
class AmendmentBlockHandler
{
std::reference_wrapper<boost::asio::io_context> ctx_;
std::reference_wrapper<SystemState> state_;
boost::asio::steady_timer timer_;
std::chrono::milliseconds interval_;
ActionCallableType action_;
public:
template <typename DurationType = std::chrono::seconds>
AmendmentBlockHandler(
boost::asio::io_context& ioc,
SystemState& state,
DurationType interval = DurationType{1},
ActionCallableType&& action = ActionCallableType())
: ctx_{std::ref(ioc)}
, state_{std::ref(state)}
, timer_{ioc}
, interval_{std::chrono::duration_cast<std::chrono::milliseconds>(interval)}
, action_{std::move(action)}
{
}
~AmendmentBlockHandler()
{
boost::asio::post(ctx_.get(), [this]() { timer_.cancel(); });
}
void
onAmendmentBlock()
{
state_.get().isAmendmentBlocked = true;
startReportingTimer();
}
private:
void
startReportingTimer()
{
action_();
timer_.expires_after(interval_);
timer_.async_wait([this](auto ec) {
if (!ec)
boost::asio::post(ctx_.get(), [this] { startReportingTimer(); });
});
}
};
} // namespace etl::detail

View File

@@ -128,7 +128,7 @@ public:
auto& obj = *(cur_->mutable_ledger_objects()->mutable_objects(i));
if (!more && nextPrefix_ != 0x00)
{
if (((unsigned char)obj.key()[0]) >= nextPrefix_)
if (static_cast<unsigned char>(obj.key()[0]) >= nextPrefix_)
continue;
}
cacheUpdates.push_back(

View File

@@ -68,6 +68,7 @@ class CacheLoader
std::vector<ClioPeer> clioPeers_;
std::thread thread_;
std::atomic_bool stopping_ = false;
public:
@@ -115,6 +116,8 @@ public:
~CacheLoader()
{
stop();
if (thread_.joinable())
thread_.join();
}
/**
@@ -367,7 +370,7 @@ private:
LOG(log_.info()) << "Loading cache. num cursors = " << cursors.size() - 1;
LOG(log_.trace()) << "cursors = " << cursorStr.str();
boost::asio::post(ioContext_.get(), [this, seq, cursors = std::move(cursors)]() {
thread_ = std::thread{[this, seq, cursors = std::move(cursors)]() {
auto startTime = std::chrono::system_clock::now();
auto markers = std::make_shared<std::atomic_int>(0);
auto numRemaining = std::make_shared<std::atomic_int>(cursors.size() - 1);
@@ -425,7 +428,7 @@ private:
}
});
}
});
}};
}
};

View File

@@ -107,9 +107,8 @@ public:
if (maybeNFT)
result.nfTokensData.push_back(*maybeNFT);
auto journal = ripple::debugLog();
result.accountTxData.emplace_back(txMeta, sttx.getTransactionID(), journal);
std::string keyStr{(const char*)sttx.getTransactionID().data(), 32};
result.accountTxData.emplace_back(txMeta, sttx.getTransactionID());
std::string keyStr{reinterpret_cast<const char*>(sttx.getTransactionID().data()), 32};
backend_->writeTransaction(
std::move(keyStr),
ledger.seq,

View File

@@ -171,6 +171,16 @@ public:
subscriptions_->pubLedger(lgrInfo, *fees, range, transactions.size());
// order with transaction index
std::sort(transactions.begin(), transactions.end(), [](auto const& t1, auto const& t2) {
ripple::SerialIter iter1{t1.metadata.data(), t1.metadata.size()};
ripple::STObject const object1(iter1, ripple::sfMetadata);
ripple::SerialIter iter2{t2.metadata.data(), t2.metadata.size()};
ripple::STObject const object2(iter2, ripple::sfMetadata);
return object1.getFieldU32(ripple::sfTransactionIndex) <
object2.getFieldU32(ripple::sfTransactionIndex);
});
for (auto& txAndMeta : transactions)
subscriptions_->pubTransaction(txAndMeta, lgrInfo);

View File

@@ -21,6 +21,7 @@
#include <data/BackendInterface.h>
#include <etl/SystemState.h>
#include <etl/impl/AmendmentBlock.h>
#include <etl/impl/LedgerLoader.h>
#include <util/LedgerUtils.h>
#include <util/Profiler.h>
@@ -47,7 +48,11 @@ namespace etl::detail {
/**
* @brief Transformer thread that prepares new ledger out of raw data from GRPC.
*/
template <typename DataPipeType, typename LedgerLoaderType, typename LedgerPublisherType>
template <
typename DataPipeType,
typename LedgerLoaderType,
typename LedgerPublisherType,
typename AmendmentBlockHandlerType>
class Transformer
{
using GetLedgerResponseType = typename LedgerLoaderType::GetLedgerResponseType;
@@ -59,6 +64,8 @@ class Transformer
std::shared_ptr<BackendInterface> backend_;
std::reference_wrapper<LedgerLoaderType> loader_;
std::reference_wrapper<LedgerPublisherType> publisher_;
std::reference_wrapper<AmendmentBlockHandlerType> amendmentBlockHandler_;
uint32_t startSequence_;
std::reference_wrapper<SystemState> state_; // shared state for ETL
@@ -76,12 +83,14 @@ public:
std::shared_ptr<BackendInterface> backend,
LedgerLoaderType& loader,
LedgerPublisherType& publisher,
AmendmentBlockHandlerType& amendmentBlockHandler,
uint32_t startSequence,
SystemState& state)
: pipe_(std::ref(pipe))
: pipe_{std::ref(pipe)}
, backend_{backend}
, loader_(std::ref(loader))
, publisher_(std::ref(publisher))
, loader_{std::ref(loader)}
, publisher_{std::ref(publisher)}
, amendmentBlockHandler_{std::ref(amendmentBlockHandler)}
, startSequence_{startSequence}
, state_{std::ref(state)}
{
@@ -185,11 +194,9 @@ private:
}
catch (std::runtime_error const& e)
{
setAmendmentBlocked();
LOG(log_.fatal()) << "Failed to build next ledger: " << e.what();
log_.fatal()
<< "Failed to build next ledger: " << e.what()
<< " Possible cause: The ETL node is not compatible with the version of the rippled lib Clio is using.";
amendmentBlockHandler_.get().onAmendmentBlock();
return {ripple::LedgerHeader{}, false};
}
@@ -238,7 +245,7 @@ private:
LOG(log_.debug()) << "object neighbors not included. using cache";
if (!backend_->cache().isFull() || backend_->cache().latestLedgerSequence() != lgrInfo.seq - 1)
throw std::runtime_error("Cache is not full, but object neighbors were not included");
throw std::logic_error("Cache is not full, but object neighbors were not included");
auto const blob = obj.mutable_data();
auto checkBookBase = false;
@@ -288,7 +295,7 @@ private:
{
LOG(log_.debug()) << "object neighbors not included. using cache";
if (!backend_->cache().isFull() || backend_->cache().latestLedgerSequence() != lgrInfo.seq)
throw std::runtime_error("Cache is not full, but object neighbors were not included");
throw std::logic_error("Cache is not full, but object neighbors were not included");
for (auto const& obj : cacheUpdates)
{
@@ -423,19 +430,6 @@ private:
{
state_.get().writeConflict = conflict;
}
/**
* @brief Sets the amendment blocked flag.
*
* Being amendment blocked means that Clio was compiled with libxrpl that does not yet support some field that
* arrived from rippled and therefore can't extract the ledger diff. When this happens, Clio can't proceed with ETL
* and should log this error and only handle RPC requests.
*/
void
setAmendmentBlocked()
{
state_.get().isAmendmentBlocked = true;
}
};
} // namespace etl::detail

View File

@@ -35,6 +35,12 @@ Subscription::unsubscribe(SessionPtrType const& session)
boost::asio::post(strand_, [this, session]() { removeSession(session, subscribers_, subCount_); });
}
bool
Subscription::hasSession(SessionPtrType const& session)
{
return subscribers_.contains(session);
}
void
Subscription::publish(std::shared_ptr<std::string> const& message)
{
@@ -334,6 +340,8 @@ SubscriptionManager::unsubProposedTransactions(SessionPtrType session)
void
SubscriptionManager::subscribeHelper(SessionPtrType const& session, Subscription& subs, CleanupFunction&& func)
{
if (subs.hasSession(session))
return;
subs.subscribe(session);
std::scoped_lock lk(cleanupMtx_);
cleanupFuncs_[session].push_back(std::move(func));
@@ -347,6 +355,8 @@ SubscriptionManager::subscribeHelper(
SubscriptionMap<Key>& subs,
CleanupFunction&& func)
{
if (subs.hasSession(session, k))
return;
subs.subscribe(session, k);
std::scoped_lock lk(cleanupMtx_);
cleanupFuncs_[session].push_back(std::move(func));

View File

@@ -139,6 +139,15 @@ public:
void
unsubscribe(SessionPtrType const& session);
/**
* @brief Check if a session has been in subscribers list.
*
* @param session The session to check
* @return true if the session is in the subscribers list; false otherwise
*/
bool
hasSession(SessionPtrType const& session);
/**
* @brief Sends the given message to all subscribers.
*
@@ -232,6 +241,22 @@ public:
});
}
/**
* @brief Check if a session has been in subscribers list.
*
* @param session The session to check
* @param key The key for the subscription to check
* @return true if the session is in the subscribers list; false otherwise
*/
bool
hasSession(SessionPtrType const& session, Key const& key)
{
if (!subscribers_.contains(key))
return false;
return subscribers_[key].contains(session);
}
/**
* @brief Sends the given message to all subscribers.
*

View File

@@ -206,7 +206,7 @@ try
auto const handlerProvider = std::make_shared<rpc::detail::ProductionHandlerProvider const>(
config, backend, subscriptions, balancer, etl, counters);
auto const rpcEngine = rpc::RPCEngine::make_RPCEngine(
config, backend, subscriptions, balancer, etl, dosGuard, workQueue, counters, handlerProvider);
backend, subscriptions, balancer, dosGuard, workQueue, counters, handlerProvider);
// Init the web server
auto handler = std::make_shared<web::RPCServerHandler<rpc::RPCEngine, etl::ETLService>>(

View File

@@ -199,6 +199,7 @@ private:
case ripple::ttOFFER_CREATE:
if (tx->isFieldPresent(ripple::sfOfferSequence))
return tx->getFieldU32(ripple::sfOfferSequence);
[[fallthrough]];
default:
return std::nullopt;
}

View File

@@ -35,8 +35,6 @@ make_WsContext(
string const& clientIp,
std::reference_wrapper<APIVersionParser const> apiVersionParser)
{
using Error = Unexpected<Status>;
boost::json::value commandValue = nullptr;
if (!request.contains("command") && request.contains("method"))
commandValue = request.at("method");
@@ -63,8 +61,6 @@ make_HttpContext(
string const& clientIp,
std::reference_wrapper<APIVersionParser const> apiVersionParser)
{
using Error = Unexpected<Status>;
if (!request.contains("method"))
return Error{{ClioError::rpcCOMMAND_IS_MISSING}};

View File

@@ -19,10 +19,15 @@
#pragma once
#include <util/JsonUtils.h>
#include <ripple/protocol/jss.h>
/** @brief Helper macro for borrowing from ripple::jss static (J)son (S)trings. */
#define JS(x) ripple::jss::x.c_str()
/** @brief Access the lower case copy of a static (J)son (S)tring. */
#define JSL(x) util::toLower(JS(x))
/** @brief Provides access to (SF)ield name (S)trings. */
#define SFS(x) ripple::x.jsonName.c_str()

View File

@@ -83,7 +83,6 @@ public:
std::shared_ptr<BackendInterface> const& backend,
std::shared_ptr<feed::SubscriptionManager> const& subscriptions,
std::shared_ptr<etl::LoadBalancer> const& balancer,
std::shared_ptr<etl::ETLService> const& etl,
web::DOSGuard const& dosGuard,
WorkQueue& workQueue,
Counters& counters,
@@ -101,18 +100,16 @@ public:
static std::shared_ptr<RPCEngineBase>
make_RPCEngine(
util::Config const& config,
std::shared_ptr<BackendInterface> const& backend,
std::shared_ptr<feed::SubscriptionManager> const& subscriptions,
std::shared_ptr<etl::LoadBalancer> const& balancer,
std::shared_ptr<etl::ETLService> const& etl,
web::DOSGuard const& dosGuard,
WorkQueue& workQueue,
Counters& counters,
std::shared_ptr<HandlerProvider const> const& handlerProvider)
{
return std::make_shared<RPCEngineBase>(
backend, subscriptions, balancer, etl, dosGuard, workQueue, counters, handlerProvider);
backend, subscriptions, balancer, dosGuard, workQueue, counters, handlerProvider);
}
/**
@@ -153,11 +150,9 @@ public:
if (v)
return v->as_object();
else
{
notifyErrored(ctx.method);
return Status{v.error()};
}
notifyErrored(ctx.method);
return Status{v.error()};
}
catch (data::DatabaseTimeout const& t)
{

View File

@@ -33,6 +33,7 @@
// local to compilation unit loggers
namespace {
util::Logger gLog{"RPC"};
} // namespace
namespace rpc {
@@ -141,6 +142,7 @@ accountFromStringStrict(std::string const& account)
else
return {};
}
std::pair<std::shared_ptr<ripple::STTx const>, std::shared_ptr<ripple::STObject const>>
deserializeTxPlusMeta(data::TransactionAndMetadata const& blobs)
{
@@ -564,10 +566,10 @@ traverseOwnedNodes(
if (!hintDir)
return Status(ripple::rpcINVALID_PARAMS, "Invalid marker.");
ripple::SerialIter it{hintDir->data(), hintDir->size()};
ripple::SLE sle{it, hintIndex.key};
ripple::SerialIter hintDirIt{hintDir->data(), hintDir->size()};
ripple::SLE hintDirSle{hintDirIt, hintIndex.key};
if (auto const& indexes = sle.getFieldV256(ripple::sfIndexes);
if (auto const& indexes = hintDirSle.getFieldV256(ripple::sfIndexes);
std::find(std::begin(indexes), std::end(indexes), hexMarker) == std::end(indexes))
{
// the index specified by marker is not in the page specified by marker
@@ -583,10 +585,10 @@ traverseOwnedNodes(
if (!ownerDir)
return Status(ripple::rpcINVALID_PARAMS, "Owner directory not found.");
ripple::SerialIter it{ownerDir->data(), ownerDir->size()};
ripple::SLE sle{it, currentIndex.key};
ripple::SerialIter ownedDirIt{ownerDir->data(), ownerDir->size()};
ripple::SLE ownedDirSle{ownedDirIt, currentIndex.key};
for (auto const& key : sle.getFieldV256(ripple::sfIndexes))
for (auto const& key : ownedDirSle.getFieldV256(ripple::sfIndexes))
{
if (!found)
{
@@ -610,7 +612,7 @@ traverseOwnedNodes(
break;
}
// the next page
auto const uNodeNext = sle.getFieldU64(ripple::sfIndexNext);
auto const uNodeNext = ownedDirSle.getFieldU64(ripple::sfIndexNext);
if (uNodeNext == 0)
break;
@@ -627,10 +629,10 @@ traverseOwnedNodes(
if (!ownerDir)
break;
ripple::SerialIter it{ownerDir->data(), ownerDir->size()};
ripple::SLE sle{it, currentIndex.key};
ripple::SerialIter ownedDirIt{ownerDir->data(), ownerDir->size()};
ripple::SLE ownedDirSle{ownedDirIt, currentIndex.key};
for (auto const& key : sle.getFieldV256(ripple::sfIndexes))
for (auto const& key : ownedDirSle.getFieldV256(ripple::sfIndexes))
{
keys.push_back(key);
@@ -644,7 +646,7 @@ traverseOwnedNodes(
break;
}
auto const uNodeNext = sle.getFieldU64(ripple::sfIndexNext);
auto const uNodeNext = ownedDirSle.getFieldU64(ripple::sfIndexNext);
if (uNodeNext == 0)
break;
@@ -654,8 +656,10 @@ traverseOwnedNodes(
}
auto end = std::chrono::system_clock::now();
LOG(gLog.debug()) << "Time loading owned directories: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << " milliseconds";
LOG(gLog.debug()) << fmt::format(
"Time loading owned directories: {} milliseconds, entries size: {}",
std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count(),
keys.size());
auto [objects, timeDiff] = util::timed([&]() { return backend.fetchLedgerObjects(keys, sequence, yield); });
@@ -1145,21 +1149,15 @@ parseBook(ripple::Currency pays, ripple::AccountID payIssuer, ripple::Currency g
{
if (isXRP(pays) && !isXRP(payIssuer))
return Status{
RippledError::rpcSRC_ISR_MALFORMED,
"Unneeded field 'taker_pays.issuer' for XRP currency "
"specification."};
RippledError::rpcSRC_ISR_MALFORMED, "Unneeded field 'taker_pays.issuer' for XRP currency specification."};
if (!isXRP(pays) && isXRP(payIssuer))
return Status{
RippledError::rpcSRC_ISR_MALFORMED,
"Invalid field 'taker_pays.issuer', expected non-XRP "
"issuer."};
RippledError::rpcSRC_ISR_MALFORMED, "Invalid field 'taker_pays.issuer', expected non-XRP issuer."};
if (ripple::isXRP(gets) && !ripple::isXRP(getIssuer))
return Status{
RippledError::rpcDST_ISR_MALFORMED,
"Unneeded field 'taker_gets.issuer' for XRP currency "
"specification."};
RippledError::rpcDST_ISR_MALFORMED, "Unneeded field 'taker_gets.issuer' for XRP currency specification."};
if (!ripple::isXRP(gets) && ripple::isXRP(getIssuer))
return Status{
@@ -1229,15 +1227,11 @@ parseBook(boost::json::object const& request)
if (isXRP(pay_currency) && !isXRP(pay_issuer))
return Status{
RippledError::rpcSRC_ISR_MALFORMED,
"Unneeded field 'taker_pays.issuer' for XRP currency "
"specification."};
RippledError::rpcSRC_ISR_MALFORMED, "Unneeded field 'taker_pays.issuer' for XRP currency specification."};
if (!isXRP(pay_currency) && isXRP(pay_issuer))
return Status{
RippledError::rpcSRC_ISR_MALFORMED,
"Invalid field 'taker_pays.issuer', expected non-XRP "
"issuer."};
RippledError::rpcSRC_ISR_MALFORMED, "Invalid field 'taker_pays.issuer', expected non-XRP issuer."};
if ((!isXRP(pay_currency)) && (!taker_pays.contains("issuer")))
return Status{RippledError::rpcSRC_ISR_MALFORMED, "Missing non-XRP issuer."};
@@ -1254,9 +1248,7 @@ parseBook(boost::json::object const& request)
if (get_issuer == ripple::noAccount())
return Status{
RippledError::rpcDST_ISR_MALFORMED,
"Invalid field 'taker_gets.issuer', bad issuer account "
"one."};
RippledError::rpcDST_ISR_MALFORMED, "Invalid field 'taker_gets.issuer', bad issuer account one."};
}
else
{
@@ -1265,9 +1257,7 @@ parseBook(boost::json::object const& request)
if (ripple::isXRP(get_currency) && !ripple::isXRP(get_issuer))
return Status{
RippledError::rpcDST_ISR_MALFORMED,
"Unneeded field 'taker_gets.issuer' for XRP currency "
"specification."};
RippledError::rpcDST_ISR_MALFORMED, "Unneeded field 'taker_gets.issuer' for XRP currency specification."};
if (!ripple::isXRP(get_currency) && ripple::isXRP(get_issuer))
return Status{

View File

@@ -31,7 +31,7 @@ namespace rpc {
/**
* @brief Default API version to use if no version is specified by clients
*/
static constexpr uint32_t API_VERSION_DEFAULT = 2u;
static constexpr uint32_t API_VERSION_DEFAULT = 1u;
/**
* @brief Minimum API version supported by this build

46
src/rpc/common/JsonBool.h Normal file
View File

@@ -0,0 +1,46 @@
#pragma once
#include <boost/json/value_to.hpp>

#include <stdexcept>
namespace rpc {
/**
 * @brief A wrapper around bool that allows to convert from any JSON value
 */
struct JsonBool
{
    // Wrapped flag; defaults to false so a value-initialized JsonBool reads as false.
    bool value = false;

    /** @brief Implicit conversion so JsonBool can be used anywhere a bool is expected. */
    operator bool() const
    {
        return value;
    }
};
/**
 * @brief Convert a JSON value of any kind to JsonBool, mirroring rippled's permissive
 * truthiness rules (kept for API v1 compatibility; v2 validates the type instead).
 *
 * @param jsonValue The JSON value to interpret as a boolean
 * @return JsonBool wrapping the derived truth value
 * @throws std::runtime_error if the value kind is not one of the known boost::json kinds
 */
inline JsonBool
tag_invoke(boost::json::value_to_tag<JsonBool> const&, boost::json::value const& jsonValue)
{
    switch (jsonValue.kind())
    {
        case boost::json::kind::null:
            return JsonBool{false};
        case boost::json::kind::bool_:
            return JsonBool{jsonValue.as_bool()};
        case boost::json::kind::uint64:
            [[fallthrough]];
        case boost::json::kind::int64:
            // any non-zero integer is true
            return JsonBool{jsonValue.as_int64() != 0};
        case boost::json::kind::double_:
            // any non-zero double is true
            return JsonBool{jsonValue.as_double() != 0.0};
        case boost::json::kind::string:
            // Also should be `jsonValue.as_string() != "false"` but rippled doesn't do that. Anyway for v2 api we have
            // bool validation
            return JsonBool{!jsonValue.as_string().empty() && jsonValue.as_string()[0] != 0};
        case boost::json::kind::array:
            // non-empty array is true
            return JsonBool{!jsonValue.as_array().empty()};
        case boost::json::kind::object:
            // non-empty object is true
            return JsonBool{!jsonValue.as_object().empty()};
    }
    throw std::runtime_error("Invalid json value");
}
} // namespace rpc

View File

@@ -22,6 +22,9 @@
#include <rpc/common/Concepts.h>
#include <rpc/common/Specs.h>
#include <rpc/common/Types.h>
#include <util/JsonUtils.h>
#include <string_view>
namespace rpc::modifiers {
@@ -68,4 +71,32 @@ public:
}
};
/**
 * @brief Modifier that lower-cases a string field in place.
 *
 * Non-object inputs, absent keys and non-string values are left untouched and
 * reported as success so that other validators (e.g. 'required') produce the
 * appropriate error instead.
 */
struct ToLower final
{
    /**
     * @brief Replace the value stored under @p key with its lower-case form.
     *
     * @param value The JSON value representing the outer object
     * @param key The key used to retrieve the modified value from the outer object
     * @return Possibly an error
     */
    [[nodiscard]] MaybeError
    modify(boost::json::value& value, std::string_view key) const
    {
        if (not value.is_object())
            return {};  // nothing to modify on non-objects

        auto& outerObject = value.as_object();
        if (not outerObject.contains(key.data()))
            return {};  // field does not exist; let 'required' fail instead

        auto const& field = outerObject.at(key.data());
        if (not field.is_string())
            return {};  // only strings are lower-cased

        outerObject[key.data()] = util::toLower(field.as_string().c_str());
        return {};
    }
};
} // namespace rpc::modifiers

View File

@@ -76,6 +76,18 @@ struct RpcSpec final
{
}
/**
* @brief Construct a full RPC request specification from another spec and additional fields.
*
* @param other The other spec to copy fields from
* @param additionalFields The additional fields to add to the spec
*/
RpcSpec(const RpcSpec& other, std::initializer_list<FieldSpec> additionalFields) : fields_{other.fields_}
{
for (auto& f : additionalFields)
fields_.push_back(std::move(f));
}
/**
* @brief Processes the passed JSON value using the stored field specs.
*

View File

@@ -83,9 +83,9 @@ struct VoidOutput
struct Context
{
boost::asio::yield_context yield;
std::shared_ptr<web::ConnectionBase> session;
std::shared_ptr<web::ConnectionBase> session = {};
bool isAdmin = false;
std::string clientIp;
std::string clientIp = {};
uint32_t apiVersion = 0u; // invalid by default
};

View File

@@ -68,7 +68,7 @@ CustomValidator Uint256HexStringValidator =
}};
CustomValidator LedgerIndexValidator =
CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError {
CustomValidator{[](boost::json::value const& value, std::string_view /* key */) -> MaybeError {
auto err = Error{Status{RippledError::rpcINVALID_PARAMS, "ledgerIndexMalformed"}};
if (!value.is_string() && !(value.is_uint64() || value.is_int64()))
@@ -146,11 +146,7 @@ CustomValidator IssuerValidator =
if (issuer == ripple::noAccount())
return Error{Status{
RippledError::rpcINVALID_PARAMS,
fmt::format(
"Invalid field '{}', bad issuer account "
"one.",
key)}};
RippledError::rpcINVALID_PARAMS, fmt::format("Invalid field '{}', bad issuer account one.", key)}};
return MaybeError{};
}};

View File

@@ -417,7 +417,7 @@ public:
auto const res = value_to<Type>(value.as_object().at(key.data()));
if (std::find(std::begin(options_), std::end(options_), res) == std::end(options_))
return Error{Status{RippledError::rpcINVALID_PARAMS}};
return Error{Status{RippledError::rpcINVALID_PARAMS, fmt::format("Invalid field '{}'.", key)}};
return {};
}

View File

@@ -56,10 +56,6 @@ public:
if (ctx.method == "subscribe" || ctx.method == "unsubscribe")
return false;
// TODO: if needed, make configurable with json config option
if (ctx.apiVersion == 1)
return true;
if (handlerProvider_->isClioOnly(ctx.method))
return false;

View File

@@ -80,10 +80,17 @@ AccountInfoHandler::process(AccountInfoHandler::Input input, Context const& ctx)
}
return Output(
lgrInfo.seq, ripple::strHex(lgrInfo.hash), sle, isDisallowIncomingEnabled, isClawbackEnabled, signerList);
lgrInfo.seq,
ripple::strHex(lgrInfo.hash),
sle,
isDisallowIncomingEnabled,
isClawbackEnabled,
ctx.apiVersion,
signerList);
}
return Output(lgrInfo.seq, ripple::strHex(lgrInfo.hash), sle, isDisallowIncomingEnabled, isClawbackEnabled);
return Output(
lgrInfo.seq, ripple::strHex(lgrInfo.hash), sle, isDisallowIncomingEnabled, isClawbackEnabled, ctx.apiVersion);
}
void
@@ -128,8 +135,7 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountInfoHandl
for (auto const& lsf : lsFlags)
acctFlags[lsf.first.data()] = output.accountData.isFlag(lsf.second);
// wait for conan integration-> jss::account_flags
jv.as_object()["account_flags"] = std::move(acctFlags);
jv.as_object()[JS(account_flags)] = std::move(acctFlags);
if (output.signerLists)
{
@@ -139,8 +145,10 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountInfoHandl
std::cend(output.signerLists.value()),
std::back_inserter(signers),
[](auto const& signerList) { return toJson(signerList); });
// version 2 puts the signer_lists out of the account_data
jv.as_object()[JS(signer_lists)] = std::move(signers);
if (output.apiVersion == 1)
jv.as_object()[JS(account_data)].as_object()[JS(signer_lists)] = std::move(signers);
else
jv.as_object()[JS(signer_lists)] = signers;
}
}
@@ -168,7 +176,7 @@ tag_invoke(boost::json::value_to_tag<AccountInfoHandler::Input>, boost::json::va
}
if (jsonObject.contains(JS(signer_lists)))
input.signerLists = jsonObject.at(JS(signer_lists)).as_bool();
input.signerLists = boost::json::value_to<JsonBool>(jsonObject.at(JS(signer_lists)));
return input;
}

View File

@@ -21,6 +21,7 @@
#include <data/BackendInterface.h>
#include <rpc/RPCHelpers.h>
#include <rpc/common/JsonBool.h>
#include <rpc/common/MetaProcessors.h>
#include <rpc/common/Types.h>
#include <rpc/common/Validators.h>
@@ -44,6 +45,7 @@ public:
ripple::STLedgerEntry accountData;
bool isDisallowIncomingEnabled = false;
bool isClawbackEnabled = false;
uint32_t apiVersion;
std::optional<std::vector<ripple::STLedgerEntry>> signerLists;
// validated should be sent via framework
bool validated = true;
@@ -54,12 +56,14 @@ public:
ripple::STLedgerEntry sle,
bool isDisallowIncomingEnabled,
bool isClawbackEnabled,
uint32_t version,
std::optional<std::vector<ripple::STLedgerEntry>> signerLists = std::nullopt)
: ledgerIndex(ledgerId)
, ledgerHash(std::move(ledgerHash))
, accountData(std::move(sle))
, isDisallowIncomingEnabled(isDisallowIncomingEnabled)
, isClawbackEnabled(isClawbackEnabled)
, apiVersion(version)
, signerLists(std::move(signerLists))
{
}
@@ -73,7 +77,7 @@ public:
std::optional<std::string> ident;
std::optional<std::string> ledgerHash;
std::optional<uint32_t> ledgerIndex;
bool signerLists = false;
JsonBool signerLists{false};
};
using Result = HandlerReturnType<Output>;
@@ -85,14 +89,15 @@ public:
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
{
static auto const rpcSpec = RpcSpec{
static auto const rpcSpecV1 = RpcSpec{
{JS(account), validation::AccountValidator},
{JS(ident), validation::AccountValidator},
{JS(ledger_hash), validation::Uint256HexStringValidator},
{JS(ledger_index), validation::LedgerIndexValidator},
{JS(signer_lists), validation::Type<bool>{}}};
{JS(ledger_index), validation::LedgerIndexValidator}};
return rpcSpec;
static auto const rpcSpec = RpcSpec{rpcSpecV1, {{JS(signer_lists), validation::Type<bool>{}}}};
return apiVersion == 1 ? rpcSpecV1 : rpcSpec;
}
Result

View File

@@ -21,6 +21,7 @@
namespace rpc {
// found here : https://xrpl.org/ledger_entry.html#:~:text=valid%20fields%20are%3A-,index,-account_root
std::unordered_map<std::string, ripple::LedgerEntryType> const AccountObjectsHandler::TYPESMAP{
{"state", ripple::ltRIPPLE_STATE},
{"ticket", ripple::ltTICKET},

View File

@@ -18,10 +18,50 @@
//==============================================================================
#include <rpc/handlers/AccountTx.h>
#include <util/JsonUtils.h>
#include <util/Profiler.h>
namespace rpc {
// found here : https://xrpl.org/transaction-types.html
// TODO [https://github.com/XRPLF/clio/issues/856]: add AMMBid, AMMCreate, AMMDelete, AMMDeposit, AMMVote, AMMWithdraw
// Maps the lower-cased `tx_type` request value to the corresponding ripple transaction type.
std::unordered_map<std::string, ripple::TxType> const AccountTxHandler::TYPESMAP{
    {JSL(AccountSet), ripple::ttACCOUNT_SET},
    {JSL(AccountDelete), ripple::ttACCOUNT_DELETE},
    {JSL(CheckCancel), ripple::ttCHECK_CANCEL},
    {JSL(CheckCash), ripple::ttCHECK_CASH},
    {JSL(CheckCreate), ripple::ttCHECK_CREATE},
    {JSL(Clawback), ripple::ttCLAWBACK},
    {JSL(DepositPreauth), ripple::ttDEPOSIT_PREAUTH},
    {JSL(EscrowCancel), ripple::ttESCROW_CANCEL},
    {JSL(EscrowCreate), ripple::ttESCROW_CREATE},
    {JSL(EscrowFinish), ripple::ttESCROW_FINISH},
    {JSL(NFTokenAcceptOffer), ripple::ttNFTOKEN_ACCEPT_OFFER},
    {JSL(NFTokenBurn), ripple::ttNFTOKEN_BURN},
    {JSL(NFTokenCancelOffer), ripple::ttNFTOKEN_CANCEL_OFFER},
    {JSL(NFTokenCreateOffer), ripple::ttNFTOKEN_CREATE_OFFER},
    {JSL(NFTokenMint), ripple::ttNFTOKEN_MINT},
    {JSL(OfferCancel), ripple::ttOFFER_CANCEL},
    {JSL(OfferCreate), ripple::ttOFFER_CREATE},
    {JSL(Payment), ripple::ttPAYMENT},
    {JSL(PaymentChannelClaim), ripple::ttPAYCHAN_CLAIM},
    // Bug fix: was ripple::ttCHECK_CREATE, which made `tx_type: PaymentChannelCreate`
    // filter for CheckCreate transactions instead.
    {JSL(PaymentChannelCreate), ripple::ttPAYCHAN_CREATE},
    {JSL(PaymentChannelFund), ripple::ttPAYCHAN_FUND},
    {JSL(SetRegularKey), ripple::ttREGULAR_KEY_SET},
    {JSL(SignerListSet), ripple::ttSIGNER_LIST_SET},
    {JSL(TicketCreate), ripple::ttTICKET_CREATE},
    {JSL(TrustSet), ripple::ttTRUST_SET},
};
// TODO: should be std::views::keys when clang supports it
// Set of valid `tx_type` strings, derived from TYPESMAP for OneOf validation.
std::unordered_set<std::string> const AccountTxHandler::TYPES_KEYS = [] {
    std::unordered_set<std::string> keys;
    keys.reserve(TYPESMAP.size());
    for (auto const& [name, txType] : TYPESMAP)
        keys.insert(name);
    return keys;
}();
// TODO: this is currently very similar to nft_history, but kept as its own copy for the time
// being. We should aim to factor out and reuse the common logic in the future.
AccountTxHandler::Result
@@ -32,36 +72,53 @@ AccountTxHandler::process(AccountTxHandler::Input input, Context const& ctx) con
if (input.ledgerIndexMin)
{
if (range->maxSequence < input.ledgerIndexMin || range->minSequence > input.ledgerIndexMin)
if (ctx.apiVersion > 1u &&
(input.ledgerIndexMin > range->maxSequence || input.ledgerIndexMin < range->minSequence))
{
return Error{Status{RippledError::rpcLGR_IDX_MALFORMED, "ledgerSeqMinOutOfRange"}};
}
minIndex = *input.ledgerIndexMin;
if (static_cast<std::uint32_t>(*input.ledgerIndexMin) > minIndex)
minIndex = *input.ledgerIndexMin;
}
if (input.ledgerIndexMax)
{
if (range->maxSequence < input.ledgerIndexMax || range->minSequence > input.ledgerIndexMax)
if (ctx.apiVersion > 1u &&
(input.ledgerIndexMax > range->maxSequence || input.ledgerIndexMax < range->minSequence))
{
return Error{Status{RippledError::rpcLGR_IDX_MALFORMED, "ledgerSeqMaxOutOfRange"}};
}
maxIndex = *input.ledgerIndexMax;
if (static_cast<std::uint32_t>(*input.ledgerIndexMax) < maxIndex)
maxIndex = *input.ledgerIndexMax;
}
if (minIndex > maxIndex)
{
if (ctx.apiVersion == 1u)
return Error{Status{RippledError::rpcLGR_IDXS_INVALID}};
return Error{Status{RippledError::rpcINVALID_LGR_RANGE}};
}
if (input.ledgerHash || input.ledgerIndex || input.usingValidatedLedger)
{
// rippled does not have this check
if (input.ledgerIndexMax || input.ledgerIndexMin)
if (ctx.apiVersion > 1u && (input.ledgerIndexMax || input.ledgerIndexMin))
return Error{Status{RippledError::rpcINVALID_PARAMS, "containsLedgerSpecifierAndRange"}};
auto const lgrInfoOrStatus = getLedgerInfoFromHashOrSeq(
*sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence);
if (!input.ledgerIndexMax && !input.ledgerIndexMin)
{
// mimic rippled, when both range and index specified, respect the range.
// take ledger from ledgerHash or ledgerIndex only when range is not specified
auto const lgrInfoOrStatus = getLedgerInfoFromHashOrSeq(
*sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence);
if (auto status = std::get_if<Status>(&lgrInfoOrStatus))
return Error{*status};
if (auto status = std::get_if<Status>(&lgrInfoOrStatus))
return Error{*status};
maxIndex = minIndex = std::get<ripple::LedgerHeader>(lgrInfoOrStatus).seq;
maxIndex = minIndex = std::get<ripple::LedgerHeader>(lgrInfoOrStatus).seq;
}
}
std::optional<data::TransactionsCursor> cursor;
@@ -116,8 +173,23 @@ AccountTxHandler::process(AccountTxHandler::Input input, Context const& ctx) con
auto [txn, meta] = toExpandedJson(txnPlusMeta, NFTokenjson::ENABLE);
obj[JS(meta)] = std::move(meta);
obj[JS(tx)] = std::move(txn);
obj[JS(tx)].as_object()[JS(ledger_index)] = txnPlusMeta.ledgerSequence;
if (obj[JS(tx)].as_object().contains(JS(TransactionType)))
{
auto const objTransactionType = obj[JS(tx)].as_object()[JS(TransactionType)];
auto const strType = util::toLower(objTransactionType.as_string().c_str());
// if transactionType does not match
if (input.transactionType.has_value() && AccountTxHandler::TYPESMAP.contains(strType) &&
AccountTxHandler::TYPESMAP.at(strType) != input.transactionType.value())
continue;
}
obj[JS(tx)].as_object()[JS(date)] = txnPlusMeta.date;
obj[JS(tx)].as_object()[JS(ledger_index)] = txnPlusMeta.ledgerSequence;
if (ctx.apiVersion < 2u)
obj[JS(tx)].as_object()[JS(inLedger)] = txnPlusMeta.ledgerSequence;
}
else
{
@@ -195,10 +267,10 @@ tag_invoke(boost::json::value_to_tag<AccountTxHandler::Input>, boost::json::valu
}
if (jsonObject.contains(JS(binary)))
input.binary = jsonObject.at(JS(binary)).as_bool();
input.binary = boost::json::value_to<JsonBool>(jsonObject.at(JS(binary)));
if (jsonObject.contains(JS(forward)))
input.forward = jsonObject.at(JS(forward)).as_bool();
input.forward = boost::json::value_to<JsonBool>(jsonObject.at(JS(forward)));
if (jsonObject.contains(JS(limit)))
input.limit = jsonObject.at(JS(limit)).as_int64();
@@ -208,6 +280,12 @@ tag_invoke(boost::json::value_to_tag<AccountTxHandler::Input>, boost::json::valu
jsonObject.at(JS(marker)).as_object().at(JS(ledger)).as_int64(),
jsonObject.at(JS(marker)).as_object().at(JS(seq)).as_int64()};
if (jsonObject.contains("tx_type"))
{
auto objTransactionType = jsonObject.at("tx_type");
input.transactionType = AccountTxHandler::TYPESMAP.at(objTransactionType.as_string().c_str());
}
return input;
}

View File

@@ -21,6 +21,7 @@
#include <data/BackendInterface.h>
#include <rpc/RPCHelpers.h>
#include <rpc/common/JsonBool.h>
#include <rpc/common/MetaProcessors.h>
#include <rpc/common/Modifiers.h>
#include <rpc/common/Types.h>
@@ -39,6 +40,9 @@ class AccountTxHandler
util::Logger log_{"RPC"};
std::shared_ptr<BackendInterface> sharedPtrBackend_;
static std::unordered_map<std::string, ripple::TxType> const TYPESMAP;
static const std::unordered_set<std::string> TYPES_KEYS;
public:
// no max limit
static auto constexpr LIMIT_MIN = 1;
@@ -73,10 +77,11 @@ public:
std::optional<int32_t> ledgerIndexMin;
std::optional<int32_t> ledgerIndexMax;
bool usingValidatedLedger = false;
bool binary = false;
bool forward = false;
JsonBool binary{false};
JsonBool forward{false};
std::optional<uint32_t> limit;
std::optional<Marker> marker;
std::optional<ripple::TxType> transactionType;
};
using Result = HandlerReturnType<Output>;
@@ -88,14 +93,12 @@ public:
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
{
static auto const rpcSpec = RpcSpec{
static auto const rpcSpecForV1 = RpcSpec{
{JS(account), validation::Required{}, validation::AccountValidator},
{JS(ledger_hash), validation::Uint256HexStringValidator},
{JS(ledger_index), validation::LedgerIndexValidator},
{JS(ledger_index_min), validation::Type<int32_t>{}},
{JS(ledger_index_max), validation::Type<int32_t>{}},
{JS(binary), validation::Type<bool>{}},
{JS(forward), validation::Type<bool>{}},
{JS(limit),
validation::Type<uint32_t>{},
validation::Min(1u),
@@ -109,9 +112,22 @@ public:
{JS(ledger), validation::Required{}, validation::Type<uint32_t>{}},
{JS(seq), validation::Required{}, validation::Type<uint32_t>{}},
}},
{
"tx_type",
validation::Type<std::string>{},
modifiers::ToLower{},
validation::OneOf<std::string>(TYPES_KEYS.cbegin(), TYPES_KEYS.cend()),
},
};
return rpcSpec;
static auto const rpcSpec = RpcSpec{
rpcSpecForV1,
{
{JS(binary), validation::Type<bool>{}},
{JS(forward), validation::Type<bool>{}},
}};
return apiVersion == 1 ? rpcSpecForV1 : rpcSpec;
}
Result

View File

@@ -95,7 +95,7 @@ public:
// return INVALID_PARAMS if account format is wrong for "taker"
{JS(taker),
meta::WithCustomError{
validation::AccountValidator, Status(RippledError::rpcINVALID_PARAMS, "Invalid field 'taker'")}},
validation::AccountValidator, Status(RippledError::rpcINVALID_PARAMS, "Invalid field 'taker'.")}},
{JS(limit),
validation::Type<uint32_t>{},
validation::Min(1u),

View File

@@ -100,9 +100,7 @@ public:
meta::WithCustomError{
validation::Type<std::string>{},
Status{ripple::rpcINVALID_PARAMS, "Invalid field 'type', not string."}},
meta::WithCustomError{
validation::OneOf<std::string>(TYPES_KEYS.cbegin(), TYPES_KEYS.cend()),
Status{ripple::rpcINVALID_PARAMS, "Invalid field 'type'."}}},
validation::OneOf<std::string>(TYPES_KEYS.cbegin(), TYPES_KEYS.cend())},
};
return rpcSpec;

View File

@@ -82,7 +82,9 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx)
else
{
// Must specify 1 of the following fields to indicate what type
return Error{Status{ClioError::rpcUNKNOWN_OPTION}};
if (ctx.apiVersion == 1)
return Error{Status{ClioError::rpcUNKNOWN_OPTION}};
return Error{Status{RippledError::rpcINVALID_PARAMS}};
}
// check ledger exists

View File

@@ -81,7 +81,7 @@ public:
// The accounts array must have two different elements
// Each element must be a valid address
static auto const rippleStateAccountsCheck =
validation::CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError {
validation::CustomValidator{[](boost::json::value const& value, std::string_view /* key */) -> MaybeError {
if (!value.is_array() || value.as_array().size() != 2 || !value.as_array()[0].is_string() ||
!value.as_array()[1].is_string() ||
value.as_array()[0].as_string() == value.as_array()[1].as_string())

View File

@@ -132,7 +132,7 @@ NFTOffersHandlerBase::iterateOfferDirectory(
std::move(std::begin(offers), std::end(offers), std::back_inserter(output.offers));
return std::move(output);
return output;
}
void

View File

@@ -38,13 +38,13 @@ public:
struct Output
{
std::string nftID;
std::vector<ripple::SLE> offers;
std::string nftID = {};
std::vector<ripple::SLE> offers = {};
// validated should be sent via framework
bool validated = true;
std::optional<uint32_t> limit;
std::optional<std::string> marker;
std::optional<uint32_t> limit = {};
std::optional<std::string> marker = {};
};
struct Input

View File

@@ -167,7 +167,7 @@ tag_invoke(boost::json::value_to_tag<NoRippleCheckHandler::Input>, boost::json::
input.limit = jsonObject.at(JS(limit)).as_int64();
if (jsonObject.contains(JS(transactions)))
input.transactions = jsonObject.at(JS(transactions)).as_bool();
input.transactions = boost::json::value_to<JsonBool>(jsonObject.at(JS(transactions)));
if (jsonObject.contains(JS(ledger_hash)))
input.ledgerHash = jsonObject.at(JS(ledger_hash)).as_string().c_str();

View File

@@ -21,6 +21,7 @@
#include <data/BackendInterface.h>
#include <rpc/RPCHelpers.h>
#include <rpc/common/JsonBool.h>
#include <rpc/common/MetaProcessors.h>
#include <rpc/common/Modifiers.h>
#include <rpc/common/Types.h>
@@ -62,7 +63,7 @@ public:
std::optional<std::string> ledgerHash;
std::optional<uint32_t> ledgerIndex;
uint32_t limit = LIMIT_DEFAULT;
bool transactions = false;
JsonBool transactions{false};
};
using Result = HandlerReturnType<Output>;
@@ -75,7 +76,7 @@ public:
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
{
static auto const rpcSpec = RpcSpec{
static auto const rpcSpecV1 = RpcSpec{
{JS(account), validation::Required{}, validation::AccountValidator},
{JS(role),
validation::Required{},
@@ -87,11 +88,15 @@ public:
{JS(limit),
validation::Type<uint32_t>(),
validation::Min(1u),
modifiers::Clamp<int32_t>{LIMIT_MIN, LIMIT_MAX}},
{JS(transactions), validation::Type<bool>()},
};
modifiers::Clamp<int32_t>{LIMIT_MIN, LIMIT_MAX}}};
return rpcSpec;
static auto const rpcSpec = RpcSpec{
rpcSpecV1,
{
{JS(transactions), validation::Type<bool>()},
}};
return apiVersion == 1 ? rpcSpecV1 : rpcSpec;
}
Result

View File

@@ -28,6 +28,7 @@
#include <rpc/common/Validators.h>
#include <ripple/basics/chrono.h>
#include <ripple/protocol/BuildInfo.h>
#include <chrono>
#include <fmt/core.h>
@@ -87,6 +88,7 @@ public:
std::chrono::time_point<std::chrono::system_clock> time = std::chrono::system_clock::now();
std::chrono::seconds uptime = {};
std::string clioVersion = Build::getClioVersionString();
std::string xrplVersion = ripple::BuildInfo::getVersionString();
std::optional<boost::json::object> rippledInfo = std::nullopt;
ValidatedLedgerSection validatedLedger = {};
CacheSection cache = {};
@@ -194,6 +196,7 @@ private:
{JS(time), to_string(std::chrono::floor<std::chrono::microseconds>(info.time))},
{JS(uptime), info.uptime.count()},
{"clio_version", info.clioVersion},
{"libxrpl_version", info.xrplVersion},
{JS(validated_ledger), value_from(info.validatedLedger)},
{"cache", value_from(info.cache)},
};

View File

@@ -21,6 +21,7 @@
#include <data/BackendInterface.h>
#include <rpc/RPCHelpers.h>
#include <rpc/common/MetaProcessors.h>
#include <rpc/common/Types.h>
#include <rpc/common/Validators.h>
@@ -95,7 +96,11 @@ public:
return Error{Status{RippledError::rpcINVALID_PARAMS, "snapshotNotBool"}};
if (book.as_object().contains("taker"))
if (auto const err = validation::AccountValidator.verify(book.as_object(), "taker"); !err)
if (auto const err = meta::WithCustomError(
validation::AccountValidator,
Status{RippledError::rpcBAD_ISSUER, "Issuer account malformed."})
.verify(book.as_object(), "taker");
!err)
return err;
auto const parsedBook = parseBook(book.as_object());

View File

@@ -36,7 +36,7 @@ TxHandler::process(Input input, Context const& ctx) const
return Error{Status{RippledError::rpcEXCESSIVE_LGR_RANGE}};
}
auto output = TxHandler::Output{};
auto output = TxHandler::Output{.apiVersion = ctx.apiVersion};
auto const dbResponse =
sharedPtrBackend_->fetchTransaction(ripple::uint256{std::string_view(input.transaction)}, ctx.yield);
@@ -55,7 +55,6 @@ TxHandler::process(Input input, Context const& ctx) const
return Error{Status{RippledError::rpcTXN_NOT_FOUND}};
}
// clio does not implement 'inLedger' which is a deprecated field
if (!input.binary)
{
auto const [txn, meta] = toExpandedJson(*dbResponse, NFTokenjson::ENABLE);
@@ -95,6 +94,9 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, TxHandler::Outpu
obj[JS(date)] = output.date;
obj[JS(ledger_index)] = output.ledgerIndex;
if (output.apiVersion < 2u)
obj[JS(inLedger)] = output.ledgerIndex;
jv = std::move(obj);
}

View File

@@ -38,13 +38,14 @@ class TxHandler
public:
struct Output
{
uint32_t date;
std::string hash;
uint32_t ledgerIndex;
std::optional<boost::json::object> meta;
std::optional<boost::json::object> tx;
std::optional<std::string> metaStr;
std::optional<std::string> txStr;
uint32_t date = 0u;
std::string hash{};
uint32_t ledgerIndex = 0u;
std::optional<boost::json::object> meta{};
std::optional<boost::json::object> tx{};
std::optional<std::string> metaStr{};
std::optional<std::string> txStr{};
uint32_t apiVersion = 0u;
bool validated = true;
};

View File

@@ -21,6 +21,8 @@
#include <boost/json.hpp>
#include <algorithm>
#include <cctype>
#include <string>
/**
@@ -28,6 +30,13 @@
*/
namespace util {
/**
 * @brief Produce a lower-case copy of the given string.
 *
 * @param str The input string (taken by value; mutated and returned)
 * @return The same characters with ASCII letters converted to lower case
 */
inline std::string
toLower(std::string str)
{
    // cast to unsigned char first: std::tolower is UB for negative char values
    for (auto& ch : str)
        ch = static_cast<char>(std::tolower(static_cast<unsigned char>(ch)));
    return str;
}
/**
* @brief Removes any detected secret information from a response JSON object.
*

View File

@@ -93,12 +93,16 @@ using SourceLocationType = SourceLocation;
*
* Note: Currently this introduces potential shadowing (unlikely).
*/
// The if/else trick below makes `LOG(logger.debug()) << ...` attach the stream
// expression to the `else` branch, so the streamed operands are only evaluated
// when the pump object is truthy (presumably when the severity is enabled —
// confirm against util::Logger). Under coverage builds this extra branch would
// skew branch-coverage numbers, so LOG collapses to a plain passthrough there.
#ifndef COVERAGE_ENABLED
#define LOG(x) \
if (auto clio_pump__ = x; not clio_pump__) \
{ \
} \
else \
clio_pump__
#else
#define LOG(x) x
#endif
/**
* @brief Custom severity levels for @ref util::Logger.

View File

@@ -89,8 +89,8 @@ public:
auto req = boost::json::parse(request).as_object();
LOG(perfLog_.debug()) << connection->tag() << "Adding to work queue";
if (not connection->upgraded and not req.contains("params"))
req["params"] = boost::json::array({boost::json::object{}});
if (not connection->upgraded and shouldReplaceParams(req))
req[JS(params)] = boost::json::array({boost::json::object{}});
if (!rpcEngine_->post(
[this, request = std::move(req), connection](boost::asio::yield_context yield) mutable {
@@ -190,13 +190,13 @@ private:
return web::detail::ErrorHelper(connection, request).sendError(err);
}
auto [v, timeDiff] = util::timed([&]() { return rpcEngine_->buildResponse(*context); });
auto [result, timeDiff] = util::timed([&]() { return rpcEngine_->buildResponse(*context); });
auto us = std::chrono::duration<int, std::milli>(timeDiff);
rpc::logDuration(*context, us);
boost::json::object response;
if (auto const status = std::get_if<rpc::Status>(&v))
if (auto const status = std::get_if<rpc::Status>(&result))
{
// note: error statuses are counted/notified in buildResponse itself
response = web::detail::ErrorHelper(connection, request).composeError(*status);
@@ -210,20 +210,20 @@ private:
// This can still technically be an error. Clio counts forwarded requests as successful.
rpcEngine_->notifyComplete(context->method, us);
auto& result = std::get<boost::json::object>(v);
auto const isForwarded = result.contains("forwarded") && result.at("forwarded").is_bool() &&
result.at("forwarded").as_bool();
auto& json = std::get<boost::json::object>(result);
auto const isForwarded =
json.contains("forwarded") && json.at("forwarded").is_bool() && json.at("forwarded").as_bool();
// if the result is forwarded - just use it as is
// if forwarded request has error, for http, error should be in "result"; for ws, error should be at top
if (isForwarded && (result.contains("result") || connection->upgraded))
if (isForwarded && (json.contains("result") || connection->upgraded))
{
for (auto const& [k, v] : result)
for (auto const& [k, v] : json)
response.insert_or_assign(k, v);
}
else
{
response["result"] = result;
response["result"] = json;
}
// for ws there is an additional field "status" in the response,
@@ -267,6 +267,27 @@ private:
return web::detail::ErrorHelper(connection, request).sendInternalError();
}
}
/**
 * @brief Decide whether the request's "params" should be replaced by `[{}]`.
 *
 * Note: all this compatibility dance is to match `rippled` as close as possible.
 *
 * @param req The parsed JSON-RPC request object
 * @return true if "params" is missing, null, an empty string/object/array,
 *         or an array whose first element is null or an empty string
 */
bool
shouldReplaceParams(boost::json::object const& req) const
{
    if (not req.contains(JS(params)))
        return true;

    auto const& params = req.at(JS(params));
    if (params.is_null())
        return true;
    if (params.is_string() and params.as_string().empty())
        return true;
    if (params.is_object() and params.as_object().empty())
        return true;

    if (params.is_array())
    {
        auto const& arr = params.as_array();
        if (arr.empty())
            return true;

        auto const& firstArg = arr.at(0);
        if (firstArg.is_null())
            return true;
        if (firstArg.is_string() and firstArg.as_string().empty())
            return true;
    }

    return false;
}
};
} // namespace web

View File

@@ -180,10 +180,10 @@ public:
if (boost::beast::websocket::is_upgrade(req_))
{
upgraded = true;
// Disable the timeout.
// The websocket::stream uses its own timeout settings.
// Disable the timeout. The websocket::stream uses its own timeout settings.
boost::beast::get_lowest_layer(derived().stream()).expires_never();
upgraded = true;
return derived().upgrade();
}

View File

@@ -63,7 +63,7 @@ protected:
if (!ec_ && ec != boost::asio::error::operation_aborted)
{
ec_ = ec;
LOG(perfLog_.info()) << tag() << ": " << what << ": " << ec.message();
LOG(perfLog_.error()) << tag() << ": " << what << ": " << ec.message();
boost::beast::get_lowest_layer(derived().ws()).socket().close(ec);
(*handler_)(ec, derived().shared_from_this());
}
@@ -106,14 +106,14 @@ public:
void
onWrite(boost::system::error_code ec, std::size_t)
{
messages_.pop();
sending_ = false;
if (ec)
{
wsFail(ec, "Failed to write");
}
else
{
messages_.pop();
sending_ = false;
maybeSendNext();
}
}
@@ -121,6 +121,10 @@ public:
void
maybeSendNext()
{
// cleanup if needed. can't do this in destructor so it's here
if (dead())
(*handler_)(ec_, derived().shared_from_this());
if (ec_ || sending_ || messages_.empty())
return;
@@ -150,7 +154,7 @@ public:
* If the DOSGuard is triggered, the message will be modified to include a warning
*/
void
send(std::string&& msg, http::status _ = http::status::ok) override
send(std::string&& msg, http::status = http::status::ok) override
{
if (!dosGuard_.get().add(clientIp, msg.size()))
{
@@ -204,8 +208,8 @@ public:
if (dead())
return;
// Clear the buffer
buffer_.consume(buffer_.size());
// Note: use entirely new buffer so previously used, potentially large, capacity is deallocated
buffer_ = boost::beast::flat_buffer{};
derived().ws().async_read(buffer_, boost::beast::bind_front_handler(&WsBase::onRead, this->shared_from_this()));
}

View File

@@ -66,12 +66,11 @@ public:
* @brief Send via shared_ptr of string, that enables SubscriptionManager to publish to clients.
*
* @param msg The message to send
* @throws Not supported unless implemented in child classes. Will always throw std::runtime_error.
* @throws Not supported unless implemented in child classes. Will always throw std::logic_error.
*/
virtual void
send(std::shared_ptr<std::string> msg)
virtual void send(std::shared_ptr<std::string> /* msg */)
{
throw std::runtime_error("web server can not send the shared payload");
throw std::logic_error("web server can not send the shared payload");
}
/**

View File

@@ -57,6 +57,7 @@ TEST_F(LoggerTest, Filtering)
checkEqual("Trace:TRC Trace line logged for 'Trace' component");
}
#ifndef COVERAGE_ENABLED
TEST_F(LoggerTest, LOGMacro)
{
Logger log{"General"};
@@ -73,6 +74,7 @@ TEST_F(LoggerTest, LOGMacro)
log.trace() << compute();
EXPECT_TRUE(computeCalled);
}
#endif
TEST_F(NoLoggerTest, Basic)
{

View File

@@ -161,7 +161,6 @@ TEST_F(SubscriptionManagerSimpleBackendTest, ReportCurrentSubscriber)
EXPECT_EQ(reportReturn["books"], result);
};
checkResult(subManagerPtr->report(), 1);
subManagerPtr->cleanup(session2);
subManagerPtr->cleanup(session2); // clean a removed session
std::this_thread::sleep_for(20ms);
checkResult(subManagerPtr->report(), 0);
@@ -270,7 +269,7 @@ TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerAccountProposedT
})";
subManagerPtr->forwardProposedTransaction(json::parse(dummyTransaction).get_object());
CheckSubscriberMessage(dummyTransaction, session);
auto rawIdle = (MockSession*)(sessionIdle.get());
auto rawIdle = static_cast<MockSession*>(sessionIdle.get());
EXPECT_EQ("", rawIdle->message);
}

View File

@@ -25,7 +25,6 @@
#include <boost/json/parse.hpp>
#include <gmock/gmock.h>
namespace json = boost::json;
using namespace feed;
// io_context
@@ -54,6 +53,8 @@ TEST_F(SubscriptionTest, SubscriptionCount)
ctx.restart();
ctx.run();
EXPECT_EQ(sub.count(), 2);
EXPECT_TRUE(sub.hasSession(session1));
EXPECT_TRUE(sub.hasSession(session2));
EXPECT_FALSE(sub.empty());
sub.unsubscribe(session1);
ctx.restart();
@@ -68,6 +69,8 @@ TEST_F(SubscriptionTest, SubscriptionCount)
ctx.run();
EXPECT_EQ(sub.count(), 0);
EXPECT_TRUE(sub.empty());
EXPECT_FALSE(sub.hasSession(session1));
EXPECT_FALSE(sub.hasSession(session2));
}
// send interface will be called when publish called
@@ -83,9 +86,9 @@ TEST_F(SubscriptionTest, SubscriptionPublish)
sub.publish(std::make_shared<std::string>("message"));
ctx.restart();
ctx.run();
MockSession* p1 = (MockSession*)(session1.get());
MockSession* p1 = static_cast<MockSession*>(session1.get());
EXPECT_EQ(p1->message, "message");
MockSession* p2 = (MockSession*)(session2.get());
MockSession* p2 = static_cast<MockSession*>(session2.get());
EXPECT_EQ(p2->message, "message");
sub.unsubscribe(session1);
ctx.restart();
@@ -132,6 +135,9 @@ TEST_F(SubscriptionMapTest, SubscriptionMapCount)
ctx.restart();
ctx.run();
EXPECT_EQ(subMap.count(), 3);
EXPECT_TRUE(subMap.hasSession(session1, "topic1"));
EXPECT_TRUE(subMap.hasSession(session2, "topic1"));
EXPECT_TRUE(subMap.hasSession(session3, "topic2"));
subMap.unsubscribe(session1, "topic1");
ctx.restart();
ctx.run();
@@ -140,6 +146,9 @@ TEST_F(SubscriptionMapTest, SubscriptionMapCount)
subMap.unsubscribe(session3, "topic2");
ctx.restart();
ctx.run();
EXPECT_FALSE(subMap.hasSession(session1, "topic1"));
EXPECT_FALSE(subMap.hasSession(session2, "topic1"));
EXPECT_FALSE(subMap.hasSession(session3, "topic2"));
EXPECT_EQ(subMap.count(), 0);
subMap.unsubscribe(session3, "topic2");
subMap.unsubscribe(session3, "no exist");
@@ -166,9 +175,9 @@ TEST_F(SubscriptionMapTest, SubscriptionMapPublish)
subMap.publish(std::make_shared<std::string>(topic2Message.data()), topic2); // rvalue
ctx.restart();
ctx.run();
MockSession* p1 = (MockSession*)(session1.get());
MockSession* p1 = static_cast<MockSession*>(session1.get());
EXPECT_EQ(p1->message, topic1Message);
MockSession* p2 = (MockSession*)(session2.get());
MockSession* p2 = static_cast<MockSession*>(session2.get());
EXPECT_EQ(p2->message, topic2Message);
}
@@ -190,9 +199,9 @@ TEST_F(SubscriptionMapTest, SubscriptionMapDeadRemoveSubscriber)
subMap.publish(std::make_shared<std::string>(topic2Message), topic2); // rvalue
ctx.restart();
ctx.run();
MockDeadSession* p1 = (MockDeadSession*)(session1.get());
MockDeadSession* p1 = static_cast<MockDeadSession*>(session1.get());
EXPECT_EQ(p1->dead(), true);
MockSession* p2 = (MockSession*)(session2.get());
MockSession* p2 = static_cast<MockSession*>(session2.get());
EXPECT_EQ(p2->message, topic2Message);
subMap.publish(message1, topic1);
ctx.restart();

View File

@@ -122,9 +122,9 @@ TEST_F(BackendCassandraTest, Basic)
lgrInfoNext.hash++;
lgrInfoNext.accountHash = ~lgrInfo.accountHash;
{
std::string rawHeaderBlob = ledgerInfoToBinaryString(lgrInfoNext);
std::string infoBlob = ledgerInfoToBinaryString(lgrInfoNext);
backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob));
backend->writeLedger(lgrInfoNext, std::move(infoBlob));
ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq));
}
{
@@ -349,14 +349,13 @@ TEST_F(BackendCassandraTest, Basic)
ripple::uint256 hash256;
EXPECT_TRUE(hash256.parseHex(hashHex));
ripple::TxMeta txMeta{hash256, lgrInfoNext.seq, metaBlob};
auto journal = ripple::debugLog();
auto accountsSet = txMeta.getAffectedAccounts();
for (auto& a : accountsSet)
{
affectedAccounts.push_back(a);
}
std::vector<AccountTransactionsData> accountTxData;
accountTxData.emplace_back(txMeta, hash256, journal);
accountTxData.emplace_back(txMeta, hash256);
ripple::uint256 nftHash256;
EXPECT_TRUE(nftHash256.parseHex(nftTxnHashHex));
@@ -399,18 +398,22 @@ TEST_F(BackendCassandraTest, Basic)
auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield);
EXPECT_TRUE(retLgr);
EXPECT_EQ(ledgerInfoToBlob(*retLgr), ledgerInfoToBlob(lgrInfoNext));
auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq, yield);
ASSERT_EQ(txns.size(), 1);
EXPECT_STREQ((const char*)txns[0].transaction.data(), (const char*)txnBlob.data());
EXPECT_STREQ((const char*)txns[0].metadata.data(), (const char*)metaBlob.data());
auto allTransactions = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq, yield);
ASSERT_EQ(allTransactions.size(), 1);
EXPECT_STREQ(
reinterpret_cast<const char*>(allTransactions[0].transaction.data()),
static_cast<const char*>(txnBlob.data()));
EXPECT_STREQ(
reinterpret_cast<const char*>(allTransactions[0].metadata.data()),
static_cast<const char*>(metaBlob.data()));
auto hashes = backend->fetchAllTransactionHashesInLedger(lgrInfoNext.seq, yield);
EXPECT_EQ(hashes.size(), 1);
EXPECT_EQ(ripple::strHex(hashes[0]), hashHex);
for (auto& a : affectedAccounts)
{
auto [txns, cursor] = backend->fetchAccountTransactions(a, 100, true, {}, yield);
EXPECT_EQ(txns.size(), 1);
EXPECT_EQ(txns[0], txns[0]);
auto [accountTransactions, cursor] = backend->fetchAccountTransactions(a, 100, true, {}, yield);
EXPECT_EQ(accountTransactions.size(), 1);
EXPECT_EQ(accountTransactions[0], accountTransactions[0]);
EXPECT_FALSE(cursor);
}
auto nft = backend->fetchNFT(nftID, lgrInfoNext.seq, yield);
@@ -424,10 +427,10 @@ TEST_F(BackendCassandraTest, Basic)
EXPECT_TRUE(key256.parseHex(accountIndexHex));
auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield);
EXPECT_TRUE(obj);
EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data());
EXPECT_STREQ(reinterpret_cast<const char*>(obj->data()), static_cast<const char*>(accountBlob.data()));
obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield);
EXPECT_TRUE(obj);
EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data());
EXPECT_STREQ(reinterpret_cast<const char*>(obj->data()), static_cast<const char*>(accountBlob.data()));
obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield);
EXPECT_FALSE(obj);
}
@@ -463,13 +466,13 @@ TEST_F(BackendCassandraTest, Basic)
EXPECT_TRUE(key256.parseHex(accountIndexHex));
auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield);
EXPECT_TRUE(obj);
EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data());
EXPECT_STREQ(reinterpret_cast<const char*>(obj->data()), static_cast<const char*>(accountBlob.data()));
obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield);
EXPECT_TRUE(obj);
EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data());
EXPECT_STREQ(reinterpret_cast<const char*>(obj->data()), static_cast<const char*>(accountBlob.data()));
obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 1, yield);
EXPECT_TRUE(obj);
EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlobOld.data());
EXPECT_STREQ(reinterpret_cast<const char*>(obj->data()), static_cast<const char*>(accountBlobOld.data()));
obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield);
EXPECT_FALSE(obj);
}
@@ -505,7 +508,7 @@ TEST_F(BackendCassandraTest, Basic)
EXPECT_FALSE(obj);
obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 2, yield);
EXPECT_TRUE(obj);
EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlobOld.data());
EXPECT_STREQ(reinterpret_cast<const char*>(obj->data()), static_cast<const char*>(accountBlobOld.data()));
obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield);
EXPECT_FALSE(obj);
}
@@ -518,7 +521,7 @@ TEST_F(BackendCassandraTest, Basic)
for (auto& blob : res)
{
++key;
std::string keyStr{(const char*)key.data(), key.size()};
std::string keyStr{reinterpret_cast<const char*>(key.data()), key.size()};
blob.first = keyStr;
blob.second = std::to_string(ledgerSequence) + keyStr;
}
@@ -538,7 +541,7 @@ TEST_F(BackendCassandraTest, Basic)
for (auto& blob : res)
{
++base;
std::string hashStr{(const char*)base.data(), base.size()};
std::string hashStr{reinterpret_cast<const char*>(base.data()), base.size()};
std::string txnStr = "tx" + std::to_string(ledgerSequence) + hashStr;
std::string metaStr = "meta" + std::to_string(ledgerSequence) + hashStr;
blob = std::make_tuple(hashStr, txnStr, metaStr);
@@ -642,8 +645,14 @@ TEST_F(BackendCassandraTest, Basic)
bool found = false;
for (auto [retTxn, retMeta, retSeq, retDate] : retTxns)
{
if (std::strncmp((const char*)retTxn.data(), (const char*)txn.data(), txn.size()) == 0 &&
std::strncmp((const char*)retMeta.data(), (const char*)meta.data(), meta.size()) == 0)
if (std::strncmp(
reinterpret_cast<const char*>(retTxn.data()),
static_cast<const char*>(txn.data()),
txn.size()) == 0 &&
std::strncmp(
reinterpret_cast<const char*>(retMeta.data()),
static_cast<const char*>(meta.data()),
meta.size()) == 0)
found = true;
}
ASSERT_TRUE(found);
@@ -655,19 +664,20 @@ TEST_F(BackendCassandraTest, Basic)
do
{
uint32_t limit = 10;
auto [txns, retCursor] = backend->fetchAccountTransactions(account, limit, false, cursor, yield);
auto [accountTransactions, retCursor] =
backend->fetchAccountTransactions(account, limit, false, cursor, yield);
if (retCursor)
EXPECT_EQ(txns.size(), limit);
retData.insert(retData.end(), txns.begin(), txns.end());
EXPECT_EQ(accountTransactions.size(), limit);
retData.insert(retData.end(), accountTransactions.begin(), accountTransactions.end());
cursor = retCursor;
} while (cursor);
EXPECT_EQ(retData.size(), data.size());
for (size_t i = 0; i < retData.size(); ++i)
{
auto [txn, meta, seq, date] = retData[i];
auto [hash, expTxn, expMeta] = data[i];
EXPECT_STREQ((const char*)txn.data(), (const char*)expTxn.data());
EXPECT_STREQ((const char*)meta.data(), (const char*)expMeta.data());
auto [txn, meta, _, __] = retData[i];
auto [___, expTxn, expMeta] = data[i];
EXPECT_STREQ(reinterpret_cast<const char*>(txn.data()), static_cast<const char*>(expTxn.data()));
EXPECT_STREQ(reinterpret_cast<const char*>(meta.data()), static_cast<const char*>(expMeta.data()));
}
}
std::vector<ripple::uint256> keys;
@@ -677,7 +687,7 @@ TEST_F(BackendCassandraTest, Basic)
if (obj.size())
{
ASSERT_TRUE(retObj.has_value());
EXPECT_STREQ((const char*)obj.data(), (const char*)retObj->data());
EXPECT_STREQ(static_cast<const char*>(obj.data()), reinterpret_cast<const char*>(retObj->data()));
}
else
{
@@ -697,7 +707,8 @@ TEST_F(BackendCassandraTest, Basic)
if (obj.size())
{
ASSERT_TRUE(retObj.size());
EXPECT_STREQ((const char*)obj.data(), (const char*)retObj.data());
EXPECT_STREQ(
static_cast<const char*>(obj.data()), reinterpret_cast<const char*>(retObj.data()));
}
else
{
@@ -747,7 +758,7 @@ TEST_F(BackendCassandraTest, Basic)
for (auto account : rec.accounts)
{
allAccountTx[lgrInfoNext.seq][account].push_back(
std::string{(const char*)rec.txHash.data(), rec.txHash.size()});
std::string{reinterpret_cast<const char*>(rec.txHash.data()), rec.txHash.size()});
}
}
EXPECT_EQ(objs.size(), 25);
@@ -780,7 +791,7 @@ TEST_F(BackendCassandraTest, Basic)
for (auto account : rec.accounts)
{
allAccountTx[lgrInfoNext.seq][account].push_back(
std::string{(const char*)rec.txHash.data(), rec.txHash.size()});
std::string{reinterpret_cast<const char*>(rec.txHash.data()), rec.txHash.size()});
}
}
EXPECT_EQ(objs.size(), 25);
@@ -916,10 +927,10 @@ TEST_F(BackendCassandraTest, CacheIntegration)
lgrInfoNext.hash++;
lgrInfoNext.accountHash = ~lgrInfo.accountHash;
{
std::string rawHeaderBlob = ledgerInfoToBinaryString(lgrInfoNext);
std::string infoBlob = ledgerInfoToBinaryString(lgrInfoNext);
backend->startWrites();
backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob));
backend->writeLedger(lgrInfoNext, std::move(infoBlob));
ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq));
}
{
@@ -979,10 +990,10 @@ TEST_F(BackendCassandraTest, CacheIntegration)
EXPECT_TRUE(key256.parseHex(accountIndexHex));
auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield);
EXPECT_TRUE(obj);
EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data());
EXPECT_STREQ(reinterpret_cast<const char*>(obj->data()), static_cast<const char*>(accountBlob.data()));
obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield);
EXPECT_TRUE(obj);
EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data());
EXPECT_STREQ(reinterpret_cast<const char*>(obj->data()), static_cast<const char*>(accountBlob.data()));
obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield);
EXPECT_FALSE(obj);
}
@@ -1017,13 +1028,13 @@ TEST_F(BackendCassandraTest, CacheIntegration)
EXPECT_TRUE(key256.parseHex(accountIndexHex));
auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield);
EXPECT_TRUE(obj);
EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data());
EXPECT_STREQ(reinterpret_cast<const char*>(obj->data()), static_cast<const char*>(accountBlob.data()));
obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield);
EXPECT_TRUE(obj);
EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data());
EXPECT_STREQ(reinterpret_cast<const char*>(obj->data()), static_cast<const char*>(accountBlob.data()));
obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 1, yield);
EXPECT_TRUE(obj);
EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlobOld.data());
EXPECT_STREQ(reinterpret_cast<const char*>(obj->data()), static_cast<const char*>(accountBlobOld.data()));
obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield);
EXPECT_FALSE(obj);
}
@@ -1059,7 +1070,7 @@ TEST_F(BackendCassandraTest, CacheIntegration)
EXPECT_FALSE(obj);
obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 2, yield);
EXPECT_TRUE(obj);
EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlobOld.data());
EXPECT_STREQ(reinterpret_cast<const char*>(obj->data()), static_cast<const char*>(accountBlobOld.data()));
obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield);
EXPECT_FALSE(obj);
}
@@ -1072,7 +1083,7 @@ TEST_F(BackendCassandraTest, CacheIntegration)
for (auto& blob : res)
{
++key;
std::string keyStr{(const char*)key.data(), key.size()};
std::string keyStr{reinterpret_cast<const char*>(key.data()), key.size()};
blob.first = keyStr;
blob.second = std::to_string(ledgerSequence) + keyStr;
}
@@ -1154,7 +1165,7 @@ TEST_F(BackendCassandraTest, CacheIntegration)
if (obj.size())
{
ASSERT_TRUE(retObj.has_value());
EXPECT_STREQ((const char*)obj.data(), (const char*)retObj->data());
EXPECT_STREQ(static_cast<const char*>(obj.data()), reinterpret_cast<const char*>(retObj->data()));
}
else
{
@@ -1174,7 +1185,8 @@ TEST_F(BackendCassandraTest, CacheIntegration)
if (obj.size())
{
ASSERT_TRUE(retObj.size());
EXPECT_STREQ((const char*)obj.data(), (const char*)retObj.data());
EXPECT_STREQ(
static_cast<const char*>(obj.data()), reinterpret_cast<const char*>(retObj.data()));
}
else
{

View File

@@ -30,8 +30,6 @@ using namespace std;
using namespace data::cassandra;
namespace json = boost::json;
class BackendCassandraBaseTest : public NoLoggerFixture
{
protected:
@@ -88,7 +86,7 @@ protected:
int64_t idx = 1000;
for (auto const& entry : entries)
statements.push_back(insert.bind(entry, static_cast<int64_t>(idx++)));
statements.push_back(insert.bind(entry, idx++));
EXPECT_EQ(statements.size(), entries.size());
EXPECT_TRUE(handle.execute(statements));
@@ -241,9 +239,11 @@ TEST_F(BackendCassandraBaseTest, CreateTableWithStrings)
)",
5000);
auto const f1 = handle.asyncExecute(q1);
auto const rc = f1.await();
ASSERT_TRUE(rc) << rc.error();
{
auto const f1 = handle.asyncExecute(q1);
auto const rc = f1.await();
ASSERT_TRUE(rc) << rc.error();
}
std::string q2 = "INSERT INTO strings (hash, sequence) VALUES (?, ?)";
auto insert = handle.prepare(q2);
@@ -254,7 +254,7 @@ TEST_F(BackendCassandraBaseTest, CreateTableWithStrings)
int64_t idx = 1000;
for (auto const& entry : entries)
futures.push_back(handle.asyncExecute(insert, entry, static_cast<int64_t>(idx++)));
futures.push_back(handle.asyncExecute(insert, entry, idx++));
ASSERT_EQ(futures.size(), entries.size());
for (auto const& f : futures)
@@ -302,9 +302,11 @@ TEST_F(BackendCassandraBaseTest, BatchInsert)
WITH default_time_to_live = {}
)",
5000);
auto const f1 = handle.asyncExecute(q1);
auto const rc = f1.await();
ASSERT_TRUE(rc) << rc.error();
{
auto const f1 = handle.asyncExecute(q1);
auto const rc = f1.await();
ASSERT_TRUE(rc) << rc.error();
}
std::string q2 = "INSERT INTO strings (hash, sequence) VALUES (?, ?)";
auto const insert = handle.prepare(q2);
@@ -315,7 +317,7 @@ TEST_F(BackendCassandraBaseTest, BatchInsert)
int64_t idx = 1000;
for (auto const& entry : entries)
statements.push_back(insert.bind(entry, static_cast<int64_t>(idx++)));
statements.push_back(insert.bind(entry, idx++));
ASSERT_EQ(statements.size(), entries.size());
@@ -374,7 +376,7 @@ TEST_F(BackendCassandraBaseTest, BatchInsertAsync)
int64_t idx = 1000;
for (auto const& entry : entries)
statements.push_back(insert.bind(entry, static_cast<int64_t>(idx++)));
statements.push_back(insert.bind(entry, idx++));
ASSERT_EQ(statements.size(), entries.size());
fut.emplace(handle.asyncExecute(statements, [&](auto const res) {
@@ -434,8 +436,7 @@ TEST_F(BackendCassandraBaseTest, AlterTableMoveToNewTable)
{
static_assert(std::is_same_v<decltype(hash), std::string>);
static_assert(std::is_same_v<decltype(seq), int64_t>);
migrationStatements.push_back(
migrationInsert.bind(hash, static_cast<int64_t>(seq), static_cast<int64_t>(seq + 1u)));
migrationStatements.push_back(migrationInsert.bind(hash, seq, seq + 1u));
}
EXPECT_TRUE(handle.execute(migrationStatements));

View File

@@ -38,7 +38,7 @@ TEST_F(BackendCassandraExecutionStrategyTest, ReadOneInCoroutineSuccessful)
auto strat = DefaultExecutionStrategy{Settings{}, handle};
ON_CALL(handle, asyncExecute(An<FakeStatement const&>(), An<std::function<void(FakeResultOrError)>&&>()))
.WillByDefault([](auto const& statement, auto&& cb) {
.WillByDefault([](auto const& /* statement */, auto&& cb) {
cb({}); // pretend we got data
return FakeFutureWithCallback{};
});

View File

@@ -53,20 +53,11 @@ TEST_F(SettingsProviderTest, Defaults)
EXPECT_EQ(settings.requestTimeout, std::chrono::milliseconds{0});
EXPECT_EQ(settings.maxWriteRequestsOutstanding, 10'000);
EXPECT_EQ(settings.maxReadRequestsOutstanding, 100'000);
EXPECT_EQ(settings.maxConnectionsPerHost, 2);
EXPECT_EQ(settings.coreConnectionsPerHost, 2);
EXPECT_EQ(settings.maxConcurrentRequestsThreshold, (100'000 + 10'000) / 2);
EXPECT_EQ(settings.coreConnectionsPerHost, 1);
EXPECT_EQ(settings.certificate, std::nullopt);
EXPECT_EQ(settings.username, std::nullopt);
EXPECT_EQ(settings.password, std::nullopt);
EXPECT_EQ(settings.queueSizeIO, std::nullopt);
EXPECT_EQ(settings.queueSizeEvent, std::nullopt);
EXPECT_EQ(settings.writeBytesHighWatermark, std::nullopt);
EXPECT_EQ(settings.writeBytesLowWatermark, std::nullopt);
EXPECT_EQ(settings.pendingRequestsHighWatermark, std::nullopt);
EXPECT_EQ(settings.pendingRequestsLowWatermark, std::nullopt);
EXPECT_EQ(settings.maxRequestsPerFlush, std::nullopt);
EXPECT_EQ(settings.maxConcurrentCreation, std::nullopt);
auto const* cp = std::get_if<Settings::ContactPoints>(&settings.connectionInfo);
ASSERT_TRUE(cp != nullptr);
@@ -103,69 +94,16 @@ TEST_F(SettingsProviderTest, SimpleConfig)
EXPECT_EQ(provider.getTablePrefix(), "prefix");
}
TEST_F(SettingsProviderTest, DriverOptionCalculation)
{
Config cfg{json::parse(R"({
"contact_points": "123.123.123.123",
"max_write_requests_outstanding": 100,
"max_read_requests_outstanding": 200
})")};
SettingsProvider provider{cfg};
auto const settings = provider.getSettings();
EXPECT_EQ(settings.maxReadRequestsOutstanding, 200);
EXPECT_EQ(settings.maxWriteRequestsOutstanding, 100);
EXPECT_EQ(settings.maxConnectionsPerHost, 2);
EXPECT_EQ(settings.coreConnectionsPerHost, 2);
EXPECT_EQ(settings.maxConcurrentRequestsThreshold, 150); // calculated from above
}
TEST_F(SettingsProviderTest, DriverOptionSecifiedMaxConcurrentRequestsThreshold)
{
Config cfg{json::parse(R"({
"contact_points": "123.123.123.123",
"max_write_requests_outstanding": 100,
"max_read_requests_outstanding": 200,
"max_connections_per_host": 5,
"core_connections_per_host": 4,
"max_concurrent_requests_threshold": 1234
})")};
SettingsProvider provider{cfg};
auto const settings = provider.getSettings();
EXPECT_EQ(settings.maxReadRequestsOutstanding, 200);
EXPECT_EQ(settings.maxWriteRequestsOutstanding, 100);
EXPECT_EQ(settings.maxConnectionsPerHost, 5);
EXPECT_EQ(settings.coreConnectionsPerHost, 4);
EXPECT_EQ(settings.maxConcurrentRequestsThreshold, 1234);
}
TEST_F(SettingsProviderTest, DriverOptionalOptionsSpecified)
{
Config cfg{json::parse(R"({
"contact_points": "123.123.123.123",
"queue_size_event": 1,
"queue_size_io": 2,
"write_bytes_high_water_mark": 3,
"write_bytes_low_water_mark": 4,
"pending_requests_high_water_mark": 5,
"pending_requests_low_water_mark": 6,
"max_requests_per_flush": 7,
"max_concurrent_creation": 8
"queue_size_io": 2
})")};
SettingsProvider provider{cfg};
auto const settings = provider.getSettings();
EXPECT_EQ(settings.queueSizeEvent, 1);
EXPECT_EQ(settings.queueSizeIO, 2);
EXPECT_EQ(settings.writeBytesHighWatermark, 3);
EXPECT_EQ(settings.writeBytesLowWatermark, 4);
EXPECT_EQ(settings.pendingRequestsHighWatermark, 5);
EXPECT_EQ(settings.pendingRequestsLowWatermark, 6);
EXPECT_EQ(settings.maxRequestsPerFlush, 7);
EXPECT_EQ(settings.maxConcurrentCreation, 8);
}
TEST_F(SettingsProviderTest, SecureBundleConfig)

View File

@@ -115,8 +115,7 @@ struct FakeRetryPolicy
{
FakeRetryPolicy(boost::asio::io_context&){}; // required by concept
std::chrono::milliseconds
calculateDelay(uint32_t attempt)
std::chrono::milliseconds calculateDelay(uint32_t /* attempt */)
{
return std::chrono::milliseconds{1};
}

View File

@@ -0,0 +1,50 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <util/FakeAmendmentBlockAction.h>
#include <util/Fixtures.h>
#include <etl/impl/AmendmentBlock.h>
#include <gtest/gtest.h>
using namespace testing;
using namespace etl;
class AmendmentBlockHandlerTest : public NoLoggerFixture
{
protected:
using AmendmentBlockHandlerType = detail::AmendmentBlockHandler<FakeAmendmentBlockAction>;
boost::asio::io_context ioc_;
};
TEST_F(AmendmentBlockHandlerTest, CallToOnAmendmentBlockSetsStateAndRepeatedlyCallsAction)
{
std::size_t callCount = 0;
SystemState state;
AmendmentBlockHandlerType handler{ioc_, state, std::chrono::nanoseconds{1}, {std::ref(callCount)}};
EXPECT_FALSE(state.isAmendmentBlocked);
handler.onAmendmentBlock();
EXPECT_TRUE(state.isAmendmentBlocked);
ioc_.run_for(std::chrono::milliseconds{1});
EXPECT_TRUE(callCount >= 10);
}

View File

@@ -68,8 +68,7 @@ public:
TEST_F(ETLExtractorTest, StopsWhenCurrentSequenceExceedsFinishSequence)
{
auto const rawNetworkValidatedLedgersPtr =
static_cast<MockNetworkValidatedLedgers*>(networkValidatedLedgers_.get());
auto const rawNetworkValidatedLedgersPtr = networkValidatedLedgers_.get();
ON_CALL(*rawNetworkValidatedLedgersPtr, waitUntilValidatedByNetwork).WillByDefault(Return(true));
EXPECT_CALL(*rawNetworkValidatedLedgersPtr, waitUntilValidatedByNetwork).Times(3);
@@ -107,8 +106,7 @@ TEST_F(ETLExtractorTest, StopsOnServerShutdown)
// stop extractor thread if fetcheResponse is empty
TEST_F(ETLExtractorTest, StopsIfFetchIsUnsuccessful)
{
auto const rawNetworkValidatedLedgersPtr =
static_cast<MockNetworkValidatedLedgers*>(networkValidatedLedgers_.get());
auto const rawNetworkValidatedLedgersPtr = networkValidatedLedgers_.get();
ON_CALL(*rawNetworkValidatedLedgersPtr, waitUntilValidatedByNetwork).WillByDefault(Return(true));
EXPECT_CALL(*rawNetworkValidatedLedgersPtr, waitUntilValidatedByNetwork).Times(1);
@@ -123,8 +121,7 @@ TEST_F(ETLExtractorTest, StopsIfFetchIsUnsuccessful)
TEST_F(ETLExtractorTest, StopsIfWaitingUntilValidatedByNetworkTimesOut)
{
auto const rawNetworkValidatedLedgersPtr =
static_cast<MockNetworkValidatedLedgers*>(networkValidatedLedgers_.get());
auto const rawNetworkValidatedLedgersPtr = networkValidatedLedgers_.get();
// note that in actual clio code we don't return false unless a timeout is specified and exceeded
ON_CALL(*rawNetworkValidatedLedgersPtr, waitUntilValidatedByNetwork).WillByDefault(Return(false));
@@ -137,8 +134,7 @@ TEST_F(ETLExtractorTest, StopsIfWaitingUntilValidatedByNetworkTimesOut)
TEST_F(ETLExtractorTest, SendsCorrectResponseToDataPipe)
{
auto const rawNetworkValidatedLedgersPtr =
static_cast<MockNetworkValidatedLedgers*>(networkValidatedLedgers_.get());
auto const rawNetworkValidatedLedgersPtr = networkValidatedLedgers_.get();
ON_CALL(*rawNetworkValidatedLedgersPtr, waitUntilValidatedByNetwork).WillByDefault(Return(true));
EXPECT_CALL(*rawNetworkValidatedLedgersPtr, waitUntilValidatedByNetwork).Times(1);

View File

@@ -0,0 +1,304 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <etl/impl/LedgerPublisher.h>
#include <util/Fixtures.h>
#include <util/MockCache.h>
#include <util/TestObject.h>
#include <fmt/core.h>
#include <gtest/gtest.h>
#include <chrono>
using namespace testing;
using namespace etl;
namespace json = boost::json;
using namespace std::chrono;
static auto constexpr ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn";
static auto constexpr ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun";
static auto constexpr LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652";
static auto constexpr SEQ = 30;
static auto constexpr AGE = 800;
class ETLLedgerPublisherTest : public MockBackendTest, public SyncAsioContextTest, public MockSubscriptionManagerTest
{
void
SetUp() override
{
MockBackendTest::SetUp();
SyncAsioContextTest::SetUp();
MockSubscriptionManagerTest::SetUp();
}
void
TearDown() override
{
MockSubscriptionManagerTest::TearDown();
SyncAsioContextTest::TearDown();
MockBackendTest::TearDown();
}
protected:
util::Config cfg{json::parse("{}")};
MockCache mockCache;
};
TEST_F(ETLLedgerPublisherTest, PublishLedgerInfoIsWritingFalse)
{
SystemState dummyState;
dummyState.isWriting = false;
auto const dummyLedgerInfo = CreateLedgerInfo(LEDGERHASH, SEQ, AGE);
detail::LedgerPublisher publisher(ctx, mockBackendPtr, mockCache, mockSubscriptionManagerPtr, dummyState);
publisher.publish(dummyLedgerInfo);
MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
ASSERT_NE(rawBackendPtr, nullptr);
ON_CALL(*rawBackendPtr, fetchLedgerDiff(SEQ, _)).WillByDefault(Return(std::vector<LedgerObject>{}));
EXPECT_CALL(*rawBackendPtr, fetchLedgerDiff(SEQ, _)).Times(1);
// setLastPublishedSequence not in strand, should verify before run
EXPECT_TRUE(publisher.getLastPublishedSequence());
EXPECT_EQ(publisher.getLastPublishedSequence().value(), SEQ);
EXPECT_CALL(mockCache, updateImp).Times(1);
ctx.run();
EXPECT_TRUE(rawBackendPtr->fetchLedgerRange());
EXPECT_EQ(rawBackendPtr->fetchLedgerRange().value().minSequence, SEQ);
EXPECT_EQ(rawBackendPtr->fetchLedgerRange().value().maxSequence, SEQ);
}
TEST_F(ETLLedgerPublisherTest, PublishLedgerInfoIsWritingTrue)
{
    // When ETL is writing, publish() must not compute a ledger diff and must
    // not advance the backend ledger range; only lastPublishedSequence is set.
    SystemState dummyState;
    dummyState.isWriting = true;

    auto const dummyLedgerInfo = CreateLedgerInfo(LEDGERHASH, SEQ, AGE);
    detail::LedgerPublisher publisher(ctx, mockBackendPtr, mockCache, mockSubscriptionManagerPtr, dummyState);
    publisher.publish(dummyLedgerInfo);

    MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
    ASSERT_NE(rawBackendPtr, nullptr);  // guard the cast before dereferencing (sibling tests already do this)
    EXPECT_CALL(*rawBackendPtr, fetchLedgerDiff(_, _)).Times(0);

    // setLastPublishedSequence not in strand, should verify before run
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), SEQ);

    ctx.run();
    EXPECT_FALSE(rawBackendPtr->fetchLedgerRange());
}
TEST_F(ETLLedgerPublisherTest, PublishLedgerInfoInRange)
{
    // A fresh (age 0) ledger inside the known range is fully published to
    // subscribers: ledger header, book changes and each transaction.
    SystemState dummyState;
    dummyState.isWriting = true;

    auto const dummyLedgerInfo = CreateLedgerInfo(LEDGERHASH, SEQ, 0);  // age is 0
    detail::LedgerPublisher publisher(ctx, mockBackendPtr, mockCache, mockSubscriptionManagerPtr, dummyState);
    mockBackendPtr->updateRange(SEQ - 1);
    mockBackendPtr->updateRange(SEQ);
    publisher.publish(dummyLedgerInfo);

    MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
    ASSERT_NE(rawBackendPtr, nullptr);  // guard the cast before setting expectations
    EXPECT_CALL(*rawBackendPtr, fetchLedgerDiff(_, _)).Times(0);

    // mock fetch fee
    EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
    ON_CALL(*rawBackendPtr, doFetchLedgerObject(ripple::keylet::fees().key, SEQ, _))
        .WillByDefault(Return(CreateFeeSettingBlob(1, 2, 3, 4, 0)));

    // mock fetch transactions
    EXPECT_CALL(*rawBackendPtr, fetchAllTransactionsInLedger).Times(1);
    TransactionAndMetadata t1;
    t1.transaction = CreatePaymentTransactionObject(ACCOUNT, ACCOUNT2, 100, 3, SEQ).getSerializer().peekData();
    t1.metadata = CreatePaymentTransactionMetaObject(ACCOUNT, ACCOUNT2, 110, 30).getSerializer().peekData();
    t1.ledgerSequence = SEQ;
    ON_CALL(*rawBackendPtr, fetchAllTransactionsInLedger(SEQ, _))
        .WillByDefault(Return(std::vector<TransactionAndMetadata>{t1}));

    // setLastPublishedSequence not in strand, should verify before run
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), SEQ);

    MockSubscriptionManager* rawSubscriptionManagerPtr =
        dynamic_cast<MockSubscriptionManager*>(mockSubscriptionManagerPtr.get());
    ASSERT_NE(rawSubscriptionManagerPtr, nullptr);
    EXPECT_CALL(*rawSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", SEQ - 1, SEQ), 1)).Times(1);
    EXPECT_CALL(*rawSubscriptionManagerPtr, pubBookChanges).Times(1);
    // mock 1 transaction
    EXPECT_CALL(*rawSubscriptionManagerPtr, pubTransaction).Times(1);

    ctx.run();
    // last publish time should be set
    EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}
TEST_F(ETLLedgerPublisherTest, PublishLedgerInfoCloseTimeGreaterThanNow)
{
    // Even when the ledger's close time lies in the future, a ledger inside
    // the known range must still be published to all subscribers.
    SystemState dummyState;
    dummyState.isWriting = true;

    ripple::LedgerInfo dummyLedgerInfo = CreateLedgerInfo(LEDGERHASH, SEQ, 0);
    // Build a close time 10 seconds in the future, expressed in the ripple
    // network-clock epoch.
    auto const nowPlus10 = system_clock::now() + seconds(10);
    auto const closeTime = duration_cast<seconds>(nowPlus10.time_since_epoch()).count() - rippleEpochStart;
    dummyLedgerInfo.closeTime = ripple::NetClock::time_point{seconds{closeTime}};

    mockBackendPtr->updateRange(SEQ - 1);
    mockBackendPtr->updateRange(SEQ);
    detail::LedgerPublisher publisher(ctx, mockBackendPtr, mockCache, mockSubscriptionManagerPtr, dummyState);
    publisher.publish(dummyLedgerInfo);

    MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
    ASSERT_NE(rawBackendPtr, nullptr);  // guard the cast before setting expectations
    EXPECT_CALL(*rawBackendPtr, fetchLedgerDiff(_, _)).Times(0);

    // mock fetch fee
    EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
    ON_CALL(*rawBackendPtr, doFetchLedgerObject(ripple::keylet::fees().key, SEQ, _))
        .WillByDefault(Return(CreateFeeSettingBlob(1, 2, 3, 4, 0)));

    // mock fetch transactions
    EXPECT_CALL(*rawBackendPtr, fetchAllTransactionsInLedger).Times(1);
    TransactionAndMetadata t1;
    t1.transaction = CreatePaymentTransactionObject(ACCOUNT, ACCOUNT2, 100, 3, SEQ).getSerializer().peekData();
    t1.metadata = CreatePaymentTransactionMetaObject(ACCOUNT, ACCOUNT2, 110, 30).getSerializer().peekData();
    t1.ledgerSequence = SEQ;
    ON_CALL(*rawBackendPtr, fetchAllTransactionsInLedger(SEQ, _))
        .WillByDefault(Return(std::vector<TransactionAndMetadata>{t1}));

    // setLastPublishedSequence not in strand, should verify before run
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), SEQ);

    MockSubscriptionManager* rawSubscriptionManagerPtr =
        dynamic_cast<MockSubscriptionManager*>(mockSubscriptionManagerPtr.get());
    ASSERT_NE(rawSubscriptionManagerPtr, nullptr);
    EXPECT_CALL(*rawSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", SEQ - 1, SEQ), 1)).Times(1);
    EXPECT_CALL(*rawSubscriptionManagerPtr, pubBookChanges).Times(1);
    // mock 1 transaction
    EXPECT_CALL(*rawSubscriptionManagerPtr, pubTransaction).Times(1);

    ctx.run();
    // last publish time should be set
    EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}
TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqStopIsTrue)
{
    // A stopping system state must make publish-by-sequence bail out
    // immediately and report failure.
    SystemState stoppingState;
    stoppingState.isStopping = true;

    detail::LedgerPublisher publisher(ctx, mockBackendPtr, mockCache, mockSubscriptionManagerPtr, stoppingState);
    EXPECT_FALSE(publisher.publish(SEQ, {}));
}
TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqMaxAttampt)
{
    // publish(seq, maxAttempts) gives up after the given number of attempts
    // when the backend range never reaches the requested sequence.
    SystemState dummyState;
    dummyState.isStopping = false;
    detail::LedgerPublisher publisher(ctx, mockBackendPtr, mockCache, mockSubscriptionManagerPtr, dummyState);

    static auto constexpr MAX_ATTEMPT = 2;

    MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
    ASSERT_NE(rawBackendPtr, nullptr);  // guard the cast before dereferencing
    EXPECT_CALL(*rawBackendPtr, hardFetchLedgerRange).Times(MAX_ATTEMPT);
    // Range tops out at SEQ - 1, so SEQ is never available.
    LedgerRange const range{.minSequence = SEQ - 1, .maxSequence = SEQ - 1};
    ON_CALL(*rawBackendPtr, hardFetchLedgerRange(_)).WillByDefault(Return(range));
    EXPECT_FALSE(publisher.publish(SEQ, MAX_ATTEMPT));
}
TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqStopIsFalse)
{
    // With a running system and the requested sequence available in the
    // backend, the ledger is fetched, the cache updated and publish succeeds.
    SystemState dummyState;
    dummyState.isStopping = false;
    detail::LedgerPublisher publisher(ctx, mockBackendPtr, mockCache, mockSubscriptionManagerPtr, dummyState);

    MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
    ASSERT_NE(rawBackendPtr, nullptr);  // guard the cast before dereferencing
    LedgerRange const range{.minSequence = SEQ, .maxSequence = SEQ};
    ON_CALL(*rawBackendPtr, hardFetchLedgerRange(_)).WillByDefault(Return(range));
    EXPECT_CALL(*rawBackendPtr, hardFetchLedgerRange).Times(1);

    auto const dummyLedgerInfo = CreateLedgerInfo(LEDGERHASH, SEQ, AGE);
    ON_CALL(*rawBackendPtr, fetchLedgerBySequence(SEQ, _)).WillByDefault(Return(dummyLedgerInfo));
    EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1);
    ON_CALL(*rawBackendPtr, fetchLedgerDiff(SEQ, _)).WillByDefault(Return(std::vector<LedgerObject>{}));
    EXPECT_CALL(*rawBackendPtr, fetchLedgerDiff(SEQ, _)).Times(1);
    EXPECT_CALL(mockCache, updateImp).Times(1);

    EXPECT_TRUE(publisher.publish(SEQ, {}));
    ctx.run();
}
TEST_F(ETLLedgerPublisherTest, PublishMultipleTxInOrder)
{
    // Transactions must be published in descending transaction-index order,
    // regardless of the order the backend returns them in.
    SystemState dummyState;
    dummyState.isWriting = true;

    auto const dummyLedgerInfo = CreateLedgerInfo(LEDGERHASH, SEQ, 0);  // age is 0
    detail::LedgerPublisher publisher(ctx, mockBackendPtr, mockCache, mockSubscriptionManagerPtr, dummyState);
    mockBackendPtr->updateRange(SEQ - 1);
    mockBackendPtr->updateRange(SEQ);
    publisher.publish(dummyLedgerInfo);

    MockBackend* rawBackendPtr = dynamic_cast<MockBackend*>(mockBackendPtr.get());
    ASSERT_NE(rawBackendPtr, nullptr);  // guard the cast before setting expectations
    EXPECT_CALL(*rawBackendPtr, fetchLedgerDiff(_, _)).Times(0);

    // mock fetch fee
    EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
    ON_CALL(*rawBackendPtr, doFetchLedgerObject(ripple::keylet::fees().key, SEQ, _))
        .WillByDefault(Return(CreateFeeSettingBlob(1, 2, 3, 4, 0)));

    // mock fetch transactions
    EXPECT_CALL(*rawBackendPtr, fetchAllTransactionsInLedger).Times(1);
    // t1 index > t2 index
    TransactionAndMetadata t1;
    t1.transaction = CreatePaymentTransactionObject(ACCOUNT, ACCOUNT2, 100, 3, SEQ).getSerializer().peekData();
    t1.metadata = CreatePaymentTransactionMetaObject(ACCOUNT, ACCOUNT2, 110, 30, 2).getSerializer().peekData();
    t1.ledgerSequence = SEQ;
    t1.date = 1;
    TransactionAndMetadata t2;
    t2.transaction = CreatePaymentTransactionObject(ACCOUNT, ACCOUNT2, 100, 3, SEQ).getSerializer().peekData();
    t2.metadata = CreatePaymentTransactionMetaObject(ACCOUNT, ACCOUNT2, 110, 30, 1).getSerializer().peekData();
    t2.ledgerSequence = SEQ;
    t2.date = 2;
    ON_CALL(*rawBackendPtr, fetchAllTransactionsInLedger(SEQ, _))
        .WillByDefault(Return(std::vector<TransactionAndMetadata>{t1, t2}));

    // setLastPublishedSequence not in strand, should verify before run
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), SEQ);

    MockSubscriptionManager* rawSubscriptionManagerPtr =
        dynamic_cast<MockSubscriptionManager*>(mockSubscriptionManagerPtr.get());
    ASSERT_NE(rawSubscriptionManagerPtr, nullptr);
    EXPECT_CALL(*rawSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", SEQ - 1, SEQ), 2)).Times(1);
    EXPECT_CALL(*rawSubscriptionManagerPtr, pubBookChanges).Times(1);
    // should call pubTransaction t2 first (greater tx index)
    Sequence const s;
    EXPECT_CALL(*rawSubscriptionManagerPtr, pubTransaction(t2, _)).InSequence(s);
    EXPECT_CALL(*rawSubscriptionManagerPtr, pubTransaction(t1, _)).InSequence(s);

    ctx.run();
    // last publish time should be set
    EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}

View File

@@ -20,6 +20,7 @@
#include <etl/impl/Transformer.h>
#include <util/FakeFetchResponse.h>
#include <util/Fixtures.h>
#include <util/MockAmendmentBlockHandler.h>
#include <util/MockExtractionDataPipe.h>
#include <util/MockLedgerLoader.h>
#include <util/MockLedgerPublisher.h>
@@ -47,11 +48,14 @@ protected:
using ExtractionDataPipeType = MockExtractionDataPipe;
using LedgerLoaderType = MockLedgerLoader;
using LedgerPublisherType = MockLedgerPublisher;
using TransformerType = etl::detail::Transformer<ExtractionDataPipeType, LedgerLoaderType, LedgerPublisherType>;
using AmendmentBlockHandlerType = MockAmendmentBlockHandler;
using TransformerType = etl::detail::
Transformer<ExtractionDataPipeType, LedgerLoaderType, LedgerPublisherType, AmendmentBlockHandlerType>;
ExtractionDataPipeType dataPipe_;
LedgerLoaderType ledgerLoader_;
LedgerPublisherType ledgerPublisher_;
AmendmentBlockHandlerType amendmentBlockHandler_;
SystemState state_;
std::unique_ptr<TransformerType> transformer_;
@@ -82,8 +86,8 @@ TEST_F(ETLTransformerTest, StopsOnWriteConflict)
EXPECT_CALL(dataPipe_, popNext).Times(0);
EXPECT_CALL(ledgerPublisher_, publish(_)).Times(0);
transformer_ =
std::make_unique<TransformerType>(dataPipe_, mockBackendPtr, ledgerLoader_, ledgerPublisher_, 0, state_);
transformer_ = std::make_unique<TransformerType>(
dataPipe_, mockBackendPtr, ledgerLoader_, ledgerPublisher_, amendmentBlockHandler_, 0, state_);
transformer_->waitTillFinished(); // explicitly joins the thread
}
@@ -114,8 +118,8 @@ TEST_F(ETLTransformerTest, StopsOnEmptyFetchResponse)
EXPECT_CALL(*rawBackendPtr, doFinishWrites).Times(AtLeast(1));
EXPECT_CALL(ledgerPublisher_, publish(_)).Times(AtLeast(1));
transformer_ =
std::make_unique<TransformerType>(dataPipe_, mockBackendPtr, ledgerLoader_, ledgerPublisher_, 0, state_);
transformer_ = std::make_unique<TransformerType>(
dataPipe_, mockBackendPtr, ledgerLoader_, ledgerPublisher_, amendmentBlockHandler_, 0, state_);
// after 10ms we start spitting out empty responses which means the extractor is finishing up
// this is normally combined with stopping the entire thing by setting the isStopping flag.
@@ -147,6 +151,8 @@ TEST_F(ETLTransformerTest, DoesNotPublishIfCanNotBuildNextLedger)
// should not call publish
EXPECT_CALL(ledgerPublisher_, publish(_)).Times(0);
transformer_ =
std::make_unique<TransformerType>(dataPipe_, mockBackendPtr, ledgerLoader_, ledgerPublisher_, 0, state_);
transformer_ = std::make_unique<TransformerType>(
dataPipe_, mockBackendPtr, ledgerLoader_, ledgerPublisher_, amendmentBlockHandler_, 0, state_);
}
// TODO: implement tests for amendment block. requires more refactoring

View File

@@ -346,7 +346,7 @@ TEST_F(RPCBaseTest, CustomValidator)
{
// clang-format off
auto customFormatCheck = CustomValidator{
[](json::value const& value, std::string_view key) -> MaybeError {
[](json::value const& value, std::string_view /* key */) -> MaybeError {
return value.as_string().size() == 34 ?
MaybeError{} : Error{rpc::Status{"Uh oh"}};
}
@@ -568,3 +568,25 @@ TEST_F(RPCBaseTest, ClampingModifier)
ASSERT_TRUE(spec.process(passingInput3));
ASSERT_EQ(passingInput3.at("amount").as_uint64(), 20u); // clamped
}
// The ToLower modifier lower-cases the target string field in place and is a
// no-op when the field is absent, already lower case, or empty.
TEST_F(RPCBaseTest, ToLowerModifier)
{
    auto spec = RpcSpec{
        {"str", ToLower{}},
    };
    // mixed case is lower-cased in place
    auto passingInput = json::parse(R"({ "str": "TesT" })");
    ASSERT_TRUE(spec.process(passingInput));
    ASSERT_EQ(passingInput.at("str").as_string(), "test");
    auto passingInput2 = json::parse(R"({ "str2": "TesT" })");
    ASSERT_TRUE(spec.process(passingInput2)); // no str no problem
    // already lower case stays untouched
    auto passingInput3 = json::parse(R"({ "str": "already lower case" })");
    ASSERT_TRUE(spec.process(passingInput3));
    ASSERT_EQ(passingInput3.at("str").as_string(), "already lower case");
    auto passingInput4 = json::parse(R"({ "str": "" })");
    ASSERT_TRUE(spec.process(passingInput4)); // empty str no problem
    ASSERT_EQ(passingInput4.at("str").as_string(), "");
}

View File

@@ -30,6 +30,7 @@
using namespace rpc;
using namespace testing;
namespace json = boost::json;
constexpr static auto CLIENT_IP = "127.0.0.1";
@@ -51,10 +52,10 @@ protected:
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsFalseIfClioOnly)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "test";
auto const params = boost::json::parse("{}");
auto const params = json::parse("{}");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(true));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -71,10 +72,10 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsFalseIfClioOnly)
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfProxied)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "submit";
auto const params = boost::json::parse("{}");
auto const params = json::parse("{}");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -91,10 +92,10 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfProxied)
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfCurrentLedgerSpecified)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "anymethod";
auto const params = boost::json::parse(R"({"ledger_index": "current"})");
auto const params = json::parse(R"({"ledger_index": "current"})");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -111,10 +112,10 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfCurrentLedgerSpecified)
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfClosedLedgerSpecified)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "anymethod";
auto const params = boost::json::parse(R"({"ledger_index": "closed"})");
auto const params = json::parse(R"({"ledger_index": "closed"})");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -131,10 +132,10 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfClosedLedgerSpecified)
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfAccountInfoWithQueueSpecified)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "account_info";
auto const params = boost::json::parse(R"({"queue": true})");
auto const params = json::parse(R"({"queue": true})");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -151,10 +152,10 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfAccountInfoWithQueueSpe
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfLedgerWithQueueSpecified)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "ledger";
auto const params = boost::json::parse(R"({"queue": true})");
auto const params = json::parse(R"({"queue": true})");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -171,10 +172,10 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfLedgerWithQueueSpecifie
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfLedgerWithFullSpecified)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "ledger";
auto const params = boost::json::parse(R"({"full": true})");
auto const params = json::parse(R"({"full": true})");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -191,10 +192,10 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfLedgerWithFullSpecified
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfLedgerWithAccountsSpecified)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "ledger";
auto const params = boost::json::parse(R"({"accounts": true})");
auto const params = json::parse(R"({"accounts": true})");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -211,10 +212,10 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfLedgerWithAccountsSpeci
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsFalseIfAccountInfoQueueIsFalse)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "account_info";
auto const params = boost::json::parse(R"({"queue": false})");
auto const params = json::parse(R"({"queue": false})");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -231,10 +232,10 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsFalseIfAccountInfoQueueIsFals
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsFalseIfLedgerQueueIsFalse)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "ledger";
auto const params = boost::json::parse(R"({"queue": false})");
auto const params = json::parse(R"({"queue": false})");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -251,10 +252,10 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsFalseIfLedgerQueueIsFalse)
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsFalseIfLedgerFullIsFalse)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "ledger";
auto const params = boost::json::parse(R"({"full": false})");
auto const params = json::parse(R"({"full": false})");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -271,10 +272,10 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsFalseIfLedgerFullIsFalse)
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsFalseIfLedgerAccountsIsFalse)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "ledger";
auto const params = boost::json::parse(R"({"accounts": false})");
auto const params = json::parse(R"({"accounts": false})");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -289,11 +290,15 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsFalseIfLedgerAccountsIsFalse)
});
}
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfAPIVersionIsV1)
TEST_F(RPCForwardingProxyTest, ShouldNotForwardReturnsTrueIfAPIVersionIsV1)
{
auto const apiVersion = 1u;
auto const method = "api_version_check";
auto const params = boost::json::parse("{}");
auto const params = json::parse("{}");
auto const rawHandlerProviderPtr = handlerProvider.get();
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
runSpawn([&](auto yield) {
auto const range = mockBackendPtr->fetchLedgerRange();
@@ -301,16 +306,16 @@ TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsTrueIfAPIVersionIsV1)
web::Context(yield, method, apiVersion, params.as_object(), nullptr, tagFactory, *range, CLIENT_IP);
auto const res = proxy.shouldForward(ctx);
ASSERT_TRUE(res);
ASSERT_FALSE(res);
});
}
TEST_F(RPCForwardingProxyTest, ShouldForwardReturnsFalseIfAPIVersionIsV2)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const apiVersion = 2u;
auto const method = "api_version_check";
auto const params = boost::json::parse("{}");
auto const params = json::parse("{}");
ON_CALL(*rawHandlerProviderPtr, isClioOnly(_)).WillByDefault(Return(false));
EXPECT_CALL(*rawHandlerProviderPtr, isClioOnly(method)).Times(1);
@@ -329,7 +334,7 @@ TEST_F(RPCForwardingProxyTest, ShouldNeverForwardSubscribe)
{
auto const apiVersion = 1u;
auto const method = "subscribe";
auto const params = boost::json::parse("{}");
auto const params = json::parse("{}");
runSpawn([&](auto yield) {
auto const range = mockBackendPtr->fetchLedgerRange();
@@ -345,7 +350,7 @@ TEST_F(RPCForwardingProxyTest, ShouldNeverForwardUnsubscribe)
{
auto const apiVersion = 1u;
auto const method = "unsubscribe";
auto const params = boost::json::parse("{}");
auto const params = json::parse("{}");
runSpawn([&](auto yield) {
auto const range = mockBackendPtr->fetchLedgerRange();
@@ -359,14 +364,14 @@ TEST_F(RPCForwardingProxyTest, ShouldNeverForwardUnsubscribe)
TEST_F(RPCForwardingProxyTest, ForwardCallsBalancerWithCorrectParams)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawBalancerPtr = static_cast<MockLoadBalancer*>(loadBalancer.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const rawBalancerPtr = loadBalancer.get();
auto const apiVersion = 2u;
auto const method = "submit";
auto const params = boost::json::parse(R"({"test": true})");
auto const forwarded = boost::json::parse(R"({"test": true, "command": "submit"})");
auto const params = json::parse(R"({"test": true})");
auto const forwarded = json::parse(R"({"test": true, "command": "submit"})");
ON_CALL(*rawBalancerPtr, forwardToRippled).WillByDefault(Return(std::make_optional<boost::json::object>()));
ON_CALL(*rawBalancerPtr, forwardToRippled).WillByDefault(Return(std::make_optional<json::object>()));
EXPECT_CALL(*rawBalancerPtr, forwardToRippled(forwarded.as_object(), CLIENT_IP, _)).Times(1);
ON_CALL(*rawHandlerProviderPtr, contains).WillByDefault(Return(true));
@@ -382,19 +387,19 @@ TEST_F(RPCForwardingProxyTest, ForwardCallsBalancerWithCorrectParams)
auto const res = proxy.forward(ctx);
auto const data = std::get_if<boost::json::object>(&res);
auto const data = std::get_if<json::object>(&res);
EXPECT_TRUE(data != nullptr);
});
}
TEST_F(RPCForwardingProxyTest, ForwardingFailYieldsErrorStatus)
{
auto const rawHandlerProviderPtr = static_cast<MockHandlerProvider*>(handlerProvider.get());
auto const rawBalancerPtr = static_cast<MockLoadBalancer*>(loadBalancer.get());
auto const rawHandlerProviderPtr = handlerProvider.get();
auto const rawBalancerPtr = loadBalancer.get();
auto const apiVersion = 2u;
auto const method = "submit";
auto const params = boost::json::parse(R"({"test": true})");
auto const forwarded = boost::json::parse(R"({"test": true, "command": "submit"})");
auto const params = json::parse(R"({"test": true})");
auto const forwarded = json::parse(R"({"test": true, "command": "submit"})");
ON_CALL(*rawBalancerPtr, forwardToRippled).WillByDefault(Return(std::nullopt));
EXPECT_CALL(*rawBalancerPtr, forwardToRippled(forwarded.as_object(), CLIENT_IP, _)).Times(1);

View File

@@ -0,0 +1,81 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <rpc/common/JsonBool.h>
#include <boost/json/parse.hpp>
#include <gtest/gtest.h>
using namespace rpc;
namespace json = boost::json;
using namespace testing;
// One parametrized case: a display name for the generated test, the JSON
// payload to parse, and the bool the "test_bool" field is expected to
// convert to via rpc::JsonBool.
struct JsonBoolTestsCaseBundle
{
    std::string testName;
    std::string json;
    bool expectedBool;
};
// Parametrized fixture checking JsonBool conversion from all JSON value kinds.
class JsonBoolTests : public TestWithParam<JsonBoolTestsCaseBundle>
{
public:
    struct NameGenerator
    {
        template <class ParamType>
        std::string
        operator()(const testing::TestParamInfo<ParamType>& info) const
        {
            // Read the name directly; the original static_cast copied the
            // whole bundle just to access one field.
            return info.param.testName;
        }
    };

    // Cases cover the rippled-compatible truthiness rules exercised below,
    // including non-empty strings (even "false") converting to true.
    static auto
    generateTestValuesForParametersTest()
    {
        return std::vector<JsonBoolTestsCaseBundle>{
            {"NullValue", R"({ "test_bool": null })", false},
            {"BoolTrueValue", R"({ "test_bool": true })", true},
            {"BoolFalseValue", R"({ "test_bool": false })", false},
            {"IntTrueValue", R"({ "test_bool": 1 })", true},
            {"IntFalseValue", R"({ "test_bool": 0 })", false},
            {"DoubleTrueValue", R"({ "test_bool": 0.1 })", true},
            {"DoubleFalseValue", R"({ "test_bool": 0.0 })", false},
            {"StringTrueValue", R"({ "test_bool": "true" })", true},
            {"StringFalseValue", R"({ "test_bool": "false" })", true},
            {"ArrayTrueValue", R"({ "test_bool": [0] })", true},
            {"ArrayFalseValue", R"({ "test_bool": [] })", false},
            {"ObjectTrueValue", R"({ "test_bool": { "key": null } })", true},
            {"ObjectFalseValue", R"({ "test_bool": {} })", false}};
    }
};
// INSTANTIATE_TEST_CASE_P is deprecated in modern GoogleTest; use the
// _SUITE_ spelling.
INSTANTIATE_TEST_SUITE_P(
    JsonBoolCheckGroup,
    JsonBoolTests,
    ValuesIn(JsonBoolTests::generateTestValuesForParametersTest()),
    JsonBoolTests::NameGenerator{});
TEST_P(JsonBoolTests, Parse)
{
    // Parse the bundled JSON and verify JsonBool conversion of "test_bool".
    // Bind by reference: GetParam() returns a const&, so copying the bundle
    // (strings included) is unnecessary.
    auto const& testBundle = GetParam();
    auto const jv = json::parse(testBundle.json).as_object();
    ASSERT_TRUE(jv.contains("test_bool"));
    EXPECT_EQ(testBundle.expectedBool, value_to<JsonBool>(jv.at("test_bool")).value);
}

View File

@@ -55,7 +55,7 @@ TEST_F(RPCWorkQueueTest, WhitelistedExecutionCountAddsUp)
for (auto i = 0u; i < TOTAL; ++i)
{
queue.postCoro(
[&executeCount, &sem, &mtx](auto yield) {
[&executeCount, &sem, &mtx](auto /* yield */) {
std::lock_guard lk(mtx);
if (++executeCount; executeCount == TOTAL)
sem.release(); // 1) note we are still in user function
@@ -91,7 +91,7 @@ TEST_F(RPCWorkQueueTest, NonWhitelistedPreventSchedulingAtQueueLimitExceeded)
for (auto i = 0u; i < TOTAL; ++i)
{
auto res = queue.postCoro(
[&](auto yield) {
[&](auto /* yield */) {
std::unique_lock lk{mtx};
cv.wait(lk, [&] { return unblocked == true; });

View File

@@ -52,7 +52,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, AccountNotExist)
ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::optional<Blob>{}));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}"
}})",
@@ -76,7 +76,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, LedgerNonExistViaIntSequence)
// return empty ledgerinfo
ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)).WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}"
}})",
@@ -101,7 +101,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, LedgerNonExistViaStringSequence)
// return empty ledgerinfo
ON_CALL(*rawBackendPtr, fetchLedgerBySequence(12, _)).WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_index":"{}"
@@ -128,7 +128,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, LedgerNonExistViaHash)
ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _))
.WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_hash":"{}"
@@ -180,16 +180,13 @@ TEST_F(RPCAccountCurrenciesHandlerTest, DefaultParameter)
// ACCOUNT can receive USD 10 from ACCOUNT2 and send USD 20 to ACCOUNT2, now
// the balance is 100, ACCOUNT can only send USD to ACCOUNT2
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
// ACCOUNT2 can receive JPY 10 from ACCOUNT and send JPY 20 to ACCOUNT, now
// the balance is 100, ACCOUNT2 can only send JPY to ACCOUNT
auto const line2 =
CreateRippleStateLedgerObject(ACCOUNT, "JPY", ISSUER, 100, ACCOUNT2, 10, ACCOUNT, 20, TXNID, 123, 0);
auto const line2 = CreateRippleStateLedgerObject("JPY", ISSUER, 100, ACCOUNT2, 10, ACCOUNT, 20, TXNID, 123, 0);
// ACCOUNT can receive EUR 10 from ACCOUNT and send EUR 20 to ACCOUNT2, now
// the balance is 8, ACCOUNT can receive/send EUR to/from ACCOUNT2
auto const line3 =
CreateRippleStateLedgerObject(ACCOUNT, "EUR", ISSUER, 8, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line3 = CreateRippleStateLedgerObject("EUR", ISSUER, 8, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
std::vector<Blob> bbs;
bbs.push_back(line1.getSerializer().peekData());
bbs.push_back(line2.getSerializer().peekData());
@@ -197,7 +194,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, DefaultParameter)
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}"
}})",
@@ -229,13 +226,12 @@ TEST_F(RPCAccountCurrenciesHandlerTest, RequestViaLegderHash)
.WillByDefault(Return(ownerDir.getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2);
std::vector<Blob> bbs;
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
bbs.push_back(line1.getSerializer().peekData());
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_hash":"{}"
@@ -270,13 +266,12 @@ TEST_F(RPCAccountCurrenciesHandlerTest, RequestViaLegderSeq)
.WillByDefault(Return(ownerDir.getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2);
std::vector<Blob> bbs;
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
bbs.push_back(line1.getSerializer().peekData());
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_index":{}

View File

@@ -107,7 +107,7 @@ TEST_P(AccountInfoParameterTest, InvalidParams)
runSpawn([&, this](auto yield) {
auto const handler = AnyHandler{AccountInfoHandler{mockBackendPtr}};
auto const req = json::parse(testBundle.testJson);
auto const output = handler.process(req, Context{yield});
auto const output = handler.process(req, Context{.yield = yield, .apiVersion = 2});
ASSERT_FALSE(output);
auto const err = rpc::makeError(output.error());
@@ -116,6 +116,25 @@ TEST_P(AccountInfoParameterTest, InvalidParams)
});
}
// Regression test for API-v1 boolean leniency: "signer_lists" given as a
// non-bool JSON value (the integer 1) must still pass parameter validation
// under apiVersion 1. The mocked fetchLedgerBySequence has no return value
// configured, so the subsequent ledger lookup fails — the handler therefore
// reports "lgrNotFound", which proves the request was NOT rejected earlier
// with "invalidParams". (Under apiVersion 2 the same input is expected to be
// rejected as invalid — presumably covered by the InvalidParams cases; see
// the parameterized test above.)
TEST_F(AccountInfoParameterTest, ApiV1SignerListIsNotBool)
{
// NOTE: "signer_lists" is the integer 1, not a bool — the point of the test.
static constexpr auto reqJson = R"(
{"ident":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", "signer_lists":1}
)";
auto* rawBackendPtr = static_cast<MockBackend*>(mockBackendPtr.get());
// Expect the handler to reach the backend at all, i.e. to get past input
// validation despite the non-bool flag.
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence);
runSpawn([&, this](auto yield) {
auto const handler = AnyHandler{AccountInfoHandler{mockBackendPtr}};
auto const req = json::parse(reqJson);
// apiVersion = 1: the version whose relaxed bool handling is under test.
auto const output = handler.process(req, Context{.yield = yield, .apiVersion = 1});
ASSERT_FALSE(output);
auto const err = rpc::makeError(output.error());
// The failure must stem from the missing ledger, not from the parameters.
EXPECT_EQ(err.at("error").as_string(), "lgrNotFound");
EXPECT_EQ(err.at("error_message").as_string(), "ledgerNotFound");
});
}
TEST_F(RPCAccountInfoHandlerTest, LedgerNonExistViaIntSequence)
{
auto const rawBackendPtr = static_cast<MockBackend*>(mockBackendPtr.get());
@@ -125,7 +144,7 @@ TEST_F(RPCAccountInfoHandlerTest, LedgerNonExistViaIntSequence)
// return empty ledgerinfo
ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)).WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}",
"ledger_index": 30
@@ -150,7 +169,7 @@ TEST_F(RPCAccountInfoHandlerTest, LedgerNonExistViaStringSequence)
// return empty ledgerinfo
ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)).WillByDefault(Return(std::nullopt));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}",
"ledger_index": "30"
@@ -176,7 +195,7 @@ TEST_F(RPCAccountInfoHandlerTest, LedgerNonExistViaHash)
ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _))
.WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}",
"ledger_hash": "{}"
@@ -205,7 +224,7 @@ TEST_F(RPCAccountInfoHandlerTest, AccountNotExist)
ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::optional<Blob>{}));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}"
}})",
@@ -233,7 +252,7 @@ TEST_F(RPCAccountInfoHandlerTest, AccountInvalid)
ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(CreateFeeSettingBlob(1, 2, 3, 4, 0)));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}"
}})",
@@ -269,7 +288,7 @@ TEST_F(RPCAccountInfoHandlerTest, SignerListsInvalid)
.WillByDefault(Return(CreateAmendmentsObject({}).getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(4);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}",
"signer_lists": true
@@ -285,11 +304,12 @@ TEST_F(RPCAccountInfoHandlerTest, SignerListsInvalid)
});
}
TEST_F(RPCAccountInfoHandlerTest, SignerListsTrue)
TEST_F(RPCAccountInfoHandlerTest, SignerListsTrueV2)
{
auto const expectedOutput = fmt::format(
R"({{
"account_data": {{
"account_data":
{{
"Account": "{}",
"Balance": "200",
"Flags": 0,
@@ -302,36 +322,37 @@ TEST_F(RPCAccountInfoHandlerTest, SignerListsTrue)
"index": "13F1A95D7AAB7108D5CE7EEAF504B2894B8C674E6D68499076441C4837282BF8"
}},
"signer_lists":
[
{{
"Flags": 0,
"LedgerEntryType": "SignerList",
"OwnerNode": "0",
"PreviousTxnID": "0000000000000000000000000000000000000000000000000000000000000000",
"PreviousTxnLgrSeq": 0,
"SignerEntries":
[
[
{{
"Flags": 0,
"LedgerEntryType": "SignerList",
"OwnerNode": "0",
"PreviousTxnID": "0000000000000000000000000000000000000000000000000000000000000000",
"PreviousTxnLgrSeq": 0,
"SignerEntries":
[
{{
"SignerEntry":
{{
"SignerEntry":
{{
"Account": "{}",
"SignerWeight": 1
}}
}},
{{
"SignerEntry":
{{
"Account": "{}",
"SignerWeight": 1
}}
"Account": "{}",
"SignerWeight": 1
}}
],
"SignerListID": 0,
"SignerQuorum": 2,
"index": "A9C28A28B85CD533217F5C0A0C7767666B093FA58A0F2D80026FCC4CD932DDC7"
}}
],
"account_flags": {{
}},
{{
"SignerEntry":
{{
"Account": "{}",
"SignerWeight": 1
}}
}}
],
"SignerListID": 0,
"SignerQuorum": 2,
"index": "A9C28A28B85CD533217F5C0A0C7767666B093FA58A0F2D80026FCC4CD932DDC7"
}}
],
"account_flags":
{{
"defaultRipple": false,
"depositAuth": false,
"disableMasterKey": false,
@@ -370,7 +391,7 @@ TEST_F(RPCAccountInfoHandlerTest, SignerListsTrue)
.WillByDefault(Return(CreateAmendmentsObject({}).getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(4);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}",
"signer_lists": true
@@ -378,7 +399,108 @@ TEST_F(RPCAccountInfoHandlerTest, SignerListsTrue)
ACCOUNT));
auto const handler = AnyHandler{AccountInfoHandler{mockBackendPtr}};
runSpawn([&](auto yield) {
auto const output = handler.process(input, Context{yield});
auto const output = handler.process(input, Context{.yield = yield, .apiVersion = 2});
ASSERT_TRUE(output);
EXPECT_EQ(*output, json::parse(expectedOutput));
});
}
TEST_F(RPCAccountInfoHandlerTest, SignerListsTrueV1)
{
auto const expectedOutput = fmt::format(
R"({{
"account_data":
{{
"Account": "{}",
"Balance": "200",
"Flags": 0,
"LedgerEntryType": "AccountRoot",
"OwnerCount": 2,
"PreviousTxnID": "{}",
"PreviousTxnLgrSeq": 2,
"Sequence": 2,
"TransferRate": 0,
"index": "13F1A95D7AAB7108D5CE7EEAF504B2894B8C674E6D68499076441C4837282BF8",
"signer_lists":
[
{{
"Flags": 0,
"LedgerEntryType": "SignerList",
"OwnerNode": "0",
"PreviousTxnID": "0000000000000000000000000000000000000000000000000000000000000000",
"PreviousTxnLgrSeq": 0,
"SignerEntries":
[
{{
"SignerEntry":
{{
"Account": "{}",
"SignerWeight": 1
}}
}},
{{
"SignerEntry":
{{
"Account": "{}",
"SignerWeight": 1
}}
}}
],
"SignerListID": 0,
"SignerQuorum": 2,
"index": "A9C28A28B85CD533217F5C0A0C7767666B093FA58A0F2D80026FCC4CD932DDC7"
}}
]
}},
"account_flags":
{{
"defaultRipple": false,
"depositAuth": false,
"disableMasterKey": false,
"disallowIncomingXRP": false,
"globalFreeze": false,
"noFreeze": false,
"passwordSpent": false,
"requireAuthorization": false,
"requireDestinationTag": false
}},
"ledger_hash": "{}",
"ledger_index": 30,
"validated": true
}})",
ACCOUNT,
INDEX1,
ACCOUNT1,
ACCOUNT2,
LEDGERHASH);
auto const rawBackendPtr = static_cast<MockBackend*>(mockBackendPtr.get());
mockBackendPtr->updateRange(10); // min
mockBackendPtr->updateRange(30); // max
auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30);
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1);
ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo));
auto const account = GetAccountIDWithString(ACCOUNT);
auto const accountKk = ripple::keylet::account(account).key;
auto const accountRoot = CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2);
ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, 30, _))
.WillByDefault(Return(accountRoot.getSerializer().peekData()));
auto signersKey = ripple::keylet::signers(account).key;
ON_CALL(*rawBackendPtr, doFetchLedgerObject(signersKey, 30, _))
.WillByDefault(Return(CreateSignerLists({{ACCOUNT1, 1}, {ACCOUNT2, 1}}).getSerializer().peekData()));
ON_CALL(*rawBackendPtr, doFetchLedgerObject(ripple::keylet::amendments().key, 30, _))
.WillByDefault(Return(CreateAmendmentsObject({}).getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(4);
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}",
"signer_lists": true
}})",
ACCOUNT));
auto const handler = AnyHandler{AccountInfoHandler{mockBackendPtr}};
runSpawn([&](auto yield) {
auto const output = handler.process(input, Context{.yield = yield, .apiVersion = 1});
ASSERT_TRUE(output);
EXPECT_EQ(*output, json::parse(expectedOutput));
});
@@ -443,7 +565,7 @@ TEST_F(RPCAccountInfoHandlerTest, Flags)
.WillByDefault(Return(CreateAmendmentsObject({}).getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}"
}})",
@@ -474,7 +596,7 @@ TEST_F(RPCAccountInfoHandlerTest, IdentAndSignerListsFalse)
.WillByDefault(Return(CreateAmendmentsObject({}).getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"ident": "{}"
}})",
@@ -551,7 +673,7 @@ TEST_F(RPCAccountInfoHandlerTest, DisallowIncoming)
.WillByDefault(Return(CreateAmendmentsObject({rpc::Amendments::DisallowIncoming}).getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}"
}})",
@@ -624,7 +746,7 @@ TEST_F(RPCAccountInfoHandlerTest, Clawback)
.WillByDefault(Return(CreateAmendmentsObject({rpc::Amendments::Clawback}).getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}"
}})",

View File

@@ -465,10 +465,8 @@ TEST_F(RPCAccountLinesHandlerTest, DefaultParameterTest)
// return two trust lines
std::vector<Blob> bbs;
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123);
auto const line2 =
CreateRippleStateLedgerObject(ACCOUNT2, "USD", ACCOUNT, 10, ACCOUNT2, 100, ACCOUNT, 200, TXNID, 123);
auto const line1 = CreateRippleStateLedgerObject("USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123);
auto const line2 = CreateRippleStateLedgerObject("USD", ACCOUNT, 10, ACCOUNT2, 100, ACCOUNT, 200, TXNID, 123);
bbs.push_back(line1.getSerializer().peekData());
bbs.push_back(line2.getSerializer().peekData());
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
@@ -545,8 +543,7 @@ TEST_F(RPCAccountLinesHandlerTest, UseLimit)
while (repetitions--)
{
indexes.push_back(ripple::uint256{INDEX1});
auto const line =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123);
auto const line = CreateRippleStateLedgerObject("USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123);
bbs.push_back(line.getSerializer().peekData());
}
ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1);
@@ -625,8 +622,7 @@ TEST_F(RPCAccountLinesHandlerTest, UseDestination)
while (repetitions--)
{
indexes.push_back(ripple::uint256{INDEX1});
auto const line =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123);
auto const line = CreateRippleStateLedgerObject("USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123);
bbs.push_back(line.getSerializer().peekData());
}
@@ -635,8 +631,7 @@ TEST_F(RPCAccountLinesHandlerTest, UseDestination)
while (repetitions--)
{
indexes.push_back(ripple::uint256{INDEX1});
auto const line =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT3, 10, ACCOUNT, 100, ACCOUNT3, 200, TXNID, 123);
auto const line = CreateRippleStateLedgerObject("USD", ACCOUNT3, 10, ACCOUNT, 100, ACCOUNT3, 200, TXNID, 123);
bbs.push_back(line.getSerializer().peekData());
}
@@ -761,13 +756,13 @@ TEST_F(RPCAccountLinesHandlerTest, OptionalResponseField)
// return few trust lines
std::vector<Blob> bbs;
auto line1 = CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 0);
auto line1 = CreateRippleStateLedgerObject("USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 0);
line1.setFlag(ripple::lsfHighAuth);
line1.setFlag(ripple::lsfHighNoRipple);
line1.setFlag(ripple::lsfHighFreeze);
bbs.push_back(line1.getSerializer().peekData());
auto line2 = CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 20, ACCOUNT, 200, ACCOUNT2, 400, TXNID, 0);
auto line2 = CreateRippleStateLedgerObject("USD", ACCOUNT2, 20, ACCOUNT, 200, ACCOUNT2, 400, TXNID, 0);
line2.setFlag(ripple::lsfLowAuth);
line2.setFlag(ripple::lsfLowNoRipple);
line2.setFlag(ripple::lsfLowFreeze);
@@ -809,7 +804,7 @@ TEST_F(RPCAccountLinesHandlerTest, MarkerOutput)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3);
std::vector<Blob> bbs;
auto line = CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 0);
auto line = CreateRippleStateLedgerObject("USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 0);
// owner dir contains 10 indexes
int objectsCount = 10;
@@ -878,8 +873,7 @@ TEST_F(RPCAccountLinesHandlerTest, MarkerInput)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3);
std::vector<Blob> bbs;
auto const line =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 0);
auto const line = CreateRippleStateLedgerObject("USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 0);
int objectsCount = limit;
std::vector<ripple::uint256> indexes;
while (objectsCount != 0)
@@ -944,10 +938,8 @@ TEST_F(RPCAccountLinesHandlerTest, LimitLessThanMin)
// return two trust lines
std::vector<Blob> bbs;
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123);
auto const line2 =
CreateRippleStateLedgerObject(ACCOUNT2, "USD", ACCOUNT, 10, ACCOUNT2, 100, ACCOUNT, 200, TXNID, 123);
auto const line1 = CreateRippleStateLedgerObject("USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123);
auto const line2 = CreateRippleStateLedgerObject("USD", ACCOUNT, 10, ACCOUNT2, 100, ACCOUNT, 200, TXNID, 123);
bbs.push_back(line1.getSerializer().peekData());
bbs.push_back(line2.getSerializer().peekData());
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
@@ -1027,10 +1019,8 @@ TEST_F(RPCAccountLinesHandlerTest, LimitMoreThanMax)
// return two trust lines
std::vector<Blob> bbs;
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123);
auto const line2 =
CreateRippleStateLedgerObject(ACCOUNT2, "USD", ACCOUNT, 10, ACCOUNT2, 100, ACCOUNT, 200, TXNID, 123);
auto const line1 = CreateRippleStateLedgerObject("USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123);
auto const line2 = CreateRippleStateLedgerObject("USD", ACCOUNT, 10, ACCOUNT2, 100, ACCOUNT, 200, TXNID, 123);
bbs.push_back(line1.getSerializer().peekData());
bbs.push_back(line2.getSerializer().peekData());
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));

View File

@@ -171,7 +171,7 @@ TEST_F(RPCAccountNFTsHandlerTest, LedgerNotFoundViaHash)
ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _))
.WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_hash":"{}"
@@ -198,7 +198,7 @@ TEST_F(RPCAccountNFTsHandlerTest, LedgerNotFoundViaStringIndex)
// return empty ledgerinfo
ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_index":"{}"
@@ -225,7 +225,7 @@ TEST_F(RPCAccountNFTsHandlerTest, LedgerNotFoundViaIntIndex)
// return empty ledgerinfo
ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_index":{}
@@ -254,7 +254,7 @@ TEST_F(RPCAccountNFTsHandlerTest, AccountNotFound)
ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::optional<Blob>{}));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}"
}})",
@@ -316,7 +316,7 @@ TEST_F(RPCAccountNFTsHandlerTest, NormalPath)
.WillByDefault(Return(pageObject.getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}"
}})",
@@ -351,7 +351,7 @@ TEST_F(RPCAccountNFTsHandlerTest, Limit)
.WillByDefault(Return(pageObject.getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1 + limit);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit":{}
@@ -387,7 +387,7 @@ TEST_F(RPCAccountNFTsHandlerTest, Marker)
.WillByDefault(Return(pageObject.getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"marker":"{}"
@@ -450,7 +450,7 @@ TEST_F(RPCAccountNFTsHandlerTest, LimitLessThanMin)
.WillByDefault(Return(pageObject.getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit":{}
@@ -513,7 +513,7 @@ TEST_F(RPCAccountNFTsHandlerTest, LimitMoreThanMax)
.WillByDefault(Return(pageObject.getSerializer().peekData()));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit":{}

View File

@@ -85,7 +85,7 @@ generateTestValuesForParametersTest()
"TypeInvalid",
R"({"account":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", "type":"wrong"})",
"invalidParams",
"Invalid parameters."},
"Invalid field 'type'."},
AccountObjectsParamTestCaseBundle{
"LedgerHashInvalid",
R"({"account":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", "ledger_hash":"1"})",
@@ -176,7 +176,7 @@ TEST_F(RPCAccountObjectsHandlerTest, LedgerNonExistViaIntSequence)
ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ, _))
.WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_index":30
@@ -201,7 +201,7 @@ TEST_F(RPCAccountObjectsHandlerTest, LedgerNonExistViaStringSequence)
// return empty ledgerinfo
ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ, _)).WillByDefault(Return(std::nullopt));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_index":"30"
@@ -227,7 +227,7 @@ TEST_F(RPCAccountObjectsHandlerTest, LedgerNonExistViaHash)
ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _))
.WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_hash":"{}"
@@ -256,7 +256,7 @@ TEST_F(RPCAccountObjectsHandlerTest, AccountNotExist)
ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::optional<Blob>{}));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}"
}})",
@@ -327,14 +327,13 @@ TEST_F(RPCAccountObjectsHandlerTest, DefaultParameterNoNFTFound)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3);
std::vector<Blob> bbs;
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
bbs.push_back(line1.getSerializer().peekData());
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}"
}})",
@@ -378,14 +377,13 @@ TEST_F(RPCAccountObjectsHandlerTest, Limit)
std::vector<Blob> bbs;
while (count-- != 0)
{
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
bbs.push_back(line1.getSerializer().peekData());
}
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit":{}
@@ -427,14 +425,13 @@ TEST_F(RPCAccountObjectsHandlerTest, Marker)
std::vector<Blob> bbs;
while (count-- != 0)
{
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
bbs.push_back(line1.getSerializer().peekData());
}
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"marker":"{},{}"
@@ -489,14 +486,13 @@ TEST_F(RPCAccountObjectsHandlerTest, MultipleDirNoNFT)
cc = count * 2;
while (cc-- != 0)
{
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
bbs.push_back(line1.getSerializer().peekData());
}
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit":{}
@@ -538,8 +534,7 @@ TEST_F(RPCAccountObjectsHandlerTest, TypeFilter)
std::vector<Blob> bbs;
// put 1 state and 1 offer
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const offer = CreateOfferLedgerObject(
ACCOUNT,
10,
@@ -555,7 +550,7 @@ TEST_F(RPCAccountObjectsHandlerTest, TypeFilter)
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"type":"offer"
@@ -594,8 +589,7 @@ TEST_F(RPCAccountObjectsHandlerTest, TypeFilterReturnEmpty)
ON_CALL(*rawBackendPtr, doFetchLedgerObject(nftMaxKK, 30, _)).WillByDefault(Return(std::nullopt));
std::vector<Blob> bbs;
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const offer = CreateOfferLedgerObject(
ACCOUNT,
10,
@@ -611,7 +605,7 @@ TEST_F(RPCAccountObjectsHandlerTest, TypeFilterReturnEmpty)
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"type": "check"
@@ -653,8 +647,7 @@ TEST_F(RPCAccountObjectsHandlerTest, DeletionBlockersOnlyFilter)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3);
auto const line =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const channel = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28);
auto const offer = CreateOfferLedgerObject(
ACCOUNT,
@@ -674,7 +667,7 @@ TEST_F(RPCAccountObjectsHandlerTest, DeletionBlockersOnlyFilter)
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}",
"deletion_blockers_only": true
@@ -714,8 +707,7 @@ TEST_F(RPCAccountObjectsHandlerTest, DeletionBlockersOnlyFilterWithTypeFilter)
auto const nftMaxKK = ripple::keylet::nftpage_max(account).key;
ON_CALL(*rawBackendPtr, doFetchLedgerObject(nftMaxKK, 30, _)).WillByDefault(Return(std::nullopt));
auto const line =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const channel = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28);
std::vector<Blob> bbs;
@@ -725,7 +717,7 @@ TEST_F(RPCAccountObjectsHandlerTest, DeletionBlockersOnlyFilterWithTypeFilter)
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}",
"deletion_blockers_only": true,
@@ -793,7 +785,7 @@ TEST_F(RPCAccountObjectsHandlerTest, DeletionBlockersOnlyFilterEmptyResult)
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}",
"deletion_blockers_only": true
@@ -858,7 +850,7 @@ TEST_F(RPCAccountObjectsHandlerTest, DeletionBlockersOnlyFilterWithIncompatibleT
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}",
"deletion_blockers_only": true,
@@ -971,14 +963,13 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTMixOtherObjects)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(4);
std::vector<Blob> bbs;
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
bbs.push_back(line1.getSerializer().peekData());
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}"
}})",
@@ -1022,7 +1013,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTReachLimitReturnMarker)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(11);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit":{}
@@ -1075,7 +1066,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTReachLimitNoMarker)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(12);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit":{}
@@ -1134,8 +1125,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTMarker)
ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, 30, _))
.WillByDefault(Return(ownerDir.getSerializer().peekData()));
auto const line =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const channel = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28);
auto const offer = CreateOfferLedgerObject(
ACCOUNT,
@@ -1157,7 +1147,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTMarker)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(13);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"marker":"{},{}"
@@ -1195,8 +1185,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTMarkerNoMoreNFT)
ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, 30, _))
.WillByDefault(Return(ownerDir.getSerializer().peekData()));
auto const line =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const channel = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28);
auto const offer = CreateOfferLedgerObject(
ACCOUNT,
@@ -1218,7 +1207,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTMarkerNoMoreNFT)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"marker":"{},{}"
@@ -1250,7 +1239,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTMarkerNotInRange)
ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, MAXSEQ, _)).WillByDefault(Return(Blob{'f', 'a', 'k', 'e'}));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}",
"marker" : "{},{}"
@@ -1287,7 +1276,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTMarkerNotExist)
ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountNftMax, MAXSEQ, _)).WillByDefault(Return(std::nullopt));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account": "{}",
"marker" : "{},{}"
@@ -1344,8 +1333,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTLimitAdjust)
ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, 30, _))
.WillByDefault(Return(ownerDir.getSerializer().peekData()));
auto const line =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const channel = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28);
auto const offer = CreateOfferLedgerObject(
ACCOUNT,
@@ -1367,7 +1355,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTLimitAdjust)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(13);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"marker":"{},{}",
@@ -1462,14 +1450,13 @@ TEST_F(RPCAccountObjectsHandlerTest, FilterNFT)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(4);
std::vector<Blob> bbs;
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
bbs.push_back(line1.getSerializer().peekData());
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"type": "nft_page"
@@ -1510,14 +1497,13 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTZeroMarkerNotAffectOtherMarker)
std::vector<Blob> bbs;
while (count-- != 0)
{
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
bbs.push_back(line1.getSerializer().peekData());
}
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit":{},
@@ -1595,14 +1581,13 @@ TEST_F(RPCAccountObjectsHandlerTest, LimitLessThanMin)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3);
std::vector<Blob> bbs;
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
bbs.push_back(line1.getSerializer().peekData());
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit": {}
@@ -1676,14 +1661,13 @@ TEST_F(RPCAccountObjectsHandlerTest, LimitMoreThanMax)
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3);
std::vector<Blob> bbs;
auto const line1 =
CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
auto const line1 = CreateRippleStateLedgerObject("USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0);
bbs.push_back(line1.getSerializer().peekData());
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit": {}

View File

@@ -164,7 +164,7 @@ TEST_F(RPCAccountOffersHandlerTest, LedgerNotFoundViaHash)
ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _))
.WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_hash":"{}"
@@ -191,7 +191,7 @@ TEST_F(RPCAccountOffersHandlerTest, LedgerNotFoundViaStringIndex)
// return empty ledgerinfo
ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_index":"{}"
@@ -218,7 +218,7 @@ TEST_F(RPCAccountOffersHandlerTest, LedgerNotFoundViaIntIndex)
// return empty ledgerinfo
ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(std::optional<ripple::LedgerInfo>{}));
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"ledger_index":{}
@@ -247,7 +247,7 @@ TEST_F(RPCAccountOffersHandlerTest, AccountNotFound)
ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::optional<Blob>{}));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}"
}})",
@@ -324,7 +324,7 @@ TEST_F(RPCAccountOffersHandlerTest, DefaultParams)
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}"
}})",
@@ -374,7 +374,7 @@ TEST_F(RPCAccountOffersHandlerTest, Limit)
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit":10
@@ -429,7 +429,7 @@ TEST_F(RPCAccountOffersHandlerTest, Marker)
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"marker":"{},{}"
@@ -467,7 +467,7 @@ TEST_F(RPCAccountOffersHandlerTest, MarkerNotExists)
ON_CALL(*rawBackendPtr, doFetchLedgerObject(hintIndex, ledgerSeq, _)).WillByDefault(Return(std::nullopt));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"marker":"{},{}"
@@ -524,7 +524,7 @@ TEST_F(RPCAccountOffersHandlerTest, LimitLessThanMin)
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit":{}
@@ -578,7 +578,7 @@ TEST_F(RPCAccountOffersHandlerTest, LimitMoreThanMax)
ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1);
auto const static input = boost::json::parse(fmt::format(
auto const static input = json::parse(fmt::format(
R"({{
"account":"{}",
"limit":{}

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More