Compare commits

...

33 Commits

Author SHA1 Message Date
Ed Hennis
b54539fb59 Merge branch 'develop' into ximinez/fix-getledger 2026-01-15 13:03:22 -04:00
Pratik Mankawde
96d17b7f66 ci: Add sanitizers to CI builds (#5996)
This change adds support for sanitizer build options in CI builds workflow. Currently `asan+ubsan` is enabled, while `tsan+ubsan` is left disabled as more changes are required.
2026-01-15 16:18:14 +00:00
Ed Hennis
83036e4f77 Merge branch 'develop' into ximinez/fix-getledger 2026-01-15 12:05:51 -04:00
Ayaz Salikhov
ec44347ffc test: Use gtest instead of doctest (#6216)
This change switches over the doctest framework to the gtest framework.
2026-01-15 08:36:13 -05:00
Ed Hennis
0f231c3cd2 Merge branch 'develop' into ximinez/fix-getledger 2026-01-13 18:19:04 -04:00
Ed Hennis
58febcb683 Merge branch 'develop' into ximinez/fix-getledger 2026-01-13 15:27:53 -04:00
Ed Hennis
754757bac9 Merge branch 'develop' into ximinez/fix-getledger 2026-01-12 14:52:07 -04:00
Ed Hennis
859467455c Merge branch 'develop' into ximinez/fix-getledger 2026-01-11 00:50:36 -04:00
Ed Hennis
60dc20042c Merge branch 'develop' into ximinez/fix-getledger 2026-01-08 17:06:02 -04:00
Ed Hennis
108b3e5e2c Merge branch 'develop' into ximinez/fix-getledger 2026-01-08 13:04:12 -04:00
Ed Hennis
599f197109 Merge branch 'develop' into ximinez/fix-getledger 2026-01-06 14:02:05 -05:00
Ed Hennis
36000188b1 Merge branch 'develop' into ximinez/fix-getledger 2025-12-22 17:39:51 -05:00
Ed Hennis
944d758a39 Merge branch 'develop' into ximinez/fix-getledger 2025-12-18 19:59:45 -05:00
Ed Hennis
de661f8ef8 Merge branch 'develop' into ximinez/fix-getledger 2025-12-12 20:34:51 -05:00
Ed Hennis
8b70664d4c Merge remote-tracking branch 'XRPLF/develop' into ximinez/fix-getledger
* XRPLF/develop:
  refactor: Rename `ripple` namespace to `xrpl` (5982)
  refactor: Move JobQueue and related classes into xrpl.core module (6121)
  refactor: Rename `rippled` binary to `xrpld` (5983)
2025-12-11 15:30:34 -05:00
Ed Hennis
4f03625b75 Merge remote-tracking branch 'XRPLF/develop' into ximinez/fix-getledger
* XRPLF/develop:
  refactor: rename info() to header() (6138)
  refactor: rename `LedgerInfo` to `LedgerHeader` (6136)
  refactor: clean up `RPCHelpers` (5684)
  chore: Fix docs readme and cmake (6122)
  chore: Clean up .gitignore and .gitattributes (6001)
  chore: Use updated secp256k1 recipe (6118)
2025-12-11 14:28:37 -05:00
Ed Hennis
fb0379f93d Merge branch 'develop' into ximinez/fix-getledger 2025-12-05 21:13:02 -05:00
Ed Hennis
8e795197c9 Merge branch 'develop' into ximinez/fix-getledger 2025-12-02 17:37:21 -05:00
Ed Hennis
cf7665137e Merge branch 'develop' into ximinez/fix-getledger 2025-12-01 14:40:37 -05:00
Ed Hennis
0aa43e1772 Merge branch 'develop' into ximinez/fix-getledger 2025-11-28 15:46:37 -05:00
Ed Hennis
0834c23f27 Merge branch 'develop' into ximinez/fix-getledger 2025-11-27 01:48:49 -05:00
Ed Hennis
cd62ba13ab Merge branch 'develop' into ximinez/fix-getledger 2025-11-26 00:25:08 -05:00
Ed Hennis
31fc348446 Merge branch 'develop' into ximinez/fix-getledger 2025-11-25 14:54:57 -05:00
Ed Hennis
009f17b9bf Merge branch 'develop' into ximinez/fix-getledger 2025-11-24 21:49:03 -05:00
Ed Hennis
b0872bae95 Merge branch 'develop' into ximinez/fix-getledger 2025-11-24 21:30:14 -05:00
Ed Hennis
0c69b23b93 Merge branch 'develop' into ximinez/fix-getledger 2025-11-21 12:47:49 -05:00
Ed Hennis
a2e93188fc Merge branch 'develop' into ximinez/fix-getledger 2025-11-18 22:39:22 -05:00
Ed Hennis
5406d28357 Merge branch 'develop' into ximinez/fix-getledger 2025-11-15 03:08:34 -05:00
Ed Hennis
7804c09494 Merge branch 'develop' into ximinez/fix-getledger 2025-11-13 12:18:58 -05:00
Ed Hennis
79d294bd2d Merge branch 'develop' into ximinez/fix-getledger 2025-11-12 14:12:47 -05:00
Ed Hennis
acace507d0 Fix formatting 2025-11-10 19:52:59 -05:00
Ed Hennis
98732100fb Reduce duplicate peer traffic for ledger data (#5126)
- Drop duplicate outgoing TMGetLedger messages per peer
  - Allow a retry after 30s in case of peer or network congestion.
  - Addresses RIPD-1870
  - (Changes levelization. That is not desirable, and will need to be fixed.)
- Drop duplicate incoming TMGetLedger messages per peer
  - Allow a retry after 15s in case of peer or network congestion.
  - The requestCookie is ignored when computing the hash, thus increasing
    the chances of detecting duplicate messages.
  - With duplicate messages, keep track of the different requestCookies
    (or lack of cookie). When work is finally done for a given request,
    send the response to all the peers that are waiting on the request,
    sending one message per peer, including all the cookies and
    a "directResponse" flag indicating the data is intended for the
    sender, too.
  - Addresses RIPD-1871
- Drop duplicate incoming TMLedgerData messages
  - Addresses RIPD-1869
2025-11-10 19:52:59 -05:00
Ed Hennis
b186516d0a Improve job queue collision checks and logging
- Improve logging related to ledger acquisition and operating mode
  changes
- Class "CanProcess" to keep track of processing of distinct items
2025-11-10 19:52:59 -05:00
69 changed files with 2854 additions and 1022 deletions

View File

@@ -28,6 +28,7 @@ ignoreRegExpList:
- /[\['"`]-[DWw][a-zA-Z0-9_-]+['"`\]]/g # compile flags
suggestWords:
- xprl->xrpl
- xprld->xrpld
- unsynched->unsynced
- synched->synced
- synch->sync
@@ -61,6 +62,7 @@ words:
- compr
- conanfile
- conanrun
- confs
- connectability
- coro
- coros
@@ -90,6 +92,7 @@ words:
- finalizers
- firewalled
- fmtdur
- fsanitize
- funclets
- gcov
- gcovr
@@ -126,6 +129,7 @@ words:
- lseq
- lsmf
- ltype
- mcmodel
- MEMORYSTATUSEX
- Merkle
- Metafuncton
@@ -235,6 +239,8 @@ words:
- txn
- txns
- txs
- UBSAN
- ubsan
- umant
- unacquired
- unambiguity
@@ -270,6 +276,7 @@ words:
- xbridge
- xchain
- ximinez
- EXPECT_STREQ
- XMACRO
- xrpkuwait
- xrpl

View File

@@ -18,6 +18,10 @@ inputs:
description: "The logging verbosity."
required: false
default: "verbose"
sanitizers:
description: "The sanitizers to enable."
required: false
default: ""
runs:
using: composite
@@ -29,9 +33,11 @@ runs:
BUILD_OPTION: ${{ inputs.force_build == 'true' && '*' || 'missing' }}
BUILD_TYPE: ${{ inputs.build_type }}
LOG_VERBOSITY: ${{ inputs.log_verbosity }}
SANITIZERS: ${{ inputs.sanitizers }}
run: |
echo 'Installing dependencies.'
conan install \
--profile ci \
--build="${BUILD_OPTION}" \
--options:host='&:tests=True' \
--options:host='&:xrpld=True' \

View File

@@ -28,7 +28,7 @@ runs:
shell: bash
run: |
echo 'Installing profile.'
conan config install conan/profiles/default -tf $(conan config home)/profiles/
conan config install conan/profiles/ -tf $(conan config home)/profiles/
echo 'Conan profile:'
conan profile show

View File

@@ -229,7 +229,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if (n := os["compiler_version"]) != "":
config_name += f"-{n}"
config_name += (
f"-{architecture['platform'][architecture['platform'].find('/') + 1 :]}"
f"-{architecture['platform'][architecture['platform'].find('/')+1:]}"
)
config_name += f"-{build_type.lower()}"
if "-Dcoverage=ON" in cmake_args:
@@ -240,17 +240,53 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
# Add the configuration to the list, with the most unique fields first,
# so that they are easier to identify in the GitHub Actions UI, as long
# names get truncated.
configurations.append(
{
"config_name": config_name,
"cmake_args": cmake_args,
"cmake_target": cmake_target,
"build_only": build_only,
"build_type": build_type,
"os": os,
"architecture": architecture,
}
)
# Add Address and Thread (both coupled with UB) sanitizers for specific bookworm distros.
# GCC-Asan rippled-embedded tests are failing because of https://github.com/google/sanitizers/issues/856
if (
os["distro_version"] == "bookworm"
and f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
):
# Add ASAN + UBSAN configuration.
configurations.append(
{
"config_name": config_name + "-asan-ubsan",
"cmake_args": cmake_args,
"cmake_target": cmake_target,
"build_only": build_only,
"build_type": build_type,
"os": os,
"architecture": architecture,
"sanitizers": "address,undefinedbehavior",
}
)
# TSAN is deactivated due to seg faults with latest compilers.
activate_tsan = False
if activate_tsan:
configurations.append(
{
"config_name": config_name + "-tsan-ubsan",
"cmake_args": cmake_args,
"cmake_target": cmake_target,
"build_only": build_only,
"build_type": build_type,
"os": os,
"architecture": architecture,
"sanitizers": "thread,undefinedbehavior",
}
)
else:
configurations.append(
{
"config_name": config_name,
"cmake_args": cmake_args,
"cmake_target": cmake_target,
"build_only": build_only,
"build_type": build_type,
"os": os,
"architecture": architecture,
"sanitizers": "",
}
)
return configurations

View File

@@ -51,6 +51,12 @@ on:
type: number
default: 2
sanitizers:
description: "The sanitizers to enable."
required: false
type: string
default: ""
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
@@ -91,6 +97,7 @@ jobs:
# Determine if coverage and voidstar should be enabled.
COVERAGE_ENABLED: ${{ contains(inputs.cmake_args, '-Dcoverage=ON') }}
VOIDSTAR_ENABLED: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
SANITIZERS_ENABLED: ${{ inputs.sanitizers != '' }}
steps:
- name: Cleanup workspace (macOS and Windows)
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
@@ -128,11 +135,13 @@ jobs:
# Set the verbosity to "quiet" for Windows to avoid an excessive
# amount of logs. For other OSes, the "verbose" logs are more useful.
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
sanitizers: ${{ inputs.sanitizers }}
- name: Configure CMake
working-directory: ${{ env.BUILD_DIR }}
env:
BUILD_TYPE: ${{ inputs.build_type }}
SANITIZERS: ${{ inputs.sanitizers }}
CMAKE_ARGS: ${{ inputs.cmake_args }}
run: |
cmake \
@@ -174,7 +183,7 @@ jobs:
if-no-files-found: error
- name: Check linking (Linux)
if: ${{ runner.os == 'Linux' }}
if: ${{ runner.os == 'Linux' && env.SANITIZERS_ENABLED == 'false' }}
working-directory: ${{ env.BUILD_DIR }}
run: |
ldd ./xrpld
@@ -191,6 +200,14 @@ jobs:
run: |
./xrpld --version | grep libvoidstar
- name: Set sanitizer options
if: ${{ !inputs.build_only && env.SANITIZERS_ENABLED == 'true' }}
run: |
echo "ASAN_OPTIONS=print_stacktrace=1:detect_container_overflow=0:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/asan.supp" >> ${GITHUB_ENV}
echo "TSAN_OPTIONS=second_deadlock_stack=1:halt_on_error=0:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/tsan.supp" >> ${GITHUB_ENV}
echo "UBSAN_OPTIONS=suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/ubsan.supp" >> ${GITHUB_ENV}
echo "LSAN_OPTIONS=suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/lsan.supp" >> ${GITHUB_ENV}
- name: Run the separate tests
if: ${{ !inputs.build_only }}
working-directory: ${{ env.BUILD_DIR }}

View File

@@ -57,5 +57,6 @@ jobs:
runs_on: ${{ toJSON(matrix.architecture.runner) }}
image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || '' }}
config_name: ${{ matrix.config_name }}
sanitizers: ${{ matrix.sanitizers }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

View File

@@ -1,5 +1,5 @@
| :warning: **WARNING** :warning:
|---|
| :warning: **WARNING** :warning: |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| These instructions assume you have a C++ development environment ready with Git, Python, Conan, CMake, and a C++ compiler. For help setting one up on Linux, macOS, or Windows, [see this guide](./docs/build/environment.md). |
> These instructions also assume a basic familiarity with Conan and CMake.
@@ -523,18 +523,32 @@ stored inside the build directory, as either of:
- file named `coverage.`_extension_, with a suitable extension for the report format, or
- directory named `coverage`, with the `index.html` and other files inside, for the `html-details` or `html-nested` report formats.
## Sanitizers
To build dependencies and xrpld with sanitizer instrumentation, set the
`SANITIZERS` environment variable once, before running both Conan and CMake, and use the `sanitizers` profile in conan:
```bash
export SANITIZERS=address,undefinedbehavior
conan install .. --output-folder . --profile:all sanitizers --build missing --settings build_type=Debug
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Debug -Dxrpld=ON -Dtests=ON ..
```
See [Sanitizers docs](./docs/build/sanitizers.md) for more details.
## Options
| Option | Default Value | Description |
| ---------- | ------------- | ------------------------------------------------------------------ |
| `assert` | OFF | Enable assertions. |
| `coverage` | OFF | Prepare the coverage report. |
| `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. |
| `tests` | OFF | Build tests. |
| `unity` | OFF | Configure a unity build. |
| `xrpld` | OFF | Build the xrpld application, and not just the libxrpl library. |
| `werr` | OFF | Treat compilation warnings as errors |
| `wextra` | OFF | Enable additional compilation warnings |
| Option | Default Value | Description |
| ---------- | ------------- | -------------------------------------------------------------- |
| `assert` | OFF | Enable assertions. |
| `coverage` | OFF | Prepare the coverage report. |
| `tests` | OFF | Build tests. |
| `unity` | OFF | Configure a unity build. |
| `xrpld` | OFF | Build the xrpld application, and not just the libxrpl library. |
| `werr` | OFF | Treat compilation warnings as errors |
| `wextra` | OFF | Enable additional compilation warnings |
[Unity builds][5] may be faster for the first build
(at the cost of much more memory) since they concatenate sources into fewer

View File

@@ -16,14 +16,16 @@ set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
include(CompilationEnv)
if(is_gcc)
# GCC-specific fixes
add_compile_options(-Wno-unknown-pragmas -Wno-subobject-linkage)
# -Wno-subobject-linkage can be removed when we upgrade GCC version to at least 13.3
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
elseif(is_clang)
# Clang-specific fixes
add_compile_options(-Wno-unknown-warning-option) # Ignore unknown warning options
elseif(MSVC)
elseif(is_msvc)
# MSVC-specific fixes
add_compile_options(/wd4068) # Ignore unknown pragmas
endif()
@@ -77,6 +79,7 @@ if (packages_only)
return ()
endif ()
include(XrplCompiler)
include(XrplSanitizers)
include(XrplInterface)
option(only_docs "Include only the docs target?" FALSE)

View File

@@ -0,0 +1,54 @@
# Shared detection of compiler, operating system, and architecture.
#
# Centralizes environment probing so downstream CMake modules consume one
# consistent set of boolean flags instead of re-testing CMAKE_* and
# built-in platform variables. include_guard ensures this runs at most
# once per configure step.
include_guard(GLOBAL)

# Default every detection flag to FALSE up front; the checks below
# promote exactly one flag per category to TRUE.
foreach(_env_flag
    is_clang is_gcc is_msvc
    is_linux is_windows is_macos
    is_amd64 is_arm64)
  set(${_env_flag} FALSE)
endforeach()

# --------------------------------------------------------------------
# Compiler detection (C++)
# --------------------------------------------------------------------
if(CMAKE_CXX_COMPILER_ID MATCHES ".*Clang") # matches both Clang and AppleClang
  set(is_clang TRUE)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
  set(is_gcc TRUE)
elseif(MSVC)
  set(is_msvc TRUE)
else()
  message(FATAL_ERROR "Unsupported C++ compiler: ${CMAKE_CXX_COMPILER_ID}")
endif()

# --------------------------------------------------------------------
# Operating system detection. An unrecognized system leaves all three
# flags FALSE — deliberately non-fatal, unlike the compiler and
# architecture checks above/below.
# --------------------------------------------------------------------
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
  set(is_linux TRUE)
elseif(CMAKE_SYSTEM_NAME STREQUAL "Windows")
  set(is_windows TRUE)
elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
  set(is_macos TRUE)
endif()

# --------------------------------------------------------------------
# Architecture: only 64-bit x86 and ARM targets are recognized.
# --------------------------------------------------------------------
if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|AMD64")
  set(is_amd64 TRUE)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64")
  set(is_arm64 TRUE)
else()
  message(FATAL_ERROR "Unknown architecture: ${CMAKE_SYSTEM_PROCESSOR}")
endif()

View File

@@ -2,16 +2,23 @@
setup project-wide compiler settings
#]===================================================================]
include(CompilationEnv)
#[=========================================================[
TODO some/most of these common settings belong in a
toolchain file, especially the ABI-impacting ones
#]=========================================================]
add_library (common INTERFACE)
add_library (Xrpl::common ALIAS common)
include(XrplSanitizers)
# add a single global dependency on this interface lib
link_libraries (Xrpl::common)
# Respect CMAKE_POSITION_INDEPENDENT_CODE setting (may be set by Conan toolchain)
if(NOT DEFINED CMAKE_POSITION_INDEPENDENT_CODE)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
set_target_properties (common
PROPERTIES INTERFACE_POSITION_INDEPENDENT_CODE ON)
PROPERTIES INTERFACE_POSITION_INDEPENDENT_CODE ${CMAKE_POSITION_INDEPENDENT_CODE})
set(CMAKE_CXX_EXTENSIONS OFF)
target_compile_definitions (common
INTERFACE
@@ -116,8 +123,8 @@ else ()
# link to static libc/c++ iff:
# * static option set and
# * NOT APPLE (AppleClang does not support static libc/c++) and
# * NOT san (sanitizers typically don't work with static libc/c++)
$<$<AND:$<BOOL:${static}>,$<NOT:$<BOOL:${APPLE}>>,$<NOT:$<BOOL:${san}>>>:
# * NOT SANITIZERS (sanitizers typically don't work with static libc/c++)
$<$<AND:$<BOOL:${static}>,$<NOT:$<BOOL:${APPLE}>>,$<NOT:$<BOOL:${SANITIZERS_ENABLED}>>>:
-static-libstdc++
-static-libgcc
>)

View File

@@ -2,6 +2,8 @@
xrpld compile options/settings via an interface library
#]===================================================================]
include(CompilationEnv)
add_library (opts INTERFACE)
add_library (Xrpl::opts ALIAS opts)
target_compile_definitions (opts
@@ -42,22 +44,6 @@ if(jemalloc)
target_link_libraries(opts INTERFACE jemalloc::jemalloc)
endif ()
if (san)
target_compile_options (opts
INTERFACE
# sanitizers recommend minimum of -O1 for reasonable performance
$<$<CONFIG:Debug>:-O1>
${SAN_FLAG}
-fno-omit-frame-pointer)
target_compile_definitions (opts
INTERFACE
$<$<STREQUAL:${san},address>:SANITIZER=ASAN>
$<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
$<$<STREQUAL:${san},memory>:SANITIZER=MSAN>
$<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>)
target_link_libraries (opts INTERFACE ${SAN_FLAG} ${SAN_LIB})
endif ()
#[===================================================================[
xrpld transitive library deps via an interface library
#]===================================================================]

198
cmake/XrplSanitizers.cmake Normal file
View File

@@ -0,0 +1,198 @@
#[===================================================================[
Configure sanitizers based on environment variables.
This module reads the following environment variables:
- SANITIZERS: The sanitizers to enable. Possible values:
  - "address"
  - "address,undefinedbehavior"
  - "thread"
  - "thread,undefinedbehavior"
  - "undefinedbehavior"
The compiler type and platform are detected in CompilationEnv.cmake.
The sanitizer compile options are applied to the 'common' interface library
which is linked to all targets in the project.
Internal flag variables set by this module:
- SANITIZER_TYPES: List of sanitizer types to enable (e.g., "address",
  "thread", "undefined"), plus extra UBSAN checks ("float-divide-by-zero"
  and, for Clang, "unsigned-integer-overflow").
  This list is joined with commas and passed to -fsanitize=<list>.
- SANITIZERS_COMPILE_FLAGS: Compiler flags for sanitizer instrumentation.
  Includes:
  * -fno-omit-frame-pointer: Preserves frame pointers for stack traces
  * -O1: Minimum optimization for reasonable performance
  * -fsanitize=<types>: Enables sanitizer instrumentation
  * -fsanitize-ignorelist=<path>: (Clang only) Compile-time ignorelist
  * -mcmodel=large/medium: (GCC only) Code model for large binaries
  * -Wno-stringop-overflow: (GCC only) Suppresses false positive warnings
  * -Wno-tsan: (For GCC TSAN combination only) Suppresses atomic_thread_fence warnings
- SANITIZERS_LINK_FLAGS: Linker flags for sanitizer runtime libraries.
  Includes:
  * -fsanitize=<types>: Links sanitizer runtime libraries
  * -mcmodel=large/medium: (GCC only) Matches compile-time code model
- SANITIZERS_RELOCATION_FLAGS: (GCC only) Code model flags for linking.
  Used to handle large instrumented binaries on x86_64:
  * -mcmodel=large: For AddressSanitizer (prevents relocation errors)
  * -mcmodel=medium: For ThreadSanitizer (large model is incompatible)
#]===================================================================]
# BUGFIX: this module is included from several places (CMakeLists.txt,
# XrplCompiler.cmake, XrplBoost.cmake). Without a guard each include
# re-appended the sanitizer flags to the 'common' target. Guard it the
# same way CompilationEnv.cmake does. NOTE(review): this assumes all
# includers share the top-level directory scope so the SANITIZERS_*
# variables remain visible — confirm against the build tree layout.
include_guard(GLOBAL)

include(CompilationEnv)

# Read the requested sanitizers from the environment.
set(SANITIZERS $ENV{SANITIZERS})

# SANITIZERS_ENABLED is consumed by other modules (e.g. to disable static
# libc/c++ linking). Bail out early when no known sanitizer is requested.
if(SANITIZERS MATCHES "address|thread|undefinedbehavior")
  set(SANITIZERS_ENABLED TRUE)
else()
  set(SANITIZERS_ENABLED FALSE)
  return()
endif()

# Sanitizers are not supported on Windows/MSVC.
if(is_msvc)
  message(FATAL_ERROR "Sanitizers are not supported on Windows/MSVC. "
      "Please unset the SANITIZERS environment variable.")
endif()

message(STATUS "Configuring sanitizers: ${SANITIZERS}")

# Parse the comma-separated SANITIZERS value into per-sanitizer flags.
set(enable_asan FALSE)
set(enable_tsan FALSE)
set(enable_ubsan FALSE)

# Normalize SANITIZERS into a CMake list.
set(san_list "${SANITIZERS}")
string(REPLACE "," ";" san_list "${san_list}")
separate_arguments(san_list)

foreach(san IN LISTS san_list)
  if(san STREQUAL "address")
    set(enable_asan TRUE)
  elseif(san STREQUAL "thread")
    set(enable_tsan TRUE)
  elseif(san STREQUAL "undefinedbehavior")
    set(enable_ubsan TRUE)
  else()
    # BUGFIX: the two message fragments were concatenated without a
    # separator, rendering "...type: <san>Supported: ...".
    message(FATAL_ERROR "Unsupported sanitizer type: ${san}. "
        "Supported: address, thread, undefinedbehavior and their combinations.")
  endif()
endforeach()

# ASAN and TSAN runtimes cannot coexist in one binary.
if(enable_asan AND enable_tsan)
  message(FATAL_ERROR "AddressSanitizer and ThreadSanitizer are incompatible and cannot be enabled simultaneously. "
      "Use 'address' or 'thread', optionally with 'undefinedbehavior'.")
endif()

# Frame pointers are required for meaningful stack traces. Sanitizers
# recommend a minimum of -O1 for reasonable performance.
set(SANITIZERS_COMPILE_FLAGS "-fno-omit-frame-pointer" "-O1")

# Build the -fsanitize=<types> list.
set(SANITIZER_TYPES)
if(enable_asan)
  list(APPEND SANITIZER_TYPES "address")
elseif(enable_tsan)
  list(APPEND SANITIZER_TYPES "thread")
endif()
if(enable_ubsan)
  # UB sanitizer checks.
  list(APPEND SANITIZER_TYPES "undefined" "float-divide-by-zero")
  if(is_clang)
    # Clang supports additional UB checks:
    # https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html
    list(APPEND SANITIZER_TYPES "unsigned-integer-overflow")
  endif()
endif()

# Code model flags for GCC (see module header): large for ASAN to avoid
# relocation errors, medium for TSAN (large is incompatible with TSAN).
set(SANITIZERS_RELOCATION_FLAGS)

# Compiler-specific configuration.
if(is_gcc)
  # Instrumented binaries exceed the size limits enforced by the mold,
  # gold, and lld linkers, so force the default (bfd/ld) linker, which is
  # more lenient with mixed code models.
  set(use_mold OFF CACHE BOOL "Use mold linker" FORCE)
  set(use_gold OFF CACHE BOOL "Use gold linker" FORCE)
  set(use_lld OFF CACHE BOOL "Use lld linker" FORCE)
  message(STATUS " Disabled mold, gold, and lld linkers for GCC with sanitizers")

  # Suppress GCC stringop-overflow false positives under instrumentation.
  list(APPEND SANITIZERS_COMPILE_FLAGS "-Wno-stringop-overflow")
  if(is_amd64 AND enable_asan)
    message(STATUS " Using large code model (-mcmodel=large)")
    list(APPEND SANITIZERS_COMPILE_FLAGS "-mcmodel=large")
    list(APPEND SANITIZERS_RELOCATION_FLAGS "-mcmodel=large")
  elseif(enable_tsan)
    # NOTE(review): unlike the ASAN branch, this branch is not gated on
    # is_amd64, so -mcmodel=medium is also passed on arm64 — confirm
    # that is intended (behavior preserved here).
    # GCC doesn't support atomic_thread_fence with TSAN; suppress warnings.
    list(APPEND SANITIZERS_COMPILE_FLAGS "-Wno-tsan")
    message(STATUS " Using medium code model (-mcmodel=medium)")
    list(APPEND SANITIZERS_COMPILE_FLAGS "-mcmodel=medium")
    list(APPEND SANITIZERS_RELOCATION_FLAGS "-mcmodel=medium")
  endif()

  # Join sanitizer types with commas for the -fsanitize option.
  list(JOIN SANITIZER_TYPES "," SANITIZER_TYPES_STR)
  list(APPEND SANITIZERS_COMPILE_FLAGS "-fsanitize=${SANITIZER_TYPES_STR}")
  set(SANITIZERS_LINK_FLAGS "${SANITIZERS_RELOCATION_FLAGS}" "-fsanitize=${SANITIZER_TYPES_STR}")
elseif(is_clang)
  # Clang supports a compile-time ignorelist (GCC has no equivalent).
  set(IGNORELIST_PATH "${CMAKE_SOURCE_DIR}/sanitizers/suppressions/sanitizer-ignorelist.txt")
  if(NOT EXISTS "${IGNORELIST_PATH}")
    message(FATAL_ERROR "Sanitizer ignorelist not found: ${IGNORELIST_PATH}")
  endif()
  list(APPEND SANITIZERS_COMPILE_FLAGS "-fsanitize-ignorelist=${IGNORELIST_PATH}")
  message(STATUS " Using sanitizer ignorelist: ${IGNORELIST_PATH}")

  # Join sanitizer types with commas for the -fsanitize option.
  list(JOIN SANITIZER_TYPES "," SANITIZER_TYPES_STR)
  list(APPEND SANITIZERS_COMPILE_FLAGS "-fsanitize=${SANITIZER_TYPES_STR}")
  set(SANITIZERS_LINK_FLAGS "-fsanitize=${SANITIZER_TYPES_STR}")
endif()

message(STATUS " Compile flags: ${SANITIZERS_COMPILE_FLAGS}")
message(STATUS " Link flags: ${SANITIZERS_LINK_FLAGS}")

# Apply the sanitizer flags to the 'common' interface library (created in
# XrplCompiler.cmake), which every target in the project links against.
target_compile_options(common INTERFACE
  $<$<COMPILE_LANGUAGE:CXX>:${SANITIZERS_COMPILE_FLAGS}>
  $<$<COMPILE_LANGUAGE:C>:${SANITIZERS_COMPILE_FLAGS}>
)
# Apply linker flags.
target_link_options(common INTERFACE ${SANITIZERS_LINK_FLAGS})

# Define the SANITIZERS macro (e.g. "ASAN.UBSAN") for BuildInfo.cpp.
set(sanitizers_list)
if(enable_asan)
  list(APPEND sanitizers_list "ASAN")
endif()
if(enable_tsan)
  list(APPEND sanitizers_list "TSAN")
endif()
if(enable_ubsan)
  list(APPEND sanitizers_list "UBSAN")
endif()
if(sanitizers_list)
  list(JOIN sanitizers_list "." sanitizers_str)
  target_compile_definitions(common INTERFACE SANITIZERS=${sanitizers_str})
endif()

View File

@@ -2,6 +2,8 @@
sanity checks
#]===================================================================]
include(CompilationEnv)
get_property(is_multiconfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
set (CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "" FORCE)
@@ -16,14 +18,12 @@ if (NOT is_multiconfig)
endif ()
endif ()
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES ".*Clang") # both Clang and AppleClang
set (is_clang TRUE)
if (is_clang) # both Clang and AppleClang
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND
CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16.0)
message (FATAL_ERROR "This project requires clang 16 or later")
endif ()
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
set (is_gcc TRUE)
elseif (is_gcc)
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12.0)
message (FATAL_ERROR "This project requires GCC 12 or later")
endif ()
@@ -40,11 +40,6 @@ if (MSVC AND CMAKE_GENERATOR_PLATFORM STREQUAL "Win32")
message (FATAL_ERROR "Visual Studio 32-bit build is not supported.")
endif ()
if (NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
message (FATAL_ERROR "Xrpld requires a 64 bit target architecture.\n"
"The most likely cause of this warning is trying to build xrpld with a 32-bit OS.")
endif ()
if (APPLE AND NOT HOMEBREW)
find_program (HOMEBREW brew)
endif ()

View File

@@ -2,11 +2,7 @@
declare options and variables
#]===================================================================]
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
set (is_linux TRUE)
else()
set(is_linux FALSE)
endif()
include(CompilationEnv)
if("$ENV{CI}" STREQUAL "true" OR "$ENV{CONTINUOUS_INTEGRATION}" STREQUAL "true")
set(is_ci TRUE)
@@ -62,7 +58,7 @@ else()
set(wextra OFF CACHE BOOL "gcc/clang only" FORCE)
endif()
if(is_linux)
if(is_linux AND NOT SANITIZER)
option(BUILD_SHARED_LIBS "build shared xrpl libraries" OFF)
option(static "link protobuf, openssl, libc++, and boost statically" ON)
option(perf "Enables flags that assist with perf recording" OFF)
@@ -107,33 +103,6 @@ option(local_protobuf
option(local_grpc
"Force a local build of gRPC instead of looking for an installed version." OFF)
# this one is a string and therefore can't be an option
set(san "" CACHE STRING "On gcc & clang, add sanitizer instrumentation")
set_property(CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
if(san)
string(TOLOWER ${san} san)
set(SAN_FLAG "-fsanitize=${san}")
set(SAN_LIB "")
if(is_gcc)
if(san STREQUAL "address")
set(SAN_LIB "asan")
elseif(san STREQUAL "thread")
set(SAN_LIB "tsan")
elseif(san STREQUAL "memory")
set(SAN_LIB "msan")
elseif(san STREQUAL "undefined")
set(SAN_LIB "ubsan")
endif()
endif()
set(_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
set(CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
check_cxx_compiler_flag(${SAN_FLAG} COMPILER_SUPPORTS_SAN)
set(CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
if(NOT COMPILER_SUPPORTS_SAN)
message(FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
endif()
endif()
# the remaining options are obscure and rarely used
option(beast_no_unit_test_inline
"Prevents unit test definitions from being inserted into global table"

View File

@@ -1,3 +1,6 @@
include(CompilationEnv)
include(XrplSanitizers)
find_package(Boost REQUIRED
COMPONENTS
chrono
@@ -32,7 +35,7 @@ target_link_libraries(xrpl_boost
if(Boost_COMPILER)
target_link_libraries(xrpl_boost INTERFACE Boost::disable_autolinking)
endif()
if(san AND is_clang)
if(SANITIZERS_ENABLED AND is_clang)
# TODO: gcc does not support -fsanitize-blacklist...can we do something else
# for gcc ?
if(NOT Boost_INCLUDE_DIRS AND TARGET Boost::headers)

View File

@@ -17,9 +17,9 @@
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1765842973.03",
"libarchive/3.8.1#ffee18995c706e02bf96e7a2f7042e0d%1765850144.736",
"jemalloc/5.3.0#e951da9cf599e956cebc117880d2d9f8%1729241615.244",
"gtest/1.17.0#5224b3b3ff3b4ce1133cbdd27d53ee7d%1768312129.152",
"grpc/1.72.0#f244a57bff01e708c55a1100b12e1589%1765850193.734",
"ed25519/2015.03#ae761bdc52730a843f0809bdf6c1b1f6%1765850143.772",
"doctest/2.4.12#eb9fb352fb2fdfc8abb17ec270945165%1765850143.95",
"date/3.0.4#862e11e80030356b53c2c38599ceb32b%1765850143.772",
"c-ares/1.34.5#5581c2b62a608b40bb85d965ab3ec7c8%1765850144.336",
"bzip2/1.0.8#c470882369c2d95c5c77e970c0c7e321%1765850143.837",

1
conan/profiles/ci Normal file
View File

@@ -0,0 +1 @@
# CI profile: pulls in the sanitizers profile, which itself includes the
# default profile. The sanitizer flags in that profile are only emitted
# when the SANITIZERS environment variable is set, so with it unset this
# profile behaves like the default profile.
include(sanitizers)

59
conan/profiles/sanitizers Normal file
View File

@@ -0,0 +1,59 @@
{# Conan profile that layers sanitizer instrumentation on top of the user's
   default profile. Driven by the SANITIZERS environment variable, e.g.
   SANITIZERS=address,undefinedbehavior. Recognized tokens: "address",
   "thread", "undefinedbehavior". #}
include(default)
{% set compiler, version, compiler_exe = detect_api.detect_default_compiler() %}
{% set sanitizers = os.getenv("SANITIZERS") %}
[conf]
{% if sanitizers %}
{% if compiler == "gcc" %}
{% if "address" in sanitizers or "thread" in sanitizers or "undefinedbehavior" in sanitizers %}
{% set sanitizer_list = [] %}
{% set model_code = "" %}
{% set extra_cxxflags = ["-fno-omit-frame-pointer", "-O1", "-Wno-stringop-overflow"] %}
{# address and thread are mutually exclusive; "address" takes precedence
   because of the elif below. #}
{% if "address" in sanitizers %}
{% set _ = sanitizer_list.append("address") %}
{% set model_code = "-mcmodel=large" %}
{% elif "thread" in sanitizers %}
{% set _ = sanitizer_list.append("thread") %}
{% set model_code = "-mcmodel=medium" %}
{% set _ = extra_cxxflags.append("-Wno-tsan") %}
{% endif %}
{% if "undefinedbehavior" in sanitizers %}
{% set _ = sanitizer_list.append("undefined") %}
{% set _ = sanitizer_list.append("float-divide-by-zero") %}
{% endif %}
{# The same -fsanitize= flags must be applied to compile and both link
   steps so every object and binary is consistently instrumented. #}
{% set sanitizer_flags = "-fsanitize=" ~ ",".join(sanitizer_list) ~ " " ~ model_code %}
tools.build:cxxflags+=['{{sanitizer_flags}} {{" ".join(extra_cxxflags)}}']
tools.build:sharedlinkflags+=['{{sanitizer_flags}}']
tools.build:exelinkflags+=['{{sanitizer_flags}}']
{% endif %}
{% elif compiler == "apple-clang" or compiler == "clang" %}
{% if "address" in sanitizers or "thread" in sanitizers or "undefinedbehavior" in sanitizers %}
{% set sanitizer_list = [] %}
{% set extra_cxxflags = ["-fno-omit-frame-pointer", "-O1"] %}
{% if "address" in sanitizers %}
{% set _ = sanitizer_list.append("address") %}
{% elif "thread" in sanitizers %}
{% set _ = sanitizer_list.append("thread") %}
{% endif %}
{% if "undefinedbehavior" in sanitizers %}
{% set _ = sanitizer_list.append("undefined") %}
{% set _ = sanitizer_list.append("float-divide-by-zero") %}
{# unsigned-integer-overflow is clang-only; gcc does not support it,
   which is why it is absent from the gcc branch above. #}
{% set _ = sanitizer_list.append("unsigned-integer-overflow") %}
{% endif %}
{% set sanitizer_flags = "-fsanitize=" ~ ",".join(sanitizer_list) %}
tools.build:cxxflags+=['{{sanitizer_flags}} {{" ".join(extra_cxxflags)}}']
tools.build:sharedlinkflags+=['{{sanitizer_flags}}']
tools.build:exelinkflags+=['{{sanitizer_flags}}']
{% endif %}
{% endif %}
{% endif %}
{# Fold the flag confs into the package id so sanitized and unsanitized
   dependency binaries are cached separately. #}
tools.info.package_id:confs+=["tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags"]

View File

@@ -39,7 +39,7 @@ class Xrpl(ConanFile):
]
test_requires = [
"doctest/2.4.12",
"gtest/1.17.0",
]
tool_requires = [

207
docs/build/sanitizers.md vendored Normal file
View File

@@ -0,0 +1,207 @@
# Sanitizer Configuration for Rippled
This document explains how to properly configure and run sanitizers (AddressSanitizer, UndefinedBehaviorSanitizer, ThreadSanitizer) with the xrpld project.
Corresponding suppression files are located in the `sanitizers/suppressions` directory.
- [Sanitizer Configuration for Rippled](#sanitizer-configuration-for-rippled)
- [Building with Sanitizers](#building-with-sanitizers)
- [Summary](#summary)
- [Build steps:](#build-steps)
- [Install dependencies](#install-dependencies)
- [Call CMake](#call-cmake)
- [Build](#build)
- [Running Tests with Sanitizers](#running-tests-with-sanitizers)
- [AddressSanitizer (ASAN)](#addresssanitizer-asan)
- [ThreadSanitizer (TSan)](#threadsanitizer-tsan)
- [LeakSanitizer (LSan)](#leaksanitizer-lsan)
- [UndefinedBehaviorSanitizer (UBSan)](#undefinedbehaviorsanitizer-ubsan)
- [Suppression Files](#suppression-files)
- [`asan.supp`](#asansupp)
- [`lsan.supp`](#lsansupp)
- [`ubsan.supp`](#ubsansupp)
- [`tsan.supp`](#tsansupp)
- [`sanitizer-ignorelist.txt`](#sanitizer-ignorelisttxt)
- [Troubleshooting](#troubleshooting)
- ["ASAN is ignoring requested \_\_asan_handle_no_return" warnings](#asan-is-ignoring-requested-__asan_handle_no_return-warnings)
- [Sanitizer Mismatch Errors](#sanitizer-mismatch-errors)
- [References](#references)
## Building with Sanitizers
### Summary
Follow the same instructions as mentioned in [BUILD.md](../../BUILD.md) but with the following changes:
1. Make sure you have a clean build directory.
2. Set the `SANITIZERS` environment variable before calling conan install and cmake. Only set it once. Make sure both conan and cmake read the same values.
Example: `export SANITIZERS=address,undefinedbehavior`
3. Optionally use `--profile:all sanitizers` with Conan to build dependencies with sanitizer instrumentation.
   > [!NOTE]
   > Building with sanitizer-instrumented dependencies is slower but produces fewer false positives.
4. Set `ASAN_OPTIONS`, `LSAN_OPTIONS`, `UBSAN_OPTIONS` and `TSAN_OPTIONS` environment variables to configure sanitizer behavior when running executables. [More details below](#running-tests-with-sanitizers).
---
### Build steps:
```bash
cd /path/to/rippled
rm -rf .build
mkdir .build
cd .build
```
#### Install dependencies
The `SANITIZERS` environment variable is used by both Conan and CMake.
```bash
export SANITIZERS=address,undefinedbehavior
# Standard build (without instrumenting dependencies)
conan install .. --output-folder . --build missing --settings build_type=Debug
# Or with sanitizer-instrumented dependencies (takes longer but fewer false positives)
conan install .. --output-folder . --profile:all sanitizers --build missing --settings build_type=Debug
```
[!CAUTION]
Do not mix Address and Thread sanitizers - they are incompatible.
Since you already set the `SANITIZERS` environment variable when running Conan, same values will be read for the next part.
#### Call CMake
```bash
cmake .. -G Ninja \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE=Debug \
-Dtests=ON -Dxrpld=ON
```
#### Build
```bash
cmake --build . --parallel 4
```
## Running Tests with Sanitizers
### AddressSanitizer (ASAN)
**IMPORTANT**: ASAN with Boost produces many false positives. Use these options:
```bash
export ASAN_OPTIONS="print_stacktrace=1:detect_container_overflow=0:suppressions=path/to/asan.supp:halt_on_error=0:log_path=asan.log"
export LSAN_OPTIONS="suppressions=path/to/lsan.supp:halt_on_error=0:log_path=lsan.log"
# Run tests
./xrpld --unittest --unittest-jobs=5
```
**Why `detect_container_overflow=0`?**
- Boost intrusive containers (used in `aged_unordered_container`) trigger false positives
- Boost context switching (used in `Workers.cpp`) confuses ASAN's stack tracking
- Since we usually don't build Boost (because we don't want to instrument Boost and detect issues in Boost code) with ASAN but use Boost containers in ASAN instrumented rippled code, it generates false positives.
- Building dependencies with ASAN instrumentation reduces false positives. But we don't want to instrument dependencies like Boost with ASAN because it is slow (to compile as well as run tests) and not necessary.
- See: https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow
- More such flags are detailed [here](https://github.com/google/sanitizers/wiki/AddressSanitizerFlags)
### ThreadSanitizer (TSan)
```bash
export TSAN_OPTIONS="suppressions=path/to/tsan.supp halt_on_error=0 log_path=tsan.log"
# Run tests
./xrpld --unittest --unittest-jobs=5
```
More details [here](https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual).
### LeakSanitizer (LSan)
LSan is automatically enabled with ASAN. To disable it:
```bash
export ASAN_OPTIONS="detect_leaks=0"
```
More details [here](https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer).
### UndefinedBehaviorSanitizer (UBSan)
```bash
export UBSAN_OPTIONS="suppressions=path/to/ubsan.supp:print_stacktrace=1:halt_on_error=0:log_path=ubsan.log"
# Run tests
./xrpld --unittest --unittest-jobs=5
```
More details [here](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html).
## Suppression Files
[!NOTE] Attached files contain more details.
### [`asan.supp`](../../sanitizers/suppressions/asan.supp)
- **Purpose**: Suppress AddressSanitizer (ASAN) errors only
- **Format**: `interceptor_name:<pattern>` where pattern matches file names. Supported suppression types are:
- interceptor_name
- interceptor_via_fun
- interceptor_via_lib
- odr_violation
- **More info**: [AddressSanitizer](https://github.com/google/sanitizers/wiki/AddressSanitizer)
- **Note**: Cannot suppress stack-buffer-overflow, container-overflow, etc.
### [`lsan.supp`](../../sanitizers/suppressions/lsan.supp)
- **Purpose**: Suppress LeakSanitizer (LSan) errors only
- **Format**: `leak:<pattern>` where pattern matches function/file names
- **More info**: [LeakSanitizer](https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer)
### [`ubsan.supp`](../../sanitizers/suppressions/ubsan.supp)
- **Purpose**: Suppress UndefinedBehaviorSanitizer errors
- **Format**: `<error_type>:<pattern>` (e.g., `unsigned-integer-overflow:protobuf`)
- **Covers**: Intentional overflows in third-party libraries (protobuf, gRPC, stdlib)
- More info [UBSan suppressions](https://clang.llvm.org/docs/SanitizerSpecialCaseList.html).
### [`tsan.supp`](../../sanitizers/suppressions/tsan.supp)
- **Purpose**: Suppress ThreadSanitizer data race warnings
- **Format**: `race:<pattern>` where pattern matches function/file names
- **More info**: [ThreadSanitizer suppressions](https://github.com/google/sanitizers/wiki/ThreadSanitizerSuppressions)
### [`sanitizer-ignorelist.txt`](../../sanitizers/suppressions/sanitizer-ignorelist.txt)
- **Purpose**: Compile-time ignorelist for all sanitizers
- **Usage**: Passed via `-fsanitize-ignorelist=absolute/path/to/sanitizer-ignorelist.txt`
- **Format**: `<level>:<pattern>` (e.g., `src:Workers.cpp`)
## Troubleshooting
### "ASAN is ignoring requested \_\_asan_handle_no_return" warnings
These warnings appear when using Boost context switching and are harmless. They indicate potential false positives.
### Sanitizer Mismatch Errors
If you see undefined symbols like `___tsan_atomic_load` when building with ASAN:
**Problem**: Dependencies were built with a different sanitizer than the main project.
**Solution**: Rebuild everything with the same sanitizer:
```bash
rm -rf .build
# Then follow the build instructions above
```
Then review the log files: `asan.log.*`, `ubsan.log.*`, `tsan.log.*`
## References
- [AddressSanitizer Wiki](https://github.com/google/sanitizers/wiki/AddressSanitizer)
- [AddressSanitizer Flags](https://github.com/google/sanitizers/wiki/AddressSanitizerFlags)
- [Container Overflow Detection](https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow)
- [UndefinedBehavior Sanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html)
- [ThreadSanitizer](https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual)

View File

@@ -0,0 +1,134 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2024 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_CANPROCESS_H_INCLUDED
#define RIPPLE_BASICS_CANPROCESS_H_INCLUDED
#include <functional>
#include <mutex>
#include <set>
/** RAII class to check if an Item is already being processed on another thread,
 * as indicated by its presence in a Collection.
*
* If the Item is not in the Collection, it will be added under lock in the
* ctor, and removed under lock in the dtor. The object will be considered
* "usable" and evaluate to `true`.
*
* If the Item is in the Collection, no changes will be made to the collection,
* and the CanProcess object will be considered "unusable".
*
* It's up to the caller to decide what "usable" and "unusable" mean. (e.g.
* Process or skip a block of code, or set a flag.)
*
* The current use is to avoid lock contention that would be involved in
* processing something associated with the Item.
*
* Examples:
*
* void IncomingLedgers::acquireAsync(LedgerHash const& hash, ...)
* {
* if (CanProcess check{acquiresMutex_, pendingAcquires_, hash})
* {
* acquire(hash, ...);
* }
* }
*
* bool
* NetworkOPsImp::recvValidation(
* std::shared_ptr<STValidation> const& val,
* std::string const& source)
* {
* CanProcess check(
* validationsMutex_, pendingValidations_, val->getLedgerHash());
* BypassAccept bypassAccept =
* check ? BypassAccept::no : BypassAccept::yes;
* handleNewValidation(app_, val, source, bypassAccept, m_journal);
* }
*
*/
class CanProcess
{
public:
    /** Attempt to claim `item` by inserting it into `collection` under
        `mtx`. On success the object is "usable" (evaluates to true) and
        the destructor removes the item again. `mtx` and `collection` must
        outlive this object; `item` is copied where needed, so it may be a
        temporary. */
    template <class Mutex, class Collection, class Item>
    CanProcess(Mutex& mtx, Collection& collection, Item const& item)
        : cleanup_(insert(mtx, collection, item))
    {
    }

    ~CanProcess()
    {
        // Only the instance that actually inserted the item removes it.
        if (cleanup_)
            cleanup_();
    }

    /// True if this instance claimed the item (i.e. it was not already
    /// being processed elsewhere).
    explicit
    operator bool() const
    {
        return static_cast<bool>(cleanup_);
    }

private:
    template <bool useIterator, class Mutex, class Collection, class Item>
    std::function<void()>
    doInsert(Mutex& mtx, Collection& collection, Item const& item)
    {
        std::unique_lock<Mutex> lock(mtx);
        // TODO: Use structured binding once LLVM 16 is the minimum supported
        // version. See also: https://github.com/llvm/llvm-project/issues/48582
        // https://github.com/llvm/llvm-project/commit/127bf44385424891eb04cff8e52d3f157fc2cb7c
        auto const insertResult = collection.insert(item);
        auto const it = insertResult.first;
        if (!insertResult.second)
            return {};
        if constexpr (useIterator)
            // `it` is captured by value; `mtx` and `collection` by
            // reference (the ctor contract requires they outlive us).
            return [&, it]() {
                std::unique_lock<Mutex> lock(mtx);
                collection.erase(it);
            };
        else
            // Capture `item` BY VALUE: the constructor argument may be a
            // temporary destroyed before ~CanProcess runs, so a reference
            // capture ([&] alone) could dangle when erase() executes.
            return [&, item]() {
                std::unique_lock<Mutex> lock(mtx);
                collection.erase(item);
            };
    }

    // Generic insert() function doesn't use iterators because they may get
    // invalidated
    template <class Mutex, class Collection, class Item>
    std::function<void()>
    insert(Mutex& mtx, Collection& collection, Item const& item)
    {
        return doInsert<false>(mtx, collection, item);
    }

    // Specialize insert() for std::set, which does not invalidate iterators for
    // insert and erase
    template <class Mutex, class Item>
    std::function<void()>
    insert(Mutex& mtx, std::set<Item>& collection, Item const& item)
    {
        return doInsert<true>(mtx, collection, item);
    }

    // If set, then the item is "usable"
    std::function<void()> cleanup_;
};
#endif

View File

@@ -286,8 +286,18 @@ message TMLedgerData {
required uint32 ledgerSeq = 2;
required TMLedgerInfoType type = 3;
repeated TMLedgerNode nodes = 4;
// If the peer supports "responseCookies", this field will
// never be populated.
optional uint32 requestCookie = 5;
optional TMReplyError error = 6;
// The old field is called "requestCookie", but this is
// a response, so this name makes more sense
repeated uint32 responseCookies = 7;
// If a TMGetLedger request was received without a "requestCookie",
// and the peer supports it, this flag will be set to true to
// indicate that the receiver should process the result in addition
// to forwarding it to its "responseCookies" peers.
optional bool directResponse = 8;
}
message TMPing {

View File

@@ -36,6 +36,8 @@ struct LedgerHeader
// If validated is false, it means "not yet validated."
// Once validated is true, it will never be set false at a later time.
// NOTE: If you are accessing this directly, you are probably doing it
// wrong. Use LedgerMaster::isValidated().
// VFALCO TODO Make this not mutable
bool mutable validated = false;
bool accepted = false;

View File

@@ -0,0 +1,29 @@
# The idea is to empty this file gradually by fixing the underlying issues and removing suppressions.
#
# ASAN_OPTIONS="print_stacktrace=1:detect_container_overflow=0:suppressions=sanitizers/suppressions/asan.supp:halt_on_error=0"
#
# The detect_container_overflow=0 option disables false positives from:
# - Boost intrusive containers (slist_iterator.hpp, hashtable.hpp, aged_unordered_container.h)
# - Boost context/coroutine stack switching (Workers.cpp, thread.h)
#
# See: https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow
# Boost
interceptor_name:boost/asio
# Leaks in Doctest tests: xrpl.test.*
interceptor_name:src/libxrpl/net/HTTPClient.cpp
interceptor_name:src/libxrpl/net/RegisterSSLCerts.cpp
interceptor_name:src/tests/libxrpl/net/HTTPClient.cpp
interceptor_name:xrpl/net/AutoSocket.h
interceptor_name:xrpl/net/HTTPClient.h
interceptor_name:xrpl/net/HTTPClientSSLContext.h
interceptor_name:xrpl/net/RegisterSSLCerts.h
# Suppress false positive stack-buffer errors in thread stack allocation
# Related to ASan's __asan_handle_no_return warnings (github.com/google/sanitizers/issues/189)
# These occur during multi-threaded test initialization on macOS
interceptor_name:memcpy
interceptor_name:__bzero
interceptor_name:__asan_memset
interceptor_name:__asan_memcpy

View File

@@ -0,0 +1,16 @@
# The idea is to empty this file gradually by fixing the underlying issues and removing suppressions.
# Suppress leaks detected by asan in rippled code.
leak:src/libxrpl/net/HTTPClient.cpp
leak:src/libxrpl/net/RegisterSSLCerts.cpp
leak:src/tests/libxrpl/net/HTTPClient.cpp
leak:xrpl/net/AutoSocket.h
leak:xrpl/net/HTTPClient.h
leak:xrpl/net/HTTPClientSSLContext.h
leak:xrpl/net/RegisterSSLCerts.h
leak:ripple::HTTPClient
leak:ripple::HTTPClientImp
# Suppress leaks detected by asan in boost code.
leak:boost::asio
leak:boost/asio

View File

@@ -0,0 +1,29 @@
# We were seeing some false positives and some repeated errors (since these are library files) in the following files.
# Clang will skip instrumenting the files added here.
# We should fix the underlying issues(if any) and remove these entries.
deadlock:libxrpl/beast/utility/beast_Journal.cpp
deadlock:libxrpl/beast/utility/beast_PropertyStream.cpp
deadlock:test/beast/beast_PropertyStream_test.cpp
deadlock:xrpld/core/detail/Workers.cpp
deadlock:xrpld/core/JobQueue.cpp
race:libxrpl/beast/utility/beast_Journal.cpp
race:libxrpl/beast/utility/beast_PropertyStream.cpp
race:test/beast/beast_PropertyStream_test.cpp
race:xrpld/core/detail/Workers.cpp
race:xrpld/core/JobQueue.cpp
signal:libxrpl/beast/utility/beast_Journal.cpp
signal:libxrpl/beast/utility/beast_PropertyStream.cpp
signal:test/beast/beast_PropertyStream_test.cpp
signal:xrpld/core/detail/Workers.cpp
signal:xrpld/core/JobQueue.cpp
src:beast/utility/beast_Journal.cpp
src:beast/utility/beast_PropertyStream.cpp
src:core/detail/Workers.cpp
src:core/JobQueue.cpp
src:libxrpl/beast/utility/beast_Journal.cpp
src:test/beast/beast_PropertyStream_test.cpp
src:src/test/app/Invariants_test.cpp

View File

@@ -0,0 +1,102 @@
# The idea is to empty this file gradually by fixing the underlying issues and removing suppressions.
# Suppress race in Boost ASIO scheduler detected by GCC-15
# This is a false positive in Boost's internal pipe() synchronization
race:boost/asio/
race:boost/context/
race:boost/asio/executor.hpp
race:boost::asio
# Suppress tsan related issues in rippled code.
race:src/libxrpl/basics/make_SSLContext.cpp
race:src/libxrpl/basics/Number.cpp
race:src/libxrpl/json/json_value.cpp
race:src/libxrpl/json/to_string.cpp
race:src/libxrpl/ledger/OpenView.cpp
race:src/libxrpl/net/HTTPClient.cpp
race:src/libxrpl/nodestore/backend/NuDBFactory.cpp
race:src/libxrpl/protocol/InnerObjectFormats.cpp
race:src/libxrpl/protocol/STParsedJSON.cpp
race:src/libxrpl/resource/ResourceManager.cpp
race:src/test/app/Flow_test.cpp
race:src/test/app/LedgerReplay_test.cpp
race:src/test/app/NFToken_test.cpp
race:src/test/app/Offer_test.cpp
race:src/test/app/ValidatorSite_test.cpp
race:src/test/consensus/NegativeUNL_test.cpp
race:src/test/jtx/impl/Env.cpp
race:src/test/jtx/impl/JSONRPCClient.cpp
race:src/test/jtx/impl/pay.cpp
race:src/test/jtx/impl/token.cpp
race:src/test/rpc/Book_test.cpp
race:src/xrpld/app/ledger/detail/InboundTransactions.cpp
race:src/xrpld/app/main/Application.cpp
race:src/xrpld/app/main/BasicApp.cpp
race:src/xrpld/app/main/GRPCServer.cpp
race:src/xrpld/app/misc/detail/AmendmentTable.cpp
race:src/xrpld/app/misc/FeeVoteImpl.cpp
race:src/xrpld/app/rdb/detail/Wallet.cpp
race:src/xrpld/overlay/detail/OverlayImpl.cpp
race:src/xrpld/peerfinder/detail/PeerfinderManager.cpp
race:src/xrpld/peerfinder/detail/SourceStrings.cpp
race:src/xrpld/rpc/detail/ServerHandler.cpp
race:xrpl/server/detail/Door.h
race:xrpl/server/detail/Spawn.h
race:xrpl/server/detail/ServerImpl.h
race:xrpl/nodestore/detail/DatabaseNodeImp.h
race:src/libxrpl/beast/utility/beast_Journal.cpp
race:src/test/beast/LexicalCast_test.cpp
race:ripple::ServerHandler
# More suppressions in external library code.
race:crtstuff.c
race:pipe
# Deadlock / lock-order-inversion suppressions
# Note: GCC's TSAN may not fully support all deadlock suppression patterns
deadlock:src/libxrpl/beast/utility/beast_Journal.cpp
deadlock:src/libxrpl/beast/utility/beast_PropertyStream.cpp
deadlock:src/test/beast/beast_PropertyStream_test.cpp
deadlock:src/xrpld/core/detail/Workers.cpp
deadlock:src/xrpld/app/misc/detail/Manifest.cpp
deadlock:src/xrpld/app/misc/detail/ValidatorList.cpp
deadlock:src/xrpld/app/misc/detail/ValidatorSite.cpp
signal:src/libxrpl/beast/utility/beast_Journal.cpp
signal:src/xrpld/core/detail/Workers.cpp
signal:src/xrpld/core/JobQueue.cpp
signal:ripple::Workers::Worker
# Aggressive suppressing of deadlock tsan errors
deadlock:pthread_create
deadlock:pthread_rwlock_rdlock
deadlock:boost::asio
# Suppress SEGV crashes in TSAN itself during stringbuf operations
# This appears to be a GCC-15 TSAN instrumentation issue with basic_stringbuf::str()
# Commonly triggered in beast::Journal::ScopedStream destructor
signal:std::__cxx11::basic_stringbuf
signal:basic_stringbuf
signal:basic_ostringstream
called_from_lib:libclang_rt
race:ostreambuf_iterator
race:basic_ostream
# Suppress SEGV in Boost ASIO memory allocation with GCC-15 TSAN
signal:boost::asio::aligned_new
signal:boost::asio::detail::memory
# Suppress SEGV in execute_native_thread_routine
signal:execute_native_thread_routine
# Suppress data race in Boost Context fiber management
# This is a false positive in Boost's exception state management during fiber context switching
race:__cxxabiv1::manage_exception_state
race:boost::context::fiber::resume
race:boost::asio::detail::spawned_fiber_thread
race:boost::asio::detail::spawned_fiber_thread::suspend_with
race:boost::asio::detail::spawned_fiber_thread::destroy
# Suppress data race in __tsan_memcpy called from Boost fiber operations
race:__tsan_memcpy

View File

@@ -0,0 +1,237 @@
# The idea is to empty this file gradually by fixing the underlying issues and removing suppressions.
# Suppress UBSan errors in external code by source file path
# This matches any source file under the external/ directory
alignment:external
bool:external
bounds:external
cfi:external
enum:external
float-cast-overflow:external
float-divide-by-zero:external
function:external
implicit-integer-sign-change:external
implicit-signed-integer-truncation:external
implicit-signed-integer-truncation:external
implicit-unsigned-integer-truncation:external
integer-divide-by-zero:external
invalid-builtin-use:external
invalid-objc-cast:external
nonnull-attribute:external
null:external
nullability-arg:external
nullability-assign:external
nullability-return:external
object-size:external
pointer-overflow:external
return:external
returns-nonnull-attribute:external
shift-base:external
shift-exponent:external
signed-integer-overflow:external
undefined:external
unreachable:external
unsigned-integer-overflow:external
vla-bound:external
vptr_check:external
vptr:external
# Suppress all UBSan errors in Boost libraries
# This matches any files containing "boost" in its path or name
alignment:boost
bool:boost
bounds:boost
cfi:boost
enum:boost
float-cast-overflow:boost
float-divide-by-zero:boost
function:boost
implicit-integer-sign-change:boost
implicit-signed-integer-truncation:boost
implicit-unsigned-integer-truncation:boost
integer-divide-by-zero:boost
invalid-builtin-use:boost
invalid-objc-cast:boost
nonnull-attribute:boost
null:boost
nullability-arg:boost
nullability-assign:boost
nullability-return:boost
object-size:boost
pointer-overflow:boost
return:boost
returns-nonnull-attribute:boost
shift-base:boost
shift-exponent:boost
signed-integer-overflow:boost
undefined:boost
unreachable:boost
unsigned-integer-overflow:boost
vla-bound:boost
vptr_check:boost
vptr:boost
# Google protobuf
undefined:protobuf
# Suppress UBSan errors in rippled code by source file path
undefined:src/libxrpl/basics/base64.cpp
undefined:src/libxrpl/basics/Number.cpp
undefined:src/libxrpl/beast/utility/beast_Journal.cpp
undefined:src/libxrpl/crypto/RFC1751.cpp
undefined:src/libxrpl/ledger/ApplyView.cpp
undefined:src/libxrpl/ledger/View.cpp
undefined:src/libxrpl/protocol/Permissions.cpp
undefined:src/libxrpl/protocol/STAmount.cpp
undefined:src/libxrpl/protocol/STPathSet.cpp
undefined:src/libxrpl/protocol/tokens.cpp
undefined:src/libxrpl/shamap/SHAMap.cpp
undefined:src/test/app/Batch_test.cpp
undefined:src/test/app/Invariants_test.cpp
undefined:src/test/app/NFToken_test.cpp
undefined:src/test/app/Offer_test.cpp
undefined:src/test/app/Path_test.cpp
undefined:src/test/basics/XRPAmount_test.cpp
undefined:src/test/beast/LexicalCast_test.cpp
undefined:src/test/jtx/impl/acctdelete.cpp
undefined:src/test/ledger/SkipList_test.cpp
undefined:src/test/rpc/Subscribe_test.cpp
undefined:src/tests/libxrpl/basics/RangeSet.cpp
undefined:src/xrpld/app/main/BasicApp.cpp
undefined:src/xrpld/app/main/BasicApp.cpp
undefined:src/xrpld/app/misc/detail/AmendmentTable.cpp
undefined:src/xrpld/app/misc/NetworkOPs.cpp
undefined:src/libxrpl/json/json_value.cpp
undefined:src/xrpld/app/paths/detail/StrandFlow.h
undefined:src/xrpld/app/tx/detail/NFTokenMint.cpp
undefined:src/xrpld/app/tx/detail/SetOracle.cpp
undefined:src/xrpld/core/detail/JobQueue.cpp
undefined:src/xrpld/core/detail/Workers.cpp
undefined:src/xrpld/rpc/detail/Role.cpp
undefined:src/xrpld/rpc/handlers/GetAggregatePrice.cpp
undefined:xrpl/basics/base_uint.h
undefined:xrpl/basics/DecayingSample.h
undefined:xrpl/beast/test/yield_to.h
undefined:xrpl/beast/xor_shift_engine.h
undefined:xrpl/nodestore/detail/varint.h
undefined:xrpl/peerfinder/detail/Counts.h
undefined:xrpl/protocol/nft.h
# basic_string.h:483:51: runtime error: unsigned integer overflow
unsigned-integer-overflow:basic_string.h
unsigned-integer-overflow:bits/chrono.h
unsigned-integer-overflow:bits/random.h
unsigned-integer-overflow:bits/random.tcc
unsigned-integer-overflow:bits/stl_algobase.h
unsigned-integer-overflow:bits/uniform_int_dist.h
unsigned-integer-overflow:string_view
# runtime error: unsigned integer overflow: 0 - 1 cannot be represented in type 'std::size_t' (aka 'unsigned long')
unsigned-integer-overflow:src/libxrpl/basics/base64.cpp
unsigned-integer-overflow:src/libxrpl/basics/Number.cpp
unsigned-integer-overflow:src/libxrpl/crypto/RFC1751.cpp
unsigned-integer-overflow:src/libxrpl/json/json_value.cpp
unsigned-integer-overflow:src/libxrpl/ledger/ApplyView.cpp
unsigned-integer-overflow:src/libxrpl/ledger/View.cpp
unsigned-integer-overflow:src/libxrpl/protocol/Permissions.cpp
unsigned-integer-overflow:src/libxrpl/protocol/STAmount.cpp
unsigned-integer-overflow:src/libxrpl/protocol/STPathSet.cpp
unsigned-integer-overflow:src/libxrpl/protocol/tokens.cpp
unsigned-integer-overflow:src/libxrpl/shamap/SHAMap.cpp
unsigned-integer-overflow:src/test/app/Batch_test.cpp
unsigned-integer-overflow:src/test/app/Invariants_test.cpp
unsigned-integer-overflow:src/test/app/NFToken_test.cpp
unsigned-integer-overflow:src/test/app/Offer_test.cpp
unsigned-integer-overflow:src/test/app/Path_test.cpp
unsigned-integer-overflow:src/test/basics/XRPAmount_test.cpp
unsigned-integer-overflow:src/test/beast/LexicalCast_test.cpp
unsigned-integer-overflow:src/test/jtx/impl/acctdelete.cpp
unsigned-integer-overflow:src/test/ledger/SkipList_test.cpp
unsigned-integer-overflow:src/test/rpc/Subscribe_test.cpp
unsigned-integer-overflow:src/tests/libxrpl/basics/RangeSet.cpp
unsigned-integer-overflow:src/xrpld/app/main/BasicApp.cpp
unsigned-integer-overflow:src/xrpld/app/misc/detail/AmendmentTable.cpp
unsigned-integer-overflow:src/xrpld/app/misc/NetworkOPs.cpp
unsigned-integer-overflow:src/xrpld/app/paths/detail/StrandFlow.h
unsigned-integer-overflow:src/xrpld/app/tx/detail/NFTokenMint.cpp
unsigned-integer-overflow:src/xrpld/app/tx/detail/SetOracle.cpp
unsigned-integer-overflow:src/xrpld/rpc/detail/Role.cpp
unsigned-integer-overflow:src/xrpld/rpc/handlers/GetAggregatePrice.cpp
unsigned-integer-overflow:xrpl/basics/base_uint.h
unsigned-integer-overflow:xrpl/basics/DecayingSample.h
unsigned-integer-overflow:xrpl/beast/test/yield_to.h
unsigned-integer-overflow:xrpl/beast/xor_shift_engine.h
unsigned-integer-overflow:xrpl/nodestore/detail/varint.h
unsigned-integer-overflow:xrpl/peerfinder/detail/Counts.h
unsigned-integer-overflow:xrpl/protocol/nft.h
# Rippled intentional overflows and operations
# STAmount uses intentional negation of INT64_MIN and overflow in arithmetic
signed-integer-overflow:src/libxrpl/protocol/STAmount.cpp
unsigned-integer-overflow:src/libxrpl/protocol/STAmount.cpp
# XRPAmount test intentional overflows
signed-integer-overflow:src/test/basics/XRPAmount_test.cpp
# Peerfinder intentional overflow in counter arithmetic
unsigned-integer-overflow:src/xrpld/peerfinder/detail/Counts.h
# Signed integer overflow suppressions
signed-integer-overflow:src/test/beast/LexicalCast_test.cpp
# External library suppressions
unsigned-integer-overflow:nudb/detail/xxhash.hpp
# Protobuf intentional overflows in hash functions
# Protobuf uses intentional unsigned overflow for hash computation (stringpiece.h:393)
unsigned-integer-overflow:google/protobuf/stubs/stringpiece.h
# gRPC intentional overflows
# gRPC uses intentional overflow in timer calculations
unsigned-integer-overflow:grpc
unsigned-integer-overflow:timer_manager.cc
# Standard library intentional overflows
# These are intentional overflows in random number generation and character conversion
unsigned-integer-overflow:__random/seed_seq.h
unsigned-integer-overflow:__charconv/traits.h
# Suppress errors in RocksDB
# RocksDB uses intentional unsigned integer overflows in hash functions and CRC calculations
unsigned-integer-overflow:rocks*/*/util/xxhash.h
unsigned-integer-overflow:rocks*/*/util/xxph3.h
unsigned-integer-overflow:rocks*/*/util/hash.cc
unsigned-integer-overflow:rocks*/*/util/crc32c.cc
unsigned-integer-overflow:rocks*/*/util/crc32c.h
unsigned-integer-overflow:rocks*/*/include/rocksdb/utilities/options_type.h
unsigned-integer-overflow:rocks*/*/table/format.h
unsigned-integer-overflow:rocks*/*/table/format.cc
unsigned-integer-overflow:rocks*/*/table/block_based/block_based_table_builder.cc
unsigned-integer-overflow:rocks*/*/table/block_based/reader_common.cc
unsigned-integer-overflow:rocks*/*/db/version_set.cc
# RocksDB misaligned loads (intentional for performance on ARM64)
alignment:rocks*/*/util/crc32c_arm64.cc
# nudb intentional overflows in hash functions
unsigned-integer-overflow:nudb/detail/xxhash.hpp
alignment:nudb/detail/xxhash.hpp
# Snappy compression library intentional overflows
unsigned-integer-overflow:snappy.cc
# Abseil intentional overflows
unsigned-integer-overflow:absl/strings/numbers.cc
unsigned-integer-overflow:absl/strings/internal/cord_rep_flat.h
unsigned-integer-overflow:absl/base/internal/low_level_alloc.cc
unsigned-integer-overflow:absl/hash/internal/hash.h
unsigned-integer-overflow:absl/container/internal/raw_hash_set.h
# Standard library intentional overflows in chrono duration arithmetic
unsigned-integer-overflow:__chrono/duration.h
# Suppress undefined errors in RocksDB and nudb
undefined:rocks.*/*/util/crc32c_arm64.cc
undefined:rocks.*/*/util/xxhash.h
undefined:nudb

View File

@@ -451,9 +451,8 @@ getTrustLineBalance(
amount.clear(Issue{currency, issuer});
}
JLOG(j.trace()) << "getTrustLineBalance:"
<< " account=" << to_string(account)
<< " amount=" << amount.getFullText();
JLOG(j.trace()) << "getTrustLineBalance:" << " account="
<< to_string(account) << " amount=" << amount.getFullText();
return view.balanceHook(account, issuer, amount);
}
@@ -700,8 +699,7 @@ xrpLiquid(
STAmount const amount =
(balance < reserve) ? STAmount{0} : balance - reserve;
JLOG(j.trace()) << "accountHolds:"
<< " account=" << to_string(id)
JLOG(j.trace()) << "accountHolds:" << " account=" << to_string(id)
<< " amount=" << amount.getFullText()
<< " fullBalance=" << fullBalance.getFullText()
<< " balance=" << balance.getFullText()
@@ -1107,7 +1105,7 @@ adjustOwnerCount(
std::function<void(SLE::ref)>
describeOwnerDir(AccountID const& account)
{
return [&account](std::shared_ptr<SLE> const& sle) {
return [account](std::shared_ptr<SLE> const& sle) {
(*sle)[sfOwner] = account;
};
}

View File

@@ -3,6 +3,8 @@
#include <xrpl/beast/core/SemanticVersion.h>
#include <xrpl/protocol/BuildInfo.h>
#include <boost/preprocessor/stringize.hpp>
#include <algorithm>
#include <cstdint>
#include <string>
@@ -20,7 +22,7 @@ namespace BuildInfo {
char const* const versionString = "3.2.0-b0"
// clang-format on
#if defined(DEBUG) || defined(SANITIZER)
#if defined(DEBUG) || defined(SANITIZERS)
"+"
#ifdef GIT_COMMIT_HASH
GIT_COMMIT_HASH
@@ -28,13 +30,13 @@ char const* const versionString = "3.2.0-b0"
#endif
#ifdef DEBUG
"DEBUG"
#ifdef SANITIZER
#ifdef SANITIZERS
"."
#endif
#endif
#ifdef SANITIZER
BOOST_PP_STRINGIZE(SANITIZER) // cspell: disable-line
#ifdef SANITIZERS
BOOST_PP_STRINGIZE(SANITIZERS) // cspell: disable-line
#endif
#endif

View File

@@ -389,6 +389,33 @@ class HashRouter_test : public beast::unit_test::suite
BEAST_EXPECT(!any(HF::UNDEFINED));
}
void
testProcessPeer()
{
using namespace std::chrono_literals;
TestStopwatch stopwatch;
HashRouter router(getSetup(5s, 5s), stopwatch);
uint256 const key(1);
HashRouter::PeerShortID peer1 = 1;
HashRouter::PeerShortID peer2 = 2;
auto const timeout = 2s;
BEAST_EXPECT(router.shouldProcessForPeer(key, peer1, timeout));
BEAST_EXPECT(!router.shouldProcessForPeer(key, peer1, timeout));
++stopwatch;
BEAST_EXPECT(!router.shouldProcessForPeer(key, peer1, timeout));
BEAST_EXPECT(router.shouldProcessForPeer(key, peer2, timeout));
BEAST_EXPECT(!router.shouldProcessForPeer(key, peer2, timeout));
++stopwatch;
BEAST_EXPECT(router.shouldProcessForPeer(key, peer1, timeout));
BEAST_EXPECT(!router.shouldProcessForPeer(key, peer2, timeout));
++stopwatch;
BEAST_EXPECT(router.shouldProcessForPeer(key, peer2, timeout));
++stopwatch;
BEAST_EXPECT(router.shouldProcessForPeer(key, peer1, timeout));
BEAST_EXPECT(!router.shouldProcessForPeer(key, peer2, timeout));
}
public:
void
run() override
@@ -401,6 +428,7 @@ public:
testProcess();
testSetup();
testFlagsOps();
testProcessPeer();
}
};

View File

@@ -305,6 +305,11 @@ public:
{
return false;
}
std::set<std::optional<uint64_t>>
releaseRequestCookies(uint256 const& requestHash) override
{
return {};
}
std::string const&
fingerprint() const override

View File

@@ -69,8 +69,8 @@ public:
negotiateProtocolVersion("XRPL/2.2") == make_protocol(2, 2));
BEAST_EXPECT(
negotiateProtocolVersion(
"RTXP/1.2, XRPL/2.2, XRPL/2.3, XRPL/999.999") ==
make_protocol(2, 2));
"RTXP/1.2, XRPL/2.2, XRPL/2.3, XRPL/2.4, XRPL/999.999") ==
make_protocol(2, 3));
BEAST_EXPECT(
negotiateProtocolVersion("XRPL/999.999, WebSocket/1.0") ==
std::nullopt);

View File

@@ -170,6 +170,11 @@ public:
removeTxQueue(uint256 const&) override
{
}
std::set<std::optional<uint64_t>>
releaseRequestCookies(uint256 const& requestHash) override
{
return {};
}
};
/** Manually advanced clock. */

View File

@@ -1,5 +1,5 @@
# Unit tests
This directory contains unit tests for the project. The difference from existing `src/test` folder
is that we switch to 3rd party testing framework (doctest). We intend to gradually move existing tests
from our own framework to doctest and such tests will be moved to this new folder.
is that we switch to 3rd party testing framework (`gtest`). We intend to gradually move existing tests
from our own framework to `gtest` and such tests will be moved to this new folder.

View File

@@ -1,14 +1,14 @@
include(XrplAddTest)
# Test requirements.
find_package(doctest REQUIRED)
find_package(GTest REQUIRED)
# Custom target for all tests defined in this file
add_custom_target(xrpl.tests)
# Common library dependencies for the rest of the tests.
add_library(xrpl.imports.test INTERFACE)
target_link_libraries(xrpl.imports.test INTERFACE doctest::doctest xrpl.libxrpl)
target_link_libraries(xrpl.imports.test INTERFACE gtest::gtest xrpl.libxrpl)
# One test for each module.
xrpl_add_test(basics)

View File

@@ -1,15 +1,13 @@
#include <xrpl/basics/RangeSet.h>
#include <doctest/doctest.h>
#include <gtest/gtest.h>
#include <cstdint>
#include <optional>
using namespace xrpl;
TEST_SUITE_BEGIN("RangeSet");
TEST_CASE("prevMissing")
TEST(RangeSet, prevMissing)
{
// Set will include:
// [ 0, 5]
@@ -31,80 +29,78 @@ TEST_CASE("prevMissing")
expected = ((i % 10) > 6) ? (i - 1) : oneBelowRange;
}
CHECK(prevMissing(set, i) == expected);
EXPECT_EQ(prevMissing(set, i), expected);
}
}
TEST_CASE("toString")
TEST(RangeSet, toString)
{
RangeSet<std::uint32_t> set;
CHECK(to_string(set) == "empty");
EXPECT_EQ(to_string(set), "empty");
set.insert(1);
CHECK(to_string(set) == "1");
EXPECT_EQ(to_string(set), "1");
set.insert(range(4u, 6u));
CHECK(to_string(set) == "1,4-6");
EXPECT_EQ(to_string(set), "1,4-6");
set.insert(2);
CHECK(to_string(set) == "1-2,4-6");
EXPECT_EQ(to_string(set), "1-2,4-6");
set.erase(range(4u, 5u));
CHECK(to_string(set) == "1-2,6");
EXPECT_EQ(to_string(set), "1-2,6");
}
TEST_CASE("fromString")
TEST(RangeSet, fromString)
{
RangeSet<std::uint32_t> set;
CHECK(!from_string(set, ""));
CHECK(boost::icl::length(set) == 0);
EXPECT_FALSE(from_string(set, ""));
EXPECT_EQ(boost::icl::length(set), 0);
CHECK(!from_string(set, "#"));
CHECK(boost::icl::length(set) == 0);
EXPECT_FALSE(from_string(set, "#"));
EXPECT_EQ(boost::icl::length(set), 0);
CHECK(!from_string(set, ","));
CHECK(boost::icl::length(set) == 0);
EXPECT_FALSE(from_string(set, ","));
EXPECT_EQ(boost::icl::length(set), 0);
CHECK(!from_string(set, ",-"));
CHECK(boost::icl::length(set) == 0);
EXPECT_FALSE(from_string(set, ",-"));
EXPECT_EQ(boost::icl::length(set), 0);
CHECK(!from_string(set, "1,,2"));
CHECK(boost::icl::length(set) == 0);
EXPECT_FALSE(from_string(set, "1,,2"));
EXPECT_EQ(boost::icl::length(set), 0);
CHECK(from_string(set, "1"));
CHECK(boost::icl::length(set) == 1);
CHECK(boost::icl::first(set) == 1);
EXPECT_TRUE(from_string(set, "1"));
EXPECT_EQ(boost::icl::length(set), 1);
EXPECT_EQ(boost::icl::first(set), 1);
CHECK(from_string(set, "1,1"));
CHECK(boost::icl::length(set) == 1);
CHECK(boost::icl::first(set) == 1);
EXPECT_TRUE(from_string(set, "1,1"));
EXPECT_EQ(boost::icl::length(set), 1);
EXPECT_EQ(boost::icl::first(set), 1);
CHECK(from_string(set, "1-1"));
CHECK(boost::icl::length(set) == 1);
CHECK(boost::icl::first(set) == 1);
EXPECT_TRUE(from_string(set, "1-1"));
EXPECT_EQ(boost::icl::length(set), 1);
EXPECT_EQ(boost::icl::first(set), 1);
CHECK(from_string(set, "1,4-6"));
CHECK(boost::icl::length(set) == 4);
CHECK(boost::icl::first(set) == 1);
CHECK(!boost::icl::contains(set, 2));
CHECK(!boost::icl::contains(set, 3));
CHECK(boost::icl::contains(set, 4));
CHECK(boost::icl::contains(set, 5));
CHECK(boost::icl::last(set) == 6);
EXPECT_TRUE(from_string(set, "1,4-6"));
EXPECT_EQ(boost::icl::length(set), 4);
EXPECT_EQ(boost::icl::first(set), 1);
EXPECT_FALSE(boost::icl::contains(set, 2));
EXPECT_FALSE(boost::icl::contains(set, 3));
EXPECT_TRUE(boost::icl::contains(set, 4));
EXPECT_TRUE(boost::icl::contains(set, 5));
EXPECT_EQ(boost::icl::last(set), 6);
CHECK(from_string(set, "1-2,4-6"));
CHECK(boost::icl::length(set) == 5);
CHECK(boost::icl::first(set) == 1);
CHECK(boost::icl::contains(set, 2));
CHECK(boost::icl::contains(set, 4));
CHECK(boost::icl::last(set) == 6);
EXPECT_TRUE(from_string(set, "1-2,4-6"));
EXPECT_EQ(boost::icl::length(set), 5);
EXPECT_EQ(boost::icl::first(set), 1);
EXPECT_TRUE(boost::icl::contains(set, 2));
EXPECT_TRUE(boost::icl::contains(set, 4));
EXPECT_EQ(boost::icl::last(set), 6);
CHECK(from_string(set, "1-2,6"));
CHECK(boost::icl::length(set) == 3);
CHECK(boost::icl::first(set) == 1);
CHECK(boost::icl::contains(set, 2));
CHECK(boost::icl::last(set) == 6);
EXPECT_TRUE(from_string(set, "1-2,6"));
EXPECT_EQ(boost::icl::length(set), 3);
EXPECT_EQ(boost::icl::first(set), 1);
EXPECT_TRUE(boost::icl::contains(set, 2));
EXPECT_EQ(boost::icl::last(set), 6);
}
TEST_SUITE_END();

View File

@@ -1,6 +1,6 @@
#include <xrpl/basics/Slice.h>
#include <doctest/doctest.h>
#include <gtest/gtest.h>
#include <array>
#include <cstdint>
@@ -12,37 +12,35 @@ static std::uint8_t const data[] = {
0x18, 0xb4, 0x70, 0xcb, 0xf5, 0xac, 0x2d, 0x89, 0x4d, 0x19, 0x9c,
0xf0, 0x2c, 0x15, 0xd1, 0xf9, 0x9b, 0x66, 0xd2, 0x30, 0xd3};
TEST_SUITE_BEGIN("Slice");
TEST_CASE("equality & inequality")
TEST(Slice, equality_and_inequality)
{
Slice const s0{};
CHECK(s0.size() == 0);
CHECK(s0.data() == nullptr);
CHECK(s0 == s0);
EXPECT_EQ(s0.size(), 0);
EXPECT_EQ(s0.data(), nullptr);
EXPECT_EQ(s0, s0);
// Test slices of equal and unequal size pointing to same data:
for (std::size_t i = 0; i != sizeof(data); ++i)
{
Slice const s1{data, i};
CHECK(s1.size() == i);
CHECK(s1.data() != nullptr);
EXPECT_EQ(s1.size(), i);
EXPECT_NE(s1.data(), nullptr);
if (i == 0)
CHECK(s1 == s0);
EXPECT_EQ(s1, s0);
else
CHECK(s1 != s0);
EXPECT_NE(s1, s0);
for (std::size_t j = 0; j != sizeof(data); ++j)
{
Slice const s2{data, j};
if (i == j)
CHECK(s1 == s2);
EXPECT_EQ(s1, s2);
else
CHECK(s1 != s2);
EXPECT_NE(s1, s2);
}
}
@@ -53,22 +51,22 @@ TEST_CASE("equality & inequality")
for (std::size_t i = 0; i != sizeof(data); ++i)
a[i] = b[i] = data[i];
CHECK(makeSlice(a) == makeSlice(b));
EXPECT_EQ(makeSlice(a), makeSlice(b));
b[7]++;
CHECK(makeSlice(a) != makeSlice(b));
EXPECT_NE(makeSlice(a), makeSlice(b));
a[7]++;
CHECK(makeSlice(a) == makeSlice(b));
EXPECT_EQ(makeSlice(a), makeSlice(b));
}
TEST_CASE("indexing")
TEST(Slice, indexing)
{
Slice const s{data, sizeof(data)};
for (std::size_t i = 0; i != sizeof(data); ++i)
CHECK(s[i] == data[i]);
EXPECT_EQ(s[i], data[i]);
}
TEST_CASE("advancing")
TEST(Slice, advancing)
{
for (std::size_t i = 0; i < sizeof(data); ++i)
{
@@ -77,10 +75,8 @@ TEST_CASE("advancing")
Slice s(data + i, sizeof(data) - i);
s += j;
CHECK(s.data() == data + i + j);
CHECK(s.size() == sizeof(data) - i - j);
EXPECT_EQ(s.data(), data + i + j);
EXPECT_EQ(s.size(), sizeof(data) - i - j);
}
}
}
TEST_SUITE_END();

View File

@@ -1,6 +1,6 @@
#include <xrpl/basics/base64.h>
#include <doctest/doctest.h>
#include <gtest/gtest.h>
#include <string>
@@ -10,11 +10,11 @@ static void
check(std::string const& in, std::string const& out)
{
auto const encoded = base64_encode(in);
CHECK(encoded == out);
CHECK(base64_decode(encoded) == in);
EXPECT_EQ(encoded, out);
EXPECT_EQ(base64_decode(encoded), in);
}
TEST_CASE("base64")
TEST(base64, base64)
{
// cspell: disable
check("", "");
@@ -46,5 +46,5 @@ TEST_CASE("base64")
std::string const notBase64 = "not_base64!!";
std::string const truncated = "not";
CHECK(base64_decode(notBase64) == base64_decode(truncated));
EXPECT_EQ(base64_decode(notBase64), base64_decode(truncated));
}

View File

@@ -1,13 +1,13 @@
#include <xrpl/basics/contract.h>
#include <doctest/doctest.h>
#include <gtest/gtest.h>
#include <stdexcept>
#include <string>
using namespace xrpl;
TEST_CASE("contract")
TEST(contract, contract)
{
try
{
@@ -15,7 +15,7 @@ TEST_CASE("contract")
}
catch (std::runtime_error const& e1)
{
CHECK(std::string(e1.what()) == "Throw test");
EXPECT_STREQ(e1.what(), "Throw test");
try
{
@@ -23,15 +23,15 @@ TEST_CASE("contract")
}
catch (std::runtime_error const& e2)
{
CHECK(std::string(e2.what()) == "Throw test");
EXPECT_STREQ(e2.what(), "Throw test");
}
catch (...)
{
CHECK(false);
FAIL() << "std::runtime_error should have been re-caught";
}
}
catch (...)
{
CHECK(false);
FAIL() << "std::runtime_error should have been caught the first time";
}
}

View File

@@ -1,2 +1,8 @@
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>
#include <gtest/gtest.h>
int
main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@@ -1,45 +1,45 @@
#include <xrpl/basics/mulDiv.h>
#include <doctest/doctest.h>
#include <gtest/gtest.h>
#include <cstdint>
#include <limits>
using namespace xrpl;
TEST_CASE("mulDiv")
TEST(mulDiv, mulDiv)
{
auto const max = std::numeric_limits<std::uint64_t>::max();
std::uint64_t const max32 = std::numeric_limits<std::uint32_t>::max();
auto result = mulDiv(85, 20, 5);
REQUIRE(result);
CHECK(*result == 340);
ASSERT_TRUE(result.has_value());
EXPECT_EQ(*result, 340);
result = mulDiv(20, 85, 5);
REQUIRE(result);
CHECK(*result == 340);
ASSERT_TRUE(result.has_value());
EXPECT_EQ(*result, 340);
result = mulDiv(0, max - 1, max - 3);
REQUIRE(result);
CHECK(*result == 0);
ASSERT_TRUE(result.has_value());
EXPECT_EQ(*result, 0);
result = mulDiv(max - 1, 0, max - 3);
REQUIRE(result);
CHECK(*result == 0);
ASSERT_TRUE(result.has_value());
EXPECT_EQ(*result, 0);
result = mulDiv(max, 2, max / 2);
REQUIRE(result);
CHECK(*result == 4);
ASSERT_TRUE(result.has_value());
EXPECT_EQ(*result, 4);
result = mulDiv(max, 1000, max / 1000);
REQUIRE(result);
CHECK(*result == 1000000);
ASSERT_TRUE(result.has_value());
EXPECT_EQ(*result, 1000000);
result = mulDiv(max, 1000, max / 1001);
REQUIRE(result);
CHECK(*result == 1001000);
ASSERT_TRUE(result.has_value());
EXPECT_EQ(*result, 1001000);
result = mulDiv(max32 + 1, max32 + 1, 5);
REQUIRE(result);
CHECK(*result == 3689348814741910323);
ASSERT_TRUE(result.has_value());
EXPECT_EQ(*result, 3689348814741910323);
// Overflow
result = mulDiv(max - 1, max - 2, 5);
CHECK(!result);
EXPECT_FALSE(result.has_value());
}

View File

@@ -1,10 +1,10 @@
#include <xrpl/basics/scope.h>
#include <doctest/doctest.h>
#include <gtest/gtest.h>
using namespace xrpl;
TEST_CASE("scope_exit")
TEST(scope, scope_exit)
{
// scope_exit always executes the functor on destruction,
// unless release() is called
@@ -12,23 +12,23 @@ TEST_CASE("scope_exit")
{
scope_exit x{[&i]() { i = 1; }};
}
CHECK(i == 1);
EXPECT_EQ(i, 1);
{
scope_exit x{[&i]() { i = 2; }};
x.release();
}
CHECK(i == 1);
EXPECT_EQ(i, 1);
{
scope_exit x{[&i]() { i += 2; }};
auto x2 = std::move(x);
}
CHECK(i == 3);
EXPECT_EQ(i, 3);
{
scope_exit x{[&i]() { i = 4; }};
x.release();
auto x2 = std::move(x);
}
CHECK(i == 3);
EXPECT_EQ(i, 3);
{
try
{
@@ -39,7 +39,7 @@ TEST_CASE("scope_exit")
{
}
}
CHECK(i == 5);
EXPECT_EQ(i, 5);
{
try
{
@@ -51,10 +51,10 @@ TEST_CASE("scope_exit")
{
}
}
CHECK(i == 5);
EXPECT_EQ(i, 5);
}
TEST_CASE("scope_fail")
TEST(scope, scope_fail)
{
// scope_fail executes the functor on destruction only
// if an exception is unwinding, unless release() is called
@@ -62,23 +62,23 @@ TEST_CASE("scope_fail")
{
scope_fail x{[&i]() { i = 1; }};
}
CHECK(i == 0);
EXPECT_EQ(i, 0);
{
scope_fail x{[&i]() { i = 2; }};
x.release();
}
CHECK(i == 0);
EXPECT_EQ(i, 0);
{
scope_fail x{[&i]() { i = 3; }};
auto x2 = std::move(x);
}
CHECK(i == 0);
EXPECT_EQ(i, 0);
{
scope_fail x{[&i]() { i = 4; }};
x.release();
auto x2 = std::move(x);
}
CHECK(i == 0);
EXPECT_EQ(i, 0);
{
try
{
@@ -89,7 +89,7 @@ TEST_CASE("scope_fail")
{
}
}
CHECK(i == 5);
EXPECT_EQ(i, 5);
{
try
{
@@ -101,10 +101,10 @@ TEST_CASE("scope_fail")
{
}
}
CHECK(i == 5);
EXPECT_EQ(i, 5);
}
TEST_CASE("scope_success")
TEST(scope, scope_success)
{
// scope_success executes the functor on destruction only
// if an exception is not unwinding, unless release() is called
@@ -112,23 +112,23 @@ TEST_CASE("scope_success")
{
scope_success x{[&i]() { i = 1; }};
}
CHECK(i == 1);
EXPECT_EQ(i, 1);
{
scope_success x{[&i]() { i = 2; }};
x.release();
}
CHECK(i == 1);
EXPECT_EQ(i, 1);
{
scope_success x{[&i]() { i += 2; }};
auto x2 = std::move(x);
}
CHECK(i == 3);
EXPECT_EQ(i, 3);
{
scope_success x{[&i]() { i = 4; }};
x.release();
auto x2 = std::move(x);
}
CHECK(i == 3);
EXPECT_EQ(i, 3);
{
try
{
@@ -139,7 +139,7 @@ TEST_CASE("scope_success")
{
}
}
CHECK(i == 3);
EXPECT_EQ(i, 3);
{
try
{
@@ -151,5 +151,5 @@ TEST_CASE("scope_success")
{
}
}
CHECK(i == 3);
EXPECT_EQ(i, 3);
}

View File

@@ -1,6 +1,6 @@
#include <xrpl/basics/tagged_integer.h>
#include <doctest/doctest.h>
#include <gtest/gtest.h>
#include <type_traits>
@@ -102,127 +102,123 @@ static_assert(
!std::is_convertible<TagUInt2, TagUInt3>::value,
"TagUInt2 should not be convertible to a TagUInt3");
TEST_SUITE_BEGIN("tagged_integer");
using TagInt = tagged_integer<std::int32_t, Tag1>;
TEST_CASE("comparison operators")
TEST(tagged_integer, comparison_operators)
{
TagInt const zero(0);
TagInt const one(1);
CHECK(one == one);
CHECK(!(one == zero));
EXPECT_TRUE(one == one);
EXPECT_FALSE(one == zero);
CHECK(one != zero);
CHECK(!(one != one));
EXPECT_TRUE(one != zero);
EXPECT_FALSE(one != one);
CHECK(zero < one);
CHECK(!(one < zero));
EXPECT_TRUE(zero < one);
EXPECT_FALSE(one < zero);
CHECK(one > zero);
CHECK(!(zero > one));
EXPECT_TRUE(one > zero);
EXPECT_FALSE(zero > one);
CHECK(one >= one);
CHECK(one >= zero);
CHECK(!(zero >= one));
EXPECT_TRUE(one >= one);
EXPECT_TRUE(one >= zero);
EXPECT_FALSE(zero >= one);
CHECK(zero <= one);
CHECK(zero <= zero);
CHECK(!(one <= zero));
EXPECT_TRUE(zero <= one);
EXPECT_TRUE(zero <= zero);
EXPECT_FALSE(one <= zero);
}
TEST_CASE("increment / decrement operators")
TEST(tagged_integer, increment_decrement_operators)
{
TagInt const zero(0);
TagInt const one(1);
TagInt a{0};
++a;
CHECK(a == one);
EXPECT_EQ(a, one);
--a;
CHECK(a == zero);
EXPECT_EQ(a, zero);
a++;
CHECK(a == one);
EXPECT_EQ(a, one);
a--;
CHECK(a == zero);
EXPECT_EQ(a, zero);
}
TEST_CASE("arithmetic operators")
TEST(tagged_integer, arithmetic_operators)
{
TagInt a{-2};
CHECK(+a == TagInt{-2});
CHECK(-a == TagInt{2});
CHECK(TagInt{-3} + TagInt{4} == TagInt{1});
CHECK(TagInt{-3} - TagInt{4} == TagInt{-7});
CHECK(TagInt{-3} * TagInt{4} == TagInt{-12});
CHECK(TagInt{8} / TagInt{4} == TagInt{2});
CHECK(TagInt{7} % TagInt{4} == TagInt{3});
EXPECT_EQ(+a, TagInt{-2});
EXPECT_EQ(-a, TagInt{2});
EXPECT_EQ(TagInt{-3} + TagInt{4}, TagInt{1});
EXPECT_EQ(TagInt{-3} - TagInt{4}, TagInt{-7});
EXPECT_EQ(TagInt{-3} * TagInt{4}, TagInt{-12});
EXPECT_EQ(TagInt{8} / TagInt{4}, TagInt{2});
EXPECT_EQ(TagInt{7} % TagInt{4}, TagInt{3});
CHECK(~TagInt{8} == TagInt{~TagInt::value_type{8}});
CHECK((TagInt{6} & TagInt{3}) == TagInt{2});
CHECK((TagInt{6} | TagInt{3}) == TagInt{7});
CHECK((TagInt{6} ^ TagInt{3}) == TagInt{5});
EXPECT_EQ(~TagInt{8}, TagInt{~TagInt::value_type{8}});
EXPECT_EQ((TagInt{6} & TagInt{3}), TagInt{2});
EXPECT_EQ((TagInt{6} | TagInt{3}), TagInt{7});
EXPECT_EQ((TagInt{6} ^ TagInt{3}), TagInt{5});
CHECK((TagInt{4} << TagInt{2}) == TagInt{16});
CHECK((TagInt{16} >> TagInt{2}) == TagInt{4});
EXPECT_EQ((TagInt{4} << TagInt{2}), TagInt{16});
EXPECT_EQ((TagInt{16} >> TagInt{2}), TagInt{4});
}
TEST_CASE("assignment operators")
TEST(tagged_integer, assignment_operators)
{
TagInt a{-2};
TagInt b{0};
b = a;
CHECK(b == TagInt{-2});
EXPECT_EQ(b, TagInt{-2});
// -3 + 4 == 1
a = TagInt{-3};
a += TagInt{4};
CHECK(a == TagInt{1});
EXPECT_EQ(a, TagInt{1});
// -3 - 4 == -7
a = TagInt{-3};
a -= TagInt{4};
CHECK(a == TagInt{-7});
EXPECT_EQ(a, TagInt{-7});
// -3 * 4 == -12
a = TagInt{-3};
a *= TagInt{4};
CHECK(a == TagInt{-12});
EXPECT_EQ(a, TagInt{-12});
// 8/4 == 2
a = TagInt{8};
a /= TagInt{4};
CHECK(a == TagInt{2});
EXPECT_EQ(a, TagInt{2});
// 7 % 4 == 3
a = TagInt{7};
a %= TagInt{4};
CHECK(a == TagInt{3});
EXPECT_EQ(a, TagInt{3});
// 6 & 3 == 2
a = TagInt{6};
a /= TagInt{3};
CHECK(a == TagInt{2});
EXPECT_EQ(a, TagInt{2});
// 6 | 3 == 7
a = TagInt{6};
a |= TagInt{3};
CHECK(a == TagInt{7});
EXPECT_EQ(a, TagInt{7});
// 6 ^ 3 == 5
a = TagInt{6};
a ^= TagInt{3};
CHECK(a == TagInt{5});
EXPECT_EQ(a, TagInt{5});
// 4 << 2 == 16
a = TagInt{4};
a <<= TagInt{2};
CHECK(a == TagInt{16});
EXPECT_EQ(a, TagInt{16});
// 16 >> 2 == 4
a = TagInt{16};
a >>= TagInt{2};
CHECK(a == TagInt{4});
EXPECT_EQ(a, TagInt{4});
}
TEST_SUITE_END();

View File

@@ -1,15 +1,15 @@
#include <xrpl/crypto/csprng.h>
#include <doctest/doctest.h>
#include <gtest/gtest.h>
using namespace xrpl;
TEST_CASE("get values")
TEST(csprng, get_values)
{
auto& engine = crypto_prng();
auto rand_val = engine();
CHECK(rand_val >= engine.min());
CHECK(rand_val <= engine.max());
EXPECT_GE(rand_val, engine.min());
EXPECT_LE(rand_val, engine.max());
uint16_t twoByte{0};
engine(&twoByte, sizeof(uint16_t));
}

View File

@@ -1,2 +1,8 @@
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>
#include <gtest/gtest.h>
int
main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@@ -2,31 +2,29 @@
#include <xrpl/json/json_reader.h>
#include <xrpl/json/json_writer.h>
#include <doctest/doctest.h>
#include <gtest/gtest.h>
#include <string>
using namespace xrpl;
using namespace Json;
TEST_SUITE_BEGIN("JsonOutput");
static void
checkOutput(std::string const& valueDesc)
{
std::string output;
Json::Value value;
REQUIRE(Json::Reader().parse(valueDesc, value));
ASSERT_TRUE(Json::Reader().parse(valueDesc, value));
auto out = stringOutput(output);
outputJson(value, out);
auto expected = Json::FastWriter().write(value);
CHECK(output == expected);
CHECK(output == valueDesc);
CHECK(output == jsonAsString(value));
EXPECT_EQ(output, expected);
EXPECT_EQ(output, valueDesc);
EXPECT_EQ(output, jsonAsString(value));
}
TEST_CASE("output cases")
TEST(JsonOutput, output_cases)
{
checkOutput("{}");
checkOutput("[]");
@@ -36,5 +34,3 @@ TEST_CASE("output cases")
checkOutput("[[]]");
checkOutput(R"({"array":[{"12":23},{},null,false,0.5]})");
}
TEST_SUITE_END();

File diff suppressed because it is too large Load Diff

View File

@@ -1,7 +1,7 @@
#include <xrpl/json/Writer.h>
#include <doctest/doctest.h>
#include <google/protobuf/stubs/port.h>
#include <gtest/gtest.h>
#include <memory>
#include <string>
@@ -9,14 +9,14 @@
using namespace xrpl;
using namespace Json;
TEST_SUITE_BEGIN("JsonWriter");
struct WriterFixture
class WriterFixture : public ::testing::Test
{
protected:
std::string output;
std::unique_ptr<Writer> writer;
WriterFixture()
void
SetUp() override
{
writer = std::make_unique<Writer>(stringOutput(output));
}
@@ -31,7 +31,7 @@ struct WriterFixture
void
expectOutput(std::string const& expected) const
{
CHECK(output == expected);
EXPECT_EQ(output, expected);
}
void
@@ -42,20 +42,20 @@ struct WriterFixture
}
};
TEST_CASE_FIXTURE(WriterFixture, "trivial")
TEST_F(WriterFixture, trivial)
{
CHECK(output.empty());
EXPECT_TRUE(output.empty());
checkOutputAndReset("");
}
TEST_CASE_FIXTURE(WriterFixture, "near trivial")
TEST_F(WriterFixture, near_trivial)
{
CHECK(output.empty());
EXPECT_TRUE(output.empty());
writer->output(0);
checkOutputAndReset("0");
}
TEST_CASE_FIXTURE(WriterFixture, "primitives")
TEST_F(WriterFixture, primitives)
{
writer->output(true);
checkOutputAndReset("true");
@@ -79,7 +79,7 @@ TEST_CASE_FIXTURE(WriterFixture, "primitives")
checkOutputAndReset("null");
}
TEST_CASE_FIXTURE(WriterFixture, "empty")
TEST_F(WriterFixture, empty)
{
writer->startRoot(Writer::array);
writer->finish();
@@ -90,7 +90,7 @@ TEST_CASE_FIXTURE(WriterFixture, "empty")
checkOutputAndReset("{}");
}
TEST_CASE_FIXTURE(WriterFixture, "escaping")
TEST_F(WriterFixture, escaping)
{
writer->output("\\");
checkOutputAndReset(R"("\\")");
@@ -108,7 +108,7 @@ TEST_CASE_FIXTURE(WriterFixture, "escaping")
checkOutputAndReset(R"("\b\f\n\r\t")");
}
TEST_CASE_FIXTURE(WriterFixture, "array")
TEST_F(WriterFixture, array)
{
writer->startRoot(Writer::array);
writer->append(12);
@@ -116,7 +116,7 @@ TEST_CASE_FIXTURE(WriterFixture, "array")
checkOutputAndReset("[12]");
}
TEST_CASE_FIXTURE(WriterFixture, "long array")
TEST_F(WriterFixture, long_array)
{
writer->startRoot(Writer::array);
writer->append(12);
@@ -126,7 +126,7 @@ TEST_CASE_FIXTURE(WriterFixture, "long array")
checkOutputAndReset(R"([12,true,"hello"])");
}
TEST_CASE_FIXTURE(WriterFixture, "embedded array simple")
TEST_F(WriterFixture, embedded_array_simple)
{
writer->startRoot(Writer::array);
writer->startAppend(Writer::array);
@@ -135,7 +135,7 @@ TEST_CASE_FIXTURE(WriterFixture, "embedded array simple")
checkOutputAndReset("[[]]");
}
TEST_CASE_FIXTURE(WriterFixture, "object")
TEST_F(WriterFixture, object)
{
writer->startRoot(Writer::object);
writer->set("hello", "world");
@@ -143,7 +143,7 @@ TEST_CASE_FIXTURE(WriterFixture, "object")
checkOutputAndReset(R"({"hello":"world"})");
}
TEST_CASE_FIXTURE(WriterFixture, "complex object")
TEST_F(WriterFixture, complex_object)
{
writer->startRoot(Writer::object);
writer->set("hello", "world");
@@ -160,7 +160,7 @@ TEST_CASE_FIXTURE(WriterFixture, "complex object")
R"({"hello":"world","array":[true,12,[{"goodbye":"cruel world.","subarray":[23.5]}]]})");
}
TEST_CASE_FIXTURE(WriterFixture, "json value")
TEST_F(WriterFixture, json_value)
{
Json::Value value(Json::objectValue);
value["foo"] = 23;
@@ -169,5 +169,3 @@ TEST_CASE_FIXTURE(WriterFixture, "json value")
writer->finish();
checkOutputAndReset(R"({"hello":{"foo":23}})");
}
TEST_SUITE_END();

View File

@@ -1,2 +1,8 @@
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>
#include <gtest/gtest.h>
int
main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@@ -7,7 +7,7 @@
#include <boost/beast/http.hpp>
#include <boost/beast/version.hpp>
#include <doctest/doctest.h>
#include <gtest/gtest.h>
#include <atomic>
#include <map>
@@ -217,7 +217,7 @@ runHTTPTest(
} // anonymous namespace
TEST_CASE("HTTPClient case insensitive Content-Length")
TEST(HTTPClient, case_insensitive_content_length)
{
// Test different cases of Content-Length header
std::vector<std::string> header_cases = {
@@ -249,14 +249,14 @@ TEST_CASE("HTTPClient case insensitive Content-Length")
result_error);
// Verify results
CHECK(test_completed);
CHECK(!result_error);
CHECK(result_status == 200);
CHECK(result_data == test_body);
EXPECT_TRUE(test_completed);
EXPECT_FALSE(result_error);
EXPECT_EQ(result_status, 200);
EXPECT_EQ(result_data, test_body);
}
}
TEST_CASE("HTTPClient basic HTTP request")
TEST(HTTPClient, basic_http_request)
{
TestHTTPServer server;
std::string test_body = "Test response body";
@@ -271,13 +271,13 @@ TEST_CASE("HTTPClient basic HTTP request")
bool test_completed = runHTTPTest(
server, "/basic", completed, result_status, result_data, result_error);
CHECK(test_completed);
CHECK(!result_error);
CHECK(result_status == 200);
CHECK(result_data == test_body);
EXPECT_TRUE(test_completed);
EXPECT_FALSE(result_error);
EXPECT_EQ(result_status, 200);
EXPECT_EQ(result_data, test_body);
}
TEST_CASE("HTTPClient empty response")
TEST(HTTPClient, empty_response)
{
TestHTTPServer server;
server.setResponseBody(""); // Empty body
@@ -291,13 +291,13 @@ TEST_CASE("HTTPClient empty response")
bool test_completed = runHTTPTest(
server, "/empty", completed, result_status, result_data, result_error);
CHECK(test_completed);
CHECK(!result_error);
CHECK(result_status == 200);
CHECK(result_data.empty());
EXPECT_TRUE(test_completed);
EXPECT_FALSE(result_error);
EXPECT_EQ(result_status, 200);
EXPECT_TRUE(result_data.empty());
}
TEST_CASE("HTTPClient different status codes")
TEST(HTTPClient, different_status_codes)
{
std::vector<unsigned int> status_codes = {200, 404, 500};
@@ -320,8 +320,8 @@ TEST_CASE("HTTPClient different status codes")
result_data,
result_error);
CHECK(test_completed);
CHECK(!result_error);
CHECK(result_status == static_cast<int>(status));
EXPECT_TRUE(test_completed);
EXPECT_FALSE(result_error);
EXPECT_EQ(result_status, static_cast<int>(status));
}
}

View File

@@ -1,2 +1,8 @@
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>
#include <gtest/gtest.h>
int
main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@@ -1056,7 +1056,8 @@ void
RCLConsensus::Adaptor::updateOperatingMode(std::size_t const positions) const
{
if (!positions && app_.getOPs().isFull())
app_.getOPs().setMode(OperatingMode::CONNECTED);
app_.getOPs().setMode(
OperatingMode::CONNECTED, "updateOperatingMode: no positions");
}
void

View File

@@ -179,6 +179,25 @@ private:
std::unique_ptr<PeerSet> mPeerSet;
};
inline std::string
to_string(InboundLedger::Reason reason)
{
using enum InboundLedger::Reason;
switch (reason)
{
case HISTORY:
return "HISTORY";
case GENERIC:
return "GENERIC";
case CONSENSUS:
return "CONSENSUS";
default:
UNREACHABLE(
"ripple::to_string(InboundLedger::Reason) : unknown value");
return "unknown";
}
}
} // namespace xrpl
#endif

View File

@@ -374,7 +374,14 @@ InboundLedger::onTimer(bool wasProgress, ScopedLockType&)
if (!wasProgress)
{
checkLocal();
if (checkLocal())
{
// Done. Something else (probably consensus) built the ledger
// locally while waiting for data (or possibly before requesting)
XRPL_ASSERT(isDone(), "ripple::InboundLedger::onTimer : done");
JLOG(journal_.info()) << "Finished while waiting " << hash_;
return;
}
mByHash = true;

View File

@@ -3,9 +3,9 @@
#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpl/basics/CanProcess.h>
#include <xrpl/basics/DecayingSample.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/scope.h>
#include <xrpl/beast/container/aged_map.h>
#include <xrpl/core/JobQueue.h>
#include <xrpl/core/PerfLog.h>
@@ -58,11 +58,85 @@ public:
hash.isNonZero(),
"xrpl::InboundLedgersImp::acquire::doAcquire : nonzero hash");
// probably not the right rule
if (app_.getOPs().isNeedNetworkLedger() &&
(reason != InboundLedger::Reason::GENERIC) &&
(reason != InboundLedger::Reason::CONSENSUS))
bool const needNetworkLedger = app_.getOPs().isNeedNetworkLedger();
bool const shouldAcquire = [&]() {
if (!needNetworkLedger)
return true;
if (reason == InboundLedger::Reason::GENERIC)
return true;
if (reason == InboundLedger::Reason::CONSENSUS)
return true;
return false;
}();
std::stringstream ss;
ss << "InboundLedger::acquire: "
<< "Request: " << to_string(hash) << ", " << seq
<< " NeedNetworkLedger: " << (needNetworkLedger ? "yes" : "no")
<< " Reason: " << to_string(reason)
<< " Should acquire: " << (shouldAcquire ? "true." : "false.");
/* Acquiring ledgers is somewhat expensive. It requires lots of
* computation and network communication. Avoid it when it's not
* appropriate. Every validation from a peer for a ledger that
* we do not have locally results in a call to this function: even
* if we are moments away from validating the same ledger.
*/
bool const shouldBroadcast = [&]() {
// If the node is not in "full" state, it needs to sync to
// the network, and doesn't have the necessary tx's and
// ledger entries to build the ledger.
bool const isFull = app_.getOPs().isFull();
// If everything else is ok, don't try to acquire the ledger
// if the requested seq is in the near future relative to
// the validated ledger. If the requested ledger is between
// 1 and 19 inclusive ledgers ahead of the valid ledger this
// node has not built it yet, but it's possible/likely it
// has the tx's necessary to build it and get caught up.
// Plus it might not become validated. On the other hand, if
// it's more than 20 in the future, this node should request
// it so that it can jump ahead and get caught up.
LedgerIndex const validSeq =
app_.getLedgerMaster().getValidLedgerIndex();
constexpr std::size_t lagLeeway = 20;
bool const nearFuture =
(seq > validSeq) && (seq < validSeq + lagLeeway);
// If everything else is ok, don't try to acquire the ledger
// if the request is related to consensus. (Note that
// consensus calls usually pass a seq of 0, so nearFuture
// will be false other than on a brand new network.)
bool const consensus =
reason == InboundLedger::Reason::CONSENSUS;
ss << " Evaluating whether to broadcast requests to peers"
<< ". full: " << (isFull ? "true" : "false")
<< ". ledger sequence " << seq
<< ". Valid sequence: " << validSeq
<< ". Lag leeway: " << lagLeeway
<< ". request for near future ledger: "
<< (nearFuture ? "true" : "false")
<< ". Consensus: " << (consensus ? "true" : "false");
// If the node is not synced, send requests.
if (!isFull)
return true;
// If the ledger is in the near future, do NOT send requests.
// This node is probably about to build it.
if (nearFuture)
return false;
// If the request is because of consensus, do NOT send requests.
// This node is probably about to build it.
if (consensus)
return false;
return true;
}();
ss << ". Would broadcast to peers? "
<< (shouldBroadcast ? "true." : "false.");
if (!shouldAcquire)
{
JLOG(j_.debug()) << "Abort(rule): " << ss.str();
return {};
}
bool isNew = true;
std::shared_ptr<InboundLedger> inbound;
@@ -70,6 +144,7 @@ public:
ScopedLockType sl(mLock);
if (stopping_)
{
JLOG(j_.debug()) << "Abort(stopping): " << ss.str();
return {};
}
@@ -93,23 +168,29 @@ public:
++mCounter;
}
}
ss << " IsNew: " << (isNew ? "true" : "false");
if (inbound->isFailed())
{
JLOG(j_.debug()) << "Abort(failed): " << ss.str();
return {};
}
if (!isNew)
inbound->update(seq);
if (!inbound->isComplete())
{
JLOG(j_.debug()) << "InProgress: " << ss.str();
return {};
}
JLOG(j_.debug()) << "Complete: " << ss.str();
return inbound->getLedger();
};
using namespace std::chrono_literals;
std::shared_ptr<Ledger const> ledger = perf::measureDurationAndLog(
return perf::measureDurationAndLog(
doAcquire, "InboundLedgersImp::acquire", 500ms, j_);
return ledger;
}
void
@@ -118,28 +199,25 @@ public:
std::uint32_t seq,
InboundLedger::Reason reason) override
{
std::unique_lock lock(acquiresMutex_);
try
if (CanProcess const check{acquiresMutex_, pendingAcquires_, hash})
{
if (pendingAcquires_.contains(hash))
return;
pendingAcquires_.insert(hash);
scope_unlock unlock(lock);
acquire(hash, seq, reason);
try
{
acquire(hash, seq, reason);
}
catch (std::exception const& e)
{
JLOG(j_.warn())
<< "Exception thrown for acquiring new inbound ledger "
<< hash << ": " << e.what();
}
catch (...)
{
JLOG(j_.warn()) << "Unknown exception thrown for acquiring new "
"inbound ledger "
<< hash;
}
}
catch (std::exception const& e)
{
JLOG(j_.warn())
<< "Exception thrown for acquiring new inbound ledger " << hash
<< ": " << e.what();
}
catch (...)
{
JLOG(j_.warn())
<< "Unknown exception thrown for acquiring new inbound ledger "
<< hash;
}
pendingAcquires_.erase(hash);
}
std::shared_ptr<InboundLedger>

View File

@@ -942,8 +942,9 @@ LedgerMaster::checkAccept(std::shared_ptr<Ledger const> const& ledger)
}
JLOG(m_journal.info()) << "Advancing accepted ledger to "
<< ledger->header().seq << " with >= " << minVal
<< " validations";
<< ledger->header().seq << " ("
<< to_short_string(ledger->header().hash)
<< ") with >= " << minVal << " validations";
ledger->setValidated();
ledger->setFull();

View File

@@ -13,7 +13,8 @@ TimeoutCounter::TimeoutCounter(
QueueJobParameter&& jobParameter,
beast::Journal journal)
: app_(app)
, journal_(journal)
, sink_(journal, to_short_string(hash) + " ")
, journal_(sink_)
, hash_(hash)
, timeouts_(0)
, complete_(false)
@@ -33,6 +34,8 @@ TimeoutCounter::setTimer(ScopedLockType& sl)
{
if (isDone())
return;
JLOG(journal_.debug()) << "Setting timer for " << timerInterval_.count()
<< "ms";
timer_.expires_after(timerInterval_);
timer_.async_wait(
[wptr = pmDowncast()](boost::system::error_code const& ec) {
@@ -41,6 +44,12 @@ TimeoutCounter::setTimer(ScopedLockType& sl)
if (auto ptr = wptr.lock())
{
JLOG(ptr->journal_.debug())
<< "timer: ec: " << ec << " (operation_aborted: "
<< boost::asio::error::operation_aborted << " - "
<< (ec == boost::asio::error::operation_aborted ? "aborted"
: "other")
<< ")";
ScopedLockType sl(ptr->mtx_);
ptr->queueJob(sl);
}

View File

@@ -4,6 +4,7 @@
#include <xrpld/app/main/Application.h>
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/beast/utility/WrappedSink.h>
#include <xrpl/core/Job.h>
#include <boost/asio/basic_waitable_timer.hpp>
@@ -104,6 +105,7 @@ protected:
// Used in this class for access to boost::asio::io_context and
// xrpl::Overlay. Used in subtypes for the kitchen sink.
Application& app_;
beast::WrappedSink sink_;
beast::Journal journal_;
mutable std::recursive_mutex mtx_;

View File

@@ -75,6 +75,20 @@ HashRouter::shouldProcess(
return s.shouldProcess(suppressionMap_.clock().now(), tx_interval);
}
/** Decide whether `key` should be processed on behalf of `peer`.

    Creates the suppression entry for `key` if it does not yet exist,
    then defers to the entry's per-peer interval check, all under the
    router's lock.
*/
bool
HashRouter::shouldProcessForPeer(
    uint256 const& key,
    PeerShortID peer,
    std::chrono::seconds interval)
{
    std::lock_guard lock(mutex_);
    return emplace(key).first.shouldProcessForPeer(
        peer, suppressionMap_.clock().now(), interval);
}
HashRouterFlags
HashRouter::getFlags(uint256 const& key)
{
@@ -149,4 +163,13 @@ setup_HashRouter(Config const& config)
return setup;
}
/** Return a copy of the peer set recorded for `key`.

    Creates the suppression entry for `key` if it does not yet exist.
    The copy is taken under the router's lock, so the caller gets a
    stable snapshot.
*/
auto
HashRouter::getPeers(uint256 const& key) -> std::set<PeerShortID>
{
    std::lock_guard lock(mutex_);
    return emplace(key).first.peekPeerSet();
}
} // namespace xrpl

View File

@@ -140,6 +140,13 @@ private:
return std::move(peers_);
}
/** Return set of peers waiting for reply. Leaves list unchanged. */
std::set<PeerShortID> const&
peekPeerSet()
{
return peers_;
}
/** Return seated relay time point if the message has been relayed */
std::optional<Stopwatch::time_point>
relayed() const
@@ -173,6 +180,21 @@ private:
return true;
}
/** Decide whether a message should be processed for a given peer.

    @param peer the peer the message was seen from (or is destined to).
    @param now current stopwatch time.
    @param interval minimum time between processing for the same peer.
    @return false if this peer was already processed within `interval`
            of `now`; otherwise records `now` for the peer, adds it to
            the peer list, and returns true.

    Uses a single map lookup (`try_emplace`) where the previous version
    performed three (`contains`, `operator[]` read, `operator[]` write).
*/
bool
shouldProcessForPeer(
    PeerShortID peer,
    Stopwatch::time_point now,
    std::chrono::seconds interval)
{
    // One lookup: inserts `now` if the peer is new, otherwise yields
    // the existing timestamp entry untouched.
    auto const [it, inserted] = peerProcessed_.try_emplace(peer, now);
    if (!inserted)
    {
        // Seen before: suppress if still within the interval.
        if ((it->second + interval) > now)
            return false;
        it->second = now;
    }
    // Peer may already be in the list, but adding it again doesn't hurt
    addPeer(peer);
    return true;
}
private:
HashRouterFlags flags_ = HashRouterFlags::UNDEFINED;
std::set<PeerShortID> peers_;
@@ -180,6 +202,7 @@ private:
// than one flag needs to expire independently.
std::optional<Stopwatch::time_point> relayed_;
std::optional<Stopwatch::time_point> processed_;
std::map<PeerShortID, Stopwatch::time_point> peerProcessed_;
};
public:
@@ -203,7 +226,7 @@ public:
/** Add a suppression peer and get message's relay status.
* Return pair:
* element 1: true if the peer is added.
* element 1: true if the key is added.
* element 2: optional is seated to the relay time point or
* is unseated if has not relayed yet. */
std::pair<bool, std::optional<Stopwatch::time_point>>
@@ -223,6 +246,18 @@ public:
HashRouterFlags& flags,
std::chrono::seconds tx_interval);
/** Determines whether the hashed item should be processed for the given
peer. Could be an incoming or outgoing message.
Items filtered with this function should only be processed for the given
peer once. Unlike shouldProcess, it can be processed for other peers.
*/
bool
shouldProcessForPeer(
uint256 const& key,
PeerShortID peer,
std::chrono::seconds interval);
/** Set the flags on a hash.
@return `true` if the flags were changed. `false` if unchanged.
@@ -248,6 +283,11 @@ public:
std::optional<std::set<PeerShortID>>
shouldRelay(uint256 const& key);
/** Returns a copy of the set of peers in the Entry for the key
*/
std::set<PeerShortID>
getPeers(uint256 const& key);
private:
// pair.second indicates whether the entry was created
std::pair<Entry&, bool>

View File

@@ -33,10 +33,10 @@
#include <xrpld/rpc/MPTokenIssuanceID.h>
#include <xrpld/rpc/ServerHandler.h>
#include <xrpl/basics/CanProcess.h>
#include <xrpl/basics/UptimeClock.h>
#include <xrpl/basics/mulDiv.h>
#include <xrpl/basics/safe_cast.h>
#include <xrpl/basics/scope.h>
#include <xrpl/beast/utility/rngfill.h>
#include <xrpl/core/PerfLog.h>
#include <xrpl/crypto/RFC1751.h>
@@ -408,7 +408,7 @@ public:
isFull() override;
void
setMode(OperatingMode om) override;
setMode(OperatingMode om, char const* reason) override;
bool
isBlocked() override;
@@ -886,7 +886,7 @@ NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
inline void
NetworkOPsImp::setStandAlone()
{
setMode(OperatingMode::FULL);
setMode(OperatingMode::FULL, "setStandAlone");
}
inline void
@@ -1036,7 +1036,9 @@ NetworkOPsImp::processHeartbeatTimer()
{
if (mMode != OperatingMode::DISCONNECTED)
{
setMode(OperatingMode::DISCONNECTED);
setMode(
OperatingMode::DISCONNECTED,
"Heartbeat: insufficient peers");
std::stringstream ss;
ss << "Node count (" << numPeers << ") has fallen "
<< "below required minimum (" << minPeerCount_ << ").";
@@ -1061,7 +1063,7 @@ NetworkOPsImp::processHeartbeatTimer()
if (mMode == OperatingMode::DISCONNECTED)
{
setMode(OperatingMode::CONNECTED);
setMode(OperatingMode::CONNECTED, "Heartbeat: sufficient peers");
JLOG(m_journal.info())
<< "Node count (" << numPeers << ") is sufficient.";
CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
@@ -1073,9 +1075,9 @@ NetworkOPsImp::processHeartbeatTimer()
auto origMode = mMode.load();
CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
if (mMode == OperatingMode::SYNCING)
setMode(OperatingMode::SYNCING);
setMode(OperatingMode::SYNCING, "Heartbeat: check syncing");
else if (mMode == OperatingMode::CONNECTED)
setMode(OperatingMode::CONNECTED);
setMode(OperatingMode::CONNECTED, "Heartbeat: check connected");
auto newMode = mMode.load();
if (origMode != newMode)
{
@@ -1825,7 +1827,7 @@ void
NetworkOPsImp::setAmendmentBlocked()
{
amendmentBlocked_ = true;
setMode(OperatingMode::CONNECTED);
setMode(OperatingMode::CONNECTED, "setAmendmentBlocked");
}
inline bool
@@ -1856,7 +1858,7 @@ void
NetworkOPsImp::setUNLBlocked()
{
unlBlocked_ = true;
setMode(OperatingMode::CONNECTED);
setMode(OperatingMode::CONNECTED, "setUNLBlocked");
}
inline void
@@ -1957,7 +1959,7 @@ NetworkOPsImp::checkLastClosedLedger(
if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
{
setMode(OperatingMode::CONNECTED);
setMode(OperatingMode::CONNECTED, "check LCL: not on consensus ledger");
}
if (consensus)
@@ -2048,8 +2050,9 @@ NetworkOPsImp::beginConsensus(
// this shouldn't happen unless we jump ledgers
if (mMode == OperatingMode::FULL)
{
JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
setMode(OperatingMode::TRACKING);
JLOG(m_journal.warn())
<< "beginConsensus Don't have LCL, going to tracking";
setMode(OperatingMode::TRACKING, "beginConsensus: No LCL");
CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
}
@@ -2185,7 +2188,7 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
// validations we have for LCL. If the ledger is good enough, go to
// TRACKING - TODO
if (!needNetworkLedger_)
setMode(OperatingMode::TRACKING);
setMode(OperatingMode::TRACKING, "endConsensus: check tracking");
}
if (((mMode == OperatingMode::CONNECTED) ||
@@ -2200,7 +2203,7 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
(current->header().parentCloseTime +
2 * current->header().closeTimeResolution))
{
setMode(OperatingMode::FULL);
setMode(OperatingMode::FULL, "endConsensus: check full");
}
}
@@ -2212,7 +2215,7 @@ NetworkOPsImp::consensusViewChange()
{
if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
{
setMode(OperatingMode::CONNECTED);
setMode(OperatingMode::CONNECTED, "consensusViewChange");
}
}
@@ -2531,7 +2534,7 @@ NetworkOPsImp::pubPeerStatus(std::function<Json::Value(void)> const& func)
}
void
NetworkOPsImp::setMode(OperatingMode om)
NetworkOPsImp::setMode(OperatingMode om, char const* reason)
{
using namespace std::chrono_literals;
if (om == OperatingMode::CONNECTED)
@@ -2551,11 +2554,12 @@ NetworkOPsImp::setMode(OperatingMode om)
if (mMode == om)
return;
auto const sink = om < mMode ? m_journal.warn() : m_journal.info();
mMode = om;
accounting_.mode(om);
JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
JLOG(sink) << "STATE->" << strOperatingMode() << " - " << reason;
pubServer();
}
@@ -2567,34 +2571,28 @@ NetworkOPsImp::recvValidation(
JLOG(m_journal.trace())
<< "recvValidation " << val->getLedgerHash() << " from " << source;
std::unique_lock lock(validationsMutex_);
BypassAccept bypassAccept = BypassAccept::no;
try
{
if (pendingValidations_.contains(val->getLedgerHash()))
bypassAccept = BypassAccept::yes;
else
pendingValidations_.insert(val->getLedgerHash());
scope_unlock unlock(lock);
handleNewValidation(app_, val, source, bypassAccept, m_journal);
CanProcess const check(
validationsMutex_, pendingValidations_, val->getLedgerHash());
try
{
BypassAccept bypassAccept =
check ? BypassAccept::no : BypassAccept::yes;
handleNewValidation(app_, val, source, bypassAccept, m_journal);
}
catch (std::exception const& e)
{
JLOG(m_journal.warn())
<< "Exception thrown for handling new validation "
<< val->getLedgerHash() << ": " << e.what();
}
catch (...)
{
JLOG(m_journal.warn())
<< "Unknown exception thrown for handling new validation "
<< val->getLedgerHash();
}
}
catch (std::exception const& e)
{
JLOG(m_journal.warn())
<< "Exception thrown for handling new validation "
<< val->getLedgerHash() << ": " << e.what();
}
catch (...)
{
JLOG(m_journal.warn())
<< "Unknown exception thrown for handling new validation "
<< val->getLedgerHash();
}
if (bypassAccept == BypassAccept::no)
{
pendingValidations_.erase(val->getLedgerHash());
}
lock.unlock();
pubValidation(val);

View File

@@ -191,7 +191,7 @@ public:
virtual bool
isFull() = 0;
virtual void
setMode(OperatingMode om) = 0;
setMode(OperatingMode om, char const* reason) = 0;
virtual bool
isBlocked() = 0;
virtual bool

View File

@@ -18,6 +18,7 @@ enum class ProtocolFeature {
ValidatorListPropagation,
ValidatorList2Propagation,
LedgerReplay,
LedgerDataCookies
};
/** Represents a peer connection in the overlay. */
@@ -117,6 +118,13 @@ public:
virtual bool
txReduceRelayEnabled() const = 0;
//
// Messages
//
virtual std::set<std::optional<uint64_t>>
releaseRequestCookies(uint256 const& requestHash) = 0;
};
} // namespace xrpl

View File

@@ -11,6 +11,7 @@
#include <xrpld/app/tx/apply.h>
#include <xrpld/overlay/Cluster.h>
#include <xrpld/overlay/detail/PeerImp.h>
#include <xrpld/overlay/detail/ProtocolMessage.h>
#include <xrpld/overlay/detail/Tuning.h>
#include <xrpl/basics/UptimeClock.h>
@@ -45,6 +46,8 @@ std::chrono::seconds constexpr peerTimerInterval{60};
/** The timeout for a shutdown timer */
std::chrono::seconds constexpr shutdownTimerInterval{5};
/** How often we process duplicate incoming TMGetLedger messages */
std::chrono::seconds constexpr getledgerInterval{15};
} // namespace
// TODO: Remove this exclusion once unit tests are added after the hotfix
@@ -499,6 +502,8 @@ PeerImp::supportsFeature(ProtocolFeature f) const
return protocol_ >= make_protocol(2, 2);
case ProtocolFeature::LedgerReplay:
return ledgerReplayEnabled_;
case ProtocolFeature::LedgerDataCookies:
return protocol_ >= make_protocol(2, 3);
}
return false;
}
@@ -1475,8 +1480,9 @@ PeerImp::handleTransaction(
void
PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
{
auto badData = [&](std::string const& msg) {
fee_.update(Resource::feeInvalidData, "get_ledger " + msg);
auto badData = [&](std::string const& msg, bool chargefee = true) {
if (chargefee)
fee_.update(Resource::feeInvalidData, "get_ledger " + msg);
JLOG(p_journal_.warn()) << "TMGetLedger: " << msg;
};
auto const itype{m->itype()};
@@ -1553,12 +1559,74 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
}
}
// Drop duplicate requests from the same peer for at least
// `getLedgerInterval` seconds.
// Append a little junk to prevent the hash of an incoming message
// from matching the hash of the same outgoing message.
// `shouldProcessForPeer` does not distinguish between incoming and
// outgoing, and some of the message relay logic checks the hash to see
// if the message has been relayed already. If the hashes are the same,
// a duplicate will be detected when sending the message is attempted,
// so it will fail.
auto const messageHash = sha512Half(*m, nullptr);
// Request cookies are not included in the hash. Track them here.
auto const requestCookie = [&m]() -> std::optional<uint64_t> {
if (m->has_requestcookie())
return m->requestcookie();
return std::nullopt;
}();
auto const [inserted, pending] = [&] {
std::lock_guard lock{cookieLock_};
auto& cookies = messageRequestCookies_[messageHash];
bool const pending = !cookies.empty();
return std::pair{cookies.emplace(requestCookie).second, pending};
}();
// Check if the request has been seen from this peer.
if (!app_.getHashRouter().shouldProcessForPeer(
messageHash, id_, getledgerInterval))
{
// This request has already been seen from this peer.
// Has it been seen with this request cookie (or lack thereof)?
if (inserted)
{
// This is a duplicate request, but with a new cookie. When a
// response is ready, one will be sent for each request cookie.
JLOG(p_journal_.debug())
<< "TMGetLedger: duplicate request with new request cookie: "
<< requestCookie.value_or(0)
<< ". Job pending: " << (pending ? "yes" : "no") << ": "
<< messageHash;
if (pending)
{
// Don't bother queueing up a new job if other requests are
// already pending. This should limit entries in the job queue
// to one per peer per unique request.
JLOG(p_journal_.debug())
<< "TMGetLedger: Suppressing recvGetLedger job, since one "
"is pending: "
<< messageHash;
return;
}
}
else
{
// Don't punish nodes that don't know any better
return badData(
"duplicate request: " + to_string(messageHash),
supportsFeature(ProtocolFeature::LedgerDataCookies));
}
}
// Queue a job to process the request
JLOG(p_journal_.debug())
<< "TMGetLedger: Adding recvGetLedger job: " << messageHash;
std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() {
if (auto peer = weak.lock())
peer->processLedgerRequest(m);
});
app_.getJobQueue().addJob(
jtLEDGER_REQ, "recvGetLedger", [weak, m, messageHash]() {
if (auto peer = weak.lock())
peer->processLedgerRequest(m, messageHash);
});
}
void
@@ -1674,8 +1742,9 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMReplayDeltaResponse> const& m)
void
PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
{
auto badData = [&](std::string const& msg) {
fee_.update(Resource::feeInvalidData, msg);
auto badData = [&](std::string const& msg, bool charge = true) {
if (charge)
fee_.update(Resource::feeInvalidData, msg);
JLOG(p_journal_.warn()) << "TMLedgerData: " << msg;
};
@@ -1726,23 +1795,99 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
"Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size()));
}
// If there is a request cookie, attempt to relay the message
if (m->has_requestcookie())
auto const messageHash = sha512Half(*m);
if (!app_.getHashRouter().addSuppressionPeer(messageHash, id_))
{
if (auto peer = overlay_.findPeerByShortID(m->requestcookie()))
// Don't punish nodes that don't know any better
return badData(
"Duplicate message: " + to_string(messageHash),
supportsFeature(ProtocolFeature::LedgerDataCookies));
}
bool const routed = m->has_directresponse() || m->responsecookies_size() ||
m->has_requestcookie();
{
// Check if this message needs to be forwarded to one or more peers.
// Maximum of one of the relevant fields should be populated.
XRPL_ASSERT(
!m->has_requestcookie() || !m->responsecookies_size(),
"ripple::PeerImp::onMessage(TMLedgerData) : valid cookie fields");
// Make a copy of the response cookies, then wipe the list so it can be
// forwarded cleanly
auto const responseCookies = m->responsecookies();
m->clear_responsecookies();
// Flag indicating if this response should be processed locally,
// possibly in addition to being forwarded.
bool const directResponse =
m->has_directresponse() && m->directresponse();
m->clear_directresponse();
auto const relay = [this, m, &messageHash](auto const cookie) {
if (auto peer = overlay_.findPeerByShortID(cookie))
{
XRPL_ASSERT(
!m->has_requestcookie() && !m->responsecookies_size(),
"ripple::PeerImp::onMessage(TMLedgerData) relay : no "
"cookies");
if (peer->supportsFeature(ProtocolFeature::LedgerDataCookies))
// Setting this flag is not _strictly_ necessary for peers
// that support it if there are no cookies included in the
// message, but it is more accurate.
m->set_directresponse(true);
else
m->clear_directresponse();
peer->send(
std::make_shared<Message>(*m, protocol::mtLEDGER_DATA));
}
else
JLOG(p_journal_.info())
<< "Unable to route TX/ledger data reply to peer ["
<< cookie << "]: " << messageHash;
};
// If there is a request cookie, attempt to relay the message
if (m->has_requestcookie())
{
XRPL_ASSERT(
responseCookies.empty(),
"ripple::PeerImp::onMessage(TMLedgerData) : no response "
"cookies");
m->clear_requestcookie();
peer->send(std::make_shared<Message>(*m, protocol::mtLEDGER_DATA));
relay(m->requestcookie());
if (!directResponse && responseCookies.empty())
return;
}
else
// If there's a list of request cookies, attempt to relay the message to
// all of them.
if (responseCookies.size())
{
JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
for (auto const cookie : responseCookies)
relay(cookie);
if (!directResponse)
return;
}
}
// Now that any forwarding is done check the base message (data only, no
// routing info for duplicates)
if (routed)
{
m->clear_directresponse();
XRPL_ASSERT(
!m->has_requestcookie() && !m->responsecookies_size(),
"ripple::PeerImp::onMessage(TMLedgerData) : no cookies");
auto const baseMessageHash = sha512Half(*m);
if (!app_.getHashRouter().addSuppressionPeer(baseMessageHash, id_))
{
// Don't punish nodes that don't know any better
return badData(
"Duplicate message: " + to_string(baseMessageHash),
supportsFeature(ProtocolFeature::LedgerDataCookies));
}
return;
}
uint256 const ledgerHash{m->ledgerhash()};
// Otherwise check if received data for a candidate transaction set
if (m->type() == protocol::liTS_CANDIDATE)
{
@@ -3172,16 +3317,22 @@ PeerImp::checkValidation(
// the TX tree with the specified root hash.
//
static std::shared_ptr<PeerImp>
getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
getPeerWithTree(
OverlayImpl& ov,
uint256 const& rootHash,
PeerImp const* skip,
std::function<bool(Peer::id_t)> shouldProcessCallback)
{
std::shared_ptr<PeerImp> ret;
int retScore = 0;
XRPL_ASSERT(
shouldProcessCallback, "ripple::getPeerWithTree : callback provided");
ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
if (p->hasTxSet(rootHash) && p.get() != skip)
{
auto score = p->getScore(true);
if (!ret || (score > retScore))
if (!ret || (score > retScore && shouldProcessCallback(p->id())))
{
ret = std::move(p);
retScore = score;
@@ -3200,16 +3351,19 @@ getPeerWithLedger(
OverlayImpl& ov,
uint256 const& ledgerHash,
LedgerIndex ledger,
PeerImp const* skip)
PeerImp const* skip,
std::function<bool(Peer::id_t)> shouldProcessCallback)
{
std::shared_ptr<PeerImp> ret;
int retScore = 0;
XRPL_ASSERT(
shouldProcessCallback, "ripple::getPeerWithLedger : callback provided");
ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
{
auto score = p->getScore(true);
if (!ret || (score > retScore))
if (!ret || (score > retScore && shouldProcessCallback(p->id())))
{
ret = std::move(p);
retScore = score;
@@ -3223,7 +3377,8 @@ getPeerWithLedger(
void
PeerImp::sendLedgerBase(
std::shared_ptr<Ledger const> const& ledger,
protocol::TMLedgerData& ledgerData)
protocol::TMLedgerData& ledgerData,
PeerCookieMap const& destinations)
{
JLOG(p_journal_.trace()) << "sendLedgerBase: Base data";
@@ -3255,15 +3410,102 @@ PeerImp::sendLedgerBase(
}
}
auto message{
std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
send(message);
sendToMultiple(ledgerData, destinations);
}
/** Fan a single TMLedgerData response out to every waiting peer.

    @param ledgerData the populated response message. NOTE: its routing
           fields (requestcookie / responsecookies / directresponse)
           are mutated in place while iterating, so each send reflects
           the destination it is built for.
    @param destinations map of peer -> set of request cookies that peer
           is waiting on; an unseated cookie means the peer wants the
           response for itself (a "direct" response).

    Peers that support the LedgerDataCookies protocol feature receive
    ONE message carrying every response cookie plus the direct-response
    flag; legacy peers receive one message per cookie, each with a
    single requestcookie (or none, for a direct response).
*/
void
PeerImp::sendToMultiple(
    protocol::TMLedgerData& ledgerData,
    PeerCookieMap const& destinations)
{
    // The current peer is expected to appear among the destinations;
    // verified by the assert at the bottom.
    bool foundSelf = false;
    for (auto const& [peer, cookies] : destinations)
    {
        if (peer.get() == this)
            foundSelf = true;
        bool const multipleCookies =
            peer->supportsFeature(ProtocolFeature::LedgerDataCookies);
        // Cookies to batch into a single message (feature-supporting
        // peers only).
        std::vector<std::uint64_t> sendCookies;
        bool directResponse = false;
        if (!multipleCookies)
        {
            // Legacy peer: one message will be sent per cookie below.
            JLOG(p_journal_.debug())
                << "sendToMultiple: Sending " << cookies.size()
                << " TMLedgerData messages to peer [" << peer->id()
                << "]: " << sha512Half(ledgerData);
        }
        for (auto const& cookie : cookies)
        {
            // Unfortunately, need a separate Message object for every
            // combination
            if (cookie)
            {
                if (multipleCookies)
                {
                    // Save this one for later to send a single message
                    sendCookies.emplace_back(*cookie);
                    continue;
                }
                // Feature not supported, so send a single message with a
                // single cookie
                ledgerData.set_requestcookie(*cookie);
            }
            else
            {
                if (multipleCookies)
                {
                    // Set this flag later on the single message
                    directResponse = true;
                    continue;
                }
                ledgerData.clear_requestcookie();
            }
            // Only legacy peers reach this point; feature-supporting
            // peers `continue`d above.
            XRPL_ASSERT(
                !multipleCookies,
                "ripple::PeerImp::sendToMultiple : ledger data cookies "
                "unsupported");
            auto message{
                std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
            peer->send(message);
        }
        if (multipleCookies)
        {
            // Send a single message with all the cookies and/or the direct
            // response flag, so the receiver can farm out the single message to
            // multiple peers and/or itself
            XRPL_ASSERT(
                sendCookies.size() || directResponse,
                "ripple::PeerImp::sendToMultiple : valid response options");
            ledgerData.clear_requestcookie();
            ledgerData.clear_responsecookies();
            ledgerData.set_directresponse(directResponse);
            for (auto const& cookie : sendCookies)
                ledgerData.add_responsecookies(cookie);
            auto message{
                std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
            peer->send(message);
            JLOG(p_journal_.debug())
                << "sendToMultiple: Sent 1 TMLedgerData message to peer ["
                << peer->id() << "]: including "
                << (directResponse ? "the direct response flag and " : "")
                << sendCookies.size() << " response cookies. "
                << ": " << sha512Half(ledgerData);
        }
    }
    XRPL_ASSERT(
        foundSelf, "ripple::PeerImp::sendToMultiple : current peer included");
}
std::shared_ptr<Ledger const>
PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
PeerImp::getLedger(
std::shared_ptr<protocol::TMGetLedger> const& m,
uint256 const& mHash)
{
JLOG(p_journal_.trace()) << "getLedger: Ledger";
JLOG(p_journal_.trace()) << "getLedger: Ledger " << mHash;
std::shared_ptr<Ledger const> ledger;
@@ -3280,22 +3522,33 @@ PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
if (m->has_querytype() && !m->has_requestcookie())
{
// Attempt to relay the request to a peer
// Note repeated messages will not relay to the same peer
// before `getLedgerInterval` seconds. This prevents one
// peer from getting flooded, and distributes the request
// load. If a request has been relayed to all eligible
// peers, then this message will not be relayed.
if (auto const peer = getPeerWithLedger(
overlay_,
ledgerHash,
m->has_ledgerseq() ? m->ledgerseq() : 0,
this))
this,
[&](Peer::id_t id) {
return app_.getHashRouter().shouldProcessForPeer(
mHash, id, getledgerInterval);
}))
{
m->set_requestcookie(id());
peer->send(
std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
JLOG(p_journal_.debug())
<< "getLedger: Request relayed to peer";
<< "getLedger: Request relayed to peer [" << peer->id()
<< "]: " << mHash;
return ledger;
}
JLOG(p_journal_.trace())
<< "getLedger: Failed to find peer to relay request";
<< "getLedger: Don't have ledger with hash " << ledgerHash
<< ": " << mHash;
}
}
}
@@ -3305,7 +3558,7 @@ PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
{
JLOG(p_journal_.debug())
<< "getLedger: Early ledger sequence request";
<< "getLedger: Early ledger sequence request " << mHash;
}
else
{
@@ -3314,7 +3567,7 @@ PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
{
JLOG(p_journal_.debug())
<< "getLedger: Don't have ledger with sequence "
<< m->ledgerseq();
<< m->ledgerseq() << ": " << mHash;
}
}
}
@@ -3337,29 +3590,33 @@ PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
Resource::feeMalformedRequest, "get_ledger ledgerSeq");
ledger.reset();
JLOG(p_journal_.warn())
<< "getLedger: Invalid ledger sequence " << ledgerSeq;
JLOG(p_journal_.warn()) << "getLedger: Invalid ledger sequence "
<< ledgerSeq << ": " << mHash;
}
}
else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch())
{
ledger.reset();
JLOG(p_journal_.debug())
<< "getLedger: Early ledger sequence request " << ledgerSeq;
<< "getLedger: Early ledger sequence request " << ledgerSeq
<< ": " << mHash;
}
}
else
{
JLOG(p_journal_.debug()) << "getLedger: Unable to find ledger";
JLOG(p_journal_.debug())
<< "getLedger: Unable to find ledger " << mHash;
}
return ledger;
}
std::shared_ptr<SHAMap const>
PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
PeerImp::getTxSet(
std::shared_ptr<protocol::TMGetLedger> const& m,
uint256 const& mHash) const
{
JLOG(p_journal_.trace()) << "getTxSet: TX set";
JLOG(p_journal_.trace()) << "getTxSet: TX set " << mHash;
uint256 const txSetHash{m->ledgerhash()};
std::shared_ptr<SHAMap> shaMap{
@@ -3369,22 +3626,34 @@ PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
if (m->has_querytype() && !m->has_requestcookie())
{
// Attempt to relay the request to a peer
if (auto const peer = getPeerWithTree(overlay_, txSetHash, this))
// Note repeated messages will not relay to the same peer
// before `getLedgerInterval` seconds. This prevents one
// peer from getting flooded, and distributes the request
// load. If a request has been relayed to all eligible
// peers, then this message will not be relayed.
if (auto const peer = getPeerWithTree(
overlay_, txSetHash, this, [&](Peer::id_t id) {
return app_.getHashRouter().shouldProcessForPeer(
mHash, id, getledgerInterval);
}))
{
m->set_requestcookie(id());
peer->send(
std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
JLOG(p_journal_.debug()) << "getTxSet: Request relayed";
JLOG(p_journal_.debug())
<< "getTxSet: Request relayed to peer [" << peer->id()
<< "]: " << mHash;
}
else
{
JLOG(p_journal_.debug())
<< "getTxSet: Failed to find relay peer";
<< "getTxSet: Failed to find relay peer: " << mHash;
}
}
else
{
JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set";
JLOG(p_journal_.debug())
<< "getTxSet: Failed to find TX set " << mHash;
}
}
@@ -3392,7 +3661,9 @@ PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
}
void
PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
PeerImp::processLedgerRequest(
std::shared_ptr<protocol::TMGetLedger> const& m,
uint256 const& mHash)
{
// Do not resource charge a peer responding to a relay
if (!m->has_requestcookie())
@@ -3406,9 +3677,74 @@ PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
bool fatLeaves{true};
auto const itype{m->itype()};
auto getDestinations = [&] {
// If a ledger data message is generated, it's going to be sent to every
// peer that is waiting for it.
PeerCookieMap result;
std::size_t numCookies = 0;
{
// Don't do the work under this peer if this peer is not waiting for
// any replies
auto myCookies = releaseRequestCookies(mHash);
if (myCookies.empty())
{
JLOG(p_journal_.debug()) << "TMGetLedger: peer is no longer "
"waiting for response to request: "
<< mHash;
return result;
}
numCookies += myCookies.size();
result[shared_from_this()] = myCookies;
}
std::set<HashRouter::PeerShortID> const peers =
app_.getHashRouter().getPeers(mHash);
for (auto const peerID : peers)
{
// This loop does not need to be done under the HashRouter
// lock because findPeerByShortID and releaseRequestCookies
// are thread safe, and everything else is local
if (auto p = overlay_.findPeerByShortID(peerID))
{
auto cookies = p->releaseRequestCookies(mHash);
numCookies += cookies.size();
if (result.contains(p))
{
// Unlikely, but if a request came in to this peer while
// iterating, add the items instead of copying /
// overwriting.
XRPL_ASSERT(
p.get() == this,
"ripple::PeerImp::processLedgerRequest : found self in "
"map");
for (auto const& cookie : cookies)
result[p].emplace(cookie);
}
else if (cookies.size())
result[p] = cookies;
}
}
JLOG(p_journal_.debug())
<< "TMGetLedger: Processing request for " << result.size()
<< " peers. Will send " << numCookies
<< " messages if successful: " << mHash;
return result;
};
// Will only populate this if we're going to do work.
PeerCookieMap destinations;
if (itype == protocol::liTS_CANDIDATE)
{
if (sharedMap = getTxSet(m); !sharedMap)
destinations = getDestinations();
if (destinations.empty())
// Nowhere to send the response!
return;
if (sharedMap = getTxSet(m, mHash); !sharedMap)
return;
map = sharedMap.get();
@@ -3416,8 +3752,6 @@ PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
ledgerData.set_ledgerseq(0);
ledgerData.set_ledgerhash(m->ledgerhash());
ledgerData.set_type(protocol::liTS_CANDIDATE);
if (m->has_requestcookie())
ledgerData.set_requestcookie(m->requestcookie());
// We'll already have most transactions
fatLeaves = false;
@@ -3436,7 +3770,12 @@ PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
return;
}
if (ledger = getLedger(m); !ledger)
destinations = getDestinations();
if (destinations.empty())
// Nowhere to send the response!
return;
if (ledger = getLedger(m, mHash); !ledger)
return;
// Fill out the reply
@@ -3444,13 +3783,11 @@ PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size());
ledgerData.set_ledgerseq(ledger->header().seq);
ledgerData.set_type(itype);
if (m->has_requestcookie())
ledgerData.set_requestcookie(m->requestcookie());
switch (itype)
{
case protocol::liBASE:
sendLedgerBase(ledger, ledgerData);
sendLedgerBase(ledger, ledgerData, destinations);
return;
case protocol::liTX_NODE:
@@ -3567,7 +3904,7 @@ PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
if (ledgerData.nodes_size() == 0)
return;
send(std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA));
sendToMultiple(ledgerData, destinations);
}
int
@@ -3615,6 +3952,19 @@ PeerImp::isHighLatency() const
return latency_ >= peerHighLatency;
}
/** Atomically remove and return the request cookies recorded for a
 *  request hash.
 *
 *  Each cookie identifies a downstream requester waiting on the response
 *  to the deduplicated request identified by `requestHash`.
 *  NOTE(review): a `std::nullopt` entry presumably represents this peer's
 *  own (cookie-less) request — confirm against the insertion site.
 *
 *  Thread safe: all access to messageRequestCookies_ is serialized by
 *  cookieLock_.
 *
 *  @param requestHash digest identifying the deduplicated request
 *  @return the set of cookies that were waiting (empty if none)
 */
std::set<std::optional<uint64_t>>
PeerImp::releaseRequestCookies(uint256 const& requestHash)
{
    std::set<std::optional<uint64_t>> result;
    std::lock_guard lock(cookieLock_);
    // extract() removes the entry with a single lookup, replacing the
    // original contains() + operator[] + erase() triple lookup.
    if (auto node = messageRequestCookies_.extract(requestHash))
        result = std::move(node.mapped());
    return result;
}
void
PeerImp::Metrics::add_message(std::uint64_t bytes)
{

View File

@@ -253,6 +253,15 @@ private:
bool ledgerReplayEnabled_ = false;
LedgerReplayMsgHandler ledgerReplayMsgHandler_;
// Track message requests and responses
// TODO: Use an expiring cache or something
using MessageCookieMap =
std::map<uint256, std::set<std::optional<uint64_t>>>;
using PeerCookieMap =
std::map<std::shared_ptr<Peer>, std::set<std::optional<uint64_t>>>;
std::mutex mutable cookieLock_;
MessageCookieMap messageRequestCookies_;
friend class OverlayImpl;
class Metrics
@@ -496,6 +505,13 @@ public:
return txReduceRelayEnabled_;
}
//
// Messages
//
std::set<std::optional<uint64_t>>
releaseRequestCookies(uint256 const& requestHash) override;
private:
/**
* @brief Handles a failure associated with a specific error code.
@@ -798,16 +814,28 @@ private:
void
sendLedgerBase(
std::shared_ptr<Ledger const> const& ledger,
protocol::TMLedgerData& ledgerData);
std::shared_ptr<Ledger const>
getLedger(std::shared_ptr<protocol::TMGetLedger> const& m);
std::shared_ptr<SHAMap const>
getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const;
protocol::TMLedgerData& ledgerData,
PeerCookieMap const& destinations);
void
processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m);
sendToMultiple(
protocol::TMLedgerData& ledgerData,
PeerCookieMap const& destinations);
std::shared_ptr<Ledger const>
getLedger(
std::shared_ptr<protocol::TMGetLedger> const& m,
uint256 const& mHash);
std::shared_ptr<SHAMap const>
getTxSet(
std::shared_ptr<protocol::TMGetLedger> const& m,
uint256 const& mHash) const;
void
processLedgerRequest(
std::shared_ptr<protocol::TMGetLedger> const& m,
uint256 const& mHash);
};
//------------------------------------------------------------------------------

View File

@@ -1,8 +1,11 @@
#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/HashRouter.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/overlay/Overlay.h>
#include <xrpld/overlay/PeerSet.h>
#include <xrpl/core/JobQueue.h>
#include <xrpl/protocol/digest.h>
namespace xrpl {
@@ -86,16 +89,52 @@ PeerSetImpl::sendRequest(
std::shared_ptr<Peer> const& peer)
{
auto packet = std::make_shared<Message>(message, type);
auto const messageHash = [&]() {
auto const packetBuffer =
packet->getBuffer(compression::Compressed::Off);
return sha512Half(Slice(packetBuffer.data(), packetBuffer.size()));
}();
// Allow messages to be re-sent to the same peer after a delay
using namespace std::chrono_literals;
constexpr std::chrono::seconds interval = 30s;
if (peer)
{
peer->send(packet);
if (app_.getHashRouter().shouldProcessForPeer(
messageHash, peer->id(), interval))
{
JLOG(journal_.trace())
<< "Sending " << protocolMessageName(type) << " message to ["
<< peer->id() << "]: " << messageHash;
peer->send(packet);
}
else
JLOG(journal_.debug())
<< "Suppressing sending duplicate " << protocolMessageName(type)
<< " message to [" << peer->id() << "]: " << messageHash;
return;
}
for (auto id : peers_)
{
if (auto p = app_.overlay().findPeerByShortID(id))
p->send(packet);
{
if (app_.getHashRouter().shouldProcessForPeer(
messageHash, p->id(), interval))
{
JLOG(journal_.trace())
<< "Sending " << protocolMessageName(type)
<< " message to [" << p->id() << "]: " << messageHash;
p->send(packet);
}
else
JLOG(journal_.debug())
<< "Suppressing sending duplicate "
<< protocolMessageName(type) << " message to [" << p->id()
<< "]: " << messageHash;
}
}
}

View File

@@ -24,6 +24,12 @@ protocolMessageType(protocol::TMGetLedger const&)
return protocol::mtGET_LEDGER;
}
// Maps a TMLedgerData message to its wire-protocol enum value so the
// generic message machinery (e.g. the hash_append overloads below) can
// tag a message by its concrete protobuf type.
inline protocol::MessageType
protocolMessageType(protocol::TMLedgerData const&)
{
    return protocol::mtLEDGER_DATA;
}
inline protocol::MessageType
protocolMessageType(protocol::TMReplayDeltaRequest const&)
{
@@ -467,4 +473,64 @@ invokeProtocolMessage(
} // namespace xrpl
namespace protocol {
/** Feed a TMGetLedger request into a hasher for duplicate detection.
 *
 *  The digest covers only fields that describe the requested content, so
 *  requests for the same data hash equal and can be suppressed/relayed as
 *  duplicates (see the HashRouter usage elsewhere in this change).
 *  The field order below is part of the digest definition — do not
 *  reorder these calls.
 */
template <class Hasher>
void
hash_append(Hasher& h, TMGetLedger const& msg)
{
    using beast::hash_append;
    // NOTE(review): surrounding code uses `namespace xrpl` after the
    // ripple->xrpl rename — confirm `ripple` is still a valid namespace
    // (or alias) here.
    using namespace ripple;
    // Message-type tag keeps this digest distinct from other message kinds.
    hash_append(h, safe_cast<int>(protocolMessageType(msg)));
    hash_append(h, safe_cast<int>(msg.itype()));
    // Optional fields are hashed only when present.
    if (msg.has_ltype())
        hash_append(h, safe_cast<int>(msg.ltype()));
    if (msg.has_ledgerhash())
        hash_append(h, msg.ledgerhash());
    if (msg.has_ledgerseq())
        hash_append(h, msg.ledgerseq());
    for (auto const& nodeId : msg.nodeids())
        hash_append(h, nodeId);
    // Element count is appended after the elements themselves.
    hash_append(h, msg.nodeids_size());
    // Do NOT include the request cookie. It does not affect the content of the
    // request, but only where to route the results.
    // if (msg.has_requestcookie())
    //     hash_append(h, msg.requestcookie());
    if (msg.has_querytype())
        hash_append(h, safe_cast<int>(msg.querytype()));
    if (msg.has_querydepth())
        hash_append(h, msg.querydepth());
}
/** Feed a TMLedgerData response into a hasher.
 *
 *  The field order below defines the digest — do not reorder these calls.
 *  NOTE(review): unlike the TMGetLedger overload, this one DOES hash the
 *  request cookie, so otherwise-identical responses routed to different
 *  requesters hash differently — confirm that is the intended behavior
 *  at the call sites.
 */
template <class Hasher>
void
hash_append(Hasher& h, TMLedgerData const& msg)
{
    using beast::hash_append;
    // NOTE(review): surrounding code uses `namespace xrpl` after the
    // ripple->xrpl rename — confirm `ripple` is still valid here.
    using namespace ripple;
    // Message-type tag keeps this digest distinct from other message kinds.
    hash_append(h, safe_cast<int>(protocolMessageType(msg)));
    hash_append(h, msg.ledgerhash());
    hash_append(h, msg.ledgerseq());
    hash_append(h, safe_cast<int>(msg.type()));
    for (auto const& node : msg.nodes())
    {
        hash_append(h, node.nodedata());
        // nodeid is optional per node.
        if (node.has_nodeid())
            hash_append(h, node.nodeid());
    }
    // Element count is appended after the elements themselves.
    hash_append(h, msg.nodes_size());
    if (msg.has_requestcookie())
        hash_append(h, msg.requestcookie());
    if (msg.has_error())
        hash_append(h, safe_cast<int>(msg.error()));
}
} // namespace protocol
#endif

View File

@@ -21,7 +21,9 @@ namespace xrpl {
constexpr ProtocolVersion const supportedProtocolList[]
{
{2, 1},
{2, 2}
{2, 2},
// Adds TMLedgerData::responseCookies and directResponse
{2, 3}
};
// clang-format on