mirror of
https://github.com/XRPLF/rippled.git
synced 2026-02-07 23:42:28 +00:00
Compare commits
47 Commits
copilot/ad
...
pratik/Fix
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
69597e4b3b | ||
|
|
65ba439117 | ||
|
|
14e3e098d5 | ||
|
|
1a14b813b8 | ||
|
|
95c9851146 | ||
|
|
127f44a40b | ||
|
|
acca1c73cf | ||
|
|
567433d272 | ||
|
|
4600c381b9 | ||
|
|
be28f4d489 | ||
|
|
413aee0752 | ||
|
|
6b16a5a8ed | ||
|
|
fd53813746 | ||
|
|
bbb03e153e | ||
|
|
63b6ec98ea | ||
|
|
7ef9fb4290 | ||
|
|
49dcb6b60b | ||
|
|
26dd1fafe3 | ||
|
|
4b99771021 | ||
|
|
e6664fe4cf | ||
|
|
b5001bc258 | ||
|
|
5dacfa1938 | ||
|
|
394b256f02 | ||
|
|
5bfa38f6c5 | ||
|
|
7c6c49bf98 | ||
|
|
6334be1ff0 | ||
|
|
a9c3bb84ba | ||
|
|
ca99e40290 | ||
|
|
7612c1af0c | ||
|
|
67e40be1ab | ||
|
|
0132174a7b | ||
|
|
8773cc4bbf | ||
|
|
dabdadfff5 | ||
|
|
bfe2cd7893 | ||
|
|
0584c20f36 | ||
|
|
3ced0b27b7 | ||
|
|
f83b27f7dd | ||
|
|
cdb41b5376 | ||
|
|
f223c89a9f | ||
|
|
efe07c09f3 | ||
|
|
79cde8b199 | ||
|
|
2078ce01cf | ||
|
|
2770a9cdf3 | ||
|
|
05ef3b1ad8 | ||
|
|
7dd4dbe285 | ||
|
|
b32a5f2c08 | ||
|
|
df76002a44 |
@@ -89,6 +89,7 @@ words:
|
||||
- endmacro
|
||||
- exceptioned
|
||||
- Falco
|
||||
- fcontext
|
||||
- finalizers
|
||||
- firewalled
|
||||
- fmtdur
|
||||
@@ -101,6 +102,7 @@ words:
|
||||
- gpgcheck
|
||||
- gpgkey
|
||||
- hotwallet
|
||||
- hwaddress
|
||||
- hwrap
|
||||
- ifndef
|
||||
- inequation
|
||||
@@ -217,6 +219,7 @@ words:
|
||||
- soci
|
||||
- socidb
|
||||
- sslws
|
||||
- stackful
|
||||
- statsd
|
||||
- STATSDCOLLECTOR
|
||||
- stissue
|
||||
|
||||
8
.github/CODEOWNERS
vendored
Normal file
8
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
# Allow anyone to review any change by default.
|
||||
*
|
||||
|
||||
# Require the rpc-reviewers team to review changes to the rpc code.
|
||||
include/xrpl/protocol/ @xrplf/rpc-reviewers
|
||||
src/libxrpl/protocol/ @xrplf/rpc-reviewers
|
||||
src/xrpld/rpc/ @xrplf/rpc-reviewers
|
||||
src/xrpld/app/misc/ @xrplf/rpc-reviewers
|
||||
27
.github/scripts/strategy-matrix/generate.py
vendored
27
.github/scripts/strategy-matrix/generate.py
vendored
@@ -196,22 +196,11 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
# Enable code coverage for Debian Bookworm using GCC 15 in Debug on
|
||||
# linux/amd64
|
||||
if (
|
||||
f"{os['distro_name']}-{os['distro_version']}" == "debian-bookworm"
|
||||
and f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
|
||||
and build_type == "Debug"
|
||||
and architecture["platform"] == "linux/amd64"
|
||||
):
|
||||
cmake_args = f"{cmake_args} -Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0"
|
||||
|
||||
# Enable unity build for Ubuntu Jammy using GCC 12 in Debug on
|
||||
# linux/amd64.
|
||||
if (
|
||||
f"{os['distro_name']}-{os['distro_version']}" == "ubuntu-jammy"
|
||||
and f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
|
||||
and build_type == "Debug"
|
||||
and architecture["platform"] == "linux/amd64"
|
||||
):
|
||||
cmake_args = f"{cmake_args} -Dunity=ON"
|
||||
cmake_args = f"-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}"
|
||||
|
||||
# Generate a unique name for the configuration, e.g. macos-arm64-debug
|
||||
# or debian-bookworm-gcc-12-amd64-release.
|
||||
@@ -228,18 +217,18 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
config_name += f"-{build_type.lower()}"
|
||||
if "-Dcoverage=ON" in cmake_args:
|
||||
config_name += "-coverage"
|
||||
if "-Dunity=ON" in cmake_args:
|
||||
config_name += "-unity"
|
||||
|
||||
# Add the configuration to the list, with the most unique fields first,
|
||||
# so that they are easier to identify in the GitHub Actions UI, as long
|
||||
# names get truncated.
|
||||
# Add Address and Thread (both coupled with UB) sanitizers for specific bookworm distros.
|
||||
# GCC-Asan rippled-embedded tests are failing because of https://github.com/google/sanitizers/issues/856
|
||||
if (
|
||||
os["distro_version"] == "bookworm"
|
||||
and f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
|
||||
):
|
||||
if os[
|
||||
"distro_version"
|
||||
] == "bookworm" and f"{os['compiler_name']}-{os['compiler_version']}" in [
|
||||
"clang-20",
|
||||
"gcc-13",
|
||||
]:
|
||||
# Add ASAN + UBSAN configuration.
|
||||
configurations.append(
|
||||
{
|
||||
|
||||
20
.github/workflows/reusable-build-test-config.yml
vendored
20
.github/workflows/reusable-build-test-config.yml
vendored
@@ -205,14 +205,18 @@ jobs:
|
||||
- name: Set sanitizer options
|
||||
if: ${{ !inputs.build_only && env.SANITIZERS_ENABLED == 'true' }}
|
||||
run: |
|
||||
echo "ASAN_OPTIONS=print_stacktrace=1:detect_container_overflow=0:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/asan.supp" >> ${GITHUB_ENV}
|
||||
echo "TSAN_OPTIONS=second_deadlock_stack=1:halt_on_error=0:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/tsan.supp" >> ${GITHUB_ENV}
|
||||
echo "UBSAN_OPTIONS=suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/ubsan.supp" >> ${GITHUB_ENV}
|
||||
echo "LSAN_OPTIONS=suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/lsan.supp" >> ${GITHUB_ENV}
|
||||
echo "ASAN_OPTIONS=include=${GITHUB_WORKSPACE}/sanitizers/suppressions/runtime-asan-options.txt:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/asan.supp" >> ${GITHUB_ENV}
|
||||
echo "TSAN_OPTIONS=include=${GITHUB_WORKSPACE}/sanitizers/suppressions/runtime-tsan-options.txt:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/tsan.supp" >> ${GITHUB_ENV}
|
||||
echo "UBSAN_OPTIONS=include=${GITHUB_WORKSPACE}/sanitizers/suppressions/runtime-ubsan-options.txt:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/ubsan.supp" >> ${GITHUB_ENV}
|
||||
echo "LSAN_OPTIONS=include=${GITHUB_WORKSPACE}/sanitizers/suppressions/runtime-lsan-options.txt:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/lsan.supp" >> ${GITHUB_ENV}
|
||||
|
||||
- name: Run the separate tests
|
||||
# We continue on error here because we want to try the Embedded tests before
|
||||
# failing. This will give us details on all the failures at once.
|
||||
continue-on-error: true
|
||||
if: ${{ !inputs.build_only }}
|
||||
working-directory: ${{ env.BUILD_DIR }}
|
||||
id: separate_tests
|
||||
# Windows locks some of the build files while running tests, and parallel jobs can collide
|
||||
env:
|
||||
BUILD_TYPE: ${{ inputs.build_type }}
|
||||
@@ -228,8 +232,14 @@ jobs:
|
||||
working-directory: ${{ runner.os == 'Windows' && format('{0}/{1}', env.BUILD_DIR, inputs.build_type) || env.BUILD_DIR }}
|
||||
env:
|
||||
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
|
||||
PARALLELISM: ${{ env.SANITIZERS_ENABLED == 'true' && '1' || steps.nproc.outputs.nproc }}
|
||||
run: |
|
||||
./xrpld --unittest --unittest-jobs "${BUILD_NPROC}"
|
||||
./xrpld --unittest --unittest-jobs "${PARALLELISM}"
|
||||
|
||||
# Pipeline should fail if the separate tests failed.
|
||||
- name: Check results of the SeparateTests
|
||||
if: ${{ !inputs.build_only && steps.separate_tests.outcome == 'failure' }}
|
||||
run: exit 1
|
||||
|
||||
- name: Debug failure (Linux)
|
||||
if: ${{ failure() && runner.os == 'Linux' && !inputs.build_only }}
|
||||
|
||||
@@ -71,12 +71,6 @@ This release contains bug fixes only and no API changes.
|
||||
|
||||
This release contains bug fixes only and no API changes.
|
||||
|
||||
## Unreleased Changes
|
||||
|
||||
### Additions and bugfixes
|
||||
|
||||
- `submit`: Augmented response fields (`accepted`, `applied`, `broadcast`, `queued`, `kept`, `account_sequence_next`, `account_sequence_available`, `open_ledger_cost`, `validated_ledger_index`) are now included in sign-and-submit mode. Previously, these fields were only returned when submitting a binary transaction blob. ([#6304](https://github.com/XRPLF/rippled/pull/6304))
|
||||
|
||||
## XRP Ledger server version 2.5.0
|
||||
|
||||
[Version 2.5.0](https://github.com/XRPLF/rippled/releases/tag/2.5.0) was released on Jun 24, 2025.
|
||||
|
||||
7
BUILD.md
7
BUILD.md
@@ -575,16 +575,10 @@ See [Sanitizers docs](./docs/build/sanitizers.md) for more details.
|
||||
| `assert` | OFF | Enable assertions. |
|
||||
| `coverage` | OFF | Prepare the coverage report. |
|
||||
| `tests` | OFF | Build tests. |
|
||||
| `unity` | OFF | Configure a unity build. |
|
||||
| `xrpld` | OFF | Build the xrpld application, and not just the libxrpl library. |
|
||||
| `werr` | OFF | Treat compilation warnings as errors |
|
||||
| `wextra` | OFF | Enable additional compilation warnings |
|
||||
|
||||
[Unity builds][5] may be faster for the first build (at the cost of much more
|
||||
memory) since they concatenate sources into fewer translation units. Non-unity
|
||||
builds may be faster for incremental builds, and can be helpful for detecting
|
||||
`#include` omissions.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Conan
|
||||
@@ -651,7 +645,6 @@ If you want to experiment with a new package, follow these steps:
|
||||
[1]: https://github.com/conan-io/conan-center-index/issues/13168
|
||||
[2]: https://en.cppreference.com/w/cpp/compiler_support/20
|
||||
[3]: https://docs.conan.io/en/latest/getting_started.html
|
||||
[5]: https://en.wikipedia.org/wiki/Unity_build
|
||||
[6]: https://github.com/boostorg/beast/issues/2648
|
||||
[7]: https://github.com/boostorg/beast/issues/2661
|
||||
[gcovr]: https://gcovr.com/en/stable/getting-started.html
|
||||
|
||||
@@ -940,7 +940,23 @@
|
||||
#
|
||||
# path Location to store the database
|
||||
#
|
||||
# Optional keys for NuDB and RocksDB:
|
||||
# Optional keys
|
||||
#
|
||||
# cache_size Size of cache for database records. Default is 16384.
|
||||
# Setting this value to 0 will use the default value.
|
||||
#
|
||||
# cache_age Length of time in minutes to keep database records
|
||||
# cached. Default is 5 minutes. Setting this value to
|
||||
# 0 will use the default value.
|
||||
#
|
||||
# Note: if neither cache_size nor cache_age is
|
||||
# specified, the cache for database records will not
|
||||
# be created. If only one of cache_size or cache_age
|
||||
# is specified, the cache will be created using the
|
||||
# default value for the unspecified parameter.
|
||||
#
|
||||
# Note: the cache will not be created if online_delete
|
||||
# is specified.
|
||||
#
|
||||
# fast_load Boolean. If set, load the last persisted ledger
|
||||
# from disk upon process start before syncing to
|
||||
@@ -948,6 +964,8 @@
|
||||
# if sufficient IOPS capacity is available.
|
||||
# Default 0.
|
||||
#
|
||||
# Optional keys for NuDB or RocksDB:
|
||||
#
|
||||
# earliest_seq The default is 32570 to match the XRP ledger
|
||||
# network's earliest allowed sequence. Alternate
|
||||
# networks may set this value. Minimum value of 1.
|
||||
|
||||
@@ -17,7 +17,7 @@ find_dependency(Boost
|
||||
chrono
|
||||
container
|
||||
context
|
||||
coroutine
|
||||
coroutine2
|
||||
date_time
|
||||
filesystem
|
||||
program_options
|
||||
|
||||
@@ -4,12 +4,7 @@
|
||||
|
||||
include(target_protobuf_sources)
|
||||
|
||||
# Protocol buffers cannot participate in a unity build,
|
||||
# because all the generated sources
|
||||
# define a bunch of `static const` variables with the same names,
|
||||
# so we just build them as a separate library.
|
||||
add_library(xrpl.libpb)
|
||||
set_target_properties(xrpl.libpb PROPERTIES UNITY_BUILD OFF)
|
||||
target_protobuf_sources(xrpl.libpb xrpl/proto LANGUAGE cpp IMPORT_DIRS include/xrpl/proto
|
||||
PROTOS include/xrpl/proto/xrpl.proto)
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ target_compile_definitions(
|
||||
BOOST_FILESYSTEM_NO_DEPRECATED
|
||||
>
|
||||
$<$<NOT:$<BOOL:${boost_show_deprecated}>>:
|
||||
BOOST_COROUTINES_NO_DEPRECATION_WARNING
|
||||
BOOST_COROUTINES2_NO_DEPRECATION_WARNING
|
||||
BOOST_BEAST_ALLOW_DEPRECATED
|
||||
BOOST_FILESYSTEM_DEPRECATED
|
||||
>
|
||||
|
||||
@@ -30,14 +30,6 @@ if (tests)
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
option(unity "Creates a build using UNITY support in cmake." OFF)
|
||||
if (unity)
|
||||
if (NOT is_ci)
|
||||
set(CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
|
||||
endif ()
|
||||
set(CMAKE_UNITY_BUILD ON CACHE BOOL "Do a unity build")
|
||||
endif ()
|
||||
|
||||
if (is_clang AND is_linux)
|
||||
option(voidstar "Enable Antithesis instrumentation." OFF)
|
||||
endif ()
|
||||
|
||||
@@ -4,13 +4,12 @@ include(XrplSanitizers)
|
||||
find_package(Boost REQUIRED
|
||||
COMPONENTS chrono
|
||||
container
|
||||
coroutine
|
||||
context
|
||||
date_time
|
||||
filesystem
|
||||
json
|
||||
program_options
|
||||
regex
|
||||
system
|
||||
thread)
|
||||
|
||||
add_library(xrpl_boost INTERFACE)
|
||||
@@ -21,7 +20,7 @@ target_link_libraries(
|
||||
INTERFACE Boost::headers
|
||||
Boost::chrono
|
||||
Boost::container
|
||||
Boost::coroutine
|
||||
Boost::context
|
||||
Boost::date_time
|
||||
Boost::filesystem
|
||||
Boost::json
|
||||
@@ -32,13 +31,13 @@ target_link_libraries(
|
||||
if (Boost_COMPILER)
|
||||
target_link_libraries(xrpl_boost INTERFACE Boost::disable_autolinking)
|
||||
endif ()
|
||||
if (SANITIZERS_ENABLED AND is_clang)
|
||||
# TODO: gcc does not support -fsanitize-blacklist...can we do something else for gcc ?
|
||||
if (NOT Boost_INCLUDE_DIRS AND TARGET Boost::headers)
|
||||
get_target_property(Boost_INCLUDE_DIRS Boost::headers INTERFACE_INCLUDE_DIRECTORIES)
|
||||
endif ()
|
||||
message(STATUS "Adding [${Boost_INCLUDE_DIRS}] to sanitizer blacklist")
|
||||
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt "src:${Boost_INCLUDE_DIRS}/*")
|
||||
target_compile_options(opts INTERFACE # ignore boost headers for sanitizing
|
||||
-fsanitize-blacklist=${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt)
|
||||
endif ()
|
||||
# if (SANITIZERS_ENABLED AND is_clang)
|
||||
# # TODO: gcc does not support -fsanitize-blacklist...can we do something else for gcc ?
|
||||
# if (NOT Boost_INCLUDE_DIRS AND TARGET Boost::headers)
|
||||
# get_target_property(Boost_INCLUDE_DIRS Boost::headers INTERFACE_INCLUDE_DIRECTORIES)
|
||||
# endif ()
|
||||
# message(STATUS "Adding [${Boost_INCLUDE_DIRS}] to sanitizer blacklist")
|
||||
# file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt "src:${Boost_INCLUDE_DIRS}/*")
|
||||
# target_compile_options(opts INTERFACE # ignore boost headers for sanitizing
|
||||
# -fsanitize-blacklist=${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt)
|
||||
# endif ()
|
||||
|
||||
19
conanfile.py
19
conanfile.py
@@ -1,4 +1,5 @@
|
||||
import re
|
||||
import os
|
||||
|
||||
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
|
||||
|
||||
@@ -23,7 +24,6 @@ class Xrpl(ConanFile):
|
||||
"shared": [True, False],
|
||||
"static": [True, False],
|
||||
"tests": [True, False],
|
||||
"unity": [True, False],
|
||||
"xrpld": [True, False],
|
||||
}
|
||||
|
||||
@@ -55,8 +55,10 @@ class Xrpl(ConanFile):
|
||||
"shared": False,
|
||||
"static": True,
|
||||
"tests": False,
|
||||
"unity": False,
|
||||
"xrpld": False,
|
||||
"boost/*:without_context": False,
|
||||
"boost/*:without_coroutine": True,
|
||||
"boost/*:without_coroutine2": False,
|
||||
"date/*:header_only": True,
|
||||
"ed25519/*:shared": False,
|
||||
"grpc/*:shared": False,
|
||||
@@ -125,6 +127,15 @@ class Xrpl(ConanFile):
|
||||
self.options["boost"].visibility = "global"
|
||||
if self.settings.compiler in ["clang", "gcc"]:
|
||||
self.options["boost"].without_cobalt = True
|
||||
self.options["boost"].without_context = False
|
||||
self.options["boost"].without_coroutine = True
|
||||
self.options["boost"].without_coroutine2 = False
|
||||
|
||||
# Check if environment variable exists
|
||||
if "SANITIZERS" in os.environ:
|
||||
sanitizers = os.environ["SANITIZERS"]
|
||||
if "Address" in sanitizers:
|
||||
self.default_options["fPIC"] = False
|
||||
|
||||
def requirements(self):
|
||||
# Conan 2 requires transitive headers to be specified
|
||||
@@ -168,7 +179,6 @@ class Xrpl(ConanFile):
|
||||
tc.variables["rocksdb"] = self.options.rocksdb
|
||||
tc.variables["BUILD_SHARED_LIBS"] = self.options.shared
|
||||
tc.variables["static"] = self.options.static
|
||||
tc.variables["unity"] = self.options.unity
|
||||
tc.variables["xrpld"] = self.options.xrpld
|
||||
tc.generate()
|
||||
|
||||
@@ -196,7 +206,8 @@ class Xrpl(ConanFile):
|
||||
"boost::headers",
|
||||
"boost::chrono",
|
||||
"boost::container",
|
||||
"boost::coroutine",
|
||||
"boost::context",
|
||||
"boost::coroutine2",
|
||||
"boost::date_time",
|
||||
"boost::filesystem",
|
||||
"boost::json",
|
||||
|
||||
8
docs/build/sanitizers.md
vendored
8
docs/build/sanitizers.md
vendored
@@ -89,8 +89,8 @@ cmake --build . --parallel 4
|
||||
**IMPORTANT**: ASAN with Boost produces many false positives. Use these options:
|
||||
|
||||
```bash
|
||||
export ASAN_OPTIONS="print_stacktrace=1:detect_container_overflow=0:suppressions=path/to/asan.supp:halt_on_error=0:log_path=asan.log"
|
||||
export LSAN_OPTIONS="suppressions=path/to/lsan.supp:halt_on_error=0:log_path=lsan.log"
|
||||
export ASAN_OPTIONS="include=sanitizers/suppressions/runtime-asan-options.txt:suppressions=sanitizers/suppressions/asan.supp"
|
||||
export LSAN_OPTIONS="include=sanitizers/suppressions/runtime-lsan-options.txt:suppressions=sanitizers/suppressions/lsan.supp"
|
||||
|
||||
# Run tests
|
||||
./xrpld --unittest --unittest-jobs=5
|
||||
@@ -108,7 +108,7 @@ export LSAN_OPTIONS="suppressions=path/to/lsan.supp:halt_on_error=0:log_path=lsa
|
||||
### ThreadSanitizer (TSan)
|
||||
|
||||
```bash
|
||||
export TSAN_OPTIONS="suppressions=path/to/tsan.supp halt_on_error=0 log_path=tsan.log"
|
||||
export TSAN_OPTIONS="include=sanitizers/suppressions/runtime-tsan-options.txt:suppressions=sanitizers/suppressions/tsan.supp"
|
||||
|
||||
# Run tests
|
||||
./xrpld --unittest --unittest-jobs=5
|
||||
@@ -129,7 +129,7 @@ More details [here](https://github.com/google/sanitizers/wiki/AddressSanitizerLe
|
||||
### UndefinedBehaviorSanitizer (UBSan)
|
||||
|
||||
```bash
|
||||
export UBSAN_OPTIONS="suppressions=path/to/ubsan.supp:print_stacktrace=1:halt_on_error=0:log_path=ubsan.log"
|
||||
export UBSAN_OPTIONS="include=sanitizers/suppressions/runtime-ubsan-options.txt:suppressions=sanitizers/suppressions/ubsan.supp"
|
||||
|
||||
# Run tests
|
||||
./xrpld --unittest --unittest-jobs=5
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
#pragma once
|
||||
|
||||
#include <boost/thread/tss.hpp>
|
||||
|
||||
#include <memory>
|
||||
#include <unordered_map>
|
||||
|
||||
@@ -41,21 +39,59 @@ struct LocalValues
|
||||
|
||||
// Keys are the address of a LocalValue.
|
||||
std::unordered_map<void const*, std::unique_ptr<BasicValue>> values;
|
||||
};
|
||||
|
||||
static inline void
|
||||
cleanup(LocalValues* lvs)
|
||||
// Wrapper to ensure proper cleanup when thread exits
|
||||
struct LocalValuesHolder
|
||||
{
|
||||
LocalValues* ptr = nullptr;
|
||||
|
||||
~LocalValuesHolder()
|
||||
{
|
||||
if (lvs && !lvs->onCoro)
|
||||
delete lvs;
|
||||
if (ptr && !ptr->onCoro)
|
||||
delete ptr;
|
||||
}
|
||||
};
|
||||
|
||||
template <class = void>
|
||||
boost::thread_specific_ptr<detail::LocalValues>&
|
||||
getLocalValues()
|
||||
LocalValuesHolder&
|
||||
getLocalValuesHolder();
|
||||
|
||||
inline LocalValues*&
|
||||
getLocalValuesPtr()
|
||||
{
|
||||
static boost::thread_specific_ptr<detail::LocalValues> tsp(&detail::LocalValues::cleanup);
|
||||
return tsp;
|
||||
return getLocalValuesHolder().ptr;
|
||||
}
|
||||
|
||||
inline LocalValues*
|
||||
getOrCreateLocalValues()
|
||||
{
|
||||
auto& ptr = getLocalValuesPtr();
|
||||
if (!ptr)
|
||||
{
|
||||
ptr = new LocalValues();
|
||||
ptr->onCoro = false;
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
// For coroutine support, we need explicit swap functions
|
||||
inline LocalValues*
|
||||
releaseLocalValues()
|
||||
{
|
||||
auto& ptr = getLocalValuesPtr();
|
||||
auto* result = ptr;
|
||||
ptr = nullptr;
|
||||
return result;
|
||||
}
|
||||
|
||||
inline void
|
||||
resetLocalValues(LocalValues* lvs)
|
||||
{
|
||||
auto& ptr = getLocalValuesPtr();
|
||||
// Clean up old value if it's not a coroutine's LocalValues
|
||||
if (ptr && !ptr->onCoro)
|
||||
delete ptr;
|
||||
ptr = lvs;
|
||||
}
|
||||
|
||||
} // namespace detail
|
||||
@@ -88,19 +124,10 @@ template <class T>
|
||||
T&
|
||||
LocalValue<T>::operator*()
|
||||
{
|
||||
auto lvs = detail::getLocalValues().get();
|
||||
if (!lvs)
|
||||
{
|
||||
lvs = new detail::LocalValues();
|
||||
lvs->onCoro = false;
|
||||
detail::getLocalValues().reset(lvs);
|
||||
}
|
||||
else
|
||||
{
|
||||
auto const iter = lvs->values.find(this);
|
||||
if (iter != lvs->values.end())
|
||||
return *reinterpret_cast<T*>(iter->second->get());
|
||||
}
|
||||
auto lvs = detail::getOrCreateLocalValues();
|
||||
auto const iter = lvs->values.find(this);
|
||||
if (iter != lvs->values.end())
|
||||
return *reinterpret_cast<T*>(iter->second->get());
|
||||
|
||||
return *reinterpret_cast<T*>(
|
||||
lvs->values.emplace(this, std::make_unique<detail::LocalValues::Value<T>>(t_)).first->second->get());
|
||||
|
||||
@@ -698,8 +698,12 @@ Number::normalizeToRange(T minMantissa, T maxMantissa) const
|
||||
XRPL_ASSERT_PARTS(!negative, "xrpl::Number::normalizeToRange", "Number is non-negative for unsigned range.");
|
||||
Number::normalize(negative, mantissa, exponent, minMantissa, maxMantissa);
|
||||
|
||||
auto const sign = negative ? -1 : 1;
|
||||
return std::make_pair(static_cast<T>(sign * mantissa), exponent);
|
||||
// Cast mantissa to signed type first to avoid unsigned integer overflow
|
||||
// when multiplying by negative sign
|
||||
T signedMantissa = static_cast<T>(mantissa);
|
||||
if (negative)
|
||||
signedMantissa = -signedMantissa;
|
||||
return std::make_pair(signedMantissa, exponent);
|
||||
}
|
||||
|
||||
inline constexpr Number
|
||||
|
||||
@@ -357,6 +357,7 @@ public:
|
||||
base_uint&
|
||||
operator&=(base_uint const& b)
|
||||
{
|
||||
XRPL_ASSERT(WIDTH == b.WIDTH, "input size mismatch");
|
||||
for (int i = 0; i < WIDTH; i++)
|
||||
data_[i] &= b.data_[i];
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
#include <xrpl/basics/sanitizers.h>
|
||||
#include <xrpl/beast/type_name.h>
|
||||
|
||||
#include <exception>
|
||||
@@ -24,7 +25,7 @@ LogThrow(std::string const& title);
|
||||
control to the next matching exception handler, if any.
|
||||
Otherwise, std::terminate will be called.
|
||||
*/
|
||||
[[noreturn]] inline void
|
||||
[[noreturn]] XRPL_NO_SANITIZE_ADDRESS inline void
|
||||
Rethrow()
|
||||
{
|
||||
LogThrow("Re-throwing exception");
|
||||
@@ -32,7 +33,7 @@ Rethrow()
|
||||
}
|
||||
|
||||
template <class E, class... Args>
|
||||
[[noreturn]] inline void
|
||||
[[noreturn]] XRPL_NO_SANITIZE_ADDRESS inline void
|
||||
Throw(Args&&... args)
|
||||
{
|
||||
static_assert(std::is_convertible<E*, std::exception*>::value, "Exception must derive from std::exception.");
|
||||
|
||||
6
include/xrpl/basics/sanitizers.h
Normal file
6
include/xrpl/basics/sanitizers.h
Normal file
@@ -0,0 +1,6 @@
|
||||
// Helper to disable ASan/HwASan for specific functions
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#define XRPL_NO_SANITIZE_ADDRESS __attribute__((no_sanitize("address", "hwaddress")))
|
||||
#else
|
||||
#define XRPL_NO_SANITIZE_ADDRESS
|
||||
#endif
|
||||
@@ -4,22 +4,28 @@
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
// Coroutine stack size is set to 2MB to provide sufficient headroom for
|
||||
// deep call stacks in RPC operations. With 1MB, stack exhaustion occurred
|
||||
// in production scenarios, particularly in:
|
||||
// - RPC handlers processing complex JSON (ServerHandler::processRequest)
|
||||
// - Transaction validation with deep parsing (TransactionSign::getCurrentNetworkFee)
|
||||
// - Amount parsing with boost::split operations (amountFromJson)
|
||||
// The 2MB stack provides ~50% safety margin even for the deepest observed
|
||||
// call chains while keeping memory overhead reasonable (~2MB per coroutine).
|
||||
template <class F>
|
||||
JobQueue::Coro::Coro(Coro_create_t, JobQueue& jq, JobType type, std::string const& name, F&& f)
|
||||
: jq_(jq)
|
||||
, type_(type)
|
||||
, name_(name)
|
||||
, running_(false)
|
||||
, coro_(
|
||||
[this, fn = std::forward<F>(f)](boost::coroutines::asymmetric_coroutine<void>::push_type& do_yield) {
|
||||
yield_ = &do_yield;
|
||||
yield();
|
||||
fn(shared_from_this());
|
||||
, coro_([this, fn = std::forward<F>(f)](boost::coroutines2::asymmetric_coroutine<void>::push_type& do_yield) {
|
||||
yield_ = &do_yield;
|
||||
yield();
|
||||
fn(shared_from_this());
|
||||
#ifndef NDEBUG
|
||||
finished_ = true;
|
||||
finished_ = true;
|
||||
#endif
|
||||
},
|
||||
boost::coroutines::attributes(megabytes(1)))
|
||||
})
|
||||
{
|
||||
}
|
||||
|
||||
@@ -72,13 +78,16 @@ JobQueue::Coro::resume()
|
||||
std::lock_guard lock(jq_.m_mutex);
|
||||
--jq_.nSuspend_;
|
||||
}
|
||||
auto saved = detail::getLocalValues().release();
|
||||
detail::getLocalValues().reset(&lvs_);
|
||||
auto saved = detail::releaseLocalValues();
|
||||
detail::resetLocalValues(&lvs_);
|
||||
std::lock_guard lock(mutex_);
|
||||
XRPL_ASSERT(static_cast<bool>(coro_), "xrpl::JobQueue::Coro::resume : is runnable");
|
||||
coro_();
|
||||
detail::getLocalValues().release();
|
||||
detail::getLocalValues().reset(saved);
|
||||
|
||||
// Restore the thread's original LocalValues
|
||||
detail::releaseLocalValues();
|
||||
detail::resetLocalValues(saved);
|
||||
|
||||
std::lock_guard lk(mutex_run_);
|
||||
running_ = false;
|
||||
cv_.notify_all();
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
#include <xrpl/core/detail/Workers.h>
|
||||
#include <xrpl/json/json_value.h>
|
||||
|
||||
#include <boost/coroutine/all.hpp>
|
||||
#include <boost/coroutine2/all.hpp>
|
||||
|
||||
#include <set>
|
||||
|
||||
@@ -48,8 +48,8 @@ public:
|
||||
std::mutex mutex_;
|
||||
std::mutex mutex_run_;
|
||||
std::condition_variable cv_;
|
||||
boost::coroutines::asymmetric_coroutine<void>::pull_type coro_;
|
||||
boost::coroutines::asymmetric_coroutine<void>::push_type* yield_;
|
||||
boost::coroutines2::asymmetric_coroutine<void>::pull_type coro_;
|
||||
boost::coroutines2::asymmetric_coroutine<void>::push_type* yield_;
|
||||
#ifndef NDEBUG
|
||||
bool finished_ = false;
|
||||
#endif
|
||||
|
||||
@@ -29,6 +29,9 @@ public:
|
||||
bool sslVerify,
|
||||
beast::Journal j);
|
||||
|
||||
static void
|
||||
cleanupSSLContext();
|
||||
|
||||
static void
|
||||
get(bool bSSL,
|
||||
boost::asio::io_context& io_context,
|
||||
|
||||
@@ -133,6 +133,10 @@ public:
|
||||
std::uint32_t ledgerSeq,
|
||||
std::function<void(std::shared_ptr<NodeObject> const&)>&& callback);
|
||||
|
||||
/** Remove expired entries from the positive and negative caches. */
|
||||
virtual void
|
||||
sweep() = 0;
|
||||
|
||||
/** Gather statistics pertaining to read and write activities.
|
||||
*
|
||||
* @param obj Json object reference into which to place counters.
|
||||
|
||||
@@ -23,6 +23,32 @@ public:
|
||||
beast::Journal j)
|
||||
: Database(scheduler, readThreads, config, j), backend_(std::move(backend))
|
||||
{
|
||||
std::optional<int> cacheSize, cacheAge;
|
||||
|
||||
if (config.exists("cache_size"))
|
||||
{
|
||||
cacheSize = get<int>(config, "cache_size");
|
||||
if (cacheSize.value() < 0)
|
||||
{
|
||||
Throw<std::runtime_error>("Specified negative value for cache_size");
|
||||
}
|
||||
}
|
||||
|
||||
if (config.exists("cache_age"))
|
||||
{
|
||||
cacheAge = get<int>(config, "cache_age");
|
||||
if (cacheAge.value() < 0)
|
||||
{
|
||||
Throw<std::runtime_error>("Specified negative value for cache_age");
|
||||
}
|
||||
}
|
||||
|
||||
if (cacheSize != 0 || cacheAge != 0)
|
||||
{
|
||||
cache_ = std::make_shared<TaggedCache<uint256, NodeObject>>(
|
||||
"DatabaseNodeImp", cacheSize.value_or(0), std::chrono::minutes(cacheAge.value_or(0)), stopwatch(), j);
|
||||
}
|
||||
|
||||
XRPL_ASSERT(
|
||||
backend_,
|
||||
"xrpl::NodeStore::DatabaseNodeImp::DatabaseNodeImp : non-null "
|
||||
@@ -77,7 +103,13 @@ public:
|
||||
std::uint32_t ledgerSeq,
|
||||
std::function<void(std::shared_ptr<NodeObject> const&)>&& callback) override;
|
||||
|
||||
void
|
||||
sweep() override;
|
||||
|
||||
private:
|
||||
// Cache for database objects. This cache is not always initialized. Check
|
||||
// for null before using.
|
||||
std::shared_ptr<TaggedCache<uint256, NodeObject>> cache_;
|
||||
// Persistent key/value storage
|
||||
std::shared_ptr<Backend> backend_;
|
||||
|
||||
|
||||
@@ -55,6 +55,9 @@ public:
|
||||
void
|
||||
sync() override;
|
||||
|
||||
void
|
||||
sweep() override;
|
||||
|
||||
private:
|
||||
std::shared_ptr<Backend> writableBackend_;
|
||||
std::shared_ptr<Backend> archiveBackend_;
|
||||
|
||||
@@ -229,7 +229,7 @@ missing_field_error(std::string const& name)
|
||||
}
|
||||
|
||||
inline Json::Value
|
||||
missing_field_error(Json::StaticString name)
|
||||
missing_field_error(Json::StaticString const& name)
|
||||
{
|
||||
return missing_field_error(std::string(name));
|
||||
}
|
||||
@@ -247,7 +247,7 @@ object_field_error(std::string const& name)
|
||||
}
|
||||
|
||||
inline Json::Value
|
||||
object_field_error(Json::StaticString name)
|
||||
object_field_error(Json::StaticString const& name)
|
||||
{
|
||||
return object_field_error(std::string(name));
|
||||
}
|
||||
@@ -259,7 +259,7 @@ invalid_field_message(std::string const& name)
|
||||
}
|
||||
|
||||
inline std::string
|
||||
invalid_field_message(Json::StaticString name)
|
||||
invalid_field_message(Json::StaticString const& name)
|
||||
{
|
||||
return invalid_field_message(std::string(name));
|
||||
}
|
||||
@@ -271,7 +271,7 @@ invalid_field_error(std::string const& name)
|
||||
}
|
||||
|
||||
inline Json::Value
|
||||
invalid_field_error(Json::StaticString name)
|
||||
invalid_field_error(Json::StaticString const& name)
|
||||
{
|
||||
return invalid_field_error(std::string(name));
|
||||
}
|
||||
@@ -283,7 +283,7 @@ expected_field_message(std::string const& name, std::string const& type)
|
||||
}
|
||||
|
||||
inline std::string
|
||||
expected_field_message(Json::StaticString name, std::string const& type)
|
||||
expected_field_message(Json::StaticString const& name, std::string const& type)
|
||||
{
|
||||
return expected_field_message(std::string(name), type);
|
||||
}
|
||||
@@ -295,7 +295,7 @@ expected_field_error(std::string const& name, std::string const& type)
|
||||
}
|
||||
|
||||
inline Json::Value
|
||||
expected_field_error(Json::StaticString name, std::string const& type)
|
||||
expected_field_error(Json::StaticString const& name, std::string const& type)
|
||||
{
|
||||
return expected_field_error(std::string(name), type);
|
||||
}
|
||||
|
||||
@@ -46,6 +46,17 @@ public:
|
||||
return id_;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the SHAMapNodeID of a child node at the specified branch.
|
||||
*
|
||||
* @param m The branch number (0-15) indicating which child to descend to.
|
||||
* In the SHAMap's 16-way radix tree, each inner node has up to
|
||||
* 16 children, indexed by the corresponding nibble (4 bits) of
|
||||
* the key at the current depth.
|
||||
* @return SHAMapNodeID of the child node at branch m.
|
||||
* @throws std::logic_error if this node is at the maximum leaf depth (64)
|
||||
* or if the node's id doesn't match its depth mask.
|
||||
*/
|
||||
SHAMapNodeID
|
||||
getChildNodeID(unsigned int m) const;
|
||||
|
||||
|
||||
@@ -1,29 +1,25 @@
|
||||
# The idea is to empty this file gradually by fixing the underlying issues and removing suppressions.
|
||||
# The idea is to empty this file gradually by fixing the underlying issues and removing suppresions.
|
||||
#
|
||||
# ASAN_OPTIONS="print_stacktrace=1:detect_container_overflow=0:suppressions=sanitizers/suppressions/asan.supp:halt_on_error=0"
|
||||
# ASAN_OPTIONS="suppressions=sanitizers/suppressions/asan.supp:halt_on_error=0:detect_stack_use_after_return=0"
|
||||
#
|
||||
# The detect_container_overflow=0 option disables false positives from:
|
||||
# - Boost intrusive containers (slist_iterator.hpp, hashtable.hpp, aged_unordered_container.h)
|
||||
# - Boost context/coroutine stack switching (Workers.cpp, thread.h)
|
||||
# Boost coroutines cause multiple ASAN false positives due to swapcontext/fiber stack switching.
|
||||
# ASAN cannot correctly track stack memory across coroutine context switches, leading to:
|
||||
# - stack-use-after-return errors
|
||||
# - stack-use-after-scope errors
|
||||
# - stack-buffer-overflow errors in seemingly unrelated code (e.g., std::chrono::steady_clock::now())
|
||||
# - stack-buffer-underflow errors in seemingly unrelated code (e.g., xxhasher::retrieveHash(), clock_gettime)
|
||||
#
|
||||
# See: https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow
|
||||
# These are suppressed via:
|
||||
# 1. Runtime option: detect_stack_use_after_return=0 (in ASAN_OPTIONS in CI workflow)
|
||||
# 2. Compile-time flag: -fno-sanitize-address-use-after-scope (in cmake/XrplSanitizers.cmake)
|
||||
#
|
||||
# Note: stack-buffer-overflow false positives from coroutines cannot be fully suppressed
|
||||
# without disabling ASAN entirely for Boost. Clang builds use -fsanitize-blacklist to
|
||||
# exclude Boost headers, but GCC does not support this feature.
|
||||
#
|
||||
# See: https://github.com/google/sanitizers/issues/189
|
||||
|
||||
# Boost
|
||||
interceptor_name:boost/asio
|
||||
|
||||
# Leaks in Doctest tests: xrpl.test.*
|
||||
interceptor_name:src/libxrpl/net/HTTPClient.cpp
|
||||
interceptor_name:src/libxrpl/net/RegisterSSLCerts.cpp
|
||||
interceptor_name:src/tests/libxrpl/net/HTTPClient.cpp
|
||||
interceptor_name:xrpl/net/AutoSocket.h
|
||||
interceptor_name:xrpl/net/HTTPClient.h
|
||||
interceptor_name:xrpl/net/HTTPClientSSLContext.h
|
||||
interceptor_name:xrpl/net/RegisterSSLCerts.h
|
||||
|
||||
# Suppress false positive stack-buffer errors in thread stack allocation
|
||||
# Related to ASan's __asan_handle_no_return warnings (github.com/google/sanitizers/issues/189)
|
||||
# These occur during multi-threaded test initialization on macOS
|
||||
interceptor_name:memcpy
|
||||
# Boost - false positives from stackful coroutines
|
||||
interceptor_name:clock_gettime
|
||||
interceptor_name:__bzero
|
||||
interceptor_name:__asan_memset
|
||||
interceptor_name:__asan_memcpy
|
||||
interceptor_name:nudb
|
||||
|
||||
@@ -1,16 +1,13 @@
|
||||
# The idea is to empty this file gradually by fixing the underlying issues and removing suppresions.
|
||||
|
||||
# Suppress leaks detected by asan in rippled code.
|
||||
leak:src/libxrpl/net/HTTPClient.cpp
|
||||
leak:src/libxrpl/net/RegisterSSLCerts.cpp
|
||||
leak:src/tests/libxrpl/net/HTTPClient.cpp
|
||||
leak:xrpl/net/AutoSocket.h
|
||||
leak:xrpl/net/HTTPClient.h
|
||||
leak:xrpl/net/HTTPClientSSLContext.h
|
||||
leak:xrpl/net/RegisterSSLCerts.h
|
||||
leak:ripple::HTTPClient
|
||||
leak:ripple::HTTPClientImp
|
||||
|
||||
# Suppress leaks detected by asan in boost code.
|
||||
leak:boost::asio
|
||||
leak:boost/asio
|
||||
# These are false positives from Boost.Asio SSL internals that use OpenSSL BIO structures.
|
||||
# The BIO structures are managed by OpenSSL's internal reference counting and freed at process exit.
|
||||
|
||||
#leak:boost::asio
|
||||
#leak:boost/asio
|
||||
|
||||
# OpenSSL BIO memory is managed internally and freed at process exit
|
||||
leak:CRYPTO_malloc
|
||||
leak:bio_make_pair
|
||||
leak:BIO_new_bio_pair
|
||||
|
||||
10
sanitizers/suppressions/runtime-asan-options.txt
Normal file
10
sanitizers/suppressions/runtime-asan-options.txt
Normal file
@@ -0,0 +1,10 @@
|
||||
detect_container_overflow=0
|
||||
detect_stack_use_after_return=0
|
||||
detect_stack-buffer-overflow=0
|
||||
debug=true
|
||||
halt_on_error=false
|
||||
print_stats=true
|
||||
print_legend=true
|
||||
symbolize=true
|
||||
print_cmdline=true
|
||||
print_summary=true
|
||||
1
sanitizers/suppressions/runtime-lsan-options.txt
Normal file
1
sanitizers/suppressions/runtime-lsan-options.txt
Normal file
@@ -0,0 +1 @@
|
||||
halt_on_error=false
|
||||
3
sanitizers/suppressions/runtime-tsan-options.txt
Normal file
3
sanitizers/suppressions/runtime-tsan-options.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
halt_on_error=false
|
||||
verbosity=1
|
||||
second_deadlock_stack=1
|
||||
1
sanitizers/suppressions/runtime-ubsan-options.txt
Normal file
1
sanitizers/suppressions/runtime-ubsan-options.txt
Normal file
@@ -0,0 +1 @@
|
||||
halt_on_error=false
|
||||
@@ -27,3 +27,8 @@ src:core/JobQueue.cpp
|
||||
src:libxrpl/beast/utility/beast_Journal.cpp
|
||||
src:test/beast/beast_PropertyStream_test.cpp
|
||||
src:src/test/app/Invariants_test.cpp
|
||||
|
||||
# Boost coroutines cause false positive stack-buffer-underflow in xxhasher
|
||||
# This is a known ASAN limitation with stackful coroutines
|
||||
# See: https://github.com/google/sanitizers/issues/189
|
||||
src:beast/hash/xxhasher.h
|
||||
|
||||
@@ -140,6 +140,7 @@ unsigned-integer-overflow:src/libxrpl/protocol/tokens.cpp
|
||||
unsigned-integer-overflow:src/libxrpl/shamap/SHAMap.cpp
|
||||
unsigned-integer-overflow:src/test/app/Batch_test.cpp
|
||||
unsigned-integer-overflow:src/test/app/Invariants_test.cpp
|
||||
unsigned-integer-overflow:src/test/app/Loan_test.cpp
|
||||
unsigned-integer-overflow:src/test/app/NFToken_test.cpp
|
||||
unsigned-integer-overflow:src/test/app/Offer_test.cpp
|
||||
unsigned-integer-overflow:src/test/app/Path_test.cpp
|
||||
|
||||
14
src/libxrpl/basics/LocalValue.cpp
Normal file
14
src/libxrpl/basics/LocalValue.cpp
Normal file
@@ -0,0 +1,14 @@
|
||||
#include <xrpl/basics/LocalValue.h>
|
||||
|
||||
namespace xrpl {
|
||||
namespace detail {
|
||||
|
||||
LocalValuesHolder&
|
||||
getLocalValuesHolder()
|
||||
{
|
||||
thread_local LocalValuesHolder holder;
|
||||
return holder;
|
||||
}
|
||||
|
||||
} // namespace detail
|
||||
} // namespace xrpl
|
||||
@@ -11,6 +11,7 @@
|
||||
#include <numeric>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <type_traits>
|
||||
#include <utility>
|
||||
|
||||
@@ -107,7 +108,7 @@ public:
|
||||
int& exponent,
|
||||
internalrep const& minMantissa,
|
||||
internalrep const& maxMantissa,
|
||||
std::string location);
|
||||
std::string_view location);
|
||||
|
||||
// Modify the result to the correctly rounded value
|
||||
template <UnsignedMantissa T>
|
||||
@@ -116,7 +117,7 @@ public:
|
||||
|
||||
// Modify the result to the correctly rounded value
|
||||
void
|
||||
doRound(rep& drops, std::string location);
|
||||
doRound(rep& drops, std::string_view location);
|
||||
|
||||
private:
|
||||
void
|
||||
@@ -238,7 +239,7 @@ Number::Guard::doRoundUp(
|
||||
int& exponent,
|
||||
internalrep const& minMantissa,
|
||||
internalrep const& maxMantissa,
|
||||
std::string location)
|
||||
std::string_view location)
|
||||
{
|
||||
auto r = round();
|
||||
if (r == 1 || (r == 0 && (mantissa & 1) == 1))
|
||||
@@ -254,7 +255,7 @@ Number::Guard::doRoundUp(
|
||||
}
|
||||
bringIntoRange(negative, mantissa, exponent, minMantissa);
|
||||
if (exponent > maxExponent)
|
||||
throw std::overflow_error(location);
|
||||
Throw<std::overflow_error>(std::string(location));
|
||||
}
|
||||
|
||||
template <UnsignedMantissa T>
|
||||
@@ -276,7 +277,7 @@ Number::Guard::doRoundDown(bool& negative, T& mantissa, int& exponent, internalr
|
||||
|
||||
// Modify the result to the correctly rounded value
|
||||
void
|
||||
Number::Guard::doRound(rep& drops, std::string location)
|
||||
Number::Guard::doRound(rep& drops, std::string_view location)
|
||||
{
|
||||
auto r = round();
|
||||
if (r == 1 || (r == 0 && (drops & 1) == 1))
|
||||
@@ -290,7 +291,7 @@ Number::Guard::doRound(rep& drops, std::string location)
|
||||
// or "(maxRep + 1) / 10", neither of which will round up when
|
||||
// converting to rep, though the latter might overflow _before_
|
||||
// rounding.
|
||||
throw std::overflow_error(location); // LCOV_EXCL_LINE
|
||||
throw std::overflow_error(std::string(location)); // LCOV_EXCL_LINE
|
||||
}
|
||||
++drops;
|
||||
}
|
||||
@@ -427,8 +428,8 @@ doNormalize(
|
||||
mantissa_ = m;
|
||||
|
||||
g.doRoundUp(negative, mantissa_, exponent_, minMantissa, maxMantissa, "Number::normalize 2");
|
||||
XRPL_ASSERT_PARTS(
|
||||
mantissa_ >= minMantissa && mantissa_ <= maxMantissa, "xrpl::doNormalize", "final mantissa fits in range");
|
||||
// XRPL_ASSERT_PARTS(
|
||||
// mantissa_ >= minMantissa && mantissa_ <= maxMantissa, "xrpl::doNormalize", "final mantissa fits in range");
|
||||
}
|
||||
|
||||
template <>
|
||||
|
||||
@@ -191,17 +191,17 @@ Value::Value(ValueType type) : type_(type), allocated_(0)
|
||||
}
|
||||
}
|
||||
|
||||
Value::Value(Int value) : type_(intValue)
|
||||
Value::Value(Int value) : type_(intValue), allocated_(0)
|
||||
{
|
||||
value_.int_ = value;
|
||||
}
|
||||
|
||||
Value::Value(UInt value) : type_(uintValue)
|
||||
Value::Value(UInt value) : type_(uintValue), allocated_(0)
|
||||
{
|
||||
value_.uint_ = value;
|
||||
}
|
||||
|
||||
Value::Value(double value) : type_(realValue)
|
||||
Value::Value(double value) : type_(realValue), allocated_(0)
|
||||
{
|
||||
value_.real_ = value;
|
||||
}
|
||||
@@ -227,7 +227,7 @@ Value::Value(StaticString const& value) : type_(stringValue), allocated_(false)
|
||||
value_.string_ = const_cast<char*>(value.c_str());
|
||||
}
|
||||
|
||||
Value::Value(bool value) : type_(booleanValue)
|
||||
Value::Value(bool value) : type_(booleanValue), allocated_(0)
|
||||
{
|
||||
value_.bool_ = value;
|
||||
}
|
||||
|
||||
@@ -26,6 +26,12 @@ HTTPClient::initializeSSLContext(
|
||||
httpClientSSLContext.emplace(sslVerifyDir, sslVerifyFile, sslVerify, j);
|
||||
}
|
||||
|
||||
void
|
||||
HTTPClient::cleanupSSLContext()
|
||||
{
|
||||
httpClientSSLContext.reset();
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
//
|
||||
// Fetch a web page via http or https.
|
||||
|
||||
@@ -85,7 +85,8 @@ registerSSLCerts(boost::asio::ssl::context& ctx, boost::system::error_code& ec,
|
||||
// There is a very unpleasant interaction between <wincrypt> and
|
||||
// openssl x509 types (namely the former has macros that stomp
|
||||
// on the latter), these undefs allow this TU to be safely used in
|
||||
// unity builds without messing up subsequent TUs.
|
||||
// unity builds without messing up subsequent TUs. Although we
|
||||
// no longer use unity builds, leaving the undefs here does no harm.
|
||||
#if BOOST_OS_WINDOWS
|
||||
#undef X509_NAME
|
||||
#undef X509_EXTENSIONS
|
||||
|
||||
@@ -10,6 +10,11 @@ DatabaseNodeImp::store(NodeObjectType type, Blob&& data, uint256 const& hash, st
|
||||
|
||||
auto obj = NodeObject::createObject(type, std::move(data), hash);
|
||||
backend_->store(obj);
|
||||
if (cache_)
|
||||
{
|
||||
// After the store, replace a negative cache entry if there is one
|
||||
cache_->canonicalize(hash, obj, [](std::shared_ptr<NodeObject> const& n) { return n->getType() == hotDUMMY; });
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
@@ -18,36 +23,77 @@ DatabaseNodeImp::asyncFetch(
|
||||
std::uint32_t ledgerSeq,
|
||||
std::function<void(std::shared_ptr<NodeObject> const&)>&& callback)
|
||||
{
|
||||
if (cache_)
|
||||
{
|
||||
std::shared_ptr<NodeObject> obj = cache_->fetch(hash);
|
||||
if (obj)
|
||||
{
|
||||
callback(obj->getType() == hotDUMMY ? nullptr : obj);
|
||||
return;
|
||||
}
|
||||
}
|
||||
Database::asyncFetch(hash, ledgerSeq, std::move(callback));
|
||||
}
|
||||
|
||||
void
|
||||
DatabaseNodeImp::sweep()
|
||||
{
|
||||
if (cache_)
|
||||
cache_->sweep();
|
||||
}
|
||||
|
||||
std::shared_ptr<NodeObject>
|
||||
DatabaseNodeImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchReport& fetchReport, bool duplicate)
|
||||
{
|
||||
std::shared_ptr<NodeObject> nodeObject = nullptr;
|
||||
Status status;
|
||||
std::shared_ptr<NodeObject> nodeObject = cache_ ? cache_->fetch(hash) : nullptr;
|
||||
|
||||
try
|
||||
if (!nodeObject)
|
||||
{
|
||||
status = backend_->fetch(hash.data(), &nodeObject);
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": Exception fetching from backend: " << e.what();
|
||||
Rethrow();
|
||||
}
|
||||
JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record not " << (cache_ ? "cached" : "found");
|
||||
|
||||
switch (status)
|
||||
Status status;
|
||||
|
||||
try
|
||||
{
|
||||
status = backend_->fetch(hash.data(), &nodeObject);
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": Exception fetching from backend: " << e.what();
|
||||
Rethrow();
|
||||
}
|
||||
|
||||
switch (status)
|
||||
{
|
||||
case ok:
|
||||
if (cache_)
|
||||
{
|
||||
if (nodeObject)
|
||||
cache_->canonicalize_replace_client(hash, nodeObject);
|
||||
else
|
||||
{
|
||||
auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
|
||||
cache_->canonicalize_replace_client(hash, notFound);
|
||||
if (notFound->getType() != hotDUMMY)
|
||||
nodeObject = notFound;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case notFound:
|
||||
break;
|
||||
case dataCorrupt:
|
||||
JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": nodestore data is corrupted";
|
||||
break;
|
||||
default:
|
||||
JLOG(j_.warn()) << "fetchNodeObject " << hash << ": backend returns unknown result " << status;
|
||||
break;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
case ok:
|
||||
case notFound:
|
||||
break;
|
||||
case dataCorrupt:
|
||||
JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": nodestore data is corrupted";
|
||||
break;
|
||||
default:
|
||||
JLOG(j_.warn()) << "fetchNodeObject " << hash << ": backend returns unknown result " << status;
|
||||
break;
|
||||
JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record found in cache";
|
||||
if (nodeObject->getType() == hotDUMMY)
|
||||
nodeObject.reset();
|
||||
}
|
||||
|
||||
if (nodeObject)
|
||||
@@ -59,36 +105,66 @@ DatabaseNodeImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchReport
|
||||
std::vector<std::shared_ptr<NodeObject>>
|
||||
DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
|
||||
{
|
||||
std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
|
||||
using namespace std::chrono;
|
||||
auto const before = steady_clock::now();
|
||||
|
||||
std::vector<uint256 const*> batch{};
|
||||
batch.reserve(hashes.size());
|
||||
std::unordered_map<uint256 const*, size_t> indexMap;
|
||||
std::vector<uint256 const*> cacheMisses;
|
||||
uint64_t hits = 0;
|
||||
uint64_t fetches = 0;
|
||||
for (size_t i = 0; i < hashes.size(); ++i)
|
||||
{
|
||||
auto const& hash = hashes[i];
|
||||
batch.push_back(&hash);
|
||||
}
|
||||
|
||||
// Get the node objects that match the hashes from the backend. To protect
|
||||
// against the backends returning fewer or more results than expected, the
|
||||
// container is resized to the number of hashes.
|
||||
auto results = backend_->fetchBatch(batch).first;
|
||||
XRPL_ASSERT(
|
||||
results.size() == hashes.size() || results.empty(),
|
||||
"number of output objects either matches number of input hashes or is empty");
|
||||
results.resize(hashes.size());
|
||||
for (size_t i = 0; i < results.size(); ++i)
|
||||
{
|
||||
if (!results[i])
|
||||
// See if the object already exists in the cache
|
||||
auto nObj = cache_ ? cache_->fetch(hash) : nullptr;
|
||||
++fetches;
|
||||
if (!nObj)
|
||||
{
|
||||
JLOG(j_.error()) << "fetchBatch - "
|
||||
<< "record not found in db. hash = " << strHex(hashes[i]);
|
||||
// Try the database
|
||||
indexMap[&hash] = i;
|
||||
cacheMisses.push_back(&hash);
|
||||
}
|
||||
else
|
||||
{
|
||||
results[i] = nObj->getType() == hotDUMMY ? nullptr : nObj;
|
||||
// It was in the cache.
|
||||
++hits;
|
||||
}
|
||||
}
|
||||
|
||||
JLOG(j_.debug()) << "fetchBatch - cache hits = " << (hashes.size() - cacheMisses.size())
|
||||
<< " - cache misses = " << cacheMisses.size();
|
||||
auto dbResults = backend_->fetchBatch(cacheMisses).first;
|
||||
|
||||
for (size_t i = 0; i < dbResults.size(); ++i)
|
||||
{
|
||||
auto nObj = std::move(dbResults[i]);
|
||||
size_t index = indexMap[cacheMisses[i]];
|
||||
auto const& hash = hashes[index];
|
||||
|
||||
if (nObj)
|
||||
{
|
||||
// Ensure all threads get the same object
|
||||
if (cache_)
|
||||
cache_->canonicalize_replace_client(hash, nObj);
|
||||
}
|
||||
else
|
||||
{
|
||||
JLOG(j_.error()) << "fetchBatch - "
|
||||
<< "record not found in db or cache. hash = " << strHex(hash);
|
||||
if (cache_)
|
||||
{
|
||||
auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
|
||||
cache_->canonicalize_replace_client(hash, notFound);
|
||||
if (notFound->getType() != hotDUMMY)
|
||||
nObj = std::move(notFound);
|
||||
}
|
||||
}
|
||||
results[index] = std::move(nObj);
|
||||
}
|
||||
|
||||
auto fetchDurationUs = std::chrono::duration_cast<std::chrono::microseconds>(steady_clock::now() - before).count();
|
||||
updateFetchMetrics(hashes.size(), 0, fetchDurationUs);
|
||||
updateFetchMetrics(fetches, hits, fetchDurationUs);
|
||||
return results;
|
||||
}
|
||||
|
||||
|
||||
@@ -93,6 +93,12 @@ DatabaseRotatingImp::store(NodeObjectType type, Blob&& data, uint256 const& hash
|
||||
storeStats(1, nObj->getData().size());
|
||||
}
|
||||
|
||||
void
|
||||
DatabaseRotatingImp::sweep()
|
||||
{
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
std::shared_ptr<NodeObject>
|
||||
DatabaseRotatingImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchReport& fetchReport, bool duplicate)
|
||||
{
|
||||
|
||||
@@ -984,7 +984,12 @@ amountFromJson(SField const& name, Json::Value const& v)
|
||||
else if (v.isString())
|
||||
{
|
||||
std::string val = v.asString();
|
||||
// Pre-allocate to avoid reallocation during split. This function is often
|
||||
// called deep in the RPC stack (via JSON parsing) where stack space is
|
||||
// limited. ASAN detected stack-buffer-overflow here at ~95% coroutine
|
||||
// stack usage (1001376/1048576 bytes). Coroutine stack increased to 2MB.
|
||||
std::vector<std::string> elements;
|
||||
elements.reserve(3);
|
||||
boost::split(elements, val, boost::is_any_of("\t\n\r ,/"));
|
||||
|
||||
if (elements.size() > 3)
|
||||
|
||||
@@ -69,7 +69,7 @@ make_name(std::string const& object, std::string const& field)
|
||||
if (field.empty())
|
||||
return object;
|
||||
|
||||
return object + "." + field;
|
||||
return {object + "." + field};
|
||||
}
|
||||
|
||||
static inline Json::Value
|
||||
|
||||
@@ -6,7 +6,11 @@
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
static uint256 const&
|
||||
// Returns a depth mask by value to avoid potential lifetime issues in
|
||||
// multi-threaded contexts. Returning by const reference to a static member
|
||||
// could trigger stack-use-after-scope errors when the reference is used in
|
||||
// temporary expressions with operator& in concurrent coroutine scenarios.
|
||||
static uint256 const
|
||||
depthMask(unsigned int depth)
|
||||
{
|
||||
enum { mask_size = 65 };
|
||||
@@ -67,7 +71,8 @@ SHAMapNodeID::getChildNodeID(unsigned int m) const
|
||||
if (depth_ >= SHAMap::leafDepth)
|
||||
Throw<std::logic_error>("Request for child node ID of " + to_string(*this));
|
||||
|
||||
if (id_ != (id_ & depthMask(depth_)))
|
||||
auto const idAtDepth = id_ & depthMask(depth_);
|
||||
if (id_ != idAtDepth)
|
||||
Throw<std::logic_error>("Incorrect mask for " + to_string(*this));
|
||||
|
||||
SHAMapNodeID node{depth_ + 1, id_};
|
||||
|
||||
@@ -490,8 +490,19 @@ public:
|
||||
Env env(*this, envconfig(onlineDelete));
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
// Create NodeStore with two backends to allow online deletion of data.
|
||||
// Normally, SHAMapStoreImp handles all these details.
|
||||
// Create the backend. Normally, SHAMapStoreImp handles all these
|
||||
// details
|
||||
auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
|
||||
|
||||
// Provide default values:
|
||||
if (!nscfg.exists("cache_size"))
|
||||
nscfg.set(
|
||||
"cache_size", std::to_string(env.app().config().getValueFor(SizedItem::treeCacheSize, std::nullopt)));
|
||||
|
||||
if (!nscfg.exists("cache_age"))
|
||||
nscfg.set(
|
||||
"cache_age", std::to_string(env.app().config().getValueFor(SizedItem::treeCacheAge, std::nullopt)));
|
||||
|
||||
NodeStoreScheduler scheduler(env.app().getJobQueue());
|
||||
|
||||
std::string const writableDb = "write";
|
||||
@@ -499,8 +510,9 @@ public:
|
||||
auto writableBackend = makeBackendRotating(env, scheduler, writableDb);
|
||||
auto archiveBackend = makeBackendRotating(env, scheduler, archiveDb);
|
||||
|
||||
// Create NodeStore with two backends to allow online deletion of
|
||||
// data
|
||||
constexpr int readThreads = 4;
|
||||
auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
|
||||
auto dbr = std::make_unique<NodeStore::DatabaseRotatingImp>(
|
||||
scheduler,
|
||||
readThreads,
|
||||
|
||||
@@ -148,7 +148,7 @@ private:
|
||||
std::vector<std::string> emptyCfgKeys;
|
||||
struct publisher
|
||||
{
|
||||
publisher(FetchListConfig const& c) : cfg{c}
|
||||
publisher(FetchListConfig const& c) : cfg{c}, isRetry{false}
|
||||
{
|
||||
}
|
||||
std::shared_ptr<TrustedPublisherServer> server;
|
||||
|
||||
@@ -1,96 +0,0 @@
|
||||
#include <test/jtx.h>
|
||||
|
||||
#include <xrpld/core/ConfigSections.h>
|
||||
|
||||
#include <xrpl/protocol/jss.h>
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
class Submit_test : public beast::unit_test::suite
|
||||
{
|
||||
public:
|
||||
void
|
||||
testAugmentedFields()
|
||||
{
|
||||
testcase("Augmented fields in sign-and-submit mode");
|
||||
|
||||
using namespace test::jtx;
|
||||
|
||||
// Enable signing support in config
|
||||
Env env{*this, envconfig([](std::unique_ptr<Config> cfg) {
|
||||
cfg->loadFromString("[" SECTION_SIGNING_SUPPORT "]\ntrue");
|
||||
return cfg;
|
||||
})};
|
||||
|
||||
Account const alice{"alice"};
|
||||
Account const bob{"bob"};
|
||||
|
||||
env.fund(XRP(10000), alice, bob);
|
||||
env.close();
|
||||
|
||||
// Test 1: Sign-and-submit mode should return augmented fields
|
||||
{
|
||||
Json::Value jv;
|
||||
jv[jss::tx_json][jss::TransactionType] = jss::Payment;
|
||||
jv[jss::tx_json][jss::Account] = alice.human();
|
||||
jv[jss::tx_json][jss::Destination] = bob.human();
|
||||
jv[jss::tx_json][jss::Amount] = XRP(100).value().getJson(JsonOptions::none);
|
||||
jv[jss::secret] = alice.name();
|
||||
|
||||
auto const result = env.rpc("json", "submit", to_string(jv))[jss::result];
|
||||
|
||||
// These are the augmented fields that should be present
|
||||
BEAST_EXPECT(result.isMember(jss::engine_result));
|
||||
BEAST_EXPECT(result.isMember(jss::engine_result_code));
|
||||
BEAST_EXPECT(result.isMember(jss::engine_result_message));
|
||||
|
||||
// New augmented fields from issue #3125
|
||||
BEAST_EXPECT(result.isMember(jss::accepted));
|
||||
BEAST_EXPECT(result.isMember(jss::applied));
|
||||
BEAST_EXPECT(result.isMember(jss::broadcast));
|
||||
BEAST_EXPECT(result.isMember(jss::queued));
|
||||
BEAST_EXPECT(result.isMember(jss::kept));
|
||||
|
||||
// Current ledger state fields
|
||||
BEAST_EXPECT(result.isMember(jss::account_sequence_next));
|
||||
BEAST_EXPECT(result.isMember(jss::account_sequence_available));
|
||||
BEAST_EXPECT(result.isMember(jss::open_ledger_cost));
|
||||
BEAST_EXPECT(result.isMember(jss::validated_ledger_index));
|
||||
|
||||
// Verify basic transaction fields
|
||||
BEAST_EXPECT(result.isMember(jss::tx_blob));
|
||||
BEAST_EXPECT(result.isMember(jss::tx_json));
|
||||
}
|
||||
|
||||
// Test 2: Binary blob mode should also return augmented fields (regression test)
|
||||
{
|
||||
auto jt = env.jt(pay(alice, bob, XRP(100)));
|
||||
Serializer s;
|
||||
jt.stx->add(s);
|
||||
|
||||
auto const result = env.rpc("submit", strHex(s.slice()))[jss::result];
|
||||
|
||||
// Verify augmented fields are present in binary mode too
|
||||
BEAST_EXPECT(result.isMember(jss::engine_result));
|
||||
BEAST_EXPECT(result.isMember(jss::accepted));
|
||||
BEAST_EXPECT(result.isMember(jss::applied));
|
||||
BEAST_EXPECT(result.isMember(jss::broadcast));
|
||||
BEAST_EXPECT(result.isMember(jss::queued));
|
||||
BEAST_EXPECT(result.isMember(jss::kept));
|
||||
BEAST_EXPECT(result.isMember(jss::account_sequence_next));
|
||||
BEAST_EXPECT(result.isMember(jss::account_sequence_available));
|
||||
BEAST_EXPECT(result.isMember(jss::open_ledger_cost));
|
||||
BEAST_EXPECT(result.isMember(jss::validated_ledger_index));
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
run() override
|
||||
{
|
||||
testAugmentedFields();
|
||||
}
|
||||
};
|
||||
|
||||
BEAST_DEFINE_TESTSUITE(Submit, rpc, xrpl);
|
||||
|
||||
} // namespace xrpl
|
||||
@@ -183,6 +183,9 @@ private:
|
||||
};
|
||||
|
||||
// Helper function to run HTTP client test
|
||||
// Note: Caller must ensure HTTPClient::initializeSSLContext has been called
|
||||
// before this function, and HTTPClient::cleanupSSLContext is called after
|
||||
// all tests are completed.
|
||||
bool
|
||||
runHTTPTest(
|
||||
TestHTTPServer& server,
|
||||
@@ -190,14 +193,9 @@ runHTTPTest(
|
||||
bool& completed,
|
||||
int& resultStatus,
|
||||
std::string& resultData,
|
||||
boost::system::error_code& resultError)
|
||||
boost::system::error_code& resultError,
|
||||
beast::Journal& j)
|
||||
{
|
||||
// Create a null journal for testing
|
||||
beast::Journal j{TestSink::instance()};
|
||||
|
||||
// Initialize HTTPClient SSL context
|
||||
HTTPClient::initializeSSLContext("", "", false, j);
|
||||
|
||||
HTTPClient::get(
|
||||
false, // no SSL
|
||||
server.ioc(),
|
||||
@@ -230,6 +228,9 @@ runHTTPTest(
|
||||
}
|
||||
}
|
||||
|
||||
// Drain any remaining handlers to ensure proper cleanup of HTTPClientImp
|
||||
server.ioc().poll();
|
||||
|
||||
return completed;
|
||||
}
|
||||
|
||||
@@ -258,18 +259,27 @@ TEST(HTTPClient, case_insensitive_content_length)
|
||||
std::string resultData;
|
||||
boost::system::error_code resultError;
|
||||
|
||||
bool testCompleted = runHTTPTest(server, "/test", completed, resultStatus, resultData, resultError);
|
||||
beast::Journal j{TestSink::instance()};
|
||||
HTTPClient::initializeSSLContext("", "", false, j);
|
||||
|
||||
bool testCompleted = runHTTPTest(server, "/test", completed, resultStatus, resultData, resultError, j);
|
||||
// Verify results
|
||||
EXPECT_TRUE(testCompleted);
|
||||
EXPECT_FALSE(resultError);
|
||||
EXPECT_EQ(resultStatus, 200);
|
||||
EXPECT_EQ(resultData, testBody);
|
||||
}
|
||||
|
||||
// Clean up SSL context to prevent memory leaks
|
||||
HTTPClient::cleanupSSLContext();
|
||||
}
|
||||
|
||||
TEST(HTTPClient, basic_http_request)
|
||||
{
|
||||
// Initialize SSL context once for the entire test
|
||||
beast::Journal j{TestSink::instance()};
|
||||
HTTPClient::initializeSSLContext("", "", false, j);
|
||||
|
||||
TestHTTPServer server;
|
||||
std::string testBody = "Test response body";
|
||||
server.setResponseBody(testBody);
|
||||
@@ -280,16 +290,23 @@ TEST(HTTPClient, basic_http_request)
|
||||
std::string resultData;
|
||||
boost::system::error_code resultError;
|
||||
|
||||
bool testCompleted = runHTTPTest(server, "/basic", completed, resultStatus, resultData, resultError);
|
||||
bool testCompleted = runHTTPTest(server, "/basic", completed, resultStatus, resultData, resultError, j);
|
||||
|
||||
EXPECT_TRUE(testCompleted);
|
||||
EXPECT_FALSE(resultError);
|
||||
EXPECT_EQ(resultStatus, 200);
|
||||
EXPECT_EQ(resultData, testBody);
|
||||
|
||||
// Clean up SSL context to prevent memory leaks
|
||||
HTTPClient::cleanupSSLContext();
|
||||
}
|
||||
|
||||
TEST(HTTPClient, empty_response)
|
||||
{
|
||||
// Initialize SSL context once for the entire test
|
||||
beast::Journal j{TestSink::instance()};
|
||||
HTTPClient::initializeSSLContext("", "", false, j);
|
||||
|
||||
TestHTTPServer server;
|
||||
server.setResponseBody(""); // Empty body
|
||||
server.setHeader("Content-Length", "0");
|
||||
@@ -299,16 +316,23 @@ TEST(HTTPClient, empty_response)
|
||||
std::string resultData;
|
||||
boost::system::error_code resultError;
|
||||
|
||||
bool testCompleted = runHTTPTest(server, "/empty", completed, resultStatus, resultData, resultError);
|
||||
bool testCompleted = runHTTPTest(server, "/empty", completed, resultStatus, resultData, resultError, j);
|
||||
|
||||
EXPECT_TRUE(testCompleted);
|
||||
EXPECT_FALSE(resultError);
|
||||
EXPECT_EQ(resultStatus, 200);
|
||||
EXPECT_TRUE(resultData.empty());
|
||||
|
||||
// Clean up SSL context to prevent memory leaks
|
||||
HTTPClient::cleanupSSLContext();
|
||||
}
|
||||
|
||||
TEST(HTTPClient, different_status_codes)
|
||||
{
|
||||
// Initialize SSL context once for the entire test
|
||||
beast::Journal j{TestSink::instance()};
|
||||
HTTPClient::initializeSSLContext("", "", false, j);
|
||||
|
||||
std::vector<unsigned int> statusCodes = {200, 404, 500};
|
||||
|
||||
for (auto status : statusCodes)
|
||||
@@ -322,10 +346,13 @@ TEST(HTTPClient, different_status_codes)
|
||||
std::string resultData;
|
||||
boost::system::error_code resultError;
|
||||
|
||||
bool testCompleted = runHTTPTest(server, "/status", completed, resultStatus, resultData, resultError);
|
||||
bool testCompleted = runHTTPTest(server, "/status", completed, resultStatus, resultData, resultError, j);
|
||||
|
||||
EXPECT_TRUE(testCompleted);
|
||||
EXPECT_FALSE(resultError);
|
||||
EXPECT_EQ(resultStatus, static_cast<int>(status));
|
||||
}
|
||||
|
||||
// Clean up SSL context to prevent memory leaks
|
||||
HTTPClient::cleanupSSLContext();
|
||||
}
|
||||
|
||||
@@ -908,6 +908,10 @@ public:
|
||||
JLOG(m_journal.debug()) << "MasterTransaction sweep. Size before: " << oldMasterTxSize
|
||||
<< "; size after: " << masterTxCache.size();
|
||||
}
|
||||
{
|
||||
// Does not appear to have an associated cache.
|
||||
getNodeStore().sweep();
|
||||
}
|
||||
{
|
||||
std::size_t const oldLedgerMasterCacheSize = getLedgerMaster().getFetchPackCacheSize();
|
||||
|
||||
|
||||
@@ -130,6 +130,14 @@ std::unique_ptr<NodeStore::Database>
|
||||
SHAMapStoreImp::makeNodeStore(int readThreads)
|
||||
{
|
||||
auto nscfg = app_.config().section(ConfigSection::nodeDatabase());
|
||||
|
||||
// Provide default values:
|
||||
if (!nscfg.exists("cache_size"))
|
||||
nscfg.set("cache_size", std::to_string(app_.config().getValueFor(SizedItem::treeCacheSize, std::nullopt)));
|
||||
|
||||
if (!nscfg.exists("cache_age"))
|
||||
nscfg.set("cache_age", std::to_string(app_.config().getValueFor(SizedItem::treeCacheAge, std::nullopt)));
|
||||
|
||||
std::unique_ptr<NodeStore::Database> db;
|
||||
|
||||
if (deleteInterval_)
|
||||
@@ -218,6 +226,8 @@ SHAMapStoreImp::run()
|
||||
LedgerIndex lastRotated = state_db_.getState().lastRotated;
|
||||
netOPs_ = &app_.getOPs();
|
||||
ledgerMaster_ = &app_.getLedgerMaster();
|
||||
fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache());
|
||||
treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache());
|
||||
|
||||
if (advisoryDelete_)
|
||||
canDelete_ = state_db_.getCanDelete();
|
||||
@@ -480,19 +490,16 @@ void
|
||||
SHAMapStoreImp::clearCaches(LedgerIndex validatedSeq)
|
||||
{
|
||||
ledgerMaster_->clearLedgerCachePrior(validatedSeq);
|
||||
// Also clear the FullBelowCache so its generation counter is bumped.
|
||||
// This prevents stale "full below" markers from persisting across
|
||||
// backend rotation/online deletion and interfering with SHAMap sync.
|
||||
app_.getNodeFamily().getFullBelowCache()->clear();
|
||||
fullBelowCache_->clear();
|
||||
}
|
||||
|
||||
void
|
||||
SHAMapStoreImp::freshenCaches()
|
||||
{
|
||||
if (freshenCache(*app_.getNodeFamily().getTreeNodeCache()))
|
||||
if (freshenCache(*treeNodeCache_))
|
||||
return;
|
||||
if (freshenCache(app_.getMasterTransaction().getCache()))
|
||||
return;
|
||||
|
||||
freshenCache(app_.getMasterTransaction().getCache());
|
||||
}
|
||||
|
||||
void
|
||||
|
||||
@@ -93,6 +93,8 @@ private:
|
||||
// as of run() or before
|
||||
NetworkOPs* netOPs_ = nullptr;
|
||||
LedgerMaster* ledgerMaster_ = nullptr;
|
||||
FullBelowCache* fullBelowCache_ = nullptr;
|
||||
TreeNodeCache* treeNodeCache_ = nullptr;
|
||||
|
||||
static constexpr auto nodeStoreName_ = "NodeStore";
|
||||
|
||||
|
||||
@@ -699,8 +699,6 @@ transactionFormatResultImpl(Transaction::pointer tpTrans, unsigned apiVersion)
|
||||
jvResult[jss::engine_result] = sToken;
|
||||
jvResult[jss::engine_result_code] = tpTrans->getResult();
|
||||
jvResult[jss::engine_result_message] = sHuman;
|
||||
|
||||
RPC::populateAugmentedSubmitFields(jvResult, tpTrans);
|
||||
}
|
||||
}
|
||||
catch (std::exception&)
|
||||
@@ -714,28 +712,6 @@ transactionFormatResultImpl(Transaction::pointer tpTrans, unsigned apiVersion)
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
void
|
||||
populateAugmentedSubmitFields(Json::Value& jvResult, std::shared_ptr<Transaction> const& transaction)
|
||||
{
|
||||
auto const submitResult = transaction->getSubmitResult();
|
||||
|
||||
jvResult[jss::accepted] = submitResult.any();
|
||||
jvResult[jss::applied] = submitResult.applied;
|
||||
jvResult[jss::broadcast] = submitResult.broadcast;
|
||||
jvResult[jss::queued] = submitResult.queued;
|
||||
jvResult[jss::kept] = submitResult.kept;
|
||||
|
||||
if (auto currentLedgerState = transaction->getCurrentLedgerState())
|
||||
{
|
||||
jvResult[jss::account_sequence_next] = safe_cast<Json::Value::UInt>(currentLedgerState->accountSeqNext);
|
||||
jvResult[jss::account_sequence_available] = safe_cast<Json::Value::UInt>(currentLedgerState->accountSeqAvail);
|
||||
jvResult[jss::open_ledger_cost] = to_string(currentLedgerState->minFeeRequired);
|
||||
jvResult[jss::validated_ledger_index] = safe_cast<Json::Value::UInt>(currentLedgerState->validatedLedger);
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
[[nodiscard]] static XRPAmount
|
||||
getTxFee(Application const& app, Config const& config, Json::Value tx)
|
||||
{
|
||||
|
||||
@@ -15,18 +15,6 @@ class TxQ;
|
||||
|
||||
namespace RPC {
|
||||
|
||||
/** Populate augmented submit fields into a JSON result.
|
||||
This helper populates the submit result flags (accepted, applied,
|
||||
broadcast, queued, kept) and current ledger state fields
|
||||
(account_sequence_next, account_sequence_available, open_ledger_cost,
|
||||
validated_ledger_index) from a Transaction pointer.
|
||||
|
||||
@param jvResult The JSON result to populate
|
||||
@param transaction The transaction containing the submit result and state
|
||||
*/
|
||||
void
|
||||
populateAugmentedSubmitFields(Json::Value& jvResult, std::shared_ptr<Transaction> const& transaction);
|
||||
|
||||
Json::Value
|
||||
getCurrentNetworkFee(
|
||||
Role const role,
|
||||
|
||||
@@ -127,7 +127,23 @@ doSubmit(RPC::JsonContext& context)
|
||||
jvResult[jss::engine_result_code] = transaction->getResult();
|
||||
jvResult[jss::engine_result_message] = sHuman;
|
||||
|
||||
RPC::populateAugmentedSubmitFields(jvResult, transaction);
|
||||
auto const submitResult = transaction->getSubmitResult();
|
||||
|
||||
jvResult[jss::accepted] = submitResult.any();
|
||||
jvResult[jss::applied] = submitResult.applied;
|
||||
jvResult[jss::broadcast] = submitResult.broadcast;
|
||||
jvResult[jss::queued] = submitResult.queued;
|
||||
jvResult[jss::kept] = submitResult.kept;
|
||||
|
||||
if (auto currentLedgerState = transaction->getCurrentLedgerState())
|
||||
{
|
||||
jvResult[jss::account_sequence_next] = safe_cast<Json::Value::UInt>(currentLedgerState->accountSeqNext);
|
||||
jvResult[jss::account_sequence_available] =
|
||||
safe_cast<Json::Value::UInt>(currentLedgerState->accountSeqAvail);
|
||||
jvResult[jss::open_ledger_cost] = to_string(currentLedgerState->minFeeRequired);
|
||||
jvResult[jss::validated_ledger_index] =
|
||||
safe_cast<Json::Value::UInt>(currentLedgerState->validatedLedger);
|
||||
}
|
||||
}
|
||||
|
||||
return jvResult;
|
||||
|
||||
Reference in New Issue
Block a user