Compare commits

..

88 Commits

Author SHA1 Message Date
JCW
4001748ee9 Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-29 09:57:31 +01:00
JCW
f1482d332c Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 16:49:11 +01:00
JCW
373121ed78 Fix levelisation
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 15:54:56 +01:00
JCW
17c10de2ea Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 15:46:10 +01:00
JCW
6de7802001 Remove unrelated changes
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 15:30:49 +01:00
JCW
56a45506eb Optimise
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:08:06 +01:00
JCW
23029ab2b6 Optimise
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:52 +01:00
JCW
d1fe8ed31d Fix error
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:52 +01:00
JCW
129166cda5 Fix build error
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:52 +01:00
JCW
6376f10df7 Fix unit tests
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:52 +01:00
JCW
acafed7376 Performance test
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:52 +01:00
JCW
4feaa7b279 WIP
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:52 +01:00
JCW
45a4f44dc1 Performance test
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:51 +01:00
JCW
211d90dadd Performance test
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:51 +01:00
JCW
a4498f084e Performance test
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:51 +01:00
JCW
294dae5766 Performance test
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:51 +01:00
JCW
d2f01eb755 Performance test
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:51 +01:00
JCW
a854a78107 Fix test error
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:51 +01:00
JCW
c4047690e2 Fix test cases
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:50 +01:00
JCW
893632d330 Performance test
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:50 +01:00
JCW
f44d53be16 Fix test error
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:50 +01:00
JCW
d33691da84 Log size optimise
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:50 +01:00
JCW
1dc3b256e0 Bugfix
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:50 +01:00
JCW
2c2936fa93 Fix issues
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:50 +01:00
JCW
dcec5a0bbc Revert unrelated changes & performance optimisation
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:20 +01:00
JCW
ce5a6aec7b Revert unneeded changes
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:06:18 +01:00
JCW
79c3a83088 Fix build error
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:03:16 +01:00
JCW
bd91ec7242 Optimise
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:03:14 +01:00
JCW
bb787e3995 Optimisation
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:55 +01:00
JCW
0223443452 Hardcode the logstyle as json
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:54 +01:00
JCW
79e8c6a158 Add additional check
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:54 +01:00
JCW
44aa394e1e Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:54 +01:00
JCW
5e16b3df62 Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:54 +01:00
JCW
2f6d133169 Improve test coverage
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:54 +01:00
JCW
06c212495d Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:54 +01:00
JCW
9543ccf8e1 Set module name in json
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:53 +01:00
JCW
816089eab7 Fix error
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:53 +01:00
JCW
fa0cff3532 Fix build error
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:53 +01:00
JCW
3ec7596170 Fix build error
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:53 +01:00
JCW
28ad89ca20 Fix build error
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:53 +01:00
JCW
e6c5f8338b Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:53 +01:00
JCW
4d0c0ca5c7 Polish code
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:53 +01:00
JCW
4f63747f33 Polish code
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:52 +01:00
JCW
1a2b7e9b94 Fix issues
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:52 +01:00
JCW
c2aae2d846 Optimisation
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:52 +01:00
JCW
458bd8a3bd Revert unrelated changes
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:52 +01:00
JCW
cd8d5d97d1 Fix issues
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:52 +01:00
JCW
bd7b098409 Fix levelisation
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:52 +01:00
JCW
addfae1213 Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:51 +01:00
JCW
89ebb6b495 Fix issues
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:24 +01:00
JCW
67aa3d5ac9 Remove hardcoded logstyle
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:24 +01:00
JCW
3b2edce813 Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:23 +01:00
JCW
1d3d0c6774 Optimise
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:23 +01:00
JCW
f50f76788b Fix issues
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:23 +01:00
JCW
feae1d6e15 Optimisation
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:23 +01:00
JCW
7debf3e9f4 Optimisation
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:23 +01:00
JCW
90f970be46 Improve performance
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:23 +01:00
JCW
5e060a9e7b Fix
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:22 +01:00
JCW
dca000a60f Optimisation
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:22 +01:00
JCW
7500d635bb Fix issues
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:22 +01:00
JCW
3181042f15 Fix issues
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:22 +01:00
JCW
157aa367f2 Fix issues
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:22 +01:00
JCW
48cf042258 Optimisation
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:22 +01:00
JCW
61ff2ba0e7 Fix issues
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:21 +01:00
JCW
e19d770b86 performance optimisation
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 14:02:21 +01:00
JCW
a128571ab5 Fix issues 2025-09-26 14:02:21 +01:00
JCW
76bb517eb8 Fix issues 2025-09-26 14:02:21 +01:00
JCW
dc221de60c Fix issues 2025-09-26 14:02:21 +01:00
JCW
cdf1109558 Performance improvement 2025-09-26 14:02:18 +01:00
JCW
0fe8f3f62d Hardcode the log style as json
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:55 +01:00
JCW
ab9e6563e4 Fix PR comments
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:54 +01:00
Jingchen
d0f0789490 Update include/xrpl/basics/Log.h
Co-authored-by: Vito Tumas <5780819+Tapanito@users.noreply.github.com>
2025-09-26 13:55:54 +01:00
JCW
d36ef0cd18 Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:54 +01:00
JCW
a90bf169bf Improve coverage
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:54 +01:00
JCW
b3f389d918 Remove unneeded file
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:54 +01:00
JCW
d68f87f968 Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:54 +01:00
JCW
34127593e6 Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:53 +01:00
JCW
9e09595db0 Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:53 +01:00
JCW
856b36d0a5 Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:53 +01:00
JCW
9edba67e64 Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:53 +01:00
JCW
0e4f9a7ccf Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:51 +01:00
JCW
eda9bf1f1a Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:41 +01:00
JCW
43c6e202af Fix to_string error
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:41 +01:00
JCW
e4db80f61d Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:39 +01:00
JCW
af9dde4f75 Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:19 +01:00
JCW
f6d7b90b70 Support structured logs
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:18 +01:00
JCW
1774769226 Support structured logs
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:18 +01:00
JCW
92312801f1 Support structured logs
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-26 13:55:16 +01:00
405 changed files with 8902 additions and 30004 deletions


@@ -10,40 +10,24 @@ inputs:
   build_type:
     description: 'The build type to use ("Debug", "Release").'
     required: true
-  build_nproc:
-    description: "The number of processors to use for building."
-    required: true
   force_build:
     description: 'Force building of all dependencies ("true", "false").'
     required: false
     default: "false"
-  log_verbosity:
-    description: "The logging verbosity."
-    required: false
-    default: "verbose"
 runs:
   using: composite
   steps:
     - name: Install Conan dependencies
       shell: bash
-      env:
-        BUILD_DIR: ${{ inputs.build_dir }}
-        BUILD_NPROC: ${{ inputs.build_nproc }}
-        BUILD_OPTION: ${{ inputs.force_build == 'true' && '*' || 'missing' }}
-        BUILD_TYPE: ${{ inputs.build_type }}
-        LOG_VERBOSITY: ${{ inputs.log_verbosity }}
      run: |
        echo 'Installing dependencies.'
-        mkdir -p "${BUILD_DIR}"
-        cd "${BUILD_DIR}"
+        mkdir -p ${{ inputs.build_dir }}
+        cd ${{ inputs.build_dir }}
        conan install \
          --output-folder . \
-          --build="${BUILD_OPTION}" \
-          --options:host='&:tests=True' \
-          --options:host='&:xrpld=True' \
-          --settings:all build_type="${BUILD_TYPE}" \
-          --conf:all tools.build:jobs=${BUILD_NPROC} \
-          --conf:all tools.build:verbosity="${LOG_VERBOSITY}" \
-          --conf:all tools.compilation:verbosity="${LOG_VERBOSITY}" \
+          --build ${{ inputs.force_build == 'true' && '"*"' || 'missing' }} \
+          --options:host '&:tests=True' \
+          --options:host '&:xrpld=True' \
+          --settings:all build_type=${{ inputs.build_type }} \
          ..
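
For reference, the simplified install step on this branch corresponds to roughly the following local invocation (a sketch assuming Conan 2 and a .build directory; the Debug build type is only an example):

    # Install the project's Conan dependencies into .build (values are illustrative).
    mkdir -p .build && cd .build
    conan install .. \
      --output-folder . \
      --build missing \
      --options:host '&:tests=True' \
      --options:host '&:xrpld=True' \
      --settings:all build_type=Debug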

.github/actions/build-test/action.yml (new file, 96 lines added)

@@ -0,0 +1,96 @@
# This action build and tests the binary. The Conan dependencies must have
# already been installed (see the build-deps action).
name: Build and Test
description: "Build and test the binary."
# Note that actions do not support 'type' and all inputs are strings, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
inputs:
build_dir:
description: "The directory where to build."
required: true
build_only:
description: 'Whether to only build or to build and test the code ("true", "false").'
required: false
default: "false"
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
cmake_args:
description: "Additional arguments to pass to CMake."
required: false
default: ""
cmake_target:
description: "The CMake target to build."
required: true
codecov_token:
description: "The Codecov token to use for uploading coverage reports."
required: false
default: ""
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
runs:
using: composite
steps:
- name: Configure CMake
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Configuring CMake.'
cmake \
-G '${{ inputs.os == 'windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE=${{ inputs.build_type }} \
${{ inputs.cmake_args }} \
..
- name: Build the binary
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Building binary.'
cmake \
--build . \
--config ${{ inputs.build_type }} \
--parallel $(nproc) \
--target ${{ inputs.cmake_target }}
- name: Check linking
if: ${{ inputs.os == 'linux' }}
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Checking linking.'
ldd ./rippled
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
echo 'The binary is statically linked.'
else
echo 'The binary is dynamically linked.'
exit 1
fi
- name: Verify voidstar
if: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Verifying presence of instrumentation.'
./rippled --version | grep libvoidstar
- name: Test the binary
if: ${{ inputs.build_only == 'false' }}
shell: bash
working-directory: ${{ inputs.build_dir }}/${{ inputs.os == 'windows' && inputs.build_type || '' }}
run: |
echo 'Testing binary.'
./rippled --unittest --unittest-jobs $(nproc)
ctest -j $(nproc) --output-on-failure
- name: Upload coverage report
if: ${{ inputs.cmake_target == 'coverage' }}
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
with:
disable_search: true
disable_telem: true
fail_ci_if_error: true
files: ${{ inputs.build_dir }}/coverage.xml
plugins: noop
token: ${{ inputs.codecov_token }}
verbose: true
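
The test step above can be reproduced locally after a build (a sketch assuming a Linux build tree in .build; job counts are illustrative):

    cd .build
    # Embedded test suite first, then the CTest-registered tests (mirrors the action's test step).
    ./rippled --unittest --unittest-jobs "$(nproc)"
    ctest -j "$(nproc)" --output-on-failure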


@@ -1,43 +0,0 @@
name: Print build environment
description: "Print environment and some tooling versions"
runs:
using: composite
steps:
- name: Check configuration (Windows)
if: ${{ runner.os == 'Windows' }}
shell: bash
run: |
echo 'Checking environment variables.'
set
echo 'Checking CMake version.'
cmake --version
echo 'Checking Conan version.'
conan --version
- name: Check configuration (Linux and macOS)
if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }}
shell: bash
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking compiler version.'
${{ runner.os == 'Linux' && '${CC}' || 'clang' }} --version
echo 'Checking Conan version.'
conan --version
echo 'Checking Ninja version.'
ninja --version
echo 'Checking nproc version.'
nproc --version


@@ -35,12 +35,9 @@ runs:
     - name: Set up Conan remote
       shell: bash
-      env:
-        CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
-        CONAN_REMOTE_URL: ${{ inputs.conan_remote_url }}
      run: |
-        echo "Adding Conan remote '${CONAN_REMOTE_NAME}' at '${CONAN_REMOTE_URL}'."
-        conan remote add --index 0 --force "${CONAN_REMOTE_NAME}" "${CONAN_REMOTE_URL}"
+        echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
+        conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }}
        echo 'Listing Conan remotes.'
        conan remote list
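
Locally, the same remote can be configured with the values used elsewhere in these workflows (xrplf at https://conan.ripplex.io); a minimal sketch:

    # Register the CI Conan remote at index 0 and confirm it is listed.
    conan remote add --index 0 --force xrplf https://conan.ripplex.io
    conan remote list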


@@ -72,15 +72,15 @@ It generates many files of [results](results):
   desired as described above. In a perfect repo, this file will be
   empty.
   This file is committed to the repo, and is used by the [levelization
-  Github workflow](../../workflows/reusable-check-levelization.yml) to validate
+  Github workflow](../../workflows/check-levelization.yml) to validate
   that nothing changed.
 - [`ordering.txt`](results/ordering.txt): A list showing relationships
   between modules where there are no loops as they actually exist, as
   opposed to how they are desired as described above.
   This file is committed to the repo, and is used by the [levelization
-  Github workflow](../../workflows/reusable-check-levelization.yml) to validate
+  Github workflow](../../workflows/check-levelization.yml) to validate
   that nothing changed.
-- [`levelization.yml`](../../workflows/reusable-check-levelization.yml)
+- [`levelization.yml`](../../workflows/check-levelization.yml)
   Github Actions workflow to test that levelization loops haven't
   changed. Unfortunately, if changes are detected, it can't tell if
   they are improvements or not, so if you have resolved any issues or


@@ -138,7 +138,6 @@ test.toplevel > test.csf
 test.toplevel > xrpl.json
 test.unit_test > xrpl.basics
 tests.libxrpl > xrpl.basics
-tests.libxrpl > xrpl.json
 tests.libxrpl > xrpl.net
 xrpl.json > xrpl.basics
 xrpl.ledger > xrpl.basics


@@ -74,14 +74,14 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                 continue
             # RHEL:
-            # - 9 using GCC 12: Debug and Unity on linux/amd64.
-            # - 10 using Clang: Release and no Unity on linux/amd64.
+            # - 9.4 using GCC 12: Debug and Unity on linux/amd64.
+            # - 9.6 using Clang: Release and no Unity on linux/amd64.
             if os['distro_name'] == 'rhel':
                 skip = True
-                if os['distro_version'] == '9':
+                if os['distro_version'] == '9.4':
                     if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-12' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
                         skip = False
-                elif os['distro_version'] == '10':
+                elif os['distro_version'] == '9.6':
                     if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-any' and build_type == 'Release' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
                         skip = False
                 if skip:
@@ -130,14 +130,16 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
             if os['distro_name'] == 'rhel' and architecture['platform'] == 'linux/arm64':
                 continue
-            # We skip all clang 20+ on arm64 due to Boost build error.
-            if f'{os['compiler_name']}-{os['compiler_version']}' in ['clang-20', 'clang-21'] and architecture['platform'] == 'linux/arm64':
+            # We skip all clang-20 on arm64 due to boost 1.86 build error
+            if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and architecture['platform'] == 'linux/arm64':
                 continue
             # Enable code coverage for Debian Bookworm using GCC 15 in Debug and no
             # Unity on linux/amd64
             if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
                 cmake_args = f'-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}'
+                cmake_target = 'coverage'
+                build_only = True
             # Generate a unique name for the configuration, e.g. macos-arm64-debug
             # or debian-bookworm-gcc-12-amd64-release-unity.
@@ -160,7 +162,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
             'config_name': config_name,
             'cmake_args': cmake_args,
             'cmake_target': cmake_target,
-            'build_only': build_only,
+            'build_only': 'true' if build_only else 'false',
             'build_type': build_type,
             'os': os,
             'architecture': architecture,
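
The script is driven by the reusable-strategy-matrix workflow; an equivalent manual run (a sketch, using the same flags that workflow passes) looks like:

    cd .github/scripts/strategy-matrix
    # Emit every Linux configuration; drop --all to get the minimal matrix instead.
    ./generate.py --all --config=linux.json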


@@ -14,197 +14,139 @@
"distro_name": "debian", "distro_name": "debian",
"distro_version": "bookworm", "distro_version": "bookworm",
"compiler_name": "gcc", "compiler_name": "gcc",
"compiler_version": "12", "compiler_version": "12"
"image_sha": "0525eae"
}, },
{ {
"distro_name": "debian", "distro_name": "debian",
"distro_version": "bookworm", "distro_version": "bookworm",
"compiler_name": "gcc", "compiler_name": "gcc",
"compiler_version": "13", "compiler_version": "13"
"image_sha": "0525eae"
}, },
{ {
"distro_name": "debian", "distro_name": "debian",
"distro_version": "bookworm", "distro_version": "bookworm",
"compiler_name": "gcc", "compiler_name": "gcc",
"compiler_version": "14", "compiler_version": "14"
"image_sha": "0525eae"
}, },
{ {
"distro_name": "debian", "distro_name": "debian",
"distro_version": "bookworm", "distro_version": "bookworm",
"compiler_name": "gcc", "compiler_name": "gcc",
"compiler_version": "15", "compiler_version": "15"
"image_sha": "0525eae"
}, },
{ {
"distro_name": "debian", "distro_name": "debian",
"distro_version": "bookworm", "distro_version": "bookworm",
"compiler_name": "clang", "compiler_name": "clang",
"compiler_version": "16", "compiler_version": "16"
"image_sha": "0525eae"
}, },
{ {
"distro_name": "debian", "distro_name": "debian",
"distro_version": "bookworm", "distro_version": "bookworm",
"compiler_name": "clang", "compiler_name": "clang",
"compiler_version": "17", "compiler_version": "17"
"image_sha": "0525eae"
}, },
{ {
"distro_name": "debian", "distro_name": "debian",
"distro_version": "bookworm", "distro_version": "bookworm",
"compiler_name": "clang", "compiler_name": "clang",
"compiler_version": "18", "compiler_version": "18"
"image_sha": "0525eae"
}, },
{ {
"distro_name": "debian", "distro_name": "debian",
"distro_version": "bookworm", "distro_version": "bookworm",
"compiler_name": "clang", "compiler_name": "clang",
"compiler_version": "19", "compiler_version": "19"
"image_sha": "0525eae"
}, },
{ {
"distro_name": "debian", "distro_name": "debian",
"distro_version": "bookworm", "distro_version": "bookworm",
"compiler_name": "clang", "compiler_name": "clang",
"compiler_version": "20", "compiler_version": "20"
"image_sha": "0525eae"
}, },
{ {
"distro_name": "debian", "distro_name": "rhel",
"distro_version": "trixie", "distro_version": "9.4",
"compiler_name": "gcc", "compiler_name": "gcc",
"compiler_version": "14", "compiler_version": "12"
"image_sha": "0525eae"
}, },
{ {
"distro_name": "debian", "distro_name": "rhel",
"distro_version": "trixie", "distro_version": "9.4",
"compiler_name": "gcc", "compiler_name": "gcc",
"compiler_version": "15", "compiler_version": "13"
"image_sha": "0525eae"
}, },
{ {
"distro_name": "debian", "distro_name": "rhel",
"distro_version": "trixie", "distro_version": "9.4",
"compiler_name": "gcc",
"compiler_version": "14"
},
{
"distro_name": "rhel",
"distro_version": "9.6",
"compiler_name": "gcc",
"compiler_version": "13"
},
{
"distro_name": "rhel",
"distro_version": "9.6",
"compiler_name": "gcc",
"compiler_version": "14"
},
{
"distro_name": "rhel",
"distro_version": "9.4",
"compiler_name": "clang", "compiler_name": "clang",
"compiler_version": "20", "compiler_version": "any"
"image_sha": "0525eae"
}, },
{ {
"distro_name": "debian", "distro_name": "rhel",
"distro_version": "trixie", "distro_version": "9.6",
"compiler_name": "clang", "compiler_name": "clang",
"compiler_version": "21", "compiler_version": "any"
"image_sha": "0525eae"
},
{
"distro_name": "rhel",
"distro_version": "8",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "8",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "e1782cd"
}, },
{ {
"distro_name": "ubuntu", "distro_name": "ubuntu",
"distro_version": "jammy", "distro_version": "jammy",
"compiler_name": "gcc", "compiler_name": "gcc",
"compiler_version": "12", "compiler_version": "12"
"image_sha": "e1782cd"
}, },
{ {
"distro_name": "ubuntu", "distro_name": "ubuntu",
"distro_version": "noble", "distro_version": "noble",
"compiler_name": "gcc", "compiler_name": "gcc",
"compiler_version": "13", "compiler_version": "13"
"image_sha": "e1782cd"
}, },
{ {
"distro_name": "ubuntu", "distro_name": "ubuntu",
"distro_version": "noble", "distro_version": "noble",
"compiler_name": "gcc", "compiler_name": "gcc",
"compiler_version": "14", "compiler_version": "14"
"image_sha": "e1782cd"
}, },
{ {
"distro_name": "ubuntu", "distro_name": "ubuntu",
"distro_version": "noble", "distro_version": "noble",
"compiler_name": "clang", "compiler_name": "clang",
"compiler_version": "16", "compiler_version": "16"
"image_sha": "e1782cd"
}, },
{ {
"distro_name": "ubuntu", "distro_name": "ubuntu",
"distro_version": "noble", "distro_version": "noble",
"compiler_name": "clang", "compiler_name": "clang",
"compiler_version": "17", "compiler_version": "17"
"image_sha": "e1782cd"
}, },
{ {
"distro_name": "ubuntu", "distro_name": "ubuntu",
"distro_version": "noble", "distro_version": "noble",
"compiler_name": "clang", "compiler_name": "clang",
"compiler_version": "18", "compiler_version": "18"
"image_sha": "e1782cd"
}, },
{ {
"distro_name": "ubuntu", "distro_name": "ubuntu",
"distro_version": "noble", "distro_version": "noble",
"compiler_name": "clang", "compiler_name": "clang",
"compiler_version": "19", "compiler_version": "19"
"image_sha": "e1782cd"
} }
], ],
"build_type": ["Debug", "Release"], "build_type": ["Debug", "Release"],


@@ -10,8 +10,7 @@
       "distro_name": "macos",
       "distro_version": "",
       "compiler_name": "",
-      "compiler_version": "",
-      "image_sha": ""
+      "compiler_version": ""
     }
   ],
   "build_type": ["Debug", "Release"],


@@ -10,8 +10,7 @@
       "distro_name": "windows",
       "distro_version": "",
       "compiler_name": "",
-      "compiler_version": "",
-      "image_sha": ""
+      "compiler_version": ""
     }
   ],
   "build_type": ["Debug", "Release"],

.github/workflows/build-test.yml (new file, 147 lines added)

@@ -0,0 +1,147 @@
# This workflow builds and tests the binary for various configurations.
name: Build and test
# This workflow can only be triggered by other workflows. Note that the
# workflow_call event does not support the 'choice' input type, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onworkflow_callinputsinput_idtype,
# so we use 'string' instead.
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: false
type: string
default: ".build"
dependencies_force_build:
description: "Force building of all dependencies."
required: false
type: boolean
default: false
dependencies_force_upload:
description: "Force uploading of all dependencies."
required: false
type: boolean
default: false
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
secrets:
codecov_token:
description: "The Codecov token to use for uploading coverage reports."
required: false
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.os }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
os: ${{ inputs.os }}
strategy_matrix: ${{ inputs.strategy_matrix }}
# Build and test the binary.
build-test:
needs:
- generate-matrix
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
runs-on: ${{ matrix.architecture.runner }}
container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-5dd7158', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }}
steps:
- name: Check strategy matrix
run: |
echo 'Operating system distro name: ${{ matrix.os.distro_name }}'
echo 'Operating system distro version: ${{ matrix.os.distro_version }}'
echo 'Operating system compiler name: ${{ matrix.os.compiler_name }}'
echo 'Operating system compiler version: ${{ matrix.os.compiler_version }}'
echo 'Architecture platform: ${{ matrix.architecture.platform }}'
echo 'Architecture runner: ${{ toJson(matrix.architecture.runner) }}'
echo 'Build type: ${{ matrix.build_type }}'
echo 'Build only: ${{ matrix.build_only }}'
echo 'CMake arguments: ${{ matrix.cmake_args }}'
echo 'CMake target: ${{ matrix.cmake_target }}'
echo 'Config name: ${{ matrix.config_name }}'
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
with:
disable_ccache: false
- name: Check configuration (Windows)
if: ${{ inputs.os == 'windows' }}
run: |
echo 'Checking environment variables.'
set
echo 'Checking CMake version.'
cmake --version
echo 'Checking Conan version.'
conan --version
- name: Check configuration (Linux and MacOS)
if: ${{ inputs.os == 'linux' || inputs.os == 'macos' }}
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking compiler version.'
${{ inputs.os == 'linux' && '${CC}' || 'clang' }} --version
echo 'Checking Conan version.'
conan --version
echo 'Checking Ninja version.'
ninja --version
echo 'Checking nproc version.'
nproc --version
- name: Setup Conan
uses: ./.github/actions/setup-conan
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ inputs.build_dir }}
build_type: ${{ matrix.build_type }}
force_build: ${{ inputs.dependencies_force_build }}
- name: Build and test binary
uses: ./.github/actions/build-test
with:
build_dir: ${{ inputs.build_dir }}
build_only: ${{ matrix.build_only }}
build_type: ${{ matrix.build_type }}
cmake_args: ${{ matrix.cmake_args }}
cmake_target: ${{ matrix.cmake_target }}
codecov_token: ${{ secrets.codecov_token }}
os: ${{ inputs.os }}
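
The Linux jobs run inside images named by the format() expression above; for example, the Debian Bookworm / GCC 12 matrix entry resolves to a tag like the one below (a sketch; the sha suffix is the value hardcoded in this revision and may have moved on):

    # Pull the CI image that the debian-bookworm / gcc-12 matrix entry would run in.
    docker pull ghcr.io/xrplf/ci/debian-bookworm:gcc-12-sha-5dd7158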


@@ -0,0 +1,62 @@
# This workflow checks that all commits in the "master" branch are also in the
# "release" and "develop" branches, and that all commits in the "release" branch
# are also in the "develop" branch.
name: Check for missing commits
# This workflow can only be triggered by other workflows.
on: workflow_call
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-missing-commits
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
fetch-depth: 0
- name: Check for missing commits
env:
MESSAGE: |
If you are reading this, then the commits indicated above are missing
from the "develop" and/or "release" branch. Do a reverse-merge as soon
as possible. See CONTRIBUTING.md for instructions.
run: |
set -o pipefail
# Branches are ordered by how "canonical" they are. Every commit in one
# branch should be in all the branches behind it.
order=(master release develop)
branches=()
for branch in "${order[@]}"; do
# Check that the branches exist so that this job will work on forked
# repos, which don't necessarily have master and release branches.
echo "Checking if ${branch} exists."
if git ls-remote --exit-code --heads origin \
refs/heads/${branch} > /dev/null; then
branches+=(origin/${branch})
fi
done
prior=()
for branch in "${branches[@]}"; do
if [[ ${#prior[@]} -ne 0 ]]; then
echo "Checking ${prior[@]} for commits missing from ${branch}."
git log --oneline --no-merges "${prior[@]}" \
^$branch | tee -a "missing-commits.txt"
echo
fi
prior+=("${branch}")
done
if [[ $(cat missing-commits.txt | wc -l) -ne 0 ]]; then
echo "${MESSAGE}"
exit 1
fi
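
The core of the check can be run by hand for a single branch pair (a sketch; it lists commits on master that have not been merged down to develop):

    git fetch origin master develop
    # Any output here is a "missing" commit that needs a reverse-merge.
    git log --oneline --no-merges origin/master ^origin/develop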


@@ -46,46 +46,41 @@ jobs:
         uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
       - name: Generate outputs
         id: generate
-        env:
-          PR_NUMBER: ${{ github.event.pull_request.number }}
        run: |
          echo 'Generating user and channel.'
          echo "user=clio" >> "${GITHUB_OUTPUT}"
-          echo "channel=pr_${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
+          echo "channel=pr_${{ github.event.pull_request.number }}" >> "${GITHUB_OUTPUT}"
          echo 'Extracting version.'
          echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}"
       - name: Calculate conan reference
         id: conan_ref
        run: |
          echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}"
       - name: Set up Conan
         uses: ./.github/actions/setup-conan
         with:
           conan_remote_name: ${{ inputs.conan_remote_name }}
           conan_remote_url: ${{ inputs.conan_remote_url }}
       - name: Log into Conan remote
-        env:
-          CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
-        run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
+        run: conan remote login ${{ inputs.conan_remote_name }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
       - name: Upload package
-        env:
-          CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
        run: |
          conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} .
-          conan upload --confirm --check --remote="${CONAN_REMOTE_NAME}" xrpl/${{ steps.conan_ref.outputs.conan_ref }}
+          conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.conan_ref.outputs.conan_ref }}
    outputs:
      conan_ref: ${{ steps.conan_ref.outputs.conan_ref }}
  notify:
    needs: upload
    runs-on: ubuntu-latest
+    env:
+      GH_TOKEN: ${{ secrets.clio_notify_token }}
    steps:
      - name: Notify Clio
-        env:
-          GH_TOKEN: ${{ secrets.clio_notify_token }}
-          PR_URL: ${{ github.event.pull_request.html_url }}
        run: |
          gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
            /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
            -F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \
-            -F "client_payload[pr_url]=${PR_URL}"
+            -F "client_payload[pr_url]=${{ github.event.pull_request.html_url }}"


@@ -50,8 +50,8 @@ jobs:
            files: |
              # These paths are unique to `on-pr.yml`.
              .github/scripts/levelization/**
-              .github/workflows/reusable-check-levelization.yml
-              .github/workflows/reusable-notify-clio.yml
+              .github/workflows/check-levelization.yml
+              .github/workflows/notify-clio.yml
              .github/workflows/on-pr.yml
              # Keep the paths below in sync with those in `on-trigger.yml`.
@@ -59,11 +59,8 @@ jobs:
              .github/actions/build-test/**
              .github/actions/setup-conan/**
              .github/scripts/strategy-matrix/**
-              .github/workflows/reusable-build.yml
-              .github/workflows/reusable-build-test-config.yml
-              .github/workflows/reusable-build-test.yml
+              .github/workflows/build-test.yml
              .github/workflows/reusable-strategy-matrix.yml
-              .github/workflows/reusable-test.yml
              .codecov.yml
              cmake/**
              conan/**
@@ -96,27 +93,26 @@ jobs:
  check-levelization:
    needs: should-run
    if: ${{ needs.should-run.outputs.go == 'true' }}
-    uses: ./.github/workflows/reusable-check-levelization.yml
+    uses: ./.github/workflows/check-levelization.yml
  build-test:
    needs: should-run
    if: ${{ needs.should-run.outputs.go == 'true' }}
-    uses: ./.github/workflows/reusable-build-test.yml
+    uses: ./.github/workflows/build-test.yml
    strategy:
-      fail-fast: false
      matrix:
        os: [linux, macos, windows]
    with:
      os: ${{ matrix.os }}
    secrets:
-      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+      codecov_token: ${{ secrets.CODECOV_TOKEN }}
  notify-clio:
    needs:
      - should-run
      - build-test
-    if: ${{ needs.should-run.outputs.go == 'true' && (startsWith(github.base_ref, 'release') || github.base_ref == 'master') }}
-    uses: ./.github/workflows/reusable-notify-clio.yml
+    if: ${{ needs.should-run.outputs.go == 'true' && contains(fromJSON('["release", "master"]'), github.ref_name) }}
+    uses: ./.github/workflows/notify-clio.yml
    secrets:
      clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
      conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}


@@ -9,23 +9,22 @@ name: Trigger
on: on:
push: push:
branches: branches:
- "develop" - develop
- "release*" - release
- "master" - master
paths: paths:
# These paths are unique to `on-trigger.yml`. # These paths are unique to `on-trigger.yml`.
- ".github/workflows/check-missing-commits.yml"
- ".github/workflows/on-trigger.yml" - ".github/workflows/on-trigger.yml"
- ".github/workflows/publish-docs.yml"
# Keep the paths below in sync with those in `on-pr.yml`. # Keep the paths below in sync with those in `on-pr.yml`.
- ".github/actions/build-deps/**" - ".github/actions/build-deps/**"
- ".github/actions/build-test/**" - ".github/actions/build-test/**"
- ".github/actions/setup-conan/**" - ".github/actions/setup-conan/**"
- ".github/scripts/strategy-matrix/**" - ".github/scripts/strategy-matrix/**"
- ".github/workflows/reusable-build.yml" - ".github/workflows/build-test.yml"
- ".github/workflows/reusable-build-test-config.yml"
- ".github/workflows/reusable-build-test.yml"
- ".github/workflows/reusable-strategy-matrix.yml" - ".github/workflows/reusable-strategy-matrix.yml"
- ".github/workflows/reusable-test.yml"
- ".codecov.yml" - ".codecov.yml"
- "cmake/**" - "cmake/**"
- "conan/**" - "conan/**"
@@ -44,16 +43,25 @@ on:
schedule: schedule:
- cron: "32 6 * * 1-5" - cron: "32 6 * * 1-5"
# Run when manually triggered via the GitHub UI or API. # Run when manually triggered via the GitHub UI or API. If `force_upload` is
# true, then the dependencies that were missing (`force_rebuild` is false) or
# rebuilt (`force_rebuild` is true) will be uploaded, overwriting existing
# dependencies if needed.
workflow_dispatch: workflow_dispatch:
inputs:
dependencies_force_build:
description: "Force building of all dependencies."
required: false
type: boolean
default: false
dependencies_force_upload:
description: "Force uploading of all dependencies."
required: false
type: boolean
default: false
concurrency: concurrency:
# When a PR is merged into the develop branch it will be assigned a unique group: ${{ github.workflow }}-${{ github.ref }}
# group identifier, so execution will continue even if another PR is merged
# while it is still running. In all other cases the group identifier is shared
# per branch, so that any in-progress runs are cancelled when a new commit is
# pushed.
group: ${{ github.workflow }}-${{ github.event_name == 'push' && github.ref == 'refs/heads/develop' && github.sha || github.ref }}
cancel-in-progress: true cancel-in-progress: true
defaults: defaults:
@@ -61,14 +69,17 @@ defaults:
shell: bash shell: bash
jobs: jobs:
check-missing-commits:
if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }}
uses: ./.github/workflows/check-missing-commits.yml
build-test: build-test:
uses: ./.github/workflows/reusable-build-test.yml uses: ./.github/workflows/build-test.yml
strategy: strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix: matrix:
os: [linux, macos, windows] os: [linux, macos, windows]
with: with:
os: ${{ matrix.os }} os: ${{ matrix.os }}
strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }} strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }}
secrets: secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} codecov_token: ${{ secrets.CODECOV_TOKEN }}


@@ -9,7 +9,7 @@ on:
 jobs:
   # Call the workflow in the XRPLF/actions repo that runs the pre-commit hooks.
   run-hooks:
-    uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a
+    uses: XRPLF/actions/.github/workflows/pre-commit.yml@af1b0f0d764cda2e5435f5ac97b240d4bd4d95d3
     with:
       runs_on: ubuntu-latest
-      container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-a8c7be1" }'
+      container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-d1496b8" }'


@@ -23,24 +23,16 @@ defaults:
 env:
   BUILD_DIR: .build
-  NPROC_SUBTRACT: 2
 jobs:
   publish:
     runs-on: ubuntu-latest
-    container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-a8c7be1
+    container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-d1496b8
     permissions:
       contents: write
     steps:
       - name: Checkout repository
         uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
-      - name: Get number of processors
-        uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
-        id: nproc
-        with:
-          subtract: ${{ env.NPROC_SUBTRACT }}
       - name: Check configuration
        run: |
          echo 'Checking path.'
@@ -54,16 +46,12 @@ jobs:
          echo 'Checking Doxygen version.'
          doxygen --version
       - name: Build documentation
-        env:
-          BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
        run: |
-          mkdir -p "${BUILD_DIR}"
-          cd "${BUILD_DIR}"
+          mkdir -p ${{ env.BUILD_DIR }}
+          cd ${{ env.BUILD_DIR }}
          cmake -Donly_docs=ON ..
-          cmake --build . --target docs --parallel ${BUILD_NPROC}
+          cmake --build . --target docs --parallel $(nproc)
       - name: Publish documentation
         if: ${{ github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
         uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
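
The documentation build itself is a short CMake sequence that can be run locally (a sketch; assumes Doxygen is installed):

    mkdir -p .build && cd .build
    cmake -Donly_docs=ON ..
    cmake --build . --target docs --parallel "$(nproc)"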


@@ -1,213 +0,0 @@
name: Build and test configuration
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: true
type: string
build_only:
description: 'Whether to only build or to build and test the code ("true", "false").'
required: true
type: boolean
build_type:
description: 'The build type to use ("Debug", "Release").'
type: string
required: true
cmake_args:
description: "Additional arguments to pass to CMake."
required: false
type: string
default: ""
cmake_target:
description: "The CMake target to build."
type: string
required: true
runs_on:
description: Runner to run the job on as a JSON string
required: true
type: string
image:
description: "The image to run in (leave empty to run natively)"
required: true
type: string
config_name:
description: "The configuration string (used for naming artifacts and such)."
required: true
type: string
nproc_subtract:
description: "The number of processors to subtract when calculating parallelism."
required: false
type: number
default: 2
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
defaults:
run:
shell: bash
jobs:
build-and-test:
name: ${{ inputs.config_name }}
runs-on: ${{ fromJSON(inputs.runs_on) }}
container: ${{ inputs.image != '' && inputs.image || null }}
timeout-minutes: 60
env:
ENABLED_VOIDSTAR: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
ENABLED_COVERAGE: ${{ contains(inputs.cmake_args, '-Dcoverage=ON') }}
steps:
- name: Cleanup workspace (macOS and Windows)
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@01b244d2718865d427b499822fbd3f15e7197fcc
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
with:
disable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ inputs.nproc_subtract }}
- name: Setup Conan
uses: ./.github/actions/setup-conan
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ inputs.build_dir }}
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ inputs.build_type }}
# Set the verbosity to "quiet" for Windows to avoid an excessive
# amount of logs. For other OSes, the "verbose" logs are more useful.
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
- name: Configure CMake
working-directory: ${{ inputs.build_dir }}
env:
BUILD_TYPE: ${{ inputs.build_type }}
CMAKE_ARGS: ${{ inputs.cmake_args }}
run: |
cmake \
-G '${{ runner.os == 'Windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
${CMAKE_ARGS} \
..
- name: Build the binary
working-directory: ${{ inputs.build_dir }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
BUILD_TYPE: ${{ inputs.build_type }}
CMAKE_TARGET: ${{ inputs.cmake_target }}
run: |
cmake \
--build . \
--config "${BUILD_TYPE}" \
--parallel "${BUILD_NPROC}" \
--target "${CMAKE_TARGET}"
- name: Upload rippled artifact (Linux)
if: ${{ github.repository_owner == 'XRPLF' && runner.os == 'Linux' }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
env:
BUILD_DIR: ${{ inputs.build_dir }}
with:
name: rippled-${{ inputs.config_name }}
path: ${{ env.BUILD_DIR }}/rippled
retention-days: 3
if-no-files-found: error
- name: Check linking (Linux)
if: ${{ runner.os == 'Linux' }}
working-directory: ${{ inputs.build_dir }}
run: |
ldd ./rippled
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
echo 'The binary is statically linked.'
else
echo 'The binary is dynamically linked.'
exit 1
fi
- name: Verify presence of instrumentation (Linux)
if: ${{ runner.os == 'Linux' && env.ENABLED_VOIDSTAR == 'true' }}
working-directory: ${{ inputs.build_dir }}
run: |
./rippled --version | grep libvoidstar
- name: Run the separate tests
if: ${{ !inputs.build_only }}
working-directory: ${{ inputs.build_dir }}
# Windows locks some of the build files while running tests, and parallel jobs can collide
env:
BUILD_TYPE: ${{ inputs.build_type }}
PARALLELISM: ${{ runner.os == 'Windows' && '1' || steps.nproc.outputs.nproc }}
run: |
ctest \
--output-on-failure \
-C "${BUILD_TYPE}" \
-j "${PARALLELISM}"
- name: Run the embedded tests
if: ${{ !inputs.build_only }}
working-directory: ${{ runner.os == 'Windows' && format('{0}/{1}', inputs.build_dir, inputs.build_type) || inputs.build_dir }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
run: |
./rippled --unittest --unittest-jobs "${BUILD_NPROC}"
- name: Debug failure (Linux)
if: ${{ failure() && runner.os == 'Linux' && !inputs.build_only }}
run: |
echo "IPv4 local port range:"
cat /proc/sys/net/ipv4/ip_local_port_range
echo "Netstat:"
netstat -an
- name: Prepare coverage report
if: ${{ !inputs.build_only && env.ENABLED_COVERAGE == 'true' }}
working-directory: ${{ inputs.build_dir }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
BUILD_TYPE: ${{ inputs.build_type }}
run: |
cmake \
--build . \
--config "${BUILD_TYPE}" \
--parallel "${BUILD_NPROC}" \
--target coverage
- name: Upload coverage report
if: ${{ github.repository_owner == 'XRPLF' && !inputs.build_only && env.ENABLED_COVERAGE == 'true' }}
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
with:
disable_search: true
disable_telem: true
fail_ci_if_error: true
files: ${{ inputs.build_dir }}/coverage.xml
plugins: noop
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
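
On the base branch, the report uploaded to Codecov comes from a dedicated CMake target; a local sketch (assuming the tree was configured with -Dcoverage=ON, the target is named 'coverage' as above, and the tests have already run):

    cd .build
    # Rebuild the coverage target to regenerate coverage.xml, the file the codecov step uploads.
    cmake --build . --config Debug --parallel "$(nproc)" --target coverage
    ls coverage.xml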


@@ -1,58 +0,0 @@
# This workflow builds and tests the binary for various configurations.
name: Build and test
# This workflow can only be triggered by other workflows. Note that the
# workflow_call event does not support the 'choice' input type, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onworkflow_callinputsinput_idtype,
# so we use 'string' instead.
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: false
type: string
default: ".build"
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
os: ${{ inputs.os }}
strategy_matrix: ${{ inputs.strategy_matrix }}
# Build and test the binary for each configuration.
build-test-config:
needs:
- generate-matrix
uses: ./.github/workflows/reusable-build-test-config.yml
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
with:
build_dir: ${{ inputs.build_dir }}
build_only: ${{ matrix.build_only }}
build_type: ${{ matrix.build_type }}
cmake_args: ${{ matrix.cmake_args }}
cmake_target: ${{ matrix.cmake_target }}
runs_on: ${{ toJSON(matrix.architecture.runner) }}
image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || '' }}
config_name: ${{ matrix.config_name }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}


@@ -18,10 +18,6 @@ on:
       description: "The generated strategy matrix."
       value: ${{ jobs.generate-matrix.outputs.matrix }}
-defaults:
-  run:
-    shell: bash
 jobs:
   generate-matrix:
     runs-on: ubuntu-latest
@@ -39,7 +35,4 @@ jobs:
       - name: Generate strategy matrix
         working-directory: .github/scripts/strategy-matrix
         id: generate
-        env:
-          GENERATE_CONFIG: ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }}
-          GENERATE_OPTION: ${{ inputs.strategy_matrix == 'all' && '--all' || '' }}
-        run: ./generate.py ${GENERATE_OPTION} ${GENERATE_CONFIG} >> "${GITHUB_OUTPUT}"
+        run: ./generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }} >> "${GITHUB_OUTPUT}"


@@ -24,34 +24,30 @@ on:
branches: [develop] branches: [develop]
paths: paths:
- .github/workflows/upload-conan-deps.yml - .github/workflows/upload-conan-deps.yml
- .github/workflows/reusable-strategy-matrix.yml - .github/workflows/reusable-strategy-matrix.yml
- .github/actions/build-deps/action.yml - .github/actions/build-deps/action.yml
- .github/actions/setup-conan/action.yml - .github/actions/setup-conan/action.yml
- ".github/scripts/strategy-matrix/**" - ".github/scripts/strategy-matrix/**"
- conanfile.py - conanfile.py
- conan.lock - conan.lock
env: env:
CONAN_REMOTE_NAME: xrplf CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io CONAN_REMOTE_URL: https://conan.ripplex.io
NPROC_SUBTRACT: 2
concurrency: concurrency:
group: ${{ github.workflow }}-${{ github.ref }} group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true cancel-in-progress: true
defaults:
run:
shell: bash
jobs: jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix: generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml uses: ./.github/workflows/reusable-strategy-matrix.yml
with: with:
strategy_matrix: ${{ github.event_name == 'pull_request' && 'minimal' || 'all' }} strategy_matrix: ${{ github.event_name == 'pull_request' && 'minimal' || 'all' }}
# Build and upload the dependencies for each configuration.
run-upload-conan-deps: run-upload-conan-deps:
needs: needs:
- generate-matrix - generate-matrix
@@ -60,29 +56,19 @@ jobs:
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }} matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10 max-parallel: 10
runs-on: ${{ matrix.architecture.runner }} runs-on: ${{ matrix.architecture.runner }}
container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || null }} container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-5dd7158', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }}
steps: steps:
- name: Cleanup workspace (macOS and Windows) - name: Cleanup workspace
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }} if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@01b244d2718865d427b499822fbd3f15e7197fcc uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner - name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
with: with:
disable_ccache: false disable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ env.NPROC_SUBTRACT }}
- name: Setup Conan - name: Setup Conan
uses: ./.github/actions/setup-conan uses: ./.github/actions/setup-conan
with: with:
@@ -93,19 +79,13 @@ jobs:
uses: ./.github/actions/build-deps uses: ./.github/actions/build-deps
with: with:
build_dir: .build build_dir: .build
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ matrix.build_type }} build_type: ${{ matrix.build_type }}
force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }} force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }}
# Set the verbosity to "quiet" for Windows to avoid an excessive
# amount of logs. For other OSes, the "verbose" logs are more useful.
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
- name: Log into Conan remote - name: Log into Conan remote
if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }} if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}" run: conan remote login ${{ env.CONAN_REMOTE_NAME }} "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"
- name: Upload Conan packages - name: Upload Conan packages
if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }} if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }}
env: run: conan upload "*" -r=${{ env.CONAN_REMOTE_NAME }} --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
run: conan upload "*" --remote="${CONAN_REMOTE_NAME}" --confirm ${FORCE_OPTION}


@@ -34,5 +34,6 @@ repos:
exclude: | exclude: |
(?x)^( (?x)^(
external/.*| external/.*|
.github/scripts/levelization/results/.*\.txt .github/scripts/levelization/results/.*\.txt|
conan\.lock
)$ )$


@@ -39,12 +39,17 @@ found here](./docs/build/environment.md).
- [Python 3.11](https://www.python.org/downloads/), or higher - [Python 3.11](https://www.python.org/downloads/), or higher
- [Conan 2.17](https://conan.io/downloads.html)[^1], or higher - [Conan 2.17](https://conan.io/downloads.html)[^1], or higher
- [CMake 3.22](https://cmake.org/download/), or higher - [CMake 3.22](https://cmake.org/download/)[^2], or higher
[^1]: [^1]:
It is possible to build with Conan 1.60+, but the instructions are It is possible to build with Conan 1.60+, but the instructions are
significantly different, which is why we are not recommending it. significantly different, which is why we are not recommending it.
[^2]:
CMake 4 is not yet supported by all dependencies required by this project.
If you are affected by this issue, follow [conan workaround for cmake
4](#workaround-for-cmake-4)
`rippled` is written in the C++20 dialect and includes the `<concepts>` header. `rippled` is written in the C++20 dialect and includes the `<concepts>` header.
The [minimum compiler versions][2] required are: The [minimum compiler versions][2] required are:
@@ -277,6 +282,21 @@ sed -i.bak -e 's|^arch=.*$|arch=x86_64|' $(conan config home)/profiles/default
sed -i.bak -e 's|^compiler\.runtime=.*$|compiler.runtime=static|' $(conan config home)/profiles/default sed -i.bak -e 's|^compiler\.runtime=.*$|compiler.runtime=static|' $(conan config home)/profiles/default
``` ```
#### Workaround for CMake 4
If your system CMake is version 4 rather than 3, you may have to configure the
Conan profile to use CMake version 3 for dependencies by adding the following
two lines to your profile:
```text
[tool_requires]
!cmake/*: cmake/[>=3 <4]
```
This will force Conan to download and use a locally cached CMake 3 version, and
is needed because some of the dependencies used by this project do not support
CMake 4.
#### Clang workaround for grpc #### Clang workaround for grpc
If your compiler is clang, version 19 or later, or apple-clang, version 17 or If your compiler is clang, version 19 or later, or apple-clang, version 17 or
@@ -495,18 +515,18 @@ A coverage report is created when the following steps are completed, in order:
1. `rippled` binary built with instrumentation data, enabled by the `coverage` 1. `rippled` binary built with instrumentation data, enabled by the `coverage`
option mentioned above option mentioned above
2. completed one or more run of the unit tests, which populates coverage capture data 2. completed run of unit tests, which populates coverage capture data
3. completed run of the `gcovr` tool (which internally invokes either `gcov` or `llvm-cov`) 3. completed run of the `gcovr` tool (which internally invokes either `gcov` or `llvm-cov`)
to assemble both instrumentation data and the coverage capture data into a coverage report to assemble both instrumentation data and the coverage capture data into a coverage report
The last step of the above is automated into a single target `coverage`. The instrumented The above steps are automated into a single target `coverage`. The instrumented
`rippled` binary can also be used for regular development or testing work, at `rippled` binary can also be used for regular development or testing work, at
the cost of extra disk space utilization and a small performance hit the cost of extra disk space utilization and a small performance hit
(to store coverage capture data). Since `rippled` binary is simply a dependency of the (to store coverage capture). In case of a spurious failure of unit tests, it is
coverage report target, it is possible to re-run the `coverage` target without possible to re-run the `coverage` target without rebuilding the `rippled` binary
rebuilding the `rippled` binary. Note, running of the unit tests before the `coverage` (since it is simply a dependency of the coverage report target). It is also possible
target is left to the developer. Each such run will append to the coverage data to select only specific tests for the purpose of the coverage report, by setting
collected in the build directory. the `coverage_test` variable in `cmake`
The default coverage report format is `html-details`, but the user The default coverage report format is `html-details`, but the user
can override it to any of the formats listed in `Builds/CMake/CodeCoverage.cmake` can override it to any of the formats listed in `Builds/CMake/CodeCoverage.cmake`
@@ -515,6 +535,11 @@ to generate more than one format at a time by setting the `coverage_extra_args`
variable in `cmake`. The specific command line used to run the `gcovr` tool will be variable in `cmake`. The specific command line used to run the `gcovr` tool will be
displayed if the `CODE_COVERAGE_VERBOSE` variable is set. displayed if the `CODE_COVERAGE_VERBOSE` variable is set.
By default, the code coverage tool runs parallel unit tests with `--unittest-jobs`
set to the number of available CPU cores. This may cause spurious test
errors on Apple. Developers can override the number of unit test jobs with
the `coverage_test_parallelism` variable in `cmake`.
Example use with some cmake variables set: Example use with some cmake variables set:
``` ```


@@ -1,3 +1,21 @@
macro(group_sources_in source_dir curdir)
file(GLOB children RELATIVE ${source_dir}/${curdir}
${source_dir}/${curdir}/*)
foreach (child ${children})
if (IS_DIRECTORY ${source_dir}/${curdir}/${child})
group_sources_in(${source_dir} ${curdir}/${child})
else()
string(REPLACE "/" "\\" groupname ${curdir})
source_group(${groupname} FILES
${source_dir}/${curdir}/${child})
endif()
endforeach()
endmacro()
macro(group_sources curdir)
group_sources_in(${PROJECT_SOURCE_DIR} ${curdir})
endmacro()
macro (exclude_from_default target_) macro (exclude_from_default target_)
set_target_properties (${target_} PROPERTIES EXCLUDE_FROM_ALL ON) set_target_properties (${target_} PROPERTIES EXCLUDE_FROM_ALL ON)
set_target_properties (${target_} PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD ON) set_target_properties (${target_} PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD ON)


@@ -109,9 +109,6 @@
# - add a new function add_code_coverage_to_target # - add a new function add_code_coverage_to_target
# - remove some unused code # - remove some unused code
# #
# 2025-11-11, Bronek Kozicki
# - make EXECUTABLE and EXECUTABLE_ARGS optional
#
# USAGE: # USAGE:
# #
# 1. Copy this file into your cmake modules path. # 1. Copy this file into your cmake modules path.
@@ -320,10 +317,6 @@ function(setup_target_for_coverage_gcovr)
set(Coverage_FORMAT xml) set(Coverage_FORMAT xml)
endif() endif()
if(NOT DEFINED Coverage_EXECUTABLE AND DEFINED Coverage_EXECUTABLE_ARGS)
message(FATAL_ERROR "EXECUTABLE_ARGS must not be set if EXECUTABLE is not set")
endif()
if("--output" IN_LIST GCOVR_ADDITIONAL_ARGS) if("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting...") message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting...")
else() else()
@@ -405,18 +398,17 @@ function(setup_target_for_coverage_gcovr)
endforeach() endforeach()
# Set up commands which will be run to generate coverage data # Set up commands which will be run to generate coverage data
# If EXECUTABLE is not set, the user is expected to run the tests manually # Run tests
# before running the coverage target NAME set(GCOVR_EXEC_TESTS_CMD
if(DEFINED Coverage_EXECUTABLE) ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
set(GCOVR_EXEC_TESTS_CMD )
${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
)
endif()
# Create folder # Create folder
if(DEFINED GCOVR_CREATE_FOLDER) if(DEFINED GCOVR_CREATE_FOLDER)
set(GCOVR_FOLDER_CMD set(GCOVR_FOLDER_CMD
${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER}) ${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER})
else()
set(GCOVR_FOLDER_CMD echo) # dummy
endif() endif()
# Running gcovr # Running gcovr
@@ -433,13 +425,11 @@ function(setup_target_for_coverage_gcovr)
if(CODE_COVERAGE_VERBOSE) if(CODE_COVERAGE_VERBOSE)
message(STATUS "Executed command report") message(STATUS "Executed command report")
if(NOT "${GCOVR_EXEC_TESTS_CMD}" STREQUAL "") message(STATUS "Command to run tests: ")
message(STATUS "Command to run tests: ") string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}")
string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}") message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")
message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")
endif()
if(NOT "${GCOVR_FOLDER_CMD}" STREQUAL "") if(NOT GCOVR_FOLDER_CMD STREQUAL "echo")
message(STATUS "Command to create a folder: ") message(STATUS "Command to create a folder: ")
string(REPLACE ";" " " GCOVR_FOLDER_CMD_SPACED "${GCOVR_FOLDER_CMD}") string(REPLACE ";" " " GCOVR_FOLDER_CMD_SPACED "${GCOVR_FOLDER_CMD}")
message(STATUS "${GCOVR_FOLDER_CMD_SPACED}") message(STATUS "${GCOVR_FOLDER_CMD_SPACED}")


@@ -12,7 +12,7 @@ if (static OR MSVC)
else () else ()
set (Boost_USE_STATIC_RUNTIME OFF) set (Boost_USE_STATIC_RUNTIME OFF)
endif () endif ()
find_dependency (Boost find_dependency (Boost 1.70
COMPONENTS COMPONENTS
chrono chrono
container container
@@ -52,3 +52,5 @@ if (TARGET ZLIB::ZLIB)
set_target_properties(OpenSSL::Crypto PROPERTIES set_target_properties(OpenSSL::Crypto PROPERTIES
INTERFACE_LINK_LIBRARIES ZLIB::ZLIB) INTERFACE_LINK_LIBRARIES ZLIB::ZLIB)
endif () endif ()
include ("${CMAKE_CURRENT_LIST_DIR}/RippleTargets.cmake")


@@ -72,7 +72,10 @@ include(target_link_modules)
# Level 01 # Level 01
add_module(xrpl beast) add_module(xrpl beast)
target_link_libraries(xrpl.libxrpl.beast PUBLIC xrpl.imports.main) target_link_libraries(xrpl.libxrpl.beast PUBLIC
xrpl.imports.main
xrpl.libpb
)
# Level 02 # Level 02
add_module(xrpl basics) add_module(xrpl basics)


@@ -11,9 +11,6 @@ if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
return() return()
endif() endif()
include(ProcessorCount)
ProcessorCount(PROCESSOR_COUNT)
include(CodeCoverage) include(CodeCoverage)
# The instructions for these commands come from the `CodeCoverage` module, # The instructions for these commands come from the `CodeCoverage` module,
@@ -29,13 +26,15 @@ list(APPEND GCOVR_ADDITIONAL_ARGS
--exclude-throw-branches --exclude-throw-branches
--exclude-noncode-lines --exclude-noncode-lines
--exclude-unreachable-branches -s --exclude-unreachable-branches -s
-j ${PROCESSOR_COUNT}) -j ${coverage_test_parallelism})
setup_target_for_coverage_gcovr( setup_target_for_coverage_gcovr(
NAME coverage NAME coverage
FORMAT ${coverage_format} FORMAT ${coverage_format}
EXECUTABLE rippled
EXECUTABLE_ARGS --unittest$<$<BOOL:${coverage_test}>:=${coverage_test}> --unittest-jobs ${coverage_test_parallelism} --quiet --unittest-log
EXCLUDE "src/test" "src/tests" "include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb" EXCLUDE "src/test" "src/tests" "include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb"
DEPENDENCIES rippled xrpl.tests DEPENDENCIES rippled
) )
add_code_coverage_to_target(opts INTERFACE) add_code_coverage_to_target(opts INTERFACE)


@@ -1,5 +1,5 @@
#[===================================================================[ #[===================================================================[
sanity checks convenience variables and sanity checks
#]===================================================================] #]===================================================================]
get_property(is_multiconfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) get_property(is_multiconfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
@@ -16,19 +16,39 @@ if (NOT is_multiconfig)
endif () endif ()
endif () endif ()
get_directory_property(has_parent PARENT_DIRECTORY)
if (has_parent)
set (is_root_project OFF)
else ()
set (is_root_project ON)
endif ()
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES ".*Clang") # both Clang and AppleClang if ("${CMAKE_CXX_COMPILER_ID}" MATCHES ".*Clang") # both Clang and AppleClang
set (is_clang TRUE) set (is_clang TRUE)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND
CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16.0) CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.0)
message (FATAL_ERROR "This project requires clang 16 or later") message (FATAL_ERROR "This project requires clang 8 or later")
endif () endif ()
# TODO min AppleClang version check ?
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
set (is_gcc TRUE) set (is_gcc TRUE)
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12.0) if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.0)
message (FATAL_ERROR "This project requires GCC 12 or later") message (FATAL_ERROR "This project requires GCC 8 or later")
endif () endif ()
endif () endif ()
if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
set (is_linux TRUE)
else ()
set (is_linux FALSE)
endif ()
if ("$ENV{CI}" STREQUAL "true" OR "$ENV{CONTINUOUS_INTEGRATION}" STREQUAL "true")
set (is_ci TRUE)
else ()
set (is_ci FALSE)
endif ()
# check for in-source build and fail # check for in-source build and fail
if ("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}") if ("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
message (FATAL_ERROR "Builds (in-source) are not allowed in " message (FATAL_ERROR "Builds (in-source) are not allowed in "


@@ -1,25 +1,10 @@
#[===================================================================[ #[===================================================================[
declare options and variables declare user options/settings
#]===================================================================] #]===================================================================]
if(CMAKE_SYSTEM_NAME STREQUAL "Linux") include(ProcessorCount)
set (is_linux TRUE)
else()
set(is_linux FALSE)
endif()
if("$ENV{CI}" STREQUAL "true" OR "$ENV{CONTINUOUS_INTEGRATION}" STREQUAL "true") ProcessorCount(PROCESSOR_COUNT)
set(is_ci TRUE)
else()
set(is_ci FALSE)
endif()
get_directory_property(has_parent PARENT_DIRECTORY)
if(has_parent)
set(is_root_project OFF)
else()
set(is_root_project ON)
endif()
option(assert "Enables asserts, even in release builds" OFF) option(assert "Enables asserts, even in release builds" OFF)
@@ -40,28 +25,29 @@ if(unity)
endif() endif()
set(CMAKE_UNITY_BUILD ON CACHE BOOL "Do a unity build") set(CMAKE_UNITY_BUILD ON CACHE BOOL "Do a unity build")
endif() endif()
if(is_clang AND is_linux) if(is_clang AND is_linux)
option(voidstar "Enable Antithesis instrumentation." OFF) option(voidstar "Enable Antithesis instrumentation." OFF)
endif() endif()
if(is_gcc OR is_clang) if(is_gcc OR is_clang)
include(ProcessorCount)
ProcessorCount(PROCESSOR_COUNT)
option(coverage "Generates coverage info." OFF) option(coverage "Generates coverage info." OFF)
option(profile "Add profiling flags" OFF) option(profile "Add profiling flags" OFF)
set(coverage_test_parallelism "${PROCESSOR_COUNT}" CACHE STRING
"Unit tests parallelism for the purpose of coverage report.")
set(coverage_format "html-details" CACHE STRING set(coverage_format "html-details" CACHE STRING
"Output format of the coverage report.") "Output format of the coverage report.")
set(coverage_extra_args "" CACHE STRING set(coverage_extra_args "" CACHE STRING
"Additional arguments to pass to gcovr.") "Additional arguments to pass to gcovr.")
set(coverage_test "" CACHE STRING
"On gcc & clang, the specific unit test(s) to run for coverage. Default is all tests.")
if(coverage_test AND NOT coverage)
set(coverage ON CACHE BOOL "gcc/clang only" FORCE)
endif()
option(wextra "compile with extra gcc/clang warnings enabled" ON) option(wextra "compile with extra gcc/clang warnings enabled" ON)
else() else()
set(profile OFF CACHE BOOL "gcc/clang only" FORCE) set(profile OFF CACHE BOOL "gcc/clang only" FORCE)
set(coverage OFF CACHE BOOL "gcc/clang only" FORCE) set(coverage OFF CACHE BOOL "gcc/clang only" FORCE)
set(wextra OFF CACHE BOOL "gcc/clang only" FORCE) set(wextra OFF CACHE BOOL "gcc/clang only" FORCE)
endif() endif()
if(is_linux) if(is_linux)
option(BUILD_SHARED_LIBS "build shared ripple libraries" OFF) option(BUILD_SHARED_LIBS "build shared ripple libraries" OFF)
option(static "link protobuf, openssl, libc++, and boost statically" ON) option(static "link protobuf, openssl, libc++, and boost statically" ON)
@@ -78,13 +64,11 @@ else()
set(use_gold OFF CACHE BOOL "gold linker, linux only" FORCE) set(use_gold OFF CACHE BOOL "gold linker, linux only" FORCE)
set(use_mold OFF CACHE BOOL "mold linker, linux only" FORCE) set(use_mold OFF CACHE BOOL "mold linker, linux only" FORCE)
endif() endif()
if(is_clang) if(is_clang)
option(use_lld "enables detection of lld linker" ON) option(use_lld "enables detection of lld linker" ON)
else() else()
set(use_lld OFF CACHE BOOL "try lld linker, clang only" FORCE) set(use_lld OFF CACHE BOOL "try lld linker, clang only" FORCE)
endif() endif()
option(jemalloc "Enables jemalloc for heap profiling" OFF) option(jemalloc "Enables jemalloc for heap profiling" OFF)
option(werr "treat warnings as errors" OFF) option(werr "treat warnings as errors" OFF)
option(local_protobuf option(local_protobuf
@@ -118,26 +102,38 @@ if(san)
message(FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler") message(FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
endif() endif()
endif() endif()
set(container_label "" CACHE STRING "tag to use for package building containers")
option(packages_only
"ONLY generate package building targets. This is special use-case and almost \
certainly not what you want. Use with caution as you won't be able to build \
any compiled targets locally." OFF)
option(have_package_container
"Sometimes you already have the tagged container you want to use for package \
building and you don't want docker to rebuild it. This flag will detach the \
dependency of the package build from the container build. It's an advanced \
use case and most likely you should not be touching this flag." OFF)
# the remaining options are obscure and rarely used # the remaining options are obscure and rarely used
option(beast_no_unit_test_inline option(beast_no_unit_test_inline
"Prevents unit test definitions from being inserted into global table" "Prevents unit test definitions from being inserted into global table"
OFF) OFF)
option(single_io_service_thread option(single_io_service_thread
"Restricts the number of threads calling io_service::run to one. \ "Restricts the number of threads calling io_context::run to one. \
This can be useful when debugging." This can be useful when debugging."
OFF) OFF)
option(boost_show_deprecated option(boost_show_deprecated
"Allow boost to fail on deprecated usage. Only useful if you're trying\ "Allow boost to fail on deprecated usage. Only useful if you're trying\
to find deprecated calls." to find deprecated calls."
OFF) OFF)
option(beast_hashers
"Use local implementations for sha/ripemd hashes (experimental, not recommended)"
OFF)
if(WIN32) if(WIN32)
option(beast_disable_autolink "Disables autolinking of system libraries on WIN32" OFF) option(beast_disable_autolink "Disables autolinking of system libraries on WIN32" OFF)
else() else()
set(beast_disable_autolink OFF CACHE BOOL "WIN32 only" FORCE) set(beast_disable_autolink OFF CACHE BOOL "WIN32 only" FORCE)
endif() endif()
if(coverage) if(coverage)
message(STATUS "coverage build requested - forcing Debug build") message(STATUS "coverage build requested - forcing Debug build")
set(CMAKE_BUILD_TYPE Debug CACHE STRING "build type" FORCE) set(CMAKE_BUILD_TYPE Debug CACHE STRING "build type" FORCE)


@@ -24,6 +24,7 @@ target_link_libraries(ripple_boost
Boost::date_time Boost::date_time
Boost::filesystem Boost::filesystem
Boost::json Boost::json
Boost::process
Boost::program_options Boost::program_options
Boost::regex Boost::regex
Boost::system Boost::system


@@ -7,7 +7,7 @@ function(xrpl_add_test name)
"${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/${name}.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/${name}.cpp"
) )
add_executable(${target} ${ARGN} ${sources}) add_executable(${target} EXCLUDE_FROM_ALL ${ARGN} ${sources})
isolate_headers( isolate_headers(
${target} ${target}
@@ -22,4 +22,20 @@ function(xrpl_add_test name)
UNITY_BUILD_BATCH_SIZE 0) # Adjust as needed UNITY_BUILD_BATCH_SIZE 0) # Adjust as needed
add_test(NAME ${target} COMMAND ${target}) add_test(NAME ${target} COMMAND ${target})
set_tests_properties(
${target} PROPERTIES
FIXTURES_REQUIRED ${target}_fixture
)
add_test(
NAME ${target}.build
COMMAND
${CMAKE_COMMAND}
--build ${CMAKE_BINARY_DIR}
--config $<CONFIG>
--target ${target}
)
set_tests_properties(${target}.build PROPERTIES
FIXTURES_SETUP ${target}_fixture
)
endfunction() endfunction()


@@ -21,7 +21,7 @@
"date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493", "date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
"c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915", "c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
"bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716", "bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
"boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368%1754325043.336", "boost/1.88.0#8852c0b72ce8271fb8ff7c53456d4983%1756223752.326",
"abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907" "abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907"
], ],
"build_requires": [ "build_requires": [
@@ -46,11 +46,11 @@
"lz4/1.10.0" "lz4/1.10.0"
], ],
"boost/1.83.0": [ "boost/1.83.0": [
"boost/1.83.0" "boost/1.88.0"
], ],
"sqlite3/3.44.2": [ "sqlite3/3.44.2": [
"sqlite3/3.49.1" "sqlite3/3.49.1"
] ]
}, },
"config_requires": [] "config_requires": []
} }


@@ -1,5 +1,9 @@
# Global configuration for Conan. This is used to set the number of parallel # Global configuration for Conan. This is used to set the number of parallel
# downloads and uploads. # downloads, uploads, and build jobs. The verbosity is set to verbose to
# provide more information during the build process.
core:non_interactive=True core:non_interactive=True
core.download:parallel={{ os.cpu_count() }} core.download:parallel={{ os.cpu_count() }}
core.upload:parallel={{ os.cpu_count() }} core.upload:parallel={{ os.cpu_count() }}
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
tools.build:verbosity=verbose
tools.compilation:verbosity=verbose


@@ -21,14 +21,14 @@ compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}}
[conf] [conf]
{% if compiler == "clang" and compiler_version >= 19 %} {% if compiler == "clang" and compiler_version >= 19 %}
grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw'] tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %} {% endif %}
{% if compiler == "apple-clang" and compiler_version >= 17 %} {% if compiler == "apple-clang" and compiler_version >= 17 %}
grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw'] tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "clang" and compiler_version == 16 %}
tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
{% endif %} {% endif %}
{% if compiler == "gcc" and compiler_version < 13 %} {% if compiler == "gcc" and compiler_version < 13 %}
tools.build:cxxflags+=['-Wno-restrict'] tools.build:cxxflags=['-Wno-restrict']
{% endif %} {% endif %}
[tool_requires]
!cmake/*: cmake/[>=3 <4]


@@ -100,11 +100,13 @@ class Xrpl(ConanFile):
def configure(self): def configure(self):
if self.settings.compiler == 'apple-clang': if self.settings.compiler == 'apple-clang':
self.options['boost'].visibility = 'global' self.options['boost'].visibility = 'global'
if self.settings.compiler in ['clang', 'gcc']:
self.options['boost'].without_cobalt = True
def requirements(self): def requirements(self):
# Conan 2 requires transitive headers to be specified # Conan 2 requires transitive headers to be specified
transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {} transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {}
self.requires('boost/1.83.0', force=True, **transitive_headers_opt) self.requires('boost/1.88.0', force=True, **transitive_headers_opt)
self.requires('date/3.0.4', **transitive_headers_opt) self.requires('date/3.0.4', **transitive_headers_opt)
self.requires('lz4/1.10.0', force=True) self.requires('lz4/1.10.0', force=True)
self.requires('protobuf/3.21.12', force=True) self.requires('protobuf/3.21.12', force=True)
@@ -175,6 +177,7 @@ class Xrpl(ConanFile):
'boost::filesystem', 'boost::filesystem',
'boost::json', 'boost::json',
'boost::program_options', 'boost::program_options',
'boost::process',
'boost::regex', 'boost::regex',
'boost::system', 'boost::system',
'boost::thread', 'boost::thread',


@@ -541,7 +541,7 @@ SECP256K1_API int secp256k1_ecdsa_signature_serialize_compact(
/** Verify an ECDSA signature. /** Verify an ECDSA signature.
* *
* Returns: 1: correct signature * Returns: 1: correct signature
* 0: incorrect or unparsable signature * 0: incorrect or unparseable signature
* Args: ctx: pointer to a context object * Args: ctx: pointer to a context object
* In: sig: the signature being verified. * In: sig: the signature being verified.
* msghash32: the 32-byte message hash being verified. * msghash32: the 32-byte message hash being verified.


@@ -654,14 +654,12 @@ SharedWeakUnion<T>::convertToWeak()
break; break;
case destroy: case destroy:
// We just added a weak ref. How could we destroy? // We just added a weak ref. How could we destroy?
// LCOV_EXCL_START
UNREACHABLE( UNREACHABLE(
"ripple::SharedWeakUnion::convertToWeak : destroying freshly " "ripple::SharedWeakUnion::convertToWeak : destroying freshly "
"added ref"); "added ref");
delete p; delete p;
unsafeSetRawPtr(nullptr); unsafeSetRawPtr(nullptr);
return true; // Should never happen return true; // Should never happen
// LCOV_EXCL_STOP
case partialDestroy: case partialDestroy:
// This is a weird case. We just converted the last strong // This is a weird case. We just converted the last strong
// pointer to a weak pointer. // pointer to a weak pointer.


@@ -30,6 +30,7 @@
#include <map> #include <map>
#include <memory> #include <memory>
#include <mutex> #include <mutex>
#include <string_view>
#include <utility> #include <utility>
namespace ripple { namespace ripple {
@@ -130,27 +131,14 @@ private:
Does nothing if there is no associated system file. Does nothing if there is no associated system file.
*/ */
void void
write(char const* text); write(std::string_view text);
/** write to the log file and append an end of line marker. /** write to the log file and append an end of line marker.
Does nothing if there is no associated system file. Does nothing if there is no associated system file.
*/ */
void void
writeln(char const* text); writeln(std::string_view text);
/** Write to the log file using std::string. */
/** @{ */
void
write(std::string const& str)
{
write(str.c_str());
}
void
writeln(std::string const& str)
{
writeln(str.c_str());
}
/** @} */ /** @} */
private: private:
@@ -186,6 +174,14 @@ public:
beast::Journal::Sink& beast::Journal::Sink&
operator[](std::string const& name); operator[](std::string const& name);
template <typename AttributesFactory>
beast::Journal
journal(std::string const& name, AttributesFactory&& factory)
{
return beast::Journal{
get(name), name, std::forward<AttributesFactory>(factory)};
}
beast::Journal beast::Journal
journal(std::string const& name); journal(std::string const& name);
@@ -237,30 +233,34 @@ public:
static LogSeverity static LogSeverity
fromString(std::string const& s); fromString(std::string const& s);
private:
enum {
// Maximum line length for log messages.
// If the message exceeds this length it will be truncated with elipses.
maximumMessageCharacters = 12 * 1024
};
static void static void
format( format(
std::string& output, std::string& output,
std::string const& message, std::string const& message,
beast::severities::Severity severity, beast::severities::Severity severity,
std::string const& partition); std::string const& partition);
private:
enum {
// Maximum line length for log messages.
// If the message exceeds this length it will be truncated with elipses.
maximumMessageCharacters = 12 * 1024
};
}; };
// Wraps a Journal::Stream to skip evaluation of // Wraps a Journal::Stream to skip evaluation of
// expensive argument lists if the stream is not active. // expensive argument lists if the stream is not active.
#ifndef JLOG #ifndef JLOG
#define JLOG(x) \ #define JLOG_JOIN_(a, b) a##b
if (!x) \ #define JLOG_JOIN(a, b) JLOG_JOIN_(a, b)
{ \ #define JLOG_UNIQUE(base) JLOG_JOIN(base, __LINE__) // line-based unique name
} \
else \ #define JLOG(x) \
x if (auto JLOG_UNIQUE(stream) = (x); !JLOG_UNIQUE(stream)) \
{ \
} \
else \
std::move(JLOG_UNIQUE(stream))
#endif #endif
#ifndef CLOG #ifndef CLOG

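The hunk above replaces the old JLOG wrapper with an if-init form that binds the stream to a line-unique local, so the right-hand `<<` chain is evaluated only when the stream is active and several JLOG statements can share a scope without shadowing. Below is a minimal, self-contained sketch of that technique; `ToyStream` and `expensive()` are hypothetical stand-ins, not the library's `beast::Journal::Stream`.

```cpp
#include <iostream>
#include <sstream>
#include <utility>

// Hypothetical stand-in for a journal stream: truthy only when active,
// buffers output, and flushes it on destruction.
struct ToyStream
{
    bool active = false;
    std::ostringstream os;

    explicit operator bool() const
    {
        return active;
    }

    template <typename T>
    ToyStream&
    operator<<(T const& t)
    {
        if (active)
            os << t;
        return *this;
    }

    ~ToyStream()
    {
        if (active)
            std::cout << os.str() << '\n';
    }
};

// Same shape as the macro in the hunk above: a __LINE__-based unique name
// plus an if-init, so the << chain is skipped when the stream is inactive.
#define JLOG_JOIN_(a, b) a##b
#define JLOG_JOIN(a, b) JLOG_JOIN_(a, b)
#define JLOG_UNIQUE(base) JLOG_JOIN(base, __LINE__)
#define JLOG(x)                                               \
    if (auto JLOG_UNIQUE(stream) = (x); !JLOG_UNIQUE(stream)) \
    {                                                         \
    }                                                         \
    else                                                      \
        std::move(JLOG_UNIQUE(stream))

int
expensive()
{
    std::cout << "expensive argument evaluated\n";
    return 42;
}

int
main()
{
    JLOG(ToyStream{true}) << "active, value=" << expensive();
    // Inactive stream: the whole << chain, including expensive(), is skipped.
    JLOG(ToyStream{false}) << "inactive, value=" << expensive();
}
```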

@@ -32,15 +32,6 @@ class Number;
std::string std::string
to_string(Number const& amount); to_string(Number const& amount);
template <typename T>
constexpr bool
isPowerOfTen(T value)
{
while (value >= 10 && value % 10 == 0)
value /= 10;
return value == 1;
}
class Number class Number
{ {
using rep = std::int64_t; using rep = std::int64_t;
@@ -50,9 +41,7 @@ class Number
public: public:
// The range for the mantissa when normalized // The range for the mantissa when normalized
constexpr static std::int64_t minMantissa = 1'000'000'000'000'000LL; constexpr static std::int64_t minMantissa = 1'000'000'000'000'000LL;
static_assert(isPowerOfTen(minMantissa)); constexpr static std::int64_t maxMantissa = 9'999'999'999'999'999LL;
constexpr static std::int64_t maxMantissa = minMantissa * 10 - 1;
static_assert(maxMantissa == 9'999'999'999'999'999LL);
// The range for the exponent when normalized // The range for the exponent when normalized
constexpr static int minExponent = -32768; constexpr static int minExponent = -32768;
@@ -162,7 +151,22 @@ public:
} }
Number Number
truncate() const noexcept; truncate() const noexcept
{
if (exponent_ >= 0 || mantissa_ == 0)
return *this;
Number ret = *this;
while (ret.exponent_ < 0 && ret.mantissa_ != 0)
{
ret.exponent_ += 1;
ret.mantissa_ /= rep(10);
}
// We are guaranteed that normalize() will never throw an exception
// because exponent is either negative or zero at this point.
ret.normalize();
return ret;
}
friend constexpr bool friend constexpr bool
operator>(Number const& x, Number const& y) noexcept operator>(Number const& x, Number const& y) noexcept
@@ -207,8 +211,6 @@ private:
class Guard; class Guard;
}; };
constexpr static Number numZero{};
inline constexpr Number::Number(rep mantissa, int exponent, unchecked) noexcept inline constexpr Number::Number(rep mantissa, int exponent, unchecked) noexcept
: mantissa_{mantissa}, exponent_{exponent} : mantissa_{mantissa}, exponent_{exponent}
{ {

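To make the arithmetic in the Number hunk above easier to follow, here is a small sketch of the two ideas it touches: the `isPowerOfTen` compile-time check on the mantissa bounds, and truncation toward zero by dropping one decimal digit per negative exponent step. `ToyNumber` is hypothetical and deliberately skips the `normalize()` call the real `truncate()` performs.

```cpp
#include <cassert>
#include <cstdint>

// Mirrors the helper shown in the hunk: strip trailing zeros and check
// whether exactly a leading 1 remains.
template <typename T>
constexpr bool
isPowerOfTen(T value)
{
    while (value >= 10 && value % 10 == 0)
        value /= 10;
    return value == 1;
}

static_assert(isPowerOfTen(1'000'000'000'000'000LL));   // minMantissa
static_assert(!isPowerOfTen(9'999'999'999'999'999LL));  // maxMantissa

// Hypothetical, simplified stand-in for Number::truncate(): drop one decimal
// digit of the mantissa per negative exponent step (truncate toward zero).
struct ToyNumber
{
    std::int64_t mantissa;
    int exponent;

    ToyNumber
    truncate() const noexcept
    {
        ToyNumber ret = *this;
        while (ret.exponent < 0 && ret.mantissa != 0)
        {
            ret.exponent += 1;
            ret.mantissa /= 10;
        }
        return ret;
    }
};

int
main()
{
    ToyNumber n{123456, -3};  // represents 123.456
    auto t = n.truncate();
    assert(t.mantissa == 123 && t.exponent == 0);  // fractional part dropped
}
```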

@@ -23,7 +23,7 @@
#include <xrpl/basics/Resolver.h> #include <xrpl/basics/Resolver.h>
#include <xrpl/beast/utility/Journal.h> #include <xrpl/beast/utility/Journal.h>
#include <boost/asio/io_service.hpp> #include <boost/asio/io_context.hpp>
namespace ripple { namespace ripple {
@@ -33,7 +33,7 @@ public:
explicit ResolverAsio() = default; explicit ResolverAsio() = default;
static std::unique_ptr<ResolverAsio> static std::unique_ptr<ResolverAsio>
New(boost::asio::io_service&, beast::Journal); New(boost::asio::io_context&, beast::Journal);
}; };
} // namespace ripple } // namespace ripple


@@ -176,7 +176,7 @@ public:
@param count the number of items the slab allocator can allocate; note @param count the number of items the slab allocator can allocate; note
that a count of 0 is valid and means that the allocator that a count of 0 is valid and means that the allocator
is, effectively, disabled. This can be very useful in some is, effectively, disabled. This can be very useful in some
contexts (e.g. when minimal memory usage is needed) and contexts (e.g. when mimimal memory usage is needed) and
allows for graceful failure. allows for graceful failure.
*/ */
constexpr explicit SlabAllocator( constexpr explicit SlabAllocator(


@@ -565,7 +565,7 @@ operator<=>(base_uint<Bits, Tag> const& lhs, base_uint<Bits, Tag> const& rhs)
// This comparison might seem wrong on a casual inspection because it // This comparison might seem wrong on a casual inspection because it
// compares data internally stored as std::uint32_t byte-by-byte. But // compares data internally stored as std::uint32_t byte-by-byte. But
// note that the underlying data is stored in big endian, even if the // note that the underlying data is stored in big endian, even if the
// platform is little endian. This makes the comparison correct. // plaform is little endian. This makes the comparison correct.
// //
// FIXME: use std::lexicographical_compare_three_way once support is // FIXME: use std::lexicographical_compare_three_way once support is
// added to MacOS. // added to MacOS.


@@ -28,7 +28,7 @@ namespace ripple {
/* /*
* MSVC 2019 version 16.9.0 added [[nodiscard]] to the std comparison * MSVC 2019 version 16.9.0 added [[nodiscard]] to the std comparison
* operator() functions. boost::bimap checks that the comparator is a * operator() functions. boost::bimap checks that the comparitor is a
* BinaryFunction, in part by calling the function and ignoring the value. * BinaryFunction, in part by calling the function and ignoring the value.
* These two things don't play well together. These wrapper classes simply * These two things don't play well together. These wrapper classes simply
* strip [[nodiscard]] from operator() for use in boost::bimap. * strip [[nodiscard]] from operator() for use in boost::bimap.


@@ -23,7 +23,8 @@
#include <xrpl/beast/utility/instrumentation.h> #include <xrpl/beast/utility/instrumentation.h>
#include <boost/asio/basic_waitable_timer.hpp> #include <boost/asio/basic_waitable_timer.hpp>
#include <boost/asio/io_service.hpp> #include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <chrono> #include <chrono>
#include <condition_variable> #include <condition_variable>
@@ -32,7 +33,7 @@
namespace beast { namespace beast {
/** Measures handler latency on an io_service queue. */ /** Measures handler latency on an io_context queue. */
template <class Clock> template <class Clock>
class io_latency_probe class io_latency_probe
{ {
@@ -44,12 +45,12 @@ private:
std::condition_variable_any m_cond; std::condition_variable_any m_cond;
std::size_t m_count; std::size_t m_count;
duration const m_period; duration const m_period;
boost::asio::io_service& m_ios; boost::asio::io_context& m_ios;
boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer; boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer;
bool m_cancel; bool m_cancel;
public: public:
io_latency_probe(duration const& period, boost::asio::io_service& ios) io_latency_probe(duration const& period, boost::asio::io_context& ios)
: m_count(1) : m_count(1)
, m_period(period) , m_period(period)
, m_ios(ios) , m_ios(ios)
@@ -64,16 +65,16 @@ public:
cancel(lock, true); cancel(lock, true);
} }
/** Return the io_service associated with the latency probe. */ /** Return the io_context associated with the latency probe. */
/** @{ */ /** @{ */
boost::asio::io_service& boost::asio::io_context&
get_io_service() get_io_context()
{ {
return m_ios; return m_ios;
} }
boost::asio::io_service const& boost::asio::io_context const&
get_io_service() const get_io_context() const
{ {
return m_ios; return m_ios;
} }
@@ -109,8 +110,10 @@ public:
std::lock_guard lock(m_mutex); std::lock_guard lock(m_mutex);
if (m_cancel) if (m_cancel)
throw std::logic_error("io_latency_probe is canceled"); throw std::logic_error("io_latency_probe is canceled");
m_ios.post(sample_op<Handler>( boost::asio::post(
std::forward<Handler>(handler), Clock::now(), false, this)); m_ios,
sample_op<Handler>(
std::forward<Handler>(handler), Clock::now(), false, this));
} }
/** Initiate continuous i/o latency sampling. /** Initiate continuous i/o latency sampling.
@@ -124,8 +127,10 @@ public:
std::lock_guard lock(m_mutex); std::lock_guard lock(m_mutex);
if (m_cancel) if (m_cancel)
throw std::logic_error("io_latency_probe is canceled"); throw std::logic_error("io_latency_probe is canceled");
m_ios.post(sample_op<Handler>( boost::asio::post(
std::forward<Handler>(handler), Clock::now(), true, this)); m_ios,
sample_op<Handler>(
std::forward<Handler>(handler), Clock::now(), true, this));
} }
private: private:
@@ -236,12 +241,13 @@ private:
// The latency is too high to maintain the desired // The latency is too high to maintain the desired
// period so don't bother with a timer. // period so don't bother with a timer.
// //
m_probe->m_ios.post( boost::asio::post(
m_probe->m_ios,
sample_op<Handler>(m_handler, now, m_repeat, m_probe)); sample_op<Handler>(m_handler, now, m_repeat, m_probe));
} }
else else
{ {
m_probe->m_timer.expires_from_now(when - now); m_probe->m_timer.expires_after(when - now);
m_probe->m_timer.async_wait( m_probe->m_timer.async_wait(
sample_op<Handler>(m_handler, now, m_repeat, m_probe)); sample_op<Handler>(m_handler, now, m_repeat, m_probe));
} }
@@ -254,7 +260,8 @@ private:
if (!m_probe) if (!m_probe)
return; return;
typename Clock::time_point const now(Clock::now()); typename Clock::time_point const now(Clock::now());
m_probe->m_ios.post( boost::asio::post(
m_probe->m_ios,
sample_op<Handler>(m_handler, now, m_repeat, m_probe)); sample_op<Handler>(m_handler, now, m_repeat, m_probe));
} }
}; };

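The io_latency_probe hunk above swaps the deprecated `io_service::post()` member and `expires_from_now()` for the free function `boost::asio::post()` and `expires_after()`. A minimal, self-contained sketch of those two replacement calls (unrelated to the probe class itself) under standard Boost.Asio:

```cpp
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/system/error_code.hpp>

#include <chrono>
#include <iostream>

int
main()
{
    boost::asio::io_context ioc;

    // boost::asio::post() replaces the removed io_service::post() member.
    boost::asio::post(ioc, [] { std::cout << "posted handler\n"; });

    // expires_after() replaces expires_from_now() on waitable timers.
    boost::asio::steady_timer timer{ioc};
    timer.expires_after(std::chrono::milliseconds(10));
    timer.async_wait([](boost::system::error_code const& ec) {
        if (!ec)
            std::cout << "timer fired\n";
    });

    ioc.run();
}
```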

@@ -94,11 +94,7 @@ hash_append(Hasher& h, beast::IP::Address const& addr) noexcept
else if (addr.is_v6()) else if (addr.is_v6())
hash_append(h, addr.to_v6().to_bytes()); hash_append(h, addr.to_v6().to_bytes());
else else
{
// LCOV_EXCL_START
UNREACHABLE("beast::hash_append : invalid address type"); UNREACHABLE("beast::hash_append : invalid address type");
// LCOV_EXCL_STOP
}
} }
} // namespace beast } // namespace beast


@@ -8,9 +8,11 @@
#ifndef BEAST_TEST_YIELD_TO_HPP #ifndef BEAST_TEST_YIELD_TO_HPP
#define BEAST_TEST_YIELD_TO_HPP #define BEAST_TEST_YIELD_TO_HPP
#include <boost/asio/io_service.hpp> #include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp> #include <boost/asio/spawn.hpp>
#include <boost/optional.hpp> #include <boost/optional.hpp>
#include <boost/thread/csbl/memory/allocator_arg.hpp>
#include <condition_variable> #include <condition_variable>
#include <mutex> #include <mutex>
@@ -29,10 +31,12 @@ namespace test {
class enable_yield_to class enable_yield_to
{ {
protected: protected:
boost::asio::io_service ios_; boost::asio::io_context ios_;
private: private:
boost::optional<boost::asio::io_service::work> work_; boost::optional<boost::asio::executor_work_guard<
boost::asio::io_context::executor_type>>
work_;
std::vector<std::thread> threads_; std::vector<std::thread> threads_;
std::mutex m_; std::mutex m_;
std::condition_variable cv_; std::condition_variable cv_;
@@ -42,7 +46,8 @@ public:
/// The type of yield context passed to functions. /// The type of yield context passed to functions.
using yield_context = boost::asio::yield_context; using yield_context = boost::asio::yield_context;
explicit enable_yield_to(std::size_t concurrency = 1) : work_(ios_) explicit enable_yield_to(std::size_t concurrency = 1)
: work_(boost::asio::make_work_guard(ios_))
{ {
threads_.reserve(concurrency); threads_.reserve(concurrency);
while (concurrency--) while (concurrency--)
@@ -56,9 +61,9 @@ public:
t.join(); t.join();
} }
/// Return the `io_service` associated with the object /// Return the `io_context` associated with the object
boost::asio::io_service& boost::asio::io_context&
get_io_service() get_io_context()
{ {
return ios_; return ios_;
} }
@@ -111,13 +116,18 @@ enable_yield_to::spawn(F0&& f, FN&&... fn)
{ {
boost::asio::spawn( boost::asio::spawn(
ios_, ios_,
boost::allocator_arg,
boost::context::fixedsize_stack(2 * 1024 * 1024),
[&](yield_context yield) { [&](yield_context yield) {
f(yield); f(yield);
std::lock_guard lock{m_}; std::lock_guard lock{m_};
if (--running_ == 0) if (--running_ == 0)
cv_.notify_all(); cv_.notify_all();
}, },
boost::coroutines::attributes(2 * 1024 * 1024)); [](std::exception_ptr e) {
if (e)
std::rethrow_exception(e);
});
spawn(fn...); spawn(fn...);
} }

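The enable_yield_to hunk above replaces the removed `io_service::work` with an `executor_work_guard` obtained from `make_work_guard()`, and moves to the `boost::asio::spawn` overload that takes a stack allocator plus an `std::exception_ptr` completion handler. The guard part is the mechanism worth isolating; the sketch below shows only that (the spawn changes are not reproduced here):

```cpp
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>

#include <iostream>
#include <thread>

int
main()
{
    boost::asio::io_context ioc;

    // make_work_guard() keeps run() from returning while the queue is empty,
    // the same role the old io_service::work object played.
    auto work = boost::asio::make_work_guard(ioc);

    std::thread runner{[&ioc] { ioc.run(); }};

    boost::asio::post(ioc, [] { std::cout << "handler ran\n"; });

    // Releasing the guard lets run() return once pending handlers drain.
    work.reset();
    runner.join();
}
```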

@@ -42,7 +42,7 @@ public:
The argument string is available to suites and The argument string is available to suites and
allows for customization of the test. Each suite allows for customization of the test. Each suite
defines its own syntax for the argument string. defines its own syntax for the argumnet string.
The same argument is passed to all suites. The same argument is passed to all suites.
*/ */
void void


@@ -22,10 +22,266 @@
#include <xrpl/beast/utility/instrumentation.h> #include <xrpl/beast/utility/instrumentation.h>
#include <atomic>
#include <charconv>
#include <cstring>
#include <deque>
#include <mutex>
#include <shared_mutex>
#include <source_location>
#include <sstream> #include <sstream>
#include <string>
#include <string_view>
#include <utility>
namespace ripple::log {
template <typename T>
class LogParameter
{
public:
template <typename TArg>
LogParameter(char const* name, TArg&& value)
: name_(name), value_(std::forward<TArg>(value))
{
}
private:
char const* name_;
T value_;
template <typename U>
friend std::ostream&
operator<<(std::ostream& os, LogParameter<U> const&);
};
template <typename T>
class LogField
{
public:
template <typename TArg>
LogField(char const* name, TArg&& value)
: name_(name), value_(std::forward<TArg>(value))
{
}
private:
char const* name_;
T value_;
template <typename U>
friend std::ostream&
operator<<(std::ostream& os, LogField<U> const&);
};
template <typename T>
std::ostream&
operator<<(std::ostream& os, LogField<T> const& param);
template <typename T>
std::ostream&
operator<<(std::ostream& os, LogParameter<T> const& param);
} // namespace ripple::log
namespace beast { namespace beast {
namespace detail {
class SimpleJsonWriter
{
public:
explicit SimpleJsonWriter(std::string* buffer) : buffer_(buffer)
{
}
SimpleJsonWriter() = default;
SimpleJsonWriter(SimpleJsonWriter const& other) = default;
SimpleJsonWriter&
operator=(SimpleJsonWriter const& other) = default;
std::string&
buffer()
{
return *buffer_;
}
void
startObject() const
{
buffer_->push_back('{');
}
void
endObject() const
{
using namespace std::string_view_literals;
if (buffer_->back() == ',')
buffer_->pop_back();
buffer_->append("},"sv);
}
void
writeKey(std::string_view key) const
{
writeString(key);
buffer_->back() = ':';
}
void
startArray() const
{
buffer_->push_back('[');
}
void
endArray() const
{
using namespace std::string_view_literals;
if (buffer_->back() == ',')
buffer_->pop_back();
buffer_->append("],"sv);
}
void
writeString(std::string_view str) const
{
using namespace std::string_view_literals;
buffer_->push_back('"');
escape(str, *buffer_);
buffer_->append("\","sv);
}
std::string_view
writeInt(std::int32_t val) const
{
return pushNumber(val, *buffer_);
}
std::string_view
writeInt(std::int64_t val) const
{
return pushNumber(val, *buffer_);
}
std::string_view
writeUInt(std::uint32_t val) const
{
return pushNumber(val, *buffer_);
}
std::string_view
writeUInt(std::uint64_t val) const
{
return pushNumber(val, *buffer_);
}
std::string_view
writeDouble(double val) const
{
return pushNumber(val, *buffer_);
}
std::string_view
writeBool(bool val) const
{
using namespace std::string_view_literals;
auto str = val ? "true,"sv : "false,"sv;
buffer_->append(str);
return str;
}
void
writeNull() const
{
using namespace std::string_view_literals;
buffer_->append("null,"sv);
}
void
writeRaw(std::string_view str) const
{
buffer_->append(str);
}
void
finish()
{
buffer_->pop_back();
}
private:
template <typename T>
static std::string_view
pushNumber(T val, std::string& str)
{
thread_local char buffer[128];
auto result = std::to_chars(std::begin(buffer), std::end(buffer), val);
auto ptr = result.ptr;
*ptr = ',';
auto len = ptr - std::begin(buffer);
str.append(buffer, len + 1);
return {buffer, static_cast<size_t>(len)};
}
static void
escape(std::string_view str, std::string& buffer)
{
static constexpr char HEX[] = "0123456789ABCDEF";
char const* p = str.data();
char const* end = p + str.size();
char const* chunk = p;
while (p < end)
{
auto c = static_cast<unsigned char>(*p);
// JSON requires escaping for <0x20 and the two specials below.
bool needsEscape = (c < 0x20) || (c == '"') || (c == '\\');
if (!needsEscape)
{
++p;
continue;
}
// Flush the preceding safe run in one go.
if (chunk != p)
buffer.append(chunk, p - chunk);
switch (c)
{
case '"':
buffer.append("\\\"", 2);
break;
case '\\':
buffer.append("\\\\", 2);
break;
case '\b':
buffer.append("\\b", 2);
break;
case '\f':
buffer.append("\\f", 2);
break;
case '\n':
buffer.append("\\n", 2);
break;
case '\r':
buffer.append("\\r", 2);
break;
case '\t':
buffer.append("\\t", 2);
break;
default: {
// Other C0 controls -> \u00XX (JSON compliant)
char buf[6]{
'\\', 'u', '0', '0', HEX[(c >> 4) & 0xF], HEX[c & 0xF]};
buffer.append(buf, 6);
break;
}
}
++p;
chunk = p;
}
// Flush trailing safe run
if (chunk != p)
buffer.append(chunk, p - chunk);
}
std::string* buffer_ = nullptr;
};
} // namespace detail
/** A namespace for easy access to logging severity values. */ /** A namespace for easy access to logging severity values. */
namespace severities { namespace severities {
/** Severity level / threshold of a Journal message. */ /** Severity level / threshold of a Journal message. */
@@ -42,6 +298,9 @@ enum Severity {
kDisabled, kDisabled,
kNone = kDisabled kNone = kDisabled
}; };
std::string_view
to_string(Severity severity);
} // namespace severities } // namespace severities
/** A generic endpoint for log messages. /** A generic endpoint for log messages.
@@ -59,18 +318,114 @@ enum Severity {
class Journal class Journal
{ {
public: public:
template <typename T>
friend std::ostream&
ripple::log::operator<<(
std::ostream& os,
ripple::log::LogField<T> const& param);
template <typename T>
friend std::ostream&
ripple::log::operator<<(
std::ostream& os,
ripple::log::LogParameter<T> const& param);
class Sink; class Sink;
class JsonLogContext
{
std::string messageBuffer_;
detail::SimpleJsonWriter jsonWriter_;
bool hasMessageParams_ = false;
std::size_t messageOffset_ = 0;
public:
JsonLogContext() : jsonWriter_(&messageBuffer_)
{
messageBuffer_.reserve(4 * 1024);
}
std::string&
messageBuffer()
{
return messageBuffer_;
}
void
startMessageParams()
{
if (!hasMessageParams_)
{
writer().writeKey("Dt");
writer().startObject();
hasMessageParams_ = true;
}
}
void
endMessageParams()
{
if (hasMessageParams_)
{
writer().endObject();
}
}
detail::SimpleJsonWriter&
writer()
{
return jsonWriter_;
}
void
reuseJson();
void
finish();
void
start(
std::source_location location,
severities::Severity severity,
std::string_view moduleName,
std::string_view journalAttributes) noexcept;
};
private: private:
// Severity level / threshold of a Journal message. // Severity level / threshold of a Journal message.
using Severity = severities::Severity; using Severity = severities::Severity;
std::string name_;
std::string attributes_;
static std::string globalLogAttributes_;
static std::shared_mutex globalLogAttributesMutex_;
static bool jsonLogsEnabled_;
static thread_local JsonLogContext currentJsonLogContext_;
// Invariant: m_sink always points to a valid Sink // Invariant: m_sink always points to a valid Sink
Sink* m_sink; Sink* m_sink = nullptr;
void
initMessageContext(
std::source_location location,
severities::Severity severity) const;
static std::string&
formatLog(std::string const& message);
public: public:
//-------------------------------------------------------------------------- //--------------------------------------------------------------------------
static void
enableStructuredJournal();
static void
disableStructuredJournal();
static bool
isStructuredJournalEnabled();
/** Abstraction for the underlying message destination. */ /** Abstraction for the underlying message destination. */
class Sink class Sink
{ {
@@ -261,11 +616,32 @@ public:
/** Output stream support. */ /** Output stream support. */
/** @{ */ /** @{ */
ScopedStream ScopedStream
operator<<(std::ostream& manip(std::ostream&)) const; operator<<(std::ostream& manip(std::ostream&)) const&&
{
return {*this, manip};
}
template <typename T> template <typename T>
ScopedStream ScopedStream
operator<<(T const& t) const; operator<<(T const& t) const&&
{
return {*this, t};
}
ScopedStream
operator<<(std::ostream& manip(std::ostream&)) const&
{
currentJsonLogContext_.reuseJson();
return {*this, manip};
}
template <typename T>
ScopedStream
operator<<(T const& t) const&
{
currentJsonLogContext_.reuseJson();
return {*this, t};
}
/** @} */ /** @} */
private: private:
@@ -287,11 +663,73 @@ public:
/** Journal has no default constructor. */ /** Journal has no default constructor. */
Journal() = delete; Journal() = delete;
/** Create a journal that writes to the specified sink. */ Journal(Journal const& other)
explicit Journal(Sink& sink) : m_sink(&sink) : name_(other.name_)
, attributes_(other.attributes_)
, m_sink(other.m_sink)
{ {
} }
template <typename TAttributesFactory>
Journal(Journal const& other, TAttributesFactory&& attributesFactory)
: name_(other.name_), m_sink(other.m_sink)
{
std::string buffer{other.attributes_};
detail::SimpleJsonWriter writer{&buffer};
if (other.attributes_.empty() && jsonLogsEnabled_)
{
writer.startObject();
}
attributesFactory(writer);
attributes_ = std::move(buffer);
}
/** Create a journal that writes to the specified sink. */
explicit Journal(Sink& sink, std::string const& name = {})
: name_(name), m_sink(&sink)
{
}
/** Create a journal that writes to the specified sink. */
template <typename TAttributesFactory>
explicit Journal(
Sink& sink,
std::string const& name,
TAttributesFactory&& attributesFactory)
: name_(name), m_sink(&sink)
{
std::string buffer;
buffer.reserve(128);
detail::SimpleJsonWriter writer{&buffer};
if (jsonLogsEnabled_)
{
writer.startObject();
}
attributesFactory(writer);
attributes_ = std::move(buffer);
}
Journal&
operator=(Journal const& other)
{
if (&other == this)
return *this; // LCOV_EXCL_LINE
m_sink = other.m_sink;
name_ = other.name_;
attributes_ = other.attributes_;
return *this;
}
Journal&
operator=(Journal&& other) noexcept
{
m_sink = other.m_sink;
name_ = std::move(other.name_);
attributes_ = std::move(other.attributes_);
return *this;
}
/** Returns the Sink associated with this Journal. */ /** Returns the Sink associated with this Journal. */
Sink& Sink&
sink() const sink() const
@@ -301,8 +739,11 @@ public:
/** Returns a stream for this sink, with the specified severity level. */ /** Returns a stream for this sink, with the specified severity level. */
Stream Stream
stream(Severity level) const stream(
Severity level,
std::source_location location = std::source_location::current()) const
{ {
initMessageContext(location, level);
return Stream(*m_sink, level); return Stream(*m_sink, level);
} }
@@ -319,41 +760,69 @@ public:
/** Severity stream access functions. */ /** Severity stream access functions. */
/** @{ */ /** @{ */
Stream Stream
trace() const trace(std::source_location location = std::source_location::current()) const
{ {
initMessageContext(location, severities::kTrace);
return {*m_sink, severities::kTrace}; return {*m_sink, severities::kTrace};
} }
Stream Stream
debug() const debug(std::source_location location = std::source_location::current()) const
{ {
initMessageContext(location, severities::kDebug);
return {*m_sink, severities::kDebug}; return {*m_sink, severities::kDebug};
} }
Stream Stream
info() const info(std::source_location location = std::source_location::current()) const
{ {
initMessageContext(location, severities::kInfo);
return {*m_sink, severities::kInfo}; return {*m_sink, severities::kInfo};
} }
Stream Stream
warn() const warn(std::source_location location = std::source_location::current()) const
{ {
initMessageContext(location, severities::kWarning);
return {*m_sink, severities::kWarning}; return {*m_sink, severities::kWarning};
} }
Stream Stream
error() const error(std::source_location location = std::source_location::current()) const
{ {
initMessageContext(location, severities::kError);
return {*m_sink, severities::kError}; return {*m_sink, severities::kError};
} }
Stream Stream
fatal() const fatal(std::source_location location = std::source_location::current()) const
{ {
initMessageContext(location, severities::kFatal);
return {*m_sink, severities::kFatal}; return {*m_sink, severities::kFatal};
} }
/** @} */ /** @} */
static void
resetGlobalAttributes()
{
std::unique_lock lock(globalLogAttributesMutex_);
globalLogAttributes_.clear();
}
template <typename TAttributesFactory>
static void
addGlobalAttributes(TAttributesFactory&& factory)
{
std::unique_lock lock(globalLogAttributesMutex_);
globalLogAttributes_.reserve(1024);
auto isEmpty = globalLogAttributes_.empty();
detail::SimpleJsonWriter writer{&globalLogAttributes_};
if (isEmpty && jsonLogsEnabled_)
{
writer.startObject();
}
factory(writer);
}
}; };
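A note on the defaulted std::source_location parameters introduced above: the default argument std::source_location::current() is evaluated at each call site, so j.info(), j.warn() and friends can record where the log statement appears without any macro help. A minimal, self-contained sketch of that idiom (not project code; logLine and caller are illustrative names):

#include <cstdio>
#include <source_location>

void
logLine(std::source_location loc = std::source_location::current())
{
    // loc describes the caller, because the defaulted argument is evaluated
    // where logLine() is called, not where it is defined.
    std::printf("%s:%u\n", loc.file_name(), static_cast<unsigned>(loc.line()));
}

void
caller()
{
    logLine();  // reports this file and this line
}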
#ifndef __INTELLISENSE__ #ifndef __INTELLISENSE__
@@ -368,7 +837,7 @@ static_assert(std::is_nothrow_destructible<Journal>::value == true, "");
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
template <typename T> template <typename T>
Journal::ScopedStream::ScopedStream(Journal::Stream const& stream, T const& t) Journal::ScopedStream::ScopedStream(Stream const& stream, T const& t)
: ScopedStream(stream.sink(), stream.level()) : ScopedStream(stream.sink(), stream.level())
{ {
m_ostream << t; m_ostream << t;
@@ -384,13 +853,6 @@ Journal::ScopedStream::operator<<(T const& t) const
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
template <typename T>
Journal::ScopedStream
Journal::Stream::operator<<(T const& t) const
{
return ScopedStream(*this, t);
}
namespace detail { namespace detail {
template <class CharT, class Traits = std::char_traits<CharT>> template <class CharT, class Traits = std::char_traits<CharT>>
@@ -460,4 +922,244 @@ using logwstream = basic_logstream<wchar_t>;
} // namespace beast } // namespace beast
namespace ripple::log {
namespace detail {
template <typename T>
concept ToCharsFormattable = requires(T val) {
{
to_chars(std::declval<char*>(), std::declval<char*>(), val)
} -> std::convertible_to<std::to_chars_result>;
};
template <typename T>
concept StreamFormattable = requires(T val) {
{
std::declval<std::ostream&>() << val
} -> std::convertible_to<std::ostream&>;
};
template <typename T>
void
setTextValue(
beast::detail::SimpleJsonWriter& writer,
char const* name,
T&& value)
{
using ValueType = std::decay_t<T>;
writer.buffer() += name;
writer.buffer() += ": ";
if constexpr (
std::is_same_v<ValueType, std::string> ||
std::is_same_v<ValueType, std::string_view> ||
std::is_same_v<ValueType, char const*> ||
std::is_same_v<ValueType, char*>)
{
writer.buffer() += value;
}
else
{
std::ostringstream oss;
oss << value;
writer.buffer() += oss.str();
}
writer.buffer() += " ";
}
template <typename T>
void
setJsonValue(
beast::detail::SimpleJsonWriter& writer,
char const* name,
T&& value,
std::ostream* outStream)
{
using ValueType = std::decay_t<T>;
writer.writeKey(name);
if constexpr (std::is_same_v<ValueType, bool>)
{
auto sv = writer.writeBool(value);
if (outStream)
{
outStream->write(sv.data(), sv.size());
}
}
else if constexpr (std::is_integral_v<ValueType>)
{
std::string_view sv;
if constexpr (std::is_signed_v<ValueType>)
{
if constexpr (sizeof(ValueType) > 4)
{
sv = writer.writeInt(static_cast<std::int64_t>(value));
}
else
{
sv = writer.writeInt(static_cast<std::int32_t>(value));
}
}
else
{
if constexpr (sizeof(ValueType) > 4)
{
sv = writer.writeUInt(static_cast<std::uint64_t>(value));
}
else
{
sv = writer.writeUInt(static_cast<std::uint32_t>(value));
}
}
if (outStream)
{
outStream->write(sv.data(), sv.size());
}
}
else if constexpr (std::is_floating_point_v<ValueType>)
{
auto sv = writer.writeDouble(value);
if (outStream)
{
outStream->write(sv.data(), sv.size());
}
}
else if constexpr (
std::is_same_v<ValueType, char const*> ||
std::is_same_v<ValueType, char*>)
{
writer.writeString(value);
if (outStream)
{
outStream->write(value, std::strlen(value));
}
}
else if constexpr (
std::is_same_v<ValueType, std::string> ||
std::is_same_v<ValueType, std::string_view>)
{
writer.writeString(value);
if (outStream)
{
outStream->write(value.data(), value.size());
}
}
else
{
if constexpr (ToCharsFormattable<ValueType>)
{
char buffer[1024];
std::to_chars_result result =
to_chars(std::begin(buffer), std::end(buffer), value);
if (result.ec == std::errc{})
{
std::string_view sv{std::begin(buffer), result.ptr};
writer.writeString(sv);
if (outStream)
{
outStream->write(sv.data(), sv.size());
}
return;
}
}
if constexpr (StreamFormattable<ValueType>)
{
std::ostringstream oss;
oss.imbue(std::locale::classic());
oss << value;
auto str = oss.str();
writer.writeString(str);
if (outStream)
{
outStream->write(
str.c_str(), static_cast<std::streamsize>(str.size()));
}
return;
}
static_assert(
ToCharsFormattable<ValueType> || StreamFormattable<ValueType>);
}
}
} // namespace detail
template <typename T>
std::ostream&
operator<<(std::ostream& os, LogParameter<T> const& param)
{
if (!beast::Journal::jsonLogsEnabled_)
{
os << param.value_;
return os;
}
beast::Journal::currentJsonLogContext_.startMessageParams();
detail::setJsonValue(
beast::Journal::currentJsonLogContext_.writer(),
param.name_,
param.value_,
&os);
return os;
}
template <typename T>
std::ostream&
operator<<(std::ostream& os, LogField<T> const& param)
{
if (!beast::Journal::jsonLogsEnabled_)
return os;
beast::Journal::currentJsonLogContext_.startMessageParams();
detail::setJsonValue(
beast::Journal::currentJsonLogContext_.writer(),
param.name_,
param.value_,
nullptr);
return os;
}
template <typename T>
LogParameter<T>
param(char const* name, T&& value)
{
return LogParameter<T>{name, std::forward<T>(value)};
}
template <typename T>
LogField<T>
field(char const* name, T&& value)
{
return LogField<T>{name, std::forward<T>(value)};
}
template <typename... Pair>
[[nodiscard]] auto
attributes(Pair&&... pairs)
{
return [&](beast::detail::SimpleJsonWriter& writer) {
if (beast::Journal::isStructuredJournalEnabled())
{
(detail::setJsonValue(writer, pairs.first, pairs.second, nullptr),
...);
}
else
{
(detail::setTextValue(writer, pairs.first, pairs.second), ...);
}
};
}
template <typename T>
[[nodiscard]] std::pair<char const*, std::decay_t<T>>
attr(char const* name, T&& value)
{
return std::make_pair(name, std::forward<T>(value));
}
} // namespace ripple::log
#endif #endif
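To make the additions above concrete, here is a hedged usage sketch that relies only on what this header shows (beast::Journal, the JLOG macro, and the ripple::log helpers); the sink variable, journal names and attribute keys are illustrative, not taken from the codebase:

// A journal carrying per-journal attributes (someSink is any Journal::Sink).
beast::Journal j{
    someSink,
    "LedgerMaster",
    ripple::log::attributes(
        ripple::log::attr("module", "ledger"),
        ripple::log::attr("shard", 7))};

// Derive a journal that inherits the parent's attributes and adds more.
beast::Journal child{
    j,
    ripple::log::attributes(ripple::log::attr("subsystem", "consensus"))};

// Process-wide attributes attached to every structured message.
beast::Journal::addGlobalAttributes(
    ripple::log::attributes(ripple::log::attr("node", "validator-1")));

// param() is emitted in both the plain-text and the JSON output;
// field() is recorded in the JSON output only.
JLOG(child.info()) << "ledger closed "
                   << ripple::log::param("seq", 12345u)
                   << ripple::log::field("txCount", 42);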


@@ -32,7 +32,7 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// The duplication is because Visual Studio 2019 cannot compile that header // The duplication is because Visual Studio 2019 cannot compile that header
// even with the option -Zc:__cplusplus added. // even with the option -Zc:__cplusplus added.
#define ALWAYS(cond, message, ...) assert((message) && (cond)) #define ALWAYS(cond, message, ...) assert((message) && (cond))
#define ALWAYS_OR_UNREACHABLE(cond, message) assert((message) && (cond)) #define ALWAYS_OR_UNREACHABLE(cond, message, ...) assert((message) && (cond))
#define SOMETIMES(cond, message, ...) #define SOMETIMES(cond, message, ...)
#define REACHABLE(message, ...) #define REACHABLE(message, ...)
#define UNREACHABLE(message, ...) assert((message) && false) #define UNREACHABLE(message, ...) assert((message) && false)


@@ -217,7 +217,7 @@ Reader::parse(Value& root, BufferSequence const& bs)
std::string s; std::string s;
s.reserve(buffer_size(bs)); s.reserve(buffer_size(bs));
for (auto const& b : bs) for (auto const& b : bs)
s.append(buffer_cast<char const*>(b), buffer_size(b)); s.append(static_cast<char const*>(b.data()), buffer_size(b));
return parse(s, root); return parse(s, root);
} }


@@ -24,7 +24,6 @@
#include <xrpl/json/json_forwards.h> #include <xrpl/json/json_forwards.h>
#include <cstring> #include <cstring>
#include <limits>
#include <map> #include <map>
#include <string> #include <string>
#include <vector> #include <vector>
@@ -159,9 +158,9 @@ public:
using ArrayIndex = UInt; using ArrayIndex = UInt;
static Value const null; static Value const null;
static constexpr Int minInt = std::numeric_limits<Int>::min(); static Int const minInt;
static constexpr Int maxInt = std::numeric_limits<Int>::max(); static Int const maxInt;
static constexpr UInt maxUInt = std::numeric_limits<UInt>::max(); static UInt const maxUInt;
private: private:
class CZString class CZString
@@ -264,10 +263,6 @@ public:
bool bool
asBool() const; asBool() const;
/** Correct absolute value from int or unsigned int */
UInt
asAbsUInt() const;
// TODO: What is the "empty()" method this docstring mentions? // TODO: What is the "empty()" method this docstring mentions?
/** isNull() tests to see if this field is null. Don't use this method to /** isNull() tests to see if this field is null. Don't use this method to
test for emptiness: use empty(). */ test for emptiness: use empty(). */
@@ -400,9 +395,6 @@ public:
/// Return true if the object has a member named key. /// Return true if the object has a member named key.
bool bool
isMember(std::string const& key) const; isMember(std::string const& key) const;
/// Return true if the object has a member named key.
bool
isMember(StaticString const& key) const;
/// \brief Return a list of the member names. /// \brief Return a list of the member names.
/// ///


@@ -46,7 +46,7 @@ public:
* without formatting (not human friendly). * without formatting (not human friendly).
* *
* The JSON document is written in a single line. It is not intended for 'human' * The JSON document is written in a single line. It is not intended for 'human'
* consumption, but may be useful to support feature such as RPC where bandwidth * consumption, but may be useful to support feature such as RPC where bandwith
* is limited. \sa Reader, Value * is limited. \sa Reader, Value
*/ */


@@ -284,14 +284,12 @@ public:
{ {
if (key.type != ltOFFER) if (key.type != ltOFFER)
{ {
// LCOV_EXCL_START
UNREACHABLE( UNREACHABLE(
"ripple::ApplyView::dirAppend : only Offers are appended to " "ripple::ApplyView::dirAppend : only Offers are appended to "
"book directories"); "book directories");
// Only Offers are appended to book directories. Call dirInsert() // Only Offers are appended to book directories. Call dirInsert()
// instead // instead
return std::nullopt; return std::nullopt;
// LCOV_EXCL_STOP
} }
return dirAdd(true, directory, key.key, describe); return dirAdd(true, directory, key.key, describe);
} }
@@ -387,45 +385,6 @@ public:
emptyDirDelete(Keylet const& directory); emptyDirDelete(Keylet const& directory);
}; };
namespace directory {
/** Helper functions for managing low-level directory operations.
These are not part of the ApplyView interface.
Don't use them unless you really, really know what you're doing.
Instead use dirAdd, dirInsert, etc.
*/
std::uint64_t
createRoot(
ApplyView& view,
Keylet const& directory,
uint256 const& key,
std::function<void(std::shared_ptr<SLE> const&)> const& describe);
auto
findPreviousPage(ApplyView& view, Keylet const& directory, SLE::ref start);
std::uint64_t
insertKey(
ApplyView& view,
SLE::ref node,
std::uint64_t page,
bool preserveOrder,
STVector256& indexes,
uint256 const& key);
std::optional<std::uint64_t>
insertPage(
ApplyView& view,
std::uint64_t page,
SLE::pointer node,
std::uint64_t nextPage,
SLE::ref next,
uint256 const& key,
Keylet const& directory,
std::function<void(std::shared_ptr<SLE> const&)> const& describe);
} // namespace directory
} // namespace ripple } // namespace ripple
#endif #endif


@@ -24,7 +24,6 @@
#include <xrpl/ledger/ApplyView.h> #include <xrpl/ledger/ApplyView.h>
#include <xrpl/ledger/OpenView.h> #include <xrpl/ledger/OpenView.h>
#include <xrpl/ledger/ReadView.h> #include <xrpl/ledger/ReadView.h>
#include <xrpl/protocol/Asset.h>
#include <xrpl/protocol/Indexes.h> #include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/MPTIssue.h> #include <xrpl/protocol/MPTIssue.h>
#include <xrpl/protocol/Protocol.h> #include <xrpl/protocol/Protocol.h>
@@ -243,80 +242,6 @@ isDeepFrozen(
Currency const& currency, Currency const& currency,
AccountID const& issuer); AccountID const& issuer);
[[nodiscard]] inline bool
isDeepFrozen(
ReadView const& view,
AccountID const& account,
Issue const& issue,
int = 0 /*ignored*/)
{
return isDeepFrozen(view, account, issue.currency, issue.account);
}
[[nodiscard]] inline bool
isDeepFrozen(
ReadView const& view,
AccountID const& account,
MPTIssue const& mptIssue,
int depth = 0)
{
// Unlike IOUs, frozen / locked MPTs are not allowed to send or receive
// funds, so checking "deep frozen" is the same as checking "frozen".
return isFrozen(view, account, mptIssue, depth);
}
/**
* isFrozen check is recursive for MPT shares in a vault, descending to
* assets in the vault, up to maxAssetCheckDepth recursion depth. This is
* purely defensive, as we currently do not allow such vaults to be created.
*/
[[nodiscard]] inline bool
isDeepFrozen(
ReadView const& view,
AccountID const& account,
Asset const& asset,
int depth = 0)
{
return std::visit(
[&](auto const& issue) {
return isDeepFrozen(view, account, issue, depth);
},
asset.value());
}
[[nodiscard]] inline TER
checkDeepFrozen(
ReadView const& view,
AccountID const& account,
Issue const& issue)
{
return isDeepFrozen(view, account, issue) ? (TER)tecFROZEN
: (TER)tesSUCCESS;
}
[[nodiscard]] inline TER
checkDeepFrozen(
ReadView const& view,
AccountID const& account,
MPTIssue const& mptIssue)
{
return isDeepFrozen(view, account, mptIssue) ? (TER)tecLOCKED
: (TER)tesSUCCESS;
}
[[nodiscard]] inline TER
checkDeepFrozen(
ReadView const& view,
AccountID const& account,
Asset const& asset)
{
return std::visit(
[&](auto const& issue) {
return checkDeepFrozen(view, account, issue);
},
asset.value());
}
[[nodiscard]] bool [[nodiscard]] bool
isLPTokenFrozen( isLPTokenFrozen(
ReadView const& view, ReadView const& view,
@@ -362,49 +287,6 @@ accountHolds(
AuthHandling zeroIfUnauthorized, AuthHandling zeroIfUnauthorized,
beast::Journal j); beast::Journal j);
// Returns the amount an account can spend total.
//
// These functions use accountHolds, but unlike accountHolds:
// * The account can go into debt.
// * If the account is the asset issuer the only limit is defined by the asset /
// issuance.
//
// <-- saAmount: amount of currency held by account. May be negative.
[[nodiscard]] STAmount
accountSpendable(
ReadView const& view,
AccountID const& account,
Currency const& currency,
AccountID const& issuer,
FreezeHandling zeroIfFrozen,
beast::Journal j);
[[nodiscard]] STAmount
accountSpendable(
ReadView const& view,
AccountID const& account,
Issue const& issue,
FreezeHandling zeroIfFrozen,
beast::Journal j);
[[nodiscard]] STAmount
accountSpendable(
ReadView const& view,
AccountID const& account,
MPTIssue const& mptIssue,
FreezeHandling zeroIfFrozen,
AuthHandling zeroIfUnauthorized,
beast::Journal j);
[[nodiscard]] STAmount
accountSpendable(
ReadView const& view,
AccountID const& account,
Asset const& asset,
FreezeHandling zeroIfFrozen,
AuthHandling zeroIfUnauthorized,
beast::Journal j);
// Returns the amount an account can spend of the currency type saDefault, or // Returns the amount an account can spend of the currency type saDefault, or
// returns saDefault if this account is the issuer of the currency in // returns saDefault if this account is the issuer of the currency in
// question. Should be used in favor of accountHolds when questioning how much // question. Should be used in favor of accountHolds when questioning how much
@@ -651,11 +533,7 @@ dirNext(
describeOwnerDir(AccountID const& account); describeOwnerDir(AccountID const& account);
[[nodiscard]] TER [[nodiscard]] TER
dirLink( dirLink(ApplyView& view, AccountID const& owner, std::shared_ptr<SLE>& object);
ApplyView& view,
AccountID const& owner,
std::shared_ptr<SLE>& object,
SF_UINT64 const& node = sfOwnerNode);
AccountID AccountID
pseudoAccountAddress(ReadView const& view, uint256 const& pseudoOwnerKey); pseudoAccountAddress(ReadView const& view, uint256 const& pseudoOwnerKey);
@@ -674,17 +552,14 @@ createPseudoAccount(
uint256 const& pseudoOwnerKey, uint256 const& pseudoOwnerKey,
SField const& ownerField); SField const& ownerField);
// Returns true iff sleAcct is a pseudo-account or specific // Returns true iff sleAcct is a pseudo-account.
// pseudo-accounts in pseudoFieldFilter.
// //
// Returns false if sleAcct is // Returns false if sleAcct is
// * NOT a pseudo-account OR // * NOT a pseudo-account OR
// * NOT a ltACCOUNT_ROOT OR // * NOT a ltACCOUNT_ROOT OR
// * null pointer // * null pointer
[[nodiscard]] bool [[nodiscard]] bool
isPseudoAccount( isPseudoAccount(std::shared_ptr<SLE const> sleAcct);
std::shared_ptr<SLE const> sleAcct,
std::set<SField const*> const& pseudoFieldFilter = {});
// Returns the list of fields that define an ACCOUNT_ROOT as a pseudo-account if // Returns the list of fields that define an ACCOUNT_ROOT as a pseudo-account if
// set // set
@@ -698,91 +573,14 @@ isPseudoAccount(
getPseudoAccountFields(); getPseudoAccountFields();
[[nodiscard]] inline bool [[nodiscard]] inline bool
isPseudoAccount( isPseudoAccount(ReadView const& view, AccountID accountId)
ReadView const& view,
AccountID const& accountId,
std::set<SField const*> const& pseudoFieldFilter = {})
{ {
return isPseudoAccount( return isPseudoAccount(view.read(keylet::account(accountId)));
view.read(keylet::account(accountId)), pseudoFieldFilter);
} }
[[nodiscard]] TER [[nodiscard]] TER
canAddHolding(ReadView const& view, Asset const& asset); canAddHolding(ReadView const& view, Asset const& asset);
/** Validates that the destination SLE and tag are valid
- Checks that the SLE is not null.
- If the SLE requires a destination tag, checks that there is a tag.
*/
[[nodiscard]] TER
checkDestinationAndTag(SLE::const_ref toSle, bool hasDestinationTag);
/** Checks that can withdraw funds from an object to itself or a destination.
*
* The receiver may be either the submitting account (sfAccount) or a different
* destination account (sfDestination).
*
* - Checks that the receiver account exists.
* - If the receiver requires a destination tag, check that one exists, even
* if withdrawing to self.
* - If withdrawing to self, succeed.
* - If not, checks if the receiver requires deposit authorization, and if
* the sender has it.
*/
[[nodiscard]] TER
canWithdraw(
AccountID const& from,
ReadView const& view,
AccountID const& to,
SLE::const_ref toSle,
bool hasDestinationTag);
/** Checks that can withdraw funds from an object to itself or a destination.
*
* The receiver may be either the submitting account (sfAccount) or a different
* destination account (sfDestination).
*
* - Checks that the receiver account exists.
* - If the receiver requires a destination tag, check that one exists, even
* if withdrawing to self.
* - If withdrawing to self, succeed.
* - If not, checks if the receiver requires deposit authorization, and if
* the sender has it.
*/
[[nodiscard]] TER
canWithdraw(
AccountID const& from,
ReadView const& view,
AccountID const& to,
bool hasDestinationTag);
/** Checks that can withdraw funds from an object to itself or a destination.
*
* The receiver may be either the submitting account (sfAccount) or a different
* destination account (sfDestination).
*
* - Checks that the receiver account exists.
* - If the receiver requires a destination tag, check that one exists, even
* if withdrawing to self.
* - If withdrawing to self, succeed.
* - If not, checks if the receiver requires deposit authorization, and if
* the sender has it.
*/
[[nodiscard]] TER
canWithdraw(ReadView const& view, STTx const& tx);
[[nodiscard]] TER
doWithdraw(
ApplyView& view,
STTx const& tx,
AccountID const& senderAcct,
AccountID const& dstAcct,
AccountID const& sourceAcct,
XRPAmount priorBalance,
STAmount const& amount,
beast::Journal j);
/// Any transactors that call addEmptyHolding() in doApply must call /// Any transactors that call addEmptyHolding() in doApply must call
/// canAddHolding() in preflight with the same View and Asset /// canAddHolding() in preflight with the same View and Asset
[[nodiscard]] TER [[nodiscard]] TER
@@ -952,22 +750,6 @@ accountSend(
beast::Journal j, beast::Journal j,
WaiveTransferFee waiveFee = WaiveTransferFee::No); WaiveTransferFee waiveFee = WaiveTransferFee::No);
using MultiplePaymentDestinations = std::vector<std::pair<AccountID, Number>>;
/** Like accountSend, except one account is sending multiple payments (with the
* same asset!) simultaneously
*
* Calls static accountSendMultiIOU if saAmount represents Issue.
* Calls static accountSendMultiMPT if saAmount represents MPTIssue.
*/
[[nodiscard]] TER
accountSendMulti(
ApplyView& view,
AccountID const& senderID,
Asset const& asset,
MultiplePaymentDestinations const& receivers,
beast::Journal j,
WaiveTransferFee waiveFee = WaiveTransferFee::No);
[[nodiscard]] TER [[nodiscard]] TER
issueIOU( issueIOU(
ApplyView& view, ApplyView& view,
@@ -1039,8 +821,7 @@ requireAuth(
* purely defensive, as we currently do not allow such vaults to be created. * purely defensive, as we currently do not allow such vaults to be created.
* *
* If StrongAuth then return tecNO_AUTH if MPToken doesn't exist or * If StrongAuth then return tecNO_AUTH if MPToken doesn't exist or
* lsfMPTRequireAuth is set and MPToken is not authorized. Vault and LoanBroker * lsfMPTRequireAuth is set and MPToken is not authorized.
* pseudo-accounts are implicitly authorized.
* *
* If WeakAuth then return tecNO_AUTH if lsfMPTRequireAuth is set and MPToken * If WeakAuth then return tecNO_AUTH if lsfMPTRequireAuth is set and MPToken
* doesn't exist or is not authorized (explicitly or via credentials, if * doesn't exist or is not authorized (explicitly or via credentials, if
@@ -1113,26 +894,6 @@ canTransfer(
AccountID const& from, AccountID const& from,
AccountID const& to); AccountID const& to);
[[nodiscard]] TER
canTransfer(
ReadView const& view,
Issue const& issue,
AccountID const& from,
AccountID const& to);
[[nodiscard]] TER inline canTransfer(
ReadView const& view,
Asset const& asset,
AccountID const& from,
AccountID const& to)
{
return std::visit(
[&]<ValidIssueType TIss>(TIss const& issue) -> TER {
return canTransfer(view, issue, from, to);
},
asset.value());
}
/** Deleter function prototype. Returns the status of the entry deletion /** Deleter function prototype. Returns the status of the entry deletion
* (if should not be skipped) and if the entry should be skipped. The status * (if should not be skipped) and if the entry should be skipped. The status
* is always tesSUCCESS if the entry should be skipped. * is always tesSUCCESS if the entry should be skipped.

View File

@@ -47,7 +47,7 @@ public:
public: public:
AutoSocket( AutoSocket(
boost::asio::io_service& s, boost::asio::io_context& s,
boost::asio::ssl::context& c, boost::asio::ssl::context& c,
bool secureOnly, bool secureOnly,
bool plainOnly) bool plainOnly)
@@ -58,7 +58,7 @@ public:
mSocket = std::make_unique<ssl_socket>(s, c); mSocket = std::make_unique<ssl_socket>(s, c);
} }
AutoSocket(boost::asio::io_service& s, boost::asio::ssl::context& c) AutoSocket(boost::asio::io_context& s, boost::asio::ssl::context& c)
: AutoSocket(s, c, false, false) : AutoSocket(s, c, false, false)
{ {
} }


@@ -23,7 +23,7 @@
#include <xrpl/basics/ByteUtilities.h> #include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/utility/Journal.h> #include <xrpl/beast/utility/Journal.h>
#include <boost/asio/io_service.hpp> #include <boost/asio/io_context.hpp>
#include <boost/asio/streambuf.hpp> #include <boost/asio/streambuf.hpp>
#include <chrono> #include <chrono>
@@ -51,7 +51,7 @@ public:
static void static void
get(bool bSSL, get(bool bSSL,
boost::asio::io_service& io_service, boost::asio::io_context& io_context,
std::deque<std::string> deqSites, std::deque<std::string> deqSites,
unsigned short const port, unsigned short const port,
std::string const& strPath, std::string const& strPath,
@@ -65,7 +65,7 @@ public:
static void static void
get(bool bSSL, get(bool bSSL,
boost::asio::io_service& io_service, boost::asio::io_context& io_context,
std::string strSite, std::string strSite,
unsigned short const port, unsigned short const port,
std::string const& strPath, std::string const& strPath,
@@ -80,7 +80,7 @@ public:
static void static void
request( request(
bool bSSL, bool bSSL,
boost::asio::io_service& io_service, boost::asio::io_context& io_context,
std::string strSite, std::string strSite,
unsigned short const port, unsigned short const port,
std::function< std::function<


@@ -153,7 +153,7 @@ public:
{ {
strm.set_verify_callback( strm.set_verify_callback(
std::bind( std::bind(
&rfc2818_verify, &rfc6125_verify,
host, host,
std::placeholders::_1, std::placeholders::_1,
std::placeholders::_2, std::placeholders::_2,
@@ -167,7 +167,7 @@ public:
/** /**
* @brief callback invoked for name verification - just passes through * @brief callback invoked for name verification - just passes through
* to the asio rfc2818 implementation. * to the asio `host_name_verification` (rfc6125) implementation.
* *
* @param domain hostname expected * @param domain hostname expected
* @param preverified passed by implementation * @param preverified passed by implementation
@@ -175,13 +175,13 @@ public:
* @param j journal for logging * @param j journal for logging
*/ */
static bool static bool
rfc2818_verify( rfc6125_verify(
std::string const& domain, std::string const& domain,
bool preverified, bool preverified,
boost::asio::ssl::verify_context& ctx, boost::asio::ssl::verify_context& ctx,
beast::Journal j) beast::Journal j)
{ {
if (boost::asio::ssl::rfc2818_verification(domain)(preverified, ctx)) if (boost::asio::ssl::host_name_verification(domain)(preverified, ctx))
return true; return true;
JLOG(j.warn()) << "Outbound SSL connection to " << domain JLOG(j.warn()) << "Outbound SSL connection to " << domain


@@ -100,27 +100,7 @@ public:
bool bool
native() const native() const
{ {
return std::visit( return holds<Issue>() && get<Issue>().native();
[&]<ValidIssueType TIss>(TIss const& issue) {
if constexpr (std::is_same_v<TIss, Issue>)
return issue.native();
if constexpr (std::is_same_v<TIss, MPTIssue>)
return false;
},
issue_);
}
bool
integral() const
{
return std::visit(
[&]<ValidIssueType TIss>(TIss const& issue) {
if constexpr (std::is_same_v<TIss, Issue>)
return issue.native();
if constexpr (std::is_same_v<TIss, MPTIssue>)
return true;
},
issue_);
} }
friend constexpr bool friend constexpr bool


@@ -346,24 +346,6 @@ vault(uint256 const& vaultKey)
return {ltVAULT, vaultKey}; return {ltVAULT, vaultKey};
} }
Keylet
loanbroker(AccountID const& owner, std::uint32_t seq) noexcept;
inline Keylet
loanbroker(uint256 const& key)
{
return {ltLOAN_BROKER, key};
}
Keylet
loan(uint256 const& loanBrokerID, std::uint32_t loanSeq) noexcept;
inline Keylet
loan(uint256 const& key)
{
return {ltLOAN, key};
}
Keylet Keylet
permissionedDomain(AccountID const& account, std::uint32_t seq) noexcept; permissionedDomain(AccountID const& account, std::uint32_t seq) noexcept;


@@ -188,14 +188,14 @@ enum LedgerSpecificFlags {
lsfMPTCanTransfer = 0x00000020, lsfMPTCanTransfer = 0x00000020,
lsfMPTCanClawback = 0x00000040, lsfMPTCanClawback = 0x00000040,
lsmfMPTCanMutateCanLock = 0x00000002, lmfMPTCanMutateCanLock = 0x00000002,
lsmfMPTCanMutateRequireAuth = 0x00000004, lmfMPTCanMutateRequireAuth = 0x00000004,
lsmfMPTCanMutateCanEscrow = 0x00000008, lmfMPTCanMutateCanEscrow = 0x00000008,
lsmfMPTCanMutateCanTrade = 0x00000010, lmfMPTCanMutateCanTrade = 0x00000010,
lsmfMPTCanMutateCanTransfer = 0x00000020, lmfMPTCanMutateCanTransfer = 0x00000020,
lsmfMPTCanMutateCanClawback = 0x00000040, lmfMPTCanMutateCanClawback = 0x00000040,
lsmfMPTCanMutateMetadata = 0x00010000, lmfMPTCanMutateMetadata = 0x00010000,
lsmfMPTCanMutateTransferFee = 0x00020000, lmfMPTCanMutateTransferFee = 0x00020000,
// ltMPTOKEN // ltMPTOKEN
lsfMPTAuthorized = 0x00000002, lsfMPTAuthorized = 0x00000002,
@@ -205,11 +205,6 @@ enum LedgerSpecificFlags {
// ltVAULT // ltVAULT
lsfVaultPrivate = 0x00010000, lsfVaultPrivate = 0x00010000,
// ltLOAN
lsfLoanDefault = 0x00010000,
lsfLoanImpaired = 0x00020000,
lsfLoanOverpayment = 0x00040000, // True, loan allows overpayments
}; };
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------


@@ -86,9 +86,6 @@ public:
std::optional<TxType> std::optional<TxType>
getGranularTxType(GranularPermissionType const& gpType) const; getGranularTxType(GranularPermissionType const& gpType) const;
std::optional<std::reference_wrapper<uint256 const>> const
getTxFeature(TxType txType) const;
bool bool
isDelegatable(std::uint32_t const& permissionValue, Rules const& rules) isDelegatable(std::uint32_t const& permissionValue, Rules const& rules)
const; const;


@@ -22,7 +22,6 @@
#include <xrpl/basics/ByteUtilities.h> #include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/base_uint.h> #include <xrpl/basics/base_uint.h>
#include <xrpl/protocol/Units.h>
#include <cstdint> #include <cstdint>
@@ -56,10 +55,7 @@ std::size_t constexpr oversizeMetaDataCap = 5200;
/** The maximum number of entries per directory page */ /** The maximum number of entries per directory page */
std::size_t constexpr dirNodeMaxEntries = 32; std::size_t constexpr dirNodeMaxEntries = 32;
/** The maximum number of pages allowed in a directory /** The maximum number of pages allowed in a directory */
Made obsolete by fixDirectoryLimit amendment.
*/
std::uint64_t constexpr dirNodeMaxPages = 262144; std::uint64_t constexpr dirNodeMaxPages = 262144;
/** The maximum number of items in an NFT page */ /** The maximum number of items in an NFT page */
@@ -85,140 +81,6 @@ std::size_t constexpr maxDeletableTokenOfferEntries = 500;
*/ */
std::uint16_t constexpr maxTransferFee = 50000; std::uint16_t constexpr maxTransferFee = 50000;
/** There are 10,000 basis points (bips) in 100%.
*
* Basis points represent 0.01%.
*
* Given a value X, to find the amount for B bps,
* use X * B / bipsPerUnity
*
* Example: If a loan broker has 999 XRP of debt, and must maintain 1,000 bps of
* that debt as cover (10%), then the minimum cover amount is 999,000,000 drops
* * 1000 / bipsPerUnity = 99,900,000 drops or 99.9 XRP.
*
* Given a percentage P, to find the number of bps that percentage represents,
* use P * bipsPerUnity.
*
* Example: 50% is 0.50 * bipsPerUnity = 5,000 bps.
*/
Bips32 constexpr bipsPerUnity(100 * 100);
static_assert(bipsPerUnity == Bips32{10'000});
TenthBips32 constexpr tenthBipsPerUnity(bipsPerUnity.value() * 10);
static_assert(tenthBipsPerUnity == TenthBips32(100'000));
constexpr Bips32
percentageToBips(std::uint32_t percentage)
{
return Bips32(percentage * bipsPerUnity.value() / 100);
}
constexpr TenthBips32
percentageToTenthBips(std::uint32_t percentage)
{
return TenthBips32(percentage * tenthBipsPerUnity.value() / 100);
}
template <typename T, class TBips>
constexpr T
bipsOfValue(T value, Bips<TBips> bips)
{
return value * bips.value() / bipsPerUnity.value();
}
template <typename T, class TBips>
constexpr T
tenthBipsOfValue(T value, TenthBips<TBips> bips)
{
return value * bips.value() / tenthBipsPerUnity.value();
}
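The worked example in the bipsPerUnity comment can be checked directly against these helpers; the following compile-time snippet is illustrative only and uses the types declared above:

// 999 XRP of debt is 999,000,000 drops; 1,000 bps (10%) of that is
// 99,900,000 drops, i.e. 99.9 XRP.
static_assert(
    bipsOfValue(std::uint64_t{999'000'000}, Bips32{1'000}) == 99'900'000);
// 50% expressed in basis points.
static_assert(percentageToBips(50) == Bips32{5'000});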
namespace Lending {
/** The maximum management fee rate allowed by a loan broker in 1/10 bips.
Valid values are between 0 and 10% inclusive.
*/
TenthBips16 constexpr maxManagementFeeRate(
unsafe_cast<std::uint16_t>(percentageToTenthBips(10).value()));
static_assert(maxManagementFeeRate == TenthBips16(std::uint16_t(10'000u)));
/** The maximum coverage rate required of a loan broker in 1/10 bips.
Valid values are between 0 and 100% inclusive.
*/
TenthBips32 constexpr maxCoverRate = percentageToTenthBips(100);
static_assert(maxCoverRate == TenthBips32(100'000u));
/** The maximum overpayment fee on a loan in 1/10 bips.
*
Valid values are between 0 and 100% inclusive.
*/
TenthBips32 constexpr maxOverpaymentFee = percentageToTenthBips(100);
static_assert(maxOverpaymentFee == TenthBips32(100'000u));
/** Annualized interest rate of the Loan in 1/10 bips.
*
* Valid values are between 0 and 100% inclusive.
*/
TenthBips32 constexpr maxInterestRate = percentageToTenthBips(100);
static_assert(maxInterestRate == TenthBips32(100'000u));
/** The maximum premium added to the interest rate for late payments on a loan
* in 1/10 bips.
*
* Valid values are between 0 and 100% inclusive.
*/
TenthBips32 constexpr maxLateInterestRate = percentageToTenthBips(100);
static_assert(maxLateInterestRate == TenthBips32(100'000u));
/** The maximum close interest rate charged for repaying a loan early in 1/10
* bips.
*
* Valid values are between 0 and 100% inclusive.
*/
TenthBips32 constexpr maxCloseInterestRate = percentageToTenthBips(100);
static_assert(maxCloseInterestRate == TenthBips32(100'000u));
/** The maximum overpayment interest rate charged on loan overpayments in 1/10
* bips.
*
* Valid values are between 0 and 100% inclusive.
*/
TenthBips32 constexpr maxOverpaymentInterestRate = percentageToTenthBips(100);
static_assert(maxOverpaymentInterestRate == TenthBips32(100'000u));
/** LoanPay transaction cost will be one base fee per X combined payments
*
* The number of payments is estimated based on the Amount paid and the Loan's
* Fixed Payment size. Overpayments (indicated with the tfLoanOverpayment flag)
* count as one more payment.
*
* This number was chosen arbitrarily, but should not be changed once released
* without an amendment
*/
static constexpr int loanPaymentsPerFeeIncrement = 5;
/** Maximum number of combined payments that a LoanPay transaction will process
*
* This limit is enforced during the loan payment process, and thus is not
* estimated. If the limit is hit, no further payments or overpayments will be
* processed, no matter how much of the transaction Amount is left, but the
* transaction will succeed with the payments that have been processed up to
* that point.
*
* This limit is independent of loanPaymentsPerFeeIncrement, so a transaction
* could potentially be charged for many more payments than actually get
* processed. Users should take care not to submit a transaction paying more
* than loanMaximumPaymentsPerTransaction * Loan.PeriodicPayment. Because
* overpayments are charged as a payment, if submitting
* loanMaximumPaymentsPerTransaction * Loan.PeriodicPayment, users should not
* set the tfLoanOverpayment flag.
*
* Even though they're independent, loanMaximumPaymentsPerTransaction should be
* a multiple of loanPaymentsPerFeeIncrement.
*
* This number was chosen arbitrarily, but should not be changed once released
* without an amendment
*/
static constexpr int loanMaximumPaymentsPerTransaction = 100;
} // namespace Lending
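As a rough illustration of the fee scaling described for loanPaymentsPerFeeIncrement (and only that; the real fee computation lives in the transactors, which are not part of this header), charging one base fee per started group of payments amounts to a ceiling division. The helper below is hypothetical and not part of the codebase:

// Hypothetical: base-fee units for a LoanPay covering `payments` periodic
// payments, counting an overpayment (tfLoanOverpayment) as one more payment.
constexpr int
estimatedLoanPayFeeUnits(int payments, bool overpayment)
{
    int const counted = payments + (overpayment ? 1 : 0);
    return (counted + Lending::loanPaymentsPerFeeIncrement - 1) /
        Lending::loanPaymentsPerFeeIncrement;
}
static_assert(estimatedLoanPayFeeUnits(5, false) == 1);
static_assert(estimatedLoanPayFeeUnits(5, true) == 2);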
/** The maximum length of a URI inside an NFT */ /** The maximum length of a URI inside an NFT */
std::size_t constexpr maxTokenURILength = 256; std::size_t constexpr maxTokenURILength = 256;


@@ -72,10 +72,8 @@ class STCurrency;
STYPE(STI_VL, 7) \ STYPE(STI_VL, 7) \
STYPE(STI_ACCOUNT, 8) \ STYPE(STI_ACCOUNT, 8) \
STYPE(STI_NUMBER, 9) \ STYPE(STI_NUMBER, 9) \
STYPE(STI_INT32, 10) \
STYPE(STI_INT64, 11) \
\ \
/* 12-13 are reserved */ \ /* 10-13 are reserved */ \
STYPE(STI_OBJECT, 14) \ STYPE(STI_OBJECT, 14) \
STYPE(STI_ARRAY, 15) \ STYPE(STI_ARRAY, 15) \
\ \
@@ -139,8 +137,8 @@ field_code(int id, int index)
SFields are created at compile time. SFields are created at compile time.
Each SField, once constructed, lives until program termination, and there Each SField, once constructed, lives until program termination, and there
is only one instance per fieldType/fieldValue pair which serves the is only one instance per fieldType/fieldValue pair which serves the entire
entire application. application.
*/ */
class SField class SField
{ {
@@ -358,9 +356,6 @@ using SF_UINT256 = TypedField<STBitString<256>>;
using SF_UINT384 = TypedField<STBitString<384>>; using SF_UINT384 = TypedField<STBitString<384>>;
using SF_UINT512 = TypedField<STBitString<512>>; using SF_UINT512 = TypedField<STBitString<512>>;
using SF_INT32 = TypedField<STInteger<std::int32_t>>;
using SF_INT64 = TypedField<STInteger<std::int64_t>>;
using SF_ACCOUNT = TypedField<STAccount>; using SF_ACCOUNT = TypedField<STAccount>;
using SF_AMOUNT = TypedField<STAmount>; using SF_AMOUNT = TypedField<STAmount>;
using SF_ISSUE = TypedField<STIssue>; using SF_ISSUE = TypedField<STIssue>;


@@ -66,18 +66,16 @@ public:
static int const cMaxOffset = 80; static int const cMaxOffset = 80;
// Maximum native value supported by the code // Maximum native value supported by the code
constexpr static std::uint64_t cMinValue = 1'000'000'000'000'000ull; static std::uint64_t const cMinValue = 1000000000000000ull;
static_assert(isPowerOfTen(cMinValue)); static std::uint64_t const cMaxValue = 9999999999999999ull;
constexpr static std::uint64_t cMaxValue = cMinValue * 10 - 1; static std::uint64_t const cMaxNative = 9000000000000000000ull;
static_assert(cMaxValue == 9'999'999'999'999'999ull);
constexpr static std::uint64_t cMaxNative = 9'000'000'000'000'000'000ull;
// Max native value on network. // Max native value on network.
constexpr static std::uint64_t cMaxNativeN = 100'000'000'000'000'000ull; static std::uint64_t const cMaxNativeN = 100000000000000000ull;
constexpr static std::uint64_t cIssuedCurrency = 0x8'000'000'000'000'000ull; static std::uint64_t const cIssuedCurrency = 0x8000000000000000ull;
constexpr static std::uint64_t cPositive = 0x4'000'000'000'000'000ull; static std::uint64_t const cPositive = 0x4000000000000000ull;
constexpr static std::uint64_t cMPToken = 0x2'000'000'000'000'000ull; static std::uint64_t const cMPToken = 0x2000000000000000ull;
constexpr static std::uint64_t cValueMask = ~(cPositive | cMPToken); static std::uint64_t const cValueMask = ~(cPositive | cMPToken);
static std::uint64_t const uRateOne; static std::uint64_t const uRateOne;
@@ -176,9 +174,6 @@ public:
int int
exponent() const noexcept; exponent() const noexcept;
bool
integral() const noexcept;
bool bool
native() const noexcept; native() const noexcept;
@@ -459,12 +454,6 @@ STAmount::exponent() const noexcept
return mOffset; return mOffset;
} }
inline bool
STAmount::integral() const noexcept
{
return mAsset.integral();
}
inline bool inline bool
STAmount::native() const noexcept STAmount::native() const noexcept
{ {
@@ -583,7 +572,7 @@ STAmount::clear()
{ {
// The -100 is used to allow 0 to sort less than a small positive values // The -100 is used to allow 0 to sort less than a small positive values
// which have a negative exponent. // which have a negative exponent.
mOffset = integral() ? 0 : -100; mOffset = native() ? 0 : -100;
mValue = 0; mValue = 0;
mIsNegative = false; mIsNegative = false;
} }
@@ -706,53 +695,6 @@ divRoundStrict(
std::uint64_t std::uint64_t
getRate(STAmount const& offerOut, STAmount const& offerIn); getRate(STAmount const& offerOut, STAmount const& offerIn);
/** Round an arbitrary precision Amount to the precision of an STAmount that has
* a given exponent.
*
* This is used to ensure that calculations involving IOU amounts do not collect
* dust beyond the precision of the reference value.
*
* @param value The value to be rounded
* @param scale An exponent value to establish the precision limit of
* `value`. Should be larger than `value.exponent()`.
* @param rounding Optional Number rounding mode
*
*/
STAmount
roundToScale(
STAmount const& value,
std::int32_t scale,
Number::rounding_mode rounding = Number::getround());
/** Round an arbitrary precision Number to the precision of a given Asset.
*
* This is used to ensure that calculations do not collect dust beyond the
* precision of the reference value for IOUs, or fractional amounts for the
* integral types XRP and MPT.
*
* @param asset The relevant asset
* @param value The value to be rounded
* @param scale Only relevant to IOU assets. An exponent value to establish the
* precision limit of `value`. Should be larger than `value.exponent()`.
* @param rounding Optional Number rounding mode
*/
template <AssetType A>
Number
roundToAsset(
A const& asset,
Number const& value,
std::int32_t scale,
Number::rounding_mode rounding = Number::getround())
{
NumberRoundModeGuard mg(rounding);
STAmount const ret{asset, value};
if (ret.integral())
return ret;
// Note that the ctor will round integral types (XRP, MPT) via canonicalize,
// so no extra work is needed for those.
return roundToScale(ret, scale);
}
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
inline bool inline bool

View File

@@ -81,8 +81,6 @@ using STUInt16 = STInteger<std::uint16_t>;
using STUInt32 = STInteger<std::uint32_t>; using STUInt32 = STInteger<std::uint32_t>;
using STUInt64 = STInteger<std::uint64_t>; using STUInt64 = STInteger<std::uint64_t>;
using STInt32 = STInteger<std::int32_t>;
template <typename Integer> template <typename Integer>
inline STInteger<Integer>::STInteger(Integer v) : value_(v) inline STInteger<Integer>::STInteger(Integer v) : value_(v)
{ {


@@ -231,8 +231,6 @@ public:
getFieldH192(SField const& field) const; getFieldH192(SField const& field) const;
uint256 uint256
getFieldH256(SField const& field) const; getFieldH256(SField const& field) const;
std::int32_t
getFieldI32(SField const& field) const;
AccountID AccountID
getAccountID(SField const& field) const; getAccountID(SField const& field) const;
@@ -244,9 +242,6 @@ public:
getFieldPathSet(SField const& field) const; getFieldPathSet(SField const& field) const;
STVector256 const& STVector256 const&
getFieldV256(SField const& field) const; getFieldV256(SField const& field) const;
// If not found, returns an object constructed with the given field
STObject
getFieldObject(SField const& field) const;
STArray const& STArray const&
getFieldArray(SField const& field) const; getFieldArray(SField const& field) const;
STCurrency const& STCurrency const&
@@ -370,8 +365,6 @@ public:
void void
setFieldH256(SField const& field, uint256 const&); setFieldH256(SField const& field, uint256 const&);
void void
setFieldI32(SField const& field, std::int32_t);
void
setFieldVL(SField const& field, Blob const&); setFieldVL(SField const& field, Blob const&);
void void
setFieldVL(SField const& field, Slice const&); setFieldVL(SField const& field, Slice const&);
@@ -393,8 +386,6 @@ public:
setFieldV256(SField const& field, STVector256 const& v); setFieldV256(SField const& field, STVector256 const& v);
void void
setFieldArray(SField const& field, STArray const& v); setFieldArray(SField const& field, STArray const& v);
void
setFieldObject(SField const& field, STObject const& v);
template <class Tag> template <class Tag>
void void
@@ -501,8 +492,6 @@ public:
value_type value_type
operator*() const; operator*() const;
/// Do not use operator->() unless the field is required, or you've checked
/// that it's set.
T const* T const*
operator->() const; operator->() const;
@@ -526,26 +515,7 @@ protected:
// Constraint += and -= ValueProxy operators // Constraint += and -= ValueProxy operators
// to value types that support arithmetic operations // to value types that support arithmetic operations
template <typename U> template <typename U>
concept IsArithmeticNumber = std::is_arithmetic_v<U> || concept IsArithmetic = std::is_arithmetic_v<U> || std::is_same_v<U, STAmount>;
std::is_same_v<U, Number> || std::is_same_v<U, STAmount>;
template <
typename U,
typename Value = typename U::value_type,
typename Unit = typename U::unit_type>
concept IsArithmeticValueUnit =
std::is_same_v<U, unit::ValueUnit<Unit, Value>> &&
IsArithmeticNumber<Value> && std::is_class_v<Unit>;
template <typename U, typename Value = typename U::value_type>
concept IsArithmeticST = !IsArithmeticValueUnit<U> && IsArithmeticNumber<Value>;
template <typename U>
concept IsArithmetic =
IsArithmeticNumber<U> || IsArithmeticST<U> || IsArithmeticValueUnit<U>;
template <class T, class U>
concept Addable = requires(T t, U u) { t = t + u; };
template <typename T, typename U>
concept IsArithmeticCompatible =
IsArithmetic<typename T::value_type> && Addable<typename T::value_type, U>;
template <class T> template <class T>
class STObject::ValueProxy : public Proxy<T> class STObject::ValueProxy : public Proxy<T>
@@ -565,12 +535,10 @@ public:
// Convenience operators for value types supporting // Convenience operators for value types supporting
// arithmetic operations // arithmetic operations
template <IsArithmetic U> template <IsArithmetic U>
requires IsArithmeticCompatible<T, U>
ValueProxy& ValueProxy&
operator+=(U const& u); operator+=(U const& u);
template <IsArithmetic U> template <IsArithmetic U>
requires IsArithmeticCompatible<T, U>
ValueProxy& ValueProxy&
operator-=(U const& u); operator-=(U const& u);
@@ -760,8 +728,6 @@ STObject::Proxy<T>::operator*() const -> value_type
return this->value(); return this->value();
} }
/// Do not use operator->() unless the field is required, or you've checked that
/// it's set.
template <class T> template <class T>
T const* T const*
STObject::Proxy<T>::operator->() const STObject::Proxy<T>::operator->() const
@@ -808,7 +774,6 @@ STObject::ValueProxy<T>::operator=(U&& u)
template <typename T> template <typename T>
template <IsArithmetic U> template <IsArithmetic U>
requires IsArithmeticCompatible<T, U>
STObject::ValueProxy<T>& STObject::ValueProxy<T>&
STObject::ValueProxy<T>::operator+=(U const& u) STObject::ValueProxy<T>::operator+=(U const& u)
{ {
@@ -818,7 +783,6 @@ STObject::ValueProxy<T>::operator+=(U const& u)
template <class T> template <class T>
template <IsArithmetic U> template <IsArithmetic U>
requires IsArithmeticCompatible<T, U>
STObject::ValueProxy<T>& STObject::ValueProxy<T>&
STObject::ValueProxy<T>::operator-=(U const& u) STObject::ValueProxy<T>::operator-=(U const& u)
{ {


@@ -87,14 +87,8 @@ public:
getFullText() const override; getFullText() const override;
// Outer transaction functions / signature functions. // Outer transaction functions / signature functions.
static Blob
getSignature(STObject const& sigObject);
Blob Blob
getSignature() const getSignature() const;
{
return getSignature(*this);
}
uint256 uint256
getSigningHash() const; getSigningHash() const;
@@ -125,20 +119,13 @@ public:
getJson(JsonOptions options, bool binary) const; getJson(JsonOptions options, bool binary) const;
void void
sign( sign(PublicKey const& publicKey, SecretKey const& secretKey);
PublicKey const& publicKey,
SecretKey const& secretKey,
std::optional<std::reference_wrapper<SField const>> signatureTarget =
{});
enum class RequireFullyCanonicalSig : bool { no, yes };
/** Check the signature. /** Check the signature.
@param requireCanonicalSig If `true`, check that the signature is fully
canonical. If `false`, only check that the signature is valid.
@param rules The current ledger rules.
@return `true` if valid signature. If invalid, the error message string. @return `true` if valid signature. If invalid, the error message string.
*/ */
enum class RequireFullyCanonicalSig : bool { no, yes };
Expected<void, std::string> Expected<void, std::string>
checkSign(RequireFullyCanonicalSig requireCanonicalSig, Rules const& rules) checkSign(RequireFullyCanonicalSig requireCanonicalSig, Rules const& rules)
const; const;
@@ -163,34 +150,17 @@ public:
char status, char status,
std::string const& escapedMetaData) const; std::string const& escapedMetaData) const;
std::vector<uint256> const& std::vector<uint256>
getBatchTransactionIDs() const; getBatchTransactionIDs() const;
private: private:
/** Check the signature.
@param requireCanonicalSig If `true`, check that the signature is fully
canonical. If `false`, only check that the signature is valid.
@param rules The current ledger rules.
@param sigObject Reference to object that contains the signature fields.
Will be *this more often than not.
@return `true` if valid signature. If invalid, the error message string.
*/
Expected<void, std::string> Expected<void, std::string>
checkSign( checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const;
RequireFullyCanonicalSig requireCanonicalSig,
Rules const& rules,
STObject const& sigObject) const;
Expected<void, std::string>
checkSingleSign(
RequireFullyCanonicalSig requireCanonicalSig,
STObject const& sigObject) const;
Expected<void, std::string> Expected<void, std::string>
checkMultiSign( checkMultiSign(
RequireFullyCanonicalSig requireCanonicalSig, RequireFullyCanonicalSig requireCanonicalSig,
Rules const& rules, Rules const& rules) const;
STObject const& sigObject) const;
Expected<void, std::string> Expected<void, std::string>
checkBatchSingleSign( checkBatchSingleSign(
@@ -209,7 +179,7 @@ private:
move(std::size_t n, void* buf) override; move(std::size_t n, void* buf) override;
friend class detail::STVar; friend class detail::STVar;
mutable std::vector<uint256> batchTxnIds_; mutable std::vector<uint256> batch_txn_ids_;
}; };
bool bool


@@ -673,8 +673,7 @@ isTerRetry(TER x) noexcept
inline bool inline bool
isTesSuccess(TER x) noexcept isTesSuccess(TER x) noexcept
{ {
// Makes use of TERSubset::operator bool() return (x == tesSUCCESS);
return !(x);
} }
inline bool inline bool


@@ -156,14 +156,14 @@ constexpr std::uint32_t const tfMPTokenIssuanceCreateMask =
// MPTokenIssuanceCreate MutableFlags: // MPTokenIssuanceCreate MutableFlags:
// Indicating specific fields or flags may be changed after issuance. // Indicating specific fields or flags may be changed after issuance.
constexpr std::uint32_t const tmfMPTCanMutateCanLock = lsmfMPTCanMutateCanLock; constexpr std::uint32_t const tmfMPTCanMutateCanLock = lmfMPTCanMutateCanLock;
constexpr std::uint32_t const tmfMPTCanMutateRequireAuth = lsmfMPTCanMutateRequireAuth; constexpr std::uint32_t const tmfMPTCanMutateRequireAuth = lmfMPTCanMutateRequireAuth;
constexpr std::uint32_t const tmfMPTCanMutateCanEscrow = lsmfMPTCanMutateCanEscrow; constexpr std::uint32_t const tmfMPTCanMutateCanEscrow = lmfMPTCanMutateCanEscrow;
constexpr std::uint32_t const tmfMPTCanMutateCanTrade = lsmfMPTCanMutateCanTrade; constexpr std::uint32_t const tmfMPTCanMutateCanTrade = lmfMPTCanMutateCanTrade;
constexpr std::uint32_t const tmfMPTCanMutateCanTransfer = lsmfMPTCanMutateCanTransfer; constexpr std::uint32_t const tmfMPTCanMutateCanTransfer = lmfMPTCanMutateCanTransfer;
constexpr std::uint32_t const tmfMPTCanMutateCanClawback = lsmfMPTCanMutateCanClawback; constexpr std::uint32_t const tmfMPTCanMutateCanClawback = lmfMPTCanMutateCanClawback;
constexpr std::uint32_t const tmfMPTCanMutateMetadata = lsmfMPTCanMutateMetadata; constexpr std::uint32_t const tmfMPTCanMutateMetadata = lmfMPTCanMutateMetadata;
constexpr std::uint32_t const tmfMPTCanMutateTransferFee = lsmfMPTCanMutateTransferFee; constexpr std::uint32_t const tmfMPTCanMutateTransferFee = lmfMPTCanMutateTransferFee;
constexpr std::uint32_t const tmfMPTokenIssuanceCreateMutableMask = constexpr std::uint32_t const tmfMPTokenIssuanceCreateMutableMask =
~(tmfMPTCanMutateCanLock | tmfMPTCanMutateRequireAuth | tmfMPTCanMutateCanEscrow | tmfMPTCanMutateCanTrade ~(tmfMPTCanMutateCanLock | tmfMPTCanMutateRequireAuth | tmfMPTCanMutateCanEscrow | tmfMPTCanMutateCanTrade
| tmfMPTCanMutateCanTransfer | tmfMPTCanMutateCanClawback | tmfMPTCanMutateMetadata | tmfMPTCanMutateTransferFee); | tmfMPTCanMutateCanTransfer | tmfMPTCanMutateCanClawback | tmfMPTCanMutateMetadata | tmfMPTCanMutateTransferFee);
@@ -285,32 +285,6 @@ constexpr std::uint32_t tfIndependent = 0x00080000;
constexpr std::uint32_t const tfBatchMask = constexpr std::uint32_t const tfBatchMask =
~(tfUniversal | tfAllOrNothing | tfOnlyOne | tfUntilFailure | tfIndependent) | tfInnerBatchTxn; ~(tfUniversal | tfAllOrNothing | tfOnlyOne | tfUntilFailure | tfIndependent) | tfInnerBatchTxn;
// LoanSet and LoanPay flags:
// LoanSet: True, indicates the loan supports overpayments
// LoanPay: True, indicates any excess in this payment can be used
// as an overpayment. False, no overpayments will be taken.
constexpr std::uint32_t const tfLoanOverpayment = 0x00010000;
// LoanPay exclusive flags:
// tfLoanFullPayment: True, indicates that the payment is an early
// full payment. It must pay the entire loan including close
// interest and fees, or it will fail. False: Not a full payment.
constexpr std::uint32_t const tfLoanFullPayment = 0x00020000;
// tfLoanLatePayment: True, indicates that the payment is late,
// and includes late interest and fees. If the loan is not late,
// it will fail. False: not a late payment. If the current payment
// is overdue, the transaction will fail.
constexpr std::uint32_t const tfLoanLatePayment = 0x00040000;
constexpr std::uint32_t const tfLoanSetMask = ~(tfUniversal |
tfLoanOverpayment);
constexpr std::uint32_t const tfLoanPayMask = ~(tfUniversal |
tfLoanOverpayment | tfLoanFullPayment | tfLoanLatePayment);
// LoanManage flags:
constexpr std::uint32_t const tfLoanDefault = 0x00010000;
constexpr std::uint32_t const tfLoanImpair = 0x00020000;
constexpr std::uint32_t const tfLoanUnimpair = 0x00040000;
constexpr std::uint32_t const tfLoanManageMask = ~(tfUniversal | tfLoanDefault | tfLoanImpair | tfLoanUnimpair);
// clang-format on // clang-format on
} // namespace ripple } // namespace ripple


@@ -129,12 +129,10 @@ inplace_bigint_div_rem(std::span<uint64_t> numerator, std::uint64_t divisor)
{ {
// should never happen, but if it does then it seems natural to define // should never happen, but if it does then it seems natural to define
// the a null set of numbers to be zero, so the remainder is also zero. // the a null set of numbers to be zero, so the remainder is also zero.
// LCOV_EXCL_START
UNREACHABLE( UNREACHABLE(
"ripple::b58_fast::detail::inplace_bigint_div_rem : empty " "ripple::b58_fast::detail::inplace_bigint_div_rem : empty "
"numerator"); "numerator");
return 0; return 0;
// LCOV_EXCL_STOP
} }
auto to_u128 = [](std::uint64_t high, auto to_u128 = [](std::uint64_t high,


@@ -27,19 +27,17 @@
#error "undefined macro: XRPL_RETIRE" #error "undefined macro: XRPL_RETIRE"
#endif #endif
// clang-format off
// Add new amendments to the top of this list. // Add new amendments to the top of this list.
// Keep it sorted in reverse chronological order. // Keep it sorted in reverse chronological order.
// If you add an amendment here, then do not forget to increment `numFeatures`
// in include/xrpl/protocol/Feature.h.
XRPL_FEATURE(LendingProtocol, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (IncludeKeyletFields, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (DirectoryLimit, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (IncludeKeyletFields, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (PriceOracleOrder, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (PriceOracleOrder, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (AMMClawbackRounding, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo)
@@ -47,7 +45,7 @@ XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo
XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegation, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
// Check flags in Credential transactions // Check flags in Credential transactions
XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo)
@@ -158,5 +156,3 @@ XRPL_RETIRE(fix1512)
XRPL_RETIRE(fix1523) XRPL_RETIRE(fix1523)
XRPL_RETIRE(fix1528) XRPL_RETIRE(fix1528)
XRPL_RETIRE(FlowCross) XRPL_RETIRE(FlowCross)
// clang-format on

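The maintenance note above (add new amendments at the top, keep reverse chronological order, bump numFeatures) relies on the X-macro pattern: the same table is expanded with different definitions of XRPL_FEATURE and XRPL_FIX. A minimal, hypothetical illustration of that pattern, with made-up feature names and a made-up consumer:

#include <iostream>
#include <string>
#include <vector>

// Hypothetical feature table in the X-macro style.
#define MY_FEATURES(X)        \
    X(LendingProtocol, false) \
    X(DynamicMPT, false)      \
    X(TokenEscrow, true)

struct FeatureRow
{
    std::string name;
    bool supported;
};

static std::vector<FeatureRow>
makeTable()
{
    std::vector<FeatureRow> rows;
#define ADD_ROW(name, supported) rows.push_back({#name, supported});
    MY_FEATURES(ADD_ROW)
#undef ADD_ROW
    return rows;
}

int main()
{
    for (auto const& row : makeTable())
        std::cout << row.name << " supported=" << row.supported << '\n';
}

Counting the rows produced by one such expansion is also how a numFeatures-style constant stays in sync with the list.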
View File

@@ -168,7 +168,6 @@ LEDGER_ENTRY(ltACCOUNT_ROOT, 0x0061, AccountRoot, account, ({
{sfFirstNFTokenSequence, soeOPTIONAL}, {sfFirstNFTokenSequence, soeOPTIONAL},
{sfAMMID, soeOPTIONAL}, // pseudo-account designator {sfAMMID, soeOPTIONAL}, // pseudo-account designator
{sfVaultID, soeOPTIONAL}, // pseudo-account designator {sfVaultID, soeOPTIONAL}, // pseudo-account designator
{sfLoanBrokerID, soeOPTIONAL}, // pseudo-account designator
})) }))
/** A ledger object which contains a list of object identifiers. /** A ledger object which contains a list of object identifiers.
@@ -458,7 +457,7 @@ LEDGER_ENTRY(ltCREDENTIAL, 0x0081, Credential, credential, ({
{sfExpiration, soeOPTIONAL}, {sfExpiration, soeOPTIONAL},
{sfURI, soeOPTIONAL}, {sfURI, soeOPTIONAL},
{sfIssuerNode, soeREQUIRED}, {sfIssuerNode, soeREQUIRED},
{sfSubjectNode, soeOPTIONAL}, {sfSubjectNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED}, {sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED}, {sfPreviousTxnLgrSeq, soeREQUIRED},
})) }))
@@ -499,10 +498,10 @@ LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({
{sfAccount, soeREQUIRED}, {sfAccount, soeREQUIRED},
{sfData, soeOPTIONAL}, {sfData, soeOPTIONAL},
{sfAsset, soeREQUIRED}, {sfAsset, soeREQUIRED},
{sfAssetsTotal, soeDEFAULT}, {sfAssetsTotal, soeREQUIRED},
{sfAssetsAvailable, soeDEFAULT}, {sfAssetsAvailable, soeREQUIRED},
{sfAssetsMaximum, soeDEFAULT}, {sfAssetsMaximum, soeDEFAULT},
{sfLossUnrealized, soeDEFAULT}, {sfLossUnrealized, soeREQUIRED},
{sfShareMPTID, soeREQUIRED}, {sfShareMPTID, soeREQUIRED},
{sfWithdrawalPolicy, soeREQUIRED}, {sfWithdrawalPolicy, soeREQUIRED},
{sfScale, soeDEFAULT}, {sfScale, soeDEFAULT},
@@ -510,117 +509,5 @@ LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({
// no PermissionedDomainID ever (use MPTIssuance.sfDomainID) // no PermissionedDomainID ever (use MPTIssuance.sfDomainID)
})) }))
/** Reserve 0x0084-0x0087 for future Vault-related objects. */
/** A ledger object representing a loan broker
\sa keylet::loanbroker
*/
LEDGER_ENTRY(ltLOAN_BROKER, 0x0088, LoanBroker, loan_broker, ({
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfSequence, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfVaultNode, soeREQUIRED},
{sfVaultID, soeREQUIRED},
{sfAccount, soeREQUIRED},
{sfOwner, soeREQUIRED},
{sfLoanSequence, soeREQUIRED},
{sfData, soeDEFAULT},
{sfManagementFeeRate, soeDEFAULT},
{sfOwnerCount, soeDEFAULT},
{sfDebtTotal, soeDEFAULT},
{sfDebtMaximum, soeDEFAULT},
{sfCoverAvailable, soeDEFAULT},
{sfCoverRateMinimum, soeDEFAULT},
{sfCoverRateLiquidation, soeDEFAULT},
}))
/** A ledger object representing a loan between a Borrower and a Loan Broker
\sa keylet::loan
*/
LEDGER_ENTRY(ltLOAN, 0x0089, Loan, loan, ({
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfLoanBrokerNode, soeREQUIRED},
{sfLoanBrokerID, soeREQUIRED},
{sfLoanSequence, soeREQUIRED},
{sfBorrower, soeREQUIRED},
{sfLoanOriginationFee, soeDEFAULT},
{sfLoanServiceFee, soeDEFAULT},
{sfLatePaymentFee, soeDEFAULT},
{sfClosePaymentFee, soeDEFAULT},
{sfOverpaymentFee, soeDEFAULT},
{sfInterestRate, soeDEFAULT},
{sfLateInterestRate, soeDEFAULT},
{sfCloseInterestRate, soeDEFAULT},
{sfOverpaymentInterestRate, soeDEFAULT},
{sfStartDate, soeREQUIRED},
{sfPaymentInterval, soeREQUIRED},
{sfGracePeriod, soeDEFAULT},
{sfPreviousPaymentDate, soeDEFAULT},
{sfNextPaymentDueDate, soeDEFAULT},
// The loan object tracks these values:
//
// - PaymentRemaining: The number of payments left in the loan. When it
// reaches 0, the loan is paid off, and all other relevant values
// must also be 0.
//
// - PeriodicPayment: The fixed, unrounded amount to be paid each
// interval. Stored with as much precision as possible.
// Payment transactions must round this value *UP*.
//
// - TotalValueOutstanding: The rounded total amount owed by the
// borrower to the lender / vault.
//
// - PrincipalOutstanding: The rounded portion of the
// TotalValueOutstanding that is from the principal borrowed.
//
// - ManagementFeeOutstanding: The rounded portion of the
// TotalValueOutstanding that represents management fees
// specifically owed to the broker based on the initial
// loan parameters.
//
// There are additional values that can be computed from these:
//
// - InterestOutstanding = TotalValueOutstanding - PrincipalOutstanding
// The total amount of interest still pending on the loan,
// independent of management fees.
//
// - InterestOwedToVault = InterestOutstanding - ManagementFeeOutstanding
// The amount of the total interest that is owed to the vault, and
// will be sent to it as part of a payment.
//
// - TrueTotalLoanValue = PaymentRemaining * PeriodicPayment
// The unrounded true total value of the loan.
//
// - TrueTotalPrincipalOutstanding can be computed using the algorithm
// in the ripple::detail::loanPrincipalFromPeriodicPayment function.
//
// - TrueTotalInterestOutstanding = TrueTotalLoanValue -
// TrueTotalPrincipalOutstanding
// The unrounded true total interest remaining.
//
// - TrueTotalManagementFeeOutstanding = TrueTotalInterestOutstanding *
// LoanBroker.ManagementFeeRate
// The unrounded true total fee still owed to the broker.
//
// Note that the "True" values may differ significantly from the tracked
// rounded values.
{sfPaymentRemaining, soeDEFAULT},
{sfPeriodicPayment, soeREQUIRED},
{sfPrincipalOutstanding, soeDEFAULT},
{sfTotalValueOutstanding, soeDEFAULT},
{sfManagementFeeOutstanding, soeDEFAULT},
// Based on the computed total value at creation, used for
// rounding calculated values so they are all on a
// consistent scale - that is, they all have the same
// number of digits after the decimal point (excluding
// trailing zeros).
{sfLoanScale, soeDEFAULT},
}))
#undef EXPAND #undef EXPAND
#undef LEDGER_ENTRY_DUPLICATE #undef LEDGER_ENTRY_DUPLICATE

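The long comment in the ltLOAN entry above documents the tracked fields and the quantities derived from them. A small, illustrative calculation of those relations (plain doubles and made-up numbers, purely to show the arithmetic; the ledger itself stores NUMBER fields):

#include <iostream>

int main()
{
    // Tracked (rounded) values, hypothetical:
    double const totalValueOutstanding = 12500.00;
    double const principalOutstanding = 11800.00;
    double const managementFeeOutstanding = 150.00;
    // Tracked payment schedule, hypothetical:
    double const periodicPayment = 1050.25;  // unrounded per-interval payment
    int const paymentsRemaining = 12;

    // Derived values, per the relations documented in the entry:
    double const interestOutstanding =
        totalValueOutstanding - principalOutstanding;
    double const interestOwedToVault =
        interestOutstanding - managementFeeOutstanding;
    double const trueTotalLoanValue = paymentsRemaining * periodicPayment;

    std::cout << "InterestOutstanding = " << interestOutstanding << '\n'
              << "InterestOwedToVault = " << interestOwedToVault << '\n'
              << "TrueTotalLoanValue  = " << trueTotalLoanValue << '\n';
}

As the comment warns, the "True" values computed from PeriodicPayment can differ from the rounded tracked totals.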
View File

@@ -24,8 +24,6 @@
#error "undefined macro: TYPED_SFIELD" #error "undefined macro: TYPED_SFIELD"
#endif #endif
// clang-format off
// untyped // untyped
UNTYPED_SFIELD(sfLedgerEntry, LEDGERENTRY, 257) UNTYPED_SFIELD(sfLedgerEntry, LEDGERENTRY, 257)
UNTYPED_SFIELD(sfTransaction, TRANSACTION, 257) UNTYPED_SFIELD(sfTransaction, TRANSACTION, 257)
@@ -61,7 +59,6 @@ TYPED_SFIELD(sfHookEmitCount, UINT16, 18)
TYPED_SFIELD(sfHookExecutionIndex, UINT16, 19) TYPED_SFIELD(sfHookExecutionIndex, UINT16, 19)
TYPED_SFIELD(sfHookApiVersion, UINT16, 20) TYPED_SFIELD(sfHookApiVersion, UINT16, 20)
TYPED_SFIELD(sfLedgerFixType, UINT16, 21) TYPED_SFIELD(sfLedgerFixType, UINT16, 21)
TYPED_SFIELD(sfManagementFeeRate, UINT16, 22) // 1/10 basis points (bips)
// 32-bit integers (common) // 32-bit integers (common)
TYPED_SFIELD(sfNetworkID, UINT32, 1) TYPED_SFIELD(sfNetworkID, UINT32, 1)
@@ -118,21 +115,6 @@ TYPED_SFIELD(sfFirstNFTokenSequence, UINT32, 50)
TYPED_SFIELD(sfOracleDocumentID, UINT32, 51) TYPED_SFIELD(sfOracleDocumentID, UINT32, 51)
TYPED_SFIELD(sfPermissionValue, UINT32, 52) TYPED_SFIELD(sfPermissionValue, UINT32, 52)
TYPED_SFIELD(sfMutableFlags, UINT32, 53) TYPED_SFIELD(sfMutableFlags, UINT32, 53)
TYPED_SFIELD(sfStartDate, UINT32, 54)
TYPED_SFIELD(sfPaymentInterval, UINT32, 55)
TYPED_SFIELD(sfGracePeriod, UINT32, 56)
TYPED_SFIELD(sfPreviousPaymentDate, UINT32, 57)
TYPED_SFIELD(sfNextPaymentDueDate, UINT32, 58)
TYPED_SFIELD(sfPaymentRemaining, UINT32, 59)
TYPED_SFIELD(sfPaymentTotal, UINT32, 60)
TYPED_SFIELD(sfLoanSequence, UINT32, 61)
TYPED_SFIELD(sfCoverRateMinimum, UINT32, 62) // 1/10 basis points (bips)
TYPED_SFIELD(sfCoverRateLiquidation, UINT32, 63) // 1/10 basis points (bips)
TYPED_SFIELD(sfOverpaymentFee, UINT32, 64) // 1/10 basis points (bips)
TYPED_SFIELD(sfInterestRate, UINT32, 65) // 1/10 basis points (bips)
TYPED_SFIELD(sfLateInterestRate, UINT32, 66) // 1/10 basis points (bips)
TYPED_SFIELD(sfCloseInterestRate, UINT32, 67) // 1/10 basis points (bips)
TYPED_SFIELD(sfOverpaymentInterestRate, UINT32, 68) // 1/10 basis points (bips)
// 64-bit integers (common) // 64-bit integers (common)
TYPED_SFIELD(sfIndexNext, UINT64, 1) TYPED_SFIELD(sfIndexNext, UINT64, 1)
@@ -164,8 +146,6 @@ TYPED_SFIELD(sfMPTAmount, UINT64, 26, SField::sMD_BaseTen|SFie
TYPED_SFIELD(sfIssuerNode, UINT64, 27) TYPED_SFIELD(sfIssuerNode, UINT64, 27)
TYPED_SFIELD(sfSubjectNode, UINT64, 28) TYPED_SFIELD(sfSubjectNode, UINT64, 28)
TYPED_SFIELD(sfLockedAmount, UINT64, 29, SField::sMD_BaseTen|SField::sMD_Default) TYPED_SFIELD(sfLockedAmount, UINT64, 29, SField::sMD_BaseTen|SField::sMD_Default)
TYPED_SFIELD(sfVaultNode, UINT64, 30)
TYPED_SFIELD(sfLoanBrokerNode, UINT64, 31)
// 128-bit // 128-bit
TYPED_SFIELD(sfEmailHash, UINT128, 1) TYPED_SFIELD(sfEmailHash, UINT128, 1)
@@ -220,9 +200,6 @@ TYPED_SFIELD(sfDomainID, UINT256, 34)
TYPED_SFIELD(sfVaultID, UINT256, 35, TYPED_SFIELD(sfVaultID, UINT256, 35,
SField::sMD_PseudoAccount | SField::sMD_Default) SField::sMD_PseudoAccount | SField::sMD_Default)
TYPED_SFIELD(sfParentBatchID, UINT256, 36) TYPED_SFIELD(sfParentBatchID, UINT256, 36)
TYPED_SFIELD(sfLoanBrokerID, UINT256, 37,
SField::sMD_PseudoAccount | SField::sMD_Default)
TYPED_SFIELD(sfLoanID, UINT256, 38)
// number (common) // number (common)
TYPED_SFIELD(sfNumber, NUMBER, 1) TYPED_SFIELD(sfNumber, NUMBER, 1)
@@ -230,21 +207,6 @@ TYPED_SFIELD(sfAssetsAvailable, NUMBER, 2)
TYPED_SFIELD(sfAssetsMaximum, NUMBER, 3) TYPED_SFIELD(sfAssetsMaximum, NUMBER, 3)
TYPED_SFIELD(sfAssetsTotal, NUMBER, 4) TYPED_SFIELD(sfAssetsTotal, NUMBER, 4)
TYPED_SFIELD(sfLossUnrealized, NUMBER, 5) TYPED_SFIELD(sfLossUnrealized, NUMBER, 5)
TYPED_SFIELD(sfDebtTotal, NUMBER, 6)
TYPED_SFIELD(sfDebtMaximum, NUMBER, 7)
TYPED_SFIELD(sfCoverAvailable, NUMBER, 8)
TYPED_SFIELD(sfLoanOriginationFee, NUMBER, 9)
TYPED_SFIELD(sfLoanServiceFee, NUMBER, 10)
TYPED_SFIELD(sfLatePaymentFee, NUMBER, 11)
TYPED_SFIELD(sfClosePaymentFee, NUMBER, 12)
TYPED_SFIELD(sfPrincipalOutstanding, NUMBER, 13)
TYPED_SFIELD(sfPrincipalRequested, NUMBER, 14)
TYPED_SFIELD(sfTotalValueOutstanding, NUMBER, 15)
TYPED_SFIELD(sfPeriodicPayment, NUMBER, 16)
TYPED_SFIELD(sfManagementFeeOutstanding, NUMBER, 17)
// int32
TYPED_SFIELD(sfLoanScale, INT32, 1)
// currency amount (common) // currency amount (common)
TYPED_SFIELD(sfAmount, AMOUNT, 1) TYPED_SFIELD(sfAmount, AMOUNT, 1)
@@ -340,8 +302,6 @@ TYPED_SFIELD(sfAttestationRewardAccount, ACCOUNT, 21)
TYPED_SFIELD(sfLockingChainDoor, ACCOUNT, 22) TYPED_SFIELD(sfLockingChainDoor, ACCOUNT, 22)
TYPED_SFIELD(sfIssuingChainDoor, ACCOUNT, 23) TYPED_SFIELD(sfIssuingChainDoor, ACCOUNT, 23)
TYPED_SFIELD(sfSubject, ACCOUNT, 24) TYPED_SFIELD(sfSubject, ACCOUNT, 24)
TYPED_SFIELD(sfBorrower, ACCOUNT, 25)
TYPED_SFIELD(sfCounterparty, ACCOUNT, 26)
// vector of 256-bit // vector of 256-bit
TYPED_SFIELD(sfIndexes, VECTOR256, 1, SField::sMD_Never) TYPED_SFIELD(sfIndexes, VECTOR256, 1, SField::sMD_Never)
@@ -405,7 +365,6 @@ UNTYPED_SFIELD(sfCredential, OBJECT, 33)
UNTYPED_SFIELD(sfRawTransaction, OBJECT, 34) UNTYPED_SFIELD(sfRawTransaction, OBJECT, 34)
UNTYPED_SFIELD(sfBatchSigner, OBJECT, 35) UNTYPED_SFIELD(sfBatchSigner, OBJECT, 35)
UNTYPED_SFIELD(sfBook, OBJECT, 36) UNTYPED_SFIELD(sfBook, OBJECT, 36)
UNTYPED_SFIELD(sfCounterpartySignature, OBJECT, 37, SField::sMD_Default, SField::notSigning)
// array of objects (common) // array of objects (common)
// ARRAY/1 is reserved for end of array // ARRAY/1 is reserved for end of array
@@ -440,5 +399,3 @@ UNTYPED_SFIELD(sfAcceptedCredentials, ARRAY, 28)
UNTYPED_SFIELD(sfPermissions, ARRAY, 29) UNTYPED_SFIELD(sfPermissions, ARRAY, 29)
UNTYPED_SFIELD(sfRawTransactions, ARRAY, 30) UNTYPED_SFIELD(sfRawTransactions, ARRAY, 30)
UNTYPED_SFIELD(sfBatchSigners, ARRAY, 31, SField::sMD_Default, SField::notSigning) UNTYPED_SFIELD(sfBatchSigners, ARRAY, 31, SField::sMD_Default, SField::notSigning)
// clang-format on

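Several of the removed fields above (sfManagementFeeRate, sfCoverRateMinimum, sfInterestRate, and the related rates) are documented as being stored in 1/10 basis points. A short sketch of that unit conversion, with a hypothetical helper name:

#include <cstdint>
#include <iostream>

// 1 basis point = 0.01%, so 1/10 basis point = 0.001% = 1e-5.
constexpr double
tenthBipsToFraction(std::uint32_t tenthBips)
{
    return tenthBips * 1e-5;
}

int main()
{
    std::uint32_t const coverRateMinimum = 5000;  // hypothetical stored value
    std::cout << coverRateMinimum << " tenth-bips = "
              << tenthBipsToFraction(coverRateMinimum) << " (i.e. 5%)\n";
}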
View File

@@ -851,7 +851,7 @@ TRANSACTION(ttDELEGATE_SET, 64, DelegateSet,
TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, TRANSACTION(ttVAULT_CREATE, 65, VaultCreate,
Delegation::delegatable, Delegation::delegatable,
featureSingleAssetVault, featureSingleAssetVault,
createPseudoAcct | createMPTIssuance | mustModifyVault, createPseudoAcct | createMPTIssuance,
({ ({
{sfAsset, soeREQUIRED, soeMPTSupported}, {sfAsset, soeREQUIRED, soeMPTSupported},
{sfAssetsMaximum, soeOPTIONAL}, {sfAssetsMaximum, soeOPTIONAL},
@@ -869,7 +869,7 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate,
TRANSACTION(ttVAULT_SET, 66, VaultSet, TRANSACTION(ttVAULT_SET, 66, VaultSet,
Delegation::delegatable, Delegation::delegatable,
featureSingleAssetVault, featureSingleAssetVault,
mustModifyVault, noPriv,
({ ({
{sfVaultID, soeREQUIRED}, {sfVaultID, soeREQUIRED},
{sfAssetsMaximum, soeOPTIONAL}, {sfAssetsMaximum, soeOPTIONAL},
@@ -884,7 +884,7 @@ TRANSACTION(ttVAULT_SET, 66, VaultSet,
TRANSACTION(ttVAULT_DELETE, 67, VaultDelete, TRANSACTION(ttVAULT_DELETE, 67, VaultDelete,
Delegation::delegatable, Delegation::delegatable,
featureSingleAssetVault, featureSingleAssetVault,
mustDeleteAcct | destroyMPTIssuance | mustModifyVault, mustDeleteAcct | destroyMPTIssuance,
({ ({
{sfVaultID, soeREQUIRED}, {sfVaultID, soeREQUIRED},
})) }))
@@ -896,7 +896,7 @@ TRANSACTION(ttVAULT_DELETE, 67, VaultDelete,
TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit, TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit,
Delegation::delegatable, Delegation::delegatable,
featureSingleAssetVault, featureSingleAssetVault,
mayAuthorizeMPT | mustModifyVault, mayAuthorizeMPT,
({ ({
{sfVaultID, soeREQUIRED}, {sfVaultID, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported}, {sfAmount, soeREQUIRED, soeMPTSupported},
@@ -909,7 +909,7 @@ TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit,
TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw,
Delegation::delegatable, Delegation::delegatable,
featureSingleAssetVault, featureSingleAssetVault,
mayDeleteMPT | mayAuthorizeMPT | mustModifyVault, mayDeleteMPT,
({ ({
{sfVaultID, soeREQUIRED}, {sfVaultID, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported}, {sfAmount, soeREQUIRED, soeMPTSupported},
@@ -924,7 +924,7 @@ TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw,
TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback, TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback,
Delegation::delegatable, Delegation::delegatable,
featureSingleAssetVault, featureSingleAssetVault,
mayDeleteMPT | mustModifyVault, mayDeleteMPT,
({ ({
{sfVaultID, soeREQUIRED}, {sfVaultID, soeREQUIRED},
{sfHolder, soeREQUIRED}, {sfHolder, soeREQUIRED},
@@ -944,139 +944,6 @@ TRANSACTION(ttBATCH, 71, Batch,
{sfBatchSigners, soeOPTIONAL}, {sfBatchSigners, soeOPTIONAL},
})) }))
/** Reserve 72-73 for future Vault-related transactions */
/** This transaction creates and updates a Loan Broker */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/LoanBrokerSet.h>
#endif
TRANSACTION(ttLOAN_BROKER_SET, 74, LoanBrokerSet,
Delegation::delegatable,
featureLendingProtocol,
createPseudoAcct | mayAuthorizeMPT, ({
{sfVaultID, soeREQUIRED},
{sfLoanBrokerID, soeOPTIONAL},
{sfData, soeOPTIONAL},
{sfManagementFeeRate, soeOPTIONAL},
{sfDebtMaximum, soeOPTIONAL},
{sfCoverRateMinimum, soeOPTIONAL},
{sfCoverRateLiquidation, soeOPTIONAL},
}))
/** This transaction deletes a Loan Broker */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/LoanBrokerDelete.h>
#endif
TRANSACTION(ttLOAN_BROKER_DELETE, 75, LoanBrokerDelete,
Delegation::delegatable,
featureLendingProtocol,
mustDeleteAcct | mayAuthorizeMPT, ({
{sfLoanBrokerID, soeREQUIRED},
}))
/** This transaction deposits First Loss Capital into a Loan Broker */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/LoanBrokerCoverDeposit.h>
#endif
TRANSACTION(ttLOAN_BROKER_COVER_DEPOSIT, 76, LoanBrokerCoverDeposit,
Delegation::delegatable,
featureLendingProtocol,
noPriv, ({
{sfLoanBrokerID, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported},
}))
/** This transaction withdraws First Loss Capital from a Loan Broker */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/LoanBrokerCoverWithdraw.h>
#endif
TRANSACTION(ttLOAN_BROKER_COVER_WITHDRAW, 77, LoanBrokerCoverWithdraw,
Delegation::delegatable,
featureLendingProtocol,
mayAuthorizeMPT, ({
{sfLoanBrokerID, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported},
{sfDestination, soeOPTIONAL},
{sfDestinationTag, soeOPTIONAL},
}))
/** This transaction claws back First Loss Capital from a Loan Broker to
the issuer of the capital */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/LoanBrokerCoverClawback.h>
#endif
TRANSACTION(ttLOAN_BROKER_COVER_CLAWBACK, 78, LoanBrokerCoverClawback,
Delegation::delegatable,
featureLendingProtocol,
noPriv, ({
{sfLoanBrokerID, soeOPTIONAL},
{sfAmount, soeOPTIONAL, soeMPTSupported},
}))
/** This transaction creates a Loan */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/LoanSet.h>
#endif
TRANSACTION(ttLOAN_SET, 80, LoanSet,
Delegation::delegatable,
featureLendingProtocol,
mayAuthorizeMPT | mustModifyVault, ({
{sfLoanBrokerID, soeREQUIRED},
{sfData, soeOPTIONAL},
{sfCounterparty, soeOPTIONAL},
{sfCounterpartySignature, soeOPTIONAL},
{sfLoanOriginationFee, soeOPTIONAL},
{sfLoanServiceFee, soeOPTIONAL},
{sfLatePaymentFee, soeOPTIONAL},
{sfClosePaymentFee, soeOPTIONAL},
{sfOverpaymentFee, soeOPTIONAL},
{sfInterestRate, soeOPTIONAL},
{sfLateInterestRate, soeOPTIONAL},
{sfCloseInterestRate, soeOPTIONAL},
{sfOverpaymentInterestRate, soeOPTIONAL},
{sfPrincipalRequested, soeREQUIRED},
{sfPaymentTotal, soeOPTIONAL},
{sfPaymentInterval, soeOPTIONAL},
{sfGracePeriod, soeOPTIONAL},
}))
/** This transaction deletes an existing Loan */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/LoanDelete.h>
#endif
TRANSACTION(ttLOAN_DELETE, 81, LoanDelete,
Delegation::delegatable,
featureLendingProtocol,
noPriv, ({
{sfLoanID, soeREQUIRED},
}))
/** This transaction is used to change the delinquency status of an existing Loan */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/LoanManage.h>
#endif
TRANSACTION(ttLOAN_MANAGE, 82, LoanManage,
Delegation::delegatable,
featureLendingProtocol,
// All of the LoanManage options will modify the vault, but the
// transaction can succeed without options, essentially making it
// a noop.
mayModifyVault, ({
{sfLoanID, soeREQUIRED},
}))
/** The Borrower uses this transaction to make a Payment on the Loan. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/LoanPay.h>
#endif
TRANSACTION(ttLOAN_PAY, 84, LoanPay,
Delegation::delegatable,
featureLendingProtocol,
mayAuthorizeMPT | mustModifyVault, ({
{sfLoanID, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported},
}))
/** This system-generated transaction type is used to update the status of the various amendments. /** This system-generated transaction type is used to update the status of the various amendments.
For details, see: https://xrpl.org/amendments.html For details, see: https://xrpl.org/amendments.html

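The loan transactions above combine privilege markers such as createPseudoAcct | mayAuthorizeMPT | mustModifyVault in their TRANSACTION entries. The sketch below is only an illustration of how such markers can be modelled as bit flags and tested at dispatch time; the names are copied from the table, but the definitions and the check are invented:

#include <cstdint>
#include <iostream>

enum TxPrivilege : std::uint32_t {
    noPriv = 0,
    createPseudoAcct = 1u << 0,
    mustDeleteAcct = 1u << 1,
    mayAuthorizeMPT = 1u << 2,
    mayModifyVault = 1u << 3,
    mustModifyVault = 1u << 4,
};

int main()
{
    std::uint32_t const loanPayPriv = mayAuthorizeMPT | mustModifyVault;
    if (loanPayPriv & mustModifyVault)
        std::cout << "LoanPay is expected to touch the vault\n";
}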
View File

@@ -59,8 +59,6 @@ JSS(BaseAsset); // in: Oracle
JSS(BidMax); // in: AMM Bid JSS(BidMax); // in: AMM Bid
JSS(BidMin); // in: AMM Bid JSS(BidMin); // in: AMM Bid
JSS(ClearFlag); // field. JSS(ClearFlag); // field.
JSS(Counterparty); // field.
JSS(CounterpartySignature);// field.
JSS(DeliverMax); // out: alias to Amount JSS(DeliverMax); // out: alias to Amount
JSS(DeliverMin); // in: TransactionSign JSS(DeliverMin); // in: TransactionSign
JSS(Destination); // in: TransactionSign; field. JSS(Destination); // in: TransactionSign; field.
@@ -394,8 +392,6 @@ JSS(load_factor_local); // out: NetworkOPs
JSS(load_factor_net); // out: NetworkOPs JSS(load_factor_net); // out: NetworkOPs
JSS(load_factor_server); // out: NetworkOPs JSS(load_factor_server); // out: NetworkOPs
JSS(load_fee); // out: LoadFeeTrackImp, NetworkOPs JSS(load_fee); // out: LoadFeeTrackImp, NetworkOPs
JSS(loan_broker_id); // in: LedgerEntry
JSS(loan_seq); // in: LedgerEntry
JSS(local); // out: resource/Logic.h JSS(local); // out: resource/Logic.h
JSS(local_txs); // out: GetCounts JSS(local_txs); // out: GetCounts
JSS(local_static_keys); // out: ValidatorList JSS(local_static_keys); // out: ValidatorList
@@ -508,7 +504,6 @@ JSS(propose_seq); // out: LedgerPropose
JSS(proposers); // out: NetworkOPs, LedgerConsensus JSS(proposers); // out: NetworkOPs, LedgerConsensus
JSS(protocol); // out: NetworkOPs, PeerImp JSS(protocol); // out: NetworkOPs, PeerImp
JSS(proxied); // out: RPC ping JSS(proxied); // out: RPC ping
JSS(pseudo_account); // out: AccountInfo
JSS(pubkey_node); // out: NetworkOPs JSS(pubkey_node); // out: NetworkOPs
JSS(pubkey_publisher); // out: ValidatorList JSS(pubkey_publisher); // out: ValidatorList
JSS(pubkey_validator); // out: NetworkOPs, ValidatorList JSS(pubkey_validator); // out: NetworkOPs, ValidatorList
@@ -574,7 +569,6 @@ JSS(settle_delay); // out: AccountChannels
JSS(severity); // in: LogLevel JSS(severity); // in: LogLevel
JSS(shares); // out: VaultInfo JSS(shares); // out: VaultInfo
JSS(signature); // out: NetworkOPs, ChannelAuthorize JSS(signature); // out: NetworkOPs, ChannelAuthorize
JSS(signature_target); // in: TransactionSign
JSS(signature_verified); // out: ChannelVerify JSS(signature_verified); // out: ChannelVerify
JSS(signing_key); // out: NetworkOPs JSS(signing_key); // out: NetworkOPs
JSS(signing_keys); // out: ValidatorList JSS(signing_keys); // out: ValidatorList

View File

@@ -436,12 +436,10 @@ public:
admin_.erase(admin_.iterator_to(entry)); admin_.erase(admin_.iterator_to(entry));
break; break;
default: default:
// LCOV_EXCL_START
UNREACHABLE( UNREACHABLE(
"ripple::Resource::Logic::release : invalid entry " "ripple::Resource::Logic::release : invalid entry "
"kind"); "kind");
break; break;
// LCOV_EXCL_STOP
} }
inactive_.push_back(entry); inactive_.push_back(entry);
entry.whenExpires = m_clock.now() + secondsUntilExpiration; entry.whenExpires = m_clock.now() + secondsUntilExpiration;

View File

@@ -25,7 +25,7 @@
#include <xrpl/server/Port.h> #include <xrpl/server/Port.h>
#include <xrpl/server/detail/ServerImpl.h> #include <xrpl/server/detail/ServerImpl.h>
#include <boost/asio/io_service.hpp> #include <boost/asio/io_context.hpp>
namespace ripple { namespace ripple {
@@ -34,10 +34,10 @@ template <class Handler>
std::unique_ptr<Server> std::unique_ptr<Server>
make_Server( make_Server(
Handler& handler, Handler& handler,
boost::asio::io_service& io_service, boost::asio::io_context& io_context,
beast::Journal journal) beast::Journal journal)
{ {
return std::make_unique<ServerImpl<Handler>>(handler, io_service, journal); return std::make_unique<ServerImpl<Handler>>(handler, io_context, journal);
} }
} // namespace ripple } // namespace ripple

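The hunk above is the first of several that migrate from boost::asio::io_service to boost::asio::io_context. A compact sketch of the replacement pattern used throughout (io_context plus make_strand and make_work_guard in place of io_service::strand and io_service::work), assuming only Boost.Asio:

#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/strand.hpp>
#include <iostream>

int main()
{
    boost::asio::io_context ioc;
    auto strand = boost::asio::make_strand(ioc);    // replaces io_service::strand
    auto work = boost::asio::make_work_guard(ioc);  // replaces io_service::work

    boost::asio::post(strand, [] { std::cout << "serialised on the strand\n"; });

    work.reset();  // let run() return once queued handlers finish
    ioc.run();
}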
View File

@@ -88,9 +88,7 @@ public:
++iter) ++iter)
{ {
typename BufferSequence::value_type const& buffer(*iter); typename BufferSequence::value_type const& buffer(*iter);
write( write(buffer.data(), boost::asio::buffer_size(buffer));
boost::asio::buffer_cast<void const*>(buffer),
boost::asio::buffer_size(buffer));
} }
} }
@@ -104,7 +102,7 @@ public:
/** Detach the session. /** Detach the session.
This holds the session open so that the response can be sent This holds the session open so that the response can be sent
asynchronously. Calls to io_service::run made by the server asynchronously. Calls to io_context::run made by the server
will not return until all detached sessions are closed. will not return until all detached sessions are closed.
*/ */
virtual std::shared_ptr<Session> virtual std::shared_ptr<Session>

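The buffer change above drops the removed boost::asio::buffer_cast in favour of the buffer's own data() accessor. A minimal sketch of the new form for a single const_buffer (Boost.Asio only):

#include <boost/asio/buffer.hpp>
#include <cstdio>

int main()
{
    char const msg[] = "hello";
    boost::asio::const_buffer const b = boost::asio::buffer(msg, 5);
    auto const* p = static_cast<char const*>(b.data());  // replaces buffer_cast
    std::fwrite(p, 1, b.size(), stdout);
    std::putchar('\n');
}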
View File

@@ -24,11 +24,13 @@
#include <xrpl/beast/net/IPAddressConversion.h> #include <xrpl/beast/net/IPAddressConversion.h>
#include <xrpl/beast/utility/instrumentation.h> #include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/server/Session.h> #include <xrpl/server/Session.h>
#include <xrpl/server/detail/Spawn.h>
#include <xrpl/server/detail/io_list.h> #include <xrpl/server/detail/io_list.h>
#include <boost/asio/ip/tcp.hpp> #include <boost/asio/ip/tcp.hpp>
#include <boost/asio/spawn.hpp> #include <boost/asio/spawn.hpp>
#include <boost/asio/ssl/stream.hpp> #include <boost/asio/ssl/stream.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/streambuf.hpp> #include <boost/asio/streambuf.hpp>
#include <boost/beast/core/stream_traits.hpp> #include <boost/beast/core/stream_traits.hpp>
#include <boost/beast/http/dynamic_body.hpp> #include <boost/beast/http/dynamic_body.hpp>
@@ -215,8 +217,8 @@ BaseHTTPPeer<Handler, Impl>::BaseHTTPPeer(
ConstBufferSequence const& buffers) ConstBufferSequence const& buffers)
: port_(port) : port_(port)
, handler_(handler) , handler_(handler)
, work_(executor) , work_(boost::asio::make_work_guard(executor))
, strand_(executor) , strand_(boost::asio::make_strand(executor))
, remote_address_(remote_address) , remote_address_(remote_address)
, journal_(journal) , journal_(journal)
{ {
@@ -356,7 +358,7 @@ BaseHTTPPeer<Handler, Impl>::on_write(
return; return;
if (graceful_) if (graceful_)
return do_close(); return do_close();
boost::asio::spawn( util::spawn(
strand_, strand_,
std::bind( std::bind(
&BaseHTTPPeer<Handler, Impl>::do_read, &BaseHTTPPeer<Handler, Impl>::do_read,
@@ -375,7 +377,7 @@ BaseHTTPPeer<Handler, Impl>::do_writer(
{ {
auto const p = impl().shared_from_this(); auto const p = impl().shared_from_this();
resume = std::function<void(void)>([this, p, writer, keep_alive]() { resume = std::function<void(void)>([this, p, writer, keep_alive]() {
boost::asio::spawn( util::spawn(
strand_, strand_,
std::bind( std::bind(
&BaseHTTPPeer<Handler, Impl>::do_writer, &BaseHTTPPeer<Handler, Impl>::do_writer,
@@ -406,7 +408,7 @@ BaseHTTPPeer<Handler, Impl>::do_writer(
if (!keep_alive) if (!keep_alive)
return do_close(); return do_close();
boost::asio::spawn( util::spawn(
strand_, strand_,
std::bind( std::bind(
&BaseHTTPPeer<Handler, Impl>::do_read, &BaseHTTPPeer<Handler, Impl>::do_read,
@@ -448,14 +450,14 @@ BaseHTTPPeer<Handler, Impl>::write(
std::shared_ptr<Writer> const& writer, std::shared_ptr<Writer> const& writer,
bool keep_alive) bool keep_alive)
{ {
boost::asio::spawn(bind_executor( util::spawn(
strand_, strand_,
std::bind( std::bind(
&BaseHTTPPeer<Handler, Impl>::do_writer, &BaseHTTPPeer<Handler, Impl>::do_writer,
impl().shared_from_this(), impl().shared_from_this(),
writer, writer,
keep_alive, keep_alive,
std::placeholders::_1))); std::placeholders::_1));
} }
// DEPRECATED // DEPRECATED
@@ -490,12 +492,12 @@ BaseHTTPPeer<Handler, Impl>::complete()
} }
// keep-alive // keep-alive
boost::asio::spawn(bind_executor( util::spawn(
strand_, strand_,
std::bind( std::bind(
&BaseHTTPPeer<Handler, Impl>::do_read, &BaseHTTPPeer<Handler, Impl>::do_read,
impl().shared_from_this(), impl().shared_from_this(),
std::placeholders::_1))); std::placeholders::_1));
} }
// DEPRECATED // DEPRECATED

View File

@@ -47,7 +47,6 @@ protected:
Port const& port_; Port const& port_;
Handler& handler_; Handler& handler_;
endpoint_type remote_address_; endpoint_type remote_address_;
beast::WrappedSink sink_;
beast::Journal const j_; beast::Journal const j_;
boost::asio::executor_work_guard<boost::asio::executor> work_; boost::asio::executor_work_guard<boost::asio::executor> work_;
@@ -84,15 +83,15 @@ BasePeer<Handler, Impl>::BasePeer(
: port_(port) : port_(port)
, handler_(handler) , handler_(handler)
, remote_address_(remote_address) , remote_address_(remote_address)
, sink_( , j_(journal,
journal.sink(), log::attributes(log::attr(
[] { "PeerID",
static std::atomic<unsigned> id{0}; [] {
return "##" + std::to_string(++id) + " "; static std::atomic<unsigned> id{0};
}()) return "##" + std::to_string(++id) + " ";
, j_(sink_) }())))
, work_(executor) , work_(boost::asio::make_work_guard(executor))
, strand_(executor) , strand_(boost::asio::make_strand(executor))
{ {
} }

View File

@@ -29,6 +29,7 @@
#include <xrpl/server/detail/BasePeer.h> #include <xrpl/server/detail/BasePeer.h>
#include <xrpl/server/detail/LowestLayer.h> #include <xrpl/server/detail/LowestLayer.h>
#include <boost/asio/error.hpp>
#include <boost/beast/core/multi_buffer.hpp> #include <boost/beast/core/multi_buffer.hpp>
#include <boost/beast/http/message.hpp> #include <boost/beast/http/message.hpp>
#include <boost/beast/websocket.hpp> #include <boost/beast/websocket.hpp>
@@ -420,11 +421,17 @@ BaseWSPeer<Handler, Impl>::start_timer()
// Max seconds without completing a message // Max seconds without completing a message
static constexpr std::chrono::seconds timeout{30}; static constexpr std::chrono::seconds timeout{30};
static constexpr std::chrono::seconds timeoutLocal{3}; static constexpr std::chrono::seconds timeoutLocal{3};
error_code ec;
timer_.expires_from_now( try
remote_endpoint().address().is_loopback() ? timeoutLocal : timeout, ec); {
if (ec) timer_.expires_after(
return fail(ec, "start_timer"); remote_endpoint().address().is_loopback() ? timeoutLocal : timeout);
}
catch (boost::system::system_error const& e)
{
return fail(e.code(), "start_timer");
}
timer_.async_wait(bind_executor( timer_.async_wait(bind_executor(
strand_, strand_,
std::bind( std::bind(
@@ -438,8 +445,14 @@ template <class Handler, class Impl>
void void
BaseWSPeer<Handler, Impl>::cancel_timer() BaseWSPeer<Handler, Impl>::cancel_timer()
{ {
error_code ec; try
timer_.cancel(ec); {
timer_.cancel();
}
catch (boost::system::system_error const&)
{
// ignored
}
} }
template <class Handler, class Impl> template <class Handler, class Impl>

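The timer changes above replace the removed error_code overloads (expires_from_now and cancel(ec)) with expires_after and exception handling. A standalone sketch of that pattern, not tied to BaseWSPeer:

#include <boost/asio/io_context.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/system/system_error.hpp>
#include <chrono>
#include <iostream>

int main()
{
    boost::asio::io_context ioc;
    boost::asio::steady_timer timer(ioc);
    try
    {
        timer.expires_after(std::chrono::seconds(30));  // replaces expires_from_now(d, ec)
    }
    catch (boost::system::system_error const& e)
    {
        std::cerr << "start_timer failed: " << e.what() << '\n';
        return 1;
    }
    timer.async_wait([](boost::system::error_code const& ec) {
        if (!ec)
            std::cout << "timeout\n";
    });
    timer.cancel();  // throwing overload; the pending wait completes with operation_aborted
    ioc.run();
}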
View File

@@ -69,7 +69,7 @@ private:
stream_type stream_; stream_type stream_;
socket_type& socket_; socket_type& socket_;
endpoint_type remote_address_; endpoint_type remote_address_;
boost::asio::io_context::strand strand_; boost::asio::strand<boost::asio::io_context::executor_type> strand_;
beast::Journal const j_; beast::Journal const j_;
public: public:
@@ -95,7 +95,7 @@ private:
Handler& handler_; Handler& handler_;
boost::asio::io_context& ioc_; boost::asio::io_context& ioc_;
acceptor_type acceptor_; acceptor_type acceptor_;
boost::asio::io_context::strand strand_; boost::asio::strand<boost::asio::io_context::executor_type> strand_;
bool ssl_; bool ssl_;
bool plain_; bool plain_;
@@ -155,7 +155,7 @@ Door<Handler>::Detector::Detector(
, stream_(std::move(stream)) , stream_(std::move(stream))
, socket_(stream_.socket()) , socket_(stream_.socket())
, remote_address_(remote_address) , remote_address_(remote_address)
, strand_(ioc_) , strand_(boost::asio::make_strand(ioc_))
, j_(j) , j_(j)
{ {
} }
@@ -164,7 +164,7 @@ template <class Handler>
void void
Door<Handler>::Detector::run() Door<Handler>::Detector::run()
{ {
boost::asio::spawn( util::spawn(
strand_, strand_,
std::bind( std::bind(
&Detector::do_detect, &Detector::do_detect,
@@ -269,7 +269,7 @@ Door<Handler>::reOpen()
Throw<std::exception>(); Throw<std::exception>();
} }
acceptor_.listen(boost::asio::socket_base::max_connections, ec); acceptor_.listen(boost::asio::socket_base::max_listen_connections, ec);
if (ec) if (ec)
{ {
JLOG(j_.error()) << "Listen on port '" << port_.name JLOG(j_.error()) << "Listen on port '" << port_.name
@@ -291,7 +291,7 @@ Door<Handler>::Door(
, handler_(handler) , handler_(handler)
, ioc_(io_context) , ioc_(io_context)
, acceptor_(io_context) , acceptor_(io_context)
, strand_(io_context) , strand_(boost::asio::make_strand(io_context))
, ssl_( , ssl_(
port_.protocol.count("https") > 0 || port_.protocol.count("https") > 0 ||
port_.protocol.count("wss") > 0 || port_.protocol.count("wss2") > 0 || port_.protocol.count("wss") > 0 || port_.protocol.count("wss2") > 0 ||
@@ -307,7 +307,7 @@ template <class Handler>
void void
Door<Handler>::run() Door<Handler>::run()
{ {
boost::asio::spawn( util::spawn(
strand_, strand_,
std::bind( std::bind(
&Door<Handler>::do_accept, &Door<Handler>::do_accept,
@@ -320,7 +320,8 @@ void
Door<Handler>::close() Door<Handler>::close()
{ {
if (!strand_.running_in_this_thread()) if (!strand_.running_in_this_thread())
return strand_.post( return boost::asio::post(
strand_,
std::bind(&Door<Handler>::close, this->shared_from_this())); std::bind(&Door<Handler>::close, this->shared_from_this()));
error_code ec; error_code ec;
acceptor_.close(ec); acceptor_.close(ec);

View File

@@ -105,7 +105,7 @@ PlainHTTPPeer<Handler>::run()
{ {
if (!this->handler_.onAccept(this->session(), this->remote_address_)) if (!this->handler_.onAccept(this->session(), this->remote_address_))
{ {
boost::asio::spawn( util::spawn(
this->strand_, this->strand_,
std::bind(&PlainHTTPPeer::do_close, this->shared_from_this())); std::bind(&PlainHTTPPeer::do_close, this->shared_from_this()));
return; return;
@@ -114,7 +114,7 @@ PlainHTTPPeer<Handler>::run()
if (!socket_.is_open()) if (!socket_.is_open())
return; return;
boost::asio::spawn( util::spawn(
this->strand_, this->strand_,
std::bind( std::bind(
&PlainHTTPPeer::do_read, &PlainHTTPPeer::do_read,

View File

@@ -115,14 +115,14 @@ SSLHTTPPeer<Handler>::run()
{ {
if (!this->handler_.onAccept(this->session(), this->remote_address_)) if (!this->handler_.onAccept(this->session(), this->remote_address_))
{ {
boost::asio::spawn( util::spawn(
this->strand_, this->strand_,
std::bind(&SSLHTTPPeer::do_close, this->shared_from_this())); std::bind(&SSLHTTPPeer::do_close, this->shared_from_this()));
return; return;
} }
if (!socket_.is_open()) if (!socket_.is_open())
return; return;
boost::asio::spawn( util::spawn(
this->strand_, this->strand_,
std::bind( std::bind(
&SSLHTTPPeer::do_handshake, &SSLHTTPPeer::do_handshake,
@@ -164,7 +164,7 @@ SSLHTTPPeer<Handler>::do_handshake(yield_context do_yield)
this->port().protocol.count("https") > 0; this->port().protocol.count("https") > 0;
if (http) if (http)
{ {
boost::asio::spawn( util::spawn(
this->strand_, this->strand_,
std::bind( std::bind(
&SSLHTTPPeer::do_read, &SSLHTTPPeer::do_read,

View File

@@ -26,6 +26,8 @@
#include <xrpl/server/detail/io_list.h> #include <xrpl/server/detail/io_list.h>
#include <boost/asio.hpp> #include <boost/asio.hpp>
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <array> #include <array>
#include <chrono> #include <chrono>
@@ -85,9 +87,11 @@ private:
Handler& handler_; Handler& handler_;
beast::Journal const j_; beast::Journal const j_;
boost::asio::io_service& io_service_; boost::asio::io_context& io_context_;
boost::asio::io_service::strand strand_; boost::asio::strand<boost::asio::io_context::executor_type> strand_;
std::optional<boost::asio::io_service::work> work_; std::optional<boost::asio::executor_work_guard<
boost::asio::io_context::executor_type>>
work_;
std::mutex m_; std::mutex m_;
std::vector<Port> ports_; std::vector<Port> ports_;
@@ -100,7 +104,7 @@ private:
public: public:
ServerImpl( ServerImpl(
Handler& handler, Handler& handler,
boost::asio::io_service& io_service, boost::asio::io_context& io_context,
beast::Journal journal); beast::Journal journal);
~ServerImpl(); ~ServerImpl();
@@ -123,10 +127,10 @@ public:
return ios_; return ios_;
} }
boost::asio::io_service& boost::asio::io_context&
get_io_service() get_io_context()
{ {
return io_service_; return io_context_;
} }
bool bool
@@ -140,13 +144,13 @@ private:
template <class Handler> template <class Handler>
ServerImpl<Handler>::ServerImpl( ServerImpl<Handler>::ServerImpl(
Handler& handler, Handler& handler,
boost::asio::io_service& io_service, boost::asio::io_context& io_context,
beast::Journal journal) beast::Journal journal)
: handler_(handler) : handler_(handler)
, j_(journal) , j_(journal)
, io_service_(io_service) , io_context_(io_context)
, strand_(io_service_) , strand_(boost::asio::make_strand(io_context_))
, work_(io_service_) , work_(std::in_place, boost::asio::make_work_guard(io_context_))
{ {
} }
@@ -173,7 +177,7 @@ ServerImpl<Handler>::ports(std::vector<Port> const& ports)
ports_.push_back(port); ports_.push_back(port);
auto& internalPort = ports_.back(); auto& internalPort = ports_.back();
if (auto sp = ios_.emplace<Door<Handler>>( if (auto sp = ios_.emplace<Door<Handler>>(
handler_, io_service_, internalPort, j_)) handler_, io_context_, internalPort, j_))
{ {
list_.push_back(sp); list_.push_back(sp);

View File

@@ -0,0 +1,108 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SERVER_SPAWN_H_INCLUDED
#define RIPPLE_SERVER_SPAWN_H_INCLUDED
#include <xrpl/basics/Log.h>
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>
#include <concepts>
#include <type_traits>
namespace ripple::util {
namespace impl {
template <typename T>
concept IsStrand = std::same_as<
std::decay_t<T>,
boost::asio::strand<typename std::decay_t<T>::inner_executor_type>>;
/**
* @brief A completion handler that restores `boost::asio::spawn`'s behaviour
* from Boost 1.83
*
 * This is intended to be passed as the third argument to `boost::asio::spawn`
 * so that exceptions are not ignored but are propagated to the
 * `io_context::run()` call site.
 *
 * @param ePtr The exception that was caught in the coroutine
*/
inline constexpr auto kPROPAGATE_EXCEPTIONS = [](std::exception_ptr ePtr) {
if (ePtr)
{
try
{
std::rethrow_exception(ePtr);
}
catch (std::exception const& e)
{
JLOG(debugLog().warn()) << "Spawn exception: " << e.what();
throw;
}
catch (...)
{
JLOG(debugLog().warn()) << "Spawn exception: Unknown";
throw;
}
}
};
} // namespace impl
/**
* @brief Spawns a coroutine using `boost::asio::spawn`
*
* @note This uses kPROPAGATE_EXCEPTIONS to force asio to propagate exceptions
* through `io_context`
 * @note Since the implicit strand was removed from `boost::asio::spawn`, this
 * helper function adds the strand back
*
* @tparam Ctx The type of the context/strand
* @tparam F The type of the function to execute
* @param ctx The execution context
* @param func The function to execute. Must return `void`
*/
template <typename Ctx, typename F>
requires std::is_invocable_r_v<void, F, boost::asio::yield_context>
void
spawn(Ctx&& ctx, F&& func)
{
if constexpr (impl::IsStrand<Ctx>)
{
boost::asio::spawn(
std::forward<Ctx>(ctx),
std::forward<F>(func),
impl::kPROPAGATE_EXCEPTIONS);
}
else
{
boost::asio::spawn(
boost::asio::make_strand(
boost::asio::get_associated_executor(std::forward<Ctx>(ctx))),
std::forward<F>(func),
impl::kPROPAGATE_EXCEPTIONS);
}
}
} // namespace ripple::util
#endif

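A hedged usage sketch of the behaviour the util::spawn helper above restores. Rather than depending on the header, it reproduces the same idea directly with boost::asio::spawn and a completion handler that rethrows, so an exception thrown inside the coroutine surfaces at io_context::run() (this assumes a Boost version whose spawn delivers a std::exception_ptr to the completion token, which is what the header relies on):

#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>
#include <exception>
#include <iostream>
#include <stdexcept>

int main()
{
    boost::asio::io_context ioc;
    boost::asio::spawn(
        boost::asio::make_strand(ioc),
        [](boost::asio::yield_context) { throw std::runtime_error("boom"); },
        [](std::exception_ptr ep) {
            if (ep)
                std::rethrow_exception(ep);  // propagate to run()
        });
    try
    {
        ioc.run();
    }
    catch (std::exception const& e)
    {
        std::cerr << "propagated: " << e.what() << '\n';
    }
}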
View File

@@ -166,7 +166,7 @@ public:
May be called concurrently. May be called concurrently.
Preconditions: Preconditions:
No call to io_service::run on any io_service No call to io_context::run on any io_context
used by work objects associated with this io_list used by work objects associated with this io_list
exists in the caller's call stack. exists in the caller's call stack.
*/ */

View File

@@ -49,7 +49,7 @@ Section::append(std::vector<std::string> const& lines)
// <key> '=' <value> // <key> '=' <value>
static boost::regex const re1( static boost::regex const re1(
"^" // start of line "^" // start of line
"(?:\\s*)" // whitespace (optional) "(?:\\s*)" // whitespace (optonal)
"([a-zA-Z][_a-zA-Z0-9]*)" // <key> "([a-zA-Z][_a-zA-Z0-9]*)" // <key>
"(?:\\s*)" // whitespace (optional) "(?:\\s*)" // whitespace (optional)
"(?:=)" // '=' "(?:=)" // '='

View File

@@ -113,14 +113,14 @@ Logs::File::close()
} }
void void
Logs::File::write(char const* text) Logs::File::write(std::string_view text)
{ {
if (m_stream != nullptr) if (m_stream != nullptr)
(*m_stream) << text; (*m_stream) << text;
} }
void void
Logs::File::writeln(char const* text) Logs::File::writeln(std::string_view text)
{ {
if (m_stream != nullptr) if (m_stream != nullptr)
{ {
@@ -196,11 +196,15 @@ Logs::write(
bool console) bool console)
{ {
std::string s; std::string s;
format(s, text, level, partition); std::string_view result = text;
if (!beast::Journal::isStructuredJournalEnabled())
{
format(s, text, level, partition);
result = s;
}
std::lock_guard lock(mutex_); std::lock_guard lock(mutex_);
file_.writeln(s); file_.writeln(result);
if (!silent_)
std::cerr << s << '\n';
// VFALCO TODO Fix console output // VFALCO TODO Fix console output
// if (console) // if (console)
// out_.write_console(s); // out_.write_console(s);
@@ -239,11 +243,9 @@ Logs::fromSeverity(beast::severities::Severity level)
case kError: case kError:
return lsERROR; return lsERROR;
// LCOV_EXCL_START
default: default:
UNREACHABLE("ripple::Logs::fromSeverity : invalid severity"); UNREACHABLE("ripple::Logs::fromSeverity : invalid severity");
[[fallthrough]]; [[fallthrough]];
// LCOV_EXCL_STOP
case kFatal: case kFatal:
break; break;
} }
@@ -267,11 +269,9 @@ Logs::toSeverity(LogSeverity level)
return kWarning; return kWarning;
case lsERROR: case lsERROR:
return kError; return kError;
// LCOV_EXCL_START
default: default:
UNREACHABLE("ripple::Logs::toSeverity : invalid severity"); UNREACHABLE("ripple::Logs::toSeverity : invalid severity");
[[fallthrough]]; [[fallthrough]];
// LCOV_EXCL_STOP
case lsFATAL: case lsFATAL:
break; break;
} }
@@ -296,11 +296,9 @@ Logs::toString(LogSeverity s)
return "Error"; return "Error";
case lsFATAL: case lsFATAL:
return "Fatal"; return "Fatal";
// LCOV_EXCL_START
default: default:
UNREACHABLE("ripple::Logs::toString : invalid severity"); UNREACHABLE("ripple::Logs::toString : invalid severity");
return "Unknown"; return "Unknown";
// LCOV_EXCL_STOP
} }
} }
@@ -362,11 +360,9 @@ Logs::format(
case kError: case kError:
output += "ERR "; output += "ERR ";
break; break;
// LCOV_EXCL_START
default: default:
UNREACHABLE("ripple::Logs::format : invalid severity"); UNREACHABLE("ripple::Logs::format : invalid severity");
[[fallthrough]]; [[fallthrough]];
// LCOV_EXCL_STOP
case kFatal: case kFatal:
output += "FTL "; output += "FTL ";
break; break;

View File

@@ -93,18 +93,6 @@ public:
// tie, round towards even. // tie, round towards even.
int int
round() noexcept; round() noexcept;
// Modify the result to the correctly rounded value
void
doRoundUp(rep& mantissa, int& exponent, std::string location);
// Modify the result to the correctly rounded value
void
doRoundDown(rep& mantissa, int& exponent);
// Modify the result to the correctly rounded value
void
doRound(rep& drops);
}; };
inline void inline void
@@ -182,61 +170,6 @@ Number::Guard::round() noexcept
return 0; return 0;
} }
void
Number::Guard::doRoundUp(rep& mantissa, int& exponent, std::string location)
{
auto r = round();
if (r == 1 || (r == 0 && (mantissa & 1) == 1))
{
++mantissa;
if (mantissa > maxMantissa)
{
mantissa /= 10;
++exponent;
}
}
if (exponent < minExponent)
{
mantissa = 0;
exponent = Number{}.exponent_;
}
if (exponent > maxExponent)
throw std::overflow_error(location);
}
void
Number::Guard::doRoundDown(rep& mantissa, int& exponent)
{
auto r = round();
if (r == 1 || (r == 0 && (mantissa & 1) == 1))
{
--mantissa;
if (mantissa < minMantissa)
{
mantissa *= 10;
--exponent;
}
}
if (exponent < minExponent)
{
mantissa = 0;
exponent = Number{}.exponent_;
}
}
// Modify the result to the correctly rounded value
void
Number::Guard::doRound(rep& drops)
{
auto r = round();
if (r == 1 || (r == 0 && (drops & 1) == 1))
{
++drops;
}
if (is_negative())
drops = -drops;
}
// Number // Number
constexpr Number one{1000000000000000, -15, Number::unchecked{}}; constexpr Number one{1000000000000000, -15, Number::unchecked{}};
@@ -276,7 +209,18 @@ Number::normalize()
return; return;
} }
g.doRoundUp(mantissa_, exponent_, "Number::normalize 2"); auto r = g.round();
if (r == 1 || (r == 0 && (mantissa_ & 1) == 1))
{
++mantissa_;
if (mantissa_ > maxMantissa)
{
mantissa_ /= 10;
++exponent_;
}
}
if (exponent_ > maxExponent)
throw std::overflow_error("Number::normalize 2");
if (negative) if (negative)
mantissa_ = -mantissa_; mantissa_ = -mantissa_;
@@ -348,7 +292,18 @@ Number::operator+=(Number const& y)
xm /= 10; xm /= 10;
++xe; ++xe;
} }
g.doRoundUp(xm, xe, "Number::addition overflow"); auto r = g.round();
if (r == 1 || (r == 0 && (xm & 1) == 1))
{
++xm;
if (xm > maxMantissa)
{
xm /= 10;
++xe;
}
}
if (xe > maxExponent)
throw std::overflow_error("Number::addition overflow");
} }
else else
{ {
@@ -368,7 +323,21 @@ Number::operator+=(Number const& y)
xm -= g.pop(); xm -= g.pop();
--xe; --xe;
} }
g.doRoundDown(xm, xe); auto r = g.round();
if (r == 1 || (r == 0 && (xm & 1) == 1))
{
--xm;
if (xm < minMantissa)
{
xm *= 10;
--xe;
}
}
if (xe < minExponent)
{
xm = 0;
xe = Number{}.exponent_;
}
} }
mantissa_ = xm * xn; mantissa_ = xm * xn;
exponent_ = xe; exponent_ = xe;
@@ -448,10 +417,25 @@ Number::operator*=(Number const& y)
} }
xm = static_cast<rep>(zm); xm = static_cast<rep>(zm);
xe = ze; xe = ze;
g.doRoundUp( auto r = g.round();
xm, if (r == 1 || (r == 0 && (xm & 1) == 1))
xe, {
"Number::multiplication overflow : exponent is " + std::to_string(xe)); ++xm;
if (xm > maxMantissa)
{
xm /= 10;
++xe;
}
}
if (xe < minExponent)
{
xm = 0;
xe = Number{}.exponent_;
}
if (xe > maxExponent)
throw std::overflow_error(
"Number::multiplication overflow : exponent is " +
std::to_string(xe));
mantissa_ = xm * zn; mantissa_ = xm * zn;
exponent_ = xe; exponent_ = xe;
XRPL_ASSERT( XRPL_ASSERT(
@@ -516,29 +500,17 @@ Number::operator rep() const
throw std::overflow_error("Number::operator rep() overflow"); throw std::overflow_error("Number::operator rep() overflow");
drops *= 10; drops *= 10;
} }
g.doRound(drops); auto r = g.round();
if (r == 1 || (r == 0 && (drops & 1) == 1))
{
++drops;
}
if (g.is_negative())
drops = -drops;
} }
return drops; return drops;
} }
Number
Number::truncate() const noexcept
{
if (exponent_ >= 0 || mantissa_ == 0)
return *this;
Number ret = *this;
while (ret.exponent_ < 0 && ret.mantissa_ != 0)
{
ret.exponent_ += 1;
ret.mantissa_ /= rep(10);
}
// We are guaranteed that normalize() will never throw an exception
// because exponent is either negative or zero at this point.
ret.normalize();
return ret;
}
std::string std::string
to_string(Number const& amount) to_string(Number const& amount)
{ {

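The blocks inlined above all apply the same round-half-to-even rule: the guard's round() verdict is 1 when the discarded digits were more than half a unit, 0 on an exact tie (round towards the even mantissa), and -1 otherwise. A tiny illustration of that rule in isolation (the names are made up):

#include <cstdint>
#include <iostream>

// Apply the guard verdict r in {-1, 0, 1} to a positive mantissa.
std::int64_t
roundHalfEven(std::int64_t mantissa, int r)
{
    if (r == 1 || (r == 0 && (mantissa & 1) == 1))
        ++mantissa;  // ties land on even mantissas
    return mantissa;
}

int main()
{
    std::cout << roundHalfEven(24, 0) << '\n';   // 24: tie, already even
    std::cout << roundHalfEven(25, 0) << '\n';   // 26: tie, odd rounds to even
    std::cout << roundHalfEven(25, 1) << '\n';   // 26: more than half rounds up
    std::cout << roundHalfEven(25, -1) << '\n';  // 25: less than half truncates
}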
View File

@@ -25,8 +25,9 @@
#include <xrpl/beast/utility/Journal.h> #include <xrpl/beast/utility/Journal.h>
#include <xrpl/beast/utility/instrumentation.h> #include <xrpl/beast/utility/instrumentation.h>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/error.hpp> #include <boost/asio/error.hpp>
#include <boost/asio/io_service.hpp> #include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp> #include <boost/asio/ip/tcp.hpp>
#include <boost/system/detail/error_code.hpp> #include <boost/system/detail/error_code.hpp>
@@ -124,8 +125,8 @@ public:
beast::Journal m_journal; beast::Journal m_journal;
boost::asio::io_service& m_io_service; boost::asio::io_context& m_io_context;
boost::asio::io_service::strand m_strand; boost::asio::strand<boost::asio::io_context::executor_type> m_strand;
boost::asio::ip::tcp::resolver m_resolver; boost::asio::ip::tcp::resolver m_resolver;
std::condition_variable m_cv; std::condition_variable m_cv;
@@ -155,12 +156,12 @@ public:
std::deque<Work> m_work; std::deque<Work> m_work;
ResolverAsioImpl( ResolverAsioImpl(
boost::asio::io_service& io_service, boost::asio::io_context& io_context,
beast::Journal journal) beast::Journal journal)
: m_journal(journal) : m_journal(journal)
, m_io_service(io_service) , m_io_context(io_context)
, m_strand(io_service) , m_strand(boost::asio::make_strand(io_context))
, m_resolver(io_service) , m_resolver(io_context)
, m_asyncHandlersCompleted(true) , m_asyncHandlersCompleted(true)
, m_stop_called(false) , m_stop_called(false)
, m_stopped(true) , m_stopped(true)
@@ -216,8 +217,14 @@ public:
{ {
if (m_stop_called.exchange(true) == false) if (m_stop_called.exchange(true) == false)
{ {
m_io_service.dispatch(m_strand.wrap(std::bind( boost::asio::dispatch(
&ResolverAsioImpl::do_stop, this, CompletionCounter(this)))); m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_stop,
this,
CompletionCounter(this))));
JLOG(m_journal.debug()) << "Queued a stop request"; JLOG(m_journal.debug()) << "Queued a stop request";
} }
@@ -248,12 +255,16 @@ public:
// TODO NIKB use rvalue references to construct and move // TODO NIKB use rvalue references to construct and move
// reducing cost. // reducing cost.
m_io_service.dispatch(m_strand.wrap(std::bind( boost::asio::dispatch(
&ResolverAsioImpl::do_resolve, m_io_context,
this, boost::asio::bind_executor(
names, m_strand,
handler, std::bind(
CompletionCounter(this)))); &ResolverAsioImpl::do_resolve,
this,
names,
handler,
CompletionCounter(this))));
} }
//------------------------------------------------------------------------- //-------------------------------------------------------------------------
@@ -279,19 +290,20 @@ public:
std::string name, std::string name,
boost::system::error_code const& ec, boost::system::error_code const& ec,
HandlerType handler, HandlerType handler,
boost::asio::ip::tcp::resolver::iterator iter, boost::asio::ip::tcp::resolver::results_type results,
CompletionCounter) CompletionCounter)
{ {
if (ec == boost::asio::error::operation_aborted) if (ec == boost::asio::error::operation_aborted)
return; return;
std::vector<beast::IP::Endpoint> addresses; std::vector<beast::IP::Endpoint> addresses;
auto iter = results.begin();
// If we get an error message back, we don't return any // If we get an error message back, we don't return any
// results that we may have gotten. // results that we may have gotten.
if (!ec) if (!ec)
{ {
while (iter != boost::asio::ip::tcp::resolver::iterator()) while (iter != results.end())
{ {
addresses.push_back( addresses.push_back(
beast::IPAddressConversion::from_asio(*iter)); beast::IPAddressConversion::from_asio(*iter));
@@ -301,8 +313,14 @@ public:
handler(name, addresses); handler(name, addresses);
m_io_service.post(m_strand.wrap(std::bind( boost::asio::post(
&ResolverAsioImpl::do_work, this, CompletionCounter(this)))); m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_work,
this,
CompletionCounter(this))));
} }
HostAndPort HostAndPort
@@ -383,16 +401,21 @@ public:
{ {
JLOG(m_journal.error()) << "Unable to parse '" << name << "'"; JLOG(m_journal.error()) << "Unable to parse '" << name << "'";
m_io_service.post(m_strand.wrap(std::bind( boost::asio::post(
&ResolverAsioImpl::do_work, this, CompletionCounter(this)))); m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_work,
this,
CompletionCounter(this))));
return; return;
} }
boost::asio::ip::tcp::resolver::query query(host, port);
m_resolver.async_resolve( m_resolver.async_resolve(
query, host,
port,
std::bind( std::bind(
&ResolverAsioImpl::do_finish, &ResolverAsioImpl::do_finish,
this, this,
@@ -423,10 +446,14 @@ public:
if (m_work.size() > 0) if (m_work.size() > 0)
{ {
m_io_service.post(m_strand.wrap(std::bind( boost::asio::post(
&ResolverAsioImpl::do_work, m_io_context,
this, boost::asio::bind_executor(
CompletionCounter(this)))); m_strand,
std::bind(
&ResolverAsioImpl::do_work,
this,
CompletionCounter(this))));
} }
} }
} }
@@ -435,9 +462,9 @@ public:
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
std::unique_ptr<ResolverAsio> std::unique_ptr<ResolverAsio>
ResolverAsio::New(boost::asio::io_service& io_service, beast::Journal journal) ResolverAsio::New(boost::asio::io_context& io_context, beast::Journal journal)
{ {
return std::make_unique<ResolverAsioImpl>(io_service, journal); return std::make_unique<ResolverAsioImpl>(io_context, journal);
} }
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------

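The resolver changes above swap the removed resolver::query and iterator API for the host/port overload and results_type. A short sketch of the new shape, using the synchronous overload for brevity (Boost.Asio only; name resolution obviously depends on the environment):

#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <iostream>

int main()
{
    boost::asio::io_context ioc;
    boost::asio::ip::tcp::resolver resolver(ioc);
    boost::system::error_code ec;
    auto const results = resolver.resolve("localhost", "80", ec);
    if (ec)
    {
        std::cerr << "resolve failed: " << ec.message() << '\n';
        return 1;
    }
    for (auto const& entry : results)           // results_type is a range
        std::cout << entry.endpoint() << '\n';  // no manual iterator loop
}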
View File

@@ -36,7 +36,6 @@ LogThrow(std::string const& title)
[[noreturn]] void [[noreturn]] void
LogicError(std::string const& s) noexcept LogicError(std::string const& s) noexcept
{ {
// LCOV_EXCL_START
JLOG(debugLog().fatal()) << s; JLOG(debugLog().fatal()) << s;
std::cerr << "Logic error: " << s << std::endl; std::cerr << "Logic error: " << s << std::endl;
// Use a non-standard contract naming here (without namespace) because // Use a non-standard contract naming here (without namespace) because
@@ -46,7 +45,6 @@ LogicError(std::string const& s) noexcept
// For the above reasons, we want this contract to stand out. // For the above reasons, we want this contract to stand out.
UNREACHABLE("LogicError", {{"message", s}}); UNREACHABLE("LogicError", {{"message", s}});
std::abort(); std::abort();
// LCOV_EXCL_STOP
} }
} // namespace ripple } // namespace ripple

View File

@@ -30,9 +30,11 @@
#include <xrpl/beast/utility/instrumentation.h> #include <xrpl/beast/utility/instrumentation.h>
#include <boost/asio/basic_waitable_timer.hpp> #include <boost/asio/basic_waitable_timer.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/buffer.hpp> #include <boost/asio/buffer.hpp>
#include <boost/asio/error.hpp> #include <boost/asio/error.hpp>
#include <boost/asio/io_service.hpp> #include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/udp.hpp> #include <boost/asio/ip/udp.hpp>
#include <boost/asio/strand.hpp> #include <boost/asio/strand.hpp>
#include <boost/system/detail/error_code.hpp> #include <boost/system/detail/error_code.hpp>
@@ -238,9 +240,11 @@ private:
Journal m_journal; Journal m_journal;
IP::Endpoint m_address; IP::Endpoint m_address;
std::string m_prefix; std::string m_prefix;
boost::asio::io_service m_io_service; boost::asio::io_context m_io_context;
std::optional<boost::asio::io_service::work> m_work; std::optional<boost::asio::executor_work_guard<
boost::asio::io_service::strand m_strand; boost::asio::io_context::executor_type>>
m_work;
boost::asio::strand<boost::asio::io_context::executor_type> m_strand;
boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer; boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer;
boost::asio::ip::udp::socket m_socket; boost::asio::ip::udp::socket m_socket;
std::deque<std::string> m_data; std::deque<std::string> m_data;
@@ -264,18 +268,24 @@ public:
: m_journal(journal) : m_journal(journal)
, m_address(address) , m_address(address)
, m_prefix(prefix) , m_prefix(prefix)
, m_work(std::ref(m_io_service)) , m_work(boost::asio::make_work_guard(m_io_context))
, m_strand(m_io_service) , m_strand(boost::asio::make_strand(m_io_context))
, m_timer(m_io_service) , m_timer(m_io_context)
, m_socket(m_io_service) , m_socket(m_io_context)
, m_thread(&StatsDCollectorImp::run, this) , m_thread(&StatsDCollectorImp::run, this)
{ {
} }
~StatsDCollectorImp() override ~StatsDCollectorImp() override
{ {
boost::system::error_code ec; try
m_timer.cancel(ec); {
m_timer.cancel();
}
catch (boost::system::system_error const&)
{
// ignored
}
m_work.reset(); m_work.reset();
m_thread.join(); m_thread.join();
@@ -334,10 +344,10 @@ public:
//-------------------------------------------------------------------------- //--------------------------------------------------------------------------
boost::asio::io_service& boost::asio::io_context&
get_io_service() get_io_context()
{ {
return m_io_service; return m_io_context;
} }
std::string const& std::string const&
@@ -355,8 +365,14 @@ public:
void void
post_buffer(std::string&& buffer) post_buffer(std::string&& buffer)
{ {
m_io_service.dispatch(m_strand.wrap(std::bind( boost::asio::dispatch(
&StatsDCollectorImp::do_post_buffer, this, std::move(buffer)))); m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&StatsDCollectorImp::do_post_buffer,
this,
std::move(buffer))));
} }
// The keepAlive parameter makes sure the buffers sent to // The keepAlive parameter makes sure the buffers sent to
@@ -386,8 +402,7 @@ public:
for (auto const& buffer : buffers) for (auto const& buffer : buffers)
{ {
std::string const s( std::string const s(
boost::asio::buffer_cast<char const*>(buffer), buffer.data(), boost::asio::buffer_size(buffer));
boost::asio::buffer_size(buffer));
std::cerr << s; std::cerr << s;
} }
std::cerr << '\n'; std::cerr << '\n';
@@ -456,7 +471,7 @@ public:
    set_timer()
    {
        using namespace std::chrono_literals;
-       m_timer.expires_from_now(1s);
+       m_timer.expires_after(1s);
        m_timer.async_wait(std::bind(
            &StatsDCollectorImp::on_timer, this, std::placeholders::_1));
    }
@@ -498,13 +513,13 @@ public:
        set_timer();

-       m_io_service.run();
+       m_io_context.run();

        m_socket.shutdown(boost::asio::ip::udp::socket::shutdown_send, ec);
        m_socket.close();

-       m_io_service.poll();
+       m_io_context.poll();
    }
};
@@ -547,10 +562,12 @@ StatsDCounterImpl::~StatsDCounterImpl()
void
StatsDCounterImpl::increment(CounterImpl::value_type amount)
{
-   m_impl->get_io_service().dispatch(std::bind(
-       &StatsDCounterImpl::do_increment,
-       std::static_pointer_cast<StatsDCounterImpl>(shared_from_this()),
-       amount));
+   boost::asio::dispatch(
+       m_impl->get_io_context(),
+       std::bind(
+           &StatsDCounterImpl::do_increment,
+           std::static_pointer_cast<StatsDCounterImpl>(shared_from_this()),
+           amount));
}

void
@@ -592,10 +609,12 @@ StatsDEventImpl::StatsDEventImpl(
void
StatsDEventImpl::notify(EventImpl::value_type const& value)
{
-   m_impl->get_io_service().dispatch(std::bind(
-       &StatsDEventImpl::do_notify,
-       std::static_pointer_cast<StatsDEventImpl>(shared_from_this()),
-       value));
+   boost::asio::dispatch(
+       m_impl->get_io_context(),
+       std::bind(
+           &StatsDEventImpl::do_notify,
+           std::static_pointer_cast<StatsDEventImpl>(shared_from_this()),
+           value));
}

void
@@ -625,19 +644,23 @@ StatsDGaugeImpl::~StatsDGaugeImpl()
void
StatsDGaugeImpl::set(GaugeImpl::value_type value)
{
-   m_impl->get_io_service().dispatch(std::bind(
-       &StatsDGaugeImpl::do_set,
-       std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
-       value));
+   boost::asio::dispatch(
+       m_impl->get_io_context(),
+       std::bind(
+           &StatsDGaugeImpl::do_set,
+           std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
+           value));
}

void
StatsDGaugeImpl::increment(GaugeImpl::difference_type amount)
{
-   m_impl->get_io_service().dispatch(std::bind(
-       &StatsDGaugeImpl::do_increment,
-       std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
-       amount));
+   boost::asio::dispatch(
+       m_impl->get_io_context(),
+       std::bind(
+           &StatsDGaugeImpl::do_increment,
+           std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
+           amount));
}

void
@@ -713,10 +736,12 @@ StatsDMeterImpl::~StatsDMeterImpl()
void
StatsDMeterImpl::increment(MeterImpl::value_type amount)
{
-   m_impl->get_io_service().dispatch(std::bind(
-       &StatsDMeterImpl::do_increment,
-       std::static_pointer_cast<StatsDMeterImpl>(shared_from_this()),
-       amount));
+   boost::asio::dispatch(
+       m_impl->get_io_context(),
+       std::bind(
+           &StatsDMeterImpl::do_increment,
+           std::static_pointer_cast<StatsDMeterImpl>(shared_from_this()),
+           amount));
}

void

View File

@@ -25,11 +25,11 @@ namespace IP {
bool
is_private(AddressV4 const& addr)
{
-   return ((addr.to_ulong() & 0xff000000) ==
+   return ((addr.to_uint() & 0xff000000) ==
            0x0a000000) ||  // Prefix /8, 10.#.#.#
-       ((addr.to_ulong() & 0xfff00000) ==
+       ((addr.to_uint() & 0xfff00000) ==
            0xac100000) ||  // Prefix /12 172.16.#.# - 172.31.#.#
-       ((addr.to_ulong() & 0xffff0000) ==
+       ((addr.to_uint() & 0xffff0000) ==
            0xc0a80000) ||  // Prefix /16 192.168.#.#
        addr.is_loopback();
}
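The masks compare the address against the RFC 1918 private ranges (plus loopback via is_loopback()). A small standalone check of the same arithmetic, using arbitrary test addresses, might look like:

#include <boost/asio/ip/address_v4.hpp>
#include <iostream>

bool is_rfc1918(boost::asio::ip::address_v4 const& a)
{
    auto const v = a.to_uint();
    return ((v & 0xff000000) == 0x0a000000) ||  // 10.0.0.0/8
        ((v & 0xfff00000) == 0xac100000) ||     // 172.16.0.0/12
        ((v & 0xffff0000) == 0xc0a80000);       // 192.168.0.0/16
}

int main()
{
    using boost::asio::ip::make_address_v4;
    std::cout << is_rfc1918(make_address_v4("10.1.2.3")) << '\n';    // 1
    std::cout << is_rfc1918(make_address_v4("172.31.0.1")) << '\n';  // 1
    std::cout << is_rfc1918(make_address_v4("8.8.8.8")) << '\n';     // 0
}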
@@ -44,7 +44,7 @@ char
get_class(AddressV4 const& addr)
{
    static char const* table = "AAAABBCD";
-   return table[(addr.to_ulong() & 0xE0000000) >> 29];
+   return table[(addr.to_uint() & 0xE0000000) >> 29];
}

} // namespace IP

View File

@@ -20,6 +20,8 @@
#include <xrpl/beast/net/IPAddressV4.h>
#include <xrpl/beast/net/IPAddressV6.h>
+#include <boost/asio/ip/address_v4.hpp>

namespace beast {
namespace IP {
@@ -28,7 +30,9 @@ is_private(AddressV6 const& addr)
{
    return (
        (addr.to_bytes()[0] & 0xfd) ||  // TODO fc00::/8 too ?
-       (addr.is_v4_mapped() && is_private(addr.to_v4())));
+       (addr.is_v4_mapped() &&
+        is_private(boost::asio::ip::make_address_v4(
+            boost::asio::ip::v4_mapped, addr))));
}

bool
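address_v6::to_v4() is deprecated in recent Boost releases, which appears to be why the v4-mapped case is now converted with make_address_v4 and the v4_mapped tag. A minimal sketch of that conversion in isolation (the sample address is arbitrary):

#include <boost/asio/ip/address_v4.hpp>
#include <boost/asio/ip/address_v6.hpp>
#include <iostream>

int main()
{
    using namespace boost::asio::ip;
    // A v4-mapped IPv6 address wrapping 192.168.1.7.
    address_v6 v6 = make_address_v6("::ffff:192.168.1.7");

    if (v6.is_v4_mapped())
    {
        // Replacement for the deprecated address_v6::to_v4().
        address_v4 v4 = make_address_v4(v4_mapped, v6);
        std::cout << v4.to_string() << '\n';  // 192.168.1.7
    }
}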

View File

@@ -21,6 +21,8 @@
#include <xrpl/beast/net/IPEndpoint.h>
#include <boost/algorithm/string/trim.hpp>
+#include <boost/asio/ip/address.hpp>
+#include <boost/asio/ip/address_v4.hpp>
#include <boost/system/detail/error_code.hpp>

#include <cctype>
@@ -167,7 +169,7 @@ operator>>(std::istream& is, Endpoint& endpoint)
    }

    boost::system::error_code ec;
-   auto addr = Address::from_string(addrStr, ec);
+   auto addr = boost::asio::ip::make_address(addrStr, ec);
    if (ec)
    {
        is.setstate(std::ios_base::failbit);

View File

@@ -19,12 +19,102 @@
#include <xrpl/beast/utility/Journal.h>

+#include <chrono>
#include <ios>
#include <ostream>
+#include <ranges>
#include <string>
+#include <thread>

namespace beast {
namespace {
// Fast timestamp to ISO string conversion
// Returns string like "2024-01-15T10:30:45.123Z"
std::string_view
fastTimestampToString(std::int64_t milliseconds_since_epoch)
{
thread_local char buffer[64]; // "2024-01-15T10:30:45.123Z"
// Precomputed lookup table for 2-digit numbers 00-99
static constexpr char digits[200] = {
'0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6',
'0', '7', '0', '8', '0', '9', '1', '0', '1', '1', '1', '2', '1', '3',
'1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9', '2', '0',
'2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7',
'2', '8', '2', '9', '3', '0', '3', '1', '3', '2', '3', '3', '3', '4',
'3', '5', '3', '6', '3', '7', '3', '8', '3', '9', '4', '0', '4', '1',
'4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8',
'4', '9', '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5',
'5', '6', '5', '7', '5', '8', '5', '9', '6', '0', '6', '1', '6', '2',
'6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9',
'7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6',
'7', '7', '7', '8', '7', '9', '8', '0', '8', '1', '8', '2', '8', '3',
'8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9', '9', '0',
'9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7',
'9', '8', '9', '9'};
constexpr std::int64_t UNIX_EPOCH_DAYS =
719468; // Days from year 0 to 1970-01-01
std::int64_t seconds = milliseconds_since_epoch / 1000;
int ms = milliseconds_since_epoch % 1000;
std::int64_t days = seconds / 86400 + UNIX_EPOCH_DAYS;
int sec_of_day = seconds % 86400;
// Calculate year, month, day from days using Gregorian calendar algorithm
int era = (days >= 0 ? days : days - 146096) / 146097;
int doe = days - era * 146097;
int yoe = (doe - doe / 1460 + doe / 36524 - doe / 146096) / 365;
int year = yoe + era * 400;
int doy = doe - (365 * yoe + yoe / 4 - yoe / 100);
int mp = (5 * doy + 2) / 153;
int day = doy - (153 * mp + 2) / 5 + 1;
int month = mp + (mp < 10 ? 3 : -9);
year += (month <= 2);
// Calculate hour, minute, second
int hour = sec_of_day / 3600;
int min = (sec_of_day % 3600) / 60;
int sec = sec_of_day % 60;
// Format: "2024-01-15T10:30:45.123Z"
buffer[0] = '0' + year / 1000;
buffer[1] = '0' + (year / 100) % 10;
buffer[2] = '0' + (year / 10) % 10;
buffer[3] = '0' + year % 10;
buffer[4] = '-';
buffer[5] = digits[month * 2];
buffer[6] = digits[month * 2 + 1];
buffer[7] = '-';
buffer[8] = digits[day * 2];
buffer[9] = digits[day * 2 + 1];
buffer[10] = 'T';
buffer[11] = digits[hour * 2];
buffer[12] = digits[hour * 2 + 1];
buffer[13] = ':';
buffer[14] = digits[min * 2];
buffer[15] = digits[min * 2 + 1];
buffer[16] = ':';
buffer[17] = digits[sec * 2];
buffer[18] = digits[sec * 2 + 1];
buffer[19] = '.';
buffer[20] = '0' + ms / 100;
buffer[21] = '0' + (ms / 10) % 10;
buffer[22] = '0' + ms % 10;
buffer[23] = 'Z';
return {buffer, 24};
}
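The year/month/day arithmetic above is the familiar civil-from-days calculation for the proleptic Gregorian calendar. As a sanity check, the same epoch value can be decoded with C++20 std::chrono calendar types; the sketch below is not part of the diff and simply reproduces the "2024-01-15T10:30:45.123Z" example from the comment:

#include <chrono>
#include <cstdint>
#include <iostream>

int main()
{
    using namespace std::chrono;

    // 2024-01-15T10:30:45.123Z expressed as milliseconds since the epoch.
    std::int64_t const ms = 1705314645123;
    sys_days const d = floor<days>(sys_time<milliseconds>(milliseconds(ms)));
    year_month_day const ymd{d};

    std::cout << int(ymd.year()) << '-' << unsigned(ymd.month()) << '-'
              << unsigned(ymd.day()) << '\n';  // prints 2024-1-15
}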
} // anonymous namespace
std::string Journal::globalLogAttributes_;
std::shared_mutex Journal::globalLogAttributesMutex_;
bool Journal::jsonLogsEnabled_ = false;
thread_local Journal::JsonLogContext Journal::currentJsonLogContext_{};
//------------------------------------------------------------------------------

// A Sink that does nothing.
@@ -87,6 +177,186 @@ Journal::getNullSink()
//------------------------------------------------------------------------------
std::string_view
severities::to_string(Severity severity)
{
using namespace std::string_view_literals;
switch (severity)
{
case kDisabled:
return "disabled"sv;
case kTrace:
return "trace"sv;
case kDebug:
return "debug"sv;
case kInfo:
return "info"sv;
case kWarning:
return "warning"sv;
case kError:
return "error"sv;
case kFatal:
return "fatal"sv;
default:
UNREACHABLE("Unexpected severity value!");
}
return ""sv;
}
void
Journal::JsonLogContext::start(
std::source_location location,
severities::Severity severity,
std::string_view moduleName,
std::string_view journalAttributes) noexcept
{
struct ThreadIdStringInitializer
{
std::string value;
ThreadIdStringInitializer()
{
std::stringstream threadIdStream;
threadIdStream << std::this_thread::get_id();
value = threadIdStream.str();
}
};
thread_local ThreadIdStringInitializer const threadId;
messageOffset_ = 0;
messageBuffer_.clear();
jsonWriter_ = detail::SimpleJsonWriter{&messageBuffer_};
if (!jsonLogsEnabled_)
{
messageBuffer_ = journalAttributes;
return;
}
writer().startObject();
if (!journalAttributes.empty())
{
writer().writeKey("Jnl");
writer().writeRaw(journalAttributes);
writer().endObject();
}
{
std::shared_lock lock(globalLogAttributesMutex_);
if (!globalLogAttributes_.empty())
{
writer().writeKey("Glb");
writer().writeRaw(globalLogAttributes_);
writer().endObject();
}
}
writer().writeKey("Mtd");
writer().startObject();
writer().writeKey("Mdl");
writer().writeString(moduleName);
writer().writeKey("Fl");
constexpr size_t FILE_NAME_KEEP_CHARS = 20;
std::string_view fileName = location.file_name();
std::string_view trimmedFileName = (fileName.size() > FILE_NAME_KEEP_CHARS)
? fileName.substr(fileName.size() - FILE_NAME_KEEP_CHARS)
: fileName;
writer().writeString(trimmedFileName);
writer().writeKey("Ln");
writer().writeUInt(location.line());
writer().writeKey("ThId");
writer().writeString(threadId.value);
auto severityStr = to_string(severity);
writer().writeKey("Lv");
writer().writeString(severityStr);
auto nowMs = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now().time_since_epoch())
.count();
writer().writeKey("Tm");
writer().writeString(fastTimestampToString(nowMs));
writer().endObject();
hasMessageParams_ = false;
}
void
Journal::JsonLogContext::reuseJson()
{
messageOffset_ = messageBuffer_.size();
}
void
Journal::JsonLogContext::finish()
{
if (messageOffset_ != 0)
{
messageBuffer_.erase(messageOffset_);
}
else
{
messageBuffer_.clear();
}
jsonWriter_ = detail::SimpleJsonWriter{&messageBuffer_};
}
void
Journal::initMessageContext(
std::source_location location,
severities::Severity severity) const
{
currentJsonLogContext_.start(location, severity, name_, attributes_);
}
std::string&
Journal::formatLog(std::string const& message)
{
if (!jsonLogsEnabled_)
{
currentJsonLogContext_.writer().buffer() += message;
return currentJsonLogContext_.messageBuffer();
}
auto& writer = currentJsonLogContext_.writer();
currentJsonLogContext_.endMessageParams();
writer.writeKey("Msg");
writer.writeString(message);
writer.endObject();
writer.finish();
return currentJsonLogContext_.messageBuffer();
}
void
Journal::enableStructuredJournal()
{
jsonLogsEnabled_ = true;
}
void
Journal::disableStructuredJournal()
{
jsonLogsEnabled_ = false;
resetGlobalAttributes();
}
bool
Journal::isStructuredJournalEnabled()
{
return jsonLogsEnabled_;
}
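Taken together, these functions emit one JSON object per log line: optional "Jnl" and "Glb" attribute blocks, a "Mtd" metadata object (module, trimmed file name, line, thread id, level, timestamp), and finally the "Msg" text appended by formatLog(). The snippet below is purely illustrative of that shape, with made-up module, file, line, thread and message values; the real code builds the line through detail::SimpleJsonWriter rather than string concatenation.

#include <iostream>
#include <string>

int main()
{
    // Hypothetical example of one structured log line; field values invented.
    std::string line =
        R"({"Mtd":{"Mdl":"LedgerMaster","Fl":"LedgerMaster.cpp","Ln":412,)"
        R"("ThId":"140213","Lv":"warning","Tm":"2024-01-15T10:30:45.123Z"},)"
        R"("Msg":"stale validated ledger"})";
    std::cout << line << '\n';
}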
Journal::Sink::Sink(Severity thresh, bool console)
    : thresh_(thresh), m_console(console)
{
@@ -143,13 +413,14 @@ Journal::ScopedStream::ScopedStream(
Journal::ScopedStream::~ScopedStream()
{
-   std::string const& s(m_ostream.str());
+   std::string s = m_ostream.str();
    if (!s.empty())
    {
        if (s == "\n")
-           m_sink.write(m_level, "");
-       else
-           m_sink.write(m_level, s);
+           s = "";
+
+       m_sink.write(m_level, formatLog(s));
+       currentJsonLogContext_.finish();
    }
}
@@ -159,12 +430,4 @@ Journal::ScopedStream::operator<<(std::ostream& manip(std::ostream&)) const
    return m_ostream << manip;
}

-//------------------------------------------------------------------------------
-
-Journal::ScopedStream
-Journal::Stream::operator<<(std::ostream& manip(std::ostream&)) const
-{
-    return ScopedStream(*this, manip);
-}
-
} // namespace beast

View File

@@ -174,7 +174,7 @@ Array::append(Json::Value const& v)
            return;
        }
    }
-   UNREACHABLE("Json::Array::append : invalid type");  // LCOV_EXCL_LINE
+   UNREACHABLE("Json::Array::append : invalid type");
}

void
@@ -209,7 +209,7 @@ Object::set(std::string const& k, Json::Value const& v)
            return;
        }
    }
-   UNREACHABLE("Json::Object::set : invalid type");  // LCOV_EXCL_LINE
+   UNREACHABLE("Json::Object::set : invalid type");
}

//------------------------------------------------------------------------------

Some files were not shown because too many files have changed in this diff.