Compare commits

3 commits:

Author          SHA1        Message                              Date
Bronek Kozicki  e172510c4f  Fixes                                2025-09-02 21:28:20 +01:00
Bronek Kozicki  3a2e26c5c0  Remove unnecessary inputs            2025-09-02 20:14:47 +01:00
Bronek Kozicki  6bd3cc054a  Add build-selected-commit workflow   2025-09-02 20:10:36 +01:00

525 changed files with 8681 additions and 39571 deletions


@@ -33,6 +33,5 @@ slack_app: false
ignore:
- "src/test/"
- "src/tests/"
- "include/xrpl/beast/test/"
- "include/xrpl/beast/unit_test/"


@@ -1,5 +1,7 @@
# This action installs and optionally uploads Conan dependencies to a remote
# repository. The dependencies will only be uploaded if the credentials are
# provided.
name: Build Conan dependencies
description: "Install Conan dependencies, optionally forcing a rebuild of all dependencies."
# Note that actions do not support 'type' and all inputs are strings, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
@@ -10,40 +12,51 @@ inputs:
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
build_nproc:
description: "The number of processors to use for building."
conan_remote_name:
description: "The name of the Conan remote to use."
required: true
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: true
conan_remote_username:
description: "The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
required: false
default: ""
conan_remote_password:
description: "The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
required: false
default: ""
force_build:
description: 'Force building of all dependencies ("true", "false").'
required: false
default: "false"
log_verbosity:
description: "The logging verbosity."
force_upload:
description: 'Force uploading of all dependencies ("true", "false").'
required: false
default: "verbose"
default: "false"
runs:
using: composite
steps:
- name: Install Conan dependencies
shell: bash
env:
BUILD_DIR: ${{ inputs.build_dir }}
BUILD_NPROC: ${{ inputs.build_nproc }}
BUILD_OPTION: ${{ inputs.force_build == 'true' && '*' || 'missing' }}
BUILD_TYPE: ${{ inputs.build_type }}
LOG_VERBOSITY: ${{ inputs.log_verbosity }}
run: |
echo 'Installing dependencies.'
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
mkdir -p ${{ inputs.build_dir }}
cd ${{ inputs.build_dir }}
conan install \
--output-folder . \
--build="${BUILD_OPTION}" \
--options:host='&:tests=True' \
--options:host='&:xrpld=True' \
--settings:all build_type="${BUILD_TYPE}" \
--conf:all tools.build:jobs=${BUILD_NPROC} \
--conf:all tools.build:verbosity="${LOG_VERBOSITY}" \
--conf:all tools.compilation:verbosity="${LOG_VERBOSITY}" \
..
--build ${{ inputs.force_build == 'true' && '"*"' || 'missing' }} \
--options:host '&:tests=True' \
--options:host '&:xrpld=True' \
--settings:all build_type=${{ inputs.build_type }} \
--format=json ..
- name: Upload Conan dependencies
if: ${{ inputs.conan_remote_username != '' && inputs.conan_remote_password != '' }}
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo "Logging into Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
conan remote login ${{ inputs.conan_remote_name }} "${{ inputs.conan_remote_username }}" --password "${{ inputs.conan_remote_password }}"
echo 'Uploading dependencies.'
conan upload '*' --confirm --check ${{ inputs.force_upload == 'true' && '--force' || '' }} --remote=${{ inputs.conan_remote_name }}
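
For reference, the force_build and force_upload toggles above reduce to the following Conan invocations; a minimal sketch, with paths abbreviated and the remote name taken from the workflow defaults:

    # force_build == 'false' (default): build only the dependencies missing
    # from the local cache or the remote.
    conan install --build missing ..
    # force_build == 'true': rebuild every dependency from source.
    conan install --build "*" ..
    # The upload step runs only when both credentials are provided;
    # force_upload == 'true' additionally passes --force.
    conan upload '*' --confirm --check --remote=xrplf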

.github/actions/build-test/action.yml (new file, +95 lines)

@@ -0,0 +1,95 @@
# This action builds and tests the binary. The Conan dependencies must have
# already been installed (see the build-deps action).
name: Build and Test
# Note that actions do not support 'type' and all inputs are strings, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
inputs:
build_dir:
description: "The directory where to build."
required: true
build_only:
description: 'Whether to only build or to build and test the code ("true", "false").'
required: false
default: "false"
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
cmake_args:
description: "Additional arguments to pass to CMake."
required: false
default: ""
cmake_target:
description: "The CMake target to build."
required: true
codecov_token:
description: "The Codecov token to use for uploading coverage reports."
required: false
default: ""
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
runs:
using: composite
steps:
- name: Configure CMake
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Configuring CMake.'
cmake \
-G '${{ inputs.os == 'windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE=${{ inputs.build_type }} \
${{ inputs.cmake_args }} \
..
- name: Build the binary
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Building binary.'
cmake \
--build . \
--config ${{ inputs.build_type }} \
--parallel $(nproc) \
--target ${{ inputs.cmake_target }}
- name: Check linking
if: ${{ inputs.os == 'linux' }}
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Checking linking.'
ldd ./rippled
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
echo 'The binary is statically linked.'
else
echo 'The binary is dynamically linked.'
exit 1
fi
- name: Verify voidstar
if: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Verifying presence of instrumentation.'
./rippled --version | grep libvoidstar
- name: Test the binary
if: ${{ inputs.build_only == 'false' }}
shell: bash
working-directory: ${{ inputs.build_dir }}/${{ inputs.os == 'windows' && inputs.build_type || '' }}
run: |
echo 'Testing binary.'
./rippled --unittest --unittest-jobs $(nproc)
ctest -j $(nproc) --output-on-failure
- name: Upload coverage report
if: ${{ inputs.cmake_target == 'coverage' }}
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
with:
disable_search: true
disable_telem: true
fail_ci_if_error: true
files: ${{ inputs.build_dir }}/coverage.xml
plugins: noop
token: ${{ inputs.codecov_token }}
verbose: true


@@ -1,43 +0,0 @@
name: Print build environment
description: "Print environment and some tooling versions"
runs:
using: composite
steps:
- name: Check configuration (Windows)
if: ${{ runner.os == 'Windows' }}
shell: bash
run: |
echo 'Checking environment variables.'
set
echo 'Checking CMake version.'
cmake --version
echo 'Checking Conan version.'
conan --version
- name: Check configuration (Linux and macOS)
if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }}
shell: bash
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking compiler version.'
${{ runner.os == 'Linux' && '${CC}' || 'clang' }} --version
echo 'Checking Conan version.'
conan --version
echo 'Checking Ninja version.'
ninja --version
echo 'Checking nproc version.'
nproc --version


@@ -1,46 +0,0 @@
name: Setup Conan
description: "Set up Conan configuration, profile, and remote."
inputs:
conan_remote_name:
description: "The name of the Conan remote to use."
required: false
default: xrplf
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: false
default: https://conan.ripplex.io
runs:
using: composite
steps:
- name: Set up Conan configuration
shell: bash
run: |
echo 'Installing configuration.'
cat conan/global.conf ${{ runner.os == 'Linux' && '>>' || '>' }} $(conan config home)/global.conf
echo 'Conan configuration:'
conan config show '*'
- name: Set up Conan profile
shell: bash
run: |
echo 'Installing profile.'
conan config install conan/profiles/default -tf $(conan config home)/profiles/
echo 'Conan profile:'
conan profile show
- name: Set up Conan remote
shell: bash
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
CONAN_REMOTE_URL: ${{ inputs.conan_remote_url }}
run: |
echo "Adding Conan remote '${CONAN_REMOTE_NAME}' at '${CONAN_REMOTE_URL}'."
conan remote add --index 0 --force "${CONAN_REMOTE_NAME}" "${CONAN_REMOTE_URL}"
echo 'Listing Conan remotes.'
conan remote list


@@ -72,15 +72,15 @@ It generates many files of [results](results):
desired as described above. In a perfect repo, this file will be
empty.
This file is committed to the repo, and is used by the [levelization
GitHub workflow](../../workflows/reusable-check-levelization.yml) to validate
GitHub workflow](../../workflows/check-levelization.yml) to validate
that nothing changed.
- [`ordering.txt`](results/ordering.txt): A list showing relationships
between modules where there are no loops as they actually exist, as
opposed to how they are desired as described above.
This file is committed to the repo, and is used by the [levelization
GitHub workflow](../../workflows/reusable-check-levelization.yml) to validate
GitHub workflow](../../workflows/check-levelization.yml) to validate
that nothing changed.
- [`levelization.yml`](../../workflows/reusable-check-levelization.yml)
- [`levelization.yml`](../../workflows/check-levelization.yml)
GitHub Actions workflow to test that levelization loops haven't
changed. Unfortunately, if changes are detected, it can't tell if
they are improvements or not, so if you have resolved any issues or
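
Since both results files are committed, the workflow check amounts to regenerating them and verifying a clean diff. A hedged sketch, assuming the results directory follows the README's relative links:

    # After re-running the levelization script, a non-empty diff here means
    # the loops or ordering changed and the workflow should fail.
    git diff --exit-code .github/scripts/levelization/results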


@@ -7,6 +7,9 @@ Loop: test.jtx test.unit_test
Loop: xrpld.app xrpld.core
xrpld.app > xrpld.core
Loop: xrpld.app xrpld.ledger
xrpld.app > xrpld.ledger
Loop: xrpld.app xrpld.overlay
xrpld.overlay > xrpld.app


@@ -2,10 +2,6 @@ libxrpl.basics > xrpl.basics
libxrpl.crypto > xrpl.basics
libxrpl.json > xrpl.basics
libxrpl.json > xrpl.json
libxrpl.ledger > xrpl.basics
libxrpl.ledger > xrpl.json
libxrpl.ledger > xrpl.ledger
libxrpl.ledger > xrpl.protocol
libxrpl.net > xrpl.basics
libxrpl.net > xrpl.net
libxrpl.protocol > xrpl.basics
@@ -25,11 +21,11 @@ test.app > test.unit_test
test.app > xrpl.basics
test.app > xrpld.app
test.app > xrpld.core
test.app > xrpld.ledger
test.app > xrpld.nodestore
test.app > xrpld.overlay
test.app > xrpld.rpc
test.app > xrpl.json
test.app > xrpl.ledger
test.app > xrpl.protocol
test.app > xrpl.resource
test.basics > test.jtx
@@ -48,8 +44,8 @@ test.consensus > test.unit_test
test.consensus > xrpl.basics
test.consensus > xrpld.app
test.consensus > xrpld.consensus
test.consensus > xrpld.ledger
test.consensus > xrpl.json
test.consensus > xrpl.ledger
test.core > test.jtx
test.core > test.toplevel
test.core > test.unit_test
@@ -67,9 +63,9 @@ test.json > xrpl.json
test.jtx > xrpl.basics
test.jtx > xrpld.app
test.jtx > xrpld.core
test.jtx > xrpld.ledger
test.jtx > xrpld.rpc
test.jtx > xrpl.json
test.jtx > xrpl.ledger
test.jtx > xrpl.net
test.jtx > xrpl.protocol
test.jtx > xrpl.resource
@@ -79,7 +75,7 @@ test.ledger > test.toplevel
test.ledger > xrpl.basics
test.ledger > xrpld.app
test.ledger > xrpld.core
test.ledger > xrpl.ledger
test.ledger > xrpld.ledger
test.ledger > xrpl.protocol
test.nodestore > test.jtx
test.nodestore > test.toplevel
@@ -138,11 +134,7 @@ test.toplevel > test.csf
test.toplevel > xrpl.json
test.unit_test > xrpl.basics
tests.libxrpl > xrpl.basics
tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.net
xrpl.json > xrpl.basics
xrpl.ledger > xrpl.basics
xrpl.ledger > xrpl.protocol
xrpl.net > xrpl.basics
xrpl.protocol > xrpl.basics
xrpl.protocol > xrpl.json
@@ -159,7 +151,6 @@ xrpld.app > xrpld.consensus
xrpld.app > xrpld.nodestore
xrpld.app > xrpld.perflog
xrpld.app > xrpl.json
xrpld.app > xrpl.ledger
xrpld.app > xrpl.net
xrpld.app > xrpl.protocol
xrpld.app > xrpl.resource
@@ -172,6 +163,9 @@ xrpld.core > xrpl.basics
xrpld.core > xrpl.json
xrpld.core > xrpl.net
xrpld.core > xrpl.protocol
xrpld.ledger > xrpl.basics
xrpld.ledger > xrpl.json
xrpld.ledger > xrpl.protocol
xrpld.nodestore > xrpl.basics
xrpld.nodestore > xrpld.core
xrpld.nodestore > xrpld.unity
@@ -192,9 +186,9 @@ xrpld.perflog > xrpl.basics
xrpld.perflog > xrpl.json
xrpld.rpc > xrpl.basics
xrpld.rpc > xrpld.core
xrpld.rpc > xrpld.ledger
xrpld.rpc > xrpld.nodestore
xrpld.rpc > xrpl.json
xrpld.rpc > xrpl.ledger
xrpld.rpc > xrpl.net
xrpld.rpc > xrpl.protocol
xrpld.rpc > xrpl.resource

.github/scripts/strategy-matrix/generate.py (63 changes; executable → normal file)

@@ -2,17 +2,7 @@
import argparse
import itertools
import json
from pathlib import Path
from dataclasses import dataclass
THIS_DIR = Path(__file__).parent.resolve()
@dataclass
class Config:
architecture: list[dict]
os: list[dict]
build_type: list[str]
cmake_args: list[str]
import re
'''
Generate a strategy matrix for GitHub Actions CI.
@@ -28,9 +18,9 @@ We will further set additional CMake arguments as follows:
- Certain Debian Bookworm configurations will change the reference fee, enable
codecov, and enable voidstar in PRs.
'''
def generate_strategy_matrix(all: bool, config: Config) -> list:
def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict], build_type: list[str], cmake_args: list[str]) -> dict:
configurations = []
for architecture, os, build_type, cmake_args in itertools.product(config.architecture, config.os, config.build_type, config.cmake_args):
for architecture, os, build_type, cmake_args in itertools.product(architecture, os, build_type, cmake_args):
# The default CMake target is 'all' for Linux and macOS and 'install'
# for Windows, but it can be overridden for certain configurations.
cmake_target = 'install' if os["distro_name"] == 'windows' else 'all'
@@ -45,7 +35,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
# Only generate a subset of configurations in PRs.
if not all:
# Debian:
# - Bookworm using GCC 13: Release and Unity on linux/amd64, set
# - Bookworm using GCC 13: Release and Unity on linux/arm64, set
# the reference fee to 500.
# - Bookworm using GCC 15: Debug and no Unity on linux/amd64, enable
# code coverage (which will be done below).
@@ -57,7 +47,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if os['distro_name'] == 'debian':
skip = True
if os['distro_version'] == 'bookworm':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-13' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-13' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/arm64':
cmake_args = f'-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}'
skip = False
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
@@ -74,14 +64,14 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
continue
# RHEL:
# - 9 using GCC 12: Debug and Unity on linux/amd64.
# - 10 using Clang: Release and no Unity on linux/amd64.
# - 9.4 using GCC 12: Debug and Unity on linux/amd64.
# - 9.6 using Clang: Release and no Unity on linux/amd64.
if os['distro_name'] == 'rhel':
skip = True
if os['distro_version'] == '9':
if os['distro_version'] == '9.4':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-12' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
skip = False
elif os['distro_version'] == '10':
elif os['distro_version'] == '9.6':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-any' and build_type == 'Release' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
skip = False
if skip:
@@ -130,14 +120,16 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if os['distro_name'] == 'rhel' and architecture['platform'] == 'linux/arm64':
continue
# We skip all clang 20+ on arm64 due to Boost build error.
if f'{os['compiler_name']}-{os['compiler_version']}' in ['clang-20', 'clang-21'] and architecture['platform'] == 'linux/arm64':
# We skip clang-20 on arm64 due to a Boost 1.86 build error.
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and architecture['platform'] == 'linux/arm64':
continue
# Enable code coverage for Debian Bookworm using GCC 15 in Debug and no
# Unity on linux/amd64
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
cmake_args = f'-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}'
cmake_target = 'coverage'
build_only = True
# Generate a unique name for the configuration, e.g. macos-arm64-debug
# or debian-bookworm-gcc-12-amd64-release-unity.
@@ -160,36 +152,27 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
'config_name': config_name,
'cmake_args': cmake_args,
'cmake_target': cmake_target,
'build_only': build_only,
'build_only': 'true' if build_only else 'false',
'build_type': build_type,
'os': os,
'architecture': architecture,
})
return configurations
def read_config(file: Path) -> Config:
config = json.loads(file.read_text())
if config['architecture'] is None or config['os'] is None or config['build_type'] is None or config['cmake_args'] is None:
raise Exception('Invalid configuration file.')
return Config(**config)
return {'include': configurations}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--all', help='Set to generate all configurations (generally used when merging a PR) or leave unset to generate a subset of configurations (generally used when committing to a PR).', action="store_true")
parser.add_argument('-c', '--config', help='Path to the JSON file containing the strategy matrix configurations.', required=False, type=Path)
parser.add_argument('-c', '--config', help='Path to the JSON file containing the strategy matrix configurations.', required=True, type=str)
args = parser.parse_args()
matrix = []
if args.config is None or args.config == '':
matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "linux.json"))
matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "macos.json"))
matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "windows.json"))
else:
matrix += generate_strategy_matrix(args.all, read_config(args.config))
# Load the JSON configuration file.
config = None
with open(args.config, 'r') as f:
config = json.load(f)
if config['architecture'] is None or config['os'] is None or config['build_type'] is None or config['cmake_args'] is None:
raise Exception('Invalid configuration file.')
# Generate the strategy matrix.
print(f'matrix={json.dumps({"include": matrix})}')
print(f'matrix={json.dumps(generate_strategy_matrix(args.all, config['architecture'], config['os'], config['build_type'], config['cmake_args']))}')
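
For reference, the script is invoked per platform with the matching JSON configuration (this mirrors the invocation in build-test.yml further below):

    cd .github/scripts/strategy-matrix
    # Subset of configurations, as generally used on PR commits:
    python generate.py --config=linux.json
    # All configurations, as generally used when merging a PR:
    python generate.py --all --config=linux.json >> "${GITHUB_OUTPUT}"
    # Either way, the script prints a single line of the form:
    #   matrix={"include": [...]}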


@@ -14,197 +14,139 @@
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "0525eae"
"compiler_version": "12"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "0525eae"
"compiler_version": "13"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "0525eae"
"compiler_version": "14"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "0525eae"
"compiler_version": "15"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "0525eae"
"compiler_version": "16"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "0525eae"
"compiler_version": "17"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "0525eae"
"compiler_version": "18"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "0525eae"
"compiler_version": "19"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "0525eae"
"compiler_version": "20"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"distro_name": "rhel",
"distro_version": "9.4",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "0525eae"
"compiler_version": "12"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"distro_name": "rhel",
"distro_version": "9.4",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "0525eae"
"compiler_version": "13"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"distro_name": "rhel",
"distro_version": "9.4",
"compiler_name": "gcc",
"compiler_version": "14"
},
{
"distro_name": "rhel",
"distro_version": "9.6",
"compiler_name": "gcc",
"compiler_version": "13"
},
{
"distro_name": "rhel",
"distro_version": "9.6",
"compiler_name": "gcc",
"compiler_version": "14"
},
{
"distro_name": "rhel",
"distro_version": "9.4",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "0525eae"
"compiler_version": "any"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"distro_name": "rhel",
"distro_version": "9.6",
"compiler_name": "clang",
"compiler_version": "21",
"image_sha": "0525eae"
},
{
"distro_name": "rhel",
"distro_version": "8",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "8",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "e1782cd"
"compiler_version": "any"
},
{
"distro_name": "ubuntu",
"distro_version": "jammy",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "e1782cd"
"compiler_version": "12"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "e1782cd"
"compiler_version": "13"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "e1782cd"
"compiler_version": "14"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "e1782cd"
"compiler_version": "16"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "e1782cd"
"compiler_version": "17"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "e1782cd"
"compiler_version": "18"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "e1782cd"
"compiler_version": "19"
}
],
"build_type": ["Debug", "Release"],


@@ -10,8 +10,7 @@
"distro_name": "macos",
"distro_version": "",
"compiler_name": "",
"compiler_version": "",
"image_sha": ""
"compiler_version": ""
}
],
"build_type": ["Debug", "Release"],


@@ -2,7 +2,7 @@
"architecture": [
{
"platform": "windows/amd64",
"runner": ["self-hosted", "Windows", "devbox"]
"runner": ["windows-latest"]
}
],
"os": [
@@ -10,8 +10,7 @@
"distro_name": "windows",
"distro_version": "",
"compiler_name": "",
"compiler_version": "",
"image_sha": ""
"compiler_version": ""
}
],
"build_type": ["Debug", "Release"],


@@ -0,0 +1,163 @@
# This workflow builds the binary from the selected commit (no earlier than the 2.5.0 release)
name: Build selected commit
# This workflow can only be triggered manually, by a project maintainer
on:
workflow_dispatch:
inputs:
commit:
description: "Commit to build from."
required: false
type: string
build_container:
description: "Build container image to use"
required: true
type: string
default: ghcr.io/xrplf/ci/debian-bullseye:gcc-12
strip_symbols:
description: "Strip debug symbols"
required: true
type: boolean
default: true
archive_archive:
description: "Archive rippled binary"
required: true
type: boolean
default: false
build_only:
description: "Only build, do not run unit tests"
required: true
type: boolean
default: false
build_type:
description: "Build type (Debug or Release)"
required: true
type: choice
default: Release
options:
- Debug
- Release
cmake_args:
description: "CMake args for build"
required: true
type: string
default: "-Dxrpld=ON -Dtests=ON -Dassert=OFF -Dunity=OFF"
dependencies_force_build:
description: "Force building of all dependencies."
required: false
type: boolean
default: false
env:
CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io
BUILD_DIR: .build
jobs:
build:
runs-on: ["self-hosted", "Linux", "X64", "heavy"]
container: ${{ inputs.build_container }}
steps:
- name: Checkout this workflow
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
sparse-checkout: |
.github
conan
- name: Move workflow files aside
run: |
mkdir -p ${{ runner.temp }}
mv .github conan ${{ runner.temp }}
rm -rf .git
- name: Checkout the commit to build
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
ref: ${{ inputs.commit }}
- name: Restore workflow files
run: |
rm -rf .github conan
mv ${{ runner.temp }}/.github .
mv ${{ runner.temp }}/conan .
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
with:
disable_ccache: true
- name: Check configuration
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking compiler version.'
${CC} --version
echo 'Checking Conan version.'
conan --version
echo 'Checking Ninja version.'
ninja --version
echo 'Checking nproc version.'
nproc --version
- name: Set up Conan configuration
run: |
echo 'Installing configuration.'
cat conan/global.conf >> $(conan config home)/global.conf
echo 'Conan configuration:'
conan config show '*'
- name: Set up Conan profile
run: |
echo 'Installing profile.'
conan config install conan/profiles/default -tf $(conan config home)/profiles/
echo 'Conan profile:'
conan profile show
- name: Set up Conan remote
shell: bash
run: |
echo "Adding Conan remote '${{ env.CONAN_REMOTE_NAME }}' at ${{ env.CONAN_REMOTE_URL }}."
conan remote add --index 0 --force ${{ env.CONAN_REMOTE_NAME }} ${{ env.CONAN_REMOTE_URL }}
echo 'Listing Conan remotes.'
conan remote list
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ env.BUILD_DIR }}
build_type: ${{ inputs.build_type }}
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
force_build: ${{ inputs.dependencies_force_build }}
force_upload: false
- name: Build and test binary
uses: ./.github/actions/build-test
with:
build_dir: ${{ env.BUILD_DIR }}
build_only: ${{ inputs.build_only }}
build_type: ${{ inputs.build_type }}
cmake_args: ${{ inputs.cmake_args }}
cmake_target: "all"
os: "linux"
- name: Strip symbols
if: ${{ inputs.strip_symbols }}
run: |
strip -D --strip-unneeded ${{ env.BUILD_DIR }}/rippled
${{ env.BUILD_DIR }}/rippled --version
- name: Move the binary
run: |
mv ${{ env.BUILD_DIR }}/rippled .
- name: Archive rippled binary
if: ${{ inputs.archive_archive }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: rippled
path: ./rippled
retention-days: 90
compression-level: 8
overwrite: true

.github/workflows/build-test.yml (new file, +199 lines)

@@ -0,0 +1,199 @@
# This workflow builds and tests the binary for various configurations.
name: Build and test
# This workflow can only be triggered by other workflows. Note that the
# workflow_call event does not support the 'choice' input type, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onworkflow_callinputsinput_idtype,
# so we use 'string' instead.
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: false
type: string
default: ".build"
conan_remote_name:
description: "The name of the Conan remote to use."
required: true
type: string
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: true
type: string
dependencies_force_build:
description: "Force building of all dependencies."
required: false
type: boolean
default: false
dependencies_force_upload:
description: "Force uploading of all dependencies."
required: false
type: boolean
default: false
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
secrets:
codecov_token:
description: "The Codecov token to use for uploading coverage reports."
required: false
conan_remote_username:
description: "The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
required: false
conan_remote_password:
description: "The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
required: false
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.os }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Set up Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: 3.13
- name: Generate strategy matrix
working-directory: .github/scripts/strategy-matrix
id: generate
run: python generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} --config=${{ inputs.os }}.json >> "${GITHUB_OUTPUT}"
outputs:
matrix: ${{ steps.generate.outputs.matrix }}
# Build and test the binary.
build-test:
needs:
- generate-matrix
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
runs-on: ${{ matrix.architecture.runner }}
container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }}
steps:
- name: Check strategy matrix
run: |
echo 'Operating system distro name: ${{ matrix.os.distro_name }}'
echo 'Operating system distro version: ${{ matrix.os.distro_version }}'
echo 'Operating system compiler name: ${{ matrix.os.compiler_name }}'
echo 'Operating system compiler version: ${{ matrix.os.compiler_version }}'
echo 'Architecture platform: ${{ matrix.architecture.platform }}'
echo 'Architecture runner: ${{ toJson(matrix.architecture.runner) }}'
echo 'Build type: ${{ matrix.build_type }}'
echo 'Build only: ${{ matrix.build_only }}'
echo 'CMake arguments: ${{ matrix.cmake_args }}'
echo 'CMake target: ${{ matrix.cmake_target }}'
echo 'Config name: ${{ matrix.config_name }}'
- name: Clean workspace (macOS)
if: ${{ inputs.os == 'macos' }}
run: |
WORKSPACE=${{ github.workspace }}
echo "Cleaning workspace '${WORKSPACE}'."
if [ -z "${WORKSPACE}" ] || [ "${WORKSPACE}" = "/" ]; then
echo "Invalid working directory '${WORKSPACE}'."
exit 1
fi
find "${WORKSPACE}" -depth 1 | xargs rm -rfv
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
- name: Check configuration (Windows)
if: ${{ inputs.os == 'windows' }}
run: |
echo 'Checking environment variables.'
set
echo 'Checking CMake version.'
cmake --version
echo 'Checking Conan version.'
conan --version
- name: Check configuration (Linux and macOS)
if: ${{ inputs.os == 'linux' || inputs.os == 'macos' }}
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking compiler version.'
${{ inputs.os == 'linux' && '${CC}' || 'clang' }} --version
echo 'Checking Conan version.'
conan --version
echo 'Checking Ninja version.'
ninja --version
echo 'Checking nproc version.'
nproc --version
- name: Set up Conan configuration
run: |
echo 'Installing configuration.'
cat conan/global.conf ${{ inputs.os == 'linux' && '>>' || '>' }} $(conan config home)/global.conf
echo 'Conan configuration:'
conan config show '*'
- name: Set up Conan profile
run: |
echo 'Installing profile.'
conan config install conan/profiles/default -tf $(conan config home)/profiles/
echo 'Conan profile:'
conan profile show
- name: Set up Conan remote
shell: bash
run: |
echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }}
echo 'Listing Conan remotes.'
conan remote list
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ inputs.build_dir }}
build_type: ${{ matrix.build_type }}
conan_remote_name: ${{ inputs.conan_remote_name }}
conan_remote_url: ${{ inputs.conan_remote_url }}
conan_remote_username: ${{ secrets.conan_remote_username }}
conan_remote_password: ${{ secrets.conan_remote_password }}
force_build: ${{ inputs.dependencies_force_build }}
force_upload: ${{ inputs.dependencies_force_upload }}
- name: Build and test binary
uses: ./.github/actions/build-test
with:
build_dir: ${{ inputs.build_dir }}
build_only: ${{ matrix.build_only }}
build_type: ${{ matrix.build_type }}
cmake_args: ${{ matrix.cmake_args }}
cmake_target: ${{ matrix.cmake_target }}
codecov_token: ${{ secrets.codecov_token }}
os: ${{ inputs.os }}
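
Note how the container image is derived from the matrix entry: the expression above formats ghcr.io/xrplf/ci/{distro_name}-{distro_version}:{compiler_name}-{compiler_version}. As an illustration, a hypothetical Debian Bookworm entry with GCC 13 resolves to:

    # distro_name=debian, distro_version=bookworm,
    # compiler_name=gcc, compiler_version=13:
    docker pull ghcr.io/xrplf/ci/debian-bookworm:gcc-13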

.github/workflows/check-format.yml (new file, +75 lines)

@@ -0,0 +1,75 @@
# This workflow checks if the code is properly formatted.
name: Check format
# This workflow can only be triggered by other workflows.
on: workflow_call
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-format
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
pre-commit:
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/tools-rippled-pre-commit
steps:
# The $GITHUB_WORKSPACE and ${{ github.workspace }} might not point to the
# same directory for jobs running in containers. The actions/checkout step
# is *supposed* to check out into $GITHUB_WORKSPACE and then add it to
# safe.directory (see instructions at https://github.com/actions/checkout)
# but that is apparently not happening for some container images. We
# therefore preemptively add both directories to safe.directory. See also
# https://github.com/actions/runner/issues/2058 for more details.
- name: Configure git safe.directory
run: |
git config --global --add safe.directory $GITHUB_WORKSPACE
git config --global --add safe.directory ${{ github.workspace }}
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Check configuration
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking pre-commit version.'
pre-commit --version
echo 'Checking clang-format version.'
clang-format --version
echo 'Checking NPM version.'
npm --version
echo 'Checking Node.js version.'
node --version
echo 'Checking prettier version.'
prettier --version
- name: Format code
run: pre-commit run --show-diff-on-failure --color=always --all-files
- name: Check for differences
env:
MESSAGE: |
One or more files did not conform to the formatting. Maybe you did
not run 'pre-commit' before committing, or your version of
'clang-format' or 'prettier' has an incompatibility with the ones
used here (see the "Check configuration" step above).
Run 'pre-commit run --all-files' in your repo, and then commit and
push the changes.
run: |
DIFF=$(git status --porcelain)
if [ -n "${DIFF}" ]; then
# Print the files that changed to give the contributor a hint about
# what to expect when running pre-commit on their own machine.
git status
echo "${MESSAGE}"
exit 1
fi


@@ -0,0 +1,62 @@
# This workflow checks that all commits in the "master" branch are also in the
# "release" and "develop" branches, and that all commits in the "release" branch
# are also in the "develop" branch.
name: Check for missing commits
# This workflow can only be triggered by other workflows.
on: workflow_call
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-missing-commits
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
fetch-depth: 0
- name: Check for missing commits
env:
MESSAGE: |
If you are reading this, then the commits indicated above are missing
from the "develop" and/or "release" branch. Do a reverse-merge as soon
as possible. See CONTRIBUTING.md for instructions.
run: |
set -o pipefail
# Branches are ordered by how "canonical" they are. Every commit in one
# branch should be in all the branches behind it.
order=(master release develop)
branches=()
for branch in "${order[@]}"; do
# Check that the branches exist so that this job will work on forked
# repos, which don't necessarily have master and release branches.
echo "Checking if ${branch} exists."
if git ls-remote --exit-code --heads origin \
refs/heads/${branch} > /dev/null; then
branches+=(origin/${branch})
fi
done
prior=()
for branch in "${branches[@]}"; do
if [[ ${#prior[@]} -ne 0 ]]; then
echo "Checking ${prior[@]} for commits missing from ${branch}."
git log --oneline --no-merges "${prior[@]}" \
^$branch | tee -a "missing-commits.txt"
echo
fi
prior+=("${branch}")
done
if [[ $(cat missing-commits.txt | wc -l) -ne 0 ]]; then
echo "${MESSAGE}"
exit 1
fi
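
With all three branches present, the accumulating loop above expands to exactly two checks; concretely:

    # "release" must contain every commit on "master":
    git log --oneline --no-merges origin/master ^origin/release
    # "develop" must contain every commit on "master" and "release":
    git log --oneline --no-merges origin/master origin/release ^origin/develop
    # Any lines printed are missing commits, and the job fails.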


@@ -9,14 +9,12 @@ on:
inputs:
conan_remote_name:
description: "The name of the Conan remote to use."
required: false
required: true
type: string
default: xrplf
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: false
required: true
type: string
default: https://conan.ripplex.io
secrets:
clio_notify_token:
description: "The GitHub token to notify Clio about new versions."
@@ -40,52 +38,43 @@ jobs:
upload:
if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158
container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Generate outputs
id: generate
env:
PR_NUMBER: ${{ github.event.pull_request.number }}
run: |
echo 'Generating user and channel.'
echo "user=clio" >> "${GITHUB_OUTPUT}"
echo "channel=pr_${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
echo "channel=pr_${{ github.event.pull_request.number }}" >> "${GITHUB_OUTPUT}"
echo 'Extracting version.'
echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}"
- name: Calculate conan reference
id: conan_ref
- name: Add Conan remote
run: |
echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}"
- name: Set up Conan
uses: ./.github/actions/setup-conan
with:
conan_remote_name: ${{ inputs.conan_remote_name }}
conan_remote_url: ${{ inputs.conan_remote_url }}
echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }}
echo 'Listing Conan remotes.'
conan remote list
- name: Log into Conan remote
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
run: conan remote login ${{ inputs.conan_remote_name }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
- name: Upload package
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
run: |
conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} .
conan upload --confirm --check --remote="${CONAN_REMOTE_NAME}" xrpl/${{ steps.conan_ref.outputs.conan_ref }}
conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}
outputs:
conan_ref: ${{ steps.conan_ref.outputs.conan_ref }}
channel: ${{ steps.generate.outputs.channel }}
user: ${{ steps.generate.outputs.user }}
version: ${{ steps.generate.outputs.version }}
notify:
needs: upload
runs-on: ubuntu-latest
env:
GH_TOKEN: ${{ secrets.clio_notify_token }}
steps:
- name: Notify Clio
env:
GH_TOKEN: ${{ secrets.clio_notify_token }}
PR_URL: ${{ github.event.pull_request.html_url }}
run: |
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
-F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \
-F "client_payload[pr_url]=${PR_URL}"
-F "client_payload[version]=${{ needs.upload.outputs.version }}@${{ needs.upload.outputs.user }}/${{ needs.upload.outputs.channel }}" \
-F "client_payload[pr]=${{ github.event.pull_request.number }}"


@@ -23,6 +23,10 @@ defaults:
run:
shell: bash
env:
CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io
jobs:
# This job determines whether the rest of the workflow should run. It runs
# when the PR is not a draft (which should also cover merge-group) or
@@ -50,20 +54,18 @@ jobs:
files: |
# These paths are unique to `on-pr.yml`.
.github/scripts/levelization/**
.github/workflows/reusable-check-levelization.yml
.github/workflows/reusable-notify-clio.yml
.github/workflows/check-format.yml
.github/workflows/check-levelization.yml
.github/workflows/notify-clio.yml
.github/workflows/on-pr.yml
.clang-format
.pre-commit-config.yaml
# Keep the paths below in sync with those in `on-trigger.yml`.
.github/actions/build-deps/**
.github/actions/build-test/**
.github/actions/setup-conan/**
.github/scripts/strategy-matrix/**
.github/workflows/reusable-build.yml
.github/workflows/reusable-build-test-config.yml
.github/workflows/reusable-build-test.yml
.github/workflows/reusable-strategy-matrix.yml
.github/workflows/reusable-test.yml
.github/workflows/build-test.yml
.codecov.yml
cmake/**
conan/**
@@ -73,7 +75,6 @@ jobs:
tests/**
CMakeLists.txt
conanfile.py
conan.lock
- name: Check whether to run
# This step determines whether the rest of the workflow should
# run. The rest of the workflow will run if this job runs AND at
@@ -93,41 +94,61 @@ jobs:
outputs:
go: ${{ steps.go.outputs.go == 'true' }}
check-format:
needs: should-run
if: needs.should-run.outputs.go == 'true'
uses: ./.github/workflows/check-format.yml
check-levelization:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-check-levelization.yml
if: needs.should-run.outputs.go == 'true'
uses: ./.github/workflows/check-levelization.yml
# This job works around the limitation that GitHub Actions does not support
# using environment variables as inputs for reusable workflows.
generate-outputs:
needs: should-run
if: needs.should-run.outputs.go == 'true'
runs-on: ubuntu-latest
steps:
- name: No-op
run: true
outputs:
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
build-test:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-build-test.yml
needs: generate-outputs
uses: ./.github/workflows/build-test.yml
strategy:
fail-fast: false
matrix:
os: [linux, macos, windows]
with:
conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }}
conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }}
os: ${{ matrix.os }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
codecov_token: ${{ secrets.CODECOV_TOKEN }}
notify-clio:
needs:
- should-run
- generate-outputs
- build-test
if: ${{ needs.should-run.outputs.go == 'true' && (startsWith(github.base_ref, 'release') || github.base_ref == 'master') }}
uses: ./.github/workflows/reusable-notify-clio.yml
uses: ./.github/workflows/notify-clio.yml
with:
conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }}
conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }}
secrets:
clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
passed:
if: failure() || cancelled()
needs:
- build-test
- check-format
- check-levelization
runs-on: ubuntu-latest
steps:
- name: Fail
run: false
- name: No-op
run: true


@@ -9,23 +9,20 @@ name: Trigger
on:
push:
branches:
- "develop"
- "release*"
- "master"
- develop
- release
- master
paths:
# These paths are unique to `on-trigger.yml`.
- ".github/workflows/check-missing-commits.yml"
- ".github/workflows/on-trigger.yml"
- ".github/workflows/publish-docs.yml"
# Keep the paths below in sync with those in `on-pr.yml`.
- ".github/actions/build-deps/**"
- ".github/actions/build-test/**"
- ".github/actions/setup-conan/**"
- ".github/scripts/strategy-matrix/**"
- ".github/workflows/reusable-build.yml"
- ".github/workflows/reusable-build-test-config.yml"
- ".github/workflows/reusable-build-test.yml"
- ".github/workflows/reusable-strategy-matrix.yml"
- ".github/workflows/reusable-test.yml"
- ".github/workflows/build-test.yml"
- ".codecov.yml"
- "cmake/**"
- "conan/**"
@@ -35,7 +32,6 @@ on:
- "tests/**"
- "CMakeLists.txt"
- "conanfile.py"
- "conan.lock"
# Run at 06:32 UTC on every day of the week from Monday through Friday. This
# will force all dependencies to be rebuilt, which is useful to verify that
@@ -44,31 +40,79 @@ on:
schedule:
- cron: "32 6 * * 1-5"
# Run when manually triggered via the GitHub UI or API.
# Run when manually triggered via the GitHub UI or API. If
# `dependencies_force_upload` is true, then the dependencies that were
# missing (`dependencies_force_build` is false) or rebuilt
# (`dependencies_force_build` is true) will be uploaded, overwriting
# existing dependencies if needed.
workflow_dispatch:
inputs:
dependencies_force_build:
description: "Force building of all dependencies."
required: false
type: boolean
default: false
dependencies_force_upload:
description: "Force uploading of all dependencies."
required: false
type: boolean
default: false
concurrency:
# When a PR is merged into the develop branch it will be assigned a unique
# group identifier, so execution will continue even if another PR is merged
# while it is still running. In all other cases the group identifier is shared
# per branch, so that any in-progress runs are cancelled when a new commit is
# pushed.
group: ${{ github.workflow }}-${{ github.event_name == 'push' && github.ref == 'refs/heads/develop' && github.sha || github.ref }}
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
env:
CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io
jobs:
check-missing-commits:
if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }}
uses: ./.github/workflows/check-missing-commits.yml
# This job works around the limitation that GitHub Actions does not support
# using environment variables as inputs for reusable workflows. It also sets
# outputs that depend on the event that triggered the workflow.
generate-outputs:
runs-on: ubuntu-latest
steps:
- name: Check inputs and set outputs
id: generate
run: |
if [[ '${{ github.event_name }}' == 'push' ]]; then
echo 'dependencies_force_build=false' >> "${GITHUB_OUTPUT}"
echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}"
elif [[ '${{ github.event_name }}' == 'schedule' ]]; then
echo 'dependencies_force_build=true' >> "${GITHUB_OUTPUT}"
echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}"
else
echo 'dependencies_force_build=${{ inputs.dependencies_force_build }}' >> "${GITHUB_OUTPUT}"
echo 'dependencies_force_upload=${{ inputs.dependencies_force_upload }}' >> "${GITHUB_OUTPUT}"
fi
outputs:
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
dependencies_force_build: ${{ steps.generate.outputs.dependencies_force_build }}
dependencies_force_upload: ${{ steps.generate.outputs.dependencies_force_upload }}
build-test:
uses: ./.github/workflows/reusable-build-test.yml
needs: generate-outputs
uses: ./.github/workflows/build-test.yml
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix:
os: [linux, macos, windows]
with:
conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }}
conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }}
dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build == 'true' }}
dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload == 'true' }}
os: ${{ matrix.os }}
strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }}
strategy_matrix: "all"
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
codecov_token: ${{ secrets.CODECOV_TOKEN }}
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
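
The old concurrency expression removed above distinguished pushes to develop from all other events; a small sketch of its evaluation, using hypothetical shell variables in place of the GitHub contexts:

    # Mirrors: github.workflow + (push to develop ? github.sha : github.ref)
    if [[ "${EVENT_NAME}" == "push" && "${REF}" == "refs/heads/develop" ]]; then
      group="${WORKFLOW}-${SHA}"  # unique per commit: merged PRs do not cancel each other
    else
      group="${WORKFLOW}-${REF}"  # shared per branch: a new push cancels in-progress runs
    fi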


@@ -1,15 +0,0 @@
name: Run pre-commit hooks
on:
pull_request:
push:
branches: [develop, release, master]
workflow_dispatch:
jobs:
# Call the workflow in the XRPLF/actions repo that runs the pre-commit hooks.
run-hooks:
uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a
with:
runs_on: ubuntu-latest
container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-a8c7be1" }'


@@ -23,24 +23,16 @@ defaults:
env:
BUILD_DIR: .build
NPROC_SUBTRACT: 2
jobs:
publish:
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-a8c7be1
container: ghcr.io/xrplf/ci/tools-rippled-documentation
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ env.NPROC_SUBTRACT }}
- name: Check configuration
run: |
echo 'Checking path.'
@@ -54,16 +46,12 @@ jobs:
echo 'Checking Doxygen version.'
doxygen --version
- name: Build documentation
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
run: |
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
mkdir -p ${{ env.BUILD_DIR }}
cd ${{ env.BUILD_DIR }}
cmake -Donly_docs=ON ..
cmake --build . --target docs --parallel ${BUILD_NPROC}
cmake --build . --target docs --parallel $(nproc)
- name: Publish documentation
if: ${{ github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
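
The removed get-nproc helper presumably computed the processor count minus NPROC_SUBTRACT; a rough bash equivalent of the old behavior, under that assumption:

    # Assumed behavior of get-nproc with subtract: 2.
    BUILD_NPROC=$(( $(nproc) - 2 ))
    cmake --build . --target docs --parallel "${BUILD_NPROC}"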


@@ -1,213 +0,0 @@
name: Build and test configuration
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: true
type: string
build_only:
description: 'Whether to only build or to build and test the code ("true", "false").'
required: true
type: boolean
build_type:
description: 'The build type to use ("Debug", "Release").'
type: string
required: true
cmake_args:
description: "Additional arguments to pass to CMake."
required: false
type: string
default: ""
cmake_target:
description: "The CMake target to build."
type: string
required: true
runs_on:
description: Runner to run the job on as a JSON string
required: true
type: string
image:
description: "The image to run in (leave empty to run natively)"
required: true
type: string
config_name:
description: "The configuration string (used for naming artifacts and such)."
required: true
type: string
nproc_subtract:
description: "The number of processors to subtract when calculating parallelism."
required: false
type: number
default: 2
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
defaults:
run:
shell: bash
jobs:
build-and-test:
name: ${{ inputs.config_name }}
runs-on: ${{ fromJSON(inputs.runs_on) }}
container: ${{ inputs.image != '' && inputs.image || null }}
timeout-minutes: 60
env:
ENABLED_VOIDSTAR: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
ENABLED_COVERAGE: ${{ contains(inputs.cmake_args, '-Dcoverage=ON') }}
steps:
- name: Cleanup workspace (macOS and Windows)
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@01b244d2718865d427b499822fbd3f15e7197fcc
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
with:
disable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ inputs.nproc_subtract }}
- name: Setup Conan
uses: ./.github/actions/setup-conan
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ inputs.build_dir }}
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ inputs.build_type }}
# Set the verbosity to "quiet" for Windows to avoid an excessive
# amount of logs. For other OSes, the "verbose" logs are more useful.
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
- name: Configure CMake
working-directory: ${{ inputs.build_dir }}
env:
BUILD_TYPE: ${{ inputs.build_type }}
CMAKE_ARGS: ${{ inputs.cmake_args }}
run: |
cmake \
-G '${{ runner.os == 'Windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
${CMAKE_ARGS} \
..
- name: Build the binary
working-directory: ${{ inputs.build_dir }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
BUILD_TYPE: ${{ inputs.build_type }}
CMAKE_TARGET: ${{ inputs.cmake_target }}
run: |
cmake \
--build . \
--config "${BUILD_TYPE}" \
--parallel "${BUILD_NPROC}" \
--target "${CMAKE_TARGET}"
- name: Upload rippled artifact (Linux)
if: ${{ github.repository_owner == 'XRPLF' && runner.os == 'Linux' }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
env:
BUILD_DIR: ${{ inputs.build_dir }}
with:
name: rippled-${{ inputs.config_name }}
path: ${{ env.BUILD_DIR }}/rippled
retention-days: 3
if-no-files-found: error
- name: Check linking (Linux)
if: ${{ runner.os == 'Linux' }}
working-directory: ${{ inputs.build_dir }}
run: |
ldd ./rippled
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
echo 'The binary is statically linked.'
else
echo 'The binary is dynamically linked.'
exit 1
fi
- name: Verify presence of instrumentation (Linux)
if: ${{ runner.os == 'Linux' && env.ENABLED_VOIDSTAR == 'true' }}
working-directory: ${{ inputs.build_dir }}
run: |
./rippled --version | grep libvoidstar
- name: Run the separate tests
if: ${{ !inputs.build_only }}
working-directory: ${{ inputs.build_dir }}
# Windows locks some of the build files while running tests, and parallel jobs can collide
env:
BUILD_TYPE: ${{ inputs.build_type }}
PARALLELISM: ${{ runner.os == 'Windows' && '1' || steps.nproc.outputs.nproc }}
run: |
ctest \
--output-on-failure \
-C "${BUILD_TYPE}" \
-j "${PARALLELISM}"
- name: Run the embedded tests
if: ${{ !inputs.build_only }}
working-directory: ${{ runner.os == 'Windows' && format('{0}/{1}', inputs.build_dir, inputs.build_type) || inputs.build_dir }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
run: |
./rippled --unittest --unittest-jobs "${BUILD_NPROC}"
- name: Debug failure (Linux)
if: ${{ failure() && runner.os == 'Linux' && !inputs.build_only }}
run: |
echo "IPv4 local port range:"
cat /proc/sys/net/ipv4/ip_local_port_range
echo "Netstat:"
netstat -an
- name: Prepare coverage report
if: ${{ !inputs.build_only && env.ENABLED_COVERAGE == 'true' }}
working-directory: ${{ inputs.build_dir }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
BUILD_TYPE: ${{ inputs.build_type }}
run: |
cmake \
--build . \
--config "${BUILD_TYPE}" \
--parallel "${BUILD_NPROC}" \
--target coverage
- name: Upload coverage report
if: ${{ github.repository_owner == 'XRPLF' && !inputs.build_only && env.ENABLED_COVERAGE == 'true' }}
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
with:
disable_search: true
disable_telem: true
fail_ci_if_error: true
files: ${{ inputs.build_dir }}/coverage.xml
plugins: noop
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true

View File

@@ -1,58 +0,0 @@
# This workflow builds and tests the binary for various configurations.
name: Build and test
# This workflow can only be triggered by other workflows. Note that the
# workflow_call event does not support the 'choice' input type, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onworkflow_callinputsinput_idtype,
# so we use 'string' instead.
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: false
type: string
default: ".build"
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
os: ${{ inputs.os }}
strategy_matrix: ${{ inputs.strategy_matrix }}
# Build and test the binary for each configuration.
build-test-config:
needs:
- generate-matrix
uses: ./.github/workflows/reusable-build-test-config.yml
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
with:
build_dir: ${{ inputs.build_dir }}
build_only: ${{ matrix.build_only }}
build_type: ${{ matrix.build_type }}
cmake_args: ${{ matrix.cmake_args }}
cmake_target: ${{ matrix.cmake_target }}
runs_on: ${{ toJSON(matrix.architecture.runner) }}
image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || '' }}
config_name: ${{ matrix.config_name }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

View File

@@ -1,45 +0,0 @@
name: Generate strategy matrix
on:
workflow_call:
inputs:
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: false
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
outputs:
matrix:
description: "The generated strategy matrix."
value: ${{ jobs.generate-matrix.outputs.matrix }}
defaults:
run:
shell: bash
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.generate.outputs.matrix }}
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Set up Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: 3.13
- name: Generate strategy matrix
working-directory: .github/scripts/strategy-matrix
id: generate
env:
GENERATE_CONFIG: ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }}
GENERATE_OPTION: ${{ inputs.strategy_matrix == 'all' && '--all' || '' }}
run: ./generate.py ${GENERATE_OPTION} ${GENERATE_CONFIG} >> "${GITHUB_OUTPUT}"

View File

@@ -1,111 +0,0 @@
name: Upload Conan Dependencies
on:
schedule:
- cron: "0 3 * * 2-6"
workflow_dispatch:
inputs:
force_source_build:
description: "Force source build of all dependencies"
required: false
default: false
type: boolean
force_upload:
description: "Force upload of all dependencies"
required: false
default: false
type: boolean
pull_request:
branches: [develop]
paths:
# This allows testing changes to the upload workflow in a PR
- .github/workflows/upload-conan-deps.yml
push:
branches: [develop]
paths:
- .github/workflows/upload-conan-deps.yml
- .github/workflows/reusable-strategy-matrix.yml
- .github/actions/build-deps/action.yml
- .github/actions/setup-conan/action.yml
- ".github/scripts/strategy-matrix/**"
- conanfile.py
- conan.lock
env:
CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io
NPROC_SUBTRACT: 2
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
strategy_matrix: ${{ github.event_name == 'pull_request' && 'minimal' || 'all' }}
# Build and upload the dependencies for each configuration.
run-upload-conan-deps:
needs:
- generate-matrix
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
runs-on: ${{ matrix.architecture.runner }}
container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || null }}
steps:
- name: Cleanup workspace (macOS and Windows)
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@01b244d2718865d427b499822fbd3f15e7197fcc
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
with:
disable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ env.NPROC_SUBTRACT }}
- name: Setup Conan
uses: ./.github/actions/setup-conan
with:
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: .build
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ matrix.build_type }}
force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }}
# Set the verbosity to "quiet" for Windows to avoid an excessive
# amount of logs. For other OSes, the "verbose" logs are more useful.
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
- name: Log into Conan remote
if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"
- name: Upload Conan packages
if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
env:
FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
run: conan upload "*" --remote="${CONAN_REMOTE_NAME}" --confirm ${FORCE_OPTION}

View File

@@ -1,5 +1,18 @@
# To run pre-commit hooks, first install pre-commit:
# - `pip install pre-commit==${PRE_COMMIT_VERSION}`
# - `pip install pre-commit-hooks==${PRE_COMMIT_HOOKS_VERSION}`
#
# Depending on your system, you can use `brew install` or `apt install` as well
# for installing the pre-commit package, but `pip` is needed to install the
# hooks; you can also use `pipx` if you prefer.
# Next, install the required formatters:
# - `pip install clang-format==${CLANG_VERSION}`
# - `npm install prettier@${PRETTIER_VERSION}`
#
# See https://github.com/XRPLF/ci/blob/main/.github/workflows/tools-rippled.yml
# for the versions used in the CI pipeline. You will need to have the exact same
# versions of the tools installed on your system to produce the same results as
# the pipeline.
#
# Then, run the following command to install the git hook scripts:
# - `pre-commit install`
@@ -7,29 +20,42 @@
# - `pre-commit run --all-files`
# To manually run a specific hook, use:
# - `pre-commit run <hook_id> --all-files`
# To run the hooks against only the staged files, use:
# To run the hooks against only the files changed in the current commit, use:
# - `pre-commit run`
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # frozen: v6.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: mixed-line-ending
- id: check-merge-conflict
args: [--assume-in-merge]
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: 7d85583be209cb547946c82fbe51f4bc5dd1d017 # frozen: v18.1.8
- repo: local
hooks:
- id: clang-format
args: [--style=file]
"types_or": [c++, c, proto]
- repo: https://github.com/rbubley/mirrors-prettier
rev: 5ba47274f9b181bce26a5150a725577f3c336011 # frozen: v3.6.2
name: clang-format
language: system
entry: clang-format -i
files: '\.(cpp|hpp|h|ipp|proto)$'
- id: trailing-whitespace
name: trailing-whitespace
entry: trailing-whitespace-fixer
language: system
types: [text]
- id: end-of-file
name: end-of-file
entry: end-of-file-fixer
language: system
types: [text]
- id: mixed-line-ending
name: mixed-line-ending
entry: mixed-line-ending
language: system
types: [text]
- id: check-merge-conflict
name: check-merge-conflict
entry: check-merge-conflict --assume-in-merge
language: system
types: [text]
- repo: local
hooks:
- id: prettier
name: prettier
language: system
entry: prettier --ignore-unknown --write
exclude: |
(?x)^(

View File

@@ -39,12 +39,17 @@ found here](./docs/build/environment.md).
- [Python 3.11](https://www.python.org/downloads/), or higher
- [Conan 2.17](https://conan.io/downloads.html)[^1], or higher
- [CMake 3.22](https://cmake.org/download/), or higher
- [CMake 3.22](https://cmake.org/download/)[^2], or higher
[^1]:
It is possible to build with Conan 1.60+, but the instructions are
significantly different, which is why we are not recommending it.
[^2]:
CMake 4 is not yet supported by all dependencies required by this project.
If you are affected by this issue, follow the [Conan workaround for CMake
4](#workaround-for-cmake-4).
`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
The [minimum compiler versions][2] required are:
@@ -127,7 +132,7 @@ higher index than the default Conan Center remote, so it is consulted first. You
can do this by running:
```bash
conan remote add --index 0 xrplf https://conan.ripplex.io
conan remote add --index 0 xrplf "https://conan.ripplex.io"
```
Alternatively, you can pull the patched recipes into the repository and use them
@@ -153,10 +158,6 @@ updated dependencies with the newer version. However, if we switch to a newer
version that no longer requires a patch, no action is required on your part, as
the new recipe will be automatically pulled from the official Conan Center.
> [!NOTE]
> You might need to add `--lockfile=""` to your `conan install` command
> to avoid automatic use of the existing `conan.lock` file when you run
> `conan export` manually on your machine.
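For example, a minimal sketch, assuming a typical `conan install` invocation run from your build directory:

```bash
# Resolve dependencies while ignoring the checked-in conan.lock, so a
# freshly exported recipe revision can be picked up.
conan install .. --output-folder . --build missing --lockfile=""
```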
### Conan profile tweaks
#### Missing compiler version
@@ -277,6 +278,21 @@ sed -i.bak -e 's|^arch=.*$|arch=x86_64|' $(conan config home)/profiles/default
sed -i.bak -e 's|^compiler\.runtime=.*$|compiler.runtime=static|' $(conan config home)/profiles/default
```
#### Workaround for CMake 4
If your system CMake is version 4 rather than 3, you may have to configure the
Conan profile to use CMake version 3 for dependencies by adding the following
two lines to your profile:
```text
[tool_requires]
!cmake/*: cmake/[>=3 <4]
```
This will force Conan to download and use a locally cached CMake 3 version, and
is needed because some of the dependencies used by this project do not support
CMake 4.
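A minimal sketch of applying this tweak from the shell, assuming your default
profile lives under the directory reported by `conan config home`:

```bash
# Append the tool_requires pin to the default Conan profile so that
# dependency builds use a locally cached CMake 3 instead of the system CMake 4.
cat >> "$(conan config home)/profiles/default" <<'EOF'

[tool_requires]
!cmake/*: cmake/[>=3 <4]
EOF
```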
#### Clang workaround for grpc
If your compiler is clang, version 19 or later, or apple-clang, version 17 or
@@ -450,33 +466,6 @@ tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
The location of the `rippled` binary in your build directory depends on your
CMake generator. Pass `--help` to see the rest of the command line options.
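For instance, assuming the `.build` directory used by the CI workflows:

```bash
# Single-configuration generators (e.g. Ninja) place the binary directly
# in the build directory; multi-configuration generators (e.g. Visual
# Studio) add a per-configuration subdirectory.
./.build/rippled --help
./.build/Release/rippled --help
```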
#### Conan lockfile
To achieve reproducible dependencies, we use a [Conan lockfile](https://docs.conan.io/2/tutorial/versioning/lockfiles.html).
The `conan.lock` file in the repository contains a "snapshot" of the current dependencies.
It is used implicitly when running `conan` commands; you don't need to specify it.
You have to update this file every time you add a new dependency or change a revision or version of an existing dependency.
> [!NOTE]
> Conan uses the local cache by default when creating a lockfile.
>
> To ensure that lockfile creation works the same way on all developer machines, you should clear the local cache before creating a new lockfile.
To create a new lockfile, run the following commands in the repository root:
```bash
conan remove '*' --confirm
rm conan.lock
# This ensures that the xrplf remote is the first to be consulted
conan remote add --force --index 0 xrplf https://conan.ripplex.io
conan lock create . -o '&:jemalloc=True' -o '&:rocksdb=True'
```
> [!NOTE]
> If some dependencies are exclusive to a particular OS, you may need to re-run the last command for them, adding `--profile:all <PROFILE>`.
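As a sketch, assuming a `windows` profile exists for such OS-specific dependencies:

```bash
# Hypothetical example: extend the lockfile with packages that only
# resolve on another platform (profile name assumed).
conan lock create . -o '&:jemalloc=True' -o '&:rocksdb=True' --profile:all windows
```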
## Coverage report
The coverage report is intended for developers using compilers GCC
@@ -495,18 +484,18 @@ A coverage report is created when the following steps are completed, in order:
1. `rippled` binary built with instrumentation data, enabled by the `coverage`
option mentioned above
2. completed one or more runs of the unit tests, which populate coverage capture data
2. completed run of unit tests, which populates coverage capture data
3. completed run of the `gcovr` tool (which internally invokes either `gcov` or `llvm-cov`)
to assemble both instrumentation data and the coverage capture data into a coverage report
The last step of the above is automated into a single target `coverage`. The instrumented
The above steps are automated into a single target `coverage`. The instrumented
`rippled` binary can also be used for regular development or testing work, at
the cost of extra disk space utilization and a small performance hit
(to store coverage capture data). Since the `rippled` binary is simply a dependency of the
coverage report target, it is possible to re-run the `coverage` target without
rebuilding the `rippled` binary. Note that running the unit tests before the `coverage`
target is left to the developer. Each such run will append to the coverage data
collected in the build directory.
(to store coverage capture). In case of a spurious failure of unit tests, it is
possible to re-run the `coverage` target without rebuilding the `rippled` binary
(since it is simply a dependency of the coverage report target). It is also possible
to select only specific tests for the purpose of the coverage report, by setting
the `coverage_test` variable in `cmake`.
The default coverage report format is `html-details`, but the user
can override it to any of the formats listed in `Builds/CMake/CodeCoverage.cmake`
@@ -515,6 +504,11 @@ to generate more than one format at a time by setting the `coverage_extra_args`
variable in `cmake`. The specific command line used to run the `gcovr` tool will be
displayed if the `CODE_COVERAGE_VERBOSE` variable is set.
By default, the code coverage tool runs parallel unit tests with `--unittest-jobs`
set to the number of available CPU cores. This may cause spurious test
errors on Apple. Developers can override the number of unit test jobs with
the `coverage_test_parallelism` variable in `cmake`.
Example use with some cmake variables set:
```
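# A sketch only: these are the cmake variables described above, with
# illustrative values.
cmake -DCMAKE_BUILD_TYPE=Debug -Dcoverage=ON \
  -Dcoverage_test_parallelism=2 \
  -Dcoverage_format=html-details \
  ..
```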
@@ -570,13 +564,7 @@ After any updates or changes to dependencies, you may need to do the following:
```
3. Re-run [conan export](#patched-recipes) if needed.
4. [Regenerate lockfile](#conan-lockfile).
5. Re-run [conan install](#build-and-test).
#### ERROR: Package not resolved
If you're seeing an error like `ERROR: Package 'snappy/1.1.10' not resolved: Unable to find 'snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246' in remotes.`,
please add the `xrplf` remote or re-run `conan export` for [patched recipes](#patched-recipes).
4. Re-run [conan install](#build-and-test).
### `protobuf/port_def.inc` file not found

View File

@@ -6,7 +6,7 @@ The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powe
## XRP
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free crypto-asset native to the XRP Ledger, and is designed as a gas token for network services and to bridge different currencies. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
## rippled
@@ -23,19 +23,19 @@ If you are interested in running an **API Server** (including a **Full History S
- **[Censorship-Resistant Transaction Processing][]:** No single party decides which transactions succeed or fail, and no one can "roll back" a transaction after it completes. As long as those who choose to participate in the network keep it healthy, they can settle transactions in seconds.
- **[Fast, Efficient Consensus Algorithm][]:** The XRP Ledger's consensus algorithm settles transactions in 4 to 5 seconds, processing at a throughput of up to 1500 transactions per second. These properties put XRP at least an order of magnitude ahead of other top digital assets.
- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction fees.
- **[Responsible Software Governance][]:** A team of full-time developers at Ripple & other organizations maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. Ripple acts as a steward for the technology and an advocate for its interests.
- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction costs.
- **[Responsible Software Governance][]:** A team of full-time, world-class developers at Ripple maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. Ripple acts as a steward for the technology and an advocate for its interests, and builds constructive relationships with governments and financial institutions worldwide.
- **[Secure, Adaptable Cryptography][]:** The XRP Ledger relies on industry standard digital signature systems like ECDSA (the same scheme used by Bitcoin) but also supports modern, efficient algorithms like Ed25519. The extensible nature of the XRP Ledger's software makes it possible to add and disable algorithms as the state of the art in cryptography advances.
- **[Modern Features][]:** Features like Escrow, Checks, and Payment Channels support financial applications atop of the XRP Ledger. This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints.
- **[Modern Features for Smart Contracts][]:** Features like Escrow, Checks, and Payment Channels support cutting-edge financial applications including the [Interledger Protocol](https://interledger.org/). This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints.
- **[On-Ledger Decentralized Exchange][]:** In addition to all the features that make XRP useful on its own, the XRP Ledger also has a fully-functional accounting system for tracking and trading obligations denominated in any way users want, and an exchange built into the protocol. The XRP Ledger can settle long, cross-currency payment paths and exchanges of multiple currencies in atomic transactions, bridging gaps of trust with XRP.
[Censorship-Resistant Transaction Processing]: https://xrpl.org/transaction-censorship-detection.html#transaction-censorship-detection
[Fast, Efficient Consensus Algorithm]: https://xrpl.org/consensus-research.html#consensus-research
[Finite XRP Supply]: https://xrpl.org/what-is-xrp.html
[Responsible Software Governance]: https://xrpl.org/contribute-code.html#contribute-code-to-the-xrp-ledger
[Secure, Adaptable Cryptography]: https://xrpl.org/cryptographic-keys.html#cryptographic-keys
[Modern Features]: https://xrpl.org/use-specialized-payment-types.html
[On-Ledger Decentralized Exchange]: https://xrpl.org/decentralized-exchange.html#decentralized-exchange
[Censorship-Resistant Transaction Processing]: https://xrpl.org/xrp-ledger-overview.html#censorship-resistant-transaction-processing
[Fast, Efficient Consensus Algorithm]: https://xrpl.org/xrp-ledger-overview.html#fast-efficient-consensus-algorithm
[Finite XRP Supply]: https://xrpl.org/xrp-ledger-overview.html#finite-xrp-supply
[Responsible Software Governance]: https://xrpl.org/xrp-ledger-overview.html#responsible-software-governance
[Secure, Adaptable Cryptography]: https://xrpl.org/xrp-ledger-overview.html#secure-adaptable-cryptography
[Modern Features for Smart Contracts]: https://xrpl.org/xrp-ledger-overview.html#modern-features-for-smart-contracts
[On-Ledger Decentralized Exchange]: https://xrpl.org/xrp-ledger-overview.html#on-ledger-decentralized-exchange
## Source Code

View File

@@ -1,3 +1,21 @@
macro(group_sources_in source_dir curdir)
file(GLOB children RELATIVE ${source_dir}/${curdir}
${source_dir}/${curdir}/*)
foreach (child ${children})
if (IS_DIRECTORY ${source_dir}/${curdir}/${child})
group_sources_in(${source_dir} ${curdir}/${child})
else()
string(REPLACE "/" "\\" groupname ${curdir})
source_group(${groupname} FILES
${source_dir}/${curdir}/${child})
endif()
endforeach()
endmacro()
macro(group_sources curdir)
group_sources_in(${PROJECT_SOURCE_DIR} ${curdir})
endmacro()
macro (exclude_from_default target_)
set_target_properties (${target_} PROPERTIES EXCLUDE_FROM_ALL ON)
set_target_properties (${target_} PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD ON)

View File

@@ -104,14 +104,6 @@
# 2025-08-28, Bronek Kozicki
# - fix "At least one COMMAND must be given" CMake warning from policy CMP0175
#
# 2025-09-03, Jingchen Wu
# - remove the unused functions append_coverage_compiler_flags and append_coverage_compiler_flags_to_target
# - add a new function add_code_coverage_to_target
# - remove some unused code
#
# 2025-11-11, Bronek Kozicki
# - make EXECUTABLE and EXECUTABLE_ARGS optional
#
# USAGE:
#
# 1. Copy this file into your cmake modules path.
@@ -120,8 +112,10 @@
# using a CMake option() to enable it just optionally):
# include(CodeCoverage)
#
# 3. Append necessary compiler flags and linker flags for all supported source files:
# add_code_coverage_to_target(<target> <PRIVATE|PUBLIC|INTERFACE>)
# 3. Append necessary compiler flags for all supported source files:
# append_coverage_compiler_flags()
# Or for specific target:
# append_coverage_compiler_flags_to_target(YOUR_TARGET_NAME)
#
# 3.a (OPTIONAL) Set appropriate optimization flags, e.g. -O0, -O1 or -Og
#
@@ -210,69 +204,67 @@ endforeach()
set(COVERAGE_COMPILER_FLAGS "-g --coverage"
CACHE INTERNAL "")
set(COVERAGE_CXX_COMPILER_FLAGS "")
set(COVERAGE_C_COMPILER_FLAGS "")
set(COVERAGE_CXX_LINKER_FLAGS "")
set(COVERAGE_C_LINKER_FLAGS "")
if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
include(CheckCXXCompilerFlag)
include(CheckCCompilerFlag)
include(CheckLinkerFlag)
set(COVERAGE_CXX_COMPILER_FLAGS ${COVERAGE_COMPILER_FLAGS})
set(COVERAGE_C_COMPILER_FLAGS ${COVERAGE_COMPILER_FLAGS})
set(COVERAGE_CXX_LINKER_FLAGS ${COVERAGE_COMPILER_FLAGS})
set(COVERAGE_C_LINKER_FLAGS ${COVERAGE_COMPILER_FLAGS})
check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path)
if(HAVE_cxx_fprofile_abs_path)
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_CXX_COMPILER_FLAGS} -fprofile-abs-path")
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
endif()
check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path)
if(HAVE_c_fprofile_abs_path)
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_C_COMPILER_FLAGS} -fprofile-abs-path")
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
endif()
check_linker_flag(CXX -fprofile-abs-path HAVE_cxx_linker_fprofile_abs_path)
if(HAVE_cxx_linker_fprofile_abs_path)
set(COVERAGE_CXX_LINKER_FLAGS "${COVERAGE_CXX_LINKER_FLAGS} -fprofile-abs-path")
endif()
check_linker_flag(C -fprofile-abs-path HAVE_c_linker_fprofile_abs_path)
if(HAVE_c_linker_fprofile_abs_path)
set(COVERAGE_C_LINKER_FLAGS "${COVERAGE_C_LINKER_FLAGS} -fprofile-abs-path")
endif()
check_cxx_compiler_flag(-fprofile-update=atomic HAVE_cxx_fprofile_update)
check_cxx_compiler_flag(-fprofile-update HAVE_cxx_fprofile_update)
if(HAVE_cxx_fprofile_update)
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_CXX_COMPILER_FLAGS} -fprofile-update=atomic")
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic")
endif()
check_c_compiler_flag(-fprofile-update=atomic HAVE_c_fprofile_update)
check_c_compiler_flag(-fprofile-update HAVE_c_fprofile_update)
if(HAVE_c_fprofile_update)
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_C_COMPILER_FLAGS} -fprofile-update=atomic")
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic")
endif()
check_linker_flag(CXX -fprofile-update=atomic HAVE_cxx_linker_fprofile_update)
if(HAVE_cxx_linker_fprofile_update)
set(COVERAGE_CXX_LINKER_FLAGS "${COVERAGE_CXX_LINKER_FLAGS} -fprofile-update=atomic")
endif()
check_linker_flag(C -fprofile-update=atomic HAVE_c_linker_fprofile_update)
if(HAVE_c_linker_fprofile_update)
set(COVERAGE_C_LINKER_FLAGS "${COVERAGE_C_LINKER_FLAGS} -fprofile-update=atomic")
endif()
endif()
set(CMAKE_Fortran_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the Fortran compiler during coverage builds."
FORCE )
set(CMAKE_CXX_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the C++ compiler during coverage builds."
FORCE )
set(CMAKE_C_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the C compiler during coverage builds."
FORCE )
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
""
CACHE STRING "Flags used for linking binaries during coverage builds."
FORCE )
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
""
CACHE STRING "Flags used by the shared libraries linker during coverage builds."
FORCE )
mark_as_advanced(
CMAKE_Fortran_FLAGS_COVERAGE
CMAKE_CXX_FLAGS_COVERAGE
CMAKE_C_FLAGS_COVERAGE
CMAKE_EXE_LINKER_FLAGS_COVERAGE
CMAKE_SHARED_LINKER_FLAGS_COVERAGE )
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading")
endif() # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
link_libraries(gcov)
endif()
# Defines a target for running and collection code coverage information
# Builds dependencies, runs the given executable and outputs reports.
# NOTE! The executable should always have a ZERO as exit code otherwise
@@ -320,10 +312,6 @@ function(setup_target_for_coverage_gcovr)
set(Coverage_FORMAT xml)
endif()
if(NOT DEFINED Coverage_EXECUTABLE AND DEFINED Coverage_EXECUTABLE_ARGS)
message(FATAL_ERROR "EXECUTABLE_ARGS must not be set if EXECUTABLE is not set")
endif()
if("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting...")
else()
@@ -405,18 +393,17 @@ function(setup_target_for_coverage_gcovr)
endforeach()
# Set up commands which will be run to generate coverage data
# If EXECUTABLE is not set, the user is expected to run the tests manually
# before running the coverage target NAME
if(DEFINED Coverage_EXECUTABLE)
set(GCOVR_EXEC_TESTS_CMD
${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
)
endif()
# Run tests
set(GCOVR_EXEC_TESTS_CMD
${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
)
# Create folder
if(DEFINED GCOVR_CREATE_FOLDER)
set(GCOVR_FOLDER_CMD
${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER})
else()
set(GCOVR_FOLDER_CMD echo) # dummy
endif()
# Running gcovr
@@ -433,13 +420,11 @@ function(setup_target_for_coverage_gcovr)
if(CODE_COVERAGE_VERBOSE)
message(STATUS "Executed command report")
if(NOT "${GCOVR_EXEC_TESTS_CMD}" STREQUAL "")
message(STATUS "Command to run tests: ")
string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}")
message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")
endif()
message(STATUS "Command to run tests: ")
string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}")
message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")
if(NOT "${GCOVR_FOLDER_CMD}" STREQUAL "")
if(NOT GCOVR_FOLDER_CMD STREQUAL "echo")
message(STATUS "Command to create a folder: ")
string(REPLACE ";" " " GCOVR_FOLDER_CMD_SPACED "${GCOVR_FOLDER_CMD}")
message(STATUS "${GCOVR_FOLDER_CMD_SPACED}")
@@ -469,19 +454,18 @@ function(setup_target_for_coverage_gcovr)
)
endfunction() # setup_target_for_coverage_gcovr
function(add_code_coverage_to_target name scope)
separate_arguments(COVERAGE_CXX_COMPILER_FLAGS NATIVE_COMMAND "${COVERAGE_CXX_COMPILER_FLAGS}")
separate_arguments(COVERAGE_C_COMPILER_FLAGS NATIVE_COMMAND "${COVERAGE_C_COMPILER_FLAGS}")
separate_arguments(COVERAGE_CXX_LINKER_FLAGS NATIVE_COMMAND "${COVERAGE_CXX_LINKER_FLAGS}")
separate_arguments(COVERAGE_C_LINKER_FLAGS NATIVE_COMMAND "${COVERAGE_C_LINKER_FLAGS}")
function(append_coverage_compiler_flags)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}")
endfunction() # append_coverage_compiler_flags
# Add compiler options to the target
target_compile_options(${name} ${scope}
$<$<COMPILE_LANGUAGE:CXX>:${COVERAGE_CXX_COMPILER_FLAGS}>
$<$<COMPILE_LANGUAGE:C>:${COVERAGE_C_COMPILER_FLAGS}>)
target_link_libraries (${name} ${scope}
$<$<LINK_LANGUAGE:CXX>:${COVERAGE_CXX_LINKER_FLAGS} gcov>
$<$<LINK_LANGUAGE:C>:${COVERAGE_C_LINKER_FLAGS} gcov>
)
endfunction() # add_code_coverage_to_target
# Setup coverage for specific library
function(append_coverage_compiler_flags_to_target name)
separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}")
target_compile_options(${name} PRIVATE ${_flag_list})
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
target_link_libraries(${name} PRIVATE gcov)
endif()
endfunction()

View File

@@ -12,7 +12,7 @@ if (static OR MSVC)
else ()
set (Boost_USE_STATIC_RUNTIME OFF)
endif ()
find_dependency (Boost
find_dependency (Boost 1.70
COMPONENTS
chrono
container
@@ -52,3 +52,5 @@ if (TARGET ZLIB::ZLIB)
set_target_properties(OpenSSL::Crypto PROPERTIES
INTERFACE_LINK_LIBRARIES ZLIB::ZLIB)
endif ()
include ("${CMAKE_CURRENT_LIST_DIR}/RippleTargets.cmake")

View File

@@ -16,13 +16,16 @@ set(CMAKE_CXX_EXTENSIONS OFF)
target_compile_definitions (common
INTERFACE
$<$<CONFIG:Debug>:DEBUG _DEBUG>
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>)
# ^^^^ NOTE: CMAKE release builds already have NDEBUG
# defined, so no need to add it explicitly except for
# this special case of (profile ON) and (assert OFF)
# -- presumably this is because we don't want profile
# builds asserting unless asserts were specifically
# requested
#[===[
NOTE: CMAKE release builds already have NDEBUG defined, so no need to add it
explicitly except for the special case of (profile ON) and (assert OFF).
Presumably this is because we don't want profile builds asserting unless
asserts were specifically requested.
]===]
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>
# TODO: Remove once we have migrated functions from OpenSSL 1.x to 3.x.
OPENSSL_SUPPRESS_DEPRECATED
)
if (MSVC)
# remove existing exception flag since we set it to -EHa

View File

@@ -72,7 +72,10 @@ include(target_link_modules)
# Level 01
add_module(xrpl beast)
target_link_libraries(xrpl.libxrpl.beast PUBLIC xrpl.imports.main)
target_link_libraries(xrpl.libxrpl.beast PUBLIC
xrpl.imports.main
xrpl.libpb
)
# Level 02
add_module(xrpl basics)
@@ -108,12 +111,6 @@ target_link_libraries(xrpl.libxrpl.net PUBLIC
add_module(xrpl server)
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol)
add_module(xrpl ledger)
target_link_libraries(xrpl.libxrpl.ledger PUBLIC
xrpl.libxrpl.basics
xrpl.libxrpl.json
xrpl.libxrpl.protocol
)
add_library(xrpl.libxrpl)
set_target_properties(xrpl.libxrpl PROPERTIES OUTPUT_NAME xrpl)
@@ -134,7 +131,6 @@ target_link_modules(xrpl PUBLIC
resource
server
net
ledger
)
# All headers in libxrpl are in modules.

View File

@@ -11,9 +11,6 @@ if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
return()
endif()
include(ProcessorCount)
ProcessorCount(PROCESSOR_COUNT)
include(CodeCoverage)
# The instructions for these commands come from the `CodeCoverage` module,
@@ -29,13 +26,13 @@ list(APPEND GCOVR_ADDITIONAL_ARGS
--exclude-throw-branches
--exclude-noncode-lines
--exclude-unreachable-branches -s
-j ${PROCESSOR_COUNT})
-j ${coverage_test_parallelism})
setup_target_for_coverage_gcovr(
NAME coverage
FORMAT ${coverage_format}
EXCLUDE "src/test" "src/tests" "include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb"
DEPENDENCIES rippled xrpl.tests
EXECUTABLE rippled
EXECUTABLE_ARGS --unittest$<$<BOOL:${coverage_test}>:=${coverage_test}> --unittest-jobs ${coverage_test_parallelism} --quiet --unittest-log
EXCLUDE "src/test" "include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb"
DEPENDENCIES rippled
)
add_code_coverage_to_target(opts INTERFACE)

View File

@@ -18,7 +18,6 @@ install (
xrpl.libxrpl.json
xrpl.libxrpl.protocol
xrpl.libxrpl.resource
xrpl.libxrpl.ledger
xrpl.libxrpl.server
xrpl.libxrpl.net
xrpl.libxrpl

View File

@@ -28,11 +28,15 @@ target_compile_options (opts
$<$<AND:$<BOOL:${is_gcc}>,$<COMPILE_LANGUAGE:CXX>>:-Wsuggest-override>
$<$<BOOL:${is_gcc}>:-Wno-maybe-uninitialized>
$<$<BOOL:${perf}>:-fno-omit-frame-pointer>
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${coverage}>>:-g --coverage -fprofile-abs-path>
$<$<AND:$<BOOL:${is_clang}>,$<BOOL:${coverage}>>:-g --coverage>
$<$<BOOL:${profile}>:-pg>
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
target_link_libraries (opts
INTERFACE
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${coverage}>>:-g --coverage -fprofile-abs-path>
$<$<AND:$<BOOL:${is_clang}>,$<BOOL:${coverage}>>:-g --coverage>
$<$<BOOL:${profile}>:-pg>
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)

View File

@@ -1,5 +1,5 @@
#[===================================================================[
sanity checks
convenience variables and sanity checks
#]===================================================================]
get_property(is_multiconfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
@@ -16,19 +16,39 @@ if (NOT is_multiconfig)
endif ()
endif ()
get_directory_property(has_parent PARENT_DIRECTORY)
if (has_parent)
set (is_root_project OFF)
else ()
set (is_root_project ON)
endif ()
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES ".*Clang") # both Clang and AppleClang
set (is_clang TRUE)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND
CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16.0)
message (FATAL_ERROR "This project requires clang 16 or later")
CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.0)
message (FATAL_ERROR "This project requires clang 8 or later")
endif ()
# TODO min AppleClang version check ?
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
set (is_gcc TRUE)
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12.0)
message (FATAL_ERROR "This project requires GCC 12 or later")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.0)
message (FATAL_ERROR "This project requires GCC 8 or later")
endif ()
endif ()
if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
set (is_linux TRUE)
else ()
set (is_linux FALSE)
endif ()
if ("$ENV{CI}" STREQUAL "true" OR "$ENV{CONTINUOUS_INTEGRATION}" STREQUAL "true")
set (is_ci TRUE)
else ()
set (is_ci FALSE)
endif ()
# check for in-source build and fail
if ("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
message (FATAL_ERROR "Builds (in-source) are not allowed in "

View File

@@ -1,25 +1,10 @@
#[===================================================================[
declare options and variables
declare user options/settings
#]===================================================================]
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
set (is_linux TRUE)
else()
set(is_linux FALSE)
endif()
include(ProcessorCount)
if("$ENV{CI}" STREQUAL "true" OR "$ENV{CONTINUOUS_INTEGRATION}" STREQUAL "true")
set(is_ci TRUE)
else()
set(is_ci FALSE)
endif()
get_directory_property(has_parent PARENT_DIRECTORY)
if(has_parent)
set(is_root_project OFF)
else()
set(is_root_project ON)
endif()
ProcessorCount(PROCESSOR_COUNT)
option(assert "Enables asserts, even in release builds" OFF)
@@ -40,28 +25,29 @@ if(unity)
endif()
set(CMAKE_UNITY_BUILD ON CACHE BOOL "Do a unity build")
endif()
if(is_clang AND is_linux)
option(voidstar "Enable Antithesis instrumentation." OFF)
endif()
if(is_gcc OR is_clang)
include(ProcessorCount)
ProcessorCount(PROCESSOR_COUNT)
option(coverage "Generates coverage info." OFF)
option(profile "Add profiling flags" OFF)
set(coverage_test_parallelism "${PROCESSOR_COUNT}" CACHE STRING
"Unit tests parallelism for the purpose of coverage report.")
set(coverage_format "html-details" CACHE STRING
"Output format of the coverage report.")
set(coverage_extra_args "" CACHE STRING
"Additional arguments to pass to gcovr.")
set(coverage_test "" CACHE STRING
"On gcc & clang, the specific unit test(s) to run for coverage. Default is all tests.")
if(coverage_test AND NOT coverage)
set(coverage ON CACHE BOOL "gcc/clang only" FORCE)
endif()
option(wextra "compile with extra gcc/clang warnings enabled" ON)
else()
set(profile OFF CACHE BOOL "gcc/clang only" FORCE)
set(coverage OFF CACHE BOOL "gcc/clang only" FORCE)
set(wextra OFF CACHE BOOL "gcc/clang only" FORCE)
endif()
if(is_linux)
option(BUILD_SHARED_LIBS "build shared ripple libraries" OFF)
option(static "link protobuf, openssl, libc++, and boost statically" ON)
@@ -78,13 +64,11 @@ else()
set(use_gold OFF CACHE BOOL "gold linker, linux only" FORCE)
set(use_mold OFF CACHE BOOL "mold linker, linux only" FORCE)
endif()
if(is_clang)
option(use_lld "enables detection of lld linker" ON)
else()
set(use_lld OFF CACHE BOOL "try lld linker, clang only" FORCE)
endif()
option(jemalloc "Enables jemalloc for heap profiling" OFF)
option(werr "treat warnings as errors" OFF)
option(local_protobuf
@@ -118,26 +102,38 @@ if(san)
message(FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
endif()
endif()
set(container_label "" CACHE STRING "tag to use for package building containers")
option(packages_only
"ONLY generate package building targets. This is special use-case and almost \
certainly not what you want. Use with caution as you won't be able to build \
any compiled targets locally." OFF)
option(have_package_container
"Sometimes you already have the tagged container you want to use for package \
building and you don't want docker to rebuild it. This flag will detach the \
dependency of the package build from the container build. It's an advanced \
use case and most likely you should not be touching this flag." OFF)
# the remaining options are obscure and rarely used
option(beast_no_unit_test_inline
"Prevents unit test definitions from being inserted into global table"
OFF)
option(single_io_service_thread
"Restricts the number of threads calling io_service::run to one. \
"Restricts the number of threads calling io_context::run to one. \
This can be useful when debugging."
OFF)
option(boost_show_deprecated
"Allow boost to fail on deprecated usage. Only useful if you're trying\
to find deprecated calls."
OFF)
option(beast_hashers
"Use local implementations for sha/ripemd hashes (experimental, not recommended)"
OFF)
if(WIN32)
option(beast_disable_autolink "Disables autolinking of system libraries on WIN32" OFF)
else()
set(beast_disable_autolink OFF CACHE BOOL "WIN32 only" FORCE)
endif()
if(coverage)
message(STATUS "coverage build requested - forcing Debug build")
set(CMAKE_BUILD_TYPE Debug CACHE STRING "build type" FORCE)

View File

@@ -24,6 +24,7 @@ target_link_libraries(ripple_boost
Boost::date_time
Boost::filesystem
Boost::json
Boost::process
Boost::program_options
Boost::regex
Boost::system

View File

@@ -7,7 +7,7 @@ function(xrpl_add_test name)
"${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/${name}.cpp"
)
add_executable(${target} ${ARGN} ${sources})
add_executable(${target} EXCLUDE_FROM_ALL ${ARGN} ${sources})
isolate_headers(
${target}
@@ -22,4 +22,20 @@ function(xrpl_add_test name)
UNITY_BUILD_BATCH_SIZE 0) # Adjust as needed
add_test(NAME ${target} COMMAND ${target})
set_tests_properties(
${target} PROPERTIES
FIXTURES_REQUIRED ${target}_fixture
)
add_test(
NAME ${target}.build
COMMAND
${CMAKE_COMMAND}
--build ${CMAKE_BINARY_DIR}
--config $<CONFIG>
--target ${target}
)
set_tests_properties(${target}.build PROPERTIES
FIXTURES_SETUP ${target}_fixture
)
endfunction()

View File

@@ -1,56 +0,0 @@
{
"version": "0.5",
"requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
"snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246",
"rocksdb/10.0.1#85537f46e538974d67da0c3977de48ac%1756234304.347",
"re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729",
"nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03",
"libarchive/3.8.1#5cf685686322e906cb42706ab7e099a8%1756234256.696",
"jemalloc/5.3.0#e951da9cf599e956cebc117880d2d9f8%1729241615.244",
"grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
"doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819",
"date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
"c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
"bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
"boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368%1754325043.336",
"abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907"
],
"build_requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"strawberryperl/5.32.1.1#707032463aa0620fa17ec0d887f5fe41%1756234281.733",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"nasm/2.16.01#31e26f2ee3c4346ecd347911bd126904%1756234232.901",
"msys2/cci.latest#5b73b10144f73cc5bfe0572ed9be39e1%1751977009.857",
"m4/1.4.19#b38ced39a01e31fef5435bc634461fd2%1700758725.451",
"cmake/3.31.8#dde3bde00bb843687e55aea5afa0e220%1756234232.89",
"b2/5.3.3#107c15377719889654eb9a162a673975%1756234226.28",
"automake/1.16.5#b91b7c384c3deaa9d535be02da14d04f%1755524470.56",
"autoconf/2.71#51077f068e61700d65bb05541ea1e4b0%1731054366.86"
],
"python_requires": [],
"overrides": {
"protobuf/3.21.12": [
null,
"protobuf/3.21.12"
],
"lz4/1.9.4": [
"lz4/1.10.0"
],
"boost/1.83.0": [
"boost/1.83.0"
],
"sqlite3/3.44.2": [
"sqlite3/3.49.1"
]
},
"config_requires": []
}

View File

@@ -1,5 +1,9 @@
# Global configuration for Conan. This is used to set the number of parallel
# downloads and uploads.
# downloads, uploads, and build jobs. The verbosity is set to verbose to
# provide more information during the build process.
core:non_interactive=True
core.download:parallel={{ os.cpu_count() }}
core.upload:parallel={{ os.cpu_count() }}
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
tools.build:verbosity=verbose
tools.compilation:verbosity=verbose

View File

@@ -21,14 +21,14 @@ compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}}
[conf]
{% if compiler == "clang" and compiler_version >= 19 %}
grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw']
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "apple-clang" and compiler_version >= 17 %}
grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "clang" and compiler_version == 16 %}
tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "gcc" and compiler_version < 13 %}
tools.build:cxxflags+=['-Wno-restrict']
tools.build:cxxflags=['-Wno-restrict']
{% endif %}
[tool_requires]
!cmake/*: cmake/[>=3 <4]

View File

@@ -27,7 +27,7 @@ class Xrpl(ConanFile):
'grpc/1.50.1',
'libarchive/3.8.1',
'nudb/2.0.9',
'openssl/1.1.1w',
'openssl/3.5.2',
'soci/4.0.3',
'zlib/1.3.1',
]
@@ -100,11 +100,13 @@ class Xrpl(ConanFile):
def configure(self):
if self.settings.compiler == 'apple-clang':
self.options['boost'].visibility = 'global'
if self.settings.compiler in ['clang', 'gcc']:
self.options['boost'].without_cobalt = True
def requirements(self):
# Conan 2 requires transitive headers to be specified
transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {}
self.requires('boost/1.83.0', force=True, **transitive_headers_opt)
self.requires('boost/1.88.0', force=True, **transitive_headers_opt)
self.requires('date/3.0.4', **transitive_headers_opt)
self.requires('lz4/1.10.0', force=True)
self.requires('protobuf/3.21.12', force=True)
@@ -175,6 +177,7 @@ class Xrpl(ConanFile):
'boost::filesystem',
'boost::json',
'boost::program_options',
'boost::process',
'boost::regex',
'boost::system',
'boost::thread',

View File

@@ -541,7 +541,7 @@ SECP256K1_API int secp256k1_ecdsa_signature_serialize_compact(
/** Verify an ECDSA signature.
*
* Returns: 1: correct signature
* 0: incorrect or unparsable signature
* 0: incorrect or unparseable signature
* Args: ctx: pointer to a context object
* In: sig: the signature being verified.
* msghash32: the 32-byte message hash being verified.

View File

@@ -654,14 +654,12 @@ SharedWeakUnion<T>::convertToWeak()
break;
case destroy:
// We just added a weak ref. How could we destroy?
// LCOV_EXCL_START
UNREACHABLE(
"ripple::SharedWeakUnion::convertToWeak : destroying freshly "
"added ref");
delete p;
unsafeSetRawPtr(nullptr);
return true; // Should never happen
// LCOV_EXCL_STOP
case partialDestroy:
// This is a weird case. We just converted the last strong
// pointer to a weak pointer.

View File

@@ -32,15 +32,6 @@ class Number;
std::string
to_string(Number const& amount);
template <typename T>
constexpr bool
isPowerOfTen(T value)
{
while (value >= 10 && value % 10 == 0)
value /= 10;
return value == 1;
}
class Number
{
using rep = std::int64_t;
@@ -50,9 +41,7 @@ class Number
public:
// The range for the mantissa when normalized
constexpr static std::int64_t minMantissa = 1'000'000'000'000'000LL;
static_assert(isPowerOfTen(minMantissa));
constexpr static std::int64_t maxMantissa = minMantissa * 10 - 1;
static_assert(maxMantissa == 9'999'999'999'999'999LL);
constexpr static std::int64_t maxMantissa = 9'999'999'999'999'999LL;
// The range for the exponent when normalized
constexpr static int minExponent = -32768;
@@ -161,9 +150,6 @@ public:
return (mantissa_ < 0) ? -1 : (mantissa_ ? 1 : 0);
}
Number
truncate() const noexcept;
friend constexpr bool
operator>(Number const& x, Number const& y) noexcept
{
@@ -207,8 +193,6 @@ private:
class Guard;
};
constexpr static Number numZero{};
inline constexpr Number::Number(rep mantissa, int exponent, unchecked) noexcept
: mantissa_{mantissa}, exponent_{exponent}
{

View File

@@ -23,7 +23,7 @@
#include <xrpl/basics/Resolver.h>
#include <xrpl/beast/utility/Journal.h>
#include <boost/asio/io_service.hpp>
#include <boost/asio/io_context.hpp>
namespace ripple {
@@ -33,7 +33,7 @@ public:
explicit ResolverAsio() = default;
static std::unique_ptr<ResolverAsio>
New(boost::asio::io_service&, beast::Journal);
New(boost::asio::io_context&, beast::Journal);
};
} // namespace ripple

View File

@@ -176,7 +176,7 @@ public:
@param count the number of items the slab allocator can allocate; note
that a count of 0 is valid and means that the allocator
is, effectively, disabled. This can be very useful in some
contexts (e.g. when minimal memory usage is needed) and
allows for graceful failure.
*/
constexpr explicit SlabAllocator(

View File

@@ -565,7 +565,7 @@ operator<=>(base_uint<Bits, Tag> const& lhs, base_uint<Bits, Tag> const& rhs)
// This comparison might seem wrong on a casual inspection because it
// compares data internally stored as std::uint32_t byte-by-byte. But
// note that the underlying data is stored in big endian, even if the
// platform is little endian. This makes the comparison correct.
//
// FIXME: use std::lexicographical_compare_three_way once support is
// added to MacOS.
@@ -632,16 +632,6 @@ to_string(base_uint<Bits, Tag> const& a)
return strHex(a.cbegin(), a.cend());
}
template <std::size_t Bits, class Tag>
inline std::string
to_short_string(base_uint<Bits, Tag> const& a)
{
static_assert(
base_uint<Bits, Tag>::bytes > 4,
"For 4 bytes or less, use a native type");
return strHex(a.cbegin(), a.cbegin() + 4) + "...";
}
template <std::size_t Bits, class Tag>
inline std::ostream&
operator<<(std::ostream& out, base_uint<Bits, Tag> const& u)

View File

@@ -28,7 +28,7 @@ namespace ripple {
/*
* MSVC 2019 version 16.9.0 added [[nodiscard]] to the std comparison
* operator() functions. boost::bimap checks that the comparator is a
* BinaryFunction, in part by calling the function and ignoring the value.
* These two things don't play well together. These wrapper classes simply
* strip [[nodiscard]] from operator() for use in boost::bimap.

View File

@@ -28,8 +28,9 @@ namespace ripple {
// the destination can hold all values of the source. This is particularly
// handy when the source or destination is an enumeration type.
template <class Src, class Dest>
concept SafeToCast = (std::is_integral_v<Src> && std::is_integral_v<Dest>) &&
template <class Dest, class Src>
static constexpr bool is_safetocasttovalue_v =
(std::is_integral_v<Src> && std::is_integral_v<Dest>) &&
(std::is_signed<Src>::value || std::is_unsigned<Dest>::value) &&
(std::is_signed<Src>::value != std::is_signed<Dest>::value
? sizeof(Dest) > sizeof(Src)
@@ -77,7 +78,7 @@ inline constexpr std::
unsafe_cast(Src s) noexcept
{
static_assert(
!SafeToCast<Src, Dest>,
!is_safetocasttovalue_v<Dest, Src>,
"Only unsafe if casting signed to unsigned or "
"destination is too small");
return static_cast<Dest>(s);

View File

@@ -23,7 +23,8 @@
#include <xrpl/beast/utility/instrumentation.h>
#include <boost/asio/basic_waitable_timer.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <chrono>
#include <condition_variable>
@@ -32,7 +33,7 @@
namespace beast {
/** Measures handler latency on an io_service queue. */
/** Measures handler latency on an io_context queue. */
template <class Clock>
class io_latency_probe
{
@@ -44,12 +45,12 @@ private:
std::condition_variable_any m_cond;
std::size_t m_count;
duration const m_period;
boost::asio::io_service& m_ios;
boost::asio::io_context& m_ios;
boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer;
bool m_cancel;
public:
io_latency_probe(duration const& period, boost::asio::io_service& ios)
io_latency_probe(duration const& period, boost::asio::io_context& ios)
: m_count(1)
, m_period(period)
, m_ios(ios)
@@ -64,16 +65,16 @@ public:
cancel(lock, true);
}
/** Return the io_service associated with the latency probe. */
/** Return the io_context associated with the latency probe. */
/** @{ */
boost::asio::io_service&
get_io_service()
boost::asio::io_context&
get_io_context()
{
return m_ios;
}
boost::asio::io_service const&
get_io_service() const
boost::asio::io_context const&
get_io_context() const
{
return m_ios;
}
@@ -109,8 +110,10 @@ public:
std::lock_guard lock(m_mutex);
if (m_cancel)
throw std::logic_error("io_latency_probe is canceled");
m_ios.post(sample_op<Handler>(
std::forward<Handler>(handler), Clock::now(), false, this));
boost::asio::post(
m_ios,
sample_op<Handler>(
std::forward<Handler>(handler), Clock::now(), false, this));
}
/** Initiate continuous i/o latency sampling.
@@ -124,8 +127,10 @@ public:
std::lock_guard lock(m_mutex);
if (m_cancel)
throw std::logic_error("io_latency_probe is canceled");
m_ios.post(sample_op<Handler>(
std::forward<Handler>(handler), Clock::now(), true, this));
boost::asio::post(
m_ios,
sample_op<Handler>(
std::forward<Handler>(handler), Clock::now(), true, this));
}
private:
@@ -236,12 +241,13 @@ private:
// The latency is too high to maintain the desired
// period so don't bother with a timer.
//
m_probe->m_ios.post(
boost::asio::post(
m_probe->m_ios,
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
else
{
m_probe->m_timer.expires_from_now(when - now);
m_probe->m_timer.expires_after(when - now);
m_probe->m_timer.async_wait(
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
@@ -254,7 +260,8 @@ private:
if (!m_probe)
return;
typename Clock::time_point const now(Clock::now());
m_probe->m_ios.post(
boost::asio::post(
m_probe->m_ios,
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
};

View File

@@ -94,11 +94,7 @@ hash_append(Hasher& h, beast::IP::Address const& addr) noexcept
else if (addr.is_v6())
hash_append(h, addr.to_v6().to_bytes());
else
{
// LCOV_EXCL_START
UNREACHABLE("beast::hash_append : invalid address type");
// LCOV_EXCL_STOP
}
}
} // namespace beast

View File

@@ -8,9 +8,11 @@
#ifndef BEAST_TEST_YIELD_TO_HPP
#define BEAST_TEST_YIELD_TO_HPP
#include <boost/asio/io_service.hpp>
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/optional.hpp>
#include <boost/thread/csbl/memory/allocator_arg.hpp>
#include <condition_variable>
#include <mutex>
@@ -29,10 +31,12 @@ namespace test {
class enable_yield_to
{
protected:
boost::asio::io_service ios_;
boost::asio::io_context ios_;
private:
boost::optional<boost::asio::io_service::work> work_;
boost::optional<boost::asio::executor_work_guard<
boost::asio::io_context::executor_type>>
work_;
std::vector<std::thread> threads_;
std::mutex m_;
std::condition_variable cv_;
@@ -42,7 +46,8 @@ public:
/// The type of yield context passed to functions.
using yield_context = boost::asio::yield_context;
explicit enable_yield_to(std::size_t concurrency = 1) : work_(ios_)
explicit enable_yield_to(std::size_t concurrency = 1)
: work_(boost::asio::make_work_guard(ios_))
{
threads_.reserve(concurrency);
while (concurrency--)
@@ -56,9 +61,9 @@ public:
t.join();
}
/// Return the `io_service` associated with the object
boost::asio::io_service&
get_io_service()
/// Return the `io_context` associated with the object
boost::asio::io_context&
get_io_context()
{
return ios_;
}
@@ -111,13 +116,18 @@ enable_yield_to::spawn(F0&& f, FN&&... fn)
{
boost::asio::spawn(
ios_,
boost::allocator_arg,
boost::context::fixedsize_stack(2 * 1024 * 1024),
[&](yield_context yield) {
f(yield);
std::lock_guard lock{m_};
if (--running_ == 0)
cv_.notify_all();
},
boost::coroutines::attributes(2 * 1024 * 1024));
[](std::exception_ptr e) {
if (e)
std::rethrow_exception(e);
});
spawn(fn...);
}
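
Two more pieces of the same migration appear here: the work-keepalive object io_service::work is replaced by an executor_work_guard obtained from make_work_guard(), and boost::asio::spawn() now takes a completion handler receiving std::exception_ptr (plus, in the hunk above, an explicit 2 MB fixedsize_stack via the allocator_arg overload). A minimal sketch of the simple form, independent of this codebase:

#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <exception>
#include <thread>

int main()
{
    boost::asio::io_context ioc;

    // Keeps ioc.run() from returning while the handler queue is empty;
    // replaces the deprecated io_context::work.
    auto work = boost::asio::make_work_guard(ioc);

    std::thread runner([&ioc] { ioc.run(); });

    boost::asio::spawn(
        ioc,
        [](boost::asio::yield_context yield) {
            // coroutine body; may call async operations with `yield`
        },
        // Newer spawn() reports coroutine exceptions through this
        // handler instead of calling std::terminate.
        [](std::exception_ptr e) {
            if (e)
                std::rethrow_exception(e);
        });

    work.reset();  // let run() exit once all handlers drain
    runner.join();
}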

View File

@@ -42,7 +42,7 @@ public:
The argument string is available to suites and
allows for customization of the test. Each suite
defines its own syntax for the argument string.
The same argument is passed to all suites.
*/
void

View File

@@ -32,23 +32,18 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// The duplication is because Visual Studio 2019 cannot compile that header
// even with the option -Zc:__cplusplus added.
#define ALWAYS(cond, message, ...) assert((message) && (cond))
#define ALWAYS_OR_UNREACHABLE(cond, message) assert((message) && (cond))
#define ALWAYS_OR_UNREACHABLE(cond, message, ...) assert((message) && (cond))
#define SOMETIMES(cond, message, ...)
#define REACHABLE(message, ...)
#define UNREACHABLE(message, ...) assert((message) && false)
#endif
#define XRPL_ASSERT ALWAYS_OR_UNREACHABLE
#define XRPL_ASSERT_PARTS(cond, function, description, ...) \
XRPL_ASSERT(cond, function " : " description)
// How to use the instrumentation macros:
//
// * XRPL_ASSERT if cond must be true but the line might not be reached during
// fuzzing. Same as `assert` in normal use.
// * XRPL_ASSERT_PARTS is for convenience, and works like XRPL_ASSERT, but
// splits the message param into "function" and "description", then joins
// them with " : " before passing to XRPL_ASSERT.
// * ALWAYS if cond must be true _and_ the line must be reached during fuzzing.
// Same as `assert` in normal use.
// * REACHABLE if the line must be reached during fuzzing
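
A short usage sketch of the macros described above (popCount and its message strings are hypothetical, not from this codebase):

#include <xrpl/beast/utility/instrumentation.h>

int
popCount(int x)
{
    // Must hold, but fuzzing is not required to reach this line.
    XRPL_ASSERT(x >= 0, "example::popCount : non-negative input");
    int n = 0;
    while (x)
    {
        x &= x - 1;  // clear the lowest set bit
        ++n;
    }
    // A branch that must never execute at all:
    if (n > 31)
        UNREACHABLE("example::popCount : impossible bit count");
    return n;
}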

View File

@@ -217,7 +217,7 @@ Reader::parse(Value& root, BufferSequence const& bs)
std::string s;
s.reserve(buffer_size(bs));
for (auto const& b : bs)
s.append(buffer_cast<char const*>(b), buffer_size(b));
s.append(static_cast<char const*>(b.data()), buffer_size(b));
return parse(s, root);
}
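
buffer_cast was deprecated and later removed from Boost.Asio; const_buffer now exposes data() returning void const*, hence the static_cast above. A standalone sketch:

#include <boost/asio/buffer.hpp>
#include <string>

std::string
toString(boost::asio::const_buffer b)
{
    // data() replaces the deprecated buffer_cast<char const*>().
    return {static_cast<char const*>(b.data()), b.size()};
}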

View File

@@ -24,7 +24,6 @@
#include <xrpl/json/json_forwards.h>
#include <cstring>
#include <limits>
#include <map>
#include <string>
#include <vector>
@@ -159,9 +158,9 @@ public:
using ArrayIndex = UInt;
static Value const null;
static constexpr Int minInt = std::numeric_limits<Int>::min();
static constexpr Int maxInt = std::numeric_limits<Int>::max();
static constexpr UInt maxUInt = std::numeric_limits<UInt>::max();
static Int const minInt;
static Int const maxInt;
static UInt const maxUInt;
private:
class CZString
@@ -264,10 +263,6 @@ public:
bool
asBool() const;
/** Correct absolute value from int or unsigned int */
UInt
asAbsUInt() const;
// TODO: What is the "empty()" method this docstring mentions?
/** isNull() tests to see if this field is null. Don't use this method to
test for emptiness: use empty(). */
@@ -400,9 +395,6 @@ public:
/// Return true if the object has a member named key.
bool
isMember(std::string const& key) const;
/// Return true if the object has a member named key.
bool
isMember(StaticString const& key) const;
/// \brief Return a list of the member names.
///

View File

@@ -46,7 +46,7 @@ public:
* without formatting (not human friendly).
*
* The JSON document is written in a single line. It is not intended for 'human'
* consumption, but may be useful to support features such as RPC where bandwidth
* is limited. \sa Reader, Value
*/

View File

@@ -47,7 +47,7 @@ public:
public:
AutoSocket(
boost::asio::io_service& s,
boost::asio::io_context& s,
boost::asio::ssl::context& c,
bool secureOnly,
bool plainOnly)
@@ -58,7 +58,7 @@ public:
mSocket = std::make_unique<ssl_socket>(s, c);
}
AutoSocket(boost::asio::io_service& s, boost::asio::ssl::context& c)
AutoSocket(boost::asio::io_context& s, boost::asio::ssl::context& c)
: AutoSocket(s, c, false, false)
{
}

View File

@@ -23,7 +23,7 @@
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/utility/Journal.h>
#include <boost/asio/io_service.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/streambuf.hpp>
#include <chrono>
@@ -51,7 +51,7 @@ public:
static void
get(bool bSSL,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
std::deque<std::string> deqSites,
unsigned short const port,
std::string const& strPath,
@@ -65,7 +65,7 @@ public:
static void
get(bool bSSL,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
std::string strSite,
unsigned short const port,
std::string const& strPath,
@@ -80,7 +80,7 @@ public:
static void
request(
bool bSSL,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
std::string strSite,
unsigned short const port,
std::function<

View File

@@ -153,7 +153,7 @@ public:
{
strm.set_verify_callback(
std::bind(
&rfc2818_verify,
&rfc6125_verify,
host,
std::placeholders::_1,
std::placeholders::_2,
@@ -167,7 +167,7 @@ public:
/**
* @brief callback invoked for name verification - just passes through
* to the asio rfc2818 implementation.
* to the asio `host_name_verification` (rfc6125) implementation.
*
* @param domain hostname expected
* @param preverified passed by implementation
@@ -175,13 +175,13 @@ public:
* @param j journal for logging
*/
static bool
rfc2818_verify(
rfc6125_verify(
std::string const& domain,
bool preverified,
boost::asio::ssl::verify_context& ctx,
beast::Journal j)
{
if (boost::asio::ssl::rfc2818_verification(domain)(preverified, ctx))
if (boost::asio::ssl::host_name_verification(domain)(preverified, ctx))
return true;
JLOG(j.warn()) << "Outbound SSL connection to " << domain
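
host_name_verification supersedes rfc2818_verification in Boost.Asio, implementing the RFC 6125 matching rules; the callback above simply wraps it to add logging. A minimal sketch of the underlying call:

#include <boost/asio/ssl/host_name_verification.hpp>
#include <boost/asio/ssl/verify_context.hpp>
#include <string>

bool
verifyPeer(
    std::string const& host,
    bool preverified,
    boost::asio::ssl::verify_context& ctx)
{
    // Checks the peer certificate against the expected host name
    // per RFC 6125; returns true if the handshake may proceed.
    return boost::asio::ssl::host_name_verification(host)(preverified, ctx);
}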

View File

@@ -100,27 +100,7 @@ public:
bool
native() const
{
return std::visit(
[&]<ValidIssueType TIss>(TIss const& issue) {
if constexpr (std::is_same_v<TIss, Issue>)
return issue.native();
if constexpr (std::is_same_v<TIss, MPTIssue>)
return false;
},
issue_);
}
bool
integral() const
{
return std::visit(
[&]<ValidIssueType TIss>(TIss const& issue) {
if constexpr (std::is_same_v<TIss, Issue>)
return issue.native();
if constexpr (std::is_same_v<TIss, MPTIssue>)
return true;
},
issue_);
return holds<Issue>() && get<Issue>().native();
}
friend constexpr bool

View File

@@ -0,0 +1,565 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2019 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BASICS_FEES_H_INCLUDED
#define BASICS_FEES_H_INCLUDED
#include <xrpl/basics/safe_cast.h>
#include <xrpl/beast/utility/Zero.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/json/json_value.h>
#include <boost/multiprecision/cpp_int.hpp>
#include <boost/operators.hpp>
#include <iosfwd>
#include <limits>
#include <optional>
namespace ripple {
namespace feeunit {
/** "drops" are the smallest divisible amount of XRP. This is what most
of the code uses. */
struct dropTag;
/** "fee units" calculations are a not-really-unitless value that is used
to express the cost of a given transaction vs. a reference transaction.
They are primarily used by the Transactor classes. */
struct feeunitTag;
/** "fee levels" are used by the transaction queue to compare the relative
cost of transactions that require different levels of effort to process.
See also: src/ripple/app/misc/FeeEscalation.md#fee-level */
struct feelevelTag;
/** unitless values are plain scalars wrapped in a TaggedFee. They are
used for calculations in this header. */
struct unitlessTag;
template <class T>
using enable_if_unit_t = typename std::enable_if_t<
std::is_class_v<T> && std::is_object_v<typename T::unit_type> &&
std::is_object_v<typename T::value_type>>;
/** `is_usable_unit_v` is checked to ensure that only values with
known valid type tags can be used (sometimes transparently) in
non-fee contexts. At the time of implementation, this includes
all known tags, but more may be added in the future, and they
should not be added automatically unless determined to be
appropriate.
*/
template <class T, class = enable_if_unit_t<T>>
constexpr bool is_usable_unit_v =
std::is_same_v<typename T::unit_type, feeunitTag> ||
std::is_same_v<typename T::unit_type, feelevelTag> ||
std::is_same_v<typename T::unit_type, unitlessTag> ||
std::is_same_v<typename T::unit_type, dropTag>;
template <class UnitTag, class T>
class TaggedFee : private boost::totally_ordered<TaggedFee<UnitTag, T>>,
private boost::additive<TaggedFee<UnitTag, T>>,
private boost::equality_comparable<TaggedFee<UnitTag, T>, T>,
private boost::dividable<TaggedFee<UnitTag, T>, T>,
private boost::modable<TaggedFee<UnitTag, T>, T>,
private boost::unit_steppable<TaggedFee<UnitTag, T>>
{
public:
using unit_type = UnitTag;
using value_type = T;
private:
value_type fee_;
protected:
template <class Other>
static constexpr bool is_compatible_v =
std::is_arithmetic_v<Other> && std::is_arithmetic_v<value_type> &&
std::is_convertible_v<Other, value_type>;
template <class OtherFee, class = enable_if_unit_t<OtherFee>>
static constexpr bool is_compatiblefee_v =
is_compatible_v<typename OtherFee::value_type> &&
std::is_same_v<UnitTag, typename OtherFee::unit_type>;
template <class Other>
using enable_if_compatible_t =
typename std::enable_if_t<is_compatible_v<Other>>;
template <class OtherFee>
using enable_if_compatiblefee_t =
typename std::enable_if_t<is_compatiblefee_v<OtherFee>>;
public:
TaggedFee() = default;
constexpr TaggedFee(TaggedFee const& other) = default;
constexpr TaggedFee&
operator=(TaggedFee const& other) = default;
constexpr explicit TaggedFee(beast::Zero) : fee_(0)
{
}
constexpr TaggedFee&
operator=(beast::Zero)
{
fee_ = 0;
return *this;
}
constexpr explicit TaggedFee(value_type fee) : fee_(fee)
{
}
TaggedFee&
operator=(value_type fee)
{
fee_ = fee;
return *this;
}
/** Instances with the same unit, and a type that is
"safe" to convert to this one can be converted
implicitly */
template <
class Other,
class = std::enable_if_t<
is_compatible_v<Other> &&
is_safetocasttovalue_v<value_type, Other>>>
constexpr TaggedFee(TaggedFee<unit_type, Other> const& fee)
: TaggedFee(safe_cast<value_type>(fee.fee()))
{
}
constexpr TaggedFee
operator*(value_type const& rhs) const
{
return TaggedFee{fee_ * rhs};
}
friend constexpr TaggedFee
operator*(value_type lhs, TaggedFee const& rhs)
{
// multiplication is commutative
return rhs * lhs;
}
constexpr value_type
operator/(TaggedFee const& rhs) const
{
return fee_ / rhs.fee_;
}
TaggedFee&
operator+=(TaggedFee const& other)
{
fee_ += other.fee();
return *this;
}
TaggedFee&
operator-=(TaggedFee const& other)
{
fee_ -= other.fee();
return *this;
}
TaggedFee&
operator++()
{
++fee_;
return *this;
}
TaggedFee&
operator--()
{
--fee_;
return *this;
}
TaggedFee&
operator*=(value_type const& rhs)
{
fee_ *= rhs;
return *this;
}
TaggedFee&
operator/=(value_type const& rhs)
{
fee_ /= rhs;
return *this;
}
template <class transparent = value_type>
std::enable_if_t<std::is_integral_v<transparent>, TaggedFee&>
operator%=(value_type const& rhs)
{
fee_ %= rhs;
return *this;
}
TaggedFee
operator-() const
{
static_assert(
std::is_signed_v<T>, "- operator illegal on unsigned fee types");
return TaggedFee{-fee_};
}
bool
operator==(TaggedFee const& other) const
{
return fee_ == other.fee_;
}
template <class Other, class = enable_if_compatible_t<Other>>
bool
operator==(TaggedFee<unit_type, Other> const& other) const
{
return fee_ == other.fee();
}
bool
operator==(value_type other) const
{
return fee_ == other;
}
template <class Other, class = enable_if_compatible_t<Other>>
bool
operator!=(TaggedFee<unit_type, Other> const& other) const
{
return !operator==(other);
}
bool
operator<(TaggedFee const& other) const
{
return fee_ < other.fee_;
}
/** Returns true if the amount is not zero */
explicit constexpr
operator bool() const noexcept
{
return fee_ != 0;
}
/** Return the sign of the amount */
constexpr int
signum() const noexcept
{
return (fee_ < 0) ? -1 : (fee_ ? 1 : 0);
}
/** Returns the number of drops */
constexpr value_type
fee() const
{
return fee_;
}
template <class Other>
constexpr double
decimalFromReference(TaggedFee<unit_type, Other> reference) const
{
return static_cast<double>(fee_) / reference.fee();
}
// `is_usable_unit_v` is checked to ensure that only values with
// known valid type tags can be converted to JSON. At the time
// of implementation, that includes all known tags, but more may
// be added in the future.
std::enable_if_t<is_usable_unit_v<TaggedFee>, Json::Value>
jsonClipped() const
{
if constexpr (std::is_integral_v<value_type>)
{
using jsontype = std::conditional_t<
std::is_signed_v<value_type>,
Json::Int,
Json::UInt>;
constexpr auto min = std::numeric_limits<jsontype>::min();
constexpr auto max = std::numeric_limits<jsontype>::max();
if (fee_ < min)
return min;
if (fee_ > max)
return max;
return static_cast<jsontype>(fee_);
}
else
{
return fee_;
}
}
/** Returns the underlying value. Code SHOULD NOT call this
function unless the type has been abstracted away,
e.g. in a templated function.
*/
constexpr value_type
value() const
{
return fee_;
}
friend std::istream&
operator>>(std::istream& s, TaggedFee& val)
{
s >> val.fee_;
return s;
}
};
// Output Fees as just their numeric value.
template <class Char, class Traits, class UnitTag, class T>
std::basic_ostream<Char, Traits>&
operator<<(std::basic_ostream<Char, Traits>& os, TaggedFee<UnitTag, T> const& q)
{
return os << q.value();
}
template <class UnitTag, class T>
std::string
to_string(TaggedFee<UnitTag, T> const& amount)
{
return std::to_string(amount.fee());
}
template <class Source, class = enable_if_unit_t<Source>>
constexpr bool can_muldiv_source_v =
std::is_convertible_v<typename Source::value_type, std::uint64_t>;
template <class Dest, class = enable_if_unit_t<Dest>>
constexpr bool can_muldiv_dest_v =
can_muldiv_source_v<Dest> && // Dest is also a source
std::is_convertible_v<std::uint64_t, typename Dest::value_type> &&
sizeof(typename Dest::value_type) >= sizeof(std::uint64_t);
template <
class Source1,
class Source2,
class = enable_if_unit_t<Source1>,
class = enable_if_unit_t<Source2>>
constexpr bool can_muldiv_sources_v =
can_muldiv_source_v<Source1> && can_muldiv_source_v<Source2> &&
std::is_same_v<typename Source1::unit_type, typename Source2::unit_type>;
template <
class Source1,
class Source2,
class Dest,
class = enable_if_unit_t<Source1>,
class = enable_if_unit_t<Source2>,
class = enable_if_unit_t<Dest>>
constexpr bool can_muldiv_v =
can_muldiv_sources_v<Source1, Source2> && can_muldiv_dest_v<Dest>;
// Source and Dest can be the same by default
template <
class Source1,
class Source2,
class Dest,
class = enable_if_unit_t<Source1>,
class = enable_if_unit_t<Source2>,
class = enable_if_unit_t<Dest>>
constexpr bool can_muldiv_commute_v = can_muldiv_v<Source1, Source2, Dest> &&
!std::is_same_v<typename Source1::unit_type, typename Dest::unit_type>;
template <class T>
using enable_muldiv_source_t =
typename std::enable_if_t<can_muldiv_source_v<T>>;
template <class T>
using enable_muldiv_dest_t = typename std::enable_if_t<can_muldiv_dest_v<T>>;
template <class Source1, class Source2>
using enable_muldiv_sources_t =
typename std::enable_if_t<can_muldiv_sources_v<Source1, Source2>>;
template <class Source1, class Source2, class Dest>
using enable_muldiv_t =
typename std::enable_if_t<can_muldiv_v<Source1, Source2, Dest>>;
template <class Source1, class Source2, class Dest>
using enable_muldiv_commute_t =
typename std::enable_if_t<can_muldiv_commute_v<Source1, Source2, Dest>>;
template <class T>
TaggedFee<unitlessTag, T>
scalar(T value)
{
return TaggedFee<unitlessTag, T>{value};
}
template <
class Source1,
class Source2,
class Dest,
class = enable_muldiv_t<Source1, Source2, Dest>>
std::optional<Dest>
mulDivU(Source1 value, Dest mul, Source2 div)
{
// Fees can never be negative in any context.
if (value.value() < 0 || mul.value() < 0 || div.value() < 0)
{
// split the asserts so if one hits, the user can tell which
// without a debugger.
XRPL_ASSERT(
value.value() >= 0,
"ripple::feeunit::mulDivU : minimum value input");
XRPL_ASSERT(
mul.value() >= 0, "ripple::feeunit::mulDivU : minimum mul input");
XRPL_ASSERT(
div.value() >= 0, "ripple::feeunit::mulDivU : minimum div input");
return std::nullopt;
}
using desttype = typename Dest::value_type;
constexpr auto max = std::numeric_limits<desttype>::max();
// Shortcuts, since these happen a lot in the real world
if (value == div)
return mul;
if (mul.value() == div.value())
{
if (value.value() > max)
return std::nullopt;
return Dest{static_cast<desttype>(value.value())};
}
using namespace boost::multiprecision;
uint128_t product;
product = multiply(
product,
static_cast<std::uint64_t>(value.value()),
static_cast<std::uint64_t>(mul.value()));
auto quotient = product / div.value();
if (quotient > max)
return std::nullopt;
return Dest{static_cast<desttype>(quotient)};
}
} // namespace feeunit
template <class T>
using FeeLevel = feeunit::TaggedFee<feeunit::feelevelTag, T>;
using FeeLevel64 = FeeLevel<std::uint64_t>;
using FeeLevelDouble = FeeLevel<double>;
template <
class Source1,
class Source2,
class Dest,
class = feeunit::enable_muldiv_t<Source1, Source2, Dest>>
std::optional<Dest>
mulDiv(Source1 value, Dest mul, Source2 div)
{
return feeunit::mulDivU(value, mul, div);
}
template <
class Source1,
class Source2,
class Dest,
class = feeunit::enable_muldiv_commute_t<Source1, Source2, Dest>>
std::optional<Dest>
mulDiv(Dest value, Source1 mul, Source2 div)
{
// Multiplication is commutative
return feeunit::mulDivU(mul, value, div);
}
template <class Dest, class = feeunit::enable_muldiv_dest_t<Dest>>
std::optional<Dest>
mulDiv(std::uint64_t value, Dest mul, std::uint64_t div)
{
// Give the scalars a non-tag so the
// unit-handling version gets called.
return feeunit::mulDivU(feeunit::scalar(value), mul, feeunit::scalar(div));
}
template <class Dest, class = feeunit::enable_muldiv_dest_t<Dest>>
std::optional<Dest>
mulDiv(Dest value, std::uint64_t mul, std::uint64_t div)
{
// Multiplication is commutative
return mulDiv(mul, value, div);
}
template <
class Source1,
class Source2,
class = feeunit::enable_muldiv_sources_t<Source1, Source2>>
std::optional<std::uint64_t>
mulDiv(Source1 value, std::uint64_t mul, Source2 div)
{
// Give the scalars a dimensionless unit so the
// unit-handling version gets called.
auto unitresult = feeunit::mulDivU(value, feeunit::scalar(mul), div);
if (!unitresult)
return std::nullopt;
return unitresult->value();
}
template <
class Source1,
class Source2,
class = feeunit::enable_muldiv_sources_t<Source1, Source2>>
std::optional<std::uint64_t>
mulDiv(std::uint64_t value, Source1 mul, Source2 div)
{
// Multiplication is commutative
return mulDiv(mul, value, div);
}
template <class Dest, class Src>
constexpr std::enable_if_t<
std::is_same_v<typename Dest::unit_type, typename Src::unit_type> &&
std::is_integral_v<typename Dest::value_type> &&
std::is_integral_v<typename Src::value_type>,
Dest>
safe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{safe_cast<typename Dest::value_type>(s.value())};
}
template <class Dest, class Src>
constexpr std::enable_if_t<
std::is_same_v<typename Dest::unit_type, typename Src::unit_type> &&
std::is_integral_v<typename Dest::value_type> &&
std::is_integral_v<typename Src::value_type>,
Dest>
unsafe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{unsafe_cast<typename Dest::value_type>(s.value())};
}
} // namespace ripple
#endif // BASICS_FEES_H_INCLUDED
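
A usage sketch for the tagged types declared above; the include path is borrowed from elsewhere in this diff and the values are arbitrary:

#include <xrpl/protocol/FeeUnits.h>
#include <cassert>

int main()
{
    using namespace ripple;

    FeeLevel64 const base{256};
    FeeLevel64 const scaled = base * 4;  // same-unit arithmetic compiles
    assert(scaled == FeeLevel64{1024});

    // mulDiv computes value * mul / div through a 128-bit intermediate
    // and returns nullopt on overflow instead of silently wrapping.
    if (auto const level = mulDiv(scaled, FeeLevel64{500}, FeeLevel64{250}))
        assert(*level == FeeLevel64{2048});

    // Mixing value types under one unit tag is allowed where the cast
    // is safe; mixing different unit tags is a compile-time error.
    FeeLevelDouble const d = 1.5 * FeeLevelDouble{2.0};
    (void)d;
}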

View File

@@ -287,11 +287,9 @@ delegate(AccountID const& account, AccountID const& authorizedAccount) noexcept;
Keylet
bridge(STXChainBridge const& bridge, STXChainBridge::ChainType chainType);
// `seq` is stored as `sfXChainClaimID` in the object
Keylet
xChainClaimID(STXChainBridge const& bridge, std::uint64_t seq);
// `seq` is stored as `sfXChainAccountCreateCount` in the object
Keylet
xChainCreateAccountClaimID(STXChainBridge const& bridge, std::uint64_t seq);
@@ -346,24 +344,6 @@ vault(uint256 const& vaultKey)
return {ltVAULT, vaultKey};
}
Keylet
loanbroker(AccountID const& owner, std::uint32_t seq) noexcept;
inline Keylet
loanbroker(uint256 const& key)
{
return {ltLOAN_BROKER, key};
}
Keylet
loan(uint256 const& loanBrokerID, std::uint32_t loanSeq) noexcept;
inline Keylet
loan(uint256 const& key)
{
return {ltLOAN, key};
}
Keylet
permissionedDomain(AccountID const& account, std::uint32_t seq) noexcept;

View File

@@ -56,7 +56,7 @@ enum LedgerEntryType : std::uint16_t
#pragma push_macro("LEDGER_ENTRY")
#undef LEDGER_ENTRY
#define LEDGER_ENTRY(tag, value, ...) tag = value,
#define LEDGER_ENTRY(tag, value, name, rpcName, fields) tag = value,
#include <xrpl/protocol/detail/ledger_entries.macro>
@@ -188,15 +188,6 @@ enum LedgerSpecificFlags {
lsfMPTCanTransfer = 0x00000020,
lsfMPTCanClawback = 0x00000040,
lsmfMPTCanMutateCanLock = 0x00000002,
lsmfMPTCanMutateRequireAuth = 0x00000004,
lsmfMPTCanMutateCanEscrow = 0x00000008,
lsmfMPTCanMutateCanTrade = 0x00000010,
lsmfMPTCanMutateCanTransfer = 0x00000020,
lsmfMPTCanMutateCanClawback = 0x00000040,
lsmfMPTCanMutateMetadata = 0x00010000,
lsmfMPTCanMutateTransferFee = 0x00020000,
// ltMPTOKEN
lsfMPTAuthorized = 0x00000002,
@@ -205,11 +196,6 @@ enum LedgerSpecificFlags {
// ltVAULT
lsfVaultPrivate = 0x00010000,
// ltLOAN
lsfLoanDefault = 0x00010000,
lsfLoanImpaired = 0x00020000,
lsfLoanOverpayment = 0x00040000, // True, loan allows overpayments
};
//------------------------------------------------------------------------------

View File

@@ -20,8 +20,6 @@
#ifndef RIPPLE_PROTOCOL_PERMISSION_H_INCLUDED
#define RIPPLE_PROTOCOL_PERMISSION_H_INCLUDED
#include <xrpl/protocol/Rules.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/TxFormats.h>
#include <optional>
@@ -55,8 +53,6 @@ class Permission
private:
Permission();
std::unordered_map<std::uint16_t, uint256> txFeatureMap_;
std::unordered_map<std::uint16_t, Delegation> delegatableTx_;
std::unordered_map<std::string, GranularPermissionType>
@@ -74,9 +70,6 @@ public:
Permission&
operator=(Permission const&) = delete;
std::optional<std::string>
getPermissionName(std::uint32_t const value) const;
std::optional<std::uint32_t>
getGranularValue(std::string const& name) const;
@@ -86,12 +79,8 @@ public:
std::optional<TxType>
getGranularTxType(GranularPermissionType const& gpType) const;
std::optional<std::reference_wrapper<uint256 const>> const
getTxFeature(TxType txType) const;
bool
isDelegatable(std::uint32_t const& permissionValue, Rules const& rules)
const;
isDelegatable(std::uint32_t const& permissionValue) const;
// for tx level permission, permission value is equal to tx type plus one
uint32_t

View File

@@ -22,7 +22,7 @@
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/protocol/Units.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <cstdint>
@@ -56,10 +56,7 @@ std::size_t constexpr oversizeMetaDataCap = 5200;
/** The maximum number of entries per directory page */
std::size_t constexpr dirNodeMaxEntries = 32;
/** The maximum number of pages allowed in a directory
Made obsolete by fixDirectoryLimit amendment.
*/
/** The maximum number of pages allowed in a directory */
std::uint64_t constexpr dirNodeMaxPages = 262144;
/** The maximum number of items in an NFT page */
@@ -85,140 +82,6 @@ std::size_t constexpr maxDeletableTokenOfferEntries = 500;
*/
std::uint16_t constexpr maxTransferFee = 50000;
/** There are 10,000 basis points (bips) in 100%.
*
* Basis points represent 0.01%.
*
* Given a value X, to find the amount for B bps,
* use X * B / bipsPerUnity
*
* Example: If a loan broker has 999 XRP of debt, and must maintain 1,000 bps of
* that debt as cover (10%), then the minimum cover amount is 999,000,000 drops
* * 1000 / bipsPerUnity = 99,900,000 drops, or 99.9 XRP.
*
* Given a percentage P, to find the number of bps that percentage represents,
* use P * bipsPerUnity.
*
* Example: 50% is 0.50 * bipsPerUnity = 5,000 bps.
*/
Bips32 constexpr bipsPerUnity(100 * 100);
static_assert(bipsPerUnity == Bips32{10'000});
TenthBips32 constexpr tenthBipsPerUnity(bipsPerUnity.value() * 10);
static_assert(tenthBipsPerUnity == TenthBips32(100'000));
constexpr Bips32
percentageToBips(std::uint32_t percentage)
{
return Bips32(percentage * bipsPerUnity.value() / 100);
}
constexpr TenthBips32
percentageToTenthBips(std::uint32_t percentage)
{
return TenthBips32(percentage * tenthBipsPerUnity.value() / 100);
}
template <typename T, class TBips>
constexpr T
bipsOfValue(T value, Bips<TBips> bips)
{
return value * bips.value() / bipsPerUnity.value();
}
template <typename T, class TBips>
constexpr T
tenthBipsOfValue(T value, TenthBips<TBips> bips)
{
return value * bips.value() / tenthBipsPerUnity.value();
}
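
A self-contained restatement of the arithmetic documented above, checkable in isolation (plain integers stand in for the Bips32 wrapper):

#include <cstdint>

constexpr std::uint32_t bipsPerUnityValue = 100 * 100;  // 10,000 bps = 100%

constexpr std::uint64_t
bipsOf(std::uint64_t value, std::uint32_t bips)
{
    return value * bips / bipsPerUnityValue;
}

// 999 XRP of debt = 999,000,000 drops; 1,000 bps (10%) of cover:
static_assert(bipsOf(999'000'000, 1'000) == 99'900'000);  // 99.9 XRP
static_assert(bipsOf(100, 5'000) == 50);                  // 50% = 5,000 bps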
namespace Lending {
/** The maximum management fee rate allowed by a loan broker in 1/10 bips.
Valid values are between 0 and 10% inclusive.
*/
TenthBips16 constexpr maxManagementFeeRate(
unsafe_cast<std::uint16_t>(percentageToTenthBips(10).value()));
static_assert(maxManagementFeeRate == TenthBips16(std::uint16_t(10'000u)));
/** The maximum coverage rate required of a loan broker in 1/10 bips.
Valid values are between 0 and 100% inclusive.
*/
TenthBips32 constexpr maxCoverRate = percentageToTenthBips(100);
static_assert(maxCoverRate == TenthBips32(100'000u));
/** The maximum overpayment fee on a loan in 1/10 bips.
*
Valid values are between 0 and 100% inclusive.
*/
TenthBips32 constexpr maxOverpaymentFee = percentageToTenthBips(100);
static_assert(maxOverpaymentFee == TenthBips32(100'000u));
/** Annualized interest rate of the Loan in 1/10 bips.
*
* Valid values are between 0 and 100% inclusive.
*/
TenthBips32 constexpr maxInterestRate = percentageToTenthBips(100);
static_assert(maxInterestRate == TenthBips32(100'000u));
/** The maximum premium added to the interest rate for late payments on a loan
* in 1/10 bips.
*
* Valid values are between 0 and 100% inclusive.
*/
TenthBips32 constexpr maxLateInterestRate = percentageToTenthBips(100);
static_assert(maxLateInterestRate == TenthBips32(100'000u));
/** The maximum close interest rate charged for repaying a loan early in 1/10
* bips.
*
* Valid values are between 0 and 100% inclusive.
*/
TenthBips32 constexpr maxCloseInterestRate = percentageToTenthBips(100);
static_assert(maxCloseInterestRate == TenthBips32(100'000u));
/** The maximum overpayment interest rate charged on loan overpayments in 1/10
* bips.
*
* Valid values are between 0 and 100% inclusive.
*/
TenthBips32 constexpr maxOverpaymentInterestRate = percentageToTenthBips(100);
static_assert(maxOverpaymentInterestRate == TenthBips32(100'000u));
/** LoanPay transaction cost will be one base fee per X combined payments
*
* The number of payments is estimated based on the Amount paid and the Loan's
* Fixed Payment size. Overpayments (indicated with the tfLoanOverpayment flag)
* count as one more payment.
*
* This number was chosen arbitrarily, but should not be changed once released
* without an amendment
*/
static constexpr int loanPaymentsPerFeeIncrement = 5;
/** Maximum number of combined payments that a LoanPay transaction will process
*
* This limit is enforced during the loan payment process, and thus is not
* estimated. If the limit is hit, no further payments or overpayments will be
* processed, no matter how much of the transaction Amount is left, but the
* transaction will succeed with the payments that have been processed up to
* that point.
*
* This limit is independent of loanPaymentsPerFeeIncrement, so a transaction
* could potentially be charged for many more payments than actually get
* processed. Users should take care not to submit a transaction paying more
* than loanMaximumPaymentsPerTransaction * Loan.PeriodicPayment. Because
* overpayments are charged as a payment, if submitting
* loanMaximumPaymentsPerTransaction * Loan.PeriodicPayment, users should not
* set the tfLoanOverpayment flag.
*
* Even though they're independent, loanMaximumPaymentsPerTransaction should be
* a multiple of loanPaymentsPerFeeIncrement.
*
* This number was chosen arbitrarily, but should not be changed once released
* without an amendment
*/
static constexpr int loanMaximumPaymentsPerTransaction = 100;
} // namespace Lending
/** The maximum length of a URI inside an NFT */
std::size_t constexpr maxTokenURILength = 256;
@@ -259,13 +122,6 @@ std::size_t constexpr maxDataPayloadLength = 256;
/** Vault withdrawal policies */
std::uint8_t constexpr vaultStrategyFirstComeFirstServe = 1;
/** Default IOU scale factor for a Vault */
std::uint8_t constexpr vaultDefaultIOUScale = 6;
/** Maximum scale factor for a Vault. The number is chosen to ensure that
1 IOU can be always converted to shares.
10^19 > maxMPTokenAmount (2^64-1) > 10^18 */
std::uint8_t constexpr vaultMaximumIOUScale = 18;
/** Maximum recursion depth for vault shares being put as an asset inside
* another vault; counted from 0 */
std::uint8_t constexpr maxAssetCheckDepth = 5;

View File

@@ -22,7 +22,6 @@
#include <xrpl/basics/safe_cast.h>
#include <xrpl/json/json_value.h>
#include <xrpl/protocol/Units.h>
#include <cstdint>
#include <map>
@@ -72,10 +71,8 @@ class STCurrency;
STYPE(STI_VL, 7) \
STYPE(STI_ACCOUNT, 8) \
STYPE(STI_NUMBER, 9) \
STYPE(STI_INT32, 10) \
STYPE(STI_INT64, 11) \
\
/* 12-13 are reserved */ \
/* 10-13 are reserved */ \
STYPE(STI_OBJECT, 14) \
STYPE(STI_ARRAY, 15) \
\
@@ -139,8 +136,8 @@ field_code(int id, int index)
SFields are created at compile time.
Each SField, once constructed, lives until program termination, and there
is only one instance per fieldType/fieldValue pair which serves the
entire application.
is only one instance per fieldType/fieldValue pair which serves the entire
application.
*/
class SField
{
@@ -151,10 +148,8 @@ public:
sMD_ChangeNew = 0x02, // new value when it changes
sMD_DeleteFinal = 0x04, // final value when it is deleted
sMD_Create = 0x08, // value when it's created
sMD_Always = 0x10, // value when node containing it is affected at all
sMD_BaseTen = 0x20, // value is treated as base 10, overriding behavior
sMD_PseudoAccount = 0x40, // if this field is set in an ACCOUNT_ROOT
// _only_, then it is a pseudo-account
sMD_Always = 0x10, // value when node containing it is affected at all
sMD_BaseTen = 0x20,
sMD_Default =
sMD_ChangeOrig | sMD_ChangeNew | sMD_DeleteFinal | sMD_Create
};
@@ -189,7 +184,7 @@ public:
char const* fn,
int meta = sMD_Default,
IsSigning signing = IsSigning::yes);
explicit SField(private_access_tag_t, int fc, char const* fn);
explicit SField(private_access_tag_t, int fc);
static SField const&
getField(int fieldCode);
@@ -302,7 +297,7 @@ public:
static int
compare(SField const& f1, SField const& f2);
static std::unordered_map<int, SField const*> const&
static std::map<int, SField const*> const&
getKnownCodeToField()
{
return knownCodeToField;
@@ -310,8 +305,7 @@ public:
private:
static int num;
static std::unordered_map<int, SField const*> knownCodeToField;
static std::unordered_map<std::string, SField const*> knownNameToField;
static std::map<int, SField const*> knownCodeToField;
};
/** A field with a type known at compile time. */
@@ -358,9 +352,6 @@ using SF_UINT256 = TypedField<STBitString<256>>;
using SF_UINT384 = TypedField<STBitString<384>>;
using SF_UINT512 = TypedField<STBitString<512>>;
using SF_INT32 = TypedField<STInteger<std::int32_t>>;
using SF_INT64 = TypedField<STInteger<std::int64_t>>;
using SF_ACCOUNT = TypedField<STAccount>;
using SF_AMOUNT = TypedField<STAmount>;
using SF_ISSUE = TypedField<STIssue>;

View File

@@ -66,18 +66,16 @@ public:
static int const cMaxOffset = 80;
// Maximum native value supported by the code
constexpr static std::uint64_t cMinValue = 1'000'000'000'000'000ull;
static_assert(isPowerOfTen(cMinValue));
constexpr static std::uint64_t cMaxValue = cMinValue * 10 - 1;
static_assert(cMaxValue == 9'999'999'999'999'999ull);
constexpr static std::uint64_t cMaxNative = 9'000'000'000'000'000'000ull;
static std::uint64_t const cMinValue = 1000000000000000ull;
static std::uint64_t const cMaxValue = 9999999999999999ull;
static std::uint64_t const cMaxNative = 9000000000000000000ull;
// Max native value on network.
constexpr static std::uint64_t cMaxNativeN = 100'000'000'000'000'000ull;
constexpr static std::uint64_t cIssuedCurrency = 0x8'000'000'000'000'000ull;
constexpr static std::uint64_t cPositive = 0x4'000'000'000'000'000ull;
constexpr static std::uint64_t cMPToken = 0x2'000'000'000'000'000ull;
constexpr static std::uint64_t cValueMask = ~(cPositive | cMPToken);
static std::uint64_t const cMaxNativeN = 100000000000000000ull;
static std::uint64_t const cIssuedCurrency = 0x8000000000000000ull;
static std::uint64_t const cPositive = 0x4000000000000000ull;
static std::uint64_t const cMPToken = 0x2000000000000000ull;
static std::uint64_t const cValueMask = ~(cPositive | cMPToken);
static std::uint64_t const uRateOne;
@@ -176,9 +174,6 @@ public:
int
exponent() const noexcept;
bool
integral() const noexcept;
bool
native() const noexcept;
@@ -459,12 +454,6 @@ STAmount::exponent() const noexcept
return mOffset;
}
inline bool
STAmount::integral() const noexcept
{
return mAsset.integral();
}
inline bool
STAmount::native() const noexcept
{
@@ -583,7 +572,7 @@ STAmount::clear()
{
// The -100 is used to allow 0 to sort less than small positive values
// which have a negative exponent.
mOffset = integral() ? 0 : -100;
mOffset = native() ? 0 : -100;
mValue = 0;
mIsNegative = false;
}
@@ -706,53 +695,6 @@ divRoundStrict(
std::uint64_t
getRate(STAmount const& offerOut, STAmount const& offerIn);
/** Round an arbitrary precision Amount to the precision of an STAmount that has
* a given exponent.
*
* This is used to ensure that calculations involving IOU amounts do not collect
* dust beyond the precision of the reference value.
*
* @param value The value to be rounded
* @param scale An exponent value to establish the precision limit of
* `value`. Should be larger than `value.exponent()`.
* @param rounding Optional Number rounding mode
*
*/
STAmount
roundToScale(
STAmount const& value,
std::int32_t scale,
Number::rounding_mode rounding = Number::getround());
/** Round an arbitrary precision Number to the precision of a given Asset.
*
* This is used to ensure that calculations do not collect dust beyond the
* precision of the reference value for IOUs, or fractional amounts for the
* integral types XRP and MPT.
*
* @param asset The relevant asset
* @param value The value to be rounded
* @param scale Only relevant to IOU assets. An exponent value to establish the
* precision limit of `value`. Should be larger than `value.exponent()`.
* @param rounding Optional Number rounding mode
*/
template <AssetType A>
Number
roundToAsset(
A const& asset,
Number const& value,
std::int32_t scale,
Number::rounding_mode rounding = Number::getround())
{
NumberRoundModeGuard mg(rounding);
STAmount const ret{asset, value};
if (ret.integral())
return ret;
// Note that the ctor will round integral types (XRP, MPT) via canonicalize,
// so no extra work is needed for those.
return roundToScale(ret, scale);
}
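
A self-contained analogue of the dust-rounding idea above: a value held as mantissa × 10^exponent is coarsened so that no digits survive below the reference scale (round-half-up stands in for the configurable Number rounding mode):

#include <cassert>
#include <cstdint>

std::int64_t
roundMantissaToScale(std::int64_t mantissa, int exponent, int scale)
{
    int const drop = scale - exponent;  // digits below the scale to discard
    if (drop <= 0)
        return mantissa;  // already at or coarser than the target scale
    std::int64_t unit = 1;
    for (int i = 0; i < drop; ++i)
        unit *= 10;
    return (mantissa + unit / 2) / unit;  // round half up
}

int main()
{
    // 0.123456 (123456e-6) rounded to scale -4 becomes 0.1235 (1235e-4).
    assert(roundMantissaToScale(123456, -6, -4) == 1235);
    // Already coarser than the requested scale: unchanged.
    assert(roundMantissaToScale(1234, -4, -6) == 1234);
}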
//------------------------------------------------------------------------------
inline bool

View File

@@ -81,8 +81,6 @@ using STUInt16 = STInteger<std::uint16_t>;
using STUInt32 = STInteger<std::uint32_t>;
using STUInt64 = STInteger<std::uint64_t>;
using STInt32 = STInteger<std::int32_t>;
template <typename Integer>
inline STInteger<Integer>::STInteger(Integer v) : value_(v)
{

View File

@@ -26,9 +26,7 @@
namespace ripple {
class Rules;
namespace test {
class Invariants_test;
}
class STLedgerEntry final : public STObject, public CountedObject<STLedgerEntry>
{
@@ -38,8 +36,6 @@ class STLedgerEntry final : public STObject, public CountedObject<STLedgerEntry>
public:
using pointer = std::shared_ptr<STLedgerEntry>;
using ref = std::shared_ptr<STLedgerEntry> const&;
using const_pointer = std::shared_ptr<STLedgerEntry const>;
using const_ref = std::shared_ptr<STLedgerEntry const> const&;
/** Create an empty object with the given key and type. */
explicit STLedgerEntry(Keylet const& k);
@@ -58,7 +54,7 @@ public:
getText() const override;
Json::Value
getJson(JsonOptions options = JsonOptions::none) const override;
getJson(JsonOptions options) const override;
/** Returns the 'key' (or 'index') of this item.
The key identifies this entry's position in
@@ -88,8 +84,7 @@ private:
void
setSLEType();
friend test::Invariants_test; // this test wants access to the private
// type_
friend Invariants_test; // this test wants access to the private type_
STBase*
copy(std::size_t n, void* buf) const override;

View File

@@ -25,6 +25,7 @@
#include <xrpl/basics/chrono.h>
#include <xrpl/basics/contract.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/protocol/FeeUnits.h>
#include <xrpl/protocol/HashPrefix.h>
#include <xrpl/protocol/SOTemplate.h>
#include <xrpl/protocol/STAmount.h>
@@ -33,7 +34,6 @@
#include <xrpl/protocol/STIssue.h>
#include <xrpl/protocol/STPathSet.h>
#include <xrpl/protocol/STVector256.h>
#include <xrpl/protocol/Units.h>
#include <xrpl/protocol/detail/STVar.h>
#include <boost/iterator/transform_iterator.hpp>
@@ -231,8 +231,6 @@ public:
getFieldH192(SField const& field) const;
uint256
getFieldH256(SField const& field) const;
std::int32_t
getFieldI32(SField const& field) const;
AccountID
getAccountID(SField const& field) const;
@@ -244,9 +242,6 @@ public:
getFieldPathSet(SField const& field) const;
STVector256 const&
getFieldV256(SField const& field) const;
// If not found, returns an object constructed with the given field
STObject
getFieldObject(SField const& field) const;
STArray const&
getFieldArray(SField const& field) const;
STCurrency const&
@@ -370,8 +365,6 @@ public:
void
setFieldH256(SField const& field, uint256 const&);
void
setFieldI32(SField const& field, std::int32_t);
void
setFieldVL(SField const& field, Blob const&);
void
setFieldVL(SField const& field, Slice const&);
@@ -393,8 +386,6 @@ public:
setFieldV256(SField const& field, STVector256 const& v);
void
setFieldArray(SField const& field, STArray const& v);
void
setFieldObject(SField const& field, STObject const& v);
template <class Tag>
void
@@ -501,8 +492,6 @@ public:
value_type
operator*() const;
/// Do not use operator->() unless the field is required, or you've checked
/// that it's set.
T const*
operator->() const;
@@ -526,26 +515,7 @@ protected:
// Constraint += and -= ValueProxy operators
// to value types that support arithmetic operations
template <typename U>
concept IsArithmeticNumber = std::is_arithmetic_v<U> ||
std::is_same_v<U, Number> || std::is_same_v<U, STAmount>;
template <
typename U,
typename Value = typename U::value_type,
typename Unit = typename U::unit_type>
concept IsArithmeticValueUnit =
std::is_same_v<U, unit::ValueUnit<Unit, Value>> &&
IsArithmeticNumber<Value> && std::is_class_v<Unit>;
template <typename U, typename Value = typename U::value_type>
concept IsArithmeticST = !IsArithmeticValueUnit<U> && IsArithmeticNumber<Value>;
template <typename U>
concept IsArithmetic =
IsArithmeticNumber<U> || IsArithmeticST<U> || IsArithmeticValueUnit<U>;
template <class T, class U>
concept Addable = requires(T t, U u) { t = t + u; };
template <typename T, typename U>
concept IsArithmeticCompatible =
IsArithmetic<typename T::value_type> && Addable<typename T::value_type, U>;
concept IsArithmetic = std::is_arithmetic_v<U> || std::is_same_v<U, STAmount>;
template <class T>
class STObject::ValueProxy : public Proxy<T>
@@ -565,12 +535,10 @@ public:
// Convenience operators for value types supporting
// arithmetic operations
template <IsArithmetic U>
requires IsArithmeticCompatible<T, U>
ValueProxy&
operator+=(U const& u);
template <IsArithmetic U>
requires IsArithmeticCompatible<T, U>
ValueProxy&
operator-=(U const& u);
@@ -760,8 +728,6 @@ STObject::Proxy<T>::operator*() const -> value_type
return this->value();
}
/// Do not use operator->() unless the field is required, or you've checked that
/// it's set.
template <class T>
T const*
STObject::Proxy<T>::operator->() const
@@ -808,7 +774,6 @@ STObject::ValueProxy<T>::operator=(U&& u)
template <typename T>
template <IsArithmetic U>
requires IsArithmeticCompatible<T, U>
STObject::ValueProxy<T>&
STObject::ValueProxy<T>::operator+=(U const& u)
{
@@ -818,7 +783,6 @@ STObject::ValueProxy<T>::operator+=(U const& u)
template <class T>
template <IsArithmetic U>
requires IsArithmeticCompatible<T, U>
STObject::ValueProxy<T>&
STObject::ValueProxy<T>::operator-=(U const& u)
{

View File

@@ -54,6 +54,34 @@ public:
Json::Value error;
};
/** Holds the serialized result of parsing an input JSON array.
This does validation and checking on the provided JSON.
*/
class STParsedJSONArray
{
public:
/** Parses and creates an STParsedJSON array.
The result of the parsing is stored in array and error.
Exceptions:
Does not throw.
@param name The name of the JSON field, used in diagnostics.
@param json The JSON-RPC to parse.
*/
STParsedJSONArray(std::string const& name, Json::Value const& json);
STParsedJSONArray() = delete;
STParsedJSONArray(STParsedJSONArray const&) = delete;
STParsedJSONArray&
operator=(STParsedJSONArray const&) = delete;
~STParsedJSONArray() = default;
/** The STArray if the parse was successful. */
std::optional<STArray> array;
/** On failure, an appropriate set of error values. */
Json::Value error;
};
} // namespace ripple
#endif
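
A usage sketch for the new class (header path assumed; the success value returned here is a placeholder):

#include <xrpl/json/json_value.h>
#include <xrpl/protocol/STParsedJSON.h>

Json::Value
parseMemos(Json::Value const& memos)
{
    // The constructor does not throw; failure is reported through
    // the `error` member instead.
    ripple::STParsedJSONArray parsed("Memos", memos);
    if (!parsed.array)
        return parsed.error;  // JSON-RPC style error object
    // ... hand *parsed.array to the caller ...
    return Json::Value(Json::objectValue);
}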

View File

@@ -87,14 +87,8 @@ public:
getFullText() const override;
// Outer transaction functions / signature functions.
static Blob
getSignature(STObject const& sigObject);
Blob
getSignature() const
{
return getSignature(*this);
}
getSignature() const;
uint256
getSigningHash() const;
@@ -125,20 +119,13 @@ public:
getJson(JsonOptions options, bool binary) const;
void
sign(
PublicKey const& publicKey,
SecretKey const& secretKey,
std::optional<std::reference_wrapper<SField const>> signatureTarget =
{});
enum class RequireFullyCanonicalSig : bool { no, yes };
sign(PublicKey const& publicKey, SecretKey const& secretKey);
/** Check the signature.
@param requireCanonicalSig If `true`, check that the signature is fully
canonical. If `false`, only check that the signature is valid.
@param rules The current ledger rules.
@return `true` if valid signature. If invalid, the error message string.
*/
enum class RequireFullyCanonicalSig : bool { no, yes };
Expected<void, std::string>
checkSign(RequireFullyCanonicalSig requireCanonicalSig, Rules const& rules)
const;
@@ -163,34 +150,17 @@ public:
char status,
std::string const& escapedMetaData) const;
std::vector<uint256> const&
std::vector<uint256>
getBatchTransactionIDs() const;
private:
/** Check the signature.
@param requireCanonicalSig If `true`, check that the signature is fully
canonical. If `false`, only check that the signature is valid.
@param rules The current ledger rules.
@param sigObject Reference to object that contains the signature fields.
Will be *this more often than not.
@return `true` if valid signature. If invalid, the error message string.
*/
Expected<void, std::string>
checkSign(
RequireFullyCanonicalSig requireCanonicalSig,
Rules const& rules,
STObject const& sigObject) const;
Expected<void, std::string>
checkSingleSign(
RequireFullyCanonicalSig requireCanonicalSig,
STObject const& sigObject) const;
checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const;
Expected<void, std::string>
checkMultiSign(
RequireFullyCanonicalSig requireCanonicalSig,
Rules const& rules,
STObject const& sigObject) const;
Rules const& rules) const;
Expected<void, std::string>
checkBatchSingleSign(
@@ -209,7 +179,7 @@ private:
move(std::size_t n, void* buf) override;
friend class detail::STVar;
mutable std::vector<uint256> batchTxnIds_;
mutable std::vector<uint256> batch_txn_ids_;
};
bool

View File

@@ -22,10 +22,10 @@
#include <xrpl/basics/Log.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/protocol/FeeUnits.h>
#include <xrpl/protocol/PublicKey.h>
#include <xrpl/protocol/STObject.h>
#include <xrpl/protocol/SecretKey.h>
#include <xrpl/protocol/Units.h>
#include <cstdint>
#include <optional>

View File

@@ -673,8 +673,7 @@ isTerRetry(TER x) noexcept
inline bool
isTesSuccess(TER x) noexcept
{
// Makes use of TERSubset::operator bool()
return !(x);
return (x == tesSUCCESS);
}
inline bool

View File

@@ -127,8 +127,6 @@ constexpr std::uint32_t tfTrustSetPermissionMask = ~(tfUniversal | tfSetfAuth |
// EnableAmendment flags:
constexpr std::uint32_t tfGotMajority = 0x00010000;
constexpr std::uint32_t tfLostMajority = 0x00020000;
constexpr std::uint32_t tfChangeMask =
~( tfUniversal | tfGotMajority | tfLostMajority);
// PaymentChannelClaim flags:
constexpr std::uint32_t tfRenew = 0x00010000;
@@ -143,8 +141,7 @@ constexpr std::uint32_t const tfTransferable = 0x00000008;
constexpr std::uint32_t const tfMutable = 0x00000010;
// MPTokenIssuanceCreate flags:
// Note: tf/lsfMPTLocked is intentionally omitted, since this transaction
// is not allowed to modify it.
// NOTE - there is intentionally no flag here for lsfMPTLocked, which this transaction cannot mutate.
constexpr std::uint32_t const tfMPTCanLock = lsfMPTCanLock;
constexpr std::uint32_t const tfMPTRequireAuth = lsfMPTRequireAuth;
constexpr std::uint32_t const tfMPTCanEscrow = lsfMPTCanEscrow;
@@ -154,20 +151,6 @@ constexpr std::uint32_t const tfMPTCanClawback = lsfMPTCanClawback;
constexpr std::uint32_t const tfMPTokenIssuanceCreateMask =
~(tfUniversal | tfMPTCanLock | tfMPTRequireAuth | tfMPTCanEscrow | tfMPTCanTrade | tfMPTCanTransfer | tfMPTCanClawback);
// MPTokenIssuanceCreate MutableFlags:
// Indicating specific fields or flags may be changed after issuance.
constexpr std::uint32_t const tmfMPTCanMutateCanLock = lsmfMPTCanMutateCanLock;
constexpr std::uint32_t const tmfMPTCanMutateRequireAuth = lsmfMPTCanMutateRequireAuth;
constexpr std::uint32_t const tmfMPTCanMutateCanEscrow = lsmfMPTCanMutateCanEscrow;
constexpr std::uint32_t const tmfMPTCanMutateCanTrade = lsmfMPTCanMutateCanTrade;
constexpr std::uint32_t const tmfMPTCanMutateCanTransfer = lsmfMPTCanMutateCanTransfer;
constexpr std::uint32_t const tmfMPTCanMutateCanClawback = lsmfMPTCanMutateCanClawback;
constexpr std::uint32_t const tmfMPTCanMutateMetadata = lsmfMPTCanMutateMetadata;
constexpr std::uint32_t const tmfMPTCanMutateTransferFee = lsmfMPTCanMutateTransferFee;
constexpr std::uint32_t const tmfMPTokenIssuanceCreateMutableMask =
~(tmfMPTCanMutateCanLock | tmfMPTCanMutateRequireAuth | tmfMPTCanMutateCanEscrow | tmfMPTCanMutateCanTrade
| tmfMPTCanMutateCanTransfer | tmfMPTCanMutateCanClawback | tmfMPTCanMutateMetadata | tmfMPTCanMutateTransferFee);
// MPTokenAuthorize flags:
constexpr std::uint32_t const tfMPTUnauthorize = 0x00000001;
constexpr std::uint32_t const tfMPTokenAuthorizeMask = ~(tfUniversal | tfMPTUnauthorize);
@@ -178,25 +161,6 @@ constexpr std::uint32_t const tfMPTUnlock = 0x00000002;
constexpr std::uint32_t const tfMPTokenIssuanceSetMask = ~(tfUniversal | tfMPTLock | tfMPTUnlock);
constexpr std::uint32_t const tfMPTokenIssuanceSetPermissionMask = ~(tfUniversal | tfMPTLock | tfMPTUnlock);
// MPTokenIssuanceSet MutableFlags:
// Set or Clear flags.
constexpr std::uint32_t const tmfMPTSetCanLock = 0x00000001;
constexpr std::uint32_t const tmfMPTClearCanLock = 0x00000002;
constexpr std::uint32_t const tmfMPTSetRequireAuth = 0x00000004;
constexpr std::uint32_t const tmfMPTClearRequireAuth = 0x00000008;
constexpr std::uint32_t const tmfMPTSetCanEscrow = 0x00000010;
constexpr std::uint32_t const tmfMPTClearCanEscrow = 0x00000020;
constexpr std::uint32_t const tmfMPTSetCanTrade = 0x00000040;
constexpr std::uint32_t const tmfMPTClearCanTrade = 0x00000080;
constexpr std::uint32_t const tmfMPTSetCanTransfer = 0x00000100;
constexpr std::uint32_t const tmfMPTClearCanTransfer = 0x00000200;
constexpr std::uint32_t const tmfMPTSetCanClawback = 0x00000400;
constexpr std::uint32_t const tmfMPTClearCanClawback = 0x00000800;
constexpr std::uint32_t const tmfMPTokenIssuanceSetMutableMask = ~(tmfMPTSetCanLock | tmfMPTClearCanLock |
tmfMPTSetRequireAuth | tmfMPTClearRequireAuth | tmfMPTSetCanEscrow | tmfMPTClearCanEscrow |
tmfMPTSetCanTrade | tmfMPTClearCanTrade | tmfMPTSetCanTransfer | tmfMPTClearCanTransfer |
tmfMPTSetCanClawback | tmfMPTClearCanClawback);
// MPTokenIssuanceDestroy flags:
constexpr std::uint32_t const tfMPTokenIssuanceDestroyMask = ~tfUniversal;
@@ -285,32 +249,6 @@ constexpr std::uint32_t tfIndependent = 0x00080000;
constexpr std::uint32_t const tfBatchMask =
~(tfUniversal | tfAllOrNothing | tfOnlyOne | tfUntilFailure | tfIndependent) | tfInnerBatchTxn;
// LoanSet and LoanPay flags:
// LoanSet: True, indicates the loan supports overpayments
// LoanPay: True, indicates any excess in this payment can be used
// as an overpayment. False, no overpayments will be taken.
constexpr std::uint32_t const tfLoanOverpayment = 0x00010000;
// LoanPay exclusive flags:
// tfLoanFullPayment: True, indicates that the payment is an early
// full payment. It must pay the entire loan including close
// interest and fees, or it will fail. False: Not a full payment.
constexpr std::uint32_t const tfLoanFullPayment = 0x00020000;
// tfLoanLatePayment: True, indicates that the payment is late,
// and includes late interest and fees. If the loan is not late,
// it will fail. False: not a late payment. If the current payment
// is overdue, the transaction will fail.
constexpr std::uint32_t const tfLoanLatePayment = 0x00040000;
constexpr std::uint32_t const tfLoanSetMask = ~(tfUniversal |
tfLoanOverpayment);
constexpr std::uint32_t const tfLoanPayMask = ~(tfUniversal |
tfLoanOverpayment | tfLoanFullPayment | tfLoanLatePayment);
// LoanManage flags:
constexpr std::uint32_t const tfLoanDefault = 0x00010000;
constexpr std::uint32_t const tfLoanImpair = 0x00020000;
constexpr std::uint32_t const tfLoanUnimpair = 0x00040000;
constexpr std::uint32_t const tfLoanManageMask = ~(tfUniversal | tfLoanDefault | tfLoanImpair | tfLoanUnimpair);
// clang-format on
} // namespace ripple
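
The tf*Mask constants above all follow one idiom: a transactor computes the complement of every flag it accepts, then rejects any transaction carrying a bit inside that complement. A self-contained sketch with simplified, hypothetical values:

#include <cstdint>

constexpr std::uint32_t exUniversal = 0x80000000u;  // simplified stand-in
constexpr std::uint32_t exRenew = 0x00010000u;
constexpr std::uint32_t exClose = 0x00020000u;
constexpr std::uint32_t exMask = ~(exUniversal | exRenew | exClose);

constexpr bool
flagsAllowed(std::uint32_t flags)
{
    return (flags & exMask) == 0;  // any unknown bit set -> invalid
}

static_assert(flagsAllowed(exRenew | exClose));
static_assert(!flagsAllowed(0x00000001u));  // unrecognized flag rejected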

View File

@@ -59,7 +59,7 @@ enum TxType : std::uint16_t
#pragma push_macro("TRANSACTION")
#undef TRANSACTION
#define TRANSACTION(tag, value, ...) tag = value,
#define TRANSACTION(tag, value, name, delegatable, fields) tag = value,
#include <xrpl/protocol/detail/transactions.macro>

View File

@@ -1,555 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2019 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef PROTOCOL_UNITS_H_INCLUDED
#define PROTOCOL_UNITS_H_INCLUDED
#include <xrpl/basics/safe_cast.h>
#include <xrpl/beast/utility/Zero.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/json/json_value.h>
#include <boost/multiprecision/cpp_int.hpp>
#include <boost/operators.hpp>
#include <iosfwd>
#include <limits>
#include <optional>
namespace ripple {
namespace unit {
/** "drops" are the smallest divisible amount of XRP. This is what most
of the code uses. */
struct dropTag;
/** "fee levels" are used by the transaction queue to compare the relative
cost of transactions that require different levels of effort to process.
See also: src/ripple/app/misc/FeeEscalation.md#fee-level */
struct feelevelTag;
/** unitless values are plain scalars wrapped in a ValueUnit. They are
used for calculations in this header. */
struct unitlessTag;
/** Units to represent basis points (bips) and 1/10 basis points */
class BipsTag;
class TenthBipsTag;
// These names don't have to be too descriptive, because we're in the "unit"
// namespace.
template <class T>
concept Valid = std::is_class_v<T> && std::is_object_v<typename T::unit_type> &&
std::is_object_v<typename T::value_type>;
/** `Usable` is checked to ensure that only values with
known valid type tags can be used (sometimes transparently) in
non-unit contexts. At the time of implementation, this includes
all known tags, but more may be added in the future, and they
should not be added automatically unless determined to be
appropriate.
*/
template <class T>
concept Usable = Valid<T> &&
(std::is_same_v<typename T::unit_type, feelevelTag> ||
std::is_same_v<typename T::unit_type, unitlessTag> ||
std::is_same_v<typename T::unit_type, dropTag> ||
std::is_same_v<typename T::unit_type, BipsTag> ||
std::is_same_v<typename T::unit_type, TenthBipsTag>);
template <class Other, class VU>
concept Compatible = Valid<VU> && std::is_arithmetic_v<Other> &&
std::is_arithmetic_v<typename VU::value_type> &&
std::is_convertible_v<Other, typename VU::value_type>;
template <class T>
concept Integral = std::is_integral_v<T>;
template <class VU>
concept IntegralValue = Integral<typename VU::value_type>;
template <class VU1, class VU2>
concept CastableValue = IntegralValue<VU1> && IntegralValue<VU2> &&
std::is_same_v<typename VU1::unit_type, typename VU2::unit_type>;
template <class UnitTag, class T>
class ValueUnit : private boost::totally_ordered<ValueUnit<UnitTag, T>>,
private boost::additive<ValueUnit<UnitTag, T>>,
private boost::equality_comparable<ValueUnit<UnitTag, T>, T>,
private boost::dividable<ValueUnit<UnitTag, T>, T>,
private boost::modable<ValueUnit<UnitTag, T>, T>,
private boost::unit_steppable<ValueUnit<UnitTag, T>>
{
public:
using unit_type = UnitTag;
using value_type = T;
private:
value_type value_;
public:
ValueUnit() = default;
constexpr ValueUnit(ValueUnit const& other) = default;
constexpr ValueUnit&
operator=(ValueUnit const& other) = default;
constexpr explicit ValueUnit(beast::Zero) : value_(0)
{
}
constexpr ValueUnit&
operator=(beast::Zero)
{
value_ = 0;
return *this;
}
constexpr explicit ValueUnit(value_type value) : value_(value)
{
}
constexpr ValueUnit&
operator=(value_type value)
{
value_ = value;
return *this;
}
/** Instances with the same unit, and a type that is
"safe" to convert to this one can be converted
implicitly */
template <Compatible<ValueUnit> Other>
constexpr ValueUnit(ValueUnit<unit_type, Other> const& value)
requires SafeToCast<Other, value_type>
: ValueUnit(safe_cast<value_type>(value.value()))
{
}
constexpr ValueUnit
operator+(value_type const& rhs) const
{
return ValueUnit{value_ + rhs};
}
friend constexpr ValueUnit
operator+(value_type lhs, ValueUnit const& rhs)
{
// addition is commutative
return rhs + lhs;
}
constexpr ValueUnit
operator-(value_type const& rhs) const
{
return ValueUnit{value_ - rhs};
}
friend constexpr ValueUnit
operator-(value_type lhs, ValueUnit const& rhs)
{
// subtraction is NOT commutative, but (lhs + (-rhs)) is addition, which is
return -rhs + lhs;
}
constexpr ValueUnit
operator*(value_type const& rhs) const
{
return ValueUnit{value_ * rhs};
}
friend constexpr ValueUnit
operator*(value_type lhs, ValueUnit const& rhs)
{
// multiplication is commutative
return rhs * lhs;
}
constexpr value_type
operator/(ValueUnit const& rhs) const
{
return value_ / rhs.value_;
}
ValueUnit&
operator+=(ValueUnit const& other)
{
value_ += other.value();
return *this;
}
ValueUnit&
operator-=(ValueUnit const& other)
{
value_ -= other.value();
return *this;
}
ValueUnit&
operator++()
{
++value_;
return *this;
}
ValueUnit&
operator--()
{
--value_;
return *this;
}
ValueUnit&
operator*=(value_type const& rhs)
{
value_ *= rhs;
return *this;
}
ValueUnit&
operator/=(value_type const& rhs)
{
value_ /= rhs;
return *this;
}
template <Integral transparent = value_type>
ValueUnit&
operator%=(value_type const& rhs)
{
value_ %= rhs;
return *this;
}
ValueUnit
operator-() const
{
static_assert(
std::is_signed_v<T>, "- operator illegal on unsigned value types");
return ValueUnit{-value_};
}
constexpr bool
operator==(ValueUnit const& other) const
{
return value_ == other.value_;
}
template <Compatible<ValueUnit> Other>
constexpr bool
operator==(ValueUnit<unit_type, Other> const& other) const
{
return value_ == other.value();
}
constexpr bool
operator==(value_type other) const
{
return value_ == other;
}
template <Compatible<ValueUnit> Other>
constexpr bool
operator!=(ValueUnit<unit_type, Other> const& other) const
{
return !operator==(other);
}
constexpr bool
operator<(ValueUnit const& other) const
{
return value_ < other.value_;
}
/** Returns true if the amount is not zero */
explicit constexpr
operator bool() const noexcept
{
return value_ != 0;
}
/** Return the sign of the amount */
constexpr int
signum() const noexcept
{
return (value_ < 0) ? -1 : (value_ ? 1 : 0);
}
/** Returns the number of drops */
// TODO: Move this to a new class, maybe with the old "TaggedFee" name
constexpr value_type
fee() const
{
return value_;
}
template <class Other>
constexpr double
decimalFromReference(ValueUnit<unit_type, Other> reference) const
{
return static_cast<double>(value_) / reference.value();
}
// `Usable` is checked to ensure that only values with
// known valid type tags can be converted to JSON. At the time
// of implementation, that includes all known tags, but more may
// be added in the future.
Json::Value
jsonClipped() const
requires Usable<ValueUnit>
{
if constexpr (std::is_integral_v<value_type>)
{
using jsontype = std::conditional_t<
std::is_signed_v<value_type>,
Json::Int,
Json::UInt>;
constexpr auto min = std::numeric_limits<jsontype>::min();
constexpr auto max = std::numeric_limits<jsontype>::max();
if (value_ < min)
return min;
if (value_ > max)
return max;
return static_cast<jsontype>(value_);
}
else
{
return value_;
}
}
/** Returns the underlying value. Code SHOULD NOT call this
function unless the type has been abstracted away,
e.g. in a templated function.
*/
constexpr value_type
value() const
{
return value_;
}
friend std::istream&
operator>>(std::istream& s, ValueUnit& val)
{
s >> val.value_;
return s;
}
};
// Output Values as just their numeric value.
template <class Char, class Traits, class UnitTag, class T>
std::basic_ostream<Char, Traits>&
operator<<(std::basic_ostream<Char, Traits>& os, ValueUnit<UnitTag, T> const& q)
{
return os << q.value();
}
template <class UnitTag, class T>
std::string
to_string(ValueUnit<UnitTag, T> const& amount)
{
return std::to_string(amount.value());
}
template <class Source>
concept muldivSource = Valid<Source> &&
std::is_convertible_v<typename Source::value_type, std::uint64_t>;
template <class Dest>
concept muldivDest = muldivSource<Dest> && // Dest is also a source
std::is_convertible_v<std::uint64_t, typename Dest::value_type> &&
sizeof(typename Dest::value_type) >= sizeof(std::uint64_t);
template <class Source2, class Source1>
concept muldivSources = muldivSource<Source1> && muldivSource<Source2> &&
std::is_same_v<typename Source1::unit_type, typename Source2::unit_type>;
template <class Dest, class Source1, class Source2>
concept muldivable = muldivSources<Source1, Source2> && muldivDest<Dest>;
// Source and Dest can be the same by default
template <class Dest, class Source1, class Source2>
concept muldivCommutable = muldivable<Dest, Source1, Source2> &&
!std::is_same_v<typename Source1::unit_type, typename Dest::unit_type>;
template <class T>
ValueUnit<unitlessTag, T>
scalar(T value)
{
return ValueUnit<unitlessTag, T>{value};
}
template <class Source1, class Source2, unit::muldivable<Source1, Source2> Dest>
std::optional<Dest>
mulDivU(Source1 value, Dest mul, Source2 div)
{
// values can never be negative in any context.
if (value.value() < 0 || mul.value() < 0 || div.value() < 0)
{
// split the asserts so if one hits, the user can tell which
// without a debugger.
XRPL_ASSERT(
value.value() >= 0, "ripple::unit::mulDivU : minimum value input");
XRPL_ASSERT(
mul.value() >= 0, "ripple::unit::mulDivU : minimum mul input");
XRPL_ASSERT(
div.value() > 0, "ripple::unit::mulDivU : minimum div input");
return std::nullopt;
}
using desttype = typename Dest::value_type;
constexpr auto max = std::numeric_limits<desttype>::max();
// Shortcuts, since these happen a lot in the real world
if (value == div)
return mul;
if (mul.value() == div.value())
{
if (value.value() > max)
return std::nullopt;
return Dest{static_cast<desttype>(value.value())};
}
using namespace boost::multiprecision;
uint128_t product;
product = multiply(
product,
static_cast<std::uint64_t>(value.value()),
static_cast<std::uint64_t>(mul.value()));
auto quotient = product / div.value();
if (quotient > max)
return std::nullopt;
return Dest{static_cast<desttype>(quotient)};
}
} // namespace unit
// Fee Levels
template <class T>
using FeeLevel = unit::ValueUnit<unit::feelevelTag, T>;
using FeeLevel64 = FeeLevel<std::uint64_t>;
using FeeLevelDouble = FeeLevel<double>;
// Basis points (Bips)
template <class T>
using Bips = unit::ValueUnit<unit::BipsTag, T>;
using Bips16 = Bips<std::uint16_t>;
using Bips32 = Bips<std::uint32_t>;
template <class T>
using TenthBips = unit::ValueUnit<unit::TenthBipsTag, T>;
using TenthBips16 = TenthBips<std::uint16_t>;
using TenthBips32 = TenthBips<std::uint32_t>;
template <class Source1, class Source2, unit::muldivable<Source1, Source2> Dest>
std::optional<Dest>
mulDiv(Source1 value, Dest mul, Source2 div)
{
return unit::mulDivU(value, mul, div);
}
template <
class Source1,
class Source2,
unit::muldivCommutable<Source1, Source2> Dest>
std::optional<Dest>
mulDiv(Dest value, Source1 mul, Source2 div)
{
// Multiplication is commutative
return unit::mulDivU(mul, value, div);
}
template <unit::muldivDest Dest>
std::optional<Dest>
mulDiv(std::uint64_t value, Dest mul, std::uint64_t div)
{
// Give the scalars a dimensionless unit so the
// unit-handling version gets called.
return unit::mulDivU(unit::scalar(value), mul, unit::scalar(div));
}
template <unit::muldivDest Dest>
std::optional<Dest>
mulDiv(Dest value, std::uint64_t mul, std::uint64_t div)
{
// Multiplication is commutative
return mulDiv(mul, value, div);
}
template <unit::muldivSource Source1, unit::muldivSources<Source1> Source2>
std::optional<std::uint64_t>
mulDiv(Source1 value, std::uint64_t mul, Source2 div)
{
// Give the scalars a dimensionless unit so the
// unit-handling version gets called.
auto unitresult = unit::mulDivU(value, unit::scalar(mul), div);
if (!unitresult)
return std::nullopt;
return unitresult->value();
}
template <unit::muldivSource Source1, unit::muldivSources<Source1> Source2>
std::optional<std::uint64_t>
mulDiv(std::uint64_t value, Source1 mul, Source2 div)
{
// Multiplication is commutative
return mulDiv(mul, value, div);
}
template <unit::IntegralValue Dest, unit::CastableValue<Dest> Src>
constexpr Dest
safe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{safe_cast<typename Dest::value_type>(s.value())};
}
template <unit::IntegralValue Dest, unit::Integral Src>
constexpr Dest
safe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{safe_cast<typename Dest::value_type>(s)};
}
template <unit::IntegralValue Dest, unit::CastableValue<Dest> Src>
constexpr Dest
unsafe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{unsafe_cast<typename Dest::value_type>(s.value())};
}
template <unit::IntegralValue Dest, unit::Integral Src>
constexpr Dest
unsafe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{unsafe_cast<typename Dest::value_type>(s)};
}
} // namespace ripple
#endif // PROTOCOL_UNITS_H_INCLUDED
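
A minimal usage sketch of the tagged types and helpers defined above (illustrative only, not part of the diff; the function name `example` is invented): mixing unit tags fails to compile, widening within the same tag is implicit when the cast is safe, and mulDiv checks for overflow through a 128-bit intermediate product.

#include <xrpl/protocol/Units.h>

#include <optional>

void
example()
{
    using namespace ripple;

    FeeLevel64 level{100};
    FeeLevel64 doubled = level * 2;  // scalar arithmetic keeps the unit tag
    Bips32 wide = Bips16{30};        // implicit widening: same tag, safe cast
    // level + Bips16{30};           // would not compile: different unit tags

    // Overflow-checked (value * mul) / div, here (100 * 512) / 256 == 200:
    std::optional<FeeLevel64> scaled =
        mulDiv(level, FeeLevel64{512}, FeeLevel64{256});
}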

View File

@@ -24,7 +24,7 @@
#include <xrpl/basics/contract.h>
#include <xrpl/beast/utility/Zero.h>
#include <xrpl/json/json_value.h>
#include <xrpl/protocol/Units.h>
#include <xrpl/protocol/FeeUnits.h>
#include <boost/multiprecision/cpp_int.hpp>
#include <boost/operators.hpp>
@@ -42,7 +42,7 @@ class XRPAmount : private boost::totally_ordered<XRPAmount>,
private boost::additive<XRPAmount, std::int64_t>
{
public:
using unit_type = unit::dropTag;
using unit_type = feeunit::dropTag;
using value_type = std::int64_t;
private:

View File

@@ -129,12 +129,10 @@ inplace_bigint_div_rem(std::span<uint64_t> numerator, std::uint64_t divisor)
{
// should never happen, but if it does then it seems natural to define
// a null set of numbers to be zero, so the remainder is also zero.
// LCOV_EXCL_START
UNREACHABLE(
"ripple::b58_fast::detail::inplace_bigint_div_rem : empty "
"numerator");
return 0;
// LCOV_EXCL_STOP
}
auto to_u128 = [](std::uint64_t high,

View File

@@ -27,27 +27,21 @@
#error "undefined macro: XRPL_RETIRE"
#endif
// clang-format off
// Add new amendments to the top of this list.
// Keep it sorted in reverse chronological order.
// If you add an amendment here, then do not forget to increment `numFeatures`
// in include/xrpl/protocol/Feature.h.
XRPL_FEATURE(LendingProtocol, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (DirectoryLimit, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (IncludeKeyletFields, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (PriceOracleOrder, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegation, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
// Check flags in Credential transactions
XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo)
@@ -158,5 +152,3 @@ XRPL_RETIRE(fix1512)
XRPL_RETIRE(fix1523)
XRPL_RETIRE(fix1528)
XRPL_RETIRE(FlowCross)
// clang-format on
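
As the comment in this file notes, new amendments go at the top of the list, and `numFeatures` in include/xrpl/protocol/Feature.h must be incremented to match. A hypothetical entry (not part of this diff) would look like:

XRPL_FEATURE(MyHypotheticalFeature, Supported::no, VoteBehavior::DefaultNo)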

View File

@@ -120,7 +120,6 @@ LEDGER_ENTRY(ltNFTOKEN_PAGE, 0x0050, NFTokenPage, nft_page, ({
// All fields are soeREQUIRED because there is always a SignerEntries.
// If there are no SignerEntries the node is deleted.
LEDGER_ENTRY(ltSIGNER_LIST, 0x0053, SignerList, signer_list, ({
{sfOwner, soeOPTIONAL},
{sfOwnerNode, soeREQUIRED},
{sfSignerQuorum, soeREQUIRED},
{sfSignerEntries, soeREQUIRED},
@@ -168,7 +167,6 @@ LEDGER_ENTRY(ltACCOUNT_ROOT, 0x0061, AccountRoot, account, ({
{sfFirstNFTokenSequence, soeOPTIONAL},
{sfAMMID, soeOPTIONAL}, // pseudo-account designator
{sfVaultID, soeOPTIONAL}, // pseudo-account designator
{sfLoanBrokerID, soeOPTIONAL}, // pseudo-account designator
}))
/** A ledger object which contains a list of object identifiers.
@@ -190,7 +188,7 @@ LEDGER_ENTRY(ltDIR_NODE, 0x0064, DirectoryNode, directory, ({
{sfNFTokenID, soeOPTIONAL},
{sfPreviousTxnID, soeOPTIONAL},
{sfPreviousTxnLgrSeq, soeOPTIONAL},
{sfDomainID, soeOPTIONAL} // order book directories
{sfDomainID, soeOPTIONAL}
}))
/** The ledger object which lists details about amendments on the network.
@@ -345,7 +343,6 @@ LEDGER_ENTRY(ltXCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID, 0x0074, XChainOwnedCreateAc
*/
LEDGER_ENTRY(ltESCROW, 0x0075, Escrow, escrow, ({
{sfAccount, soeREQUIRED},
{sfSequence, soeOPTIONAL},
{sfDestination, soeREQUIRED},
{sfAmount, soeREQUIRED},
{sfCondition, soeOPTIONAL},
@@ -368,7 +365,6 @@ LEDGER_ENTRY(ltESCROW, 0x0075, Escrow, escrow, ({
LEDGER_ENTRY(ltPAYCHAN, 0x0078, PayChannel, payment_channel, ({
{sfAccount, soeREQUIRED},
{sfDestination, soeREQUIRED},
{sfSequence, soeOPTIONAL},
{sfAmount, soeREQUIRED},
{sfBalance, soeREQUIRED},
{sfPublicKey, soeREQUIRED},
@@ -416,7 +412,6 @@ LEDGER_ENTRY(ltMPTOKEN_ISSUANCE, 0x007e, MPTokenIssuance, mpt_issuance, ({
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfDomainID, soeOPTIONAL},
{sfMutableFlags, soeDEFAULT},
}))
/** A ledger object which tracks MPToken
@@ -437,7 +432,6 @@ LEDGER_ENTRY(ltMPTOKEN, 0x007f, MPToken, mptoken, ({
*/
LEDGER_ENTRY(ltORACLE, 0x0080, Oracle, oracle, ({
{sfOwner, soeREQUIRED},
{sfOracleDocumentID, soeOPTIONAL},
{sfProvider, soeREQUIRED},
{sfPriceDataSeries, soeREQUIRED},
{sfAssetClass, soeREQUIRED},
@@ -458,7 +452,7 @@ LEDGER_ENTRY(ltCREDENTIAL, 0x0081, Credential, credential, ({
{sfExpiration, soeOPTIONAL},
{sfURI, soeOPTIONAL},
{sfIssuerNode, soeREQUIRED},
{sfSubjectNode, soeOPTIONAL},
{sfSubjectNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
@@ -499,128 +493,15 @@ LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({
{sfAccount, soeREQUIRED},
{sfData, soeOPTIONAL},
{sfAsset, soeREQUIRED},
{sfAssetsTotal, soeDEFAULT},
{sfAssetsAvailable, soeDEFAULT},
{sfAssetsTotal, soeREQUIRED},
{sfAssetsAvailable, soeREQUIRED},
{sfAssetsMaximum, soeDEFAULT},
{sfLossUnrealized, soeDEFAULT},
{sfLossUnrealized, soeREQUIRED},
{sfShareMPTID, soeREQUIRED},
{sfWithdrawalPolicy, soeREQUIRED},
{sfScale, soeDEFAULT},
// no SharesTotal ever (use MPTIssuance.sfOutstandingAmount)
// no PermissionedDomainID ever (use MPTIssuance.sfDomainID)
}))
/** Reserve 0x0084-0x0087 for future Vault-related objects. */
/** A ledger object representing a loan broker
\sa keylet::loanbroker
*/
LEDGER_ENTRY(ltLOAN_BROKER, 0x0088, LoanBroker, loan_broker, ({
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfSequence, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfVaultNode, soeREQUIRED},
{sfVaultID, soeREQUIRED},
{sfAccount, soeREQUIRED},
{sfOwner, soeREQUIRED},
{sfLoanSequence, soeREQUIRED},
{sfData, soeDEFAULT},
{sfManagementFeeRate, soeDEFAULT},
{sfOwnerCount, soeDEFAULT},
{sfDebtTotal, soeDEFAULT},
{sfDebtMaximum, soeDEFAULT},
{sfCoverAvailable, soeDEFAULT},
{sfCoverRateMinimum, soeDEFAULT},
{sfCoverRateLiquidation, soeDEFAULT},
}))
/** A ledger object representing a loan between a Borrower and a Loan Broker
\sa keylet::loan
*/
LEDGER_ENTRY(ltLOAN, 0x0089, Loan, loan, ({
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfLoanBrokerNode, soeREQUIRED},
{sfLoanBrokerID, soeREQUIRED},
{sfLoanSequence, soeREQUIRED},
{sfBorrower, soeREQUIRED},
{sfLoanOriginationFee, soeDEFAULT},
{sfLoanServiceFee, soeDEFAULT},
{sfLatePaymentFee, soeDEFAULT},
{sfClosePaymentFee, soeDEFAULT},
{sfOverpaymentFee, soeDEFAULT},
{sfInterestRate, soeDEFAULT},
{sfLateInterestRate, soeDEFAULT},
{sfCloseInterestRate, soeDEFAULT},
{sfOverpaymentInterestRate, soeDEFAULT},
{sfStartDate, soeREQUIRED},
{sfPaymentInterval, soeREQUIRED},
{sfGracePeriod, soeDEFAULT},
{sfPreviousPaymentDate, soeDEFAULT},
{sfNextPaymentDueDate, soeDEFAULT},
// The loan object tracks these values:
//
// - PaymentRemaining: The number of payments left in the loan. When it
// reaches 0, the loan is paid off, and all other relevant values
// must also be 0.
//
// - PeriodicPayment: The fixed, unrounded amount to be paid each
// interval. Stored with as much precision as possible.
// Payment transactions must round this value *UP*.
//
// - TotalValueOutstanding: The rounded total amount owed by the
// borrower to the lender / vault.
//
// - PrincipalOutstanding: The rounded portion of the
// TotalValueOutstanding that is from the principal borrowed.
//
// - ManagementFeeOutstanding: The rounded portion of the
// TotalValueOutstanding that represents management fees
// specifically owed to the broker based on the initial
// loan parameters.
//
// There are additional values that can be computed from these:
//
// - InterestOutstanding = TotalValueOutstanding - PrincipalOutstanding
// The total amount of interest still pending on the loan,
// independent of management fees.
//
// - InterestOwedToVault = InterestOutstanding - ManagementFeeOutstanding
// The amount of the total interest that is owed to the vault, and
// will be sent to it as part of a payment.
//
// - TrueTotalLoanValue = PaymentRemaining * PeriodicPayment
// The unrounded true total value of the loan.
//
// - TrueTotalPrincipalOutstanding can be computed using the algorithm
// in the ripple::detail::loanPrincipalFromPeriodicPayment function.
//
// - TrueTotalInterestOutstanding = TrueTotalLoanValue -
// TrueTotalPrincipalOutstanding
// The unrounded true total interest remaining.
//
// - TrueTotalManagementFeeOutstanding = TrueTotalInterestOutstanding *
// LoanBroker.ManagementFeeRate
// The unrounded true total fee still owed to the broker.
//
// Note that the "True" values may differ significantly from the tracked
// rounded values.
{sfPaymentRemaining, soeDEFAULT},
{sfPeriodicPayment, soeREQUIRED},
{sfPrincipalOutstanding, soeDEFAULT},
{sfTotalValueOutstanding, soeDEFAULT},
{sfManagementFeeOutstanding, soeDEFAULT},
// Based on the computed total value at creation, used for
// rounding calculated values so they are all on a
// consistent scale - that is, they all have the same
// number of digits after the decimal point (excluding
// trailing zeros).
{sfLoanScale, soeDEFAULT},
}))
#undef EXPAND
#undef LEDGER_ENTRY_DUPLICATE

View File

@@ -24,8 +24,6 @@
#error "undefined macro: TYPED_SFIELD"
#endif
// clang-format off
// untyped
UNTYPED_SFIELD(sfLedgerEntry, LEDGERENTRY, 257)
UNTYPED_SFIELD(sfTransaction, TRANSACTION, 257)
@@ -61,7 +59,6 @@ TYPED_SFIELD(sfHookEmitCount, UINT16, 18)
TYPED_SFIELD(sfHookExecutionIndex, UINT16, 19)
TYPED_SFIELD(sfHookApiVersion, UINT16, 20)
TYPED_SFIELD(sfLedgerFixType, UINT16, 21)
TYPED_SFIELD(sfManagementFeeRate, UINT16, 22) // 1/10 basis points (bips)
// 32-bit integers (common)
TYPED_SFIELD(sfNetworkID, UINT32, 1)
@@ -117,22 +114,6 @@ TYPED_SFIELD(sfVoteWeight, UINT32, 48)
TYPED_SFIELD(sfFirstNFTokenSequence, UINT32, 50)
TYPED_SFIELD(sfOracleDocumentID, UINT32, 51)
TYPED_SFIELD(sfPermissionValue, UINT32, 52)
TYPED_SFIELD(sfMutableFlags, UINT32, 53)
TYPED_SFIELD(sfStartDate, UINT32, 54)
TYPED_SFIELD(sfPaymentInterval, UINT32, 55)
TYPED_SFIELD(sfGracePeriod, UINT32, 56)
TYPED_SFIELD(sfPreviousPaymentDate, UINT32, 57)
TYPED_SFIELD(sfNextPaymentDueDate, UINT32, 58)
TYPED_SFIELD(sfPaymentRemaining, UINT32, 59)
TYPED_SFIELD(sfPaymentTotal, UINT32, 60)
TYPED_SFIELD(sfLoanSequence, UINT32, 61)
TYPED_SFIELD(sfCoverRateMinimum, UINT32, 62) // 1/10 basis points (bips)
TYPED_SFIELD(sfCoverRateLiquidation, UINT32, 63) // 1/10 basis points (bips)
TYPED_SFIELD(sfOverpaymentFee, UINT32, 64) // 1/10 basis points (bips)
TYPED_SFIELD(sfInterestRate, UINT32, 65) // 1/10 basis points (bips)
TYPED_SFIELD(sfLateInterestRate, UINT32, 66) // 1/10 basis points (bips)
TYPED_SFIELD(sfCloseInterestRate, UINT32, 67) // 1/10 basis points (bips)
TYPED_SFIELD(sfOverpaymentInterestRate, UINT32, 68) // 1/10 basis points (bips)
// 64-bit integers (common)
TYPED_SFIELD(sfIndexNext, UINT64, 1)
@@ -164,8 +145,6 @@ TYPED_SFIELD(sfMPTAmount, UINT64, 26, SField::sMD_BaseTen|SFie
TYPED_SFIELD(sfIssuerNode, UINT64, 27)
TYPED_SFIELD(sfSubjectNode, UINT64, 28)
TYPED_SFIELD(sfLockedAmount, UINT64, 29, SField::sMD_BaseTen|SField::sMD_Default)
TYPED_SFIELD(sfVaultNode, UINT64, 30)
TYPED_SFIELD(sfLoanBrokerNode, UINT64, 31)
// 128-bit
TYPED_SFIELD(sfEmailHash, UINT128, 1)
@@ -194,8 +173,7 @@ TYPED_SFIELD(sfNFTokenID, UINT256, 10)
TYPED_SFIELD(sfEmitParentTxnID, UINT256, 11)
TYPED_SFIELD(sfEmitNonce, UINT256, 12)
TYPED_SFIELD(sfEmitHookHash, UINT256, 13)
TYPED_SFIELD(sfAMMID, UINT256, 14,
SField::sMD_PseudoAccount | SField::sMD_Default)
TYPED_SFIELD(sfAMMID, UINT256, 14)
// 256-bit (uncommon)
TYPED_SFIELD(sfBookDirectory, UINT256, 16)
@@ -217,12 +195,8 @@ TYPED_SFIELD(sfHookHash, UINT256, 31)
TYPED_SFIELD(sfHookNamespace, UINT256, 32)
TYPED_SFIELD(sfHookSetTxnID, UINT256, 33)
TYPED_SFIELD(sfDomainID, UINT256, 34)
TYPED_SFIELD(sfVaultID, UINT256, 35,
SField::sMD_PseudoAccount | SField::sMD_Default)
TYPED_SFIELD(sfVaultID, UINT256, 35)
TYPED_SFIELD(sfParentBatchID, UINT256, 36)
TYPED_SFIELD(sfLoanBrokerID, UINT256, 37,
SField::sMD_PseudoAccount | SField::sMD_Default)
TYPED_SFIELD(sfLoanID, UINT256, 38)
// number (common)
TYPED_SFIELD(sfNumber, NUMBER, 1)
@@ -230,21 +204,6 @@ TYPED_SFIELD(sfAssetsAvailable, NUMBER, 2)
TYPED_SFIELD(sfAssetsMaximum, NUMBER, 3)
TYPED_SFIELD(sfAssetsTotal, NUMBER, 4)
TYPED_SFIELD(sfLossUnrealized, NUMBER, 5)
TYPED_SFIELD(sfDebtTotal, NUMBER, 6)
TYPED_SFIELD(sfDebtMaximum, NUMBER, 7)
TYPED_SFIELD(sfCoverAvailable, NUMBER, 8)
TYPED_SFIELD(sfLoanOriginationFee, NUMBER, 9)
TYPED_SFIELD(sfLoanServiceFee, NUMBER, 10)
TYPED_SFIELD(sfLatePaymentFee, NUMBER, 11)
TYPED_SFIELD(sfClosePaymentFee, NUMBER, 12)
TYPED_SFIELD(sfPrincipalOutstanding, NUMBER, 13)
TYPED_SFIELD(sfPrincipalRequested, NUMBER, 14)
TYPED_SFIELD(sfTotalValueOutstanding, NUMBER, 15)
TYPED_SFIELD(sfPeriodicPayment, NUMBER, 16)
TYPED_SFIELD(sfManagementFeeOutstanding, NUMBER, 17)
// int32
TYPED_SFIELD(sfLoanScale, INT32, 1)
// currency amount (common)
TYPED_SFIELD(sfAmount, AMOUNT, 1)
@@ -340,8 +299,6 @@ TYPED_SFIELD(sfAttestationRewardAccount, ACCOUNT, 21)
TYPED_SFIELD(sfLockingChainDoor, ACCOUNT, 22)
TYPED_SFIELD(sfIssuingChainDoor, ACCOUNT, 23)
TYPED_SFIELD(sfSubject, ACCOUNT, 24)
TYPED_SFIELD(sfBorrower, ACCOUNT, 25)
TYPED_SFIELD(sfCounterparty, ACCOUNT, 26)
// vector of 256-bit
TYPED_SFIELD(sfIndexes, VECTOR256, 1, SField::sMD_Never)
@@ -405,7 +362,6 @@ UNTYPED_SFIELD(sfCredential, OBJECT, 33)
UNTYPED_SFIELD(sfRawTransaction, OBJECT, 34)
UNTYPED_SFIELD(sfBatchSigner, OBJECT, 35)
UNTYPED_SFIELD(sfBook, OBJECT, 36)
UNTYPED_SFIELD(sfCounterpartySignature, OBJECT, 37, SField::sMD_Default, SField::notSigning)
// array of objects (common)
// ARRAY/1 is reserved for end of array
@@ -440,5 +396,3 @@ UNTYPED_SFIELD(sfAcceptedCredentials, ARRAY, 28)
UNTYPED_SFIELD(sfPermissions, ARRAY, 29)
UNTYPED_SFIELD(sfRawTransactions, ARRAY, 30)
UNTYPED_SFIELD(sfBatchSigners, ARRAY, 31, SField::sMD_Default, SField::notSigning)
// clang-format on

File diff suppressed because it is too large

View File

@@ -59,8 +59,6 @@ JSS(BaseAsset); // in: Oracle
JSS(BidMax); // in: AMM Bid
JSS(BidMin); // in: AMM Bid
JSS(ClearFlag); // field.
JSS(Counterparty); // field.
JSS(CounterpartySignature);// field.
JSS(DeliverMax); // out: alias to Amount
JSS(DeliverMin); // in: TransactionSign
JSS(Destination); // in: TransactionSign; field.
@@ -394,8 +392,6 @@ JSS(load_factor_local); // out: NetworkOPs
JSS(load_factor_net); // out: NetworkOPs
JSS(load_factor_server); // out: NetworkOPs
JSS(load_fee); // out: LoadFeeTrackImp, NetworkOPs
JSS(loan_broker_id); // in: LedgerEntry
JSS(loan_seq); // in: LedgerEntry
JSS(local); // out: resource/Logic.h
JSS(local_txs); // out: GetCounts
JSS(local_static_keys); // out: ValidatorList
@@ -508,7 +504,6 @@ JSS(propose_seq); // out: LedgerPropose
JSS(proposers); // out: NetworkOPs, LedgerConsensus
JSS(protocol); // out: NetworkOPs, PeerImp
JSS(proxied); // out: RPC ping
JSS(pseudo_account); // out: AccountInfo
JSS(pubkey_node); // out: NetworkOPs
JSS(pubkey_publisher); // out: ValidatorList
JSS(pubkey_validator); // out: NetworkOPs, ValidatorList
@@ -574,7 +569,6 @@ JSS(settle_delay); // out: AccountChannels
JSS(severity); // in: LogLevel
JSS(shares); // out: VaultInfo
JSS(signature); // out: NetworkOPs, ChannelAuthorize
JSS(signature_target); // in: TransactionSign
JSS(signature_verified); // out: ChannelVerify
JSS(signing_key); // out: NetworkOPs
JSS(signing_keys); // out: ValidatorList
@@ -716,7 +710,7 @@ JSS(write_load); // out: GetCounts
#pragma push_macro("TRANSACTION")
#undef TRANSACTION
#define TRANSACTION(tag, value, name, ...) JSS(name);
#define TRANSACTION(tag, value, name, delegatable, fields) JSS(name);
#include <xrpl/protocol/detail/transactions.macro>
@@ -728,11 +722,11 @@ JSS(write_load); // out: GetCounts
#pragma push_macro("LEDGER_ENTRY_DUPLICATE")
#undef LEDGER_ENTRY_DUPLICATE
#define LEDGER_ENTRY(tag, value, name, rpcName, ...) \
JSS(name); \
#define LEDGER_ENTRY(tag, value, name, rpcName, fields) \
JSS(name); \
JSS(rpcName);
#define LEDGER_ENTRY_DUPLICATE(tag, value, name, rpcName, ...) JSS(rpcName);
#define LEDGER_ENTRY_DUPLICATE(tag, value, name, rpcName, fields) JSS(rpcName);
#include <xrpl/protocol/detail/ledger_entries.macro>

View File

@@ -436,12 +436,10 @@ public:
admin_.erase(admin_.iterator_to(entry));
break;
default:
// LCOV_EXCL_START
UNREACHABLE(
"ripple::Resource::Logic::release : invalid entry "
"kind");
break;
// LCOV_EXCL_STOP
}
inactive_.push_back(entry);
entry.whenExpires = m_clock.now() + secondsUntilExpiration;

View File

@@ -25,7 +25,7 @@
#include <xrpl/server/Port.h>
#include <xrpl/server/detail/ServerImpl.h>
#include <boost/asio/io_service.hpp>
#include <boost/asio/io_context.hpp>
namespace ripple {
@@ -34,10 +34,10 @@ template <class Handler>
std::unique_ptr<Server>
make_Server(
Handler& handler,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
beast::Journal journal)
{
return std::make_unique<ServerImpl<Handler>>(handler, io_service, journal);
return std::make_unique<ServerImpl<Handler>>(handler, io_context, journal);
}
} // namespace ripple

View File

@@ -88,9 +88,7 @@ public:
++iter)
{
typename BufferSequence::value_type const& buffer(*iter);
write(
boost::asio::buffer_cast<void const*>(buffer),
boost::asio::buffer_size(buffer));
write(buffer.data(), boost::asio::buffer_size(buffer));
}
}
@@ -104,7 +102,7 @@ public:
/** Detach the session.
This holds the session open so that the response can be sent
asynchronously. Calls to io_service::run made by the server
asynchronously. Calls to io_context::run made by the server
will not return until all detached sessions are closed.
*/
virtual std::shared_ptr<Session>

View File

@@ -24,11 +24,13 @@
#include <xrpl/beast/net/IPAddressConversion.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/server/Session.h>
#include <xrpl/server/detail/Spawn.h>
#include <xrpl/server/detail/io_list.h>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/ssl/stream.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/streambuf.hpp>
#include <boost/beast/core/stream_traits.hpp>
#include <boost/beast/http/dynamic_body.hpp>
@@ -215,8 +217,8 @@ BaseHTTPPeer<Handler, Impl>::BaseHTTPPeer(
ConstBufferSequence const& buffers)
: port_(port)
, handler_(handler)
, work_(executor)
, strand_(executor)
, work_(boost::asio::make_work_guard(executor))
, strand_(boost::asio::make_strand(executor))
, remote_address_(remote_address)
, journal_(journal)
{
@@ -356,7 +358,7 @@ BaseHTTPPeer<Handler, Impl>::on_write(
return;
if (graceful_)
return do_close();
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_read,
@@ -375,7 +377,7 @@ BaseHTTPPeer<Handler, Impl>::do_writer(
{
auto const p = impl().shared_from_this();
resume = std::function<void(void)>([this, p, writer, keep_alive]() {
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_writer,
@@ -406,7 +408,7 @@ BaseHTTPPeer<Handler, Impl>::do_writer(
if (!keep_alive)
return do_close();
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_read,
@@ -448,14 +450,14 @@ BaseHTTPPeer<Handler, Impl>::write(
std::shared_ptr<Writer> const& writer,
bool keep_alive)
{
boost::asio::spawn(bind_executor(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_writer,
impl().shared_from_this(),
writer,
keep_alive,
std::placeholders::_1)));
std::placeholders::_1));
}
// DEPRECATED
@@ -490,12 +492,12 @@ BaseHTTPPeer<Handler, Impl>::complete()
}
// keep-alive
boost::asio::spawn(bind_executor(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_read,
impl().shared_from_this(),
std::placeholders::_1)));
std::placeholders::_1));
}
// DEPRECATED

View File

@@ -91,8 +91,8 @@ BasePeer<Handler, Impl>::BasePeer(
return "##" + std::to_string(++id) + " ";
}())
, j_(sink_)
, work_(executor)
, strand_(executor)
, work_(boost::asio::make_work_guard(executor))
, strand_(boost::asio::make_strand(executor))
{
}

View File

@@ -29,6 +29,7 @@
#include <xrpl/server/detail/BasePeer.h>
#include <xrpl/server/detail/LowestLayer.h>
#include <boost/asio/error.hpp>
#include <boost/beast/core/multi_buffer.hpp>
#include <boost/beast/http/message.hpp>
#include <boost/beast/websocket.hpp>
@@ -420,11 +421,17 @@ BaseWSPeer<Handler, Impl>::start_timer()
// Max seconds without completing a message
static constexpr std::chrono::seconds timeout{30};
static constexpr std::chrono::seconds timeoutLocal{3};
error_code ec;
timer_.expires_from_now(
remote_endpoint().address().is_loopback() ? timeoutLocal : timeout, ec);
if (ec)
return fail(ec, "start_timer");
try
{
timer_.expires_after(
remote_endpoint().address().is_loopback() ? timeoutLocal : timeout);
}
catch (boost::system::system_error const& e)
{
return fail(e.code(), "start_timer");
}
timer_.async_wait(bind_executor(
strand_,
std::bind(
@@ -438,8 +445,14 @@ template <class Handler, class Impl>
void
BaseWSPeer<Handler, Impl>::cancel_timer()
{
error_code ec;
timer_.cancel(ec);
try
{
timer_.cancel();
}
catch (boost::system::system_error const&)
{
// ignored
}
}
template <class Handler, class Impl>

View File

@@ -69,7 +69,7 @@ private:
stream_type stream_;
socket_type& socket_;
endpoint_type remote_address_;
boost::asio::io_context::strand strand_;
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
beast::Journal const j_;
public:
@@ -95,7 +95,7 @@ private:
Handler& handler_;
boost::asio::io_context& ioc_;
acceptor_type acceptor_;
boost::asio::io_context::strand strand_;
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
bool ssl_;
bool plain_;
@@ -155,7 +155,7 @@ Door<Handler>::Detector::Detector(
, stream_(std::move(stream))
, socket_(stream_.socket())
, remote_address_(remote_address)
, strand_(ioc_)
, strand_(boost::asio::make_strand(ioc_))
, j_(j)
{
}
@@ -164,7 +164,7 @@ template <class Handler>
void
Door<Handler>::Detector::run()
{
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&Detector::do_detect,
@@ -269,7 +269,7 @@ Door<Handler>::reOpen()
Throw<std::exception>();
}
acceptor_.listen(boost::asio::socket_base::max_connections, ec);
acceptor_.listen(boost::asio::socket_base::max_listen_connections, ec);
if (ec)
{
JLOG(j_.error()) << "Listen on port '" << port_.name
@@ -291,7 +291,7 @@ Door<Handler>::Door(
, handler_(handler)
, ioc_(io_context)
, acceptor_(io_context)
, strand_(io_context)
, strand_(boost::asio::make_strand(io_context))
, ssl_(
port_.protocol.count("https") > 0 ||
port_.protocol.count("wss") > 0 || port_.protocol.count("wss2") > 0 ||
@@ -307,7 +307,7 @@ template <class Handler>
void
Door<Handler>::run()
{
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&Door<Handler>::do_accept,
@@ -320,7 +320,8 @@ void
Door<Handler>::close()
{
if (!strand_.running_in_this_thread())
return strand_.post(
return boost::asio::post(
strand_,
std::bind(&Door<Handler>::close, this->shared_from_this()));
error_code ec;
acceptor_.close(ec);
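
Every strand change in this file follows the same migration pattern, sketched below as a standalone Asio snippet (not rippled code): the deprecated io_context::strand member type is replaced by boost::asio::strand<io_context::executor_type> obtained from make_strand, and strand_.post(f) becomes the free function boost::asio::post(strand_, f).

#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/strand.hpp>

int
main()
{
    boost::asio::io_context ioc;
    auto strand = boost::asio::make_strand(ioc);  // strand<executor_type>
    boost::asio::post(strand, [] {
        // handlers posted through the strand never run concurrently
    });
    ioc.run();
}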

View File

@@ -105,7 +105,7 @@ PlainHTTPPeer<Handler>::run()
{
if (!this->handler_.onAccept(this->session(), this->remote_address_))
{
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(&PlainHTTPPeer::do_close, this->shared_from_this()));
return;
@@ -114,7 +114,7 @@ PlainHTTPPeer<Handler>::run()
if (!socket_.is_open())
return;
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(
&PlainHTTPPeer::do_read,

View File

@@ -115,14 +115,14 @@ SSLHTTPPeer<Handler>::run()
{
if (!this->handler_.onAccept(this->session(), this->remote_address_))
{
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(&SSLHTTPPeer::do_close, this->shared_from_this()));
return;
}
if (!socket_.is_open())
return;
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(
&SSLHTTPPeer::do_handshake,
@@ -164,7 +164,7 @@ SSLHTTPPeer<Handler>::do_handshake(yield_context do_yield)
this->port().protocol.count("https") > 0;
if (http)
{
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(
&SSLHTTPPeer::do_read,

View File

@@ -26,6 +26,8 @@
#include <xrpl/server/detail/io_list.h>
#include <boost/asio.hpp>
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <array>
#include <chrono>
@@ -85,9 +87,11 @@ private:
Handler& handler_;
beast::Journal const j_;
boost::asio::io_service& io_service_;
boost::asio::io_service::strand strand_;
std::optional<boost::asio::io_service::work> work_;
boost::asio::io_context& io_context_;
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
std::optional<boost::asio::executor_work_guard<
boost::asio::io_context::executor_type>>
work_;
std::mutex m_;
std::vector<Port> ports_;
@@ -100,7 +104,7 @@ private:
public:
ServerImpl(
Handler& handler,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
beast::Journal journal);
~ServerImpl();
@@ -123,10 +127,10 @@ public:
return ios_;
}
boost::asio::io_service&
get_io_service()
boost::asio::io_context&
get_io_context()
{
return io_service_;
return io_context_;
}
bool
@@ -140,13 +144,13 @@ private:
template <class Handler>
ServerImpl<Handler>::ServerImpl(
Handler& handler,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
beast::Journal journal)
: handler_(handler)
, j_(journal)
, io_service_(io_service)
, strand_(io_service_)
, work_(io_service_)
, io_context_(io_context)
, strand_(boost::asio::make_strand(io_context_))
, work_(std::in_place, boost::asio::make_work_guard(io_context_))
{
}
@@ -173,7 +177,7 @@ ServerImpl<Handler>::ports(std::vector<Port> const& ports)
ports_.push_back(port);
auto& internalPort = ports_.back();
if (auto sp = ios_.emplace<Door<Handler>>(
handler_, io_service_, internalPort, j_))
handler_, io_context_, internalPort, j_))
{
list_.push_back(sp);
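
The work_ member above shows the replacement for the removed io_service::work: an executor_work_guard held in a std::optional, so shutdown code can release it explicitly. A minimal standalone sketch of the pattern:

#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>

#include <optional>
#include <utility>

int
main()
{
    boost::asio::io_context ioc;
    // While the guard is alive, ioc.run() will not return for lack of work.
    std::optional<boost::asio::executor_work_guard<
        boost::asio::io_context::executor_type>>
        work{std::in_place, boost::asio::make_work_guard(ioc)};
    work.reset();  // release the guard; run() may return once idle
    ioc.run();
}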

View File

@@ -0,0 +1,108 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SERVER_SPAWN_H_INCLUDED
#define RIPPLE_SERVER_SPAWN_H_INCLUDED
#include <xrpl/basics/Log.h>
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>
#include <concepts>
#include <type_traits>
namespace ripple::util {
namespace impl {
template <typename T>
concept IsStrand = std::same_as<
std::decay_t<T>,
boost::asio::strand<typename std::decay_t<T>::inner_executor_type>>;
/**
* @brief A completion handler that restores `boost::asio::spawn`'s behaviour
* from Boost 1.83
*
* This is intended to be passed as the third argument to `boost::asio::spawn`
* so that exceptions are not ignored but propagated to the `io_context.run()`
* call site.
*
* @param ePtr The exception that was caught on the coroutine
*/
inline constexpr auto kPROPAGATE_EXCEPTIONS = [](std::exception_ptr ePtr) {
if (ePtr)
{
try
{
std::rethrow_exception(ePtr);
}
catch (std::exception const& e)
{
JLOG(debugLog().warn()) << "Spawn exception: " << e.what();
throw;
}
catch (...)
{
JLOG(debugLog().warn()) << "Spawn exception: Unknown";
throw;
}
}
};
} // namespace impl
/**
* @brief Spawns a coroutine using `boost::asio::spawn`
*
* @note This uses kPROPAGATE_EXCEPTIONS to force asio to propagate exceptions
* through `io_context`
* @note Since the implicit strand was removed from boost::asio::spawn, this
* helper function adds the strand back
*
* @tparam Ctx The type of the context/strand
* @tparam F The type of the function to execute
* @param ctx The execution context
* @param func The function to execute. Must return `void`
*/
template <typename Ctx, typename F>
requires std::is_invocable_r_v<void, F, boost::asio::yield_context>
void
spawn(Ctx&& ctx, F&& func)
{
if constexpr (impl::IsStrand<Ctx>)
{
boost::asio::spawn(
std::forward<Ctx>(ctx),
std::forward<F>(func),
impl::kPROPAGATE_EXCEPTIONS);
}
else
{
boost::asio::spawn(
boost::asio::make_strand(
boost::asio::get_associated_executor(std::forward<Ctx>(ctx))),
std::forward<F>(func),
impl::kPROPAGATE_EXCEPTIONS);
}
}
} // namespace ripple::util
#endif
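
A usage sketch for the helper (the surrounding main is invented): the coroutine's exception is logged and then rethrown, so it surfaces at the io_context::run() call instead of being ignored.

#include <xrpl/server/detail/Spawn.h>

#include <boost/asio/io_context.hpp>

#include <iostream>
#include <stdexcept>

int
main()
{
    boost::asio::io_context ioc;
    ripple::util::spawn(ioc, [](boost::asio::yield_context yield) {
        // ... asynchronous work using 'yield' ...
        throw std::runtime_error("boom");
    });
    try
    {
        ioc.run();  // rethrown here via kPROPAGATE_EXCEPTIONS
    }
    catch (std::exception const& e)
    {
        std::cerr << "caught: " << e.what() << '\n';
    }
}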

Some files were not shown because too many files have changed in this diff