Compare commits


2 Commits

Author: Vito
SHA1: a15abd4067
Date: 2025-09-02 15:08:57 +02:00
Message: attempt to appease formatting gods

Author: Vito
SHA1: d9695be838
Date: 2025-09-02 15:00:12 +02:00
Message: refactor(overlay): Overhaul peer disconnection logic
This commit refactors the peer shutdown and failure handling
mechanism to be more robust, consistent, and communicative.

The previous implementation used raw strings to represent error reasons
and did not communicate these reasons to peers when shutting down a connection.

With this change, disconnections are now explicitly communicated via a
`TMClose` protocol message with strongly-typed reasons. This new
approach provides better diagnostics and makes the peer disconnection
process more stable and predictable.
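The overlay sources themselves are not part of the excerpt below, so as a rough illustration of the idea in the commit message, here is a minimal sketch in Python: a strongly-typed reason replaces the raw strings, and the reason is communicated to the peer before the connection is torn down. The real change is C++; the `CloseReason` values and the `send`/`close` calls below are hypothetical stand-ins, not the actual rippled API.

# Conceptual sketch only: typed disconnect reasons instead of raw strings.
from enum import Enum, auto

class CloseReason(Enum):
    # Hypothetical reasons; the real set lives in the C++ overlay code.
    DUPLICATE_PEER = auto()
    PROTOCOL_ERROR = auto()
    TIMEOUT = auto()
    SHUTDOWN = auto()

def disconnect(peer, reason: CloseReason) -> None:
    # Tell the remote peer why we are closing (the TMClose message described
    # above), then tear down the connection.
    peer.send({"type": "TMClose", "reason": reason.name})
    peer.close()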
567 changed files with 11001 additions and 29089 deletions


@@ -33,6 +33,5 @@ slack_app: false
ignore:
- "src/test/"
- "src/tests/"
- "include/xrpl/beast/test/"
- "include/xrpl/beast/unit_test/"


@@ -1,5 +1,7 @@
# This action installs and optionally uploads Conan dependencies to a remote
# repository. The dependencies will only be uploaded if the credentials are
# provided.
name: Build Conan dependencies
description: "Install Conan dependencies, optionally forcing a rebuild of all dependencies."
# Note that actions do not support 'type' and all inputs are strings, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
@@ -10,40 +12,51 @@ inputs:
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
build_nproc:
description: "The number of processors to use for building."
conan_remote_name:
description: "The name of the Conan remote to use."
required: true
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: true
conan_remote_username:
description: "The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
required: false
default: ""
conan_remote_password:
description: "The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
required: false
default: ""
force_build:
description: 'Force building of all dependencies ("true", "false").'
required: false
default: "false"
log_verbosity:
description: "The logging verbosity."
force_upload:
description: 'Force uploading of all dependencies ("true", "false").'
required: false
default: "verbose"
default: "false"
runs:
using: composite
steps:
- name: Install Conan dependencies
shell: bash
env:
BUILD_DIR: ${{ inputs.build_dir }}
BUILD_NPROC: ${{ inputs.build_nproc }}
BUILD_OPTION: ${{ inputs.force_build == 'true' && '*' || 'missing' }}
BUILD_TYPE: ${{ inputs.build_type }}
LOG_VERBOSITY: ${{ inputs.log_verbosity }}
run: |
echo 'Installing dependencies.'
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
mkdir -p ${{ inputs.build_dir }}
cd ${{ inputs.build_dir }}
conan install \
--output-folder . \
--build="${BUILD_OPTION}" \
--options:host='&:tests=True' \
--options:host='&:xrpld=True' \
--settings:all build_type="${BUILD_TYPE}" \
--conf:all tools.build:jobs=${BUILD_NPROC} \
--conf:all tools.build:verbosity="${LOG_VERBOSITY}" \
--conf:all tools.compilation:verbosity="${LOG_VERBOSITY}" \
..
--build ${{ inputs.force_build == 'true' && '"*"' || 'missing' }} \
--options:host '&:tests=True' \
--options:host '&:xrpld=True' \
--settings:all build_type=${{ inputs.build_type }} \
--format=json ..
- name: Upload Conan dependencies
if: ${{ inputs.conan_remote_username != '' && inputs.conan_remote_password != '' }}
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo "Logging into Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
conan remote login ${{ inputs.conan_remote_name }} "${{ inputs.conan_remote_username }}" --password "${{ inputs.conan_remote_password }}"
echo 'Uploading dependencies.'
conan upload '*' --confirm --check ${{ inputs.force_upload == 'true' && '--force' || '' }} --remote=${{ inputs.conan_remote_name }}
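A note on the `${{ inputs.force_build == 'true' && '*' || 'missing' }}` expressions above: as the comment at the top of the file says, action inputs are always strings, so the comparison is against the literal 'true' and the `&&`/`||` pair acts as a ternary. A rough Python equivalent of the selection logic:

def build_option(force_build: str) -> str:
    # Inputs arrive as the strings "true"/"false", never booleans.
    return "*" if force_build == "true" else "missing"

assert build_option("true") == "*"         # rebuild every dependency
assert build_option("false") == "missing"  # build only missing packages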

.github/actions/build-test/action.yml (new file, 95 lines)

@@ -0,0 +1,95 @@
# This action builds and tests the binary. The Conan dependencies must have
# already been installed (see the build-deps action).
name: Build and Test
# Note that actions do not support 'type' and all inputs are strings, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
inputs:
build_dir:
description: "The directory where to build."
required: true
build_only:
description: 'Whether to only build or to build and test the code ("true", "false").'
required: false
default: "false"
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
cmake_args:
description: "Additional arguments to pass to CMake."
required: false
default: ""
cmake_target:
description: "The CMake target to build."
required: true
codecov_token:
description: "The Codecov token to use for uploading coverage reports."
required: false
default: ""
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
runs:
using: composite
steps:
- name: Configure CMake
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Configuring CMake.'
cmake \
-G '${{ inputs.os == 'windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE=${{ inputs.build_type }} \
${{ inputs.cmake_args }} \
..
- name: Build the binary
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Building binary.'
cmake \
--build . \
--config ${{ inputs.build_type }} \
--parallel $(nproc) \
--target ${{ inputs.cmake_target }}
- name: Check linking
if: ${{ inputs.os == 'linux' }}
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Checking linking.'
ldd ./rippled
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
echo 'The binary is statically linked.'
else
echo 'The binary is dynamically linked.'
exit 1
fi
- name: Verify voidstar
if: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Verifying presence of instrumentation.'
./rippled --version | grep libvoidstar
- name: Test the binary
if: ${{ inputs.build_only == 'false' }}
shell: bash
working-directory: ${{ inputs.build_dir }}/${{ inputs.os == 'windows' && inputs.build_type || '' }}
run: |
echo 'Testing binary.'
./rippled --unittest --unittest-jobs $(nproc)
ctest -j $(nproc) --output-on-failure
- name: Upload coverage report
if: ${{ inputs.cmake_target == 'coverage' }}
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
with:
disable_search: true
disable_telem: true
fail_ci_if_error: true
files: ${{ inputs.build_dir }}/coverage.xml
plugins: noop
token: ${{ inputs.codecov_token }}
verbose: true
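The 'Check linking' step above fails the build when the binary dynamically links libstdc++ or libgcc. A small Python sketch of the same check, assuming (as the step does) that a statically linked rippled shows neither library in ldd output:

import re
import subprocess

def is_statically_linked(binary: str = "./rippled") -> bool:
    # Mirrors: ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l
    out = subprocess.run(["ldd", binary], capture_output=True, text=True).stdout
    return re.search(r"libstdc\+\+|libgcc", out) is None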


@@ -1,43 +0,0 @@
name: Print build environment
description: "Print environment and some tooling versions"
runs:
using: composite
steps:
- name: Check configuration (Windows)
if: ${{ runner.os == 'Windows' }}
shell: bash
run: |
echo 'Checking environment variables.'
set
echo 'Checking CMake version.'
cmake --version
echo 'Checking Conan version.'
conan --version
- name: Check configuration (Linux and macOS)
if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }}
shell: bash
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking compiler version.'
${{ runner.os == 'Linux' && '${CC}' || 'clang' }} --version
echo 'Checking Conan version.'
conan --version
echo 'Checking Ninja version.'
ninja --version
echo 'Checking nproc version.'
nproc --version


@@ -1,46 +0,0 @@
name: Setup Conan
description: "Set up Conan configuration, profile, and remote."
inputs:
conan_remote_name:
description: "The name of the Conan remote to use."
required: false
default: xrplf
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: false
default: https://conan.ripplex.io
runs:
using: composite
steps:
- name: Set up Conan configuration
shell: bash
run: |
echo 'Installing configuration.'
cat conan/global.conf ${{ runner.os == 'Linux' && '>>' || '>' }} $(conan config home)/global.conf
echo 'Conan configuration:'
conan config show '*'
- name: Set up Conan profile
shell: bash
run: |
echo 'Installing profile.'
conan config install conan/profiles/default -tf $(conan config home)/profiles/
echo 'Conan profile:'
conan profile show
- name: Set up Conan remote
shell: bash
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
CONAN_REMOTE_URL: ${{ inputs.conan_remote_url }}
run: |
echo "Adding Conan remote '${CONAN_REMOTE_NAME}' at '${CONAN_REMOTE_URL}'."
conan remote add --index 0 --force "${CONAN_REMOTE_NAME}" "${CONAN_REMOTE_URL}"
echo 'Listing Conan remotes.'
conan remote list


@@ -72,15 +72,15 @@ It generates many files of [results](results):
desired as described above. In a perfect repo, this file will be
empty.
This file is committed to the repo, and is used by the [levelization
Github workflow](../../workflows/reusable-check-levelization.yml) to validate
Github workflow](../../workflows/check-levelization.yml) to validate
that nothing changed.
- [`ordering.txt`](results/ordering.txt): A list showing relationships
between modules where there are no loops as they actually exist, as
opposed to how they are desired as described above.
This file is committed to the repo, and is used by the [levelization
Github workflow](../../workflows/reusable-check-levelization.yml) to validate
Github workflow](../../workflows/check-levelization.yml) to validate
that nothing changed.
- [`levelization.yml`](../../workflows/reusable-check-levelization.yml)
- [`levelization.yml`](../../workflows/check-levelization.yml)
Github Actions workflow to test that levelization loops haven't
changed. Unfortunately, if changes are detected, it can't tell if
they are improvements or not, so if you have resolved any issues or


@@ -7,6 +7,9 @@ Loop: test.jtx test.unit_test
Loop: xrpld.app xrpld.core
xrpld.app > xrpld.core
Loop: xrpld.app xrpld.ledger
xrpld.app > xrpld.ledger
Loop: xrpld.app xrpld.overlay
xrpld.overlay > xrpld.app
@@ -17,7 +20,7 @@ Loop: xrpld.app xrpld.rpc
xrpld.rpc > xrpld.app
Loop: xrpld.app xrpld.shamap
xrpld.shamap ~= xrpld.app
xrpld.app > xrpld.shamap
Loop: xrpld.core xrpld.perflog
xrpld.perflog == xrpld.core
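For readers unfamiliar with the levelization output: each `Loop:` line names two modules that depend on each other, and the line beneath records the relationship (reading `>` as mostly one-way, `~=` as roughly balanced, and `==` as fully balanced is an assumption based on the levelization docs). A hypothetical reader for the format:

def parse_loops(text: str) -> list[tuple[str, str, str]]:
    # Pair each 'Loop:' header with the dependency line that follows it.
    loops, pair = [], None
    for line in text.splitlines():
        if line.startswith("Loop:"):
            pair = tuple(line.split()[1:3])
        elif pair and line.strip():
            loops.append((pair[0], pair[1], line.strip()))
            pair = None
    return loops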


@@ -2,16 +2,8 @@ libxrpl.basics > xrpl.basics
libxrpl.crypto > xrpl.basics
libxrpl.json > xrpl.basics
libxrpl.json > xrpl.json
libxrpl.ledger > xrpl.basics
libxrpl.ledger > xrpl.json
libxrpl.ledger > xrpl.ledger
libxrpl.ledger > xrpl.protocol
libxrpl.net > xrpl.basics
libxrpl.net > xrpl.net
libxrpl.nodestore > xrpl.basics
libxrpl.nodestore > xrpl.json
libxrpl.nodestore > xrpl.nodestore
libxrpl.nodestore > xrpl.protocol
libxrpl.protocol > xrpl.basics
libxrpl.protocol > xrpl.json
libxrpl.protocol > xrpl.protocol
@@ -22,9 +14,6 @@ libxrpl.server > xrpl.basics
libxrpl.server > xrpl.json
libxrpl.server > xrpl.protocol
libxrpl.server > xrpl.server
libxrpl.shamap > xrpl.basics
libxrpl.shamap > xrpl.protocol
libxrpl.shamap > xrpl.shamap
test.app > test.jtx
test.app > test.rpc
test.app > test.toplevel
@@ -32,11 +21,11 @@ test.app > test.unit_test
test.app > xrpl.basics
test.app > xrpld.app
test.app > xrpld.core
test.app > xrpld.ledger
test.app > xrpld.nodestore
test.app > xrpld.overlay
test.app > xrpld.rpc
test.app > xrpl.json
test.app > xrpl.ledger
test.app > xrpl.nodestore
test.app > xrpl.protocol
test.app > xrpl.resource
test.basics > test.jtx
@@ -55,8 +44,8 @@ test.consensus > test.unit_test
test.consensus > xrpl.basics
test.consensus > xrpld.app
test.consensus > xrpld.consensus
test.consensus > xrpld.ledger
test.consensus > xrpl.json
test.consensus > xrpl.ledger
test.core > test.jtx
test.core > test.toplevel
test.core > test.unit_test
@@ -74,9 +63,9 @@ test.json > xrpl.json
test.jtx > xrpl.basics
test.jtx > xrpld.app
test.jtx > xrpld.core
test.jtx > xrpld.ledger
test.jtx > xrpld.rpc
test.jtx > xrpl.json
test.jtx > xrpl.ledger
test.jtx > xrpl.net
test.jtx > xrpl.protocol
test.jtx > xrpl.resource
@@ -86,14 +75,15 @@ test.ledger > test.toplevel
test.ledger > xrpl.basics
test.ledger > xrpld.app
test.ledger > xrpld.core
test.ledger > xrpl.ledger
test.ledger > xrpld.ledger
test.ledger > xrpl.protocol
test.nodestore > test.jtx
test.nodestore > test.toplevel
test.nodestore > test.unit_test
test.nodestore > xrpl.basics
test.nodestore > xrpld.core
test.nodestore > xrpl.nodestore
test.nodestore > xrpld.nodestore
test.nodestore > xrpld.unity
test.overlay > test.jtx
test.overlay > test.toplevel
test.overlay > test.unit_test
@@ -101,8 +91,8 @@ test.overlay > xrpl.basics
test.overlay > xrpld.app
test.overlay > xrpld.overlay
test.overlay > xrpld.peerfinder
test.overlay > xrpld.shamap
test.overlay > xrpl.protocol
test.overlay > xrpl.shamap
test.peerfinder > test.beast
test.peerfinder > test.unit_test
test.peerfinder > xrpl.basics
@@ -137,21 +127,15 @@ test.server > xrpl.json
test.server > xrpl.server
test.shamap > test.unit_test
test.shamap > xrpl.basics
test.shamap > xrpl.nodestore
test.shamap > xrpld.nodestore
test.shamap > xrpld.shamap
test.shamap > xrpl.protocol
test.shamap > xrpl.shamap
test.toplevel > test.csf
test.toplevel > xrpl.json
test.unit_test > xrpl.basics
tests.libxrpl > xrpl.basics
tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.net
xrpl.json > xrpl.basics
xrpl.ledger > xrpl.basics
xrpl.ledger > xrpl.protocol
xrpl.net > xrpl.basics
xrpl.nodestore > xrpl.basics
xrpl.nodestore > xrpl.protocol
xrpl.protocol > xrpl.basics
xrpl.protocol > xrpl.json
xrpl.resource > xrpl.basics
@@ -160,21 +144,16 @@ xrpl.resource > xrpl.protocol
xrpl.server > xrpl.basics
xrpl.server > xrpl.json
xrpl.server > xrpl.protocol
xrpl.shamap > xrpl.basics
xrpl.shamap > xrpl.nodestore
xrpl.shamap > xrpl.protocol
xrpld.app > test.unit_test
xrpld.app > xrpl.basics
xrpld.app > xrpld.conditions
xrpld.app > xrpld.consensus
xrpld.app > xrpld.nodestore
xrpld.app > xrpld.perflog
xrpld.app > xrpl.json
xrpld.app > xrpl.ledger
xrpld.app > xrpl.net
xrpld.app > xrpl.nodestore
xrpld.app > xrpl.protocol
xrpld.app > xrpl.resource
xrpld.app > xrpl.shamap
xrpld.conditions > xrpl.basics
xrpld.conditions > xrpl.protocol
xrpld.consensus > xrpl.basics
@@ -184,6 +163,14 @@ xrpld.core > xrpl.basics
xrpld.core > xrpl.json
xrpld.core > xrpl.net
xrpld.core > xrpl.protocol
xrpld.ledger > xrpl.basics
xrpld.ledger > xrpl.json
xrpld.ledger > xrpl.protocol
xrpld.nodestore > xrpl.basics
xrpld.nodestore > xrpld.core
xrpld.nodestore > xrpld.unity
xrpld.nodestore > xrpl.json
xrpld.nodestore > xrpl.protocol
xrpld.overlay > xrpl.basics
xrpld.overlay > xrpld.core
xrpld.overlay > xrpld.peerfinder
@@ -199,11 +186,13 @@ xrpld.perflog > xrpl.basics
xrpld.perflog > xrpl.json
xrpld.rpc > xrpl.basics
xrpld.rpc > xrpld.core
xrpld.rpc > xrpld.ledger
xrpld.rpc > xrpld.nodestore
xrpld.rpc > xrpl.json
xrpld.rpc > xrpl.ledger
xrpld.rpc > xrpl.net
xrpld.rpc > xrpl.nodestore
xrpld.rpc > xrpl.protocol
xrpld.rpc > xrpl.resource
xrpld.rpc > xrpl.server
xrpld.shamap > xrpl.shamap
xrpld.shamap > xrpl.basics
xrpld.shamap > xrpld.nodestore
xrpld.shamap > xrpl.protocol

.github/scripts/strategy-matrix/generate.py (57 lines changed; mode changed from executable to regular file)

@@ -2,17 +2,7 @@
import argparse
import itertools
import json
from pathlib import Path
from dataclasses import dataclass
THIS_DIR = Path(__file__).parent.resolve()
@dataclass
class Config:
architecture: list[dict]
os: list[dict]
build_type: list[str]
cmake_args: list[str]
import re
'''
Generate a strategy matrix for GitHub Actions CI.
@@ -28,9 +18,9 @@ We will further set additional CMake arguments as follows:
- Certain Debian Bookworm configurations will change the reference fee, enable
codecov, and enable voidstar in PRs.
'''
def generate_strategy_matrix(all: bool, config: Config) -> list:
def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict], build_type: list[str], cmake_args: list[str]) -> dict:
configurations = []
for architecture, os, build_type, cmake_args in itertools.product(config.architecture, config.os, config.build_type, config.cmake_args):
for architecture, os, build_type, cmake_args in itertools.product(architecture, os, build_type, cmake_args):
# The default CMake target is 'all' for Linux and MacOS and 'install'
# for Windows, but it can get overridden for certain configurations.
cmake_target = 'install' if os["distro_name"] == 'windows' else 'all'
@@ -45,7 +35,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
# Only generate a subset of configurations in PRs.
if not all:
# Debian:
# - Bookworm using GCC 13: Release and Unity on linux/amd64, set
# - Bookworm using GCC 13: Release and Unity on linux/arm64, set
# the reference fee to 500.
# - Bookworm using GCC 15: Debug and no Unity on linux/amd64, enable
# code coverage (which will be done below).
@@ -57,7 +47,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if os['distro_name'] == 'debian':
skip = True
if os['distro_version'] == 'bookworm':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-13' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-13' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/arm64':
cmake_args = f'-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}'
skip = False
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
@@ -74,14 +64,14 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
continue
# RHEL:
# - 9 using GCC 12: Debug and Unity on linux/amd64.
# - 10 using Clang: Release and no Unity on linux/amd64.
# - 9.4 using GCC 12: Debug and Unity on linux/amd64.
# - 9.6 using Clang: Release and no Unity on linux/amd64.
if os['distro_name'] == 'rhel':
skip = True
if os['distro_version'] == '9':
if os['distro_version'] == '9.4':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-12' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
skip = False
elif os['distro_version'] == '10':
elif os['distro_version'] == '9.6':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-any' and build_type == 'Release' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
skip = False
if skip:
@@ -162,36 +152,27 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
'config_name': config_name,
'cmake_args': cmake_args,
'cmake_target': cmake_target,
'build_only': build_only,
'build_only': 'true' if build_only else 'false',
'build_type': build_type,
'os': os,
'architecture': architecture,
})
return configurations
def read_config(file: Path) -> Config:
config = json.loads(file.read_text())
if config['architecture'] is None or config['os'] is None or config['build_type'] is None or config['cmake_args'] is None:
raise Exception('Invalid configuration file.')
return Config(**config)
return {'include': configurations}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--all', help='Set to generate all configurations (generally used when merging a PR) or leave unset to generate a subset of configurations (generally used when committing to a PR).', action="store_true")
parser.add_argument('-c', '--config', help='Path to the JSON file containing the strategy matrix configurations.', required=False, type=Path)
parser.add_argument('-c', '--config', help='Path to the JSON file containing the strategy matrix configurations.', required=True, type=str)
args = parser.parse_args()
matrix = []
if args.config is None or args.config == '':
matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "linux.json"))
matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "macos.json"))
matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "windows.json"))
else:
matrix += generate_strategy_matrix(args.all, read_config(args.config))
# Load the JSON configuration file.
config = None
with open(args.config, 'r') as f:
config = json.load(f)
if config['architecture'] is None or config['os'] is None or config['build_type'] is None or config['cmake_args'] is None:
raise Exception('Invalid configuration file.')
# Generate the strategy matrix.
print(f'matrix={json.dumps({"include": matrix})}')
print(f'matrix={json.dumps(generate_strategy_matrix(args.all, config['architecture'], config['os'], config['build_type'], config['cmake_args']))}')
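With the refactor above, the generator can also be exercised locally; a hypothetical invocation, assuming it is run from .github/scripts/strategy-matrix so that generate.py is importable:

from pathlib import Path

from generate import generate_strategy_matrix, read_config

# Load one of the bundled configs and count the configurations it yields.
config = read_config(Path("linux.json"))
matrix = generate_strategy_matrix(False, config)
print(f"{len(matrix)} configurations generated")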


@@ -14,169 +14,139 @@
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "97ba375"
"compiler_version": "12"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "97ba375"
"compiler_version": "13"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "97ba375"
"compiler_version": "14"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "97ba375"
"compiler_version": "15"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "97ba375"
"compiler_version": "16"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "97ba375"
"compiler_version": "17"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "97ba375"
"compiler_version": "18"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "97ba375"
"compiler_version": "19"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "97ba375"
"compiler_version": "20"
},
{
"distro_name": "rhel",
"distro_version": "8",
"distro_version": "9.4",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "97ba375"
"compiler_version": "12"
},
{
"distro_name": "rhel",
"distro_version": "8",
"distro_version": "9.4",
"compiler_name": "gcc",
"compiler_version": "13"
},
{
"distro_name": "rhel",
"distro_version": "9.4",
"compiler_name": "gcc",
"compiler_version": "14"
},
{
"distro_name": "rhel",
"distro_version": "9.6",
"compiler_name": "gcc",
"compiler_version": "13"
},
{
"distro_name": "rhel",
"distro_version": "9.6",
"compiler_name": "gcc",
"compiler_version": "14"
},
{
"distro_name": "rhel",
"distro_version": "9.4",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "97ba375"
"compiler_version": "any"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "9",
"distro_version": "9.6",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "97ba375"
"compiler_version": "any"
},
{
"distro_name": "ubuntu",
"distro_version": "jammy",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "97ba375"
"compiler_version": "12"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "97ba375"
"compiler_version": "13"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "97ba375"
"compiler_version": "14"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "97ba375"
"compiler_version": "16"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "97ba375"
"compiler_version": "17"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "97ba375"
"compiler_version": "18"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "97ba375"
"compiler_version": "19"
}
],
"build_type": ["Debug", "Release"],


@@ -10,8 +10,7 @@
"distro_name": "macos",
"distro_version": "",
"compiler_name": "",
"compiler_version": "",
"image_sha": ""
"compiler_version": ""
}
],
"build_type": ["Debug", "Release"],


@@ -2,7 +2,7 @@
"architecture": [
{
"platform": "windows/amd64",
"runner": ["self-hosted", "Windows", "devbox"]
"runner": ["windows-latest"]
}
],
"os": [
@@ -10,8 +10,7 @@
"distro_name": "windows",
"distro_version": "",
"compiler_name": "",
"compiler_version": "",
"image_sha": ""
"compiler_version": ""
}
],
"build_type": ["Debug", "Release"],

.github/workflows/build-test.yml (new file, 199 lines)

@@ -0,0 +1,199 @@
# This workflow builds and tests the binary for various configurations.
name: Build and test
# This workflow can only be triggered by other workflows. Note that the
# workflow_call event does not support the 'choice' input type, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onworkflow_callinputsinput_idtype,
# so we use 'string' instead.
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: false
type: string
default: ".build"
conan_remote_name:
description: "The name of the Conan remote to use."
required: true
type: string
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: true
type: string
dependencies_force_build:
description: "Force building of all dependencies."
required: false
type: boolean
default: false
dependencies_force_upload:
description: "Force uploading of all dependencies."
required: false
type: boolean
default: false
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
secrets:
codecov_token:
description: "The Codecov token to use for uploading coverage reports."
required: false
conan_remote_username:
description: "The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
required: false
conan_remote_password:
description: "The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
required: false
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.os }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Set up Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: 3.13
- name: Generate strategy matrix
working-directory: .github/scripts/strategy-matrix
id: generate
run: python generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} --config=${{ inputs.os }}.json >> "${GITHUB_OUTPUT}"
outputs:
matrix: ${{ steps.generate.outputs.matrix }}
# Build and test the binary.
build-test:
needs:
- generate-matrix
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
runs-on: ${{ matrix.architecture.runner }}
container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }}
steps:
- name: Check strategy matrix
run: |
echo 'Operating system distro name: ${{ matrix.os.distro_name }}'
echo 'Operating system distro version: ${{ matrix.os.distro_version }}'
echo 'Operating system compiler name: ${{ matrix.os.compiler_name }}'
echo 'Operating system compiler version: ${{ matrix.os.compiler_version }}'
echo 'Architecture platform: ${{ matrix.architecture.platform }}'
echo 'Architecture runner: ${{ toJson(matrix.architecture.runner) }}'
echo 'Build type: ${{ matrix.build_type }}'
echo 'Build only: ${{ matrix.build_only }}'
echo 'CMake arguments: ${{ matrix.cmake_args }}'
echo 'CMake target: ${{ matrix.cmake_target }}'
echo 'Config name: ${{ matrix.config_name }}'
- name: Clean workspace (MacOS)
if: ${{ inputs.os == 'macos' }}
run: |
WORKSPACE=${{ github.workspace }}
echo "Cleaning workspace '${WORKSPACE}'."
if [ -z "${WORKSPACE}" ] || [ "${WORKSPACE}" = "/" ]; then
echo "Invalid working directory '${WORKSPACE}'."
exit 1
fi
find "${WORKSPACE}" -depth 1 | xargs rm -rfv
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
- name: Check configuration (Windows)
if: ${{ inputs.os == 'windows' }}
run: |
echo 'Checking environment variables.'
set
echo 'Checking CMake version.'
cmake --version
echo 'Checking Conan version.'
conan --version
- name: Check configuration (Linux and MacOS)
if: ${{ inputs.os == 'linux' || inputs.os == 'macos' }}
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking compiler version.'
${{ inputs.os == 'linux' && '${CC}' || 'clang' }} --version
echo 'Checking Conan version.'
conan --version
echo 'Checking Ninja version.'
ninja --version
echo 'Checking nproc version.'
nproc --version
- name: Set up Conan configuration
run: |
echo 'Installing configuration.'
cat conan/global.conf ${{ inputs.os == 'linux' && '>>' || '>' }} $(conan config home)/global.conf
echo 'Conan configuration:'
conan config show '*'
- name: Set up Conan profile
run: |
echo 'Installing profile.'
conan config install conan/profiles/default -tf $(conan config home)/profiles/
echo 'Conan profile:'
conan profile show
- name: Set up Conan remote
shell: bash
run: |
echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }}
echo 'Listing Conan remotes.'
conan remote list
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ inputs.build_dir }}
build_type: ${{ matrix.build_type }}
conan_remote_name: ${{ inputs.conan_remote_name }}
conan_remote_url: ${{ inputs.conan_remote_url }}
conan_remote_username: ${{ secrets.conan_remote_username }}
conan_remote_password: ${{ secrets.conan_remote_password }}
force_build: ${{ inputs.dependencies_force_build }}
force_upload: ${{ inputs.dependencies_force_upload }}
- name: Build and test binary
uses: ./.github/actions/build-test
with:
build_dir: ${{ inputs.build_dir }}
build_only: ${{ matrix.build_only }}
build_type: ${{ matrix.build_type }}
cmake_args: ${{ matrix.cmake_args }}
cmake_target: ${{ matrix.cmake_target }}
codecov_token: ${{ secrets.codecov_token }}
os: ${{ inputs.os }}
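The `generate-matrix` job above hands its result to `build-test` through `$GITHUB_OUTPUT`: generate.py prints a single `matrix=<json>` line, the step appends it to the output file, and the next job parses it with `fromJson`. A sketch of that hand-off (the sample payload is illustrative):

import json
import os

# What the 'Generate strategy matrix' step effectively does: emit one
# 'matrix=<json>' line into the file GitHub exposes as $GITHUB_OUTPUT.
matrix = {"include": [{"build_type": "Debug",
                       "os": {"distro_name": "debian", "distro_version": "bookworm"}}]}
with open(os.environ["GITHUB_OUTPUT"], "a") as f:
    f.write(f"matrix={json.dumps(matrix)}\n")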

.github/workflows/check-format.yml (new file, 75 lines)

@@ -0,0 +1,75 @@
# This workflow checks if the code is properly formatted.
name: Check format
# This workflow can only be triggered by other workflows.
on: workflow_call
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-format
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
pre-commit:
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/tools-rippled-pre-commit
steps:
# The $GITHUB_WORKSPACE and ${{ github.workspace }} might not point to the
# same directory for jobs running in containers. The actions/checkout step
# is *supposed* to checkout into $GITHUB_WORKSPACE and then add it to
# safe.directory (see instructions at https://github.com/actions/checkout)
# but that is apparently not happening for some container images. We
# therefore preemptively add both directories to safe.directory. See also
# https://github.com/actions/runner/issues/2058 for more details.
- name: Configure git safe.directory
run: |
git config --global --add safe.directory $GITHUB_WORKSPACE
git config --global --add safe.directory ${{ github.workspace }}
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Check configuration
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking pre-commit version.'
pre-commit --version
echo 'Checking clang-format version.'
clang-format --version
echo 'Checking NPM version.'
npm --version
echo 'Checking Node.js version.'
node --version
echo 'Checking prettier version.'
prettier --version
- name: Format code
run: pre-commit run --show-diff-on-failure --color=always --all-files
- name: Check for differences
env:
MESSAGE: |
One or more files did not conform to the formatting. Maybe you did
not run 'pre-commit' before committing, or your version of
'clang-format' or 'prettier' has an incompatibility with the ones
used here (see the "Check configuration" step above).
Run 'pre-commit run --all-files' in your repo, and then commit and
push the changes.
run: |
DIFF=$(git status --porcelain)
if [ -n "${DIFF}" ]; then
# Print the files that changed to give the contributor a hint about
# what to expect when running pre-commit on their own machine.
git status
echo "${MESSAGE}"
exit 1
fi
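The 'Check for differences' step relies on `git status --porcelain` printing nothing when the tree is clean. The same gate in Python, for illustration:

import subprocess
import sys

# Fail when pre-commit modified any tracked files.
diff = subprocess.run(["git", "status", "--porcelain"],
                      capture_output=True, text=True).stdout
if diff:
    print("One or more files did not conform to the formatting; "
          "run 'pre-commit run --all-files' locally, then commit and push.")
    sys.exit(1)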


@@ -9,14 +9,12 @@ on:
inputs:
conan_remote_name:
description: "The name of the Conan remote to use."
required: false
required: true
type: string
default: xrplf
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: false
required: true
type: string
default: https://conan.ripplex.io
secrets:
clio_notify_token:
description: "The GitHub token to notify Clio about new versions."
@@ -40,52 +38,43 @@ jobs:
upload:
if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158
container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Generate outputs
id: generate
env:
PR_NUMBER: ${{ github.event.pull_request.number }}
run: |
echo 'Generating user and channel.'
echo "user=clio" >> "${GITHUB_OUTPUT}"
echo "channel=pr_${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
echo "channel=pr_${{ github.event.pull_request.number }}" >> "${GITHUB_OUTPUT}"
echo 'Extracting version.'
echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}"
- name: Calculate conan reference
id: conan_ref
- name: Add Conan remote
run: |
echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}"
- name: Set up Conan
uses: ./.github/actions/setup-conan
with:
conan_remote_name: ${{ inputs.conan_remote_name }}
conan_remote_url: ${{ inputs.conan_remote_url }}
echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }}
echo 'Listing Conan remotes.'
conan remote list
- name: Log into Conan remote
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
run: conan remote login ${{ inputs.conan_remote_name }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
- name: Upload package
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
run: |
conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} .
conan upload --confirm --check --remote="${CONAN_REMOTE_NAME}" xrpl/${{ steps.conan_ref.outputs.conan_ref }}
conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}
outputs:
conan_ref: ${{ steps.conan_ref.outputs.conan_ref }}
channel: ${{ steps.generate.outputs.channel }}
version: ${{ steps.generate.outputs.version }}
notify:
needs: upload
runs-on: ubuntu-latest
env:
GH_TOKEN: ${{ secrets.clio_notify_token }}
steps:
- name: Notify Clio
env:
GH_TOKEN: ${{ secrets.clio_notify_token }}
PR_URL: ${{ github.event.pull_request.html_url }}
run: |
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
-F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \
-F "client_payload[pr_url]=${PR_URL}"
-F "client_payload[version]=${{ needs.upload.outputs.version }}@${{ needs.upload.outputs.user }}/${{ needs.upload.outputs.channel }}" \
-F "client_payload[pr]=${{ github.event.pull_request.number }}"


@@ -23,6 +23,10 @@ defaults:
run:
shell: bash
env:
CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io
jobs:
# This job determines whether the rest of the workflow should run. It runs
# when the PR is not a draft (which should also cover merge-group) or
@@ -50,20 +54,18 @@ jobs:
files: |
# These paths are unique to `on-pr.yml`.
.github/scripts/levelization/**
.github/workflows/reusable-check-levelization.yml
.github/workflows/reusable-notify-clio.yml
.github/workflows/check-format.yml
.github/workflows/check-levelization.yml
.github/workflows/notify-clio.yml
.github/workflows/on-pr.yml
.clang-format
.pre-commit-config.yaml
# Keep the paths below in sync with those in `on-trigger.yml`.
.github/actions/build-deps/**
.github/actions/build-test/**
.github/actions/setup-conan/**
.github/scripts/strategy-matrix/**
.github/workflows/reusable-build.yml
.github/workflows/reusable-build-test-config.yml
.github/workflows/reusable-build-test.yml
.github/workflows/reusable-strategy-matrix.yml
.github/workflows/reusable-test.yml
.github/workflows/build-test.yml
.codecov.yml
cmake/**
conan/**
@@ -73,7 +75,6 @@ jobs:
tests/**
CMakeLists.txt
conanfile.py
conan.lock
- name: Check whether to run
# This step determines whether the rest of the workflow should
# run. The rest of the workflow will run if this job runs AND at
@@ -93,41 +94,61 @@ jobs:
outputs:
go: ${{ steps.go.outputs.go == 'true' }}
check-format:
needs: should-run
if: needs.should-run.outputs.go == 'true'
uses: ./.github/workflows/check-format.yml
check-levelization:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-check-levelization.yml
if: needs.should-run.outputs.go == 'true'
uses: ./.github/workflows/check-levelization.yml
# This job works around the limitation that GitHub Actions does not support
# using environment variables as inputs for reusable workflows.
generate-outputs:
needs: should-run
if: needs.should-run.outputs.go == 'true'
runs-on: ubuntu-latest
steps:
- name: No-op
run: true
outputs:
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
build-test:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-build-test.yml
needs: generate-outputs
uses: ./.github/workflows/build-test.yml
strategy:
fail-fast: false
matrix:
os: [linux, macos, windows]
with:
conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }}
conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }}
os: ${{ matrix.os }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
codecov_token: ${{ secrets.CODECOV_TOKEN }}
notify-clio:
needs:
- should-run
- generate-outputs
- build-test
if: ${{ needs.should-run.outputs.go == 'true' && contains(fromJSON('["release", "master"]'), github.ref_name) }}
uses: ./.github/workflows/reusable-notify-clio.yml
uses: ./.github/workflows/notify-clio.yml
with:
conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }}
conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }}
secrets:
clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
passed:
if: failure() || cancelled()
needs:
- build-test
- check-format
- check-levelization
runs-on: ubuntu-latest
steps:
- name: Fail
run: false
- name: No-op
run: true


@@ -9,25 +9,20 @@ name: Trigger
on:
push:
branches:
- "develop"
- "release*"
- "master"
- develop
- release
- master
paths:
# These paths are unique to `on-trigger.yml`.
- ".github/workflows/reusable-check-missing-commits.yml"
- ".github/workflows/check-missing-commits.yml"
- ".github/workflows/on-trigger.yml"
- ".github/workflows/publish-docs.yml"
# Keep the paths below in sync with those in `on-pr.yml`.
- ".github/actions/build-deps/**"
- ".github/actions/build-test/**"
- ".github/actions/setup-conan/**"
- ".github/scripts/strategy-matrix/**"
- ".github/workflows/reusable-build.yml"
- ".github/workflows/reusable-build-test-config.yml"
- ".github/workflows/reusable-build-test.yml"
- ".github/workflows/reusable-strategy-matrix.yml"
- ".github/workflows/reusable-test.yml"
- ".github/workflows/build-test.yml"
- ".codecov.yml"
- "cmake/**"
- "conan/**"
@@ -37,7 +32,6 @@ on:
- "tests/**"
- "CMakeLists.txt"
- "conanfile.py"
- "conan.lock"
# Run at 06:32 UTC on every day of the week from Monday through Friday. This
# will force all dependencies to be rebuilt, which is useful to verify that
@@ -46,35 +40,79 @@ on:
schedule:
- cron: "32 6 * * 1-5"
# Run when manually triggered via the GitHub UI or API.
# Run when manually triggered via the GitHub UI or API. If
# `dependencies_force_upload` is true, then the dependencies that were missing
# (`dependencies_force_build` is false) or rebuilt (`dependencies_force_build`
# is true) will be uploaded, overwriting existing dependencies if needed.
workflow_dispatch:
inputs:
dependencies_force_build:
description: "Force building of all dependencies."
required: false
type: boolean
default: false
dependencies_force_upload:
description: "Force uploading of all dependencies."
required: false
type: boolean
default: false
concurrency:
# When a PR is merged into the develop branch it will be assigned a unique
# group identifier, so execution will continue even if another PR is merged
# while it is still running. In all other cases the group identifier is shared
# per branch, so that any in-progress runs are cancelled when a new commit is
# pushed.
group: ${{ github.workflow }}-${{ github.event_name == 'push' && github.ref == 'refs/heads/develop' && github.sha || github.ref }}
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
env:
CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io
jobs:
check-missing-commits:
if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }}
uses: ./.github/workflows/reusable-check-missing-commits.yml
uses: ./.github/workflows/check-missing-commits.yml
# This job works around the limitation that GitHub Actions does not support
# using environment variables as inputs for reusable workflows. It also sets
# outputs that depend on the event that triggered the workflow.
generate-outputs:
runs-on: ubuntu-latest
steps:
- name: Check inputs and set outputs
id: generate
run: |
if [[ '${{ github.event_name }}' == 'push' ]]; then
echo 'dependencies_force_build=false' >> "${GITHUB_OUTPUT}"
echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}"
elif [[ '${{ github.event_name }}' == 'schedule' ]]; then
echo 'dependencies_force_build=true' >> "${GITHUB_OUTPUT}"
echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}"
else
echo 'dependencies_force_build=${{ inputs.dependencies_force_build }}' >> "${GITHUB_OUTPUT}"
echo 'dependencies_force_upload=${{ inputs.dependencies_force_upload }}' >> "${GITHUB_OUTPUT}"
fi
outputs:
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
dependencies_force_build: ${{ steps.generate.outputs.dependencies_force_build }}
dependencies_force_upload: ${{ steps.generate.outputs.dependencies_force_upload }}
build-test:
uses: ./.github/workflows/reusable-build-test.yml
needs: generate-outputs
uses: ./.github/workflows/build-test.yml
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix:
os: [linux, macos, windows]
with:
conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }}
conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }}
dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build == 'true' }}
dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload == 'true' }}
os: ${{ matrix.os }}
strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }}
strategy_matrix: "all"
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
codecov_token: ${{ secrets.CODECOV_TOKEN }}
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
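The concurrency comment above encodes a simple rule; expressed in Python for clarity:

def concurrency_group(workflow: str, event: str, ref: str, sha: str) -> str:
    # Pushes to develop get a unique group (the commit SHA), so runs for
    # successively merged PRs proceed independently; every other trigger
    # shares a per-ref group, so a new push cancels the in-progress run.
    if event == "push" and ref == "refs/heads/develop":
        return f"{workflow}-{sha}"
    return f"{workflow}-{ref}"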


@@ -1,15 +0,0 @@
name: Run pre-commit hooks
on:
pull_request:
push:
branches: [develop, release, master]
workflow_dispatch:
jobs:
# Call the workflow in the XRPLF/actions repo that runs the pre-commit hooks.
run-hooks:
uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a
with:
runs_on: ubuntu-latest
container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-a8c7be1" }'


@@ -23,24 +23,16 @@ defaults:
env:
BUILD_DIR: .build
NPROC_SUBTRACT: 2
jobs:
publish:
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-a8c7be1
container: ghcr.io/xrplf/ci/tools-rippled-documentation
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ env.NPROC_SUBTRACT }}
- name: Check configuration
run: |
echo 'Checking path.'
@@ -54,16 +46,12 @@ jobs:
echo 'Checking Doxygen version.'
doxygen --version
- name: Build documentation
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
run: |
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
mkdir -p ${{ env.BUILD_DIR }}
cd ${{ env.BUILD_DIR }}
cmake -Donly_docs=ON ..
cmake --build . --target docs --parallel ${BUILD_NPROC}
cmake --build . --target docs --parallel $(nproc)
- name: Publish documentation
if: ${{ github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
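The get-nproc helper used above (with `NPROC_SUBTRACT: 2`) reserves a couple of cores so the docs build does not saturate the runner; conceptually:

import os

def build_jobs(subtract: int = 2) -> int:
    # Reserve 'subtract' cores, but never drop below one job.
    return max(1, (os.cpu_count() or 1) - subtract)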


@@ -1,77 +0,0 @@
name: Build and test configuration
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: true
type: string
build_only:
description: 'Whether to only build or to build and test the code ("true", "false").'
required: true
type: boolean
build_type:
description: 'The build type to use ("Debug", "Release").'
type: string
required: true
cmake_args:
description: "Additional arguments to pass to CMake."
required: false
type: string
default: ""
cmake_target:
description: "The CMake target to build."
type: string
required: true
runs_on:
description: Runner to run the job on as a JSON string
required: true
type: string
image:
description: "The image to run in (leave empty to run natively)"
required: true
type: string
config_name:
description: "The configuration string (used for naming artifacts and such)."
required: true
type: string
nproc_subtract:
description: "The number of processors to subtract when calculating parallelism."
required: false
type: number
default: 2
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
jobs:
build:
uses: ./.github/workflows/reusable-build.yml
with:
build_dir: ${{ inputs.build_dir }}
build_type: ${{ inputs.build_type }}
cmake_args: ${{ inputs.cmake_args }}
cmake_target: ${{ inputs.cmake_target }}
runs_on: ${{ inputs.runs_on }}
image: ${{ inputs.image }}
config_name: ${{ inputs.config_name }}
nproc_subtract: ${{ inputs.nproc_subtract }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
test:
needs: build
uses: ./.github/workflows/reusable-test.yml
with:
run_tests: ${{ !inputs.build_only }}
verify_voidstar: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
runs_on: ${{ inputs.runs_on }}
image: ${{ inputs.image }}
config_name: ${{ inputs.config_name }}
nproc_subtract: ${{ inputs.nproc_subtract }}


@@ -1,58 +0,0 @@
# This workflow builds and tests the binary for various configurations.
name: Build and test
# This workflow can only be triggered by other workflows. Note that the
# workflow_call event does not support the 'choice' input type, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onworkflow_callinputsinput_idtype,
# so we use 'string' instead.
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: false
type: string
default: ".build"
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
os: ${{ inputs.os }}
strategy_matrix: ${{ inputs.strategy_matrix }}
# Build and test the binary for each configuration.
build-test-config:
needs:
- generate-matrix
uses: ./.github/workflows/reusable-build-test-config.yml
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
with:
build_dir: ${{ inputs.build_dir }}
build_only: ${{ matrix.build_only }}
build_type: ${{ matrix.build_type }}
cmake_args: ${{ matrix.cmake_args }}
cmake_target: ${{ matrix.cmake_target }}
runs_on: ${{ toJSON(matrix.architecture.runner) }}
image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || '' }}
config_name: ${{ matrix.config_name }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}


@@ -1,154 +0,0 @@
name: Build rippled
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: true
type: string
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
type: string
cmake_args:
description: "Additional arguments to pass to CMake."
required: true
type: string
cmake_target:
description: "The CMake target to build."
required: true
type: string
runs_on:
description: Runner to run the job on as a JSON string
required: true
type: string
image:
description: "The image to run in (leave empty to run natively)"
required: true
type: string
config_name:
description: "The name of the configuration."
required: true
type: string
nproc_subtract:
description: "The number of processors to subtract when calculating parallelism."
required: true
type: number
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
defaults:
run:
shell: bash
jobs:
build:
name: Build ${{ inputs.config_name }}
runs-on: ${{ fromJSON(inputs.runs_on) }}
container: ${{ inputs.image != '' && inputs.image || null }}
timeout-minutes: 60
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
with:
disable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ inputs.nproc_subtract }}
- name: Setup Conan
uses: ./.github/actions/setup-conan
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ inputs.build_dir }}
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ inputs.build_type }}
# Set the verbosity to "quiet" for Windows to avoid an excessive
# amount of logs. For other OSes, the "verbose" logs are more useful.
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
- name: Configure CMake
shell: bash
working-directory: ${{ inputs.build_dir }}
env:
BUILD_TYPE: ${{ inputs.build_type }}
CMAKE_ARGS: ${{ inputs.cmake_args }}
run: |
cmake \
-G '${{ runner.os == 'Windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
${CMAKE_ARGS} \
..
- name: Build the binary
shell: bash
working-directory: ${{ inputs.build_dir }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
BUILD_TYPE: ${{ inputs.build_type }}
CMAKE_TARGET: ${{ inputs.cmake_target }}
run: |
cmake \
--build . \
--config "${BUILD_TYPE}" \
--parallel ${BUILD_NPROC} \
--target "${CMAKE_TARGET}"
- name: Put built binaries in one location
shell: bash
working-directory: ${{ inputs.build_dir }}
env:
BUILD_TYPE_DIR: ${{ runner.os == 'Windows' && inputs.build_type || '' }}
CMAKE_TARGET: ${{ inputs.cmake_target }}
run: |
mkdir -p ./binaries/doctest/
cp ./${BUILD_TYPE_DIR}/rippled* ./binaries/
if [ "${CMAKE_TARGET}" != 'coverage' ]; then
cp ./src/tests/libxrpl/${BUILD_TYPE_DIR}/xrpl.test.* ./binaries/doctest/
fi
- name: Upload rippled artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
env:
BUILD_DIR: ${{ inputs.build_dir }}
with:
name: rippled-${{ inputs.config_name }}
path: ${{ env.BUILD_DIR }}/binaries/
retention-days: 3
if-no-files-found: error
- name: Upload coverage report
if: ${{ github.repository_owner == 'XRPLF' && inputs.cmake_target == 'coverage' }}
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
with:
disable_search: true
disable_telem: true
fail_ci_if_error: true
files: ${{ inputs.build_dir }}/coverage.xml
plugins: noop
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true


@@ -1,41 +0,0 @@
name: Generate strategy matrix
on:
workflow_call:
inputs:
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: false
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
outputs:
matrix:
description: "The generated strategy matrix."
value: ${{ jobs.generate-matrix.outputs.matrix }}
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.generate.outputs.matrix }}
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Set up Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: 3.13
- name: Generate strategy matrix
working-directory: .github/scripts/strategy-matrix
id: generate
env:
GENERATE_CONFIG: ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }}
GENERATE_OPTION: ${{ inputs.strategy_matrix == 'all' && '--all' || '' }}
run: ./generate.py ${GENERATE_OPTION} ${GENERATE_CONFIG} >> "${GITHUB_OUTPUT}"

View File

@@ -1,111 +0,0 @@
name: Test rippled
on:
workflow_call:
inputs:
verify_voidstar:
description: "Whether to verify the presence of voidstar instrumentation."
required: true
type: boolean
run_tests:
description: "Whether to run unit tests"
required: true
type: boolean
runs_on:
description: Runner to run the job on as a JSON string
required: true
type: string
image:
description: "The image to run in (leave empty to run natively)"
required: true
type: string
config_name:
description: "The name of the configuration."
required: true
type: string
nproc_subtract:
description: "The number of processors to subtract when calculating parallelism."
required: true
type: number
jobs:
test:
name: Test ${{ inputs.config_name }}
runs-on: ${{ fromJSON(inputs.runs_on) }}
container: ${{ inputs.image != '' && inputs.image || null }}
timeout-minutes: 30
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ inputs.nproc_subtract }}
- name: Download rippled artifact
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: rippled-${{ inputs.config_name }}
- name: Make binary executable (Linux and macOS)
shell: bash
if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }}
run: |
chmod +x ./rippled
- name: Check linking (Linux)
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
ldd ./rippled
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
echo 'The binary is statically linked.'
else
echo 'The binary is dynamically linked.'
exit 1
fi
- name: Verify presence of instrumentation
if: ${{ inputs.verify_voidstar }}
shell: bash
run: |
./rippled --version | grep libvoidstar
- name: Run the embedded tests
if: ${{ inputs.run_tests }}
shell: bash
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
run: |
./rippled --unittest --unittest-jobs ${BUILD_NPROC}
- name: Run the separate tests
if: ${{ inputs.run_tests }}
env:
EXT: ${{ runner.os == 'Windows' && '.exe' || '' }}
shell: bash
run: |
for test_file in ./doctest/*${EXT}; do
echo "Executing $test_file"
chmod +x "$test_file"
if [[ "${{ runner.os }}" == "Windows" && "$test_file" == "./doctest/xrpl.test.net.exe" ]]; then
echo "Skipping $test_file on Windows"
else
"$test_file"
fi
done
- name: Debug failure (Linux)
if: ${{ failure() && runner.os == 'Linux' && inputs.run_tests }}
shell: bash
run: |
echo "IPv4 local port range:"
cat /proc/sys/net/ipv4/ip_local_port_range
echo "Netstat:"
netstat -an

View File

@@ -1,107 +0,0 @@
name: Upload Conan Dependencies
on:
schedule:
- cron: "0 3 * * 2-6"
workflow_dispatch:
inputs:
force_source_build:
description: "Force source build of all dependencies"
required: false
default: false
type: boolean
force_upload:
description: "Force upload of all dependencies"
required: false
default: false
type: boolean
pull_request:
branches: [develop]
paths:
# This allows testing changes to the upload workflow in a PR
- .github/workflows/upload-conan-deps.yml
push:
branches: [develop]
paths:
- .github/workflows/upload-conan-deps.yml
- .github/workflows/reusable-strategy-matrix.yml
- .github/actions/build-deps/action.yml
- .github/actions/setup-conan/action.yml
- ".github/scripts/strategy-matrix/**"
- conanfile.py
- conan.lock
env:
CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io
NPROC_SUBTRACT: 2
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
strategy_matrix: ${{ github.event_name == 'pull_request' && 'minimal' || 'all' }}
# Build and upload the dependencies for each configuration.
run-upload-conan-deps:
needs:
- generate-matrix
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
runs-on: ${{ matrix.architecture.runner }}
container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || null }}
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
with:
disable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ env.NPROC_SUBTRACT }}
- name: Setup Conan
uses: ./.github/actions/setup-conan
with:
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: .build
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ matrix.build_type }}
force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }}
# Set the verbosity to "quiet" for Windows to avoid an excessive
# amount of logs. For other OSes, the "verbose" logs are more useful.
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
- name: Log into Conan remote
if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"
- name: Upload Conan packages
if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
env:
FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
run: conan upload "*" --remote="${CONAN_REMOTE_NAME}" --confirm ${FORCE_OPTION}

View File

@@ -1,5 +1,18 @@
# To run pre-commit hooks, first install pre-commit:
# - `pip install pre-commit==${PRE_COMMIT_VERSION}`
# - `pip install pre-commit-hooks==${PRE_COMMIT_HOOKS_VERSION}`
#
# Depending on your system, you can use `brew install` or `apt install` as well
# for installing the pre-commit package, but `pip` is needed to install the
# hooks; you can also use `pipx` if you prefer.
# Next, install the required formatters:
# - `pip install clang-format==${CLANG_VERSION}`
# - `npm install prettier@${PRETTIER_VERSION}`
#
# See https://github.com/XRPLF/ci/blob/main/.github/workflows/tools-rippled.yml
# for the versions used in the CI pipeline. You will need to have the exact same
# versions of the tools installed on your system to produce the same results as
# the pipeline.
#
# Then, run the following command to install the git hook scripts:
# - `pre-commit install`
@@ -7,33 +20,45 @@
# - `pre-commit run --all-files`
# To manually run a specific hook, use:
# - `pre-commit run <hook_id> --all-files`
# To run the hooks against only the staged files, use:
# To run the hooks against only the files changed in the current commit, use:
# - `pre-commit run`
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # frozen: v6.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: mixed-line-ending
- id: check-merge-conflict
args: [--assume-in-merge]
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: 7d85583be209cb547946c82fbe51f4bc5dd1d017 # frozen: v18.1.8
- repo: local
hooks:
- id: clang-format
args: [--style=file]
"types_or": [c++, c, proto]
- repo: https://github.com/rbubley/mirrors-prettier
rev: 5ba47274f9b181bce26a5150a725577f3c336011 # frozen: v3.6.2
name: clang-format
language: system
entry: clang-format -i
files: '\.(cpp|hpp|h|ipp|proto)$'
- id: trailing-whitespace
name: trailing-whitespace
entry: trailing-whitespace-fixer
language: system
types: [text]
- id: end-of-file
name: end-of-file
entry: end-of-file-fixer
language: system
types: [text]
- id: mixed-line-ending
name: mixed-line-ending
entry: mixed-line-ending
language: system
types: [text]
- id: check-merge-conflict
name: check-merge-conflict
entry: check-merge-conflict --assume-in-merge
language: system
types: [text]
- repo: local
hooks:
- id: prettier
name: prettier
language: system
entry: prettier --ignore-unknown --write
exclude: |
(?x)^(
external/.*|
.github/scripts/levelization/results/.*\.txt|
conan\.lock
.github/scripts/levelization/results/.*\.txt
)$

View File

@@ -39,12 +39,17 @@ found here](./docs/build/environment.md).
- [Python 3.11](https://www.python.org/downloads/), or higher
- [Conan 2.17](https://conan.io/downloads.html)[^1], or higher
- [CMake 3.22](https://cmake.org/download/), or higher
- [CMake 3.22](https://cmake.org/download/)[^2], or higher
[^1]:
It is possible to build with Conan 1.60+, but the instructions are
significantly different, which is why we do not recommend it.
[^2]:
CMake 4 is not yet supported by all dependencies required by this project.
If you are affected by this issue, follow the [workaround for CMake 4](#workaround-for-cmake-4).
`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
The [minimum compiler versions][2] required are:
@@ -127,7 +132,7 @@ higher index than the default Conan Center remote, so it is consulted first. You
can do this by running:
```bash
conan remote add --index 0 xrplf https://conan.ripplex.io
conan remote add --index 0 xrplf "https://conan.ripplex.io"
```
Alternatively, you can pull the patched recipes into the repository and use them
@@ -153,10 +158,6 @@ updated dependencies with the newer version. However, if we switch to a newer
version that no longer requires a patch, no action is required on your part, as
the new recipe will be automatically pulled from the official Conan Center.
> [!NOTE]
> You might need to add `--lockfile=""` to your `conan install` command
> to avoid automatic use of the existing `conan.lock` file when you run `conan export` manually on your machine.
### Conan profile tweaks
#### Missing compiler version
@@ -277,6 +278,21 @@ sed -i.bak -e 's|^arch=.*$|arch=x86_64|' $(conan config home)/profiles/default
sed -i.bak -e 's|^compiler\.runtime=.*$|compiler.runtime=static|' $(conan config home)/profiles/default
```
#### Workaround for CMake 4
If your system CMake is version 4 rather than 3, you may have to configure the
Conan profile to use CMake version 3 for dependencies by adding the following
two lines to your profile:
```text
[tool_requires]
!cmake/*: cmake/[>=3 <4]
```
This will force Conan to download and use a locally cached CMake 3 version, and
is needed because some of the dependencies used by this project do not support
CMake 4.
#### Clang workaround for grpc
If your compiler is clang, version 19 or later, or apple-clang, version 17 or
@@ -450,33 +466,6 @@ tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
The location of `rippled` binary in your build directory depends on your
CMake generator. Pass `--help` to see the rest of the command line options.
#### Conan lockfile
To achieve reproducible dependencies, we use a [Conan lockfile](https://docs.conan.io/2/tutorial/versioning/lockfiles.html).
The `conan.lock` file in the repository contains a "snapshot" of the current dependencies.
It is implicitly used when running `conan` commands; you don't need to specify it.
You must update this file every time you add a new dependency or change the revision or version of an existing one.
> [!NOTE]
> Conan uses the local cache by default when creating a lockfile.
>
> To ensure that lockfile creation works the same way on all developer machines, you should clear the local cache before creating a new lockfile.
To create a new lockfile, run the following commands in the repository root:
```bash
conan remove '*' --confirm
rm conan.lock
# This ensures that the xrplf remote is consulted first
conan remote add --force --index 0 xrplf https://conan.ripplex.io
conan lock create . -o '&:jemalloc=True' -o '&:rocksdb=True'
```
> [!NOTE]
> If some dependencies are exclusive to a particular OS, you may need to re-run the last command for them with `--profile:all <PROFILE>` added.
## Coverage report
The coverage report is intended for developers using compilers GCC
@@ -575,13 +564,7 @@ After any updates or changes to dependencies, you may need to do the following:
```
3. Re-run [conan export](#patched-recipes) if needed.
4. [Regenerate lockfile](#conan-lockfile).
5. Re-run [conan install](#build-and-test).
#### ERROR: Package not resolved
If you're seeing an error like `ERROR: Package 'snappy/1.1.10' not resolved: Unable to find 'snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246' in remotes.`,
please add the `xrplf` remote or re-run `conan export` for [patched recipes](#patched-recipes).
4. Re-run [conan install](#build-and-test).
### `protobuf/port_def.inc` file not found

View File

@@ -6,7 +6,7 @@ The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powe
## XRP
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free crypto-asset native to the XRP Ledger, and is designed as a gas token for network services and to bridge different currencies. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
## rippled
@@ -23,19 +23,19 @@ If you are interested in running an **API Server** (including a **Full History S
- **[Censorship-Resistant Transaction Processing][]:** No single party decides which transactions succeed or fail, and no one can "roll back" a transaction after it completes. As long as those who choose to participate in the network keep it healthy, they can settle transactions in seconds.
- **[Fast, Efficient Consensus Algorithm][]:** The XRP Ledger's consensus algorithm settles transactions in 4 to 5 seconds, processing at a throughput of up to 1500 transactions per second. These properties put XRP at least an order of magnitude ahead of other top digital assets.
- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction fees.
- **[Responsible Software Governance][]:** A team of full-time developers at Ripple & other organizations maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. Ripple acts as a steward for the technology and an advocate for its interests.
- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction costs.
- **[Responsible Software Governance][]:** A team of full-time, world-class developers at Ripple maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. Ripple acts as a steward for the technology and an advocate for its interests, and builds constructive relationships with governments and financial institutions worldwide.
- **[Secure, Adaptable Cryptography][]:** The XRP Ledger relies on industry standard digital signature systems like ECDSA (the same scheme used by Bitcoin) but also supports modern, efficient algorithms like Ed25519. The extensible nature of the XRP Ledger's software makes it possible to add and disable algorithms as the state of the art in cryptography advances.
- **[Modern Features][]:** Features like Escrow, Checks, and Payment Channels support financial applications atop of the XRP Ledger. This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints.
- **[Modern Features for Smart Contracts][]:** Features like Escrow, Checks, and Payment Channels support cutting-edge financial applications including the [Interledger Protocol](https://interledger.org/). This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints.
- **[On-Ledger Decentralized Exchange][]:** In addition to all the features that make XRP useful on its own, the XRP Ledger also has a fully-functional accounting system for tracking and trading obligations denominated in any way users want, and an exchange built into the protocol. The XRP Ledger can settle long, cross-currency payment paths and exchanges of multiple currencies in atomic transactions, bridging gaps of trust with XRP.
[Censorship-Resistant Transaction Processing]: https://xrpl.org/transaction-censorship-detection.html#transaction-censorship-detection
[Fast, Efficient Consensus Algorithm]: https://xrpl.org/consensus-research.html#consensus-research
[Finite XRP Supply]: https://xrpl.org/what-is-xrp.html
[Responsible Software Governance]: https://xrpl.org/contribute-code.html#contribute-code-to-the-xrp-ledger
[Secure, Adaptable Cryptography]: https://xrpl.org/cryptographic-keys.html#cryptographic-keys
[Modern Features]: https://xrpl.org/use-specialized-payment-types.html
[On-Ledger Decentralized Exchange]: https://xrpl.org/decentralized-exchange.html#decentralized-exchange
[Censorship-Resistant Transaction Processing]: https://xrpl.org/xrp-ledger-overview.html#censorship-resistant-transaction-processing
[Fast, Efficient Consensus Algorithm]: https://xrpl.org/xrp-ledger-overview.html#fast-efficient-consensus-algorithm
[Finite XRP Supply]: https://xrpl.org/xrp-ledger-overview.html#finite-xrp-supply
[Responsible Software Governance]: https://xrpl.org/xrp-ledger-overview.html#responsible-software-governance
[Secure, Adaptable Cryptography]: https://xrpl.org/xrp-ledger-overview.html#secure-adaptable-cryptography
[Modern Features for Smart Contracts]: https://xrpl.org/xrp-ledger-overview.html#modern-features-for-smart-contracts
[On-Ledger Decentralized Exchange]: https://xrpl.org/xrp-ledger-overview.html#on-ledger-decentralized-exchange
## Source Code

View File

@@ -975,47 +975,6 @@
# number of ledger records online. Must be greater
# than or equal to ledger_history.
#
# Optional keys for NuDB only:
#
# nudb_block_size EXPERIMENTAL: Block size in bytes for NuDB storage.
# Must be a power of 2 between 4096 and 32768. Default is 4096.
#
# This parameter controls the fundamental storage unit
# size for NuDB's internal data structures. The choice
# of block size can significantly impact performance
# depending on your storage hardware and filesystem:
#
# - 4096 bytes: Optimal for most standard SSDs and
# traditional filesystems (ext4, NTFS, HFS+).
# Provides good balance of performance and storage
# efficiency. Recommended for most deployments.
# Minimizes memory footprint and provides consistent
# low-latency access patterns across diverse hardware.
#
# - 8192-16384 bytes: May improve performance on
# high-end NVMe SSDs and copy-on-write filesystems
# like ZFS or Btrfs that benefit from larger block
# alignment. Can reduce metadata overhead for large
# databases. Offers better sequential throughput and
# reduced I/O operations at the cost of higher memory
# usage per operation.
#
# - 32768 bytes (32K): Maximum supported block size
# for high-performance scenarios with very fast
# storage. May increase memory usage and reduce
# efficiency for smaller databases. Best suited for
# enterprise environments with abundant RAM.
#
# Performance testing is recommended before deploying
# any non-default block size in production environments.
#
# Note: This setting cannot be changed after database
# creation without rebuilding the entire database.
# Choose carefully based on your hardware and expected
# database size.
#
# Example: nudb_block_size=4096
#
# These keys modify the behavior of online_delete, and thus are only
# relevant if online_delete is defined and non-zero:
#
@@ -1512,7 +1471,6 @@ secure_gateway = 127.0.0.1
[node_db]
type=NuDB
path=/var/lib/rippled/db/nudb
nudb_block_size=4096
online_delete=512
advisory_delete=0

View File

@@ -104,11 +104,6 @@
# 2025-08-28, Bronek Kozicki
# - fix "At least one COMMAND must be given" CMake warning from policy CMP0175
#
# 2025-09-03, Jingchen Wu
# - remove the unused function append_coverage_compiler_flags and append_coverage_compiler_flags_to_target
# - add a new function add_code_coverage_to_target
# - remove some unused code
#
# USAGE:
#
# 1. Copy this file into your cmake modules path.
@@ -117,8 +112,10 @@
# using a CMake option() to enable it just optionally):
# include(CodeCoverage)
#
# 3. Append necessary compiler flags and linker flags for all supported source files:
# add_code_coverage_to_target(<target> <PRIVATE|PUBLIC|INTERFACE>)
# 3. Append necessary compiler flags for all supported source files:
# append_coverage_compiler_flags()
# Or for specific target:
# append_coverage_compiler_flags_to_target(YOUR_TARGET_NAME)
#
# 3.a (OPTIONAL) Set appropriate optimization flags, e.g. -O0, -O1 or -Og
#
@@ -207,69 +204,67 @@ endforeach()
set(COVERAGE_COMPILER_FLAGS "-g --coverage"
CACHE INTERNAL "")
set(COVERAGE_CXX_COMPILER_FLAGS "")
set(COVERAGE_C_COMPILER_FLAGS "")
set(COVERAGE_CXX_LINKER_FLAGS "")
set(COVERAGE_C_LINKER_FLAGS "")
if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
include(CheckCXXCompilerFlag)
include(CheckCCompilerFlag)
include(CheckLinkerFlag)
set(COVERAGE_CXX_COMPILER_FLAGS ${COVERAGE_COMPILER_FLAGS})
set(COVERAGE_C_COMPILER_FLAGS ${COVERAGE_COMPILER_FLAGS})
set(COVERAGE_CXX_LINKER_FLAGS ${COVERAGE_COMPILER_FLAGS})
set(COVERAGE_C_LINKER_FLAGS ${COVERAGE_COMPILER_FLAGS})
check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path)
if(HAVE_cxx_fprofile_abs_path)
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_CXX_COMPILER_FLAGS} -fprofile-abs-path")
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
endif()
check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path)
if(HAVE_c_fprofile_abs_path)
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_C_COMPILER_FLAGS} -fprofile-abs-path")
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
endif()
check_linker_flag(CXX -fprofile-abs-path HAVE_cxx_linker_fprofile_abs_path)
if(HAVE_cxx_linker_fprofile_abs_path)
set(COVERAGE_CXX_LINKER_FLAGS "${COVERAGE_CXX_LINKER_FLAGS} -fprofile-abs-path")
endif()
check_linker_flag(C -fprofile-abs-path HAVE_c_linker_fprofile_abs_path)
if(HAVE_c_linker_fprofile_abs_path)
set(COVERAGE_C_LINKER_FLAGS "${COVERAGE_C_LINKER_FLAGS} -fprofile-abs-path")
endif()
check_cxx_compiler_flag(-fprofile-update=atomic HAVE_cxx_fprofile_update)
check_cxx_compiler_flag(-fprofile-update HAVE_cxx_fprofile_update)
if(HAVE_cxx_fprofile_update)
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_CXX_COMPILER_FLAGS} -fprofile-update=atomic")
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic")
endif()
check_c_compiler_flag(-fprofile-update=atomic HAVE_c_fprofile_update)
check_c_compiler_flag(-fprofile-update HAVE_c_fprofile_update)
if(HAVE_c_fprofile_update)
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_C_COMPILER_FLAGS} -fprofile-update=atomic")
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic")
endif()
check_linker_flag(CXX -fprofile-update=atomic HAVE_cxx_linker_fprofile_update)
if(HAVE_cxx_linker_fprofile_update)
set(COVERAGE_CXX_LINKER_FLAGS "${COVERAGE_CXX_LINKER_FLAGS} -fprofile-update=atomic")
endif()
check_linker_flag(C -fprofile-update=atomic HAVE_c_linker_fprofile_update)
if(HAVE_c_linker_fprofile_update)
set(COVERAGE_C_LINKER_FLAGS "${COVERAGE_C_LINKER_FLAGS} -fprofile-update=atomic")
endif()
endif()
set(CMAKE_Fortran_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the Fortran compiler during coverage builds."
FORCE )
set(CMAKE_CXX_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the C++ compiler during coverage builds."
FORCE )
set(CMAKE_C_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the C compiler during coverage builds."
FORCE )
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
""
CACHE STRING "Flags used for linking binaries during coverage builds."
FORCE )
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
""
CACHE STRING "Flags used by the shared libraries linker during coverage builds."
FORCE )
mark_as_advanced(
CMAKE_Fortran_FLAGS_COVERAGE
CMAKE_CXX_FLAGS_COVERAGE
CMAKE_C_FLAGS_COVERAGE
CMAKE_EXE_LINKER_FLAGS_COVERAGE
CMAKE_SHARED_LINKER_FLAGS_COVERAGE )
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading")
endif() # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
link_libraries(gcov)
endif()
# Defines a target for running and collecting code coverage information.
# Builds dependencies, runs the given executable and outputs reports.
# NOTE! The executable should always have a ZERO as exit code otherwise
@@ -459,19 +454,18 @@ function(setup_target_for_coverage_gcovr)
)
endfunction() # setup_target_for_coverage_gcovr
function(add_code_coverage_to_target name scope)
separate_arguments(COVERAGE_CXX_COMPILER_FLAGS NATIVE_COMMAND "${COVERAGE_CXX_COMPILER_FLAGS}")
separate_arguments(COVERAGE_C_COMPILER_FLAGS NATIVE_COMMAND "${COVERAGE_C_COMPILER_FLAGS}")
separate_arguments(COVERAGE_CXX_LINKER_FLAGS NATIVE_COMMAND "${COVERAGE_CXX_LINKER_FLAGS}")
separate_arguments(COVERAGE_C_LINKER_FLAGS NATIVE_COMMAND "${COVERAGE_C_LINKER_FLAGS}")
function(append_coverage_compiler_flags)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}")
endfunction() # append_coverage_compiler_flags
# Add compiler options to the target
target_compile_options(${name} ${scope}
$<$<COMPILE_LANGUAGE:CXX>:${COVERAGE_CXX_COMPILER_FLAGS}>
$<$<COMPILE_LANGUAGE:C>:${COVERAGE_C_COMPILER_FLAGS}>)
target_link_libraries (${name} ${scope}
$<$<LINK_LANGUAGE:CXX>:${COVERAGE_CXX_LINKER_FLAGS} gcov>
$<$<LINK_LANGUAGE:C>:${COVERAGE_C_LINKER_FLAGS} gcov>
)
endfunction() # add_code_coverage_to_target
# Setup coverage for specific library
function(append_coverage_compiler_flags_to_target name)
separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}")
target_compile_options(${name} PRIVATE ${_flag_list})
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
target_link_libraries(${name} PRIVATE gcov)
endif()
endfunction()

View File

@@ -45,7 +45,7 @@ if (static OR APPLE OR MSVC)
set (OPENSSL_USE_STATIC_LIBS ON)
endif ()
set (OPENSSL_MSVC_STATIC_RT ON)
find_dependency (OpenSSL REQUIRED)
find_dependency (OpenSSL 1.1.1 REQUIRED)
find_dependency (ZLIB)
find_dependency (date)
if (TARGET ZLIB::ZLIB)

View File

@@ -53,15 +53,14 @@ add_library(xrpl.imports.main INTERFACE)
target_link_libraries(xrpl.imports.main
INTERFACE
absl::random_random
date::date
ed25519::ed25519
LibArchive::LibArchive
OpenSSL::Crypto
Ripple::boost
Ripple::libs
Ripple::opts
Ripple::syslibs
absl::random_random
date::date
ed25519::ed25519
secp256k1::secp256k1
xrpl.libpb
xxHash::xxhash
@@ -112,27 +111,6 @@ target_link_libraries(xrpl.libxrpl.net PUBLIC
add_module(xrpl server)
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol)
add_module(xrpl nodestore)
target_link_libraries(xrpl.libxrpl.nodestore PUBLIC
xrpl.libxrpl.basics
xrpl.libxrpl.json
xrpl.libxrpl.protocol
)
add_module(xrpl shamap)
target_link_libraries(xrpl.libxrpl.shamap PUBLIC
xrpl.libxrpl.basics
xrpl.libxrpl.crypto
xrpl.libxrpl.protocol
xrpl.libxrpl.nodestore
)
add_module(xrpl ledger)
target_link_libraries(xrpl.libxrpl.ledger PUBLIC
xrpl.libxrpl.basics
xrpl.libxrpl.json
xrpl.libxrpl.protocol
)
add_library(xrpl.libxrpl)
set_target_properties(xrpl.libxrpl PROPERTIES OUTPUT_NAME xrpl)
@@ -152,10 +130,7 @@ target_link_modules(xrpl PUBLIC
protocol
resource
server
nodestore
shamap
net
ledger
)
# All headers in libxrpl are in modules.

View File

@@ -33,8 +33,6 @@ setup_target_for_coverage_gcovr(
FORMAT ${coverage_format}
EXECUTABLE rippled
EXECUTABLE_ARGS --unittest$<$<BOOL:${coverage_test}>:=${coverage_test}> --unittest-jobs ${coverage_test_parallelism} --quiet --unittest-log
EXCLUDE "src/test" "src/tests" "include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb"
EXCLUDE "src/test" "include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb"
DEPENDENCIES rippled
)
add_code_coverage_to_target(opts INTERFACE)

View File

@@ -8,23 +8,19 @@ install (
TARGETS
common
opts
ripple_boost
ripple_libs
ripple_syslibs
ripple_boost
xrpl.imports.main
xrpl.libpb
xrpl.libxrpl
xrpl.libxrpl.basics
xrpl.libxrpl.beast
xrpl.libxrpl.crypto
xrpl.libxrpl.json
xrpl.libxrpl.ledger
xrpl.libxrpl.net
xrpl.libxrpl.nodestore
xrpl.libxrpl.protocol
xrpl.libxrpl.resource
xrpl.libxrpl.server
xrpl.libxrpl.shamap
xrpl.libxrpl.net
xrpl.libxrpl
antithesis-sdk-cpp
EXPORT RippleExports
LIBRARY DESTINATION lib
@@ -41,7 +37,7 @@ install(CODE "
set(CMAKE_MODULE_PATH \"${CMAKE_MODULE_PATH}\")
include(create_symbolic_link)
create_symbolic_link(xrpl \
\$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/ripple)
\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/ripple)
")
install (EXPORT RippleExports
@@ -75,7 +71,7 @@ if (is_root_project AND TARGET rippled)
set(CMAKE_MODULE_PATH \"${CMAKE_MODULE_PATH}\")
include(create_symbolic_link)
create_symbolic_link(rippled${suffix} \
\$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}/xrpld${suffix})
\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}/xrpld${suffix})
")
endif ()

View File

@@ -28,11 +28,15 @@ target_compile_options (opts
$<$<AND:$<BOOL:${is_gcc}>,$<COMPILE_LANGUAGE:CXX>>:-Wsuggest-override>
$<$<BOOL:${is_gcc}>:-Wno-maybe-uninitialized>
$<$<BOOL:${perf}>:-fno-omit-frame-pointer>
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${coverage}>>:-g --coverage -fprofile-abs-path>
$<$<AND:$<BOOL:${is_clang}>,$<BOOL:${coverage}>>:-g --coverage>
$<$<BOOL:${profile}>:-pg>
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
target_link_libraries (opts
INTERFACE
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${coverage}>>:-g --coverage -fprofile-abs-path>
$<$<AND:$<BOOL:${is_clang}>,$<BOOL:${coverage}>>:-g --coverage>
$<$<BOOL:${profile}>:-pg>
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)

View File

@@ -1,4 +1,4 @@
option (validator_keys "Enables building of validator-keys tool as a separate target (imported via FetchContent)" OFF)
option (validator_keys "Enables building of validator-keys-tool as a separate target (imported via FetchContent)" OFF)
if (validator_keys)
git_branch (current_branch)
@@ -6,15 +6,17 @@ if (validator_keys)
if (NOT (current_branch STREQUAL "release"))
set (current_branch "master")
endif ()
message (STATUS "Tracking ValidatorKeys branch: ${current_branch}")
message (STATUS "tracking ValidatorKeys branch: ${current_branch}")
FetchContent_Declare (
validator_keys
validator_keys_src
GIT_REPOSITORY https://github.com/ripple/validator-keys-tool.git
GIT_TAG "${current_branch}"
)
FetchContent_MakeAvailable(validator_keys)
set_target_properties(validator-keys PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}")
install(TARGETS validator-keys RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
FetchContent_GetProperties (validator_keys_src)
if (NOT validator_keys_src_POPULATED)
message (STATUS "Pausing to download ValidatorKeys...")
FetchContent_Populate (validator_keys_src)
endif ()
add_subdirectory (${validator_keys_src_SOURCE_DIR} ${CMAKE_BINARY_DIR}/validator-keys)
endif ()

View File

@@ -7,7 +7,7 @@ function(xrpl_add_test name)
"${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/${name}.cpp"
)
add_executable(${target} ${ARGN} ${sources})
add_executable(${target} EXCLUDE_FROM_ALL ${ARGN} ${sources})
isolate_headers(
${target}

View File

@@ -1,56 +0,0 @@
{
"version": "0.5",
"requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
"snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246",
"rocksdb/10.0.1#85537f46e538974d67da0c3977de48ac%1756234304.347",
"re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"openssl/3.5.4#a1d5835cc6ed5c5b8f3cd5b9b5d24205%1759746684.671",
"nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03",
"libarchive/3.8.1#5cf685686322e906cb42706ab7e099a8%1756234256.696",
"jemalloc/5.3.0#e951da9cf599e956cebc117880d2d9f8%1729241615.244",
"grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
"doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819",
"date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
"c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
"bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
"boost/1.88.0#8852c0b72ce8271fb8ff7c53456d4983%1756223752.326",
"abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907"
],
"build_requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"strawberryperl/5.32.1.1#707032463aa0620fa17ec0d887f5fe41%1756234281.733",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"nasm/2.16.01#31e26f2ee3c4346ecd347911bd126904%1756234232.901",
"msys2/cci.latest#5b73b10144f73cc5bfe0572ed9be39e1%1751977009.857",
"m4/1.4.19#b38ced39a01e31fef5435bc634461fd2%1700758725.451",
"cmake/3.31.8#dde3bde00bb843687e55aea5afa0e220%1756234232.89",
"b2/5.3.3#107c15377719889654eb9a162a673975%1756234226.28",
"automake/1.16.5#b91b7c384c3deaa9d535be02da14d04f%1755524470.56",
"autoconf/2.71#51077f068e61700d65bb05541ea1e4b0%1731054366.86"
],
"python_requires": [],
"overrides": {
"protobuf/3.21.12": [
null,
"protobuf/3.21.12"
],
"lz4/1.9.4": [
"lz4/1.10.0"
],
"boost/1.83.0": [
"boost/1.88.0"
],
"sqlite3/3.44.2": [
"sqlite3/3.49.1"
]
},
"config_requires": []
}

View File

@@ -1,5 +1,9 @@
# Global configuration for Conan. This is used to set the number of parallel
# downloads and uploads.
# downloads, uploads, and build jobs. The verbosity is set to verbose to
# provide more information during the build process.
core:non_interactive=True
core.download:parallel={{ os.cpu_count() }}
core.upload:parallel={{ os.cpu_count() }}
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
tools.build:verbosity=verbose
tools.compilation:verbosity=verbose

View File

@@ -21,11 +21,14 @@ compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}}
[conf]
{% if compiler == "clang" and compiler_version >= 19 %}
grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw']
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "apple-clang" and compiler_version >= 17 %}
grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw']
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "gcc" and compiler_version < 13 %}
tools.build:cxxflags+=['-Wno-restrict']
tools.build:cxxflags=['-Wno-restrict']
{% endif %}
[tool_requires]
!cmake/*: cmake/[>=3 <4]

View File

@@ -27,7 +27,7 @@ class Xrpl(ConanFile):
'grpc/1.50.1',
'libarchive/3.8.1',
'nudb/2.0.9',
'openssl/3.5.4',
'openssl/3.5.2',
'soci/4.0.3',
'zlib/1.3.1',
]

View File

@@ -654,14 +654,12 @@ SharedWeakUnion<T>::convertToWeak()
break;
case destroy:
// We just added a weak ref. How could we destroy?
// LCOV_EXCL_START
UNREACHABLE(
"ripple::SharedWeakUnion::convertToWeak : destroying freshly "
"added ref");
delete p;
unsafeSetRawPtr(nullptr);
return true; // Should never happen
// LCOV_EXCL_STOP
case partialDestroy:
// This is a weird case. We just converted the last strong
// pointer to a weak pointer.

View File

@@ -150,24 +150,6 @@ public:
return (mantissa_ < 0) ? -1 : (mantissa_ ? 1 : 0);
}
Number
truncate() const noexcept
{
if (exponent_ >= 0 || mantissa_ == 0)
return *this;
Number ret = *this;
while (ret.exponent_ < 0 && ret.mantissa_ != 0)
{
ret.exponent_ += 1;
ret.mantissa_ /= rep(10);
}
// We are guaranteed that normalize() will never throw an exception
// because exponent is either negative or zero at this point.
ret.normalize();
return ret;
}
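For reference, a standalone toy of the loop above: it models `Number` as mantissa · 10^exponent and drops fractional digits by repeated division (truncation toward zero); the final `normalize()` step of the real method is omitted here.

```cpp
#include <cstdint>
#include <iostream>
#include <utility>

// Toy model of the removed truncate(): while the exponent is negative,
// shift one decimal digit out of the mantissa. Number itself also
// re-normalizes afterwards; that step is omitted.
std::pair<std::int64_t, int>
truncateToy(std::int64_t mantissa, int exponent)
{
    while (exponent < 0 && mantissa != 0)
    {
        ++exponent;
        mantissa /= 10;
    }
    return {mantissa, exponent};
}

int main()
{
    // 1234 * 10^-2 == 12.34  ->  12 * 10^0
    auto [m, e] = truncateToy(1234, -2);
    std::cout << m << " * 10^" << e << '\n';  // prints "12 * 10^0"
}
```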
friend constexpr bool
operator>(Number const& x, Number const& y) noexcept
{

View File

@@ -632,16 +632,6 @@ to_string(base_uint<Bits, Tag> const& a)
return strHex(a.cbegin(), a.cend());
}
template <std::size_t Bits, class Tag>
inline std::string
to_short_string(base_uint<Bits, Tag> const& a)
{
static_assert(
base_uint<Bits, Tag>::bytes > 4,
"For 4 bytes or less, use a native type");
return strHex(a.cbegin(), a.cbegin() + 4) + "...";
}
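A standalone toy mirroring the removed helper, assuming uppercase hex output as `strHex` produces elsewhere in rippled; `base_uint` is mocked here by a plain byte array.

```cpp
#include <array>
#include <cstdint>
#include <iostream>
#include <string>

// Mock of the removed to_short_string: hex-encode only the first four
// bytes and append "..." (base_uint/strHex are not required).
std::string
toShortString(std::array<std::uint8_t, 32> const& a)
{
    static char const* const digits = "0123456789ABCDEF";
    std::string s;
    for (int i = 0; i < 4; ++i)
    {
        s += digits[a[i] >> 4];
        s += digits[a[i] & 0x0F];
    }
    return s + "...";
}

int main()
{
    std::array<std::uint8_t, 32> id{0xDE, 0xAD, 0xBE, 0xEF};
    std::cout << toShortString(id) << '\n';  // prints "DEADBEEF..."
}
```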
template <std::size_t Bits, class Tag>
inline std::ostream&
operator<<(std::ostream& out, base_uint<Bits, Tag> const& u)

View File

@@ -28,8 +28,9 @@ namespace ripple {
// the destination can hold all values of the source. This is particularly
// handy when the source or destination is an enumeration type.
template <class Src, class Dest>
concept SafeToCast = (std::is_integral_v<Src> && std::is_integral_v<Dest>) &&
template <class Dest, class Src>
static constexpr bool is_safetocasttovalue_v =
(std::is_integral_v<Src> && std::is_integral_v<Dest>) &&
(std::is_signed<Src>::value || std::is_unsigned<Dest>::value) &&
(std::is_signed<Src>::value != std::is_signed<Dest>::value
? sizeof(Dest) > sizeof(Src)
@@ -77,7 +78,7 @@ inline constexpr std::
unsafe_cast(Src s) noexcept
{
static_assert(
!SafeToCast<Src, Dest>,
!is_safetocasttovalue_v<Dest, Src>,
"Only unsafe if casting signed to unsigned or "
"destination is too small");
return static_cast<Dest>(s);
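For reference, a compile-time sketch of the trait's semantics. The body is copied from the hunk above, with the branch elided by the hunk completed as `sizeof(Dest) >= sizeof(Src)`; the upstream completion may differ.

```cpp
#include <cstdint>
#include <type_traits>

template <class Dest, class Src>
static constexpr bool is_safetocasttovalue_v =
    (std::is_integral_v<Src> && std::is_integral_v<Dest>) &&
    (std::is_signed<Src>::value || std::is_unsigned<Dest>::value) &&
    (std::is_signed<Src>::value != std::is_signed<Dest>::value
         ? sizeof(Dest) > sizeof(Src)
         // Assumed completion of the branch elided in the hunk above:
         : sizeof(Dest) >= sizeof(Src));

// Widening or same-size casts within one signedness are safe...
static_assert(is_safetocasttovalue_v<std::int64_t, std::int32_t>);
static_assert(is_safetocasttovalue_v<std::uint32_t, std::uint32_t>);
// ...while a narrowing cast is not.
static_assert(!is_safetocasttovalue_v<std::int16_t, std::int32_t>);

int main()
{
    return 0;
}
```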

View File

@@ -94,11 +94,7 @@ hash_append(Hasher& h, beast::IP::Address const& addr) noexcept
else if (addr.is_v6())
hash_append(h, addr.to_v6().to_bytes());
else
{
// LCOV_EXCL_START
UNREACHABLE("beast::hash_append : invalid address type");
// LCOV_EXCL_STOP
}
}
} // namespace beast

View File

@@ -39,16 +39,11 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#endif
#define XRPL_ASSERT ALWAYS_OR_UNREACHABLE
#define XRPL_ASSERT_PARTS(cond, function, description, ...) \
XRPL_ASSERT(cond, function " : " description)
// How to use the instrumentation macros:
//
// * XRPL_ASSERT if cond must be true but the line might not be reached during
//   fuzzing. Same as `assert` in normal use.
// * XRPL_ASSERT_PARTS is for convenience, and works like XRPL_ASSERT, but
// splits the message param into "function" and "description", then joins
// them with " : " before passing to XRPL_ASSERT.
// * ALWAYS if cond must be true _and_ the line must be reached during fuzzing.
//   Same as `assert` in normal use.
// * REACHABLE if the line must be reached during fuzzing
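A brief usage sketch of the two assert forms, with a hypothetical function following the `namespace::function : description` message convention seen elsewhere in this diff:

```cpp
#include <xrpl/beast/utility/instrumentation.h>

void
example(int* ptr)
{
    // Must hold, though the line may never be reached while fuzzing:
    XRPL_ASSERT(ptr != nullptr, "ripple::example : valid pointer");

    // Identical check; the macro joins the two parts with " : ":
    XRPL_ASSERT_PARTS(ptr != nullptr, "ripple::example", "valid pointer");
}
```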

View File

@@ -46,7 +46,7 @@ public:
* without formatting (not human friendly).
*
* The JSON document is written in a single line. It is not intended for 'human'
* consumption, but may be useful to support feature such as RPC where bandwidth
* consumption, but may be useful to support feature such as RPC where bandwith
* is limited. \sa Reader, Value
*/

View File

@@ -26,6 +26,7 @@ enum MessageType {
mtREPLAY_DELTA_RESPONSE = 60;
mtHAVE_TRANSACTIONS = 63;
mtTRANSACTIONS = 64;
mtCLOSE = 65;
}
// token, iterations, target, challenge = issue demand for proof of work
@@ -341,3 +342,19 @@ message TMReplayDeltaResponse {
message TMHaveTransactions {
repeated bytes hashes = 1;
}
enum TMCloseReason {
crRESOURCE = 1;
crINVALID_CLOSED_LEDGER = 2;
crINVALID_PREV_LEDGER = 3;
crBAD_LEDGER_HEADERS = 4;
crLARGE_SEND_QUEUE = 5;
crNOT_USEFUL = 6;
crPING_TIMEOUT = 7;
crINTERNAL = 8;
crSHUTDOWN = 9;
}
message TMClose {
required TMCloseReason reason = 1;
}
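For illustration, a hedged sketch of building and reading the new message with the protobuf-generated C++ API; it assumes the standard proto2 codegen and rippled's `protocol` proto package, neither of which is shown in this hunk:

```cpp
// Sketch only: the header name and package are assumptions based on
// rippled's ripple.proto, which is not shown in this hunk.
#include "ripple.pb.h"

#include <string>

void
announceShutdown()
{
    // Tell the peer why the connection is going away instead of
    // silently dropping it.
    protocol::TMClose close;
    close.set_reason(protocol::crSHUTDOWN);

    std::string wire;
    close.SerializeToString(&wire);

    // The receiving side gets a strongly-typed reason back.
    protocol::TMClose received;
    if (received.ParseFromString(wire) && received.has_reason() &&
        received.reason() == protocol::crSHUTDOWN)
    {
        // e.g. suppress reconnect attempts for a clean shutdown
    }
}
```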

View File

@@ -20,11 +20,6 @@
#ifndef RIPPLE_PROTOCOL_APIVERSION_H_INCLUDED
#define RIPPLE_PROTOCOL_APIVERSION_H_INCLUDED
#include <xrpl/beast/core/SemanticVersion.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/json/json_value.h>
#include <xrpl/protocol/jss.h>
#include <type_traits>
#include <utility>
@@ -77,77 +72,6 @@ static_assert(apiMaximumSupportedVersion >= apiMinimumSupportedVersion);
static_assert(apiBetaVersion >= apiMaximumSupportedVersion);
static_assert(apiMaximumValidVersion >= apiMaximumSupportedVersion);
template <class JsonObject>
void
setVersion(JsonObject& parent, unsigned int apiVersion, bool betaEnabled)
{
XRPL_ASSERT(
apiVersion != apiInvalidVersion,
"ripple::RPC::setVersion : input is valid");
auto& retObj = addObject(parent, jss::version);
if (apiVersion == apiVersionIfUnspecified)
{
// API version numbers used in API version 1
static beast::SemanticVersion const firstVersion{"1.0.0"};
static beast::SemanticVersion const goodVersion{"1.0.0"};
static beast::SemanticVersion const lastVersion{"1.0.0"};
retObj[jss::first] = firstVersion.print();
retObj[jss::good] = goodVersion.print();
retObj[jss::last] = lastVersion.print();
}
else
{
retObj[jss::first] = apiMinimumSupportedVersion.value;
retObj[jss::last] =
betaEnabled ? apiBetaVersion : apiMaximumSupportedVersion;
}
}
/**
* Retrieve the api version number from the json value
*
* Note that APIInvalidVersion will be returned if
* 1) the version number field has a wrong format
* 2) the version number retrieved is out of the supported range
* 3) the version number is unspecified and
* APIVersionIfUnspecified is out of the supported range
*
* @param jv a Json value that may or may not specify
* the api version number
* @param betaEnabled if the beta API version is enabled
* @return the api version number
*/
inline unsigned int
getAPIVersionNumber(Json::Value const& jv, bool betaEnabled)
{
static Json::Value const minVersion(RPC::apiMinimumSupportedVersion);
Json::Value const maxVersion(
betaEnabled ? RPC::apiBetaVersion : RPC::apiMaximumSupportedVersion);
if (jv.isObject())
{
if (jv.isMember(jss::api_version))
{
auto const specifiedVersion = jv[jss::api_version];
if (!specifiedVersion.isInt() && !specifiedVersion.isUInt())
{
return RPC::apiInvalidVersion;
}
auto const specifiedVersionInt = specifiedVersion.asInt();
if (specifiedVersionInt < minVersion ||
specifiedVersionInt > maxVersion)
{
return RPC::apiInvalidVersion;
}
return specifiedVersionInt;
}
}
return RPC::apiVersionIfUnspecified;
}
} // namespace RPC
template <unsigned minVer, unsigned maxVer, typename Fn, typename... Args>

View File

@@ -1,180 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_PROTOCOL_CONFIDENTIALTRANSFER_H_INCLUDED
#define RIPPLE_PROTOCOL_CONFIDENTIALTRANSFER_H_INCLUDED
#include <xrpl/basics/Slice.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/MPTIssue.h>
#include <xrpl/protocol/Protocol.h>
#include <xrpl/protocol/Rate.h>
#include <xrpl/protocol/STLedgerEntry.h>
#include <xrpl/protocol/STObject.h>
#include <xrpl/protocol/Serializer.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/detail/secp256k1.h>
#include <secp256k1.h>
namespace ripple {
/**
* @brief Generates a new secp256k1 key pair.
*/
SECP256K1_API int
secp256k1_elgamal_generate_keypair(
secp256k1_context const* ctx,
unsigned char* privkey,
secp256k1_pubkey* pubkey);
/**
* @brief Encrypts a 64-bit amount using ElGamal.
*/
SECP256K1_API int
secp256k1_elgamal_encrypt(
secp256k1_context const* ctx,
secp256k1_pubkey* c1,
secp256k1_pubkey* c2,
secp256k1_pubkey const* pubkey_Q,
uint64_t amount,
unsigned char const* blinding_factor);
/**
* @brief Decrypts an ElGamal ciphertext to recover the amount.
*/
SECP256K1_API int
secp256k1_elgamal_decrypt(
secp256k1_context const* ctx,
uint64_t* amount,
secp256k1_pubkey const* c1,
secp256k1_pubkey const* c2,
unsigned char const* privkey);
/**
* @brief Homomorphically adds two ElGamal ciphertexts.
*/
SECP256K1_API int
secp256k1_elgamal_add(
secp256k1_context const* ctx,
secp256k1_pubkey* sum_c1,
secp256k1_pubkey* sum_c2,
secp256k1_pubkey const* a_c1,
secp256k1_pubkey const* a_c2,
secp256k1_pubkey const* b_c1,
secp256k1_pubkey const* b_c2);
/**
* @brief Homomorphically subtracts two ElGamal ciphertexts.
*/
SECP256K1_API int
secp256k1_elgamal_subtract(
secp256k1_context const* ctx,
secp256k1_pubkey* diff_c1,
secp256k1_pubkey* diff_c2,
secp256k1_pubkey const* a_c1,
secp256k1_pubkey const* a_c2,
secp256k1_pubkey const* b_c1,
secp256k1_pubkey const* b_c2);
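A hedged sketch of the round trip these declarations suggest; key handling, blinding, and error checks are simplified, and only the functions declared above plus standard secp256k1 context calls are used:

```cpp
#include <secp256k1.h>

#include <cstdint>

// Assumes the ElGamal declarations above are in scope.
void
elgamalRoundTrip()
{
    secp256k1_context* ctx = secp256k1_context_create(
        SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);

    unsigned char priv[32];
    secp256k1_pubkey pub;
    secp256k1_elgamal_generate_keypair(ctx, priv, &pub);

    unsigned char blind[32] = {1};  // stand-in for a random scalar
    secp256k1_pubkey a1, a2, b1, b2, sum1, sum2;
    secp256k1_elgamal_encrypt(ctx, &a1, &a2, &pub, 40, blind);
    secp256k1_elgamal_encrypt(ctx, &b1, &b2, &pub, 2, blind);

    // Ciphertexts add component-wise, so the sum decrypts to 42.
    secp256k1_elgamal_add(ctx, &sum1, &sum2, &a1, &a2, &b1, &b2);
    std::uint64_t amount = 0;
    secp256k1_elgamal_decrypt(ctx, &amount, &sum1, &sum2, priv);

    secp256k1_context_destroy(ctx);
}
```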
/**
* @brief Generates the canonical encrypted zero for a given MPT token instance.
*
* This ciphertext represents a zero balance for a specific account's holding
* of a token defined by its MPTokenIssuanceID.
*
* @param[in] ctx A pointer to a valid secp256k1 context.
* @param[out] enc_zero_c1 The C1 component of the canonical ciphertext.
* @param[out] enc_zero_c2 The C2 component of the canonical ciphertext.
* @param[in] pubkey The ElGamal public key of the account holder.
* @param[in] account_id A pointer to the 20-byte AccountID.
* @param[in] mpt_issuance_id A pointer to the 24-byte MPTokenIssuanceID.
*
* @return 1 on success, 0 on failure.
*/
SECP256K1_API int
generate_canonical_encrypted_zero(
secp256k1_context const* ctx,
secp256k1_pubkey* enc_zero_c1,
secp256k1_pubkey* enc_zero_c2,
secp256k1_pubkey const* pubkey,
unsigned char const* account_id, // 20 bytes
unsigned char const* mpt_issuance_id // 24 bytes
);
// breaks a 66-byte encrypted amount into two 33-byte components
// then parses each 33-byte component into 64-byte secp256k1_pubkey format
bool
makeEcPair(Slice const& buffer, secp256k1_pubkey& out1, secp256k1_pubkey& out2);
// serialize two secp256k1_pubkey components back into compressed 66-byte form
bool
serializeEcPair(
secp256k1_pubkey const& in1,
secp256k1_pubkey const& in2,
Buffer& buffer);
/**
* @brief Verifies that a buffer contains two valid, parsable EC public keys.
* @param buffer The input buffer containing two concatenated components.
* @return true if both components can be parsed successfully, false otherwise.
*/
bool
isValidCiphertext(Slice const& buffer);
TER
homomorphicAdd(Slice const& a, Slice const& b, Buffer& out);
TER
homomorphicSubtract(Slice const& a, Slice const& b, Buffer& out);
TER
proveEquality(
Slice const& proof,
Slice const& encAmt, // encrypted amount
Slice const& pubkey,
uint64_t const amount,
uint256 const& txHash, // Transaction context data
std::uint32_t const spendVersion);
Buffer
encryptAmount(uint64_t amt, Slice const& pubKeySlice);
Buffer
encryptCanonicalZeroAmount(
Slice const& pubKeySlice,
AccountID const& account,
MPTID const& mptId);
TER
verifyConfidentialSendProof(
Slice const& proof,
Slice const& encSenderBalance,
Slice const& encSenderAmt,
Slice const& encDestAmt,
Slice const& encIssuerAmt,
Slice const& senderPubKey,
Slice const& destPubKey,
Slice const& issuerPubKey,
std::uint32_t const version,
uint256 const& txHash);
} // namespace ripple
#endif

View File

@@ -0,0 +1,565 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2019 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BASICS_FEES_H_INCLUDED
#define BASICS_FEES_H_INCLUDED
#include <xrpl/basics/safe_cast.h>
#include <xrpl/beast/utility/Zero.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/json/json_value.h>
#include <boost/multiprecision/cpp_int.hpp>
#include <boost/operators.hpp>
#include <iosfwd>
#include <limits>
#include <optional>
namespace ripple {
namespace feeunit {
/** "drops" are the smallest divisible amount of XRP. This is what most
of the code uses. */
struct dropTag;
/** "fee units" calculations are a not-really-unitless value that is used
to express the cost of a given transaction vs. a reference transaction.
They are primarily used by the Transactor classes. */
struct feeunitTag;
/** "fee levels" are used by the transaction queue to compare the relative
cost of transactions that require different levels of effort to process.
See also: src/ripple/app/misc/FeeEscalation.md#fee-level */
struct feelevelTag;
/** unitless values are plain scalars wrapped in a TaggedFee. They are
used for calculations in this header. */
struct unitlessTag;
template <class T>
using enable_if_unit_t = typename std::enable_if_t<
std::is_class_v<T> && std::is_object_v<typename T::unit_type> &&
std::is_object_v<typename T::value_type>>;
/** `is_usable_unit_v` is checked to ensure that only values with
known valid type tags can be used (sometimes transparently) in
non-fee contexts. At the time of implementation, this includes
all known tags, but more may be added in the future, and they
should not be added automatically unless determined to be
appropriate.
*/
template <class T, class = enable_if_unit_t<T>>
constexpr bool is_usable_unit_v =
std::is_same_v<typename T::unit_type, feeunitTag> ||
std::is_same_v<typename T::unit_type, feelevelTag> ||
std::is_same_v<typename T::unit_type, unitlessTag> ||
std::is_same_v<typename T::unit_type, dropTag>;
template <class UnitTag, class T>
class TaggedFee : private boost::totally_ordered<TaggedFee<UnitTag, T>>,
private boost::additive<TaggedFee<UnitTag, T>>,
private boost::equality_comparable<TaggedFee<UnitTag, T>, T>,
private boost::dividable<TaggedFee<UnitTag, T>, T>,
private boost::modable<TaggedFee<UnitTag, T>, T>,
private boost::unit_steppable<TaggedFee<UnitTag, T>>
{
public:
using unit_type = UnitTag;
using value_type = T;
private:
value_type fee_;
protected:
template <class Other>
static constexpr bool is_compatible_v =
std::is_arithmetic_v<Other> && std::is_arithmetic_v<value_type> &&
std::is_convertible_v<Other, value_type>;
template <class OtherFee, class = enable_if_unit_t<OtherFee>>
static constexpr bool is_compatiblefee_v =
is_compatible_v<typename OtherFee::value_type> &&
std::is_same_v<UnitTag, typename OtherFee::unit_type>;
template <class Other>
using enable_if_compatible_t =
typename std::enable_if_t<is_compatible_v<Other>>;
template <class OtherFee>
using enable_if_compatiblefee_t =
typename std::enable_if_t<is_compatiblefee_v<OtherFee>>;
public:
TaggedFee() = default;
constexpr TaggedFee(TaggedFee const& other) = default;
constexpr TaggedFee&
operator=(TaggedFee const& other) = default;
constexpr explicit TaggedFee(beast::Zero) : fee_(0)
{
}
constexpr TaggedFee&
operator=(beast::Zero)
{
fee_ = 0;
return *this;
}
constexpr explicit TaggedFee(value_type fee) : fee_(fee)
{
}
TaggedFee&
operator=(value_type fee)
{
fee_ = fee;
return *this;
}
/** Instances with the same unit, and a type that is
"safe" to convert to this one can be converted
implicitly */
template <
class Other,
class = std::enable_if_t<
is_compatible_v<Other> &&
is_safetocasttovalue_v<value_type, Other>>>
constexpr TaggedFee(TaggedFee<unit_type, Other> const& fee)
: TaggedFee(safe_cast<value_type>(fee.fee()))
{
}
constexpr TaggedFee
operator*(value_type const& rhs) const
{
return TaggedFee{fee_ * rhs};
}
friend constexpr TaggedFee
operator*(value_type lhs, TaggedFee const& rhs)
{
// multiplication is commutative
return rhs * lhs;
}
constexpr value_type
operator/(TaggedFee const& rhs) const
{
return fee_ / rhs.fee_;
}
TaggedFee&
operator+=(TaggedFee const& other)
{
fee_ += other.fee();
return *this;
}
TaggedFee&
operator-=(TaggedFee const& other)
{
fee_ -= other.fee();
return *this;
}
TaggedFee&
operator++()
{
++fee_;
return *this;
}
TaggedFee&
operator--()
{
--fee_;
return *this;
}
TaggedFee&
operator*=(value_type const& rhs)
{
fee_ *= rhs;
return *this;
}
TaggedFee&
operator/=(value_type const& rhs)
{
fee_ /= rhs;
return *this;
}
template <class transparent = value_type>
std::enable_if_t<std::is_integral_v<transparent>, TaggedFee&>
operator%=(value_type const& rhs)
{
fee_ %= rhs;
return *this;
}
TaggedFee
operator-() const
{
static_assert(
std::is_signed_v<T>, "- operator illegal on unsigned fee types");
return TaggedFee{-fee_};
}
bool
operator==(TaggedFee const& other) const
{
return fee_ == other.fee_;
}
template <class Other, class = enable_if_compatible_t<Other>>
bool
operator==(TaggedFee<unit_type, Other> const& other) const
{
return fee_ == other.fee();
}
bool
operator==(value_type other) const
{
return fee_ == other;
}
template <class Other, class = enable_if_compatible_t<Other>>
bool
operator!=(TaggedFee<unit_type, Other> const& other) const
{
return !operator==(other);
}
bool
operator<(TaggedFee const& other) const
{
return fee_ < other.fee_;
}
/** Returns true if the amount is not zero */
explicit constexpr
operator bool() const noexcept
{
return fee_ != 0;
}
/** Return the sign of the amount */
constexpr int
signum() const noexcept
{
return (fee_ < 0) ? -1 : (fee_ ? 1 : 0);
}
/** Returns the number of drops */
constexpr value_type
fee() const
{
return fee_;
}
template <class Other>
constexpr double
decimalFromReference(TaggedFee<unit_type, Other> reference) const
{
return static_cast<double>(fee_) / reference.fee();
}
// `is_usable_unit_v` is checked to ensure that only values with
// known valid type tags can be converted to JSON. At the time
// of implementation, that includes all known tags, but more may
// be added in the future.
std::enable_if_t<is_usable_unit_v<TaggedFee>, Json::Value>
jsonClipped() const
{
if constexpr (std::is_integral_v<value_type>)
{
using jsontype = std::conditional_t<
std::is_signed_v<value_type>,
Json::Int,
Json::UInt>;
constexpr auto min = std::numeric_limits<jsontype>::min();
constexpr auto max = std::numeric_limits<jsontype>::max();
if (fee_ < min)
return min;
if (fee_ > max)
return max;
return static_cast<jsontype>(fee_);
}
else
{
return fee_;
}
}
/** Returns the underlying value. Code SHOULD NOT call this
function unless the type has been abstracted away,
e.g. in a templated function.
*/
constexpr value_type
value() const
{
return fee_;
}
friend std::istream&
operator>>(std::istream& s, TaggedFee& val)
{
s >> val.fee_;
return s;
}
};
// Output Fees as just their numeric value.
template <class Char, class Traits, class UnitTag, class T>
std::basic_ostream<Char, Traits>&
operator<<(std::basic_ostream<Char, Traits>& os, TaggedFee<UnitTag, T> const& q)
{
return os << q.value();
}
template <class UnitTag, class T>
std::string
to_string(TaggedFee<UnitTag, T> const& amount)
{
return std::to_string(amount.fee());
}
template <class Source, class = enable_if_unit_t<Source>>
constexpr bool can_muldiv_source_v =
std::is_convertible_v<typename Source::value_type, std::uint64_t>;
template <class Dest, class = enable_if_unit_t<Dest>>
constexpr bool can_muldiv_dest_v =
can_muldiv_source_v<Dest> && // Dest is also a source
std::is_convertible_v<std::uint64_t, typename Dest::value_type> &&
sizeof(typename Dest::value_type) >= sizeof(std::uint64_t);
template <
class Source1,
class Source2,
class = enable_if_unit_t<Source1>,
class = enable_if_unit_t<Source2>>
constexpr bool can_muldiv_sources_v =
can_muldiv_source_v<Source1> && can_muldiv_source_v<Source2> &&
std::is_same_v<typename Source1::unit_type, typename Source2::unit_type>;
template <
class Source1,
class Source2,
class Dest,
class = enable_if_unit_t<Source1>,
class = enable_if_unit_t<Source2>,
class = enable_if_unit_t<Dest>>
constexpr bool can_muldiv_v =
can_muldiv_sources_v<Source1, Source2> && can_muldiv_dest_v<Dest>;
// Source and Dest can be the same by default
template <
class Source1,
class Source2,
class Dest,
class = enable_if_unit_t<Source1>,
class = enable_if_unit_t<Source2>,
class = enable_if_unit_t<Dest>>
constexpr bool can_muldiv_commute_v = can_muldiv_v<Source1, Source2, Dest> &&
!std::is_same_v<typename Source1::unit_type, typename Dest::unit_type>;
template <class T>
using enable_muldiv_source_t =
typename std::enable_if_t<can_muldiv_source_v<T>>;
template <class T>
using enable_muldiv_dest_t = typename std::enable_if_t<can_muldiv_dest_v<T>>;
template <class Source1, class Source2>
using enable_muldiv_sources_t =
typename std::enable_if_t<can_muldiv_sources_v<Source1, Source2>>;
template <class Source1, class Source2, class Dest>
using enable_muldiv_t =
typename std::enable_if_t<can_muldiv_v<Source1, Source2, Dest>>;
template <class Source1, class Source2, class Dest>
using enable_muldiv_commute_t =
typename std::enable_if_t<can_muldiv_commute_v<Source1, Source2, Dest>>;
template <class T>
TaggedFee<unitlessTag, T>
scalar(T value)
{
return TaggedFee<unitlessTag, T>{value};
}
template <
class Source1,
class Source2,
class Dest,
class = enable_muldiv_t<Source1, Source2, Dest>>
std::optional<Dest>
mulDivU(Source1 value, Dest mul, Source2 div)
{
// Fees can never be negative in any context.
if (value.value() < 0 || mul.value() < 0 || div.value() < 0)
{
// split the asserts so if one hits, the user can tell which
// without a debugger.
XRPL_ASSERT(
value.value() >= 0,
"ripple::feeunit::mulDivU : minimum value input");
XRPL_ASSERT(
mul.value() >= 0, "ripple::feeunit::mulDivU : minimum mul input");
XRPL_ASSERT(
div.value() >= 0, "ripple::feeunit::mulDivU : minimum div input");
return std::nullopt;
}
using desttype = typename Dest::value_type;
constexpr auto max = std::numeric_limits<desttype>::max();
// Shortcuts, since these happen a lot in the real world
if (value == div)
return mul;
if (mul.value() == div.value())
{
if (value.value() > max)
return std::nullopt;
return Dest{static_cast<desttype>(value.value())};
}
using namespace boost::multiprecision;
uint128_t product;
product = multiply(
product,
static_cast<std::uint64_t>(value.value()),
static_cast<std::uint64_t>(mul.value()));
auto quotient = product / div.value();
if (quotient > max)
return std::nullopt;
return Dest{static_cast<desttype>(quotient)};
}
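// Worked example of the 128-bit intermediate (values illustrative): with
// value = 3, mul = 2^63 and div = 2, the plain 64-bit product 3 * 2^63
// overflows, but the uint128_t product divides down to 3 * 2^62, which
// still fits in a std::uint64_t destination.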
} // namespace feeunit
template <class T>
using FeeLevel = feeunit::TaggedFee<feeunit::feelevelTag, T>;
using FeeLevel64 = FeeLevel<std::uint64_t>;
using FeeLevelDouble = FeeLevel<double>;
template <
class Source1,
class Source2,
class Dest,
class = feeunit::enable_muldiv_t<Source1, Source2, Dest>>
std::optional<Dest>
mulDiv(Source1 value, Dest mul, Source2 div)
{
return feeunit::mulDivU(value, mul, div);
}
template <
class Source1,
class Source2,
class Dest,
class = feeunit::enable_muldiv_commute_t<Source1, Source2, Dest>>
std::optional<Dest>
mulDiv(Dest value, Source1 mul, Source2 div)
{
// Multiplication is commutative
return feeunit::mulDivU(mul, value, div);
}
template <class Dest, class = feeunit::enable_muldiv_dest_t<Dest>>
std::optional<Dest>
mulDiv(std::uint64_t value, Dest mul, std::uint64_t div)
{
// Give the scalars a non-tag so the
// unit-handling version gets called.
return feeunit::mulDivU(feeunit::scalar(value), mul, feeunit::scalar(div));
}
template <class Dest, class = feeunit::enable_muldiv_dest_t<Dest>>
std::optional<Dest>
mulDiv(Dest value, std::uint64_t mul, std::uint64_t div)
{
// Multiplication is commutative
return mulDiv(mul, value, div);
}
template <
class Source1,
class Source2,
class = feeunit::enable_muldiv_sources_t<Source1, Source2>>
std::optional<std::uint64_t>
mulDiv(Source1 value, std::uint64_t mul, Source2 div)
{
// Give the scalars a dimensionless unit so the
// unit-handling version gets called.
auto unitresult = feeunit::mulDivU(value, feeunit::scalar(mul), div);
if (!unitresult)
return std::nullopt;
return unitresult->value();
}
template <
class Source1,
class Source2,
class = feeunit::enable_muldiv_sources_t<Source1, Source2>>
std::optional<std::uint64_t>
mulDiv(std::uint64_t value, Source1 mul, Source2 div)
{
// Multiplication is commutative
return mulDiv(mul, value, div);
}
template <class Dest, class Src>
constexpr std::enable_if_t<
std::is_same_v<typename Dest::unit_type, typename Src::unit_type> &&
std::is_integral_v<typename Dest::value_type> &&
std::is_integral_v<typename Src::value_type>,
Dest>
safe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{safe_cast<typename Dest::value_type>(s.value())};
}
template <class Dest, class Src>
constexpr std::enable_if_t<
std::is_same_v<typename Dest::unit_type, typename Src::unit_type> &&
std::is_integral_v<typename Dest::value_type> &&
std::is_integral_v<typename Src::value_type>,
Dest>
unsafe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{unsafe_cast<typename Dest::value_type>(s.value())};
}
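// Illustrative (aliases taken from this header): widening conversions
// between same-unit types go through safe_cast, while narrowing ones must
// opt in explicitly via unsafe_cast.
//
//   FeeLevel<std::uint32_t> narrow{100};
//   auto wide = safe_cast<FeeLevel<std::uint64_t>>(narrow);   // lossless
//   auto back = unsafe_cast<FeeLevel<std::uint32_t>>(wide);   // caller's risk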
} // namespace ripple
#endif // BASICS_FEES_H_INCLUDED
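The tagged types and the mulDiv family above are designed to be used together: arithmetic stays inside one unit's value_type, and mulDiv routes value * mul / div through a 128-bit intermediate, reporting overflow as std::nullopt. A minimal usage sketch (values illustrative):

    #include <xrpl/protocol/FeeUnits.h>

    #include <optional>

    void example()
    {
        using namespace ripple;
        FeeLevel64 base{256};
        FeeLevel64 mul{10'000'000};
        // base == div takes the shortcut and yields mul unchanged.
        std::optional<FeeLevel64> scaled =
            mulDiv(base, mul, FeeLevel64{256});
        // scaled && *scaled == FeeLevel64{10'000'000}
    }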

View File

@@ -287,11 +287,9 @@ delegate(AccountID const& account, AccountID const& authorizedAccount) noexcept;
Keylet
bridge(STXChainBridge const& bridge, STXChainBridge::ChainType chainType);
// `seq` is stored as `sfXChainClaimID` in the object
Keylet
xChainClaimID(STXChainBridge const& bridge, std::uint64_t seq);
// `seq` is stored as `sfXChainAccountCreateCount` in the object
Keylet
xChainCreateAccountClaimID(STXChainBridge const& bridge, std::uint64_t seq);

View File

@@ -56,7 +56,7 @@ enum LedgerEntryType : std::uint16_t
#pragma push_macro("LEDGER_ENTRY")
#undef LEDGER_ENTRY
#define LEDGER_ENTRY(tag, value, ...) tag = value,
#define LEDGER_ENTRY(tag, value, name, rpcName, fields) tag = value,
#include <xrpl/protocol/detail/ledger_entries.macro>
@@ -187,16 +187,6 @@ enum LedgerSpecificFlags {
lsfMPTCanTrade = 0x00000010,
lsfMPTCanTransfer = 0x00000020,
lsfMPTCanClawback = 0x00000040,
lsfMPTNoConfidentialTransfer = 0x00000080,
lsmfMPTCanMutateCanLock = 0x00000002,
lsmfMPTCanMutateRequireAuth = 0x00000004,
lsmfMPTCanMutateCanEscrow = 0x00000008,
lsmfMPTCanMutateCanTrade = 0x00000010,
lsmfMPTCanMutateCanTransfer = 0x00000020,
lsmfMPTCanMutateCanClawback = 0x00000040,
lsmfMPTCanMutateMetadata = 0x00010000,
lsmfMPTCanMutateTransferFee = 0x00020000,
// ltMPTOKEN
lsfMPTAuthorized = 0x00000002,

View File

@@ -20,8 +20,6 @@
#ifndef RIPPLE_PROTOCOL_PERMISSION_H_INCLUDED
#define RIPPLE_PROTOCOL_PERMISSION_H_INCLUDED
#include <xrpl/protocol/Rules.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/TxFormats.h>
#include <optional>
@@ -55,8 +53,6 @@ class Permission
private:
Permission();
std::unordered_map<std::uint16_t, uint256> txFeatureMap_;
std::unordered_map<std::uint16_t, Delegation> delegatableTx_;
std::unordered_map<std::string, GranularPermissionType>
@@ -74,9 +70,6 @@ public:
Permission&
operator=(Permission const&) = delete;
std::optional<std::string>
getPermissionName(std::uint32_t const value) const;
std::optional<std::uint32_t>
getGranularValue(std::string const& name) const;
@@ -86,12 +79,8 @@ public:
std::optional<TxType>
getGranularTxType(GranularPermissionType const& gpType) const;
std::optional<std::reference_wrapper<uint256 const>> const
getTxFeature(TxType txType) const;
bool
isDelegatable(std::uint32_t const& permissionValue, Rules const& rules)
const;
isDelegatable(std::uint32_t const& permissionValue) const;
// for tx-level permissions, the permission value equals the tx type plus one
uint32_t

View File

@@ -22,6 +22,7 @@
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <cstdint>
@@ -55,10 +56,7 @@ std::size_t constexpr oversizeMetaDataCap = 5200;
/** The maximum number of entries per directory page */
std::size_t constexpr dirNodeMaxEntries = 32;
/** The maximum number of pages allowed in a directory
Made obsolete by fixDirectoryLimit amendment.
*/
/** The maximum number of pages allowed in a directory */
std::uint64_t constexpr dirNodeMaxPages = 262144;
/** The maximum number of items in an NFT page */
@@ -124,13 +122,6 @@ std::size_t constexpr maxDataPayloadLength = 256;
/** Vault withdrawal policies */
std::uint8_t constexpr vaultStrategyFirstComeFirstServe = 1;
/** Default IOU scale factor for a Vault */
std::uint8_t constexpr vaultDefaultIOUScale = 6;
/** Maximum scale factor for a Vault. The number is chosen to ensure that
1 IOU can always be converted to shares.
10^19 > maxMPTokenAmount (2^64-1) > 10^18 */
std::uint8_t constexpr vaultMaximumIOUScale = 18;
/** Maximum recursion depth for vault shares being put as an asset inside
* another vault; counted from 0 */
std::uint8_t constexpr maxAssetCheckDepth = 5;
@@ -181,20 +172,6 @@ std::size_t constexpr permissionMaxSize = 10;
/** The maximum number of transactions that can be in a batch. */
std::size_t constexpr maxBatchTxCount = 8;
/** EC ElGamal ciphertext length 33-byte */
std::size_t constexpr ecGamalEncryptedLength = 33;
/** EC ElGamal ciphertext length: two 33-byte components concatenated */
std::size_t constexpr ecGamalEncryptedTotalLength = 66;
/** Length of equality ZKProof */
std::size_t constexpr ecEqualityProofLength = 98;
/** Length of EC public key */
std::size_t constexpr ecPubKeyLength = 64;
/** Length of EC private key */
std::size_t constexpr ecPrivKeyLength = 32;
} // namespace ripple
#endif

View File

@@ -21,7 +21,6 @@
#define RIPPLE_PROTOCOL_PUBLICKEY_H_INCLUDED
#include <xrpl/basics/Slice.h>
#include <xrpl/beast/net/IPEndpoint.h>
#include <xrpl/protocol/KeyType.h>
#include <xrpl/protocol/STExchange.h>
#include <xrpl/protocol/UintTypes.h>
@@ -265,24 +264,6 @@ calcNodeID(PublicKey const&);
AccountID
calcAccountID(PublicKey const& pk);
inline std::string
getFingerprint(
beast::IP::Endpoint const& address,
std::optional<PublicKey> const& publicKey = std::nullopt,
std::optional<std::string> const& id = std::nullopt)
{
std::stringstream ss;
ss << "IP Address: " << address;
if (publicKey.has_value())
{
ss << ", Public Key: " << toBase58(TokenType::NodePublic, *publicKey);
}
if (id.has_value())
{
ss << ", Id: " << id.value();
}
return ss.str();
}
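// Minimal usage sketch (endpoint, key, and id values illustrative):
//
//   auto const fp = getFingerprint(remoteEndpoint, publicKey, "peer-1");
//   // -> "IP Address: 203.0.113.5:51235, Public Key: n9K..., Id: peer-1"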
} // namespace ripple
//------------------------------------------------------------------------------

View File

@@ -22,7 +22,6 @@
#include <xrpl/basics/safe_cast.h>
#include <xrpl/json/json_value.h>
#include <xrpl/protocol/Units.h>
#include <cstdint>
#include <map>
@@ -72,10 +71,8 @@ class STCurrency;
STYPE(STI_VL, 7) \
STYPE(STI_ACCOUNT, 8) \
STYPE(STI_NUMBER, 9) \
STYPE(STI_INT32, 10) \
STYPE(STI_INT64, 11) \
\
/* 12-13 are reserved */ \
/* 10-13 are reserved */ \
STYPE(STI_OBJECT, 14) \
STYPE(STI_ARRAY, 15) \
\
@@ -151,10 +148,8 @@ public:
sMD_ChangeNew = 0x02, // new value when it changes
sMD_DeleteFinal = 0x04, // final value when it is deleted
sMD_Create = 0x08, // value when it's created
sMD_Always = 0x10, // value when node containing it is affected at all
sMD_BaseTen = 0x20, // value is treated as base 10, overriding behavior
sMD_PseudoAccount = 0x40, // if this field is set in an ACCOUNT_ROOT
// _only_, then it is a pseudo-account
sMD_Always = 0x10, // value when node containing it is affected at all
sMD_BaseTen = 0x20,
sMD_Default =
sMD_ChangeOrig | sMD_ChangeNew | sMD_DeleteFinal | sMD_Create
};
@@ -189,7 +184,7 @@ public:
char const* fn,
int meta = sMD_Default,
IsSigning signing = IsSigning::yes);
explicit SField(private_access_tag_t, int fc, char const* fn);
explicit SField(private_access_tag_t, int fc);
static SField const&
getField(int fieldCode);
@@ -302,7 +297,7 @@ public:
static int
compare(SField const& f1, SField const& f2);
static std::unordered_map<int, SField const*> const&
static std::map<int, SField const*> const&
getKnownCodeToField()
{
return knownCodeToField;
@@ -310,8 +305,7 @@ public:
private:
static int num;
static std::unordered_map<int, SField const*> knownCodeToField;
static std::unordered_map<std::string, SField const*> knownNameToField;
static std::map<int, SField const*> knownCodeToField;
};
/** A field with a type known at compile time. */
@@ -358,9 +352,6 @@ using SF_UINT256 = TypedField<STBitString<256>>;
using SF_UINT384 = TypedField<STBitString<384>>;
using SF_UINT512 = TypedField<STBitString<512>>;
using SF_INT32 = TypedField<STInteger<std::int32_t>>;
using SF_INT64 = TypedField<STInteger<std::int64_t>>;
using SF_ACCOUNT = TypedField<STAccount>;
using SF_AMOUNT = TypedField<STAmount>;
using SF_ISSUE = TypedField<STIssue>;

View File

@@ -709,6 +709,37 @@ canAdd(STAmount const& amt1, STAmount const& amt2);
bool
canSubtract(STAmount const& amt1, STAmount const& amt2);
// Since `canonicalize` does not have access to a ledger, this is needed to put
// the low-level routine stAmountCanonicalize on an amendment switch. Only
// transactions need to use this switchover. Outside of a transaction it's safe
// to unconditionally use the new behavior.
bool
getSTAmountCanonicalizeSwitchover();
void
setSTAmountCanonicalizeSwitchover(bool v);
/** RAII class to set and restore the STAmount canonicalize switchover.
*/
class STAmountSO
{
public:
explicit STAmountSO(bool v) : saved_(getSTAmountCanonicalizeSwitchover())
{
setSTAmountCanonicalizeSwitchover(v);
}
~STAmountSO()
{
setSTAmountCanonicalizeSwitchover(saved_);
}
private:
bool saved_;
};
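// Minimal usage sketch: a scope-bound override that the destructor
// restores automatically.
//
//   {
//       STAmountSO stSwitch(true);  // force the new canonicalize behavior
//       // ... construct or round STAmounts under transaction rules ...
//   }   // prior switchover value restored here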
} // namespace ripple
//------------------------------------------------------------------------------

View File

@@ -81,8 +81,6 @@ using STUInt16 = STInteger<std::uint16_t>;
using STUInt32 = STInteger<std::uint32_t>;
using STUInt64 = STInteger<std::uint64_t>;
using STInt32 = STInteger<std::int32_t>;
template <typename Integer>
inline STInteger<Integer>::STInteger(Integer v) : value_(v)
{

View File

@@ -26,9 +26,7 @@
namespace ripple {
class Rules;
namespace test {
class Invariants_test;
}
class STLedgerEntry final : public STObject, public CountedObject<STLedgerEntry>
{
@@ -38,8 +36,6 @@ class STLedgerEntry final : public STObject, public CountedObject<STLedgerEntry>
public:
using pointer = std::shared_ptr<STLedgerEntry>;
using ref = std::shared_ptr<STLedgerEntry> const&;
using const_pointer = std::shared_ptr<STLedgerEntry const>;
using const_ref = std::shared_ptr<STLedgerEntry const> const&;
/** Create an empty object with the given key and type. */
explicit STLedgerEntry(Keylet const& k);
@@ -58,7 +54,7 @@ public:
getText() const override;
Json::Value
getJson(JsonOptions options = JsonOptions::none) const override;
getJson(JsonOptions options) const override;
/** Returns the 'key' (or 'index') of this item.
The key identifies this entry's position in
@@ -88,8 +84,7 @@ private:
void
setSLEType();
friend test::Invariants_test; // this test wants access to the private
// type_
friend Invariants_test; // this test wants access to the private type_
STBase*
copy(std::size_t n, void* buf) const override;

View File

@@ -25,6 +25,7 @@
#include <xrpl/basics/chrono.h>
#include <xrpl/basics/contract.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/protocol/FeeUnits.h>
#include <xrpl/protocol/HashPrefix.h>
#include <xrpl/protocol/SOTemplate.h>
#include <xrpl/protocol/STAmount.h>
@@ -33,7 +34,6 @@
#include <xrpl/protocol/STIssue.h>
#include <xrpl/protocol/STPathSet.h>
#include <xrpl/protocol/STVector256.h>
#include <xrpl/protocol/Units.h>
#include <xrpl/protocol/detail/STVar.h>
#include <boost/iterator/transform_iterator.hpp>
@@ -231,8 +231,6 @@ public:
getFieldH192(SField const& field) const;
uint256
getFieldH256(SField const& field) const;
std::int32_t
getFieldI32(SField const& field) const;
AccountID
getAccountID(SField const& field) const;
@@ -244,9 +242,6 @@ public:
getFieldPathSet(SField const& field) const;
STVector256 const&
getFieldV256(SField const& field) const;
// If not found, returns an object constructed with the given field
STObject
getFieldObject(SField const& field) const;
STArray const&
getFieldArray(SField const& field) const;
STCurrency const&
@@ -370,8 +365,6 @@ public:
void
setFieldH256(SField const& field, uint256 const&);
void
setFieldI32(SField const& field, std::int32_t);
void
setFieldVL(SField const& field, Blob const&);
void
setFieldVL(SField const& field, Slice const&);
@@ -393,8 +386,6 @@ public:
setFieldV256(SField const& field, STVector256 const& v);
void
setFieldArray(SField const& field, STArray const& v);
void
setFieldObject(SField const& field, STObject const& v);
template <class Tag>
void

View File

@@ -54,6 +54,34 @@ public:
Json::Value error;
};
/** Holds the serialized result of parsing an input JSON array.
This does validation and checking on the provided JSON.
*/
class STParsedJSONArray
{
public:
/** Parses and creates an STParsedJSON array.
The result of the parsing is stored in array and error.
Exceptions:
Does not throw.
@param name The name of the JSON field, used in diagnostics.
@param json The JSON-RPC to parse.
*/
STParsedJSONArray(std::string const& name, Json::Value const& json);
STParsedJSONArray() = delete;
STParsedJSONArray(STParsedJSONArray const&) = delete;
STParsedJSONArray&
operator=(STParsedJSONArray const&) = delete;
~STParsedJSONArray() = default;
/** The STArray if the parse was successful. */
std::optional<STArray> array;
/** On failure, an appropriate set of error values. */
Json::Value error;
};
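// Minimal usage sketch (field name and handlers illustrative):
//
//   STParsedJSONArray parsed("Memos", memosJson);
//   if (parsed.array)
//       consume(*parsed.array);   // hypothetical consumer
//   else
//       log(parsed.error);        // diagnostics as Json::Value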
} // namespace ripple
#endif

View File

@@ -87,14 +87,8 @@ public:
getFullText() const override;
// Outer transaction functions / signature functions.
static Blob
getSignature(STObject const& sigObject);
Blob
getSignature() const
{
return getSignature(*this);
}
getSignature() const;
uint256
getSigningHash() const;
@@ -125,20 +119,13 @@ public:
getJson(JsonOptions options, bool binary) const;
void
sign(
PublicKey const& publicKey,
SecretKey const& secretKey,
std::optional<std::reference_wrapper<SField const>> signatureTarget =
{});
enum class RequireFullyCanonicalSig : bool { no, yes };
sign(PublicKey const& publicKey, SecretKey const& secretKey);
/** Check the signature.
@param requireCanonicalSig If `true`, check that the signature is fully
canonical. If `false`, only check that the signature is valid.
@param rules The current ledger rules.
@return `true` if valid signature. If invalid, the error message string.
*/
enum class RequireFullyCanonicalSig : bool { no, yes };
Expected<void, std::string>
checkSign(RequireFullyCanonicalSig requireCanonicalSig, Rules const& rules)
const;
@@ -163,34 +150,17 @@ public:
char status,
std::string const& escapedMetaData) const;
std::vector<uint256> const&
std::vector<uint256>
getBatchTransactionIDs() const;
private:
/** Check the signature.
@param requireCanonicalSig If `true`, check that the signature is fully
canonical. If `false`, only check that the signature is valid.
@param rules The current ledger rules.
@param sigObject Reference to object that contains the signature fields.
Will be *this more often than not.
@return `true` if valid signature. If invalid, the error message string.
*/
Expected<void, std::string>
checkSign(
RequireFullyCanonicalSig requireCanonicalSig,
Rules const& rules,
STObject const& sigObject) const;
Expected<void, std::string>
checkSingleSign(
RequireFullyCanonicalSig requireCanonicalSig,
STObject const& sigObject) const;
checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const;
Expected<void, std::string>
checkMultiSign(
RequireFullyCanonicalSig requireCanonicalSig,
Rules const& rules,
STObject const& sigObject) const;
Rules const& rules) const;
Expected<void, std::string>
checkBatchSingleSign(
@@ -209,7 +179,7 @@ private:
move(std::size_t n, void* buf) override;
friend class detail::STVar;
mutable std::vector<uint256> batchTxnIds_;
mutable std::vector<uint256> batch_txn_ids_;
};
bool

View File

@@ -22,10 +22,10 @@
#include <xrpl/basics/Log.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/protocol/FeeUnits.h>
#include <xrpl/protocol/PublicKey.h>
#include <xrpl/protocol/STObject.h>
#include <xrpl/protocol/SecretKey.h>
#include <xrpl/protocol/Units.h>
#include <cstdint>
#include <optional>

View File

@@ -73,8 +73,14 @@ static constexpr std::uint32_t XRP_LEDGER_EARLIEST_SEQ{32570u};
* used in asserts and tests. */
static constexpr std::uint32_t XRP_LEDGER_EARLIEST_FEES{562177u};
/** The minimum amount of support an amendment should have. */
constexpr std::ratio<80, 100> amendmentMajorityCalcThreshold;
/** The minimum amount of support an amendment should have.
@note This value is used by legacy code and will become obsolete
once the fixAmendmentMajorityCalc amendment activates.
*/
constexpr std::ratio<204, 256> preFixAmendmentMajorityCalcThreshold;
constexpr std::ratio<80, 100> postFixAmendmentMajorityCalcThreshold;
/** The minimum amount of time an amendment must hold a majority */
constexpr std::chrono::seconds const defaultAmendmentMajorityTime = weeks{2};
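// Illustrative arithmetic: 204/256 = 79.6875% while 80/100 = 80%, so with
// 256 trusted validators the two thresholds sit at 204 versus 204.8
// supporting validators before rounding.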

View File

@@ -141,7 +141,6 @@ enum TEMcodes : TERUnderlyingType {
temARRAY_TOO_LARGE,
temBAD_TRANSFER_FEE,
temINVALID_INNER_BATCH,
temBAD_CIPHERTEXT,
};
//------------------------------------------------------------------------------
@@ -226,9 +225,8 @@ enum TERcodes : TERUnderlyingType {
terQUEUED, // Transaction is being held in TxQ until fee drops
terPRE_TICKET, // Ticket is not yet in ledger but might be on its way
terNO_AMM, // AMM doesn't exist for the asset pair
terADDRESS_COLLISION, // Failed to allocate AccountID when trying to
// create a pseudo-account
terNO_DELEGATE_PERMISSION, // Delegate does not have permission
terADDRESS_COLLISION, // Failed to allocate AccountID when trying to
// create a pseudo-account
};
//------------------------------------------------------------------------------
@@ -363,11 +361,7 @@ enum TECcodes : TERUnderlyingType {
tecLIMIT_EXCEEDED = 195,
tecPSEUDO_ACCOUNT = 196,
tecPRECISION_LOSS = 197,
// DEPRECATED: The error code tecNO_DELEGATE_PERMISSION is reserved for
// backward compatibility with historical data on non-prod networks; it
// can be reclaimed after those networks reset.
tecNO_DELEGATE_PERMISSION = 198,
tecBAD_PROOF = 199
};
//------------------------------------------------------------------------------
@@ -679,8 +673,7 @@ isTerRetry(TER x) noexcept
inline bool
isTesSuccess(TER x) noexcept
{
// Makes use of TERSubset::operator bool()
return !(x);
return (x == tesSUCCESS);
}
inline bool

View File

@@ -127,8 +127,6 @@ constexpr std::uint32_t tfTrustSetPermissionMask = ~(tfUniversal | tfSetfAuth |
// EnableAmendment flags:
constexpr std::uint32_t tfGotMajority = 0x00010000;
constexpr std::uint32_t tfLostMajority = 0x00020000;
constexpr std::uint32_t tfChangeMask =
~( tfUniversal | tfGotMajority | tfLostMajority);
// PaymentChannelClaim flags:
constexpr std::uint32_t tfRenew = 0x00010000;
@@ -143,31 +141,15 @@ constexpr std::uint32_t const tfTransferable = 0x00000008;
constexpr std::uint32_t const tfMutable = 0x00000010;
// MPTokenIssuanceCreate flags:
// Note: tf/lsfMPTLocked is intentionally omitted, since this transaction
// is not allowed to modify it.
// NOTE - there is intentionally no flag here for lsfMPTLocked, which this transaction cannot mutate.
constexpr std::uint32_t const tfMPTCanLock = lsfMPTCanLock;
constexpr std::uint32_t const tfMPTRequireAuth = lsfMPTRequireAuth;
constexpr std::uint32_t const tfMPTCanEscrow = lsfMPTCanEscrow;
constexpr std::uint32_t const tfMPTCanTrade = lsfMPTCanTrade;
constexpr std::uint32_t const tfMPTCanTransfer = lsfMPTCanTransfer;
constexpr std::uint32_t const tfMPTCanClawback = lsfMPTCanClawback;
constexpr std::uint32_t const tfMPTNoConfidentialTransfer = lsfMPTNoConfidentialTransfer;
constexpr std::uint32_t const tfMPTokenIssuanceCreateMask =
~(tfUniversal | tfMPTCanLock | tfMPTRequireAuth | tfMPTCanEscrow | tfMPTCanTrade | tfMPTCanTransfer | tfMPTCanClawback | tfMPTNoConfidentialTransfer);
// MPTokenIssuanceCreate MutableFlags:
// Indicating specific fields or flags may be changed after issuance.
constexpr std::uint32_t const tmfMPTCanMutateCanLock = lsmfMPTCanMutateCanLock;
constexpr std::uint32_t const tmfMPTCanMutateRequireAuth = lsmfMPTCanMutateRequireAuth;
constexpr std::uint32_t const tmfMPTCanMutateCanEscrow = lsmfMPTCanMutateCanEscrow;
constexpr std::uint32_t const tmfMPTCanMutateCanTrade = lsmfMPTCanMutateCanTrade;
constexpr std::uint32_t const tmfMPTCanMutateCanTransfer = lsmfMPTCanMutateCanTransfer;
constexpr std::uint32_t const tmfMPTCanMutateCanClawback = lsmfMPTCanMutateCanClawback;
constexpr std::uint32_t const tmfMPTCanMutateMetadata = lsmfMPTCanMutateMetadata;
constexpr std::uint32_t const tmfMPTCanMutateTransferFee = lsmfMPTCanMutateTransferFee;
constexpr std::uint32_t const tmfMPTokenIssuanceCreateMutableMask =
~(tmfMPTCanMutateCanLock | tmfMPTCanMutateRequireAuth | tmfMPTCanMutateCanEscrow | tmfMPTCanMutateCanTrade
| tmfMPTCanMutateCanTransfer | tmfMPTCanMutateCanClawback | tmfMPTCanMutateMetadata | tmfMPTCanMutateTransferFee);
~(tfUniversal | tfMPTCanLock | tfMPTRequireAuth | tfMPTCanEscrow | tfMPTCanTrade | tfMPTCanTransfer | tfMPTCanClawback);
// MPTokenAuthorize flags:
constexpr std::uint32_t const tfMPTUnauthorize = 0x00000001;
@@ -179,25 +161,6 @@ constexpr std::uint32_t const tfMPTUnlock = 0x00000002;
constexpr std::uint32_t const tfMPTokenIssuanceSetMask = ~(tfUniversal | tfMPTLock | tfMPTUnlock);
constexpr std::uint32_t const tfMPTokenIssuanceSetPermissionMask = ~(tfUniversal | tfMPTLock | tfMPTUnlock);
// MPTokenIssuanceSet MutableFlags:
// Set or Clear flags.
constexpr std::uint32_t const tmfMPTSetCanLock = 0x00000001;
constexpr std::uint32_t const tmfMPTClearCanLock = 0x00000002;
constexpr std::uint32_t const tmfMPTSetRequireAuth = 0x00000004;
constexpr std::uint32_t const tmfMPTClearRequireAuth = 0x00000008;
constexpr std::uint32_t const tmfMPTSetCanEscrow = 0x00000010;
constexpr std::uint32_t const tmfMPTClearCanEscrow = 0x00000020;
constexpr std::uint32_t const tmfMPTSetCanTrade = 0x00000040;
constexpr std::uint32_t const tmfMPTClearCanTrade = 0x00000080;
constexpr std::uint32_t const tmfMPTSetCanTransfer = 0x00000100;
constexpr std::uint32_t const tmfMPTClearCanTransfer = 0x00000200;
constexpr std::uint32_t const tmfMPTSetCanClawback = 0x00000400;
constexpr std::uint32_t const tmfMPTClearCanClawback = 0x00000800;
constexpr std::uint32_t const tmfMPTokenIssuanceSetMutableMask = ~(tmfMPTSetCanLock | tmfMPTClearCanLock |
tmfMPTSetRequireAuth | tmfMPTClearRequireAuth | tmfMPTSetCanEscrow | tmfMPTClearCanEscrow |
tmfMPTSetCanTrade | tmfMPTClearCanTrade | tmfMPTSetCanTransfer | tmfMPTClearCanTransfer |
tmfMPTSetCanClawback | tmfMPTClearCanClawback);
// MPTokenIssuanceDestroy flags:
constexpr std::uint32_t const tfMPTokenIssuanceDestroyMask = ~tfUniversal;

View File

@@ -59,7 +59,7 @@ enum TxType : std::uint16_t
#pragma push_macro("TRANSACTION")
#undef TRANSACTION
#define TRANSACTION(tag, value, ...) tag = value,
#define TRANSACTION(tag, value, name, delegatable, fields) tag = value,
#include <xrpl/protocol/detail/transactions.macro>

View File

@@ -33,35 +33,51 @@ namespace ripple {
class TxMeta
{
private:
struct CtorHelper
{
explicit CtorHelper() = default;
};
template <class T>
TxMeta(
uint256 const& txID,
std::uint32_t ledger,
T const& data,
CtorHelper);
public:
TxMeta(uint256 const& transactionID, std::uint32_t ledger);
TxMeta(
uint256 const& transactionID,
std::uint32_t ledger,
std::optional<uint256> parentBatchId = std::nullopt);
TxMeta(uint256 const& txID, std::uint32_t ledger, Blob const&);
TxMeta(uint256 const& txID, std::uint32_t ledger, std::string const&);
TxMeta(uint256 const& txID, std::uint32_t ledger, STObject const&);
uint256 const&
getTxID() const
{
return transactionID_;
return mTransactionID;
}
std::uint32_t
getLgrSeq() const
{
return ledgerSeq_;
return mLedger;
}
int
getResult() const
{
return result_;
return mResult;
}
TER
getResultTER() const
{
return TER::fromInt(result_);
return TER::fromInt(mResult);
}
std::uint32_t
getIndex() const
{
return index_;
return mIndex;
}
void
@@ -88,52 +104,66 @@ public:
STArray&
getNodes()
{
return nodes_;
return (mNodes);
}
STArray const&
getNodes() const
{
return nodes_;
return (mNodes);
}
void
setAdditionalFields(STObject const& obj)
setDeliveredAmount(STAmount const& delivered)
{
if (obj.isFieldPresent(sfDeliveredAmount))
deliveredAmount_ = obj.getFieldAmount(sfDeliveredAmount);
if (obj.isFieldPresent(sfParentBatchID))
parentBatchID_ = obj.getFieldH256(sfParentBatchID);
mDelivered = delivered;
}
std::optional<STAmount> const&
STAmount
getDeliveredAmount() const
{
return deliveredAmount_;
XRPL_ASSERT(
hasDeliveredAmount(),
"ripple::TxMeta::getDeliveredAmount : non-null delivered amount");
return *mDelivered;
}
bool
hasDeliveredAmount() const
{
return static_cast<bool>(mDelivered);
}
void
setDeliveredAmount(std::optional<STAmount> const& amount)
setParentBatchId(uint256 const& parentBatchId)
{
deliveredAmount_ = amount;
mParentBatchId = parentBatchId;
}
void
setParentBatchID(std::optional<uint256> const& id)
uint256
getParentBatchId() const
{
parentBatchID_ = id;
XRPL_ASSERT(
hasParentBatchId(),
"ripple::TxMeta::getParentBatchId : non-null batch id");
return *mParentBatchId;
}
bool
hasParentBatchId() const
{
return static_cast<bool>(mParentBatchId);
}
private:
uint256 transactionID_;
std::uint32_t ledgerSeq_;
std::uint32_t index_;
int result_;
uint256 mTransactionID;
std::uint32_t mLedger;
std::uint32_t mIndex;
int mResult;
std::optional<STAmount> deliveredAmount_;
std::optional<uint256> parentBatchID_;
std::optional<STAmount> mDelivered;
std::optional<uint256> mParentBatchId;
STArray nodes_;
STArray mNodes;
};
} // namespace ripple
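A caller-side sketch of the optional metadata accessors above (identifiers illustrative; assumes the has/get accessor pair):

    TxMeta meta(txID, ledgerSeq);
    if (meta.hasDeliveredAmount())
        STAmount const delivered = meta.getDeliveredAmount();
    if (meta.hasParentBatchId())
        uint256 const batch = meta.getParentBatchId();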

View File

@@ -1,555 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2019 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef PROTOCOL_UNITS_H_INCLUDED
#define PROTOCOL_UNITS_H_INCLUDED
#include <xrpl/basics/safe_cast.h>
#include <xrpl/beast/utility/Zero.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/json/json_value.h>
#include <boost/multiprecision/cpp_int.hpp>
#include <boost/operators.hpp>
#include <iosfwd>
#include <limits>
#include <optional>
namespace ripple {
namespace unit {
/** "drops" are the smallest divisible amount of XRP. This is what most
of the code uses. */
struct dropTag;
/** "fee levels" are used by the transaction queue to compare the relative
cost of transactions that require different levels of effort to process.
See also: src/ripple/app/misc/FeeEscalation.md#fee-level */
struct feelevelTag;
/** unitless values are plain scalars wrapped in a ValueUnit. They are
used for calculations in this header. */
struct unitlessTag;
/** Units to represent basis points (bips) and 1/10 basis points */
class BipsTag;
class TenthBipsTag;
// These names don't have to be too descriptive, because we're in the "unit"
// namespace.
template <class T>
concept Valid = std::is_class_v<T> && std::is_object_v<typename T::unit_type> &&
std::is_object_v<typename T::value_type>;
/** `Usable` is checked to ensure that only values with
known valid type tags can be used (sometimes transparently) in
non-unit contexts. At the time of implementation, this includes
all known tags, but more may be added in the future, and they
should not be added automatically unless determined to be
appropriate.
*/
template <class T>
concept Usable = Valid<T> &&
(std::is_same_v<typename T::unit_type, feelevelTag> ||
std::is_same_v<typename T::unit_type, unitlessTag> ||
std::is_same_v<typename T::unit_type, dropTag> ||
std::is_same_v<typename T::unit_type, BipsTag> ||
std::is_same_v<typename T::unit_type, TenthBipsTag>);
template <class Other, class VU>
concept Compatible = Valid<VU> && std::is_arithmetic_v<Other> &&
std::is_arithmetic_v<typename VU::value_type> &&
std::is_convertible_v<Other, typename VU::value_type>;
template <class T>
concept Integral = std::is_integral_v<T>;
template <class VU>
concept IntegralValue = Integral<typename VU::value_type>;
template <class VU1, class VU2>
concept CastableValue = IntegralValue<VU1> && IntegralValue<VU2> &&
std::is_same_v<typename VU1::unit_type, typename VU2::unit_type>;
template <class UnitTag, class T>
class ValueUnit : private boost::totally_ordered<ValueUnit<UnitTag, T>>,
private boost::additive<ValueUnit<UnitTag, T>>,
private boost::equality_comparable<ValueUnit<UnitTag, T>, T>,
private boost::dividable<ValueUnit<UnitTag, T>, T>,
private boost::modable<ValueUnit<UnitTag, T>, T>,
private boost::unit_steppable<ValueUnit<UnitTag, T>>
{
public:
using unit_type = UnitTag;
using value_type = T;
private:
value_type value_;
public:
ValueUnit() = default;
constexpr ValueUnit(ValueUnit const& other) = default;
constexpr ValueUnit&
operator=(ValueUnit const& other) = default;
constexpr explicit ValueUnit(beast::Zero) : value_(0)
{
}
constexpr ValueUnit&
operator=(beast::Zero)
{
value_ = 0;
return *this;
}
constexpr explicit ValueUnit(value_type value) : value_(value)
{
}
constexpr ValueUnit&
operator=(value_type value)
{
value_ = value;
return *this;
}
/** Instances with the same unit, and a type that is
"safe" to convert to this one can be converted
implicitly */
template <Compatible<ValueUnit> Other>
constexpr ValueUnit(ValueUnit<unit_type, Other> const& value)
requires SafeToCast<Other, value_type>
: ValueUnit(safe_cast<value_type>(value.value()))
{
}
constexpr ValueUnit
operator+(value_type const& rhs) const
{
return ValueUnit{value_ + rhs};
}
friend constexpr ValueUnit
operator+(value_type lhs, ValueUnit const& rhs)
{
// addition is commutative
return rhs + lhs;
}
constexpr ValueUnit
operator-(value_type const& rhs) const
{
return ValueUnit{value_ - rhs};
}
friend constexpr ValueUnit
operator-(value_type lhs, ValueUnit const& rhs)
{
// subtraction is NOT commutative, but (lhs + (-rhs)) is addition,
// which is commutative, so implement it that way
return -rhs + lhs;
}
constexpr ValueUnit
operator*(value_type const& rhs) const
{
return ValueUnit{value_ * rhs};
}
friend constexpr ValueUnit
operator*(value_type lhs, ValueUnit const& rhs)
{
// multiplication is commutative
return rhs * lhs;
}
constexpr value_type
operator/(ValueUnit const& rhs) const
{
return value_ / rhs.value_;
}
ValueUnit&
operator+=(ValueUnit const& other)
{
value_ += other.value();
return *this;
}
ValueUnit&
operator-=(ValueUnit const& other)
{
value_ -= other.value();
return *this;
}
ValueUnit&
operator++()
{
++value_;
return *this;
}
ValueUnit&
operator--()
{
--value_;
return *this;
}
ValueUnit&
operator*=(value_type const& rhs)
{
value_ *= rhs;
return *this;
}
ValueUnit&
operator/=(value_type const& rhs)
{
value_ /= rhs;
return *this;
}
template <Integral transparent = value_type>
ValueUnit&
operator%=(value_type const& rhs)
{
value_ %= rhs;
return *this;
}
ValueUnit
operator-() const
{
static_assert(
std::is_signed_v<T>, "- operator illegal on unsigned value types");
return ValueUnit{-value_};
}
constexpr bool
operator==(ValueUnit const& other) const
{
return value_ == other.value_;
}
template <Compatible<ValueUnit> Other>
constexpr bool
operator==(ValueUnit<unit_type, Other> const& other) const
{
return value_ == other.value();
}
constexpr bool
operator==(value_type other) const
{
return value_ == other;
}
template <Compatible<ValueUnit> Other>
constexpr bool
operator!=(ValueUnit<unit_type, Other> const& other) const
{
return !operator==(other);
}
constexpr bool
operator<(ValueUnit const& other) const
{
return value_ < other.value_;
}
/** Returns true if the amount is not zero */
explicit constexpr
operator bool() const noexcept
{
return value_ != 0;
}
/** Return the sign of the amount */
constexpr int
signum() const noexcept
{
return (value_ < 0) ? -1 : (value_ ? 1 : 0);
}
/** Returns the number of drops */
// TODO: Move this to a new class, maybe with the old "TaggedFee" name
constexpr value_type
fee() const
{
return value_;
}
template <class Other>
constexpr double
decimalFromReference(ValueUnit<unit_type, Other> reference) const
{
return static_cast<double>(value_) / reference.value();
}
// `Usable` is checked to ensure that only values with
// known valid type tags can be converted to JSON. At the time
// of implementation, that includes all known tags, but more may
// be added in the future.
Json::Value
jsonClipped() const
requires Usable<ValueUnit>
{
if constexpr (std::is_integral_v<value_type>)
{
using jsontype = std::conditional_t<
std::is_signed_v<value_type>,
Json::Int,
Json::UInt>;
constexpr auto min = std::numeric_limits<jsontype>::min();
constexpr auto max = std::numeric_limits<jsontype>::max();
if (value_ < min)
return min;
if (value_ > max)
return max;
return static_cast<jsontype>(value_);
}
else
{
return value_;
}
}
/** Returns the underlying value. Code SHOULD NOT call this
function unless the type has been abstracted away,
e.g. in a templated function.
*/
constexpr value_type
value() const
{
return value_;
}
friend std::istream&
operator>>(std::istream& s, ValueUnit& val)
{
s >> val.value_;
return s;
}
};
// Output Values as just their numeric value.
template <class Char, class Traits, class UnitTag, class T>
std::basic_ostream<Char, Traits>&
operator<<(std::basic_ostream<Char, Traits>& os, ValueUnit<UnitTag, T> const& q)
{
return os << q.value();
}
template <class UnitTag, class T>
std::string
to_string(ValueUnit<UnitTag, T> const& amount)
{
return std::to_string(amount.value());
}
template <class Source>
concept muldivSource = Valid<Source> &&
std::is_convertible_v<typename Source::value_type, std::uint64_t>;
template <class Dest>
concept muldivDest = muldivSource<Dest> && // Dest is also a source
std::is_convertible_v<std::uint64_t, typename Dest::value_type> &&
sizeof(typename Dest::value_type) >= sizeof(std::uint64_t);
template <class Source2, class Source1>
concept muldivSources = muldivSource<Source1> && muldivSource<Source2> &&
std::is_same_v<typename Source1::unit_type, typename Source2::unit_type>;
template <class Dest, class Source1, class Source2>
concept muldivable = muldivSources<Source1, Source2> && muldivDest<Dest>;
// Source and Dest can be the same by default
template <class Dest, class Source1, class Source2>
concept muldivCommutable = muldivable<Dest, Source1, Source2> &&
!std::is_same_v<typename Source1::unit_type, typename Dest::unit_type>;
template <class T>
ValueUnit<unitlessTag, T>
scalar(T value)
{
return ValueUnit<unitlessTag, T>{value};
}
template <class Source1, class Source2, unit::muldivable<Source1, Source2> Dest>
std::optional<Dest>
mulDivU(Source1 value, Dest mul, Source2 div)
{
// values can never be negative in any context.
if (value.value() < 0 || mul.value() < 0 || div.value() < 0)
{
// split the asserts so if one hits, the user can tell which
// without a debugger.
XRPL_ASSERT(
value.value() >= 0, "ripple::unit::mulDivU : minimum value input");
XRPL_ASSERT(
mul.value() >= 0, "ripple::unit::mulDivU : minimum mul input");
XRPL_ASSERT(
div.value() > 0, "ripple::unit::mulDivU : minimum div input");
return std::nullopt;
}
using desttype = typename Dest::value_type;
constexpr auto max = std::numeric_limits<desttype>::max();
// Shortcuts, since these happen a lot in the real world
if (value == div)
return mul;
if (mul.value() == div.value())
{
if (value.value() > max)
return std::nullopt;
return Dest{static_cast<desttype>(value.value())};
}
using namespace boost::multiprecision;
uint128_t product;
product = multiply(
product,
static_cast<std::uint64_t>(value.value()),
static_cast<std::uint64_t>(mul.value()));
auto quotient = product / div.value();
if (quotient > max)
return std::nullopt;
return Dest{static_cast<desttype>(quotient)};
}
} // namespace unit
// Fee Levels
template <class T>
using FeeLevel = unit::ValueUnit<unit::feelevelTag, T>;
using FeeLevel64 = FeeLevel<std::uint64_t>;
using FeeLevelDouble = FeeLevel<double>;
// Basis points (Bips)
template <class T>
using Bips = unit::ValueUnit<unit::BipsTag, T>;
using Bips16 = Bips<std::uint16_t>;
using Bips32 = Bips<std::uint32_t>;
template <class T>
using TenthBips = unit::ValueUnit<unit::TenthBipsTag, T>;
using TenthBips16 = TenthBips<std::uint16_t>;
using TenthBips32 = TenthBips<std::uint32_t>;
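// Illustrative: the distinct tags keep basis points and tenth-bips from
// mixing silently.
//
//   Bips16 fee{25};          // 25 bps == 0.25%
//   TenthBips32 tfee{250};   // the same rate in 1/10 bps
//   static_assert(!std::is_same_v<decltype(fee), decltype(tfee)>);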
template <class Source1, class Source2, unit::muldivable<Source1, Source2> Dest>
std::optional<Dest>
mulDiv(Source1 value, Dest mul, Source2 div)
{
return unit::mulDivU(value, mul, div);
}
template <
class Source1,
class Source2,
unit::muldivCommutable<Source1, Source2> Dest>
std::optional<Dest>
mulDiv(Dest value, Source1 mul, Source2 div)
{
// Multiplication is commutative
return unit::mulDivU(mul, value, div);
}
template <unit::muldivDest Dest>
std::optional<Dest>
mulDiv(std::uint64_t value, Dest mul, std::uint64_t div)
{
// Give the scalars a non-tag so the
// unit-handling version gets called.
return unit::mulDivU(unit::scalar(value), mul, unit::scalar(div));
}
template <unit::muldivDest Dest>
std::optional<Dest>
mulDiv(Dest value, std::uint64_t mul, std::uint64_t div)
{
// Multiplication is commutative
return mulDiv(mul, value, div);
}
template <unit::muldivSource Source1, unit::muldivSources<Source1> Source2>
std::optional<std::uint64_t>
mulDiv(Source1 value, std::uint64_t mul, Source2 div)
{
// Give the scalars a dimensionless unit so the
// unit-handling version gets called.
auto unitresult = unit::mulDivU(value, unit::scalar(mul), div);
if (!unitresult)
return std::nullopt;
return unitresult->value();
}
template <unit::muldivSource Source1, unit::muldivSources<Source1> Source2>
std::optional<std::uint64_t>
mulDiv(std::uint64_t value, Source1 mul, Source2 div)
{
// Multiplication is commutative
return mulDiv(mul, value, div);
}
template <unit::IntegralValue Dest, unit::CastableValue<Dest> Src>
constexpr Dest
safe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{safe_cast<typename Dest::value_type>(s.value())};
}
template <unit::IntegralValue Dest, unit::Integral Src>
constexpr Dest
safe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{safe_cast<typename Dest::value_type>(s)};
}
template <unit::IntegralValue Dest, unit::CastableValue<Dest> Src>
constexpr Dest
unsafe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{unsafe_cast<typename Dest::value_type>(s.value())};
}
template <unit::IntegralValue Dest, unit::Integral Src>
constexpr Dest
unsafe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{unsafe_cast<typename Dest::value_type>(s)};
}
} // namespace ripple
#endif // PROTOCOL_UNITS_H_INCLUDED

View File

@@ -24,7 +24,7 @@
#include <xrpl/basics/contract.h>
#include <xrpl/beast/utility/Zero.h>
#include <xrpl/json/json_value.h>
#include <xrpl/protocol/Units.h>
#include <xrpl/protocol/FeeUnits.h>
#include <boost/multiprecision/cpp_int.hpp>
#include <boost/operators.hpp>
@@ -42,7 +42,7 @@ class XRPAmount : private boost::totally_ordered<XRPAmount>,
private boost::additive<XRPAmount, std::int64_t>
{
public:
using unit_type = unit::dropTag;
using unit_type = feeunit::dropTag;
using value_type = std::int64_t;
private:

View File

@@ -129,12 +129,10 @@ inplace_bigint_div_rem(std::span<uint64_t> numerator, std::uint64_t divisor)
{
// should never happen, but if it does then it seems natural to define
// a null set of numbers to be zero, so the remainder is also zero.
// LCOV_EXCL_START
UNREACHABLE(
"ripple::b58_fast::detail::inplace_bigint_div_rem : empty "
"numerator");
return 0;
// LCOV_EXCL_STOP
}
auto to_u128 = [](std::uint64_t high,

View File

@@ -29,22 +29,19 @@
// Add new amendments to the top of this list.
// Keep it sorted in reverse chronological order.
// If you add an amendment here, then do not forget to increment `numFeatures`
// in include/xrpl/protocol/Feature.h.
XRPL_FEATURE(ConfidentialTransfer, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegationV1_1, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (DirectoryLimit, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (IncludeKeyletFields, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (PriceOracleOrder, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
// Check flags in Credential transactions
XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo)
@@ -78,24 +75,45 @@ XRPL_FIX (DisallowIncomingV1, Supported::yes, VoteBehavior::DefaultNo
XRPL_FEATURE(XChainBridge, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(AMM, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(Clawback, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (ReducedOffersV1, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (NFTokenRemint, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (NonFungibleTokensV1_2, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (UniversalNumber, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(XRPFees, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(DisallowIncoming, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(ImmediateOfferKilled, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (RemoveNFTokenAutoTrustLine, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (TrustLinesToSelf, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(NonFungibleTokensV1_1, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(ExpandedSignerList, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(CheckCashMakesTrustLine, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (RmSmallIncreasedQOffers, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (STAmountCanonicalize, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(FlowSortStrands, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(TicketBatch, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(NegativeUNL, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (AmendmentMajorityCalc, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(HardenedValidations, Supported::yes, VoteBehavior::DefaultYes)
// fix1781: XRPEndpointSteps should be included in the circular payment check
XRPL_FIX (1781, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(RequireFullyCanonicalSig, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (QualityUpperBound, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DeletableAccounts, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (PayChanRecipientOwnerDir, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (CheckThreading, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (MasterKeyAsRegularKey, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (TakerDryOfferRemoval, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(MultiSignReserve, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1578, Supported::yes, VoteBehavior::DefaultYes)
// fix1515: Use liquidity from strands that consume max offers, but mark as dry
XRPL_FIX (1515, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DepositPreauth, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1623, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1543, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1571, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Checks, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DepositAuth, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1513, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Flow, Supported::yes, VoteBehavior::DefaultYes)
// The following amendments are obsolete, but must remain supported
@@ -109,46 +127,28 @@ XRPL_FEATURE(Flow, Supported::yes, VoteBehavior::DefaultYe
//
// If a feature remains obsolete for long enough that no clients are able
// to vote for it, the feature can be removed (entirely?) from the code.
XRPL_FIX (NFTokenNegOffer, Supported::yes, VoteBehavior::Obsolete)
XRPL_FIX (NFTokenDirV1, Supported::yes, VoteBehavior::Obsolete)
XRPL_FEATURE(NonFungibleTokensV1, Supported::yes, VoteBehavior::Obsolete)
XRPL_FEATURE(CryptoConditionsSuite, Supported::yes, VoteBehavior::Obsolete)
// The following amendments have been active for at least two years. Their
// pre-amendment code has been removed and the identifiers are deprecated.
// All known amendments and amendments that may appear in a validated ledger
// must be registered either here or above with the "active" amendments
//
// Please keep this list sorted alphabetically for convenience.
XRPL_RETIRE(fix1201)
// All known amendments and amendments that may appear in a validated
// ledger must be registered either here or above with the "active" amendments
XRPL_RETIRE(MultiSign)
XRPL_RETIRE(TrustSetAuth)
XRPL_RETIRE(FeeEscalation)
XRPL_RETIRE(PayChan)
XRPL_RETIRE(CryptoConditions)
XRPL_RETIRE(TickSize)
XRPL_RETIRE(fix1368)
XRPL_RETIRE(Escrow)
XRPL_RETIRE(fix1373)
XRPL_RETIRE(EnforceInvariants)
XRPL_RETIRE(SortedDirectories)
XRPL_RETIRE(fix1201)
XRPL_RETIRE(fix1512)
XRPL_RETIRE(fix1513)
XRPL_RETIRE(fix1515)
XRPL_RETIRE(fix1523)
XRPL_RETIRE(fix1528)
XRPL_RETIRE(fix1543)
XRPL_RETIRE(fix1571)
XRPL_RETIRE(fix1578)
XRPL_RETIRE(fix1623)
XRPL_RETIRE(fix1781)
XRPL_RETIRE(fixAmendmentMajorityCalc)
XRPL_RETIRE(fixCheckThreading)
XRPL_RETIRE(fixNonFungibleTokensV1_2)
XRPL_RETIRE(fixNFTokenRemint)
XRPL_RETIRE(fixMasterKeyAsRegularKey)
XRPL_RETIRE(fixQualityUpperBound)
XRPL_RETIRE(fixReducedOffersV1)
XRPL_RETIRE(fixRmSmallIncreasedQOffers)
XRPL_RETIRE(fixSTAmountCanonicalize)
XRPL_RETIRE(fixTakerDryOfferRemoval)
XRPL_RETIRE(CryptoConditions)
XRPL_RETIRE(Escrow)
XRPL_RETIRE(EnforceInvariants)
XRPL_RETIRE(FeeEscalation)
XRPL_RETIRE(FlowCross)
XRPL_RETIRE(ImmediateOfferKilled)
XRPL_RETIRE(MultiSign)
XRPL_RETIRE(NonFungibleTokensV1_1)
XRPL_RETIRE(PayChan)
XRPL_RETIRE(SortedDirectories)
XRPL_RETIRE(TickSize)
XRPL_RETIRE(TrustSetAuth)

View File

@@ -120,7 +120,6 @@ LEDGER_ENTRY(ltNFTOKEN_PAGE, 0x0050, NFTokenPage, nft_page, ({
// All fields are soeREQUIRED because there is always a SignerEntries.
// If there are no SignerEntries the node is deleted.
LEDGER_ENTRY(ltSIGNER_LIST, 0x0053, SignerList, signer_list, ({
{sfOwner, soeOPTIONAL},
{sfOwnerNode, soeREQUIRED},
{sfSignerQuorum, soeREQUIRED},
{sfSignerEntries, soeREQUIRED},
@@ -189,7 +188,7 @@ LEDGER_ENTRY(ltDIR_NODE, 0x0064, DirectoryNode, directory, ({
{sfNFTokenID, soeOPTIONAL},
{sfPreviousTxnID, soeOPTIONAL},
{sfPreviousTxnLgrSeq, soeOPTIONAL},
{sfDomainID, soeOPTIONAL} // order book directories
{sfDomainID, soeOPTIONAL}
}))
/** The ledger object which lists details about amendments on the network.
@@ -344,7 +343,6 @@ LEDGER_ENTRY(ltXCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID, 0x0074, XChainOwnedCreateAc
*/
LEDGER_ENTRY(ltESCROW, 0x0075, Escrow, escrow, ({
{sfAccount, soeREQUIRED},
{sfSequence, soeOPTIONAL},
{sfDestination, soeREQUIRED},
{sfAmount, soeREQUIRED},
{sfCondition, soeOPTIONAL},
@@ -367,7 +365,6 @@ LEDGER_ENTRY(ltESCROW, 0x0075, Escrow, escrow, ({
LEDGER_ENTRY(ltPAYCHAN, 0x0078, PayChannel, payment_channel, ({
{sfAccount, soeREQUIRED},
{sfDestination, soeREQUIRED},
{sfSequence, soeOPTIONAL},
{sfAmount, soeREQUIRED},
{sfBalance, soeREQUIRED},
{sfPublicKey, soeREQUIRED},
@@ -415,9 +412,6 @@ LEDGER_ENTRY(ltMPTOKEN_ISSUANCE, 0x007e, MPTokenIssuance, mpt_issuance, ({
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfDomainID, soeOPTIONAL},
{sfMutableFlags, soeDEFAULT},
{sfIssuerElGamalPublicKey, soeOPTIONAL},
{sfConfidentialOutstandingAmount, soeDEFAULT},
}))
/** A ledger object which tracks MPToken
@@ -431,11 +425,6 @@ LEDGER_ENTRY(ltMPTOKEN, 0x007f, MPToken, mptoken, ({
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfConfidentialBalanceInbox, soeOPTIONAL},
{sfConfidentialBalanceSpending, soeOPTIONAL},
{sfConfidentialBalanceVersion, soeDEFAULT},
{sfIssuerEncryptedBalance, soeOPTIONAL},
{sfHolderElGamalPublicKey, soeOPTIONAL},
}))
/** A ledger object which tracks Oracle
@@ -443,7 +432,6 @@ LEDGER_ENTRY(ltMPTOKEN, 0x007f, MPToken, mptoken, ({
*/
LEDGER_ENTRY(ltORACLE, 0x0080, Oracle, oracle, ({
{sfOwner, soeREQUIRED},
{sfOracleDocumentID, soeOPTIONAL},
{sfProvider, soeREQUIRED},
{sfPriceDataSeries, soeREQUIRED},
{sfAssetClass, soeREQUIRED},
@@ -464,7 +452,7 @@ LEDGER_ENTRY(ltCREDENTIAL, 0x0081, Credential, credential, ({
{sfExpiration, soeOPTIONAL},
{sfURI, soeOPTIONAL},
{sfIssuerNode, soeREQUIRED},
{sfSubjectNode, soeOPTIONAL},
{sfSubjectNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
@@ -511,7 +499,6 @@ LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({
{sfLossUnrealized, soeREQUIRED},
{sfShareMPTID, soeREQUIRED},
{sfWithdrawalPolicy, soeREQUIRED},
{sfScale, soeDEFAULT},
// no SharesTotal ever (use MPTIssuance.sfOutstandingAmount)
// no PermissionedDomainID ever (use MPTIssuance.sfDomainID)
}))

View File

@@ -114,8 +114,6 @@ TYPED_SFIELD(sfVoteWeight, UINT32, 48)
TYPED_SFIELD(sfFirstNFTokenSequence, UINT32, 50)
TYPED_SFIELD(sfOracleDocumentID, UINT32, 51)
TYPED_SFIELD(sfPermissionValue, UINT32, 52)
TYPED_SFIELD(sfMutableFlags, UINT32, 53)
TYPED_SFIELD(sfConfidentialBalanceVersion, UINT32, 54)
// 64-bit integers (common)
TYPED_SFIELD(sfIndexNext, UINT64, 1)
@@ -147,7 +145,6 @@ TYPED_SFIELD(sfMPTAmount, UINT64, 26, SField::sMD_BaseTen|SFie
TYPED_SFIELD(sfIssuerNode, UINT64, 27)
TYPED_SFIELD(sfSubjectNode, UINT64, 28)
TYPED_SFIELD(sfLockedAmount, UINT64, 29, SField::sMD_BaseTen|SField::sMD_Default)
TYPED_SFIELD(sfConfidentialOutstandingAmount, UINT64, 30, SField::sMD_BaseTen|SField::sMD_Default)
// 128-bit
TYPED_SFIELD(sfEmailHash, UINT128, 1)
@@ -176,8 +173,7 @@ TYPED_SFIELD(sfNFTokenID, UINT256, 10)
TYPED_SFIELD(sfEmitParentTxnID, UINT256, 11)
TYPED_SFIELD(sfEmitNonce, UINT256, 12)
TYPED_SFIELD(sfEmitHookHash, UINT256, 13)
TYPED_SFIELD(sfAMMID, UINT256, 14,
SField::sMD_PseudoAccount | SField::sMD_Default)
TYPED_SFIELD(sfAMMID, UINT256, 14)
// 256-bit (uncommon)
TYPED_SFIELD(sfBookDirectory, UINT256, 16)
@@ -199,8 +195,7 @@ TYPED_SFIELD(sfHookHash, UINT256, 31)
TYPED_SFIELD(sfHookNamespace, UINT256, 32)
TYPED_SFIELD(sfHookSetTxnID, UINT256, 33)
TYPED_SFIELD(sfDomainID, UINT256, 34)
TYPED_SFIELD(sfVaultID, UINT256, 35,
SField::sMD_PseudoAccount | SField::sMD_Default)
TYPED_SFIELD(sfVaultID, UINT256, 35)
TYPED_SFIELD(sfParentBatchID, UINT256, 36)
// number (common)
@@ -210,12 +205,6 @@ TYPED_SFIELD(sfAssetsMaximum, NUMBER, 3)
TYPED_SFIELD(sfAssetsTotal, NUMBER, 4)
TYPED_SFIELD(sfLossUnrealized, NUMBER, 5)
// int32
// NOTE: Do not use `sfDummyInt32`. It's so far the only use of INT32
// in this file and has been defined here for test only.
// TODO: Replace `sfDummyInt32` with actually useful field.
TYPED_SFIELD(sfDummyInt32, INT32, 1) // for tests only
// currency amount (common)
TYPED_SFIELD(sfAmount, AMOUNT, 1)
TYPED_SFIELD(sfBalance, AMOUNT, 2)
@@ -286,16 +275,6 @@ TYPED_SFIELD(sfAssetClass, VL, 28)
TYPED_SFIELD(sfProvider, VL, 29)
TYPED_SFIELD(sfMPTokenMetadata, VL, 30)
TYPED_SFIELD(sfCredentialType, VL, 31)
TYPED_SFIELD(sfConfidentialBalanceInbox, VL, 32)
TYPED_SFIELD(sfConfidentialBalanceSpending, VL, 33)
TYPED_SFIELD(sfIssuerEncryptedBalance, VL, 34)
TYPED_SFIELD(sfIssuerElGamalPublicKey, VL, 35)
TYPED_SFIELD(sfHolderElGamalPublicKey, VL, 36)
TYPED_SFIELD(sfZKProof, VL, 37)
TYPED_SFIELD(sfHolderEncryptedAmount, VL, 38)
TYPED_SFIELD(sfIssuerEncryptedAmount, VL, 39)
TYPED_SFIELD(sfSenderEncryptedAmount, VL, 40)
TYPED_SFIELD(sfDestinationEncryptedAmount, VL, 41)
// account (common)
TYPED_SFIELD(sfAccount, ACCOUNT, 1)

File diff suppressed because it is too large

View File

@@ -569,7 +569,6 @@ JSS(settle_delay); // out: AccountChannels
JSS(severity); // in: LogLevel
JSS(shares); // out: VaultInfo
JSS(signature); // out: NetworkOPs, ChannelAuthorize
JSS(signature_target); // in: TransactionSign
JSS(signature_verified); // out: ChannelVerify
JSS(signing_key); // out: NetworkOPs
JSS(signing_keys); // out: ValidatorList
@@ -711,7 +710,7 @@ JSS(write_load); // out: GetCounts
#pragma push_macro("TRANSACTION")
#undef TRANSACTION
#define TRANSACTION(tag, value, name, ...) JSS(name);
#define TRANSACTION(tag, value, name, delegatable, fields) JSS(name);
#include <xrpl/protocol/detail/transactions.macro>
@@ -723,11 +722,11 @@ JSS(write_load); // out: GetCounts
#pragma push_macro("LEDGER_ENTRY_DUPLICATE")
#undef LEDGER_ENTRY_DUPLICATE
#define LEDGER_ENTRY(tag, value, name, rpcName, ...) \
JSS(name); \
#define LEDGER_ENTRY(tag, value, name, rpcName, fields) \
JSS(name); \
JSS(rpcName);
#define LEDGER_ENTRY_DUPLICATE(tag, value, name, rpcName, ...) JSS(rpcName);
#define LEDGER_ENTRY_DUPLICATE(tag, value, name, rpcName, fields) JSS(rpcName);
#include <xrpl/protocol/detail/ledger_entries.macro>

View File

@@ -21,7 +21,6 @@
#define RIPPLE_RESOURCE_CONSUMER_H_INCLUDED
#include <xrpl/basics/Log.h>
#include <xrpl/protocol/PublicKey.h>
#include <xrpl/resource/Charge.h>
#include <xrpl/resource/Disposition.h>
@@ -88,9 +87,6 @@ public:
Entry&
entry();
void
setPublicKey(PublicKey const& publicKey);
private:
Logic* m_logic;
Entry* m_entry;

View File

@@ -53,7 +53,7 @@ struct Entry : public beast::List<Entry>::Node
std::string
to_string() const
{
return getFingerprint(key->address, publicKey);
return key->address.to_string();
}
/**
@@ -82,9 +82,6 @@ struct Entry : public beast::List<Entry>::Node
return local_balance.add(charge, now) + remote_balance;
}
// The public key of the peer
std::optional<PublicKey> publicKey;
// Back pointer to the map key (bit of a hack here)
Key const* key;

View File

@@ -436,12 +436,10 @@ public:
admin_.erase(admin_.iterator_to(entry));
break;
default:
// LCOV_EXCL_START
UNREACHABLE(
"ripple::Resource::Logic::release : invalid entry "
"kind");
break;
// LCOV_EXCL_STOP
}
inactive_.push_back(entry);
entry.whenExpires = m_clock.now() + secondsUntilExpiration;

View File

@@ -30,29 +30,15 @@
#include <boost/asio/buffer.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/beast/core/detect_ssl.hpp>
#include <boost/beast/core/multi_buffer.hpp>
#include <boost/beast/core/tcp_stream.hpp>
#include <boost/container/flat_map.hpp>
#include <boost/predef.h>
#if !BOOST_OS_WINDOWS
#include <sys/resource.h>
#include <dirent.h>
#include <unistd.h>
#endif
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <sstream>
namespace ripple {
@@ -112,27 +98,10 @@ private:
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
bool ssl_;
bool plain_;
static constexpr std::chrono::milliseconds INITIAL_ACCEPT_DELAY{50};
static constexpr std::chrono::milliseconds MAX_ACCEPT_DELAY{2000};
std::chrono::milliseconds accept_delay_{INITIAL_ACCEPT_DELAY};
boost::asio::steady_timer backoff_timer_;
static constexpr double FREE_FD_THRESHOLD = 0.70;
struct FDStats
{
std::uint64_t used{0};
std::uint64_t limit{0};
};
void
reOpen();
std::optional<FDStats>
query_fd_stats() const;
bool
should_throttle_for_fds();
public:
Door(
Handler& handler,
@@ -330,7 +299,6 @@ Door<Handler>::Door(
, plain_(
port_.protocol.count("http") > 0 || port_.protocol.count("ws") > 0 ||
port_.protocol.count("ws2"))
, backoff_timer_(io_context)
{
reOpen();
}
@@ -355,7 +323,6 @@ Door<Handler>::close()
return boost::asio::post(
strand_,
std::bind(&Door<Handler>::close, this->shared_from_this()));
backoff_timer_.cancel();
error_code ec;
acceptor_.close(ec);
}
@@ -401,17 +368,6 @@ Door<Handler>::do_accept(boost::asio::yield_context do_yield)
{
while (acceptor_.is_open())
{
if (should_throttle_for_fds())
{
backoff_timer_.expires_after(accept_delay_);
boost::system::error_code tec;
backoff_timer_.async_wait(do_yield[tec]);
accept_delay_ = std::min(accept_delay_ * 2, MAX_ACCEPT_DELAY);
JLOG(j_.warn()) << "Throttling do_accept for "
<< accept_delay_.count() << "ms.";
continue;
}
error_code ec;
endpoint_type remote_address;
stream_type stream(ioc_);
@@ -421,28 +377,15 @@ Door<Handler>::do_accept(boost::asio::yield_context do_yield)
{
if (ec == boost::asio::error::operation_aborted)
break;
if (ec == boost::asio::error::no_descriptors ||
ec == boost::asio::error::no_buffer_space)
JLOG(j_.error()) << "accept: " << ec.message();
if (ec == boost::asio::error::no_descriptors)
{
JLOG(j_.warn()) << "accept: Too many open files. Pausing for "
<< accept_delay_.count() << "ms.";
backoff_timer_.expires_after(accept_delay_);
boost::system::error_code tec;
backoff_timer_.async_wait(do_yield[tec]);
accept_delay_ = std::min(accept_delay_ * 2, MAX_ACCEPT_DELAY);
}
else
{
JLOG(j_.error()) << "accept error: " << ec.message();
JLOG(j_.info()) << "re-opening acceptor";
reOpen();
}
continue;
}
accept_delay_ = INITIAL_ACCEPT_DELAY;
if (ssl_ && plain_)
{
if (auto sp = ios().template emplace<Detector>(
@@ -465,60 +408,6 @@ Door<Handler>::do_accept(boost::asio::yield_context do_yield)
}
}
template <class Handler>
std::optional<typename Door<Handler>::FDStats>
Door<Handler>::query_fd_stats() const
{
#if BOOST_OS_WINDOWS
return std::nullopt;
#else
FDStats s;
struct rlimit rl;
if (getrlimit(RLIMIT_NOFILE, &rl) != 0 || rl.rlim_cur == RLIM_INFINITY)
return std::nullopt;
s.limit = static_cast<std::uint64_t>(rl.rlim_cur);
#if BOOST_OS_LINUX
constexpr char const* kFdDir = "/proc/self/fd";
#else
constexpr char const* kFdDir = "/dev/fd";
#endif
if (DIR* d = ::opendir(kFdDir))
{
std::uint64_t cnt = 0;
while (::readdir(d) != nullptr)
++cnt;
::closedir(d);
// readdir() also returns '.', '..', and the descriptor opened for the
// DIR* itself, so discount those three entries
s.used = (cnt >= 3) ? (cnt - 3) : 0;
return s;
}
return std::nullopt;
#endif
}
template <class Handler>
bool
Door<Handler>::should_throttle_for_fds()
{
#if BOOST_OS_WINDOWS
return false;
#else
auto const stats = query_fd_stats();
if (!stats || stats->limit == 0)
return false;
auto const& s = *stats;
auto const free = (s.limit > s.used) ? (s.limit - s.used) : 0ull;
double const free_ratio =
static_cast<double>(free) / static_cast<double>(s.limit);
if (free_ratio < FREE_FD_THRESHOLD)
{
return true;
}
return false;
#endif
}
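// Worked example of the threshold above (an illustration, assuming a soft
// limit of 1024 descriptors): with 400 in use, free = 624 and
// free_ratio = 624.0 / 1024.0 ~= 0.61, which is below FREE_FD_THRESHOLD
// (0.70), so accepts are throttled until usage drops.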
} // namespace ripple
#endif

View File

@@ -239,11 +239,9 @@ Logs::fromSeverity(beast::severities::Severity level)
case kError:
return lsERROR;
// LCOV_EXCL_START
default:
UNREACHABLE("ripple::Logs::fromSeverity : invalid severity");
[[fallthrough]];
// LCOV_EXCL_STOP
case kFatal:
break;
}
@@ -267,11 +265,9 @@ Logs::toSeverity(LogSeverity level)
return kWarning;
case lsERROR:
return kError;
// LCOV_EXCL_START
default:
UNREACHABLE("ripple::Logs::toSeverity : invalid severity");
[[fallthrough]];
// LCOV_EXCL_STOP
case lsFATAL:
break;
}
@@ -296,11 +292,9 @@ Logs::toString(LogSeverity s)
return "Error";
case lsFATAL:
return "Fatal";
// LCOV_EXCL_START
default:
UNREACHABLE("ripple::Logs::toString : invalid severity");
return "Unknown";
// LCOV_EXCL_STOP
}
}
@@ -362,11 +356,9 @@ Logs::format(
case kError:
output += "ERR ";
break;
// LCOV_EXCL_START
default:
UNREACHABLE("ripple::Logs::format : invalid severity");
[[fallthrough]];
// LCOV_EXCL_STOP
case kFatal:
output += "FTL ";
break;

View File

@@ -36,7 +36,6 @@ LogThrow(std::string const& title)
[[noreturn]] void
LogicError(std::string const& s) noexcept
{
// LCOV_EXCL_START
JLOG(debugLog().fatal()) << s;
std::cerr << "Logic error: " << s << std::endl;
// Use a non-standard contract naming here (without namespace) because
@@ -46,7 +45,6 @@ LogicError(std::string const& s) noexcept
// For the above reasons, we want this contract to stand out.
UNREACHABLE("LogicError", {{"message", s}});
std::abort();
// LCOV_EXCL_STOP
}
} // namespace ripple

View File

@@ -174,7 +174,7 @@ Array::append(Json::Value const& v)
return;
}
}
UNREACHABLE("Json::Array::append : invalid type"); // LCOV_EXCL_LINE
UNREACHABLE("Json::Array::append : invalid type");
}
void
@@ -209,7 +209,7 @@ Object::set(std::string const& k, Json::Value const& v)
return;
}
}
UNREACHABLE("Json::Object::set : invalid type"); // LCOV_EXCL_LINE
UNREACHABLE("Json::Object::set : invalid type");
}
//------------------------------------------------------------------------------

View File

@@ -213,10 +213,8 @@ Value::Value(ValueType type) : type_(type), allocated_(0)
value_.bool_ = false;
break;
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::Value(ValueType) : invalid type");
// LCOV_EXCL_STOP
}
}
@@ -292,10 +290,8 @@ Value::Value(Value const& other) : type_(other.type_)
value_.map_ = new ObjectValues(*other.value_.map_);
break;
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::Value(Value const&) : invalid type");
// LCOV_EXCL_STOP
}
}
@@ -322,10 +318,8 @@ Value::~Value()
delete value_.map_;
break;
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::~Value : invalid type");
// LCOV_EXCL_STOP
}
}
@@ -425,10 +419,8 @@ operator<(Value const& x, Value const& y)
return *x.value_.map_ < *y.value_.map_;
}
// LCOV_EXCL_START
default:
UNREACHABLE("Json::operator<(Value, Value) : invalid type");
// LCOV_EXCL_STOP
}
return 0; // unreachable
@@ -473,10 +465,8 @@ operator==(Value const& x, Value const& y)
return x.value_.map_->size() == y.value_.map_->size() &&
*x.value_.map_ == *y.value_.map_;
// LCOV_EXCL_START
default:
UNREACHABLE("Json::operator==(Value, Value) : invalid type");
// LCOV_EXCL_STOP
}
return 0; // unreachable
@@ -516,10 +506,8 @@ Value::asString() const
case objectValue:
JSON_ASSERT_MESSAGE(false, "Type is not convertible to string");
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asString : invalid type");
// LCOV_EXCL_STOP
}
return ""; // unreachable
@@ -560,10 +548,8 @@ Value::asInt() const
case objectValue:
JSON_ASSERT_MESSAGE(false, "Type is not convertible to int");
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asInt : invalid type");
// LCOV_EXCL_STOP
}
return 0; // unreachable;
@@ -604,10 +590,8 @@ Value::asUInt() const
case objectValue:
JSON_ASSERT_MESSAGE(false, "Type is not convertible to uint");
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asUInt : invalid type");
// LCOV_EXCL_STOP
}
return 0; // unreachable;
@@ -638,10 +622,8 @@ Value::asDouble() const
case objectValue:
JSON_ASSERT_MESSAGE(false, "Type is not convertible to double");
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asDouble : invalid type");
// LCOV_EXCL_STOP
}
return 0; // unreachable;
@@ -672,10 +654,8 @@ Value::asBool() const
case objectValue:
return value_.map_->size() != 0;
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asBool : invalid type");
// LCOV_EXCL_STOP
}
return false; // unreachable;
@@ -730,10 +710,8 @@ Value::isConvertibleTo(ValueType other) const
return other == objectValue ||
(other == nullValue && value_.map_->size() == 0);
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::isConvertible : invalid type");
// LCOV_EXCL_STOP
}
return false; // unreachable;
@@ -766,10 +744,8 @@ Value::size() const
case objectValue:
return Int(value_.map_->size());
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::size : invalid type");
// LCOV_EXCL_STOP
}
return 0; // unreachable;

View File

@@ -383,7 +383,7 @@ public:
static boost::regex reStatus{
"\\`HTTP/1\\S+ (\\d{3}) .*\\'"}; // HTTP/1.1 200 OK
static boost::regex reSize{
"\\`.*\\r\\nContent-Length:\\s+([0-9]+).*\\'", boost::regex::icase};
"\\`.*\\r\\nContent-Length:\\s+([0-9]+).*\\'"};
static boost::regex reBody{"\\`.*\\r\\n\\r\\n(.*)\\'"};
boost::smatch smMatch;

View File

@@ -36,7 +36,7 @@ namespace BuildInfo {
// and follow the format described at http://semver.org/
//------------------------------------------------------------------------------
// clang-format off
char const* const versionString = "3.0.0-b1"
char const* const versionString = "2.6.0"
// clang-format on
#if defined(DEBUG) || defined(SANITIZER)

View File

@@ -1,607 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpl/protocol/ConfidentialTransfer.h>
#include <xrpl/protocol/Protocol.h>
#include <xrpl/protocol/TER.h>
#include <openssl/rand.h>
#include <openssl/sha.h>
namespace ripple {
int
secp256k1_elgamal_generate_keypair(
secp256k1_context const* ctx,
unsigned char* privkey,
secp256k1_pubkey* pubkey)
{
// 1. Generate 32 random bytes for the private key
do
{
if (RAND_bytes(privkey, 32) != 1)
{
return 0; // Failure
}
// 2. Verify the random data is a valid private key.
} while (secp256k1_ec_seckey_verify(ctx, privkey) != 1);
// 3. Create the corresponding public key.
if (secp256k1_ec_pubkey_create(ctx, pubkey, privkey) != 1)
{
return 0; // Failure
}
return 1; // Success
}
// ... implementation of secp256k1_elgamal_encrypt ...
int
secp256k1_elgamal_encrypt(
secp256k1_context const* ctx,
secp256k1_pubkey* c1,
secp256k1_pubkey* c2,
secp256k1_pubkey const* pubkey_Q,
uint64_t amount,
unsigned char const* blinding_factor)
{
secp256k1_pubkey S;
// First, calculate C1 = k * G
if (secp256k1_ec_pubkey_create(ctx, c1, blinding_factor) != 1)
{
return 0;
}
// Next, calculate the shared secret S = k * Q
S = *pubkey_Q;
if (secp256k1_ec_pubkey_tweak_mul(ctx, &S, blinding_factor) != 1)
{
return 0;
}
// --- Handle the amount ---
if (amount == 0)
{
// For amount = 0, C2 = S.
*c2 = S;
}
else
{
// For non-zero amounts, proceed as before.
unsigned char amount_scalar[32] = {0};
secp256k1_pubkey M;
secp256k1_pubkey const* points_to_add[2];
// Convert amount to a 32-byte BIG-ENDIAN scalar.
for (int i = 0; i < 8; ++i)
{
amount_scalar[31 - i] = (amount >> (i * 8)) & 0xFF;
}
// Calculate M = amount * G
if (secp256k1_ec_pubkey_create(ctx, &M, amount_scalar) != 1)
{
return 0;
}
// Calculate C2 = M + S
points_to_add[0] = &M;
points_to_add[1] = &S;
if (secp256k1_ec_pubkey_combine(ctx, c2, points_to_add, 2) != 1)
{
return 0;
}
}
return 1; // Success
}
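// The scheme above is lifted ElGamal over secp256k1. Writing G for the
// generator, Q = x*G for the recipient's key, k for the blinding factor,
// and m for the amount:
//   C1 = k*G
//   C2 = m*G + k*Q
// Decryption recovers m*G as C2 - x*C1, since x*C1 = x*k*G = k*Q = S, and
// then solves the small discrete log for m (see the bounded search below).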
// ... implementation of secp256k1_elgamal_decrypt ...
int
secp256k1_elgamal_decrypt(
secp256k1_context const* ctx,
uint64_t* amount,
secp256k1_pubkey const* c1,
secp256k1_pubkey const* c2,
unsigned char const* privkey)
{
secp256k1_pubkey S, M, G_point, current_M, next_M;
secp256k1_pubkey const* points_to_add[2];
unsigned char c2_bytes[33], s_bytes[33], m_bytes[33], current_m_bytes[33];
size_t len;
uint64_t i;
/* Create the scalar '1' in big-endian format */
unsigned char one_scalar[32] = {0};
one_scalar[31] = 1;
/* --- Executable Code --- */
// 1. Calculate S = privkey * C1
S = *c1;
if (secp256k1_ec_pubkey_tweak_mul(ctx, &S, privkey) != 1)
{
return 0;
}
// 2. Check for amount = 0 by comparing serialized points
len = sizeof(c2_bytes);
if (secp256k1_ec_pubkey_serialize(
ctx, c2_bytes, &len, c2, SECP256K1_EC_COMPRESSED) != 1)
return 0;
len = sizeof(s_bytes);
if (secp256k1_ec_pubkey_serialize(
ctx, s_bytes, &len, &S, SECP256K1_EC_COMPRESSED) != 1)
return 0;
if (memcmp(c2_bytes, s_bytes, sizeof(c2_bytes)) == 0)
{
*amount = 0;
return 1;
}
// 3. Recover M = C2 - S
if (secp256k1_ec_pubkey_negate(ctx, &S) != 1)
return 0;
points_to_add[0] = c2;
points_to_add[1] = &S;
if (secp256k1_ec_pubkey_combine(ctx, &M, points_to_add, 2) != 1)
{
return 0;
}
// 4. Serialize M once for comparison in the loop
len = sizeof(m_bytes);
if (secp256k1_ec_pubkey_serialize(
ctx, m_bytes, &len, &M, SECP256K1_EC_COMPRESSED) != 1)
return 0;
// 5. Brute-force search loop
if (secp256k1_ec_pubkey_create(ctx, &G_point, one_scalar) != 1)
return 0;
current_M = G_point;
for (i = 1; i <= 1000000; ++i)
{
len = sizeof(current_m_bytes);
if (secp256k1_ec_pubkey_serialize(
ctx,
current_m_bytes,
&len,
&current_M,
SECP256K1_EC_COMPRESSED) != 1)
return 0;
if (memcmp(m_bytes, current_m_bytes, sizeof(m_bytes)) == 0)
{
*amount = i;
return 1;
}
points_to_add[0] = &current_M;
points_to_add[1] = &G_point;
if (secp256k1_ec_pubkey_combine(ctx, &next_M, points_to_add, 2) != 1)
return 0;
current_M = next_M;
}
return 0; // Not found
}
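// A minimal round-trip sketch of the helpers above (illustrative only;
// assumes RAND_bytes yields a valid scalar and the amount stays within the
// decryptor's 1,000,000-step search window):
//
//   unsigned char priv[32];
//   secp256k1_pubkey pub, c1, c2;
//   secp256k1_elgamal_generate_keypair(secp256k1Context(), priv, &pub);
//
//   unsigned char k[32];  // blinding factor
//   RAND_bytes(k, 32);
//   secp256k1_elgamal_encrypt(secp256k1Context(), &c1, &c2, &pub, 250, k);
//
//   uint64_t out = 0;
//   secp256k1_elgamal_decrypt(secp256k1Context(), &out, &c1, &c2, priv);
//   // out == 250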
int
secp256k1_elgamal_add(
secp256k1_context const* ctx,
secp256k1_pubkey* sum_c1,
secp256k1_pubkey* sum_c2,
secp256k1_pubkey const* a_c1,
secp256k1_pubkey const* a_c2,
secp256k1_pubkey const* b_c1,
secp256k1_pubkey const* b_c2)
{
secp256k1_pubkey const* c1_points[2] = {a_c1, b_c1};
if (secp256k1_ec_pubkey_combine(ctx, sum_c1, c1_points, 2) != 1)
{
return 0;
}
secp256k1_pubkey const* c2_points[2] = {a_c2, b_c2};
if (secp256k1_ec_pubkey_combine(ctx, sum_c2, c2_points, 2) != 1)
{
return 0;
}
return 1;
}
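// Why point-wise addition adds the plaintexts: with
//   Enc(a) = (k1*G, a*G + k1*Q) and Enc(b) = (k2*G, b*G + k2*Q),
//   (sum_c1, sum_c2) = ((k1+k2)*G, (a+b)*G + (k1+k2)*Q) = Enc(a+b),
// i.e. a valid encryption of a+b under blinding factor k1+k2.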
int
secp256k1_elgamal_subtract(
secp256k1_context const* ctx,
secp256k1_pubkey* diff_c1,
secp256k1_pubkey* diff_c2,
secp256k1_pubkey const* a_c1,
secp256k1_pubkey const* a_c2,
secp256k1_pubkey const* b_c1,
secp256k1_pubkey const* b_c2)
{
// To subtract, we add the negation: (A - B) is (A + (-B))
// Make a local, modifiable copy of B's points.
secp256k1_pubkey neg_b_c1 = *b_c1;
secp256k1_pubkey neg_b_c2 = *b_c2;
// Negate the copies
if (secp256k1_ec_pubkey_negate(ctx, &neg_b_c1) != 1 ||
secp256k1_ec_pubkey_negate(ctx, &neg_b_c2) != 1)
{
return 0; // Negation failed
}
// Now, add A and the negated copies of B
secp256k1_pubkey const* c1_points[2] = {a_c1, &neg_b_c1};
if (secp256k1_ec_pubkey_combine(ctx, diff_c1, c1_points, 2) != 1)
{
return 0;
}
secp256k1_pubkey const* c2_points[2] = {a_c2, &neg_b_c2};
if (secp256k1_ec_pubkey_combine(ctx, diff_c2, c2_points, 2) != 1)
{
return 0;
}
return 1; // Success
}
// Helper function to concatenate data for hashing
static void
build_hash_input(
unsigned char* output_buffer,
size_t buffer_size,
unsigned char const* account_id, // 20 bytes
unsigned char const* mpt_issuance_id // 24 bytes
)
{
char const* domain_separator = "EncZero";
size_t domain_len = strlen(domain_separator);
size_t offset = 0;
// Ensure buffer is large enough (should be checked by caller if necessary)
// Size = strlen("EncZero") + 20 + 24 = 7 + 20 + 24 = 51 bytes
memcpy(output_buffer + offset, domain_separator, domain_len);
offset += domain_len;
memcpy(output_buffer + offset, account_id, 20);
offset += 20;
memcpy(output_buffer + offset, mpt_issuance_id, 24);
// offset += 24; // Final size is offset + 24
}
// The canonical encrypted zero
int
generate_canonical_encrypted_zero(
secp256k1_context const* ctx,
secp256k1_pubkey* enc_zero_c1,
secp256k1_pubkey* enc_zero_c2,
secp256k1_pubkey const* pubkey,
unsigned char const* account_id, // 20 bytes
unsigned char const* mpt_issuance_id // 24 bytes
)
{
unsigned char deterministic_scalar[32];
unsigned char hash_input[51]; // Size calculated above
/* 1. Create the input buffer for hashing */
build_hash_input(
hash_input, sizeof(hash_input), account_id, mpt_issuance_id);
/* 2. Hash the buffer to create the deterministic scalar 'r' */
do
{
// Hash the concatenated bytes
SHA256(hash_input, sizeof(hash_input), deterministic_scalar);
/* Note: If the hash output could be invalid (0 or >= n),
* you might need to add a nonce/counter to hash_input
* and re-hash in a loop until a valid scalar is produced. */
} while (secp256k1_ec_seckey_verify(ctx, deterministic_scalar) != 1);
/* 3. Encrypt the amount 0 using the deterministic scalar */
return secp256k1_elgamal_encrypt(
ctx,
enc_zero_c1,
enc_zero_c2,
pubkey,
0, /* The amount is zero */
deterministic_scalar);
}
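// In equation form (implied by the code above): with
//   r = SHA256("EncZero" || account_id || mpt_issuance_id)
// the canonical encrypted zero is (r*G, r*Q), which any party can recompute
// deterministically for a given (account, issuance) pair without a shared
// secret.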
bool
makeEcPair(Slice const& buffer, secp256k1_pubkey& out1, secp256k1_pubkey& out2)
{
auto parsePubKey = [](Slice const& slice, secp256k1_pubkey& out) {
return secp256k1_ec_pubkey_parse(
secp256k1Context(),
&out,
reinterpret_cast<unsigned char const*>(slice.data()),
slice.length());
};
Slice s1{buffer.data(), ecGamalEncryptedLength};
Slice s2{buffer.data() + ecGamalEncryptedLength, ecGamalEncryptedLength};
int const ret1 = parsePubKey(s1, out1);
int const ret2 = parsePubKey(s2, out2);
return ret1 == 1 && ret2 == 1;
}
bool
serializeEcPair(
secp256k1_pubkey const& in1,
secp256k1_pubkey const& in2,
Buffer& buffer)
{
auto serializePubKey = [](secp256k1_pubkey const& pub, unsigned char* out) {
size_t outLen = ecGamalEncryptedLength; // 33 bytes
int const ret = secp256k1_ec_pubkey_serialize(
secp256k1Context(), out, &outLen, &pub, SECP256K1_EC_COMPRESSED);
return ret == 1 && outLen == ecGamalEncryptedLength;
};
unsigned char* ptr = buffer.data();
bool const res1 = serializePubKey(in1, ptr);
bool const res2 = serializePubKey(in2, ptr + ecGamalEncryptedLength);
return res1 && res2;
}
bool
isValidCiphertext(Slice const& buffer)
{
// Local/temporary variables to pass to makeEcPair.
// Their contents will be discarded when the function returns.
secp256k1_pubkey key1;
secp256k1_pubkey key2;
// Call makeEcPair and return its result.
return makeEcPair(buffer, key1, key2);
}
TER
homomorphicAdd(Slice const& a, Slice const& b, Buffer& out)
{
if (a.length() != ecGamalEncryptedTotalLength ||
b.length() != ecGamalEncryptedTotalLength)
return tecINTERNAL;
secp256k1_pubkey aC1;
secp256k1_pubkey aC2;
secp256k1_pubkey bC1;
secp256k1_pubkey bC2;
if (!makeEcPair(a, aC1, aC2) || !makeEcPair(b, bC1, bC2))
return tecINTERNAL;
secp256k1_pubkey sumC1;
secp256k1_pubkey sumC2;
if (secp256k1_elgamal_add(
secp256k1Context(), &sumC1, &sumC2, &aC1, &aC2, &bC1, &bC2) != 1)
return tecINTERNAL;
if (!serializeEcPair(sumC1, sumC2, out))
return tecINTERNAL;
return tesSUCCESS;
}
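// Sketch of how a caller might combine two 66-byte ciphertexts (illustrative
// only; encryptAmount is defined later in this file):
//
//   Buffer const a = encryptAmount(5, pubKeySlice);
//   Buffer const b = encryptAmount(7, pubKeySlice);
//   Buffer sum(ecGamalEncryptedTotalLength);
//   if (homomorphicAdd(a, b, sum) == tesSUCCESS)
//       ;  // sum now decrypts to 12 under the matching private key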
TER
homomorphicSubtract(Slice const& a, Slice const& b, Buffer& out)
{
if (a.length() != ecGamalEncryptedTotalLength ||
b.length() != ecGamalEncryptedTotalLength)
return tecINTERNAL;
secp256k1_pubkey aC1;
secp256k1_pubkey aC2;
secp256k1_pubkey bC1;
secp256k1_pubkey bC2;
if (!makeEcPair(a, aC1, aC2) || !makeEcPair(b, bC1, bC2))
return tecINTERNAL;
secp256k1_pubkey diffC1;
secp256k1_pubkey diffC2;
if (secp256k1_elgamal_subtract(
secp256k1Context(), &diffC1, &diffC2, &aC1, &aC2, &bC1, &bC2) != 1)
return tecINTERNAL;
if (!serializeEcPair(diffC1, diffC2, out))
return tecINTERNAL;
return tesSUCCESS;
}
TER
proveEquality(
Slice const& proof,
Slice const& encAmt, // encrypted amount
Slice const& pubkey,
uint64_t const amount,
uint256 const& txHash, // Transaction context data
std::uint32_t const spendVersion)
{
if (proof.length() != ecEqualityProofLength)
return tecINTERNAL;
secp256k1_pubkey c1;
secp256k1_pubkey c2;
if (!makeEcPair(encAmt, c1, c2))
return tecINTERNAL;
// todo: might need to change how it's hashed
Serializer s;
s.addRaw(txHash.data(), txHash.bytes);
s.add32(spendVersion);
// auto const txContextId = s.getSHA512Half();
// todo: support equality
// if (secp256k1_equality_verify(
// secp256k1Context(),
// reinterpret_cast<unsigned char const*>(proof.data()),
// proof.length(), // Length of the proof byte array (98 bytes)
// &c1,
// &c2,
// reinterpret_cast<unsigned char const*>(pubkey.data()),
// amount,
// txContextId.data(), // Transaction context data
// txContextId.bytes // Length of context data
// ) != 1)
// return tecBAD_PROOF;
return tesSUCCESS;
}
Buffer
encryptAmount(uint64_t amt, Slice const& pubKeySlice)
{
Buffer buf(ecGamalEncryptedTotalLength);
// Allocate ciphertext placeholders
secp256k1_pubkey c1, c2;
// todo: might need to be updated using another RNG
// Prepare a random blinding factor
unsigned char blindingFactor[32];
if (RAND_bytes(blindingFactor, 32) != 1)
Throw<std::runtime_error>("Failed to generate random number");
secp256k1_pubkey pubKey;
std::memcpy(pubKey.data, pubKeySlice.data(), ecPubKeyLength);
// Encrypt the amount
if (!secp256k1_elgamal_encrypt(
secp256k1Context(), &c1, &c2, &pubKey, amt, blindingFactor))
Throw<std::runtime_error>("Failed to encrypt amount");
// Serialize the ciphertext pair into the buffer
if (!serializeEcPair(c1, c2, buf))
Throw<std::runtime_error>(
"Failed to serialize into 66 byte compressed format");
return buf;
}
Buffer
encryptCanonicalZeroAmount(
Slice const& pubKeySlice,
AccountID const& account,
MPTID const& mptId)
{
Buffer buf(ecGamalEncryptedTotalLength);
// Allocate ciphertext placeholders
secp256k1_pubkey c1, c2;
secp256k1_pubkey pubKey;
std::memcpy(pubKey.data, pubKeySlice.data(), ecPubKeyLength);
// Encrypt the amount
if (!generate_canonical_encrypted_zero(
secp256k1Context(),
&c1,
&c2,
&pubKey,
account.data(),
mptId.data()))
Throw<std::runtime_error>("Failed to encrypt amount");
// Serialize the ciphertext pair into the buffer
if (!serializeEcPair(c1, c2, buf))
Throw<std::runtime_error>(
"Failed to serialize into 66 byte compressed format");
return buf;
}
TER
verifyConfidentialSendProof(
Slice const& proof,
Slice const& encSenderBalance,
Slice const& encSenderAmt,
Slice const& encDestAmt,
Slice const& encIssuerAmt,
Slice const& senderPubKey,
Slice const& destPubKey,
Slice const& issuerPubKey,
std::uint32_t const version,
uint256 const& txHash)
{
// if (proof.length() != ecConfidentialSendProofLength)
// return tecINTERNAL;
secp256k1_pubkey balC1, balC2;
if (!makeEcPair(encSenderBalance, balC1, balC2))
return tecINTERNAL;
secp256k1_pubkey senderC1, senderC2;
if (!makeEcPair(encSenderAmt, senderC1, senderC2))
return tecINTERNAL;
secp256k1_pubkey destC1, destC2;
if (!makeEcPair(encDestAmt, destC1, destC2))
return tecINTERNAL;
secp256k1_pubkey issuerC1, issuerC2;
if (!makeEcPair(encIssuerAmt, issuerC1, issuerC2))
return tecINTERNAL;
Serializer s;
s.addRaw(txHash.data(), txHash.bytes);
s.add32(version);
// auto const txContextId = s.getSHA512Half();
// todo: equality and range proof verification
// if (secp256k1_equal_range_verify(
// secp256k1Context(),
// reinterpret_cast<unsigned char const*>(proof.data()),
// proof.length(),
// txContextId.data(),
// &balC1,
// &balC2,
// &senderC1,
// &senderC2,
// reinterpret_cast<unsigned char const*>(senderPubKey.data()),
// &destC1,
// &destC2,
// reinterpret_cast<unsigned char const*>(destPubKey.data()),
// &issuerC1,
// &issuerC2,
// reinterpret_cast<unsigned char const*>(issuerPubKey.data()),
// txContextId.data(),
// txContextId.bytes) != 1)
// return tecBAD_PROOF;
return tesSUCCESS;
}
} // namespace ripple

View File

@@ -18,7 +18,6 @@
//==============================================================================
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Permissions.h>
#include <xrpl/protocol/jss.h>
@@ -26,24 +25,11 @@ namespace ripple {
Permission::Permission()
{
txFeatureMap_ = {
#pragma push_macro("TRANSACTION")
#undef TRANSACTION
#define TRANSACTION(tag, value, name, delegatable, amendment, ...) \
{value, amendment},
#include <xrpl/protocol/detail/transactions.macro>
#undef TRANSACTION
#pragma pop_macro("TRANSACTION")
};
delegatableTx_ = {
#pragma push_macro("TRANSACTION")
#undef TRANSACTION
#define TRANSACTION(tag, value, name, delegatable, ...) {value, delegatable},
#define TRANSACTION(tag, value, name, delegatable, fields) {value, delegatable},
#include <xrpl/protocol/detail/transactions.macro>
@@ -101,22 +87,6 @@ Permission::getInstance()
return instance;
}
std::optional<std::string>
Permission::getPermissionName(std::uint32_t const value) const
{
auto const permissionValue = static_cast<GranularPermissionType>(value);
if (auto const granular = getGranularName(permissionValue))
return *granular;
// not a granular permission, check if it maps to a transaction type
auto const txType = permissionToTxType(value);
if (auto const* item = TxFormats::getInstance().findByType(txType);
item != nullptr)
return item->getName();
return std::nullopt;
}
std::optional<std::uint32_t>
Permission::getGranularValue(std::string const& name) const
{
@@ -147,23 +117,8 @@ Permission::getGranularTxType(GranularPermissionType const& gpType) const
return std::nullopt;
}
std::optional<std::reference_wrapper<uint256 const>> const
Permission::getTxFeature(TxType txType) const
{
auto const txFeaturesIt = txFeatureMap_.find(txType);
XRPL_ASSERT(
txFeaturesIt != txFeatureMap_.end(),
"ripple::Permissions::getTxFeature : tx exists in txFeatureMap_");
if (txFeaturesIt->second == uint256{})
return std::nullopt;
return txFeaturesIt->second;
}
bool
Permission::isDelegatable(
std::uint32_t const& permissionValue,
Rules const& rules) const
Permission::isDelegatable(std::uint32_t const& permissionValue) const
{
auto const granularPermission =
getGranularName(static_cast<GranularPermissionType>(permissionValue));
@@ -171,25 +126,8 @@ Permission::isDelegatable(
// granular permissions are always allowed to be delegated
return true;
auto const txType = permissionToTxType(permissionValue);
auto const it = delegatableTx_.find(txType);
if (it == delegatableTx_.end())
return false;
auto const txFeaturesIt = txFeatureMap_.find(txType);
XRPL_ASSERT(
txFeaturesIt != txFeatureMap_.end(),
"ripple::Permissions::isDelegatable : tx exists in txFeatureMap_");
// Delegation is only allowed if the required amendment for the transaction
// is enabled. For transactions that do not require an amendment, delegation
// is always allowed.
if (txFeaturesIt->second != uint256{} &&
!rules.enabled(txFeaturesIt->second))
return false;
if (it->second == Delegation::notDelegatable)
auto const it = delegatableTx_.find(permissionValue - 1);
if (it != delegatableTx_.end() && it->second == Delegation::notDelegatable)
return false;
return true;

View File

@@ -131,6 +131,17 @@ Rules::enabled(uint256 const& feature) const
{
XRPL_ASSERT(impl_, "ripple::Rules::enabled : initialized");
// The functionality of the "NonFungibleTokensV1_1" amendment is
// precisely the functionality of the following three amendments
// so if their status is ever queried individually, we inject an
// extra check here to simplify the checking elsewhere.
if (feature == featureNonFungibleTokensV1 ||
feature == fixNFTokenNegOffer || feature == fixNFTokenDirV1)
{
if (impl_->enabled(featureNonFungibleTokensV1_1))
return true;
}
return impl_->enabled(feature);
}
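// Consequence of the injected check (a worked example): once
// featureNonFungibleTokensV1_1 is enabled, rules.enabled(fixNFTokenDirV1)
// reports true even if fixNFTokenDirV1 was never voted in on its own.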

View File

@@ -17,7 +17,6 @@
*/
//==============================================================================
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/protocol/SField.h>
#include <map>
@@ -28,8 +27,7 @@ namespace ripple {
// Storage for static const members.
SField::IsSigning const SField::notSigning;
int SField::num = 0;
std::unordered_map<int, SField const*> SField::knownCodeToField;
std::unordered_map<std::string, SField const*> SField::knownNameToField;
std::map<int, SField const*> SField::knownCodeToField;
// Give only this translation unit permission to construct SFields
struct SField::private_access_tag_t
@@ -47,7 +45,7 @@ TypedField<T>::TypedField(private_access_tag_t pat, Args&&... args)
}
// Construct all compile-time SFields, and register them in the knownCodeToField
// and knownNameToField databases:
// database:
// Use macros for most SField construction to enforce naming conventions.
#pragma push_macro("UNTYPED_SFIELD")
@@ -71,8 +69,8 @@ TypedField<T>::TypedField(private_access_tag_t pat, Args&&... args)
##__VA_ARGS__);
// SFields which, for historical reasons, do not follow naming conventions.
SField const sfInvalid(access, -1, "");
SField const sfGeneric(access, 0, "Generic");
SField const sfInvalid(access, -1);
SField const sfGeneric(access, 0);
// The following two fields aren't used anywhere, but they break tests/have
// downstream effects.
SField const sfHash(access, STI_UINT256, 257, "hash");
@@ -101,34 +99,19 @@ SField::SField(
, signingField(signing)
, jsonName(fieldName.c_str())
{
XRPL_ASSERT(
!knownCodeToField.contains(fieldCode),
"ripple::SField::SField(tid,fv,fn,meta,signing) : fieldCode is unique");
XRPL_ASSERT(
!knownNameToField.contains(fieldName),
"ripple::SField::SField(tid,fv,fn,meta,signing) : fieldName is unique");
knownCodeToField[fieldCode] = this;
knownNameToField[fieldName] = this;
}
SField::SField(private_access_tag_t, int fc, char const* fn)
SField::SField(private_access_tag_t, int fc)
: fieldCode(fc)
, fieldType(STI_UNKNOWN)
, fieldValue(0)
, fieldName(fn)
, fieldMeta(sMD_Never)
, fieldNum(++num)
, signingField(IsSigning::yes)
, jsonName(fieldName.c_str())
{
XRPL_ASSERT(
!knownCodeToField.contains(fieldCode),
"ripple::SField::SField(fc,fn) : fieldCode is unique");
XRPL_ASSERT(
!knownNameToField.contains(fieldName),
"ripple::SField::SField(fc,fn) : fieldName is unique");
knownCodeToField[fieldCode] = this;
knownNameToField[fieldName] = this;
}
SField const&
@@ -162,11 +145,11 @@ SField::compare(SField const& f1, SField const& f2)
SField const&
SField::getField(std::string const& fieldName)
{
auto it = knownNameToField.find(fieldName);
if (it != knownNameToField.end())
for (auto const& [_, f] : knownCodeToField)
{
return *(it->second);
(void)_;
if (f->fieldName == fieldName)
return *f;
}
return sfInvalid;
}

View File

@@ -68,6 +68,29 @@
namespace ripple {
namespace {
// Use a static inside a function to help prevent order-of-initialization issues
LocalValue<bool>&
getStaticSTAmountCanonicalizeSwitchover()
{
static LocalValue<bool> r{true};
return r;
}
} // namespace
bool
getSTAmountCanonicalizeSwitchover()
{
return *getStaticSTAmountCanonicalizeSwitchover();
}
void
setSTAmountCanonicalizeSwitchover(bool v)
{
*getStaticSTAmountCanonicalizeSwitchover() = v;
}
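// One way callers can scope the switchover (a hypothetical helper, not part
// of this diff), restoring the previous value on exit:
//
//   struct ScopedCanonicalizeSwitchover
//   {
//       bool const saved = getSTAmountCanonicalizeSwitchover();
//       explicit ScopedCanonicalizeSwitchover(bool v)
//       {
//           setSTAmountCanonicalizeSwitchover(v);
//       }
//       ~ScopedCanonicalizeSwitchover()
//       {
//           setSTAmountCanonicalizeSwitchover(saved);
//       }
//   };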
static std::uint64_t const tenTo14 = 100000000000000ull;
static std::uint64_t const tenTo14m1 = tenTo14 - 1;
static std::uint64_t const tenTo17 = tenTo14 * 1000;
@@ -861,14 +884,18 @@ STAmount::canonicalize()
return;
}
// log(cMaxNativeN, 10) == 17
if (native() && mOffset > 17)
Throw<std::runtime_error>("Native currency amount out of range");
// log(maxMPTokenAmount, 10) ~ 18.96
if (mAsset.holds<MPTIssue>() && mOffset > 18)
Throw<std::runtime_error>("MPT amount out of range");
if (getSTAmountCanonicalizeSwitchover())
{
// log(cMaxNativeN, 10) == 17
if (native() && mOffset > 17)
Throw<std::runtime_error>(
"Native currency amount out of range");
// log(maxMPTokenAmount, 10) ~ 18.96
if (mAsset.holds<MPTIssue>() && mOffset > 18)
Throw<std::runtime_error>("MPT amount out of range");
}
if (getSTNumberSwitchover())
if (getSTNumberSwitchover() && getSTAmountCanonicalizeSwitchover())
{
Number num(
mIsNegative ? -mValue : mValue, mOffset, Number::unchecked{});
@@ -892,14 +919,16 @@ STAmount::canonicalize()
while (mOffset > 0)
{
// N.B. do not move the overflow check to after the
// multiplication
if (native() && mValue > cMaxNativeN)
Throw<std::runtime_error>(
"Native currency amount out of range");
else if (!native() && mValue > maxMPTokenAmount)
Throw<std::runtime_error>("MPT amount out of range");
if (getSTAmountCanonicalizeSwitchover())
{
// N.B. do not move the overflow check to after the
// multiplication
if (native() && mValue > cMaxNativeN)
Throw<std::runtime_error>(
"Native currency amount out of range");
else if (!native() && mValue > maxMPTokenAmount)
Throw<std::runtime_error>("MPT amount out of range");
}
mValue *= 10;
--mOffset;
}
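// One reading of the N.B. above: mValue *= 10 is unsigned arithmetic, so a
// sufficiently large value would wrap modulo 2^64, and a check placed after
// the multiplication could then see a small, corrupted value and pass.
// Checking first bounds mValue (<= cMaxNativeN, about 1e17) before scaling,
// so the product always stays representable in a std::uint64_t.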

View File

@@ -112,9 +112,7 @@ void
STBase::add(Serializer& s) const
{
// Should never be called
// LCOV_EXCL_START
UNREACHABLE("ripple::STBase::add : not implemented");
// LCOV_EXCL_STOP
}
bool

View File

@@ -62,10 +62,8 @@ STUInt8::getText() const
if (transResultInfo(TER::fromInt(value_), token, human))
return human;
// LCOV_EXCL_START
JLOG(debugLog().error())
<< "Unknown result code in metadata: " << value_;
// LCOV_EXCL_STOP
}
return std::to_string(value_);
@@ -82,10 +80,8 @@ STUInt8::getJson(JsonOptions) const
if (transResultInfo(TER::fromInt(value_), token, human))
return token;
// LCOV_EXCL_START
JLOG(debugLog().error())
<< "Unknown result code in metadata: " << value_;
// LCOV_EXCL_STOP
}
return value_;
@@ -175,13 +171,6 @@ template <>
std::string
STUInt32::getText() const
{
if (getFName() == sfPermissionValue)
{
auto const permissionName =
Permission::getInstance().getPermissionName(value_);
if (permissionName)
return *permissionName;
}
return std::to_string(value_);
}
@@ -191,10 +180,23 @@ STUInt32::getJson(JsonOptions) const
{
if (getFName() == sfPermissionValue)
{
auto const permissionName =
Permission::getInstance().getPermissionName(value_);
if (permissionName)
return *permissionName;
auto const permissionValue =
static_cast<GranularPermissionType>(value_);
auto const granular =
Permission::getInstance().getGranularName(permissionValue);
if (granular)
{
return *granular;
}
else
{
auto const txType =
Permission::getInstance().permissionToTxType(value_);
auto item = TxFormats::getInstance().findByType(txType);
if (item != nullptr)
return item->getName();
}
}
return value_;
@@ -249,33 +251,4 @@ STUInt64::getJson(JsonOptions) const
return convertToString(value_, 16); // Convert to base 16
}
//------------------------------------------------------------------------------
template <>
STInteger<std::int32_t>::STInteger(SerialIter& sit, SField const& name)
: STInteger(name, sit.get32())
{
}
template <>
SerializedTypeID
STInt32::getSType() const
{
return STI_INT32;
}
template <>
std::string
STInt32::getText() const
{
return std::to_string(value_);
}
template <>
Json::Value
STInt32::getJson(JsonOptions) const
{
return value_;
}
} // namespace ripple

View File

@@ -647,12 +647,6 @@ STObject::getFieldH256(SField const& field) const
return getFieldByValue<STUInt256>(field);
}
std::int32_t
STObject::getFieldI32(SField const& field) const
{
return getFieldByValue<STInt32>(field);
}
AccountID
STObject::getAccountID(SField const& field) const
{
@@ -688,16 +682,6 @@ STObject::getFieldV256(SField const& field) const
return getFieldByConstRef<STVector256>(field, empty);
}
STObject
STObject::getFieldObject(SField const& field) const
{
STObject const empty{field};
auto ret = getFieldByConstRef<STObject>(field, empty);
if (ret != empty)
ret.applyTemplateFromSField(field);
return ret;
}
STArray const&
STObject::getFieldArray(SField const& field) const
{
@@ -777,12 +761,6 @@ STObject::setFieldH256(SField const& field, uint256 const& v)
setFieldUsingSetValue<STUInt256>(field, v);
}
void
STObject::setFieldI32(SField const& field, std::int32_t v)
{
setFieldUsingSetValue<STInt32>(field, v);
}
void
STObject::setFieldV256(SField const& field, STVector256 const& v)
{
@@ -843,12 +821,6 @@ STObject::setFieldArray(SField const& field, STArray const& v)
setFieldUsingAssignment(field, v);
}
void
STObject::setFieldObject(SField const& field, STObject const& v)
{
setFieldUsingAssignment(field, v);
}
Json::Value
STObject::getJson(JsonOptions options) const
{

View File

@@ -83,8 +83,7 @@ constexpr std::
return static_cast<U1>(value);
}
// LCOV_EXCL_START
static inline std::string
static std::string
make_name(std::string const& object, std::string const& field)
{
if (field.empty())
@@ -93,7 +92,7 @@ make_name(std::string const& object, std::string const& field)
return object + "." + field;
}
static inline Json::Value
static Json::Value
not_an_object(std::string const& object, std::string const& field)
{
return RPC::make_error(
@@ -101,20 +100,20 @@ not_an_object(std::string const& object, std::string const& field)
"Field '" + make_name(object, field) + "' is not a JSON object.");
}
static inline Json::Value
static Json::Value
not_an_object(std::string const& object)
{
return not_an_object(object, "");
}
static inline Json::Value
static Json::Value
not_an_array(std::string const& object)
{
return RPC::make_error(
rpcINVALID_PARAMS, "Field '" + object + "' is not a JSON array.");
}
static inline Json::Value
static Json::Value
unknown_field(std::string const& object, std::string const& field)
{
return RPC::make_error(
@@ -122,7 +121,7 @@ unknown_field(std::string const& object, std::string const& field)
"Field '" + make_name(object, field) + "' is unknown.");
}
static inline Json::Value
static Json::Value
out_of_range(std::string const& object, std::string const& field)
{
return RPC::make_error(
@@ -130,7 +129,7 @@ out_of_range(std::string const& object, std::string const& field)
"Field '" + make_name(object, field) + "' is out of range.");
}
static inline Json::Value
static Json::Value
bad_type(std::string const& object, std::string const& field)
{
return RPC::make_error(
@@ -138,7 +137,7 @@ bad_type(std::string const& object, std::string const& field)
"Field '" + make_name(object, field) + "' has bad type.");
}
static inline Json::Value
static Json::Value
invalid_data(std::string const& object, std::string const& field)
{
return RPC::make_error(
@@ -146,13 +145,13 @@ invalid_data(std::string const& object, std::string const& field)
"Field '" + make_name(object, field) + "' has invalid data.");
}
static inline Json::Value
static Json::Value
invalid_data(std::string const& object)
{
return invalid_data(object, "");
}
static inline Json::Value
static Json::Value
array_expected(std::string const& object, std::string const& field)
{
return RPC::make_error(
@@ -160,7 +159,7 @@ array_expected(std::string const& object, std::string const& field)
"Field '" + make_name(object, field) + "' must be a JSON array.");
}
static inline Json::Value
static Json::Value
string_expected(std::string const& object, std::string const& field)
{
return RPC::make_error(
@@ -168,7 +167,7 @@ string_expected(std::string const& object, std::string const& field)
"Field '" + make_name(object, field) + "' must be a string.");
}
static inline Json::Value
static Json::Value
too_deep(std::string const& object)
{
return RPC::make_error(
@@ -176,7 +175,7 @@ too_deep(std::string const& object)
"Field '" + object + "' exceeds nesting depth limit.");
}
static inline Json::Value
static Json::Value
singleton_expected(std::string const& object, unsigned int index)
{
return RPC::make_error(
@@ -185,7 +184,7 @@ singleton_expected(std::string const& object, unsigned int index)
"]' must be an object with a single key/object value.");
}
static inline Json::Value
static Json::Value
template_mismatch(SField const& sField)
{
return RPC::make_error(
@@ -194,7 +193,7 @@ template_mismatch(SField const& sField)
"' contents did not meet requirements for that type.");
}
static inline Json::Value
static Json::Value
non_object_in_array(std::string const& item, Json::UInt index)
{
return RPC::make_error(
@@ -202,176 +201,6 @@ non_object_in_array(std::string const& item, Json::UInt index)
"Item '" + item + "' at index " + std::to_string(index) +
" is not an object. Arrays may only contain objects.");
}
// LCOV_EXCL_STOP
template <class STResult, class Integer>
static std::optional<detail::STVar>
parseUnsigned(
SField const& field,
std::string const& json_name,
std::string const& fieldName,
SField const* name,
Json::Value const& value,
Json::Value& error)
{
std::optional<detail::STVar> ret;
try
{
if (value.isString())
{
ret = detail::make_stvar<STResult>(
field,
safe_cast<typename STResult::value_type>(
beast::lexicalCastThrow<Integer>(value.asString())));
}
else if (value.isInt())
{
ret = detail::make_stvar<STResult>(
field,
to_unsigned<typename STResult::value_type>(value.asInt()));
}
else if (value.isUInt())
{
ret = detail::make_stvar<STResult>(
field,
to_unsigned<typename STResult::value_type>(value.asUInt()));
}
else
{
error = bad_type(json_name, fieldName);
return ret;
}
}
catch (std::exception const&)
{
error = invalid_data(json_name, fieldName);
return ret;
}
return ret;
}
template <class STResult, class Integer = std::uint16_t>
static std::optional<detail::STVar>
parseUint16(
SField const& field,
std::string const& json_name,
std::string const& fieldName,
SField const* name,
Json::Value const& value,
Json::Value& error)
{
std::optional<detail::STVar> ret;
try
{
if (value.isString())
{
std::string const strValue = value.asString();
if (!strValue.empty() &&
((strValue[0] < '0') || (strValue[0] > '9')))
{
if (field == sfTransactionType)
{
ret = detail::make_stvar<STResult>(
field,
safe_cast<typename STResult::value_type>(
static_cast<Integer>(
TxFormats::getInstance().findTypeByName(
strValue))));
if (*name == sfGeneric)
name = &sfTransaction;
}
else if (field == sfLedgerEntryType)
{
ret = detail::make_stvar<STResult>(
field,
safe_cast<typename STResult::value_type>(
static_cast<Integer>(
LedgerFormats::getInstance().findTypeByName(
strValue))));
if (*name == sfGeneric)
name = &sfLedgerEntry;
}
else
{
error = invalid_data(json_name, fieldName);
return ret;
}
}
}
if (!ret)
return parseUnsigned<STResult, Integer>(
field, json_name, fieldName, name, value, error);
}
catch (std::exception const&)
{
error = invalid_data(json_name, fieldName);
return ret;
}
return ret;
}
template <class STResult, class Integer = std::uint32_t>
static std::optional<detail::STVar>
parseUint32(
SField const& field,
std::string const& json_name,
std::string const& fieldName,
SField const* name,
Json::Value const& value,
Json::Value& error)
{
std::optional<detail::STVar> ret;
try
{
if (value.isString())
{
if (field == sfPermissionValue)
{
std::string const strValue = value.asString();
auto const granularPermission =
Permission::getInstance().getGranularValue(strValue);
if (granularPermission)
{
ret = detail::make_stvar<STResult>(
field, *granularPermission);
}
else
{
auto const& txType =
TxFormats::getInstance().findTypeByName(strValue);
ret = detail::make_stvar<STResult>(
field,
Permission::getInstance().txToPermissionType(txType));
}
}
else
{
ret = detail::make_stvar<STResult>(
field,
safe_cast<typename STResult::value_type>(
beast::lexicalCastThrow<Integer>(value.asString())));
}
}
if (!ret)
return parseUnsigned<STResult, Integer>(
field, json_name, fieldName, name, value, error);
}
catch (std::exception const&)
{
error = invalid_data(json_name, fieldName);
return ret;
}
return ret;
}
// This function is used by parseObject to parse any JSON type that doesn't
// recurse. Everything represented here is a leaf-type.
@@ -387,13 +216,10 @@ parseLeaf(
auto const& field = SField::getField(fieldName);
// checked in parseObject
if (field == sfInvalid)
{
// LCOV_EXCL_START
error = unknown_field(json_name, fieldName);
return ret;
// LCOV_EXCL_STOP
}
switch (field.fieldType)
@@ -476,18 +302,130 @@ parseLeaf(
break;
case STI_UINT16:
ret = parseUint16<STUInt16>(
field, json_name, fieldName, name, value, error);
if (!ret)
try
{
if (value.isString())
{
std::string const strValue = value.asString();
if (!strValue.empty() &&
((strValue[0] < '0') || (strValue[0] > '9')))
{
if (field == sfTransactionType)
{
ret = detail::make_stvar<STUInt16>(
field,
static_cast<std::uint16_t>(
TxFormats::getInstance().findTypeByName(
strValue)));
if (*name == sfGeneric)
name = &sfTransaction;
}
else if (field == sfLedgerEntryType)
{
ret = detail::make_stvar<STUInt16>(
field,
static_cast<std::uint16_t>(
LedgerFormats::getInstance().findTypeByName(
strValue)));
if (*name == sfGeneric)
name = &sfLedgerEntry;
}
else
{
error = invalid_data(json_name, fieldName);
return ret;
}
}
else
{
ret = detail::make_stvar<STUInt16>(
field,
beast::lexicalCastThrow<std::uint16_t>(strValue));
}
}
else if (value.isInt())
{
ret = detail::make_stvar<STUInt16>(
field, to_unsigned<std::uint16_t>(value.asInt()));
}
else if (value.isUInt())
{
ret = detail::make_stvar<STUInt16>(
field, to_unsigned<std::uint16_t>(value.asUInt()));
}
else
{
error = bad_type(json_name, fieldName);
return ret;
}
}
catch (std::exception const&)
{
error = invalid_data(json_name, fieldName);
return ret;
}
break;
case STI_UINT32:
ret = parseUint32<STUInt32>(
field, json_name, fieldName, name, value, error);
if (!ret)
try
{
if (value.isString())
{
if (field == sfPermissionValue)
{
std::string const strValue = value.asString();
auto const granularPermission =
Permission::getInstance().getGranularValue(
strValue);
if (granularPermission)
{
ret = detail::make_stvar<STUInt32>(
field, *granularPermission);
}
else
{
auto const& txType =
TxFormats::getInstance().findTypeByName(
strValue);
ret = detail::make_stvar<STUInt32>(
field,
Permission::getInstance().txToPermissionType(
txType));
}
}
else
{
ret = detail::make_stvar<STUInt32>(
field,
beast::lexicalCastThrow<std::uint32_t>(
value.asString()));
}
}
else if (value.isInt())
{
ret = detail::make_stvar<STUInt32>(
field, to_unsigned<std::uint32_t>(value.asInt()));
}
else if (value.isUInt())
{
ret = detail::make_stvar<STUInt32>(
field, safe_cast<std::uint32_t>(value.asUInt()));
}
else
{
error = bad_type(json_name, fieldName);
return ret;
}
}
catch (std::exception const&)
{
error = invalid_data(json_name, fieldName);
return ret;
}
break;
@@ -563,30 +501,6 @@ parseLeaf(
break;
}
case STI_UINT160: {
if (!value.isString())
{
error = bad_type(json_name, fieldName);
return ret;
}
uint160 num;
if (auto const s = value.asString(); !num.parseHex(s))
{
if (!s.empty())
{
error = invalid_data(json_name, fieldName);
return ret;
}
num.zero();
}
ret = detail::make_stvar<STUInt160>(field, num);
break;
}
case STI_UINT192: {
if (!value.isString())
{
@@ -611,6 +525,30 @@ parseLeaf(
break;
}
case STI_UINT160: {
if (!value.isString())
{
error = bad_type(json_name, fieldName);
return ret;
}
uint160 num;
if (auto const s = value.asString(); !num.parseHex(s))
{
if (!s.empty())
{
error = invalid_data(json_name, fieldName);
return ret;
}
num.zero();
}
ret = detail::make_stvar<STUInt160>(field, num);
break;
}
case STI_UINT256: {
if (!value.isString())
{
@@ -635,52 +573,6 @@ parseLeaf(
break;
}
case STI_INT32:
try
{
if (value.isString())
{
ret = detail::make_stvar<STInt32>(
field,
beast::lexicalCastThrow<std::int32_t>(
value.asString()));
}
else if (value.isInt())
{
// future-proofing - a static assert failure if the JSON
// library ever supports larger ints
// In such case, we will need additional bounds checks here
static_assert(
std::is_same_v<decltype(value.asInt()), std::int32_t>);
ret = detail::make_stvar<STInt32>(field, value.asInt());
}
else if (value.isUInt())
{
auto const uintValue = value.asUInt();
if (uintValue >
static_cast<std::uint32_t>(
std::numeric_limits<std::int32_t>::max()))
{
error = out_of_range(json_name, fieldName);
return ret;
}
ret = detail::make_stvar<STInt32>(
field, static_cast<std::int32_t>(uintValue));
}
else
{
error = bad_type(json_name, fieldName);
return ret;
}
}
catch (std::exception const&)
{
error = invalid_data(json_name, fieldName);
return ret;
}
break;
case STI_VL:
if (!value.isString())
{
@@ -811,12 +703,6 @@ parseLeaf(
AccountID uAccount, uIssuer;
Currency uCurrency;
if (!account && !currency && !issuer)
{
error = invalid_data(element_name);
return ret;
}
if (account)
{
// human account id
@@ -1166,7 +1052,8 @@ parseArray(
Json::Value const objectFields(json[i][objectName]);
std::stringstream ss;
ss << json_name << "." << "[" << i << "]." << objectName;
ss << json_name << "."
<< "[" << i << "]." << objectName;
auto ret = parseObject(
ss.str(), objectFields, nameField, depth + 1, error);
@@ -1209,4 +1096,24 @@ STParsedJSONObject::STParsedJSONObject(
object = parseObject(name, json, sfGeneric, 0, error);
}
//------------------------------------------------------------------------------
STParsedJSONArray::STParsedJSONArray(
std::string const& name,
Json::Value const& json)
{
using namespace STParsedJSONDetail;
auto arr = parseArray(name, json, sfGeneric, 0, error);
if (!arr)
array.reset();
else
{
auto p = dynamic_cast<STArray*>(&arr->get());
if (p == nullptr)
array.reset();
else
array = std::move(*p);
}
}
} // namespace ripple

View File

@@ -200,11 +200,11 @@ STTx::getSigningHash() const
}
Blob
STTx::getSignature(STObject const& sigObject)
STTx::getSignature() const
{
try
{
return sigObject.getFieldVL(sfTxnSignature);
return getFieldVL(sfTxnSignature);
}
catch (std::exception const&)
{
@@ -234,68 +234,35 @@ STTx::getSeqValue() const
}
void
STTx::sign(
PublicKey const& publicKey,
SecretKey const& secretKey,
std::optional<std::reference_wrapper<SField const>> signatureTarget)
STTx::sign(PublicKey const& publicKey, SecretKey const& secretKey)
{
auto const data = getSigningData(*this);
auto const sig = ripple::sign(publicKey, secretKey, makeSlice(data));
if (signatureTarget)
{
auto& target = peekFieldObject(*signatureTarget);
target.setFieldVL(sfTxnSignature, sig);
}
else
{
setFieldVL(sfTxnSignature, sig);
}
setFieldVL(sfTxnSignature, sig);
tid_ = getHash(HashPrefix::transactionID);
}
Expected<void, std::string>
STTx::checkSign(
RequireFullyCanonicalSig requireCanonicalSig,
Rules const& rules,
STObject const& sigObject) const
{
try
{
// Determine whether we're single- or multi-signing by looking
// at the SigningPubKey. If it's empty we must be
// multi-signing. Otherwise we're single-signing.
Blob const& signingPubKey = sigObject.getFieldVL(sfSigningPubKey);
return signingPubKey.empty()
? checkMultiSign(requireCanonicalSig, rules, sigObject)
: checkSingleSign(requireCanonicalSig, sigObject);
}
catch (std::exception const&)
{
}
return Unexpected("Internal signature check failure.");
}
Expected<void, std::string>
STTx::checkSign(
RequireFullyCanonicalSig requireCanonicalSig,
Rules const& rules) const
{
if (auto const ret = checkSign(requireCanonicalSig, rules, *this); !ret)
return ret;
/* Placeholder for field that will be added by Lending Protocol
if (isFieldPresent(sfCounterpartySignature))
try
{
auto const counterSig = getFieldObject(sfCounterpartySignature);
if (auto const ret = checkSign(requireCanonicalSig, rules, counterSig);
!ret)
return Unexpected("Counterparty: " + ret.error());
// Determine whether we're single- or multi-signing by looking
// at the SigningPubKey. If it's empty we must be
// multi-signing. Otherwise we're single-signing.
Blob const& signingPubKey = getFieldVL(sfSigningPubKey);
return signingPubKey.empty()
? checkMultiSign(requireCanonicalSig, rules)
: checkSingleSign(requireCanonicalSig);
}
*/
return {};
catch (std::exception const&)
{
}
return Unexpected("Internal signature check failure.");
}
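// The dispatch rule above in one line: an empty SigningPubKey is the marker
// for multi-signing. So, for example, a transaction carrying Signers plus a
// non-empty SigningPubKey is routed to checkSingleSign and fails there with
// "Cannot both single- and multi-sign."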
Expected<void, std::string>
@@ -415,23 +382,23 @@ STTx::getMetaSQL(
static Expected<void, std::string>
singleSignHelper(
STObject const& sigObject,
STObject const& signer,
Slice const& data,
bool const fullyCanonical)
{
// We don't allow both a non-empty sfSigningPubKey and an sfSigners.
// That would allow the transaction to be signed two ways. So if both
// fields are present the signature is invalid.
if (sigObject.isFieldPresent(sfSigners))
if (signer.isFieldPresent(sfSigners))
return Unexpected("Cannot both single- and multi-sign.");
bool validSig = false;
try
{
auto const spk = sigObject.getFieldVL(sfSigningPubKey);
auto const spk = signer.getFieldVL(sfSigningPubKey);
if (publicKeyType(makeSlice(spk)))
{
Blob const signature = sigObject.getFieldVL(sfTxnSignature);
Blob const signature = signer.getFieldVL(sfTxnSignature);
validSig = verify(
PublicKey(makeSlice(spk)),
data,
@@ -451,14 +418,12 @@ singleSignHelper(
}
Expected<void, std::string>
STTx::checkSingleSign(
RequireFullyCanonicalSig requireCanonicalSig,
STObject const& sigObject) const
STTx::checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const
{
auto const data = getSigningData(*this);
bool const fullyCanonical = (getFlags() & tfFullyCanonicalSig) ||
(requireCanonicalSig == STTx::RequireFullyCanonicalSig::yes);
return singleSignHelper(sigObject, makeSlice(data), fullyCanonical);
return singleSignHelper(*this, makeSlice(data), fullyCanonical);
}
Expected<void, std::string>
@@ -475,29 +440,31 @@ STTx::checkBatchSingleSign(
Expected<void, std::string>
multiSignHelper(
STObject const& sigObject,
std::optional<AccountID> txnAccountID,
STObject const& signerObj,
bool const fullyCanonical,
std::function<Serializer(AccountID const&)> makeMsg,
Rules const& rules)
{
// Make sure the MultiSigners are present. Otherwise they are not
// attempting multi-signing and we just have a bad SigningPubKey.
-if (!sigObject.isFieldPresent(sfSigners))
+if (!signerObj.isFieldPresent(sfSigners))
return Unexpected("Empty SigningPubKey.");
// We don't allow both an sfSigners and an sfTxnSignature. Both fields
// being present would indicate that the transaction is signed both ways.
-if (sigObject.isFieldPresent(sfTxnSignature))
+if (signerObj.isFieldPresent(sfTxnSignature))
return Unexpected("Cannot both single- and multi-sign.");
-STArray const& signers{sigObject.getFieldArray(sfSigners)};
+STArray const& signers{signerObj.getFieldArray(sfSigners)};
// There are well known bounds that the number of signers must be within.
if (signers.size() < STTx::minMultiSigners ||
signers.size() > STTx::maxMultiSigners(&rules))
return Unexpected("Invalid Signers array size.");
+// We also use the sfAccount field inside the loop. Get it once.
+auto const txnAccountID = signerObj.getAccountID(sfAccount);
// Signers must be in sorted order by AccountID.
AccountID lastAccountID(beast::zero);
@@ -505,10 +472,8 @@ multiSignHelper(
{
auto const accountID = signer.getAccountID(sfAccount);
-// The account owner may not usually multisign for themselves.
-// If they can, txnAccountID will be unseated, which is not equal to any
-// value.
-if (txnAccountID == accountID)
+// The account owner may not multisign for themselves.
+if (accountID == txnAccountID)
return Unexpected("Invalid multisigner.");
// No duplicate signers allowed.
@@ -524,7 +489,6 @@ multiSignHelper(
// Verify the signature.
bool validSig = false;
-std::optional<std::string> errorWhat;
try
{
auto spk = signer.getFieldVL(sfSigningPubKey);
@@ -538,16 +502,15 @@ multiSignHelper(
fullyCanonical);
}
}
-catch (std::exception const& e)
+catch (std::exception const&)
{
// We assume any problem lies with the signature.
validSig = false;
-errorWhat = e.what();
}
if (!validSig)
return Unexpected(
std::string("Invalid signature on account ") +
-toBase58(accountID) + errorWhat.value_or("") + ".");
+toBase58(accountID) + ".");
}
// All signatures verified.
return {};
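The loop above walks the Signers array while tracking lastAccountID, which enforces two rules at once: the accounts must appear in strictly ascending order, and duplicates are impossible by construction. A standalone sketch of the same invariant; AccountId and the error text are illustrative stand-ins:

#include <cstddef>
#include <optional>
#include <string>
#include <vector>

using AccountId = std::vector<unsigned char>;  // stand-in for AccountID

// Strictly ascending order implies both "sorted" and "no duplicates".
std::optional<std::string>
checkSignerOrder(std::vector<AccountId> const& signerAccounts)
{
    for (std::size_t i = 1; i < signerAccounts.size(); ++i)
        if (!(signerAccounts[i - 1] < signerAccounts[i]))
            return "Signers not in strictly ascending order.";
    return std::nullopt;
}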
@@ -569,9 +532,8 @@ STTx::checkBatchMultiSign(
serializeBatch(dataStart, getFlags(), getBatchTransactionIDs());
return multiSignHelper(
batchSigner,
-std::nullopt,
fullyCanonical,
-[&dataStart](AccountID const& accountID) -> Serializer {
+[&dataStart](AccountID const& accountID) mutable -> Serializer {
Serializer s = dataStart;
finishMultiSigningData(accountID, s);
return s;
@@ -582,27 +544,19 @@ STTx::checkBatchMultiSign(
Expected<void, std::string>
STTx::checkMultiSign(
RequireFullyCanonicalSig requireCanonicalSig,
-Rules const& rules,
-STObject const& sigObject) const
+Rules const& rules) const
{
bool const fullyCanonical = (getFlags() & tfFullyCanonicalSig) ||
(requireCanonicalSig == RequireFullyCanonicalSig::yes);
-// Used inside the loop in multiSignHelper to enforce that
-// the account owner may not multisign for themselves.
-auto const txnAccountID = &sigObject != this
-? std::nullopt
-: std::optional<AccountID>(getAccountID(sfAccount));
// We can ease the computational load inside the loop a bit by
// pre-constructing part of the data that we hash. Fill a Serializer
// with the stuff that stays constant from signature to signature.
Serializer dataStart = startMultiSigningData(*this);
return multiSignHelper(
-sigObject,
-txnAccountID,
+*this,
fullyCanonical,
-[&dataStart](AccountID const& accountID) -> Serializer {
+[&dataStart](AccountID const& accountID) mutable -> Serializer {
Serializer s = dataStart;
finishMultiSigningData(accountID, s);
return s;
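The comment about easing the computational load describes a shared-prefix pattern: everything that is identical across signers is serialized once into dataStart, and the lambda copies it and appends only the per-signer account. A simplified sketch of the idea, with std::string standing in for Serializer and an illustrative function name:

#include <string>
#include <vector>

// Build one message per signer from a precomputed common prefix instead
// of reserializing the whole transaction for every signature.
std::vector<std::string>
perSignerMessages(
    std::string const& commonPrefix,
    std::vector<std::string> const& signerAccounts)
{
    std::vector<std::string> messages;
    messages.reserve(signerAccounts.size());
    for (auto const& account : signerAccounts)
        messages.push_back(commonPrefix + account);  // copy prefix, append
    return messages;
}

Note that the mutable added to the lambda on the + side has no observable effect here: mutable only affects by-value captures, and dataStart is captured by reference.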
@@ -615,7 +569,7 @@ STTx::checkMultiSign(
*
* This function returns a vector of transaction IDs by extracting them from
* the field array `sfRawTransactions` within the STTx. If the batch
-* transaction IDs have already been computed and cached in `batchTxnIds_`,
+* transaction IDs have already been computed and cached in `batch_txn_ids_`,
* it returns the cached vector. Otherwise, it computes the transaction IDs,
* caches them, and then returns the vector.
*
@@ -625,7 +579,7 @@ STTx::checkMultiSign(
* empty and that the size of the computed batch transaction IDs matches the
* size of the `sfRawTransactions` field array.
*/
-std::vector<uint256> const&
+std::vector<uint256>
STTx::getBatchTransactionIDs() const
{
XRPL_ASSERT(
@@ -634,20 +588,16 @@ STTx::getBatchTransactionIDs() const
XRPL_ASSERT(
getFieldArray(sfRawTransactions).size() != 0,
"STTx::getBatchTransactionIDs : empty raw transactions");
+if (batch_txn_ids_.size() != 0)
+return batch_txn_ids_;
-// The list of inner ids is built once, then reused on subsequent calls.
-// After the list is built, it must always have the same size as the array
-// `sfRawTransactions`. The assert below verifies that.
-if (batchTxnIds_.size() == 0)
-{
-for (STObject const& rb : getFieldArray(sfRawTransactions))
-batchTxnIds_.push_back(rb.getHash(HashPrefix::transactionID));
-}
+for (STObject const& rb : getFieldArray(sfRawTransactions))
+batch_txn_ids_.push_back(rb.getHash(HashPrefix::transactionID));
XRPL_ASSERT(
-batchTxnIds_.size() == getFieldArray(sfRawTransactions).size(),
+batch_txn_ids_.size() == getFieldArray(sfRawTransactions).size(),
"STTx::getBatchTransactionIDs : batch transaction IDs size mismatch");
-return batchTxnIds_;
+return batch_txn_ids_;
}
//------------------------------------------------------------------------------
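On both sides of the hunk above, getBatchTransactionIDs computes the inner-transaction IDs once and caches them in a mutable member; the sides differ in the cache's name (batchTxnIds_ vs batch_txn_ids_) and in returning the vector by const reference versus by value. A minimal sketch of this lazy-cache shape; Id and the filler values are illustrative:

#include <vector>

struct BatchIdCache
{
    using Id = unsigned long long;  // stand-in for uint256

    // Compute the list on first use, then reuse it. Returning by const
    // reference (the - side) avoids a copy per call; returning by value
    // (the + side) copies but keeps callers independent of the cache.
    std::vector<Id> const& get() const
    {
        if (ids_.empty())
            ids_ = compute();
        return ids_;
    }

private:
    static std::vector<Id> compute() { return {101, 102, 103}; }
    mutable std::vector<Id> ids_;  // mutable so get() can stay const
};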

Some files were not shown because too many files have changed in this diff.