Compare commits


20 Commits

Author  SHA1        Message                                                                                  Date
JCW     84e729bc74  Fix formatting                                                                           2025-09-18 16:48:26 +01:00
JCW     7af195781c  Fix formatting                                                                           2025-09-18 16:37:37 +01:00
JCW     628c3c59aa  Address PR comments                                                                      2025-09-18 16:37:37 +01:00
JCW     73086872f0  Fix formatting                                                                           2025-09-18 16:37:37 +01:00
JCW     c00432cc0b  Fix build error                                                                          2025-09-18 16:37:36 +01:00
JCW     47b5c0367c  Fix build error                                                                          2025-09-18 16:37:36 +01:00
JCW     e6dfddfc30  Fix build error                                                                          2025-09-18 16:37:36 +01:00
JCW     3fa9a9fa38  Fix formatting                                                                           2025-09-18 16:37:36 +01:00
JCW     7796b453c2  Fix build error                                                                          2025-09-18 16:37:36 +01:00
JCW     30530e42de  Fix build error                                                                          2025-09-18 16:37:36 +01:00
JCW     c4450dc54f  Fix formatting                                                                           2025-09-18 16:37:35 +01:00
JCW     304c4ceb9e  Make Keylet strongly typed so that we can get strongly typed ledger obejcts from views  2025-09-18 16:37:34 +01:00
JCW     e7633c939a  Fix formatting                                                                           2025-09-18 16:36:55 +01:00
JCW     70ce254511  Support const ledger entries                                                             2025-09-18 16:36:55 +01:00
JCW     9248f60a09  Fix formatting                                                                           2025-09-18 16:36:55 +01:00
JCW     888b97346e  Add comments                                                                             2025-09-18 16:36:55 +01:00
JCW     9dbc4e1c64  Fix formatting                                                                            2025-09-18 16:36:55 +01:00
JCW     1bd3d227ff  Fix build error                                                                          2025-09-18 16:36:54 +01:00
JCW     31a127eddc  Fix test coverage                                                                        2025-09-18 16:36:54 +01:00
JCW     d1b36dcbd9  Strongly typed ledger objects support                                                    2025-09-18 16:36:53 +01:00

Commit messages carry the trailer: Signed-off-by: JCW <a1q123456@users.noreply.github.com>.
360 changed files with 9676 additions and 15691 deletions

View File

@@ -10,40 +10,24 @@ inputs:
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
build_nproc:
description: "The number of processors to use for building."
required: true
force_build:
description: 'Force building of all dependencies ("true", "false").'
required: false
default: "false"
log_verbosity:
description: "The logging verbosity."
required: false
default: "verbose"
runs:
using: composite
steps:
- name: Install Conan dependencies
shell: bash
env:
BUILD_DIR: ${{ inputs.build_dir }}
BUILD_NPROC: ${{ inputs.build_nproc }}
BUILD_OPTION: ${{ inputs.force_build == 'true' && '*' || 'missing' }}
BUILD_TYPE: ${{ inputs.build_type }}
LOG_VERBOSITY: ${{ inputs.log_verbosity }}
run: |
echo 'Installing dependencies.'
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
mkdir -p ${{ inputs.build_dir }}
cd ${{ inputs.build_dir }}
conan install \
--output-folder . \
--build="${BUILD_OPTION}" \
--options:host='&:tests=True' \
--options:host='&:xrpld=True' \
--settings:all build_type="${BUILD_TYPE}" \
--conf:all tools.build:jobs=${BUILD_NPROC} \
--conf:all tools.build:verbosity="${LOG_VERBOSITY}" \
--conf:all tools.compilation:verbosity="${LOG_VERBOSITY}" \
--build ${{ inputs.force_build == 'true' && '"*"' || 'missing' }} \
--options:host '&:tests=True' \
--options:host '&:xrpld=True' \
--settings:all build_type=${{ inputs.build_type }} \
..

.github/actions/build-test/action.yml (new file, 96 lines)
View File

@@ -0,0 +1,96 @@
# This action build and tests the binary. The Conan dependencies must have
# already been installed (see the build-deps action).
name: Build and Test
description: "Build and test the binary."
# Note that actions do not support 'type' and all inputs are strings, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
inputs:
build_dir:
description: "The directory where to build."
required: true
build_only:
description: 'Whether to only build or to build and test the code ("true", "false").'
required: false
default: "false"
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
cmake_args:
description: "Additional arguments to pass to CMake."
required: false
default: ""
cmake_target:
description: "The CMake target to build."
required: true
codecov_token:
description: "The Codecov token to use for uploading coverage reports."
required: false
default: ""
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
runs:
using: composite
steps:
- name: Configure CMake
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Configuring CMake.'
cmake \
-G '${{ inputs.os == 'windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE=${{ inputs.build_type }} \
${{ inputs.cmake_args }} \
..
- name: Build the binary
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Building binary.'
cmake \
--build . \
--config ${{ inputs.build_type }} \
--parallel $(nproc) \
--target ${{ inputs.cmake_target }}
- name: Check linking
if: ${{ inputs.os == 'linux' }}
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Checking linking.'
ldd ./rippled
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
echo 'The binary is statically linked.'
else
echo 'The binary is dynamically linked.'
exit 1
fi
- name: Verify voidstar
if: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo 'Verifying presence of instrumentation.'
./rippled --version | grep libvoidstar
- name: Test the binary
if: ${{ inputs.build_only == 'false' }}
shell: bash
working-directory: ${{ inputs.build_dir }}/${{ inputs.os == 'windows' && inputs.build_type || '' }}
run: |
echo 'Testing binary.'
./rippled --unittest --unittest-jobs $(nproc)
ctest -j $(nproc) --output-on-failure
- name: Upload coverage report
if: ${{ inputs.cmake_target == 'coverage' }}
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
with:
disable_search: true
disable_telem: true
fail_ci_if_error: true
files: ${{ inputs.build_dir }}/coverage.xml
plugins: noop
token: ${{ inputs.codecov_token }}
verbose: true

View File

@@ -1,43 +0,0 @@
name: Print build environment
description: "Print environment and some tooling versions"
runs:
using: composite
steps:
- name: Check configuration (Windows)
if: ${{ runner.os == 'Windows' }}
shell: bash
run: |
echo 'Checking environment variables.'
set
echo 'Checking CMake version.'
cmake --version
echo 'Checking Conan version.'
conan --version
- name: Check configuration (Linux and macOS)
if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }}
shell: bash
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking compiler version.'
${{ runner.os == 'Linux' && '${CC}' || 'clang' }} --version
echo 'Checking Conan version.'
conan --version
echo 'Checking Ninja version.'
ninja --version
echo 'Checking nproc version.'
nproc --version

View File

@@ -35,12 +35,9 @@ runs:
- name: Set up Conan remote
shell: bash
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
CONAN_REMOTE_URL: ${{ inputs.conan_remote_url }}
run: |
echo "Adding Conan remote '${CONAN_REMOTE_NAME}' at '${CONAN_REMOTE_URL}'."
conan remote add --index 0 --force "${CONAN_REMOTE_NAME}" "${CONAN_REMOTE_URL}"
echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }}
echo 'Listing Conan remotes.'
conan remote list

View File

@@ -72,15 +72,15 @@ It generates many files of [results](results):
desired as described above. In a perfect repo, this file will be
empty.
This file is committed to the repo, and is used by the [levelization
Github workflow](../../workflows/reusable-check-levelization.yml) to validate
Github workflow](../../workflows/check-levelization.yml) to validate
that nothing changed.
- [`ordering.txt`](results/ordering.txt): A list showing relationships
between modules where there are no loops as they actually exist, as
opposed to how they are desired as described above.
This file is committed to the repo, and is used by the [levelization
Github workflow](../../workflows/reusable-check-levelization.yml) to validate
Github workflow](../../workflows/check-levelization.yml) to validate
that nothing changed.
- [`levelization.yml`](../../workflows/reusable-check-levelization.yml)
- [`levelization.yml`](../../workflows/check-levelization.yml)
Github Actions workflow to test that levelization loops haven't
changed. Unfortunately, if changes are detected, it can't tell if
they are improvements or not, so if you have resolved any issues or

View File

@@ -138,8 +138,6 @@ test.toplevel > test.csf
test.toplevel > xrpl.json
test.unit_test > xrpl.basics
tests.libxrpl > xrpl.basics
tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.net
xrpl.json > xrpl.basics
xrpl.ledger > xrpl.basics
xrpl.ledger > xrpl.protocol

View File

@@ -74,14 +74,14 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
continue
# RHEL:
# - 9 using GCC 12: Debug and Unity on linux/amd64.
# - 10 using Clang: Release and no Unity on linux/amd64.
# - 9.4 using GCC 12: Debug and Unity on linux/amd64.
# - 9.6 using Clang: Release and no Unity on linux/amd64.
if os['distro_name'] == 'rhel':
skip = True
if os['distro_version'] == '9':
if os['distro_version'] == '9.4':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-12' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
skip = False
elif os['distro_version'] == '10':
elif os['distro_version'] == '9.6':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-any' and build_type == 'Release' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
skip = False
if skip:
@@ -162,7 +162,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
'config_name': config_name,
'cmake_args': cmake_args,
'cmake_target': cmake_target,
'build_only': build_only,
'build_only': 'true' if build_only else 'false',
'build_type': build_type,
'os': os,
'architecture': architecture,

View File

@@ -14,169 +14,139 @@
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "6948666"
"compiler_version": "12"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "6948666"
"compiler_version": "13"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "6948666"
"compiler_version": "14"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "6948666"
"compiler_version": "15"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "6948666"
"compiler_version": "16"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "6948666"
"compiler_version": "17"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "6948666"
"compiler_version": "18"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "6948666"
"compiler_version": "19"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "6948666"
"compiler_version": "20"
},
{
"distro_name": "rhel",
"distro_version": "8",
"distro_version": "9.4",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "10e69b4"
"compiler_version": "12"
},
{
"distro_name": "rhel",
"distro_version": "8",
"distro_version": "9.4",
"compiler_name": "gcc",
"compiler_version": "13"
},
{
"distro_name": "rhel",
"distro_version": "9.4",
"compiler_name": "gcc",
"compiler_version": "14"
},
{
"distro_name": "rhel",
"distro_version": "9.6",
"compiler_name": "gcc",
"compiler_version": "13"
},
{
"distro_name": "rhel",
"distro_version": "9.6",
"compiler_name": "gcc",
"compiler_version": "14"
},
{
"distro_name": "rhel",
"distro_version": "9.4",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "10e69b4"
"compiler_version": "any"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "10e69b4"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "10e69b4"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "10e69b4"
},
{
"distro_name": "rhel",
"distro_version": "9",
"distro_version": "9.6",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "10e69b4"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "10e69b4"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "10e69b4"
"compiler_version": "any"
},
{
"distro_name": "ubuntu",
"distro_version": "jammy",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "6948666"
"compiler_version": "12"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "6948666"
"compiler_version": "13"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "6948666"
"compiler_version": "14"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "6948666"
"compiler_version": "16"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "6948666"
"compiler_version": "17"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "6948666"
"compiler_version": "18"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "6948666"
"compiler_version": "19"
}
],
"build_type": ["Debug", "Release"],

View File

@@ -10,8 +10,7 @@
"distro_name": "macos",
"distro_version": "",
"compiler_name": "",
"compiler_version": "",
"image_sha": ""
"compiler_version": ""
}
],
"build_type": ["Debug", "Release"],

View File

@@ -10,8 +10,7 @@
"distro_name": "windows",
"distro_version": "",
"compiler_name": "",
"compiler_version": "",
"image_sha": ""
"compiler_version": ""
}
],
"build_type": ["Debug", "Release"],

.github/workflows/build-test.yml (new file, 147 lines)
View File

@@ -0,0 +1,147 @@
# This workflow builds and tests the binary for various configurations.
name: Build and test
# This workflow can only be triggered by other workflows. Note that the
# workflow_call event does not support the 'choice' input type, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onworkflow_callinputsinput_idtype,
# so we use 'string' instead.
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: false
type: string
default: ".build"
dependencies_force_build:
description: "Force building of all dependencies."
required: false
type: boolean
default: false
dependencies_force_upload:
description: "Force uploading of all dependencies."
required: false
type: boolean
default: false
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
secrets:
codecov_token:
description: "The Codecov token to use for uploading coverage reports."
required: false
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.os }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
os: ${{ inputs.os }}
strategy_matrix: ${{ inputs.strategy_matrix }}
# Build and test the binary.
build-test:
needs:
- generate-matrix
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
runs-on: ${{ matrix.architecture.runner }}
container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }}
steps:
- name: Check strategy matrix
run: |
echo 'Operating system distro name: ${{ matrix.os.distro_name }}'
echo 'Operating system distro version: ${{ matrix.os.distro_version }}'
echo 'Operating system compiler name: ${{ matrix.os.compiler_name }}'
echo 'Operating system compiler version: ${{ matrix.os.compiler_version }}'
echo 'Architecture platform: ${{ matrix.architecture.platform }}'
echo 'Architecture runner: ${{ toJson(matrix.architecture.runner) }}'
echo 'Build type: ${{ matrix.build_type }}'
echo 'Build only: ${{ matrix.build_only }}'
echo 'CMake arguments: ${{ matrix.cmake_args }}'
echo 'CMake target: ${{ matrix.cmake_target }}'
echo 'Config name: ${{ matrix.config_name }}'
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
with:
disable_ccache: false
- name: Check configuration (Windows)
if: ${{ inputs.os == 'windows' }}
run: |
echo 'Checking environment variables.'
set
echo 'Checking CMake version.'
cmake --version
echo 'Checking Conan version.'
conan --version
- name: Check configuration (Linux and MacOS)
if: ${{ inputs.os == 'linux' || inputs.os == 'macos' }}
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking compiler version.'
${{ inputs.os == 'linux' && '${CC}' || 'clang' }} --version
echo 'Checking Conan version.'
conan --version
echo 'Checking Ninja version.'
ninja --version
echo 'Checking nproc version.'
nproc --version
- name: Setup Conan
uses: ./.github/actions/setup-conan
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ inputs.build_dir }}
build_type: ${{ matrix.build_type }}
force_build: ${{ inputs.dependencies_force_build }}
- name: Build and test binary
uses: ./.github/actions/build-test
with:
build_dir: ${{ inputs.build_dir }}
build_only: ${{ matrix.build_only }}
build_type: ${{ matrix.build_type }}
cmake_args: ${{ matrix.cmake_args }}
cmake_target: ${{ matrix.cmake_target }}
codecov_token: ${{ secrets.codecov_token }}
os: ${{ inputs.os }}

View File

@@ -40,52 +40,47 @@ jobs:
upload:
if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158
container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Generate outputs
id: generate
env:
PR_NUMBER: ${{ github.event.pull_request.number }}
run: |
echo 'Generating user and channel.'
echo "user=clio" >> "${GITHUB_OUTPUT}"
echo "channel=pr_${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
echo "channel=pr_${{ github.event.pull_request.number }}" >> "${GITHUB_OUTPUT}"
echo 'Extracting version.'
echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}"
- name: Calculate conan reference
id: conan_ref
run: |
echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}"
- name: Set up Conan
uses: ./.github/actions/setup-conan
with:
conan_remote_name: ${{ inputs.conan_remote_name }}
conan_remote_url: ${{ inputs.conan_remote_url }}
- name: Log into Conan remote
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
run: conan remote login ${{ inputs.conan_remote_name }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
- name: Upload package
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
run: |
conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} .
conan upload --confirm --check --remote="${CONAN_REMOTE_NAME}" xrpl/${{ steps.conan_ref.outputs.conan_ref }}
conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.conan_ref.outputs.conan_ref }}
outputs:
conan_ref: ${{ steps.conan_ref.outputs.conan_ref }}
notify:
needs: upload
runs-on: ubuntu-latest
env:
GH_TOKEN: ${{ secrets.clio_notify_token }}
steps:
- name: Notify Clio
env:
GH_TOKEN: ${{ secrets.clio_notify_token }}
PR_URL: ${{ github.event.pull_request.html_url }}
run: |
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
-F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \
-F "client_payload[pr_url]=${PR_URL}"
-F "client_payload[pr_url]=${{ github.event.pull_request.html_url }}"

View File

@@ -50,8 +50,8 @@ jobs:
files: |
# These paths are unique to `on-pr.yml`.
.github/scripts/levelization/**
.github/workflows/reusable-check-levelization.yml
.github/workflows/reusable-notify-clio.yml
.github/workflows/check-levelization.yml
.github/workflows/notify-clio.yml
.github/workflows/on-pr.yml
# Keep the paths below in sync with those in `on-trigger.yml`.
@@ -59,11 +59,8 @@ jobs:
.github/actions/build-test/**
.github/actions/setup-conan/**
.github/scripts/strategy-matrix/**
.github/workflows/reusable-build.yml
.github/workflows/reusable-build-test-config.yml
.github/workflows/reusable-build-test.yml
.github/workflows/build-test.yml
.github/workflows/reusable-strategy-matrix.yml
.github/workflows/reusable-test.yml
.codecov.yml
cmake/**
conan/**
@@ -96,27 +93,26 @@ jobs:
check-levelization:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-check-levelization.yml
uses: ./.github/workflows/check-levelization.yml
build-test:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-build-test.yml
uses: ./.github/workflows/build-test.yml
strategy:
fail-fast: false
matrix:
os: [linux, macos, windows]
with:
os: ${{ matrix.os }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
codecov_token: ${{ secrets.CODECOV_TOKEN }}
notify-clio:
needs:
- should-run
- build-test
if: ${{ needs.should-run.outputs.go == 'true' && contains(fromJSON('["release", "master"]'), github.ref_name) }}
uses: ./.github/workflows/reusable-notify-clio.yml
uses: ./.github/workflows/notify-clio.yml
secrets:
clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}

View File

@@ -9,12 +9,12 @@ name: Trigger
on:
push:
branches:
- "develop"
- "release*"
- "master"
- develop
- release
- master
paths:
# These paths are unique to `on-trigger.yml`.
- ".github/workflows/reusable-check-missing-commits.yml"
- ".github/workflows/check-missing-commits.yml"
- ".github/workflows/on-trigger.yml"
- ".github/workflows/publish-docs.yml"
@@ -23,11 +23,8 @@ on:
- ".github/actions/build-test/**"
- ".github/actions/setup-conan/**"
- ".github/scripts/strategy-matrix/**"
- ".github/workflows/reusable-build.yml"
- ".github/workflows/reusable-build-test-config.yml"
- ".github/workflows/reusable-build-test.yml"
- ".github/workflows/build-test.yml"
- ".github/workflows/reusable-strategy-matrix.yml"
- ".github/workflows/reusable-test.yml"
- ".codecov.yml"
- "cmake/**"
- "conan/**"
@@ -46,16 +43,25 @@ on:
schedule:
- cron: "32 6 * * 1-5"
# Run when manually triggered via the GitHub UI or API.
# Run when manually triggered via the GitHub UI or API. If `force_upload` is
# true, then the dependencies that were missing (`force_rebuild` is false) or
# rebuilt (`force_rebuild` is true) will be uploaded, overwriting existing
# dependencies if needed.
workflow_dispatch:
inputs:
dependencies_force_build:
description: "Force building of all dependencies."
required: false
type: boolean
default: false
dependencies_force_upload:
description: "Force uploading of all dependencies."
required: false
type: boolean
default: false
concurrency:
# When a PR is merged into the develop branch it will be assigned a unique
# group identifier, so execution will continue even if another PR is merged
# while it is still running. In all other cases the group identifier is shared
# per branch, so that any in-progress runs are cancelled when a new commit is
# pushed.
group: ${{ github.workflow }}-${{ github.event_name == 'push' && github.ref == 'refs/heads/develop' && github.sha || github.ref }}
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
defaults:
@@ -65,16 +71,15 @@ defaults:
jobs:
check-missing-commits:
if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }}
uses: ./.github/workflows/reusable-check-missing-commits.yml
uses: ./.github/workflows/check-missing-commits.yml
build-test:
uses: ./.github/workflows/reusable-build-test.yml
uses: ./.github/workflows/build-test.yml
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix:
os: [linux, macos, windows]
with:
os: ${{ matrix.os }}
strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
codecov_token: ${{ secrets.CODECOV_TOKEN }}

View File

@@ -7,9 +7,8 @@ on:
workflow_dispatch:
jobs:
# Call the workflow in the XRPLF/actions repo that runs the pre-commit hooks.
run-hooks:
uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a
uses: XRPLF/actions/.github/workflows/pre-commit.yml@af1b0f0d764cda2e5435f5ac97b240d4bd4d95d3
with:
runs_on: ubuntu-latest
container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-a8c7be1" }'
container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit" }'

View File

@@ -23,24 +23,16 @@ defaults:
env:
BUILD_DIR: .build
NPROC_SUBTRACT: 2
jobs:
publish:
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-a8c7be1
container: ghcr.io/xrplf/ci/tools-rippled-documentation
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ env.NPROC_SUBTRACT }}
- name: Check configuration
run: |
echo 'Checking path.'
@@ -54,16 +46,12 @@ jobs:
echo 'Checking Doxygen version.'
doxygen --version
- name: Build documentation
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
run: |
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
mkdir -p ${{ env.BUILD_DIR }}
cd ${{ env.BUILD_DIR }}
cmake -Donly_docs=ON ..
cmake --build . --target docs --parallel ${BUILD_NPROC}
cmake --build . --target docs --parallel $(nproc)
- name: Publish documentation
if: ${{ github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0

View File

@@ -1,77 +0,0 @@
name: Build and test configuration
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: true
type: string
build_only:
description: 'Whether to only build or to build and test the code ("true", "false").'
required: true
type: boolean
build_type:
description: 'The build type to use ("Debug", "Release").'
type: string
required: true
cmake_args:
description: "Additional arguments to pass to CMake."
required: false
type: string
default: ""
cmake_target:
description: "The CMake target to build."
type: string
required: true
runs_on:
description: Runner to run the job on as a JSON string
required: true
type: string
image:
description: "The image to run in (leave empty to run natively)"
required: true
type: string
config_name:
description: "The configuration string (used for naming artifacts and such)."
required: true
type: string
nproc_subtract:
description: "The number of processors to subtract when calculating parallelism."
required: false
type: number
default: 2
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
jobs:
build:
uses: ./.github/workflows/reusable-build.yml
with:
build_dir: ${{ inputs.build_dir }}
build_type: ${{ inputs.build_type }}
cmake_args: ${{ inputs.cmake_args }}
cmake_target: ${{ inputs.cmake_target }}
runs_on: ${{ inputs.runs_on }}
image: ${{ inputs.image }}
config_name: ${{ inputs.config_name }}
nproc_subtract: ${{ inputs.nproc_subtract }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
test:
needs: build
uses: ./.github/workflows/reusable-test.yml
with:
run_tests: ${{ !inputs.build_only }}
verify_voidstar: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
runs_on: ${{ inputs.runs_on }}
image: ${{ inputs.image }}
config_name: ${{ inputs.config_name }}
nproc_subtract: ${{ inputs.nproc_subtract }}

View File

@@ -1,58 +0,0 @@
# This workflow builds and tests the binary for various configurations.
name: Build and test
# This workflow can only be triggered by other workflows. Note that the
# workflow_call event does not support the 'choice' input type, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onworkflow_callinputsinput_idtype,
# so we use 'string' instead.
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: false
type: string
default: ".build"
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
os: ${{ inputs.os }}
strategy_matrix: ${{ inputs.strategy_matrix }}
# Build and test the binary for each configuration.
build-test-config:
needs:
- generate-matrix
uses: ./.github/workflows/reusable-build-test-config.yml
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
with:
build_dir: ${{ inputs.build_dir }}
build_only: ${{ matrix.build_only }}
build_type: ${{ matrix.build_type }}
cmake_args: ${{ matrix.cmake_args }}
cmake_target: ${{ matrix.cmake_target }}
runs_on: ${{ toJSON(matrix.architecture.runner) }}
image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || '' }}
config_name: ${{ matrix.config_name }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

View File

@@ -1,154 +0,0 @@
name: Build rippled
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: true
type: string
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
type: string
cmake_args:
description: "Additional arguments to pass to CMake."
required: true
type: string
cmake_target:
description: "The CMake target to build."
required: true
type: string
runs_on:
description: Runner to run the job on as a JSON string
required: true
type: string
image:
description: "The image to run in (leave empty to run natively)"
required: true
type: string
config_name:
description: "The name of the configuration."
required: true
type: string
nproc_subtract:
description: "The number of processors to subtract when calculating parallelism."
required: true
type: number
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
defaults:
run:
shell: bash
jobs:
build:
name: Build ${{ inputs.config_name }}
runs-on: ${{ fromJSON(inputs.runs_on) }}
container: ${{ inputs.image != '' && inputs.image || null }}
timeout-minutes: 60
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
with:
disable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ inputs.nproc_subtract }}
- name: Setup Conan
uses: ./.github/actions/setup-conan
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ inputs.build_dir }}
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ inputs.build_type }}
# Set the verbosity to "quiet" for Windows to avoid an excessive
# amount of logs. For other OSes, the "verbose" logs are more useful.
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
- name: Configure CMake
shell: bash
working-directory: ${{ inputs.build_dir }}
env:
BUILD_TYPE: ${{ inputs.build_type }}
CMAKE_ARGS: ${{ inputs.cmake_args }}
run: |
cmake \
-G '${{ runner.os == 'Windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
${CMAKE_ARGS} \
..
- name: Build the binary
shell: bash
working-directory: ${{ inputs.build_dir }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
BUILD_TYPE: ${{ inputs.build_type }}
CMAKE_TARGET: ${{ inputs.cmake_target }}
run: |
cmake \
--build . \
--config "${BUILD_TYPE}" \
--parallel ${BUILD_NPROC} \
--target "${CMAKE_TARGET}"
- name: Put built binaries in one location
shell: bash
working-directory: ${{ inputs.build_dir }}
env:
BUILD_TYPE_DIR: ${{ runner.os == 'Windows' && inputs.build_type || '' }}
CMAKE_TARGET: ${{ inputs.cmake_target }}
run: |
mkdir -p ./binaries/doctest/
cp ./${BUILD_TYPE_DIR}/rippled* ./binaries/
if [ "${CMAKE_TARGET}" != 'coverage' ]; then
cp ./src/tests/libxrpl/${BUILD_TYPE_DIR}/xrpl.test.* ./binaries/doctest/
fi
- name: Upload rippled artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
env:
BUILD_DIR: ${{ inputs.build_dir }}
with:
name: rippled-${{ inputs.config_name }}
path: ${{ env.BUILD_DIR }}/binaries/
retention-days: 3
if-no-files-found: error
- name: Upload coverage report
if: ${{ github.repository_owner == 'XRPLF' && inputs.cmake_target == 'coverage' }}
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
with:
disable_search: true
disable_telem: true
fail_ci_if_error: true
files: ${{ inputs.build_dir }}/coverage.xml
plugins: noop
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true

View File

@@ -35,7 +35,4 @@ jobs:
- name: Generate strategy matrix
working-directory: .github/scripts/strategy-matrix
id: generate
env:
GENERATE_CONFIG: ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }}
GENERATE_OPTION: ${{ inputs.strategy_matrix == 'all' && '--all' || '' }}
run: ./generate.py ${GENERATE_OPTION} ${GENERATE_CONFIG} >> "${GITHUB_OUTPUT}"
run: ./generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }} >> "${GITHUB_OUTPUT}"

View File

@@ -1,111 +0,0 @@
name: Test rippled
on:
workflow_call:
inputs:
verify_voidstar:
description: "Whether to verify the presence of voidstar instrumentation."
required: true
type: boolean
run_tests:
description: "Whether to run unit tests"
required: true
type: boolean
runs_on:
description: Runner to run the job on as a JSON string
required: true
type: string
image:
description: "The image to run in (leave empty to run natively)"
required: true
type: string
config_name:
description: "The name of the configuration."
required: true
type: string
nproc_subtract:
description: "The number of processors to subtract when calculating parallelism."
required: true
type: number
jobs:
test:
name: Test ${{ inputs.config_name }}
runs-on: ${{ fromJSON(inputs.runs_on) }}
container: ${{ inputs.image != '' && inputs.image || null }}
timeout-minutes: 30
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ inputs.nproc_subtract }}
- name: Download rippled artifact
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: rippled-${{ inputs.config_name }}
- name: Make binary executable (Linux and macOS)
shell: bash
if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }}
run: |
chmod +x ./rippled
- name: Check linking (Linux)
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
ldd ./rippled
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
echo 'The binary is statically linked.'
else
echo 'The binary is dynamically linked.'
exit 1
fi
- name: Verifying presence of instrumentation
if: ${{ inputs.verify_voidstar }}
shell: bash
run: |
./rippled --version | grep libvoidstar
- name: Run the embedded tests
if: ${{ inputs.run_tests }}
shell: bash
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
run: |
./rippled --unittest --unittest-jobs ${BUILD_NPROC}
- name: Run the separate tests
if: ${{ inputs.run_tests }}
env:
EXT: ${{ runner.os == 'Windows' && '.exe' || '' }}
shell: bash
run: |
for test_file in ./doctest/*${EXT}; do
echo "Executing $test_file"
chmod +x "$test_file"
if [[ "${{ runner.os }}" == "Windows" && "$test_file" == "./doctest/xrpl.test.net.exe" ]]; then
echo "Skipping $test_file on Windows"
else
"$test_file"
fi
done
- name: Debug failure (Linux)
if: ${{ failure() && runner.os == 'Linux' && inputs.run_tests }}
shell: bash
run: |
echo "IPv4 local port range:"
cat /proc/sys/net/ipv4/ip_local_port_range
echo "Netstat:"
netstat -an

View File

@@ -24,30 +24,30 @@ on:
branches: [develop]
paths:
- .github/workflows/upload-conan-deps.yml
- .github/workflows/reusable-strategy-matrix.yml
- .github/actions/build-deps/action.yml
- .github/actions/setup-conan/action.yml
- ".github/scripts/strategy-matrix/**"
- conanfile.py
- conan.lock
env:
CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io
NPROC_SUBTRACT: 2
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
strategy_matrix: ${{ github.event_name == 'pull_request' && 'minimal' || 'all' }}
# Build and upload the dependencies for each configuration.
run-upload-conan-deps:
needs:
- generate-matrix
@@ -56,29 +56,19 @@ jobs:
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
runs-on: ${{ matrix.architecture.runner }}
container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || null }}
container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }}
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
with:
disable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ env.NPROC_SUBTRACT }}
- name: Setup Conan
uses: ./.github/actions/setup-conan
with:
@@ -89,19 +79,13 @@ jobs:
uses: ./.github/actions/build-deps
with:
build_dir: .build
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ matrix.build_type }}
force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }}
# Set the verbosity to "quiet" for Windows to avoid an excessive
# amount of logs. For other OSes, the "verbose" logs are more useful.
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
- name: Log into Conan remote
if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
run: conan remote login ${{ env.CONAN_REMOTE_NAME }} "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"
- name: Upload Conan packages
if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
env:
FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
run: conan upload "*" --remote="${CONAN_REMOTE_NAME}" --confirm ${FORCE_OPTION}
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }}
run: conan upload "*" -r=${{ env.CONAN_REMOTE_NAME }} --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}

View File

@@ -39,12 +39,17 @@ found here](./docs/build/environment.md).
- [Python 3.11](https://www.python.org/downloads/), or higher
- [Conan 2.17](https://conan.io/downloads.html)[^1], or higher
- [CMake 3.22](https://cmake.org/download/), or higher
- [CMake 3.22](https://cmake.org/download/)[^2], or higher
[^1]:
It is possible to build with Conan 1.60+, but the instructions are
significantly different, which is why we are not recommending it.
[^2]:
CMake 4 is not yet supported by all dependencies required by this project.
If you are affected by this issue, follow [conan workaround for cmake
4](#workaround-for-cmake-4)
`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
The [minimum compiler versions][2] required are:
@@ -277,6 +282,21 @@ sed -i.bak -e 's|^arch=.*$|arch=x86_64|' $(conan config home)/profiles/default
sed -i.bak -e 's|^compiler\.runtime=.*$|compiler.runtime=static|' $(conan config home)/profiles/default
```
#### Workaround for CMake 4
If your system CMake is version 4 rather than 3, you may have to configure Conan
profile to use CMake version 3 for dependencies, by adding the following two
lines to your profile:
```text
[tool_requires]
!cmake/*: cmake/[>=3 <4]
```
This will force Conan to download and use a locally cached CMake 3 version, and
is needed because some of the dependencies used by this project do not support
CMake 4.
#### Clang workaround for grpc
If your compiler is clang, version 19 or later, or apple-clang, version 17 or

View File

@@ -16,13 +16,16 @@ set(CMAKE_CXX_EXTENSIONS OFF)
target_compile_definitions (common
INTERFACE
$<$<CONFIG:Debug>:DEBUG _DEBUG>
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>)
# ^^^^ NOTE: CMAKE release builds already have NDEBUG
# defined, so no need to add it explicitly except for
# this special case of (profile ON) and (assert OFF)
# -- presumably this is because we don't want profile
# builds asserting unless asserts were specifically
# requested
#[===[
NOTE: CMAKE release builds already have NDEBUG defined, so no need to add it
explicitly except for the special case of (profile ON) and (assert OFF).
Presumably this is because we don't want profile builds asserting unless
asserts were specifically requested.
]===]
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>
# TODO: Remove once we have migrated functions from OpenSSL 1.x to 3.x.
OPENSSL_SUPPRESS_DEPRECATED
)
if (MSVC)
# remove existing exception flag since we set it to -EHa

View File

@@ -118,7 +118,7 @@ option(beast_no_unit_test_inline
"Prevents unit test definitions from being inserted into global table"
OFF)
option(single_io_service_thread
"Restricts the number of threads calling io_service::run to one. \
"Restricts the number of threads calling io_context::run to one. \
This can be useful when debugging."
OFF)
option(boost_show_deprecated

View File

@@ -24,6 +24,7 @@ target_link_libraries(ripple_boost
Boost::date_time
Boost::filesystem
Boost::json
Boost::process
Boost::program_options
Boost::regex
Boost::system

View File

@@ -7,7 +7,7 @@ function(xrpl_add_test name)
"${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/${name}.cpp"
)
add_executable(${target} ${ARGN} ${sources})
add_executable(${target} EXCLUDE_FROM_ALL ${ARGN} ${sources})
isolate_headers(
${target}

View File

@@ -9,7 +9,7 @@
"rocksdb/10.0.1#85537f46e538974d67da0c3977de48ac%1756234304.347",
"re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729",
"openssl/3.5.2#0c5a5e15ae569f45dff57adcf1770cf7%1756234259.61",
"nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
@@ -21,7 +21,7 @@
"date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
"c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
"bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
"boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368%1754325043.336",
"boost/1.88.0#8852c0b72ce8271fb8ff7c53456d4983%1756223752.326",
"abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907"
],
"build_requires": [
@@ -46,7 +46,7 @@
"lz4/1.10.0"
],
"boost/1.83.0": [
"boost/1.83.0"
"boost/1.88.0"
],
"sqlite3/3.44.2": [
"sqlite3/3.49.1"

View File

@@ -1,5 +1,9 @@
# Global configuration for Conan. This is used to set the number of parallel
# downloads and uploads.
# downloads, uploads, and build jobs. The verbosity is set to verbose to
# provide more information during the build process.
core:non_interactive=True
core.download:parallel={{ os.cpu_count() }}
core.upload:parallel={{ os.cpu_count() }}
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
tools.build:verbosity=verbose
tools.compilation:verbosity=verbose

View File

@@ -21,14 +21,14 @@ compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}}
[conf]
{% if compiler == "clang" and compiler_version >= 19 %}
grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw']
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "apple-clang" and compiler_version >= 17 %}
grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "clang" and compiler_version == 16 %}
tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "gcc" and compiler_version < 13 %}
tools.build:cxxflags+=['-Wno-restrict']
tools.build:cxxflags=['-Wno-restrict']
{% endif %}
[tool_requires]
!cmake/*: cmake/[>=3 <4]

View File

@@ -27,7 +27,7 @@ class Xrpl(ConanFile):
'grpc/1.50.1',
'libarchive/3.8.1',
'nudb/2.0.9',
'openssl/1.1.1w',
'openssl/3.5.2',
'soci/4.0.3',
'zlib/1.3.1',
]
@@ -100,11 +100,13 @@ class Xrpl(ConanFile):
def configure(self):
if self.settings.compiler == 'apple-clang':
self.options['boost'].visibility = 'global'
if self.settings.compiler in ['clang', 'gcc']:
self.options['boost'].without_cobalt = True
def requirements(self):
# Conan 2 requires transitive headers to be specified
transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {}
self.requires('boost/1.83.0', force=True, **transitive_headers_opt)
self.requires('boost/1.88.0', force=True, **transitive_headers_opt)
self.requires('date/3.0.4', **transitive_headers_opt)
self.requires('lz4/1.10.0', force=True)
self.requires('protobuf/3.21.12', force=True)
@@ -175,6 +177,7 @@ class Xrpl(ConanFile):
'boost::filesystem',
'boost::json',
'boost::program_options',
'boost::process',
'boost::regex',
'boost::system',
'boost::thread',

View File

@@ -654,14 +654,12 @@ SharedWeakUnion<T>::convertToWeak()
break;
case destroy:
// We just added a weak ref. How could we destroy?
// LCOV_EXCL_START
UNREACHABLE(
"ripple::SharedWeakUnion::convertToWeak : destroying freshly "
"added ref");
delete p;
unsafeSetRawPtr(nullptr);
return true; // Should never happen
// LCOV_EXCL_STOP
case partialDestroy:
// This is a weird case. We just converted the last strong
// pointer to a weak pointer.

View File

@@ -23,7 +23,7 @@
#include <xrpl/basics/Resolver.h>
#include <xrpl/beast/utility/Journal.h>
#include <boost/asio/io_service.hpp>
#include <boost/asio/io_context.hpp>
namespace ripple {
@@ -33,7 +33,7 @@ public:
explicit ResolverAsio() = default;
static std::unique_ptr<ResolverAsio>
New(boost::asio::io_service&, beast::Journal);
New(boost::asio::io_context&, beast::Journal);
};
} // namespace ripple

View File

@@ -632,16 +632,6 @@ to_string(base_uint<Bits, Tag> const& a)
return strHex(a.cbegin(), a.cend());
}
template <std::size_t Bits, class Tag>
inline std::string
to_short_string(base_uint<Bits, Tag> const& a)
{
static_assert(
base_uint<Bits, Tag>::bytes > 4,
"For 4 bytes or less, use a native type");
return strHex(a.cbegin(), a.cbegin() + 4) + "...";
}
template <std::size_t Bits, class Tag>
inline std::ostream&
operator<<(std::ostream& out, base_uint<Bits, Tag> const& u)

View File

@@ -28,8 +28,9 @@ namespace ripple {
// the destination can hold all values of the source. This is particularly
// handy when the source or destination is an enumeration type.
template <class Src, class Dest>
concept SafeToCast = (std::is_integral_v<Src> && std::is_integral_v<Dest>) &&
template <class Dest, class Src>
static constexpr bool is_safetocasttovalue_v =
(std::is_integral_v<Src> && std::is_integral_v<Dest>) &&
(std::is_signed<Src>::value || std::is_unsigned<Dest>::value) &&
(std::is_signed<Src>::value != std::is_signed<Dest>::value
? sizeof(Dest) > sizeof(Src)
@@ -77,7 +78,7 @@ inline constexpr std::
unsafe_cast(Src s) noexcept
{
static_assert(
!SafeToCast<Src, Dest>,
!is_safetocasttovalue_v<Dest, Src>,
"Only unsafe if casting signed to unsigned or "
"destination is too small");
return static_cast<Dest>(s);
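For reference, the condition toggled between `SafeToCast` and `is_safetocasttovalue_v` in the hunk above expresses "the destination integral type can hold every value of the source". A minimal standalone sketch of the concept form, with illustrative asserts (the example types and the `unsafe_cast` wrapper here are a sketch, not the codebase's exact definitions):

```cpp
#include <cstdint>
#include <type_traits>

// Sketch of the "value-preserving integral cast" condition shown above.
template <class Src, class Dest>
concept SafeToCast =
    std::is_integral_v<Src> && std::is_integral_v<Dest> &&
    (std::is_signed_v<Src> || std::is_unsigned_v<Dest>) &&
    (std::is_signed_v<Src> != std::is_signed_v<Dest>
         ? sizeof(Dest) > sizeof(Src)
         : sizeof(Dest) >= sizeof(Src));

// unsafe_cast is intended for conversions that are *not* value-preserving,
// hence the negated assertion in the hunk above.
template <class Dest, class Src>
constexpr Dest
unsafe_cast(Src s) noexcept
{
    static_assert(!SafeToCast<Src, Dest>, "use a safe cast instead");
    return static_cast<Dest>(s);
}

static_assert(SafeToCast<std::int16_t, std::int64_t>);    // widening, same signedness
static_assert(!SafeToCast<std::int64_t, std::uint64_t>);  // signed -> unsigned of equal size
```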

View File

@@ -23,7 +23,8 @@
#include <xrpl/beast/utility/instrumentation.h>
#include <boost/asio/basic_waitable_timer.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <chrono>
#include <condition_variable>
@@ -32,7 +33,7 @@
namespace beast {
/** Measures handler latency on an io_service queue. */
/** Measures handler latency on an io_context queue. */
template <class Clock>
class io_latency_probe
{
@@ -44,12 +45,12 @@ private:
std::condition_variable_any m_cond;
std::size_t m_count;
duration const m_period;
boost::asio::io_service& m_ios;
boost::asio::io_context& m_ios;
boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer;
bool m_cancel;
public:
io_latency_probe(duration const& period, boost::asio::io_service& ios)
io_latency_probe(duration const& period, boost::asio::io_context& ios)
: m_count(1)
, m_period(period)
, m_ios(ios)
@@ -64,16 +65,16 @@ public:
cancel(lock, true);
}
/** Return the io_service associated with the latency probe. */
/** Return the io_context associated with the latency probe. */
/** @{ */
boost::asio::io_service&
get_io_service()
boost::asio::io_context&
get_io_context()
{
return m_ios;
}
boost::asio::io_service const&
get_io_service() const
boost::asio::io_context const&
get_io_context() const
{
return m_ios;
}
@@ -109,8 +110,10 @@ public:
std::lock_guard lock(m_mutex);
if (m_cancel)
throw std::logic_error("io_latency_probe is canceled");
m_ios.post(sample_op<Handler>(
std::forward<Handler>(handler), Clock::now(), false, this));
boost::asio::post(
m_ios,
sample_op<Handler>(
std::forward<Handler>(handler), Clock::now(), false, this));
}
/** Initiate continuous i/o latency sampling.
@@ -124,8 +127,10 @@ public:
std::lock_guard lock(m_mutex);
if (m_cancel)
throw std::logic_error("io_latency_probe is canceled");
m_ios.post(sample_op<Handler>(
std::forward<Handler>(handler), Clock::now(), true, this));
boost::asio::post(
m_ios,
sample_op<Handler>(
std::forward<Handler>(handler), Clock::now(), true, this));
}
private:
@@ -236,12 +241,13 @@ private:
// The latency is too high to maintain the desired
// period so don't bother with a timer.
//
m_probe->m_ios.post(
boost::asio::post(
m_probe->m_ios,
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
else
{
m_probe->m_timer.expires_from_now(when - now);
m_probe->m_timer.expires_after(when - now);
m_probe->m_timer.async_wait(
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
@@ -254,7 +260,8 @@ private:
if (!m_probe)
return;
typename Clock::time_point const now(Clock::now());
m_probe->m_ios.post(
boost::asio::post(
m_probe->m_ios,
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
};
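For context, the `io_service` → `io_context` migration in this hunk follows the usual Boost.Asio pattern: handlers are submitted through the free function `boost::asio::post`, and timers switch from `expires_from_now` to `expires_after`. A minimal, self-contained sketch (illustrative only, assuming a reasonably recent Boost.Asio):

```cpp
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/steady_timer.hpp>

#include <chrono>
#include <iostream>

int
main()
{
    boost::asio::io_context ioc;

    // io_service::post(handler) becomes boost::asio::post(context, handler).
    boost::asio::post(ioc, [] { std::cout << "posted handler\n"; });

    // expires_from_now(duration) becomes expires_after(duration).
    boost::asio::steady_timer timer(ioc);
    timer.expires_after(std::chrono::milliseconds(10));
    timer.async_wait([](boost::system::error_code const& ec) {
        if (!ec)
            std::cout << "timer fired\n";
    });

    ioc.run();
}
```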

View File

@@ -94,11 +94,7 @@ hash_append(Hasher& h, beast::IP::Address const& addr) noexcept
else if (addr.is_v6())
hash_append(h, addr.to_v6().to_bytes());
else
{
// LCOV_EXCL_START
UNREACHABLE("beast::hash_append : invalid address type");
// LCOV_EXCL_STOP
}
}
} // namespace beast

View File

@@ -8,9 +8,11 @@
#ifndef BEAST_TEST_YIELD_TO_HPP
#define BEAST_TEST_YIELD_TO_HPP
#include <boost/asio/io_service.hpp>
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/optional.hpp>
#include <boost/thread/csbl/memory/allocator_arg.hpp>
#include <condition_variable>
#include <mutex>
@@ -29,10 +31,12 @@ namespace test {
class enable_yield_to
{
protected:
boost::asio::io_service ios_;
boost::asio::io_context ios_;
private:
boost::optional<boost::asio::io_service::work> work_;
boost::optional<boost::asio::executor_work_guard<
boost::asio::io_context::executor_type>>
work_;
std::vector<std::thread> threads_;
std::mutex m_;
std::condition_variable cv_;
@@ -42,7 +46,8 @@ public:
/// The type of yield context passed to functions.
using yield_context = boost::asio::yield_context;
explicit enable_yield_to(std::size_t concurrency = 1) : work_(ios_)
explicit enable_yield_to(std::size_t concurrency = 1)
: work_(boost::asio::make_work_guard(ios_))
{
threads_.reserve(concurrency);
while (concurrency--)
@@ -56,9 +61,9 @@ public:
t.join();
}
/// Return the `io_service` associated with the object
boost::asio::io_service&
get_io_service()
/// Return the `io_context` associated with the object
boost::asio::io_context&
get_io_context()
{
return ios_;
}
@@ -111,13 +116,18 @@ enable_yield_to::spawn(F0&& f, FN&&... fn)
{
boost::asio::spawn(
ios_,
boost::allocator_arg,
boost::context::fixedsize_stack(2 * 1024 * 1024),
[&](yield_context yield) {
f(yield);
std::lock_guard lock{m_};
if (--running_ == 0)
cv_.notify_all();
},
boost::coroutines::attributes(2 * 1024 * 1024));
[](std::exception_ptr e) {
if (e)
std::rethrow_exception(e);
});
spawn(fn...);
}
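A standalone sketch of the two changes in this file: an `executor_work_guard` created with `make_work_guard()` replaces the removed `io_service::work` member, and the coroutine is launched with the newer `boost::asio::spawn()` overload taking a Boost.Context stack allocator plus a `std::exception_ptr` completion handler. The hunk passes `boost::allocator_arg`; the sketch uses `std::allocator_arg`, assuming both name the tag this overload accepts.

```cpp
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/context/fixedsize_stack.hpp>

#include <exception>
#include <iostream>
#include <memory>
#include <thread>

int
main()
{
    boost::asio::io_context ioc;

    // Keeps ioc.run() from returning while no handlers are queued.
    auto work = boost::asio::make_work_guard(ioc);

    std::thread runner([&ioc] { ioc.run(); });

    boost::asio::spawn(
        ioc,
        std::allocator_arg,
        boost::context::fixedsize_stack(2 * 1024 * 1024),
        [](boost::asio::yield_context yield) {
            std::cout << "running inside the coroutine\n";
        },
        [](std::exception_ptr e) {
            // Same policy as the hunk: surface coroutine exceptions.
            if (e)
                std::rethrow_exception(e);
        });

    work.reset();  // let run() return once the coroutine has finished
    runner.join();
}
```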

View File

@@ -39,16 +39,11 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#endif
#define XRPL_ASSERT ALWAYS_OR_UNREACHABLE
#define XRPL_ASSERT_PARTS(cond, function, description, ...) \
XRPL_ASSERT(cond, function " : " description)
// How to use the instrumentation macros:
//
// * XRPL_ASSERT if cond must be true but the line might not be reached during
// fuzzing. Same like `assert` in normal use.
// * XRPL_ASSERT_PARTS is for convenience, and works like XRPL_ASSERT, but
// splits the message param into "function" and "description", then joins
// them with " : " before passing to XRPL_ASSERT.
// * ALWAYS if cond must be true _and_ the line must be reached during fuzzing.
// Same like `assert` in normal use.
// * REACHABLE if the line must be reached during fuzzing
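A self-contained illustration of the new convenience macro, using stand-in definitions mirroring the ones above rather than the real instrumentation headers; the call site and message strings are made up.

```cpp
#include <cassert>
#include <iostream>

#define XRPL_ASSERT(cond, message) assert((cond) && (message))
#define XRPL_ASSERT_PARTS(cond, function, description, ...) \
    XRPL_ASSERT(cond, function " : " description)

int
main()
{
    int const balance = 42;

    // Equivalent to:
    //   XRPL_ASSERT(balance >= 0, "example::main : balance is non-negative");
    XRPL_ASSERT_PARTS(balance >= 0, "example::main", "balance is non-negative");

    std::cout << "assertion passed\n";
}
```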

View File

@@ -217,7 +217,7 @@ Reader::parse(Value& root, BufferSequence const& bs)
std::string s;
s.reserve(buffer_size(bs));
for (auto const& b : bs)
s.append(buffer_cast<char const*>(b), buffer_size(b));
s.append(static_cast<char const*>(b.data()), buffer_size(b));
return parse(s, root);
}
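A standalone sketch of the buffer-access change: Boost.Asio deprecated `buffer_cast<>()` in favour of the `data()` member on `const_buffer` (the per-buffer `buffer_size(b)` kept by the hunk and the `b.size()` used below are interchangeable).

```cpp
#include <boost/asio/buffer.hpp>

#include <iostream>
#include <string>
#include <vector>

int
main()
{
    std::vector<boost::asio::const_buffer> bs = {
        boost::asio::buffer("abc", 3), boost::asio::buffer("def", 3)};

    std::string s;
    s.reserve(boost::asio::buffer_size(bs));
    for (auto const& b : bs)
        s.append(static_cast<char const*>(b.data()), b.size());

    std::cout << s << '\n';  // prints "abcdef"
}
```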

View File

@@ -46,7 +46,7 @@ public:
* without formatting (not human friendly).
*
* The JSON document is written in a single line. It is not intended for 'human'
* consumption, but may be useful to support feature such as RPC where bandwidth
* consumption, but may be useful to support feature such as RPC where bandwith
* is limited. \sa Reader, Value
*/

View File

@@ -284,14 +284,12 @@ public:
{
if (key.type != ltOFFER)
{
// LCOV_EXCL_START
UNREACHABLE(
"ripple::ApplyView::dirAppend : only Offers are appended to "
"book directories");
// Only Offers are appended to book directories. Call dirInsert()
// instead
return std::nullopt;
// LCOV_EXCL_STOP
}
return dirAdd(true, directory, key.key, describe);
}

View File

@@ -561,28 +561,12 @@ createPseudoAccount(
[[nodiscard]] bool
isPseudoAccount(std::shared_ptr<SLE const> sleAcct);
// Returns the list of fields that define an ACCOUNT_ROOT as a pseudo-account if
// set
// Pseudo-account designator fields MUST be maintained by including the
// SField::sMD_PseudoAccount flag in the SField definition. (Don't forget to
// "| SField::sMD_Default"!) The fields do NOT need to be amendment-gated,
// since a non-active amendment will not set any field, by definition.
// Specific properties of a pseudo-account are NOT checked here, that's what
// InvariantCheck is for.
[[nodiscard]] std::vector<SField const*> const&
getPseudoAccountFields();
[[nodiscard]] inline bool
isPseudoAccount(ReadView const& view, AccountID accountId)
{
return isPseudoAccount(view.read(keylet::account(accountId)));
}
[[nodiscard]] TER
canAddHolding(ReadView const& view, Asset const& asset);
/// Any transactors that call addEmptyHolding() in doApply must call
/// canAddHolding() in preflight with the same View and Asset
[[nodiscard]] TER
addEmptyHolding(
ApplyView& view,

View File

@@ -47,7 +47,7 @@ public:
public:
AutoSocket(
boost::asio::io_service& s,
boost::asio::io_context& s,
boost::asio::ssl::context& c,
bool secureOnly,
bool plainOnly)
@@ -58,7 +58,7 @@ public:
mSocket = std::make_unique<ssl_socket>(s, c);
}
AutoSocket(boost::asio::io_service& s, boost::asio::ssl::context& c)
AutoSocket(boost::asio::io_context& s, boost::asio::ssl::context& c)
: AutoSocket(s, c, false, false)
{
}

View File

@@ -23,7 +23,7 @@
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/utility/Journal.h>
#include <boost/asio/io_service.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/streambuf.hpp>
#include <chrono>
@@ -51,7 +51,7 @@ public:
static void
get(bool bSSL,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
std::deque<std::string> deqSites,
unsigned short const port,
std::string const& strPath,
@@ -65,7 +65,7 @@ public:
static void
get(bool bSSL,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
std::string strSite,
unsigned short const port,
std::string const& strPath,
@@ -80,7 +80,7 @@ public:
static void
request(
bool bSSL,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
std::string strSite,
unsigned short const port,
std::function<

View File

@@ -153,7 +153,7 @@ public:
{
strm.set_verify_callback(
std::bind(
&rfc2818_verify,
&rfc6125_verify,
host,
std::placeholders::_1,
std::placeholders::_2,
@@ -167,7 +167,7 @@ public:
/**
* @brief callback invoked for name verification - just passes through
* to the asio rfc2818 implementation.
* to the asio `host_name_verification` (rfc6125) implementation.
*
* @param domain hostname expected
* @param preverified passed by implementation
@@ -175,13 +175,13 @@ public:
* @param j journal for logging
*/
static bool
rfc2818_verify(
rfc6125_verify(
std::string const& domain,
bool preverified,
boost::asio::ssl::verify_context& ctx,
beast::Journal j)
{
if (boost::asio::ssl::rfc2818_verification(domain)(preverified, ctx))
if (boost::asio::ssl::host_name_verification(domain)(preverified, ctx))
return true;
JLOG(j.warn()) << "Outbound SSL connection to " << domain
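A minimal standalone sketch of the renamed verifier: `boost::asio::ssl::host_name_verification` (RFC 6125 matching) replaces the deprecated `rfc2818_verification`. The host name is only an example and no connection is attempted.

```cpp
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ssl/context.hpp>
#include <boost/asio/ssl/host_name_verification.hpp>
#include <boost/asio/ssl/stream.hpp>

int
main()
{
    boost::asio::io_context ioc;
    boost::asio::ssl::context ctx(boost::asio::ssl::context::tls_client);
    ctx.set_default_verify_paths();

    boost::asio::ssl::stream<boost::asio::ip::tcp::socket> stream(ioc, ctx);
    stream.set_verify_mode(boost::asio::ssl::verify_peer);

    // Replaces boost::asio::ssl::rfc2818_verification("example.com").
    stream.set_verify_callback(
        boost::asio::ssl::host_name_verification("example.com"));
}
```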

View File

@@ -0,0 +1,565 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2019 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BASICS_FEES_H_INCLUDED
#define BASICS_FEES_H_INCLUDED
#include <xrpl/basics/safe_cast.h>
#include <xrpl/beast/utility/Zero.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/json/json_value.h>
#include <boost/multiprecision/cpp_int.hpp>
#include <boost/operators.hpp>
#include <iosfwd>
#include <limits>
#include <optional>
namespace ripple {
namespace feeunit {
/** "drops" are the smallest divisible amount of XRP. This is what most
of the code uses. */
struct dropTag;
/** "fee units" calculations are a not-really-unitless value that is used
to express the cost of a given transaction vs. a reference transaction.
They are primarily used by the Transactor classes. */
struct feeunitTag;
/** "fee levels" are used by the transaction queue to compare the relative
cost of transactions that require different levels of effort to process.
See also: src/ripple/app/misc/FeeEscalation.md#fee-level */
struct feelevelTag;
/** unitless values are plain scalars wrapped in a TaggedFee. They are
used for calculations in this header. */
struct unitlessTag;
template <class T>
using enable_if_unit_t = typename std::enable_if_t<
std::is_class_v<T> && std::is_object_v<typename T::unit_type> &&
std::is_object_v<typename T::value_type>>;
/** `is_usable_unit_v` is checked to ensure that only values with
known valid type tags can be used (sometimes transparently) in
non-fee contexts. At the time of implementation, this includes
all known tags, but more may be added in the future, and they
should not be added automatically unless determined to be
appropriate.
*/
template <class T, class = enable_if_unit_t<T>>
constexpr bool is_usable_unit_v =
std::is_same_v<typename T::unit_type, feeunitTag> ||
std::is_same_v<typename T::unit_type, feelevelTag> ||
std::is_same_v<typename T::unit_type, unitlessTag> ||
std::is_same_v<typename T::unit_type, dropTag>;
template <class UnitTag, class T>
class TaggedFee : private boost::totally_ordered<TaggedFee<UnitTag, T>>,
private boost::additive<TaggedFee<UnitTag, T>>,
private boost::equality_comparable<TaggedFee<UnitTag, T>, T>,
private boost::dividable<TaggedFee<UnitTag, T>, T>,
private boost::modable<TaggedFee<UnitTag, T>, T>,
private boost::unit_steppable<TaggedFee<UnitTag, T>>
{
public:
using unit_type = UnitTag;
using value_type = T;
private:
value_type fee_;
protected:
template <class Other>
static constexpr bool is_compatible_v =
std::is_arithmetic_v<Other> && std::is_arithmetic_v<value_type> &&
std::is_convertible_v<Other, value_type>;
template <class OtherFee, class = enable_if_unit_t<OtherFee>>
static constexpr bool is_compatiblefee_v =
is_compatible_v<typename OtherFee::value_type> &&
std::is_same_v<UnitTag, typename OtherFee::unit_type>;
template <class Other>
using enable_if_compatible_t =
typename std::enable_if_t<is_compatible_v<Other>>;
template <class OtherFee>
using enable_if_compatiblefee_t =
typename std::enable_if_t<is_compatiblefee_v<OtherFee>>;
public:
TaggedFee() = default;
constexpr TaggedFee(TaggedFee const& other) = default;
constexpr TaggedFee&
operator=(TaggedFee const& other) = default;
constexpr explicit TaggedFee(beast::Zero) : fee_(0)
{
}
constexpr TaggedFee&
operator=(beast::Zero)
{
fee_ = 0;
return *this;
}
constexpr explicit TaggedFee(value_type fee) : fee_(fee)
{
}
TaggedFee&
operator=(value_type fee)
{
fee_ = fee;
return *this;
}
/** Instances with the same unit, and a type that is
"safe" to convert to this one can be converted
implicitly */
template <
class Other,
class = std::enable_if_t<
is_compatible_v<Other> &&
is_safetocasttovalue_v<value_type, Other>>>
constexpr TaggedFee(TaggedFee<unit_type, Other> const& fee)
: TaggedFee(safe_cast<value_type>(fee.fee()))
{
}
constexpr TaggedFee
operator*(value_type const& rhs) const
{
return TaggedFee{fee_ * rhs};
}
friend constexpr TaggedFee
operator*(value_type lhs, TaggedFee const& rhs)
{
// multiplication is commutative
return rhs * lhs;
}
constexpr value_type
operator/(TaggedFee const& rhs) const
{
return fee_ / rhs.fee_;
}
TaggedFee&
operator+=(TaggedFee const& other)
{
fee_ += other.fee();
return *this;
}
TaggedFee&
operator-=(TaggedFee const& other)
{
fee_ -= other.fee();
return *this;
}
TaggedFee&
operator++()
{
++fee_;
return *this;
}
TaggedFee&
operator--()
{
--fee_;
return *this;
}
TaggedFee&
operator*=(value_type const& rhs)
{
fee_ *= rhs;
return *this;
}
TaggedFee&
operator/=(value_type const& rhs)
{
fee_ /= rhs;
return *this;
}
template <class transparent = value_type>
std::enable_if_t<std::is_integral_v<transparent>, TaggedFee&>
operator%=(value_type const& rhs)
{
fee_ %= rhs;
return *this;
}
TaggedFee
operator-() const
{
static_assert(
std::is_signed_v<T>, "- operator illegal on unsigned fee types");
return TaggedFee{-fee_};
}
bool
operator==(TaggedFee const& other) const
{
return fee_ == other.fee_;
}
template <class Other, class = enable_if_compatible_t<Other>>
bool
operator==(TaggedFee<unit_type, Other> const& other) const
{
return fee_ == other.fee();
}
bool
operator==(value_type other) const
{
return fee_ == other;
}
template <class Other, class = enable_if_compatible_t<Other>>
bool
operator!=(TaggedFee<unit_type, Other> const& other) const
{
return !operator==(other);
}
bool
operator<(TaggedFee const& other) const
{
return fee_ < other.fee_;
}
/** Returns true if the amount is not zero */
explicit constexpr
operator bool() const noexcept
{
return fee_ != 0;
}
/** Return the sign of the amount */
constexpr int
signum() const noexcept
{
return (fee_ < 0) ? -1 : (fee_ ? 1 : 0);
}
/** Returns the number of drops */
constexpr value_type
fee() const
{
return fee_;
}
template <class Other>
constexpr double
decimalFromReference(TaggedFee<unit_type, Other> reference) const
{
return static_cast<double>(fee_) / reference.fee();
}
// `is_usable_unit_v` is checked to ensure that only values with
// known valid type tags can be converted to JSON. At the time
// of implementation, that includes all known tags, but more may
// be added in the future.
std::enable_if_t<is_usable_unit_v<TaggedFee>, Json::Value>
jsonClipped() const
{
if constexpr (std::is_integral_v<value_type>)
{
using jsontype = std::conditional_t<
std::is_signed_v<value_type>,
Json::Int,
Json::UInt>;
constexpr auto min = std::numeric_limits<jsontype>::min();
constexpr auto max = std::numeric_limits<jsontype>::max();
if (fee_ < min)
return min;
if (fee_ > max)
return max;
return static_cast<jsontype>(fee_);
}
else
{
return fee_;
}
}
/** Returns the underlying value. Code SHOULD NOT call this
function unless the type has been abstracted away,
e.g. in a templated function.
*/
constexpr value_type
value() const
{
return fee_;
}
friend std::istream&
operator>>(std::istream& s, TaggedFee& val)
{
s >> val.fee_;
return s;
}
};
// Output Fees as just their numeric value.
template <class Char, class Traits, class UnitTag, class T>
std::basic_ostream<Char, Traits>&
operator<<(std::basic_ostream<Char, Traits>& os, TaggedFee<UnitTag, T> const& q)
{
return os << q.value();
}
template <class UnitTag, class T>
std::string
to_string(TaggedFee<UnitTag, T> const& amount)
{
return std::to_string(amount.fee());
}
template <class Source, class = enable_if_unit_t<Source>>
constexpr bool can_muldiv_source_v =
std::is_convertible_v<typename Source::value_type, std::uint64_t>;
template <class Dest, class = enable_if_unit_t<Dest>>
constexpr bool can_muldiv_dest_v =
can_muldiv_source_v<Dest> && // Dest is also a source
std::is_convertible_v<std::uint64_t, typename Dest::value_type> &&
sizeof(typename Dest::value_type) >= sizeof(std::uint64_t);
template <
class Source1,
class Source2,
class = enable_if_unit_t<Source1>,
class = enable_if_unit_t<Source2>>
constexpr bool can_muldiv_sources_v =
can_muldiv_source_v<Source1> && can_muldiv_source_v<Source2> &&
std::is_same_v<typename Source1::unit_type, typename Source2::unit_type>;
template <
class Source1,
class Source2,
class Dest,
class = enable_if_unit_t<Source1>,
class = enable_if_unit_t<Source2>,
class = enable_if_unit_t<Dest>>
constexpr bool can_muldiv_v =
can_muldiv_sources_v<Source1, Source2> && can_muldiv_dest_v<Dest>;
// Source and Dest can be the same by default
template <
class Source1,
class Source2,
class Dest,
class = enable_if_unit_t<Source1>,
class = enable_if_unit_t<Source2>,
class = enable_if_unit_t<Dest>>
constexpr bool can_muldiv_commute_v = can_muldiv_v<Source1, Source2, Dest> &&
!std::is_same_v<typename Source1::unit_type, typename Dest::unit_type>;
template <class T>
using enable_muldiv_source_t =
typename std::enable_if_t<can_muldiv_source_v<T>>;
template <class T>
using enable_muldiv_dest_t = typename std::enable_if_t<can_muldiv_dest_v<T>>;
template <class Source1, class Source2>
using enable_muldiv_sources_t =
typename std::enable_if_t<can_muldiv_sources_v<Source1, Source2>>;
template <class Source1, class Source2, class Dest>
using enable_muldiv_t =
typename std::enable_if_t<can_muldiv_v<Source1, Source2, Dest>>;
template <class Source1, class Source2, class Dest>
using enable_muldiv_commute_t =
typename std::enable_if_t<can_muldiv_commute_v<Source1, Source2, Dest>>;
template <class T>
TaggedFee<unitlessTag, T>
scalar(T value)
{
return TaggedFee<unitlessTag, T>{value};
}
template <
class Source1,
class Source2,
class Dest,
class = enable_muldiv_t<Source1, Source2, Dest>>
std::optional<Dest>
mulDivU(Source1 value, Dest mul, Source2 div)
{
// Fees can never be negative in any context.
if (value.value() < 0 || mul.value() < 0 || div.value() < 0)
{
// split the asserts so if one hits, the user can tell which
// without a debugger.
XRPL_ASSERT(
value.value() >= 0,
"ripple::feeunit::mulDivU : minimum value input");
XRPL_ASSERT(
mul.value() >= 0, "ripple::feeunit::mulDivU : minimum mul input");
XRPL_ASSERT(
div.value() >= 0, "ripple::feeunit::mulDivU : minimum div input");
return std::nullopt;
}
using desttype = typename Dest::value_type;
constexpr auto max = std::numeric_limits<desttype>::max();
// Shortcuts, since these happen a lot in the real world
if (value == div)
return mul;
if (mul.value() == div.value())
{
if (value.value() > max)
return std::nullopt;
return Dest{static_cast<desttype>(value.value())};
}
using namespace boost::multiprecision;
uint128_t product;
product = multiply(
product,
static_cast<std::uint64_t>(value.value()),
static_cast<std::uint64_t>(mul.value()));
auto quotient = product / div.value();
if (quotient > max)
return std::nullopt;
return Dest{static_cast<desttype>(quotient)};
}
} // namespace feeunit
template <class T>
using FeeLevel = feeunit::TaggedFee<feeunit::feelevelTag, T>;
using FeeLevel64 = FeeLevel<std::uint64_t>;
using FeeLevelDouble = FeeLevel<double>;
template <
class Source1,
class Source2,
class Dest,
class = feeunit::enable_muldiv_t<Source1, Source2, Dest>>
std::optional<Dest>
mulDiv(Source1 value, Dest mul, Source2 div)
{
return feeunit::mulDivU(value, mul, div);
}
template <
class Source1,
class Source2,
class Dest,
class = feeunit::enable_muldiv_commute_t<Source1, Source2, Dest>>
std::optional<Dest>
mulDiv(Dest value, Source1 mul, Source2 div)
{
// Multiplication is commutative
return feeunit::mulDivU(mul, value, div);
}
template <class Dest, class = feeunit::enable_muldiv_dest_t<Dest>>
std::optional<Dest>
mulDiv(std::uint64_t value, Dest mul, std::uint64_t div)
{
// Give the scalars a non-tag so the
// unit-handling version gets called.
return feeunit::mulDivU(feeunit::scalar(value), mul, feeunit::scalar(div));
}
template <class Dest, class = feeunit::enable_muldiv_dest_t<Dest>>
std::optional<Dest>
mulDiv(Dest value, std::uint64_t mul, std::uint64_t div)
{
// Multiplication is commutative
return mulDiv(mul, value, div);
}
template <
class Source1,
class Source2,
class = feeunit::enable_muldiv_sources_t<Source1, Source2>>
std::optional<std::uint64_t>
mulDiv(Source1 value, std::uint64_t mul, Source2 div)
{
// Give the scalars a dimensionless unit so the
// unit-handling version gets called.
auto unitresult = feeunit::mulDivU(value, feeunit::scalar(mul), div);
if (!unitresult)
return std::nullopt;
return unitresult->value();
}
template <
class Source1,
class Source2,
class = feeunit::enable_muldiv_sources_t<Source1, Source2>>
std::optional<std::uint64_t>
mulDiv(std::uint64_t value, Source1 mul, Source2 div)
{
// Multiplication is commutative
return mulDiv(mul, value, div);
}
template <class Dest, class Src>
constexpr std::enable_if_t<
std::is_same_v<typename Dest::unit_type, typename Src::unit_type> &&
std::is_integral_v<typename Dest::value_type> &&
std::is_integral_v<typename Src::value_type>,
Dest>
safe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{safe_cast<typename Dest::value_type>(s.value())};
}
template <class Dest, class Src>
constexpr std::enable_if_t<
std::is_same_v<typename Dest::unit_type, typename Src::unit_type> &&
std::is_integral_v<typename Dest::value_type> &&
std::is_integral_v<typename Src::value_type>,
Dest>
unsafe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{unsafe_cast<typename Dest::value_type>(s.value())};
}
} // namespace ripple
#endif // BASICS_FEES_H_INCLUDED
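A hedged usage sketch of the tagged types this header declares (the values are illustrative and it assumes a build against the rippled headers): same-tag quantities combine directly, while `mulDiv()` scales across them with overflow checking.

```cpp
#include <xrpl/protocol/FeeUnits.h>

#include <cstdint>
#include <iostream>

int
main()
{
    using namespace ripple;

    FeeLevel64 const baseLevel{256};
    FeeLevel64 const escalated = baseLevel + FeeLevel64{256};  // same tag: OK

    // Scale a 10-drop reference fee by escalated/baseLevel; the overload
    // taking (Source, std::uint64_t, Source) returns the plain scalar.
    std::uint64_t const referenceFeeDrops = 10;
    if (auto const scaled = mulDiv(escalated, referenceFeeDrops, baseLevel))
        std::cout << "scaled fee: " << *scaled << " drops\n";  // 20
}
```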

View File

@@ -54,11 +54,11 @@ class SeqProxy;
namespace keylet {
/** AccountID root */
Keylet
TypedKeylet<ltACCOUNT_ROOT>
account(AccountID const& id) noexcept;
/** The index of the amendment table */
Keylet const&
TypedKeylet<ltAMENDMENTS> const&
amendments() noexcept;
/** Any item that can be in an owner dir. */
@@ -70,7 +70,7 @@ child(uint256 const& key) noexcept;
The "short" skip list is a node (at a fixed index) that holds the hashes
of ledgers since the last flag ledger. It will contain, at most, 256 hashes.
*/
Keylet const&
TypedKeylet<ltLEDGER_HASHES> const&
skip() noexcept;
/** The index of the long skip for a particular ledger range.
@@ -83,15 +83,15 @@ skip() noexcept;
and uses it to get the hash of the flag ledger whose short skip list will
contain the hash of the requested ledger.
*/
Keylet
TypedKeylet<ltLEDGER_HASHES>
skip(LedgerIndex ledger) noexcept;
/** The (fixed) index of the object containing the ledger fees. */
Keylet const&
TypedKeylet<ltFEE_SETTINGS> const&
fees() noexcept;
/** The (fixed) index of the object containing the ledger negativeUNL. */
Keylet const&
TypedKeylet<ltNEGATIVE_UNL> const&
negativeUNL() noexcept;
/** The beginning of an order book */
@@ -99,7 +99,7 @@ struct book_t
{
explicit book_t() = default;
Keylet
TypedKeylet<ltDIR_NODE>
operator()(Book const& b) const;
};
static book_t const book{};
@@ -112,13 +112,13 @@ static book_t const book{};
between them.
*/
/** @{ */
Keylet
TypedKeylet<ltRIPPLE_STATE>
line(
AccountID const& id0,
AccountID const& id1,
Currency const& currency) noexcept;
inline Keylet
inline TypedKeylet<ltRIPPLE_STATE>
line(AccountID const& id, Issue const& issue) noexcept
{
return line(id, issue.account, issue.currency);
@@ -127,27 +127,27 @@ line(AccountID const& id, Issue const& issue) noexcept
/** An offer from an account */
/** @{ */
Keylet
TypedKeylet<ltOFFER>
offer(AccountID const& id, std::uint32_t seq) noexcept;
inline Keylet
inline TypedKeylet<ltOFFER>
offer(uint256 const& key) noexcept
{
return {ltOFFER, key};
return {key};
}
/** @} */
/** The initial directory page for a specific quality */
Keylet
quality(Keylet const& k, std::uint64_t q) noexcept;
TypedKeylet<ltDIR_NODE>
quality(TypedKeylet<ltDIR_NODE> const& k, std::uint64_t q) noexcept;
/** The directory for the next lower quality */
struct next_t
{
explicit next_t() = default;
Keylet
operator()(Keylet const& k) const;
TypedKeylet<ltDIR_NODE>
operator()(TypedKeylet<ltDIR_NODE> const& k) const;
};
static next_t const next{};
@@ -156,50 +156,50 @@ struct ticket_t
{
explicit ticket_t() = default;
Keylet
TypedKeylet<ltTICKET>
operator()(AccountID const& id, std::uint32_t ticketSeq) const;
Keylet
TypedKeylet<ltTICKET>
operator()(AccountID const& id, SeqProxy ticketSeq) const;
Keylet
TypedKeylet<ltTICKET>
operator()(uint256 const& key) const
{
return {ltTICKET, key};
return {key};
}
};
static ticket_t const ticket{};
/** A SignerList */
Keylet
TypedKeylet<ltSIGNER_LIST>
signers(AccountID const& account) noexcept;
/** A Check */
/** @{ */
Keylet
TypedKeylet<ltCHECK>
check(AccountID const& id, std::uint32_t seq) noexcept;
inline Keylet
inline TypedKeylet<ltCHECK>
check(uint256 const& key) noexcept
{
return {ltCHECK, key};
return {key};
}
/** @} */
/** A DepositPreauth */
/** @{ */
Keylet
TypedKeylet<ltDEPOSIT_PREAUTH>
depositPreauth(AccountID const& owner, AccountID const& preauthorized) noexcept;
Keylet
TypedKeylet<ltDEPOSIT_PREAUTH>
depositPreauth(
AccountID const& owner,
std::set<std::pair<AccountID, Slice>> const& authCreds) noexcept;
inline Keylet
inline TypedKeylet<ltDEPOSIT_PREAUTH>
depositPreauth(uint256 const& key) noexcept
{
return {ltDEPOSIT_PREAUTH, key};
return {key};
}
/** @} */
@@ -210,15 +210,15 @@ Keylet
unchecked(uint256 const& key) noexcept;
/** The root page of an account's directory */
Keylet
TypedKeylet<ltDIR_NODE>
ownerDir(AccountID const& id) noexcept;
/** A page in a directory */
/** @{ */
Keylet
TypedKeylet<ltDIR_NODE>
page(uint256 const& root, std::uint64_t index = 0) noexcept;
inline Keylet
inline TypedKeylet<ltDIR_NODE>
page(Keylet const& root, std::uint64_t index = 0) noexcept
{
XRPL_ASSERT(
@@ -228,11 +228,11 @@ page(Keylet const& root, std::uint64_t index = 0) noexcept
/** @} */
/** An escrow entry */
Keylet
TypedKeylet<ltESCROW>
escrow(AccountID const& src, std::uint32_t seq) noexcept;
/** A PaymentChannel */
Keylet
TypedKeylet<ltPAYCHAN>
payChan(AccountID const& src, AccountID const& dst, std::uint32_t seq) noexcept;
/** NFT page keylets
@@ -244,112 +244,112 @@ payChan(AccountID const& src, AccountID const& dst, std::uint32_t seq) noexcept;
*/
/** @{ */
/** A keylet for the owner's first possible NFT page. */
Keylet
TypedKeylet<ltNFTOKEN_PAGE>
nftpage_min(AccountID const& owner);
/** A keylet for the owner's last possible NFT page. */
Keylet
TypedKeylet<ltNFTOKEN_PAGE>
nftpage_max(AccountID const& owner);
Keylet
nftpage(Keylet const& k, uint256 const& token);
TypedKeylet<ltNFTOKEN_PAGE>
nftpage(TypedKeylet<ltNFTOKEN_PAGE> const& k, uint256 const& token);
/** @} */
/** An offer from an account to buy or sell an NFT */
Keylet
TypedKeylet<ltNFTOKEN_OFFER>
nftoffer(AccountID const& owner, std::uint32_t seq);
inline Keylet
inline TypedKeylet<ltNFTOKEN_OFFER>
nftoffer(uint256 const& offer)
{
return {ltNFTOKEN_OFFER, offer};
return {offer};
}
/** The directory of buy offers for the specified NFT */
Keylet
TypedKeylet<ltDIR_NODE>
nft_buys(uint256 const& id) noexcept;
/** The directory of sell offers for the specified NFT */
Keylet
TypedKeylet<ltDIR_NODE>
nft_sells(uint256 const& id) noexcept;
/** AMM entry */
Keylet
TypedKeylet<ltAMM>
amm(Asset const& issue1, Asset const& issue2) noexcept;
Keylet
TypedKeylet<ltAMM>
amm(uint256 const& amm) noexcept;
/** A keylet for Delegate object */
Keylet
TypedKeylet<ltDELEGATE>
delegate(AccountID const& account, AccountID const& authorizedAccount) noexcept;
Keylet
TypedKeylet<ltBRIDGE>
bridge(STXChainBridge const& bridge, STXChainBridge::ChainType chainType);
// `seq` is stored as `sfXChainClaimID` in the object
Keylet
TypedKeylet<ltXCHAIN_OWNED_CLAIM_ID>
xChainClaimID(STXChainBridge const& bridge, std::uint64_t seq);
// `seq` is stored as `sfXChainAccountCreateCount` in the object
Keylet
TypedKeylet<ltXCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID>
xChainCreateAccountClaimID(STXChainBridge const& bridge, std::uint64_t seq);
Keylet
TypedKeylet<ltDID>
did(AccountID const& account) noexcept;
Keylet
TypedKeylet<ltORACLE>
oracle(AccountID const& account, std::uint32_t const& documentID) noexcept;
Keylet
TypedKeylet<ltCREDENTIAL>
credential(
AccountID const& subject,
AccountID const& issuer,
Slice const& credType) noexcept;
inline Keylet
inline TypedKeylet<ltCREDENTIAL>
credential(uint256 const& key) noexcept
{
return {ltCREDENTIAL, key};
return {key};
}
Keylet
TypedKeylet<ltMPTOKEN_ISSUANCE>
mptIssuance(std::uint32_t seq, AccountID const& issuer) noexcept;
Keylet
TypedKeylet<ltMPTOKEN_ISSUANCE>
mptIssuance(MPTID const& issuanceID) noexcept;
inline Keylet
inline TypedKeylet<ltMPTOKEN_ISSUANCE>
mptIssuance(uint256 const& issuanceKey)
{
return {ltMPTOKEN_ISSUANCE, issuanceKey};
return {issuanceKey};
}
Keylet
TypedKeylet<ltMPTOKEN>
mptoken(MPTID const& issuanceID, AccountID const& holder) noexcept;
inline Keylet
inline TypedKeylet<ltMPTOKEN>
mptoken(uint256 const& mptokenKey)
{
return {ltMPTOKEN, mptokenKey};
return {mptokenKey};
}
Keylet
TypedKeylet<ltMPTOKEN>
mptoken(uint256 const& issuanceKey, AccountID const& holder) noexcept;
Keylet
TypedKeylet<ltVAULT>
vault(AccountID const& owner, std::uint32_t seq) noexcept;
inline Keylet
inline TypedKeylet<ltVAULT>
vault(uint256 const& vaultKey)
{
return {ltVAULT, vaultKey};
return {vaultKey};
}
Keylet
TypedKeylet<ltPERMISSIONED_DOMAIN>
permissionedDomain(AccountID const& account, std::uint32_t seq) noexcept;
Keylet
TypedKeylet<ltPERMISSIONED_DOMAIN>
permissionedDomain(uint256 const& domainID) noexcept;
} // namespace keylet
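A hedged sketch of what the typed return values buy at call sites; it only exercises the fixed-index factories above and performs no ledger access (whether `ReadView::read()` now returns typed entries is not shown in this hunk).

```cpp
#include <xrpl/protocol/Indexes.h>

#include <iostream>
#include <string>

int
main()
{
    using namespace ripple;

    // The fixed-index factories now advertise their ledger entry type.
    TypedKeylet<ltAMENDMENTS> const& amendmentsKey = keylet::amendments();
    TypedKeylet<ltFEE_SETTINGS> const& feesKey = keylet::fees();
    static_assert(TypedKeylet<ltAMENDMENTS>::LedgerType == ltAMENDMENTS);

    // A TypedKeylet is still a Keylet, so existing call sites keep compiling.
    Keylet const& plain = amendmentsKey;

    std::cout << "amendments key: " << to_string(plain.key) << '\n'
              << "fees type tag:  " << feesKey.type << '\n';
}
```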

View File

@@ -49,6 +49,22 @@ struct Keylet
check(STLedgerEntry const&) const;
};
template <LedgerEntryType Type>
struct TypedKeylet : Keylet
{
static constexpr LedgerEntryType LedgerType = Type;
TypedKeylet(uint256 const& key_) : Keylet(Type, key_)
{
}
Keylet
untyped() const
{
return Keylet(LedgerType, key);
}
};
} // namespace ripple
#endif
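A short hedged sketch of the wrapper itself: the constructor binds the template's `LedgerEntryType` to the stored key, and `untyped()` recovers a plain `Keylet` carrying the same `(type, key)` pair. The key is borrowed from `keylet::fees()` purely to have a valid value.

```cpp
#include <xrpl/protocol/Indexes.h>

#include <cassert>

int
main()
{
    using namespace ripple;

    uint256 const key = keylet::fees().key;

    TypedKeylet<ltFEE_SETTINGS> const typed{key};
    Keylet const plain = typed.untyped();

    assert(plain.type == ltFEE_SETTINGS);
    assert(plain.key == key);
}
```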

View File

@@ -55,13 +55,33 @@ enum LedgerEntryType : std::uint16_t
#pragma push_macro("LEDGER_ENTRY")
#undef LEDGER_ENTRY
#pragma push_macro("LEDGER_ENTRY_FIELD")
#undef LEDGER_ENTRY_FIELD
#pragma push_macro("DEFINE_LEDGER_ENTRY_FIELDS")
#undef DEFINE_LEDGER_ENTRY_FIELDS
#pragma push_macro("LEDGER_ENTRIES_BEGIN")
#undef LEDGER_ENTRIES_BEGIN
#pragma push_macro("LEDGER_ENTRIES_END")
#undef LEDGER_ENTRIES_END
#define LEDGER_ENTRY(tag, value, ...) tag = value,
#define LEDGER_ENTRIES_BEGIN
#define LEDGER_ENTRIES_END
#define DEFINE_LEDGER_ENTRY_FIELDS(...) ({__VA_ARGS__})
#define LEDGER_ENTRY_FIELD(...) {__VA_ARGS__},
#define LEDGER_ENTRY(tag, value, name, rpcName, fields) tag = value,
#include <xrpl/protocol/detail/ledger_entries.macro>
#undef LEDGER_ENTRY
#pragma pop_macro("LEDGER_ENTRY")
#undef LEDGER_ENTRY_FIELD
#pragma pop_macro("LEDGER_ENTRY_FIELD")
#undef DEFINE_LEDGER_ENTRY_FIELDS
#pragma pop_macro("DEFINE_LEDGER_ENTRY_FIELDS")
#undef LEDGER_ENTRIES_BEGIN
#pragma pop_macro("LEDGER_ENTRIES_BEGIN")
#undef LEDGER_ENTRIES_END
#pragma pop_macro("LEDGER_ENTRIES_END")
//---------------------------------------------------------------------------
/** A special type, matching any ledger entry type.
@@ -188,14 +208,14 @@ enum LedgerSpecificFlags {
lsfMPTCanTransfer = 0x00000020,
lsfMPTCanClawback = 0x00000040,
lsmfMPTCanMutateCanLock = 0x00000002,
lsmfMPTCanMutateRequireAuth = 0x00000004,
lsmfMPTCanMutateCanEscrow = 0x00000008,
lsmfMPTCanMutateCanTrade = 0x00000010,
lsmfMPTCanMutateCanTransfer = 0x00000020,
lsmfMPTCanMutateCanClawback = 0x00000040,
lsmfMPTCanMutateMetadata = 0x00010000,
lsmfMPTCanMutateTransferFee = 0x00020000,
lmfMPTCanMutateCanLock = 0x00000002,
lmfMPTCanMutateRequireAuth = 0x00000004,
lmfMPTCanMutateCanEscrow = 0x00000008,
lmfMPTCanMutateCanTrade = 0x00000010,
lmfMPTCanMutateCanTransfer = 0x00000020,
lmfMPTCanMutateCanClawback = 0x00000040,
lmfMPTCanMutateMetadata = 0x00010000,
lmfMPTCanMutateTransferFee = 0x00020000,
// ltMPTOKEN
lsfMPTAuthorized = 0x00000002,
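To illustrate the enum-generating expansion earlier in this file, a self-contained sketch with stand-in macro definitions copied from the hunk; the entry name, value, and field list are made up, and the invocation shape is only my reading of how `ledger_entries.macro` lays entries out. For the enum pass, `LEDGER_ENTRY` keeps just `tag = value,` and the field list is discarded.

```cpp
#include <iostream>

#define LEDGER_ENTRIES_BEGIN
#define LEDGER_ENTRIES_END
#define DEFINE_LEDGER_ENTRY_FIELDS(...) ({__VA_ARGS__})
#define LEDGER_ENTRY_FIELD(...) {__VA_ARGS__},
#define LEDGER_ENTRY(tag, value, name, rpcName, fields) tag = value,

// In LedgerFormats.h the surrounding enum already exists; it is repeated here
// only so the sketch compiles on its own.
enum DemoLedgerEntryType : unsigned short {
    LEDGER_ENTRIES_BEGIN
    LEDGER_ENTRY(ltDEMO_ENTRY, 0x007f, DemoEntry, demo_entry,
        DEFINE_LEDGER_ENTRY_FIELDS(
            LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)))
    LEDGER_ENTRIES_END
};

int
main()
{
    // Only the tag and value survive the enum pass.
    std::cout << std::hex << static_cast<unsigned>(ltDEMO_ENTRY) << '\n';  // 7f
}
```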

View File

@@ -74,9 +74,6 @@ public:
Permission&
operator=(Permission const&) = delete;
std::optional<std::string>
getPermissionName(std::uint32_t const value) const;
std::optional<std::uint32_t>
getGranularValue(std::string const& name) const;
@@ -86,9 +83,6 @@ public:
std::optional<TxType>
getGranularTxType(GranularPermissionType const& gpType) const;
std::optional<std::reference_wrapper<uint256 const>> const
getTxFeature(TxType txType) const;
bool
isDelegatable(std::uint32_t const& permissionValue, Rules const& rules)
const;

View File

@@ -22,6 +22,7 @@
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <cstdint>
@@ -55,10 +56,7 @@ std::size_t constexpr oversizeMetaDataCap = 5200;
/** The maximum number of entries per directory page */
std::size_t constexpr dirNodeMaxEntries = 32;
/** The maximum number of pages allowed in a directory
Made obsolete by fixDirectoryLimit amendment.
*/
/** The maximum number of pages allowed in a directory */
std::uint64_t constexpr dirNodeMaxPages = 262144;
/** The maximum number of items in an NFT page */

View File

@@ -22,7 +22,6 @@
#include <xrpl/basics/safe_cast.h>
#include <xrpl/json/json_value.h>
#include <xrpl/protocol/Units.h>
#include <cstdint>
#include <map>
@@ -72,10 +71,8 @@ class STCurrency;
STYPE(STI_VL, 7) \
STYPE(STI_ACCOUNT, 8) \
STYPE(STI_NUMBER, 9) \
STYPE(STI_INT32, 10) \
STYPE(STI_INT64, 11) \
\
/* 12-13 are reserved */ \
/* 10-13 are reserved */ \
STYPE(STI_OBJECT, 14) \
STYPE(STI_ARRAY, 15) \
\
@@ -151,10 +148,8 @@ public:
sMD_ChangeNew = 0x02, // new value when it changes
sMD_DeleteFinal = 0x04, // final value when it is deleted
sMD_Create = 0x08, // value when it's created
sMD_Always = 0x10, // value when node containing it is affected at all
sMD_BaseTen = 0x20, // value is treated as base 10, overriding behavior
sMD_PseudoAccount = 0x40, // if this field is set in an ACCOUNT_ROOT
// _only_, then it is a pseudo-account
sMD_Always = 0x10, // value when node containing it is affected at all
sMD_BaseTen = 0x20,
sMD_Default =
sMD_ChangeOrig | sMD_ChangeNew | sMD_DeleteFinal | sMD_Create
};
@@ -189,7 +184,7 @@ public:
char const* fn,
int meta = sMD_Default,
IsSigning signing = IsSigning::yes);
explicit SField(private_access_tag_t, int fc, char const* fn);
explicit SField(private_access_tag_t, int fc);
static SField const&
getField(int fieldCode);
@@ -302,7 +297,7 @@ public:
static int
compare(SField const& f1, SField const& f2);
static std::unordered_map<int, SField const*> const&
static std::map<int, SField const*> const&
getKnownCodeToField()
{
return knownCodeToField;
@@ -310,8 +305,7 @@ public:
private:
static int num;
static std::unordered_map<int, SField const*> knownCodeToField;
static std::unordered_map<std::string, SField const*> knownNameToField;
static std::map<int, SField const*> knownCodeToField;
};
/** A field with a type known at compile time. */
@@ -358,9 +352,6 @@ using SF_UINT256 = TypedField<STBitString<256>>;
using SF_UINT384 = TypedField<STBitString<384>>;
using SF_UINT512 = TypedField<STBitString<512>>;
using SF_INT32 = TypedField<STInteger<std::int32_t>>;
using SF_INT64 = TypedField<STInteger<std::int64_t>>;
using SF_ACCOUNT = TypedField<STAccount>;
using SF_AMOUNT = TypedField<STAmount>;
using SF_ISSUE = TypedField<STIssue>;
@@ -377,11 +368,15 @@ using SF_XCHAIN_BRIDGE = TypedField<STXChainBridge>;
#undef UNTYPED_SFIELD
#pragma push_macro("TYPED_SFIELD")
#undef TYPED_SFIELD
#pragma push_macro("ARRAY_SFIELD")
#undef ARRAY_SFIELD
#define UNTYPED_SFIELD(sfName, stiSuffix, fieldValue, ...) \
extern SField const sfName;
#define TYPED_SFIELD(sfName, stiSuffix, fieldValue, ...) \
extern SF_##stiSuffix const sfName;
#define ARRAY_SFIELD(sfName, sfItemName, stiSuffix, fieldValue, ...) \
UNTYPED_SFIELD(sfName, stiSuffix, fieldValue, __VA_ARGS__)
extern SField const sfInvalid;
extern SField const sfGeneric;
@@ -392,6 +387,8 @@ extern SField const sfGeneric;
#pragma pop_macro("TYPED_SFIELD")
#undef UNTYPED_SFIELD
#pragma pop_macro("UNTYPED_SFIELD")
#undef ARRAY_SFIELD
#pragma pop_macro("ARRAY_SFIELD")
} // namespace ripple
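A hedged sketch of the field registry this header exposes (the choice of `sfSequence` is illustrative): fields declared by the `TYPED_SFIELD`/`UNTYPED_SFIELD` expansion can be looked up again by numeric code through `SField::getField()` and enumerated via `getKnownCodeToField()`.

```cpp
#include <xrpl/protocol/SField.h>

#include <iostream>
#include <string>

int
main()
{
    using namespace ripple;

    // sfSequence is one of the TYPED_SFIELD declarations pulled in above.
    SField const& byCode = SField::getField(sfSequence.fieldCode);

    std::cout << byCode.getName() << " code=" << byCode.fieldCode
              << ", known fields=" << SField::getKnownCodeToField().size()
              << '\n';
}
```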

View File

@@ -81,8 +81,6 @@ using STUInt16 = STInteger<std::uint16_t>;
using STUInt32 = STInteger<std::uint32_t>;
using STUInt64 = STInteger<std::uint64_t>;
using STInt32 = STInteger<std::int32_t>;
template <typename Integer>
inline STInteger<Integer>::STInteger(Integer v) : value_(v)
{

View File

@@ -26,9 +26,7 @@
namespace ripple {
class Rules;
namespace test {
class Invariants_test;
}
class STLedgerEntry final : public STObject, public CountedObject<STLedgerEntry>
{
@@ -38,8 +36,6 @@ class STLedgerEntry final : public STObject, public CountedObject<STLedgerEntry>
public:
using pointer = std::shared_ptr<STLedgerEntry>;
using ref = std::shared_ptr<STLedgerEntry> const&;
using const_pointer = std::shared_ptr<STLedgerEntry const>;
using const_ref = std::shared_ptr<STLedgerEntry const> const&;
/** Create an empty object with the given key and type. */
explicit STLedgerEntry(Keylet const& k);
@@ -58,7 +54,7 @@ public:
getText() const override;
Json::Value
getJson(JsonOptions options = JsonOptions::none) const override;
getJson(JsonOptions options) const override;
/** Returns the 'key' (or 'index') of this item.
The key identifies this entry's position in
@@ -88,8 +84,7 @@ private:
void
setSLEType();
friend test::Invariants_test; // this test wants access to the private
// type_
friend Invariants_test; // this test wants access to the private type_
STBase*
copy(std::size_t n, void* buf) const override;

View File

@@ -25,6 +25,7 @@
#include <xrpl/basics/chrono.h>
#include <xrpl/basics/contract.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/protocol/FeeUnits.h>
#include <xrpl/protocol/HashPrefix.h>
#include <xrpl/protocol/SOTemplate.h>
#include <xrpl/protocol/STAmount.h>
@@ -33,7 +34,6 @@
#include <xrpl/protocol/STIssue.h>
#include <xrpl/protocol/STPathSet.h>
#include <xrpl/protocol/STVector256.h>
#include <xrpl/protocol/Units.h>
#include <xrpl/protocol/detail/STVar.h>
#include <boost/iterator/transform_iterator.hpp>
@@ -231,8 +231,6 @@ public:
getFieldH192(SField const& field) const;
uint256
getFieldH256(SField const& field) const;
std::int32_t
getFieldI32(SField const& field) const;
AccountID
getAccountID(SField const& field) const;
@@ -367,8 +365,6 @@ public:
void
setFieldH256(SField const& field, uint256 const&);
void
setFieldI32(SField const& field, std::int32_t);
void
setFieldVL(SField const& field, Blob const&);
void
setFieldVL(SField const& field, Slice const&);

View File

@@ -54,6 +54,34 @@ public:
Json::Value error;
};
/** Holds the serialized result of parsing an input JSON array.
This does validation and checking on the provided JSON.
*/
class STParsedJSONArray
{
public:
/** Parses and creates an STParsedJSON array.
The result of the parsing is stored in array and error.
Exceptions:
Does not throw.
@param name The name of the JSON field, used in diagnostics.
@param json The JSON-RPC to parse.
*/
STParsedJSONArray(std::string const& name, Json::Value const& json);
STParsedJSONArray() = delete;
STParsedJSONArray(STParsedJSONArray const&) = delete;
STParsedJSONArray&
operator=(STParsedJSONArray const&) = delete;
~STParsedJSONArray() = default;
/** The STArray if the parse was successful. */
std::optional<STArray> array;
/** On failure, an appropriate set of error values. */
Json::Value error;
};
} // namespace ripple
#endif
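A hedged usage sketch of the new array parser (assuming the usual `<xrpl/protocol/STParsedJSON.h>` header path; the JSON content is illustrative): hand it a JSON array and inspect either the resulting `STArray` or the reported error.

```cpp
#include <xrpl/json/json_reader.h>
#include <xrpl/protocol/STParsedJSON.h>

#include <iostream>

int
main()
{
    using namespace ripple;

    Json::Value memos;
    Json::Reader reader;
    reader.parse(R"([{"Memo": {"MemoData": "DEADBEEF"}}])", memos);

    STParsedJSONArray const parsed("Memos", memos);
    if (parsed.array)
        std::cout << "parsed " << parsed.array->size() << " element(s)\n";
    else
        std::cout << "parse error: " << parsed.error.toStyledString();
}
```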

View File

@@ -22,10 +22,10 @@
#include <xrpl/basics/Log.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/protocol/FeeUnits.h>
#include <xrpl/protocol/PublicKey.h>
#include <xrpl/protocol/STObject.h>
#include <xrpl/protocol/SecretKey.h>
#include <xrpl/protocol/Units.h>
#include <cstdint>
#include <optional>

View File

@@ -673,8 +673,7 @@ isTerRetry(TER x) noexcept
inline bool
isTesSuccess(TER x) noexcept
{
// Makes use of TERSubset::operator bool()
return !(x);
return (x == tesSUCCESS);
}
inline bool

View File

@@ -127,8 +127,6 @@ constexpr std::uint32_t tfTrustSetPermissionMask = ~(tfUniversal | tfSetfAuth |
// EnableAmendment flags:
constexpr std::uint32_t tfGotMajority = 0x00010000;
constexpr std::uint32_t tfLostMajority = 0x00020000;
constexpr std::uint32_t tfChangeMask =
~( tfUniversal | tfGotMajority | tfLostMajority);
// PaymentChannelClaim flags:
constexpr std::uint32_t tfRenew = 0x00010000;
@@ -143,8 +141,7 @@ constexpr std::uint32_t const tfTransferable = 0x00000008;
constexpr std::uint32_t const tfMutable = 0x00000010;
// MPTokenIssuanceCreate flags:
// Note: tf/lsfMPTLocked is intentionally omitted, since this transaction
// is not allowed to modify it.
// NOTE - there is intentionally no flag here for lsfMPTLocked, which this transaction cannot mutate.
constexpr std::uint32_t const tfMPTCanLock = lsfMPTCanLock;
constexpr std::uint32_t const tfMPTRequireAuth = lsfMPTRequireAuth;
constexpr std::uint32_t const tfMPTCanEscrow = lsfMPTCanEscrow;
@@ -156,14 +153,14 @@ constexpr std::uint32_t const tfMPTokenIssuanceCreateMask =
// MPTokenIssuanceCreate MutableFlags:
// Indicating specific fields or flags may be changed after issuance.
constexpr std::uint32_t const tmfMPTCanMutateCanLock = lsmfMPTCanMutateCanLock;
constexpr std::uint32_t const tmfMPTCanMutateRequireAuth = lsmfMPTCanMutateRequireAuth;
constexpr std::uint32_t const tmfMPTCanMutateCanEscrow = lsmfMPTCanMutateCanEscrow;
constexpr std::uint32_t const tmfMPTCanMutateCanTrade = lsmfMPTCanMutateCanTrade;
constexpr std::uint32_t const tmfMPTCanMutateCanTransfer = lsmfMPTCanMutateCanTransfer;
constexpr std::uint32_t const tmfMPTCanMutateCanClawback = lsmfMPTCanMutateCanClawback;
constexpr std::uint32_t const tmfMPTCanMutateMetadata = lsmfMPTCanMutateMetadata;
constexpr std::uint32_t const tmfMPTCanMutateTransferFee = lsmfMPTCanMutateTransferFee;
constexpr std::uint32_t const tmfMPTCanMutateCanLock = lmfMPTCanMutateCanLock;
constexpr std::uint32_t const tmfMPTCanMutateRequireAuth = lmfMPTCanMutateRequireAuth;
constexpr std::uint32_t const tmfMPTCanMutateCanEscrow = lmfMPTCanMutateCanEscrow;
constexpr std::uint32_t const tmfMPTCanMutateCanTrade = lmfMPTCanMutateCanTrade;
constexpr std::uint32_t const tmfMPTCanMutateCanTransfer = lmfMPTCanMutateCanTransfer;
constexpr std::uint32_t const tmfMPTCanMutateCanClawback = lmfMPTCanMutateCanClawback;
constexpr std::uint32_t const tmfMPTCanMutateMetadata = lmfMPTCanMutateMetadata;
constexpr std::uint32_t const tmfMPTCanMutateTransferFee = lmfMPTCanMutateTransferFee;
constexpr std::uint32_t const tmfMPTokenIssuanceCreateMutableMask =
~(tmfMPTCanMutateCanLock | tmfMPTCanMutateRequireAuth | tmfMPTCanMutateCanEscrow | tmfMPTCanMutateCanTrade
| tmfMPTCanMutateCanTransfer | tmfMPTCanMutateCanClawback | tmfMPTCanMutateMetadata | tmfMPTCanMutateTransferFee);
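A hedged sketch (not the transactor's actual preflight code) of the mask idiom these constants support: a `MutableFlags` word is acceptable only if it sets no bits outside the `tmfMPTCanMutate*` set, i.e. it has no overlap with the complement mask defined above.

```cpp
#include <xrpl/protocol/TxFlags.h>

#include <cstdint>
#include <iostream>

int
main()
{
    using namespace ripple;

    std::uint32_t const mutableFlags =
        tmfMPTCanMutateMetadata | tmfMPTCanMutateTransferFee;

    if (mutableFlags & tmfMPTokenIssuanceCreateMutableMask)
        std::cout << "unknown MutableFlags bits set -> reject\n";
    else
        std::cout << "MutableFlags limited to the allowed set -> accept\n";
}
```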

File diff suppressed because it is too large.

View File

@@ -1,555 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2019 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef PROTOCOL_UNITS_H_INCLUDED
#define PROTOCOL_UNITS_H_INCLUDED
#include <xrpl/basics/safe_cast.h>
#include <xrpl/beast/utility/Zero.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/json/json_value.h>
#include <boost/multiprecision/cpp_int.hpp>
#include <boost/operators.hpp>
#include <iosfwd>
#include <limits>
#include <optional>
namespace ripple {
namespace unit {
/** "drops" are the smallest divisible amount of XRP. This is what most
of the code uses. */
struct dropTag;
/** "fee levels" are used by the transaction queue to compare the relative
cost of transactions that require different levels of effort to process.
See also: src/ripple/app/misc/FeeEscalation.md#fee-level */
struct feelevelTag;
/** unitless values are plain scalars wrapped in a ValueUnit. They are
used for calculations in this header. */
struct unitlessTag;
/** Units to represent basis points (bips) and 1/10 basis points */
class BipsTag;
class TenthBipsTag;
// These names don't have to be too descriptive, because we're in the "unit"
// namespace.
template <class T>
concept Valid = std::is_class_v<T> && std::is_object_v<typename T::unit_type> &&
std::is_object_v<typename T::value_type>;
/** `Usable` is checked to ensure that only values with
known valid type tags can be used (sometimes transparently) in
non-unit contexts. At the time of implementation, this includes
all known tags, but more may be added in the future, and they
should not be added automatically unless determined to be
appropriate.
*/
template <class T>
concept Usable = Valid<T> &&
(std::is_same_v<typename T::unit_type, feelevelTag> ||
std::is_same_v<typename T::unit_type, unitlessTag> ||
std::is_same_v<typename T::unit_type, dropTag> ||
std::is_same_v<typename T::unit_type, BipsTag> ||
std::is_same_v<typename T::unit_type, TenthBipsTag>);
template <class Other, class VU>
concept Compatible = Valid<VU> && std::is_arithmetic_v<Other> &&
std::is_arithmetic_v<typename VU::value_type> &&
std::is_convertible_v<Other, typename VU::value_type>;
template <class T>
concept Integral = std::is_integral_v<T>;
template <class VU>
concept IntegralValue = Integral<typename VU::value_type>;
template <class VU1, class VU2>
concept CastableValue = IntegralValue<VU1> && IntegralValue<VU2> &&
std::is_same_v<typename VU1::unit_type, typename VU2::unit_type>;
template <class UnitTag, class T>
class ValueUnit : private boost::totally_ordered<ValueUnit<UnitTag, T>>,
private boost::additive<ValueUnit<UnitTag, T>>,
private boost::equality_comparable<ValueUnit<UnitTag, T>, T>,
private boost::dividable<ValueUnit<UnitTag, T>, T>,
private boost::modable<ValueUnit<UnitTag, T>, T>,
private boost::unit_steppable<ValueUnit<UnitTag, T>>
{
public:
using unit_type = UnitTag;
using value_type = T;
private:
value_type value_;
public:
ValueUnit() = default;
constexpr ValueUnit(ValueUnit const& other) = default;
constexpr ValueUnit&
operator=(ValueUnit const& other) = default;
constexpr explicit ValueUnit(beast::Zero) : value_(0)
{
}
constexpr ValueUnit&
operator=(beast::Zero)
{
value_ = 0;
return *this;
}
constexpr explicit ValueUnit(value_type value) : value_(value)
{
}
constexpr ValueUnit&
operator=(value_type value)
{
value_ = value;
return *this;
}
/** Instances with the same unit, and a type that is
"safe" to convert to this one can be converted
implicitly */
template <Compatible<ValueUnit> Other>
constexpr ValueUnit(ValueUnit<unit_type, Other> const& value)
requires SafeToCast<Other, value_type>
: ValueUnit(safe_cast<value_type>(value.value()))
{
}
constexpr ValueUnit
operator+(value_type const& rhs) const
{
return ValueUnit{value_ + rhs};
}
friend constexpr ValueUnit
operator+(value_type lhs, ValueUnit const& rhs)
{
// addition is commutative
return rhs + lhs;
}
constexpr ValueUnit
operator-(value_type const& rhs) const
{
return ValueUnit{value_ - rhs};
}
friend constexpr ValueUnit
operator-(value_type lhs, ValueUnit const& rhs)
{
// subtraction is NOT commutative, but (lhs + (-rhs)) is addition, which
// is
return -rhs + lhs;
}
constexpr ValueUnit
operator*(value_type const& rhs) const
{
return ValueUnit{value_ * rhs};
}
friend constexpr ValueUnit
operator*(value_type lhs, ValueUnit const& rhs)
{
// multiplication is commutative
return rhs * lhs;
}
constexpr value_type
operator/(ValueUnit const& rhs) const
{
return value_ / rhs.value_;
}
ValueUnit&
operator+=(ValueUnit const& other)
{
value_ += other.value();
return *this;
}
ValueUnit&
operator-=(ValueUnit const& other)
{
value_ -= other.value();
return *this;
}
ValueUnit&
operator++()
{
++value_;
return *this;
}
ValueUnit&
operator--()
{
--value_;
return *this;
}
ValueUnit&
operator*=(value_type const& rhs)
{
value_ *= rhs;
return *this;
}
ValueUnit&
operator/=(value_type const& rhs)
{
value_ /= rhs;
return *this;
}
template <Integral transparent = value_type>
ValueUnit&
operator%=(value_type const& rhs)
{
value_ %= rhs;
return *this;
}
ValueUnit
operator-() const
{
static_assert(
std::is_signed_v<T>, "- operator illegal on unsigned value types");
return ValueUnit{-value_};
}
constexpr bool
operator==(ValueUnit const& other) const
{
return value_ == other.value_;
}
template <Compatible<ValueUnit> Other>
constexpr bool
operator==(ValueUnit<unit_type, Other> const& other) const
{
return value_ == other.value();
}
constexpr bool
operator==(value_type other) const
{
return value_ == other;
}
template <Compatible<ValueUnit> Other>
constexpr bool
operator!=(ValueUnit<unit_type, Other> const& other) const
{
return !operator==(other);
}
constexpr bool
operator<(ValueUnit const& other) const
{
return value_ < other.value_;
}
/** Returns true if the amount is not zero */
explicit constexpr
operator bool() const noexcept
{
return value_ != 0;
}
/** Return the sign of the amount */
constexpr int
signum() const noexcept
{
return (value_ < 0) ? -1 : (value_ ? 1 : 0);
}
/** Returns the number of drops */
// TODO: Move this to a new class, maybe with the old "TaggedFee" name
constexpr value_type
fee() const
{
return value_;
}
template <class Other>
constexpr double
decimalFromReference(ValueUnit<unit_type, Other> reference) const
{
return static_cast<double>(value_) / reference.value();
}
// `Usable` is checked to ensure that only values with
// known valid type tags can be converted to JSON. At the time
// of implementation, that includes all known tags, but more may
// be added in the future.
Json::Value
jsonClipped() const
requires Usable<ValueUnit>
{
if constexpr (std::is_integral_v<value_type>)
{
using jsontype = std::conditional_t<
std::is_signed_v<value_type>,
Json::Int,
Json::UInt>;
constexpr auto min = std::numeric_limits<jsontype>::min();
constexpr auto max = std::numeric_limits<jsontype>::max();
if (value_ < min)
return min;
if (value_ > max)
return max;
return static_cast<jsontype>(value_);
}
else
{
return value_;
}
}
/** Returns the underlying value. Code SHOULD NOT call this
function unless the type has been abstracted away,
e.g. in a templated function.
*/
constexpr value_type
value() const
{
return value_;
}
friend std::istream&
operator>>(std::istream& s, ValueUnit& val)
{
s >> val.value_;
return s;
}
};
// Output Values as just their numeric value.
template <class Char, class Traits, class UnitTag, class T>
std::basic_ostream<Char, Traits>&
operator<<(std::basic_ostream<Char, Traits>& os, ValueUnit<UnitTag, T> const& q)
{
return os << q.value();
}
template <class UnitTag, class T>
std::string
to_string(ValueUnit<UnitTag, T> const& amount)
{
return std::to_string(amount.value());
}
template <class Source>
concept muldivSource = Valid<Source> &&
std::is_convertible_v<typename Source::value_type, std::uint64_t>;
template <class Dest>
concept muldivDest = muldivSource<Dest> && // Dest is also a source
std::is_convertible_v<std::uint64_t, typename Dest::value_type> &&
sizeof(typename Dest::value_type) >= sizeof(std::uint64_t);
template <class Source2, class Source1>
concept muldivSources = muldivSource<Source1> && muldivSource<Source2> &&
std::is_same_v<typename Source1::unit_type, typename Source2::unit_type>;
template <class Dest, class Source1, class Source2>
concept muldivable = muldivSources<Source1, Source2> && muldivDest<Dest>;
// Source and Dest can be the same by default
template <class Dest, class Source1, class Source2>
concept muldivCommutable = muldivable<Dest, Source1, Source2> &&
!std::is_same_v<typename Source1::unit_type, typename Dest::unit_type>;
template <class T>
ValueUnit<unitlessTag, T>
scalar(T value)
{
return ValueUnit<unitlessTag, T>{value};
}
template <class Source1, class Source2, unit::muldivable<Source1, Source2> Dest>
std::optional<Dest>
mulDivU(Source1 value, Dest mul, Source2 div)
{
// values can never be negative in any context.
if (value.value() < 0 || mul.value() < 0 || div.value() < 0)
{
// split the asserts so if one hits, the user can tell which
// without a debugger.
XRPL_ASSERT(
value.value() >= 0, "ripple::unit::mulDivU : minimum value input");
XRPL_ASSERT(
mul.value() >= 0, "ripple::unit::mulDivU : minimum mul input");
XRPL_ASSERT(
div.value() > 0, "ripple::unit::mulDivU : minimum div input");
return std::nullopt;
}
using desttype = typename Dest::value_type;
constexpr auto max = std::numeric_limits<desttype>::max();
// Shortcuts, since these happen a lot in the real world
if (value == div)
return mul;
if (mul.value() == div.value())
{
if (value.value() > max)
return std::nullopt;
return Dest{static_cast<desttype>(value.value())};
}
using namespace boost::multiprecision;
uint128_t product;
product = multiply(
product,
static_cast<std::uint64_t>(value.value()),
static_cast<std::uint64_t>(mul.value()));
auto quotient = product / div.value();
if (quotient > max)
return std::nullopt;
return Dest{static_cast<desttype>(quotient)};
}
} // namespace unit
// Fee Levels
template <class T>
using FeeLevel = unit::ValueUnit<unit::feelevelTag, T>;
using FeeLevel64 = FeeLevel<std::uint64_t>;
using FeeLevelDouble = FeeLevel<double>;
// Basis points (Bips)
template <class T>
using Bips = unit::ValueUnit<unit::BipsTag, T>;
using Bips16 = Bips<std::uint16_t>;
using Bips32 = Bips<std::uint32_t>;
template <class T>
using TenthBips = unit::ValueUnit<unit::TenthBipsTag, T>;
using TenthBips16 = TenthBips<std::uint16_t>;
using TenthBips32 = TenthBips<std::uint32_t>;
template <class Source1, class Source2, unit::muldivable<Source1, Source2> Dest>
std::optional<Dest>
mulDiv(Source1 value, Dest mul, Source2 div)
{
return unit::mulDivU(value, mul, div);
}
template <
class Source1,
class Source2,
unit::muldivCommutable<Source1, Source2> Dest>
std::optional<Dest>
mulDiv(Dest value, Source1 mul, Source2 div)
{
// Multiplication is commutative
return unit::mulDivU(mul, value, div);
}
template <unit::muldivDest Dest>
std::optional<Dest>
mulDiv(std::uint64_t value, Dest mul, std::uint64_t div)
{
// Give the scalars a non-tag so the
// unit-handling version gets called.
return unit::mulDivU(unit::scalar(value), mul, unit::scalar(div));
}
template <unit::muldivDest Dest>
std::optional<Dest>
mulDiv(Dest value, std::uint64_t mul, std::uint64_t div)
{
// Multiplication is commutative
return mulDiv(mul, value, div);
}
template <unit::muldivSource Source1, unit::muldivSources<Source1> Source2>
std::optional<std::uint64_t>
mulDiv(Source1 value, std::uint64_t mul, Source2 div)
{
// Give the scalars a dimensionless unit so the
// unit-handling version gets called.
auto unitresult = unit::mulDivU(value, unit::scalar(mul), div);
if (!unitresult)
return std::nullopt;
return unitresult->value();
}
template <unit::muldivSource Source1, unit::muldivSources<Source1> Source2>
std::optional<std::uint64_t>
mulDiv(std::uint64_t value, Source1 mul, Source2 div)
{
// Multiplication is commutative
return mulDiv(mul, value, div);
}
template <unit::IntegralValue Dest, unit::CastableValue<Dest> Src>
constexpr Dest
safe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{safe_cast<typename Dest::value_type>(s.value())};
}
template <unit::IntegralValue Dest, unit::Integral Src>
constexpr Dest
safe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{safe_cast<typename Dest::value_type>(s)};
}
template <unit::IntegralValue Dest, unit::CastableValue<Dest> Src>
constexpr Dest
unsafe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{unsafe_cast<typename Dest::value_type>(s.value())};
}
template <unit::IntegralValue Dest, unit::Integral Src>
constexpr Dest
unsafe_cast(Src s) noexcept
{
// Dest may not have an explicit value constructor
return Dest{unsafe_cast<typename Dest::value_type>(s)};
}
} // namespace ripple
#endif // PROTOCOL_UNITS_H_INCLUDED
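For reference, a hedged sketch of the basis-point helpers that only this (removed) header provides; it would compile solely against a tree that still ships `<xrpl/protocol/Units.h>`, and the fee values are illustrative.

```cpp
#include <xrpl/protocol/Units.h>

#include <iostream>
#include <type_traits>

int
main()
{
    using namespace ripple;

    TenthBips32 const transferFee{250};  // 25 bps, expressed in 1/10 bips
    TenthBips32 const doubled = transferFee + TenthBips32{250};

    // Different tags (TenthBipsTag vs BipsTag) stay distinct types, which is
    // the point of the unit wrappers.
    static_assert(!std::is_same_v<TenthBips32, Bips32>);

    std::cout << "transfer fee: " << doubled << " (1/10 bips)\n";
}
```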

View File

@@ -24,7 +24,7 @@
#include <xrpl/basics/contract.h>
#include <xrpl/beast/utility/Zero.h>
#include <xrpl/json/json_value.h>
#include <xrpl/protocol/Units.h>
#include <xrpl/protocol/FeeUnits.h>
#include <boost/multiprecision/cpp_int.hpp>
#include <boost/operators.hpp>
@@ -42,7 +42,7 @@ class XRPAmount : private boost::totally_ordered<XRPAmount>,
private boost::additive<XRPAmount, std::int64_t>
{
public:
using unit_type = unit::dropTag;
using unit_type = feeunit::dropTag;
using value_type = std::int64_t;
private:

View File

@@ -129,12 +129,10 @@ inplace_bigint_div_rem(std::span<uint64_t> numerator, std::uint64_t divisor)
{
// should never happen, but if it does then it seems natural to define
// the a null set of numbers to be zero, so the remainder is also zero.
// LCOV_EXCL_START
UNREACHABLE(
"ripple::b58_fast::detail::inplace_bigint_div_rem : empty "
"numerator");
return 0;
// LCOV_EXCL_STOP
}
auto to_u128 = [](std::uint64_t high,

View File

@@ -29,14 +29,15 @@
// Add new amendments to the top of this list.
// Keep it sorted in reverse chronological order.
// If you add an amendment here, then do not forget to increment `numFeatures`
// in include/xrpl/protocol/Feature.h.
XRPL_FIX (DirectoryLimit, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (IncludeKeyletFields, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (IncludeKeyletFields, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (PriceOracleOrder, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo)
@@ -44,7 +45,7 @@ XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo
XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegation, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
// Check flags in Credential transactions
XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo)

View File

@@ -0,0 +1,175 @@
#if !defined(INNER_OBJECT)
#error "undefined macro: INNER_OBJECT"
#endif
#if !defined(FIELDS)
#error "undefined macro: FIELDS"
#endif
#if !defined(FIELD)
#error "undefined macro: FIELD"
#endif
#if !defined(INNER_OBJECT_BEGIN)
#error "undefined macro: INNER_OBJECT_BEGIN"
#endif
#if !defined(INNER_OBJECT_END)
#error "undefined macro: INNER_OBJECT_END"
#endif
INNER_OBJECT_BEGIN
INNER_OBJECT(sfSignerEntry,
FIELDS(
FIELD(sfSignerEntry, sfAccount, soeREQUIRED)
FIELD(sfSignerEntry, sfSignerWeight, soeREQUIRED)
FIELD(sfSignerEntry, sfWalletLocator, soeOPTIONAL)
)
)
INNER_OBJECT(sfSigner,
FIELDS(
FIELD(sfSigner, sfAccount, soeREQUIRED)
FIELD(sfSigner, sfSigningPubKey, soeREQUIRED)
FIELD(sfSigner, sfTxnSignature, soeREQUIRED)
)
)
INNER_OBJECT(sfMajority,
FIELDS(
FIELD(sfMajority, sfAmendment, soeREQUIRED)
FIELD(sfMajority, sfCloseTime, soeREQUIRED)
)
)
INNER_OBJECT(sfDisabledValidator,
FIELDS(
FIELD(sfDisabledValidator, sfPublicKey, soeREQUIRED)
FIELD(sfDisabledValidator, sfFirstLedgerSequence, soeREQUIRED)
)
)
INNER_OBJECT(sfNFToken,
FIELDS(
FIELD(sfNFToken, sfNFTokenID, soeREQUIRED)
FIELD(sfNFToken, sfURI, soeOPTIONAL)
)
)
INNER_OBJECT(sfVoteEntry,
FIELDS(
FIELD(sfVoteEntry, sfAccount, soeREQUIRED)
FIELD(sfVoteEntry, sfTradingFee, soeDEFAULT)
FIELD(sfVoteEntry, sfVoteWeight, soeREQUIRED)
)
)
INNER_OBJECT(sfAuctionSlot,
FIELDS(
FIELD(sfAuctionSlot, sfAccount, soeREQUIRED)
FIELD(sfAuctionSlot, sfExpiration, soeREQUIRED)
FIELD(sfAuctionSlot, sfDiscountedFee, soeDEFAULT)
FIELD(sfAuctionSlot, sfPrice, soeREQUIRED)
FIELD(sfAuctionSlot, sfAuthAccounts, soeOPTIONAL)
)
)
INNER_OBJECT(sfXChainClaimAttestationCollectionElement,
FIELDS(
FIELD(sfXChainClaimAttestationCollectionElement, sfAttestationSignerAccount, soeREQUIRED)
FIELD(sfXChainClaimAttestationCollectionElement, sfPublicKey, soeREQUIRED)
FIELD(sfXChainClaimAttestationCollectionElement, sfSignature, soeREQUIRED)
FIELD(sfXChainClaimAttestationCollectionElement, sfAmount, soeREQUIRED)
FIELD(sfXChainClaimAttestationCollectionElement, sfAccount, soeREQUIRED)
FIELD(sfXChainClaimAttestationCollectionElement, sfAttestationRewardAccount, soeREQUIRED)
FIELD(sfXChainClaimAttestationCollectionElement, sfWasLockingChainSend, soeREQUIRED)
FIELD(sfXChainClaimAttestationCollectionElement, sfXChainClaimID, soeREQUIRED)
FIELD(sfXChainClaimAttestationCollectionElement, sfDestination, soeOPTIONAL)
)
)
INNER_OBJECT(sfXChainCreateAccountAttestationCollectionElement,
FIELDS(
FIELD(sfXChainCreateAccountAttestationCollectionElement, sfAttestationSignerAccount, soeREQUIRED)
FIELD(sfXChainCreateAccountAttestationCollectionElement, sfPublicKey, soeREQUIRED)
FIELD(sfXChainCreateAccountAttestationCollectionElement, sfSignature, soeREQUIRED)
FIELD(sfXChainCreateAccountAttestationCollectionElement, sfAmount, soeREQUIRED)
FIELD(sfXChainCreateAccountAttestationCollectionElement, sfAccount, soeREQUIRED)
FIELD(sfXChainCreateAccountAttestationCollectionElement, sfAttestationRewardAccount, soeREQUIRED)
FIELD(sfXChainCreateAccountAttestationCollectionElement, sfWasLockingChainSend, soeREQUIRED)
FIELD(sfXChainCreateAccountAttestationCollectionElement, sfXChainAccountCreateCount, soeREQUIRED)
FIELD(sfXChainCreateAccountAttestationCollectionElement, sfDestination, soeREQUIRED)
FIELD(sfXChainCreateAccountAttestationCollectionElement, sfSignatureReward, soeREQUIRED)
)
)
INNER_OBJECT(sfXChainClaimProofSig,
FIELDS(
FIELD(sfXChainClaimProofSig, sfAttestationSignerAccount, soeREQUIRED)
FIELD(sfXChainClaimProofSig, sfPublicKey, soeREQUIRED)
FIELD(sfXChainClaimProofSig, sfAmount, soeREQUIRED)
FIELD(sfXChainClaimProofSig, sfAttestationRewardAccount, soeREQUIRED)
FIELD(sfXChainClaimProofSig, sfWasLockingChainSend, soeREQUIRED)
FIELD(sfXChainClaimProofSig, sfDestination, soeOPTIONAL)
)
)
INNER_OBJECT(sfXChainCreateAccountProofSig,
FIELDS(
FIELD(sfXChainCreateAccountProofSig, sfAttestationSignerAccount, soeREQUIRED)
FIELD(sfXChainCreateAccountProofSig, sfPublicKey, soeREQUIRED)
FIELD(sfXChainCreateAccountProofSig, sfAmount, soeREQUIRED)
FIELD(sfXChainCreateAccountProofSig, sfSignatureReward, soeREQUIRED)
FIELD(sfXChainCreateAccountProofSig, sfAttestationRewardAccount, soeREQUIRED)
FIELD(sfXChainCreateAccountProofSig, sfWasLockingChainSend, soeREQUIRED)
FIELD(sfXChainCreateAccountProofSig, sfDestination, soeREQUIRED)
)
)
INNER_OBJECT(sfAuthAccount,
FIELDS(
FIELD(sfAuthAccount, sfAccount, soeREQUIRED)
)
)
INNER_OBJECT(sfPriceData,
FIELDS(
FIELD(sfPriceData, sfBaseAsset, soeREQUIRED)
FIELD(sfPriceData, sfQuoteAsset, soeREQUIRED)
FIELD(sfPriceData, sfAssetPrice, soeOPTIONAL)
FIELD(sfPriceData, sfScale, soeDEFAULT)
)
)
INNER_OBJECT(sfCredential,
FIELDS(
FIELD(sfCredential, sfIssuer, soeREQUIRED)
FIELD(sfCredential, sfCredentialType, soeREQUIRED)
)
)
INNER_OBJECT(sfPermission,
FIELDS(
FIELD(sfPermission, sfPermissionValue, soeREQUIRED)
)
)
INNER_OBJECT(sfBatchSigner,
FIELDS(
FIELD(sfBatchSigner, sfAccount, soeREQUIRED)
FIELD(sfBatchSigner, sfSigningPubKey, soeOPTIONAL)
FIELD(sfBatchSigner, sfTxnSignature, soeOPTIONAL)
FIELD(sfBatchSigner, sfSigners, soeOPTIONAL)
)
)
INNER_OBJECT(sfBook,
FIELDS(
FIELD(sfBook, sfBookDirectory, soeREQUIRED)
FIELD(sfBook, sfBookNode, soeREQUIRED)
)
)
INNER_OBJECT_END
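
The guard checks at the top of this file spell out the consumption contract: a translation unit defines INNER_OBJECT_BEGIN, INNER_OBJECT, FIELDS, FIELD, and INNER_OBJECT_END, includes the file, and then cleans the macros up again. A hedged sketch of that pattern follows; the include path is an assumption (this diff does not show where the new file lives), and the expansion here simply prints the schema:

// Sketch of consuming the inner-object list above: define the five guarded
// macros so the expansion prints each object's fields, then include the file.
#include <iostream>

#define INNER_OBJECT_BEGIN int main() {
#define INNER_OBJECT(object, fields) std::cout << #object ":\n"; fields
#define FIELDS(...) __VA_ARGS__
#define FIELD(object, field, style) std::cout << "  " #field " (" #style ")\n";
#define INNER_OBJECT_END }

#include <xrpl/protocol/detail/inner_objects.macro>  // path assumed

#undef INNER_OBJECT_BEGIN
#undef INNER_OBJECT
#undef FIELDS
#undef FIELD
#undef INNER_OBJECT_END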

View File

@@ -21,6 +21,22 @@
#error "undefined macro: LEDGER_ENTRY"
#endif
#if !defined(LEDGER_ENTRY_FIELD)
#error "undefined macro: LEDGER_ENTRY_FIELD"
#endif
#if !defined(DEFINE_LEDGER_ENTRY_FIELDS)
#error "undefined macro: DEFINE_LEDGER_ENTRY_FIELDS"
#endif
#if !defined(LEDGER_ENTRIES_BEGIN)
#error "undefined macro: LEDGER_ENTRIES_BEGIN"
#endif
#if !defined(LEDGER_ENTRIES_END)
#error "undefined macro: LEDGER_ENTRIES_END"
#endif
#ifndef LEDGER_ENTRY_DUPLICATE
// The EXPAND macro is needed to work around the traditional MSVC
// preprocessor, which expands __VA_ARGS__ as a single token:
// https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly
@@ -32,6 +48,8 @@
#define LEDGER_ENTRY_DUPLICATE(...) EXPAND(LEDGER_ENTRY(__VA_ARGS__))
#endif
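
The EXPAND indirection referenced just above is the standard workaround for the traditional MSVC preprocessor, which otherwise forwards __VA_ARGS__ to the inner macro as a single argument. A stripped-down, self-contained illustration of the trick (the macro names here are illustrative, not the ones in this file):

// Minimal demonstration of the EXPAND workaround for classic MSVC.
#include <cstdio>

#define EXPAND(x) x
#define TAKE_THREE(a, b, c) std::printf("%d %d %d\n", a, b, c)

// Without EXPAND, the traditional MSVC preprocessor hands __VA_ARGS__ to
// TAKE_THREE as one argument; the extra rescan re-splits it correctly.
#define FORWARD(...) EXPAND(TAKE_THREE(__VA_ARGS__))

int main()
{
    FORWARD(1, 2, 3);  // prints "1 2 3" on conforming and classic MSVC alike
}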
LEDGER_ENTRIES_BEGIN
/**
* These objects are listed in order of increasing ledger type ID.
* There are many gaps between these IDs.
@@ -42,50 +60,50 @@
\sa keylet::nftoffer
*/
LEDGER_ENTRY(ltNFTOKEN_OFFER, 0x0037, NFTokenOffer, nft_offer, ({
{sfOwner, soeREQUIRED},
{sfNFTokenID, soeREQUIRED},
{sfAmount, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfNFTokenOfferNode, soeREQUIRED},
{sfDestination, soeOPTIONAL},
{sfExpiration, soeOPTIONAL},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltNFTOKEN_OFFER, 0x0037, NFTokenOffer, nft_offer, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfOwner, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfNFTokenID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAmount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfNFTokenOfferNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfDestination, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfExpiration, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** A ledger object which describes a check.
\sa keylet::check
*/
LEDGER_ENTRY(ltCHECK, 0x0043, Check, check, ({
{sfAccount, soeREQUIRED},
{sfDestination, soeREQUIRED},
{sfSendMax, soeREQUIRED},
{sfSequence, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfDestinationNode, soeREQUIRED},
{sfExpiration, soeOPTIONAL},
{sfInvoiceID, soeOPTIONAL},
{sfSourceTag, soeOPTIONAL},
{sfDestinationTag, soeOPTIONAL},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltCHECK, 0x0043, Check, check, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfDestination, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSendMax, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSequence, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfDestinationNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfExpiration, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfInvoiceID, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfSourceTag, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfDestinationTag, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** The ledger object which tracks the DID.
\sa keylet::did
*/
LEDGER_ENTRY(ltDID, 0x0049, DID, did, ({
{sfAccount, soeREQUIRED},
{sfDIDDocument, soeOPTIONAL},
{sfURI, soeOPTIONAL},
{sfData, soeOPTIONAL},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltDID, 0x0049, DID, did, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfDIDDocument, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfURI, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfData, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** The ledger object which tracks the current negative UNL state.
@@ -93,25 +111,25 @@ LEDGER_ENTRY(ltDID, 0x0049, DID, did, ({
\sa keylet::negativeUNL
*/
LEDGER_ENTRY(ltNEGATIVE_UNL, 0x004e, NegativeUNL, nunl, ({
{sfDisabledValidators, soeOPTIONAL},
{sfValidatorToDisable, soeOPTIONAL},
{sfValidatorToReEnable, soeOPTIONAL},
{sfPreviousTxnID, soeOPTIONAL},
{sfPreviousTxnLgrSeq, soeOPTIONAL},
}))
LEDGER_ENTRY(ltNEGATIVE_UNL, 0x004e, NegativeUNL, nunl, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfDisabledValidators, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfValidatorToDisable, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfValidatorToReEnable, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeOPTIONAL)
))
/** A ledger object which contains a list of NFTs
\sa keylet::nftpage_min, keylet::nftpage_max, keylet::nftpage
*/
LEDGER_ENTRY(ltNFTOKEN_PAGE, 0x0050, NFTokenPage, nft_page, ({
{sfPreviousPageMin, soeOPTIONAL},
{sfNextPageMin, soeOPTIONAL},
{sfNFTokens, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltNFTOKEN_PAGE, 0x0050, NFTokenPage, nft_page, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfPreviousPageMin, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfNextPageMin, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfNFTokens, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** A ledger object which contains a signer list for an account.
@@ -119,78 +137,78 @@ LEDGER_ENTRY(ltNFTOKEN_PAGE, 0x0050, NFTokenPage, nft_page, ({
*/
// All fields are soeREQUIRED because there is always a SignerEntries.
// If there are no SignerEntries the node is deleted.
LEDGER_ENTRY(ltSIGNER_LIST, 0x0053, SignerList, signer_list, ({
{sfOwner, soeOPTIONAL},
{sfOwnerNode, soeREQUIRED},
{sfSignerQuorum, soeREQUIRED},
{sfSignerEntries, soeREQUIRED},
{sfSignerListID, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltSIGNER_LIST, 0x0053, SignerList, signer_list, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfOwner, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSignerQuorum, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSignerEntries, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSignerListID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** A ledger object which describes a ticket.
\sa keylet::ticket
*/
LEDGER_ENTRY(ltTICKET, 0x0054, Ticket, ticket, ({
{sfAccount, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfTicketSequence, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltTICKET, 0x0054, Ticket, ticket, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfTicketSequence, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** A ledger object which describes an account.
\sa keylet::account
*/
LEDGER_ENTRY(ltACCOUNT_ROOT, 0x0061, AccountRoot, account, ({
{sfAccount, soeREQUIRED},
{sfSequence, soeREQUIRED},
{sfBalance, soeREQUIRED},
{sfOwnerCount, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfAccountTxnID, soeOPTIONAL},
{sfRegularKey, soeOPTIONAL},
{sfEmailHash, soeOPTIONAL},
{sfWalletLocator, soeOPTIONAL},
{sfWalletSize, soeOPTIONAL},
{sfMessageKey, soeOPTIONAL},
{sfTransferRate, soeOPTIONAL},
{sfDomain, soeOPTIONAL},
{sfTickSize, soeOPTIONAL},
{sfTicketCount, soeOPTIONAL},
{sfNFTokenMinter, soeOPTIONAL},
{sfMintedNFTokens, soeDEFAULT},
{sfBurnedNFTokens, soeDEFAULT},
{sfFirstNFTokenSequence, soeOPTIONAL},
{sfAMMID, soeOPTIONAL}, // pseudo-account designator
{sfVaultID, soeOPTIONAL}, // pseudo-account designator
}))
LEDGER_ENTRY(ltACCOUNT_ROOT, 0x0061, AccountRoot, account, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSequence, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfBalance, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwnerCount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAccountTxnID, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfRegularKey, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfEmailHash, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfWalletLocator, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfWalletSize, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfMessageKey, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfTransferRate, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfDomain, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfTickSize, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfTicketCount, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfNFTokenMinter, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfMintedNFTokens, soeDEFAULT)
LEDGER_ENTRY_FIELD(sfBurnedNFTokens, soeDEFAULT)
LEDGER_ENTRY_FIELD(sfFirstNFTokenSequence, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfAMMID, soeOPTIONAL) // pseudo-account designator
LEDGER_ENTRY_FIELD(sfVaultID, soeOPTIONAL) // pseudo-account designator
))
/** A ledger object which contains a list of object identifiers.
\sa keylet::page, keylet::quality, keylet::book, keylet::next and
keylet::ownerDir
*/
LEDGER_ENTRY(ltDIR_NODE, 0x0064, DirectoryNode, directory, ({
{sfOwner, soeOPTIONAL}, // for owner directories
{sfTakerPaysCurrency, soeOPTIONAL}, // order book directories
{sfTakerPaysIssuer, soeOPTIONAL}, // order book directories
{sfTakerGetsCurrency, soeOPTIONAL}, // order book directories
{sfTakerGetsIssuer, soeOPTIONAL}, // order book directories
{sfExchangeRate, soeOPTIONAL}, // order book directories
{sfIndexes, soeREQUIRED},
{sfRootIndex, soeREQUIRED},
{sfIndexNext, soeOPTIONAL},
{sfIndexPrevious, soeOPTIONAL},
{sfNFTokenID, soeOPTIONAL},
{sfPreviousTxnID, soeOPTIONAL},
{sfPreviousTxnLgrSeq, soeOPTIONAL},
{sfDomainID, soeOPTIONAL} // order book directories
}))
LEDGER_ENTRY(ltDIR_NODE, 0x0064, DirectoryNode, directory, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfOwner, soeOPTIONAL) // for owner directories
LEDGER_ENTRY_FIELD(sfTakerPaysCurrency, soeOPTIONAL) // order book directories
LEDGER_ENTRY_FIELD(sfTakerPaysIssuer, soeOPTIONAL) // order book directories
LEDGER_ENTRY_FIELD(sfTakerGetsCurrency, soeOPTIONAL) // order book directories
LEDGER_ENTRY_FIELD(sfTakerGetsIssuer, soeOPTIONAL) // order book directories
LEDGER_ENTRY_FIELD(sfExchangeRate, soeOPTIONAL) // order book directories
LEDGER_ENTRY_FIELD(sfIndexes, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfRootIndex, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfIndexNext, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfIndexPrevious, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfNFTokenID, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfDomainID, soeOPTIONAL) // order book directories
))
/** The ledger object which lists details about amendments on the network.
@@ -198,12 +216,12 @@ LEDGER_ENTRY(ltDIR_NODE, 0x0064, DirectoryNode, directory, ({
\sa keylet::amendments
*/
LEDGER_ENTRY(ltAMENDMENTS, 0x0066, Amendments, amendments, ({
{sfAmendments, soeOPTIONAL}, // Enabled
{sfMajorities, soeOPTIONAL},
{sfPreviousTxnID, soeOPTIONAL},
{sfPreviousTxnLgrSeq, soeOPTIONAL},
}))
LEDGER_ENTRY(ltAMENDMENTS, 0x0066, Amendments, amendments, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAmendments, soeOPTIONAL) // Enabled
LEDGER_ENTRY_FIELD(sfMajorities, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeOPTIONAL)
))
/** A ledger object that contains a list of ledger hashes.
@@ -213,76 +231,76 @@ LEDGER_ENTRY(ltAMENDMENTS, 0x0066, Amendments, amendments, ({
\sa keylet::skip
*/
LEDGER_ENTRY(ltLEDGER_HASHES, 0x0068, LedgerHashes, hashes, ({
{sfFirstLedgerSequence, soeOPTIONAL},
{sfLastLedgerSequence, soeOPTIONAL},
{sfHashes, soeREQUIRED},
}))
LEDGER_ENTRY(ltLEDGER_HASHES, 0x0068, LedgerHashes, hashes, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfFirstLedgerSequence, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfLastLedgerSequence, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfHashes, soeREQUIRED)
))
/** The ledger object which lists details about sidechains.
\sa keylet::bridge
*/
LEDGER_ENTRY(ltBRIDGE, 0x0069, Bridge, bridge, ({
{sfAccount, soeREQUIRED},
{sfSignatureReward, soeREQUIRED},
{sfMinAccountCreateAmount, soeOPTIONAL},
{sfXChainBridge, soeREQUIRED},
{sfXChainClaimID, soeREQUIRED},
{sfXChainAccountCreateCount, soeREQUIRED},
{sfXChainAccountClaimCount, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltBRIDGE, 0x0069, Bridge, bridge, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSignatureReward, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfMinAccountCreateAmount, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfXChainBridge, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfXChainClaimID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfXChainAccountCreateCount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfXChainAccountClaimCount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** A ledger object which describes an offer on the DEX.
\sa keylet::offer
*/
LEDGER_ENTRY(ltOFFER, 0x006f, Offer, offer, ({
{sfAccount, soeREQUIRED},
{sfSequence, soeREQUIRED},
{sfTakerPays, soeREQUIRED},
{sfTakerGets, soeREQUIRED},
{sfBookDirectory, soeREQUIRED},
{sfBookNode, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfExpiration, soeOPTIONAL},
{sfDomainID, soeOPTIONAL},
{sfAdditionalBooks, soeOPTIONAL},
}))
LEDGER_ENTRY(ltOFFER, 0x006f, Offer, offer, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSequence, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfTakerPays, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfTakerGets, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfBookDirectory, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfBookNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfExpiration, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfDomainID, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfAdditionalBooks, soeOPTIONAL)
))
/** A ledger object which describes a deposit preauthorization.
\sa keylet::depositPreauth
*/
LEDGER_ENTRY_DUPLICATE(ltDEPOSIT_PREAUTH, 0x0070, DepositPreauth, deposit_preauth, ({
{sfAccount, soeREQUIRED},
{sfAuthorize, soeOPTIONAL},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfAuthorizeCredentials, soeOPTIONAL},
}))
LEDGER_ENTRY_DUPLICATE(ltDEPOSIT_PREAUTH, 0x0070, DepositPreauth, deposit_preauth, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAuthorize, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAuthorizeCredentials, soeOPTIONAL)
))
/** A claim id for a cross chain transaction.
\sa keylet::xChainClaimID
*/
LEDGER_ENTRY(ltXCHAIN_OWNED_CLAIM_ID, 0x0071, XChainOwnedClaimID, xchain_owned_claim_id, ({
{sfAccount, soeREQUIRED},
{sfXChainBridge, soeREQUIRED},
{sfXChainClaimID, soeREQUIRED},
{sfOtherChainSource, soeREQUIRED},
{sfXChainClaimAttestations, soeREQUIRED},
{sfSignatureReward, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltXCHAIN_OWNED_CLAIM_ID, 0x0071, XChainOwnedClaimID, xchain_owned_claim_id, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfXChainBridge, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfXChainClaimID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOtherChainSource, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfXChainClaimAttestations, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSignatureReward, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** A ledger object which describes a bidirectional trust line.
@@ -290,19 +308,19 @@ LEDGER_ENTRY(ltXCHAIN_OWNED_CLAIM_ID, 0x0071, XChainOwnedClaimID, xchain_owned_c
\sa keylet::line
*/
LEDGER_ENTRY(ltRIPPLE_STATE, 0x0072, RippleState, state, ({
{sfBalance, soeREQUIRED},
{sfLowLimit, soeREQUIRED},
{sfHighLimit, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfLowNode, soeOPTIONAL},
{sfLowQualityIn, soeOPTIONAL},
{sfLowQualityOut, soeOPTIONAL},
{sfHighNode, soeOPTIONAL},
{sfHighQualityIn, soeOPTIONAL},
{sfHighQualityOut, soeOPTIONAL},
}))
LEDGER_ENTRY(ltRIPPLE_STATE, 0x0072, RippleState, state, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfBalance, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfLowLimit, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfHighLimit, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfLowNode, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfLowQualityIn, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfLowQualityOut, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfHighNode, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfHighQualityIn, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfHighQualityOut, soeOPTIONAL)
))
/** The ledger object which lists the network's fee settings.
@@ -310,204 +328,206 @@ LEDGER_ENTRY(ltRIPPLE_STATE, 0x0072, RippleState, state, ({
\sa keylet::fees
*/
LEDGER_ENTRY(ltFEE_SETTINGS, 0x0073, FeeSettings, fee, ({
LEDGER_ENTRY(ltFEE_SETTINGS, 0x0073, FeeSettings, fee, DEFINE_LEDGER_ENTRY_FIELDS(
// Old version uses raw numbers
{sfBaseFee, soeOPTIONAL},
{sfReferenceFeeUnits, soeOPTIONAL},
{sfReserveBase, soeOPTIONAL},
{sfReserveIncrement, soeOPTIONAL},
LEDGER_ENTRY_FIELD(sfBaseFee, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfReferenceFeeUnits, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfReserveBase, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfReserveIncrement, soeOPTIONAL)
// New version uses Amounts
{sfBaseFeeDrops, soeOPTIONAL},
{sfReserveBaseDrops, soeOPTIONAL},
{sfReserveIncrementDrops, soeOPTIONAL},
{sfPreviousTxnID, soeOPTIONAL},
{sfPreviousTxnLgrSeq, soeOPTIONAL},
}))
LEDGER_ENTRY_FIELD(sfBaseFeeDrops, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfReserveBaseDrops, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfReserveIncrementDrops, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeOPTIONAL)
))
/** A claim id for a cross chain create account transaction.
\sa keylet::xChainCreateAccountClaimID
*/
LEDGER_ENTRY(ltXCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID, 0x0074, XChainOwnedCreateAccountClaimID, xchain_owned_create_account_claim_id, ({
{sfAccount, soeREQUIRED},
{sfXChainBridge, soeREQUIRED},
{sfXChainAccountCreateCount, soeREQUIRED},
{sfXChainCreateAccountAttestations, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltXCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID, 0x0074, XChainOwnedCreateAccountClaimID, xchain_owned_create_account_claim_id, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfXChainBridge, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfXChainAccountCreateCount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfXChainCreateAccountAttestations, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** A ledger object describing a single escrow.
\sa keylet::escrow
*/
LEDGER_ENTRY(ltESCROW, 0x0075, Escrow, escrow, ({
{sfAccount, soeREQUIRED},
{sfSequence, soeOPTIONAL},
{sfDestination, soeREQUIRED},
{sfAmount, soeREQUIRED},
{sfCondition, soeOPTIONAL},
{sfCancelAfter, soeOPTIONAL},
{sfFinishAfter, soeOPTIONAL},
{sfSourceTag, soeOPTIONAL},
{sfDestinationTag, soeOPTIONAL},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfDestinationNode, soeOPTIONAL},
{sfTransferRate, soeOPTIONAL},
{sfIssuerNode, soeOPTIONAL},
}))
LEDGER_ENTRY(ltESCROW, 0x0075, Escrow, escrow, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSequence, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfDestination, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAmount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfCondition, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfCancelAfter, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfFinishAfter, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfSourceTag, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfDestinationTag, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfDestinationNode, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfTransferRate, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfIssuerNode, soeOPTIONAL)
))
/** A ledger object describing a single unidirectional XRP payment channel.
\sa keylet::payChan
*/
LEDGER_ENTRY(ltPAYCHAN, 0x0078, PayChannel, payment_channel, ({
{sfAccount, soeREQUIRED},
{sfDestination, soeREQUIRED},
{sfSequence, soeOPTIONAL},
{sfAmount, soeREQUIRED},
{sfBalance, soeREQUIRED},
{sfPublicKey, soeREQUIRED},
{sfSettleDelay, soeREQUIRED},
{sfExpiration, soeOPTIONAL},
{sfCancelAfter, soeOPTIONAL},
{sfSourceTag, soeOPTIONAL},
{sfDestinationTag, soeOPTIONAL},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfDestinationNode, soeOPTIONAL},
}))
LEDGER_ENTRY(ltPAYCHAN, 0x0078, PayChannel, payment_channel, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfDestination, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSequence, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfAmount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfBalance, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPublicKey, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSettleDelay, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfExpiration, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfCancelAfter, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfSourceTag, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfDestinationTag, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfDestinationNode, soeOPTIONAL)
))
/** The ledger object which tracks the AMM.
\sa keylet::amm
*/
LEDGER_ENTRY(ltAMM, 0x0079, AMM, amm, ({
{sfAccount, soeREQUIRED},
{sfTradingFee, soeDEFAULT},
{sfVoteSlots, soeOPTIONAL},
{sfAuctionSlot, soeOPTIONAL},
{sfLPTokenBalance, soeREQUIRED},
{sfAsset, soeREQUIRED},
{sfAsset2, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeOPTIONAL},
{sfPreviousTxnLgrSeq, soeOPTIONAL},
}))
LEDGER_ENTRY(ltAMM, 0x0079, AMM, amm, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfTradingFee, soeDEFAULT)
LEDGER_ENTRY_FIELD(sfVoteSlots, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfAuctionSlot, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfLPTokenBalance, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAsset, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAsset2, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeOPTIONAL)
))
/** A ledger object which tracks MPTokenIssuance
\sa keylet::mptIssuance
*/
LEDGER_ENTRY(ltMPTOKEN_ISSUANCE, 0x007e, MPTokenIssuance, mpt_issuance, ({
{sfIssuer, soeREQUIRED},
{sfSequence, soeREQUIRED},
{sfTransferFee, soeDEFAULT},
{sfOwnerNode, soeREQUIRED},
{sfAssetScale, soeDEFAULT},
{sfMaximumAmount, soeOPTIONAL},
{sfOutstandingAmount, soeREQUIRED},
{sfLockedAmount, soeOPTIONAL},
{sfMPTokenMetadata, soeOPTIONAL},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfDomainID, soeOPTIONAL},
{sfMutableFlags, soeDEFAULT},
}))
LEDGER_ENTRY(ltMPTOKEN_ISSUANCE, 0x007e, MPTokenIssuance, mpt_issuance, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfIssuer, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSequence, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfTransferFee, soeDEFAULT)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAssetScale, soeDEFAULT)
LEDGER_ENTRY_FIELD(sfMaximumAmount, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfOutstandingAmount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfLockedAmount, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfMPTokenMetadata, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfDomainID, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfMutableFlags, soeDEFAULT)
))
/** A ledger object which tracks MPToken
\sa keylet::mptoken
*/
LEDGER_ENTRY(ltMPTOKEN, 0x007f, MPToken, mptoken, ({
{sfAccount, soeREQUIRED},
{sfMPTokenIssuanceID, soeREQUIRED},
{sfMPTAmount, soeDEFAULT},
{sfLockedAmount, soeOPTIONAL},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltMPTOKEN, 0x007f, MPToken, mptoken, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfMPTokenIssuanceID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfMPTAmount, soeDEFAULT)
LEDGER_ENTRY_FIELD(sfLockedAmount, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** A ledger object which tracks Oracle
\sa keylet::oracle
*/
LEDGER_ENTRY(ltORACLE, 0x0080, Oracle, oracle, ({
{sfOwner, soeREQUIRED},
{sfOracleDocumentID, soeOPTIONAL},
{sfProvider, soeREQUIRED},
{sfPriceDataSeries, soeREQUIRED},
{sfAssetClass, soeREQUIRED},
{sfLastUpdateTime, soeREQUIRED},
{sfURI, soeOPTIONAL},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltORACLE, 0x0080, Oracle, oracle, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfOwner, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOracleDocumentID, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfProvider, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPriceDataSeries, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAssetClass, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfLastUpdateTime, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfURI, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** A ledger object which tracks Credential
\sa keylet::credential
*/
LEDGER_ENTRY(ltCREDENTIAL, 0x0081, Credential, credential, ({
{sfSubject, soeREQUIRED},
{sfIssuer, soeREQUIRED},
{sfCredentialType, soeREQUIRED},
{sfExpiration, soeOPTIONAL},
{sfURI, soeOPTIONAL},
{sfIssuerNode, soeREQUIRED},
{sfSubjectNode, soeOPTIONAL},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltCREDENTIAL, 0x0081, Credential, credential, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfSubject, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfIssuer, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfCredentialType, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfExpiration, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfURI, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfIssuerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSubjectNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** A ledger object which tracks PermissionedDomain
\sa keylet::permissionedDomain
*/
LEDGER_ENTRY(ltPERMISSIONED_DOMAIN, 0x0082, PermissionedDomain, permissioned_domain, ({
{sfOwner, soeREQUIRED},
{sfSequence, soeREQUIRED},
{sfAcceptedCredentials, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltPERMISSIONED_DOMAIN, 0x0082, PermissionedDomain, permissioned_domain, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfOwner, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSequence, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAcceptedCredentials, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** A ledger object representing permissions an account has delegated to another account.
\sa keylet::delegate
*/
LEDGER_ENTRY(ltDELEGATE, 0x0083, Delegate, delegate, ({
{sfAccount, soeREQUIRED},
{sfAuthorize, soeREQUIRED},
{sfPermissions, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
LEDGER_ENTRY(ltDELEGATE, 0x0083, Delegate, delegate, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAuthorize, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPermissions, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
))
/** A ledger object representing a single asset vault.
\sa keylet::vault
*/
LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfSequence, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfOwner, soeREQUIRED},
{sfAccount, soeREQUIRED},
{sfData, soeOPTIONAL},
{sfAsset, soeREQUIRED},
{sfAssetsTotal, soeREQUIRED},
{sfAssetsAvailable, soeREQUIRED},
{sfAssetsMaximum, soeDEFAULT},
{sfLossUnrealized, soeREQUIRED},
{sfShareMPTID, soeREQUIRED},
{sfWithdrawalPolicy, soeREQUIRED},
{sfScale, soeDEFAULT},
LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, DEFINE_LEDGER_ENTRY_FIELDS(
LEDGER_ENTRY_FIELD(sfPreviousTxnID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfPreviousTxnLgrSeq, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfSequence, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwnerNode, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfOwner, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAccount, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfData, soeOPTIONAL)
LEDGER_ENTRY_FIELD(sfAsset, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAssetsTotal, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAssetsAvailable, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfAssetsMaximum, soeDEFAULT)
LEDGER_ENTRY_FIELD(sfLossUnrealized, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfShareMPTID, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfWithdrawalPolicy, soeREQUIRED)
LEDGER_ENTRY_FIELD(sfScale, soeDEFAULT)
// no SharesTotal ever (use MPTIssuance.sfOutstandingAmount)
// no PermissionedDomainID ever (use MPTIssuance.sfDomainID)
}))
))
#undef EXPAND
#undef LEDGER_ENTRY_DUPLICATE
LEDGER_ENTRIES_END

View File

@@ -23,6 +23,9 @@
#if !defined(TYPED_SFIELD)
#error "undefined macro: TYPED_SFIELD"
#endif
#if !defined(ARRAY_SFIELD)
#error "undefined macro: ARRAY_SFIELD"
#endif
// untyped
UNTYPED_SFIELD(sfLedgerEntry, LEDGERENTRY, 257)
@@ -174,8 +177,7 @@ TYPED_SFIELD(sfNFTokenID, UINT256, 10)
TYPED_SFIELD(sfEmitParentTxnID, UINT256, 11)
TYPED_SFIELD(sfEmitNonce, UINT256, 12)
TYPED_SFIELD(sfEmitHookHash, UINT256, 13)
TYPED_SFIELD(sfAMMID, UINT256, 14,
SField::sMD_PseudoAccount | SField::sMD_Default)
TYPED_SFIELD(sfAMMID, UINT256, 14)
// 256-bit (uncommon)
TYPED_SFIELD(sfBookDirectory, UINT256, 16)
@@ -197,8 +199,7 @@ TYPED_SFIELD(sfHookHash, UINT256, 31)
TYPED_SFIELD(sfHookNamespace, UINT256, 32)
TYPED_SFIELD(sfHookSetTxnID, UINT256, 33)
TYPED_SFIELD(sfDomainID, UINT256, 34)
TYPED_SFIELD(sfVaultID, UINT256, 35,
SField::sMD_PseudoAccount | SField::sMD_Default)
TYPED_SFIELD(sfVaultID, UINT256, 35)
TYPED_SFIELD(sfParentBatchID, UINT256, 36)
// number (common)
@@ -208,12 +209,6 @@ TYPED_SFIELD(sfAssetsMaximum, NUMBER, 3)
TYPED_SFIELD(sfAssetsTotal, NUMBER, 4)
TYPED_SFIELD(sfLossUnrealized, NUMBER, 5)
// int32
// NOTE: Do not use `sfDummyInt32`. It's so far the only use of INT32
// in this file and has been defined here for test only.
// TODO: Replace `sfDummyInt32` with actually useful field.
TYPED_SFIELD(sfDummyInt32, INT32, 1) // for tests only
// currency amount (common)
TYPED_SFIELD(sfAmount, AMOUNT, 1)
TYPED_SFIELD(sfBalance, AMOUNT, 2)
@@ -375,33 +370,33 @@ UNTYPED_SFIELD(sfBook, OBJECT, 36)
// array of objects (common)
// ARRAY/1 is reserved for end of array
// sfSigningAccounts has never been used.
//UNTYPED_SFIELD(sfSigningAccounts, ARRAY, 2)
UNTYPED_SFIELD(sfSigners, ARRAY, 3, SField::sMD_Default, SField::notSigning)
UNTYPED_SFIELD(sfSignerEntries, ARRAY, 4)
UNTYPED_SFIELD(sfTemplate, ARRAY, 5)
UNTYPED_SFIELD(sfNecessary, ARRAY, 6)
UNTYPED_SFIELD(sfSufficient, ARRAY, 7)
UNTYPED_SFIELD(sfAffectedNodes, ARRAY, 8)
UNTYPED_SFIELD(sfMemos, ARRAY, 9)
UNTYPED_SFIELD(sfNFTokens, ARRAY, 10)
UNTYPED_SFIELD(sfHooks, ARRAY, 11)
UNTYPED_SFIELD(sfVoteSlots, ARRAY, 12)
UNTYPED_SFIELD(sfAdditionalBooks, ARRAY, 13)
//UNTYPED_SFIELD(sfSigningAccounts, sfInvalid, ARRAY, 2)
ARRAY_SFIELD(sfSigners, sfSigner, ARRAY, 3, SField::sMD_Default, SField::notSigning)
ARRAY_SFIELD(sfSignerEntries, sfSignerEntry, ARRAY, 4)
ARRAY_SFIELD(sfTemplate, sfInvalid, ARRAY, 5)
ARRAY_SFIELD(sfNecessary, sfInvalid, ARRAY, 6)
ARRAY_SFIELD(sfSufficient, sfInvalid, ARRAY, 7)
ARRAY_SFIELD(sfAffectedNodes, sfInvalid, ARRAY, 8)
ARRAY_SFIELD(sfMemos, sfInvalid, ARRAY, 9)
ARRAY_SFIELD(sfNFTokens, sfNFToken, ARRAY, 10)
ARRAY_SFIELD(sfHooks, sfInvalid, ARRAY, 11)
ARRAY_SFIELD(sfVoteSlots, sfVoteEntry, ARRAY, 12)
ARRAY_SFIELD(sfAdditionalBooks, sfInvalid, ARRAY, 13)
// array of objects (uncommon)
UNTYPED_SFIELD(sfMajorities, ARRAY, 16)
UNTYPED_SFIELD(sfDisabledValidators, ARRAY, 17)
UNTYPED_SFIELD(sfHookExecutions, ARRAY, 18)
UNTYPED_SFIELD(sfHookParameters, ARRAY, 19)
UNTYPED_SFIELD(sfHookGrants, ARRAY, 20)
UNTYPED_SFIELD(sfXChainClaimAttestations, ARRAY, 21)
UNTYPED_SFIELD(sfXChainCreateAccountAttestations, ARRAY, 22)
// 23 unused
UNTYPED_SFIELD(sfPriceDataSeries, ARRAY, 24)
UNTYPED_SFIELD(sfAuthAccounts, ARRAY, 25)
UNTYPED_SFIELD(sfAuthorizeCredentials, ARRAY, 26)
UNTYPED_SFIELD(sfUnauthorizeCredentials, ARRAY, 27)
UNTYPED_SFIELD(sfAcceptedCredentials, ARRAY, 28)
UNTYPED_SFIELD(sfPermissions, ARRAY, 29)
UNTYPED_SFIELD(sfRawTransactions, ARRAY, 30)
UNTYPED_SFIELD(sfBatchSigners, ARRAY, 31, SField::sMD_Default, SField::notSigning)
ARRAY_SFIELD(sfMajorities, sfMajority, ARRAY, 16)
ARRAY_SFIELD(sfDisabledValidators, sfDisabledValidator, ARRAY, 17)
ARRAY_SFIELD(sfHookExecutions, sfInvalid, ARRAY, 18)
ARRAY_SFIELD(sfHookParameters, sfInvalid, ARRAY, 19)
ARRAY_SFIELD(sfHookGrants, sfInvalid, ARRAY, 20)
ARRAY_SFIELD(sfXChainClaimAttestations, sfInvalid, ARRAY, 21)
ARRAY_SFIELD(sfXChainCreateAccountAttestations, sfInvalid, ARRAY, 22)
// 23 unused
ARRAY_SFIELD(sfPriceDataSeries, sfPriceData, ARRAY, 24)
ARRAY_SFIELD(sfAuthAccounts, sfAuthAccount, ARRAY, 25)
ARRAY_SFIELD(sfAuthorizeCredentials, sfInvalid, ARRAY, 26)
ARRAY_SFIELD(sfUnauthorizeCredentials, sfInvalid, ARRAY, 27)
ARRAY_SFIELD(sfAcceptedCredentials, sfInvalid, ARRAY, 28)
ARRAY_SFIELD(sfPermissions, sfPermission, ARRAY, 29)
ARRAY_SFIELD(sfRawTransactions, sfInvalid, ARRAY, 30)
ARRAY_SFIELD(sfBatchSigners, sfBatchSigner, ARRAY, 31, SField::sMD_Default, SField::notSigning)

View File

@@ -22,31 +22,16 @@
#endif
/**
* TRANSACTION(tag, value, name, delegatable, amendments, privileges, fields)
*
* To ease maintenance, you may replace any unneeded values with "..."
* e.g. #define TRANSACTION(tag, value, name, ...)
* TRANSACTION(tag, value, name, delegatable, amendments, fields)
*
* You must define a transactor class in the `ripple` namespace named `name`,
* and include its header alongside the TRANSACTOR definition using this
* format:
* #if TRANSACTION_INCLUDE
* # include <xrpld/app/tx/detail/HEADER.h>
* #endif
*
* The `privileges` parameter of the TRANSACTION macro is a bitfield
* defining which operations the transaction can perform.
* The values are defined and used in InvariantCheck.cpp
* and include its header in `src/xrpld/app/tx/detail/applySteps.cpp`.
*/
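
A compact, self-contained illustration of the consumption pattern this comment describes: the including code defines TRANSACTION to keep only the pieces it needs (here the tag and value, for an enum) and swallows the remaining parameters with "...". The MY_TRANSACTIONS list is a stand-in for including the real macro file, and its two entries are copied from this diff purely for flavor:

// Sketch of the X-macro consumption contract described in the comment above.
#include <cstdint>
#include <iostream>

#define MY_TRANSACTIONS(TRANSACTION)                                       \
    TRANSACTION(ttPAYMENT, 0, Payment, delegatable, none, ({}))            \
    TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate, delegatable, none, ({}))

// Keep tag and value; ignore delegatable/amendments/fields via "...".
#define TRANSACTION(tag, value, name, ...) tag = value,
enum TxType : std::uint16_t { MY_TRANSACTIONS(TRANSACTION) };
#undef TRANSACTION

int main()
{
    std::cout << ttPAYMENT << ' ' << ttESCROW_CREATE << '\n';  // prints "0 1"
}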
/** This transaction type executes a payment. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/Payment.h>
#endif
TRANSACTION(ttPAYMENT, 0, Payment,
Delegation::delegatable,
uint256{},
createAcct,
({
{sfDestination, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported},
@@ -60,13 +45,9 @@ TRANSACTION(ttPAYMENT, 0, Payment,
}))
/** This transaction type creates an escrow object. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/Escrow.h>
#endif
TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate,
Delegation::delegatable,
uint256{},
noPriv,
({
{sfDestination, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported},
@@ -80,7 +61,6 @@ TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate,
TRANSACTION(ttESCROW_FINISH, 2, EscrowFinish,
Delegation::delegatable,
uint256{},
noPriv,
({
{sfOwner, soeREQUIRED},
{sfOfferSequence, soeREQUIRED},
@@ -91,13 +71,9 @@ TRANSACTION(ttESCROW_FINISH, 2, EscrowFinish,
/** This transaction type adjusts various account settings. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/SetAccount.h>
#endif
TRANSACTION(ttACCOUNT_SET, 3, AccountSet,
Delegation::notDelegatable,
uint256{},
noPriv,
({
{sfEmailHash, soeOPTIONAL},
{sfWalletLocator, soeOPTIONAL},
@@ -112,26 +88,18 @@ TRANSACTION(ttACCOUNT_SET, 3, AccountSet,
}))
/** This transaction type cancels an existing escrow. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/Escrow.h>
#endif
TRANSACTION(ttESCROW_CANCEL, 4, EscrowCancel,
Delegation::delegatable,
uint256{},
noPriv,
({
{sfOwner, soeREQUIRED},
{sfOfferSequence, soeREQUIRED},
}))
/** This transaction type sets or clears an account's "regular key". */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/SetRegularKey.h>
#endif
TRANSACTION(ttREGULAR_KEY_SET, 5, SetRegularKey,
Delegation::notDelegatable,
uint256{},
noPriv,
({
{sfRegularKey, soeOPTIONAL},
}))
@@ -139,13 +107,9 @@ TRANSACTION(ttREGULAR_KEY_SET, 5, SetRegularKey,
// 6 deprecated
/** This transaction type creates an offer to trade one asset for another. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/CreateOffer.h>
#endif
TRANSACTION(ttOFFER_CREATE, 7, OfferCreate,
Delegation::delegatable,
uint256{},
noPriv,
({
{sfTakerPays, soeREQUIRED},
{sfTakerGets, soeREQUIRED},
@@ -155,13 +119,9 @@ TRANSACTION(ttOFFER_CREATE, 7, OfferCreate,
}))
/** This transaction type cancels existing offers to trade one asset for another. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/CancelOffer.h>
#endif
TRANSACTION(ttOFFER_CANCEL, 8, OfferCancel,
Delegation::delegatable,
uint256{},
noPriv,
({
{sfOfferSequence, soeREQUIRED},
}))
@@ -169,13 +129,9 @@ TRANSACTION(ttOFFER_CANCEL, 8, OfferCancel,
// 9 deprecated
/** This transaction type creates a new set of tickets. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/CreateTicket.h>
#endif
TRANSACTION(ttTICKET_CREATE, 10, TicketCreate,
Delegation::delegatable,
featureTicketBatch,
noPriv,
({
{sfTicketCount, soeREQUIRED},
}))
@@ -185,26 +141,18 @@ TRANSACTION(ttTICKET_CREATE, 10, TicketCreate,
/** This transaction type modifies the signer list associated with an account. */
// The SignerEntries are optional because a SignerList is deleted by
// setting the SignerQuorum to zero and omitting SignerEntries.
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/SetSignerList.h>
#endif
TRANSACTION(ttSIGNER_LIST_SET, 12, SignerListSet,
Delegation::notDelegatable,
uint256{},
noPriv,
({
{sfSignerQuorum, soeREQUIRED},
{sfSignerEntries, soeOPTIONAL},
}))
/** This transaction type creates a new unidirectional XRP payment channel. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/PayChan.h>
#endif
TRANSACTION(ttPAYCHAN_CREATE, 13, PaymentChannelCreate,
Delegation::delegatable,
uint256{},
noPriv,
({
{sfDestination, soeREQUIRED},
{sfAmount, soeREQUIRED},
@@ -218,7 +166,6 @@ TRANSACTION(ttPAYCHAN_CREATE, 13, PaymentChannelCreate,
TRANSACTION(ttPAYCHAN_FUND, 14, PaymentChannelFund,
Delegation::delegatable,
uint256{},
noPriv,
({
{sfChannel, soeREQUIRED},
{sfAmount, soeREQUIRED},
@@ -229,7 +176,6 @@ TRANSACTION(ttPAYCHAN_FUND, 14, PaymentChannelFund,
TRANSACTION(ttPAYCHAN_CLAIM, 15, PaymentChannelClaim,
Delegation::delegatable,
uint256{},
noPriv,
({
{sfChannel, soeREQUIRED},
{sfAmount, soeOPTIONAL},
@@ -240,13 +186,9 @@ TRANSACTION(ttPAYCHAN_CLAIM, 15, PaymentChannelClaim,
}))
/** This transaction type creates a new check. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/CreateCheck.h>
#endif
TRANSACTION(ttCHECK_CREATE, 16, CheckCreate,
Delegation::delegatable,
featureChecks,
noPriv,
({
{sfDestination, soeREQUIRED},
{sfSendMax, soeREQUIRED},
@@ -256,13 +198,9 @@ TRANSACTION(ttCHECK_CREATE, 16, CheckCreate,
}))
/** This transaction type cashes an existing check. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/CashCheck.h>
#endif
TRANSACTION(ttCHECK_CASH, 17, CheckCash,
Delegation::delegatable,
featureChecks,
noPriv,
({
{sfCheckID, soeREQUIRED},
{sfAmount, soeOPTIONAL},
@@ -270,25 +208,17 @@ TRANSACTION(ttCHECK_CASH, 17, CheckCash,
}))
/** This transaction type cancels an existing check. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/CancelCheck.h>
#endif
TRANSACTION(ttCHECK_CANCEL, 18, CheckCancel,
Delegation::delegatable,
featureChecks,
noPriv,
({
{sfCheckID, soeREQUIRED},
}))
/** This transaction type grants or revokes authorization to transfer funds. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/DepositPreauth.h>
#endif
TRANSACTION(ttDEPOSIT_PREAUTH, 19, DepositPreauth,
Delegation::delegatable,
featureDepositPreauth,
noPriv,
({
{sfAuthorize, soeOPTIONAL},
{sfUnauthorize, soeOPTIONAL},
@@ -297,13 +227,9 @@ TRANSACTION(ttDEPOSIT_PREAUTH, 19, DepositPreauth,
}))
/** This transaction type modifies a trustline between two accounts. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/SetTrust.h>
#endif
TRANSACTION(ttTRUST_SET, 20, TrustSet,
Delegation::delegatable,
uint256{},
noPriv,
({
{sfLimitAmount, soeOPTIONAL},
{sfQualityIn, soeOPTIONAL},
@@ -311,13 +237,9 @@ TRANSACTION(ttTRUST_SET, 20, TrustSet,
}))
/** This transaction type deletes an existing account. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/DeleteAccount.h>
#endif
TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete,
Delegation::notDelegatable,
uint256{},
mustDeleteAcct,
({
{sfDestination, soeREQUIRED},
{sfDestinationTag, soeOPTIONAL},
@@ -327,13 +249,9 @@ TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete,
// 22 reserved
/** This transaction mints a new NFT. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/NFTokenMint.h>
#endif
TRANSACTION(ttNFTOKEN_MINT, 25, NFTokenMint,
Delegation::delegatable,
featureNonFungibleTokensV1,
changeNFTCounts,
({
{sfNFTokenTaxon, soeREQUIRED},
{sfTransferFee, soeOPTIONAL},
@@ -345,26 +263,18 @@ TRANSACTION(ttNFTOKEN_MINT, 25, NFTokenMint,
}))
/** This transaction burns (i.e. destroys) an existing NFT. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/NFTokenBurn.h>
#endif
TRANSACTION(ttNFTOKEN_BURN, 26, NFTokenBurn,
Delegation::delegatable,
featureNonFungibleTokensV1,
changeNFTCounts,
({
{sfNFTokenID, soeREQUIRED},
{sfOwner, soeOPTIONAL},
}))
/** This transaction creates a new offer to buy or sell an NFT. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/NFTokenCreateOffer.h>
#endif
TRANSACTION(ttNFTOKEN_CREATE_OFFER, 27, NFTokenCreateOffer,
Delegation::delegatable,
featureNonFungibleTokensV1,
noPriv,
({
{sfNFTokenID, soeREQUIRED},
{sfAmount, soeREQUIRED},
@@ -374,25 +284,17 @@ TRANSACTION(ttNFTOKEN_CREATE_OFFER, 27, NFTokenCreateOffer,
}))
/** This transaction cancels an existing offer to buy or sell an existing NFT. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/NFTokenCancelOffer.h>
#endif
TRANSACTION(ttNFTOKEN_CANCEL_OFFER, 28, NFTokenCancelOffer,
Delegation::delegatable,
featureNonFungibleTokensV1,
noPriv,
({
{sfNFTokenOffers, soeREQUIRED},
}))
/** This transaction accepts an existing offer to buy or sell an existing NFT. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/NFTokenAcceptOffer.h>
#endif
TRANSACTION(ttNFTOKEN_ACCEPT_OFFER, 29, NFTokenAcceptOffer,
Delegation::delegatable,
featureNonFungibleTokensV1,
noPriv,
({
{sfNFTokenBuyOffer, soeOPTIONAL},
{sfNFTokenSellOffer, soeOPTIONAL},
@@ -400,26 +302,18 @@ TRANSACTION(ttNFTOKEN_ACCEPT_OFFER, 29, NFTokenAcceptOffer,
}))
/** This transaction claws back issued tokens. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/Clawback.h>
#endif
TRANSACTION(ttCLAWBACK, 30, Clawback,
Delegation::delegatable,
featureClawback,
noPriv,
({
{sfAmount, soeREQUIRED, soeMPTSupported},
{sfHolder, soeOPTIONAL},
}))
/** This transaction claws back tokens from an AMM pool. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/AMMClawback.h>
#endif
TRANSACTION(ttAMM_CLAWBACK, 31, AMMClawback,
Delegation::delegatable,
featureAMMClawback,
mayDeleteAcct | overrideFreeze,
({
{sfHolder, soeREQUIRED},
{sfAsset, soeREQUIRED},
@@ -428,13 +322,9 @@ TRANSACTION(ttAMM_CLAWBACK, 31, AMMClawback,
}))
/** This transaction type creates an AMM instance */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/AMMCreate.h>
#endif
TRANSACTION(ttAMM_CREATE, 35, AMMCreate,
Delegation::delegatable,
featureAMM,
createPseudoAcct,
({
{sfAmount, soeREQUIRED},
{sfAmount2, soeREQUIRED},
@@ -442,13 +332,9 @@ TRANSACTION(ttAMM_CREATE, 35, AMMCreate,
}))
/** This transaction type deposits into an AMM instance */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/AMMDeposit.h>
#endif
TRANSACTION(ttAMM_DEPOSIT, 36, AMMDeposit,
Delegation::delegatable,
featureAMM,
noPriv,
({
{sfAsset, soeREQUIRED},
{sfAsset2, soeREQUIRED},
@@ -460,13 +346,9 @@ TRANSACTION(ttAMM_DEPOSIT, 36, AMMDeposit,
}))
/** This transaction type withdraws from an AMM instance */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/AMMWithdraw.h>
#endif
TRANSACTION(ttAMM_WITHDRAW, 37, AMMWithdraw,
Delegation::delegatable,
featureAMM,
mayDeleteAcct,
({
{sfAsset, soeREQUIRED},
{sfAsset2, soeREQUIRED},
@@ -477,13 +359,9 @@ TRANSACTION(ttAMM_WITHDRAW, 37, AMMWithdraw,
}))
/** This transaction type votes for the trading fee */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/AMMVote.h>
#endif
TRANSACTION(ttAMM_VOTE, 38, AMMVote,
Delegation::delegatable,
featureAMM,
noPriv,
({
{sfAsset, soeREQUIRED},
{sfAsset2, soeREQUIRED},
@@ -491,13 +369,9 @@ TRANSACTION(ttAMM_VOTE, 38, AMMVote,
}))
/** This transaction type bids for the auction slot */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/AMMBid.h>
#endif
TRANSACTION(ttAMM_BID, 39, AMMBid,
Delegation::delegatable,
featureAMM,
noPriv,
({
{sfAsset, soeREQUIRED},
{sfAsset2, soeREQUIRED},
@@ -507,26 +381,18 @@ TRANSACTION(ttAMM_BID, 39, AMMBid,
}))
/** This transaction type deletes AMM in the empty state */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/AMMDelete.h>
#endif
TRANSACTION(ttAMM_DELETE, 40, AMMDelete,
Delegation::delegatable,
featureAMM,
mustDeleteAcct,
({
{sfAsset, soeREQUIRED},
{sfAsset2, soeREQUIRED},
}))
/** This transaction creates a cross-chain sequence number */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/XChainBridge.h>
#endif
TRANSACTION(ttXCHAIN_CREATE_CLAIM_ID, 41, XChainCreateClaimID,
Delegation::delegatable,
featureXChainBridge,
noPriv,
({
{sfXChainBridge, soeREQUIRED},
{sfSignatureReward, soeREQUIRED},
@@ -537,7 +403,6 @@ TRANSACTION(ttXCHAIN_CREATE_CLAIM_ID, 41, XChainCreateClaimID,
TRANSACTION(ttXCHAIN_COMMIT, 42, XChainCommit,
Delegation::delegatable,
featureXChainBridge,
noPriv,
({
{sfXChainBridge, soeREQUIRED},
{sfXChainClaimID, soeREQUIRED},
@@ -549,7 +414,6 @@ TRANSACTION(ttXCHAIN_COMMIT, 42, XChainCommit,
TRANSACTION(ttXCHAIN_CLAIM, 43, XChainClaim,
Delegation::delegatable,
featureXChainBridge,
noPriv,
({
{sfXChainBridge, soeREQUIRED},
{sfXChainClaimID, soeREQUIRED},
@@ -562,7 +426,6 @@ TRANSACTION(ttXCHAIN_CLAIM, 43, XChainClaim,
TRANSACTION(ttXCHAIN_ACCOUNT_CREATE_COMMIT, 44, XChainAccountCreateCommit,
Delegation::delegatable,
featureXChainBridge,
noPriv,
({
{sfXChainBridge, soeREQUIRED},
{sfDestination, soeREQUIRED},
@@ -574,7 +437,6 @@ TRANSACTION(ttXCHAIN_ACCOUNT_CREATE_COMMIT, 44, XChainAccountCreateCommit,
TRANSACTION(ttXCHAIN_ADD_CLAIM_ATTESTATION, 45, XChainAddClaimAttestation,
Delegation::delegatable,
featureXChainBridge,
createAcct,
({
{sfXChainBridge, soeREQUIRED},
@@ -591,11 +453,9 @@ TRANSACTION(ttXCHAIN_ADD_CLAIM_ATTESTATION, 45, XChainAddClaimAttestation,
}))
/** This transaction adds an attestation to an account */
TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46,
XChainAddAccountCreateAttestation,
TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46, XChainAddAccountCreateAttestation,
Delegation::delegatable,
featureXChainBridge,
createAcct,
({
{sfXChainBridge, soeREQUIRED},
@@ -616,7 +476,6 @@ TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46,
TRANSACTION(ttXCHAIN_MODIFY_BRIDGE, 47, XChainModifyBridge,
Delegation::delegatable,
featureXChainBridge,
noPriv,
({
{sfXChainBridge, soeREQUIRED},
{sfSignatureReward, soeOPTIONAL},
@@ -627,7 +486,6 @@ TRANSACTION(ttXCHAIN_MODIFY_BRIDGE, 47, XChainModifyBridge,
TRANSACTION(ttXCHAIN_CREATE_BRIDGE, 48, XChainCreateBridge,
Delegation::delegatable,
featureXChainBridge,
noPriv,
({
{sfXChainBridge, soeREQUIRED},
{sfSignatureReward, soeREQUIRED},
@@ -635,13 +493,9 @@ TRANSACTION(ttXCHAIN_CREATE_BRIDGE, 48, XChainCreateBridge,
}))
/** This transaction type creates or updates a DID */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/DID.h>
#endif
TRANSACTION(ttDID_SET, 49, DIDSet,
Delegation::delegatable,
featureDID,
noPriv,
({
{sfDIDDocument, soeOPTIONAL},
{sfURI, soeOPTIONAL},
@@ -652,17 +506,12 @@ TRANSACTION(ttDID_SET, 49, DIDSet,
TRANSACTION(ttDID_DELETE, 50, DIDDelete,
Delegation::delegatable,
featureDID,
noPriv,
({}))
/** This transaction type creates an Oracle instance */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/SetOracle.h>
#endif
TRANSACTION(ttORACLE_SET, 51, OracleSet,
Delegation::delegatable,
featurePriceOracle,
noPriv,
({
{sfOracleDocumentID, soeREQUIRED},
{sfProvider, soeOPTIONAL},
@@ -673,38 +522,26 @@ TRANSACTION(ttORACLE_SET, 51, OracleSet,
}))
/** This transaction type deletes an Oracle instance */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/DeleteOracle.h>
#endif
TRANSACTION(ttORACLE_DELETE, 52, OracleDelete,
Delegation::delegatable,
featurePriceOracle,
noPriv,
({
{sfOracleDocumentID, soeREQUIRED},
}))
/** This transaction type fixes a problem in the ledger state */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/LedgerStateFix.h>
#endif
TRANSACTION(ttLEDGER_STATE_FIX, 53, LedgerStateFix,
Delegation::delegatable,
fixNFTokenPageLinks,
noPriv,
({
{sfLedgerFixType, soeREQUIRED},
{sfOwner, soeOPTIONAL},
}))
/** This transaction type creates an MPTokenIssuance instance */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/MPTokenIssuanceCreate.h>
#endif
TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate,
Delegation::delegatable,
featureMPTokensV1,
createMPTIssuance,
({
{sfAssetScale, soeOPTIONAL},
{sfTransferFee, soeOPTIONAL},
@@ -715,25 +552,17 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate,
}))
/** This transaction type destroys an MPTokenIssuance instance */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/MPTokenIssuanceDestroy.h>
#endif
TRANSACTION(ttMPTOKEN_ISSUANCE_DESTROY, 55, MPTokenIssuanceDestroy,
Delegation::delegatable,
featureMPTokensV1,
destroyMPTIssuance,
({
{sfMPTokenIssuanceID, soeREQUIRED},
}))
/** This transaction type sets flags on an MPTokenIssuance or MPToken instance */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/MPTokenIssuanceSet.h>
#endif
TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet,
Delegation::delegatable,
featureMPTokensV1,
noPriv,
({
{sfMPTokenIssuanceID, soeREQUIRED},
{sfHolder, soeOPTIONAL},
@@ -744,26 +573,18 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet,
}))
/** This transaction type authorizes a MPToken instance */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/MPTokenAuthorize.h>
#endif
TRANSACTION(ttMPTOKEN_AUTHORIZE, 57, MPTokenAuthorize,
Delegation::delegatable,
featureMPTokensV1,
mustAuthorizeMPT,
({
{sfMPTokenIssuanceID, soeREQUIRED},
{sfHolder, soeOPTIONAL},
}))
/** This transaction type creates a Credential instance */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/Credentials.h>
#endif
TRANSACTION(ttCREDENTIAL_CREATE, 58, CredentialCreate,
Delegation::delegatable,
featureCredentials,
noPriv,
({
{sfSubject, soeREQUIRED},
{sfCredentialType, soeREQUIRED},
@@ -775,7 +596,6 @@ TRANSACTION(ttCREDENTIAL_CREATE, 58, CredentialCreate,
TRANSACTION(ttCREDENTIAL_ACCEPT, 59, CredentialAccept,
Delegation::delegatable,
featureCredentials,
noPriv,
({
{sfIssuer, soeREQUIRED},
{sfCredentialType, soeREQUIRED},
@@ -785,7 +605,6 @@ TRANSACTION(ttCREDENTIAL_ACCEPT, 59, CredentialAccept,
TRANSACTION(ttCREDENTIAL_DELETE, 60, CredentialDelete,
Delegation::delegatable,
featureCredentials,
noPriv,
({
{sfSubject, soeOPTIONAL},
{sfIssuer, soeOPTIONAL},
@@ -793,13 +612,9 @@ TRANSACTION(ttCREDENTIAL_DELETE, 60, CredentialDelete,
}))
/** This transaction type modifies an NFToken */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/NFTokenModify.h>
#endif
TRANSACTION(ttNFTOKEN_MODIFY, 61, NFTokenModify,
Delegation::delegatable,
featureDynamicNFT,
noPriv,
({
{sfNFTokenID, soeREQUIRED},
{sfOwner, soeOPTIONAL},
@@ -807,51 +622,35 @@ TRANSACTION(ttNFTOKEN_MODIFY, 61, NFTokenModify,
}))
/** This transaction type creates or modifies a Permissioned Domain */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/PermissionedDomainSet.h>
#endif
TRANSACTION(ttPERMISSIONED_DOMAIN_SET, 62, PermissionedDomainSet,
Delegation::delegatable,
featurePermissionedDomains,
noPriv,
({
{sfDomainID, soeOPTIONAL},
{sfAcceptedCredentials, soeREQUIRED},
}))
/** This transaction type deletes a Permissioned Domain */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/PermissionedDomainDelete.h>
#endif
TRANSACTION(ttPERMISSIONED_DOMAIN_DELETE, 63, PermissionedDomainDelete,
Delegation::delegatable,
featurePermissionedDomains,
noPriv,
({
{sfDomainID, soeREQUIRED},
}))
/** This transaction type delegates specified permissions to an authorized account */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/DelegateSet.h>
#endif
TRANSACTION(ttDELEGATE_SET, 64, DelegateSet,
Delegation::notDelegatable,
featurePermissionDelegation,
noPriv,
({
{sfAuthorize, soeREQUIRED},
{sfPermissions, soeREQUIRED},
}))
/** This transaction creates a single asset vault. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/VaultCreate.h>
#endif
TRANSACTION(ttVAULT_CREATE, 65, VaultCreate,
Delegation::delegatable,
featureSingleAssetVault,
createPseudoAcct | createMPTIssuance | mustModifyVault,
({
{sfAsset, soeREQUIRED, soeMPTSupported},
{sfAssetsMaximum, soeOPTIONAL},
@@ -863,13 +662,9 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate,
}))
/** This transaction updates a single asset vault. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/VaultSet.h>
#endif
TRANSACTION(ttVAULT_SET, 66, VaultSet,
Delegation::delegatable,
featureSingleAssetVault,
mustModifyVault,
({
{sfVaultID, soeREQUIRED},
{sfAssetsMaximum, soeOPTIONAL},
@@ -878,38 +673,26 @@ TRANSACTION(ttVAULT_SET, 66, VaultSet,
}))
/** This transaction deletes a single asset vault. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/VaultDelete.h>
#endif
TRANSACTION(ttVAULT_DELETE, 67, VaultDelete,
Delegation::delegatable,
featureSingleAssetVault,
mustDeleteAcct | destroyMPTIssuance | mustModifyVault,
({
{sfVaultID, soeREQUIRED},
}))
/** This transaction trades assets for shares with a vault. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/VaultDeposit.h>
#endif
TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit,
Delegation::delegatable,
featureSingleAssetVault,
mayAuthorizeMPT | mustModifyVault,
({
{sfVaultID, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported},
}))
/** This transaction trades shares for assets with a vault. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/VaultWithdraw.h>
#endif
TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw,
Delegation::delegatable,
featureSingleAssetVault,
mayDeleteMPT | mayAuthorizeMPT | mustModifyVault,
({
{sfVaultID, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported},
@@ -918,13 +701,9 @@ TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw,
}))
/** This transaction claws back tokens from a vault. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/VaultClawback.h>
#endif
TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback,
Delegation::delegatable,
featureSingleAssetVault,
mayDeleteMPT | mustModifyVault,
({
{sfVaultID, soeREQUIRED},
{sfHolder, soeREQUIRED},
@@ -932,13 +711,9 @@ TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback,
}))
/** This transaction type batches together transactions. */
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/Batch.h>
#endif
TRANSACTION(ttBATCH, 71, Batch,
Delegation::notDelegatable,
featureBatch,
noPriv,
({
{sfRawTransactions, soeREQUIRED},
{sfBatchSigners, soeOPTIONAL},
@@ -948,13 +723,9 @@ TRANSACTION(ttBATCH, 71, Batch,
For details, see: https://xrpl.org/amendments.html
*/
#if TRANSACTION_INCLUDE
# include <xrpld/app/tx/detail/Change.h>
#endif
TRANSACTION(ttAMENDMENT, 100, EnableAmendment,
Delegation::notDelegatable,
uint256{},
noPriv,
({
{sfLedgerSequence, soeREQUIRED},
{sfAmendment, soeREQUIRED},
@@ -966,7 +737,6 @@ TRANSACTION(ttAMENDMENT, 100, EnableAmendment,
TRANSACTION(ttFEE, 101, SetFee,
Delegation::notDelegatable,
uint256{},
noPriv,
({
{sfLedgerSequence, soeOPTIONAL},
// Old version uses raw numbers
@@ -987,7 +757,6 @@ TRANSACTION(ttFEE, 101, SetFee,
TRANSACTION(ttUNL_MODIFY, 102, UNLModify,
Delegation::notDelegatable,
uint256{},
noPriv,
({
{sfUNLModifyDisabling, soeREQUIRED},
{sfLedgerSequence, soeREQUIRED},

View File

@@ -721,12 +721,24 @@ JSS(write_load); // out: GetCounts
#undef LEDGER_ENTRY
#pragma push_macro("LEDGER_ENTRY_DUPLICATE")
#undef LEDGER_ENTRY_DUPLICATE
#pragma push_macro("LEDGER_ENTRY_FIELD")
#undef LEDGER_ENTRY_FIELD
#pragma push_macro("DEFINE_LEDGER_ENTRY_FIELDS")
#undef DEFINE_LEDGER_ENTRY_FIELDS
#pragma push_macro("LEDGER_ENTRIES_BEGIN")
#undef LEDGER_ENTRIES_BEGIN
#pragma push_macro("LEDGER_ENTRIES_END")
#undef LEDGER_ENTRIES_END
#define LEDGER_ENTRY(tag, value, name, rpcName, ...) \
JSS(name); \
#define LEDGER_ENTRIES_BEGIN
#define LEDGER_ENTRIES_END
#define DEFINE_LEDGER_ENTRY_FIELDS(...) ({__VA_ARGS__})
#define LEDGER_ENTRY_FIELD(...) {__VA_ARGS__},
#define LEDGER_ENTRY(tag, value, name, rpcName, fields) \
JSS(name); \
JSS(rpcName);
#define LEDGER_ENTRY_DUPLICATE(tag, value, name, rpcName, ...) JSS(rpcName);
#define LEDGER_ENTRY_DUPLICATE(tag, value, name, rpcName, fields) JSS(rpcName);
#include <xrpl/protocol/detail/ledger_entries.macro>
@@ -734,6 +746,14 @@ JSS(write_load); // out: GetCounts
#pragma pop_macro("LEDGER_ENTRY")
#undef LEDGER_ENTRY_DUPLICATE
#pragma pop_macro("LEDGER_ENTRY_DUPLICATE")
#undef LEDGER_ENTRY_FIELD
#pragma pop_macro("LEDGER_ENTRY_FIELD")
#undef DEFINE_LEDGER_ENTRY_FIELDS
#pragma pop_macro("DEFINE_LEDGER_ENTRY_FIELDS")
#undef LEDGER_ENTRIES_BEGIN
#pragma pop_macro("LEDGER_ENTRIES_BEGIN")
#undef LEDGER_ENTRIES_END
#pragma pop_macro("LEDGER_ENTRIES_END")
#undef JSS

View File

@@ -436,12 +436,10 @@ public:
admin_.erase(admin_.iterator_to(entry));
break;
default:
// LCOV_EXCL_START
UNREACHABLE(
"ripple::Resource::Logic::release : invalid entry "
"kind");
break;
// LCOV_EXCL_STOP
}
inactive_.push_back(entry);
entry.whenExpires = m_clock.now() + secondsUntilExpiration;

View File

@@ -25,7 +25,7 @@
#include <xrpl/server/Port.h>
#include <xrpl/server/detail/ServerImpl.h>
#include <boost/asio/io_service.hpp>
#include <boost/asio/io_context.hpp>
namespace ripple {
@@ -34,10 +34,10 @@ template <class Handler>
std::unique_ptr<Server>
make_Server(
Handler& handler,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
beast::Journal journal)
{
return std::make_unique<ServerImpl<Handler>>(handler, io_service, journal);
return std::make_unique<ServerImpl<Handler>>(handler, io_context, journal);
}
} // namespace ripple

View File

@@ -88,9 +88,7 @@ public:
++iter)
{
typename BufferSequence::value_type const& buffer(*iter);
write(
boost::asio::buffer_cast<void const*>(buffer),
boost::asio::buffer_size(buffer));
write(buffer.data(), boost::asio::buffer_size(buffer));
}
}
@@ -104,7 +102,7 @@ public:
/** Detach the session.
This holds the session open so that the response can be sent
asynchronously. Calls to io_service::run made by the server
asynchronously. Calls to io_context::run made by the server
will not return until all detached sessions are closed.
*/
virtual std::shared_ptr<Session>

View File

@@ -24,11 +24,13 @@
#include <xrpl/beast/net/IPAddressConversion.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/server/Session.h>
#include <xrpl/server/detail/Spawn.h>
#include <xrpl/server/detail/io_list.h>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/ssl/stream.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/streambuf.hpp>
#include <boost/beast/core/stream_traits.hpp>
#include <boost/beast/http/dynamic_body.hpp>
@@ -215,8 +217,8 @@ BaseHTTPPeer<Handler, Impl>::BaseHTTPPeer(
ConstBufferSequence const& buffers)
: port_(port)
, handler_(handler)
, work_(executor)
, strand_(executor)
, work_(boost::asio::make_work_guard(executor))
, strand_(boost::asio::make_strand(executor))
, remote_address_(remote_address)
, journal_(journal)
{
@@ -356,7 +358,7 @@ BaseHTTPPeer<Handler, Impl>::on_write(
return;
if (graceful_)
return do_close();
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_read,
@@ -375,7 +377,7 @@ BaseHTTPPeer<Handler, Impl>::do_writer(
{
auto const p = impl().shared_from_this();
resume = std::function<void(void)>([this, p, writer, keep_alive]() {
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_writer,
@@ -406,7 +408,7 @@ BaseHTTPPeer<Handler, Impl>::do_writer(
if (!keep_alive)
return do_close();
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_read,
@@ -448,14 +450,14 @@ BaseHTTPPeer<Handler, Impl>::write(
std::shared_ptr<Writer> const& writer,
bool keep_alive)
{
boost::asio::spawn(bind_executor(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_writer,
impl().shared_from_this(),
writer,
keep_alive,
std::placeholders::_1)));
std::placeholders::_1));
}
// DEPRECATED
@@ -490,12 +492,12 @@ BaseHTTPPeer<Handler, Impl>::complete()
}
// keep-alive
boost::asio::spawn(bind_executor(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_read,
impl().shared_from_this(),
std::placeholders::_1)));
std::placeholders::_1));
}
// DEPRECATED

View File

@@ -91,8 +91,8 @@ BasePeer<Handler, Impl>::BasePeer(
return "##" + std::to_string(++id) + " ";
}())
, j_(sink_)
, work_(executor)
, strand_(executor)
, work_(boost::asio::make_work_guard(executor))
, strand_(boost::asio::make_strand(executor))
{
}

View File

@@ -29,6 +29,7 @@
#include <xrpl/server/detail/BasePeer.h>
#include <xrpl/server/detail/LowestLayer.h>
#include <boost/asio/error.hpp>
#include <boost/beast/core/multi_buffer.hpp>
#include <boost/beast/http/message.hpp>
#include <boost/beast/websocket.hpp>
@@ -420,11 +421,17 @@ BaseWSPeer<Handler, Impl>::start_timer()
// Max seconds without completing a message
static constexpr std::chrono::seconds timeout{30};
static constexpr std::chrono::seconds timeoutLocal{3};
error_code ec;
timer_.expires_from_now(
remote_endpoint().address().is_loopback() ? timeoutLocal : timeout, ec);
if (ec)
return fail(ec, "start_timer");
try
{
timer_.expires_after(
remote_endpoint().address().is_loopback() ? timeoutLocal : timeout);
}
catch (boost::system::system_error const& e)
{
return fail(e.code(), "start_timer");
}
timer_.async_wait(bind_executor(
strand_,
std::bind(
@@ -438,8 +445,14 @@ template <class Handler, class Impl>
void
BaseWSPeer<Handler, Impl>::cancel_timer()
{
error_code ec;
timer_.cancel(ec);
try
{
timer_.cancel();
}
catch (boost::system::system_error const&)
{
// ignored
}
}
template <class Handler, class Impl>

View File

@@ -69,7 +69,7 @@ private:
stream_type stream_;
socket_type& socket_;
endpoint_type remote_address_;
boost::asio::io_context::strand strand_;
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
beast::Journal const j_;
public:
@@ -95,7 +95,7 @@ private:
Handler& handler_;
boost::asio::io_context& ioc_;
acceptor_type acceptor_;
boost::asio::io_context::strand strand_;
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
bool ssl_;
bool plain_;
@@ -155,7 +155,7 @@ Door<Handler>::Detector::Detector(
, stream_(std::move(stream))
, socket_(stream_.socket())
, remote_address_(remote_address)
, strand_(ioc_)
, strand_(boost::asio::make_strand(ioc_))
, j_(j)
{
}
@@ -164,7 +164,7 @@ template <class Handler>
void
Door<Handler>::Detector::run()
{
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&Detector::do_detect,
@@ -269,7 +269,7 @@ Door<Handler>::reOpen()
Throw<std::exception>();
}
acceptor_.listen(boost::asio::socket_base::max_connections, ec);
acceptor_.listen(boost::asio::socket_base::max_listen_connections, ec);
if (ec)
{
JLOG(j_.error()) << "Listen on port '" << port_.name
@@ -291,7 +291,7 @@ Door<Handler>::Door(
, handler_(handler)
, ioc_(io_context)
, acceptor_(io_context)
, strand_(io_context)
, strand_(boost::asio::make_strand(io_context))
, ssl_(
port_.protocol.count("https") > 0 ||
port_.protocol.count("wss") > 0 || port_.protocol.count("wss2") > 0 ||
@@ -307,7 +307,7 @@ template <class Handler>
void
Door<Handler>::run()
{
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&Door<Handler>::do_accept,
@@ -320,7 +320,8 @@ void
Door<Handler>::close()
{
if (!strand_.running_in_this_thread())
return strand_.post(
return boost::asio::post(
strand_,
std::bind(&Door<Handler>::close, this->shared_from_this()));
error_code ec;
acceptor_.close(ec);

View File

@@ -105,7 +105,7 @@ PlainHTTPPeer<Handler>::run()
{
if (!this->handler_.onAccept(this->session(), this->remote_address_))
{
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(&PlainHTTPPeer::do_close, this->shared_from_this()));
return;
@@ -114,7 +114,7 @@ PlainHTTPPeer<Handler>::run()
if (!socket_.is_open())
return;
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(
&PlainHTTPPeer::do_read,

View File

@@ -115,14 +115,14 @@ SSLHTTPPeer<Handler>::run()
{
if (!this->handler_.onAccept(this->session(), this->remote_address_))
{
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(&SSLHTTPPeer::do_close, this->shared_from_this()));
return;
}
if (!socket_.is_open())
return;
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(
&SSLHTTPPeer::do_handshake,
@@ -164,7 +164,7 @@ SSLHTTPPeer<Handler>::do_handshake(yield_context do_yield)
this->port().protocol.count("https") > 0;
if (http)
{
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(
&SSLHTTPPeer::do_read,

View File

@@ -26,6 +26,8 @@
#include <xrpl/server/detail/io_list.h>
#include <boost/asio.hpp>
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <array>
#include <chrono>
@@ -85,9 +87,11 @@ private:
Handler& handler_;
beast::Journal const j_;
boost::asio::io_service& io_service_;
boost::asio::io_service::strand strand_;
std::optional<boost::asio::io_service::work> work_;
boost::asio::io_context& io_context_;
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
std::optional<boost::asio::executor_work_guard<
boost::asio::io_context::executor_type>>
work_;
std::mutex m_;
std::vector<Port> ports_;
@@ -100,7 +104,7 @@ private:
public:
ServerImpl(
Handler& handler,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
beast::Journal journal);
~ServerImpl();
@@ -123,10 +127,10 @@ public:
return ios_;
}
boost::asio::io_service&
get_io_service()
boost::asio::io_context&
get_io_context()
{
return io_service_;
return io_context_;
}
bool
@@ -140,13 +144,13 @@ private:
template <class Handler>
ServerImpl<Handler>::ServerImpl(
Handler& handler,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
beast::Journal journal)
: handler_(handler)
, j_(journal)
, io_service_(io_service)
, strand_(io_service_)
, work_(io_service_)
, io_context_(io_context)
, strand_(boost::asio::make_strand(io_context_))
, work_(std::in_place, boost::asio::make_work_guard(io_context_))
{
}
@@ -173,7 +177,7 @@ ServerImpl<Handler>::ports(std::vector<Port> const& ports)
ports_.push_back(port);
auto& internalPort = ports_.back();
if (auto sp = ios_.emplace<Door<Handler>>(
handler_, io_service_, internalPort, j_))
handler_, io_context_, internalPort, j_))
{
list_.push_back(sp);

View File

@@ -0,0 +1,108 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SERVER_SPAWN_H_INCLUDED
#define RIPPLE_SERVER_SPAWN_H_INCLUDED
#include <xrpl/basics/Log.h>
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>
#include <concepts>
#include <type_traits>
namespace ripple::util {
namespace impl {
template <typename T>
concept IsStrand = std::same_as<
std::decay_t<T>,
boost::asio::strand<typename std::decay_t<T>::inner_executor_type>>;
/**
* @brief A completion handler that restores `boost::asio::spawn`'s behaviour
* from Boost 1.83
*
* This is intended to be passed as the third argument to `boost::asio::spawn`
* so that exceptions are not ignored but propagated to `io_context.run()` call
* site.
*
* @param ePtr The exception that was caught on the coroutine
*/
inline constexpr auto kPROPAGATE_EXCEPTIONS = [](std::exception_ptr ePtr) {
if (ePtr)
{
try
{
std::rethrow_exception(ePtr);
}
catch (std::exception const& e)
{
JLOG(debugLog().warn()) << "Spawn exception: " << e.what();
throw;
}
catch (...)
{
JLOG(debugLog().warn()) << "Spawn exception: Unknown";
throw;
}
}
};
} // namespace impl
/**
* @brief Spawns a coroutine using `boost::asio::spawn`
*
* @note This uses kPROPAGATE_EXCEPTIONS to force asio to propagate exceptions
* through `io_context`
* @note Since implicit strand was removed from boost::asio::spawn this helper
* function adds the strand back
*
* @tparam Ctx The type of the context/strand
* @tparam F The type of the function to execute
* @param ctx The execution context
* @param func The function to execute. Must return `void`
*/
template <typename Ctx, typename F>
requires std::is_invocable_r_v<void, F, boost::asio::yield_context>
void
spawn(Ctx&& ctx, F&& func)
{
if constexpr (impl::IsStrand<Ctx>)
{
boost::asio::spawn(
std::forward<Ctx>(ctx),
std::forward<F>(func),
impl::kPROPAGATE_EXCEPTIONS);
}
else
{
boost::asio::spawn(
boost::asio::make_strand(
boost::asio::get_associated_executor(std::forward<Ctx>(ctx))),
std::forward<F>(func),
impl::kPROPAGATE_EXCEPTIONS);
}
}
} // namespace ripple::util
#endif
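The helper above restores the pre-existing behaviour in which an exception escaping a spawned coroutine surfaces at the `io_context::run()` call site instead of being silently dropped. The following is a minimal, hypothetical usage sketch that is not part of this diff; it assumes the rippled headers are on the include path, the `xrpl` basics library is linked (for `debugLog()` used by `kPROPAGATE_EXCEPTIONS`), and a Boost version recent enough to provide the completion-token overloads of `boost::asio::spawn`:

// Hypothetical standalone example; names outside ripple::util::spawn are
// illustrative only.
#include <xrpl/server/detail/Spawn.h>

#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>

#include <iostream>
#include <stdexcept>

int
main()
{
    boost::asio::io_context ioc;
    auto strand = boost::asio::make_strand(ioc);

    // Passing a strand takes the IsStrand branch: util::spawn forwards it
    // unchanged and installs kPROPAGATE_EXCEPTIONS as the completion handler.
    ripple::util::spawn(strand, [](boost::asio::yield_context yield) {
        // ... asynchronous work using `yield` as the completion token ...
        throw std::runtime_error("boom");
    });

    try
    {
        ioc.run();
    }
    catch (std::exception const& e)
    {
        // The exception thrown inside the coroutine is rethrown by
        // kPROPAGATE_EXCEPTIONS and reaches the run() caller.
        std::cerr << "caught: " << e.what() << '\n';
    }
}

Call sites in this diff (for example Door, PlainHTTPPeer, SSLHTTPPeer) follow the same pattern, passing their member `strand_` as the first argument.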

View File

@@ -166,7 +166,7 @@ public:
May be called concurrently.
Preconditions:
No call to io_service::run on any io_service
No call to io_context::run on any io_context
used by work objects associated with this io_list
exists in the caller's call stack.
*/

View File

@@ -239,11 +239,9 @@ Logs::fromSeverity(beast::severities::Severity level)
case kError:
return lsERROR;
// LCOV_EXCL_START
default:
UNREACHABLE("ripple::Logs::fromSeverity : invalid severity");
[[fallthrough]];
// LCOV_EXCL_STOP
case kFatal:
break;
}
@@ -267,11 +265,9 @@ Logs::toSeverity(LogSeverity level)
return kWarning;
case lsERROR:
return kError;
// LCOV_EXCL_START
default:
UNREACHABLE("ripple::Logs::toSeverity : invalid severity");
[[fallthrough]];
// LCOV_EXCL_STOP
case lsFATAL:
break;
}
@@ -296,11 +292,9 @@ Logs::toString(LogSeverity s)
return "Error";
case lsFATAL:
return "Fatal";
// LCOV_EXCL_START
default:
UNREACHABLE("ripple::Logs::toString : invalid severity");
return "Unknown";
// LCOV_EXCL_STOP
}
}
@@ -362,11 +356,9 @@ Logs::format(
case kError:
output += "ERR ";
break;
// LCOV_EXCL_START
default:
UNREACHABLE("ripple::Logs::format : invalid severity");
[[fallthrough]];
// LCOV_EXCL_STOP
case kFatal:
output += "FTL ";
break;

View File

@@ -25,8 +25,9 @@
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/system/detail/error_code.hpp>
@@ -124,8 +125,8 @@ public:
beast::Journal m_journal;
boost::asio::io_service& m_io_service;
boost::asio::io_service::strand m_strand;
boost::asio::io_context& m_io_context;
boost::asio::strand<boost::asio::io_context::executor_type> m_strand;
boost::asio::ip::tcp::resolver m_resolver;
std::condition_variable m_cv;
@@ -155,12 +156,12 @@ public:
std::deque<Work> m_work;
ResolverAsioImpl(
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
beast::Journal journal)
: m_journal(journal)
, m_io_service(io_service)
, m_strand(io_service)
, m_resolver(io_service)
, m_io_context(io_context)
, m_strand(boost::asio::make_strand(io_context))
, m_resolver(io_context)
, m_asyncHandlersCompleted(true)
, m_stop_called(false)
, m_stopped(true)
@@ -216,8 +217,14 @@ public:
{
if (m_stop_called.exchange(true) == false)
{
m_io_service.dispatch(m_strand.wrap(std::bind(
&ResolverAsioImpl::do_stop, this, CompletionCounter(this))));
boost::asio::dispatch(
m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_stop,
this,
CompletionCounter(this))));
JLOG(m_journal.debug()) << "Queued a stop request";
}
@@ -248,12 +255,16 @@ public:
// TODO NIKB use rvalue references to construct and move
// reducing cost.
m_io_service.dispatch(m_strand.wrap(std::bind(
&ResolverAsioImpl::do_resolve,
this,
names,
handler,
CompletionCounter(this))));
boost::asio::dispatch(
m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_resolve,
this,
names,
handler,
CompletionCounter(this))));
}
//-------------------------------------------------------------------------
@@ -279,19 +290,20 @@ public:
std::string name,
boost::system::error_code const& ec,
HandlerType handler,
boost::asio::ip::tcp::resolver::iterator iter,
boost::asio::ip::tcp::resolver::results_type results,
CompletionCounter)
{
if (ec == boost::asio::error::operation_aborted)
return;
std::vector<beast::IP::Endpoint> addresses;
auto iter = results.begin();
// If we get an error message back, we don't return any
// results that we may have gotten.
if (!ec)
{
while (iter != boost::asio::ip::tcp::resolver::iterator())
while (iter != results.end())
{
addresses.push_back(
beast::IPAddressConversion::from_asio(*iter));
@@ -301,8 +313,14 @@ public:
handler(name, addresses);
m_io_service.post(m_strand.wrap(std::bind(
&ResolverAsioImpl::do_work, this, CompletionCounter(this))));
boost::asio::post(
m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_work,
this,
CompletionCounter(this))));
}
HostAndPort
@@ -383,16 +401,21 @@ public:
{
JLOG(m_journal.error()) << "Unable to parse '" << name << "'";
m_io_service.post(m_strand.wrap(std::bind(
&ResolverAsioImpl::do_work, this, CompletionCounter(this))));
boost::asio::post(
m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_work,
this,
CompletionCounter(this))));
return;
}
boost::asio::ip::tcp::resolver::query query(host, port);
m_resolver.async_resolve(
query,
host,
port,
std::bind(
&ResolverAsioImpl::do_finish,
this,
@@ -423,10 +446,14 @@ public:
if (m_work.size() > 0)
{
m_io_service.post(m_strand.wrap(std::bind(
&ResolverAsioImpl::do_work,
this,
CompletionCounter(this))));
boost::asio::post(
m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_work,
this,
CompletionCounter(this))));
}
}
}
@@ -435,9 +462,9 @@ public:
//-----------------------------------------------------------------------------
std::unique_ptr<ResolverAsio>
ResolverAsio::New(boost::asio::io_service& io_service, beast::Journal journal)
ResolverAsio::New(boost::asio::io_context& io_context, beast::Journal journal)
{
return std::make_unique<ResolverAsioImpl>(io_service, journal);
return std::make_unique<ResolverAsioImpl>(io_context, journal);
}
//-----------------------------------------------------------------------------

View File

@@ -36,7 +36,6 @@ LogThrow(std::string const& title)
[[noreturn]] void
LogicError(std::string const& s) noexcept
{
// LCOV_EXCL_START
JLOG(debugLog().fatal()) << s;
std::cerr << "Logic error: " << s << std::endl;
// Use a non-standard contract naming here (without namespace) because
@@ -46,7 +45,6 @@ LogicError(std::string const& s) noexcept
// For the above reasons, we want this contract to stand out.
UNREACHABLE("LogicError", {{"message", s}});
std::abort();
// LCOV_EXCL_STOP
}
} // namespace ripple

View File

@@ -30,9 +30,11 @@
#include <xrpl/beast/utility/instrumentation.h>
#include <boost/asio/basic_waitable_timer.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/udp.hpp>
#include <boost/asio/strand.hpp>
#include <boost/system/detail/error_code.hpp>
@@ -238,9 +240,11 @@ private:
Journal m_journal;
IP::Endpoint m_address;
std::string m_prefix;
boost::asio::io_service m_io_service;
std::optional<boost::asio::io_service::work> m_work;
boost::asio::io_service::strand m_strand;
boost::asio::io_context m_io_context;
std::optional<boost::asio::executor_work_guard<
boost::asio::io_context::executor_type>>
m_work;
boost::asio::strand<boost::asio::io_context::executor_type> m_strand;
boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer;
boost::asio::ip::udp::socket m_socket;
std::deque<std::string> m_data;
@@ -264,18 +268,24 @@ public:
: m_journal(journal)
, m_address(address)
, m_prefix(prefix)
, m_work(std::ref(m_io_service))
, m_strand(m_io_service)
, m_timer(m_io_service)
, m_socket(m_io_service)
, m_work(boost::asio::make_work_guard(m_io_context))
, m_strand(boost::asio::make_strand(m_io_context))
, m_timer(m_io_context)
, m_socket(m_io_context)
, m_thread(&StatsDCollectorImp::run, this)
{
}
~StatsDCollectorImp() override
{
boost::system::error_code ec;
m_timer.cancel(ec);
try
{
m_timer.cancel();
}
catch (boost::system::system_error const&)
{
// ignored
}
m_work.reset();
m_thread.join();
@@ -334,10 +344,10 @@ public:
//--------------------------------------------------------------------------
boost::asio::io_service&
get_io_service()
boost::asio::io_context&
get_io_context()
{
return m_io_service;
return m_io_context;
}
std::string const&
@@ -355,8 +365,14 @@ public:
void
post_buffer(std::string&& buffer)
{
m_io_service.dispatch(m_strand.wrap(std::bind(
&StatsDCollectorImp::do_post_buffer, this, std::move(buffer))));
boost::asio::dispatch(
m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&StatsDCollectorImp::do_post_buffer,
this,
std::move(buffer))));
}
// The keepAlive parameter makes sure the buffers sent to
@@ -386,8 +402,7 @@ public:
for (auto const& buffer : buffers)
{
std::string const s(
boost::asio::buffer_cast<char const*>(buffer),
boost::asio::buffer_size(buffer));
buffer.data(), boost::asio::buffer_size(buffer));
std::cerr << s;
}
std::cerr << '\n';
@@ -456,7 +471,7 @@ public:
set_timer()
{
using namespace std::chrono_literals;
m_timer.expires_from_now(1s);
m_timer.expires_after(1s);
m_timer.async_wait(std::bind(
&StatsDCollectorImp::on_timer, this, std::placeholders::_1));
}
@@ -498,13 +513,13 @@ public:
set_timer();
m_io_service.run();
m_io_context.run();
m_socket.shutdown(boost::asio::ip::udp::socket::shutdown_send, ec);
m_socket.close();
m_io_service.poll();
m_io_context.poll();
}
};
@@ -547,10 +562,12 @@ StatsDCounterImpl::~StatsDCounterImpl()
void
StatsDCounterImpl::increment(CounterImpl::value_type amount)
{
m_impl->get_io_service().dispatch(std::bind(
&StatsDCounterImpl::do_increment,
std::static_pointer_cast<StatsDCounterImpl>(shared_from_this()),
amount));
boost::asio::dispatch(
m_impl->get_io_context(),
std::bind(
&StatsDCounterImpl::do_increment,
std::static_pointer_cast<StatsDCounterImpl>(shared_from_this()),
amount));
}
void
@@ -592,10 +609,12 @@ StatsDEventImpl::StatsDEventImpl(
void
StatsDEventImpl::notify(EventImpl::value_type const& value)
{
m_impl->get_io_service().dispatch(std::bind(
&StatsDEventImpl::do_notify,
std::static_pointer_cast<StatsDEventImpl>(shared_from_this()),
value));
boost::asio::dispatch(
m_impl->get_io_context(),
std::bind(
&StatsDEventImpl::do_notify,
std::static_pointer_cast<StatsDEventImpl>(shared_from_this()),
value));
}
void
@@ -625,19 +644,23 @@ StatsDGaugeImpl::~StatsDGaugeImpl()
void
StatsDGaugeImpl::set(GaugeImpl::value_type value)
{
m_impl->get_io_service().dispatch(std::bind(
&StatsDGaugeImpl::do_set,
std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
value));
boost::asio::dispatch(
m_impl->get_io_context(),
std::bind(
&StatsDGaugeImpl::do_set,
std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
value));
}
void
StatsDGaugeImpl::increment(GaugeImpl::difference_type amount)
{
m_impl->get_io_service().dispatch(std::bind(
&StatsDGaugeImpl::do_increment,
std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
amount));
boost::asio::dispatch(
m_impl->get_io_context(),
std::bind(
&StatsDGaugeImpl::do_increment,
std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
amount));
}
void
@@ -713,10 +736,12 @@ StatsDMeterImpl::~StatsDMeterImpl()
void
StatsDMeterImpl::increment(MeterImpl::value_type amount)
{
m_impl->get_io_service().dispatch(std::bind(
&StatsDMeterImpl::do_increment,
std::static_pointer_cast<StatsDMeterImpl>(shared_from_this()),
amount));
boost::asio::dispatch(
m_impl->get_io_context(),
std::bind(
&StatsDMeterImpl::do_increment,
std::static_pointer_cast<StatsDMeterImpl>(shared_from_this()),
amount));
}
void

View File

@@ -25,11 +25,11 @@ namespace IP {
bool
is_private(AddressV4 const& addr)
{
return ((addr.to_ulong() & 0xff000000) ==
return ((addr.to_uint() & 0xff000000) ==
0x0a000000) || // Prefix /8, 10. #.#.#
((addr.to_ulong() & 0xfff00000) ==
((addr.to_uint() & 0xfff00000) ==
0xac100000) || // Prefix /12 172. 16.#.# - 172.31.#.#
((addr.to_ulong() & 0xffff0000) ==
((addr.to_uint() & 0xffff0000) ==
0xc0a80000) || // Prefix /16 192.168.#.#
addr.is_loopback();
}
@@ -44,7 +44,7 @@ char
get_class(AddressV4 const& addr)
{
static char const* table = "AAAABBCD";
return table[(addr.to_ulong() & 0xE0000000) >> 29];
return table[(addr.to_uint() & 0xE0000000) >> 29];
}
} // namespace IP

View File

@@ -20,6 +20,8 @@
#include <xrpl/beast/net/IPAddressV4.h>
#include <xrpl/beast/net/IPAddressV6.h>
#include <boost/asio/ip/address_v4.hpp>
namespace beast {
namespace IP {
@@ -28,7 +30,9 @@ is_private(AddressV6 const& addr)
{
return (
(addr.to_bytes()[0] & 0xfd) || // TODO fc00::/8 too ?
(addr.is_v4_mapped() && is_private(addr.to_v4())));
(addr.is_v4_mapped() &&
is_private(boost::asio::ip::make_address_v4(
boost::asio::ip::v4_mapped, addr))));
}
bool

View File

@@ -21,6 +21,8 @@
#include <xrpl/beast/net/IPEndpoint.h>
#include <boost/algorithm/string/trim.hpp>
#include <boost/asio/ip/address.hpp>
#include <boost/asio/ip/address_v4.hpp>
#include <boost/system/detail/error_code.hpp>
#include <cctype>
@@ -167,7 +169,7 @@ operator>>(std::istream& is, Endpoint& endpoint)
}
boost::system::error_code ec;
auto addr = Address::from_string(addrStr, ec);
auto addr = boost::asio::ip::make_address(addrStr, ec);
if (ec)
{
is.setstate(std::ios_base::failbit);

View File

@@ -174,7 +174,7 @@ Array::append(Json::Value const& v)
return;
}
}
UNREACHABLE("Json::Array::append : invalid type"); // LCOV_EXCL_LINE
UNREACHABLE("Json::Array::append : invalid type");
}
void
@@ -209,7 +209,7 @@ Object::set(std::string const& k, Json::Value const& v)
return;
}
}
UNREACHABLE("Json::Object::set : invalid type"); // LCOV_EXCL_LINE
UNREACHABLE("Json::Object::set : invalid type");
}
//------------------------------------------------------------------------------

View File

@@ -213,10 +213,8 @@ Value::Value(ValueType type) : type_(type), allocated_(0)
value_.bool_ = false;
break;
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::Value(ValueType) : invalid type");
// LCOV_EXCL_STOP
}
}
@@ -292,10 +290,8 @@ Value::Value(Value const& other) : type_(other.type_)
value_.map_ = new ObjectValues(*other.value_.map_);
break;
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::Value(Value const&) : invalid type");
// LCOV_EXCL_STOP
}
}
@@ -322,10 +318,8 @@ Value::~Value()
delete value_.map_;
break;
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::~Value : invalid type");
// LCOV_EXCL_STOP
}
}
@@ -425,10 +419,8 @@ operator<(Value const& x, Value const& y)
return *x.value_.map_ < *y.value_.map_;
}
// LCOV_EXCL_START
default:
UNREACHABLE("Json::operator<(Value, Value) : invalid type");
// LCOV_EXCL_STOP
}
return 0; // unreachable
@@ -473,10 +465,8 @@ operator==(Value const& x, Value const& y)
return x.value_.map_->size() == y.value_.map_->size() &&
*x.value_.map_ == *y.value_.map_;
// LCOV_EXCL_START
default:
UNREACHABLE("Json::operator==(Value, Value) : invalid type");
// LCOV_EXCL_STOP
}
return 0; // unreachable
@@ -516,10 +506,8 @@ Value::asString() const
case objectValue:
JSON_ASSERT_MESSAGE(false, "Type is not convertible to string");
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asString : invalid type");
// LCOV_EXCL_STOP
}
return ""; // unreachable
@@ -560,10 +548,8 @@ Value::asInt() const
case objectValue:
JSON_ASSERT_MESSAGE(false, "Type is not convertible to int");
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asInt : invalid type");
// LCOV_EXCL_STOP
}
return 0; // unreachable;
@@ -604,10 +590,8 @@ Value::asUInt() const
case objectValue:
JSON_ASSERT_MESSAGE(false, "Type is not convertible to uint");
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asUInt : invalid type");
// LCOV_EXCL_STOP
}
return 0; // unreachable;
@@ -638,10 +622,8 @@ Value::asDouble() const
case objectValue:
JSON_ASSERT_MESSAGE(false, "Type is not convertible to double");
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asDouble : invalid type");
// LCOV_EXCL_STOP
}
return 0; // unreachable;
@@ -672,10 +654,8 @@ Value::asBool() const
case objectValue:
return value_.map_->size() != 0;
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asBool : invalid type");
// LCOV_EXCL_STOP
}
return false; // unreachable;
@@ -730,10 +710,8 @@ Value::isConvertibleTo(ValueType other) const
return other == objectValue ||
(other == nullValue && value_.map_->size() == 0);
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::isConvertible : invalid type");
// LCOV_EXCL_STOP
}
return false; // unreachable;
@@ -766,10 +744,8 @@ Value::size() const
case objectValue:
return Int(value_.map_->size());
// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::size : invalid type");
// LCOV_EXCL_STOP
}
return 0; // unreachable;

View File

@@ -259,11 +259,9 @@ ApplyStateTable::apply(
}
else
{
// LCOV_EXCL_START
UNREACHABLE(
"ripple::detail::ApplyStateTable::apply : unsupported "
"operation type");
// LCOV_EXCL_STOP
}
}

View File

@@ -22,9 +22,6 @@
#include <xrpl/ledger/ApplyView.h>
#include <xrpl/protocol/Protocol.h>
#include <limits>
#include <type_traits>
namespace ripple {
std::optional<std::uint64_t>
@@ -94,21 +91,8 @@ ApplyView::dirAdd(
return page;
}
// We rely on modulo arithmetic of unsigned integers (guaranteed in
// [basic.fundamental] paragraph 2) to detect page representation overflow.
// For signed integers this would be UB, hence static_assert here.
static_assert(std::is_unsigned_v<decltype(page)>);
// Defensive check against breaking changes in compiler.
static_assert([]<typename T>(std::type_identity<T>) constexpr -> T {
T tmp = std::numeric_limits<T>::max();
return ++tmp;
}(std::type_identity<decltype(page)>{}) == 0);
++page;
// Check whether we're out of pages.
if (page == 0)
return std::nullopt;
if (!rules().enabled(fixDirectoryLimit) &&
page >= dirNodeMaxPages) // Old pages limit
if (++page >= dirNodeMaxPages)
return std::nullopt;
// We are about to create a new node; we'll link it to
@@ -149,10 +133,8 @@ ApplyView::emptyDirDelete(Keylet const& directory)
if (directory.type != ltDIR_NODE ||
node->getFieldH256(sfRootIndex) != directory.key)
{
// LCOV_EXCL_START
UNREACHABLE("ripple::ApplyView::emptyDirDelete : invalid node type");
return false;
// LCOV_EXCL_STOP
}
// The directory still contains entries and so it cannot be removed

View File

@@ -36,9 +36,7 @@ BookDirs::BookDirs(ReadView const& view, Book const& book)
{
if (!cdirFirst(*view_, key_, sle_, entry_, index_))
{
// LCOV_EXCL_START
UNREACHABLE("ripple::BookDirs::BookDirs : directory is empty");
// LCOV_EXCL_STOP
}
}
}
@@ -112,11 +110,9 @@ BookDirs::const_iterator::operator++()
}
else if (!cdirFirst(*view_, cur_key_, sle_, entry_, index_))
{
// LCOV_EXCL_START
UNREACHABLE(
"ripple::BookDirs::const_iterator::operator++ : directory is "
"empty");
// LCOV_EXCL_STOP
}
}

View File

@@ -77,23 +77,19 @@ deleteSLE(
AccountID const& account, SField const& node, bool isOwner) -> TER {
auto const sleAccount = view.peek(keylet::account(account));
if (!sleAccount)
{
// LCOV_EXCL_START
{ // LCOV_EXCL_START
JLOG(j.fatal()) << "Internal error: can't retrieve Owner account.";
return tecINTERNAL;
// LCOV_EXCL_STOP
}
} // LCOV_EXCL_STOP
// Remove object from owner directory
std::uint64_t const page = sleCredential->getFieldU64(node);
if (!view.dirRemove(
keylet::ownerDir(account), page, sleCredential->key(), false))
{
// LCOV_EXCL_START
{ // LCOV_EXCL_START
JLOG(j.fatal()) << "Unable to delete Credential from owner.";
return tefBAD_LEDGER;
// LCOV_EXCL_STOP
}
} // LCOV_EXCL_STOP
if (isOwner)
adjustOwnerCount(view, sleAccount, -1, j);

View File

@@ -324,12 +324,10 @@ isVaultPseudoAccountFrozen(
auto const issuer = mptIssuance->getAccountID(sfIssuer);
auto const mptIssuer = view.read(keylet::account(issuer));
if (mptIssuer == nullptr)
{
// LCOV_EXCL_START
{ // LCOV_EXCL_START
UNREACHABLE("ripple::isVaultPseudoAccountFrozen : null MPToken issuer");
return false;
// LCOV_EXCL_STOP
}
} // LCOV_EXCL_STOP
if (!mptIssuer->isFieldPresent(sfVaultID))
return false; // not a Vault pseudo-account, common case
@@ -340,8 +338,7 @@ isVaultPseudoAccountFrozen(
{ // LCOV_EXCL_START
UNREACHABLE("ripple::isVaultPseudoAccountFrozen : null vault");
return false;
// LCOV_EXCL_STOP
}
} // LCOV_EXCL_STOP
return isAnyFrozen(view, {issuer, account}, vault->at(sfAsset), depth + 1);
}
@@ -629,8 +626,8 @@ xrpLiquid(
std::uint32_t const ownerCount = confineOwnerCount(
view.ownerCountHook(id, sle->getFieldU32(sfOwnerCount)), ownerCountAdj);
// Pseudo-accounts have no reserve requirement
auto const reserve = isPseudoAccount(sle)
// AMMs have no reserve requirement
auto const reserve = sle->isFieldPresent(sfAMMID)
? XRPAmount{0}
: view.fees().accountReserve(ownerCount);
@@ -1042,7 +1039,7 @@ adjustOwnerCount(
AccountID const id = (*sle)[sfAccount];
std::uint32_t const adjusted = confineOwnerCount(current, amount, id, j);
view.adjustOwnerCountHook(id, current, adjusted);
sle->at(sfOwnerCount) = adjusted;
sle->setFieldU32(sfOwnerCount, adjusted);
view.update(sle);
}
@@ -1082,51 +1079,15 @@ pseudoAccountAddress(ReadView const& view, uint256 const& pseudoOwnerKey)
return beast::zero;
}
// Pseudo-account designator fields MUST be maintained by including the
// SField::sMD_PseudoAccount flag in the SField definition. (Don't forget to
// "| SField::sMD_Default"!) The fields do NOT need to be amendment-gated,
// since a non-active amendment will not set any field, by definition.
// Specific properties of a pseudo-account are NOT checked here, that's what
// Note, the list of the pseudo-account designator fields below MUST be
// maintained but it does NOT need to be amendment-gated, since a
// non-active amendment will not set any field, by definition. Specific
// properties of a pseudo-account are NOT checked here, that's what
// InvariantCheck is for.
[[nodiscard]] std::vector<SField const*> const&
getPseudoAccountFields()
{
static std::vector<SField const*> const pseudoFields = []() {
auto const ar = LedgerFormats::getInstance().findByType(ltACCOUNT_ROOT);
if (!ar)
{
// LCOV_EXCL_START
LogicError(
"ripple::isPseudoAccount : unable to find account root ledger "
"format");
// LCOV_EXCL_STOP
}
auto const& soTemplate = ar->getSOTemplate();
std::vector<SField const*> pseudoFields;
for (auto const& field : soTemplate)
{
if (field.sField().shouldMeta(SField::sMD_PseudoAccount))
pseudoFields.emplace_back(&field.sField());
}
return pseudoFields;
}();
return pseudoFields;
}
[[nodiscard]] bool
isPseudoAccount(std::shared_ptr<SLE const> sleAcct)
{
auto const& fields = getPseudoAccountFields();
// Intentionally use defensive coding here because it's cheap and makes the
// semantics of true return value clean.
return sleAcct && sleAcct->getType() == ltACCOUNT_ROOT &&
std::count_if(
fields.begin(), fields.end(), [&sleAcct](SField const* sf) -> bool {
return sleAcct->isFieldPresent(*sf);
}) > 0;
}
static std::array<SField const*, 2> const pseudoAccountOwnerFields = {
&sfAMMID, //
&sfVaultID, //
};
Expected<std::shared_ptr<SLE>, TER>
createPseudoAccount(
@@ -1134,11 +1095,10 @@ createPseudoAccount(
uint256 const& pseudoOwnerKey,
SField const& ownerField)
{
[[maybe_unused]] auto const& fields = getPseudoAccountFields();
XRPL_ASSERT(
std::count_if(
fields.begin(),
fields.end(),
pseudoAccountOwnerFields.begin(),
pseudoAccountOwnerFields.end(),
[&ownerField](SField const* sf) -> bool {
return *sf == ownerField;
}) == 1,
@@ -1174,42 +1134,18 @@ createPseudoAccount(
return account;
}
[[nodiscard]] TER
canAddHolding(ReadView const& view, Issue const& issue)
[[nodiscard]] bool
isPseudoAccount(std::shared_ptr<SLE const> sleAcct)
{
if (issue.native())
return tesSUCCESS; // No special checks for XRP
auto const issuer = view.read(keylet::account(issue.getIssuer()));
if (!issuer)
return terNO_ACCOUNT;
else if (!issuer->isFlag(lsfDefaultRipple))
return terNO_RIPPLE;
return tesSUCCESS;
}
[[nodiscard]] TER
canAddHolding(ReadView const& view, MPTIssue const& mptIssue)
{
auto mptID = mptIssue.getMptID();
auto issuance = view.read(keylet::mptIssuance(mptID));
if (!issuance)
return tecOBJECT_NOT_FOUND;
if (!issuance->isFlag(lsfMPTCanTransfer))
return tecNO_AUTH;
return tesSUCCESS;
}
[[nodiscard]] TER
canAddHolding(ReadView const& view, Asset const& asset)
{
return std::visit(
[&]<ValidIssueType TIss>(TIss const& issue) -> TER {
return canAddHolding(view, issue);
},
asset.value());
// Intentionally use defensive coding here because it's cheap and makes the
// semantics of true return value clean.
return sleAcct && sleAcct->getType() == ltACCOUNT_ROOT &&
std::count_if(
pseudoAccountOwnerFields.begin(),
pseudoAccountOwnerFields.end(),
[&sleAcct](SField const* sf) -> bool {
return sleAcct->isFieldPresent(*sf);
}) > 0;
}
[[nodiscard]] TER
@@ -1242,12 +1178,6 @@ addEmptyHolding(
// If the line already exists, don't create it again.
if (view.read(index))
return tecDUPLICATE;
// Can the account cover the trust line reserve ?
std::uint32_t const ownerCount = sleDst->at(sfOwnerCount);
if (priorBalance < view.fees().accountReserve(ownerCount + 1))
return tecNO_LINE_INSUF_RESERVE;
return trustCreate(
view,
high,
@@ -1298,7 +1228,7 @@ authorizeMPToken(
{
auto const sleAcct = view.peek(keylet::account(account));
if (!sleAcct)
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
// If the account that submitted the tx is a holder
// Note: `account_` is holder's account
@@ -1363,17 +1293,17 @@ authorizeMPToken(
auto const sleMptIssuance = view.read(keylet::mptIssuance(mptIssuanceID));
if (!sleMptIssuance)
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
// If the account that submitted this tx is the issuer of the MPT
// Note: `account_` is issuer's account
// `holderID` is holder's account
if (account != (*sleMptIssuance)[sfIssuer])
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
auto const sleMpt = view.peek(keylet::mptoken(mptIssuanceID, *holderID));
if (!sleMpt)
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
std::uint32_t const flagsIn = sleMpt->getFieldU32(sfFlags);
std::uint32_t flagsOut = flagsIn;
@@ -1430,7 +1360,7 @@ trustCreate(
describeOwnerDir(uLowAccountID));
if (!lowNode)
return tecDIR_FULL; // LCOV_EXCL_LINE
return tecDIR_FULL;
auto highNode = view.dirInsert(
keylet::ownerDir(uHighAccountID),
@@ -1438,14 +1368,14 @@ trustCreate(
describeOwnerDir(uHighAccountID));
if (!highNode)
return tecDIR_FULL; // LCOV_EXCL_LINE
return tecDIR_FULL;
bool const bSetDst = saLimit.getIssuer() == uDstAccountID;
bool const bSetHigh = bSrcHigh ^ bSetDst;
XRPL_ASSERT(sleAccount, "ripple::trustCreate : non-null SLE");
if (!sleAccount)
return tefINTERNAL; // LCOV_EXCL_LINE
return tefINTERNAL;
XRPL_ASSERT(
sleAccount->getAccountID(sfAccount) ==
@@ -1524,12 +1454,10 @@ removeEmptyHolding(
{
auto const sle = view.read(keylet::account(accountID));
if (!sle)
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
auto const balance = sle->getFieldAmount(sfBalance);
if (balance.xrp() != 0)
return tecHAS_OBLIGATIONS;
return tesSUCCESS;
}
@@ -1547,8 +1475,7 @@ removeEmptyHolding(
auto sleLowAccount =
view.peek(keylet::account(line->at(sfLowLimit)->getIssuer()));
if (!sleLowAccount)
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
adjustOwnerCount(view, sleLowAccount, -1, journal);
// It's not really necessary to clear the reserve flag, since the line
// is about to be deleted, but this will make the metadata reflect an
@@ -1562,8 +1489,7 @@ removeEmptyHolding(
auto sleHighAccount =
view.peek(keylet::account(line->at(sfHighLimit)->getIssuer()));
if (!sleHighAccount)
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
adjustOwnerCount(view, sleHighAccount, -1, journal);
// It's not really necessary to clear the reserve flag, since the line
// is about to be deleted, but this will make the metadata reflect an
@@ -1623,7 +1549,7 @@ trustDelete(
sleRippleState->key(),
false))
{
return tefBAD_LEDGER; // LCOV_EXCL_LINE
return tefBAD_LEDGER;
}
JLOG(j.trace()) << "trustDelete: Deleting ripple line: high";
@@ -1634,7 +1560,7 @@ trustDelete(
sleRippleState->key(),
false))
{
return tefBAD_LEDGER; // LCOV_EXCL_LINE
return tefBAD_LEDGER;
}
JLOG(j.trace()) << "trustDelete: Deleting ripple line: state";
@@ -1660,7 +1586,7 @@ offerDelete(ApplyView& view, std::shared_ptr<SLE> const& sle, beast::Journal j)
offerIndex,
false))
{
return tefBAD_LEDGER; // LCOV_EXCL_LINE
return tefBAD_LEDGER;
}
if (!view.dirRemove(
@@ -1669,7 +1595,7 @@ offerDelete(ApplyView& view, std::shared_ptr<SLE> const& sle, beast::Journal j)
offerIndex,
false))
{
return tefBAD_LEDGER; // LCOV_EXCL_LINE
return tefBAD_LEDGER;
}
if (sle->isFieldPresent(sfAdditionalBooks))
@@ -1833,7 +1759,7 @@ rippleCreditIOU(
auto const sleAccount = view.peek(keylet::account(uReceiverID));
if (!sleAccount)
return tefINTERNAL; // LCOV_EXCL_LINE
return tefINTERNAL;
bool const noRipple = (sleAccount->getFlags() & lsfDefaultRipple) == 0;
@@ -1923,16 +1849,14 @@ accountSendIOU(
{
if (saAmount < beast::zero || saAmount.holds<MPTIssue>())
{
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
}
}
else
{
// LCOV_EXCL_START
XRPL_ASSERT(
saAmount >= beast::zero && !saAmount.holds<MPTIssue>(),
"ripple::accountSendIOU : minimum amount and not MPT");
// LCOV_EXCL_STOP
}
/* If we aren't sending anything or if the sender is the same as the
@@ -1989,10 +1913,8 @@ accountSendIOU(
{
// VFALCO Its laborious to have to mutate the
// TER based on params everywhere
// LCOV_EXCL_START
terResult = view.open() ? TER{telFAILED_PROCESSING}
: TER{tecFAILED_PROCESSING};
// LCOV_EXCL_STOP
}
else
{
@@ -2079,7 +2001,7 @@ rippleCreditMPT(
view.update(sleIssuance);
}
else
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
}
else
{
@@ -2339,7 +2261,7 @@ issueIOU(
auto const receiverAccount = view.peek(keylet::account(account));
if (!receiverAccount)
return tefINTERNAL; // LCOV_EXCL_LINE
return tefINTERNAL;
bool noRipple = (receiverAccount->getFlags() & lsfDefaultRipple) == 0;
@@ -2427,13 +2349,11 @@ redeemIOU(
// In order to hold an IOU, a trust line *MUST* exist to track the
// balance. If it doesn't, then something is very wrong. Don't try
// to continue.
// LCOV_EXCL_START
JLOG(j.fatal()) << "redeemIOU: " << to_string(account)
<< " attempts to redeem " << amount.getFullText()
<< " but no trust line exists!";
return tefINTERNAL;
// LCOV_EXCL_STOP
}
TER
@@ -2453,7 +2373,7 @@ transferXRP(
SLE::pointer const sender = view.peek(keylet::account(from));
SLE::pointer const receiver = view.peek(keylet::account(to));
if (!sender || !receiver)
return tefINTERNAL; // LCOV_EXCL_LINE
return tefINTERNAL;
JLOG(j.trace()) << "transferXRP: " << to_string(from) << " -> "
<< to_string(to) << ") : " << amount.getFullText();
@@ -2463,10 +2383,8 @@ transferXRP(
// VFALCO Its unfortunate we have to keep
// mutating these TER everywhere
// FIXME: this logic should be moved to callers maybe?
// LCOV_EXCL_START
return view.open() ? TER{telFAILED_PROCESSING}
: TER{tecFAILED_PROCESSING};
// LCOV_EXCL_STOP
}
// Decrement XRP balance.
@@ -2697,8 +2615,7 @@ enforceMPTokenAuthorization(
UNREACHABLE(
"ripple::enforceMPTokenAuthorization : condition list is incomplete");
return tefINTERNAL;
// LCOV_EXCL_STOP
}
} // LCOV_EXCL_STOP
TER
canTransfer(
@@ -2747,13 +2664,11 @@ cleanupOnAccountDelete(
if (!sleItem)
{
// Directory node has an invalid index. Bail out.
// LCOV_EXCL_START
JLOG(j.fatal())
<< "DeleteAccount: Directory node in ledger " << view.seq()
<< " has index to object that is missing: "
<< to_string(dirEntry);
return tefBAD_LEDGER;
// LCOV_EXCL_STOP
}
LedgerEntryType const nodeType{safe_cast<LedgerEntryType>(
@@ -2786,11 +2701,9 @@ cleanupOnAccountDelete(
"ripple::cleanupOnAccountDelete : minimum dir entries");
if (uDirEntry == 0)
{
// LCOV_EXCL_START
JLOG(j.error())
<< "DeleteAccount iterator re-validation failed.";
return tefBAD_LEDGER;
// LCOV_EXCL_STOP
}
if (skipEntry == SkipEntry::No)
uDirEntry--;
@@ -2810,7 +2723,7 @@ deleteAMMTrustLine(
beast::Journal j)
{
if (!sleState || sleState->getType() != ltRIPPLE_STATE)
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
auto const& [low, high] = std::minmax(
sleState->getFieldAmount(sfLowLimit).getIssuer(),
@@ -2818,14 +2731,13 @@ deleteAMMTrustLine(
auto sleLow = view.peek(keylet::account(low));
auto sleHigh = view.peek(keylet::account(high));
if (!sleLow || !sleHigh)
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
bool const ammLow = sleLow->isFieldPresent(sfAMMID);
bool const ammHigh = sleHigh->isFieldPresent(sfAMMID);
// can't both be AMM
if (ammLow && ammHigh)
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
// at least one must be
if (!ammLow && !ammHigh)
@@ -2845,7 +2757,7 @@ deleteAMMTrustLine(
auto const uFlags = !ammLow ? lsfLowReserve : lsfHighReserve;
if (!(sleState->getFlags() & uFlags))
return tecINTERNAL; // LCOV_EXCL_LINE
return tecINTERNAL;
adjustOwnerCount(view, !ammLow ? sleLow : sleHigh, -1, j);
@@ -3153,7 +3065,7 @@ rippleUnlockEscrowMPT(
{ // LCOV_EXCL_START
JLOG(j.error())
<< "rippleUnlockEscrowMPT: MPToken not found for " << receiver;
return tecOBJECT_NOT_FOUND;
return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE
} // LCOV_EXCL_STOP
auto current = sle->getFieldU64(sfMPTAmount);

View File

@@ -24,6 +24,7 @@
#include <xrpl/net/HTTPClientSSLContext.h>
#include <boost/asio.hpp>
#include <boost/asio/ip/resolver_query_base.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ssl.hpp>
#include <boost/regex.hpp>
@@ -55,16 +56,16 @@ class HTTPClientImp : public std::enable_shared_from_this<HTTPClientImp>,
{
public:
HTTPClientImp(
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
unsigned short const port,
std::size_t maxResponseSize,
beast::Journal& j)
: mSocket(io_service, httpClientSSLContext->context())
, mResolver(io_service)
: mSocket(io_context, httpClientSSLContext->context())
, mResolver(io_context)
, mHeader(maxClientHeaderBytes)
, mPort(port)
, maxResponseSize_(maxResponseSize)
, mDeadline(io_service)
, mDeadline(io_context)
, j_(j)
{
}
@@ -146,18 +147,21 @@ public:
{
JLOG(j_.trace()) << "Fetch: " << mDeqSites[0];
auto query = std::make_shared<boost::asio::ip::tcp::resolver::query>(
auto query = std::make_shared<Query>(
mDeqSites[0],
std::to_string(mPort),
boost::asio::ip::resolver_query_base::numeric_service);
mQuery = query;
mDeadline.expires_from_now(mTimeout, mShutdown);
JLOG(j_.trace()) << "expires_from_now: " << mShutdown.message();
if (!mShutdown)
try
{
mDeadline.expires_after(mTimeout);
}
catch (boost::system::system_error const& e)
{
mShutdown = e.code();
JLOG(j_.trace()) << "expires_after: " << mShutdown.message();
mDeadline.async_wait(std::bind(
&HTTPClientImp::handleDeadline,
shared_from_this(),
@@ -169,7 +173,9 @@ public:
JLOG(j_.trace()) << "Resolving: " << mDeqSites[0];
mResolver.async_resolve(
*mQuery,
mQuery->host,
mQuery->port,
mQuery->flags,
std::bind(
&HTTPClientImp::handleResolve,
shared_from_this(),
@@ -233,7 +239,7 @@ public:
void
handleResolve(
boost::system::error_code const& ecResult,
boost::asio::ip::tcp::resolver::iterator itrEndpoint)
boost::asio::ip::tcp::resolver::results_type result)
{
if (!mShutdown)
{
@@ -255,7 +261,7 @@ public:
boost::asio::async_connect(
mSocket.lowest_layer(),
itrEndpoint,
result,
std::bind(
&HTTPClientImp::handleConnect,
shared_from_this(),
@@ -377,7 +383,7 @@ public:
static boost::regex reStatus{
"\\`HTTP/1\\S+ (\\d{3}) .*\\'"}; // HTTP/1.1 200 OK
static boost::regex reSize{
"\\`.*\\r\\nContent-Length:\\s+([0-9]+).*\\'", boost::regex::icase};
"\\`.*\\r\\nContent-Length:\\s+([0-9]+).*\\'"};
static boost::regex reBody{"\\`.*\\r\\n\\r\\n(.*)\\'"};
boost::smatch smMatch;
@@ -475,13 +481,15 @@ public:
std::string const& strData = "")
{
boost::system::error_code ecCancel;
(void)mDeadline.cancel(ecCancel);
if (ecCancel)
try
{
JLOG(j_.trace()) << "invokeComplete: Deadline cancel error: "
<< ecCancel.message();
mDeadline.cancel();
}
catch (boost::system::system_error const& e)
{
JLOG(j_.trace())
<< "invokeComplete: Deadline cancel error: " << e.what();
ecCancel = e.code();
}
JLOG(j_.debug()) << "invokeComplete: Deadline popping: "
@@ -515,7 +523,15 @@ private:
bool mSSL;
AutoSocket mSocket;
boost::asio::ip::tcp::resolver mResolver;
std::shared_ptr<boost::asio::ip::tcp::resolver::query> mQuery;
struct Query
{
std::string host;
std::string port;
boost::asio::ip::resolver_query_base::flags flags;
};
std::shared_ptr<Query> mQuery;
boost::asio::streambuf mRequest;
boost::asio::streambuf mHeader;
boost::asio::streambuf mResponse;
@@ -546,7 +562,7 @@ private:
void
HTTPClient::get(
bool bSSL,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
std::deque<std::string> deqSites,
unsigned short const port,
std::string const& strPath,
@@ -559,14 +575,14 @@ HTTPClient::get(
beast::Journal& j)
{
auto client =
std::make_shared<HTTPClientImp>(io_service, port, responseMax, j);
std::make_shared<HTTPClientImp>(io_context, port, responseMax, j);
client->get(bSSL, deqSites, strPath, timeout, complete);
}
void
HTTPClient::get(
bool bSSL,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
std::string strSite,
unsigned short const port,
std::string const& strPath,
@@ -581,14 +597,14 @@ HTTPClient::get(
std::deque<std::string> deqSites(1, strSite);
auto client =
std::make_shared<HTTPClientImp>(io_service, port, responseMax, j);
std::make_shared<HTTPClientImp>(io_context, port, responseMax, j);
client->get(bSSL, deqSites, strPath, timeout, complete);
}
void
HTTPClient::request(
bool bSSL,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
std::string strSite,
unsigned short const port,
std::function<void(boost::asio::streambuf& sb, std::string const& strHost)>
@@ -604,7 +620,7 @@ HTTPClient::request(
std::deque<std::string> deqSites(1, strSite);
auto client =
std::make_shared<HTTPClientImp>(io_service, port, responseMax, j);
std::make_shared<HTTPClientImp>(io_context, port, responseMax, j);
client->request(bSSL, deqSites, setRequest, timeout, complete);
}

View File

@@ -36,7 +36,7 @@ namespace BuildInfo {
// and follow the format described at http://semver.org/
//------------------------------------------------------------------------------
// clang-format off
char const* const versionString = "3.0.0-rc1"
char const* const versionString = "2.6.0"
// clang-format on
#if defined(DEBUG) || defined(SANITIZER)

Some files were not shown because too many files have changed in this diff.