Compare commits

..

8 Commits

Author SHA1 Message Date
Bart 9de4bcac05 Clean up scripts 2025-12-23 10:08:44 -05:00
Bart 90234d1fd0 Improve readme, reduce number of configs to build 2025-12-23 10:04:43 -05:00
Bart 2543f2eb58 Exclude scripts and example code from Codecov 2025-12-22 20:23:57 -05:00
Bart 5c3eaa5101 Fix missing imports 2025-12-22 19:43:30 -05:00
Bart 6dfaeb11bc Skip Clang 20+ on ARM, reduce macOS and Windows builds, temporarily run all 2025-12-22 19:42:16 -05:00
Bart 01d57c9aa3 Update image SHA for Debian and RHEL, pass JSON array as string 2025-12-22 18:58:19 -05:00
Bart c115b77970 Reusable workflows do not support choice 2025-12-22 18:48:30 -05:00
Bart 05a76895ad ci: Support more flexible strategy matrix generation 2025-12-22 16:52:45 -05:00
345 changed files with 4136 additions and 21264 deletions

View File

@@ -32,7 +32,9 @@ parsers:
slack_app: false
ignore:
- "src/test/"
- "src/tests/"
- ".github/scripts/"
- "include/xrpl/beast/test/"
- "include/xrpl/beast/unit_test/"
- "src/test/"
- "src/tests/"
- "tests/"

View File

@@ -1,275 +0,0 @@
ignorePaths:
- build/**
- src/libxrpl/crypto
- src/test/** # Will be removed in the future
- CMakeUserPresets.json
- Doxyfile
- docs/**/*.puml
- cmake/**
- LICENSE.md
language: en
allowCompoundWords: true
ignoreRandomStrings: true
minWordLength: 5
dictionaries:
- cpp
- en_US
- en_GB
ignoreRegExpList:
- /[rs][1-9A-HJ-NP-Za-km-z]{25,34}/g # addresses and seeds
- /(XRPL|BEAST)_[A-Z_0-9]+_H_INCLUDED+/g # include guards
- /(XRPL|BEAST)_[A-Z_0-9]+_H+/g # include guards
- /::[a-z:_]+/g # things from other namespaces
- /lib[a-z]+/g # libraries
- /[0-9]{4}-[0-9]{2}-[0-9]{2}[,:][A-Za-zÀ-ÖØ-öø-ÿ.\s]+/g # copyright dates
- /[0-9]{4}[,:]?\s*[A-Za-zÀ-ÖØ-öø-ÿ.\s]+/g # copyright years
- /\[[A-Za-z0-9-]+\]\(https:\/\/github.com\/[A-Za-z0-9-]+\)/g # Github usernames
- /-[DWw][a-zA-Z0-9_-]+=/g # compile flags
- /[\['"`]-[DWw][a-zA-Z0-9_-]+['"`\]]/g # compile flags
suggestWords:
- xprl->xrpl
- unsynched->unsynced
- synched->synced
- synch->sync
words:
- abempty
- AMMID
- amt
- amts
- asnode
- asynchrony
- attestation
- authorises
- autobridge
- autobridged
- autobridging
- bimap
- bindir
- bookdir
- Bougalis
- Britto
- Btrfs
- canonicality
- checkme
- chrono
- citardauq
- clawback
- clawbacks
- coeffs
- coldwallet
- compr
- conanfile
- conanrun
- connectability
- coro
- coros
- cowid
- cryptocondition
- cryptoconditional
- cryptoconditions
- csprng
- ctid
- currenttxhash
- daria
- dcmake
- dearmor
- deleteme
- demultiplexer
- deserializaton
- desync
- desynced
- determ
- distro
- doxyfile
- dxrpl
- endmacro
- exceptioned
- Falco
- finalizers
- firewalled
- fmtdur
- funclets
- gcov
- gcovr
- ghead
- Gnutella
- gpgcheck
- gpgkey
- hotwallet
- ifndef
- inequation
- insuf
- insuff
- iou
- ious
- isrdc
- jemalloc
- jlog
- keylet
- keylets
- keyvadb
- ledgerentry
- ledgerhash
- ledgerindex
- leftw
- legleux
- levelization
- levelized
- libpb
- libxrpl
- llection
- LOCALGOOD
- logwstream
- lseq
- lsmf
- ltype
- MEMORYSTATUSEX
- Merkle
- Metafuncton
- misprediction
- mptbalance
- mptflags
- mptid
- mptissuance
- mptissuanceid
- mptoken
- mptokenid
- mptokenissuance
- mptokens
- mpts
- multisig
- multisign
- multisigned
- Nakamoto
- nftid
- nftoffer
- nftoken
- nftokenid
- nftokenpages
- nftokens
- nftpage
- nikb
- nonxrp
- noripple
- nudb
- nullptr
- nunl
- Nyffenegger
- ostr
- partitioner
- paychan
- paychans
- permdex
- perminute
- permissioned
- pointee
- preauth
- preauthorization
- preauthorize
- preauthorizes
- preclaim
- protobuf
- protos
- ptrs
- pyenv
- qalloc
- queuable
- Raphson
- replayer
- rerere
- retriable
- RIPD
- ripdtop
- rippleci
- rippled
- ripplerpc
- rippletest
- RLUSD
- rngfill
- rocksdb
- Rohrs
- roundings
- sahyadri
- Satoshi
- secp
- sendq
- seqit
- sf
- shamap
- shamapitem
- sidechain
- SIGGOOD
- sle
- sles
- soci
- socidb
- sslws
- statsd
- STATSDCOLLECTOR
- stissue
- stnum
- stobj
- stobject
- stpath
- stpathset
- sttx
- stvar
- stvector
- stxchainattestations
- superpeer
- superpeers
- takergets
- takerpays
- ters
- TMEndpointv2
- trixie
- tx
- txid
- txids
- txjson
- txn
- txns
- txs
- umant
- unacquired
- unambiguity
- unauthorizes
- unauthorizing
- unergonomic
- unfetched
- unflatten
- unfund
- unimpair
- unroutable
- unscalable
- unserviced
- unshareable
- unshares
- unsquelch
- unsquelched
- unsquelching
- unvalidated
- unveto
- unvetoed
- upvotes
- USDB
- variadics
- venv
- vfalco
- vinnie
- wasmi
- wextra
- wptr
- writeme
- wsrch
- wthread
- xbridge
- xchain
- ximinez
- XMACRO
- xrpkuwait
- xrpl
- xrpld
- xrplf
- xxhash
- xxhasher

.gitattributes vendored
View File

@@ -1,6 +1,5 @@
# Set default behaviour, in case users don't have core.autocrlf set.
#* text=auto
# cspell: disable
# Visual Studio
*.sln text eol=crlf

.github/CODEOWNERS vendored
View File

@@ -1,2 +1,8 @@
# Allow anyone to review any change by default.
*
# Require the rpc-reviewers team to review changes to the rpc code.
include/xrpl/protocol/ @xrplf/rpc-reviewers
src/libxrpl/protocol/ @xrplf/rpc-reviewers
src/xrpld/rpc/ @xrplf/rpc-reviewers
src/xrpld/app/misc/ @xrplf/rpc-reviewers

View File

@@ -11,6 +11,12 @@ runs:
echo 'Checking environment variables.'
set
echo 'Checking CMake version.'
cmake --version
echo 'Checking Conan version.'
conan --version
- name: Check configuration (Linux and macOS)
if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }}
shell: bash
@@ -21,23 +27,17 @@ runs:
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking compiler version.'
${{ runner.os == 'Linux' && '${CC}' || 'clang' }} --version
echo 'Checking Conan version.'
conan --version
echo 'Checking Ninja version.'
ninja --version
echo 'Checking nproc version.'
nproc --version
- name: Check configuration (all)
shell: bash
run: |
echo 'Checking Ccache version.'
ccache --version
echo 'Checking CMake version.'
cmake --version
echo 'Checking Conan version.'
conan --version

View File

@@ -81,10 +81,10 @@ It generates many files of [results](results):
- `rawincludes.txt`: The raw dump of the `#includes`
- `paths.txt`: A second dump grouping the source module
to the destination module, de-duped, and with frequency counts.
to the destination module, deduped, and with frequency counts.
- `includes/`: A directory where each file represents a module and
contains a list of modules and counts that the module _includes_.
- `included_by/`: Similar to `includes/`, but the other way around. Each
- `includedby/`: Similar to `includes/`, but the other way around. Each
file represents a module and contains a list of modules and counts
that _include_ the module.
- [`loops.txt`](results/loops.txt): A list of direct loops detected

View File

@@ -29,7 +29,7 @@ pushd results
oldifs=${IFS}
IFS=:
mkdir includes
mkdir included_by
mkdir includedby
echo Build levelization paths
exec 3< ${includes} # open rawincludes.txt for input
while read -r -u 3 file include
@@ -59,7 +59,7 @@ do
echo $level $includelevel | tee -a paths.txt
fi
done
echo Sort and deduplicate paths
echo Sort and dedup paths
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
mv sortedpaths.txt paths.txt
exec 3>&- #close fd 3
@@ -71,7 +71,7 @@ exec 4<paths.txt # open paths.txt for input
while read -r -u 4 count level include
do
echo ${include} ${count} | tee -a includes/${level}
echo ${level} ${count} | tee -a included_by/${include}
echo ${level} ${count} | tee -a includedby/${include}
done
exec 4>&- #close fd 4

View File

@@ -19,7 +19,7 @@ run from the repository root.
1. `.github/scripts/rename/definitions.sh`: This script will rename all
definitions, such as include guards, from `RIPPLE_XXX` and `RIPPLED_XXX` to
`XRPL_XXX`.
2. `.github/scripts/rename/copyright.sh`: This script will remove superflous
2. `.github/scripts/rename/copyright.sh`: This script will remove superfluous
copyright notices.
3. `.github/scripts/rename/cmake.sh`: This script will rename all CMake files
from `RippleXXX.cmake` or `RippledXXX.cmake` to `XrplXXX.cmake`, and any
@@ -31,9 +31,6 @@ run from the repository root.
the `xrpld` binary.
5. `.github/scripts/rename/namespace.sh`: This script will rename the C++
namespaces from `ripple` to `xrpl`.
6. `.github/scripts/rename/config.sh`: This script will rename the config from
`rippled.cfg` to `xrpld.cfg`, and updating the code accordingly. The old
filename will still be accepted.
You can run all these scripts from the repository root as follows:
@@ -43,5 +40,4 @@ You can run all these scripts from the repository root as follows:
./.github/scripts/rename/cmake.sh .
./.github/scripts/rename/binary.sh .
./.github/scripts/rename/namespace.sh .
./.github/scripts/rename/config.sh .
```

View File

@@ -1,72 +0,0 @@
#!/bin/bash
# Exit the script as soon as an error occurs.
set -e
# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
if ! command -v gsed &> /dev/null; then
echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
exit 1
fi
SED_COMMAND=gsed
fi
# This script renames the config from `rippled.cfg` to `xrpld.cfg`, and updates
# the code accordingly. The old filename will still be accepted.
# Usage: .github/scripts/rename/config.sh <repository directory>
if [ "$#" -ne 1 ]; then
echo "Usage: $0 <repository directory>"
exit 1
fi
DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
echo "Error: Directory '${DIRECTORY}' does not exist."
exit 1
fi
pushd ${DIRECTORY}
# Add the xrpld.cfg to the .gitignore.
if ! grep -q 'xrpld.cfg' .gitignore; then
${SED_COMMAND} -i '/rippled.cfg/a\
/xrpld.cfg' .gitignore
fi
# Rename the files.
if [ -e rippled.cfg ]; then
mv rippled.cfg xrpld.cfg
fi
if [ -e cfg/rippled-example.cfg ]; then
mv cfg/rippled-example.cfg cfg/xrpld-example.cfg
fi
# Rename inside the files.
DIRECTORIES=("cfg" "cmake" "include" "src")
for DIRECTORY in "${DIRECTORIES[@]}"; do
echo "Processing directory: ${DIRECTORY}"
find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" -o -name "*.cmake" -o -name "*.txt" -o -name "*.cfg" -o -name "*.md" \) | while read -r FILE; do
echo "Processing file: ${FILE}"
${SED_COMMAND} -i -E 's/rippled(-example)?[ .]cfg/xrpld\1.cfg/g' "${FILE}"
done
done
${SED_COMMAND} -i 's/rippled/xrpld/g' cfg/xrpld-example.cfg
${SED_COMMAND} -i 's/rippled/xrpld/g' src/test/core/Config_test.cpp
${SED_COMMAND} -i 's/ripplevalidators/xrplvalidators/g' src/test/core/Config_test.cpp # cspell: disable-line
${SED_COMMAND} -i 's/rippleConfig/xrpldConfig/g' src/test/core/Config_test.cpp
${SED_COMMAND} -i 's@ripple/@xrpld/@g' src/test/core/Config_test.cpp
${SED_COMMAND} -i 's/Rippled/File/g' src/test/core/Config_test.cpp
# Restore the old config file name in the code that maintains support for now.
${SED_COMMAND} -i 's/configLegacyName = "xrpld.cfg"/configLegacyName = "rippled.cfg"/g' src/xrpld/core/detail/Config.cpp
# Restore an URL.
${SED_COMMAND} -i 's/connect-your-xrpld-to-the-xrp-test-net.html/connect-your-rippled-to-the-xrp-test-net.html/g' cfg/xrpld-example.cfg
popd
echo "Renaming complete."

View File

@@ -50,11 +50,11 @@ for DIRECTORY in "${DIRECTORIES[@]}"; do
# Handle the cases where the copyright notice is enclosed in /* ... */
# and usually surrounded by //---- and //======.
${SED_COMMAND} -z -i -E 's@^//-------+\n+@@' "${FILE}"
${SED_COMMAND} -z -i -E 's@^.*Copyright.+(Ripple|Bougalis|Falco|Hinnant|Null|Ritchford|XRPLF).+PERFORMANCE OF THIS SOFTWARE\.\n\*/\n+@@' "${FILE}" # cspell: ignore Bougalis Falco Hinnant Ritchford
${SED_COMMAND} -z -i -E 's@^.*Copyright.+(Ripple|Bougalis|Falco|Hinnant|Null|Ritchford|XRPLF).+PERFORMANCE OF THIS SOFTWARE\.\n\*/\n+@@' "${FILE}"
${SED_COMMAND} -z -i -E 's@^//=======+\n+@@' "${FILE}"
# Handle the cases where the copyright notice is commented out with //.
${SED_COMMAND} -z -i -E 's@^//\n// Copyright.+Falco \(vinnie dot falco at gmail dot com\)\n//\n+@@' "${FILE}" # cspell: ignore Vinnie Falco
${SED_COMMAND} -z -i -E 's@^//\n// Copyright.+Falco \(vinnie dot falco at gmail dot com\)\n//\n+@@' "${FILE}"
done
done
@@ -83,16 +83,16 @@ if ! grep -q 'Dev Null' src/xrpld/rpc/handlers/ValidatorInfo.cpp; then
echo -e "// Copyright (c) 2019 Dev Null Productions\n\n$(cat src/xrpld/rpc/handlers/ValidatorInfo.cpp)" > src/xrpld/rpc/handlers/ValidatorInfo.cpp
fi
if ! grep -q 'Bougalis' include/xrpl/basics/SlabAllocator.h; then
echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/SlabAllocator.h)" > include/xrpl/basics/SlabAllocator.h # cspell: ignore Nikolaos Bougalis nikb
echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/SlabAllocator.h)" > include/xrpl/basics/SlabAllocator.h
fi
if ! grep -q 'Bougalis' include/xrpl/basics/spinlock.h; then
echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/spinlock.h)" > include/xrpl/basics/spinlock.h # cspell: ignore Nikolaos Bougalis nikb
echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/spinlock.h)" > include/xrpl/basics/spinlock.h
fi
if ! grep -q 'Bougalis' include/xrpl/basics/tagged_integer.h; then
echo -e "// Copyright (c) 2014, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/tagged_integer.h)" > include/xrpl/basics/tagged_integer.h # cspell: ignore Nikolaos Bougalis nikb
echo -e "// Copyright (c) 2014, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/tagged_integer.h)" > include/xrpl/basics/tagged_integer.h
fi
if ! grep -q 'Ritchford' include/xrpl/beast/utility/Zero.h; then
echo -e "// Copyright (c) 2014, Tom Ritchford <tom@swirly.com>\n\n$(cat include/xrpl/beast/utility/Zero.h)" > include/xrpl/beast/utility/Zero.h # cspell: ignore Ritchford
echo -e "// Copyright (c) 2014, Tom Ritchford <tom@swirly.com>\n\n$(cat include/xrpl/beast/utility/Zero.h)" > include/xrpl/beast/utility/Zero.h
fi
# Restore newlines and tabs in string literals in the affected file.

View File

@@ -0,0 +1,118 @@
# Strategy Matrix
The scripts in this directory will generate a strategy matrix for GitHub Actions
CI, depending on the trigger that caused the workflow to run and the platform
specified.
There are several build, test, and publish settings that can be enabled for each
configuration. The settings are combined in a Cartesian product to generate the
full matrix, while filtering out any combinations not applicable to the trigger.
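For illustration, here is a minimal sketch of that approach, with hypothetical
setting names and a made-up filter; the real logic and option names live in
`generate.py`:
```python
import itertools

# Hypothetical sketch: combine per-configuration settings into a Cartesian
# product, then drop combinations not applicable to the current trigger.
archs = ["amd64", "arm64"]
build_modes = ["unity_off", "unity_on"]
build_types = ["debug", "release"]
trigger = "commit"

matrix = [
    {"arch": arch, "build_mode": mode, "build_type": build_type}
    for arch, mode, build_type in itertools.product(archs, build_modes, build_types)
    # Illustrative filter only: pretend debug builds are skipped on PR commits.
    if not (trigger == "commit" and build_type == "debug")
]
print(matrix)
```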
## Platforms
We support three platforms: Linux, macOS, and Windows.
### Linux
We support a variety of distributions (Debian, RHEL, and Ubuntu) and compilers
(GCC and Clang) on Linux. As there are so many combinations, we don't run them
all. Instead, we focus on a few key ones for PR commits and merges, while we run
most of them on a scheduled or ad hoc basis.
Some noteworthy configurations are:
- The official release build is GCC 14 on Debian Bullseye.
  - Although we generally enable assertions in release builds, we disable them
    for the official release build.
  - We publish .deb and .rpm packages for this build, as well as a Docker image.
  - For PR commits we also publish packages and images for testing purposes.
- Antithesis instrumentation is only supported on Clang 16+ on AMD64.
  - We publish a Docker image for this build, but no packages.
- Coverage reports are generated on Bookworm with GCC 15.
  - Coverage must be enabled for both commits (to show PR coverage) and merges
    (to show default branch coverage).
Note that we try to run pipelines equally across both AMD64 and ARM64, but in
some cases we cannot build on ARM64:
- All Clang 20+ builds on ARM64 are currently skipped due to a Boost build
error.
- All RHEL builds on AMD64 are currently skipped due to a build failure that
needs further investigation.
Also note that, to create a multi-arch Docker image, we ideally build on both
AMD64 and ARM64. Both configs should therefore be triggered by the same event.
However, as the script outputs individual configs, the workflow must be able to
run both builds separately and then merge the single-arch images into a
multi-arch image afterward, as sketched below.
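As a minimal sketch of that last step, assuming the single-arch images have
already been pushed (the tag names here are illustrative, not the repository's
actual naming scheme):
```python
import subprocess

# Hypothetical sketch: merge two pushed single-arch images into one
# multi-arch tag. 'docker buildx imagetools create' builds and pushes a
# manifest list that references both images.
def merge_images(multi_tag: str, amd64_tag: str, arm64_tag: str) -> None:
    subprocess.run(
        ["docker", "buildx", "imagetools", "create",
         "-t", multi_tag, amd64_tag, arm64_tag],
        check=True,
    )

merge_images(
    "ghcr.io/example/xrpld:latest",
    "ghcr.io/example/xrpld:latest-amd64",
    "ghcr.io/example/xrpld:latest-arm64",
)
```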
### macOS
We support building on macOS, which uses the Apple Clang compiler and the ARM64
architecture. We use default settings for all builds, and don't publish any
packages or images.
### Windows
We also support building on Windows, which uses the MSVC compiler and the AMD64
architecture. While we could build on ARM64, we have not yet found a suitable
cloud machine to use as a GitHub runner. We use default settings for all builds,
and don't publish any packages or images.
## Triggers
We have four triggers that can cause the workflow to run:
- `commit`: A commit is pushed to a branch for which a pull request is open.
- `merge`: A pull request is merged.
- `label`: A label is added to a pull request.
- `schedule`: The workflow is run on a scheduled basis.
The `label` trigger is currently not used, but it is reserved for future use.
The `schedule` trigger is used to run the workflow each weekday, and is also
used for ad hoc testing via the `workflow_dispatch` event.
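These triggers correspond to a `Trigger` enum used by the scripts; a plausible
sketch is below, though the string values are an assumption here (the real
definition lives in `helpers/enums.py`):
```python
from enum import Enum

# Hypothetical sketch of the Trigger enum; member names match the usage in
# generate.py and its tests, but the string values are assumed.
class Trigger(str, Enum):
    COMMIT = "commit"
    MERGE = "merge"
    LABEL = "label"
    SCHEDULE = "schedule"
```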
### Dependencies
The pipeline that is run for the `schedule` trigger will recompile and upload
all Conan packages to the remote for each configuration that is enabled. In
case any dependencies were added or updated in a recently merged PR, they will
then be available in the remote for the following pipeline runs. It is therefore
important that all configurations that are enabled for the `commit`, `merge`,
and `label` triggers are also enabled for the `schedule` trigger. We run
additional configurations in the `schedule` trigger that are not run for the
other triggers, to get extra confidence that the codebase can compile and run on
all supported platforms.
#### Caveats
There is some nuance here, in that certain options affect the compilation of
the dependencies, while others do not. This means the same options need to be
enabled for the `schedule` trigger as for the other triggers, to ensure any
dependency changes get cached in the Conan remote. Each option's effect is
listed below (see the sketch after this list):
- Build mode (`unity`): Does not affect the dependencies.
- Build option (`coverage`, `voidstar`): Does not affect the dependencies.
- Build option (`sanitizer asan`, `sanitizer tsan`): Affects the dependencies.
- Build type (`debug`, `release`): Affects the dependencies.
- Build type (`publish`): Same effect as `release` on the dependencies.
- Test option (`reference fee`): Does not affect the dependencies.
- Publish option (`package`, `image`): Does not affect the dependencies.
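As a sketch, the list above could be captured in a simple lookup table; the
names below are illustrative, not identifiers from the actual scripts:
```python
# Hypothetical sketch: True means the option changes how the Conan
# dependencies are compiled, so it must also be covered by the `schedule`
# trigger to keep the dependency cache warm for the other triggers.
AFFECTS_DEPENDENCIES = {
    "unity": False,           # build mode
    "coverage": False,        # build option
    "voidstar": False,        # build option
    "sanitizer asan": True,   # build option
    "sanitizer tsan": True,   # build option
    "debug": True,            # build type
    "release": True,          # build type
    "publish": True,          # build type (same effect as `release`)
    "reference fee": False,   # test option
    "package": False,         # publish option
    "image": False,           # publish option
}
```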
## Usage
Our GitHub CI pipeline uses the `generate.py` script to generate the matrix for
the current workflow invocation. Naturally, the script can be run locally to
generate the matrix for testing purposes, e.g.:
```bash
python3 generate.py --platform=linux --trigger=commit
```
If you want to pretty-print the output, you can pipe it to `jq` after stripping
off the `matrix=` prefix, e.g.:
```bash
python3 generate.py --platform=linux --trigger=commit | cut -d= -f2- | jq
```
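If `jq` is not available, the same post-processing can be done in Python; a
small sketch, assuming the script prints a single `matrix=...` line:
```python
import json
import subprocess

# Run the generator and pretty-print the resulting matrix.
output = subprocess.run(
    ["python3", "generate.py", "--platform=linux", "--trigger=commit"],
    capture_output=True, text=True, check=True,
).stdout.strip()
matrix = json.loads(output.removeprefix("matrix="))
print(json.dumps(matrix, indent=2))
```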

View File

@@ -1,303 +1,211 @@
#!/usr/bin/env python3
import argparse
import dataclasses
import itertools
import json
from dataclasses import dataclass
from pathlib import Path
from collections.abc import Iterator
THIS_DIR = Path(__file__).parent.resolve()
import linux
import macos
import windows
from helpers.defs import *
from helpers.enums import *
from helpers.funcs import *
from helpers.unique import *
# The GitHub runner tags to use for the different architectures.
RUNNER_TAGS = {
Arch.LINUX_AMD64: ["self-hosted", "Linux", "X64", "heavy"],
Arch.LINUX_ARM64: ["self-hosted", "Linux", "ARM64", "heavy-arm64"],
Arch.MACOS_ARM64: ["self-hosted", "macOS", "ARM64", "mac-runner-m1"],
Arch.WINDOWS_AMD64: ["self-hosted", "Windows", "devbox"],
}
@dataclass
class Config:
architecture: list[dict]
os: list[dict]
build_type: list[str]
cmake_args: list[str]
def generate_configs(distros: list[Distro], trigger: Trigger) -> list[Config]:
"""Generate a strategy matrix for GitHub Actions CI.
Args:
distros: The distros to generate the matrix for.
trigger: The trigger that caused the workflow to run.
Returns:
list[Config]: The generated configurations.
Raises:
ValueError: If any of the required fields are empty or invalid.
TypeError: If any of the required fields are of the wrong type.
"""
configs = []
for distro in distros:
for config in generate_config_for_distro(distro, trigger):
configs.append(config)
if not is_unique(configs):
raise ValueError("configs must be a list of unique Config")
return configs
"""
Generate a strategy matrix for GitHub Actions CI.
def generate_config_for_distro(distro: Distro, trigger: Trigger) -> Iterator[Config]:
"""Generate a strategy matrix for a specific distro.
On each PR commit we will build a selection of Debian, RHEL, Ubuntu, MacOS, and
Windows configurations, while upon merge into the develop, release, or master
branches, we will build all configurations, and test most of them.
Args:
distro: The distro to generate the matrix for.
trigger: The trigger that caused the workflow to run.
We will further set additional CMake arguments as follows:
- All builds will have the `tests`, `werr`, and `xrpld` options.
- All builds will have the `wextra` option except for GCC 12 and Clang 16.
- All release builds will have the `assert` option.
- Certain Debian Bookworm configurations will change the reference fee, enable
codecov, and enable voidstar in PRs.
"""
Yields:
Config: The next configuration to build.
Raises:
ValueError: If any of the required fields are empty or invalid.
TypeError: If any of the required fields are of the wrong type.
def generate_strategy_matrix(all: bool, config: Config) -> list:
configurations = []
for architecture, os, build_type, cmake_args in itertools.product(
config.architecture, config.os, config.build_type, config.cmake_args
):
# The default CMake target is 'all' for Linux and MacOS and 'install'
# for Windows, but it can get overridden for certain configurations.
cmake_target = "install" if os["distro_name"] == "windows" else "all"
# We build and test all configurations by default, except for Windows in
# Debug, because it is too slow, as well as when code coverage is
# enabled as that mode already runs the tests.
build_only = False
if os["distro_name"] == "windows" and build_type == "Debug":
build_only = True
# Only generate a subset of configurations in PRs.
if not all:
# Debian:
# - Bookworm using GCC 13: Release and Unity on linux/amd64, set
# the reference fee to 500.
# - Bookworm using GCC 15: Debug and no Unity on linux/amd64, enable
# code coverage (which will be done below).
# - Bookworm using Clang 16: Debug and no Unity on linux/arm64,
# enable voidstar.
# - Bookworm using Clang 17: Release and no Unity on linux/amd64,
# set the reference fee to 1000.
# - Bookworm using Clang 20: Debug and Unity on linux/amd64.
if os["distro_name"] == "debian":
skip = True
if os["distro_version"] == "bookworm":
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-13"
and build_type == "Release"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "linux/amd64"
):
cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}"
skip = False
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
and build_type == "Debug"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "linux/amd64"
):
skip = False
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-16"
and build_type == "Debug"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "linux/arm64"
):
cmake_args = f"-Dvoidstar=ON {cmake_args}"
skip = False
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-17"
and build_type == "Release"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "linux/amd64"
):
cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=1000 {cmake_args}"
skip = False
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
and build_type == "Debug"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "linux/amd64"
):
skip = False
if skip:
continue
# RHEL:
# - 9 using GCC 12: Debug and Unity on linux/amd64.
# - 10 using Clang: Release and no Unity on linux/amd64.
if os["distro_name"] == "rhel":
skip = True
if os["distro_version"] == "9":
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
and build_type == "Debug"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "linux/amd64"
):
skip = False
elif os["distro_version"] == "10":
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-any"
and build_type == "Release"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "linux/amd64"
):
skip = False
if skip:
continue
# Ubuntu:
# - Jammy using GCC 12: Debug and no Unity on linux/arm64.
# - Noble using GCC 14: Release and Unity on linux/amd64.
# - Noble using Clang 18: Debug and no Unity on linux/amd64.
# - Noble using Clang 19: Release and Unity on linux/arm64.
if os["distro_name"] == "ubuntu":
skip = True
if os["distro_version"] == "jammy":
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
and build_type == "Debug"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "linux/arm64"
):
skip = False
elif os["distro_version"] == "noble":
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-14"
and build_type == "Release"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "linux/amd64"
):
skip = False
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-18"
and build_type == "Debug"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "linux/amd64"
):
skip = False
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-19"
and build_type == "Release"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "linux/arm64"
):
skip = False
if skip:
continue
# MacOS:
# - Debug and no Unity on macos/arm64.
if os["distro_name"] == "macos" and not (
build_type == "Debug"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "macos/arm64"
):
continue
# Windows:
# - Release and Unity on windows/amd64.
if os["distro_name"] == "windows" and not (
build_type == "Release"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "windows/amd64"
):
continue
# Additional CMake arguments.
cmake_args = f"{cmake_args} -Dtests=ON -Dwerr=ON -Dxrpld=ON"
if not f"{os['compiler_name']}-{os['compiler_version']}" in [
"gcc-12",
"clang-16",
]:
cmake_args = f"{cmake_args} -Dwextra=ON"
if build_type == "Release":
cmake_args = f"{cmake_args} -Dassert=ON"
# We skip all RHEL on arm64 due to a build failure that needs further
# investigation.
if os["distro_name"] == "rhel" and architecture["platform"] == "linux/arm64":
"""
for spec in distro.specs:
if trigger not in spec.triggers:
continue
# We skip all clang 20+ on arm64 due to Boost build error.
if (
f"{os['compiler_name']}-{os['compiler_version']}"
in ["clang-20", "clang-21"]
and architecture["platform"] == "linux/arm64"
):
os_name = distro.os_name
os_version = distro.os_version
compiler_name = distro.compiler_name
compiler_version = distro.compiler_version
image_sha = distro.image_sha
yield from generate_config_for_distro_spec(
os_name,
os_version,
compiler_name,
compiler_version,
image_sha,
spec,
trigger,
)
def generate_config_for_distro_spec(
os_name: str,
os_version: str,
compiler_name: str,
compiler_version: str,
image_sha: str,
spec: Spec,
trigger: Trigger,
) -> Iterator[Config]:
"""Generate a strategy matrix for a specific distro and spec.
Args:
os_name: The OS name.
os_version: The OS version.
compiler_name: The compiler name.
compiler_version: The compiler version.
image_sha: The image SHA.
spec: The spec to generate the matrix for.
trigger: The trigger that caused the workflow to run.
Yields:
Config: The next configuration to build.
"""
for trigger_, arch, build_mode, build_type in itertools.product(
spec.triggers, spec.archs, spec.build_modes, spec.build_types
):
if trigger_ != trigger:
continue
# Enable code coverage for Debian Bookworm using GCC 15 in Debug and no
# Unity on linux/amd64
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
and build_type == "Debug"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "linux/amd64"
):
cmake_args = f"-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}"
build_option = spec.build_option
test_option = spec.test_option
publish_option = spec.publish_option
# Generate a unique name for the configuration, e.g. macos-arm64-debug
# or debian-bookworm-gcc-12-amd64-release-unity.
config_name = os["distro_name"]
if (n := os["distro_version"]) != "":
config_name += f"-{n}"
if (n := os["compiler_name"]) != "":
config_name += f"-{n}"
if (n := os["compiler_version"]) != "":
config_name += f"-{n}"
config_name += (
f"-{architecture['platform'][architecture['platform'].find('/') + 1 :]}"
)
config_name += f"-{build_type.lower()}"
if "-Dcoverage=ON" in cmake_args:
config_name += "-coverage"
if "-Dunity=ON" in cmake_args:
config_name += "-unity"
# Add the configuration to the list, with the most unique fields first,
# so that they are easier to identify in the GitHub Actions UI, as long
# names get truncated.
configurations.append(
{
"config_name": config_name,
"cmake_args": cmake_args,
"cmake_target": cmake_target,
"build_only": build_only,
"build_type": build_type,
"os": os,
"architecture": architecture,
}
# Determine the configuration name.
config_name = generate_config_name(
os_name,
os_version,
compiler_name,
compiler_version,
arch,
build_type,
build_mode,
build_option,
)
return configurations
# Determine the CMake arguments.
cmake_args = generate_cmake_args(
compiler_name,
compiler_version,
build_type,
build_mode,
build_option,
test_option,
)
# Determine the CMake target.
cmake_target = generate_cmake_target(os_name, build_type)
def read_config(file: Path) -> Config:
config = json.loads(file.read_text())
if (
config["architecture"] is None
or config["os"] is None
or config["build_type"] is None
or config["cmake_args"] is None
):
raise Exception("Invalid configuration file.")
# Determine whether to enable running tests, and to create a package
# and/or image.
enable_tests, enable_package, enable_image = generate_enable_options(
os_name, build_type, publish_option
)
return Config(**config)
# Determine the image to run in, if applicable.
image = generate_image_name(
os_name,
os_version,
compiler_name,
compiler_version,
image_sha,
)
# Generate the configuration.
yield Config(
config_name=config_name,
cmake_args=cmake_args,
cmake_target=cmake_target,
build_type=("Debug" if build_type == BuildType.DEBUG else "Release"),
enable_tests=enable_tests,
enable_package=enable_package,
enable_image=enable_image,
runs_on=RUNNER_TAGS[arch],
image=image,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-a",
"--all",
help="Set to generate all configurations (generally used when merging a PR) or leave unset to generate a subset of configurations (generally used when committing to a PR).",
action="store_true",
"--platform",
"-p",
required=False,
type=Platform,
choices=list(Platform),
help="The platform to run on.",
)
parser.add_argument(
"-c",
"--config",
help="Path to the JSON file containing the strategy matrix configurations.",
required=False,
type=Path,
"--trigger",
"-t",
required=True,
type=Trigger,
choices=list(Trigger),
help="The trigger that caused the workflow to run.",
)
args = parser.parse_args()
matrix = []
if args.config is None or args.config == "":
matrix += generate_strategy_matrix(
args.all, read_config(THIS_DIR / "linux.json")
)
matrix += generate_strategy_matrix(
args.all, read_config(THIS_DIR / "macos.json")
)
matrix += generate_strategy_matrix(
args.all, read_config(THIS_DIR / "windows.json")
)
else:
matrix += generate_strategy_matrix(args.all, read_config(args.config))
# Collect the distros to generate configs for.
distros = []
if args.platform in [None, Platform.LINUX]:
distros += linux.DEBIAN_DISTROS + linux.RHEL_DISTROS + linux.UBUNTU_DISTROS
if args.platform in [None, Platform.MACOS]:
distros += macos.DISTROS
if args.platform in [None, Platform.WINDOWS]:
distros += windows.DISTROS
# Generate the strategy matrix.
print(f"matrix={json.dumps({'include': matrix})}")
# Generate the configs.
configs = generate_configs(distros, args.trigger)
# Convert the configs into the format expected by GitHub Actions.
include = []
for config in configs:
include.append(dataclasses.asdict(config))
print(f"matrix={json.dumps({'include': include})}")

View File

@@ -0,0 +1,466 @@
import pytest
from generate import *
@pytest.fixture
def macos_distro():
return Distro(
os_name="macos",
specs=[
Spec(
archs=[Arch.MACOS_ARM64],
build_modes=[BuildMode.UNITY_OFF],
build_option=BuildOption.COVERAGE,
build_types=[BuildType.RELEASE],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
],
)
@pytest.fixture
def windows_distro():
return Distro(
os_name="windows",
specs=[
Spec(
archs=[Arch.WINDOWS_AMD64],
build_modes=[BuildMode.UNITY_ON],
build_option=BuildOption.SANITIZE_ASAN,
build_types=[BuildType.DEBUG],
publish_option=PublishOption.IMAGE_ONLY,
test_option=TestOption.REFERENCE_FEE_500,
triggers=[Trigger.COMMIT, Trigger.SCHEDULE],
)
],
)
@pytest.fixture
def linux_distro():
return Distro(
os_name="debian",
os_version="bookworm",
compiler_name="clang",
compiler_version="16",
image_sha="a1b2c3d4",
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
build_modes=[BuildMode.UNITY_OFF],
build_option=BuildOption.SANITIZE_TSAN,
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.LABEL],
),
Spec(
archs=[Arch.LINUX_AMD64, Arch.LINUX_ARM64],
build_modes=[BuildMode.UNITY_OFF, BuildMode.UNITY_ON],
build_option=BuildOption.VOIDSTAR,
build_types=[BuildType.PUBLISH],
publish_option=PublishOption.PACKAGE_AND_IMAGE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT, Trigger.LABEL],
),
],
)
def test_macos_generate_config_for_distro_spec_matches_trigger(macos_distro):
trigger = Trigger.COMMIT
distro = macos_distro
result = list(
generate_config_for_distro_spec(
distro.os_name,
distro.os_version,
distro.compiler_name,
distro.compiler_version,
distro.image_sha,
distro.specs[0],
trigger,
)
)
assert result == [
Config(
config_name="macos-coverage-release-arm64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -Dassert=ON -Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0",
cmake_target="all",
build_type="Release",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["self-hosted", "macOS", "ARM64", "mac-runner-m1"],
image=None,
)
]
def test_macos_generate_config_for_distro_spec_no_match_trigger(macos_distro):
trigger = Trigger.MERGE
distro = macos_distro
result = list(
generate_config_for_distro_spec(
distro.os_name,
distro.os_version,
distro.compiler_name,
distro.compiler_version,
distro.image_sha,
distro.specs[0],
trigger,
)
)
assert result == []
def test_macos_generate_config_for_distro_matches_trigger(macos_distro):
trigger = Trigger.COMMIT
distro = macos_distro
result = list(generate_config_for_distro(distro, trigger))
assert result == [
Config(
config_name="macos-coverage-release-arm64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -Dassert=ON -Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0",
cmake_target="all",
build_type="Release",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["self-hosted", "macOS", "ARM64", "mac-runner-m1"],
image=None,
)
]
def test_macos_generate_config_for_distro_no_match_trigger(macos_distro):
trigger = Trigger.MERGE
distro = macos_distro
result = list(generate_config_for_distro(distro, trigger))
assert result == []
def test_windows_generate_config_for_distro_spec_matches_trigger(
windows_distro,
):
trigger = Trigger.COMMIT
distro = windows_distro
result = list(
generate_config_for_distro_spec(
distro.os_name,
distro.os_version,
distro.compiler_name,
distro.compiler_version,
distro.image_sha,
distro.specs[0],
trigger,
)
)
assert result == [
Config(
config_name="windows-asan-debug-unity-amd64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -Dunity=ON -DUNIT_TEST_REFERENCE_FEE=500",
cmake_target="install",
build_type="Debug",
enable_tests=False,
enable_package=False,
enable_image=True,
runs_on=["self-hosted", "Windows", "devbox"],
image=None,
)
]
def test_windows_generate_config_for_distro_spec_no_match_trigger(
windows_distro,
):
trigger = Trigger.MERGE
distro = windows_distro
result = list(
generate_config_for_distro_spec(
distro.os_name,
distro.os_version,
distro.compiler_name,
distro.compiler_version,
distro.image_sha,
distro.specs[0],
trigger,
)
)
assert result == []
def test_windows_generate_config_for_distro_matches_trigger(
windows_distro,
):
trigger = Trigger.COMMIT
distro = windows_distro
result = list(generate_config_for_distro(distro, trigger))
assert result == [
Config(
config_name="windows-asan-debug-unity-amd64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -Dunity=ON -DUNIT_TEST_REFERENCE_FEE=500",
cmake_target="install",
build_type="Debug",
enable_tests=False,
enable_package=False,
enable_image=True,
runs_on=["self-hosted", "Windows", "devbox"],
image=None,
)
]
def test_windows_generate_config_for_distro_no_match_trigger(
windows_distro,
):
trigger = Trigger.MERGE
distro = windows_distro
result = list(generate_config_for_distro(distro, trigger))
assert result == []
def test_linux_generate_config_for_distro_spec_matches_trigger(linux_distro):
trigger = Trigger.LABEL
distro = linux_distro
result = list(
generate_config_for_distro_spec(
distro.os_name,
distro.os_version,
distro.compiler_name,
distro.compiler_version,
distro.image_sha,
distro.specs[1],
trigger,
)
)
assert result == [
Config(
config_name="debian-bookworm-clang-16-voidstar-publish-amd64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dvoidstar=ON",
cmake_target="install",
build_type="Release",
enable_tests=True,
enable_package=True,
enable_image=True,
runs_on=["self-hosted", "Linux", "X64", "heavy"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
Config(
config_name="debian-bookworm-clang-16-voidstar-publish-unity-amd64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dunity=ON -Dvoidstar=ON",
cmake_target="install",
build_type="Release",
enable_tests=True,
enable_package=True,
enable_image=True,
runs_on=["self-hosted", "Linux", "X64", "heavy"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
Config(
config_name="debian-bookworm-clang-16-voidstar-publish-arm64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dvoidstar=ON",
cmake_target="install",
build_type="Release",
enable_tests=True,
enable_package=True,
enable_image=True,
runs_on=["self-hosted", "Linux", "ARM64", "heavy-arm64"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
Config(
config_name="debian-bookworm-clang-16-voidstar-publish-unity-arm64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dunity=ON -Dvoidstar=ON",
cmake_target="install",
build_type="Release",
enable_tests=True,
enable_package=True,
enable_image=True,
runs_on=["self-hosted", "Linux", "ARM64", "heavy-arm64"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
]
def test_linux_generate_config_for_distro_spec_no_match_trigger(linux_distro):
trigger = Trigger.MERGE
distro = linux_distro
result = list(
generate_config_for_distro_spec(
distro.os_name,
distro.os_version,
distro.compiler_name,
distro.compiler_version,
distro.image_sha,
distro.specs[1],
trigger,
)
)
assert result == []
def test_linux_generate_config_for_distro_matches_trigger(linux_distro):
trigger = Trigger.LABEL
distro = linux_distro
result = list(generate_config_for_distro(distro, trigger))
assert result == [
Config(
config_name="debian-bookworm-clang-16-tsan-debug-amd64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["self-hosted", "Linux", "X64", "heavy"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
Config(
config_name="debian-bookworm-clang-16-voidstar-publish-amd64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dvoidstar=ON",
cmake_target="install",
build_type="Release",
enable_tests=True,
enable_package=True,
enable_image=True,
runs_on=["self-hosted", "Linux", "X64", "heavy"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
Config(
config_name="debian-bookworm-clang-16-voidstar-publish-unity-amd64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dunity=ON -Dvoidstar=ON",
cmake_target="install",
build_type="Release",
enable_tests=True,
enable_package=True,
enable_image=True,
runs_on=["self-hosted", "Linux", "X64", "heavy"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
Config(
config_name="debian-bookworm-clang-16-voidstar-publish-arm64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dvoidstar=ON",
cmake_target="install",
build_type="Release",
enable_tests=True,
enable_package=True,
enable_image=True,
runs_on=["self-hosted", "Linux", "ARM64", "heavy-arm64"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
Config(
config_name="debian-bookworm-clang-16-voidstar-publish-unity-arm64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dunity=ON -Dvoidstar=ON",
cmake_target="install",
build_type="Release",
enable_tests=True,
enable_package=True,
enable_image=True,
runs_on=["self-hosted", "Linux", "ARM64", "heavy-arm64"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
]
def test_linux_generate_config_for_distro_no_match_trigger(linux_distro):
trigger = Trigger.MERGE
distro = linux_distro
result = list(generate_config_for_distro(distro, trigger))
assert result == []
def test_generate_configs(macos_distro, windows_distro, linux_distro):
trigger = Trigger.COMMIT
distros = [macos_distro, windows_distro, linux_distro]
result = generate_configs(distros, trigger)
assert result == [
Config(
config_name="macos-coverage-release-arm64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -Dassert=ON -Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0",
cmake_target="all",
build_type="Release",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["self-hosted", "macOS", "ARM64", "mac-runner-m1"],
image=None,
),
Config(
config_name="windows-asan-debug-unity-amd64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -Dunity=ON -DUNIT_TEST_REFERENCE_FEE=500",
cmake_target="install",
build_type="Debug",
enable_tests=False,
enable_package=False,
enable_image=True,
runs_on=["self-hosted", "Windows", "devbox"],
image=None,
),
Config(
config_name="debian-bookworm-clang-16-voidstar-publish-amd64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dvoidstar=ON",
cmake_target="install",
build_type="Release",
enable_tests=True,
enable_package=True,
enable_image=True,
runs_on=["self-hosted", "Linux", "X64", "heavy"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
Config(
config_name="debian-bookworm-clang-16-voidstar-publish-unity-amd64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dunity=ON -Dvoidstar=ON",
cmake_target="install",
build_type="Release",
enable_tests=True,
enable_package=True,
enable_image=True,
runs_on=["self-hosted", "Linux", "X64", "heavy"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
Config(
config_name="debian-bookworm-clang-16-voidstar-publish-arm64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dvoidstar=ON",
cmake_target="install",
build_type="Release",
enable_tests=True,
enable_package=True,
enable_image=True,
runs_on=["self-hosted", "Linux", "ARM64", "heavy-arm64"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
Config(
config_name="debian-bookworm-clang-16-voidstar-publish-unity-arm64",
cmake_args="-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dunity=ON -Dvoidstar=ON",
cmake_target="install",
build_type="Release",
enable_tests=True,
enable_package=True,
enable_image=True,
runs_on=["self-hosted", "Linux", "ARM64", "heavy-arm64"],
image="ghcr.io/xrplf/ci/debian-bookworm:clang-16-a1b2c3d4",
),
]
def test_generate_configs_raises_on_duplicate_configs(macos_distro):
trigger = Trigger.COMMIT
distros = [macos_distro, macos_distro]
with pytest.raises(ValueError):
generate_configs(distros, trigger)

View File

@@ -0,0 +1,190 @@
from dataclasses import dataclass, field
from helpers.enums import *
from helpers.unique import *
@dataclass
class Config:
"""Represents a configuration to include in the strategy matrix.
Raises:
ValueError: If any of the required fields are empty or invalid.
TypeError: If any of the required fields are of the wrong type.
"""
config_name: str
cmake_args: str
cmake_target: str
build_type: str
enable_tests: bool
enable_package: bool
enable_image: bool
runs_on: list[str]
image: str | None = None
def __post_init__(self):
if not self.config_name:
raise ValueError("config_name cannot be empty")
if not isinstance(self.config_name, str):
raise TypeError("config_name must be a string")
if not self.cmake_args:
raise ValueError("cmake_args cannot be empty")
if not isinstance(self.cmake_args, str):
raise TypeError("cmake_args must be a string")
if not self.cmake_target:
raise ValueError("cmake_target cannot be empty")
if not isinstance(self.cmake_target, str):
raise TypeError("cmake_target must be a string")
if self.cmake_target not in ["all", "install"]:
raise ValueError("cmake_target must be 'all' or 'install'")
if not self.build_type:
raise ValueError("build_type cannot be empty")
if not isinstance(self.build_type, str):
raise TypeError("build_type must be a string")
if self.build_type not in ["Debug", "Release"]:
raise ValueError("build_type must be 'Debug' or 'Release'")
if not isinstance(self.enable_tests, bool):
raise TypeError("enable_tests must be a boolean")
if not isinstance(self.enable_package, bool):
raise TypeError("enable_package must be a boolean")
if not isinstance(self.enable_image, bool):
raise TypeError("enable_image must be a boolean")
if not self.runs_on:
raise ValueError("runs_on cannot be empty")
if not isinstance(self.runs_on, list):
raise TypeError("runs_on must be a list")
if not all(isinstance(runner, str) for runner in self.runs_on):
raise TypeError("runs_on must be a list of strings")
if not all(self.runs_on):
raise ValueError("runs_on must be a list of non-empty strings")
if len(self.runs_on) != len(set(self.runs_on)):
raise ValueError("runs_on must be a list of unique strings")
if self.image and not isinstance(self.image, str):
raise TypeError("image must be a string")
@dataclass
class Spec:
"""Represents a specification used by a configuration.
Raises:
ValueError: If any of the required fields are empty.
TypeError: If any of the required fields are of the wrong type.
"""
archs: list[Arch] = field(
default_factory=lambda: [Arch.LINUX_AMD64, Arch.LINUX_ARM64]
)
build_option: BuildOption = BuildOption.NONE
build_modes: list[BuildMode] = field(
default_factory=lambda: [BuildMode.UNITY_OFF, BuildMode.UNITY_ON]
)
build_types: list[BuildType] = field(
default_factory=lambda: [BuildType.DEBUG, BuildType.RELEASE]
)
publish_option: PublishOption = PublishOption.NONE
test_option: TestOption = TestOption.NONE
triggers: list[Trigger] = field(
default_factory=lambda: [Trigger.COMMIT, Trigger.MERGE, Trigger.SCHEDULE]
)
def __post_init__(self):
if not self.archs:
raise ValueError("archs cannot be empty")
if not isinstance(self.archs, list):
raise TypeError("archs must be a list")
if not all(isinstance(arch, Arch) for arch in self.archs):
raise TypeError("archs must be a list of Arch")
if len(self.archs) != len(set(self.archs)):
raise ValueError("archs must be a list of unique Arch")
if not isinstance(self.build_option, BuildOption):
raise TypeError("build_option must be a BuildOption")
if not self.build_modes:
raise ValueError("build_modes cannot be empty")
if not isinstance(self.build_modes, list):
raise TypeError("build_modes must be a list")
if not all(
isinstance(build_mode, BuildMode) for build_mode in self.build_modes
):
raise TypeError("build_modes must be a list of BuildMode")
if len(self.build_modes) != len(set(self.build_modes)):
raise ValueError("build_modes must be a list of unique BuildMode")
if not self.build_types:
raise ValueError("build_types cannot be empty")
if not isinstance(self.build_types, list):
raise TypeError("build_types must be a list")
if not all(
isinstance(build_type, BuildType) for build_type in self.build_types
):
raise TypeError("build_types must be a list of BuildType")
if len(self.build_types) != len(set(self.build_types)):
raise ValueError("build_types must be a list of unique BuildType")
if not isinstance(self.publish_option, PublishOption):
raise TypeError("publish_option must be a PublishOption")
if not isinstance(self.test_option, TestOption):
raise TypeError("test_option must be a TestOption")
if not self.triggers:
raise ValueError("triggers cannot be empty")
if not isinstance(self.triggers, list):
raise TypeError("triggers must be a list")
if not all(isinstance(trigger, Trigger) for trigger in self.triggers):
raise TypeError("triggers must be a list of Trigger")
if len(self.triggers) != len(set(self.triggers)):
raise ValueError("triggers must be a list of unique Trigger")
@dataclass
class Distro:
"""Represents a Linux, Windows or macOS distribution with specifications.
Raises:
ValueError: If any of the required fields are empty.
TypeError: If any of the required fields are of the wrong type.
"""
os_name: str
os_version: str = ""
compiler_name: str = ""
compiler_version: str = ""
image_sha: str = ""
specs: list[Spec] = field(default_factory=list)
def __post_init__(self):
if not self.os_name:
raise ValueError("os_name cannot be empty")
if not isinstance(self.os_name, str):
raise TypeError("os_name must be a string")
if self.os_version and not isinstance(self.os_version, str):
raise TypeError("os_version must be a string")
if self.compiler_name and not isinstance(self.compiler_name, str):
raise TypeError("compiler_name must be a string")
if self.compiler_version and not isinstance(self.compiler_version, str):
raise TypeError("compiler_version must be a string")
if self.image_sha and not isinstance(self.image_sha, str):
raise TypeError("image_sha must be a string")
if not self.specs:
raise ValueError("specs cannot be empty")
if not isinstance(self.specs, list):
raise TypeError("specs must be a list")
if not all(isinstance(spec, Spec) for spec in self.specs):
raise TypeError("specs must be a list of Spec")
if not is_unique(self.specs):
raise ValueError("specs must be a list of unique Spec")

View File

@@ -0,0 +1,743 @@
import pytest
from helpers.defs import *
from helpers.enums import *
from helpers.funcs import *
def test_config_valid_none_image():
assert Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label"],
image=None,
)
def test_config_valid_empty_image():
assert Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="install",
build_type="Debug",
enable_tests=False,
enable_package=True,
enable_image=False,
runs_on=["label"],
image="",
)
def test_config_valid_with_image():
assert Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="install",
build_type="Release",
enable_tests=False,
enable_package=True,
enable_image=True,
runs_on=["label"],
image="image",
)
def test_config_raises_on_empty_config_name():
with pytest.raises(ValueError):
Config(
config_name="",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label"],
image="image",
)
def test_config_raises_on_wrong_config_name():
with pytest.raises(TypeError):
Config(
config_name=123,
cmake_args="-Doption=ON",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label"],
image="image",
)
def test_config_raises_on_empty_cmake_args():
with pytest.raises(ValueError):
Config(
config_name="config",
cmake_args="",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label"],
image="image",
)
def test_config_raises_on_wrong_cmake_args():
with pytest.raises(TypeError):
Config(
config_name="config",
cmake_args=123,
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label"],
image="image",
)
def test_config_raises_on_empty_cmake_target():
with pytest.raises(ValueError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label"],
image="image",
)
def test_config_raises_on_invalid_cmake_target():
with pytest.raises(ValueError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="invalid",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label"],
image="image",
)
def test_config_raises_on_wrong_cmake_target():
with pytest.raises(TypeError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target=123,
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label"],
image="image",
)
def test_config_raises_on_empty_build_type():
with pytest.raises(ValueError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label"],
image="image",
)
def test_config_raises_on_invalid_build_type():
with pytest.raises(ValueError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="invalid",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label"],
image="image",
)
def test_config_raises_on_wrong_build_type():
with pytest.raises(TypeError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type=123,
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label"],
image="image",
)
def test_config_raises_on_wrong_enable_tests():
with pytest.raises(TypeError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="Debug",
enable_tests=123,
enable_package=False,
enable_image=False,
runs_on=["label"],
image="image",
)
def test_config_raises_on_wrong_enable_package():
with pytest.raises(TypeError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=123,
enable_image=False,
runs_on=["label"],
image="image",
)
def test_config_raises_on_wrong_enable_image():
with pytest.raises(TypeError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=True,
enable_image=123,
runs_on=["label"],
image="image",
)
def test_config_raises_on_none_runs_on():
with pytest.raises(ValueError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=None,
image="image",
)
def test_config_raises_on_empty_runs_on():
with pytest.raises(ValueError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=[],
image="image",
)
def test_config_raises_on_invalid_runs_on():
with pytest.raises(ValueError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=[""],
image="image",
)
def test_config_raises_on_wrong_runs_on():
with pytest.raises(TypeError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=[123],
image="image",
)
def test_config_raises_on_duplicate_runs_on():
with pytest.raises(ValueError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label", "label"],
image="image",
)
def test_config_raises_on_wrong_image():
with pytest.raises(TypeError):
Config(
config_name="config",
cmake_args="-Doption=ON",
cmake_target="all",
build_type="Debug",
enable_tests=True,
enable_package=False,
enable_image=False,
runs_on=["label"],
image=123,
)
def test_spec_valid():
assert Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_none_archs():
with pytest.raises(ValueError):
Spec(
archs=None,
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_empty_archs():
with pytest.raises(ValueError):
Spec(
archs=[],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_wrong_archs():
with pytest.raises(TypeError):
Spec(
archs=[123],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_duplicate_archs():
with pytest.raises(ValueError):
Spec(
archs=[Arch.LINUX_AMD64, Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_wrong_build_option():
with pytest.raises(TypeError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=123,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_none_build_modes():
with pytest.raises(ValueError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=None,
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_empty_build_modes():
with pytest.raises(ValueError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_wrong_build_modes():
with pytest.raises(TypeError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[123],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_none_build_types():
with pytest.raises(ValueError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=None,
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_empty_build_types():
with pytest.raises(ValueError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_wrong_build_types():
with pytest.raises(TypeError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[123],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_duplicate_build_types():
with pytest.raises(ValueError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG, BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_wrong_publish_option():
with pytest.raises(TypeError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=123,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_wrong_test_option():
with pytest.raises(TypeError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=123,
triggers=[Trigger.COMMIT],
)
def test_spec_raises_on_none_triggers():
with pytest.raises(ValueError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=None,
)
def test_spec_raises_on_empty_triggers():
with pytest.raises(ValueError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[],
)
def test_spec_raises_on_wrong_triggers():
with pytest.raises(TypeError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[123],
)
def test_spec_raises_on_duplicate_triggers():
with pytest.raises(ValueError):
Spec(
archs=[Arch.LINUX_AMD64],
build_option=BuildOption.NONE,
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.NONE,
test_option=TestOption.NONE,
triggers=[Trigger.COMMIT, Trigger.COMMIT],
)
def test_distro_valid_none_image_sha():
assert Distro(
os_name="os_name",
os_version="os_version",
compiler_name="compiler_name",
compiler_version="compiler_version",
image_sha=None,
specs=[Spec()], # This is valid due to the default values.
)
def test_distro_valid_empty_os_compiler_image_sha():
assert Distro(
os_name="os_name",
os_version="",
compiler_name="",
compiler_version="",
image_sha="",
specs=[Spec()],
)
def test_distro_valid_with_image():
assert Distro(
os_name="os_name",
os_version="os_version",
compiler_name="compiler_name",
compiler_version="compiler_version",
image_sha="image_sha",
specs=[Spec()],
)
def test_distro_raises_on_empty_os_name():
with pytest.raises(ValueError):
Distro(
os_name="",
os_version="os_version",
compiler_name="compiler_name",
compiler_version="compiler_version",
image_sha="image_sha",
specs=[Spec()],
)
def test_distro_raises_on_wrong_os_name():
with pytest.raises(TypeError):
Distro(
os_name=123,
os_version="os_version",
compiler_name="compiler_name",
compiler_version="compiler_version",
image_sha="image_sha",
specs=[Spec()],
)
def test_distro_raises_on_wrong_os_version():
with pytest.raises(TypeError):
Distro(
os_name="os_name",
os_version=123,
compiler_name="compiler_name",
compiler_version="compiler_version",
image_sha="image_sha",
specs=[Spec()],
)
def test_distro_raises_on_wrong_compiler_name():
with pytest.raises(TypeError):
Distro(
os_name="os_name",
os_version="os_version",
compiler_name=123,
compiler_version="compiler_version",
image_sha="image_sha",
specs=[Spec()],
)
def test_distro_raises_on_wrong_compiler_version():
with pytest.raises(TypeError):
Distro(
os_name="os_name",
os_version="os_version",
compiler_name="compiler_name",
compiler_version=123,
image_sha="image_sha",
specs=[Spec()],
)
def test_distro_raises_on_wrong_image_sha():
with pytest.raises(TypeError):
Distro(
os_name="os_name",
os_version="os_version",
compiler_name="compiler_name",
compiler_version="compiler_version",
image_sha=123,
specs=[Spec()],
)
def test_distro_raises_on_none_specs():
with pytest.raises(ValueError):
Distro(
os_name="os_name",
os_version="os_version",
compiler_name="compiler_name",
compiler_version="compiler_version",
image_sha="image_sha",
specs=None,
)
def test_distro_raises_on_empty_specs():
with pytest.raises(ValueError):
Distro(
os_name="os_name",
os_version="os_version",
compiler_name="compiler_name",
compiler_version="compiler_version",
image_sha="image_sha",
specs=[],
)
def test_distro_raises_on_invalid_specs():
with pytest.raises(ValueError):
Distro(
os_name="os_name",
os_version="os_version",
compiler_name="compiler_name",
compiler_version="compiler_version",
image_sha="image_sha",
specs=[Spec(triggers=[])],
)
def test_distro_raises_on_duplicate_specs():
with pytest.raises(ValueError):
Distro(
os_name="os_name",
os_version="os_version",
compiler_name="compiler_name",
compiler_version="compiler_version",
image_sha="image_sha",
specs=[Spec(), Spec()],
)
def test_distro_raises_on_wrong_specs():
with pytest.raises(TypeError):
Distro(
os_name="os_name",
os_version="os_version",
compiler_name="compiler_name",
compiler_version="compiler_version",
image_sha="image_sha",
specs=[123],
)

View File

@@ -0,0 +1,75 @@
from enum import StrEnum, auto
class Arch(StrEnum):
"""Represents architectures to build for."""
LINUX_AMD64 = "linux/amd64"
LINUX_ARM64 = "linux/arm64"
MACOS_ARM64 = "macos/arm64"
WINDOWS_AMD64 = "windows/amd64"
class BuildMode(StrEnum):
"""Represents whether to perform a unity or non-unity build."""
UNITY_OFF = auto()
UNITY_ON = auto()
class BuildOption(StrEnum):
"""Represents build options to enable."""
NONE = auto()
COVERAGE = auto()
SANITIZE_ASAN = (
auto()
) # Address Sanitizer, also includes Undefined Behavior Sanitizer.
SANITIZE_TSAN = (
auto()
) # Thread Sanitizer, also includes Undefined Behavior Sanitizer.
VOIDSTAR = auto()
class BuildType(StrEnum):
"""Represents the build type to use."""
DEBUG = auto()
RELEASE = auto()
PUBLISH = auto() # Release build without assertions.
class PublishOption(StrEnum):
"""Represents whether to publish a package, an image, or both."""
NONE = auto()
PACKAGE_ONLY = auto()
IMAGE_ONLY = auto()
PACKAGE_AND_IMAGE = auto()
class TestOption(StrEnum):
"""Represents test options to enable, specifically the reference fee to use."""
__test__ = False # Tell pytest to not consider this as a test class.
NONE = "" # Use the default reference fee of 10.
REFERENCE_FEE_500 = "500"
REFERENCE_FEE_1000 = "1000"
class Platform(StrEnum):
"""Represents the platform to use."""
LINUX = "linux"
MACOS = "macos"
WINDOWS = "windows"
class Trigger(StrEnum):
"""Represents the trigger that caused the workflow to run."""
COMMIT = "commit"
LABEL = "label"
MERGE = "merge"
SCHEDULE = "schedule"
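
Since the helpers below lean on these enums doubling as plain strings (note that StrEnum requires Python 3.11 or newer), a quick illustration may help; this is a sketch, not part of the diff:

# Sketch: StrEnum members compare equal to their string values, which is what
# generate_config_name relies on when deriving the "-amd64"/"-arm64" suffix.
assert Arch.LINUX_AMD64 == "linux/amd64"
assert Arch.LINUX_AMD64.value.split("/")[1] == "amd64"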

View File

@@ -0,0 +1,235 @@
from helpers.defs import *
from helpers.enums import *
def generate_config_name(
os_name: str,
os_version: str | None,
compiler_name: str | None,
compiler_version: str | None,
arch: Arch,
build_type: BuildType,
build_mode: BuildMode,
build_option: BuildOption,
) -> str:
"""Create a configuration name based on the distro details and build
attributes.
The configuration name is used as the display name in the GitHub Actions
    UI, and since GitHub truncates long names, we have to make sure the most
important information is at the beginning of the name.
Args:
os_name (str): The OS name.
        os_version (str | None): The OS version, if any.
        compiler_name (str | None): The compiler name, if any.
        compiler_version (str | None): The compiler version, if any.
arch (Arch): The architecture.
build_type (BuildType): The build type.
build_mode (BuildMode): The build mode.
build_option (BuildOption): The build option.
Returns:
str: The configuration name.
Raises:
ValueError: If the OS name is empty.
"""
if not os_name:
raise ValueError("os_name cannot be empty")
config_name = os_name
if os_version:
config_name += f"-{os_version}"
if compiler_name:
config_name += f"-{compiler_name}"
if compiler_version:
config_name += f"-{compiler_version}"
if build_option == BuildOption.COVERAGE:
config_name += "-coverage"
elif build_option == BuildOption.VOIDSTAR:
config_name += "-voidstar"
elif build_option == BuildOption.SANITIZE_ASAN:
config_name += "-asan"
elif build_option == BuildOption.SANITIZE_TSAN:
config_name += "-tsan"
if build_type == BuildType.DEBUG:
config_name += "-debug"
elif build_type == BuildType.RELEASE:
config_name += "-release"
elif build_type == BuildType.PUBLISH:
config_name += "-publish"
if build_mode == BuildMode.UNITY_ON:
config_name += "-unity"
config_name += f"-{arch.value.split('/')[1]}"
return config_name
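
For illustration, a fully specified distro resolves to a name like the one below (a sketch; the tests further down exercise the same behavior):

# Sketch: the most important fields come first, so GitHub's truncation only
# cuts off the least significant parts of the name.
assert generate_config_name(
    "debian", "bookworm", "gcc", "15",
    Arch.LINUX_AMD64, BuildType.DEBUG, BuildMode.UNITY_OFF, BuildOption.NONE,
) == "debian-bookworm-gcc-15-debug-amd64"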
def generate_cmake_args(
compiler_name: str | None,
compiler_version: str | None,
build_type: BuildType,
build_mode: BuildMode,
build_option: BuildOption,
test_option: TestOption,
) -> str:
"""Create the CMake arguments based on the build type and enabled build
options.
- All builds will have the `tests`, `werr`, and `xrpld` options.
- All builds will have the `wextra` option except for GCC 12 and Clang 16.
    - All Release builds (but not Publish builds) will have the `assert` option.
- Set the unity option if specified.
- Set the coverage option if specified.
- Set the voidstar option if specified.
- Set the reference fee if specified.
Args:
        compiler_name (str | None): The compiler name, if any.
        compiler_version (str | None): The compiler version, if any.
build_type (BuildType): The build type.
build_mode (BuildMode): The build mode.
build_option (BuildOption): The build option.
test_option (TestOption): The test option.
Returns:
str: The CMake arguments.
"""
cmake_args = "-Dtests=ON -Dwerr=ON -Dxrpld=ON"
    if f"{compiler_name}-{compiler_version}" not in [
"gcc-12",
"clang-16",
]:
cmake_args += " -Dwextra=ON"
if build_type == BuildType.RELEASE:
cmake_args += " -Dassert=ON"
if build_mode == BuildMode.UNITY_ON:
cmake_args += " -Dunity=ON"
if build_option == BuildOption.COVERAGE:
cmake_args += " -Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0"
elif build_option == BuildOption.SANITIZE_ASAN:
pass # TODO: Add ASAN-UBSAN flags.
elif build_option == BuildOption.SANITIZE_TSAN:
pass # TODO: Add TSAN-UBSAN flags.
elif build_option == BuildOption.VOIDSTAR:
cmake_args += " -Dvoidstar=ON"
if test_option != TestOption.NONE:
cmake_args += f" -DUNIT_TEST_REFERENCE_FEE={test_option.value}"
return cmake_args
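
As a worked example of the rules listed in the docstring (a sketch; see also the tests below):

# Sketch: a GCC 15 Release unity build with a custom reference fee.
assert generate_cmake_args(
    "gcc", "15", BuildType.RELEASE, BuildMode.UNITY_ON,
    BuildOption.NONE, TestOption.REFERENCE_FEE_500,
) == (
    "-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON"
    " -Dassert=ON -Dunity=ON -DUNIT_TEST_REFERENCE_FEE=500"
)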
def generate_cmake_target(os_name: str, build_type: BuildType) -> str:
"""Create the CMake target based on the build type.
    The `install` target is used for Windows and for publish builds, while
    the `all` target is used for all other configurations.
Args:
os_name (str): The OS name.
build_type (BuildType): The build type.
Returns:
str: The CMake target.
"""
if os_name == "windows" or build_type == BuildType.PUBLISH:
return "install"
return "all"
def generate_enable_options(
os_name: str,
build_type: BuildType,
publish_option: PublishOption,
) -> tuple[bool, bool, bool]:
"""Create the enable flags based on the OS name, build option, and publish
option.
We build and test all configurations by default, except for Windows in
Debug, because it is too slow.
Args:
os_name (str): The OS name.
build_type (BuildType): The build type.
publish_option (PublishOption): The publish option.
Returns:
tuple: A tuple containing the enable test, enable package, and enable image flags.
"""
    enable_tests = not (os_name == "windows" and build_type == BuildType.DEBUG)
    enable_package = publish_option in [
        PublishOption.PACKAGE_ONLY,
        PublishOption.PACKAGE_AND_IMAGE,
    ]
    enable_image = publish_option in [
        PublishOption.IMAGE_ONLY,
        PublishOption.PACKAGE_AND_IMAGE,
    ]
return enable_tests, enable_package, enable_image
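
A quick illustration of the resulting flags (a sketch):

# Sketch: Windows Debug skips the tests, everything else runs them; the
# package and image flags follow the publish option directly.
assert generate_enable_options("windows", BuildType.DEBUG, PublishOption.NONE) == (False, False, False)
assert generate_enable_options("linux", BuildType.DEBUG, PublishOption.PACKAGE_AND_IMAGE) == (True, True, True)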
def generate_image_name(
os_name: str,
os_version: str,
compiler_name: str,
compiler_version: str,
image_sha: str,
) -> str | None:
"""Create the Docker image name based on the distro details.
Args:
os_name (str): The OS name.
os_version (str): The OS version.
compiler_name (str): The compiler name.
compiler_version (str): The compiler version.
image_sha (str): The image SHA.
Returns:
str: The Docker image name or None if not applicable.
Raises:
ValueError: If any of the arguments is empty for Linux.
"""
if os_name == "windows" or os_name == "macos":
return None
if not os_name:
raise ValueError("os_name cannot be empty")
if not os_version:
raise ValueError("os_version cannot be empty")
if not compiler_name:
raise ValueError("compiler_name cannot be empty")
if not compiler_version:
raise ValueError("compiler_version cannot be empty")
if not image_sha:
raise ValueError("image_sha cannot be empty")
return f"ghcr.io/xrplf/ci/{os_name}-{os_version}:{compiler_name}-{compiler_version}-{image_sha}"

View File

@@ -0,0 +1,419 @@
import pytest
from helpers.enums import *
from helpers.funcs import *
def test_generate_config_name_a_b_c_d_debug_amd64():
assert (
generate_config_name(
"a",
"b",
"c",
"d",
Arch.LINUX_AMD64,
BuildType.DEBUG,
BuildMode.UNITY_OFF,
BuildOption.NONE,
)
== "a-b-c-d-debug-amd64"
)
def test_generate_config_name_a_b_c_release_unity_arm64():
assert (
generate_config_name(
"a",
"b",
"c",
"",
Arch.LINUX_ARM64,
BuildType.RELEASE,
BuildMode.UNITY_ON,
BuildOption.NONE,
)
== "a-b-c-release-unity-arm64"
)
def test_generate_config_name_a_b_coverage_publish_amd64():
assert (
generate_config_name(
"a",
"b",
"",
"",
Arch.LINUX_AMD64,
BuildType.PUBLISH,
BuildMode.UNITY_OFF,
BuildOption.COVERAGE,
)
== "a-b-coverage-publish-amd64"
)
def test_generate_config_name_a_asan_debug_unity_arm64():
assert (
generate_config_name(
"a",
"",
"",
"",
Arch.LINUX_ARM64,
BuildType.DEBUG,
BuildMode.UNITY_ON,
BuildOption.SANITIZE_ASAN,
)
== "a-asan-debug-unity-arm64"
)
def test_generate_config_name_a_c_tsan_release_amd64():
assert (
generate_config_name(
"a",
"",
"c",
"",
Arch.LINUX_AMD64,
BuildType.RELEASE,
BuildMode.UNITY_OFF,
BuildOption.SANITIZE_TSAN,
)
== "a-c-tsan-release-amd64"
)
def test_generate_config_name_a_d_voidstar_debug_amd64():
assert (
generate_config_name(
"a",
"",
"",
"d",
Arch.LINUX_AMD64,
BuildType.DEBUG,
BuildMode.UNITY_OFF,
BuildOption.VOIDSTAR,
)
== "a-d-voidstar-debug-amd64"
)
def test_generate_config_name_raises_on_none_os_name():
with pytest.raises(ValueError):
generate_config_name(
None,
"b",
"c",
"d",
Arch.LINUX_AMD64,
BuildType.DEBUG,
BuildMode.UNITY_OFF,
BuildOption.NONE,
)
def test_generate_config_name_raises_on_empty_os_name():
with pytest.raises(ValueError):
generate_config_name(
"",
"b",
"c",
"d",
Arch.LINUX_AMD64,
BuildType.DEBUG,
BuildMode.UNITY_OFF,
BuildOption.NONE,
)
def test_generate_cmake_args_a_b_debug():
assert (
generate_cmake_args(
"a",
"b",
BuildType.DEBUG,
BuildMode.UNITY_OFF,
BuildOption.NONE,
TestOption.NONE,
)
== "-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON"
)
def test_generate_cmake_args_gcc_12_no_wextra():
assert (
generate_cmake_args(
"gcc",
"12",
BuildType.DEBUG,
BuildMode.UNITY_OFF,
BuildOption.NONE,
TestOption.NONE,
)
== "-Dtests=ON -Dwerr=ON -Dxrpld=ON"
)
def test_generate_cmake_args_clang_16_no_wextra():
assert (
generate_cmake_args(
"clang",
"16",
BuildType.DEBUG,
BuildMode.UNITY_OFF,
BuildOption.NONE,
TestOption.NONE,
)
== "-Dtests=ON -Dwerr=ON -Dxrpld=ON"
)
def test_generate_cmake_args_a_b_release():
assert (
generate_cmake_args(
"a",
"b",
BuildType.RELEASE,
BuildMode.UNITY_OFF,
BuildOption.NONE,
TestOption.NONE,
)
== "-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -Dassert=ON"
)
def test_generate_cmake_args_a_b_publish():
assert (
generate_cmake_args(
"a",
"b",
BuildType.PUBLISH,
BuildMode.UNITY_OFF,
BuildOption.NONE,
TestOption.NONE,
)
== "-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON"
)
def test_generate_cmake_args_a_b_unity():
assert (
generate_cmake_args(
"a",
"b",
BuildType.DEBUG,
BuildMode.UNITY_ON,
BuildOption.NONE,
TestOption.NONE,
)
== "-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -Dunity=ON"
)
def test_generate_cmake_args_a_b_coverage():
assert (
generate_cmake_args(
"a",
"b",
BuildType.DEBUG,
BuildMode.UNITY_OFF,
BuildOption.COVERAGE,
TestOption.NONE,
)
== "-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0"
)
def test_generate_cmake_args_a_b_voidstar():
assert (
generate_cmake_args(
"a",
"b",
BuildType.DEBUG,
BuildMode.UNITY_OFF,
BuildOption.VOIDSTAR,
TestOption.NONE,
)
== "-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -Dvoidstar=ON"
)
def test_generate_cmake_args_a_b_reference_fee_500():
assert (
generate_cmake_args(
"a",
"b",
BuildType.DEBUG,
BuildMode.UNITY_OFF,
BuildOption.NONE,
TestOption.REFERENCE_FEE_500,
)
== "-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -DUNIT_TEST_REFERENCE_FEE=500"
)
def test_generate_cmake_args_a_b_reference_fee_1000():
assert (
generate_cmake_args(
"a",
"b",
BuildType.DEBUG,
BuildMode.UNITY_OFF,
BuildOption.NONE,
TestOption.REFERENCE_FEE_1000,
)
== "-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -DUNIT_TEST_REFERENCE_FEE=1000"
)
def test_generate_cmake_args_a_b_multiple():
assert (
generate_cmake_args(
"a",
"b",
BuildType.RELEASE,
BuildMode.UNITY_ON,
BuildOption.VOIDSTAR,
TestOption.REFERENCE_FEE_500,
)
== "-Dtests=ON -Dwerr=ON -Dxrpld=ON -Dwextra=ON -Dassert=ON -Dunity=ON -Dvoidstar=ON -DUNIT_TEST_REFERENCE_FEE=500"
)
def test_generate_cmake_target_linux_debug():
assert generate_cmake_target("linux", BuildType.DEBUG) == "all"
def test_generate_cmake_target_linux_release():
assert generate_cmake_target("linux", BuildType.RELEASE) == "all"
def test_generate_cmake_target_linux_publish():
assert generate_cmake_target("linux", BuildType.PUBLISH) == "install"
def test_generate_cmake_target_macos_debug():
assert generate_cmake_target("macos", BuildType.DEBUG) == "all"
def test_generate_cmake_target_macos_release():
assert generate_cmake_target("macos", BuildType.RELEASE) == "all"
def test_generate_cmake_target_macos_publish():
assert generate_cmake_target("macos", BuildType.PUBLISH) == "install"
def test_generate_cmake_target_windows_debug():
assert generate_cmake_target("windows", BuildType.DEBUG) == "install"
def test_generate_cmake_target_windows_release():
    assert generate_cmake_target("windows", BuildType.RELEASE) == "install"
def test_generate_cmake_target_windows_publish():
    assert generate_cmake_target("windows", BuildType.PUBLISH) == "install"
def test_generate_enable_options_linux_debug_no_publish():
assert generate_enable_options("linux", BuildType.DEBUG, PublishOption.NONE) == (
True,
False,
False,
)
def test_generate_enable_options_linux_release_package_only():
assert generate_enable_options(
"linux", BuildType.RELEASE, PublishOption.PACKAGE_ONLY
) == (True, True, False)
def test_generate_enable_options_linux_publish_image_only():
assert generate_enable_options(
"linux", BuildType.PUBLISH, PublishOption.IMAGE_ONLY
) == (True, False, True)
def test_generate_enable_options_macos_debug_package_only():
assert generate_enable_options(
"macos", BuildType.DEBUG, PublishOption.PACKAGE_ONLY
) == (True, True, False)
def test_generate_enable_options_macos_release_image_only():
assert generate_enable_options(
"macos", BuildType.RELEASE, PublishOption.IMAGE_ONLY
) == (True, False, True)
def test_generate_enable_options_macos_publish_package_and_image():
assert generate_enable_options(
"macos", BuildType.PUBLISH, PublishOption.PACKAGE_AND_IMAGE
) == (True, True, True)
def test_generate_enable_options_windows_debug_package_and_image():
assert generate_enable_options(
"windows", BuildType.DEBUG, PublishOption.PACKAGE_AND_IMAGE
) == (False, True, True)
def test_generate_enable_options_windows_release_no_publish():
assert generate_enable_options(
"windows", BuildType.RELEASE, PublishOption.NONE
) == (True, False, False)
def test_generate_enable_options_windows_publish_image_only():
assert generate_enable_options(
"windows", BuildType.PUBLISH, PublishOption.IMAGE_ONLY
) == (True, False, True)
def test_generate_image_name_linux():
assert generate_image_name("a", "b", "c", "d", "e") == "ghcr.io/xrplf/ci/a-b:c-d-e"
def test_generate_image_name_linux_raises_on_empty_os_name():
with pytest.raises(ValueError):
generate_image_name("", "b", "c", "d", "e")
def test_generate_image_name_linux_raises_on_empty_os_version():
with pytest.raises(ValueError):
generate_image_name("a", "", "c", "d", "e")
def test_generate_image_name_linux_raises_on_empty_compiler_name():
with pytest.raises(ValueError):
generate_image_name("a", "b", "", "d", "e")
def test_generate_image_name_linux_raises_on_empty_compiler_version():
with pytest.raises(ValueError):
generate_image_name("a", "b", "c", "", "e")
def test_generate_image_name_linux_raises_on_empty_image_sha():
with pytest.raises(ValueError):
generate_image_name("a", "b", "c", "e", "")
def test_generate_image_name_macos():
assert generate_image_name("macos", "", "", "", "") is None
def test_generate_image_name_macos_extra():
assert generate_image_name("macos", "value", "does", "not", "matter") is None
def test_generate_image_name_windows():
assert generate_image_name("windows", "", "", "", "") is None
def test_generate_image_name_windows_extra():
assert generate_image_name("windows", "value", "does", "not", "matter") is None

View File

@@ -0,0 +1,30 @@
import json
from dataclasses import asdict, is_dataclass
from typing import Any
def is_unique(items: list[Any]) -> bool:
"""Check if a list of dataclass objects contains only unique items.
As the items may not be hashable, we convert them to JSON strings first, and
then check if the list of strings is the same size as the set of strings.
Args:
items: The list of dataclass objects to check.
Returns:
True if the list contains only unique items, False otherwise.
Raises:
TypeError: If any of the items is not a dataclass.
"""
    as_json = list()
    unique = set()
    for item in items:
        if not is_dataclass(item) or isinstance(item, type):
            raise TypeError("items must be a list of dataclass instances")
        j = json.dumps(asdict(item))
        as_json.append(j)
        unique.add(j)
    return len(as_json) == len(unique)
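
A usage note (a sketch, with a hypothetical Point dataclass): asdict yields dicts in dataclass field order, so equal instances always serialize to identical JSON strings, which is what makes the size comparison sound:

from dataclasses import dataclass

@dataclass
class Point:  # hypothetical example type
    x: int
    y: int

assert is_unique([Point(1, 2), Point(2, 1)])      # different field values
assert not is_unique([Point(1, 2), Point(1, 2)])  # duplicate detected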

View File

@@ -0,0 +1,40 @@
from dataclasses import dataclass
import pytest
from helpers.unique import *
@dataclass
class ExampleInt:
value: int
@dataclass
class ExampleList:
values: list[int]
def test_unique_int():
assert is_unique([ExampleInt(1), ExampleInt(2), ExampleInt(3)])
def test_not_unique_int():
assert not is_unique([ExampleInt(1), ExampleInt(2), ExampleInt(1)])
def test_unique_list():
assert is_unique(
[ExampleList([1, 2, 3]), ExampleList([4, 5, 6]), ExampleList([7, 8, 9])]
)
def test_not_unique_list():
assert not is_unique(
[ExampleList([1, 2, 3]), ExampleList([4, 5, 6]), ExampleList([1, 2, 3])]
)
def test_unique_raises_on_non_dataclass():
with pytest.raises(TypeError):
is_unique([1, 2, 3])

View File

@@ -1,212 +0,0 @@
{
"architecture": [
{
"platform": "linux/amd64",
"runner": ["self-hosted", "Linux", "X64", "heavy"]
},
{
"platform": "linux/arm64",
"runner": ["self-hosted", "Linux", "ARM64", "heavy-arm64"]
}
],
"os": [
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "cc09fd3"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "cc09fd3"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "cc09fd3"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "cc09fd3"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "cc09fd3"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "cc09fd3"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "cc09fd3"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "cc09fd3"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "cc09fd3"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "cc09fd3"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "cc09fd3"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "cc09fd3"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "clang",
"compiler_version": "21",
"image_sha": "cc09fd3"
},
{
"distro_name": "rhel",
"distro_version": "8",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "cc09fd3"
},
{
"distro_name": "rhel",
"distro_version": "8",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "cc09fd3"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "cc09fd3"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "cc09fd3"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "cc09fd3"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "cc09fd3"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "cc09fd3"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "cc09fd3"
},
{
"distro_name": "ubuntu",
"distro_version": "jammy",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "cc09fd3"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "cc09fd3"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "cc09fd3"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "cc09fd3"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "cc09fd3"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "cc09fd3"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "cc09fd3"
}
],
"build_type": ["Debug", "Release"],
"cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
}

.github/scripts/strategy-matrix/linux.py vendored Executable file
View File

@@ -0,0 +1,385 @@
from helpers.defs import *
from helpers.enums import *
# The default CI image SHAs to use. They are specified per distro group and
# can be overridden for individual distros, which is useful when debugging with
# a locally built CI image. See https://github.com/XRPLF/ci for the images.
DEBIAN_SHA = "sha-ca4517d"
RHEL_SHA = "sha-ca4517d"
UBUNTU_SHA = "sha-84afd81"
# We only build a selection of configurations for the various triggers to reduce
# pipeline runtime. Across all three operating systems we aim to cover all GCC
# and Clang versions, while not duplicating configurations too much. See also
# the README for more details.
# The Debian distros to build configurations for.
#
# We have the following distros available:
# - Debian Bullseye: GCC 12-15
# - Debian Bookworm: GCC 13-15, Clang 16-20
# - Debian Trixie: GCC 14-15, Clang 20-21
DEBIAN_DISTROS = [
Distro(
os_name="debian",
os_version="bullseye",
compiler_name="gcc",
compiler_version="14",
image_sha=DEBIAN_SHA,
specs=[
Spec(
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
publish_option=PublishOption.PACKAGE_ONLY,
triggers=[Trigger.COMMIT, Trigger.LABEL],
),
Spec(
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.PUBLISH],
publish_option=PublishOption.PACKAGE_AND_IMAGE,
triggers=[Trigger.MERGE],
),
Spec(
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="debian",
os_version="bullseye",
compiler_name="gcc",
compiler_version="15",
image_sha=DEBIAN_SHA,
specs=[
Spec(
archs=[Arch.LINUX_ARM64],
build_modes=[BuildMode.UNITY_ON],
build_option=BuildOption.COVERAGE,
build_types=[BuildType.DEBUG],
triggers=[Trigger.COMMIT, Trigger.MERGE],
),
Spec(
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="debian",
os_version="bookworm",
compiler_name="gcc",
compiler_version="15",
image_sha=DEBIAN_SHA,
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="debian",
os_version="bookworm",
compiler_name="clang",
compiler_version="16",
image_sha=DEBIAN_SHA,
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
build_modes=[BuildMode.UNITY_OFF],
build_option=BuildOption.VOIDSTAR,
build_types=[BuildType.DEBUG],
publish_option=PublishOption.IMAGE_ONLY,
triggers=[Trigger.COMMIT],
),
Spec(
archs=[Arch.LINUX_ARM64],
build_modes=[BuildMode.UNITY_ON],
build_types=[BuildType.RELEASE],
triggers=[Trigger.MERGE],
),
Spec(
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="debian",
os_version="bookworm",
compiler_name="clang",
compiler_version="17",
image_sha=DEBIAN_SHA,
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="debian",
os_version="bookworm",
compiler_name="clang",
compiler_version="18",
image_sha=DEBIAN_SHA,
specs=[
Spec(
archs=[Arch.LINUX_ARM64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="debian",
os_version="bookworm",
compiler_name="clang",
compiler_version="19",
image_sha=DEBIAN_SHA,
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="debian",
os_version="trixie",
compiler_name="gcc",
compiler_version="15",
image_sha=DEBIAN_SHA,
specs=[
Spec(
archs=[Arch.LINUX_ARM64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="debian",
os_version="trixie",
compiler_name="clang",
compiler_version="21",
image_sha=DEBIAN_SHA,
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
triggers=[Trigger.MERGE],
),
Spec(
archs=[Arch.LINUX_AMD64],
triggers=[Trigger.SCHEDULE],
),
],
),
]
# The RHEL distros to build configurations for.
#
# We have the following distros available:
# - RHEL 8: GCC 14, Clang "any"
# - RHEL 9: GCC 12-14, Clang "any"
# - RHEL 10: GCC 14, Clang "any"
RHEL_DISTROS = [
Distro(
os_name="rhel",
os_version="8",
compiler_name="gcc",
compiler_version="14",
image_sha=RHEL_SHA,
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="rhel",
os_version="8",
compiler_name="clang",
compiler_version="any",
image_sha=RHEL_SHA,
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="rhel",
os_version="9",
compiler_name="gcc",
compiler_version="12",
image_sha=RHEL_SHA,
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
build_modes=[BuildMode.UNITY_ON],
build_types=[BuildType.DEBUG],
triggers=[Trigger.COMMIT],
),
Spec(
archs=[Arch.LINUX_AMD64],
build_modes=[BuildMode.UNITY_ON],
build_types=[BuildType.RELEASE],
triggers=[Trigger.MERGE],
),
Spec(
archs=[Arch.LINUX_AMD64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="rhel",
os_version="9",
compiler_name="gcc",
compiler_version="13",
image_sha=RHEL_SHA,
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="rhel",
os_version="10",
compiler_name="clang",
compiler_version="any",
image_sha=RHEL_SHA,
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
triggers=[Trigger.SCHEDULE],
),
],
),
]
# The Ubuntu distros to build configurations for.
#
# We have the following distros available:
# - Ubuntu Jammy (22.04): GCC 12
# - Ubuntu Noble (24.04): GCC 13-14, Clang 16-20
UBUNTU_DISTROS = [
Distro(
os_name="ubuntu",
os_version="jammy",
compiler_name="gcc",
compiler_version="12",
image_sha=UBUNTU_SHA,
specs=[
Spec(
archs=[Arch.LINUX_ARM64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="ubuntu",
os_version="noble",
compiler_name="gcc",
compiler_version="13",
image_sha=UBUNTU_SHA,
specs=[
Spec(
archs=[Arch.LINUX_ARM64],
build_modes=[BuildMode.UNITY_ON],
build_types=[BuildType.RELEASE],
triggers=[Trigger.MERGE],
),
Spec(
archs=[Arch.LINUX_ARM64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="ubuntu",
os_version="noble",
compiler_name="gcc",
compiler_version="14",
image_sha=UBUNTU_SHA,
specs=[
Spec(
archs=[Arch.LINUX_ARM64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="ubuntu",
os_version="noble",
compiler_name="clang",
compiler_version="17",
image_sha=UBUNTU_SHA,
specs=[
Spec(
archs=[Arch.LINUX_ARM64],
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
triggers=[Trigger.MERGE],
),
Spec(
archs=[Arch.LINUX_ARM64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="ubuntu",
os_version="noble",
compiler_name="clang",
compiler_version="18",
image_sha=UBUNTU_SHA,
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="ubuntu",
os_version="noble",
compiler_name="clang",
compiler_version="19",
image_sha=UBUNTU_SHA,
specs=[
Spec(
archs=[Arch.LINUX_ARM64],
triggers=[Trigger.SCHEDULE],
),
],
),
Distro(
os_name="ubuntu",
os_version="noble",
compiler_name="clang",
compiler_version="20",
image_sha=UBUNTU_SHA,
specs=[
Spec(
archs=[Arch.LINUX_AMD64],
build_modes=[BuildMode.UNITY_ON],
build_types=[BuildType.DEBUG],
triggers=[Trigger.COMMIT],
),
Spec(
archs=[Arch.LINUX_AMD64],
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.RELEASE],
triggers=[Trigger.MERGE],
),
Spec(
archs=[Arch.LINUX_AMD64],
triggers=[Trigger.SCHEDULE],
),
],
),
]
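
generate.py itself is not included in this compare; below is a minimal sketch of how the lists above might be narrowed down by trigger before being expanded into configurations:

# Sketch only: pick the specs that fire for a given trigger; each selected
# spec would then be expanded per arch/build_mode/build_type combination.
def specs_for(distros: list[Distro], trigger: Trigger) -> list[tuple[Distro, Spec]]:
    return [
        (distro, spec)
        for distro in distros
        for spec in distro.specs
        if trigger in spec.triggers
    ]

# On a plain commit, only three of the Debian specs above are selected.
assert len(specs_for(DEBIAN_DISTROS, Trigger.COMMIT)) == 3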

View File

@@ -1,22 +0,0 @@
{
"architecture": [
{
"platform": "macos/arm64",
"runner": ["self-hosted", "macOS", "ARM64", "mac-runner-m1"]
}
],
"os": [
{
"distro_name": "macos",
"distro_version": "",
"compiler_name": "",
"compiler_version": "",
"image_sha": ""
}
],
"build_type": ["Debug", "Release"],
"cmake_args": [
"-Dunity=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5",
"-Dunity=ON -DCMAKE_POLICY_VERSION_MINIMUM=3.5"
]
}

.github/scripts/strategy-matrix/macos.py vendored Executable file
View File

@@ -0,0 +1,20 @@
from helpers.defs import *
from helpers.enums import *
DISTROS = [
Distro(
os_name="macos",
specs=[
Spec(
archs=[Arch.MACOS_ARM64],
build_modes=[BuildMode.UNITY_OFF],
build_types=[BuildType.DEBUG],
triggers=[Trigger.COMMIT, Trigger.MERGE],
),
Spec(
archs=[Arch.MACOS_ARM64],
triggers=[Trigger.SCHEDULE],
),
],
),
]

View File

@@ -1,19 +0,0 @@
{
"architecture": [
{
"platform": "windows/amd64",
"runner": ["self-hosted", "Windows", "devbox"]
}
],
"os": [
{
"distro_name": "windows",
"distro_version": "",
"compiler_name": "",
"compiler_version": "",
"image_sha": ""
}
],
"build_type": ["Debug", "Release"],
"cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
}

.github/scripts/strategy-matrix/windows.py vendored Executable file
View File

@@ -0,0 +1,20 @@
from helpers.defs import *
from helpers.enums import *
DISTROS = [
Distro(
os_name="windows",
specs=[
Spec(
archs=[Arch.WINDOWS_AMD64],
build_modes=[BuildMode.UNITY_ON],
build_types=[BuildType.RELEASE],
triggers=[Trigger.COMMIT, Trigger.MERGE],
),
Spec(
archs=[Arch.WINDOWS_AMD64],
triggers=[Trigger.SCHEDULE],
),
],
),
]

View File

@@ -112,12 +112,10 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [linux, macos, windows]
platform: [linux, macos, windows]
with:
# Enable ccache only for events targeting the XRPLF repository, since
# other accounts will not have access to our remote cache storage.
ccache_enabled: ${{ github.repository_owner == 'XRPLF' }}
os: ${{ matrix.os }}
platform: ${{ matrix.platform }}
trigger: commit
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

View File

@@ -66,15 +66,10 @@ jobs:
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix:
os: [linux, macos, windows]
platform: [linux, macos, windows]
with:
# Enable ccache only for events targeting the XRPLF repository, since
# other accounts will not have access to our remote cache storage.
# However, we do not enable ccache for events targeting the master or a
# release branch, to protect against the rare case that the output
# produced by ccache is not identical to a regular compilation.
ccache_enabled: ${{ github.repository_owner == 'XRPLF' && !(github.base_ref == 'master' || startsWith(github.base_ref, 'release')) }}
os: ${{ matrix.os }}
strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }}
platform: ${{ matrix.platform }}
# The workflow dispatch event uses the same trigger as the schedule event.
trigger: ${{ github.event_name == 'push' && 'merge' || 'schedule' }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

View File

@@ -3,21 +3,10 @@ name: Build and test configuration
on:
workflow_call:
inputs:
build_only:
description: 'Whether to only build or to build and test the code ("true", "false").'
required: true
type: boolean
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
type: string
ccache_enabled:
description: "Whether to enable ccache."
required: false
type: boolean
default: false
required: true
cmake_args:
description: "Additional arguments to pass to CMake."
@@ -27,8 +16,23 @@ on:
cmake_target:
description: "The CMake target to build."
required: true
type: string
required: true
enable_tests:
description: "Whether to run the tests."
required: true
type: boolean
enable_package:
description: "Whether to publish a package."
required: true
type: boolean
enable_image:
description: "Whether to publish an image."
required: true
type: boolean
runs_on:
description: Runner to run the job on as a JSON string
@@ -72,25 +76,8 @@ jobs:
container: ${{ inputs.image != '' && inputs.image || null }}
timeout-minutes: 60
env:
# Use a namespace to keep the objects separate for each configuration.
CCACHE_NAMESPACE: ${{ inputs.config_name }}
# Ccache supports both Redis and HTTP endpoints.
# * For Redis, use the following format: redis://ip:port, see
# https://github.com/ccache/ccache/wiki/Redis-storage. Note that TLS is
# not directly supported by ccache, and requires use of a proxy.
# * For HTTP use the following format: http://ip:port/cache when using
# nginx as backend or http://ip:port|layout=bazel when using Bazel
# Remote Cache, see https://github.com/ccache/ccache/wiki/HTTP-storage.
# Note that HTTPS is not directly supported by ccache.
CCACHE_REMOTE_ONLY: true
CCACHE_REMOTE_STORAGE: http://cache.dev.ripplex.io:8080|layout=bazel
# Ignore the creation and modification timestamps on files, since the
# header files are copied into separate directories by CMake, which will
# otherwise result in cache misses.
CCACHE_SLOPPINESS: include_file_ctime,include_file_mtime
# Determine if coverage and voidstar should be enabled.
COVERAGE_ENABLED: ${{ contains(inputs.cmake_args, '-Dcoverage=ON') }}
VOIDSTAR_ENABLED: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
ENABLED_VOIDSTAR: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
ENABLED_COVERAGE: ${{ contains(inputs.cmake_args, '-Dcoverage=ON') }}
steps:
- name: Cleanup workspace (macOS and Windows)
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
@@ -102,11 +89,7 @@ jobs:
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6
with:
disable_ccache: ${{ !inputs.ccache_enabled }}
- name: Set ccache log file
if: ${{ inputs.ccache_enabled && runner.debug == '1' }}
run: echo "CCACHE_LOGFILE=${{ runner.temp }}/ccache.log" >> "${GITHUB_ENV}"
disable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env
@@ -155,15 +138,6 @@ jobs:
--parallel "${BUILD_NPROC}" \
--target "${CMAKE_TARGET}"
- name: Show ccache statistics
if: ${{ inputs.ccache_enabled }}
run: |
ccache --show-stats -vv
if [ '${{ runner.debug }}' = '1' ]; then
cat "${CCACHE_LOGFILE}"
curl ${CCACHE_REMOTE_STORAGE%|*}/status || true
fi
- name: Upload the binary (Linux)
if: ${{ github.repository_owner == 'XRPLF' && runner.os == 'Linux' }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
@@ -186,13 +160,13 @@ jobs:
fi
- name: Verify presence of instrumentation (Linux)
if: ${{ runner.os == 'Linux' && env.VOIDSTAR_ENABLED == 'true' }}
if: ${{ runner.os == 'Linux' && env.ENABLED_VOIDSTAR == 'true' }}
working-directory: ${{ env.BUILD_DIR }}
run: |
./xrpld --version | grep libvoidstar
- name: Run the separate tests
if: ${{ !inputs.build_only }}
if: ${{ inputs.enable_tests }}
working-directory: ${{ env.BUILD_DIR }}
# Windows locks some of the build files while running tests, and parallel jobs can collide
env:
@@ -205,7 +179,7 @@ jobs:
-j "${PARALLELISM}"
- name: Run the embedded tests
if: ${{ !inputs.build_only }}
if: ${{ inputs.enable_tests }}
working-directory: ${{ runner.os == 'Windows' && format('{0}/{1}', env.BUILD_DIR, inputs.build_type) || env.BUILD_DIR }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
@@ -213,7 +187,7 @@ jobs:
./xrpld --unittest --unittest-jobs "${BUILD_NPROC}"
- name: Debug failure (Linux)
if: ${{ failure() && runner.os == 'Linux' && !inputs.build_only }}
if: ${{ (failure() || cancelled()) && runner.os == 'Linux' && inputs.enable_tests }}
run: |
echo "IPv4 local port range:"
cat /proc/sys/net/ipv4/ip_local_port_range
@@ -221,7 +195,7 @@ jobs:
netstat -an
- name: Prepare coverage report
if: ${{ !inputs.build_only && env.COVERAGE_ENABLED == 'true' }}
if: ${{ github.repository_owner == 'XRPLF' && env.ENABLED_COVERAGE == 'true' }}
working-directory: ${{ env.BUILD_DIR }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
@@ -234,7 +208,7 @@ jobs:
--target coverage
- name: Upload coverage report
if: ${{ github.repository_owner == 'XRPLF' && !inputs.build_only && env.COVERAGE_ENABLED == 'true' }}
if: ${{ github.repository_owner == 'XRPLF' && env.ENABLED_COVERAGE == 'true' }}
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
with:
disable_search: true

View File

@@ -8,24 +8,14 @@ name: Build and test
on:
workflow_call:
inputs:
ccache_enabled:
description: "Whether to enable ccache."
platform:
description: "The platform to generate the strategy matrix for ('linux', 'macos', 'windows'). If not provided all platforms are used."
required: false
type: boolean
default: false
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
type: string
trigger:
description: "The trigger that caused the workflow to run ('commit', 'label', 'merge', 'schedule')."
required: true
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
@@ -36,8 +26,8 @@ jobs:
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
os: ${{ inputs.os }}
strategy_matrix: ${{ inputs.strategy_matrix }}
platform: ${{ inputs.platform }}
trigger: ${{ inputs.trigger }}
# Build and test the binary for each configuration.
build-test-config:
@@ -49,13 +39,14 @@ jobs:
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
with:
build_only: ${{ matrix.build_only }}
build_type: ${{ matrix.build_type }}
ccache_enabled: ${{ inputs.ccache_enabled }}
cmake_args: ${{ matrix.cmake_args }}
cmake_target: ${{ matrix.cmake_target }}
runs_on: ${{ toJSON(matrix.architecture.runner) }}
image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || '' }}
enable_tests: ${{ matrix.enable_tests }}
enable_package: ${{ matrix.enable_package }}
enable_image: ${{ matrix.enable_image }}
runs_on: ${{ toJson(matrix.runs_on) }}
image: ${{ matrix.image }}
config_name: ${{ matrix.config_name }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

View File

@@ -29,8 +29,6 @@ jobs:
run: .github/scripts/rename/binary.sh .
- name: Check namespaces
run: .github/scripts/rename/namespace.sh .
- name: Check config name
run: .github/scripts/rename/config.sh .
- name: Check for differences
env:
MESSAGE: |

View File

@@ -3,16 +3,14 @@ name: Generate strategy matrix
on:
workflow_call:
inputs:
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
platform:
description: "The platform to generate the strategy matrix for ('linux', 'macos', 'windows'). If not provided all platforms are used."
required: false
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
trigger:
description: "The trigger that caused the workflow to run ('commit', 'label', 'merge', 'schedule')."
required: true
type: string
default: "minimal"
outputs:
matrix:
description: "The generated strategy matrix."
@@ -40,6 +38,6 @@ jobs:
working-directory: .github/scripts/strategy-matrix
id: generate
env:
GENERATE_CONFIG: ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }}
GENERATE_OPTION: ${{ inputs.strategy_matrix == 'all' && '--all' || '' }}
run: ./generate.py ${GENERATE_OPTION} ${GENERATE_CONFIG} >> "${GITHUB_OUTPUT}"
PLATFORM: ${{ inputs.platform != '' && format('--platform={0}', inputs.platform) || '' }}
TRIGGER: ${{ format('--trigger={0}', inputs.trigger) }}
run: ./generate.py ${PLATFORM} ${TRIGGER} >> "${GITHUB_OUTPUT}"
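
The generate.py CLI itself is not shown in this compare; given the PLATFORM and TRIGGER variables above, a minimal sketch of the argument handling might look as follows (the flag names follow the env vars; the "include" key is the usual shape for a generated matrix, and everything else is an assumption):

#!/usr/bin/env python3
# Sketch only: parse the flags passed by the workflow step above and emit the
# matrix on GITHUB_OUTPUT in the "matrix=<json>" format GitHub Actions expects.
import argparse
import json

parser = argparse.ArgumentParser()
parser.add_argument("--platform", choices=["linux", "macos", "windows"], default=None)
parser.add_argument("--trigger", choices=["commit", "label", "merge", "schedule"], required=True)
args = parser.parse_args()

configurations: list[dict] = []  # built from the per-platform DISTROS lists
print(f"matrix={json.dumps({'include': configurations})}")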

View File

@@ -19,17 +19,17 @@ on:
branches: [develop]
paths:
# This allows testing changes to the upload workflow in a PR
- .github/workflows/upload-conan-deps.yml
- ".github/workflows/upload-conan-deps.yml"
push:
branches: [develop]
paths:
- .github/workflows/upload-conan-deps.yml
- .github/workflows/reusable-strategy-matrix.yml
- .github/actions/build-deps/action.yml
- .github/actions/setup-conan/action.yml
- ".github/workflows/upload-conan-deps.yml"
- ".github/workflows/reusable-strategy-matrix.yml"
- ".github/actions/build-deps/action.yml"
- ".github/actions/setup-conan/action.yml"
- ".github/scripts/strategy-matrix/**"
- conanfile.py
- conan.lock
- "conanfile.py"
- "conan.lock"
env:
CONAN_REMOTE_NAME: xrplf
@@ -49,7 +49,8 @@ jobs:
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
strategy_matrix: ${{ github.event_name == 'pull_request' && 'minimal' || 'all' }}
# The workflow dispatch event uses the same trigger as the schedule event.
trigger: ${{ github.event_name == 'pull_request' && 'commit' || (github.event_name == 'push' && 'merge' || 'schedule') }}
# Build and upload the dependencies for each configuration.
run-upload-conan-deps:
@@ -59,8 +60,8 @@ jobs:
fail-fast: false
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
runs-on: ${{ matrix.architecture.runner }}
container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || null }}
runs-on: ${{ matrix.runs_on }}
container: ${{ matrix.image }}
steps:
- name: Cleanup workspace (macOS and Windows)
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
@@ -72,7 +73,7 @@ jobs:
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6
with:
disable_ccache: true
disable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env

.gitignore vendored
View File

@@ -1,5 +1,4 @@
# .gitignore
# cspell: disable
# Macintosh Desktop Services Store files.
.DS_Store
@@ -20,6 +19,7 @@ Release/
/tmp/
CMakeSettings.json
CMakeUserPresets.json
__pycache__
# Coverage files.
*.gcno
@@ -36,7 +36,6 @@ gmon.out
# Customized configs.
/rippled.cfg
/xrpld.cfg
/validators.txt
# Locally patched Conan recipes

View File

@@ -36,21 +36,6 @@ repos:
hooks:
- id: black
- repo: https://github.com/streetsidesoftware/cspell-cli
rev: v9.2.0
hooks:
- id: cspell # Spell check changed files
exclude: .config/cspell.config.yaml
- id: cspell # Spell check the commit message
name: check commit message spelling
args:
- --no-must-find-files
- --no-progress
- --no-summary
- --files
- .git/COMMIT_EDITMSG
stages: [commit-msg]
exclude: |
(?x)^(
external/.*|

View File

@@ -28,9 +28,6 @@ elseif(MSVC)
add_compile_options(/wd4068) # Ignore unknown pragmas
endif()
# Enable ccache to speed up builds.
include(Ccache)
# make GIT_COMMIT_HASH define available to all sources
find_package(Git)
if(Git_FOUND)
@@ -116,7 +113,6 @@ find_package(date REQUIRED)
find_package(ed25519 REQUIRED)
find_package(nudb REQUIRED)
find_package(secp256k1 REQUIRED)
find_package(wasmi REQUIRED)
find_package(xxHash REQUIRED)
target_link_libraries(xrpl_libs INTERFACE

View File

@@ -555,16 +555,16 @@ Rippled uses a linear workflow model that can be summarized as:
git fetch --multiple upstreams user1 user2 user3 [...]
git checkout -B release-next --no-track upstream/develop
# Only do an ff-only merge if pr-branch1 is either already
# Only do an ff-only merge if prbranch1 is either already
# squashed, or needs to be merged with separate commits,
# and has no merge commits.
# Use -S on the ff-only merge if pr-branch1 isn't signed.
git merge [-S] --ff-only user1/pr-branch1
# Use -S on the ff-only merge if prbranch1 isn't signed.
git merge [-S] --ff-only user1/prbranch1
git merge --squash user2/pr-branch2
git merge --squash user2/prbranch2
git commit -S # Use the commit message provided on the PR
git merge --squash user3/pr-branch3
git merge --squash user3/prbranch3
git commit -S # Use the commit message provided on the PR
[...]
@@ -876,7 +876,7 @@ git push --delete upstream-push master-next
#### Special cases: point releases, hotfixes, etc.
On occasion, a bug or issue is discovered in a version that already
On occassion, a bug or issue is discovered in a version that already
had a final release. Most of the time, development will have started
on the next version, and will usually have changes in `develop`
and often in `release`.

View File

@@ -42,7 +42,7 @@ For more information on responsible disclosure, please read this [Wikipedia arti
## Report Handling Process
Please report the bug directly to us and limit further disclosure. If you want to prove that you knew the bug as of a given time, consider using a cryptographic pre-commitment: hash the content of your report and publish the hash on a medium of your choice (e.g. on Twitter or as a memo in a transaction) as "proof" that you had written the text at a given point in time.
Please report the bug directly to us and limit further disclosure. If you want to prove that you knew the bug as of a given time, consider using a cryptographic precommitment: hash the content of your report and publish the hash on a medium of your choice (e.g. on Twitter or as a memo in a transaction) as "proof" that you had written the text at a given point in time.
Once we receive a report, we:

View File

@@ -29,18 +29,18 @@
#
# Purpose
#
# This file documents and provides examples of all xrpld server process
# configuration options. When the xrpld server instance is launched, it
# This file documents and provides examples of all rippled server process
# configuration options. When the rippled server instance is launched, it
# looks for a file with the following name:
#
# xrpld.cfg
# rippled.cfg
#
# For more information on where the xrpld server instance searches for the
# For more information on where the rippled server instance searches for the
# file, visit:
#
# https://xrpl.org/commandline-usage.html#generic-options
#
# This file should be named xrpld.cfg. This file is UTF-8 with DOS, UNIX,
# This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX,
# or Mac style end of lines. Blank lines and lines beginning with '#' are
# ignored. Undefined sections are reserved. No escapes are currently defined.
#
@@ -89,8 +89,8 @@
#
#
#
# xrpld offers various server protocols to clients making inbound
# connections. The listening ports xrpld uses are "universal" ports
# rippled offers various server protocols to clients making inbound
# connections. The listening ports rippled uses are "universal" ports
# which may be configured to handshake in one or more of the available
# supported protocols. These universal ports simplify administration:
# A single open port can be used for multiple protocols.
@@ -103,7 +103,7 @@
#
# A list of port names and key/value pairs. A port name must start with a
# letter and contain only letters and numbers. The name is not case-sensitive.
# For each name in this list, xrpld will look for a configuration file
# For each name in this list, rippled will look for a configuration file
# section with the same name and use it to create a listening port. The
# name is informational only; the choice of name does not affect the function
# of the listening port.
@@ -134,7 +134,7 @@
# ip = 127.0.0.1
# protocol = http
#
# When xrpld is used as a command line client (for example, issuing a
# When rippled is used as a command line client (for example, issuing a
# server stop command), the first port advertising the http or https
# protocol will be used to make the connection.
#
@@ -175,7 +175,7 @@
# same time. It is possible have both Websockets and Secure Websockets
# together in one port.
#
# NOTE If no ports support the peer protocol, xrpld cannot
# NOTE If no ports support the peer protocol, rippled cannot
# receive incoming peer connections or become a superpeer.
#
# limit = <number>
@@ -194,7 +194,7 @@
# required. IP address restrictions, if any, will be checked in addition
# to the credentials specified here.
#
# When acting in the client role, xrpld will supply these credentials
# When acting in the client role, rippled will supply these credentials
# using HTTP's Basic Authentication headers when making outbound HTTP/S
# requests.
#
@@ -218,7 +218,7 @@
# administrative commands.
#
# NOTE A common configuration value for the admin field is "localhost".
# If you are listening on all IPv4/IPv6 addresses by specifying
# If you are listening on all IPv4/IPv6 addresses by specifing
# ip = :: then you can use admin = ::ffff:127.0.0.1,::1 to allow
# administrative access from both IPv4 and IPv6 localhost
# connections.
@@ -237,7 +237,7 @@
# WS, or WSS protocol interfaces. If administrative commands are
# disabled for a port, these credentials have no effect.
#
# When acting in the client role, xrpld will supply these credentials
# When acting in the client role, rippled will supply these credentials
# in the submitted JSON for any administrative command requests when
# invoking JSON-RPC commands on remote servers.
#
@@ -258,7 +258,7 @@
# resource controls will default to those for non-administrative users.
#
# The secure_gateway IP addresses are intended to represent
# proxies. Since xrpld trusts these hosts, they must be
# proxies. Since rippled trusts these hosts, they must be
# responsible for properly authenticating the remote user.
#
# If some IP addresses are included for both "admin" and
@@ -272,7 +272,7 @@
# Use the specified files when configuring SSL on the port.
#
# NOTE If no files are specified and secure protocols are selected,
# xrpld will generate an internal self-signed certificate.
# rippled will generate an internal self-signed certificate.
#
# The files have these meanings:
#
@@ -297,12 +297,12 @@
# Control the ciphers which the server will support over SSL on the port,
# specified using the OpenSSL "cipher list format".
#
# NOTE If unspecified, xrpld will automatically configure a modern
# NOTE If unspecified, rippled will automatically configure a modern
# cipher suite. This default suite should be widely supported.
#
# You should not modify this string unless you have a specific
# reason and cryptographic expertise. Incorrect modification may
# keep xrpld from connecting to other instances of xrpld or
# keep rippled from connecting to other instances of rippled or
# prevent RPC and WebSocket clients from connecting.
#
# send_queue_limit = [1..65535]
@@ -382,7 +382,7 @@
#-----------------
#
# These settings control security and access attributes of the Peer to Peer
# server section of the xrpld process. Peer Protocol implements the
# server section of the rippled process. Peer Protocol implements the
# Ripple Payment protocol. It is over peer connections that transactions
# and validations are passed from machine to machine, to determine the
# contents of validated ledgers.
@@ -396,7 +396,7 @@
# true - enables compression
# false - disables compression [default].
#
# The xrpld server can save bandwidth by compressing its peer-to-peer communications,
# The rippled server can save bandwidth by compressing its peer-to-peer communications,
# at a cost of greater CPU usage. If you enable link compression,
# the server automatically compresses communications with peer servers
# that also have link compression enabled.
@@ -432,7 +432,7 @@
#
# [ips_fixed]
#
# List of IP addresses or hostnames to which xrpld should always attempt to
# List of IP addresses or hostnames to which rippled should always attempt to
# maintain peer connections with. This is useful for manually forming private
# networks, for example to configure a validation server that connects to the
# Ripple network through a public-facing server, or for building a set
@@ -573,7 +573,7 @@
#
# minimum_txn_in_ledger_standalone = <number>
#
# Like minimum_txn_in_ledger when xrpld is running in standalone
# Like minimum_txn_in_ledger when rippled is running in standalone
# mode. Default: 1000.
#
# target_txn_in_ledger = <number>
@@ -710,7 +710,7 @@
#
# [validator_token]
#
# This is an alternative to [validation_seed] that allows xrpld to perform
# This is an alternative to [validation_seed] that allows rippled to perform
# validation without having to store the validator keys on the network
# connected server. The field should contain a single token in the form of a
# base64-encoded blob.
@@ -745,7 +745,7 @@
#
# Specify the file by its name or path.
# Unless an absolute path is specified, it will be considered relative to
# the folder in which the xrpld.cfg file is located.
# the folder in which the rippled.cfg file is located.
#
# Examples:
# /home/ripple/validators.txt
@@ -840,7 +840,7 @@
#
# 0: Disable the ledger replay feature [default]
# 1: Enable the ledger replay feature. With this feature enabled, when
# acquiring a ledger from the network, a xrpld node only downloads
# acquiring a ledger from the network, a rippled node only downloads
# the ledger header and the transactions instead of the whole ledger.
# And the ledger is built by applying the transactions to the parent
# ledger.
@@ -851,7 +851,7 @@
#
#----------------
#
# The xrpld server instance uses HTTPS GET requests in a variety of
# The rippled server instance uses HTTPS GET requests in a variety of
# circumstances, including but not limited to contacting trusted domains to
# fetch information such as mapping an email address to a Ripple Payment
# Network address.
@@ -891,7 +891,7 @@
#
#------------
#
# xrpld creates 4 SQLite databases to hold bookkeeping information
# rippled creates 4 SQLite databases to hold bookkeeping information
# about transactions, local credentials, and various other things.
# It also creates the NodeDB, which holds all the objects that
# make up the current and historical ledgers.
@@ -902,7 +902,7 @@
# the performance of the server.
#
# Partial pathnames will be considered relative to the location of
# the xrpld.cfg file.
# the rippled.cfg file.
#
# [node_db] Settings for the Node Database (required)
#
@@ -920,11 +920,11 @@
# type = NuDB
#
# NuDB is a high-performance database written by Ripple Labs and optimized
# for xrpld and solid-state drives.
# for rippled and solid-state drives.
#
# NuDB maintains its high speed regardless of the amount of history
# stored. Online delete may be selected, but is not required. NuDB is
# available on all platforms that xrpld runs on.
# available on all platforms that rippled runs on.
#
# type = RocksDB
#
@@ -1049,7 +1049,7 @@
#
# recovery_wait_seconds
# The online delete process checks periodically
# that xrpld is still in sync with the network,
# that rippled is still in sync with the network,
# and that the validated ledger is less than
# 'age_threshold_seconds' old. If not, then continue
# sleeping for this number of seconds and
@@ -1069,8 +1069,8 @@
# The server creates and maintains 4 to 5 bookkeeping SQLite databases in
# the 'database_path' location. If you omit this configuration setting,
# the server creates a directory called "db" located in the same place as
# your xrpld.cfg file.
# Partial pathnames are relative to the location of the xrpld executable.
# your rippled.cfg file.
# Partial pathnames are relative to the location of the rippled executable.
#
# [sqlite] Tuning settings for the SQLite databases (optional)
#
@@ -1120,7 +1120,7 @@
# The default is "wal", which uses a write-ahead
# log to implement database transactions.
# Alternately, "memory" saves disk I/O, but if
# xrpld crashes during a transaction, the
# rippled crashes during a transaction, the
# database is likely to be corrupted.
# See https://www.sqlite.org/pragma.html#pragma_journal_mode
# for more details about the available options.
@@ -1130,7 +1130,7 @@
# synchronous Valid values: off, normal, full, extra
# The default is "normal", which works well with
# the "wal" journal mode. Alternatively, "off"
# allows xrpld to continue as soon as data is
# allows rippled to continue as soon as data is
# passed to the OS, which can significantly
# increase speed, but risks data corruption if
# the host computer crashes before writing that
@@ -1144,7 +1144,7 @@
# The default is "file", which will use files
# for temporary database tables and indices.
# Alternatively, "memory" may save I/O, but
# xrpld does not currently use many, if any,
# rippled does not currently use many, if any,
# of these temporary objects.
# See https://www.sqlite.org/pragma.html#pragma_temp_store
# for more details about the available options.
@@ -1173,7 +1173,7 @@
#
# These settings are designed to help server administrators diagnose
# problems, and obtain detailed information about the activities being
# performed by the xrpld process.
# performed by the rippled process.
#
#
#
@@ -1190,7 +1190,7 @@
#
# Configuration parameters for the Beast.Insight stats collection module.
#
# Insight is a module that collects information from the areas of xrpld
# Insight is a module that collects information from the areas of rippled
# that have instrumentation. The configuration parameters control where the
# collection metrics are sent. The parameters are expressed as key = value
# pairs with no white space. The main parameter is the choice of server:
@@ -1199,7 +1199,7 @@
#
# Choice of server to send metrics to. Currently the only choice is
# "statsd" which sends UDP packets to a StatsD daemon, which must be
# running while xrpld is running. More information on StatsD is
# running while rippled is running. More information on StatsD is
# available here:
# https://github.com/b/statsd_spec
#
@@ -1209,7 +1209,7 @@
# in the format, n.n.n.n:port.
#
# "prefix" A string prepended to each collected metric. This is used
# to distinguish between different running instances of xrpld.
# to distinguish between different running instances of rippled.
#
# If this section is missing, or the server type is unspecified or unknown,
# statistics are not collected or reported.
@@ -1236,7 +1236,7 @@
#
# Example:
# [perf]
# perf_log=/var/log/xrpld/perf.log
# perf_log=/var/log/rippled/perf.log
# log_interval=2
#
#-------------------------------------------------------------------------------
@@ -1246,7 +1246,7 @@
#----------
#
# The vote settings configure settings for the entire Ripple network.
# While a single instance of xrpld cannot unilaterally enforce network-wide
# While a single instance of rippled cannot unilaterally enforce network-wide
# settings, these choices become part of the instance's vote during the
# consensus process for each voting ledger.
#
@@ -1260,7 +1260,7 @@
# The reference transaction is the simplest form of transaction.
# It represents an XRP payment between two parties.
#
# If this parameter is unspecified, xrpld will use an internal
# If this parameter is unspecified, rippled will use an internal
# default. Don't change this without understanding the consequences.
#
# Example:
@@ -1272,7 +1272,7 @@
# account's XRP balance that is at or below the reserve may only be
# spent on transaction fees, and not transferred out of the account.
#
# If this parameter is unspecified, xrpld will use an internal
# If this parameter is unspecified, rippled will use an internal
# default. Don't change this without understanding the consequences.
#
# Example:
@@ -1284,7 +1284,7 @@
# each ledger item owned by the account. Ledger items an account may
# own include trust lines, open orders, and tickets.
#
# If this parameter is unspecified, xrpld will use an internal
# If this parameter is unspecified, rippled will use an internal
# default. Don't change this without understanding the consequences.
#
# Example:
@@ -1326,7 +1326,7 @@
# tool instead.
#
# This flag has no effect on the "sign" and "sign_for" command line options
# that xrpld makes available.
# that rippled makes available.
#
# The default value of this field is "false"
#
@@ -1405,7 +1405,7 @@
#--------------------
#
# Administrators can use these values as a starting point for configuring
# their instance of xrpld, but each value should be checked to make sure
# their instance of rippled, but each value should be checked to make sure
# it meets the business requirements for the organization.
#
# Server
@@ -1415,7 +1415,7 @@
# "peer"
#
# Peer protocol open to everyone. This is required to accept
# incoming xrpld connections. This does not affect automatic
# incoming rippled connections. This does not affect automatic
# or manual outgoing Peer protocol connections.
#
# "rpc"
@@ -1432,7 +1432,7 @@
#
# ETL commands for Clio. We recommend setting secure_gateway
# in this section to a comma-separated list of the addresses
# of your Clio servers, in order to bypass xrpld's rate limiting.
# of your Clio servers, in order to bypass rippled's rate limiting.
#
# This port is commented out but can be enabled by removing
# the '#' from each corresponding line including the entry under [server]
@@ -1449,8 +1449,8 @@
# NOTE
#
# To accept connections on well known ports such as 80 (HTTP) or
# 443 (HTTPS), most operating systems will require xrpld to
# run with administrator privileges, or else xrpld will not start.
# 443 (HTTPS), most operating systems will require rippled to
# run with administrator privileges, or else rippled will not start.
[server]
port_rpc_admin_local
@@ -1496,7 +1496,7 @@ secure_gateway = 127.0.0.1
#-------------------------------------------------------------------------------
# This is the primary persistent datastore for xrpld. This includes transaction
# This is the primary persistent datastore for rippled. This includes transaction
# metadata, account states, and ledger headers. Helpful information can be
# found at https://xrpl.org/capacity-planning.html#node-db-type
# type=NuDB is recommended for non-validators with fast SSDs. Validators or
@@ -1511,19 +1511,19 @@ secure_gateway = 127.0.0.1
# deletion.
[node_db]
type=NuDB
path=/var/lib/xrpld/db/nudb
path=/var/lib/rippled/db/nudb
nudb_block_size=4096
online_delete=512
advisory_delete=0
[database_path]
/var/lib/xrpld/db
/var/lib/rippled/db
# This needs to be an absolute directory reference, not a relative one.
# Modify this value as required.
[debug_logfile]
/var/log/xrpld/debug.log
/var/log/rippled/debug.log
# To use the XRP test network
# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html),
@@ -1533,7 +1533,7 @@ advisory_delete=0
# File containing trusted validator keys or validator list publishers.
# Unless an absolute path is specified, it will be considered relative to the
# folder in which the xrpld.cfg file is located.
# folder in which the rippled.cfg file is located.
[validators_file]
validators.txt

View File

@@ -1,7 +1,7 @@
#
# Default validators.txt
#
# This file is located in the same folder as your xrpld.cfg file
# This file is located in the same folder as your rippled.cfg file
# and defines which validators your server trusts not to collude.
#
# This file is UTF-8 with DOS, UNIX, or Mac style line endings.

View File

@@ -1,19 +0,0 @@
#!/bin/bash
# cspell: ignore clangf
modified=$1
dir=`pwd`
clangf=clang-format-10
clangf=clang-format
if [ "$1" = "--all" ]
then
modified=`git status|egrep "modified|new file"|egrep "(cpp|h)$" | sed -E -e 's/modified://' -e 's/new file://' -e 's/^[[:space:]]+//'`
fi
for i in $modified
do
basedir=$(dirname "$i")
file=$(basename "$i")
echo "$basedir $file"
cd $basedir
$clangf -style=file -i "$file"
cd $dir
done

View File

@@ -1,51 +0,0 @@
find_program(CCACHE_PATH "ccache")
if (NOT CCACHE_PATH)
return()
endif ()
# For Linux and macOS we can use the ccache binary directly.
if (NOT MSVC)
set(CMAKE_C_COMPILER_LAUNCHER "${CCACHE_PATH}")
set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PATH}")
message(STATUS "Found ccache: ${CCACHE_PATH}")
return()
endif ()
# For Windows more effort is required. The code below is a modified version of
# https://github.com/ccache/ccache/wiki/MS-Visual-Studio#usage-with-cmake.
if ("${CCACHE_PATH}" MATCHES "chocolatey")
message(DEBUG "Ccache path: ${CCACHE_PATH}")
# Chocolatey uses a shim executable that we cannot use directly, in which
# case we have to find the executable it points to. If we cannot find the
# target executable then we cannot use ccache.
find_program(BASH_PATH "bash")
if (NOT BASH_PATH)
message(WARNING "Could not find bash.")
return()
endif ()
execute_process(
COMMAND bash -c "export LC_ALL='en_US.UTF-8'; ${CCACHE_PATH} --shimgen-noop | grep -oP 'path to executable: \\K.+' | head -c -1"
OUTPUT_VARIABLE CCACHE_PATH)
if (NOT CCACHE_PATH)
message(WARNING "Could not find ccache target.")
return()
endif ()
file(TO_CMAKE_PATH "${CCACHE_PATH}" CCACHE_PATH)
endif ()
message(STATUS "Found ccache: ${CCACHE_PATH}")
# Tell cmake to use ccache for compiling with Visual Studio.
file(COPY_FILE
${CCACHE_PATH} ${CMAKE_BINARY_DIR}/cl.exe
ONLY_IF_DIFFERENT)
set(CMAKE_VS_GLOBALS
"CLToolExe=cl.exe"
"CLToolPath=${CMAKE_BINARY_DIR}"
"TrackFileAccess=false"
"UseMultiToolTask=true")
# By default Visual Studio generators will use /Zi, which is not compatible with
# ccache, so tell it to use /Z7 instead.
set(CMAKE_MSVC_DEBUG_INFORMATION_FORMAT "$<$<CONFIG:Debug,RelWithDebInfo>:Embedded>")

View File

@@ -149,7 +149,7 @@ elseif (use_gold AND is_gcc)
ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
#[=========================================================[
NOTE: THE gold linker inserts -rpath as DT_RUNPATH by
default instead of DT_RPATH, so you might have slightly
default intead of DT_RPATH, so you might have slightly
unexpected runtime ld behavior if you were expecting
DT_RPATH. Specify --disable-new-dtags to gold if you do
not want the default DT_RUNPATH behavior. This rpath

View File

@@ -63,7 +63,6 @@ target_link_libraries(xrpl.imports.main
Xrpl::opts
Xrpl::syslibs
secp256k1::secp256k1
wasmi::wasmi
xrpl.libpb
xxHash::xxhash
$<$<BOOL:${voidstar}>:antithesis-sdk-cpp>
@@ -207,7 +206,7 @@ if(xrpld)
)
exclude_if_included(xrpld)
# define a macro for tests that might need to
# be excluded or run differently in CI environment
# be exluded or run differently in CI environment
if(is_ci)
target_compile_definitions(xrpld PRIVATE XRPL_RUNNING_IN_CI)
endif ()

View File

@@ -62,7 +62,7 @@ if (is_root_project AND TARGET xrpld)
message (\"-- Skipping : \$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/\${DEST}/\${NEWNAME}\")
endif ()
endmacro()
copy_if_not_exists(\"${CMAKE_CURRENT_SOURCE_DIR}/cfg/xrpld-example.cfg\" etc xrpld.cfg)
copy_if_not_exists(\"${CMAKE_CURRENT_SOURCE_DIR}/cfg/rippled-example.cfg\" etc rippled.cfg)
copy_if_not_exists(\"${CMAKE_CURRENT_SOURCE_DIR}/cfg/validators-example.txt\" etc validators.txt)
")
install(CODE "

View File

@@ -3,7 +3,6 @@
"requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
"wasmi/0.42.1#2a96357d4e6bf40dfe201106d849c24f%1764802092.014",
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
"snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246",

View File

@@ -35,7 +35,6 @@ class Xrpl(ConanFile):
"openssl/3.5.4",
"secp256k1/0.7.0",
"soci/4.0.3",
"wasmi/0.42.1",
"zlib/1.3.1",
]
@@ -183,10 +182,12 @@ class Xrpl(ConanFile):
libxrpl.libs = [
"xrpl",
"xrpl.libpb",
"ed25519",
"secp256k1",
]
# TODO: Fix the protobufs to include each other relative to
# `include/`, not `include/xrpl/proto/`.
libxrpl.includedirs = ["include", "include/xrpl/proto"]
# `include/`, not `include/ripple/proto/`.
libxrpl.includedirs = ["include", "include/ripple/proto"]
libxrpl.requires = [
"boost::headers",
"boost::chrono",
@@ -211,7 +212,6 @@ class Xrpl(ConanFile):
"soci::soci",
"secp256k1::secp256k1",
"sqlite3::sqlite",
"wasmi::wasmi",
"xxhash::xxhash",
"zlib::zlib",
]

View File

@@ -134,7 +134,7 @@ validation messages (_PAV_) received from each validator on the node's UNL. Note
that the node will only count the validation messages that agree with its own
validations.
We define the **PAV** as the Percentage of Agreed Validation
We define the **PAV** as the **P**ercentage of **A**greed **V**alidation
messages received for the last N ledgers, where N = 256 by default.
When the PAV drops below the **_low-water mark_**, the validator is considered
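As a quick illustration of the PAV arithmetic defined above (names here are hypothetical, not the node's actual API):

```cpp
// PAV = 100 * agreed / window, over the last `window` validated ledgers.
#include <cstddef>

double
percentAgreedValidations(std::size_t agreed, std::size_t window = 256)
{
    if (window == 0)
        return 0.0;
    return 100.0 * static_cast<double>(agreed) / static_cast<double>(window);
}
// e.g. 192 agreeing validation messages over the last 256 ledgers
// gives a PAV of 75%.
```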

View File

@@ -43,14 +43,14 @@ alt phase == OPEN
alt sqn%256==0
CA -[#green]> RM: <font color=green>getValidations
CA -[#green]> CA: <font color=green>create UNLModify Tx
hnote over CA#lightgreen: use validations of the last 256 ledgers\nto figure out UNLModify Tx candidates.\nIf any, create UNLModify Tx, and add to TxSet.
hnote over CA#lightgreen: use validatations of the last 256 ledgers\nto figure out UNLModify Tx candidates.\nIf any, create UNLModify Tx, and add to TxSet.
end
CA -> GC
GC -> CA: propose
deactivate CA
end
else phase == ESTABLISH
hnote over GC: receive peer positions
hnote over GC: receive peer postions
GC -> GC : update our position
GC -> CA : propose \n(if position changed)
GC -> GC : check if have consensus

View File

@@ -189,7 +189,7 @@ validations. It checks this on every call to `timerEntry`.
- _Wrong Ledger_ indicates the node is not working on the correct prior ledger
and does not have it available. It requests that ledger from the network, but
continues to work towards consensus this round while waiting. If it had been
_proposing_, it will send a special "bow-out" proposal to its peers to indicate
_proposing_, it will send a special "bowout" proposal to its peers to indicate
its change in mode for the rest of this round. For the duration of the round,
it defers to peer positions for determining the consensus outcome as if it
were just _observing_.
@@ -515,7 +515,7 @@ are excerpts of the generic consensus implementation and of helper types that wi
interact with the concrete implementing class.
```{.cpp}
// Represents a transaction under dispute this round
// Represents a transction under dispute this round
template <class Tx_t, class NodeID_t> class DisputedTx;
// Represents how the node participates in Consensus this round

View File

@@ -58,7 +58,7 @@ concept CAdoptTag = std::is_same_v<T, SharedIntrusiveAdoptIncrementStrongTag> ||
When the strong pointer count goes to zero, the "partialDestructor" is
called. This can be used to destroy as much of the object as possible while
still retaining the reference counts. For example, for SHAMapInnerNodes the
children may be reset in that function. Note that std::shared_pointer WILL
children may be reset in that function. Note that std::shared_poiner WILL
run the destructor when the strong count reaches zero, but may not free the
memory used by the object until the weak count reaches zero. In rippled, we
typically allocate shared pointers with the `make_shared` function. When
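A single-threaded sketch of the lifecycle described above (the type and function names are illustrative; the real implementation manages the counts atomically):

```cpp
// partialDestructor() runs when the strong count reaches zero; the memory
// itself is reclaimed only once the weak count is also zero.
struct Counted
{
    int strong = 1;
    int weak = 0;

    void
    partialDestructor()
    {
        // e.g. reset children here while the reference counts stay alive
    }
};

inline void
releaseStrong(Counted* p)
{
    if (--p->strong == 0)
    {
        p->partialDestructor();
        if (p->weak == 0)
            delete p;  // no weak observers remain: reclaim the memory
    }
}
```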

View File

@@ -301,7 +301,7 @@ IntrusiveRefCounts::addWeakReleaseStrongRef() const
// change the counts and flags (the count could be atomically changed, but
// the flags depend on the current value of the counts).
//
// Note: If this becomes a perf bottleneck, the `partialDestroyStartedMask`
// Note: If this becomes a perf bottleneck, the `partialDestoryStartedMask`
// may be able to be set non-atomically. But it is easier to reason about
// the code if the flag is set atomically.
while (1)
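The compare-exchange pattern referred to in this comment looks roughly like the following sketch (the bit layout is hypothetical): compute the new counts *and* flags from the currently observed packed value, then retry until the swap succeeds.

```cpp
#include <atomic>
#include <cstdint>

std::atomic<std::uint64_t> refCounts{1};  // strong = 1, weak = 0

void
addWeakReleaseStrongSketch()
{
    std::uint64_t expected = refCounts.load(std::memory_order_acquire);
    while (1)
    {
        std::uint64_t desired = expected;
        desired += (std::uint64_t{1} << 32);  // ++weak (upper half)
        desired -= 1;                         // --strong (lower half)
        if ((desired & 0xffffffffull) == 0)
            desired |= (std::uint64_t{1} << 63);  // partial-destroy flag
        if (refCounts.compare_exchange_weak(
                expected, desired, std::memory_order_acq_rel))
            break;  // on failure, expected is reloaded and the loop retries
    }
}
```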

View File

@@ -221,8 +221,7 @@ public:
private:
enum {
// Maximum line length for log messages.
// If the message exceeds this length it will be truncated with
// ellipses.
// If the message exceeds this length it will be truncated with elipses.
maximumMessageCharacters = 12 * 1024
};
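A small sketch of the truncation rule this constant implies:

```cpp
#include <cstddef>
#include <string>

std::string
truncateForLog(std::string msg, std::size_t maxChars = 12 * 1024)
{
    if (msg.size() > maxChars)
    {
        msg.resize(maxChars);
        msg += "...";  // mark that the message was cut
    }
    return msg;
}
```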

View File

@@ -339,10 +339,6 @@ abs(Number x) noexcept
Number
power(Number const& f, unsigned n);
// logarithm with base 10
Number
lg(Number const& value);
// Returns f^(1/d)
// Uses NewtonRaphson iterations until the result stops changing
// to find the root of the polynomial g(x) = x^d - f
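The iteration described in that comment, sketched on plain doubles (the real routine operates on Number; this assumes f > 0 and d >= 1): iterate x -= g(x)/g'(x) for g(x) = x^d - f until the estimate stops decreasing.

```cpp
#include <cmath>

double
nthRoot(double f, unsigned d)
{
    double x = f / d + 1.0;  // AM-GM guarantees this starts above the root
    while (true)
    {
        double const next =
            x - (std::pow(x, static_cast<int>(d)) - f) /
                (d * std::pow(x, static_cast<int>(d) - 1));
        if (next >= x)  // stopped decreasing: converged
            return x;
        x = next;
    }
}
// e.g. nthRoot(8.0, 3) converges to 2.0.
```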

View File

@@ -152,8 +152,8 @@ public:
/** Return a "sub slice" of given length starting at the given position
Note that the subslice encompasses the range [pos, pos + rCount),
where rCount is the smaller of count and size() - pos.
Note that the subslice encompasses the range [pos, pos + rcount),
where rcount is the smaller of count and size() - pos.
@param pos position of the first character
@count requested length
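The same clamping rule, shown on std::string_view for illustration:

```cpp
#include <algorithm>
#include <cstddef>
#include <string_view>

std::string_view
subslice(std::string_view s, std::size_t pos, std::size_t count)
{
    if (pos >= s.size())
        return {};
    std::size_t const rCount = std::min(count, s.size() - pos);
    return s.substr(pos, rCount);  // the range [pos, pos + rCount)
}
```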

View File

@@ -31,7 +31,7 @@ template <class Iterator>
std::optional<Blob>
strUnHex(std::size_t strSize, Iterator begin, Iterator end)
{
static constexpr std::array<int, 256> const digitLookupTable = []() {
static constexpr std::array<int, 256> const unxtab = []() {
std::array<int, 256> t{};
for (auto& x : t)
@@ -57,7 +57,7 @@ strUnHex(std::size_t strSize, Iterator begin, Iterator end)
if (strSize & 1)
{
int c = digitLookupTable[*iter++];
int c = unxtab[*iter++];
if (c < 0)
return {};
@@ -67,12 +67,12 @@ strUnHex(std::size_t strSize, Iterator begin, Iterator end)
while (iter != end)
{
int cHigh = digitLookupTable[*iter++];
int cHigh = unxtab[*iter++];
if (cHigh < 0)
return {};
int cLow = digitLookupTable[*iter++];
int cLow = unxtab[*iter++];
if (cLow < 0)
return {};
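A self-contained sketch of the lookup table used above: each byte maps to its hex digit value, or -1 when it is not a hex digit.

```cpp
#include <array>

static constexpr std::array<int, 256> hexTable = []() {
    std::array<int, 256> t{};
    for (auto& x : t)
        x = -1;
    for (int i = 0; i < 10; ++i)
        t['0' + i] = i;
    for (int i = 0; i < 6; ++i)
    {
        t['a' + i] = 10 + i;
        t['A' + i] = 10 + i;
    }
    return t;
}();
// A decoded byte is (hexTable[hi] << 4) | hexTable[lo] once both
// lookups are non-negative.
```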

View File

@@ -3189,12 +3189,11 @@ operator==(aged_unordered_container<
{
if (size() != other.size())
return false;
for (auto iter(cbegin()), last(cend()), otherLast(other.cend());
iter != last;
for (auto iter(cbegin()), last(cend()), olast(other.cend()); iter != last;
++iter)
{
auto otherIter(other.find(extract(*iter)));
if (otherIter == otherLast)
auto oiter(other.find(extract(*iter)));
if (oiter == olast)
return false;
}
return true;
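The same equality strategy, sketched on std::unordered_set for illustration: the sizes must match, and every element of one container must be found in the other.

```cpp
#include <string>
#include <unordered_set>

bool
sameElements(
    std::unordered_set<std::string> const& a,
    std::unordered_set<std::string> const& b)
{
    if (a.size() != b.size())
        return false;
    for (auto const& elem : a)
        if (b.find(elem) == b.end())
            return false;
    return true;
}
```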

View File

@@ -18,7 +18,7 @@ namespace beast {
namespace detail {
// These specializations get called by the non-member functions to do the work
// These specializatons get called by the non-member functions to do the work
template <class Out, class In>
struct LexicalCast;

View File

@@ -203,7 +203,7 @@ struct is_contiguously_hashable<T[N], HashAlgorithm>
Throws:
Never
Effect:
Returns the resulting hash of all the input data.
Returns the reslting hash of all the input data.
*/
/** @{ */

View File

@@ -376,7 +376,7 @@ public:
print statement examples
"parent.child" prints child and all of its children
"parent.child." start at the parent and print down to child
"parent.grandchild" prints nothing- grandchild not direct descendent
"parent.grandchild" prints nothing- grandchild not direct discendent
"parent.grandchild." starts at the parent and prints down to grandchild
"parent.grandchild.*" starts at parent, print through grandchild
children

View File

@@ -40,7 +40,7 @@ public:
using microseconds = std::chrono::microseconds;
/**
* Configuration from [perf] section of xrpld.cfg.
* Configuration from [perf] section of rippled.cfg.
*/
struct Setup
{

View File

@@ -1,6 +1,8 @@
#ifndef XRPL_JSON_JSON_READER_H_INCLUDED
#define XRPL_JSON_JSON_READER_H_INCLUDED
#define CPPTL_JSON_READER_H_INCLUDED
#include <xrpl/json/json_forwards.h>
#include <xrpl/json/json_value.h>
@@ -66,7 +68,7 @@ public:
* error occurred during parsing.
*/
std::string
getFormattedErrorMessages() const;
getFormatedErrorMessages() const;
static constexpr unsigned nest_limit{25};
@@ -229,4 +231,4 @@ operator>>(std::istream&, Value&);
} // namespace Json
#endif // XRPL_JSON_JSON_READER_H_INCLUDED
#endif // CPPTL_JSON_READER_H_INCLUDED

View File

@@ -44,7 +44,7 @@ enum ValueType {
class StaticString
{
public:
constexpr explicit StaticString(char const* czString) : str_(czString)
constexpr explicit StaticString(char const* czstring) : str_(czstring)
{
}
@@ -682,4 +682,4 @@ public:
} // namespace Json
#endif // XRPL_JSON_JSON_VALUE_H_INCLUDED
#endif // CPPTL_JSON_H_INCLUDED

View File

@@ -90,7 +90,7 @@ private:
void
writeArrayValue(Value const& value);
bool
isMultilineArray(Value const& value);
isMultineArray(Value const& value);
void
pushValue(std::string const& value);
void
@@ -157,7 +157,7 @@ private:
void
writeArrayValue(Value const& value);
bool
isMultilineArray(Value const& value);
isMultineArray(Value const& value);
void
pushValue(std::string const& value);
void

View File

@@ -15,7 +15,7 @@ namespace xrpl {
namespace credentials {
// These function will be used by the code that use DepositPreauth / Credentials
// (and any future pre-authorization modes) as part of authorization (all the
// (and any future preauthorization modes) as part of authorization (all the
// transfer funds transactions)
// Check if credential sfExpiration field has passed ledger's parentCloseTime
@@ -41,8 +41,7 @@ checkFields(STTx const& tx, beast::Journal j);
// Accessing the ledger to check if provided credentials are valid. Do not use
// in doApply (only in preclaim) since it does not remove expired credentials.
// If you call it in preclaim, you also must call verifyDepositPreauth in
// doApply
// If you call it in prelaim, you also must call verifyDepositPreauth in doApply
TER
valid(
STTx const& tx,

View File

@@ -6,7 +6,7 @@
namespace xrpl {
namespace NodeStore {
/** Simple NodeStore Scheduler that just performs the tasks synchronously. */
/** Simple NodeStore Scheduler that just peforms the tasks synchronously. */
class DummyScheduler : public Scheduler
{
public:

View File

@@ -55,7 +55,7 @@ public:
HyperLevelDB, LevelDBFactory, SQLite, MDB
If the fastBackendParameter is omitted or empty, no ephemeral database
is used. If the scheduler parameter is omitted or unspecified, a
is used. If the scheduler parameter is omited or unspecified, a
synchronous scheduler is used which performs all tasks immediately on
the caller's thread.

View File

@@ -96,7 +96,7 @@ Facebook's RocksDB database, builds on LevelDB.
Use SQLite.
'path' specifies where the backend will store its data files.
'path' speficies where the backend will store its data files.
Choices for 'compression'
@@ -130,7 +130,7 @@ newer versions of RocksDB (TBD).
## Discussion
RocksDBQuickFactory is intended to provide a testbed for comparing potential
rocksdb performance with the existing recommended configuration in xrpld.cfg.
rocksdb performance with the existing recommended configuration in rippled.cfg.
Through various executions and profiling some conclusions are presented below.
- If the write ahead log is enabled, insert speed soon clogs up under load. The
@@ -161,7 +161,7 @@ Through various executions and profiling some conclusions are presented below.
- Multiple runs of the benchmarks can yield surprisingly different results. This
can perhaps be attributed to the asynchronous nature of rocksdb's compaction
process. The benchmarks are artificial and create highly unlikely write load to
process. The benchmarks are artifical and create highly unlikely write load to
create the dataset to measure different read access patterns. Therefore multiple
runs of the benchmarks are required to get a feel for the effectiveness of the
changes. This contrasts sharply with the keyvadb benchmarking where highly

View File

@@ -9,7 +9,7 @@ import "org/xrpl/rpc/v1/get_ledger_entry.proto";
import "org/xrpl/rpc/v1/get_ledger_data.proto";
import "org/xrpl/rpc/v1/get_ledger_diff.proto";
// These methods are binary only methods for retrieving arbitrary ledger state
// These methods are binary only methods for retrieiving arbitrary ledger state
// via gRPC. These methods are used by clio, but can also be
// used by any client that wants to extract ledger state in an efficient manner.
// They do not directly mimic the JSON equivalent methods.

View File

@@ -17,9 +17,9 @@ enum MessageType {
mtHAVE_SET = 35;
mtVALIDATION = 41;
mtGET_OBJECTS = 42;
mtVALIDATOR_LIST = 54;
mtVALIDATORLIST = 54;
mtSQUELCH = 55;
mtVALIDATOR_LIST_COLLECTION = 56;
mtVALIDATORLISTCOLLECTION = 56;
mtPROOF_PATH_REQ = 57;
mtPROOF_PATH_RESPONSE = 58;
mtREPLAY_DELTA_REQ = 59;
@@ -308,7 +308,7 @@ message TMSquelch {
}
enum TMLedgerMapType {
lmTRANSACTION = 1; // transaction map
lmTRANASCTION = 1; // transaction map
lmACCOUNT_STATE = 2; // account state map
}

View File

@@ -67,6 +67,9 @@ enum class HashPrefix : std::uint32_t {
/** Payment Channel Claim */
paymentChannelClaim = detail::make_hash_prefix('C', 'L', 'M'),
/** Credentials signature */
credential = detail::make_hash_prefix('C', 'R', 'D'),
/** Batch */
batch = detail::make_hash_prefix('B', 'C', 'H'),
};
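A sketch of the packing these entries imply (assumed layout: three ASCII characters in the top three bytes of a std::uint32_t):

```cpp
#include <cstdint>

constexpr std::uint32_t
makeHashPrefix(char a, char b, char c)
{
    return (static_cast<std::uint32_t>(a) << 24) |
        (static_cast<std::uint32_t>(b) << 16) |
        (static_cast<std::uint32_t>(c) << 8);
}
static_assert(makeHashPrefix('C', 'L', 'M') == 0x434C4D00u, "CLM prefix");
```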

View File

@@ -20,7 +20,7 @@ namespace xrpl {
Arithmetic operations can throw std::overflow_error during normalization
if the amount exceeds the largest representable amount, but underflows
will silently truncate to zero.
will silently trunctate to zero.
*/
class IOUAmount : private boost::totally_ordered<IOUAmount>,
private boost::additive<IOUAmount>
@@ -39,13 +39,6 @@ private:
normalize();
public:
/* The range for the mantissa when normalized */
static std::int64_t constexpr minMantissa = 1000000000000000ull;
static std::int64_t constexpr maxMantissa = 9999999999999999ull;
/* The range for the exponent when normalized */
static int constexpr minExponent = -96;
static int constexpr maxExponent = 80;
IOUAmount() = default;
explicit IOUAmount(Number const& other);
IOUAmount(beast::Zero);
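A hedged sketch of the normalization rule, using the constants shown above and assuming mantissa >= 0 for brevity (the real code also handles sign and rounding modes):

```cpp
#include <cstdint>
#include <stdexcept>

void
normalizeSketch(std::int64_t& mantissa, int& exponent)
{
    if (mantissa == 0)
    {
        exponent = 0;
        return;
    }
    while (mantissa < 1000000000000000LL)  // minMantissa
    {
        mantissa *= 10;
        --exponent;
    }
    while (mantissa > 9999999999999999LL)  // maxMantissa
    {
        mantissa /= 10;
        ++exponent;
    }
    if (exponent > 80)  // maxExponent: too large to represent
        throw std::overflow_error("IOUAmount overflow");
    if (exponent < -96)  // minExponent: underflow truncates to zero
    {
        mantissa = 0;
        exponent = 0;
    }
}
```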

View File

@@ -18,7 +18,7 @@
namespace xrpl {
class SeqProxy;
/** Keylet computation functions.
/** Keylet computation funclets.
Entries in the ledger are located using 256-bit locators. The locators are
calculated using a wide range of parameters specific to the entry whose

View File

@@ -29,7 +29,7 @@ enum GranularPermissionType : std::uint32_t {
#pragma pop_macro("PERMISSION")
};
enum Delegation { delegable, notDelegable };
enum Delegation { delegatable, notDelegatable };
class Permission
{
@@ -38,7 +38,7 @@ private:
std::unordered_map<std::uint16_t, uint256> txFeatureMap_;
std::unordered_map<std::uint16_t, Delegation> delegableTx_;
std::unordered_map<std::uint16_t, Delegation> delegatableTx_;
std::unordered_map<std::string, GranularPermissionType>
granularPermissionMap_;
@@ -71,7 +71,8 @@ public:
getTxFeature(TxType txType) const;
bool
isDelegable(std::uint32_t const& permissionValue, Rules const& rules) const;
isDelegatable(std::uint32_t const& permissionValue, Rules const& rules)
const;
// for tx level permission, permission value is equal to tx type plus one
uint32_t

View File

@@ -179,7 +179,7 @@ static constexpr int loanPaymentsPerFeeIncrement = 5;
*
* This limit is enforced during the loan payment process, and thus is not
* estimated. If the limit is hit, no further payments or overpayments will be
* processed, no matter how much of the transaction Amount is left, but the
* processed, no matter how much of the transation Amount is left, but the
* transaction will succeed with the payments that have been processed up to
* that point.
*
@@ -251,13 +251,6 @@ std::uint8_t constexpr vaultMaximumIOUScale = 18;
* another vault; counted from 0 */
std::uint8_t constexpr maxAssetCheckDepth = 5;
/** The maximum length of a Data field in Escrow object that can be updated by
* Wasm code */
std::size_t constexpr maxWasmDataLength = 4 * 1024;
/** The maximum length of a parameters passed from Wasm code*/
std::size_t constexpr maxWasmParamLength = 1024;
/** A ledger index. */
using LedgerIndex = std::uint32_t;

View File

@@ -210,7 +210,7 @@ public:
private:
// The ceil_in and ceil_out methods that deal in TAmount all convert
// their arguments to STAmount and convert the result back to TAmount.
// their arguments to STAoumout and convert the result back to TAmount.
// This helper function takes care of all the conversion operations.
template <
class In,

View File

@@ -122,8 +122,6 @@ enum TEMcodes : TERUnderlyingType {
temARRAY_TOO_LARGE,
temBAD_TRANSFER_FEE,
temINVALID_INNER_BATCH,
temBAD_WASM,
};
//------------------------------------------------------------------------------

View File

@@ -277,7 +277,7 @@ constexpr std::uint32_t const tfLoanOverpayment = 0x00010000;
// interest and fees, or it will fail. False: Not a full payment.
constexpr std::uint32_t const tfLoanFullPayment = 0x00020000;
// tfLoanLatePayment: True, indicates that the payment is late,
// and includes late interest and fees. If the loan is not late,
// and includes late iterest and fees. If the loan is not late,
// it will fail. False: not a late payment. If the current payment
// is overdue, the transaction will fail.
constexpr std::uint32_t const tfLoanLatePayment = 0x00040000;

View File

@@ -33,12 +33,12 @@ struct AttestationBase
// Account on the sending chain that triggered the event (sent the
// transaction)
AccountID sendingAccount;
// Amount transferred on the sending chain
// Amount transfered on the sending chain
STAmount sendingAmount;
// Account on the destination chain that collects a share of the attestation
// reward
AccountID rewardAccount;
// Amount was transferred on the locking chain
// Amount was transfered on the locking chain
bool wasLockingChainSend;
explicit AttestationBase(
@@ -354,7 +354,7 @@ struct XChainCreateAccountAttestation
XChainCreateAccountAttestation const& rhs);
};
// Attestations from witness servers for a particular claim ID and bridge.
// Attestations from witness servers for a particular claimid and bridge.
// Only one attestation per signature is allowed.
template <class TAttestation>
class XChainAttestationsBase

View File

@@ -42,7 +42,7 @@ concept ValidConstructSTArgs =
class STVar
{
private:
// The largest "small object" we can accommodate
// The largest "small object" we can accomodate
static std::size_t constexpr max_size = 72;
std::aligned_storage<max_size>::type d_;
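A sketch of in-place construction into such a fixed buffer (illustrative only; the real STVar also tracks which type currently occupies the storage):

```cpp
#include <cstddef>
#include <new>
#include <utility>

static std::size_t constexpr max_size = 72;

struct SmallBuffer
{
    alignas(std::max_align_t) unsigned char d_[max_size];

    template <class T, class... Args>
    T*
    emplace(Args&&... args)
    {
        static_assert(sizeof(T) <= max_size, "type too large for buffer");
        return ::new (static_cast<void*>(d_)) T(std::forward<Args>(args)...);
    }
};
```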

View File

@@ -237,7 +237,7 @@ LEDGER_ENTRY(ltOFFER, 0x006f, Offer, offer, ({
{sfAdditionalBooks, soeOPTIONAL},
}))
/** A ledger object which describes a deposit pre-authorization.
/** A ledger object which describes a deposit preauthorization.
\sa keylet::depositPreauth
*/
@@ -577,8 +577,8 @@ LEDGER_ENTRY(ltLOAN, 0x0089, Loan, loan, ({
// - TrueTotalLoanValue = PaymentRemaining * PeriodicPayment
// The unrounded true total value of the loan.
//
// - TrueTotalPrincipalOutstanding can be computed using the algorithm
// in the xrpl::detail::loanPrincipalFromPeriodicPayment function.
// - TrueTotalPrincialOutstanding can be computed using the algorithm
// in the ripple::detail::loanPrincipalFromPeriodicPayment function.
//
// - TrueTotalInterestOutstanding = TrueTotalLoanValue -
// TrueTotalPrincipalOutstanding

View File

@@ -3,7 +3,7 @@
#endif
/**
* TRANSACTION(tag, value, name, delegable, amendments, privileges, fields)
* TRANSACTION(tag, value, name, delegatable, amendments, privileges, fields)
*
* To ease maintenance, you may replace any unneeded values with "..."
* e.g. #define TRANSACTION(tag, value, name, ...)
@@ -25,7 +25,7 @@
# include <xrpld/app/tx/detail/Payment.h>
#endif
TRANSACTION(ttPAYMENT, 0, Payment,
Delegation::delegable,
Delegation::delegatable,
uint256{},
createAcct,
({
@@ -45,7 +45,7 @@ TRANSACTION(ttPAYMENT, 0, Payment,
# include <xrpld/app/tx/detail/Escrow.h>
#endif
TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -59,7 +59,7 @@ TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate,
/** This transaction type completes an existing escrow. */
TRANSACTION(ttESCROW_FINISH, 2, EscrowFinish,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -76,7 +76,7 @@ TRANSACTION(ttESCROW_FINISH, 2, EscrowFinish,
# include <xrpld/app/tx/detail/SetAccount.h>
#endif
TRANSACTION(ttACCOUNT_SET, 3, AccountSet,
Delegation::notDelegable,
Delegation::notDelegatable,
uint256{},
noPriv,
({
@@ -97,7 +97,7 @@ TRANSACTION(ttACCOUNT_SET, 3, AccountSet,
# include <xrpld/app/tx/detail/Escrow.h>
#endif
TRANSACTION(ttESCROW_CANCEL, 4, EscrowCancel,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -110,7 +110,7 @@ TRANSACTION(ttESCROW_CANCEL, 4, EscrowCancel,
# include <xrpld/app/tx/detail/SetRegularKey.h>
#endif
TRANSACTION(ttREGULAR_KEY_SET, 5, SetRegularKey,
Delegation::notDelegable,
Delegation::notDelegatable,
uint256{},
noPriv,
({
@@ -124,7 +124,7 @@ TRANSACTION(ttREGULAR_KEY_SET, 5, SetRegularKey,
# include <xrpld/app/tx/detail/CreateOffer.h>
#endif
TRANSACTION(ttOFFER_CREATE, 7, OfferCreate,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -140,7 +140,7 @@ TRANSACTION(ttOFFER_CREATE, 7, OfferCreate,
# include <xrpld/app/tx/detail/CancelOffer.h>
#endif
TRANSACTION(ttOFFER_CANCEL, 8, OfferCancel,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -154,7 +154,7 @@ TRANSACTION(ttOFFER_CANCEL, 8, OfferCancel,
# include <xrpld/app/tx/detail/CreateTicket.h>
#endif
TRANSACTION(ttTICKET_CREATE, 10, TicketCreate,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -170,7 +170,7 @@ TRANSACTION(ttTICKET_CREATE, 10, TicketCreate,
# include <xrpld/app/tx/detail/SetSignerList.h>
#endif
TRANSACTION(ttSIGNER_LIST_SET, 12, SignerListSet,
Delegation::notDelegable,
Delegation::notDelegatable,
uint256{},
noPriv,
({
@@ -183,7 +183,7 @@ TRANSACTION(ttSIGNER_LIST_SET, 12, SignerListSet,
# include <xrpld/app/tx/detail/PayChan.h>
#endif
TRANSACTION(ttPAYCHAN_CREATE, 13, PaymentChannelCreate,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -197,7 +197,7 @@ TRANSACTION(ttPAYCHAN_CREATE, 13, PaymentChannelCreate,
/** This transaction type funds an existing unidirectional XRP payment channel. */
TRANSACTION(ttPAYCHAN_FUND, 14, PaymentChannelFund,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -208,7 +208,7 @@ TRANSACTION(ttPAYCHAN_FUND, 14, PaymentChannelFund,
/** This transaction type submits a claim against an existing unidirectional payment channel. */
TRANSACTION(ttPAYCHAN_CLAIM, 15, PaymentChannelClaim,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -225,7 +225,7 @@ TRANSACTION(ttPAYCHAN_CLAIM, 15, PaymentChannelClaim,
# include <xrpld/app/tx/detail/CreateCheck.h>
#endif
TRANSACTION(ttCHECK_CREATE, 16, CheckCreate,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -241,7 +241,7 @@ TRANSACTION(ttCHECK_CREATE, 16, CheckCreate,
# include <xrpld/app/tx/detail/CashCheck.h>
#endif
TRANSACTION(ttCHECK_CASH, 17, CheckCash,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -255,7 +255,7 @@ TRANSACTION(ttCHECK_CASH, 17, CheckCash,
# include <xrpld/app/tx/detail/CancelCheck.h>
#endif
TRANSACTION(ttCHECK_CANCEL, 18, CheckCancel,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -267,7 +267,7 @@ TRANSACTION(ttCHECK_CANCEL, 18, CheckCancel,
# include <xrpld/app/tx/detail/DepositPreauth.h>
#endif
TRANSACTION(ttDEPOSIT_PREAUTH, 19, DepositPreauth,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -282,7 +282,7 @@ TRANSACTION(ttDEPOSIT_PREAUTH, 19, DepositPreauth,
# include <xrpld/app/tx/detail/SetTrust.h>
#endif
TRANSACTION(ttTRUST_SET, 20, TrustSet,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -296,7 +296,7 @@ TRANSACTION(ttTRUST_SET, 20, TrustSet,
# include <xrpld/app/tx/detail/DeleteAccount.h>
#endif
TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete,
Delegation::notDelegable,
Delegation::notDelegatable,
uint256{},
mustDeleteAcct,
({
@@ -312,7 +312,7 @@ TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete,
# include <xrpld/app/tx/detail/NFTokenMint.h>
#endif
TRANSACTION(ttNFTOKEN_MINT, 25, NFTokenMint,
Delegation::delegable,
Delegation::delegatable,
uint256{},
changeNFTCounts,
({
@@ -330,7 +330,7 @@ TRANSACTION(ttNFTOKEN_MINT, 25, NFTokenMint,
# include <xrpld/app/tx/detail/NFTokenBurn.h>
#endif
TRANSACTION(ttNFTOKEN_BURN, 26, NFTokenBurn,
Delegation::delegable,
Delegation::delegatable,
uint256{},
changeNFTCounts,
({
@@ -343,7 +343,7 @@ TRANSACTION(ttNFTOKEN_BURN, 26, NFTokenBurn,
# include <xrpld/app/tx/detail/NFTokenCreateOffer.h>
#endif
TRANSACTION(ttNFTOKEN_CREATE_OFFER, 27, NFTokenCreateOffer,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -359,7 +359,7 @@ TRANSACTION(ttNFTOKEN_CREATE_OFFER, 27, NFTokenCreateOffer,
# include <xrpld/app/tx/detail/NFTokenCancelOffer.h>
#endif
TRANSACTION(ttNFTOKEN_CANCEL_OFFER, 28, NFTokenCancelOffer,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -371,7 +371,7 @@ TRANSACTION(ttNFTOKEN_CANCEL_OFFER, 28, NFTokenCancelOffer,
# include <xrpld/app/tx/detail/NFTokenAcceptOffer.h>
#endif
TRANSACTION(ttNFTOKEN_ACCEPT_OFFER, 29, NFTokenAcceptOffer,
Delegation::delegable,
Delegation::delegatable,
uint256{},
noPriv,
({
@@ -385,7 +385,7 @@ TRANSACTION(ttNFTOKEN_ACCEPT_OFFER, 29, NFTokenAcceptOffer,
# include <xrpld/app/tx/detail/Clawback.h>
#endif
TRANSACTION(ttCLAWBACK, 30, Clawback,
Delegation::delegable,
Delegation::delegatable,
featureClawback,
noPriv,
({
@@ -398,7 +398,7 @@ TRANSACTION(ttCLAWBACK, 30, Clawback,
# include <xrpld/app/tx/detail/AMMClawback.h>
#endif
TRANSACTION(ttAMM_CLAWBACK, 31, AMMClawback,
Delegation::delegable,
Delegation::delegatable,
featureAMMClawback,
mayDeleteAcct | overrideFreeze,
({
@@ -413,7 +413,7 @@ TRANSACTION(ttAMM_CLAWBACK, 31, AMMClawback,
# include <xrpld/app/tx/detail/AMMCreate.h>
#endif
TRANSACTION(ttAMM_CREATE, 35, AMMCreate,
Delegation::delegable,
Delegation::delegatable,
featureAMM,
createPseudoAcct,
({
@@ -427,7 +427,7 @@ TRANSACTION(ttAMM_CREATE, 35, AMMCreate,
# include <xrpld/app/tx/detail/AMMDeposit.h>
#endif
TRANSACTION(ttAMM_DEPOSIT, 36, AMMDeposit,
Delegation::delegable,
Delegation::delegatable,
featureAMM,
noPriv,
({
@@ -445,7 +445,7 @@ TRANSACTION(ttAMM_DEPOSIT, 36, AMMDeposit,
# include <xrpld/app/tx/detail/AMMWithdraw.h>
#endif
TRANSACTION(ttAMM_WITHDRAW, 37, AMMWithdraw,
Delegation::delegable,
Delegation::delegatable,
featureAMM,
mayDeleteAcct,
({
@@ -462,7 +462,7 @@ TRANSACTION(ttAMM_WITHDRAW, 37, AMMWithdraw,
# include <xrpld/app/tx/detail/AMMVote.h>
#endif
TRANSACTION(ttAMM_VOTE, 38, AMMVote,
Delegation::delegable,
Delegation::delegatable,
featureAMM,
noPriv,
({
@@ -476,7 +476,7 @@ TRANSACTION(ttAMM_VOTE, 38, AMMVote,
# include <xrpld/app/tx/detail/AMMBid.h>
#endif
TRANSACTION(ttAMM_BID, 39, AMMBid,
Delegation::delegable,
Delegation::delegatable,
featureAMM,
noPriv,
({
@@ -492,7 +492,7 @@ TRANSACTION(ttAMM_BID, 39, AMMBid,
# include <xrpld/app/tx/detail/AMMDelete.h>
#endif
TRANSACTION(ttAMM_DELETE, 40, AMMDelete,
Delegation::delegable,
Delegation::delegatable,
featureAMM,
mustDeleteAcct,
({
@@ -505,7 +505,7 @@ TRANSACTION(ttAMM_DELETE, 40, AMMDelete,
# include <xrpld/app/tx/detail/XChainBridge.h>
#endif
TRANSACTION(ttXCHAIN_CREATE_CLAIM_ID, 41, XChainCreateClaimID,
Delegation::delegable,
Delegation::delegatable,
featureXChainBridge,
noPriv,
({
@@ -516,7 +516,7 @@ TRANSACTION(ttXCHAIN_CREATE_CLAIM_ID, 41, XChainCreateClaimID,
/** This transactions initiates a crosschain transaction */
TRANSACTION(ttXCHAIN_COMMIT, 42, XChainCommit,
Delegation::delegable,
Delegation::delegatable,
featureXChainBridge,
noPriv,
({
@@ -528,7 +528,7 @@ TRANSACTION(ttXCHAIN_COMMIT, 42, XChainCommit,
/** This transaction completes a crosschain transaction */
TRANSACTION(ttXCHAIN_CLAIM, 43, XChainClaim,
Delegation::delegable,
Delegation::delegatable,
featureXChainBridge,
noPriv,
({
@@ -541,7 +541,7 @@ TRANSACTION(ttXCHAIN_CLAIM, 43, XChainClaim,
/** This transaction initiates a crosschain account create transaction */
TRANSACTION(ttXCHAIN_ACCOUNT_CREATE_COMMIT, 44, XChainAccountCreateCommit,
Delegation::delegable,
Delegation::delegatable,
featureXChainBridge,
noPriv,
({
@@ -553,7 +553,7 @@ TRANSACTION(ttXCHAIN_ACCOUNT_CREATE_COMMIT, 44, XChainAccountCreateCommit,
/** This transaction adds an attestation to a claim */
TRANSACTION(ttXCHAIN_ADD_CLAIM_ATTESTATION, 45, XChainAddClaimAttestation,
Delegation::delegable,
Delegation::delegatable,
featureXChainBridge,
createAcct,
({
@@ -574,7 +574,7 @@ TRANSACTION(ttXCHAIN_ADD_CLAIM_ATTESTATION, 45, XChainAddClaimAttestation,
/** This transaction adds an attestation to an account */
TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46,
XChainAddAccountCreateAttestation,
Delegation::delegable,
Delegation::delegatable,
featureXChainBridge,
createAcct,
({
@@ -595,7 +595,7 @@ TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46,
/** This transaction modifies a sidechain */
TRANSACTION(ttXCHAIN_MODIFY_BRIDGE, 47, XChainModifyBridge,
Delegation::delegable,
Delegation::delegatable,
featureXChainBridge,
noPriv,
({
@@ -606,7 +606,7 @@ TRANSACTION(ttXCHAIN_MODIFY_BRIDGE, 47, XChainModifyBridge,
/** This transactions creates a sidechain */
TRANSACTION(ttXCHAIN_CREATE_BRIDGE, 48, XChainCreateBridge,
Delegation::delegable,
Delegation::delegatable,
featureXChainBridge,
noPriv,
({
@@ -620,7 +620,7 @@ TRANSACTION(ttXCHAIN_CREATE_BRIDGE, 48, XChainCreateBridge,
# include <xrpld/app/tx/detail/DID.h>
#endif
TRANSACTION(ttDID_SET, 49, DIDSet,
Delegation::delegable,
Delegation::delegatable,
featureDID,
noPriv,
({
@@ -631,7 +631,7 @@ TRANSACTION(ttDID_SET, 49, DIDSet,
/** This transaction type deletes a DID */
TRANSACTION(ttDID_DELETE, 50, DIDDelete,
Delegation::delegable,
Delegation::delegatable,
featureDID,
noPriv,
({}))
@@ -641,7 +641,7 @@ TRANSACTION(ttDID_DELETE, 50, DIDDelete,
# include <xrpld/app/tx/detail/SetOracle.h>
#endif
TRANSACTION(ttORACLE_SET, 51, OracleSet,
Delegation::delegable,
Delegation::delegatable,
featurePriceOracle,
noPriv,
({
@@ -658,7 +658,7 @@ TRANSACTION(ttORACLE_SET, 51, OracleSet,
# include <xrpld/app/tx/detail/DeleteOracle.h>
#endif
TRANSACTION(ttORACLE_DELETE, 52, OracleDelete,
Delegation::delegable,
Delegation::delegatable,
featurePriceOracle,
noPriv,
({
@@ -670,7 +670,7 @@ TRANSACTION(ttORACLE_DELETE, 52, OracleDelete,
# include <xrpld/app/tx/detail/LedgerStateFix.h>
#endif
TRANSACTION(ttLEDGER_STATE_FIX, 53, LedgerStateFix,
Delegation::delegable,
Delegation::delegatable,
fixNFTokenPageLinks,
noPriv,
({
@@ -683,7 +683,7 @@ TRANSACTION(ttLEDGER_STATE_FIX, 53, LedgerStateFix,
# include <xrpld/app/tx/detail/MPTokenIssuanceCreate.h>
#endif
TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate,
Delegation::delegable,
Delegation::delegatable,
featureMPTokensV1,
createMPTIssuance,
({
@@ -700,7 +700,7 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate,
# include <xrpld/app/tx/detail/MPTokenIssuanceDestroy.h>
#endif
TRANSACTION(ttMPTOKEN_ISSUANCE_DESTROY, 55, MPTokenIssuanceDestroy,
Delegation::delegable,
Delegation::delegatable,
featureMPTokensV1,
destroyMPTIssuance,
({
@@ -712,7 +712,7 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_DESTROY, 55, MPTokenIssuanceDestroy,
# include <xrpld/app/tx/detail/MPTokenIssuanceSet.h>
#endif
TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet,
Delegation::delegable,
Delegation::delegatable,
featureMPTokensV1,
noPriv,
({
@@ -729,7 +729,7 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet,
# include <xrpld/app/tx/detail/MPTokenAuthorize.h>
#endif
TRANSACTION(ttMPTOKEN_AUTHORIZE, 57, MPTokenAuthorize,
Delegation::delegable,
Delegation::delegatable,
featureMPTokensV1,
mustAuthorizeMPT,
({
@@ -742,7 +742,7 @@ TRANSACTION(ttMPTOKEN_AUTHORIZE, 57, MPTokenAuthorize,
# include <xrpld/app/tx/detail/Credentials.h>
#endif
TRANSACTION(ttCREDENTIAL_CREATE, 58, CredentialCreate,
Delegation::delegable,
Delegation::delegatable,
featureCredentials,
noPriv,
({
@@ -754,7 +754,7 @@ TRANSACTION(ttCREDENTIAL_CREATE, 58, CredentialCreate,
/** This transaction type accept an Credential object */
TRANSACTION(ttCREDENTIAL_ACCEPT, 59, CredentialAccept,
Delegation::delegable,
Delegation::delegatable,
featureCredentials,
noPriv,
({
@@ -764,7 +764,7 @@ TRANSACTION(ttCREDENTIAL_ACCEPT, 59, CredentialAccept,
/** This transaction type delete an Credential object */
TRANSACTION(ttCREDENTIAL_DELETE, 60, CredentialDelete,
Delegation::delegable,
Delegation::delegatable,
featureCredentials,
noPriv,
({
@@ -778,7 +778,7 @@ TRANSACTION(ttCREDENTIAL_DELETE, 60, CredentialDelete,
# include <xrpld/app/tx/detail/NFTokenModify.h>
#endif
TRANSACTION(ttNFTOKEN_MODIFY, 61, NFTokenModify,
Delegation::delegable,
Delegation::delegatable,
featureDynamicNFT,
noPriv,
({
@@ -792,7 +792,7 @@ TRANSACTION(ttNFTOKEN_MODIFY, 61, NFTokenModify,
# include <xrpld/app/tx/detail/PermissionedDomainSet.h>
#endif
TRANSACTION(ttPERMISSIONED_DOMAIN_SET, 62, PermissionedDomainSet,
Delegation::delegable,
Delegation::delegatable,
featurePermissionedDomains,
noPriv,
({
@@ -805,7 +805,7 @@ TRANSACTION(ttPERMISSIONED_DOMAIN_SET, 62, PermissionedDomainSet,
# include <xrpld/app/tx/detail/PermissionedDomainDelete.h>
#endif
TRANSACTION(ttPERMISSIONED_DOMAIN_DELETE, 63, PermissionedDomainDelete,
Delegation::delegable,
Delegation::delegatable,
featurePermissionedDomains,
noPriv,
({
@@ -817,7 +817,7 @@ TRANSACTION(ttPERMISSIONED_DOMAIN_DELETE, 63, PermissionedDomainDelete,
# include <xrpld/app/tx/detail/DelegateSet.h>
#endif
TRANSACTION(ttDELEGATE_SET, 64, DelegateSet,
Delegation::notDelegable,
Delegation::notDelegatable,
featurePermissionDelegationV1_1,
noPriv,
({
@@ -830,7 +830,7 @@ TRANSACTION(ttDELEGATE_SET, 64, DelegateSet,
# include <xrpld/app/tx/detail/VaultCreate.h>
#endif
TRANSACTION(ttVAULT_CREATE, 65, VaultCreate,
Delegation::delegable,
Delegation::delegatable,
featureSingleAssetVault,
createPseudoAcct | createMPTIssuance | mustModifyVault,
({
@@ -848,7 +848,7 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate,
# include <xrpld/app/tx/detail/VaultSet.h>
#endif
TRANSACTION(ttVAULT_SET, 66, VaultSet,
Delegation::delegable,
Delegation::delegatable,
featureSingleAssetVault,
mustModifyVault,
({
@@ -863,7 +863,7 @@ TRANSACTION(ttVAULT_SET, 66, VaultSet,
# include <xrpld/app/tx/detail/VaultDelete.h>
#endif
TRANSACTION(ttVAULT_DELETE, 67, VaultDelete,
Delegation::delegable,
Delegation::delegatable,
featureSingleAssetVault,
mustDeleteAcct | destroyMPTIssuance | mustModifyVault,
({
@@ -875,7 +875,7 @@ TRANSACTION(ttVAULT_DELETE, 67, VaultDelete,
# include <xrpld/app/tx/detail/VaultDeposit.h>
#endif
TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit,
Delegation::delegable,
Delegation::delegatable,
featureSingleAssetVault,
mayAuthorizeMPT | mustModifyVault,
({
@@ -888,7 +888,7 @@ TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit,
# include <xrpld/app/tx/detail/VaultWithdraw.h>
#endif
TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw,
Delegation::delegable,
Delegation::delegatable,
featureSingleAssetVault,
mayDeleteMPT | mayAuthorizeMPT | mustModifyVault,
({
@@ -903,7 +903,7 @@ TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw,
# include <xrpld/app/tx/detail/VaultClawback.h>
#endif
TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback,
Delegation::delegable,
Delegation::delegatable,
featureSingleAssetVault,
mayDeleteMPT | mustModifyVault,
({
@@ -917,7 +917,7 @@ TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback,
# include <xrpld/app/tx/detail/Batch.h>
#endif
TRANSACTION(ttBATCH, 71, Batch,
Delegation::notDelegable,
Delegation::notDelegatable,
featureBatch,
noPriv,
({
@@ -932,7 +932,7 @@ TRANSACTION(ttBATCH, 71, Batch,
# include <xrpld/app/tx/detail/LoanBrokerSet.h>
#endif
TRANSACTION(ttLOAN_BROKER_SET, 74, LoanBrokerSet,
Delegation::delegable,
Delegation::delegatable,
featureLendingProtocol,
createPseudoAcct | mayAuthorizeMPT, ({
{sfVaultID, soeREQUIRED},
@@ -949,7 +949,7 @@ TRANSACTION(ttLOAN_BROKER_SET, 74, LoanBrokerSet,
# include <xrpld/app/tx/detail/LoanBrokerDelete.h>
#endif
TRANSACTION(ttLOAN_BROKER_DELETE, 75, LoanBrokerDelete,
Delegation::delegable,
Delegation::delegatable,
featureLendingProtocol,
mustDeleteAcct | mayAuthorizeMPT, ({
{sfLoanBrokerID, soeREQUIRED},
@@ -960,7 +960,7 @@ TRANSACTION(ttLOAN_BROKER_DELETE, 75, LoanBrokerDelete,
# include <xrpld/app/tx/detail/LoanBrokerCoverDeposit.h>
#endif
TRANSACTION(ttLOAN_BROKER_COVER_DEPOSIT, 76, LoanBrokerCoverDeposit,
Delegation::delegable,
Delegation::delegatable,
featureLendingProtocol,
noPriv, ({
{sfLoanBrokerID, soeREQUIRED},
@@ -972,7 +972,7 @@ TRANSACTION(ttLOAN_BROKER_COVER_DEPOSIT, 76, LoanBrokerCoverDeposit,
# include <xrpld/app/tx/detail/LoanBrokerCoverWithdraw.h>
#endif
TRANSACTION(ttLOAN_BROKER_COVER_WITHDRAW, 77, LoanBrokerCoverWithdraw,
Delegation::delegable,
Delegation::delegatable,
featureLendingProtocol,
mayAuthorizeMPT, ({
{sfLoanBrokerID, soeREQUIRED},
@@ -987,7 +987,7 @@ TRANSACTION(ttLOAN_BROKER_COVER_WITHDRAW, 77, LoanBrokerCoverWithdraw,
# include <xrpld/app/tx/detail/LoanBrokerCoverClawback.h>
#endif
TRANSACTION(ttLOAN_BROKER_COVER_CLAWBACK, 78, LoanBrokerCoverClawback,
Delegation::delegable,
Delegation::delegatable,
featureLendingProtocol,
noPriv, ({
{sfLoanBrokerID, soeOPTIONAL},
@@ -999,7 +999,7 @@ TRANSACTION(ttLOAN_BROKER_COVER_CLAWBACK, 78, LoanBrokerCoverClawback,
# include <xrpld/app/tx/detail/LoanSet.h>
#endif
TRANSACTION(ttLOAN_SET, 80, LoanSet,
Delegation::delegable,
Delegation::delegatable,
featureLendingProtocol,
mayAuthorizeMPT | mustModifyVault, ({
{sfLoanBrokerID, soeREQUIRED},
@@ -1026,7 +1026,7 @@ TRANSACTION(ttLOAN_SET, 80, LoanSet,
# include <xrpld/app/tx/detail/LoanDelete.h>
#endif
TRANSACTION(ttLOAN_DELETE, 81, LoanDelete,
Delegation::delegable,
Delegation::delegatable,
featureLendingProtocol,
noPriv, ({
{sfLoanID, soeREQUIRED},
@@ -1037,7 +1037,7 @@ TRANSACTION(ttLOAN_DELETE, 81, LoanDelete,
# include <xrpld/app/tx/detail/LoanManage.h>
#endif
TRANSACTION(ttLOAN_MANAGE, 82, LoanManage,
Delegation::delegable,
Delegation::delegatable,
featureLendingProtocol,
// All of the LoanManage options will modify the vault, but the
// transaction can succeed without options, essentially making it
@@ -1051,7 +1051,7 @@ TRANSACTION(ttLOAN_MANAGE, 82, LoanManage,
# include <xrpld/app/tx/detail/LoanPay.h>
#endif
TRANSACTION(ttLOAN_PAY, 84, LoanPay,
Delegation::delegable,
Delegation::delegatable,
featureLendingProtocol,
mayAuthorizeMPT | mustModifyVault, ({
{sfLoanID, soeREQUIRED},
@@ -1066,7 +1066,7 @@ TRANSACTION(ttLOAN_PAY, 84, LoanPay,
# include <xrpld/app/tx/detail/Change.h>
#endif
TRANSACTION(ttAMENDMENT, 100, EnableAmendment,
Delegation::notDelegable,
Delegation::notDelegatable,
uint256{},
noPriv,
({
@@ -1078,7 +1078,7 @@ TRANSACTION(ttAMENDMENT, 100, EnableAmendment,
For details, see: https://xrpl.org/fee-voting.html
*/
TRANSACTION(ttFEE, 101, SetFee,
Delegation::notDelegable,
Delegation::notDelegatable,
uint256{},
noPriv,
({
@@ -1099,7 +1099,7 @@ TRANSACTION(ttFEE, 101, SetFee,
For details, see: https://xrpl.org/negative-unl.html
*/
TRANSACTION(ttUNL_MODIFY, 102, UNLModify,
Delegation::notDelegable,
Delegation::notDelegatable,
uint256{},
noPriv,
({
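The rename between Delegation::delegable and Delegation::delegatable runs through every entry of the transaction table above, which is an X-macro: each consumer redefines TRANSACTION before including the file. A minimal sketch of the pattern, purely illustrative and not rippled's actual macro definition:

    // Illustrative X-macro consumer: redefine TRANSACTION, include the
    // table, and one list drives many generated artifacts (enums,
    // name lookups, field tables, ...).
    #define TRANSACTION(tag, value, name, delegation, amendment, priv, fields) \
        tag = value,
    enum TxType {
    #include "transactions.macro"
    };
    #undef TRANSACTION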

View File

@@ -66,7 +66,7 @@ values over time: this is implemented by the DecayingSample class.
Each server in a cluster creates a list of IP addresses of end points
that are imposing a significant load. This list is called Gossip, which
is passed to other nodes in that cluster. Gossip helps individual
servers in the cluster identify IP addresses that might be unduly loading
servers in the cluster identify IP addreses that might be unduly loading
the entire cluster. Again the recourse of the individual servers is to
drop connections to those IP addresses that occur commonly in the gossip.
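The decaying values mentioned above can be sketched with a tiny exponentially decaying counter; this is an illustration of the idea only, not DecayingSample's actual interface:

    // Illustrative decaying load counter: each tick removes a fixed
    // fraction, so bursts of load fade out rather than persisting.
    class DecayingCounter
    {
        double value_ = 0;
    public:
        void add(double amount) { value_ += amount; }  // record load
        void tick() { value_ *= 0.75; }                // decay once per second
        double value() const { return value_; }
    };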

View File

@@ -61,7 +61,7 @@ private:
// List of all active admin entries
EntryIntrusiveList admin_;
// List of all inactive entries
// List of all inactve entries
EntryIntrusiveList inactive_;
// All imported gossip data

View File

@@ -9,7 +9,7 @@
namespace xrpl {
// Before boost 1.70, get_lowest_layer required an explicit template parameter
// Before boost 1.70, get_lowest_layer required an explicit templat parameter
template <class T>
decltype(auto)
get_lowest_layer(T& t) noexcept
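The shim lets callers use a single spelling across Boost versions. Assuming Boost.Beast's API here, the difference is roughly:

    // Boost < 1.70: the layer type had to be named explicitly.
    auto& s1 = boost::beast::get_lowest_layer<tcp::socket>(stream);
    // Boost >= 1.70: the lowest layer is deduced from the stream.
    auto& s2 = boost::beast::get_lowest_layer(stream);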

View File

@@ -226,7 +226,7 @@ The `fetchNodeNT()` method goes through three phases:
Any SHAMapLeafNode that is immutable has a sequence number of zero
(sharable). When a mutable `SHAMap` is created then its SHAMapTreeNodes are
given non-zero sequence numbers (unshareable). But all nodes in the
given non-zero sequence numbers (unsharable). But all nodes in the
TreeNodeCache are immutable, so if one is found here, its sequence number
will be 0.
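In copy-on-write terms, a zero sequence (cowid) marks a node as frozen and shareable, while a nonzero value ties it to exactly one mutable map. A schematic unshare-before-write step, with hypothetical names:

    // Illustrative copy-on-write check (names are hypothetical): any
    // node not owned by this map, including shared immutable nodes
    // with cowid 0, is cloned before mutation.
    if (node->cowid() != map.cowid())
        node = node->clone(map.cowid());  // private copy for this map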

View File

@@ -125,13 +125,13 @@ intrusive_ptr_release(SHAMapItem const* x)
{
auto p = reinterpret_cast<std::uint8_t const*>(x);
// The SHAMapItem constructor isn't trivial (because the destructor
// The SHAMapItem constuctor isn't trivial (because the destructor
// for CountedObject isn't) so we can't avoid calling it here, but
// plan for a future where we might not need to.
if constexpr (!std::is_trivially_destructible_v<SHAMapItem>)
std::destroy_at(x);
// If the slabber doesn't claim this pointer, it was allocated
// If the slabber doens't claim this pointer, it was allocated
// manually, so we free it manually.
if (!detail::slabber.deallocate(const_cast<std::uint8_t*>(p)))
delete[] p;
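The release path above implies an allocation path that tries the slab first and falls back to the global heap; a sketch under that assumption (the real allocation code is not shown in this diff):

    // Hypothetical counterpart to the release path shown above.
    std::uint8_t* p = detail::slabber.allocate(bytes);
    if (p == nullptr)          // slab full or size not slab-managed
        p = new std::uint8_t[bytes];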

View File

@@ -38,8 +38,8 @@ Number::setround(rounding_mode mode)
// Guard
// The Guard class is used to temporarily add extra digits of
// precision to an operation. This enables the final result
// The Guard class is used to tempoarily add extra digits of
// preicision to an operation. This enables the final result
// to be correctly rounded to the internal precision of Number.
class Number::Guard
@@ -631,48 +631,6 @@ power(Number const& f, unsigned n)
return r;
}
// Continued fraction approximation of ln(x)
static Number
ln(Number const& x, unsigned iterations = 50)
{
if (x <= 0)
throw std::runtime_error("Not positive value");
Number const z = (x - 1) / (x + 1);
Number const zz = z * z;
Number denom = Number(1, -10);
// Construct the fraction from the bottom up
for (int i = iterations; i > 0; --i)
{
Number k(2 * i - 1);
denom = k - (i * i * zz / denom);
}
auto const r = 2 * z / denom;
return r;
}
Number
lg(Number const& x)
{
static Number const ln10 = ln(Number(10));
if (x <= Number(10))
{
auto const r = ln(x) / ln10;
return r;
}
// ln(x) = ln(normX * 10^norm) = ln(normX) + norm * ln(10)
int diffExp = 15 + x.exponent();
Number const normalX = x / Number(1, diffExp); // (1 <= normalX < 10)
auto const lnX = ln(normalX) + diffExp * ln10;
auto const r = lnX / ln10;
return r;
}
// Returns f^(1/d)
// Uses NewtonRaphson iterations until the result stops changing
// to find the non-negative root of the polynomial g(x) = x^d - f
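For reference, the removed ln() evaluates the classical continued fraction for atanh from the bottom up, using ln x = 2 atanh z with z = (x - 1)/(x + 1):

    \ln x = \cfrac{2z}{1 - \cfrac{1^2 z^2}{3 - \cfrac{2^2 z^2}{5 - \cfrac{3^2 z^2}{7 - \cdots}}}}

The loop's k = 2i - 1 supplies the partial denominators 1, 3, 5, 7, ... and the i*i*zz term the squared numerators, so denom converges to the full nested fraction as i counts down from iterations to 1.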

View File

@@ -28,7 +28,7 @@ namespace xrpl {
namespace openssl {
namespace detail {
/** The default strength of self-signed RSA certificates.
/** The default strength of self-signed RSA certifices.
Per NIST Special Publication 800-57 Part 3, 2048-bit RSA is still
considered acceptably secure. Generally, we would want to go above
@@ -131,7 +131,7 @@ initAnonymous(boost::asio::ssl::context& context)
LogicError("X509_new failed");
// According to the standards (X.509 et al), the value should be one
// less than the actually certificate version we want. Since we want
// less than the actualy certificate version we want. Since we want
// version 3, we must use a 2.
X509_set_version(x509, 2);
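The off-by-one is an encoding rule from X.509 itself: the version field on the wire is zero-based. A minimal sketch with the standard OpenSSL calls:

    X509* x509 = X509_new();
    // Wire encoding is zero-based: v1 = 0, v2 = 1, v3 = 2.
    X509_set_version(x509, 2);   // request an X.509v3 certificate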

View File

@@ -24,7 +24,7 @@ is_public(AddressV4 const& addr)
char
get_class(AddressV4 const& addr)
{
static char const* table = "AAAABBCD"; // cspell:disable-line
static char const* table = "AAAABBCD";
return table[(addr.to_uint() & 0xE0000000) >> 29];
}
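The lookup works because the classful ranges were defined by the top three address bits, which the expression isolates. For example, 192.168.0.1 is 0xC0A80001, so (0xC0A80001 & 0xE0000000) >> 29 == 6 and table[6] == 'C'; 10.0.0.0 gives index 0 ('A') and 172.16.0.0 gives index 5 ('B').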

View File

@@ -66,7 +66,7 @@ LoadMonitor::update()
"Imagine if you add 10 to something every second. And you
also reduce it by 1/4 every second. It will "idle" at 40,
corresponding to 10 counts per second."
correponding to 10 counts per second."
*/
do
{
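The quoted figure follows from the fixed point of the update, assuming the quarter decay is applied before each addition of 10:

    L = (3/4)L + 10  =>  (1/4)L = 10  =>  L = 40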

View File

@@ -193,7 +193,7 @@ public:
}
private:
// JSON collections are either arrays, or objects.
// JSON collections are either arrrays, or objects.
struct Collection
{
explicit Collection() = default;

View File

@@ -241,7 +241,7 @@ Reader::readToken(Token& token)
case 'f':
token.type_ = tokenFalse;
ok = match("alse", 4); // cspell:disable-line
ok = match("alse", 4);
break;
case 'n':
@@ -912,7 +912,7 @@ Reader::getLocationLineAndColumn(Location location) const
}
std::string
Reader::getFormattedErrorMessages() const
Reader::getFormatedErrorMessages() const
{
std::string formattedMessage;
@@ -941,7 +941,7 @@ operator>>(std::istream& sin, Value& root)
// XRPL_ASSERT(ok, "Json::operator>>() : parse succeeded");
if (!ok)
xrpl::Throw<std::runtime_error>(reader.getFormattedErrorMessages());
xrpl::Throw<std::runtime_error>(reader.getFormatedErrorMessages());
return sin;
}

View File

@@ -89,26 +89,26 @@ ValueIteratorBase::copy(SelfType const& other)
Value
ValueIteratorBase::key() const
{
Value::CZString const czString = (*current_).first;
Value::CZString const czstring = (*current_).first;
if (czString.c_str())
if (czstring.c_str())
{
if (czString.isStaticString())
return Value(StaticString(czString.c_str()));
if (czstring.isStaticString())
return Value(StaticString(czstring.c_str()));
return Value(czString.c_str());
return Value(czstring.c_str());
}
return Value(czString.index());
return Value(czstring.index());
}
UInt
ValueIteratorBase::index() const
{
Value::CZString const czString = (*current_).first;
Value::CZString const czstring = (*current_).first;
if (!czString.c_str())
return czString.index();
if (!czstring.c_str())
return czstring.index();
return Value::UInt(-1);
}

View File

@@ -79,7 +79,7 @@ valueToString(double value)
// of precision requested below.
char buffer[32];
// Print into the buffer. We need not request the alternative representation
// that always has a decimal point because JSON doesn't distinguish the
// that always has a decimal point because JSON doesn't distingish the
// concepts of reals and integers.
#if defined(_MSC_VER) && \
defined(__STDC_SECURE_LIB__) // Use secure version with visual studio 2005
@@ -108,7 +108,7 @@ valueToQuotedString(char const* value)
// We have to walk value and escape any special characters.
// Appending to std::string is not efficient, but this should be rare.
// (Note: forward slashes are *not* rare, but I am not escaping them.)
unsigned maxsize = strlen(value) * 2 + 3; // all-escaped+quotes+NULL
unsigned maxsize = strlen(value) * 2 + 3; // allescaped+quotes+NULL
std::string result;
result.reserve(maxsize); // to avoid lots of mallocs
result += "\"";
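The 2n + 3 bound in the reservation above covers the worst case where every input character escapes to a two-character sequence, plus the surrounding quotes and a trailing NUL:

    maxsize = 2*strlen(value) + 2 /*quotes*/ + 1 /*NUL*/ = strlen(value)*2 + 3

The reserve() is only a hint: if some escape ever expanded to more than two characters, std::string would simply grow past the reservation.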
@@ -347,7 +347,7 @@ StyledWriter::writeArrayValue(Value const& value)
pushValue("[]");
else
{
bool isArrayMultiLine = isMultilineArray(value);
bool isArrayMultiLine = isMultineArray(value);
if (isArrayMultiLine)
{
@@ -398,7 +398,7 @@ StyledWriter::writeArrayValue(Value const& value)
}
bool
StyledWriter::isMultilineArray(Value const& value)
StyledWriter::isMultineArray(Value const& value)
{
int size = value.size();
bool isMultiLine = size * 3 >= rightMargin_;
@@ -573,7 +573,7 @@ StyledStreamWriter::writeArrayValue(Value const& value)
pushValue("[]");
else
{
bool isArrayMultiLine = isMultilineArray(value);
bool isArrayMultiLine = isMultineArray(value);
if (isArrayMultiLine)
{
@@ -624,7 +624,7 @@ StyledStreamWriter::writeArrayValue(Value const& value)
}
bool
StyledStreamWriter::isMultilineArray(Value const& value)
StyledStreamWriter::isMultineArray(Value const& value)
{
int size = value.size();
bool isMultiLine = size * 3 >= rightMargin_;

View File

@@ -290,7 +290,7 @@ checkArray(STArray const& credentials, unsigned maxSize, beast::Journal j)
if (!ins)
{
JLOG(j.trace()) << "Malformed transaction: "
"duplicates in credentials.";
"duplicates in credenentials.";
return temMALFORMED;
}
}

View File

@@ -3244,7 +3244,7 @@ enforceMPTokenAuthorization(
auto const maybeDomainID = sleIssuance->at(~sfDomainID);
bool expired = false;
bool const authorizedByDomain = [&]() -> bool {
// NOTE: defensive here, should be checked in preclaim
// NOTE: defensive here, shuld be checked in preclaim
if (!maybeDomainID.has_value())
return false; // LCOV_EXCL_LINE

View File

@@ -49,11 +49,11 @@ registerSSLCerts(
return;
}
auto warn = [&](std::string const& msg) {
auto warn = [&](std::string const& mesg) {
// Buffer based on asio recommended size
char buf[256];
::ERR_error_string_n(ec.value(), buf, sizeof(buf));
JLOG(j.warn()) << msg << " " << buf;
JLOG(j.warn()) << mesg << " " << buf;
::ERR_clear_error();
};

Some files were not shown because too many files have changed in this diff.