mirror of https://github.com/XRPLF/rippled.git
synced 2026-02-17 12:22:34 +00:00

Compare commits: pratik/Ope... → pratik/Mig... (4 commits)

| Author | SHA1 | Date |
| --- | --- | --- |
| | 7466a40ada | |
| | 61be075ff8 | |
| | db73390eff | |
| | adc6ca6d11 | |
@@ -37,7 +37,7 @@ BinPackParameters: false
BreakBeforeBinaryOperators: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 120
ColumnLimit: 80
CommentPragmas: "^ IWYU pragma:"
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
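The hunk above appears to tighten the formatter's column limit from 120 to 80. Re-applying the formatting across the tree after such a change can be done with a one-liner; this is a hedged sketch, assuming clang-format is on PATH and the repository's style file is picked up via --style=file:

```bash
# Reformat all tracked C++ sources and headers using the repository's .clang-format.
git ls-files '*.cpp' '*.h' '*.hpp' '*.ipp' | xargs clang-format -i --style=file
```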
@@ -1,247 +0,0 @@
|
||||
_help_parse: Options affecting listfile parsing
|
||||
parse:
|
||||
_help_additional_commands:
|
||||
- Specify structure for custom cmake functions
|
||||
additional_commands:
|
||||
target_protobuf_sources:
|
||||
pargs:
|
||||
- target
|
||||
- prefix
|
||||
kwargs:
|
||||
PROTOS: "*"
|
||||
LANGUAGE: cpp
|
||||
IMPORT_DIRS: "*"
|
||||
GENERATE_EXTENSIONS: "*"
|
||||
PLUGIN: "*"
|
||||
_help_override_spec:
|
||||
- Override configurations per-command where available
|
||||
override_spec: {}
|
||||
_help_vartags:
|
||||
- Specify variable tags.
|
||||
vartags: []
|
||||
_help_proptags:
|
||||
- Specify property tags.
|
||||
proptags: []
|
||||
_help_format: Options affecting formatting.
|
||||
format:
|
||||
_help_disable:
|
||||
- Disable formatting entirely, making cmake-format a no-op
|
||||
disable: false
|
||||
_help_line_width:
|
||||
- How wide to allow formatted cmake files
|
||||
line_width: 120
|
||||
_help_tab_size:
|
||||
- How many spaces to tab for indent
|
||||
tab_size: 4
|
||||
_help_use_tabchars:
|
||||
- If true, lines are indented using tab characters (utf-8
|
||||
- 0x09) instead of <tab_size> space characters (utf-8 0x20).
|
||||
- In cases where the layout would require a fractional tab
|
||||
- character, the behavior of the fractional indentation is
|
||||
- governed by <fractional_tab_policy>
|
||||
use_tabchars: false
|
||||
_help_fractional_tab_policy:
|
||||
- If <use_tabchars> is True, then the value of this variable
|
||||
- indicates how fractional indentions are handled during
|
||||
- whitespace replacement. If set to 'use-space', fractional
|
||||
- indentation is left as spaces (utf-8 0x20). If set to
|
||||
- "`round-up` fractional indentation is replaced with a single"
|
||||
- tab character (utf-8 0x09) effectively shifting the column
|
||||
- to the next tabstop
|
||||
fractional_tab_policy: use-space
|
||||
_help_max_subgroups_hwrap:
|
||||
- If an argument group contains more than this many sub-groups
|
||||
- (parg or kwarg groups) then force it to a vertical layout.
|
||||
max_subgroups_hwrap: 4
|
||||
_help_max_pargs_hwrap:
|
||||
- If a positional argument group contains more than this many
|
||||
- arguments, then force it to a vertical layout.
|
||||
max_pargs_hwrap: 5
|
||||
_help_max_rows_cmdline:
|
||||
- If a cmdline positional group consumes more than this many
|
||||
- lines without nesting, then invalidate the layout (and nest)
|
||||
max_rows_cmdline: 2
|
||||
_help_separate_ctrl_name_with_space:
|
||||
- If true, separate flow control names from their parentheses
|
||||
- with a space
|
||||
separate_ctrl_name_with_space: true
|
||||
_help_separate_fn_name_with_space:
|
||||
- If true, separate function names from parentheses with a
|
||||
- space
|
||||
separate_fn_name_with_space: false
|
||||
_help_dangle_parens:
|
||||
- If a statement is wrapped to more than one line, than dangle
|
||||
- the closing parenthesis on its own line.
|
||||
dangle_parens: false
|
||||
_help_dangle_align:
|
||||
- If the trailing parenthesis must be 'dangled' on its on
|
||||
- "line, then align it to this reference: `prefix`: the start"
|
||||
- "of the statement, `prefix-indent`: the start of the"
|
||||
- "statement, plus one indentation level, `child`: align to"
|
||||
- the column of the arguments
|
||||
dangle_align: prefix
|
||||
_help_min_prefix_chars:
|
||||
- If the statement spelling length (including space and
|
||||
- parenthesis) is smaller than this amount, then force reject
|
||||
- nested layouts.
|
||||
min_prefix_chars: 18
|
||||
_help_max_prefix_chars:
|
||||
- If the statement spelling length (including space and
|
||||
- parenthesis) is larger than the tab width by more than this
|
||||
- amount, then force reject un-nested layouts.
|
||||
max_prefix_chars: 10
|
||||
_help_max_lines_hwrap:
|
||||
- If a candidate layout is wrapped horizontally but it exceeds
|
||||
- this many lines, then reject the layout.
|
||||
max_lines_hwrap: 2
|
||||
_help_line_ending:
|
||||
- What style line endings to use in the output.
|
||||
line_ending: unix
|
||||
_help_command_case:
|
||||
- Format command names consistently as 'lower' or 'upper' case
|
||||
command_case: canonical
|
||||
_help_keyword_case:
|
||||
- Format keywords consistently as 'lower' or 'upper' case
|
||||
keyword_case: unchanged
|
||||
_help_always_wrap:
|
||||
- A list of command names which should always be wrapped
|
||||
always_wrap: []
|
||||
_help_enable_sort:
|
||||
- If true, the argument lists which are known to be sortable
|
||||
- will be sorted lexicographicall
|
||||
enable_sort: true
|
||||
_help_autosort:
|
||||
- If true, the parsers may infer whether or not an argument
|
||||
- list is sortable (without annotation).
|
||||
autosort: true
|
||||
_help_require_valid_layout:
|
||||
- By default, if cmake-format cannot successfully fit
|
||||
- everything into the desired linewidth it will apply the
|
||||
- last, most aggressive attempt that it made. If this flag is
|
||||
- True, however, cmake-format will print error, exit with non-
|
||||
- zero status code, and write-out nothing
|
||||
require_valid_layout: false
|
||||
_help_layout_passes:
|
||||
- A dictionary mapping layout nodes to a list of wrap
|
||||
- decisions. See the documentation for more information.
|
||||
layout_passes: {}
|
||||
_help_markup: Options affecting comment reflow and formatting.
|
||||
markup:
|
||||
_help_bullet_char:
|
||||
- What character to use for bulleted lists
|
||||
bullet_char: "-"
|
||||
_help_enum_char:
|
||||
- What character to use as punctuation after numerals in an
|
||||
- enumerated list
|
||||
enum_char: .
|
||||
_help_first_comment_is_literal:
|
||||
- If comment markup is enabled, don't reflow the first comment
|
||||
- block in each listfile. Use this to preserve formatting of
|
||||
- your copyright/license statements.
|
||||
first_comment_is_literal: false
|
||||
_help_literal_comment_pattern:
|
||||
- If comment markup is enabled, don't reflow any comment block
|
||||
- which matches this (regex) pattern. Default is `None`
|
||||
- (disabled).
|
||||
literal_comment_pattern: null
|
||||
_help_fence_pattern:
|
||||
- Regular expression to match preformat fences in comments
|
||||
- default= ``r'^\s*([`~]{3}[`~]*)(.*)$'``
|
||||
fence_pattern: ^\s*([`~]{3}[`~]*)(.*)$
|
||||
_help_ruler_pattern:
|
||||
- Regular expression to match rulers in comments default=
|
||||
- '``r''^\s*[^\w\s]{3}.*[^\w\s]{3}$''``'
|
||||
ruler_pattern: ^\s*[^\w\s]{3}.*[^\w\s]{3}$
|
||||
_help_explicit_trailing_pattern:
|
||||
- If a comment line matches starts with this pattern then it
|
||||
- is explicitly a trailing comment for the preceding
|
||||
- argument. Default is '#<'
|
||||
explicit_trailing_pattern: "#<"
|
||||
_help_hashruler_min_length:
|
||||
- If a comment line starts with at least this many consecutive
|
||||
- hash characters, then don't lstrip() them off. This allows
|
||||
- for lazy hash rulers where the first hash char is not
|
||||
- separated by space
|
||||
hashruler_min_length: 10
|
||||
_help_canonicalize_hashrulers:
|
||||
- If true, then insert a space between the first hash char and
|
||||
- remaining hash chars in a hash ruler, and normalize its
|
||||
- length to fill the column
|
||||
canonicalize_hashrulers: true
|
||||
_help_enable_markup:
|
||||
- enable comment markup parsing and reflow
|
||||
enable_markup: false
|
||||
_help_lint: Options affecting the linter
|
||||
lint:
|
||||
_help_disabled_codes:
|
||||
- a list of lint codes to disable
|
||||
disabled_codes: []
|
||||
_help_function_pattern:
|
||||
- regular expression pattern describing valid function names
|
||||
function_pattern: "[0-9a-z_]+"
|
||||
_help_macro_pattern:
|
||||
- regular expression pattern describing valid macro names
|
||||
macro_pattern: "[0-9A-Z_]+"
|
||||
_help_global_var_pattern:
|
||||
- regular expression pattern describing valid names for
|
||||
- variables with global (cache) scope
|
||||
global_var_pattern: "[A-Z][0-9A-Z_]+"
|
||||
_help_internal_var_pattern:
|
||||
- regular expression pattern describing valid names for
|
||||
- variables with global scope (but internal semantic)
|
||||
internal_var_pattern: _[A-Z][0-9A-Z_]+
|
||||
_help_local_var_pattern:
|
||||
- regular expression pattern describing valid names for
|
||||
- variables with local scope
|
||||
local_var_pattern: "[a-z][a-z0-9_]+"
|
||||
_help_private_var_pattern:
|
||||
- regular expression pattern describing valid names for
|
||||
- privatedirectory variables
|
||||
private_var_pattern: _[0-9a-z_]+
|
||||
_help_public_var_pattern:
|
||||
- regular expression pattern describing valid names for public
|
||||
- directory variables
|
||||
public_var_pattern: "[A-Z][0-9A-Z_]+"
|
||||
_help_argument_var_pattern:
|
||||
- regular expression pattern describing valid names for
|
||||
- function/macro arguments and loop variables.
|
||||
argument_var_pattern: "[a-z][a-z0-9_]+"
|
||||
_help_keyword_pattern:
|
||||
- regular expression pattern describing valid names for
|
||||
- keywords used in functions or macros
|
||||
keyword_pattern: "[A-Z][0-9A-Z_]+"
|
||||
_help_max_conditionals_custom_parser:
|
||||
- In the heuristic for C0201, how many conditionals to match
|
||||
- within a loop in before considering the loop a parser.
|
||||
max_conditionals_custom_parser: 2
|
||||
_help_min_statement_spacing:
|
||||
- Require at least this many newlines between statements
|
||||
min_statement_spacing: 1
|
||||
_help_max_statement_spacing:
|
||||
- Require no more than this many newlines between statements
|
||||
max_statement_spacing: 2
|
||||
max_returns: 6
|
||||
max_branches: 12
|
||||
max_arguments: 5
|
||||
max_localvars: 15
|
||||
max_statements: 50
|
||||
_help_encode: Options affecting file encoding
|
||||
encode:
|
||||
_help_emit_byteorder_mark:
|
||||
- If true, emit the unicode byte-order mark (BOM) at the start
|
||||
- of the file
|
||||
emit_byteorder_mark: false
|
||||
_help_input_encoding:
|
||||
- Specify the encoding of the input file. Defaults to utf-8
|
||||
input_encoding: utf-8
|
||||
_help_output_encoding:
|
||||
- Specify the encoding of the output file. Defaults to utf-8.
|
||||
- Note that cmake only claims to support utf-8 so be careful
|
||||
- when using anything else
|
||||
output_encoding: utf-8
|
||||
_help_misc: Miscellaneous configurations options.
|
||||
misc:
|
||||
_help_per_command:
|
||||
- A dictionary containing any per-command configuration
|
||||
- overrides. Currently only `command_case` is supported.
|
||||
per_command: {}
.gitattributes (1 line changed)

@@ -1,6 +1,5 @@
# Set default behaviour, in case users don't have core.autocrlf set.
#* text=auto
# cspell: disable

# Visual Studio
*.sln text eol=crlf
.github/CODEOWNERS (new file, 8 lines)

@@ -0,0 +1,8 @@
# Allow anyone to review any change by default.
*

# Require the rpc-reviewers team to review changes to the rpc code.
include/xrpl/protocol/ @xrplf/rpc-reviewers
src/libxrpl/protocol/ @xrplf/rpc-reviewers
src/xrpld/rpc/ @xrplf/rpc-reviewers
src/xrpld/app/misc/ @xrplf/rpc-reviewers
.github/actions/build-deps/action.yml (15 lines changed)

@@ -4,6 +4,9 @@ description: "Install Conan dependencies, optionally forcing a rebuild of all de
# Note that actions do not support 'type' and all inputs are strings, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
inputs:
build_dir:
description: "The directory where to build."
required: true
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
@@ -18,10 +21,6 @@ inputs:
description: "The logging verbosity."
required: false
default: "verbose"
sanitizers:
description: "The sanitizers to enable."
required: false
default: ""

runs:
using: composite
@@ -29,15 +28,17 @@ runs:
- name: Install Conan dependencies
shell: bash
env:
BUILD_DIR: ${{ inputs.build_dir }}
BUILD_NPROC: ${{ inputs.build_nproc }}
BUILD_OPTION: ${{ inputs.force_build == 'true' && '*' || 'missing' }}
BUILD_TYPE: ${{ inputs.build_type }}
LOG_VERBOSITY: ${{ inputs.log_verbosity }}
SANITIZERS: ${{ inputs.sanitizers }}
run: |
echo 'Installing dependencies.'
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
conan install \
--profile ci \
--output-folder . \
--build="${BUILD_OPTION}" \
--options:host='&:tests=True' \
--options:host='&:xrpld=True' \
@@ -45,4 +46,4 @@ runs:
--conf:all tools.build:jobs=${BUILD_NPROC} \
--conf:all tools.build:verbosity="${LOG_VERBOSITY}" \
--conf:all tools.compilation:verbosity="${LOG_VERBOSITY}" \
.
..
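For orientation, the `conan install` call assembled by this action corresponds roughly to the following local invocation. This is a minimal sketch, assuming the `ci` profile has already been installed (see the setup-conan action) and using a hypothetical `.build` directory; only the flags visible in the hunks above are reproduced, and the flags elided between the two hunks are omitted.

```bash
#!/usr/bin/env bash
set -euo pipefail

BUILD_DIR=.build        # hypothetical build directory (the action takes it as an input)
BUILD_OPTION=missing    # the action switches this to '*' when force_build is true
BUILD_NPROC="$(nproc)"
LOG_VERBOSITY=verbose

mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"

# Mirrors the call in the action; '..' points back at the repository root.
conan install \
  --profile ci \
  --output-folder . \
  --build="${BUILD_OPTION}" \
  --options:host='&:tests=True' \
  --options:host='&:xrpld=True' \
  --conf:all tools.build:jobs="${BUILD_NPROC}" \
  --conf:all tools.build:verbosity="${LOG_VERBOSITY}" \
  --conf:all tools.compilation:verbosity="${LOG_VERBOSITY}" \
  ..
```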
.github/actions/generate-version/action.yml (deleted, 44 lines)

@@ -1,44 +0,0 @@
name: Generate build version number
description: "Generate build version number."

outputs:
version:
description: "The generated build version number."
value: ${{ steps.version.outputs.version }}

runs:
using: composite
steps:
# When a tag is pushed, the version is used as-is.
- name: Generate version for tag event
if: ${{ github.event_name == 'tag' }}
shell: bash
env:
VERSION: ${{ github.ref_name }}
run: echo "VERSION=${VERSION}" >> "${GITHUB_ENV}"

# When a tag is not pushed, then the version (e.g. 1.2.3-b0) is extracted
# from the BuildInfo.cpp file and the shortened commit hash appended to it.
# We use a plus sign instead of a hyphen because Conan recipe versions do
# not support two hyphens.
- name: Generate version for non-tag event
if: ${{ github.event_name != 'tag' }}
shell: bash
run: |
echo 'Extracting version from BuildInfo.cpp.'
VERSION="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')"
if [[ -z "${VERSION}" ]]; then
echo 'Unable to extract version from BuildInfo.cpp.'
exit 1
fi

echo 'Appending shortened commit hash to version.'
SHA='${{ github.sha }}'
VERSION="${VERSION}+${SHA:0:7}"

echo "VERSION=${VERSION}" >> "${GITHUB_ENV}"

- name: Output version
id: version
shell: bash
run: echo "version=${VERSION}" >> "${GITHUB_OUTPUT}"
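The comments in the removed action describe the non-tag versioning scheme: take `versionString` from BuildInfo.cpp and append the shortened commit hash with a plus sign, since Conan recipe versions do not allow two hyphens. A minimal local sketch of the same computation, using `git rev-parse HEAD` as a stand-in for `${{ github.sha }}`:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Extract the base version (e.g. 1.2.3-b0) the same way the removed action did.
VERSION="$(grep "versionString =" src/libxrpl/protocol/BuildInfo.cpp | awk -F '"' '{print $2}')"

# Locally there is no ${{ github.sha }}; use the current HEAD as a stand-in.
SHA="$(git rev-parse HEAD)"

# Plus sign rather than a second hyphen.
echo "${VERSION}+${SHA:0:7}"
```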
.github/actions/print-env/action.yml (24 lines changed)

@@ -11,6 +11,12 @@ runs:
echo 'Checking environment variables.'
set

echo 'Checking CMake version.'
cmake --version

echo 'Checking Conan version.'
conan --version

- name: Check configuration (Linux and macOS)
if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }}
shell: bash
@@ -21,23 +27,17 @@ runs:
echo 'Checking environment variables.'
env | sort

echo 'Checking CMake version.'
cmake --version

echo 'Checking compiler version.'
${{ runner.os == 'Linux' && '${CC}' || 'clang' }} --version

echo 'Checking Conan version.'
conan --version

echo 'Checking Ninja version.'
ninja --version

echo 'Checking nproc version.'
nproc --version

- name: Check configuration (all)
shell: bash
run: |
echo 'Checking Ccache version.'
ccache --version

echo 'Checking CMake version.'
cmake --version

echo 'Checking Conan version.'
conan --version
.github/actions/setup-conan/action.yml (16 lines changed)

@@ -2,11 +2,11 @@ name: Setup Conan
description: "Set up Conan configuration, profile, and remote."

inputs:
remote_name:
conan_remote_name:
description: "The name of the Conan remote to use."
required: false
default: xrplf
remote_url:
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: false
default: https://conan.ripplex.io
@@ -28,19 +28,19 @@ runs:
shell: bash
run: |
echo 'Installing profile.'
conan config install conan/profiles/ -tf $(conan config home)/profiles/
conan config install conan/profiles/default -tf $(conan config home)/profiles/

echo 'Conan profile:'
conan profile show --profile ci
conan profile show

- name: Set up Conan remote
shell: bash
env:
REMOTE_NAME: ${{ inputs.remote_name }}
REMOTE_URL: ${{ inputs.remote_url }}
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
CONAN_REMOTE_URL: ${{ inputs.conan_remote_url }}
run: |
echo "Adding Conan remote '${REMOTE_NAME}' at '${REMOTE_URL}'."
conan remote add --index 0 --force "${REMOTE_NAME}" "${REMOTE_URL}"
echo "Adding Conan remote '${CONAN_REMOTE_NAME}' at '${CONAN_REMOTE_URL}'."
conan remote add --index 0 --force "${CONAN_REMOTE_NAME}" "${CONAN_REMOTE_URL}"

echo 'Listing Conan remotes.'
conan remote list
.github/scripts/levelization/README.md (4 lines changed)

@@ -81,10 +81,10 @@ It generates many files of [results](results):

- `rawincludes.txt`: The raw dump of the `#includes`
- `paths.txt`: A second dump grouping the source module
to the destination module, de-duped, and with frequency counts.
to the destination module, deduped, and with frequency counts.
- `includes/`: A directory where each file represents a module and
contains a list of modules and counts that the module _includes_.
- `included_by/`: Similar to `includes/`, but the other way around. Each
- `includedby/`: Similar to `includes/`, but the other way around. Each
file represents a module and contains a list of modules and counts
that _include_ the module.
- [`loops.txt`](results/loops.txt): A list of direct loops detected
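The README excerpt above lists what the levelization run produces. Below is a minimal sketch of how the results might be inspected afterwards, assuming generate.sh has been run and a results/ directory exists in the current working directory; the module name used is just an example taken from the dependency listings elsewhere in this diff.

```bash
# Highest-frequency module-to-module include edges (paths.txt lines are prefixed with counts).
sort -rn results/paths.txt | head

# Modules that include xrpl.basics, with counts (per the renamed includedby/ directory).
cat results/includedby/xrpl.basics

# Direct include loops that were detected.
cat results/loops.txt
```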
.github/scripts/levelization/generate.sh (6 lines changed)

@@ -29,7 +29,7 @@ pushd results
oldifs=${IFS}
IFS=:
mkdir includes
mkdir included_by
mkdir includedby
echo Build levelization paths
exec 3< ${includes} # open rawincludes.txt for input
while read -r -u 3 file include
@@ -59,7 +59,7 @@ do
echo $level $includelevel | tee -a paths.txt
fi
done
echo Sort and deduplicate paths
echo Sort and dedup paths
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
mv sortedpaths.txt paths.txt
exec 3>&- #close fd 3
@@ -71,7 +71,7 @@ exec 4<paths.txt # open paths.txt for input
while read -r -u 4 count level include
do
echo ${include} ${count} | tee -a includes/${level}
echo ${level} ${count} | tee -a included_by/${include}
echo ${level} ${count} | tee -a includedby/${include}
done
exec 4>&- #close fd 4
@@ -4,11 +4,14 @@ Loop: test.jtx test.toplevel
Loop: test.jtx test.unit_test
test.unit_test == test.jtx

Loop: xrpld.app xrpld.core
xrpld.app > xrpld.core

Loop: xrpld.app xrpld.overlay
xrpld.overlay > xrpld.app

Loop: xrpld.app xrpld.peerfinder
xrpld.peerfinder == xrpld.app
xrpld.peerfinder ~= xrpld.app

Loop: xrpld.app xrpld.rpc
xrpld.rpc > xrpld.app
@@ -1,3 +1,13 @@
|
||||
doctest.basics > xrpl.basics
|
||||
doctest.basics > xrpl.protocol
|
||||
doctest.beast > xrpl.basics
|
||||
doctest.core > xrpl.core
|
||||
doctest.core > xrpl.json
|
||||
doctest.csf > test.csf
|
||||
doctest.nodestore > xrpl.nodestore
|
||||
doctest.protocol > xrpl.basics
|
||||
doctest.protocol > xrpl.json
|
||||
doctest.protocol > xrpl.protocol
|
||||
libxrpl.basics > xrpl.basics
|
||||
libxrpl.core > xrpl.basics
|
||||
libxrpl.core > xrpl.core
|
||||
@@ -17,15 +27,12 @@ libxrpl.nodestore > xrpl.protocol
|
||||
libxrpl.protocol > xrpl.basics
|
||||
libxrpl.protocol > xrpl.json
|
||||
libxrpl.protocol > xrpl.protocol
|
||||
libxrpl.rdb > xrpl.basics
|
||||
libxrpl.rdb > xrpl.rdb
|
||||
libxrpl.resource > xrpl.basics
|
||||
libxrpl.resource > xrpl.json
|
||||
libxrpl.resource > xrpl.resource
|
||||
libxrpl.server > xrpl.basics
|
||||
libxrpl.server > xrpl.json
|
||||
libxrpl.server > xrpl.protocol
|
||||
libxrpl.server > xrpl.rdb
|
||||
libxrpl.server > xrpl.server
|
||||
libxrpl.shamap > xrpl.basics
|
||||
libxrpl.shamap > xrpl.protocol
|
||||
@@ -44,9 +51,7 @@ test.app > xrpl.json
|
||||
test.app > xrpl.ledger
|
||||
test.app > xrpl.nodestore
|
||||
test.app > xrpl.protocol
|
||||
test.app > xrpl.rdb
|
||||
test.app > xrpl.resource
|
||||
test.app > xrpl.server
|
||||
test.basics > test.jtx
|
||||
test.basics > test.unit_test
|
||||
test.basics > xrpl.basics
|
||||
@@ -72,7 +77,6 @@ test.core > xrpl.basics
|
||||
test.core > xrpl.core
|
||||
test.core > xrpld.core
|
||||
test.core > xrpl.json
|
||||
test.core > xrpl.rdb
|
||||
test.core > xrpl.server
|
||||
test.csf > xrpl.basics
|
||||
test.csf > xrpld.consensus
|
||||
@@ -101,8 +105,8 @@ test.nodestore > test.jtx
|
||||
test.nodestore > test.toplevel
|
||||
test.nodestore > test.unit_test
|
||||
test.nodestore > xrpl.basics
|
||||
test.nodestore > xrpld.core
|
||||
test.nodestore > xrpl.nodestore
|
||||
test.nodestore > xrpl.rdb
|
||||
test.overlay > test.jtx
|
||||
test.overlay > test.toplevel
|
||||
test.overlay > test.unit_test
|
||||
@@ -110,7 +114,6 @@ test.overlay > xrpl.basics
|
||||
test.overlay > xrpld.app
|
||||
test.overlay > xrpld.overlay
|
||||
test.overlay > xrpld.peerfinder
|
||||
test.overlay > xrpl.nodestore
|
||||
test.overlay > xrpl.protocol
|
||||
test.overlay > xrpl.shamap
|
||||
test.peerfinder > test.beast
|
||||
@@ -159,8 +162,6 @@ tests.libxrpl > xrpl.json
|
||||
tests.libxrpl > xrpl.net
|
||||
xrpl.core > xrpl.basics
|
||||
xrpl.core > xrpl.json
|
||||
xrpl.core > xrpl.ledger
|
||||
xrpl.core > xrpl.protocol
|
||||
xrpl.json > xrpl.basics
|
||||
xrpl.ledger > xrpl.basics
|
||||
xrpl.ledger > xrpl.protocol
|
||||
@@ -169,17 +170,12 @@ xrpl.nodestore > xrpl.basics
|
||||
xrpl.nodestore > xrpl.protocol
|
||||
xrpl.protocol > xrpl.basics
|
||||
xrpl.protocol > xrpl.json
|
||||
xrpl.rdb > xrpl.basics
|
||||
xrpl.rdb > xrpl.core
|
||||
xrpl.rdb > xrpl.protocol
|
||||
xrpl.resource > xrpl.basics
|
||||
xrpl.resource > xrpl.json
|
||||
xrpl.resource > xrpl.protocol
|
||||
xrpl.server > xrpl.basics
|
||||
xrpl.server > xrpl.core
|
||||
xrpl.server > xrpl.json
|
||||
xrpl.server > xrpl.protocol
|
||||
xrpl.server > xrpl.rdb
|
||||
xrpl.shamap > xrpl.basics
|
||||
xrpl.shamap > xrpl.nodestore
|
||||
xrpl.shamap > xrpl.protocol
|
||||
@@ -188,15 +184,12 @@ xrpld.app > xrpl.basics
|
||||
xrpld.app > xrpl.core
|
||||
xrpld.app > xrpld.conditions
|
||||
xrpld.app > xrpld.consensus
|
||||
xrpld.app > xrpld.core
|
||||
xrpld.app > xrpl.json
|
||||
xrpld.app > xrpl.ledger
|
||||
xrpld.app > xrpl.net
|
||||
xrpld.app > xrpl.nodestore
|
||||
xrpld.app > xrpl.protocol
|
||||
xrpld.app > xrpl.rdb
|
||||
xrpld.app > xrpl.resource
|
||||
xrpld.app > xrpl.server
|
||||
xrpld.app > xrpl.shamap
|
||||
xrpld.conditions > xrpl.basics
|
||||
xrpld.conditions > xrpl.protocol
|
||||
@@ -208,20 +201,17 @@ xrpld.core > xrpl.core
|
||||
xrpld.core > xrpl.json
|
||||
xrpld.core > xrpl.net
|
||||
xrpld.core > xrpl.protocol
|
||||
xrpld.core > xrpl.rdb
|
||||
xrpld.overlay > xrpl.basics
|
||||
xrpld.overlay > xrpl.core
|
||||
xrpld.overlay > xrpld.core
|
||||
xrpld.overlay > xrpld.peerfinder
|
||||
xrpld.overlay > xrpl.json
|
||||
xrpld.overlay > xrpl.protocol
|
||||
xrpld.overlay > xrpl.rdb
|
||||
xrpld.overlay > xrpl.resource
|
||||
xrpld.overlay > xrpl.server
|
||||
xrpld.peerfinder > xrpl.basics
|
||||
xrpld.peerfinder > xrpld.core
|
||||
xrpld.peerfinder > xrpl.protocol
|
||||
xrpld.peerfinder > xrpl.rdb
|
||||
xrpld.perflog > xrpl.basics
|
||||
xrpld.perflog > xrpl.core
|
||||
xrpld.perflog > xrpld.rpc
|
||||
@@ -234,7 +224,6 @@ xrpld.rpc > xrpl.ledger
|
||||
xrpld.rpc > xrpl.net
|
||||
xrpld.rpc > xrpl.nodestore
|
||||
xrpld.rpc > xrpl.protocol
|
||||
xrpld.rpc > xrpl.rdb
|
||||
xrpld.rpc > xrpl.resource
|
||||
xrpld.rpc > xrpl.server
|
||||
xrpld.shamap > xrpl.shamap
|
||||
|
||||
.github/scripts/rename/README.md (6 lines changed)

@@ -19,7 +19,7 @@ run from the repository root.
1. `.github/scripts/rename/definitions.sh`: This script will rename all
definitions, such as include guards, from `RIPPLE_XXX` and `RIPPLED_XXX` to
`XRPL_XXX`.
2. `.github/scripts/rename/copyright.sh`: This script will remove superfluous
2. `.github/scripts/rename/copyright.sh`: This script will remove superflous
copyright notices.
3. `.github/scripts/rename/cmake.sh`: This script will rename all CMake files
from `RippleXXX.cmake` or `RippledXXX.cmake` to `XrplXXX.cmake`, and any
@@ -31,9 +31,6 @@ run from the repository root.
the `xrpld` binary.
5. `.github/scripts/rename/namespace.sh`: This script will rename the C++
namespaces from `ripple` to `xrpl`.
6. `.github/scripts/rename/config.sh`: This script will rename the config from
`rippled.cfg` to `xrpld.cfg`, and updating the code accordingly. The old
filename will still be accepted.

You can run all these scripts from the repository root as follows:

@@ -43,5 +40,4 @@ You can run all these scripts from the repository root as follows:
./.github/scripts/rename/cmake.sh .
./.github/scripts/rename/binary.sh .
./.github/scripts/rename/namespace.sh .
./.github/scripts/rename/config.sh .
```
.github/scripts/rename/config.sh (72 lines changed)
@@ -1,72 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Exit the script as soon as an error occurs.
|
||||
set -e
|
||||
|
||||
# On MacOS, ensure that GNU sed is installed and available as `gsed`.
|
||||
SED_COMMAND=sed
|
||||
if [[ "${OSTYPE}" == 'darwin'* ]]; then
|
||||
if ! command -v gsed &> /dev/null; then
|
||||
echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
|
||||
exit 1
|
||||
fi
|
||||
SED_COMMAND=gsed
|
||||
fi
|
||||
|
||||
# This script renames the config from `rippled.cfg` to `xrpld.cfg`, and updates
|
||||
# the code accordingly. The old filename will still be accepted.
|
||||
# Usage: .github/scripts/rename/config.sh <repository directory>
|
||||
|
||||
if [ "$#" -ne 1 ]; then
|
||||
echo "Usage: $0 <repository directory>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DIRECTORY=$1
|
||||
echo "Processing directory: ${DIRECTORY}"
|
||||
if [ ! -d "${DIRECTORY}" ]; then
|
||||
echo "Error: Directory '${DIRECTORY}' does not exist."
|
||||
exit 1
|
||||
fi
|
||||
pushd ${DIRECTORY}
|
||||
|
||||
# Add the xrpld.cfg to the .gitignore.
|
||||
if ! grep -q 'xrpld.cfg' .gitignore; then
|
||||
${SED_COMMAND} -i '/rippled.cfg/a\
|
||||
/xrpld.cfg' .gitignore
|
||||
fi
|
||||
|
||||
# Rename the files.
|
||||
if [ -e rippled.cfg ]; then
|
||||
mv rippled.cfg xrpld.cfg
|
||||
fi
|
||||
if [ -e cfg/rippled-example.cfg ]; then
|
||||
mv cfg/rippled-example.cfg cfg/xrpld-example.cfg
|
||||
fi
|
||||
|
||||
# Rename inside the files.
|
||||
DIRECTORIES=("cfg" "cmake" "include" "src")
|
||||
for DIRECTORY in "${DIRECTORIES[@]}"; do
|
||||
echo "Processing directory: ${DIRECTORY}"
|
||||
|
||||
find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" -o -name "*.cmake" -o -name "*.txt" -o -name "*.cfg" -o -name "*.md" \) | while read -r FILE; do
|
||||
echo "Processing file: ${FILE}"
|
||||
${SED_COMMAND} -i -E 's/rippled(-example)?[ .]cfg/xrpld\1.cfg/g' "${FILE}"
|
||||
done
|
||||
done
|
||||
${SED_COMMAND} -i 's/rippled/xrpld/g' cfg/xrpld-example.cfg
|
||||
${SED_COMMAND} -i 's/rippled/xrpld/g' src/test/core/Config_test.cpp
|
||||
${SED_COMMAND} -i 's/ripplevalidators/xrplvalidators/g' src/test/core/Config_test.cpp # cspell: disable-line
|
||||
${SED_COMMAND} -i 's/rippleConfig/xrpldConfig/g' src/test/core/Config_test.cpp
|
||||
${SED_COMMAND} -i 's@ripple/@xrpld/@g' src/test/core/Config_test.cpp
|
||||
${SED_COMMAND} -i 's/Rippled/File/g' src/test/core/Config_test.cpp
|
||||
|
||||
|
||||
# Restore the old config file name in the code that maintains support for now.
|
||||
${SED_COMMAND} -i 's/configLegacyName = "xrpld.cfg"/configLegacyName = "rippled.cfg"/g' src/xrpld/core/detail/Config.cpp
|
||||
|
||||
# Restore an URL.
|
||||
${SED_COMMAND} -i 's/connect-your-xrpld-to-the-xrp-test-net.html/connect-your-rippled-to-the-xrp-test-net.html/g' cfg/xrpld-example.cfg
|
||||
|
||||
popd
|
||||
echo "Renaming complete."
|
||||
.github/scripts/rename/copyright.sh (12 lines changed)
@@ -50,11 +50,11 @@ for DIRECTORY in "${DIRECTORIES[@]}"; do
|
||||
# Handle the cases where the copyright notice is enclosed in /* ... */
|
||||
# and usually surrounded by //---- and //======.
|
||||
${SED_COMMAND} -z -i -E 's@^//-------+\n+@@' "${FILE}"
|
||||
${SED_COMMAND} -z -i -E 's@^.*Copyright.+(Ripple|Bougalis|Falco|Hinnant|Null|Ritchford|XRPLF).+PERFORMANCE OF THIS SOFTWARE\.\n\*/\n+@@' "${FILE}" # cspell: ignore Bougalis Falco Hinnant Ritchford
|
||||
${SED_COMMAND} -z -i -E 's@^.*Copyright.+(Ripple|Bougalis|Falco|Hinnant|Null|Ritchford|XRPLF).+PERFORMANCE OF THIS SOFTWARE\.\n\*/\n+@@' "${FILE}"
|
||||
${SED_COMMAND} -z -i -E 's@^//=======+\n+@@' "${FILE}"
|
||||
|
||||
# Handle the cases where the copyright notice is commented out with //.
|
||||
${SED_COMMAND} -z -i -E 's@^//\n// Copyright.+Falco \(vinnie dot falco at gmail dot com\)\n//\n+@@' "${FILE}" # cspell: ignore Vinnie Falco
|
||||
${SED_COMMAND} -z -i -E 's@^//\n// Copyright.+Falco \(vinnie dot falco at gmail dot com\)\n//\n+@@' "${FILE}"
|
||||
done
|
||||
done
|
||||
|
||||
@@ -83,16 +83,16 @@ if ! grep -q 'Dev Null' src/xrpld/rpc/handlers/ValidatorInfo.cpp; then
|
||||
echo -e "// Copyright (c) 2019 Dev Null Productions\n\n$(cat src/xrpld/rpc/handlers/ValidatorInfo.cpp)" > src/xrpld/rpc/handlers/ValidatorInfo.cpp
|
||||
fi
|
||||
if ! grep -q 'Bougalis' include/xrpl/basics/SlabAllocator.h; then
|
||||
echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/SlabAllocator.h)" > include/xrpl/basics/SlabAllocator.h # cspell: ignore Nikolaos Bougalis nikb
|
||||
echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/SlabAllocator.h)" > include/xrpl/basics/SlabAllocator.h
|
||||
fi
|
||||
if ! grep -q 'Bougalis' include/xrpl/basics/spinlock.h; then
|
||||
echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/spinlock.h)" > include/xrpl/basics/spinlock.h # cspell: ignore Nikolaos Bougalis nikb
|
||||
echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/spinlock.h)" > include/xrpl/basics/spinlock.h
|
||||
fi
|
||||
if ! grep -q 'Bougalis' include/xrpl/basics/tagged_integer.h; then
|
||||
echo -e "// Copyright (c) 2014, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/tagged_integer.h)" > include/xrpl/basics/tagged_integer.h # cspell: ignore Nikolaos Bougalis nikb
|
||||
echo -e "// Copyright (c) 2014, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/tagged_integer.h)" > include/xrpl/basics/tagged_integer.h
|
||||
fi
|
||||
if ! grep -q 'Ritchford' include/xrpl/beast/utility/Zero.h; then
|
||||
echo -e "// Copyright (c) 2014, Tom Ritchford <tom@swirly.com>\n\n$(cat include/xrpl/beast/utility/Zero.h)" > include/xrpl/beast/utility/Zero.h # cspell: ignore Ritchford
|
||||
echo -e "// Copyright (c) 2014, Tom Ritchford <tom@swirly.com>\n\n$(cat include/xrpl/beast/utility/Zero.h)" > include/xrpl/beast/utility/Zero.h
|
||||
fi
|
||||
|
||||
# Restore newlines and tabs in string literals in the affected file.
.github/scripts/rename/include.sh (deleted, 30 lines)

@@ -1,30 +0,0 @@
#!/bin/bash

# Exit the script as soon as an error occurs.
set -e

# This script checks whether there are no new include guards introduced by a new
# PR, as header files should use "#pragma once" instead. The script assumes any
# include guards will use "XRPL_" as prefix.
# Usage: .github/scripts/rename/include.sh <repository directory>

if [ "$#" -ne 1 ]; then
echo "Usage: $0 <repository directory>"
exit 1
fi

DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
echo "Error: Directory '${DIRECTORY}' does not exist."
exit 1
fi

find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" \) | while read -r FILE; do
echo "Processing file: ${FILE}"
if grep -q "#ifndef XRPL_" "${FILE}"; then
echo "Please replace all include guards by #pragma once."
exit 1
fi
done
echo "Checking complete."
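The script removed above was invoked like the other rename helpers, from the repository root (`./.github/scripts/rename/include.sh .`). A rough equivalent of the check it performed is kept here as a hedged sketch, assuming GNU grep:

```bash
# Fail if any header under the current directory still uses an XRPL_ include guard
# instead of #pragma once.
if grep -rl --include='*.h' --include='*.hpp' --include='*.ipp' '#ifndef XRPL_' .; then
  echo 'Please replace all include guards by #pragma once.'
  exit 1
fi
echo 'Checking complete.'
```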
.github/scripts/strategy-matrix/generate.py (140 lines changed)
@@ -20,8 +20,8 @@ class Config:
|
||||
Generate a strategy matrix for GitHub Actions CI.
|
||||
|
||||
On each PR commit we will build a selection of Debian, RHEL, Ubuntu, MacOS, and
|
||||
Windows configurations, while upon merge into the develop or release branches,
|
||||
we will build all configurations, and test most of them.
|
||||
Windows configurations, while upon merge into the develop, release, or master
|
||||
branches, we will build all configurations, and test most of them.
|
||||
|
||||
We will further set additional CMake arguments as follows:
|
||||
- All builds will have the `tests`, `werr`, and `xrpld` options.
|
||||
@@ -51,20 +51,22 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
# Only generate a subset of configurations in PRs.
|
||||
if not all:
|
||||
# Debian:
|
||||
# - Bookworm using GCC 13: Release on linux/amd64, set the reference
|
||||
# fee to 500.
|
||||
# - Bookworm using GCC 15: Debug on linux/amd64, enable code
|
||||
# coverage (which will be done below).
|
||||
# - Bookworm using Clang 16: Debug on linux/arm64, enable voidstar.
|
||||
# - Bookworm using Clang 17: Release on linux/amd64, set the
|
||||
# reference fee to 1000.
|
||||
# - Bookworm using Clang 20: Debug on linux/amd64.
|
||||
# - Bookworm using GCC 13: Release and Unity on linux/amd64, set
|
||||
# the reference fee to 500.
|
||||
# - Bookworm using GCC 15: Debug and no Unity on linux/amd64, enable
|
||||
# code coverage (which will be done below).
|
||||
# - Bookworm using Clang 16: Debug and no Unity on linux/arm64,
|
||||
# enable voidstar.
|
||||
# - Bookworm using Clang 17: Release and no Unity on linux/amd64,
|
||||
# set the reference fee to 1000.
|
||||
# - Bookworm using Clang 20: Debug and Unity on linux/amd64.
|
||||
if os["distro_name"] == "debian":
|
||||
skip = True
|
||||
if os["distro_version"] == "bookworm":
|
||||
if (
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-13"
|
||||
and build_type == "Release"
|
||||
and "-Dunity=ON" in cmake_args
|
||||
and architecture["platform"] == "linux/amd64"
|
||||
):
|
||||
cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}"
|
||||
@@ -72,12 +74,14 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
if (
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
|
||||
and build_type == "Debug"
|
||||
and "-Dunity=OFF" in cmake_args
|
||||
and architecture["platform"] == "linux/amd64"
|
||||
):
|
||||
skip = False
|
||||
if (
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-16"
|
||||
and build_type == "Debug"
|
||||
and "-Dunity=OFF" in cmake_args
|
||||
and architecture["platform"] == "linux/arm64"
|
||||
):
|
||||
cmake_args = f"-Dvoidstar=ON {cmake_args}"
|
||||
@@ -85,6 +89,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
if (
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-17"
|
||||
and build_type == "Release"
|
||||
and "-Dunity=ON" in cmake_args
|
||||
and architecture["platform"] == "linux/amd64"
|
||||
):
|
||||
cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=1000 {cmake_args}"
|
||||
@@ -92,6 +97,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
if (
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
|
||||
and build_type == "Debug"
|
||||
and "-Dunity=ON" in cmake_args
|
||||
and architecture["platform"] == "linux/amd64"
|
||||
):
|
||||
skip = False
|
||||
@@ -99,14 +105,15 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
continue
|
||||
|
||||
# RHEL:
|
||||
# - 9 using GCC 12: Debug on linux/amd64.
|
||||
# - 10 using Clang: Release on linux/amd64.
|
||||
# - 9 using GCC 12: Debug and Unity on linux/amd64.
|
||||
# - 10 using Clang: Release and no Unity on linux/amd64.
|
||||
if os["distro_name"] == "rhel":
|
||||
skip = True
|
||||
if os["distro_version"] == "9":
|
||||
if (
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
|
||||
and build_type == "Debug"
|
||||
and "-Dunity=ON" in cmake_args
|
||||
and architecture["platform"] == "linux/amd64"
|
||||
):
|
||||
skip = False
|
||||
@@ -114,6 +121,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
if (
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-any"
|
||||
and build_type == "Release"
|
||||
and "-Dunity=OFF" in cmake_args
|
||||
and architecture["platform"] == "linux/amd64"
|
||||
):
|
||||
skip = False
|
||||
@@ -121,16 +129,17 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
continue
|
||||
|
||||
# Ubuntu:
|
||||
# - Jammy using GCC 12: Debug on linux/arm64.
|
||||
# - Noble using GCC 14: Release on linux/amd64.
|
||||
# - Noble using Clang 18: Debug on linux/amd64.
|
||||
# - Noble using Clang 19: Release on linux/arm64.
|
||||
# - Jammy using GCC 12: Debug and no Unity on linux/arm64.
|
||||
# - Noble using GCC 14: Release and Unity on linux/amd64.
|
||||
# - Noble using Clang 18: Debug and no Unity on linux/amd64.
|
||||
# - Noble using Clang 19: Release and Unity on linux/arm64.
|
||||
if os["distro_name"] == "ubuntu":
|
||||
skip = True
|
||||
if os["distro_version"] == "jammy":
|
||||
if (
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
|
||||
and build_type == "Debug"
|
||||
and "-Dunity=OFF" in cmake_args
|
||||
and architecture["platform"] == "linux/arm64"
|
||||
):
|
||||
skip = False
|
||||
@@ -138,18 +147,21 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
if (
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-14"
|
||||
and build_type == "Release"
|
||||
and "-Dunity=ON" in cmake_args
|
||||
and architecture["platform"] == "linux/amd64"
|
||||
):
|
||||
skip = False
|
||||
if (
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-18"
|
||||
and build_type == "Debug"
|
||||
and "-Dunity=OFF" in cmake_args
|
||||
and architecture["platform"] == "linux/amd64"
|
||||
):
|
||||
skip = False
|
||||
if (
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-19"
|
||||
and build_type == "Release"
|
||||
and "-Dunity=ON" in cmake_args
|
||||
and architecture["platform"] == "linux/arm64"
|
||||
):
|
||||
skip = False
|
||||
@@ -157,16 +169,20 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
continue
|
||||
|
||||
# MacOS:
|
||||
# - Debug on macos/arm64.
|
||||
# - Debug and no Unity on macos/arm64.
|
||||
if os["distro_name"] == "macos" and not (
|
||||
build_type == "Debug" and architecture["platform"] == "macos/arm64"
|
||||
build_type == "Debug"
|
||||
and "-Dunity=OFF" in cmake_args
|
||||
and architecture["platform"] == "macos/arm64"
|
||||
):
|
||||
continue
|
||||
|
||||
# Windows:
|
||||
# - Release on windows/amd64.
|
||||
# - Release and Unity on windows/amd64.
|
||||
if os["distro_name"] == "windows" and not (
|
||||
build_type == "Release" and architecture["platform"] == "windows/amd64"
|
||||
build_type == "Release"
|
||||
and "-Dunity=ON" in cmake_args
|
||||
and architecture["platform"] == "windows/amd64"
|
||||
):
|
||||
continue
|
||||
|
||||
@@ -193,28 +209,18 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
):
|
||||
continue
|
||||
|
||||
# Enable code coverage for Debian Bookworm using GCC 15 in Debug on
|
||||
# linux/amd64
|
||||
# Enable code coverage for Debian Bookworm using GCC 15 in Debug and no
|
||||
# Unity on linux/amd64
|
||||
if (
|
||||
f"{os['distro_name']}-{os['distro_version']}" == "debian-bookworm"
|
||||
and f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
|
||||
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
|
||||
and build_type == "Debug"
|
||||
and "-Dunity=OFF" in cmake_args
|
||||
and architecture["platform"] == "linux/amd64"
|
||||
):
|
||||
cmake_args = f"{cmake_args} -Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0"
|
||||
|
||||
# Enable unity build for Ubuntu Jammy using GCC 12 in Debug on
|
||||
# linux/amd64.
|
||||
if (
|
||||
f"{os['distro_name']}-{os['distro_version']}" == "ubuntu-jammy"
|
||||
and f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
|
||||
and build_type == "Debug"
|
||||
and architecture["platform"] == "linux/amd64"
|
||||
):
|
||||
cmake_args = f"{cmake_args} -Dunity=ON"
|
||||
cmake_args = f"-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}"
|
||||
|
||||
# Generate a unique name for the configuration, e.g. macos-arm64-debug
|
||||
# or debian-bookworm-gcc-12-amd64-release.
|
||||
# or debian-bookworm-gcc-12-amd64-release-unity.
|
||||
config_name = os["distro_name"]
|
||||
if (n := os["distro_version"]) != "":
|
||||
config_name += f"-{n}"
|
||||
@@ -223,64 +229,26 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
if (n := os["compiler_version"]) != "":
|
||||
config_name += f"-{n}"
|
||||
config_name += (
|
||||
f"-{architecture['platform'][architecture['platform'].find('/')+1:]}"
|
||||
f"-{architecture['platform'][architecture['platform'].find('/') + 1 :]}"
|
||||
)
|
||||
config_name += f"-{build_type.lower()}"
|
||||
if "-Dcoverage=ON" in cmake_args:
|
||||
config_name += "-coverage"
|
||||
if "-Dunity=ON" in cmake_args:
|
||||
config_name += "-unity"
|
||||
|
||||
# Add the configuration to the list, with the most unique fields first,
|
||||
# so that they are easier to identify in the GitHub Actions UI, as long
|
||||
# names get truncated.
|
||||
# Add Address and Thread (both coupled with UB) sanitizers for specific bookworm distros.
|
||||
# GCC-Asan rippled-embedded tests are failing because of https://github.com/google/sanitizers/issues/856
|
||||
if (
|
||||
os["distro_version"] == "bookworm"
|
||||
and f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
|
||||
):
|
||||
# Add ASAN + UBSAN configuration.
|
||||
configurations.append(
|
||||
{
|
||||
"config_name": config_name + "-asan-ubsan",
|
||||
"cmake_args": cmake_args,
|
||||
"cmake_target": cmake_target,
|
||||
"build_only": build_only,
|
||||
"build_type": build_type,
|
||||
"os": os,
|
||||
"architecture": architecture,
|
||||
"sanitizers": "address,undefinedbehavior",
|
||||
}
|
||||
)
|
||||
# TSAN is deactivated due to seg faults with latest compilers.
|
||||
activate_tsan = False
|
||||
if activate_tsan:
|
||||
configurations.append(
|
||||
{
|
||||
"config_name": config_name + "-tsan-ubsan",
|
||||
"cmake_args": cmake_args,
|
||||
"cmake_target": cmake_target,
|
||||
"build_only": build_only,
|
||||
"build_type": build_type,
|
||||
"os": os,
|
||||
"architecture": architecture,
|
||||
"sanitizers": "thread,undefinedbehavior",
|
||||
}
|
||||
)
|
||||
else:
|
||||
configurations.append(
|
||||
{
|
||||
"config_name": config_name,
|
||||
"cmake_args": cmake_args,
|
||||
"cmake_target": cmake_target,
|
||||
"build_only": build_only,
|
||||
"build_type": build_type,
|
||||
"os": os,
|
||||
"architecture": architecture,
|
||||
"sanitizers": "",
|
||||
}
|
||||
)
|
||||
configurations.append(
|
||||
{
|
||||
"config_name": config_name,
|
||||
"cmake_args": cmake_args,
|
||||
"cmake_target": cmake_target,
|
||||
"build_only": build_only,
|
||||
"build_type": build_type,
|
||||
"os": os,
|
||||
"architecture": architecture,
|
||||
}
|
||||
)
|
||||
|
||||
return configurations
|
||||
|
||||
|
||||
.github/scripts/strategy-matrix/linux.json (58 lines changed)
@@ -15,198 +15,198 @@
|
||||
"distro_version": "bookworm",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "12",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "debian",
|
||||
"distro_version": "bookworm",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "13",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "debian",
|
||||
"distro_version": "bookworm",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "14",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "debian",
|
||||
"distro_version": "bookworm",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "15",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "debian",
|
||||
"distro_version": "bookworm",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "16",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "debian",
|
||||
"distro_version": "bookworm",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "17",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "debian",
|
||||
"distro_version": "bookworm",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "18",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "debian",
|
||||
"distro_version": "bookworm",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "19",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "debian",
|
||||
"distro_version": "bookworm",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "20",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "debian",
|
||||
"distro_version": "trixie",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "14",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "debian",
|
||||
"distro_version": "trixie",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "15",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "debian",
|
||||
"distro_version": "trixie",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "20",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "debian",
|
||||
"distro_version": "trixie",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "21",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "0525eae"
|
||||
},
|
||||
{
|
||||
"distro_name": "rhel",
|
||||
"distro_version": "8",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "14",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "rhel",
|
||||
"distro_version": "8",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "any",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "rhel",
|
||||
"distro_version": "9",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "12",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "rhel",
|
||||
"distro_version": "9",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "13",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "rhel",
|
||||
"distro_version": "9",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "14",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "rhel",
|
||||
"distro_version": "9",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "any",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "rhel",
|
||||
"distro_version": "10",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "14",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "rhel",
|
||||
"distro_version": "10",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "any",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "ubuntu",
|
||||
"distro_version": "jammy",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "12",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "ubuntu",
|
||||
"distro_version": "noble",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "13",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "ubuntu",
|
||||
"distro_version": "noble",
|
||||
"compiler_name": "gcc",
|
||||
"compiler_version": "14",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "ubuntu",
|
||||
"distro_version": "noble",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "16",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "ubuntu",
|
||||
"distro_version": "noble",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "17",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "ubuntu",
|
||||
"distro_version": "noble",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "18",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
},
|
||||
{
|
||||
"distro_name": "ubuntu",
|
||||
"distro_version": "noble",
|
||||
"compiler_name": "clang",
|
||||
"compiler_version": "19",
|
||||
"image_sha": "ab4d1f0"
|
||||
"image_sha": "e1782cd"
|
||||
}
|
||||
],
|
||||
"build_type": ["Debug", "Release"],
|
||||
"cmake_args": [""]
|
||||
"cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
|
||||
}
|
||||
|
||||
.github/scripts/strategy-matrix/macos.json (5 lines changed)

@@ -15,5 +15,8 @@
}
],
"build_type": ["Debug", "Release"],
"cmake_args": ["-DCMAKE_POLICY_VERSION_MINIMUM=3.5"]
"cmake_args": [
"-Dunity=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5",
"-Dunity=ON -DCMAKE_POLICY_VERSION_MINIMUM=3.5"
]
}
.github/scripts/strategy-matrix/windows.json (2 lines changed)

@@ -15,5 +15,5 @@
}
],
"build_type": ["Debug", "Release"],
"cmake_args": [""]
"cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
}
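With unity mode now expressed through `cmake_args`, each matrix cell carries its unity flag directly instead of having it toggled inside generate.py. The following is only a hypothetical sketch of what one such cell (Release, `-Dunity=ON`) might look like as a local configure-and-build step; the real invocation is assembled by the reusable build workflow, which is not part of this diff, and the Ninja generator and build directory are assumptions.

```bash
# Hypothetical local equivalent of one matrix cell (Release, unity build enabled).
cmake -S . -B .build -G Ninja \
  -DCMAKE_BUILD_TYPE=Release \
  -Dunity=ON
cmake --build .build --parallel
```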
.github/workflows/on-pr.yml (43 lines changed)

@@ -1,8 +1,7 @@
# This workflow runs all workflows to check, build and test the project on
# various Linux flavors, as well as on MacOS and Windows, on every push to a
# user branch. However, it will not run if the pull request is a draft unless it
# has the 'DraftRunCI' label. For commits to PRs that target a release branch,
# it also uploads the libxrpl recipe to the Conan remote.
# has the 'DraftRunCI' label.
name: PR

on:
@@ -54,12 +53,12 @@ jobs:
.github/scripts/rename/**
.github/workflows/reusable-check-levelization.yml
.github/workflows/reusable-check-rename.yml
.github/workflows/reusable-notify-clio.yml
.github/workflows/on-pr.yml

# Keep the paths below in sync with those in `on-trigger.yml`.
.github/actions/build-deps/**
.github/actions/build-test/**
.github/actions/generate-version/**
.github/actions/setup-conan/**
.github/scripts/strategy-matrix/**
.github/workflows/reusable-build.yml
@@ -67,7 +66,6 @@ jobs:
.github/workflows/reusable-build-test.yml
.github/workflows/reusable-strategy-matrix.yml
.github/workflows/reusable-test.yml
.github/workflows/reusable-upload-recipe.yml
.codecov.yml
cmake/**
conan/**
@@ -116,49 +114,26 @@ jobs:
matrix:
os: [linux, macos, windows]
with:
# Enable ccache only for events targeting the XRPLF repository, since
# other accounts will not have access to our remote cache storage.
ccache_enabled: ${{ github.repository_owner == 'XRPLF' }}
os: ${{ matrix.os }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

upload-recipe:
notify-clio:
needs:
- should-run
- build-test
# Only run when committing to a PR that targets a release branch in the
# XRPLF repository.
if: ${{ github.repository_owner == 'XRPLF' && needs.should-run.outputs.go == 'true' && startsWith(github.ref, 'refs/heads/release') }}
uses: ./.github/workflows/reusable-upload-recipe.yml
if: ${{ needs.should-run.outputs.go == 'true' && (startsWith(github.base_ref, 'release') || github.base_ref == 'master') }}
uses: ./.github/workflows/reusable-notify-clio.yml
secrets:
remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}

notify-clio:
needs: upload-recipe
runs-on: ubuntu-latest
steps:
# Notify the Clio repository about the newly proposed release version, so
# it can be checked for compatibility before the release is actually made.
- name: Notify Clio
env:
GH_TOKEN: ${{ secrets.CLIO_NOTIFY_TOKEN }}
PR_URL: ${{ github.event.pull_request.html_url }}
run: |
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
-F "client_payload[ref]=${{ needs.upload-recipe.outputs.recipe_ref }}" \
-F "client_payload[pr_url]=${PR_URL}"
clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}

passed:
if: failure() || cancelled()
needs:
- check-levelization
- check-rename
- build-test
- upload-recipe
- notify-clio
- check-levelization
runs-on: ubuntu-latest
steps:
- name: Fail
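The inline "Notify Clio" step removed above shows what the notification amounts to: a repository_dispatch event sent to xrplf/clio so the proposed libxrpl version can be checked for compatibility. After this change the call presumably lives in reusable-notify-clio.yml, which is not shown in this diff; the sketch below mirrors the removed step for reference, with hypothetical placeholder values, and assumes GH_TOKEN is authorized to dispatch events to xrplf/clio.

```bash
# Hypothetical manual notification, mirroring the inline step removed above.
PR_URL="https://github.com/XRPLF/rippled/pull/1234"   # hypothetical PR URL
RECIPE_REF="1.2.3-b0+abc1234"                          # hypothetical recipe reference

gh api --method POST \
  -H "Accept: application/vnd.github+json" \
  -H "X-GitHub-Api-Version: 2022-11-28" \
  /repos/xrplf/clio/dispatches \
  -f "event_type=check_libxrpl" \
  -F "client_payload[ref]=${RECIPE_REF}" \
  -F "client_payload[pr_url]=${PR_URL}"
```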
.github/workflows/on-tag.yml (deleted, 25 lines)

@@ -1,25 +0,0 @@
# This workflow uploads the libxrpl recipe to the Conan remote when a versioned
# tag is pushed.
name: Tag

on:
push:
tags:
- "v*"

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

defaults:
run:
shell: bash

jobs:
upload-recipe:
# Only run when a tag is pushed to the XRPLF repository.
if: ${{ github.repository_owner == 'XRPLF' }}
uses: ./.github/workflows/reusable-upload-recipe.yml
secrets:
remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
28 .github/workflows/on-trigger.yml vendored
@@ -1,7 +1,9 @@
|
||||
# This workflow runs all workflows to build and test the code on various Linux
|
||||
# flavors, as well as on MacOS and Windows, on a scheduled basis, on merge into
|
||||
# the 'develop' or 'release*' branches, or when requested manually. Upon pushes
|
||||
# to the develop branch it also uploads the libxrpl recipe to the Conan remote.
|
||||
# This workflow runs all workflows to build the dependencies required for the
|
||||
# project on various Linux flavors, as well as on MacOS and Windows, on a
|
||||
# scheduled basis, on merge into the 'develop', 'release', or 'master' branches,
|
||||
# or manually. The missing commits check is only run when the code is merged
|
||||
# into the 'develop' or 'release' branches, and the documentation is built when
|
||||
# the code is merged into the 'develop' branch.
|
||||
name: Trigger
|
||||
|
||||
on:
|
||||
@@ -9,6 +11,7 @@ on:
|
||||
branches:
|
||||
- "develop"
|
||||
- "release*"
|
||||
- "master"
|
||||
paths:
|
||||
# These paths are unique to `on-trigger.yml`.
|
||||
- ".github/workflows/on-trigger.yml"
|
||||
@@ -16,7 +19,6 @@ on:
|
||||
# Keep the paths below in sync with those in `on-pr.yml`.
|
||||
- ".github/actions/build-deps/**"
|
||||
- ".github/actions/build-test/**"
|
||||
- ".github/actions/generate-version/**"
|
||||
- ".github/actions/setup-conan/**"
|
||||
- ".github/scripts/strategy-matrix/**"
|
||||
- ".github/workflows/reusable-build.yml"
|
||||
@@ -24,7 +26,6 @@ on:
|
||||
- ".github/workflows/reusable-build-test.yml"
|
||||
- ".github/workflows/reusable-strategy-matrix.yml"
|
||||
- ".github/workflows/reusable-test.yml"
|
||||
- ".github/workflows/reusable-upload-recipe.yml"
|
||||
- ".codecov.yml"
|
||||
- "cmake/**"
|
||||
- "conan/**"
|
||||
@@ -67,22 +68,7 @@ jobs:
|
||||
matrix:
|
||||
os: [linux, macos, windows]
|
||||
with:
|
||||
# Enable ccache only for events targeting the XRPLF repository, since
|
||||
# other accounts will not have access to our remote cache storage.
|
||||
# However, we do not enable ccache for events targeting a release branch,
|
||||
# to protect against the rare case that the output produced by ccache is
|
||||
# not identical to a regular compilation.
|
||||
ccache_enabled: ${{ github.repository_owner == 'XRPLF' && !startsWith(github.ref, 'refs/heads/release') }}
|
||||
os: ${{ matrix.os }}
|
||||
strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }}
|
||||
secrets:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
|
||||
upload-recipe:
|
||||
needs: build-test
|
||||
# Only run when pushing to the develop branch in the XRPLF repository.
|
||||
if: ${{ github.repository_owner == 'XRPLF' && github.event_name == 'push' && github.ref == 'refs/heads/develop' }}
|
||||
uses: ./.github/workflows/reusable-upload-recipe.yml
|
||||
secrets:
|
||||
remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
|
||||
remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
|
||||
|
||||
8 .github/workflows/pre-commit.yml vendored
@@ -3,15 +3,13 @@ name: Run pre-commit hooks
on:
  pull_request:
  push:
    branches:
      - "develop"
      - "release*"
    branches: [develop, release, master]
  workflow_dispatch:

jobs:
  # Call the workflow in the XRPLF/actions repo that runs the pre-commit hooks.
  run-hooks:
    uses: XRPLF/actions/.github/workflows/pre-commit.yml@320be44621ca2a080f05aeb15817c44b84518108
    uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a
    with:
      runs_on: ubuntu-latest
      container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-ab4d1f0" }'
      container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-a8c7be1" }'

4 .github/workflows/publish-docs.yml vendored
@@ -22,7 +22,7 @@ defaults:
    shell: bash

env:
  BUILD_DIR: build
  BUILD_DIR: .build
  NPROC_SUBTRACT: 2

jobs:
@@ -36,7 +36,7 @@ jobs:
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0

      - name: Get number of processors
        uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
        uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
        id: nproc
        with:
          subtract: ${{ env.NPROC_SUBTRACT }}

108 .github/workflows/reusable-build-test-config.yml vendored
@@ -3,6 +3,11 @@ name: Build and test configuration
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
build_dir:
|
||||
description: "The directory where to build."
|
||||
required: true
|
||||
type: string
|
||||
|
||||
build_only:
|
||||
description: 'Whether to only build or to build and test the code ("true", "false").'
|
||||
required: true
|
||||
@@ -10,14 +15,8 @@ on:
|
||||
|
||||
build_type:
|
||||
description: 'The build type to use ("Debug", "Release").'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
ccache_enabled:
|
||||
description: "Whether to enable ccache."
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
required: true
|
||||
|
||||
cmake_args:
|
||||
description: "Additional arguments to pass to CMake."
|
||||
@@ -27,8 +26,8 @@ on:
|
||||
|
||||
cmake_target:
|
||||
description: "The CMake target to build."
|
||||
required: true
|
||||
type: string
|
||||
required: true
|
||||
|
||||
runs_on:
|
||||
description: Runner to run the job on as a JSON string
|
||||
@@ -51,12 +50,6 @@ on:
|
||||
type: number
|
||||
default: 2
|
||||
|
||||
sanitizers:
|
||||
description: "The sanitizers to enable."
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
secrets:
|
||||
CODECOV_TOKEN:
|
||||
description: "The Codecov token to use for uploading coverage reports."
|
||||
@@ -66,11 +59,6 @@ defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
env:
|
||||
# Conan installs the generators in the build/generators directory, see the
|
||||
# layout() method in conanfile.py. We then run CMake from the build directory.
|
||||
BUILD_DIR: build
|
||||
|
||||
jobs:
|
||||
build-and-test:
|
||||
name: ${{ inputs.config_name }}
|
||||
@@ -78,72 +66,47 @@ jobs:
|
||||
container: ${{ inputs.image != '' && inputs.image || null }}
|
||||
timeout-minutes: 60
|
||||
env:
|
||||
# Use a namespace to keep the objects separate for each configuration.
|
||||
CCACHE_NAMESPACE: ${{ inputs.config_name }}
|
||||
# Ccache supports both Redis and HTTP endpoints.
|
||||
# * For Redis, use the following format: redis://ip:port, see
|
||||
# https://github.com/ccache/ccache/wiki/Redis-storage. Note that TLS is
|
||||
# not directly supported by ccache, and requires use of a proxy.
|
||||
# * For HTTP use the following format: http://ip:port/cache when using
|
||||
# nginx as backend or http://ip:port|layout=bazel when using Bazel
|
||||
# Remote Cache, see https://github.com/ccache/ccache/wiki/HTTP-storage.
|
||||
# Note that HTTPS is not directly supported by ccache.
|
||||
CCACHE_REMOTE_ONLY: true
|
||||
CCACHE_REMOTE_STORAGE: http://cache.dev.ripplex.io:8080|layout=bazel
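# For reference, the storage forms described in the comments above would look
# like the following (hosts and ports here are placeholders, not the project's
# actual endpoints):
#   CCACHE_REMOTE_STORAGE: redis://10.0.0.5:6379
#   CCACHE_REMOTE_STORAGE: http://10.0.0.5:8080/cache
#   CCACHE_REMOTE_STORAGE: http://10.0.0.5:8080|layout=bazel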
|
||||
# Ignore the creation and modification timestamps on files, since the
|
||||
# header files are copied into separate directories by CMake, which will
|
||||
# otherwise result in cache misses.
|
||||
CCACHE_SLOPPINESS: include_file_ctime,include_file_mtime
|
||||
# Determine if coverage and voidstar should be enabled.
|
||||
COVERAGE_ENABLED: ${{ contains(inputs.cmake_args, '-Dcoverage=ON') }}
|
||||
VOIDSTAR_ENABLED: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
|
||||
SANITIZERS_ENABLED: ${{ inputs.sanitizers != '' }}
|
||||
ENABLED_VOIDSTAR: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
|
||||
ENABLED_COVERAGE: ${{ contains(inputs.cmake_args, '-Dcoverage=ON') }}
|
||||
steps:
|
||||
- name: Cleanup workspace (macOS and Windows)
|
||||
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
|
||||
uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
|
||||
uses: XRPLF/actions/.github/actions/cleanup-workspace@01b244d2718865d427b499822fbd3f15e7197fcc
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Prepare runner
|
||||
uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
|
||||
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
|
||||
with:
|
||||
enable_ccache: ${{ inputs.ccache_enabled }}
|
||||
|
||||
- name: Set ccache log file
|
||||
if: ${{ inputs.ccache_enabled && runner.debug == '1' }}
|
||||
run: echo "CCACHE_LOGFILE=${{ runner.temp }}/ccache.log" >> "${GITHUB_ENV}"
|
||||
disable_ccache: false
|
||||
|
||||
- name: Print build environment
|
||||
uses: ./.github/actions/print-env
|
||||
|
||||
- name: Get number of processors
|
||||
uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
|
||||
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
|
||||
id: nproc
|
||||
with:
|
||||
subtract: ${{ inputs.nproc_subtract }}
|
||||
|
||||
- name: Setup Conan
|
||||
env:
|
||||
SANITIZERS: ${{ inputs.sanitizers }}
|
||||
uses: ./.github/actions/setup-conan
|
||||
|
||||
- name: Build dependencies
|
||||
uses: ./.github/actions/build-deps
|
||||
with:
|
||||
build_dir: ${{ inputs.build_dir }}
|
||||
build_nproc: ${{ steps.nproc.outputs.nproc }}
|
||||
build_type: ${{ inputs.build_type }}
|
||||
# Set the verbosity to "quiet" for Windows to avoid an excessive
|
||||
# amount of logs. For other OSes, the "verbose" logs are more useful.
|
||||
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
|
||||
sanitizers: ${{ inputs.sanitizers }}
|
||||
|
||||
- name: Configure CMake
|
||||
working-directory: ${{ env.BUILD_DIR }}
|
||||
working-directory: ${{ inputs.build_dir }}
|
||||
env:
|
||||
BUILD_TYPE: ${{ inputs.build_type }}
|
||||
SANITIZERS: ${{ inputs.sanitizers }}
|
||||
CMAKE_ARGS: ${{ inputs.cmake_args }}
|
||||
run: |
|
||||
cmake \
|
||||
@@ -154,7 +117,7 @@ jobs:
|
||||
..
|
||||
|
||||
- name: Build the binary
|
||||
working-directory: ${{ env.BUILD_DIR }}
|
||||
working-directory: ${{ inputs.build_dir }}
|
||||
env:
|
||||
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
|
||||
BUILD_TYPE: ${{ inputs.build_type }}
|
||||
@@ -166,18 +129,11 @@ jobs:
|
||||
--parallel "${BUILD_NPROC}" \
|
||||
--target "${CMAKE_TARGET}"
|
||||
|
||||
- name: Show ccache statistics
|
||||
if: ${{ inputs.ccache_enabled }}
|
||||
run: |
|
||||
ccache --show-stats -vv
|
||||
if [ '${{ runner.debug }}' = '1' ]; then
|
||||
cat "${CCACHE_LOGFILE}"
|
||||
curl ${CCACHE_REMOTE_STORAGE%|*}/status || true
|
||||
fi
|
||||
|
||||
- name: Upload the binary (Linux)
|
||||
if: ${{ github.repository_owner == 'XRPLF' && runner.os == 'Linux' }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
env:
|
||||
BUILD_DIR: ${{ inputs.build_dir }}
|
||||
with:
|
||||
name: xrpld-${{ inputs.config_name }}
|
||||
path: ${{ env.BUILD_DIR }}/xrpld
|
||||
@@ -185,8 +141,8 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Check linking (Linux)
|
||||
if: ${{ runner.os == 'Linux' && env.SANITIZERS_ENABLED == 'false' }}
|
||||
working-directory: ${{ env.BUILD_DIR }}
|
||||
if: ${{ runner.os == 'Linux' }}
|
||||
working-directory: ${{ inputs.build_dir }}
|
||||
run: |
|
||||
ldd ./xrpld
|
||||
if [ "$(ldd ./xrpld | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
|
||||
@@ -197,22 +153,14 @@ jobs:
|
||||
fi
|
||||
|
||||
- name: Verify presence of instrumentation (Linux)
|
||||
if: ${{ runner.os == 'Linux' && env.VOIDSTAR_ENABLED == 'true' }}
|
||||
working-directory: ${{ env.BUILD_DIR }}
|
||||
if: ${{ runner.os == 'Linux' && env.ENABLED_VOIDSTAR == 'true' }}
|
||||
working-directory: ${{ inputs.build_dir }}
|
||||
run: |
|
||||
./xrpld --version | grep libvoidstar
|
||||
|
||||
- name: Set sanitizer options
|
||||
if: ${{ !inputs.build_only && env.SANITIZERS_ENABLED == 'true' }}
|
||||
run: |
|
||||
echo "ASAN_OPTIONS=print_stacktrace=1:detect_container_overflow=0:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/asan.supp" >> ${GITHUB_ENV}
|
||||
echo "TSAN_OPTIONS=second_deadlock_stack=1:halt_on_error=0:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/tsan.supp" >> ${GITHUB_ENV}
|
||||
echo "UBSAN_OPTIONS=suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/ubsan.supp" >> ${GITHUB_ENV}
|
||||
echo "LSAN_OPTIONS=suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/lsan.supp" >> ${GITHUB_ENV}
|
||||
|
||||
- name: Run the separate tests
|
||||
if: ${{ !inputs.build_only }}
|
||||
working-directory: ${{ env.BUILD_DIR }}
|
||||
working-directory: ${{ inputs.build_dir }}
|
||||
# Windows locks some of the build files while running tests, and parallel jobs can collide
|
||||
env:
|
||||
BUILD_TYPE: ${{ inputs.build_type }}
|
||||
@@ -225,7 +173,7 @@ jobs:
|
||||
|
||||
- name: Run the embedded tests
|
||||
if: ${{ !inputs.build_only }}
|
||||
working-directory: ${{ runner.os == 'Windows' && format('{0}/{1}', env.BUILD_DIR, inputs.build_type) || env.BUILD_DIR }}
|
||||
working-directory: ${{ runner.os == 'Windows' && format('{0}/{1}', inputs.build_dir, inputs.build_type) || inputs.build_dir }}
|
||||
env:
|
||||
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
|
||||
run: |
|
||||
@@ -240,8 +188,8 @@ jobs:
|
||||
netstat -an
|
||||
|
||||
- name: Prepare coverage report
|
||||
if: ${{ !inputs.build_only && env.COVERAGE_ENABLED == 'true' }}
|
||||
working-directory: ${{ env.BUILD_DIR }}
|
||||
if: ${{ !inputs.build_only && env.ENABLED_COVERAGE == 'true' }}
|
||||
working-directory: ${{ inputs.build_dir }}
|
||||
env:
|
||||
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
|
||||
BUILD_TYPE: ${{ inputs.build_type }}
|
||||
@@ -253,13 +201,13 @@ jobs:
|
||||
--target coverage
|
||||
|
||||
- name: Upload coverage report
|
||||
if: ${{ github.repository_owner == 'XRPLF' && !inputs.build_only && env.COVERAGE_ENABLED == 'true' }}
|
||||
if: ${{ github.repository_owner == 'XRPLF' && !inputs.build_only && env.ENABLED_COVERAGE == 'true' }}
|
||||
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
|
||||
with:
|
||||
disable_search: true
|
||||
disable_telem: true
|
||||
fail_ci_if_error: true
|
||||
files: ${{ env.BUILD_DIR }}/coverage.xml
|
||||
files: ${{ inputs.build_dir }}/coverage.xml
|
||||
plugins: noop
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
verbose: true
|
||||
|
||||
14 .github/workflows/reusable-build-test.yml vendored
@@ -8,24 +8,21 @@ name: Build and test
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
ccache_enabled:
|
||||
description: "Whether to enable ccache."
|
||||
build_dir:
|
||||
description: "The directory where to build."
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
type: string
|
||||
default: ".build"
|
||||
os:
|
||||
description: 'The operating system to use for the build ("linux", "macos", "windows").'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
strategy_matrix:
|
||||
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
|
||||
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
|
||||
required: false
|
||||
type: string
|
||||
default: "minimal"
|
||||
|
||||
secrets:
|
||||
CODECOV_TOKEN:
|
||||
description: "The Codecov token to use for uploading coverage reports."
|
||||
@@ -49,14 +46,13 @@ jobs:
|
||||
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
|
||||
max-parallel: 10
|
||||
with:
|
||||
build_dir: ${{ inputs.build_dir }}
|
||||
build_only: ${{ matrix.build_only }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
ccache_enabled: ${{ inputs.ccache_enabled }}
|
||||
cmake_args: ${{ matrix.cmake_args }}
|
||||
cmake_target: ${{ matrix.cmake_target }}
|
||||
runs_on: ${{ toJSON(matrix.architecture.runner) }}
|
||||
image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || '' }}
|
||||
config_name: ${{ matrix.config_name }}
|
||||
sanitizers: ${{ matrix.sanitizers }}
|
||||
secrets:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
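# A minimal caller sketch for this reusable workflow, mirroring how on-pr.yml
# and on-trigger.yml invoke it above (the job name and the trimmed set of
# inputs are illustrative, not verbatim):
#
#   build-test:
#     strategy:
#       matrix:
#         os: [linux, macos, windows]
#     uses: ./.github/workflows/reusable-build-test.yml
#     with:
#       os: ${{ matrix.os }}
#       strategy_matrix: minimal
#     secrets:
#       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}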
|
||||
|
||||
4 .github/workflows/reusable-check-rename.yml vendored
@@ -29,10 +29,6 @@ jobs:
        run: .github/scripts/rename/binary.sh .
      - name: Check namespaces
        run: .github/scripts/rename/namespace.sh .
      - name: Check config name
        run: .github/scripts/rename/config.sh .
      - name: Check include guards
        run: .github/scripts/rename/include.sh .
      - name: Check for differences
        env:
          MESSAGE: |

91 .github/workflows/reusable-notify-clio.yml vendored Normal file
@@ -0,0 +1,91 @@
|
||||
# This workflow exports the built libxrpl package to the Conan remote on a
|
||||
# channel named after the pull request, and notifies the Clio repository about
|
||||
# the new version so it can check for compatibility.
|
||||
name: Notify Clio
|
||||
|
||||
# This workflow can only be triggered by other workflows.
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
conan_remote_name:
|
||||
description: "The name of the Conan remote to use."
|
||||
required: false
|
||||
type: string
|
||||
default: xrplf
|
||||
conan_remote_url:
|
||||
description: "The URL of the Conan endpoint to use."
|
||||
required: false
|
||||
type: string
|
||||
default: https://conan.ripplex.io
|
||||
secrets:
|
||||
clio_notify_token:
|
||||
description: "The GitHub token to notify Clio about new versions."
|
||||
required: true
|
||||
conan_remote_username:
|
||||
description: "The username for logging into the Conan remote."
|
||||
required: true
|
||||
conan_remote_password:
|
||||
description: "The password for logging into the Conan remote."
|
||||
required: true
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}-clio
|
||||
cancel-in-progress: true
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
upload:
|
||||
if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
|
||||
runs-on: ubuntu-latest
|
||||
container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- name: Generate outputs
|
||||
id: generate
|
||||
env:
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
run: |
|
||||
echo 'Generating user and channel.'
|
||||
echo "user=clio" >> "${GITHUB_OUTPUT}"
|
||||
echo "channel=pr_${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
|
||||
echo 'Extracting version.'
|
||||
echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}"
|
||||
- name: Calculate conan reference
|
||||
id: conan_ref
|
||||
run: |
|
||||
echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}"
|
||||
- name: Set up Conan
|
||||
uses: ./.github/actions/setup-conan
|
||||
with:
|
||||
conan_remote_name: ${{ inputs.conan_remote_name }}
|
||||
conan_remote_url: ${{ inputs.conan_remote_url }}
|
||||
- name: Log into Conan remote
|
||||
env:
|
||||
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
|
||||
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
|
||||
- name: Upload package
|
||||
env:
|
||||
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
|
||||
run: |
|
||||
conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} .
|
||||
conan upload --confirm --check --remote="${CONAN_REMOTE_NAME}" xrpl/${{ steps.conan_ref.outputs.conan_ref }}
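# Illustrative only: with a version of 2.6.0 and PR number 1234 (placeholder
# values), the reference exported and uploaded above would be
# xrpl/2.6.0@clio/pr_1234.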
|
||||
outputs:
|
||||
conan_ref: ${{ steps.conan_ref.outputs.conan_ref }}
|
||||
|
||||
notify:
|
||||
needs: upload
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Clio
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.clio_notify_token }}
|
||||
PR_URL: ${{ github.event.pull_request.html_url }}
|
||||
run: |
|
||||
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
|
||||
-F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \
|
||||
-F "client_payload[pr_url]=${PR_URL}"
|
||||
97 .github/workflows/reusable-upload-recipe.yml vendored
@@ -1,97 +0,0 @@
|
||||
# This workflow exports the built libxrpl package to the Conan remote.
|
||||
name: Upload Conan recipe
|
||||
|
||||
# This workflow can only be triggered by other workflows.
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
remote_name:
|
||||
description: "The name of the Conan remote to use."
|
||||
required: false
|
||||
type: string
|
||||
default: xrplf
|
||||
remote_url:
|
||||
description: "The URL of the Conan endpoint to use."
|
||||
required: false
|
||||
type: string
|
||||
default: https://conan.ripplex.io
|
||||
|
||||
secrets:
|
||||
remote_username:
|
||||
description: "The username for logging into the Conan remote."
|
||||
required: true
|
||||
remote_password:
|
||||
description: "The password for logging into the Conan remote."
|
||||
required: true
|
||||
|
||||
outputs:
|
||||
recipe_ref:
|
||||
description: "The Conan recipe reference ('name/version') that was uploaded."
|
||||
value: ${{ jobs.upload.outputs.ref }}
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}-upload-recipe
|
||||
cancel-in-progress: true
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
upload:
|
||||
runs-on: ubuntu-latest
|
||||
container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Generate build version number
|
||||
id: version
|
||||
uses: ./.github/actions/generate-version
|
||||
|
||||
- name: Set up Conan
|
||||
uses: ./.github/actions/setup-conan
|
||||
with:
|
||||
remote_name: ${{ inputs.remote_name }}
|
||||
remote_url: ${{ inputs.remote_url }}
|
||||
|
||||
- name: Log into Conan remote
|
||||
env:
|
||||
REMOTE_NAME: ${{ inputs.remote_name }}
|
||||
REMOTE_USERNAME: ${{ secrets.remote_username }}
|
||||
REMOTE_PASSWORD: ${{ secrets.remote_password }}
|
||||
run: conan remote login "${REMOTE_NAME}" "${REMOTE_USERNAME}" --password "${REMOTE_PASSWORD}"
|
||||
|
||||
- name: Upload Conan recipe (version)
|
||||
env:
|
||||
REMOTE_NAME: ${{ inputs.remote_name }}
|
||||
run: |
|
||||
conan export . --version=${{ steps.version.outputs.version }}
|
||||
conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/${{ steps.version.outputs.version }}
|
||||
|
||||
- name: Upload Conan recipe (develop)
|
||||
if: ${{ github.ref == 'refs/heads/develop' }}
|
||||
env:
|
||||
REMOTE_NAME: ${{ inputs.remote_name }}
|
||||
run: |
|
||||
conan export . --version=develop
|
||||
conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/develop
|
||||
|
||||
- name: Upload Conan recipe (rc)
|
||||
if: ${{ startsWith(github.ref, 'refs/heads/release') }}
|
||||
env:
|
||||
REMOTE_NAME: ${{ inputs.remote_name }}
|
||||
run: |
|
||||
conan export . --version=rc
|
||||
conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/rc
|
||||
|
||||
- name: Upload Conan recipe (release)
|
||||
if: ${{ github.event_name == 'tag' }}
|
||||
env:
|
||||
REMOTE_NAME: ${{ inputs.remote_name }}
|
||||
run: |
|
||||
conan export . --version=release
|
||||
conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/release
|
||||
|
||||
outputs:
|
||||
ref: xrpl/${{ steps.version.outputs.version }}
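# Consumer-side sketch (an assumption about downstream usage, not part of this
# repository): a project tracking these uploads could pin one of the exported
# channels in its conanfile.txt, for example
#   [requires]
#   xrpl/develop
# or xrpl/rc and xrpl/release for the release-candidate and release recipes.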
|
||||
16 .github/workflows/upload-conan-deps.yml vendored
@@ -64,43 +64,41 @@ jobs:
|
||||
steps:
|
||||
- name: Cleanup workspace (macOS and Windows)
|
||||
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
|
||||
uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
|
||||
uses: XRPLF/actions/.github/actions/cleanup-workspace@01b244d2718865d427b499822fbd3f15e7197fcc
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Prepare runner
|
||||
uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
|
||||
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
|
||||
with:
|
||||
enable_ccache: false
|
||||
disable_ccache: false
|
||||
|
||||
- name: Print build environment
|
||||
uses: ./.github/actions/print-env
|
||||
|
||||
- name: Get number of processors
|
||||
uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
|
||||
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
|
||||
id: nproc
|
||||
with:
|
||||
subtract: ${{ env.NPROC_SUBTRACT }}
|
||||
|
||||
- name: Setup Conan
|
||||
env:
|
||||
SANITIZERS: ${{ matrix.sanitizers }}
|
||||
uses: ./.github/actions/setup-conan
|
||||
with:
|
||||
remote_name: ${{ env.CONAN_REMOTE_NAME }}
|
||||
remote_url: ${{ env.CONAN_REMOTE_URL }}
|
||||
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
|
||||
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
|
||||
|
||||
- name: Build dependencies
|
||||
uses: ./.github/actions/build-deps
|
||||
with:
|
||||
build_dir: .build
|
||||
build_nproc: ${{ steps.nproc.outputs.nproc }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }}
|
||||
# Set the verbosity to "quiet" for Windows to avoid an excessive
|
||||
# amount of logs. For other OSes, the "verbose" logs are more useful.
|
||||
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
|
||||
sanitizers: ${{ matrix.sanitizers }}
|
||||
|
||||
- name: Log into Conan remote
|
||||
if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
|
||||
|
||||
5 .gitignore vendored
@@ -1,5 +1,4 @@
# .gitignore
# cspell: disable

# Macintosh Desktop Services Store files.
.DS_Store
@@ -36,7 +35,6 @@ gmon.out

# Customized configs.
/rippled.cfg
/xrpld.cfg
/validators.txt

# Locally patched Conan recipes
@@ -64,9 +62,6 @@ DerivedData
/.vs/
/.vscode/

# zed IDE.
/.zed/

# AI tools.
/.augment
/.claude

@@ -26,37 +26,16 @@ repos:
|
||||
args: [--style=file]
|
||||
"types_or": [c++, c, proto]
|
||||
|
||||
- repo: https://github.com/cheshirekow/cmake-format-precommit
|
||||
rev: e2c2116d86a80e72e7146a06e68b7c228afc6319 # frozen: v0.6.13
|
||||
hooks:
|
||||
- id: cmake-format
|
||||
additional_dependencies: [PyYAML]
|
||||
|
||||
- repo: https://github.com/rbubley/mirrors-prettier
|
||||
rev: 5ba47274f9b181bce26a5150a725577f3c336011 # frozen: v3.6.2
|
||||
hooks:
|
||||
- id: prettier
|
||||
|
||||
- repo: https://github.com/psf/black-pre-commit-mirror
|
||||
rev: 831207fd435b47aeffdf6af853097e64322b4d44 # frozen: v25.12.0
|
||||
rev: 25.11.0
|
||||
hooks:
|
||||
- id: black
|
||||
|
||||
- repo: https://github.com/streetsidesoftware/cspell-cli
|
||||
rev: 1cfa010f078c354f3ffb8413616280cc28f5ba21 # frozen: v9.4.0
|
||||
hooks:
|
||||
- id: cspell # Spell check changed files
|
||||
exclude: .config/cspell.config.yaml
|
||||
- id: cspell # Spell check the commit message
|
||||
name: check commit message spelling
|
||||
args:
|
||||
- --no-must-find-files
|
||||
- --no-progress
|
||||
- --no-summary
|
||||
- --files
|
||||
- .git/COMMIT_EDITMSG
|
||||
stages: [commit-msg]
|
||||
|
||||
exclude: |
|
||||
(?x)^(
|
||||
external/.*|
|
||||
|
||||
200 API-CHANGELOG.md
@@ -6,85 +6,90 @@ For info about how [API versioning](https://xrpl.org/request-formatting.html#api
|
||||
|
||||
The API version controls the API behavior you see. This includes what properties you see in responses, what parameters you're permitted to send in requests, and so on. You specify the API version in each of your requests. When a breaking change is introduced to the `rippled` API, a new version is released. To avoid breaking your code, you should set (or increase) your version when you're ready to upgrade.
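For illustration, a minimal WebSocket request that pins the API version might look like the following (the command and account are placeholders, not taken from this changelog):

```json
{
  "id": 1,
  "command": "account_info",
  "api_version": 2,
  "account": "rExampleAddress"
}
```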
|
||||
|
||||
The [commandline](https://xrpl.org/docs/references/http-websocket-apis/api-conventions/request-formatting/#commandline-format) always uses the latest API version. The command line is intended for ad-hoc usage by humans, not programs or automated scripts. The command line is not meant for use in production code.
|
||||
|
||||
For a log of breaking changes, see the **API Version [number]** headings. In general, breaking changes are associated with a particular API Version number. For non-breaking changes, scroll to the **XRP Ledger version [x.y.z]** headings. Non-breaking changes are associated with a particular XRP Ledger (`rippled`) release.
|
||||
|
||||
## API Version 3 (Beta)
|
||||
|
||||
API version 3 is currently a beta API. It requires enabling `[beta_rpc_api]` in the rippled configuration to use. See [API-VERSION-3.md](API-VERSION-3.md) for the full list of changes in API version 3.
|
||||
|
||||
## API Version 2
|
||||
|
||||
API version 2 is available in `rippled` version 2.0.0 and later. See [API-VERSION-2.md](API-VERSION-2.md) for the full list of changes in API version 2.
|
||||
API version 2 is available in `rippled` version 2.0.0 and later. To use this API, clients specify `"api_version" : 2` in each request.
|
||||
|
||||
#### Removed methods
|
||||
|
||||
In API version 2, the following deprecated methods are no longer available: (https://github.com/XRPLF/rippled/pull/4759)
|
||||
|
||||
- `tx_history` - Instead, use other methods such as `account_tx` or `ledger` with the `transactions` field set to `true`.
|
||||
- `ledger_header` - Instead, use the `ledger` method.
|
||||
|
||||
#### Modifications to JSON transaction element in V2
|
||||
|
||||
In API version 2, JSON elements for transaction output have been changed and made consistent for all methods which output transactions. (https://github.com/XRPLF/rippled/pull/4775)
|
||||
This helps to unify the JSON serialization format of transactions. (https://github.com/XRPLF/clio/issues/722, https://github.com/XRPLF/rippled/issues/4727)
|
||||
|
||||
- JSON transaction element is named `tx_json`
|
||||
- Binary transaction element is named `tx_blob`
|
||||
- JSON transaction metadata element is named `meta`
|
||||
- Binary transaction metadata element is named `meta_blob`
|
||||
|
||||
Additionally, these elements are now consistently available next to `tx_json` (i.e. sibling elements), where possible:
|
||||
|
||||
- `hash` - Transaction ID. This data was stored inside transaction output in API version 1, but in API version 2 is a sibling element.
|
||||
- `ledger_index` - Ledger index (only set on validated ledgers)
|
||||
- `ledger_hash` - Ledger hash (only set on closed or validated ledgers)
|
||||
- `close_time_iso` - Ledger close time expressed in ISO 8601 time format (only set on validated ledgers)
|
||||
- `validated` - Bool element set to `true` if the transaction is in a validated ledger, otherwise `false`
|
||||
|
||||
This change affects the following methods:
|
||||
|
||||
- `tx` - Transaction data moved into element `tx_json` (was inline inside `result`) or, if binary output was requested, moved from `tx` to `tx_blob`. Renamed binary transaction metadata element (if it was requested) from `meta` to `meta_blob`. Changed location of `hash` and added new elements
|
||||
- `account_tx` - Renamed transaction element from `tx` to `tx_json`. Renamed binary transaction metadata element (if it was requested) from `meta` to `meta_blob`. Changed location of `hash` and added new elements
|
||||
- `transaction_entry` - Renamed transaction metadata element from `metadata` to `meta`. Changed location of `hash` and added new elements
|
||||
- `subscribe` - Renamed transaction element from `transaction` to `tx_json`. Changed location of `hash` and added new elements
|
||||
- `sign`, `sign_for`, `submit` and `submit_multisigned` - Changed location of `hash` element.
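Putting the renames above together, a hedged sketch of an API version 2 `tx` response (all field values are placeholders) shows `tx_json` and its sibling elements:

```json
{
  "result": {
    "tx_json": { "TransactionType": "Payment", "Account": "rExampleSender" },
    "meta": { "TransactionResult": "tesSUCCESS" },
    "hash": "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF",
    "ledger_index": 12345678,
    "ledger_hash": "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210",
    "close_time_iso": "2024-01-01T00:00:00Z",
    "validated": true
  }
}
```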
|
||||
|
||||
#### Modification to `Payment` transaction JSON schema
|
||||
|
||||
When reading Payments, the `Amount` field should generally **not** be used. Instead, use [delivered_amount](https://xrpl.org/partial-payments.html#the-delivered_amount-field) to see the amount that the Payment delivered. To clarify its meaning, the `Amount` field is being renamed to `DeliverMax`. (https://github.com/XRPLF/rippled/pull/4733)
|
||||
|
||||
- In `Payment` transaction type, JSON RPC field `Amount` is renamed to `DeliverMax`. To enable smooth client transition, `Amount` is still handled, as described below: (https://github.com/XRPLF/rippled/pull/4733)
|
||||
- On JSON RPC input (e.g. `submit_multisigned` etc. methods), `Amount` is recognized as an alias to `DeliverMax` for both API version 1 and version 2 clients.
|
||||
- On JSON RPC input, submitting both `Amount` and `DeliverMax` fields is allowed _only_ if they are identical; otherwise such input is rejected with `rpcINVALID_PARAMS` error.
|
||||
- On JSON RPC output (e.g. `subscribe`, `account_tx` etc. methods), `DeliverMax` is present in both API version 1 and version 2.
|
||||
- On JSON RPC output, `Amount` is only present in API version 1 and _not_ in version 2.
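For example, an API version 2 Payment as returned by these methods carries `DeliverMax` in place of `Amount` (the addresses and amount below are placeholders):

```json
{
  "TransactionType": "Payment",
  "Account": "rExampleSender",
  "Destination": "rExampleDestination",
  "DeliverMax": "1000000"
}
```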
|
||||
|
||||
#### Modifications to account_info response
|
||||
|
||||
- `signer_lists` is returned in the root of the response. (In API version 1, it was nested under `account_data`.) (https://github.com/XRPLF/rippled/pull/3770)
|
||||
- When using an invalid `signer_lists` value, the API now returns an "invalidParams" error. (https://github.com/XRPLF/rippled/pull/4585)
|
||||
- (`signer_lists` must be a boolean. In API version 1, strings were accepted and may return a normal response - i.e. as if `signer_lists` were `true`.)
|
||||
|
||||
#### Modifications to [account_tx](https://xrpl.org/account_tx.html#account_tx) response
|
||||
|
||||
- Using `ledger_index_min`, `ledger_index_max`, and `ledger_index` returns `invalidParams` because if you use `ledger_index_min` or `ledger_index_max`, then it does not make sense to also specify `ledger_index`. In API version 1, no error was returned. (https://github.com/XRPLF/rippled/pull/4571)
|
||||
- The same applies for `ledger_index_min`, `ledger_index_max`, and `ledger_hash`. (https://github.com/XRPLF/rippled/issues/4545#issuecomment-1565065579)
|
||||
- Using a `ledger_index_min` or `ledger_index_max` beyond the range of ledgers that the server has:
|
||||
- returns `lgrIdxMalformed` in API version 2. Previously, in API version 1, no error was returned. (https://github.com/XRPLF/rippled/issues/4288)
|
||||
- Attempting to use a non-boolean value (such as a string) for the `binary` or `forward` parameters returns `invalidParams` (`rpcINVALID_PARAMS`). Previously, in API version 1, no error was returned. (https://github.com/XRPLF/rippled/pull/4620)
|
||||
|
||||
#### Modifications to [noripple_check](https://xrpl.org/noripple_check.html#noripple_check) response
|
||||
|
||||
- Attempting to use a non-boolean value (such as a string) for the `transactions` parameter returns `invalidParams` (`rpcINVALID_PARAMS`). Previously, in API version 1, no error was returned. (https://github.com/XRPLF/rippled/pull/4620)
|
||||
|
||||
## API Version 1
|
||||
|
||||
This version is supported by all `rippled` versions. For WebSocket and HTTP JSON-RPC requests, it is currently the default API version used when no `api_version` is specified.
|
||||
|
||||
## XRP Ledger server version 3.1.0
|
||||
The [commandline](https://xrpl.org/docs/references/http-websocket-apis/api-conventions/request-formatting/#commandline-format) always uses the latest API version. The command line is intended for ad-hoc usage by humans, not programs or automated scripts. The command line is not meant for use in production code.
|
||||
|
||||
[Version 3.1.0](https://github.com/XRPLF/rippled/releases/tag/3.1.0) was released on Jan 27, 2026.
|
||||
### Inconsistency: server_info - network_id
|
||||
|
||||
### Additions in 3.1.0
|
||||
|
||||
- `vault_info`: New RPC method to retrieve information about a specific vault (part of XLS-66 Lending Protocol). ([#6156](https://github.com/XRPLF/rippled/pull/6156))
|
||||
|
||||
## XRP Ledger server version 3.0.0
|
||||
|
||||
[Version 3.0.0](https://github.com/XRPLF/rippled/releases/tag/3.0.0) was released on Dec 9, 2025.
|
||||
|
||||
### Additions in 3.0.0
|
||||
|
||||
- `ledger_entry`: Supports all ledger entry types with dedicated parsers. ([#5237](https://github.com/XRPLF/rippled/pull/5237))
|
||||
- `ledger_entry`: New error codes `entryNotFound` and `unexpectedLedgerType` for more specific error handling. ([#5237](https://github.com/XRPLF/rippled/pull/5237))
|
||||
- `ledger_entry`: Improved error messages with more context (e.g., specifying which field is invalid or missing). ([#5237](https://github.com/XRPLF/rippled/pull/5237))
|
||||
- `ledger_entry`: Assorted bug fixes in RPC processing. ([#5237](https://github.com/XRPLF/rippled/pull/5237))
|
||||
- `simulate`: Supports additional metadata in the response. ([#5754](https://github.com/XRPLF/rippled/pull/5754))
|
||||
|
||||
## XRP Ledger server version 2.6.2
|
||||
|
||||
[Version 2.6.2](https://github.com/XRPLF/rippled/releases/tag/2.6.2) was released on Nov 19, 2025.
|
||||
|
||||
This release contains bug fixes only and no API changes.
|
||||
|
||||
## XRP Ledger server version 2.6.1
|
||||
|
||||
[Version 2.6.1](https://github.com/XRPLF/rippled/releases/tag/2.6.1) was released on Sep 30, 2025.
|
||||
|
||||
This release contains bug fixes only and no API changes.
|
||||
|
||||
## XRP Ledger server version 2.6.0
|
||||
|
||||
[Version 2.6.0](https://github.com/XRPLF/rippled/releases/tag/2.6.0) was released on Aug 27, 2025.
|
||||
|
||||
### Additions in 2.6.0
|
||||
|
||||
- `account_info`: Added `allowTrustLineLocking` flag in response. ([#5525](https://github.com/XRPLF/rippled/pull/5525))
|
||||
- `ledger`: Removed the type filter from the RPC command. ([#4934](https://github.com/XRPLF/rippled/pull/4934))
|
||||
- `subscribe` (`validations` stream): `network_id` is now included. ([#5579](https://github.com/XRPLF/rippled/pull/5579))
|
||||
- `subscribe` (`transactions` stream): `nftoken_id`, `nftoken_ids`, and `offer_id` are now included in transaction metadata. ([#5230](https://github.com/XRPLF/rippled/pull/5230))
|
||||
|
||||
## XRP Ledger server version 2.5.1
|
||||
|
||||
[Version 2.5.1](https://github.com/XRPLF/rippled/releases/tag/2.5.1) was released on Sep 17, 2025.
|
||||
|
||||
This release contains bug fixes only and no API changes.
|
||||
The `network_id` field was added in the `server_info` response in version 1.5.0 (2019), but it is not returned in [reporting mode](https://xrpl.org/rippled-server-modes.html#reporting-mode). However, use of reporting mode is now discouraged, in favor of using [Clio](https://github.com/XRPLF/clio) instead.
|
||||
|
||||
## XRP Ledger server version 2.5.0
|
||||
|
||||
[Version 2.5.0](https://github.com/XRPLF/rippled/releases/tag/2.5.0) was released on Jun 24, 2025.
|
||||
As of 2025-04-04, version 2.5.0 is in development. You can use a pre-release version by building from source or [using the `nightly` package](https://xrpl.org/docs/infrastructure/installation/install-rippled-on-ubuntu).
|
||||
|
||||
### Additions and bugfixes in 2.5.0
|
||||
|
||||
- `tx`: Added `ctid` field to the response and improved error handling. ([#4738](https://github.com/XRPLF/rippled/pull/4738))
|
||||
- `ledger_entry`: Improved error messages in `permissioned_domain`. ([#5344](https://github.com/XRPLF/rippled/pull/5344))
|
||||
- `simulate`: Improved multi-sign usage. ([#5479](https://github.com/XRPLF/rippled/pull/5479))
|
||||
- `channel_authorize`: If `signing_support` is not enabled in the config, the RPC is disabled. ([#5385](https://github.com/XRPLF/rippled/pull/5385))
|
||||
- `subscribe` (admin): Removed webhook queue limit to prevent dropping notifications; reduced HTTP timeout from 10 minutes to 30 seconds. ([#5163](https://github.com/XRPLF/rippled/pull/5163))
|
||||
- `ledger_data` (gRPC): Fixed crashing issue with some invalid markers. ([#5137](https://github.com/XRPLF/rippled/pull/5137))
|
||||
- `account_lines`: Fixed error with `no_ripple` and `no_ripple_peer` sometimes showing up incorrectly. ([#5345](https://github.com/XRPLF/rippled/pull/5345))
|
||||
- `account_tx`: Fixed issue with incorrect CTIDs. ([#5408](https://github.com/XRPLF/rippled/pull/5408))
|
||||
- `channel_authorize`: If `signing_support` is not enabled in the config, the RPC is disabled.
|
||||
|
||||
## XRP Ledger server version 2.4.0
|
||||
|
||||
@@ -92,19 +97,11 @@ This release contains bug fixes only and no API changes.
|
||||
|
||||
### Additions and bugfixes in 2.4.0
|
||||
|
||||
- `simulate`: A new RPC that executes a [dry run of a transaction submission](https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0069d-simulate#2-rpc-simulate). ([#5069](https://github.com/XRPLF/rippled/pull/5069))
|
||||
- Signing methods (`sign`, `sign_for`, `submit`): Autofill fees better, properly handle transactions without a base fee, and autofill the `NetworkID` field. ([#5069](https://github.com/XRPLF/rippled/pull/5069))
|
||||
- `ledger_entry`: `state` is added as an alias for `ripple_state`. ([#5199](https://github.com/XRPLF/rippled/pull/5199))
|
||||
- `ledger`, `ledger_data`, `account_objects`: Support filtering ledger entry types by their canonical names (case-insensitive). ([#5271](https://github.com/XRPLF/rippled/pull/5271))
|
||||
- `validators`: Added new field `validator_list_threshold` in response. ([#5112](https://github.com/XRPLF/rippled/pull/5112))
|
||||
- `server_info`: Added git commit hash info on admin connection. ([#5225](https://github.com/XRPLF/rippled/pull/5225))
|
||||
- `server_definitions`: Changed larger `UInt` serialized types to `Hash`. ([#5231](https://github.com/XRPLF/rippled/pull/5231))
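As a sketch of a `simulate` request (the transaction is a placeholder; see the linked XLS-0069d spec for the authoritative request shape):

```json
{
  "command": "simulate",
  "tx_json": {
    "TransactionType": "Payment",
    "Account": "rExampleSender",
    "Destination": "rExampleDestination",
    "Amount": "1000000"
  }
}
```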
|
||||
|
||||
## XRP Ledger server version 2.3.1
|
||||
|
||||
[Version 2.3.1](https://github.com/XRPLF/rippled/releases/tag/2.3.1) was released on Jan 29, 2025.
|
||||
|
||||
This release contains bug fixes only and no API changes.
|
||||
- `ledger_entry`: `state` is added an alias for `ripple_state`.
|
||||
- `ledger_entry`: Enables case-insensitive filtering by canonical name in addition to case-sensitive filtering by RPC name.
|
||||
- `validators`: Added new field `validator_list_threshold` in response.
|
||||
- `simulate`: A new RPC that executes a [dry run of a transaction submission](https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0069d-simulate#2-rpc-simulate)
|
||||
- Signing methods autofill fees better and properly handle transactions that don't have a base fee, and will also autofill the `NetworkID` field.
|
||||
|
||||
## XRP Ledger server version 2.3.0
|
||||
|
||||
@@ -112,30 +109,19 @@ This release contains bug fixes only and no API changes.
|
||||
|
||||
### Breaking changes in 2.3.0
|
||||
|
||||
- `book_changes`: If the requested ledger version is not available on this node, a `ledgerNotFound` error is returned and the node does not attempt to acquire the ledger from the p2p network (as with other non-admin RPCs). Admins can still attempt to retrieve old ledgers with the `ledger_request` RPC.
|
||||
- `book_changes`: If the requested ledger version is not available on this node, a `ledgerNotFound` error is returned and the node does not attempt to acquire the ledger from the p2p network (as with other non-admin RPCs).
|
||||
|
||||
Admins can still attempt to retrieve old ledgers with the `ledger_request` RPC.
|
||||
|
||||
### Additions and bugfixes in 2.3.0
|
||||
|
||||
- `book_changes`: Returns a `validated` field in its response. ([#5096](https://github.com/XRPLF/rippled/pull/5096))
|
||||
- `book_changes`: Accepts shortcut strings (`current`, `closed`, `validated`) for the `ledger_index` parameter. ([#5096](https://github.com/XRPLF/rippled/pull/5096))
|
||||
- `server_definitions`: Include `index` in response. ([#5190](https://github.com/XRPLF/rippled/pull/5190))
|
||||
- `account_nfts`: Fix issue where unassociated marker would return incorrect results. ([#5045](https://github.com/XRPLF/rippled/pull/5045))
|
||||
- `account_objects`: Fix issue where invalid marker would not return an error. ([#5046](https://github.com/XRPLF/rippled/pull/5046))
|
||||
- `account_objects`: Disallow filtering by ledger entry types that an account cannot hold. ([#5056](https://github.com/XRPLF/rippled/pull/5056))
|
||||
- `tx`: Allow lowercase CTID. ([#5049](https://github.com/XRPLF/rippled/pull/5049))
|
||||
- `feature`: Better error handling for invalid values of `feature`. ([#5063](https://github.com/XRPLF/rippled/pull/5063))
|
||||
- `book_changes`: Returns a `validated` field in its response, which was missing in prior versions.
|
||||
|
||||
## XRP Ledger server version 2.2.0
|
||||
|
||||
[Version 2.2.0](https://github.com/XRPLF/rippled/releases/tag/2.2.0) was released on Jun 5, 2024. The following additions are non-breaking (because they are purely additive):
|
||||
|
||||
- `feature`: Add a non-admin mode for users. (It was previously only available to admin connections.) The method returns an updated list of amendments, including their names and other information. ([#4781](https://github.com/XRPLF/rippled/pull/4781))
|
||||
|
||||
## XRP Ledger server version 2.0.1
|
||||
|
||||
[Version 2.0.1](https://github.com/XRPLF/rippled/releases/tag/2.0.1) was released on Jan 29, 2024. The following additions are non-breaking:
|
||||
|
||||
- `path_find`: Fixes unbounded memory growth. ([#4822](https://github.com/XRPLF/rippled/pull/4822))
|
||||
- The `feature` method now has a non-admin mode for users. (It was previously only available to admin connections.) The method returns an updated list of amendments, including their names and other information. ([#4781](https://github.com/XRPLF/rippled/pull/4781))
|
||||
|
||||
## XRP Ledger server version 2.0.0
|
||||
|
||||
@@ -143,18 +129,24 @@ This release contains bug fixes only and no API changes.
|
||||
|
||||
- `server_definitions`: A new RPC that generates a `definitions.json`-like output that can be used in XRPL libraries.
|
||||
- In `Payment` transactions, `DeliverMax` has been added. This is a replacement for the `Amount` field, which should not be used. Typically, the `delivered_amount` (in transaction metadata) should be used. To ease the transition, `DeliverMax` is present regardless of API version, since adding a field is non-breaking.
|
||||
- API version 2 has been moved from beta to supported, meaning that it is generally available (regardless of the `beta_rpc_api` setting). The full list of changes is in [API-VERSION-2.md](API-VERSION-2.md).
|
||||
- API version 2 has been moved from beta to supported, meaning that it is generally available (regardless of the `beta_rpc_api` setting).
|
||||
|
||||
## XRP Ledger server version 2.2.0
|
||||
|
||||
The following is a non-breaking addition to the API.
|
||||
|
||||
- The `feature` method now has a non-admin mode for users. (It was previously only available to admin connections.) The method returns an updated list of amendments, including their names and other information. ([#4781](https://github.com/XRPLF/rippled/pull/4781))
|
||||
|
||||
## XRP Ledger server version 1.12.0
|
||||
|
||||
[Version 1.12.0](https://github.com/XRPLF/rippled/releases/tag/1.12.0) was released on Sep 6, 2023. The following additions are non-breaking (because they are purely additive):
|
||||
[Version 1.12.0](https://github.com/XRPLF/rippled/releases/tag/1.12.0) was released on Sep 6, 2023. The following additions are non-breaking (because they are purely additive).
|
||||
|
||||
- `server_info`: Added `ports`, an array which advertises the RPC and WebSocket ports. This information is also included in the `/crawl` endpoint (which calls `server_info` internally). `grpc` and `peer` ports are also included. ([#4427](https://github.com/XRPLF/rippled/pull/4427))
|
||||
- `server_info`: Added `ports`, an array which advertises the RPC and WebSocket ports. This information is also included in the `/crawl` endpoint (which calls `server_info` internally). `grpc` and `peer` ports are also included. (https://github.com/XRPLF/rippled/pull/4427)
|
||||
- `ports` contains objects, each containing a `port` for the listening port (a number string), and a `protocol` array listing the supported protocols on that port.
|
||||
- This allows crawlers to build a more detailed topology without needing to port-scan nodes.
|
||||
- (For peers and other non-admin clients, the info about admin ports is excluded.)
|
||||
- Clawback: The following additions are gated by the Clawback amendment (`featureClawback`). ([#4553](https://github.com/XRPLF/rippled/pull/4553))
|
||||
- Adds an [AccountRoot flag](https://xrpl.org/accountroot.html#accountroot-flags) called `lsfAllowTrustLineClawback`. ([#4617](https://github.com/XRPLF/rippled/pull/4617))
|
||||
- Clawback: The following additions are gated by the Clawback amendment (`featureClawback`). (https://github.com/XRPLF/rippled/pull/4553)
|
||||
- Adds an [AccountRoot flag](https://xrpl.org/accountroot.html#accountroot-flags) called `lsfAllowTrustLineClawback` (https://github.com/XRPLF/rippled/pull/4617)
|
||||
- Adds the corresponding `asfAllowTrustLineClawback` [AccountSet Flag](https://xrpl.org/accountset.html#accountset-flags) as well.
|
||||
- Clawback is disabled by default, so if an issuer desires the ability to claw back funds, they must use an `AccountSet` transaction to set the AllowTrustLineClawback flag. They must do this before creating any trust lines, offers, escrows, payment channels, or checks.
|
||||
- Adds the [Clawback transaction type](https://github.com/XRPLF/XRPL-Standards/blob/master/XLS-39d-clawback/README.md#331-clawback-transaction), containing these fields:
|
||||
@@ -189,16 +181,16 @@ This release contains bug fixes only and no API changes.
|
||||
|
||||
### Breaking changes in 1.11
|
||||
|
||||
- Added the ability to mark amendments as obsolete. For the `feature` admin API, there is a new possible value for the `vetoed` field. ([#4291](https://github.com/XRPLF/rippled/pull/4291))
|
||||
- Added the ability to mark amendments as obsolete. For the `feature` admin API, there is a new possible value for the `vetoed` field. (https://github.com/XRPLF/rippled/pull/4291)
|
||||
- The value of `vetoed` can now be `true`, `false`, or `"Obsolete"`.
|
||||
- Removed the acceptance of seeds or public keys in place of account addresses. ([#4404](https://github.com/XRPLF/rippled/pull/4404))
|
||||
- Removed the acceptance of seeds or public keys in place of account addresses. (https://github.com/XRPLF/rippled/pull/4404)
|
||||
- This simplifies the API and encourages better security practices (i.e. seeds should never be sent over the network).
|
||||
- For the `ledger_data` method, when all entries are filtered out, the `state` field of the response is now an empty list (in other words, an empty array, `[]`). (Previously, it would return `null`.) While this is technically a breaking change, the new behavior is consistent with the documentation, so this is considered only a bug fix. ([#4398](https://github.com/XRPLF/rippled/pull/4398))
|
||||
- For the `ledger_data` method, when all entries are filtered out, the `state` field of the response is now an empty list (in other words, an empty array, `[]`). (Previously, it would return `null`.) While this is technically a breaking change, the new behavior is consistent with the documentation, so this is considered only a bug fix. (https://github.com/XRPLF/rippled/pull/4398)
|
||||
- If and when the `fixNFTokenRemint` amendment activates, there will be a new AccountRoot field, `FirstNFTSequence`. This field is set to the current account sequence when the account issues their first NFT. If an account has not issued any NFTs, then the field is not set. ([#4406](https://github.com/XRPLF/rippled/pull/4406))
|
||||
- There is a new account deletion restriction: an account can only be deleted if `FirstNFTSequence` + `MintedNFTokens` + `256` is less than the current ledger sequence.
|
||||
- This is potentially a breaking change if clients have logic for determining whether an account can be deleted.
|
||||
- NetworkID
|
||||
- For sidechains and networks with a network ID greater than 1024, there is a new [transaction common field](https://xrpl.org/transaction-common-fields.html), `NetworkID`. ([#4370](https://github.com/XRPLF/rippled/pull/4370))
|
||||
- For sidechains and networks with a network ID greater than 1024, there is a new [transaction common field](https://xrpl.org/transaction-common-fields.html), `NetworkID`. (https://github.com/XRPLF/rippled/pull/4370)
|
||||
- This field helps to prevent replay attacks and is now required for chains whose network ID is 1025 or higher.
|
||||
- The field must be omitted for Mainnet, so there is no change for Mainnet users.
|
||||
- There are three new local error codes:
|
||||
@@ -208,10 +200,10 @@ This release contains bug fixes only and no API changes.
|
||||
|
||||
### Additions and bug fixes in 1.11
|
||||
|
||||
- Added `nftoken_id`, `nftoken_ids` and `offer_id` meta fields into NFT `tx` and `account_tx` responses. ([#4447](https://github.com/XRPLF/rippled/pull/4447))
|
||||
- Added an `account_flags` object to the `account_info` method response. ([#4459](https://github.com/XRPLF/rippled/pull/4459))
|
||||
- Added `NFTokenPages` to the `account_objects` RPC. ([#4352](https://github.com/XRPLF/rippled/pull/4352))
|
||||
- Fixed: `marker` returned from the `account_lines` command would not work on subsequent commands. ([#4361](https://github.com/XRPLF/rippled/pull/4361))
|
||||
- Added `nftoken_id`, `nftoken_ids` and `offer_id` meta fields into NFT `tx` and `account_tx` responses. (https://github.com/XRPLF/rippled/pull/4447)
|
||||
- Added an `account_flags` object to the `account_info` method response. (https://github.com/XRPLF/rippled/pull/4459)
|
||||
- Added `NFTokenPages` to the `account_objects` RPC. (https://github.com/XRPLF/rippled/pull/4352)
|
||||
- Fixed: `marker` returned from the `account_lines` command would not work on subsequent commands. (https://github.com/XRPLF/rippled/pull/4361)
|
||||
|
||||
## XRP Ledger server version 1.10.0
|
||||
|
||||
|
||||
@@ -1,66 +0,0 @@
|
||||
# API Version 2
|
||||
|
||||
API version 2 is available in `rippled` version 2.0.0 and later. To use this API, clients specify `"api_version" : 2` in each request.
|
||||
|
||||
For info about how [API versioning](https://xrpl.org/request-formatting.html#api-versioning) works, including examples, please view the [XLS-22d spec](https://github.com/XRPLF/XRPL-Standards/discussions/54). For details about the implementation of API versioning, view the [implementation PR](https://github.com/XRPLF/rippled/pull/3155). API versioning ensures existing integrations and users continue to receive existing behavior, while those that request a higher API version will experience new behavior.
|
||||
|
||||
## Removed methods
|
||||
|
||||
In API version 2, the following deprecated methods are no longer available: ([#4759](https://github.com/XRPLF/rippled/pull/4759))
|
||||
|
||||
- `tx_history` - Instead, use other methods such as `account_tx` or `ledger` with the `transactions` field set to `true`.
|
||||
- `ledger_header` - Instead, use the `ledger` method.
|
||||
|
||||
## Modifications to JSON transaction element in API version 2
|
||||
|
||||
In API version 2, JSON elements for transaction output have been changed and made consistent for all methods which output transactions. ([#4775](https://github.com/XRPLF/rippled/pull/4775))
|
||||
This helps to unify the JSON serialization format of transactions. ([clio#722](https://github.com/XRPLF/clio/issues/722), [#4727](https://github.com/XRPLF/rippled/issues/4727))
|
||||
|
||||
- JSON transaction element is named `tx_json`
|
||||
- Binary transaction element is named `tx_blob`
|
||||
- JSON transaction metadata element is named `meta`
|
||||
- Binary transaction metadata element is named `meta_blob`
|
||||
|
||||
Additionally, these elements are now consistently available next to `tx_json` (i.e. sibling elements), where possible:
|
||||
|
||||
- `hash` - Transaction ID. This data was stored inside transaction output in API version 1, but in API version 2 is a sibling element.
|
||||
- `ledger_index` - Ledger index (only set on validated ledgers)
|
||||
- `ledger_hash` - Ledger hash (only set on closed or validated ledgers)
|
||||
- `close_time_iso` - Ledger close time expressed in ISO 8601 time format (only set on validated ledgers)
|
||||
- `validated` - Bool element set to `true` if the transaction is in a validated ledger, otherwise `false`
|
||||
|
||||
This change affects the following methods:
|
||||
|
||||
- `tx` - Transaction data moved into element `tx_json` (was inline inside `result`) or, if binary output was requested, moved from `tx` to `tx_blob`. Renamed binary transaction metadata element (if it was requested) from `meta` to `meta_blob`. Changed location of `hash` and added new elements
|
||||
- `account_tx` - Renamed transaction element from `tx` to `tx_json`. Renamed binary transaction metadata element (if it was requested) from `meta` to `meta_blob`. Changed location of `hash` and added new elements
|
||||
- `transaction_entry` - Renamed transaction metadata element from `metadata` to `meta`. Changed location of `hash` and added new elements
|
||||
- `subscribe` - Renamed transaction element from `transaction` to `tx_json`. Changed location of `hash` and added new elements
|
||||
- `sign`, `sign_for`, `submit` and `submit_multisigned` - Changed location of `hash` element.
|
||||
|
||||
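A hedged sketch of how a client library might read transaction JSON across both API versions, given the renaming above; `result` is the parsed `result` object of a response and the helper is hypothetical.

```cpp
#include <json/json.h>

// Sketch: return the transaction JSON from a response, preferring the
// API version 2 element name and falling back to API version 1 layouts.
Json::Value getTransactionJson(Json::Value const& result)
{
    if (result.isMember("tx_json"))  // API version 2
        return result["tx_json"];
    if (result.isMember("tx"))       // API version 1, e.g. account_tx entries
        return result["tx"];
    return result;                   // API version 1 `tx` method: fields inline
}
```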
## Modifications to `Payment` transaction JSON schema
|
||||
|
||||
When reading Payments, the `Amount` field should generally **not** be used. Instead, use [delivered_amount](https://xrpl.org/partial-payments.html#the-delivered_amount-field) to see the amount that the Payment delivered. To clarify its meaning, the `Amount` field is being renamed to `DeliverMax`. ([#4733](https://github.com/XRPLF/rippled/pull/4733))
|
||||
|
||||
- In `Payment` transaction type, JSON RPC field `Amount` is renamed to `DeliverMax`. To enable smooth client transition, `Amount` is still handled, as described below: ([#4733](https://github.com/XRPLF/rippled/pull/4733))
|
||||
- On JSON RPC input (e.g. `submit_multisigned` etc. methods), `Amount` is recognized as an alias to `DeliverMax` for both API version 1 and version 2 clients.
|
||||
- On JSON RPC input, submitting both `Amount` and `DeliverMax` fields is allowed _only_ if they are identical; otherwise such input is rejected with `rpcINVALID_PARAMS` error.
|
||||
- On JSON RPC output (e.g. `subscribe`, `account_tx` etc. methods), `DeliverMax` is present in both API version 1 and version 2.
|
||||
- On JSON RPC output, `Amount` is only present in API version 1 and _not_ in version 2.
|
||||
|
||||
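A hedged client-side sketch of the rules above; the helper and its error handling are illustrative only.

```cpp
#include <json/json.h>
#include <stdexcept>

// Sketch: prepare a Payment for submission under the DeliverMax rules above.
// If both Amount and DeliverMax are present they must be identical, mirroring
// the server-side rpcINVALID_PARAMS check.
void normalizeDeliverMax(Json::Value& payment)
{
    bool const hasAmount = payment.isMember("Amount");
    bool const hasDeliverMax = payment.isMember("DeliverMax");

    if (hasAmount && hasDeliverMax &&
        payment["Amount"] != payment["DeliverMax"])
        throw std::invalid_argument("Amount and DeliverMax differ");

    if (hasAmount && !hasDeliverMax)
    {
        payment["DeliverMax"] = payment["Amount"];
        payment.removeMember("Amount");
    }
}
```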
## Modifications to account_info response
|
||||
|
||||
- `signer_lists` is returned in the root of the response. (In API version 1, it was nested under `account_data`.) ([#3770](https://github.com/XRPLF/rippled/pull/3770))
|
||||
- When using an invalid `signer_lists` value, the API now returns an "invalidParams" error. ([#4585](https://github.com/XRPLF/rippled/pull/4585))
|
||||
- (`signer_lists` must be a boolean. In API version 1, strings were accepted and may return a normal response - i.e. as if `signer_lists` were `true`.)
|
||||
|
||||
## Modifications to [account_tx](https://xrpl.org/account_tx.html#account_tx) response
|
||||
|
||||
- Using `ledger_index_min`, `ledger_index_max`, and `ledger_index` returns `invalidParams` because if you use `ledger_index_min` or `ledger_index_max`, then it does not make sense to also specify `ledger_index`. In API version 1, no error was returned. ([#4571](https://github.com/XRPLF/rippled/pull/4571))
|
||||
- The same applies for `ledger_index_min`, `ledger_index_max`, and `ledger_hash`. ([#4545](https://github.com/XRPLF/rippled/issues/4545#issuecomment-1565065579))
|
||||
- Using a `ledger_index_min` or `ledger_index_max` beyond the range of ledgers that the server has:
|
||||
- returns `lgrIdxMalformed` in API version 2. Previously, in API version 1, no error was returned. ([#4288](https://github.com/XRPLF/rippled/issues/4288))
|
||||
- Attempting to use a non-boolean value (such as a string) for the `binary` or `forward` parameters returns `invalidParams` (`rpcINVALID_PARAMS`). Previously, in API version 1, no error was returned. ([#4620](https://github.com/XRPLF/rippled/pull/4620))
|
||||
|
||||
## Modifications to [noripple_check](https://xrpl.org/noripple_check.html#noripple_check) response
|
||||
|
||||
- Attempting to use a non-boolean value (such as a string) for the `transactions` parameter returns `invalidParams` (`rpcINVALID_PARAMS`). Previously, in API version 1, no error was returned. ([#4620](https://github.com/XRPLF/rippled/pull/4620))
|
||||
@@ -1,27 +0,0 @@
|
||||
# API Version 3
|
||||
|
||||
API version 3 is currently a **beta API**. It requires enabling `[beta_rpc_api]` in the rippled configuration to use. To use this API, clients specify `"api_version" : 3` in each request.
|
||||
|
||||
For info about how [API versioning](https://xrpl.org/request-formatting.html#api-versioning) works, including examples, please view the [XLS-22d spec](https://github.com/XRPLF/XRPL-Standards/discussions/54). For details about the implementation of API versioning, view the [implementation PR](https://github.com/XRPLF/rippled/pull/3155). API versioning ensures existing integrations and users continue to receive existing behavior, while those that request a higher API version will experience new behavior.
|
||||
|
||||
## Breaking Changes
|
||||
|
||||
### Modifications to `amm_info`
|
||||
|
||||
The order of error checks has been changed to provide more specific error messages. ([#4924](https://github.com/XRPLF/rippled/pull/4924))
|
||||
|
||||
- **Before (API v2)**: When sending an invalid account or asset to `amm_info` while other parameters are not set as expected, the method returns a generic `rpcINVALID_PARAMS` error.
|
||||
- **After (API v3)**: The same scenario returns a more specific error: `rpcISSUE_MALFORMED` for malformed assets or `rpcACT_MALFORMED` for malformed accounts.
|
||||
|
||||
### Modifications to `ledger_entry`
|
||||
|
||||
Added support for string shortcuts to look up fixed-location ledger entries using the `"index"` parameter. ([#5644](https://github.com/XRPLF/rippled/pull/5644))
|
||||
|
||||
In API version 3, the following string values can be used with the `"index"` parameter:
|
||||
|
||||
- `"index": "amendments"` - Returns the `Amendments` ledger entry
|
||||
- `"index": "fee"` - Returns the `FeeSettings` ledger entry
|
||||
- `"index": "nunl"` - Returns the `NegativeUNL` ledger entry
|
||||
- `"index": "hashes"` - Returns the "short" `LedgerHashes` ledger entry (recent ledger hashes)
|
||||
|
||||
These shortcuts are only available in API version 3 and later. In API versions 1 and 2, these string values would result in an error.
|
||||
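For illustration, a request using one of these shortcuts might be assembled as below. This is a sketch using jsoncpp's `Json::Value`; the surrounding request layout follows the usual JSON-RPC format.

```cpp
#include <json/json.h>

// Sketch: an API version 3 ledger_entry request using a string shortcut.
Json::Value makeLedgerEntryRequest()
{
    Json::Value request;
    request["method"] = "ledger_entry";

    Json::Value params;
    params["api_version"] = 3;
    params["index"] = "amendments";  // or "fee", "nunl", "hashes"
    params["ledger_index"] = "validated";

    request["params"].append(params);
    return request;
}
```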
95
BUILD.md
@@ -1,5 +1,5 @@
|
||||
| :warning: **WARNING** :warning: |
|
||||
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| :warning: **WARNING** :warning:
|
||||
|---|
|
||||
| These instructions assume you have a C++ development environment ready with Git, Python, Conan, CMake, and a C++ compiler. For help setting one up on Linux, macOS, or Windows, [see this guide](./docs/build/environment.md). |
|
||||
|
||||
> These instructions also assume a basic familiarity with Conan and CMake.
|
||||
@@ -148,8 +148,7 @@ function extract_version {
|
||||
}
|
||||
|
||||
# Define which recipes to export.
|
||||
recipes=('ed25519' 'grpc' 'nudb' 'openssl' 'secp256k1' 'snappy' 'soci')
|
||||
folders=('all' 'all' 'all' '3.x.x' 'all' 'all' 'all')
|
||||
recipes=(ed25519 grpc secp256k1 snappy soci)
|
||||
|
||||
# Selectively check out the recipes from our CCI fork.
|
||||
cd external
|
||||
@@ -158,24 +157,20 @@ cd conan-center-index
|
||||
git init
|
||||
git remote add origin git@github.com:XRPLF/conan-center-index.git
|
||||
git sparse-checkout init
|
||||
for ((index = 1; index <= ${#recipes[@]}; index++)); do
|
||||
recipe=${recipes[index]}
|
||||
folder=${folders[index]}
|
||||
echo "Checking out recipe '${recipe}' from folder '${folder}'..."
|
||||
git sparse-checkout add recipes/${recipe}/${folder}
|
||||
for recipe in ${recipes[@]}; do
|
||||
echo "Checking out ${recipe}..."
|
||||
git sparse-checkout add recipes/${recipe}/all
|
||||
done
|
||||
git fetch origin master
|
||||
git checkout master
|
||||
cd ../..
|
||||
|
||||
# Export the recipes into the local cache.
|
||||
for ((index = 1; index <= ${#recipes[@]}; index++)); do
|
||||
recipe=${recipes[index]}
|
||||
folder=${folders[index]}
|
||||
for recipe in ${recipes[@]}; do
|
||||
version=$(extract_version ${recipe})
|
||||
echo "Exporting '${recipe}/${version}' from '${recipe}/${folder}'..."
|
||||
echo "Exporting ${recipe}/${version}..."
|
||||
conan export --version $(extract_version ${recipe}) \
|
||||
external/conan-center-index/recipes/${recipe}/${folder}
|
||||
external/conan-center-index/recipes/${recipe}/all
|
||||
done
|
||||
```
|
||||
|
||||
@@ -368,36 +363,6 @@ The workaround for this error is to add two lines to your profile:
|
||||
tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
|
||||
```
|
||||
|
||||
### Set Up Ccache
|
||||
|
||||
To speed up repeated compilations, we recommend that you install
|
||||
[ccache](https://ccache.dev), a tool that wraps your compiler so that it can
|
||||
cache build objects locally.
|
||||
|
||||
#### Linux
|
||||
|
||||
You can install it using the package manager, e.g. `sudo apt install ccache`
|
||||
(Ubuntu) or `sudo dnf install ccache` (RHEL).
|
||||
|
||||
#### macOS
|
||||
|
||||
You can install it using Homebrew, i.e. `brew install ccache`.
|
||||
|
||||
#### Windows
|
||||
|
||||
You can install it using Chocolatey, i.e. `choco install ccache`. If you already
|
||||
have Ccache installed, then `choco upgrade ccache` will update it to the latest
|
||||
version. However, if you see an error such as:
|
||||
|
||||
```
|
||||
terminate called after throwing an instance of 'std::bad_alloc'
|
||||
what(): std::bad_alloc
|
||||
C:\Program Files\Microsoft Visual Studio\2022\Community\MSBuild\Microsoft\VC\v170\Microsoft.CppCommon.targets(617,5): error MSB6006: "cl.exe" exited with code 3.
|
||||
```
|
||||
|
||||
then please install a specific version of Ccache that we know works, via: `choco
|
||||
install ccache --version 4.11.3 --allow-downgrade`.
|
||||
|
||||
### Build and Test
|
||||
|
||||
1. Create a build directory and move into it.
|
||||
@@ -553,37 +518,23 @@ stored inside the build directory, as either of:
|
||||
- file named `coverage.`_extension_, with a suitable extension for the report format, or
|
||||
- directory named `coverage`, with the `index.html` and other files inside, for the `html-details` or `html-nested` report formats.
|
||||
|
||||
## Sanitizers
|
||||
|
||||
To build dependencies and xrpld with sanitizer instrumentation, set the
|
||||
`SANITIZERS` environment variable (only once before running conan and cmake) and use the `sanitizers` profile in conan:
|
||||
|
||||
```bash
|
||||
export SANITIZERS=address,undefinedbehavior
|
||||
|
||||
conan install .. --output-folder . --profile:all sanitizers --build missing --settings build_type=Debug
|
||||
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Debug -Dxrpld=ON -Dtests=ON ..
|
||||
```
|
||||
|
||||
See [Sanitizers docs](./docs/build/sanitizers.md) for more details.
|
||||
|
||||
## Options
|
||||
|
||||
| Option | Default Value | Description |
|
||||
| ---------- | ------------- | -------------------------------------------------------------- |
|
||||
| `assert` | OFF | Enable assertions. |
|
||||
| `coverage` | OFF | Prepare the coverage report. |
|
||||
| `tests` | OFF | Build tests. |
|
||||
| `unity` | OFF | Configure a unity build. |
|
||||
| `xrpld` | OFF | Build the xrpld application, and not just the libxrpl library. |
|
||||
| `werr` | OFF | Treat compilation warnings as errors |
|
||||
| `wextra` | OFF | Enable additional compilation warnings |
|
||||
| Option | Default Value | Description |
|
||||
| ---------- | ------------- | ------------------------------------------------------------------ |
|
||||
| `assert` | OFF | Enable assertions. |
|
||||
| `coverage` | OFF | Prepare the coverage report. |
|
||||
| `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. |
|
||||
| `tests` | OFF | Build tests. |
|
||||
| `unity` | OFF | Configure a unity build. |
|
||||
| `xrpld` | OFF | Build the xrpld application, and not just the libxrpl library. |
|
||||
| `werr` | OFF | Treat compilation warnings as errors |
|
||||
| `wextra` | OFF | Enable additional compilation warnings |
|
||||
|
||||
[Unity builds][5] may be faster for the first build (at the cost of much more
|
||||
memory) since they concatenate sources into fewer translation units. Non-unity
|
||||
builds may be faster for incremental builds, and can be helpful for detecting
|
||||
`#include` omissions.
|
||||
[Unity builds][5] may be faster for the first build
|
||||
(at the cost of much more memory) since they concatenate sources into fewer
|
||||
translation units. Non-unity builds may be faster for incremental builds,
|
||||
and can be helpful for detecting `#include` omissions.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
|
||||
168
CMakeLists.txt
@@ -1,16 +1,14 @@
|
||||
cmake_minimum_required(VERSION 3.16)
|
||||
|
||||
if (POLICY CMP0074)
|
||||
cmake_policy(SET CMP0074 NEW)
|
||||
endif ()
|
||||
if (POLICY CMP0077)
|
||||
cmake_policy(SET CMP0077 NEW)
|
||||
endif ()
|
||||
if(POLICY CMP0074)
|
||||
cmake_policy(SET CMP0074 NEW)
|
||||
endif()
|
||||
if(POLICY CMP0077)
|
||||
cmake_policy(SET CMP0077 NEW)
|
||||
endif()
|
||||
|
||||
# Fix "unrecognized escape" issues when passing CMAKE_MODULE_PATH on Windows.
|
||||
if (DEFINED CMAKE_MODULE_PATH)
|
||||
file(TO_CMAKE_PATH "${CMAKE_MODULE_PATH}" CMAKE_MODULE_PATH)
|
||||
endif ()
|
||||
file(TO_CMAKE_PATH "${CMAKE_MODULE_PATH}" CMAKE_MODULE_PATH)
|
||||
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
|
||||
|
||||
project(xrpl)
|
||||
@@ -18,130 +16,136 @@ set(CMAKE_CXX_EXTENSIONS OFF)
|
||||
set(CMAKE_CXX_STANDARD 20)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||
|
||||
include(CompilationEnv)
|
||||
|
||||
if (is_gcc)
|
||||
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
|
||||
# GCC-specific fixes
|
||||
add_compile_options(-Wno-unknown-pragmas -Wno-subobject-linkage)
|
||||
# -Wno-subobject-linkage can be removed when we upgrade GCC version to at least 13.3
|
||||
elseif (is_clang)
|
||||
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
|
||||
# Clang-specific fixes
|
||||
add_compile_options(-Wno-unknown-warning-option) # Ignore unknown warning options
|
||||
elseif (is_msvc)
|
||||
elseif(MSVC)
|
||||
# MSVC-specific fixes
|
||||
add_compile_options(/wd4068) # Ignore unknown pragmas
|
||||
endif ()
|
||||
|
||||
# Enable ccache to speed up builds.
|
||||
include(Ccache)
|
||||
endif()
|
||||
|
||||
# make GIT_COMMIT_HASH define available to all sources
|
||||
find_package(Git)
|
||||
if (Git_FOUND)
|
||||
if(Git_FOUND)
|
||||
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse HEAD
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
|
||||
if (gch)
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
|
||||
if(gch)
|
||||
set(GIT_COMMIT_HASH "${gch}")
|
||||
message(STATUS gch: ${GIT_COMMIT_HASH})
|
||||
add_definitions(-DGIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse --abbrev-ref HEAD
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gb)
|
||||
if (gb)
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gb)
|
||||
if(gb)
|
||||
set(GIT_BRANCH "${gb}")
|
||||
message(STATUS gb: ${GIT_BRANCH})
|
||||
add_definitions(-DGIT_BRANCH="${GIT_BRANCH}")
|
||||
endif ()
|
||||
endif () # git
|
||||
endif()
|
||||
endif() #git
|
||||
|
||||
if (thread_safety_analysis)
|
||||
add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS
|
||||
-DXRPL_ENABLE_THREAD_SAFETY_ANNOTATIONS)
|
||||
add_compile_options("-stdlib=libc++")
|
||||
add_link_options("-stdlib=libc++")
|
||||
endif ()
|
||||
if(thread_safety_analysis)
|
||||
add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DXRPL_ENABLE_THREAD_SAFETY_ANNOTATIONS)
|
||||
add_compile_options("-stdlib=libc++")
|
||||
add_link_options("-stdlib=libc++")
|
||||
endif()
|
||||
|
||||
include(CheckCXXCompilerFlag)
|
||||
include(FetchContent)
|
||||
include(ExternalProject)
|
||||
include(CMakeFuncs) # must come *after* ExternalProject b/c it overrides one function in EP
|
||||
include (CheckCXXCompilerFlag)
|
||||
include (FetchContent)
|
||||
include (ExternalProject)
|
||||
include (CMakeFuncs) # must come *after* ExternalProject b/c it overrides one function in EP
|
||||
if (target)
|
||||
message(FATAL_ERROR "The target option has been removed - use native cmake options to control build")
|
||||
message (FATAL_ERROR "The target option has been removed - use native cmake options to control build")
|
||||
endif ()
|
||||
|
||||
include(XrplSanity)
|
||||
include(XrplVersion)
|
||||
include(XrplSettings)
|
||||
# this check has to remain in the top-level cmake because of the early return statement
|
||||
# this check has to remain in the top-level cmake
|
||||
# because of the early return statement
|
||||
if (packages_only)
|
||||
if (NOT TARGET rpm)
|
||||
message(FATAL_ERROR "packages_only requested, but targets were not created - is docker installed?")
|
||||
endif ()
|
||||
return()
|
||||
if (NOT TARGET rpm)
|
||||
message (FATAL_ERROR "packages_only requested, but targets were not created - is docker installed?")
|
||||
endif()
|
||||
return ()
|
||||
endif ()
|
||||
include(XrplCompiler)
|
||||
include(XrplSanitizers)
|
||||
include(XrplInterface)
|
||||
|
||||
option(only_docs "Include only the docs target?" FALSE)
|
||||
include(XrplDocs)
|
||||
if (only_docs)
|
||||
return()
|
||||
endif ()
|
||||
if(only_docs)
|
||||
return()
|
||||
endif()
|
||||
|
||||
###
|
||||
|
||||
include(deps/Boost)
|
||||
find_package(OpenSSL 1.1.1 REQUIRED)
|
||||
set_target_properties(OpenSSL::SSL PROPERTIES
|
||||
INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
|
||||
)
|
||||
|
||||
add_subdirectory(external/antithesis-sdk)
|
||||
find_package(date REQUIRED)
|
||||
find_package(ed25519 REQUIRED)
|
||||
find_package(gRPC REQUIRED)
|
||||
find_package(LibArchive REQUIRED)
|
||||
find_package(lz4 REQUIRED)
|
||||
find_package(nudb REQUIRED)
|
||||
find_package(OpenSSL REQUIRED)
|
||||
find_package(secp256k1 REQUIRED)
|
||||
# Target names with :: are not allowed in a generator expression.
|
||||
# We need to pull the include directories and imported location properties
|
||||
# from separate targets.
|
||||
find_package(LibArchive REQUIRED)
|
||||
find_package(SOCI REQUIRED)
|
||||
find_package(SQLite3 REQUIRED)
|
||||
find_package(xxHash REQUIRED)
|
||||
|
||||
target_link_libraries(
|
||||
xrpl_libs
|
||||
INTERFACE ed25519::ed25519
|
||||
lz4::lz4
|
||||
OpenSSL::Crypto
|
||||
OpenSSL::SSL
|
||||
secp256k1::secp256k1
|
||||
soci::soci
|
||||
SQLite::SQLite3)
|
||||
|
||||
option(rocksdb "Enable RocksDB" ON)
|
||||
if (rocksdb)
|
||||
find_package(RocksDB REQUIRED)
|
||||
set_target_properties(RocksDB::rocksdb PROPERTIES INTERFACE_COMPILE_DEFINITIONS XRPL_ROCKSDB_AVAILABLE=1)
|
||||
target_link_libraries(xrpl_libs INTERFACE RocksDB::rocksdb)
|
||||
endif ()
|
||||
if(rocksdb)
|
||||
find_package(RocksDB REQUIRED)
|
||||
set_target_properties(RocksDB::rocksdb PROPERTIES
|
||||
INTERFACE_COMPILE_DEFINITIONS XRPL_ROCKSDB_AVAILABLE=1
|
||||
)
|
||||
target_link_libraries(xrpl_libs INTERFACE RocksDB::rocksdb)
|
||||
endif()
|
||||
|
||||
find_package(date REQUIRED)
|
||||
find_package(ed25519 REQUIRED)
|
||||
find_package(nudb REQUIRED)
|
||||
find_package(secp256k1 REQUIRED)
|
||||
find_package(xxHash REQUIRED)
|
||||
|
||||
target_link_libraries(xrpl_libs INTERFACE
|
||||
ed25519::ed25519
|
||||
lz4::lz4
|
||||
OpenSSL::Crypto
|
||||
OpenSSL::SSL
|
||||
secp256k1::secp256k1
|
||||
soci::soci
|
||||
SQLite::SQLite3
|
||||
)
|
||||
|
||||
# Work around changes to Conan recipe for now.
|
||||
if (TARGET nudb::core)
|
||||
set(nudb nudb::core)
|
||||
elseif (TARGET NuDB::nudb)
|
||||
set(nudb NuDB::nudb)
|
||||
else ()
|
||||
message(FATAL_ERROR "unknown nudb target")
|
||||
endif ()
|
||||
if(TARGET nudb::core)
|
||||
set(nudb nudb::core)
|
||||
elseif(TARGET NuDB::nudb)
|
||||
set(nudb NuDB::nudb)
|
||||
else()
|
||||
message(FATAL_ERROR "unknown nudb target")
|
||||
endif()
|
||||
target_link_libraries(xrpl_libs INTERFACE ${nudb})
|
||||
|
||||
if (coverage)
|
||||
include(XrplCov)
|
||||
endif ()
|
||||
if(coverage)
|
||||
include(XrplCov)
|
||||
endif()
|
||||
|
||||
set(PROJECT_EXPORT_SET XrplExports)
|
||||
include(XrplCore)
|
||||
include(XrplInstall)
|
||||
include(XrplValidatorKeys)
|
||||
|
||||
if (tests)
|
||||
include(CTest)
|
||||
add_subdirectory(src/tests/libxrpl)
|
||||
endif ()
|
||||
if(tests)
|
||||
include(CTest)
|
||||
add_subdirectory(src/tests/libxrpl)
|
||||
add_subdirectory(src/doctest)
|
||||
endif()
|
||||
|
||||
@@ -555,16 +555,16 @@ Rippled uses a linear workflow model that can be summarized as:
|
||||
git fetch --multiple upstreams user1 user2 user3 [...]
|
||||
git checkout -B release-next --no-track upstream/develop
|
||||
|
||||
# Only do an ff-only merge if pr-branch1 is either already
|
||||
# Only do an ff-only merge if prbranch1 is either already
|
||||
# squashed, or needs to be merged with separate commits,
|
||||
# and has no merge commits.
|
||||
# Use -S on the ff-only merge if pr-branch1 isn't signed.
|
||||
git merge [-S] --ff-only user1/pr-branch1
|
||||
# Use -S on the ff-only merge if prbranch1 isn't signed.
|
||||
git merge [-S] --ff-only user1/prbranch1
|
||||
|
||||
git merge --squash user2/pr-branch2
|
||||
git merge --squash user2/prbranch2
|
||||
git commit -S # Use the commit message provided on the PR
|
||||
|
||||
git merge --squash user3/pr-branch3
|
||||
git merge --squash user3/prbranch3
|
||||
git commit -S # Use the commit message provided on the PR
|
||||
|
||||
[...]
|
||||
@@ -872,12 +872,11 @@ git push --delete upstream-push master-next
|
||||
11. [Create a new release on
|
||||
Github](https://github.com/XRPLF/rippled/releases). Be sure that
|
||||
"Set as the latest release" is checked.
|
||||
12. Open a PR to update the [API-CHANGELOG](API-CHANGELOG.md) and `API-VERSION-[n].md` with the changes for this release (if any are missing).
|
||||
13. Finally, [reverse merge the release into `develop`](#follow-up-reverse-merge).
|
||||
12. Finally [reverse merge the release into `develop`](#follow-up-reverse-merge).
|
||||
|
||||
#### Special cases: point releases, hotfixes, etc.
|
||||
|
||||
On occasion, a bug or issue is discovered in a version that already
|
||||
On occassion, a bug or issue is discovered in a version that already
|
||||
had a final release. Most of the time, development will have started
|
||||
on the next version, and will usually have changes in `develop`
|
||||
and often in `release`.
|
||||
|
||||
@@ -1,327 +0,0 @@
|
||||
# Architecture Analysis
|
||||
|
||||
> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
|
||||
> **Related**: [Design Decisions](./02-design-decisions.md) | [Implementation Strategy](./03-implementation-strategy.md)
|
||||
|
||||
---
|
||||
|
||||
## 1.1 Current rippled Architecture Overview
|
||||
|
||||
The rippled node software consists of several interconnected components that need instrumentation for distributed tracing:
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph rippled["rippled Node"]
|
||||
subgraph services["Core Services"]
|
||||
RPC["RPC Server<br/>(HTTP/WS/gRPC)"]
|
||||
Overlay["Overlay<br/>(P2P Network)"]
|
||||
Consensus["Consensus<br/>(RCLConsensus)"]
|
||||
end
|
||||
|
||||
JobQueue["JobQueue<br/>(Thread Pool)"]
|
||||
|
||||
subgraph processing["Processing Layer"]
|
||||
NetworkOPs["NetworkOPs<br/>(Tx Processing)"]
|
||||
LedgerMaster["LedgerMaster<br/>(Ledger Mgmt)"]
|
||||
NodeStore["NodeStore<br/>(Database)"]
|
||||
end
|
||||
|
||||
subgraph observability["Existing Observability"]
|
||||
PerfLog["PerfLog<br/>(JSON)"]
|
||||
Insight["Insight<br/>(StatsD)"]
|
||||
Logging["Logging<br/>(Journal)"]
|
||||
end
|
||||
|
||||
services --> JobQueue
|
||||
JobQueue --> processing
|
||||
end
|
||||
|
||||
style rippled fill:#f5f5f5,stroke:#333
|
||||
style services fill:#e3f2fd,stroke:#1976d2
|
||||
style processing fill:#e8f5e9,stroke:#388e3c
|
||||
style observability fill:#fff3e0,stroke:#f57c00
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 1.2 Key Components for Instrumentation
|
||||
|
||||
| Component | Location | Purpose | Trace Value |
|
||||
| ----------------- | ------------------------------------------ | ------------------------ | ---------------------------- |
|
||||
| **Overlay** | `src/xrpld/overlay/` | P2P communication | Message propagation timing |
|
||||
| **PeerImp** | `src/xrpld/overlay/detail/PeerImp.cpp` | Individual peer handling | Per-peer latency |
|
||||
| **RCLConsensus** | `src/xrpld/app/consensus/RCLConsensus.cpp` | Consensus algorithm | Round timing, phase analysis |
|
||||
| **NetworkOPs** | `src/xrpld/app/misc/NetworkOPs.cpp` | Transaction processing | Tx lifecycle tracking |
|
||||
| **ServerHandler** | `src/xrpld/rpc/detail/ServerHandler.cpp` | RPC entry point | Request latency |
|
||||
| **RPCHandler** | `src/xrpld/rpc/detail/RPCHandler.cpp` | Command execution | Per-command timing |
|
||||
| **JobQueue** | `src/xrpl/core/JobQueue.h` | Async task execution | Queue wait times |
|
||||
|
||||
---
|
||||
|
||||
## 1.3 Transaction Flow Diagram
|
||||
|
||||
Transaction flow spans multiple nodes in the network. Each node creates linked spans to form a distributed trace:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant Client
|
||||
participant PeerA as Peer A (Receive)
|
||||
participant PeerB as Peer B (Relay)
|
||||
participant PeerC as Peer C (Validate)
|
||||
|
||||
Client->>PeerA: 1. Submit TX
|
||||
|
||||
rect rgb(230, 245, 255)
|
||||
Note over PeerA: tx.receive SPAN START
|
||||
PeerA->>PeerA: HashRouter Deduplication
|
||||
PeerA->>PeerA: tx.validate (child span)
|
||||
end
|
||||
|
||||
PeerA->>PeerB: 2. Relay TX (with trace ctx)
|
||||
|
||||
rect rgb(230, 245, 255)
|
||||
Note over PeerB: tx.receive (linked span)
|
||||
end
|
||||
|
||||
PeerB->>PeerC: 3. Relay TX
|
||||
|
||||
rect rgb(230, 245, 255)
|
||||
Note over PeerC: tx.receive (linked span)
|
||||
PeerC->>PeerC: tx.process
|
||||
end
|
||||
|
||||
Note over Client,PeerC: DISTRIBUTED TRACE (same trace_id: abc123)
|
||||
```
|
||||
|
||||
### Trace Structure
|
||||
|
||||
```
|
||||
trace_id: abc123
|
||||
├── span: tx.receive (Peer A)
|
||||
│ ├── span: tx.validate
|
||||
│ └── span: tx.relay
|
||||
├── span: tx.receive (Peer B) [parent: Peer A]
|
||||
│ └── span: tx.relay
|
||||
└── span: tx.receive (Peer C) [parent: Peer B]
|
||||
└── span: tx.process
|
||||
```
|
||||
|
||||
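A hedged sketch of how a receiving node could continue the trace shown above with the opentelemetry-cpp API: the span context extracted from the relayed message becomes the parent of the local `tx.receive` span, so every hop shares one `trace_id`. The function, its inputs, and the tracer name are assumptions, not existing rippled code.

```cpp
#include <opentelemetry/trace/provider.h>

#include <string>

namespace trace_api = opentelemetry::trace;

// Sketch: start a tx.receive span parented to the span context carried in
// the incoming message, continuing the upstream trace.
void onTransactionReceived(
    trace_api::SpanContext const& remoteContext,  // extracted from the message
    std::string const& txHash)
{
    auto tracer =
        trace_api::Provider::GetTracerProvider()->GetTracer("rippled");

    trace_api::StartSpanOptions options;
    options.parent = remoteContext;  // same trace_id as the sending peer
    options.kind = trace_api::SpanKind::kConsumer;

    auto span = tracer->StartSpan("tx.receive", options);
    span->SetAttribute("xrpl.tx.hash", txHash);
    // ... validate and relay the transaction ...
    span->End();
}
```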
---
|
||||
|
||||
## 1.4 Consensus Round Flow
|
||||
|
||||
Consensus rounds are multi-phase operations that benefit significantly from tracing:
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph round["consensus.round (root span)"]
|
||||
attrs["Attributes:<br/>xrpl.consensus.ledger.seq = 12345678<br/>xrpl.consensus.mode = proposing<br/>xrpl.consensus.proposers = 35"]
|
||||
|
||||
subgraph open["consensus.phase.open"]
|
||||
open_desc["Duration: ~3s<br/>Waiting for transactions"]
|
||||
end
|
||||
|
||||
subgraph establish["consensus.phase.establish"]
|
||||
est_attrs["proposals_received = 28<br/>disputes_resolved = 3"]
|
||||
est_children["├── consensus.proposal.receive (×28)<br/>├── consensus.proposal.send (×1)<br/>└── consensus.dispute.resolve (×3)"]
|
||||
end
|
||||
|
||||
subgraph accept["consensus.phase.accept"]
|
||||
acc_attrs["transactions_applied = 150<br/>ledger.hash = DEF456..."]
|
||||
acc_children["├── ledger.build<br/>└── ledger.validate"]
|
||||
end
|
||||
|
||||
attrs --> open
|
||||
open --> establish
|
||||
establish --> accept
|
||||
end
|
||||
|
||||
style round fill:#fff8e1,stroke:#ffc107
|
||||
style open fill:#e3f2fd,stroke:#1976d2
|
||||
style establish fill:#e8f5e9,stroke:#388e3c
|
||||
style accept fill:#fce4ec,stroke:#e91e63
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 1.5 RPC Request Flow
|
||||
|
||||
RPC requests support W3C Trace Context headers for distributed tracing across services:
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph request["rpc.request (root span)"]
|
||||
http["HTTP Request<br/>POST /<br/>traceparent: 00-abc123...-def456...-01"]
|
||||
|
||||
attrs["Attributes:<br/>http.method = POST<br/>net.peer.ip = 192.168.1.100<br/>xrpl.rpc.command = submit"]
|
||||
|
||||
subgraph enqueue["jobqueue.enqueue"]
|
||||
job_attr["xrpl.job.type = jtCLIENT_RPC"]
|
||||
end
|
||||
|
||||
subgraph command["rpc.command.submit"]
|
||||
cmd_attrs["xrpl.rpc.version = 2<br/>xrpl.rpc.role = user"]
|
||||
cmd_children["├── tx.deserialize<br/>├── tx.validate_local<br/>└── tx.submit_to_network"]
|
||||
end
|
||||
|
||||
response["Response: 200 OK<br/>Duration: 45ms"]
|
||||
|
||||
http --> attrs
|
||||
attrs --> enqueue
|
||||
enqueue --> command
|
||||
command --> response
|
||||
end
|
||||
|
||||
style request fill:#e8f5e9,stroke:#388e3c
|
||||
style enqueue fill:#e3f2fd,stroke:#1976d2
|
||||
style command fill:#fff3e0,stroke:#ff9800
|
||||
```
|
||||
|
||||
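A minimal sketch of extracting the `traceparent` header with opentelemetry-cpp's W3C propagator. The `HeaderCarrier` adapter over the server's header map is hypothetical; only the propagator and carrier interfaces come from the SDK.

```cpp
#include <opentelemetry/context/propagation/text_map_propagator.h>
#include <opentelemetry/trace/propagation/http_trace_context.h>

#include <map>
#include <string>

// Hypothetical adapter exposing HTTP request headers to the propagator.
class HeaderCarrier : public opentelemetry::context::propagation::TextMapCarrier
{
public:
    explicit HeaderCarrier(std::map<std::string, std::string> const& headers)
        : headers_(headers)
    {
    }

    opentelemetry::nostd::string_view
    Get(opentelemetry::nostd::string_view key) const noexcept override
    {
        auto const it = headers_.find(std::string(key));
        return it == headers_.end() ? "" : it->second;
    }

    void Set(opentelemetry::nostd::string_view,
             opentelemetry::nostd::string_view) noexcept override
    {
        // Extraction only; nothing to inject here.
    }

private:
    std::map<std::string, std::string> const& headers_;
};

// Usage sketch inside the RPC entry point:
//   opentelemetry::trace::propagation::HttpTraceContext propagator;
//   HeaderCarrier carrier(requestHeaders);
//   auto current = opentelemetry::context::RuntimeContext::GetCurrent();
//   auto withRemote = propagator.Extract(carrier, current);
```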
---
|
||||
|
||||
## 1.6 Key Trace Points
|
||||
|
||||
The following table identifies priority instrumentation points across the codebase:
|
||||
|
||||
| Category | Span Name | File | Method | Priority |
|
||||
| --------------- | ---------------------- | -------------------- | ---------------------- | -------- |
|
||||
| **Transaction** | `tx.receive` | `PeerImp.cpp` | `handleTransaction()` | High |
|
||||
| **Transaction** | `tx.validate` | `NetworkOPs.cpp` | `processTransaction()` | High |
|
||||
| **Transaction** | `tx.process` | `NetworkOPs.cpp` | `doTransactionSync()` | High |
|
||||
| **Transaction** | `tx.relay` | `OverlayImpl.cpp` | `relay()` | Medium |
|
||||
| **Consensus** | `consensus.round` | `RCLConsensus.cpp` | `startRound()` | High |
|
||||
| **Consensus** | `consensus.phase.*` | `Consensus.h` | `timerEntry()` | High |
|
||||
| **Consensus** | `consensus.proposal.*` | `RCLConsensus.cpp` | `peerProposal()` | Medium |
|
||||
| **RPC** | `rpc.request` | `ServerHandler.cpp` | `onRequest()` | High |
|
||||
| **RPC** | `rpc.command.*` | `RPCHandler.cpp` | `doCommand()` | High |
|
||||
| **Peer** | `peer.connect` | `OverlayImpl.cpp` | `onHandoff()` | Low |
|
||||
| **Peer** | `peer.message.*` | `PeerImp.cpp` | `onMessage()` | Low |
|
||||
| **Ledger** | `ledger.acquire` | `InboundLedgers.cpp` | `acquire()` | Medium |
|
||||
| **Ledger** | `ledger.build` | `RCLConsensus.cpp` | `buildLCL()` | High |
|
||||
|
||||
---
|
||||
|
||||
## 1.7 Instrumentation Priority
|
||||
|
||||
```mermaid
|
||||
quadrantChart
|
||||
title Instrumentation Priority Matrix
|
||||
x-axis Low Complexity --> High Complexity
|
||||
y-axis Low Value --> High Value
|
||||
quadrant-1 Implement First
|
||||
quadrant-2 Plan Carefully
|
||||
quadrant-3 Quick Wins
|
||||
quadrant-4 Consider Later
|
||||
|
||||
RPC Tracing: [0.3, 0.85]
|
||||
Transaction Tracing: [0.6, 0.95]
|
||||
Consensus Tracing: [0.75, 0.9]
|
||||
Peer Message Tracing: [0.4, 0.3]
|
||||
Ledger Acquisition: [0.5, 0.6]
|
||||
JobQueue Tracing: [0.35, 0.5]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 1.8 Observable Outcomes
|
||||
|
||||
After implementing OpenTelemetry, operators and developers will gain visibility into the following:
|
||||
|
||||
### 1.8.1 What You Will See: Traces
|
||||
|
||||
| Trace Type | Description | Example Query in Grafana/Tempo |
|
||||
| -------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
|
||||
| **Transaction Lifecycle** | Full journey from RPC submission through validation, relay, consensus, and ledger inclusion | `{service.name="rippled" && xrpl.tx.hash="ABC123..."}` |
|
||||
| **Cross-Node Propagation** | Transaction path across multiple rippled nodes with timing | `{xrpl.tx.relay_count > 0}` |
|
||||
| **Consensus Rounds** | Complete round with all phases (open, establish, accept) | `{span.name=~"consensus.round.*"}` |
|
||||
| **RPC Request Processing** | Individual command execution with timing breakdown | `{xrpl.rpc.command="account_info"}` |
|
||||
| **Ledger Acquisition** | Peer-to-peer ledger data requests and responses | `{span.name="ledger.acquire"}` |
|
||||
|
||||
### 1.8.2 What You Will See: Metrics (Derived from Traces)
|
||||
|
||||
| Metric | Description | Dashboard Panel |
|
||||
| ----------------------------- | -------------------------------------- | --------------------------- |
|
||||
| **RPC Latency (p50/p95/p99)** | Response time distribution per command | Heatmap by command |
|
||||
| **Transaction Throughput** | Transactions processed per second | Time series graph |
|
||||
| **Consensus Round Duration** | Time to complete consensus phases | Histogram |
|
||||
| **Cross-Node Latency** | Time for transaction to reach N nodes | Line chart with percentiles |
|
||||
| **Error Rate** | Failed transactions/RPC calls by type | Stacked bar chart |
|
||||
|
||||
### 1.8.3 Concrete Dashboard Examples
|
||||
|
||||
**Transaction Trace View (Jaeger/Tempo):**
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Trace: abc123... (Transaction Submission) Duration: 847ms │
|
||||
├─────────────────────────────────────────────────────────────────────────────────┤
|
||||
│ ├── rpc.request [ServerHandler] ████░░░░░░ 45ms │
|
||||
│ │ └── rpc.command.submit [RPCHandler] ████░░░░░░ 42ms │
|
||||
│ │ └── tx.receive [NetworkOPs] ███░░░░░░░ 35ms │
|
||||
│ │ ├── tx.validate [TxQ] █░░░░░░░░░ 8ms │
|
||||
│ │ └── tx.relay [Overlay] ██░░░░░░░░ 15ms │
|
||||
│ │ ├── tx.receive [Node-B] █████░░░░░ 52ms │
|
||||
│ │ │ └── tx.relay [Node-B] ██░░░░░░░░ 18ms │
|
||||
│ │ └── tx.receive [Node-C] ██████░░░░ 65ms │
|
||||
│ └── consensus.round [RCLConsensus] ████████░░ 720ms │
|
||||
│ ├── consensus.phase.open ██░░░░░░░░ 180ms │
|
||||
│ ├── consensus.phase.establish █████░░░░░ 480ms │
|
||||
│ └── consensus.phase.accept █░░░░░░░░░ 60ms │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**RPC Performance Dashboard Panel:**
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ RPC Command Latency (Last 1 Hour) │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ Command │ p50 │ p95 │ p99 │ Errors │ Rate │
|
||||
│──────────────────┼────────┼────────┼────────┼────────┼──────│
|
||||
│ account_info │ 12ms │ 45ms │ 89ms │ 0.1% │ 150/s│
|
||||
│ submit │ 35ms │ 120ms │ 250ms │ 2.3% │ 45/s│
|
||||
│ ledger │ 8ms │ 25ms │ 55ms │ 0.0% │ 80/s│
|
||||
│ tx │ 15ms │ 50ms │ 100ms │ 0.5% │ 60/s│
|
||||
│ server_info │ 5ms │ 12ms │ 20ms │ 0.0% │ 200/s│
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Consensus Health Dashboard Panel:**
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Consensus Round Duration (Last 24 Hours) │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ 5s ┤ * │
|
||||
│ │ * * * │
|
||||
│ 4s ┤ * ** * * │
|
||||
│ │ * * * * ** * │
|
||||
│ 3s ┤ * * * * * * * * │
|
||||
│ │ * * * * * * * * * │
|
||||
│ 2s ┤* ** * * ** * * * * │
|
||||
│ │ ** ** ** │
|
||||
│ 1s ┤────────────────────────────────────────────────── │
|
||||
│ └──────────────────────────────────────────────────── │
|
||||
│ 00:00 04:00 08:00 12:00 16:00 20:00 24:00 │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### 1.8.4 Operator Actionable Insights
|
||||
|
||||
| Scenario | What You'll See | Action |
|
||||
| --------------------- | ---------------------------------------------------------------------------- | -------------------------------- |
|
||||
| **Slow RPC** | Span showing which phase is slow (parsing, execution, serialization) | Optimize specific code path |
|
||||
| **Transaction Stuck** | Trace stops at validation; error attribute shows reason | Fix transaction parameters |
|
||||
| **Consensus Delay** | Phase.establish taking too long; proposer attribute shows missing validators | Investigate network connectivity |
|
||||
| **Memory Spike** | Large batch of spans correlating with memory increase | Tune batch_size or sampling |
|
||||
| **Network Partition** | Traces missing cross-node links for specific peer | Check peer connectivity |
|
||||
|
||||
### 1.8.5 Developer Debugging Workflow
|
||||
|
||||
1. **Find Transaction**: Query by `xrpl.tx.hash` to get full trace
|
||||
2. **Identify Bottleneck**: Look at span durations to find slowest component
|
||||
3. **Check Attributes**: Review `xrpl.tx.validity`, `xrpl.rpc.status` for errors
|
||||
4. **Correlate Logs**: Use `trace_id` to find related PerfLog entries
|
||||
5. **Compare Nodes**: Filter by `service.instance.id` to compare behavior across nodes
|
||||
|
||||
---
|
||||
|
||||
*Next: [Design Decisions](./02-design-decisions.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*
|
||||
@@ -1,319 +0,0 @@
|
||||
# Design Decisions
|
||||
|
||||
> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
|
||||
> **Related**: [Architecture Analysis](./01-architecture-analysis.md) | [Code Samples](./04-code-samples.md)
|
||||
|
||||
---
|
||||
|
||||
## 2.1 OpenTelemetry Components
|
||||
|
||||
### 2.1.1 SDK Selection
|
||||
|
||||
**Primary Choice**: OpenTelemetry C++ SDK (`opentelemetry-cpp`)
|
||||
|
||||
| Component | Purpose | Required |
|
||||
| --------------------------------------- | ---------------------- | ----------- |
|
||||
| `opentelemetry-cpp::api` | Tracing API headers | Yes |
|
||||
| `opentelemetry-cpp::sdk` | SDK implementation | Yes |
|
||||
| `opentelemetry-cpp::ext` | Extensions (exporters) | Yes |
|
||||
| `opentelemetry-cpp::otlp_grpc_exporter` | OTLP/gRPC export | Recommended |
|
||||
| `opentelemetry-cpp::otlp_http_exporter` | OTLP/HTTP export | Alternative |
|
||||
|
||||
### 2.1.2 Instrumentation Strategy
|
||||
|
||||
**Manual Instrumentation** (recommended):
|
||||
|
||||
| Approach | Pros | Cons |
|
||||
| ---------- | ----------------------------------------------------------------- | ------------------------------------------------------- |
|
||||
| **Manual** | Precise control, optimized placement, rippled-specific attributes | More development effort |
|
||||
| **Auto** | Less code, automatic coverage | Less control, potential overhead, limited customization |
|
||||
|
||||
---
|
||||
|
||||
## 2.2 Exporter Configuration
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph nodes["rippled Nodes"]
|
||||
node1["rippled<br/>Node 1"]
|
||||
node2["rippled<br/>Node 2"]
|
||||
node3["rippled<br/>Node 3"]
|
||||
end
|
||||
|
||||
collector["OpenTelemetry<br/>Collector<br/>(sidecar or standalone)"]
|
||||
|
||||
subgraph backends["Observability Backends"]
|
||||
jaeger["Jaeger<br/>(Dev)"]
|
||||
tempo["Tempo<br/>(Prod)"]
|
||||
elastic["Elastic<br/>APM"]
|
||||
end
|
||||
|
||||
node1 -->|"OTLP/gRPC<br/>:4317"| collector
|
||||
node2 -->|"OTLP/gRPC<br/>:4317"| collector
|
||||
node3 -->|"OTLP/gRPC<br/>:4317"| collector
|
||||
|
||||
collector --> jaeger
|
||||
collector --> tempo
|
||||
collector --> elastic
|
||||
|
||||
style nodes fill:#e3f2fd,stroke:#1976d2
|
||||
style backends fill:#e8f5e9,stroke:#388e3c
|
||||
style collector fill:#fff3e0,stroke:#ff9800
|
||||
```
|
||||
|
||||
### 2.2.1 OTLP/gRPC (Recommended)
|
||||
|
||||
```cpp
|
||||
// Configuration for OTLP over gRPC
|
||||
namespace otlp = opentelemetry::exporter::otlp;
|
||||
|
||||
otlp::OtlpGrpcExporterOptions opts;
|
||||
opts.endpoint = "localhost:4317";
|
||||
opts.use_ssl_credentials = true;
|
||||
opts.ssl_credentials_cacert_path = "/path/to/ca.crt";
|
||||
```
|
||||
|
||||
### 2.2.2 OTLP/HTTP (Alternative)
|
||||
|
||||
```cpp
|
||||
// Configuration for OTLP over HTTP
|
||||
namespace otlp = opentelemetry::exporter::otlp;
|
||||
|
||||
otlp::OtlpHttpExporterOptions opts;
|
||||
opts.url = "http://localhost:4318/v1/traces";
|
||||
opts.content_type = otlp::HttpRequestContentType::kJson; // or kBinary
|
||||
```
|
||||
|
||||
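For completeness, either set of exporter options above can be wired into a batching tracer provider roughly as follows. This is a sketch against opentelemetry-cpp's factory helpers; exact factory names, return types, and namespaces may differ between SDK versions.

```cpp
#include <opentelemetry/exporters/otlp/otlp_grpc_exporter_factory.h>
#include <opentelemetry/nostd/shared_ptr.h>
#include <opentelemetry/sdk/trace/batch_span_processor_factory.h>
#include <opentelemetry/sdk/trace/batch_span_processor_options.h>
#include <opentelemetry/sdk/trace/tracer_provider_factory.h>
#include <opentelemetry/trace/provider.h>

#include <memory>
#include <utility>

namespace otlp      = opentelemetry::exporter::otlp;
namespace trace_sdk = opentelemetry::sdk::trace;
namespace trace_api = opentelemetry::trace;

// Sketch: turn the exporter options above into the global tracer provider.
void initTracing(otlp::OtlpGrpcExporterOptions const& opts)
{
    auto exporter  = otlp::OtlpGrpcExporterFactory::Create(opts);
    auto processor = trace_sdk::BatchSpanProcessorFactory::Create(
        std::move(exporter), trace_sdk::BatchSpanProcessorOptions{});

    std::shared_ptr<trace_api::TracerProvider> provider =
        trace_sdk::TracerProviderFactory::Create(std::move(processor));

    trace_api::Provider::SetTracerProvider(
        opentelemetry::nostd::shared_ptr<trace_api::TracerProvider>(
            std::move(provider)));
}
```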
---
|
||||
|
||||
## 2.3 Span Naming Conventions
|
||||
|
||||
### 2.3.1 Naming Schema
|
||||
|
||||
```
|
||||
<component>.<operation>[.<sub-operation>]
|
||||
```
|
||||
|
||||
**Examples**:
|
||||
- `tx.receive` - Transaction received from peer
|
||||
- `consensus.phase.establish` - Consensus establish phase
|
||||
- `rpc.command.server_info` - server_info RPC command
|
||||
|
||||
### 2.3.2 Complete Span Catalog
|
||||
|
||||
```yaml
|
||||
# Transaction Spans
|
||||
tx:
|
||||
receive: "Transaction received from network"
|
||||
validate: "Transaction signature/format validation"
|
||||
process: "Full transaction processing"
|
||||
relay: "Transaction relay to peers"
|
||||
apply: "Apply transaction to ledger"
|
||||
|
||||
# Consensus Spans
|
||||
consensus:
|
||||
round: "Complete consensus round"
|
||||
phase:
|
||||
open: "Open phase - collecting transactions"
|
||||
establish: "Establish phase - reaching agreement"
|
||||
accept: "Accept phase - applying consensus"
|
||||
proposal:
|
||||
receive: "Receive peer proposal"
|
||||
send: "Send our proposal"
|
||||
validation:
|
||||
receive: "Receive peer validation"
|
||||
send: "Send our validation"
|
||||
|
||||
# RPC Spans
|
||||
rpc:
|
||||
request: "HTTP/WebSocket request handling"
|
||||
command:
|
||||
"*": "Specific RPC command (dynamic)"
|
||||
|
||||
# Peer Spans
|
||||
peer:
|
||||
connect: "Peer connection establishment"
|
||||
disconnect: "Peer disconnection"
|
||||
message:
|
||||
send: "Send protocol message"
|
||||
receive: "Receive protocol message"
|
||||
|
||||
# Ledger Spans
|
||||
ledger:
|
||||
acquire: "Ledger acquisition from network"
|
||||
build: "Build new ledger"
|
||||
validate: "Ledger validation"
|
||||
close: "Close ledger"
|
||||
|
||||
# Job Spans
|
||||
job:
|
||||
enqueue: "Job added to queue"
|
||||
execute: "Job execution"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 2.4 Attribute Schema
|
||||
|
||||
### 2.4.1 Resource Attributes (Set Once at Startup)
|
||||
|
||||
```cpp
|
||||
// Standard OpenTelemetry semantic conventions
|
||||
resource::SemanticConventions::SERVICE_NAME = "rippled"
|
||||
resource::SemanticConventions::SERVICE_VERSION = BuildInfo::getVersionString()
|
||||
resource::SemanticConventions::SERVICE_INSTANCE_ID = <node_public_key_base58>
|
||||
|
||||
// Custom rippled attributes
|
||||
"xrpl.network.id" = <network_id> // e.g., 0 for mainnet
|
||||
"xrpl.network.type" = "mainnet" | "testnet" | "devnet" | "standalone"
|
||||
"xrpl.node.type" = "validator" | "stock" | "reporting"
|
||||
"xrpl.node.cluster" = <cluster_name> // If clustered
|
||||
```
|
||||
|
||||
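A sketch of building the resource once at startup from the attribute list above, using plain string keys for the standard conventions; the version string and node public key are assumed to come from `BuildInfo` and the node's configuration.

```cpp
#include <opentelemetry/sdk/resource/resource.h>

#include <string>

namespace resource = opentelemetry::sdk::resource;

// Sketch: resource attributes describing this rippled instance.
resource::Resource makeRippledResource(
    std::string const& versionString, std::string const& nodePublicKey)
{
    resource::ResourceAttributes attributes{
        {"service.name", "rippled"},             // SERVICE_NAME
        {"service.version", versionString},      // SERVICE_VERSION
        {"service.instance.id", nodePublicKey},  // SERVICE_INSTANCE_ID
        {"xrpl.network.id", 0},                  // e.g., 0 for mainnet
        {"xrpl.network.type", "mainnet"},
        {"xrpl.node.type", "validator"},
    };
    return resource::Resource::Create(attributes);
}
```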
### 2.4.2 Span Attributes by Category
|
||||
|
||||
#### Transaction Attributes
|
||||
```cpp
|
||||
"xrpl.tx.hash" = string // Transaction hash (hex)
|
||||
"xrpl.tx.type" = string // "Payment", "OfferCreate", etc.
|
||||
"xrpl.tx.account" = string // Source account (redacted in prod)
|
||||
"xrpl.tx.sequence" = int64 // Account sequence number
|
||||
"xrpl.tx.fee" = int64 // Fee in drops
|
||||
"xrpl.tx.result" = string // "tesSUCCESS", "tecPATH_DRY", etc.
|
||||
"xrpl.tx.ledger_index" = int64 // Ledger containing transaction
|
||||
```
|
||||
|
||||
#### Consensus Attributes
|
||||
```cpp
|
||||
"xrpl.consensus.round" = int64 // Round number
|
||||
"xrpl.consensus.phase" = string // "open", "establish", "accept"
|
||||
"xrpl.consensus.mode" = string // "proposing", "observing", etc.
|
||||
"xrpl.consensus.proposers" = int64 // Number of proposers
|
||||
"xrpl.consensus.ledger.prev" = string // Previous ledger hash
|
||||
"xrpl.consensus.ledger.seq" = int64 // Ledger sequence
|
||||
"xrpl.consensus.tx_count" = int64 // Transactions in consensus set
|
||||
"xrpl.consensus.duration_ms" = float64 // Round duration
|
||||
```
|
||||
|
||||
#### RPC Attributes
|
||||
```cpp
|
||||
"xrpl.rpc.command" = string // Command name
|
||||
"xrpl.rpc.version" = int64 // API version
|
||||
"xrpl.rpc.role" = string // "admin" or "user"
|
||||
"xrpl.rpc.params" = string // Sanitized parameters (optional)
|
||||
```
|
||||
|
||||
#### Peer & Message Attributes
|
||||
```cpp
|
||||
"xrpl.peer.id" = string // Peer public key (base58)
|
||||
"xrpl.peer.address" = string // IP:port
|
||||
"xrpl.peer.latency_ms" = float64 // Measured latency
|
||||
"xrpl.peer.cluster" = string // Cluster name if clustered
|
||||
"xrpl.message.type" = string // Protocol message type name
|
||||
"xrpl.message.size_bytes" = int64 // Message size
|
||||
"xrpl.message.compressed" = bool // Whether compressed
|
||||
```
|
||||
|
||||
#### Ledger & Job Attributes
|
||||
```cpp
|
||||
"xrpl.ledger.hash" = string // Ledger hash
|
||||
"xrpl.ledger.index" = int64 // Ledger sequence/index
|
||||
"xrpl.ledger.close_time" = int64 // Close time (epoch)
|
||||
"xrpl.ledger.tx_count" = int64 // Transaction count
|
||||
"xrpl.job.type" = string // Job type name
|
||||
"xrpl.job.queue_ms" = float64 // Time spent in queue
|
||||
"xrpl.job.worker" = int64 // Worker thread ID
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 2.5 Context Propagation Design
|
||||
|
||||
### 2.5.1 Propagation Boundaries
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph http["HTTP/WebSocket (RPC)"]
|
||||
w3c["W3C Trace Context Headers:<br/>traceparent: 00-{trace_id}-{span_id}-{flags}<br/>tracestate: rippled=<state>"]
|
||||
end
|
||||
|
||||
subgraph protobuf["Protocol Buffers (P2P)"]
|
||||
proto["message TraceContext {<br/> bytes trace_id = 1; // 16 bytes<br/> bytes span_id = 2; // 8 bytes<br/> uint32 trace_flags = 3;<br/> string trace_state = 4;<br/>}"]
|
||||
end
|
||||
|
||||
subgraph jobqueue["JobQueue (Internal Async)"]
|
||||
job["Context captured at job creation,<br/>restored at execution<br/><br/>class Job {<br/> opentelemetry::context::Context traceContext_;<br/>};"]
|
||||
end
|
||||
|
||||
style http fill:#e3f2fd,stroke:#1976d2
|
||||
style protobuf fill:#e8f5e9,stroke:#388e3c
|
||||
style jobqueue fill:#fff3e0,stroke:#ff9800
|
||||
```
|
||||
|
||||
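The JobQueue propagation sketched above can be expressed as: capture the current context when the job is queued and reattach it on the worker thread. `postToJobQueue` stands in for rippled's real JobQueue API and is an assumption, not the actual interface.

```cpp
#include <opentelemetry/context/runtime_context.h>

#include <functional>
#include <utility>

namespace otel_ctx = opentelemetry::context;

// Sketch: carry the active trace context across an async hop.
void scheduleTraced(std::function<void()> work,
                    std::function<void(std::function<void()>)> postToJobQueue)
{
    // Captured on the submitting thread.
    otel_ctx::Context const captured = otel_ctx::RuntimeContext::GetCurrent();

    postToJobQueue([work = std::move(work), captured]() {
        // Reattached on the worker thread for the duration of the job;
        // the token detaches the context again when it goes out of scope.
        auto token = otel_ctx::RuntimeContext::Attach(captured);
        work();
    });
}
```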
---
|
||||
|
||||
## 2.6 Integration with Existing Observability
|
||||
|
||||
### 2.6.1 Coexistence Strategy
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph rippled["rippled Process"]
|
||||
perflog["PerfLog<br/>(JSON to file)"]
|
||||
insight["Beast Insight<br/>(StatsD)"]
|
||||
otel["OpenTelemetry<br/>(Tracing)"]
|
||||
end
|
||||
|
||||
perflog --> perffile["perf.log"]
|
||||
insight --> statsd["StatsD Server"]
|
||||
otel --> collector["OTLP Collector"]
|
||||
|
||||
perffile --> grafana["Grafana<br/>(Unified UI)"]
|
||||
statsd --> grafana
|
||||
collector --> grafana
|
||||
|
||||
style rippled fill:#f5f5f5,stroke:#333
|
||||
style grafana fill:#ff9800,stroke:#e65100
|
||||
```
|
||||
|
||||
### 2.6.2 Correlation with PerfLog
|
||||
|
||||
Trace IDs can be correlated with existing PerfLog entries for comprehensive debugging:
|
||||
|
||||
```cpp
|
||||
// In RPCHandler.cpp - correlate trace with PerfLog
|
||||
Status doCommand(RPC::JsonContext& context, Json::Value& result)
|
||||
{
|
||||
// Start OpenTelemetry span
|
||||
auto span = context.app.getTelemetry().startSpan(
|
||||
"rpc.command." + context.method);
|
||||
|
||||
// Get trace ID for correlation
|
||||
auto traceId = span->GetContext().trace_id().IsValid()
|
||||
? toHex(span->GetContext().trace_id())
|
||||
: "";
|
||||
|
||||
// Use existing PerfLog with trace correlation
|
||||
auto const curId = context.app.getPerfLog().currentId();
|
||||
context.app.getPerfLog().rpcStart(context.method, curId);
|
||||
|
||||
// Future: Add trace ID to PerfLog entry
|
||||
// context.app.getPerfLog().setTraceId(curId, traceId);
|
||||
|
||||
try {
|
||||
auto ret = handler(context, result);
|
||||
context.app.getPerfLog().rpcFinish(context.method, curId);
|
||||
span->SetStatus(opentelemetry::trace::StatusCode::kOk);
|
||||
return ret;
|
||||
} catch (std::exception const& e) {
|
||||
context.app.getPerfLog().rpcError(context.method, curId);
|
||||
span->RecordException(e);
|
||||
span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what());
|
||||
throw;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
*Previous: [Architecture Analysis](./01-architecture-analysis.md)* | *Next: [Implementation Strategy](./03-implementation-strategy.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*
|
||||
@@ -1,427 +0,0 @@
|
||||
# Implementation Strategy
|
||||
|
||||
> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
|
||||
> **Related**: [Code Samples](./04-code-samples.md) | [Configuration Reference](./05-configuration-reference.md)
|
||||
|
||||
---
|
||||
|
||||
## 3.1 Directory Structure
|
||||
|
||||
The telemetry implementation follows rippled's existing code organization pattern:
|
||||
|
||||
```
|
||||
include/xrpl/
|
||||
├── telemetry/
|
||||
│ ├── Telemetry.h # Main telemetry interface
|
||||
│ ├── TelemetryConfig.h # Configuration structures
|
||||
│ ├── TraceContext.h # Context propagation utilities
|
||||
│ ├── SpanGuard.h # RAII span management
|
||||
│ └── SpanAttributes.h # Attribute helper functions
|
||||
|
||||
src/libxrpl/
|
||||
├── telemetry/
|
||||
│ ├── Telemetry.cpp # Implementation
|
||||
│ ├── TelemetryConfig.cpp # Config parsing
|
||||
│ ├── TraceContext.cpp # Context serialization
|
||||
│ └── NullTelemetry.cpp # No-op implementation
|
||||
|
||||
src/xrpld/
|
||||
├── telemetry/
|
||||
│ ├── TracingInstrumentation.h # Instrumentation macros
|
||||
│ └── TracingInstrumentation.cpp
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3.2 Implementation Approach
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
subgraph phase1["Phase 1: Core"]
|
||||
sdk["SDK Integration"]
|
||||
interface["Telemetry Interface"]
|
||||
config["Configuration"]
|
||||
end
|
||||
|
||||
subgraph phase2["Phase 2: RPC"]
|
||||
http["HTTP Context"]
|
||||
rpc["RPC Handlers"]
|
||||
end
|
||||
|
||||
subgraph phase3["Phase 3: P2P"]
|
||||
proto["Protobuf Context"]
|
||||
tx["Transaction Relay"]
|
||||
end
|
||||
|
||||
subgraph phase4["Phase 4: Consensus"]
|
||||
consensus["Consensus Rounds"]
|
||||
proposals["Proposals"]
|
||||
end
|
||||
|
||||
phase1 --> phase2 --> phase3 --> phase4
|
||||
|
||||
style phase1 fill:#e3f2fd,stroke:#1976d2
|
||||
style phase2 fill:#e8f5e9,stroke:#388e3c
|
||||
style phase3 fill:#fff3e0,stroke:#ff9800
|
||||
style phase4 fill:#fce4ec,stroke:#e91e63
|
||||
```
|
||||
|
||||
### Key Principles
|
||||
|
||||
1. **Minimal Intrusion**: Instrumentation should not alter existing control flow
|
||||
2. **Zero-Cost When Disabled**: Use compile-time flags and no-op implementations
|
||||
3. **Backward Compatibility**: Protocol Buffer extensions use high field numbers
|
||||
4. **Graceful Degradation**: Tracing failures must not affect node operation
|
||||
|
||||
---
|
||||
|
||||
## 3.3 Performance Overhead Summary
|
||||
|
||||
| Metric | Overhead | Notes |
|
||||
| ------------- | ---------- | ----------------------------------- |
|
||||
| CPU | 1-3% | Span creation and attribute setting |
|
||||
| Memory | 2-5 MB | Batch buffer for pending spans |
|
||||
| Network | 10-50 KB/s | Compressed OTLP export to collector |
|
||||
| Latency (p99) | <2% | With proper sampling configuration |
|
||||
|
||||
---
|
||||
|
||||
## 3.4 Detailed CPU Overhead Analysis
|
||||
|
||||
### 3.4.1 Per-Operation Costs
|
||||
|
||||
| Operation | Time (ns) | Frequency | Impact |
|
||||
| --------------------- | --------- | ---------------------- | ---------- |
|
||||
| Span creation | 200-500 | Every traced operation | Low |
|
||||
| Span end | 100-200 | Every traced operation | Low |
|
||||
| SetAttribute (string) | 80-120 | 3-5 per span | Low |
|
||||
| SetAttribute (int) | 40-60 | 2-3 per span | Negligible |
|
||||
| AddEvent | 50-80 | 0-2 per span | Negligible |
|
||||
| Context injection | 150-250 | Per outgoing message | Low |
|
||||
| Context extraction | 100-180 | Per incoming message | Low |
|
||||
| GetCurrent context | 10-20 | Thread-local access | Negligible |
|
||||
|
||||
### 3.4.2 Transaction Processing Overhead
|
||||
|
||||
```mermaid
|
||||
pie title Transaction Tracing Overhead (~2.4μs total)
|
||||
"tx.receive span" : 800
|
||||
"tx.validate span" : 500
|
||||
"tx.relay span" : 500
|
||||
"Context injection (×3)" : 600
|
||||
```
|
||||
|
||||
**Overhead percentage**: 2.4 μs / 200 μs (avg tx processing) = **~1.2%**
|
||||
|
||||
### 3.4.3 Consensus Round Overhead
|
||||
|
||||
| Operation | Count | Cost (ns) | Total |
|
||||
| ---------------------- | ----- | --------- | ---------- |
|
||||
| consensus.round span | 1 | ~1000 | ~1 μs |
|
||||
| consensus.phase spans | 3 | ~700 | ~2.1 μs |
|
||||
| proposal.receive spans | ~20 | ~600 | ~12 μs |
|
||||
| proposal.send spans | ~3 | ~600 | ~1.8 μs |
|
||||
| Context operations | ~30 | ~200 | ~6 μs |
|
||||
| **TOTAL** | | | **~23 μs** |
|
||||
|
||||
**Overhead percentage**: 23 μs / 3s (typical round) = **~0.0008%** (negligible)
|
||||
|
||||
### 3.4.4 RPC Request Overhead
|
||||
|
||||
| Operation | Cost (ns) |
|
||||
| ---------------- | ------------ |
|
||||
| rpc.request span | ~700 |
|
||||
| rpc.command span | ~600 |
|
||||
| Context extract | ~250 |
|
||||
| Context inject | ~200 |
|
||||
| **TOTAL** | **~1.75 μs** |
|
||||
|
||||
- Fast RPC (1ms): 1.75 μs / 1ms = **~0.175%**
|
||||
- Slow RPC (100ms): 1.75 μs / 100ms = **~0.002%**
|
||||
|
||||
---
|
||||
|
||||
## 3.5 Memory Overhead Analysis
|
||||
|
||||
### 3.5.1 Static Memory
|
||||
|
||||
| Component | Size | Allocated |
|
||||
| ------------------------ | ----------- | ---------- |
|
||||
| TracerProvider singleton | ~64 KB | At startup |
|
||||
| BatchSpanProcessor | ~128 KB | At startup |
|
||||
| OTLP exporter | ~256 KB | At startup |
|
||||
| Propagator registry | ~8 KB | At startup |
|
||||
| **Total static** | **~456 KB** | |
|
||||
|
||||
### 3.5.2 Dynamic Memory
|
||||
|
||||
| Component | Size per unit | Max units | Peak |
|
||||
| -------------------- | ------------- | ---------- | ----------- |
|
||||
| Active span | ~200 bytes | 1000 | ~200 KB |
|
||||
| Queued span (export) | ~500 bytes | 2048 | ~1 MB |
|
||||
| Attribute storage | ~50 bytes | 5 per span | Included |
|
||||
| Context storage | ~64 bytes | Per thread | ~6.4 KB |
|
||||
| **Total dynamic** | | | **~1.2 MB** |
|
||||
|
||||
### 3.5.3 Memory Growth Characteristics
|
||||
|
||||
```mermaid
|
||||
xychart-beta
|
||||
title "Memory Usage vs Span Rate"
|
||||
x-axis "Spans/second" [0, 200, 400, 600, 800, 1000]
|
||||
y-axis "Memory (MB)" 0 --> 6
|
||||
line [1, 1.8, 2.6, 3.4, 4.2, 5]
|
||||
```
|
||||
|
||||
**Notes**:
|
||||
- Memory increases linearly with span rate
|
||||
- Batch export prevents unbounded growth
|
||||
- Queue size is configurable (default 2048 spans)
|
||||
- At queue limit, oldest spans are dropped (not blocked)
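
To make the bound in the last note concrete, here is a back-of-the-envelope check. It is a sketch only; the ~500-byte figure is the per-queued-span estimate from the dynamic-memory table in 3.5.2, and the names are illustrative.

```cpp
#include <cstddef>

// Rough ceiling on export-queue memory at the default queue limit.
// Assumes ~500 bytes per queued span (estimate from section 3.5.2).
constexpr std::size_t maxQueueSize = 2048;
constexpr std::size_t bytesPerQueuedSpan = 500;
constexpr std::size_t peakQueueBytes = maxQueueSize * bytesPerQueuedSpan;

static_assert(peakQueueBytes == 1'024'000, "roughly 1 MB for the export queue");
```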
|
||||
|
||||
---
|
||||
|
||||
## 3.6 Network Overhead Analysis
|
||||
|
||||
### 3.6.1 Export Bandwidth
|
||||
|
||||
| Sampling Rate | Spans/sec | Bandwidth | Notes |
|
||||
| ------------- | --------- | --------- | ---------------- |
|
||||
| 100% | ~500 | ~250 KB/s | Development only |
|
||||
| 10% | ~50 | ~25 KB/s | Staging |
|
||||
| 1% | ~5 | ~2.5 KB/s | Production |
|
||||
| Error-only | ~1 | ~0.5 KB/s | Minimal overhead |
|
||||
|
||||
### 3.6.2 Trace Context Propagation
|
||||
|
||||
| Message Type | Context Size | Messages/sec | Overhead |
|
||||
| ---------------------- | ------------ | ------------ | ----------- |
|
||||
| TMTransaction | 32 bytes | ~100 | ~3.2 KB/s |
|
||||
| TMProposeSet | 32 bytes | ~10 | ~320 B/s |
|
||||
| TMValidation | 32 bytes | ~50 | ~1.6 KB/s |
|
||||
| **Total P2P overhead** | | | **~5 KB/s** |
|
||||
|
||||
---
|
||||
|
||||
## 3.7 Optimization Strategies
|
||||
|
||||
### 3.7.1 Sampling Strategies
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
trace["New Trace"]
|
||||
|
||||
trace --> errors{"Is Error?"}
|
||||
errors -->|Yes| sample["SAMPLE"]
|
||||
errors -->|No| consensus{"Is Consensus?"}
|
||||
|
||||
consensus -->|Yes| sample
|
||||
consensus -->|No| slow{"Is Slow?"}
|
||||
|
||||
slow -->|Yes| sample
|
||||
slow -->|No| prob{"Random < 10%?"}
|
||||
|
||||
prob -->|Yes| sample
|
||||
prob -->|No| drop["DROP"]
|
||||
|
||||
style sample fill:#4caf50,stroke:#388e3c,color:#fff
|
||||
style drop fill:#f44336,stroke:#c62828,color:#fff
|
||||
```
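
The decision tree above can be read as a single predicate. The sketch below is illustrative only: the `TraceInfo` struct and `shouldSample` helper are hypothetical and not part of the proposed API, and in practice the error/slow branches are implemented as tail-based sampling in the collector (see 5.5.2), since those facts are not known when a trace starts.

```cpp
#include <random>

// Hypothetical helper mirroring the flowchart; not part of the proposed API.
struct TraceInfo
{
    bool isError = false;      // trace contains an error status
    bool isConsensus = false;  // belongs to a consensus round
    bool isSlow = false;       // exceeded a latency threshold
};

inline bool
shouldSample(TraceInfo const& info, double ratio = 0.10)
{
    // Always keep errors, consensus activity, and slow traces.
    if (info.isError || info.isConsensus || info.isSlow)
        return true;

    // Otherwise keep a random fraction (default 10%).
    static thread_local std::mt19937 gen{std::random_device{}()};
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    return dist(gen) < ratio;
}
```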
|
||||
|
||||
### 3.7.2 Batch Tuning Recommendations
|
||||
|
||||
| Environment | Batch Size | Batch Delay | Max Queue |
|
||||
| ------------------ | ---------- | ----------- | --------- |
|
||||
| Low-latency | 128 | 1000ms | 512 |
|
||||
| High-throughput | 1024 | 10000ms | 8192 |
|
||||
| Memory-constrained | 256 | 2000ms | 512 |
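
As an illustration, the "high-throughput" row maps onto the `Telemetry::Setup` structure proposed in section 4.1.1 roughly as follows (a sketch under that assumption, not a required API):

```cpp
#include <chrono>

#include <xrpl/telemetry/Telemetry.h>

// Sketch: the "high-throughput" profile expressed against the proposed
// Telemetry::Setup from section 4.1.1.
xrpl::telemetry::Telemetry::Setup
highThroughputProfile()
{
    xrpl::telemetry::Telemetry::Setup setup;
    setup.batchSize = 1024;
    setup.batchDelay = std::chrono::milliseconds{10000};
    setup.maxQueueSize = 8192;
    return setup;
}
```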
|
||||
|
||||
### 3.7.3 Conditional Instrumentation
|
||||
|
||||
```cpp
|
||||
// Compile-time feature flag
|
||||
#ifndef XRPL_ENABLE_TELEMETRY
|
||||
// Zero-cost when disabled
|
||||
#define XRPL_TRACE_SPAN(t, n) ((void)0)
|
||||
#endif
|
||||
|
||||
// Runtime component filtering
|
||||
if (telemetry.shouldTracePeer())
|
||||
{
|
||||
XRPL_TRACE_SPAN(telemetry, "peer.message.receive");
|
||||
// ... instrumentation
|
||||
}
|
||||
// No overhead when component tracing disabled
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3.8 Links to Detailed Documentation
|
||||
|
||||
- **[Code Samples](./04-code-samples.md)**: Complete implementation code for all components
|
||||
- **[Configuration Reference](./05-configuration-reference.md)**: Configuration options and collector setup
|
||||
- **[Implementation Phases](./06-implementation-phases.md)**: Detailed timeline and milestones
|
||||
|
||||
---
|
||||
|
||||
## 3.9 Code Intrusiveness Assessment
|
||||
|
||||
This section provides a detailed assessment of how intrusive the OpenTelemetry integration is to the existing rippled codebase.
|
||||
|
||||
### 3.9.1 Files Modified Summary
|
||||
|
||||
| Component | Files Modified | Lines Added | Lines Changed | Architectural Impact |
|
||||
| --------------------- | -------------- | ----------- | ------------- | -------------------- |
|
||||
| **Core Telemetry** | 5 new files | ~800 | 0 | None (new module) |
|
||||
| **Application Init** | 2 files | ~30 | ~5 | Minimal |
|
||||
| **RPC Layer** | 3 files | ~80 | ~20 | Minimal |
|
||||
| **Transaction Relay** | 4 files | ~120 | ~40 | Low |
|
||||
| **Consensus** | 3 files | ~100 | ~30 | Low-Medium |
|
||||
| **Protocol Buffers** | 1 file | ~25 | 0 | Low |
|
||||
| **CMake/Build** | 3 files | ~50 | ~10 | Minimal |
|
||||
| **Total** | **~21 files** | **~1,205** | **~105** | **Low** |
|
||||
|
||||
### 3.9.2 Detailed File Impact
|
||||
|
||||
```mermaid
|
||||
pie title Code Changes by Component
|
||||
"New Telemetry Module" : 800
|
||||
"Transaction Relay" : 160
|
||||
"Consensus" : 130
|
||||
"RPC Layer" : 100
|
||||
"Application Init" : 35
|
||||
"Protocol Buffers" : 25
|
||||
"Build System" : 60
|
||||
```
|
||||
|
||||
#### New Files (No Impact on Existing Code)
|
||||
|
||||
| File | Lines | Purpose |
|
||||
| ---------------------------------------------- | ----- | -------------------- |
|
||||
| `include/xrpl/telemetry/Telemetry.h` | ~160 | Main interface |
|
||||
| `include/xrpl/telemetry/SpanGuard.h` | ~120 | RAII wrapper |
|
||||
| `include/xrpl/telemetry/TraceContext.h` | ~80 | Context propagation |
|
||||
| `src/xrpld/telemetry/TracingInstrumentation.h` | ~60 | Macros |
|
||||
| `src/libxrpl/telemetry/Telemetry.cpp` | ~200 | Implementation |
|
||||
| `src/libxrpl/telemetry/TelemetryConfig.cpp` | ~60 | Config parsing |
|
||||
| `src/libxrpl/telemetry/NullTelemetry.cpp` | ~40 | No-op implementation |
|
||||
|
||||
#### Modified Files (Existing Rippled Code)
|
||||
|
||||
| File | Lines Added | Lines Changed | Risk Level |
|
||||
| ------------------------------------------------- | ----------- | ------------- | ---------- |
|
||||
| `src/xrpld/app/main/Application.cpp` | ~15 | ~3 | Low |
|
||||
| `include/xrpl/app/main/Application.h` | ~5 | ~2 | Low |
|
||||
| `src/xrpld/rpc/detail/ServerHandler.cpp` | ~40 | ~10 | Low |
|
||||
| `src/xrpld/rpc/handlers/*.cpp` | ~30 | ~8 | Low |
|
||||
| `src/xrpld/overlay/detail/PeerImp.cpp` | ~60 | ~15 | Medium |
|
||||
| `src/xrpld/overlay/detail/OverlayImpl.cpp` | ~30 | ~10 | Medium |
|
||||
| `src/xrpld/app/consensus/RCLConsensus.cpp` | ~50 | ~15 | Medium |
|
||||
| `src/xrpld/app/consensus/RCLConsensusAdaptor.cpp` | ~40 | ~12 | Medium |
|
||||
| `src/xrpld/core/JobQueue.cpp` | ~20 | ~5 | Low |
|
||||
| `src/xrpld/overlay/detail/ripple.proto` | ~25 | 0 | Low |
|
||||
| `CMakeLists.txt` | ~40 | ~8 | Low |
|
||||
| `cmake/FindOpenTelemetry.cmake` | ~50 | 0 | None (new) |
|
||||
|
||||
### 3.9.3 Risk Assessment by Component
|
||||
|
||||
```mermaid
|
||||
quadrantChart
|
||||
title Code Intrusiveness Risk Matrix
|
||||
x-axis Low Risk --> High Risk
|
||||
y-axis Low Value --> High Value
|
||||
quadrant-1 High Value, Low Risk - Do First
|
||||
quadrant-2 High Value, High Risk - Plan Carefully
|
||||
quadrant-3 Low Value, Low Risk - Optional
|
||||
quadrant-4 Low Value, High Risk - Avoid
|
||||
|
||||
RPC Tracing: [0.2, 0.8]
|
||||
Transaction Relay: [0.5, 0.9]
|
||||
Consensus Tracing: [0.7, 0.95]
|
||||
Peer Message Tracing: [0.8, 0.4]
|
||||
JobQueue Context: [0.4, 0.5]
|
||||
Ledger Acquisition: [0.5, 0.6]
|
||||
```
|
||||
|
||||
#### Risk Level Definitions
|
||||
|
||||
| Risk Level | Definition | Mitigation |
|
||||
| ---------- | ---------------------------------------------------------------- | ---------------------------------- |
|
||||
| **Low** | Additive changes only; no modification to existing logic | Standard code review |
|
||||
| **Medium** | Minor modifications to existing functions; clear boundaries | Comprehensive unit tests |
|
||||
| **High** | Changes to core logic or data structures; potential side effects | Integration tests + staged rollout |
|
||||
|
||||
### 3.9.4 Architectural Impact Assessment
|
||||
|
||||
| Aspect | Impact | Justification |
|
||||
| -------------------- | ------- | --------------------------------------------------------------------- |
|
||||
| **Data Flow** | None | Tracing is purely observational; no business logic changes |
|
||||
| **Threading Model** | Minimal | Context propagation uses thread-local storage (standard OTel pattern) |
|
||||
| **Memory Model** | Low | Bounded queues prevent unbounded growth; RAII ensures cleanup |
|
||||
| **Network Protocol** | Low | Optional fields in protobuf (high field numbers); backward compatible |
|
||||
| **Configuration** | None | New config section; existing configs unaffected |
|
||||
| **Build System** | Low | Optional CMake flag; builds work without OpenTelemetry |
|
||||
| **Dependencies** | Low | OpenTelemetry SDK is optional; null implementation when disabled |
|
||||
|
||||
### 3.9.5 Backward Compatibility
|
||||
|
||||
| Compatibility | Status | Notes |
|
||||
| --------------- | ------ | ----------------------------------------------------- |
|
||||
| **Config File** | ✅ Full | New `[telemetry]` section is optional |
|
||||
| **Protocol** | ✅ Full | Optional protobuf fields with high field numbers |
|
||||
| **Build** | ✅ Full | `XRPL_ENABLE_TELEMETRY=OFF` produces identical binary |
|
||||
| **Runtime** | ✅ Full | `enabled=0` produces zero overhead |
|
||||
| **API** | ✅ Full | No changes to public RPC or P2P APIs |
|
||||
|
||||
### 3.9.6 Rollback Strategy
|
||||
|
||||
If issues are discovered after deployment:
|
||||
|
||||
1. **Immediate**: Set `enabled=0` in config and restart (zero code change)
|
||||
2. **Quick**: Rebuild with `XRPL_ENABLE_TELEMETRY=OFF`
|
||||
3. **Complete**: Revert telemetry commits (clean separation makes this easy)
|
||||
|
||||
### 3.9.7 Code Change Examples
|
||||
|
||||
**Minimal RPC Instrumentation (Low Intrusiveness):**
|
||||
```cpp
|
||||
// Before
|
||||
void ServerHandler::onRequest(...) {
|
||||
auto result = processRequest(req);
|
||||
send(result);
|
||||
}
|
||||
|
||||
// After (only ~10 lines added)
|
||||
void ServerHandler::onRequest(...) {
|
||||
XRPL_TRACE_RPC(app_.getTelemetry(), "rpc.request"); // +1 line
|
||||
XRPL_TRACE_SET_ATTR("xrpl.rpc.command", command); // +1 line
|
||||
|
||||
auto result = processRequest(req);
|
||||
|
||||
XRPL_TRACE_SET_ATTR("xrpl.rpc.status", status); // +1 line
|
||||
send(result);
|
||||
}
|
||||
```
|
||||
|
||||
**Consensus Instrumentation (Medium Intrusiveness):**
|
||||
```cpp
|
||||
// Before
|
||||
void RCLConsensusAdaptor::startRound(...) {
|
||||
// ... existing logic
|
||||
}
|
||||
|
||||
// After (context storage required)
|
||||
void RCLConsensusAdaptor::startRound(...) {
|
||||
XRPL_TRACE_CONSENSUS(app_.getTelemetry(), "consensus.round");
|
||||
XRPL_TRACE_SET_ATTR("xrpl.consensus.ledger.seq", seq);
|
||||
|
||||
// Store context for child spans in phase transitions
|
||||
if (_xrpl_guard_)
    currentRoundContext_ = _xrpl_guard_->context(); // New member variable
|
||||
|
||||
// ... existing logic unchanged
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
*Previous: [Design Decisions](./02-design-decisions.md)* | *Next: [Code Samples](./04-code-samples.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*
|
||||
@@ -1,967 +0,0 @@
|
||||
# Code Samples
|
||||
|
||||
> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
|
||||
> **Related**: [Implementation Strategy](./03-implementation-strategy.md) | [Configuration Reference](./05-configuration-reference.md)
|
||||
|
||||
---
|
||||
|
||||
## 4.1 Core Interfaces
|
||||
|
||||
### 4.1.1 Main Telemetry Interface
|
||||
|
||||
```cpp
|
||||
// include/xrpl/telemetry/Telemetry.h
|
||||
#pragma once
|
||||
|
||||
#include <xrpl/telemetry/TelemetryConfig.h>
|
||||
#include <opentelemetry/trace/tracer.h>
|
||||
#include <opentelemetry/trace/span.h>
|
||||
#include <opentelemetry/context/context.h>
|
||||
|
||||
#include <chrono>
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
/**
|
||||
* Main telemetry interface for OpenTelemetry integration.
|
||||
*
|
||||
* This class provides the primary API for distributed tracing in rippled.
|
||||
* It manages the OpenTelemetry SDK lifecycle and provides convenience
|
||||
* methods for creating spans and propagating context.
|
||||
*/
|
||||
class Telemetry
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* Configuration for the telemetry system.
|
||||
*/
|
||||
struct Setup
|
||||
{
|
||||
bool enabled = false;
|
||||
std::string serviceName = "rippled";
|
||||
std::string serviceVersion;
|
||||
std::string serviceInstanceId; // Node public key
|
||||
|
||||
// Exporter configuration
|
||||
std::string exporterType = "otlp_grpc"; // "otlp_grpc", "otlp_http", "none"
|
||||
std::string exporterEndpoint = "localhost:4317";
|
||||
bool useTls = false;
|
||||
std::string tlsCertPath;
|
||||
|
||||
// Sampling configuration
|
||||
double samplingRatio = 1.0; // 1.0 = 100% sampling
|
||||
|
||||
// Batch processor settings
|
||||
std::uint32_t batchSize = 512;
|
||||
std::chrono::milliseconds batchDelay{5000};
|
||||
std::uint32_t maxQueueSize = 2048;
|
||||
|
||||
// Network attributes
|
||||
std::uint32_t networkId = 0;
|
||||
std::string networkType = "mainnet";
|
||||
|
||||
// Component filtering
|
||||
bool traceTransactions = true;
|
||||
bool traceConsensus = true;
|
||||
bool traceRpc = true;
|
||||
bool tracePeer = false; // High volume, disabled by default
|
||||
bool traceLedger = true;
|
||||
};
|
||||
|
||||
virtual ~Telemetry() = default;
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════
|
||||
// LIFECYCLE
|
||||
// ═══════════════════════════════════════════════════════════════════════
|
||||
|
||||
/** Start the telemetry system (call after configuration) */
|
||||
virtual void start() = 0;
|
||||
|
||||
/** Stop the telemetry system (flushes pending spans) */
|
||||
virtual void stop() = 0;
|
||||
|
||||
/** Check if telemetry is enabled */
|
||||
virtual bool isEnabled() const = 0;
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════
|
||||
// TRACER ACCESS
|
||||
// ═══════════════════════════════════════════════════════════════════════
|
||||
|
||||
/** Get the tracer for creating spans */
|
||||
virtual opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer>
|
||||
getTracer(std::string_view name = "rippled") = 0;
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════
|
||||
// SPAN CREATION (Convenience Methods)
|
||||
// ═══════════════════════════════════════════════════════════════════════
|
||||
|
||||
/** Start a new span with default options */
|
||||
virtual opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>
|
||||
startSpan(
|
||||
std::string_view name,
|
||||
opentelemetry::trace::SpanKind kind =
|
||||
opentelemetry::trace::SpanKind::kInternal) = 0;
|
||||
|
||||
/** Start a span as child of given context */
|
||||
virtual opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>
|
||||
startSpan(
|
||||
std::string_view name,
|
||||
opentelemetry::context::Context const& parentContext,
|
||||
opentelemetry::trace::SpanKind kind =
|
||||
opentelemetry::trace::SpanKind::kInternal) = 0;
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════
|
||||
// CONTEXT PROPAGATION
|
||||
// ═══════════════════════════════════════════════════════════════════════
|
||||
|
||||
/** Serialize context for network transmission */
|
||||
virtual std::string serializeContext(
|
||||
opentelemetry::context::Context const& ctx) = 0;
|
||||
|
||||
/** Deserialize context from network data */
|
||||
virtual opentelemetry::context::Context deserializeContext(
|
||||
std::string const& serialized) = 0;
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════
|
||||
// COMPONENT FILTERING
|
||||
// ═══════════════════════════════════════════════════════════════════════
|
||||
|
||||
/** Check if transaction tracing is enabled */
|
||||
virtual bool shouldTraceTransactions() const = 0;
|
||||
|
||||
/** Check if consensus tracing is enabled */
|
||||
virtual bool shouldTraceConsensus() const = 0;
|
||||
|
||||
/** Check if RPC tracing is enabled */
|
||||
virtual bool shouldTraceRpc() const = 0;
|
||||
|
||||
/** Check if peer message tracing is enabled */
|
||||
virtual bool shouldTracePeer() const = 0;
|
||||
};
|
||||
|
||||
// Factory functions
|
||||
std::unique_ptr<Telemetry>
|
||||
make_Telemetry(
|
||||
Telemetry::Setup const& setup,
|
||||
beast::Journal journal);
|
||||
|
||||
Telemetry::Setup
|
||||
setup_Telemetry(
|
||||
Section const& section,
|
||||
std::string const& nodePublicKey,
|
||||
std::string const& version);
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
```
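
A minimal usage sketch of the interface above; the `Section`, node public key, version string, and `beast::Journal` are assumed to come from the surrounding application code (see section 5.3.1 for the real wiring):

```cpp
// Sketch only: wiring the Telemetry interface together at startup.
void
exampleStartup(
    Section const& section,
    std::string const& nodePublicKey,
    std::string const& version,
    beast::Journal journal)
{
    using namespace xrpl::telemetry;

    auto setup = setup_Telemetry(section, nodePublicKey, version);
    auto telemetry = make_Telemetry(setup, journal);

    telemetry->start();

    // Create and end a span; SpanGuard (section 4.2) normally handles End().
    auto span = telemetry->startSpan("app.startup");
    span->End();

    telemetry->stop();  // flushes pending spans
}
```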
|
||||
|
||||
---
|
||||
|
||||
## 4.2 RAII Span Guard
|
||||
|
||||
```cpp
|
||||
// include/xrpl/telemetry/SpanGuard.h
|
||||
#pragma once
|
||||
|
||||
#include <opentelemetry/trace/span.h>
#include <opentelemetry/trace/scope.h>
#include <opentelemetry/trace/status.h>
#include <opentelemetry/context/runtime_context.h>
|
||||
|
||||
#include <string_view>
|
||||
#include <exception>
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
/**
|
||||
* RAII guard for OpenTelemetry spans.
|
||||
*
|
||||
* Automatically ends the span on destruction and makes it the current
|
||||
* span in the thread-local context.
|
||||
*/
|
||||
class SpanGuard
|
||||
{
|
||||
opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span> span_;
|
||||
opentelemetry::trace::Scope scope_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* Construct guard with span.
|
||||
* The span becomes the current span in thread-local context.
|
||||
*/
|
||||
explicit SpanGuard(
|
||||
opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span> span)
|
||||
: span_(std::move(span))
|
||||
, scope_(span_)
|
||||
{
|
||||
}
|
||||
|
||||
// Non-copyable, non-movable
|
||||
SpanGuard(SpanGuard const&) = delete;
|
||||
SpanGuard& operator=(SpanGuard const&) = delete;
|
||||
SpanGuard(SpanGuard&&) = delete;
|
||||
SpanGuard& operator=(SpanGuard&&) = delete;
|
||||
|
||||
~SpanGuard()
|
||||
{
|
||||
if (span_)
|
||||
span_->End();
|
||||
}
|
||||
|
||||
/** Access the underlying span */
|
||||
opentelemetry::trace::Span& span() { return *span_; }
|
||||
opentelemetry::trace::Span const& span() const { return *span_; }
|
||||
|
||||
/** Set span status to OK */
|
||||
void setOk()
|
||||
{
|
||||
span_->SetStatus(opentelemetry::trace::StatusCode::kOk);
|
||||
}
|
||||
|
||||
/** Set span status with code and description */
|
||||
void setStatus(
|
||||
opentelemetry::trace::StatusCode code,
|
||||
std::string_view description = "")
|
||||
{
|
||||
span_->SetStatus(code, std::string(description));
|
||||
}
|
||||
|
||||
/** Set an attribute on the span */
|
||||
template<typename T>
|
||||
void setAttribute(std::string_view key, T&& value)
|
||||
{
|
||||
span_->SetAttribute(
|
||||
opentelemetry::nostd::string_view(key.data(), key.size()),
|
||||
std::forward<T>(value));
|
||||
}
|
||||
|
||||
/** Add an event to the span */
|
||||
void addEvent(std::string_view name)
|
||||
{
|
||||
span_->AddEvent(std::string(name));
|
||||
}
|
||||
|
||||
/** Record an exception on the span */
|
||||
void recordException(std::exception const& e)
|
||||
{
|
||||
span_->RecordException(e);
|
||||
span_->SetStatus(
|
||||
opentelemetry::trace::StatusCode::kError,
|
||||
e.what());
|
||||
}
|
||||
|
||||
/** Get the current trace context */
|
||||
opentelemetry::context::Context context() const
|
||||
{
|
||||
return opentelemetry::context::RuntimeContext::GetCurrent();
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* No-op span guard for when tracing is disabled.
|
||||
* Provides the same interface but does nothing.
|
||||
*/
|
||||
class NullSpanGuard
|
||||
{
|
||||
public:
|
||||
NullSpanGuard() = default;
|
||||
|
||||
void setOk() {}
|
||||
void setStatus(opentelemetry::trace::StatusCode, std::string_view = "") {}
|
||||
|
||||
template<typename T>
|
||||
void setAttribute(std::string_view, T&&) {}
|
||||
|
||||
void addEvent(std::string_view) {}
|
||||
void recordException(std::exception const&) {}
|
||||
};
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
```
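
A brief usage sketch of `SpanGuard`; the function name and attribute values are illustrative:

```cpp
// Sketch: typical SpanGuard usage around a unit of work.
void
buildLedgerExample(xrpl::telemetry::Telemetry& telemetry)
{
    auto span = telemetry.startSpan("ledger.build");
    xrpl::telemetry::SpanGuard guard(std::move(span));

    guard.setAttribute("xrpl.ledger.txn_count", static_cast<int64_t>(42));

    try
    {
        // ... do the work ...
        guard.setOk();
    }
    catch (std::exception const& e)
    {
        guard.recordException(e);
        throw;  // the guard's destructor still ends the span
    }
}
```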
|
||||
|
||||
---
|
||||
|
||||
## 4.3 Instrumentation Macros
|
||||
|
||||
```cpp
|
||||
// src/xrpld/telemetry/TracingInstrumentation.h
|
||||
#pragma once
|
||||
|
||||
#include <xrpl/telemetry/Telemetry.h>
#include <xrpl/telemetry/SpanGuard.h>

#include <optional>
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// INSTRUMENTATION MACROS
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
#ifdef XRPL_ENABLE_TELEMETRY
|
||||
|
||||
// Start a span that is automatically ended when guard goes out of scope
|
||||
#define XRPL_TRACE_SPAN(telemetry, name) \
|
||||
auto _xrpl_span_ = (telemetry).startSpan(name); \
|
||||
::xrpl::telemetry::SpanGuard _xrpl_guard_(_xrpl_span_)
|
||||
|
||||
// Start a span with specific kind
|
||||
#define XRPL_TRACE_SPAN_KIND(telemetry, name, kind) \
|
||||
auto _xrpl_span_ = (telemetry).startSpan(name, kind); \
|
||||
::xrpl::telemetry::SpanGuard _xrpl_guard_(_xrpl_span_)
|
||||
|
||||
// Conditional span based on component
|
||||
#define XRPL_TRACE_TX(telemetry, name) \
|
||||
std::optional<::xrpl::telemetry::SpanGuard> _xrpl_guard_; \
|
||||
if ((telemetry).shouldTraceTransactions()) { \
|
||||
_xrpl_guard_.emplace((telemetry).startSpan(name)); \
|
||||
}
|
||||
|
||||
#define XRPL_TRACE_CONSENSUS(telemetry, name) \
|
||||
std::optional<::xrpl::telemetry::SpanGuard> _xrpl_guard_; \
|
||||
if ((telemetry).shouldTraceConsensus()) { \
|
||||
_xrpl_guard_.emplace((telemetry).startSpan(name)); \
|
||||
}
|
||||
|
||||
#define XRPL_TRACE_RPC(telemetry, name) \
|
||||
std::optional<::xrpl::telemetry::SpanGuard> _xrpl_guard_; \
|
||||
if ((telemetry).shouldTraceRpc()) { \
|
||||
_xrpl_guard_.emplace((telemetry).startSpan(name)); \
|
||||
}
|
||||
|
||||
// Set attribute on the current span, if one exists.
// Note: expects the std::optional guard declared by one of the conditional
// macros above (XRPL_TRACE_TX / XRPL_TRACE_CONSENSUS / XRPL_TRACE_RPC).
|
||||
#define XRPL_TRACE_SET_ATTR(key, value) \
|
||||
if (_xrpl_guard_.has_value()) { \
|
||||
_xrpl_guard_->setAttribute(key, value); \
|
||||
}
|
||||
|
||||
// Record exception on current span
|
||||
#define XRPL_TRACE_EXCEPTION(e) \
|
||||
if (_xrpl_guard_.has_value()) { \
|
||||
_xrpl_guard_->recordException(e); \
|
||||
}
|
||||
|
||||
#else // XRPL_ENABLE_TELEMETRY not defined
|
||||
|
||||
#define XRPL_TRACE_SPAN(telemetry, name) ((void)0)
|
||||
#define XRPL_TRACE_SPAN_KIND(telemetry, name, kind) ((void)0)
|
||||
#define XRPL_TRACE_TX(telemetry, name) ((void)0)
|
||||
#define XRPL_TRACE_CONSENSUS(telemetry, name) ((void)0)
|
||||
#define XRPL_TRACE_RPC(telemetry, name) ((void)0)
|
||||
#define XRPL_TRACE_SET_ATTR(key, value) ((void)0)
|
||||
#define XRPL_TRACE_EXCEPTION(e) ((void)0)
|
||||
|
||||
#endif // XRPL_ENABLE_TELEMETRY
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
```
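
A usage sketch of the conditional macros; the surrounding function is hypothetical:

```cpp
// Sketch: conditional transaction span with an attribute and exception capture.
void
exampleProcessTx(
    xrpl::telemetry::Telemetry& telemetry,
    std::string const& txHash)
{
    XRPL_TRACE_TX(telemetry, "tx.process");
    XRPL_TRACE_SET_ATTR("xrpl.tx.hash", txHash);

    try
    {
        // ... existing processing logic ...
    }
    catch (std::exception const& e)
    {
        XRPL_TRACE_EXCEPTION(e);
        throw;
    }
}
```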
|
||||
|
||||
---
|
||||
|
||||
## 4.4 Protocol Buffer Extensions
|
||||
|
||||
### 4.4.1 TraceContext Message Definition
|
||||
|
||||
Add to `src/xrpld/overlay/detail/ripple.proto`:
|
||||
|
||||
```protobuf
|
||||
// Trace context for distributed tracing across nodes
|
||||
// Uses W3C Trace Context format internally
|
||||
message TraceContext {
|
||||
// 16-byte trace identifier (required for valid context)
|
||||
bytes trace_id = 1;
|
||||
|
||||
// 8-byte span identifier of parent span
|
||||
bytes span_id = 2;
|
||||
|
||||
// Trace flags (bit 0 = sampled)
|
||||
uint32 trace_flags = 3;
|
||||
|
||||
// W3C tracestate header value for vendor-specific data
|
||||
string trace_state = 4;
|
||||
}
|
||||
|
||||
// Extend existing messages with optional trace context
|
||||
// High field numbers (1000+) to avoid conflicts
|
||||
|
||||
message TMTransaction {
|
||||
// ... existing fields ...
|
||||
|
||||
// Optional trace context for distributed tracing
|
||||
optional TraceContext trace_context = 1001;
|
||||
}
|
||||
|
||||
message TMProposeSet {
|
||||
// ... existing fields ...
|
||||
optional TraceContext trace_context = 1001;
|
||||
}
|
||||
|
||||
message TMValidation {
|
||||
// ... existing fields ...
|
||||
optional TraceContext trace_context = 1001;
|
||||
}
|
||||
|
||||
message TMGetLedger {
|
||||
// ... existing fields ...
|
||||
optional TraceContext trace_context = 1001;
|
||||
}
|
||||
|
||||
message TMLedgerData {
|
||||
// ... existing fields ...
|
||||
optional TraceContext trace_context = 1001;
|
||||
}
|
||||
```
|
||||
|
||||
### 4.4.2 Context Serialization/Deserialization
|
||||
|
||||
```cpp
|
||||
// include/xrpl/telemetry/TraceContext.h
|
||||
#pragma once
|
||||
|
||||
#include <opentelemetry/context/context.h>
#include <opentelemetry/trace/context.h>       // GetSpan, kSpanKey
#include <opentelemetry/trace/default_span.h>  // DefaultSpan
#include <opentelemetry/trace/span_context.h>

#include <protocol/messages.h> // Generated protobuf
|
||||
|
||||
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
/**
|
||||
* Utilities for trace context serialization and propagation.
|
||||
*/
|
||||
class TraceContextPropagator
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* Extract trace context from Protocol Buffer message.
|
||||
* Returns empty context if no trace info present.
|
||||
*/
|
||||
static opentelemetry::context::Context
|
||||
extract(protocol::TraceContext const& proto);
|
||||
|
||||
/**
|
||||
* Inject current trace context into Protocol Buffer message.
|
||||
*/
|
||||
static void
|
||||
inject(
|
||||
opentelemetry::context::Context const& ctx,
|
||||
protocol::TraceContext& proto);
|
||||
|
||||
/**
|
||||
* Extract trace context from HTTP headers (for RPC).
|
||||
* Supports W3C Trace Context (traceparent, tracestate).
|
||||
*/
|
||||
static opentelemetry::context::Context
|
||||
extractFromHeaders(
|
||||
std::function<std::optional<std::string>(std::string_view)> headerGetter);
|
||||
|
||||
/**
|
||||
* Inject trace context into HTTP headers (for RPC responses).
|
||||
*/
|
||||
static void
|
||||
injectToHeaders(
|
||||
opentelemetry::context::Context const& ctx,
|
||||
std::function<void(std::string_view, std::string_view)> headerSetter);
|
||||
};
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// IMPLEMENTATION
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
inline opentelemetry::context::Context
|
||||
TraceContextPropagator::extract(protocol::TraceContext const& proto)
|
||||
{
|
||||
using namespace opentelemetry::trace;
|
||||
|
||||
if (proto.trace_id().size() != 16 || proto.span_id().size() != 8)
|
||||
return opentelemetry::context::Context{}; // Invalid, return empty
|
||||
|
||||
// Construct TraceId and SpanId from the raw bytes
// (both constructors take a fixed-size nostd::span)
TraceId traceId(opentelemetry::nostd::span<uint8_t const, TraceId::kSize>(
    reinterpret_cast<uint8_t const*>(proto.trace_id().data()),
    TraceId::kSize));
SpanId spanId(opentelemetry::nostd::span<uint8_t const, SpanId::kSize>(
    reinterpret_cast<uint8_t const*>(proto.span_id().data()),
    SpanId::kSize));
TraceFlags flags(static_cast<uint8_t>(proto.trace_flags()));
|
||||
|
||||
// Create SpanContext from extracted data
|
||||
SpanContext spanContext(traceId, spanId, flags, /* remote = */ true);
|
||||
|
||||
// Create context with extracted span as parent
|
||||
return opentelemetry::context::Context{}.SetValue(
|
||||
opentelemetry::trace::kSpanKey,
|
||||
opentelemetry::nostd::shared_ptr<Span>(
|
||||
new DefaultSpan(spanContext)));
|
||||
}
|
||||
|
||||
inline void
|
||||
TraceContextPropagator::inject(
|
||||
opentelemetry::context::Context const& ctx,
|
||||
protocol::TraceContext& proto)
|
||||
{
|
||||
using namespace opentelemetry::trace;
|
||||
|
||||
// Get current span from context
|
||||
auto span = GetSpan(ctx);
|
||||
if (!span)
|
||||
return;
|
||||
|
||||
auto const& spanCtx = span->GetContext();
|
||||
if (!spanCtx.IsValid())
|
||||
return;
|
||||
|
||||
// Serialize trace_id (16 bytes)
|
||||
auto const& traceId = spanCtx.trace_id();
|
||||
proto.set_trace_id(traceId.Id().data(), TraceId::kSize);
|
||||
|
||||
// Serialize span_id (8 bytes)
|
||||
auto const& spanId = spanCtx.span_id();
|
||||
proto.set_span_id(spanId.Id().data(), SpanId::kSize);
|
||||
|
||||
// Serialize flags
|
||||
proto.set_trace_flags(spanCtx.trace_flags().flags());
|
||||
|
||||
// Note: tracestate not implemented yet
|
||||
}
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
```
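
A short usage sketch tying the propagator to the protobuf extension from 4.4.1; `mutable_trace_context()` / `has_trace_context()` are the accessors protoc generates for the optional message field:

```cpp
#include <opentelemetry/context/runtime_context.h>
#include <xrpl/telemetry/TraceContext.h>

// Sketch: stamping the current trace context onto an outgoing TMTransaction.
void
stampOutgoing(protocol::TMTransaction& msg)
{
    xrpl::telemetry::TraceContextPropagator::inject(
        opentelemetry::context::RuntimeContext::GetCurrent(),
        *msg.mutable_trace_context());
}

// Sketch: recovering the remote parent context on receipt.
opentelemetry::context::Context
parentOf(protocol::TMTransaction const& msg)
{
    if (msg.has_trace_context())
        return xrpl::telemetry::TraceContextPropagator::extract(
            msg.trace_context());
    return opentelemetry::context::Context{};
}
```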
|
||||
|
||||
---
|
||||
|
||||
## 4.5 Module-Specific Instrumentation
|
||||
|
||||
### 4.5.1 Transaction Relay Instrumentation
|
||||
|
||||
```cpp
|
||||
// src/xrpld/overlay/detail/PeerImp.cpp (modified)
|
||||
|
||||
#include <xrpl/telemetry/TracingInstrumentation.h>
|
||||
|
||||
void
|
||||
PeerImp::handleTransaction(
|
||||
std::shared_ptr<protocol::TMTransaction> const& m)
|
||||
{
|
||||
// Extract trace context from incoming message
|
||||
opentelemetry::context::Context parentCtx;
|
||||
if (m->has_trace_context())
|
||||
{
|
||||
parentCtx = telemetry::TraceContextPropagator::extract(
|
||||
m->trace_context());
|
||||
}
|
||||
|
||||
// Start span as child of remote span (cross-node link)
|
||||
auto span = app_.getTelemetry().startSpan(
|
||||
"tx.receive",
|
||||
parentCtx,
|
||||
opentelemetry::trace::SpanKind::kServer);
|
||||
telemetry::SpanGuard guard(span);
|
||||
|
||||
try
|
||||
{
|
||||
// Parse and validate transaction
|
||||
SerialIter sit(makeSlice(m->rawtransaction()));
|
||||
auto stx = std::make_shared<STTx const>(sit);
|
||||
|
||||
// Add transaction attributes
|
||||
guard.setAttribute("xrpl.tx.hash", to_string(stx->getTransactionID()));
|
||||
guard.setAttribute("xrpl.tx.type", stx->getTxnType());
|
||||
guard.setAttribute("xrpl.peer.id", remote_address_.to_string());
|
||||
|
||||
// Check if we've seen this transaction (HashRouter)
|
||||
auto const [flags, suppressed] =
|
||||
app_.getHashRouter().addSuppressionPeer(
|
||||
stx->getTransactionID(),
|
||||
id_);
|
||||
|
||||
if (suppressed)
|
||||
{
|
||||
guard.setAttribute("xrpl.tx.suppressed", true);
|
||||
guard.addEvent("tx.duplicate");
|
||||
return; // Already processing this transaction
|
||||
}
|
||||
|
||||
// Create child span for validation
|
||||
{
|
||||
auto validateSpan = app_.getTelemetry().startSpan("tx.validate");
|
||||
telemetry::SpanGuard validateGuard(validateSpan);
|
||||
|
||||
auto [validity, reason] = checkTransaction(stx);
|
||||
validateGuard.setAttribute("xrpl.tx.validity",
|
||||
validity == Validity::Valid ? "valid" : "invalid");
|
||||
|
||||
if (validity != Validity::Valid)
|
||||
{
|
||||
validateGuard.setStatus(
|
||||
opentelemetry::trace::StatusCode::kError,
|
||||
reason);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Relay to other peers (capture context for propagation)
|
||||
auto ctx = guard.context();
|
||||
|
||||
// Create child span for relay
|
||||
auto relaySpan = app_.getTelemetry().startSpan(
|
||||
"tx.relay",
|
||||
ctx,
|
||||
opentelemetry::trace::SpanKind::kClient);
|
||||
{
|
||||
telemetry::SpanGuard relayGuard(relaySpan);
|
||||
|
||||
// Inject context into outgoing message
|
||||
protocol::TraceContext protoCtx;
|
||||
telemetry::TraceContextPropagator::inject(
|
||||
relayGuard.context(), protoCtx);
|
||||
|
||||
// Relay to other peers
|
||||
app_.overlay().relay(
|
||||
stx->getTransactionID(),
|
||||
*m,
|
||||
protoCtx, // Pass trace context
|
||||
exclusions);
|
||||
|
||||
relayGuard.setAttribute("xrpl.tx.relay_count",
|
||||
static_cast<int64_t>(relayCount));
|
||||
}
|
||||
|
||||
guard.setOk();
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
guard.recordException(e);
|
||||
JLOG(journal_.warn()) << "Transaction handling failed: " << e.what();
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 4.5.2 Consensus Instrumentation
|
||||
|
||||
```cpp
|
||||
// src/xrpld/app/consensus/RCLConsensus.cpp (modified)
|
||||
|
||||
#include <xrpl/telemetry/TracingInstrumentation.h>
|
||||
|
||||
void
|
||||
RCLConsensusAdaptor::startRound(
|
||||
NetClock::time_point const& now,
|
||||
RCLCxLedger::ID const& prevLedgerHash,
|
||||
RCLCxLedger const& prevLedger,
|
||||
hash_set<NodeID> const& peers,
|
||||
bool proposing)
|
||||
{
|
||||
XRPL_TRACE_CONSENSUS(app_.getTelemetry(), "consensus.round");
|
||||
|
||||
XRPL_TRACE_SET_ATTR("xrpl.consensus.ledger.prev", to_string(prevLedgerHash));
|
||||
XRPL_TRACE_SET_ATTR("xrpl.consensus.ledger.seq",
|
||||
static_cast<int64_t>(prevLedger.seq() + 1));
|
||||
XRPL_TRACE_SET_ATTR("xrpl.consensus.proposers",
|
||||
static_cast<int64_t>(peers.size()));
|
||||
XRPL_TRACE_SET_ATTR("xrpl.consensus.mode",
|
||||
proposing ? "proposing" : "observing");
|
||||
|
||||
// Store trace context for use in phase transitions
|
||||
currentRoundContext_ = _xrpl_guard_.has_value()
|
||||
? _xrpl_guard_->context()
|
||||
: opentelemetry::context::Context{};
|
||||
|
||||
// ... existing implementation ...
|
||||
}
|
||||
|
||||
ConsensusPhase
|
||||
RCLConsensusAdaptor::phaseTransition(ConsensusPhase newPhase)
|
||||
{
|
||||
// Create span for phase transition
|
||||
auto span = app_.getTelemetry().startSpan(
|
||||
"consensus.phase." + to_string(newPhase),
|
||||
currentRoundContext_);
|
||||
telemetry::SpanGuard guard(span);
|
||||
|
||||
guard.setAttribute("xrpl.consensus.phase", to_string(newPhase));
|
||||
guard.addEvent("phase.enter");
|
||||
|
||||
auto const startTime = std::chrono::steady_clock::now();
|
||||
|
||||
try
|
||||
{
|
||||
auto result = doPhaseTransition(newPhase);
|
||||
|
||||
auto const duration = std::chrono::steady_clock::now() - startTime;
|
||||
guard.setAttribute("xrpl.consensus.phase_duration_ms",
|
||||
std::chrono::duration<double, std::milli>(duration).count());
|
||||
|
||||
guard.setOk();
|
||||
return result;
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
guard.recordException(e);
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
RCLConsensusAdaptor::peerProposal(
|
||||
NetClock::time_point const& now,
|
||||
RCLCxPeerPos const& proposal)
|
||||
{
|
||||
// Extract trace context from proposal message
|
||||
opentelemetry::context::Context parentCtx;
|
||||
if (proposal.hasTraceContext())
|
||||
{
|
||||
parentCtx = telemetry::TraceContextPropagator::extract(
|
||||
proposal.traceContext());
|
||||
}
|
||||
|
||||
auto span = app_.getTelemetry().startSpan(
|
||||
"consensus.proposal.receive",
|
||||
parentCtx,
|
||||
opentelemetry::trace::SpanKind::kServer);
|
||||
telemetry::SpanGuard guard(span);
|
||||
|
||||
guard.setAttribute("xrpl.consensus.proposer",
|
||||
toBase58(TokenType::NodePublic, proposal.nodeId()));
|
||||
guard.setAttribute("xrpl.consensus.round",
|
||||
static_cast<int64_t>(proposal.proposal().proposeSeq()));
|
||||
|
||||
// ... existing implementation ...
|
||||
|
||||
guard.setOk();
|
||||
}
|
||||
```
|
||||
|
||||
### 4.5.3 RPC Handler Instrumentation
|
||||
|
||||
```cpp
|
||||
// src/xrpld/rpc/detail/ServerHandler.cpp (modified)
|
||||
|
||||
#include <xrpl/telemetry/TracingInstrumentation.h>
|
||||
|
||||
void
|
||||
ServerHandler::onRequest(
|
||||
http_request_type&& req,
|
||||
std::function<void(http_response_type&&)>&& send)
|
||||
{
|
||||
// Extract trace context from HTTP headers (W3C Trace Context)
|
||||
auto parentCtx = telemetry::TraceContextPropagator::extractFromHeaders(
|
||||
[&req](std::string_view name) -> std::optional<std::string> {
|
||||
auto it = req.find(
    boost::beast::string_view(name.data(), name.size()));
|
||||
if (it != req.end())
|
||||
return std::string(it->value());
|
||||
return std::nullopt;
|
||||
});
|
||||
|
||||
// Start request span
|
||||
auto span = app_.getTelemetry().startSpan(
|
||||
"rpc.request",
|
||||
parentCtx,
|
||||
opentelemetry::trace::SpanKind::kServer);
|
||||
telemetry::SpanGuard guard(span);
|
||||
|
||||
// Add HTTP attributes
|
||||
guard.setAttribute("http.method", std::string(req.method_string()));
|
||||
guard.setAttribute("http.target", std::string(req.target()));
|
||||
guard.setAttribute("http.user_agent",
|
||||
std::string(req[boost::beast::http::field::user_agent]));
|
||||
|
||||
auto const startTime = std::chrono::steady_clock::now();
|
||||
|
||||
try
|
||||
{
|
||||
// Parse and process request
|
||||
auto const& body = req.body();
|
||||
Json::Value jv;
|
||||
Json::Reader reader;
|
||||
|
||||
if (!reader.parse(body, jv))
|
||||
{
|
||||
guard.setStatus(
|
||||
opentelemetry::trace::StatusCode::kError,
|
||||
"Invalid JSON");
|
||||
sendError(send, "Invalid JSON");
|
||||
return;
|
||||
}
|
||||
|
||||
// Extract command name
|
||||
std::string command = jv.isMember("command")
|
||||
? jv["command"].asString()
|
||||
: jv.isMember("method")
|
||||
? jv["method"].asString()
|
||||
: "unknown";
|
||||
|
||||
guard.setAttribute("xrpl.rpc.command", command);
|
||||
|
||||
// Create child span for command execution
|
||||
auto cmdSpan = app_.getTelemetry().startSpan(
|
||||
"rpc.command." + command);
|
||||
{
|
||||
telemetry::SpanGuard cmdGuard(cmdSpan);
|
||||
|
||||
// Execute RPC command
|
||||
auto result = processRequest(jv);
|
||||
|
||||
// Record result attributes
|
||||
if (result.isMember("status"))
|
||||
{
|
||||
cmdGuard.setAttribute("xrpl.rpc.status",
|
||||
result["status"].asString());
|
||||
}
|
||||
|
||||
if (result["status"].asString() == "error")
|
||||
{
|
||||
cmdGuard.setStatus(
|
||||
opentelemetry::trace::StatusCode::kError,
|
||||
result.isMember("error_message")
|
||||
? result["error_message"].asString()
|
||||
: "RPC error");
|
||||
}
|
||||
else
|
||||
{
|
||||
cmdGuard.setOk();
|
||||
}
|
||||
}
|
||||
|
||||
auto const duration = std::chrono::steady_clock::now() - startTime;
|
||||
guard.setAttribute("http.duration_ms",
|
||||
std::chrono::duration<double, std::milli>(duration).count());
|
||||
|
||||
// Inject trace context into response headers
|
||||
http_response_type resp;
|
||||
telemetry::TraceContextPropagator::injectToHeaders(
|
||||
guard.context(),
|
||||
[&resp](std::string_view name, std::string_view value) {
|
||||
resp.set(std::string(name), std::string(value));
|
||||
});
|
||||
|
||||
guard.setOk();
|
||||
send(std::move(resp));
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
guard.recordException(e);
|
||||
JLOG(journal_.error()) << "RPC request failed: " << e.what();
|
||||
sendError(send, e.what());
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 4.5.4 JobQueue Context Propagation
|
||||
|
||||
```cpp
|
||||
// src/xrpld/core/JobQueue.h (modified)
|
||||
|
||||
#include <opentelemetry/context/context.h>
|
||||
|
||||
class Job
|
||||
{
|
||||
// ... existing members ...
|
||||
|
||||
// Captured trace context at job creation
|
||||
opentelemetry::context::Context traceContext_;
|
||||
|
||||
public:
|
||||
// Constructor captures current trace context
|
||||
Job(JobType type, std::function<void()> func, ...)
|
||||
: type_(type)
|
||||
, func_(std::move(func))
|
||||
, traceContext_(opentelemetry::context::RuntimeContext::GetCurrent())
|
||||
// ... other initializations ...
|
||||
{
|
||||
}
|
||||
|
||||
// Get trace context for restoration during execution
|
||||
opentelemetry::context::Context const&
|
||||
traceContext() const { return traceContext_; }
|
||||
};
|
||||
|
||||
// src/xrpld/core/JobQueue.cpp (modified)
|
||||
|
||||
void
|
||||
Worker::run()
|
||||
{
|
||||
while (auto job = getJob())
|
||||
{
|
||||
// Restore trace context from job creation
|
||||
auto token = opentelemetry::context::RuntimeContext::Attach(
|
||||
job->traceContext());
|
||||
|
||||
// Start execution span
|
||||
auto span = app_.getTelemetry().startSpan("job.execute");
|
||||
telemetry::SpanGuard guard(span);
|
||||
|
||||
guard.setAttribute("xrpl.job.type", to_string(job->type()));
|
||||
guard.setAttribute("xrpl.job.queue_ms", job->queueTimeMs());
|
||||
guard.setAttribute("xrpl.job.worker", workerId_);
|
||||
|
||||
try
|
||||
{
|
||||
job->execute();
|
||||
guard.setOk();
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
guard.recordException(e);
|
||||
JLOG(journal_.error()) << "Job execution failed: " << e.what();
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 4.6 Span Flow Visualization
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph Client["External Client"]
|
||||
submit["Submit TX"]
|
||||
end
|
||||
|
||||
subgraph NodeA["rippled Node A"]
|
||||
rpcA["rpc.request"]
|
||||
cmdA["rpc.command.submit"]
|
||||
txRecvA["tx.receive"]
|
||||
txValA["tx.validate"]
|
||||
txRelayA["tx.relay"]
|
||||
end
|
||||
|
||||
subgraph NodeB["rippled Node B"]
|
||||
txRecvB["tx.receive"]
|
||||
txValB["tx.validate"]
|
||||
txRelayB["tx.relay"]
|
||||
end
|
||||
|
||||
subgraph NodeC["rippled Node C"]
|
||||
txRecvC["tx.receive"]
|
||||
consensusC["consensus.round"]
|
||||
phaseC["consensus.phase.establish"]
|
||||
end
|
||||
|
||||
submit --> rpcA
|
||||
rpcA --> cmdA
|
||||
cmdA --> txRecvA
|
||||
txRecvA --> txValA
|
||||
txValA --> txRelayA
|
||||
txRelayA -.->|"TraceContext"| txRecvB
|
||||
txRecvB --> txValB
|
||||
txValB --> txRelayB
|
||||
txRelayB -.->|"TraceContext"| txRecvC
|
||||
txRecvC --> consensusC
|
||||
consensusC --> phaseC
|
||||
|
||||
style rpcA fill:#e3f2fd,stroke:#1976d2
|
||||
style txRecvA fill:#e8f5e9,stroke:#388e3c
|
||||
style txRecvB fill:#e8f5e9,stroke:#388e3c
|
||||
style txRecvC fill:#e8f5e9,stroke:#388e3c
|
||||
style consensusC fill:#fff3e0,stroke:#ff9800
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
*Previous: [Implementation Strategy](./03-implementation-strategy.md)* | *Next: [Configuration Reference](./05-configuration-reference.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*
|
||||
@@ -1,936 +0,0 @@
|
||||
# Configuration Reference
|
||||
|
||||
> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
|
||||
> **Related**: [Code Samples](./04-code-samples.md) | [Implementation Phases](./06-implementation-phases.md)
|
||||
|
||||
---
|
||||
|
||||
## 5.1 rippled Configuration
|
||||
|
||||
### 5.1.1 Configuration File Section
|
||||
|
||||
Add to `cfg/xrpld-example.cfg`:
|
||||
|
||||
```ini
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
# TELEMETRY (OpenTelemetry Distributed Tracing)
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
#
|
||||
# Enables distributed tracing for transaction flow, consensus, and RPC calls.
|
||||
# Traces are exported to an OpenTelemetry Collector using OTLP protocol.
|
||||
#
|
||||
# [telemetry]
|
||||
#
|
||||
# # Enable/disable telemetry (default: 0 = disabled)
|
||||
# enabled=1
|
||||
#
|
||||
# # Exporter type: "otlp_grpc" (default), "otlp_http", or "none"
|
||||
# exporter=otlp_grpc
|
||||
#
|
||||
# # OTLP endpoint (default: localhost:4317 for gRPC, localhost:4318 for HTTP)
|
||||
# endpoint=localhost:4317
|
||||
#
|
||||
# # Use TLS for exporter connection (default: 0)
|
||||
# use_tls=0
|
||||
#
|
||||
# # Path to CA certificate for TLS (optional)
|
||||
# # tls_ca_cert=/path/to/ca.crt
|
||||
#
|
||||
# # Sampling ratio: 0.0-1.0 (default: 1.0 = 100% sampling)
|
||||
# # Use lower values in production to reduce overhead
|
||||
# sampling_ratio=0.1
|
||||
#
|
||||
# # Batch processor settings
|
||||
# batch_size=512 # Spans per batch (default: 512)
|
||||
# batch_delay_ms=5000 # Max delay before sending batch (default: 5000)
|
||||
# max_queue_size=2048 # Max queued spans (default: 2048)
|
||||
#
|
||||
# # Component-specific tracing (default: all enabled except peer)
|
||||
# trace_transactions=1 # Transaction relay and processing
|
||||
# trace_consensus=1 # Consensus rounds and proposals
|
||||
# trace_rpc=1 # RPC request handling
|
||||
# trace_peer=0 # Peer messages (high volume, disabled by default)
|
||||
# trace_ledger=1 # Ledger acquisition and building
|
||||
#
|
||||
# # Service identification (automatically detected if not specified)
|
||||
# # service_name=rippled
|
||||
# # service_instance_id=<node_public_key>
|
||||
|
||||
[telemetry]
|
||||
enabled=0
|
||||
```
|
||||
|
||||
### 5.1.2 Configuration Options Summary
|
||||
|
||||
| Option | Type | Default | Description |
|
||||
| --------------------- | ------ | ---------------- | ----------------------------------------- |
|
||||
| `enabled` | bool | `false` | Enable/disable telemetry |
|
||||
| `exporter` | string | `"otlp_grpc"` | Exporter type: otlp_grpc, otlp_http, none |
|
||||
| `endpoint` | string | `localhost:4317` | OTLP collector endpoint |
|
||||
| `use_tls` | bool | `false` | Enable TLS for exporter connection |
|
||||
| `tls_ca_cert` | string | `""` | Path to CA certificate file |
|
||||
| `sampling_ratio` | float | `1.0` | Sampling ratio (0.0-1.0) |
|
||||
| `batch_size` | uint | `512` | Spans per export batch |
|
||||
| `batch_delay_ms` | uint | `5000` | Max delay before sending batch (ms) |
|
||||
| `max_queue_size` | uint | `2048` | Maximum queued spans |
|
||||
| `trace_transactions` | bool | `true` | Enable transaction tracing |
|
||||
| `trace_consensus` | bool | `true` | Enable consensus tracing |
|
||||
| `trace_rpc` | bool | `true` | Enable RPC tracing |
|
||||
| `trace_peer` | bool | `false` | Enable peer message tracing (high volume) |
|
||||
| `trace_ledger` | bool | `true` | Enable ledger tracing |
|
||||
| `service_name` | string | `"rippled"` | Service name for traces |
|
||||
| `service_instance_id` | string | `<node_pubkey>` | Instance identifier |
|
||||
|
||||
---
|
||||
|
||||
## 5.2 Configuration Parser
|
||||
|
||||
```cpp
|
||||
// src/libxrpl/telemetry/TelemetryConfig.cpp
|
||||
|
||||
#include <xrpl/telemetry/Telemetry.h>
|
||||
#include <xrpl/basics/Log.h>
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
Telemetry::Setup
|
||||
setup_Telemetry(
|
||||
Section const& section,
|
||||
std::string const& nodePublicKey,
|
||||
std::string const& version)
|
||||
{
|
||||
Telemetry::Setup setup;
|
||||
|
||||
// Basic settings
|
||||
setup.enabled = section.value_or("enabled", false);
|
||||
setup.serviceName = section.value_or("service_name", "rippled");
|
||||
setup.serviceVersion = version;
|
||||
setup.serviceInstanceId = section.value_or(
|
||||
"service_instance_id", nodePublicKey);
|
||||
|
||||
// Exporter settings
|
||||
setup.exporterType = section.value_or("exporter", "otlp_grpc");
|
||||
|
||||
if (setup.exporterType == "otlp_grpc")
|
||||
setup.exporterEndpoint = section.value_or("endpoint", "localhost:4317");
|
||||
else if (setup.exporterType == "otlp_http")
|
||||
setup.exporterEndpoint = section.value_or("endpoint", "localhost:4318");
|
||||
|
||||
setup.useTls = section.value_or("use_tls", false);
|
||||
setup.tlsCertPath = section.value_or("tls_ca_cert", "");
|
||||
|
||||
// Sampling
|
||||
setup.samplingRatio = section.value_or("sampling_ratio", 1.0);
|
||||
if (setup.samplingRatio < 0.0 || setup.samplingRatio > 1.0)
|
||||
{
|
||||
Throw<std::runtime_error>(
|
||||
"telemetry.sampling_ratio must be between 0.0 and 1.0");
|
||||
}
|
||||
|
||||
// Batch processor
|
||||
setup.batchSize = section.value_or("batch_size", 512u);
|
||||
setup.batchDelay = std::chrono::milliseconds{
|
||||
section.value_or("batch_delay_ms", 5000u)};
|
||||
setup.maxQueueSize = section.value_or("max_queue_size", 2048u);
|
||||
|
||||
// Component filtering
|
||||
setup.traceTransactions = section.value_or("trace_transactions", true);
|
||||
setup.traceConsensus = section.value_or("trace_consensus", true);
|
||||
setup.traceRpc = section.value_or("trace_rpc", true);
|
||||
setup.tracePeer = section.value_or("trace_peer", false);
|
||||
setup.traceLedger = section.value_or("trace_ledger", true);
|
||||
|
||||
return setup;
|
||||
}
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5.3 Application Integration
|
||||
|
||||
### 5.3.1 ApplicationImp Changes
|
||||
|
||||
```cpp
|
||||
// src/xrpld/app/main/Application.cpp (modified)
|
||||
|
||||
#include <xrpl/telemetry/Telemetry.h>
|
||||
|
||||
class ApplicationImp : public Application
|
||||
{
|
||||
// ... existing members ...
|
||||
|
||||
// Telemetry (must be constructed early, destroyed late)
|
||||
std::unique_ptr<telemetry::Telemetry> telemetry_;
|
||||
|
||||
public:
|
||||
ApplicationImp(...)
|
||||
{
|
||||
// Initialize telemetry early (before other components)
|
||||
auto telemetrySection = config_->section("telemetry");
|
||||
auto telemetrySetup = telemetry::setup_Telemetry(
|
||||
telemetrySection,
|
||||
toBase58(TokenType::NodePublic, nodeIdentity_.publicKey()),
|
||||
BuildInfo::getVersionString());
|
||||
|
||||
// Set network attributes
|
||||
telemetrySetup.networkId = config_->NETWORK_ID;
|
||||
telemetrySetup.networkType = [&]() {
|
||||
if (config_->NETWORK_ID == 0) return "mainnet";
|
||||
if (config_->NETWORK_ID == 1) return "testnet";
|
||||
if (config_->NETWORK_ID == 2) return "devnet";
|
||||
return "custom";
|
||||
}();
|
||||
|
||||
telemetry_ = telemetry::make_Telemetry(
|
||||
telemetrySetup,
|
||||
logs_->journal("Telemetry"));
|
||||
|
||||
// ... rest of initialization ...
|
||||
}
|
||||
|
||||
void start() override
|
||||
{
|
||||
// Start telemetry first
|
||||
if (telemetry_)
|
||||
telemetry_->start();
|
||||
|
||||
// ... existing start code ...
|
||||
}
|
||||
|
||||
void stop() override
|
||||
{
|
||||
// ... existing stop code ...
|
||||
|
||||
// Stop telemetry last (to capture shutdown spans)
|
||||
if (telemetry_)
|
||||
telemetry_->stop();
|
||||
}
|
||||
|
||||
telemetry::Telemetry& getTelemetry() override
|
||||
{
|
||||
assert(telemetry_);
|
||||
return *telemetry_;
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### 5.3.2 Application Interface Addition
|
||||
|
||||
```cpp
|
||||
// include/xrpl/app/main/Application.h (modified)
|
||||
|
||||
namespace telemetry { class Telemetry; }
|
||||
|
||||
class Application
|
||||
{
|
||||
public:
|
||||
// ... existing virtual methods ...
|
||||
|
||||
/** Get the telemetry system for distributed tracing */
|
||||
virtual telemetry::Telemetry& getTelemetry() = 0;
|
||||
};
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5.4 CMake Integration
|
||||
|
||||
### 5.4.1 Find OpenTelemetry Module
|
||||
|
||||
```cmake
|
||||
# cmake/FindOpenTelemetry.cmake
|
||||
|
||||
# Find OpenTelemetry C++ SDK
|
||||
#
|
||||
# This module defines:
|
||||
# OpenTelemetry_FOUND - System has OpenTelemetry
|
||||
# OpenTelemetry::api - API library target
|
||||
# OpenTelemetry::sdk - SDK library target
|
||||
# OpenTelemetry::otlp_grpc_exporter - OTLP gRPC exporter target
|
||||
# OpenTelemetry::otlp_http_exporter - OTLP HTTP exporter target
|
||||
|
||||
find_package(opentelemetry-cpp CONFIG QUIET)
|
||||
|
||||
if(opentelemetry-cpp_FOUND)
|
||||
set(OpenTelemetry_FOUND TRUE)
|
||||
|
||||
# Create imported targets if not already created by config
|
||||
if(NOT TARGET OpenTelemetry::api)
|
||||
add_library(OpenTelemetry::api ALIAS opentelemetry-cpp::api)
|
||||
endif()
|
||||
if(NOT TARGET OpenTelemetry::sdk)
|
||||
add_library(OpenTelemetry::sdk ALIAS opentelemetry-cpp::sdk)
|
||||
endif()
|
||||
if(NOT TARGET OpenTelemetry::otlp_grpc_exporter)
|
||||
add_library(OpenTelemetry::otlp_grpc_exporter ALIAS
|
||||
opentelemetry-cpp::otlp_grpc_exporter)
|
||||
endif()
|
||||
else()
|
||||
# Try pkg-config fallback
|
||||
find_package(PkgConfig QUIET)
|
||||
if(PKG_CONFIG_FOUND)
|
||||
pkg_check_modules(OTEL opentelemetry-cpp QUIET)
|
||||
if(OTEL_FOUND)
|
||||
set(OpenTelemetry_FOUND TRUE)
|
||||
# Create imported targets from pkg-config
|
||||
add_library(OpenTelemetry::api INTERFACE IMPORTED)
|
||||
target_include_directories(OpenTelemetry::api INTERFACE
|
||||
${OTEL_INCLUDE_DIRS})
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(OpenTelemetry
|
||||
REQUIRED_VARS OpenTelemetry_FOUND)
|
||||
```
|
||||
|
||||
### 5.4.2 CMakeLists.txt Changes
|
||||
|
||||
```cmake
|
||||
# CMakeLists.txt (additions)
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
# TELEMETRY OPTIONS
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
option(XRPL_ENABLE_TELEMETRY
|
||||
"Enable OpenTelemetry distributed tracing support" OFF)
|
||||
|
||||
if(XRPL_ENABLE_TELEMETRY)
|
||||
find_package(OpenTelemetry REQUIRED)
|
||||
|
||||
# Define compile-time flag
|
||||
add_compile_definitions(XRPL_ENABLE_TELEMETRY)
|
||||
|
||||
message(STATUS "OpenTelemetry tracing: ENABLED")
|
||||
else()
|
||||
message(STATUS "OpenTelemetry tracing: DISABLED")
|
||||
endif()
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
# TELEMETRY LIBRARY
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
if(XRPL_ENABLE_TELEMETRY)
|
||||
add_library(xrpl_telemetry
|
||||
src/libxrpl/telemetry/Telemetry.cpp
|
||||
src/libxrpl/telemetry/TelemetryConfig.cpp
|
||||
src/libxrpl/telemetry/TraceContext.cpp
|
||||
)
|
||||
|
||||
target_include_directories(xrpl_telemetry
|
||||
PUBLIC
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/include
|
||||
)
|
||||
|
||||
target_link_libraries(xrpl_telemetry
|
||||
PUBLIC
|
||||
OpenTelemetry::api
|
||||
OpenTelemetry::sdk
|
||||
OpenTelemetry::otlp_grpc_exporter
|
||||
PRIVATE
|
||||
xrpl_basics
|
||||
)
|
||||
|
||||
# Add to main library dependencies
|
||||
target_link_libraries(xrpld PRIVATE xrpl_telemetry)
|
||||
else()
|
||||
# Create null implementation library
|
||||
add_library(xrpl_telemetry
|
||||
src/libxrpl/telemetry/NullTelemetry.cpp
|
||||
)
|
||||
target_include_directories(xrpl_telemetry
|
||||
PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include
|
||||
)
|
||||
endif()
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5.5 OpenTelemetry Collector Configuration

### 5.5.1 Development Configuration

```yaml
# otel-collector-dev.yaml
# Minimal configuration for local development

receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318

processors:
  batch:
    timeout: 1s
    send_batch_size: 100

exporters:
  # Console output for debugging
  logging:
    verbosity: detailed
    sampling_initial: 5
    sampling_thereafter: 200

  # Jaeger for trace visualization
  jaeger:
    endpoint: jaeger:14250
    tls:
      insecure: true

service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [batch]
      exporters: [logging, jaeger]
```

### 5.5.2 Production Configuration

```yaml
# otel-collector-prod.yaml
# Production configuration with filtering, sampling, and multiple backends

receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
        tls:
          cert_file: /etc/otel/server.crt
          key_file: /etc/otel/server.key
          ca_file: /etc/otel/ca.crt

processors:
  # Memory limiter to prevent OOM
  memory_limiter:
    check_interval: 1s
    limit_mib: 1000
    spike_limit_mib: 200

  # Batch processing for efficiency
  batch:
    timeout: 5s
    send_batch_size: 512
    send_batch_max_size: 1024

  # Tail-based sampling (keep errors and slow traces)
  tail_sampling:
    decision_wait: 10s
    num_traces: 100000
    expected_new_traces_per_sec: 1000
    policies:
      # Always keep error traces
      - name: errors
        type: status_code
        status_code:
          status_codes: [ERROR]
      # Keep slow consensus rounds (>5s)
      - name: slow-consensus
        type: latency
        latency:
          threshold_ms: 5000
      # Keep slow RPC requests (>1s)
      - name: slow-rpc
        type: and
        and:
          and_sub_policy:
            - name: rpc-spans
              type: string_attribute
              string_attribute:
                key: xrpl.rpc.command
                values: [".*"]
                enabled_regex_matching: true
            - name: latency
              type: latency
              latency:
                threshold_ms: 1000
      # Probabilistic sampling for the rest
      - name: probabilistic
        type: probabilistic
        probabilistic:
          sampling_percentage: 10

  # Attribute processing
  attributes:
    actions:
      # Hash sensitive data
      - key: xrpl.tx.account
        action: hash
      # Add deployment info
      - key: deployment.environment
        value: production
        action: upsert

exporters:
  # Grafana Tempo for long-term storage
  otlp/tempo:
    endpoint: tempo.monitoring:4317
    tls:
      insecure: false
      ca_file: /etc/otel/tempo-ca.crt

  # Elastic APM for correlation with logs
  otlp/elastic:
    endpoint: apm.elastic:8200
    headers:
      Authorization: "Bearer ${ELASTIC_APM_TOKEN}"

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679

service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [otlp]
      processors: [memory_limiter, tail_sampling, attributes, batch]
      exporters: [otlp/tempo, otlp/elastic]
```

---

## 5.6 Docker Compose Development Environment

```yaml
# docker-compose-telemetry.yaml
version: '3.8'

services:
  # OpenTelemetry Collector
  otel-collector:
    image: otel/opentelemetry-collector-contrib:0.92.0
    container_name: otel-collector
    command: ["--config=/etc/otel-collector-config.yaml"]
    volumes:
      - ./otel-collector-dev.yaml:/etc/otel-collector-config.yaml:ro
    ports:
      - "4317:4317"   # OTLP gRPC
      - "4318:4318"   # OTLP HTTP
      - "13133:13133" # Health check
    depends_on:
      - jaeger

  # Jaeger for trace visualization
  jaeger:
    image: jaegertracing/all-in-one:1.53
    container_name: jaeger
    environment:
      - COLLECTOR_OTLP_ENABLED=true
    ports:
      - "16686:16686" # UI
      - "14250:14250" # gRPC

  # Grafana for dashboards
  grafana:
    image: grafana/grafana:10.2.3
    container_name: grafana
    environment:
      - GF_AUTH_ANONYMOUS_ENABLED=true
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
    volumes:
      - ./grafana/provisioning:/etc/grafana/provisioning:ro
      - ./grafana/dashboards:/var/lib/grafana/dashboards:ro
    ports:
      - "3000:3000"
    depends_on:
      - jaeger

  # Prometheus for metrics (optional, for correlation)
  prometheus:
    image: prom/prometheus:v2.48.1
    container_name: prometheus
    volumes:
      - ./prometheus.yaml:/etc/prometheus/prometheus.yml:ro
    ports:
      - "9090:9090"

networks:
  default:
    name: rippled-telemetry
```

---

## 5.7 Configuration Architecture

```mermaid
flowchart TB
    subgraph config["Configuration Sources"]
        cfgFile["xrpld.cfg<br/>[telemetry] section"]
        cmake["CMake<br/>XRPL_ENABLE_TELEMETRY"]
    end

    subgraph init["Initialization"]
        parse["setup_Telemetry()"]
        factory["make_Telemetry()"]
    end

    subgraph runtime["Runtime Components"]
        tracer["TracerProvider"]
        exporter["OTLP Exporter"]
        processor["BatchProcessor"]
    end

    subgraph collector["Collector Pipeline"]
        recv["Receivers"]
        proc["Processors"]
        exp["Exporters"]
    end

    cfgFile --> parse
    cmake -->|"compile flag"| parse
    parse --> factory
    factory --> tracer
    tracer --> processor
    processor --> exporter
    exporter -->|"OTLP"| recv
    recv --> proc
    proc --> exp

    style config fill:#e3f2fd,stroke:#1976d2
    style runtime fill:#e8f5e9,stroke:#388e3c
    style collector fill:#fff3e0,stroke:#ff9800
```

---

## 5.8 Grafana Integration

Step-by-step instructions for integrating rippled traces with Grafana.

### 5.8.1 Data Source Configuration

#### Tempo (Recommended)

```yaml
# grafana/provisioning/datasources/tempo.yaml
apiVersion: 1

datasources:
  - name: Tempo
    type: tempo
    access: proxy
    url: http://tempo:3200
    jsonData:
      httpMethod: GET
      tracesToLogs:
        datasourceUid: loki
        tags: ['service.name', 'xrpl.tx.hash']
        mappedTags: [{ key: 'trace_id', value: 'traceID' }]
        mapTagNamesEnabled: true
        filterByTraceID: true
      serviceMap:
        datasourceUid: prometheus
      nodeGraph:
        enabled: true
      search:
        hide: false
      lokiSearch:
        datasourceUid: loki
```

#### Jaeger

```yaml
# grafana/provisioning/datasources/jaeger.yaml
apiVersion: 1

datasources:
  - name: Jaeger
    type: jaeger
    access: proxy
    url: http://jaeger:16686
    jsonData:
      tracesToLogs:
        datasourceUid: loki
        tags: ['service.name']
```

#### Elastic APM

```yaml
# grafana/provisioning/datasources/elastic-apm.yaml
apiVersion: 1

datasources:
  - name: Elasticsearch-APM
    type: elasticsearch
    access: proxy
    url: http://elasticsearch:9200
    database: "apm-*"
    jsonData:
      esVersion: "8.0.0"
      timeField: "@timestamp"
      logMessageField: message
      logLevelField: log.level
```

### 5.8.2 Dashboard Provisioning

```yaml
# grafana/provisioning/dashboards/dashboards.yaml
apiVersion: 1

providers:
  - name: 'rippled-dashboards'
    orgId: 1
    folder: 'rippled'
    folderUid: 'rippled'
    type: file
    disableDeletion: false
    updateIntervalSeconds: 30
    options:
      path: /var/lib/grafana/dashboards/rippled
```

### 5.8.3 Example Dashboard: RPC Performance

```json
{
  "title": "rippled RPC Performance",
  "uid": "rippled-rpc-performance",
  "panels": [
    {
      "title": "RPC Latency by Command",
      "type": "heatmap",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && span.xrpl.rpc.command != \"\"} | histogram_over_time(duration) by (span.xrpl.rpc.command)"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 }
    },
    {
      "title": "RPC Error Rate",
      "type": "timeseries",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && status.code=error} | rate() by (span.xrpl.rpc.command)"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 }
    },
    {
      "title": "Top 10 Slowest RPC Commands",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && span.xrpl.rpc.command != \"\"} | avg(duration) by (span.xrpl.rpc.command) | topk(10)"
        }
      ],
      "gridPos": { "h": 8, "w": 24, "x": 0, "y": 8 }
    },
    {
      "title": "Recent Traces",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\"}"
        }
      ],
      "gridPos": { "h": 8, "w": 24, "x": 0, "y": 16 }
    }
  ]
}
```

### 5.8.4 Example Dashboard: Transaction Tracing

```json
{
  "title": "rippled Transaction Tracing",
  "uid": "rippled-tx-tracing",
  "panels": [
    {
      "title": "Transaction Throughput",
      "type": "stat",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.receive\"} | rate()"
        }
      ],
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 0 }
    },
    {
      "title": "Cross-Node Relay Count",
      "type": "timeseries",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.relay\"} | avg(span.xrpl.tx.relay_count)"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 4 }
    },
    {
      "title": "Transaction Validation Errors",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.validate\" && status.code=error}"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 4 }
    }
  ]
}
```

### 5.8.5 TraceQL Query Examples

Common queries for rippled traces:

```
# Find all traces for a specific transaction hash
{resource.service.name="rippled" && span.xrpl.tx.hash="ABC123..."}

# Find slow RPC commands (>100ms)
{resource.service.name="rippled" && name=~"rpc.command.*"} | duration > 100ms

# Find consensus rounds taking >5 seconds
{resource.service.name="rippled" && name="consensus.round"} | duration > 5s

# Find failed transactions with error details
{resource.service.name="rippled" && name="tx.validate" && status.code=error}

# Find transactions relayed to many peers
{resource.service.name="rippled" && name="tx.relay"} | span.xrpl.tx.relay_count > 10

# Compare latency across nodes
{resource.service.name="rippled" && name="rpc.command.account_info"} | avg(duration) by (resource.service.instance.id)
```

### 5.8.6 Correlation with PerfLog

To correlate OpenTelemetry traces with existing PerfLog data:

**Step 1: Configure Loki to ingest PerfLog**

```yaml
# promtail-config.yaml
scrape_configs:
  - job_name: rippled-perflog
    static_configs:
      - targets:
          - localhost
        labels:
          job: rippled
          __path__: /var/log/rippled/perf*.log
    pipeline_stages:
      - json:
          expressions:
            trace_id: trace_id
            ledger_seq: ledger_seq
            tx_hash: tx_hash
      - labels:
          trace_id:
          ledger_seq:
          tx_hash:
```

**Step 2: Add trace_id to PerfLog entries**

Modify PerfLog to include trace_id when available:

```cpp
// In PerfLog output, add trace_id from the current span context
void logPerf(Json::Value& entry) {
    auto span = opentelemetry::trace::GetSpan(
        opentelemetry::context::RuntimeContext::GetCurrent());
    if (span && span->GetContext().IsValid()) {
        // 128-bit trace ID as 32 lowercase hex characters (not null-terminated)
        char traceIdHex[32];
        span->GetContext().trace_id().ToLowerBase16(traceIdHex);
        entry["trace_id"] = std::string(traceIdHex, 32);
    }
    // ... existing logging
}
```

**Step 3: Configure Grafana trace-to-logs link**

In the Tempo data source configuration, set up the trace-to-logs mapping:

```yaml
jsonData:
  tracesToLogs:
    datasourceUid: loki
    tags: ['trace_id', 'xrpl.tx.hash']
    filterByTraceID: true
    filterBySpanID: false
```

### 5.8.7 Correlation with Insight/StatsD Metrics

To correlate traces with existing Beast Insight metrics:

**Step 1: Export Insight metrics to Prometheus**

```yaml
# prometheus.yaml
scrape_configs:
  - job_name: 'rippled-statsd'
    static_configs:
      - targets: ['statsd-exporter:9102']
```

**Step 2: Add exemplars to metrics**

The OpenTelemetry SDK can attach exemplars (trace IDs) to metrics exported through its Prometheus exporter, which links metric spikes to the specific traces recorded at the same time.
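
Note that this only applies to measurements recorded through the OpenTelemetry metrics API itself; Insight counters that reach Prometheus via statsd-exporter carry no exemplars. The sketch below is a heavily hedged illustration of what the rippled side would need to do — the instrument and meter names are made up, exemplar support must be enabled in the SDK and exporter, and in real code the instrument would be created once and cached rather than per call:

```cpp
#include <opentelemetry/context/runtime_context.h>
#include <opentelemetry/metrics/provider.h>

// Record an RPC duration through the OTel metrics API while the RPC span is
// still active, so the SDK can attach the current trace ID as an exemplar.
void recordRpcDuration(double elapsedMs)
{
    auto provider = opentelemetry::metrics::Provider::GetMeterProvider();
    auto meter = provider->GetMeter("xrpld");
    auto histogram = meter->CreateDoubleHistogram(
        "rippled.rpc.duration", "RPC handler latency", "ms");

    // Recording inside the active span's context is what allows an exemplar
    // (the current trace ID) to be associated with this data point.
    auto ctx = opentelemetry::context::RuntimeContext::GetCurrent();
    histogram->Record(elapsedMs, ctx);
}
```
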
**Step 3: Configure Grafana metric-to-trace link**

```yaml
# In Prometheus data source
jsonData:
  exemplarTraceIdDestinations:
    - name: trace_id
      datasourceUid: tempo
```

**Step 4: Dashboard panel with exemplars**

```json
{
  "title": "RPC Latency with Trace Links",
  "type": "timeseries",
  "datasource": "Prometheus",
  "targets": [
    {
      "expr": "histogram_quantile(0.99, rate(rippled_rpc_duration_seconds_bucket[5m]))",
      "exemplar": true
    }
  ]
}
```

This allows clicking an exemplar point on a metric panel to jump directly to the related trace.

---

*Previous: [Code Samples](./04-code-samples.md)* | *Next: [Implementation Phases](./06-implementation-phases.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*

# Implementation Phases

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Configuration Reference](./05-configuration-reference.md) | [Observability Backends](./07-observability-backends.md)

---

## 6.1 Phase Overview

```mermaid
gantt
    title OpenTelemetry Implementation Timeline
    dateFormat YYYY-MM-DD
    axisFormat Week %W

    section Phase 1
    Core Infrastructure         :p1, 2024-01-01, 2w
    SDK Integration             :p1a, 2024-01-01, 4d
    Telemetry Interface         :p1b, after p1a, 3d
    Configuration & CMake       :p1c, after p1b, 3d
    Unit Tests                  :p1d, after p1c, 2d

    section Phase 2
    RPC Tracing                 :p2, after p1, 2w
    HTTP Context Extraction     :p2a, after p1, 2d
    RPC Handler Instrumentation :p2b, after p2a, 4d
    WebSocket Support           :p2c, after p2b, 2d
    Integration Tests           :p2d, after p2c, 2d

    section Phase 3
    Transaction Tracing         :p3, after p2, 2w
    Protocol Buffer Extension   :p3a, after p2, 2d
    PeerImp Instrumentation     :p3b, after p3a, 3d
    Relay Context Propagation   :p3c, after p3b, 3d
    Multi-node Tests            :p3d, after p3c, 2d

    section Phase 4
    Consensus Tracing           :p4, after p3, 2w
    Consensus Round Spans       :p4a, after p3, 3d
    Proposal Handling           :p4b, after p4a, 3d
    Validation Tests            :p4c, after p4b, 4d

    section Phase 5
    Documentation & Deploy      :p5, after p4, 1w
```

---

## 6.2 Phase 1: Core Infrastructure (Weeks 1-2)

**Objective**: Establish foundational telemetry infrastructure

### Tasks

| Task | Description                                            | Effort | Risk   |
| ---- | ------------------------------------------------------ | ------ | ------ |
| 1.1  | Add OpenTelemetry C++ SDK to Conan/CMake                | 2d     | Low    |
| 1.2  | Implement `Telemetry` interface and factory             | 2d     | Low    |
| 1.3  | Implement `SpanGuard` RAII wrapper (sketched below)     | 1d     | Low    |
| 1.4  | Implement configuration parser                          | 1d     | Low    |
| 1.5  | Integrate into `ApplicationImp`                         | 1d     | Medium |
| 1.6  | Add conditional compilation (`XRPL_ENABLE_TELEMETRY`)   | 1d     | Low    |
| 1.7  | Create `NullTelemetry` no-op implementation             | 0.5d   | Low    |
| 1.8  | Unit tests for core infrastructure                      | 1.5d   | Low    |

**Total Effort**: 10 days (2 developers)
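
To make task 1.3 concrete, the following is a minimal sketch of what the `SpanGuard` RAII wrapper could look like, assuming the opentelemetry-cpp tracing API. The authoritative interface belongs to [Code Samples](./04-code-samples.md); the class shape here is illustrative only.

```cpp
#include <opentelemetry/trace/scope.h>
#include <opentelemetry/trace/span.h>
#include <opentelemetry/trace/tracer.h>

namespace ripple {

// Opens a span on construction, makes it the current span, and ends it when
// the guard leaves scope -- on every exit path, including early returns.
class SpanGuard
{
public:
    SpanGuard(
        opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer> tracer,
        opentelemetry::nostd::string_view name)
        : span_(tracer->StartSpan(name)), scope_(span_)
    {
    }

    SpanGuard(SpanGuard const&) = delete;
    SpanGuard& operator=(SpanGuard const&) = delete;

    ~SpanGuard()
    {
        span_->End();
    }

    // Mark the span as failed before unwinding.
    void setError(opentelemetry::nostd::string_view description)
    {
        span_->SetStatus(opentelemetry::trace::StatusCode::kError, description);
    }

    opentelemetry::trace::Span& span()
    {
        return *span_;
    }

private:
    opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span> span_;
    opentelemetry::trace::Scope scope_;  // makes the span current so children attach to it
};

}  // namespace ripple
```

A handler would then open a span with a single stack object — for example `SpanGuard guard{tracer, "rpc.command.account_info"};` — and the span is ended without any explicit cleanup code.
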
### Exit Criteria

- [ ] OpenTelemetry SDK compiles and links
- [ ] Telemetry can be enabled/disabled via config
- [ ] Basic span creation works
- [ ] No performance regression when disabled
- [ ] Unit tests passing

---

## 6.3 Phase 2: RPC Tracing (Weeks 3-4)

**Objective**: Complete tracing for all RPC operations

### Tasks

| Task | Description                                                        | Effort | Risk   |
| ---- | ------------------------------------------------------------------ | ------ | ------ |
| 2.1  | Implement W3C Trace Context HTTP header extraction (sketch below)  | 1d     | Low    |
| 2.2  | Instrument `ServerHandler::onRequest()`                            | 1d     | Low    |
| 2.3  | Instrument `RPCHandler::doCommand()`                               | 2d     | Medium |
| 2.4  | Add RPC-specific attributes                                        | 1d     | Low    |
| 2.5  | Instrument WebSocket handler                                       | 1d     | Medium |
| 2.6  | Integration tests for RPC tracing                                  | 2d     | Low    |
| 2.7  | Performance benchmarks                                             | 1d     | Low    |
| 2.8  | Documentation                                                      | 1d     | Low    |

**Total Effort**: 10 days
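
For tasks 2.1–2.2, the sketch below shows one way to extract the W3C `traceparent`/`tracestate` headers and start a server-side span for an RPC request using the opentelemetry-cpp propagation API. The header-map type, the `HeaderCarrier` adapter, and the `startRpcSpan` helper are assumptions for illustration, not the final `ServerHandler` interface; only the `rpc.command.*` span naming and the `xrpl.rpc.command` attribute follow this plan's conventions.

```cpp
#include <opentelemetry/context/propagation/text_map_propagator.h>
#include <opentelemetry/context/runtime_context.h>
#include <opentelemetry/trace/context.h>
#include <opentelemetry/trace/propagation/http_trace_context.h>
#include <opentelemetry/trace/tracer.h>

#include <map>
#include <string>

namespace {

// Adapts a simple header map to the propagator's carrier interface.
class HeaderCarrier : public opentelemetry::context::propagation::TextMapCarrier
{
public:
    explicit HeaderCarrier(std::map<std::string, std::string> const& headers)
        : headers_(headers)
    {
    }

    opentelemetry::nostd::string_view
    Get(opentelemetry::nostd::string_view key) const noexcept override
    {
        auto const it = headers_.find(std::string(key.data(), key.size()));
        if (it == headers_.end())
            return "";
        return {it->second.data(), it->second.size()};
    }

    void
    Set(opentelemetry::nostd::string_view, opentelemetry::nostd::string_view) noexcept override
    {
        // Extraction only; nothing is injected on the server side.
    }

private:
    std::map<std::string, std::string> const& headers_;
};

}  // namespace

opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>
startRpcSpan(
    opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer> tracer,
    std::map<std::string, std::string> const& headers,
    std::string const& command)
{
    namespace trace = opentelemetry::trace;

    // Pull traceparent/tracestate (if present) out of the request headers.
    HeaderCarrier carrier{headers};
    trace::propagation::HttpTraceContext propagator;
    auto current = opentelemetry::context::RuntimeContext::GetCurrent();
    auto extracted = propagator.Extract(carrier, current);

    trace::StartSpanOptions opts;
    opts.kind = trace::SpanKind::kServer;
    opts.parent = trace::GetSpan(extracted)->GetContext();

    std::string const spanName = "rpc.command." + command;
    auto span = tracer->StartSpan(spanName, opts);
    span->SetAttribute("xrpl.rpc.command", command.c_str());
    return span;
}
```

If the client sends no trace headers, the extracted parent is invalid and the span simply starts a new trace, so the same code path serves both traced and untraced callers.
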
### Exit Criteria

- [ ] All RPC commands traced
- [ ] Trace context propagates from HTTP headers
- [ ] WebSocket and HTTP both instrumented
- [ ] <1ms overhead per RPC call
- [ ] Integration tests passing

---

## 6.4 Phase 3: Transaction Tracing (Weeks 5-6)

**Objective**: Trace the transaction lifecycle across the network

### Tasks

| Task | Description                                          | Effort | Risk   |
| ---- | ---------------------------------------------------- | ------ | ------ |
| 3.1  | Define `TraceContext` Protocol Buffer message        | 1d     | Low    |
| 3.2  | Implement protobuf context serialization             | 1d     | Low    |
| 3.3  | Instrument `PeerImp::handleTransaction()`            | 2d     | Medium |
| 3.4  | Instrument `NetworkOPs::submitTransaction()`         | 1d     | Medium |
| 3.5  | Instrument HashRouter integration                    | 1d     | Medium |
| 3.6  | Implement relay context propagation (sketch below)   | 2d     | High   |
| 3.7  | Integration tests (multi-node)                       | 2d     | Medium |
| 3.8  | Performance benchmarks                               | 1d     | Low    |

**Total Effort**: 11 days
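
For tasks 3.2 and 3.6, the sketch below shows the receiving side of relay context propagation: rebuilding a remote parent `SpanContext` from hex-encoded trace/span IDs carried in the (assumed) `TraceContext` protobuf extension, then starting the local `tx.receive` span under it. The field contents and helper names are hypothetical — the actual message definition belongs to task 3.1 and the code-samples document; only the `tx.receive` span name comes from this plan's conventions.

```cpp
#include <opentelemetry/trace/span_context.h>
#include <opentelemetry/trace/tracer.h>

#include <cstdint>
#include <string>

namespace {

// Decode a lowercase-hex string (2 * N characters) into a fixed-size buffer.
template <std::size_t N>
bool hexToBytes(std::string const& hex, std::uint8_t (&out)[N])
{
    if (hex.size() != 2 * N)
        return false;
    auto nibble = [](char c) -> int {
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        return -1;
    };
    for (std::size_t i = 0; i < N; ++i)
    {
        int const hi = nibble(hex[2 * i]);
        int const lo = nibble(hex[2 * i + 1]);
        if (hi < 0 || lo < 0)
            return false;
        out[i] = static_cast<std::uint8_t>((hi << 4) | lo);
    }
    return true;
}

}  // namespace

// On receipt of a relayed transaction, continue the sender's trace by using
// the embedded context (hex trace_id/span_id) as a remote parent.
opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>
startRelayedTxSpan(
    opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer> tracer,
    std::string const& traceIdHex,
    std::string const& spanIdHex)
{
    namespace trace = opentelemetry::trace;

    trace::StartSpanOptions opts;
    opts.kind = trace::SpanKind::kConsumer;

    std::uint8_t traceId[16];
    std::uint8_t spanId[8];
    if (hexToBytes(traceIdHex, traceId) && hexToBytes(spanIdHex, spanId))
    {
        opts.parent = trace::SpanContext(
            trace::TraceId(traceId),
            trace::SpanId(spanId),
            trace::TraceFlags(trace::TraceFlags::kIsSampled),
            /*is_remote=*/true);
    }
    // If the fields are missing or malformed, the span starts a fresh trace.
    return tracer->StartSpan("tx.receive", opts);
}
```

The injection side is the mirror image: before relaying, the current span's IDs are written into the same protobuf fields with `ToLowerBase16()`, as in the PerfLog example in the configuration reference.
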
### Exit Criteria

- [ ] Transaction traces span across nodes
- [ ] Trace context in Protocol Buffer messages
- [ ] HashRouter deduplication visible in traces
- [ ] Multi-node integration tests passing
- [ ] <5% overhead on transaction throughput

---

## 6.5 Phase 4: Consensus Tracing (Weeks 7-8)

**Objective**: Full observability into consensus rounds

### Tasks

| Task | Description                                                    | Effort | Risk   |
| ---- | -------------------------------------------------------------- | ------ | ------ |
| 4.1  | Instrument `RCLConsensusAdaptor::startRound()` (sketch below)  | 1d     | Medium |
| 4.2  | Instrument phase transitions                                   | 2d     | Medium |
| 4.3  | Instrument proposal handling                                   | 2d     | High   |
| 4.4  | Instrument validation handling                                 | 1d     | Medium |
| 4.5  | Add consensus-specific attributes                              | 1d     | Low    |
| 4.6  | Correlate with transaction traces                              | 1d     | Medium |
| 4.7  | Multi-validator integration tests                              | 2d     | High   |
| 4.8  | Performance validation                                         | 1d     | Medium |

**Total Effort**: 11 days
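
As a sketch of task 4.1, the snippet below wraps a consensus round in a `consensus.round` span and records the `xrpl.consensus.ledger.seq` and `xrpl.consensus.proposers` attributes used by the dashboards later in this plan. The free-function shape and its parameters are illustrative — the real change would live inside the consensus adaptor — and span creation must stay off the consensus critical path.

```cpp
#include <opentelemetry/trace/scope.h>
#include <opentelemetry/trace/tracer.h>

#include <cstdint>

// Illustrative wrapper: open a round span, record attributes, run the round.
void traceConsensusRound(
    opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer> tracer,
    std::uint32_t ledgerSeq,
    std::uint64_t proposers)
{
    namespace trace = opentelemetry::trace;

    auto span = tracer->StartSpan("consensus.round");
    trace::Scope scope{span};  // phase child spans attach to this round

    span->SetAttribute("xrpl.consensus.ledger.seq", static_cast<std::int64_t>(ledgerSeq));
    span->SetAttribute("xrpl.consensus.proposers", static_cast<std::int64_t>(proposers));

    // ... run the round; phase transitions would open child spans such as
    // "consensus.phase.establish" here ...

    span->End();
}
```
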
### Exit Criteria

- [ ] Complete consensus round traces
- [ ] Phase transitions visible
- [ ] Proposals and validations traced
- [ ] No impact on consensus timing
- [ ] Multi-validator test network validated

---

## 6.6 Phase 5: Documentation & Deployment (Week 9)

**Objective**: Production readiness

### Tasks

| Task | Description                   | Effort | Risk |
| ---- | ----------------------------- | ------ | ---- |
| 5.1  | Operator runbook              | 1d     | Low  |
| 5.2  | Grafana dashboards            | 1d     | Low  |
| 5.3  | Alert definitions             | 0.5d   | Low  |
| 5.4  | Collector deployment examples | 0.5d   | Low  |
| 5.5  | Developer documentation       | 1d     | Low  |
| 5.6  | Training materials            | 0.5d   | Low  |
| 5.7  | Final integration testing     | 0.5d   | Low  |

**Total Effort**: 5 days

---

## 6.7 Risk Assessment

```mermaid
quadrantChart
    title Risk Assessment Matrix
    x-axis Low Impact --> High Impact
    y-axis Low Likelihood --> High Likelihood
    quadrant-1 Mitigate Immediately
    quadrant-2 Monitor Closely
    quadrant-3 Accept Risk
    quadrant-4 Plan Mitigation

    SDK Compatibility: [0.25, 0.2]
    Protocol Changes: [0.75, 0.65]
    Performance Overhead: [0.65, 0.45]
    Context Propagation: [0.5, 0.5]
    Memory Leaks: [0.8, 0.2]
```

### Risk Details

| Risk                                 | Likelihood | Impact | Mitigation                               |
| ------------------------------------ | ---------- | ------ | ---------------------------------------- |
| Protocol changes break compatibility | Medium     | High   | Use high field numbers, optional fields  |
| Performance overhead unacceptable    | Medium     | Medium | Sampling, conditional compilation        |
| Context propagation complexity       | Medium     | Medium | Phased rollout, extensive testing        |
| SDK compatibility issues             | Low        | Medium | Pin SDK version, fallback to no-op       |
| Memory leaks in long-running nodes   | Low        | High   | Memory profiling, bounded queues         |

---

## 6.8 Success Metrics

| Metric                   | Target                         | Measurement           |
| ------------------------ | ------------------------------ | --------------------- |
| Trace coverage           | >95% of transactions           | Sampling verification |
| CPU overhead             | <3%                            | Benchmark tests       |
| Memory overhead          | <5 MB                          | Memory profiling      |
| Latency impact (p99)     | <2%                            | Performance tests     |
| Trace completeness       | >99% spans with required attrs | Validation script     |
| Cross-node trace linkage | >90% of multi-hop transactions | Integration tests     |

---

## 6.9 Effort Summary

```mermaid
pie title Total Effort Distribution (47 developer-days)
    "Phase 1: Core Infrastructure" : 10
    "Phase 2: RPC Tracing" : 10
    "Phase 3: Transaction Tracing" : 11
    "Phase 4: Consensus Tracing" : 11
    "Phase 5: Documentation" : 5
```

### Resource Requirements

| Phase     | Developers | Duration    | Total Effort |
| --------- | ---------- | ----------- | ------------ |
| 1         | 2          | 2 weeks     | 10 days      |
| 2         | 1-2        | 2 weeks     | 10 days      |
| 3         | 2          | 2 weeks     | 11 days      |
| 4         | 2          | 2 weeks     | 11 days      |
| 5         | 1          | 1 week      | 5 days       |
| **Total** | **2**      | **9 weeks** | **47 days**  |

---

## 6.10 Quick Wins and Crawl-Walk-Run Strategy

This section outlines a prioritized approach to maximize ROI with minimal initial investment.

### 6.10.1 Crawl-Walk-Run Overview

```mermaid
flowchart LR
    subgraph crawl["🐢 CRAWL (Week 1-2)"]
        c1[Core SDK Setup]
        c2[RPC Tracing Only]
        c3[Single Node]
    end

    subgraph walk["🚶 WALK (Week 3-5)"]
        w1[Transaction Tracing]
        w2[Cross-Node Context]
        w3[Basic Dashboards]
    end

    subgraph run["🏃 RUN (Week 6-9)"]
        r1[Consensus Tracing]
        r2[Full Correlation]
        r3[Production Deploy]
    end

    crawl --> walk --> run

    style crawl fill:#e8f5e9,stroke:#388e3c
    style walk fill:#fff3e0,stroke:#ff9800
    style run fill:#e3f2fd,stroke:#1976d2
```

### 6.10.2 Quick Wins (Immediate Value)

| Quick Win                      | Effort   | Value  | When to Deploy |
| ------------------------------ | -------- | ------ | -------------- |
| **RPC Command Tracing**        | 2 days   | High   | Week 2         |
| **RPC Latency Histograms**     | 0.5 days | High   | Week 2         |
| **Error Rate Dashboard**       | 0.5 days | Medium | Week 2         |
| **Transaction Submit Tracing** | 1 day    | High   | Week 3         |
| **Consensus Round Duration**   | 1 day    | Medium | Week 6         |

### 6.10.3 CRAWL Phase (Weeks 1-2)

**Goal**: Get basic tracing working with minimal code changes.

**What You Get**:
- RPC request/response traces for all commands
- Latency breakdown per RPC command
- Error visibility with stack traces
- Basic Grafana dashboard

**Code Changes**: ~15 lines in `ServerHandler.cpp`, ~40 lines in a new telemetry module (a sketch of the conditional-instrumentation pattern follows the list below)

**Why Start Here**:
- RPC is the lowest-risk, highest-visibility component
- Immediate value for debugging client issues
- No cross-node complexity
- Single file modification to existing code
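
The "~15 lines" estimate rests on the conditional-instrumentation pattern from Phase 1: wrap each trace statement in a macro that, when `XRPL_ENABLE_TELEMETRY` is not defined, expands to nothing, so the disabled build keeps its current hot path. The macro names below are illustrative, not the final API, and the enabled branch reuses the `SpanGuard` sketch from Phase 1.

```cpp
// Possible shape of the conditional-instrumentation macros (names illustrative).
#ifdef XRPL_ENABLE_TELEMETRY

#define XRPL_TRACE_SPAN(guard, tracer, name) \
    ::ripple::SpanGuard guard{(tracer), (name)}

#define XRPL_TRACE_ATTR(guard, key, value) (guard).span().SetAttribute((key), (value))

#else  // telemetry compiled out: every trace statement vanishes

#define XRPL_TRACE_SPAN(guard, tracer, name) \
    do                                       \
    {                                        \
    } while (false)

#define XRPL_TRACE_ATTR(guard, key, value) \
    do                                     \
    {                                      \
    } while (false)

#endif

// Usage inside a handler (illustrative; telemetry.tracer() is a hypothetical accessor):
//     XRPL_TRACE_SPAN(rpcSpan, telemetry.tracer(), "rpc.command.submit");
//     XRPL_TRACE_ATTR(rpcSpan, "xrpl.rpc.command", "submit");
```
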
### 6.10.4 WALK Phase (Weeks 3-5)

**Goal**: Add transaction lifecycle tracing across nodes.

**What You Get**:
- End-to-end transaction traces from submit to relay
- Cross-node correlation (see transaction path)
- HashRouter deduplication visibility
- Relay latency metrics

**Code Changes**: ~120 lines across 4 files, plus a protobuf extension

**Why Do This Second**:
- Builds on RPC tracing (transactions submitted via RPC)
- Moderate complexity (requires context propagation)
- High value for debugging transaction issues

### 6.10.5 RUN Phase (Weeks 6-9)

**Goal**: Full observability including consensus.

**What You Get**:
- Complete consensus round visibility
- Phase transition timing
- Validator proposal tracking
- Full end-to-end traces (client → RPC → TX → consensus → ledger)

**Code Changes**: ~100 lines across 3 consensus files

**Why Do This Last**:
- Highest complexity (consensus is critical path)
- Requires thorough testing
- Lower relative value (consensus issues are rarer)

### 6.10.6 ROI Prioritization Matrix

```mermaid
quadrantChart
    title Implementation ROI Matrix
    x-axis Low Effort --> High Effort
    y-axis Low Value --> High Value
    quadrant-1 Major Projects - Plan Carefully
    quadrant-2 Quick Wins - Do First
    quadrant-3 Nice to Have - Optional
    quadrant-4 Time Sinks - Avoid

    RPC Tracing: [0.15, 0.9]
    TX Submit Trace: [0.25, 0.85]
    TX Relay Trace: [0.5, 0.8]
    Consensus Trace: [0.7, 0.75]
    Peer Message Trace: [0.85, 0.3]
    Ledger Acquire: [0.55, 0.5]
```

---

## 6.11 Definition of Done

Clear, measurable criteria for each phase.

### 6.11.1 Phase 1: Core Infrastructure

| Criterion       | Measurement                                                | Target                       |
| --------------- | ---------------------------------------------------------- | ---------------------------- |
| SDK Integration | `cmake --build` succeeds with `-DXRPL_ENABLE_TELEMETRY=ON` | ✅ Compiles                  |
| Runtime Toggle  | `enabled=0` produces zero overhead                         | <0.1% CPU difference         |
| Span Creation   | Unit test creates and exports span                         | Span appears in Jaeger       |
| Configuration   | All config options parsed correctly                        | Config validation tests pass |
| Documentation   | Developer guide exists                                     | PR approved                  |

**Definition of Done**: All criteria met, PR merged, no regressions in CI.

### 6.11.2 Phase 2: RPC Tracing

| Criterion          | Measurement                        | Target                     |
| ------------------ | ---------------------------------- | -------------------------- |
| Coverage           | All RPC commands instrumented      | 100% of commands           |
| Context Extraction | traceparent header propagates      | Integration test passes    |
| Attributes         | Command, status, duration recorded | Validation script confirms |
| Performance        | RPC latency overhead               | <1ms p99                   |
| Dashboard          | Grafana dashboard deployed         | Screenshot in docs         |

**Definition of Done**: RPC traces visible in Jaeger/Tempo for all commands, dashboard shows latency distribution.

### 6.11.3 Phase 3: Transaction Tracing

| Criterion        | Measurement                     | Target                             |
| ---------------- | ------------------------------- | ---------------------------------- |
| Local Trace      | Submit → validate → TxQ traced  | Single-node test passes            |
| Cross-Node       | Context propagates via protobuf | Multi-node test passes             |
| Relay Visibility | relay_count attribute correct   | Spot check 100 txs                 |
| HashRouter       | Deduplication visible in trace  | Duplicate txs show suppressed=true |
| Performance      | TX throughput overhead          | <5% degradation                    |

**Definition of Done**: Transaction traces span 3+ nodes in test network, performance within bounds.

### 6.11.4 Phase 4: Consensus Tracing

| Criterion            | Measurement                   | Target                    |
| -------------------- | ----------------------------- | ------------------------- |
| Round Tracing        | startRound creates root span  | Unit test passes          |
| Phase Visibility     | All phases have child spans   | Integration test confirms |
| Proposer Attribution | Proposer ID in attributes     | Spot check 50 rounds      |
| Timing Accuracy      | Phase durations match PerfLog | <5% variance              |
| No Consensus Impact  | Round timing unchanged        | Performance test passes   |

**Definition of Done**: Consensus rounds fully traceable, no impact on consensus timing.

### 6.11.5 Phase 5: Production Deployment

| Criterion    | Measurement                  | Target                     |
| ------------ | ---------------------------- | -------------------------- |
| Collector HA | Multiple collectors deployed | No single point of failure |
| Sampling     | Tail sampling configured     | 10% base + errors + slow   |
| Retention    | Data retained per policy     | 7 days hot, 30 days warm   |
| Alerting     | Alerts configured            | Error spike, high latency  |
| Runbook      | Operator documentation       | Approved by ops team       |
| Training     | Team trained                 | Session completed          |

**Definition of Done**: Telemetry running in production, operators trained, alerts active.

### 6.11.6 Success Metrics Summary

| Phase   | Primary Metric         | Secondary Metric            | Deadline      |
| ------- | ---------------------- | --------------------------- | ------------- |
| Phase 1 | SDK compiles and runs  | Zero overhead when disabled | End of Week 2 |
| Phase 2 | 100% RPC coverage      | <1ms latency overhead       | End of Week 4 |
| Phase 3 | Cross-node traces work | <5% throughput impact       | End of Week 6 |
| Phase 4 | Consensus fully traced | No consensus timing impact  | End of Week 8 |
| Phase 5 | Production deployment  | Operators trained           | End of Week 9 |

---

## 6.12 Recommended Implementation Order

Based on ROI analysis, implement in this exact order:

```mermaid
flowchart TB
    subgraph week1["Week 1"]
        t1[1. OpenTelemetry SDK<br/>Conan/CMake integration]
        t2[2. Telemetry interface<br/>SpanGuard, config]
    end

    subgraph week2["Week 2"]
        t3[3. RPC ServerHandler<br/>instrumentation]
        t4[4. Basic Jaeger setup<br/>for testing]
    end

    subgraph week3["Week 3"]
        t5[5. Transaction submit<br/>tracing]
        t6[6. Grafana dashboard<br/>v1]
    end

    subgraph week4["Week 4"]
        t7[7. Protobuf context<br/>extension]
        t8[8. PeerImp tx.relay<br/>instrumentation]
    end

    subgraph week5["Week 5"]
        t9[9. Multi-node<br/>integration tests]
        t10[10. Performance<br/>benchmarks]
    end

    subgraph week6_8["Weeks 6-8"]
        t11[11. Consensus<br/>instrumentation]
        t12[12. Full integration<br/>testing]
    end

    subgraph week9["Week 9"]
        t13[13. Production<br/>deployment]
        t14[14. Documentation<br/>& training]
    end

    t1 --> t2 --> t3 --> t4
    t4 --> t5 --> t6
    t6 --> t7 --> t8
    t8 --> t9 --> t10
    t10 --> t11 --> t12
    t12 --> t13 --> t14

    style week1 fill:#e8f5e9,stroke:#388e3c
    style week2 fill:#e8f5e9,stroke:#388e3c
    style week3 fill:#fff3e0,stroke:#ff9800
    style week4 fill:#fff3e0,stroke:#ff9800
    style week5 fill:#fff3e0,stroke:#ff9800
    style week6_8 fill:#e3f2fd,stroke:#1976d2
    style week9 fill:#f3e5f5,stroke:#7b1fa2
```

---

*Previous: [Configuration Reference](./05-configuration-reference.md)* | *Next: [Observability Backends](./07-observability-backends.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*

# Observability Backend Recommendations

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Implementation Phases](./06-implementation-phases.md) | [Appendix](./08-appendix.md)

---

## 7.1 Development/Testing Backends

| Backend    | Pros                | Cons              | Use Case          |
| ---------- | ------------------- | ----------------- | ----------------- |
| **Jaeger** | Easy setup, good UI | Limited retention | Local dev, CI     |
| **Zipkin** | Simple, lightweight | Basic features    | Quick prototyping |

### Quick Start with Jaeger

```bash
# Start Jaeger with OTLP support
docker run -d --name jaeger \
  -e COLLECTOR_OTLP_ENABLED=true \
  -p 16686:16686 \
  -p 4317:4317 \
  -p 4318:4318 \
  jaegertracing/all-in-one:latest
```

---

## 7.2 Production Backends

| Backend           | Pros                                      | Cons               | Use Case                     |
| ----------------- | ----------------------------------------- | ------------------ | ---------------------------- |
| **Grafana Tempo** | Cost-effective, Grafana integration       | Newer project      | Most production deployments  |
| **Elastic APM**   | Full observability stack, log correlation | Resource intensive | Existing Elastic users       |
| **Honeycomb**     | Excellent query, high cardinality         | SaaS cost          | Deep debugging needs         |
| **Datadog APM**   | Full platform, easy setup                 | SaaS cost          | Enterprise with budget       |

### Backend Selection Flowchart

```mermaid
flowchart TD
    start[Select Backend] --> budget{Budget<br/>Constraints?}

    budget -->|Yes| oss[Open Source]
    budget -->|No| saas{Prefer<br/>SaaS?}

    oss --> existing{Existing<br/>Stack?}
    existing -->|Grafana| tempo[Grafana Tempo]
    existing -->|Elastic| elastic[Elastic APM]
    existing -->|None| tempo

    saas -->|Yes| enterprise{Enterprise<br/>Support?}
    saas -->|No| oss

    enterprise -->|Yes| datadog[Datadog APM]
    enterprise -->|No| honeycomb[Honeycomb]

    tempo --> final[Configure Collector]
    elastic --> final
    honeycomb --> final
    datadog --> final

    style tempo fill:#e8f5e9,stroke:#388e3c
    style elastic fill:#fff3e0,stroke:#ff9800
    style honeycomb fill:#e3f2fd,stroke:#1976d2
    style datadog fill:#f3e5f5,stroke:#7b1fa2
```

---

## 7.3 Recommended Production Architecture

```mermaid
flowchart TB
    subgraph validators["Validator Nodes"]
        v1[rippled<br/>Validator 1]
        v2[rippled<br/>Validator 2]
    end

    subgraph stock["Stock Nodes"]
        s1[rippled<br/>Stock 1]
        s2[rippled<br/>Stock 2]
    end

    subgraph collector["OTel Collector Cluster"]
        c1[Collector<br/>DC1]
        c2[Collector<br/>DC2]
    end

    subgraph backends["Storage Backends"]
        tempo[(Grafana<br/>Tempo)]
        elastic[(Elastic<br/>APM)]
        archive[(S3/GCS<br/>Archive)]
    end

    subgraph ui["Visualization"]
        grafana[Grafana<br/>Dashboards]
    end

    v1 -->|OTLP| c1
    v2 -->|OTLP| c1
    s1 -->|OTLP| c2
    s2 -->|OTLP| c2

    c1 --> tempo
    c1 --> elastic
    c2 --> tempo
    c2 --> archive

    tempo --> grafana
    elastic --> grafana

    style validators fill:#ffebee,stroke:#c62828
    style stock fill:#e3f2fd,stroke:#1976d2
    style collector fill:#fff3e0,stroke:#ff9800
    style backends fill:#e8f5e9,stroke:#388e3c
    style ui fill:#f3e5f5,stroke:#7b1fa2
```

---

## 7.4 Architecture Considerations

### 7.4.1 Collector Placement

| Strategy      | Description          | Pros                     | Cons                    |
| ------------- | -------------------- | ------------------------ | ----------------------- |
| **Sidecar**   | Collector per node   | Isolation, simple config | Resource overhead       |
| **DaemonSet** | Collector per host   | Shared resources         | Complexity              |
| **Gateway**   | Central collector(s) | Centralized processing   | Single point of failure |

**Recommendation**: Use the **Gateway** pattern with regional collectors for rippled networks:
- One collector cluster per datacenter/region
- Tail-based sampling at collector level
- Multiple export destinations for redundancy

### 7.4.2 Sampling Strategy

```mermaid
flowchart LR
    subgraph head["Head Sampling (Node)"]
        hs[10% probabilistic]
    end

    subgraph tail["Tail Sampling (Collector)"]
        ts1[Keep all errors]
        ts2[Keep slow >5s]
        ts3[Keep 10% rest]
    end

    head --> tail

    ts1 --> final[Final Traces]
    ts2 --> final
    ts3 --> final
```
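
On the node side, the 10% head sampling shown above corresponds to a parent-based sampler wrapping a trace-ID-ratio sampler in the opentelemetry-cpp SDK. The sketch below is illustrative only — factory helper names and overloads vary somewhat between SDK releases, and the endpoint and service name are placeholders — but it captures the intent: sample 10% of locally started traces while always honoring a remote parent's decision so cross-node traces stay complete.

```cpp
#include <opentelemetry/exporters/otlp/otlp_grpc_exporter_factory.h>
#include <opentelemetry/sdk/resource/resource.h>
#include <opentelemetry/sdk/trace/batch_span_processor_factory.h>
#include <opentelemetry/sdk/trace/batch_span_processor_options.h>
#include <opentelemetry/sdk/trace/samplers/parent.h>
#include <opentelemetry/sdk/trace/samplers/trace_id_ratio.h>
#include <opentelemetry/sdk/trace/tracer_provider_factory.h>
#include <opentelemetry/trace/provider.h>

#include <memory>
#include <utility>

void initNodeTracing()
{
    namespace otlp = opentelemetry::exporter::otlp;
    namespace sdktrace = opentelemetry::sdk::trace;

    // OTLP/gRPC export to a local collector/agent (placeholder endpoint).
    otlp::OtlpGrpcExporterOptions exporterOpts;
    exporterOpts.endpoint = "localhost:4317";
    auto exporter = otlp::OtlpGrpcExporterFactory::Create(exporterOpts);

    sdktrace::BatchSpanProcessorOptions batchOpts;
    auto processor = sdktrace::BatchSpanProcessorFactory::Create(std::move(exporter), batchOpts);

    // 10% head sampling for locally started traces; children always follow
    // their parent's decision, so remote-initiated traces are not broken up.
    auto sampler = std::unique_ptr<sdktrace::Sampler>{new sdktrace::ParentBasedSampler{
        std::make_shared<sdktrace::TraceIdRatioBasedSampler>(0.10)}};

    auto resource =
        opentelemetry::sdk::resource::Resource::Create({{"service.name", "rippled"}});

    std::shared_ptr<opentelemetry::trace::TracerProvider> provider =
        sdktrace::TracerProviderFactory::Create(std::move(processor), resource, std::move(sampler));

    opentelemetry::trace::Provider::SetTracerProvider(
        opentelemetry::nostd::shared_ptr<opentelemetry::trace::TracerProvider>{std::move(provider)});
}
```
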
### 7.4.3 Data Retention

| Environment | Hot Storage | Warm Storage | Cold Archive |
| ----------- | ----------- | ------------ | ------------ |
| Development | 24 hours    | N/A          | N/A          |
| Staging     | 7 days      | N/A          | N/A          |
| Production  | 7 days      | 30 days      | 1 year       |

---

## 7.5 Integration Checklist

- [ ] Choose primary backend (Tempo recommended for cost/features)
- [ ] Deploy collector cluster with high availability
- [ ] Configure tail-based sampling for error/latency traces
- [ ] Set up Grafana dashboards for trace visualization
- [ ] Configure alerts for trace anomalies
- [ ] Establish data retention policies
- [ ] Test trace correlation with logs and metrics

---

## 7.6 Grafana Dashboard Examples

Pre-built dashboards for rippled observability.

### 7.6.1 Consensus Health Dashboard

```json
{
  "title": "rippled Consensus Health",
  "uid": "rippled-consensus-health",
  "tags": ["rippled", "consensus", "tracing"],
  "panels": [
    {
      "title": "Consensus Round Duration",
      "type": "timeseries",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"consensus.round\"} | avg(duration) by (resource.service.instance.id)"
        }
      ],
      "fieldConfig": {
        "defaults": {
          "unit": "ms",
          "thresholds": {
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 4000 },
              { "color": "red", "value": 5000 }
            ]
          }
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 }
    },
    {
      "title": "Phase Duration Breakdown",
      "type": "barchart",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=~\"consensus.phase.*\"} | avg(duration) by (name)"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 }
    },
    {
      "title": "Proposers per Round",
      "type": "stat",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"consensus.round\"} | avg(span.xrpl.consensus.proposers)"
        }
      ],
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 8 }
    },
    {
      "title": "Recent Slow Rounds (>5s)",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"consensus.round\"} | duration > 5s"
        }
      ],
      "gridPos": { "h": 8, "w": 24, "x": 0, "y": 12 }
    }
  ]
}
```

### 7.6.2 Node Overview Dashboard

```json
{
  "title": "rippled Node Overview",
  "uid": "rippled-node-overview",
  "panels": [
    {
      "title": "Active Nodes",
      "type": "stat",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\"} | count_over_time() by (resource.service.instance.id) | count()"
        }
      ],
      "gridPos": { "h": 4, "w": 4, "x": 0, "y": 0 }
    },
    {
      "title": "Total Transactions (1h)",
      "type": "stat",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.receive\"} | count()"
        }
      ],
      "gridPos": { "h": 4, "w": 4, "x": 4, "y": 0 }
    },
    {
      "title": "Error Rate",
      "type": "gauge",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && status.code=error} | rate() / {resource.service.name=\"rippled\"} | rate() * 100"
        }
      ],
      "fieldConfig": {
        "defaults": {
          "unit": "percent",
          "max": 10,
          "thresholds": {
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 1 },
              { "color": "red", "value": 5 }
            ]
          }
        }
      },
      "gridPos": { "h": 4, "w": 4, "x": 8, "y": 0 }
    },
    {
      "title": "Service Map",
      "type": "nodeGraph",
      "datasource": "Tempo",
      "gridPos": { "h": 12, "w": 12, "x": 12, "y": 0 }
    }
  ]
}
```

### 7.6.3 Alert Rules

```yaml
# grafana/provisioning/alerting/rippled-alerts.yaml
apiVersion: 1

groups:
  - name: rippled-tracing-alerts
    folder: rippled
    interval: 1m
    rules:
      - uid: consensus-slow
        title: Consensus Round Slow
        condition: A
        data:
          - refId: A
            datasourceUid: tempo
            model:
              queryType: traceql
              query: '{resource.service.name="rippled" && name="consensus.round"} | avg(duration) > 5s'
        for: 5m
        annotations:
          summary: Consensus rounds taking >5 seconds
          description: "Consensus duration: {{ $value }}ms"
        labels:
          severity: warning

      - uid: rpc-error-spike
        title: RPC Error Rate Spike
        condition: B
        data:
          - refId: B
            datasourceUid: tempo
            model:
              queryType: traceql
              query: '{resource.service.name="rippled" && name=~"rpc.command.*" && status.code=error} | rate() > 0.05'
        for: 2m
        annotations:
          summary: RPC error rate >5%
        labels:
          severity: critical

      - uid: tx-throughput-drop
        title: Transaction Throughput Drop
        condition: C
        data:
          - refId: C
            datasourceUid: tempo
            model:
              queryType: traceql
              query: '{resource.service.name="rippled" && name="tx.receive"} | rate() < 10'
        for: 10m
        annotations:
          summary: Transaction throughput below threshold
        labels:
          severity: warning
```

---

## 7.7 PerfLog and Insight Correlation

How to correlate OpenTelemetry traces with existing rippled observability.

### 7.7.1 Correlation Architecture

```mermaid
flowchart TB
    subgraph rippled["rippled Node"]
        otel[OpenTelemetry<br/>Spans]
        perflog[PerfLog<br/>JSON Logs]
        insight[Beast Insight<br/>StatsD Metrics]
    end

    subgraph collectors["Data Collection"]
        otelc[OTel Collector]
        promtail[Promtail/Fluentd]
        statsd[StatsD Exporter]
    end

    subgraph storage["Storage"]
        tempo[(Tempo)]
        loki[(Loki)]
        prom[(Prometheus)]
    end

    subgraph grafana["Grafana"]
        traces[Trace View]
        logs[Log View]
        metrics[Metrics View]
        corr[Correlation<br/>Panel]
    end

    otel -->|OTLP| otelc --> tempo
    perflog -->|JSON| promtail --> loki
    insight -->|StatsD| statsd --> prom

    tempo --> traces
    loki --> logs
    prom --> metrics

    traces --> corr
    logs --> corr
    metrics --> corr

    style corr fill:#f3e5f5,stroke:#7b1fa2
```

### 7.7.2 Correlation Fields

| Source      | Field                       | Link To       | Purpose                    |
| ----------- | --------------------------- | ------------- | -------------------------- |
| **Trace**   | `trace_id`                  | Logs          | Find log entries for trace |
| **Trace**   | `xrpl.tx.hash`              | Logs, Metrics | Find TX-related data       |
| **Trace**   | `xrpl.consensus.ledger.seq` | Logs          | Find ledger-related logs   |
| **PerfLog** | `trace_id` (new)            | Traces        | Jump to trace from log     |
| **PerfLog** | `ledger_seq`                | Traces        | Find consensus trace       |
| **Insight** | `exemplar.trace_id`         | Traces        | Jump from metric spike     |

### 7.7.3 Example: Debugging a Slow Transaction

**Step 1: Find the trace**
```
# In Grafana Explore with Tempo
{resource.service.name="rippled" && span.xrpl.tx.hash="ABC123..."}
```

**Step 2: Get the trace_id from the trace view**
```
Trace ID: 4bf92f3577b34da6a3ce929d0e0e4736
```

**Step 3: Find related PerfLog entries**
```
# In Grafana Explore with Loki
{job="rippled"} |= "4bf92f3577b34da6a3ce929d0e0e4736"
```

**Step 4: Check Insight metrics for the time window**
```
# In Grafana with Prometheus
rate(rippled_tx_applied_total[1m])
  @ timestamp_from_trace
```

### 7.7.4 Unified Dashboard Example

```json
{
  "title": "rippled Unified Observability",
  "uid": "rippled-unified",
  "panels": [
    {
      "title": "Transaction Latency (Traces)",
      "type": "timeseries",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.receive\"} | histogram_over_time(duration)"
        }
      ],
      "gridPos": { "h": 6, "w": 8, "x": 0, "y": 0 }
    },
    {
      "title": "Transaction Rate (Metrics)",
      "type": "timeseries",
      "datasource": "Prometheus",
      "targets": [
        {
          "expr": "rate(rippled_tx_received_total[5m])",
          "legendFormat": "{{ instance }}"
        }
      ],
      "fieldConfig": {
        "defaults": {
          "links": [
            {
              "title": "View traces",
              "url": "/explore?left={\"datasource\":\"Tempo\",\"query\":\"{resource.service.name=\\\"rippled\\\" && name=\\\"tx.receive\\\"}\"}"
            }
          ]
        }
      },
      "gridPos": { "h": 6, "w": 8, "x": 8, "y": 0 }
    },
    {
      "title": "Recent Logs",
      "type": "logs",
      "datasource": "Loki",
      "targets": [
        {
          "expr": "{job=\"rippled\"} | json"
        }
      ],
      "gridPos": { "h": 6, "w": 8, "x": 16, "y": 0 }
    },
    {
      "title": "Trace Search",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\"}"
        }
      ],
      "fieldConfig": {
        "overrides": [
          {
            "matcher": { "id": "byName", "options": "traceID" },
            "properties": [
              {
                "id": "links",
                "value": [
                  {
                    "title": "View trace",
                    "url": "/explore?left={\"datasource\":\"Tempo\",\"query\":\"${__value.raw}\"}"
                  },
                  {
                    "title": "View logs",
                    "url": "/explore?left={\"datasource\":\"Loki\",\"query\":\"{job=\\\"rippled\\\"} |= \\\"${__value.raw}\\\"\"}"
                  }
                ]
              }
            ]
          }
        ]
      },
      "gridPos": { "h": 12, "w": 24, "x": 0, "y": 6 }
    }
  ]
}
```

---

*Previous: [Implementation Phases](./06-implementation-phases.md)* | *Next: [Appendix](./08-appendix.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*

# Appendix

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Observability Backends](./07-observability-backends.md)

---

## 8.1 Glossary

| Term                  | Definition                                                 |
| --------------------- | ---------------------------------------------------------- |
| **Span**              | A unit of work with start/end time, name, and attributes   |
| **Trace**             | A collection of spans representing a complete request flow |
| **Trace ID**          | 128-bit unique identifier for a trace                      |
| **Span ID**           | 64-bit unique identifier for a span within a trace         |
| **Context**           | Carrier for trace/span IDs across boundaries               |
| **Propagator**        | Component that injects/extracts context                    |
| **Sampler**           | Decides which traces to record                             |
| **Exporter**          | Sends spans to backend                                     |
| **Collector**         | Receives, processes, and forwards telemetry                |
| **OTLP**              | OpenTelemetry Protocol (wire format)                       |
| **W3C Trace Context** | Standard HTTP headers for trace propagation                |
| **Baggage**           | Key-value pairs propagated across service boundaries       |
| **Resource**          | Entity producing telemetry (service, host, etc.)           |
| **Instrumentation**   | Code that creates telemetry data                           |

### rippled-Specific Terms

| Term              | Definition                                         |
| ----------------- | -------------------------------------------------- |
| **Overlay**       | P2P network layer managing peer connections        |
| **Consensus**     | XRP Ledger consensus algorithm (RCL)               |
| **Proposal**      | Validator's suggested transaction set for a ledger |
| **Validation**    | Validator's signature on a closed ledger           |
| **HashRouter**    | Component for transaction deduplication            |
| **JobQueue**      | Thread pool for asynchronous task execution        |
| **PerfLog**       | Existing performance logging system in rippled     |
| **Beast Insight** | Existing metrics framework in rippled              |

---

## 8.2 Span Hierarchy Visualization

```mermaid
flowchart TB
    subgraph trace["Trace: Transaction Lifecycle"]
        rpc["rpc.submit<br/>(entry point)"]
        validate["tx.validate"]
        relay["tx.relay<br/>(parent span)"]

        subgraph peers["Peer Spans"]
            p1["peer.send<br/>Peer A"]
            p2["peer.send<br/>Peer B"]
            p3["peer.send<br/>Peer C"]
        end

        consensus["consensus.round"]
        apply["tx.apply"]
    end

    rpc --> validate
    validate --> relay
    relay --> p1
    relay --> p2
    relay --> p3
    p1 -.->|"context propagation"| consensus
    consensus --> apply

    style trace fill:#f5f5f5,stroke:#333
    style peers fill:#e3f2fd,stroke:#1976d2
```

---

## 8.3 References

### OpenTelemetry Resources

1. [OpenTelemetry C++ SDK](https://github.com/open-telemetry/opentelemetry-cpp)
2. [OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel/)
3. [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/)
4. [OTLP Protocol Specification](https://opentelemetry.io/docs/specs/otlp/)

### Standards

5. [W3C Trace Context](https://www.w3.org/TR/trace-context/)
6. [W3C Baggage](https://www.w3.org/TR/baggage/)
7. [Protocol Buffers](https://protobuf.dev/)

### rippled Resources

8. [rippled Source Code](https://github.com/XRPLF/rippled)
9. [XRP Ledger Documentation](https://xrpl.org/docs/)
10. [rippled Overlay README](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/overlay/README.md)
11. [rippled RPC README](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/rpc/README.md)
12. [rippled Consensus README](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/app/consensus/README.md)

---

## 8.4 Version History

| Version | Date       | Author | Changes                           |
| ------- | ---------- | ------ | --------------------------------- |
| 1.0     | 2026-02-12 | -      | Initial implementation plan       |
| 1.1     | 2026-02-13 | -      | Refactored into modular documents |

---

## 8.5 Document Index

| Document | Description |
| -------- | ----------- |
| [OpenTelemetryPlan.md](./OpenTelemetryPlan.md) | Master overview and executive summary |
| [01-architecture-analysis.md](./01-architecture-analysis.md) | rippled architecture and trace points |
| [02-design-decisions.md](./02-design-decisions.md) | SDK selection, exporters, span conventions |
| [03-implementation-strategy.md](./03-implementation-strategy.md) | Directory structure, performance analysis |
| [04-code-samples.md](./04-code-samples.md) | C++ code examples for all components |
| [05-configuration-reference.md](./05-configuration-reference.md) | rippled config, CMake, Collector configs |
| [06-implementation-phases.md](./06-implementation-phases.md) | Timeline, tasks, risks, success metrics |
| [07-observability-backends.md](./07-observability-backends.md) | Backend selection and architecture |
| [08-appendix.md](./08-appendix.md) | Glossary, references, version history |

---

*Previous: [Observability Backends](./07-observability-backends.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*

# OpenTelemetry Distributed Tracing Implementation Plan for rippled (xrpld)
|
||||
|
||||
## Executive Summary
|
||||
|
||||
This document provides a comprehensive implementation plan for integrating OpenTelemetry distributed tracing into the rippled XRP Ledger node software. The plan addresses the unique challenges of a decentralized peer-to-peer system where trace context must propagate across network boundaries between independent nodes.
|
||||
|
||||
### Key Benefits
|
||||
|
||||
- **End-to-end transaction visibility**: Track transactions from submission through consensus to ledger inclusion
|
||||
- **Consensus round analysis**: Understand timing and behavior of consensus phases across validators
|
||||
- **RPC performance insights**: Identify slow handlers and optimize response times
|
||||
- **Network topology understanding**: Visualize message propagation patterns between peers
|
||||
- **Incident debugging**: Correlate events across distributed nodes during issues
|
||||
|
||||
### Estimated Performance Overhead
|
||||
|
||||
| Metric | Overhead | Notes |
|
||||
| ------------- | ---------- | ----------------------------------- |
|
||||
| CPU | 1-3% | Span creation and attribute setting |
|
||||
| Memory | 2-5 MB | Batch buffer for pending spans |
|
||||
| Network | 10-50 KB/s | Compressed OTLP export to collector |
|
||||
| Latency (p99) | <2% | With proper sampling configuration |
|
||||
|
||||
---
|
||||
|
||||
## Document Structure
|
||||
|
||||
This implementation plan is organized into modular documents for easier navigation:
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
overview["📋 OpenTelemetryPlan.md<br/>(This Document)"]
|
||||
|
||||
subgraph analysis["Analysis & Design"]
|
||||
arch["01-architecture-analysis.md"]
|
||||
design["02-design-decisions.md"]
|
||||
end
|
||||
|
||||
subgraph impl["Implementation"]
|
||||
strategy["03-implementation-strategy.md"]
|
||||
code["04-code-samples.md"]
|
||||
config["05-configuration-reference.md"]
|
||||
end
|
||||
|
||||
subgraph deploy["Deployment & Planning"]
|
||||
phases["06-implementation-phases.md"]
|
||||
backends["07-observability-backends.md"]
|
||||
appendix["08-appendix.md"]
|
||||
end
|
||||
|
||||
overview --> analysis
|
||||
overview --> impl
|
||||
overview --> deploy
|
||||
|
||||
arch --> design
|
||||
design --> strategy
|
||||
strategy --> code
|
||||
code --> config
|
||||
config --> phases
|
||||
phases --> backends
|
||||
backends --> appendix
|
||||
|
||||
style overview fill:#e8f5e9,stroke:#388e3c,stroke-width:2px
|
||||
style analysis fill:#e3f2fd,stroke:#1976d2
|
||||
style impl fill:#fff3e0,stroke:#ff9800
|
||||
style deploy fill:#f3e5f5,stroke:#7b1fa2
|
||||
```
|
||||
|
||||
---

## Table of Contents

| Section | Document | Description |
| ------- | -------- | ----------- |
| **1** | [Architecture Analysis](./01-architecture-analysis.md) | rippled component analysis, trace points, instrumentation priorities |
| **2** | [Design Decisions](./02-design-decisions.md) | SDK selection, exporters, span naming, attributes, context propagation |
| **3** | [Implementation Strategy](./03-implementation-strategy.md) | Directory structure, key principles, performance optimization |
| **4** | [Code Samples](./04-code-samples.md) | Complete C++ implementation examples for all components |
| **5** | [Configuration Reference](./05-configuration-reference.md) | rippled config, CMake integration, Collector configurations |
| **6** | [Implementation Phases](./06-implementation-phases.md) | 5-phase timeline, tasks, risks, success metrics |
| **7** | [Observability Backends](./07-observability-backends.md) | Backend selection guide and production architecture |
| **8** | [Appendix](./08-appendix.md) | Glossary, references, version history |

---
## 1. Architecture Analysis

The rippled node consists of several key components that require instrumentation for comprehensive distributed tracing. The main areas include the RPC server (HTTP/WebSocket), Overlay P2P network, Consensus mechanism (RCLConsensus), JobQueue for async task execution, and existing observability infrastructure (PerfLog, Insight/StatsD, Journal logging).

Key trace points span transaction submission via RPC, peer-to-peer message propagation, consensus round execution, and ledger building. The implementation prioritizes high-value, low-risk components first: RPC handlers provide immediate value with minimal risk, while consensus tracing requires careful implementation to avoid timing impacts.

➡️ **[Read full Architecture Analysis](./01-architecture-analysis.md)**

---
## 2. Design Decisions

The OpenTelemetry C++ SDK is selected for its CNCF backing, active development, and native performance characteristics. Traces are exported via OTLP/gRPC (primary) or OTLP/HTTP (fallback) to an OpenTelemetry Collector, which provides flexible routing and sampling.

Span naming follows a hierarchical `<component>.<operation>` convention (e.g., `rpc.submit`, `tx.relay`, `consensus.round`). Context propagation uses W3C Trace Context headers for HTTP and embedded Protocol Buffer fields for P2P messages. The implementation coexists with existing PerfLog and Insight observability systems through correlation IDs.
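As a rough illustration of this convention (not the plan's final interface), the sketch below creates a span directly with the OpenTelemetry C++ API; the tracer name and attribute key are assumptions made for the example:

```cpp
#include <opentelemetry/trace/provider.h>

namespace trace_api = opentelemetry::trace;

// Illustrative only: trace an RPC "submit" request using the
// <component>.<operation> span naming convention described above.
void
traceSubmitHandler()
{
    auto provider = trace_api::Provider::GetTracerProvider();
    auto tracer = provider->GetTracer("rippled");  // scope name (assumed)

    auto span = tracer->StartSpan("rpc.submit");
    span->SetAttribute("rpc.method", "submit");
    // ... actual handler work would run here ...
    span->End();
}
```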
➡️ **[Read full Design Decisions](./02-design-decisions.md)**

---
## 3. Implementation Strategy

The telemetry code is organized under `include/xrpl/telemetry/` for headers and `src/libxrpl/telemetry/` for implementation. Key principles include RAII-based span management via `SpanGuard`, conditional compilation with `XRPL_ENABLE_TELEMETRY`, and minimal runtime overhead through batch processing and efficient sampling.
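`SpanGuard` is not yet implemented; the following is a minimal sketch of how such an RAII helper could look on top of the OpenTelemetry C++ span type (the class shape and constructor are hypothetical):

```cpp
#include <opentelemetry/nostd/shared_ptr.h>
#include <opentelemetry/trace/span.h>

#include <utility>

// Hypothetical RAII wrapper: the span is ended when the guard leaves
// scope, so early returns and exceptions cannot leak an open span.
class SpanGuard
{
public:
    explicit SpanGuard(
        opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span> span)
        : span_(std::move(span))
    {
    }

    ~SpanGuard()
    {
        if (span_)
            span_->End();
    }

    SpanGuard(SpanGuard const&) = delete;
    SpanGuard&
    operator=(SpanGuard const&) = delete;

    opentelemetry::trace::Span&
    operator*() const
    {
        return *span_;
    }

private:
    opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span> span_;
};
```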
Performance optimization strategies include probabilistic head sampling (10% default), tail-based sampling at the collector for errors and slow traces, batch export to reduce network overhead, and conditional instrumentation that compiles to no-ops when disabled.
➡️ **[Read full Implementation Strategy](./03-implementation-strategy.md)**

---
## 4. Code Samples

Complete C++ implementation examples are provided for all telemetry components (a sketch of the conditional instrumentation idea follows the list):

- `Telemetry.h` - Core interface for tracer access and span creation
- `SpanGuard.h` - RAII wrapper for automatic span lifecycle management
- `TracingInstrumentation.h` - Macros for conditional instrumentation
- Protocol Buffer extensions for trace context propagation
- Module-specific instrumentation (RPC, Consensus, P2P, JobQueue)
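The exact macro surface is defined in the code-samples document; as a hedged illustration, conditional instrumentation can expand to a no-op when the `XRPL_ENABLE_TELEMETRY` build option is off (the macro and the `getTracer()` helper below are hypothetical):

```cpp
// Hypothetical conditional-instrumentation macro: when telemetry is
// compiled out, the macro expands to an empty statement and adds zero
// overhead. getTracer() stands in for the plan's Telemetry accessor.
#if defined(XRPL_ENABLE_TELEMETRY)
#define XRPL_TRACE_SPAN(guardName, spanName) \
    SpanGuard guardName(getTracer()->StartSpan(spanName))
#else
#define XRPL_TRACE_SPAN(guardName, spanName) \
    do                                       \
    {                                        \
    } while (false)
#endif

// Usage (illustrative):
//   XRPL_TRACE_SPAN(guard, "consensus.round");
```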
➡️ **[View all Code Samples](./04-code-samples.md)**

---
## 5. Configuration Reference

Configuration is handled through the `[telemetry]` section in `xrpld.cfg`, with options for enabling or disabling tracing, exporter selection, endpoint configuration, sampling ratios, and component-level filtering. CMake integration includes an `XRPL_ENABLE_TELEMETRY` option for compile-time control.
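For orientation only, the options above might map to a settings structure along these lines; the field names and defaults are assumptions, not the final configuration keys:

```cpp
#include <string>
#include <vector>

// Hypothetical in-memory view of the [telemetry] section; the actual
// keys and defaults are specified in the configuration reference.
struct TelemetrySettings
{
    bool enabled = false;                     // master on/off switch
    std::string exporter = "otlp_grpc";       // "otlp_grpc" or "otlp_http"
    std::string endpoint = "localhost:4317";  // collector address
    double sampleRatio = 0.10;                // head-sampling probability
    std::vector<std::string> components;      // e.g. {"rpc", "consensus"}
};
```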
OpenTelemetry Collector configurations are provided for development (with Jaeger) and production (with tail-based sampling, Tempo, and Elastic APM). Docker Compose examples enable quick local development environment setup.
➡️ **[View full Configuration Reference](./05-configuration-reference.md)**

---
## 6. Implementation Phases

The implementation spans 9 weeks across 5 phases:

| Phase | Duration | Focus | Key Deliverables |
| ----- | -------- | ----- | ---------------- |
| 1 | Weeks 1-2 | Core Infrastructure | SDK integration, Telemetry interface, Configuration |
| 2 | Weeks 3-4 | RPC Tracing | HTTP context extraction, Handler instrumentation |
| 3 | Weeks 5-6 | Transaction Tracing | Protocol Buffer context, Relay propagation |
| 4 | Weeks 7-8 | Consensus Tracing | Round spans, Proposal/validation tracing |
| 5 | Week 9 | Documentation | Runbook, Dashboards, Training |

**Total Effort**: 47 developer-days with 2 developers

➡️ **[View full Implementation Phases](./06-implementation-phases.md)**

---
## 7. Observability Backends

For development and testing, Jaeger provides easy setup with a good UI. For production deployments, Grafana Tempo is recommended for its cost-effectiveness and Grafana integration, while Elastic APM is ideal for organizations with existing Elastic infrastructure.

The recommended production architecture uses a gateway collector pattern with regional collectors performing tail-based sampling, routing traces to multiple backends (Tempo for primary storage, Elastic for log correlation, S3/GCS for long-term archive).

➡️ **[View Observability Backend Recommendations](./07-observability-backends.md)**

---
## 8. Appendix

The appendix contains a glossary of OpenTelemetry and rippled-specific terms, references to external documentation and specifications, version history for this implementation plan, and a complete document index.

➡️ **[View Appendix](./08-appendix.md)**

---

*This document provides a comprehensive implementation plan for integrating OpenTelemetry distributed tracing into the rippled XRP Ledger node software. For detailed information on any section, follow the links to the corresponding sub-documents.*
SECURITY.md
@@ -42,7 +42,7 @@ For more information on responsible disclosure, please read this [Wikipedia arti
|
||||
|
||||
## Report Handling Process
|
||||
|
||||
Please report the bug directly to us and limit further disclosure. If you want to prove that you knew the bug as of a given time, consider using a cryptographic pre-commitment: hash the content of your report and publish the hash on a medium of your choice (e.g. on Twitter or as a memo in a transaction) as "proof" that you had written the text at a given point in time.
|
||||
Please report the bug directly to us and limit further disclosure. If you want to prove that you knew the bug as of a given time, consider using a cryptographic precommitment: hash the content of your report and publish the hash on a medium of your choice (e.g. on Twitter or as a memo in a transaction) as "proof" that you had written the text at a given point in time.
|
||||
|
||||
Once we receive a report, we:
|
||||
|
||||
@@ -78,61 +78,72 @@ To report a qualifying bug, please send a detailed report to:
|
||||
|
||||
| Email Address | bugs@ripple.com |
|
||||
| :-----------: | :-------------------------------------------------- |
|
||||
| Short Key ID | `0xA9F514E0` |
|
||||
| Long Key ID | `0xD900855AA9F514E0` |
|
||||
| Fingerprint | `B72C 0654 2F2A E250 2763 A268 D900 855A A9F5 14E0` |
|
||||
| Short Key ID | `0xC57929BE` |
|
||||
| Long Key ID | `0xCD49A0AFC57929BE` |
|
||||
| Fingerprint | `24E6 3B02 37E0 FA9C 5E96 8974 CD49 A0AF C579 29BE` |
|
||||
|
||||
The full PGP key for this address, which is also available on several key servers (e.g. on [keyserver.ubuntu.com](https://keyserver.ubuntu.com)), is:
|
||||
|
||||
```
|
||||
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
mQINBGkSZAQBEACprU199OhgdsOsygNjiQV4msuN3vDOUooehL+NwfsGfW79Tbqq
|
||||
Q2u7uQ3NZjW+M2T4nsDwuhkr7pe7xSReR5W8ssaczvtUyxkvbMClilcgZ2OSCAuC
|
||||
N9tzJsqOqkwBvXoNXkn//T2jnPz0ZU2wSF+NrEibq5FeuyGdoX3yXXBxq9pW9HzK
|
||||
HkQll63QSl6BzVSGRQq+B6lGgaZGLwf3mzmIND9Z5VGLNK2jKynyz9z091whNG/M
|
||||
kV+E7/r/bujHk7WIVId07G5/COTXmSr7kFnNEkd2Umw42dkgfiNKvlmJ9M7c1wLK
|
||||
KbL9Eb4ADuW6rRc5k4s1e6GT8R4/VPliWbCl9SE32hXH8uTkqVIFZP2eyM5WRRHs
|
||||
aKzitkQG9UK9gcb0kdgUkxOvvgPHAe5IuZlcHFzU4y0dBbU1VEFWVpiLU0q+IuNw
|
||||
5BRemeHc59YNsngkmAZ+/9zouoShRusZmC8Wzotv75C2qVBcjijPvmjWAUz0Zunm
|
||||
Lsr+O71vqHE73pERjD07wuD/ISjiYRYYE/bVrXtXLZijC7qAH4RE3nID+2ojcZyO
|
||||
/2jMQvt7un56RsGH4UBHi3aBHi9bUoDGCXKiQY981cEuNaOxpou7Mh3x/ONzzSvk
|
||||
sTV6nl1LOZHykN1JyKwaNbTSAiuyoN+7lOBqbV04DNYAHL88PrT21P83aQARAQAB
|
||||
tB1SaXBwbGUgTGFicyA8YnVnc0ByaXBwbGUuY29tPokCTgQTAQgAOBYhBLcsBlQv
|
||||
KuJQJ2OiaNkAhVqp9RTgBQJpEmQEAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheA
|
||||
AAoJENkAhVqp9RTgBzgP/i7y+aDWl1maig1XMdyb+o0UGusumFSW4Hmj278wlKVv
|
||||
usgLPihYgHE0PKrv6WRyKOMC1tQEcYYN93M+OeQ1vFhS2YyURq6RCMmh4zq/awXG
|
||||
uZbG36OURB5NH8lGBOHiN/7O+nY0CgenBT2JWm+GW3nEOAVOVm4+r5GlpPlv+Dp1
|
||||
NPBThcKXFMnH73++NpSQoDzTfRYHPxhDAX3jkLi/moXfSanOLlR6l94XNNN0jBHW
|
||||
Quao0rzf4WSXq9g6AS224xhAA5JyIcFl8TX7hzj5HaFn3VWo3COoDu4U7H+BM0fl
|
||||
85yqiMQypp7EhN2gxpMMWaHY5TFM85U/bFXFYfEgihZ4/gt4uoIzsNI9jlX7mYvG
|
||||
KFdDij+oTlRsuOxdIy60B3dKcwOH9nZZCz0SPsN/zlRWgKzK4gDKdGhFkU9OlvPu
|
||||
94ZqscanoiWKDoZkF96+sjgfjkuHsDK7Lwc1Xi+T4drHG/3aVpkYabXox+lrKB/S
|
||||
yxZjeqOIQzWPhnLgCaLyvsKo5hxKzL0w3eURu8F3IS7RgOOlljv4M+Me9sEVcdNV
|
||||
aN3/tQwbaomSX1X5D5YXqhBwC3rU3wXwamsscRTGEpkV+JCX6KUqGP7nWmxCpAly
|
||||
FL05XuOd5SVHJjXLeuje0JqLUpN514uL+bThWwDbDTdAdwW3oK/2WbXz7IfJRLBj
|
||||
uQINBGkSZAQBEADdI3SL2F72qkrgFqXWE6HSRBu9bsAvTE5QrRPWk7ux6at537r4
|
||||
S4sIw2dOwLvbyIrDgKNq3LQ5wCK88NO/NeCOFm4AiCJSl3pJHXYnTDoUxTrrxx+o
|
||||
vSRI4I3fHEql/MqzgiAb0YUezjgFdh3vYheMPp/309PFbOLhiFqEcx80Mx5h06UH
|
||||
gDzu1qNj3Ec+31NLic5zwkrAkvFvD54d6bqYR3SEgMau6aYEewpGHbWBi2pLqSi2
|
||||
lQcAeOFixqGpTwDmAnYR8YtjBYepy0MojEAdTHcQQlOYSDk4q4elG+io2N8vECfU
|
||||
rD6ORecN48GXdZINYWTAdslrUeanmBdgQrYkSpce8TSghgT9P01SNaXxmyaehVUO
|
||||
lqI4pcg5G2oojAE8ncNS3TwDtt7daTaTC3bAdr4PXDVAzNAiewjMNZPB7xidkDGQ
|
||||
Y4W1LxTMXyJVWxehYOH7tsbBRKninlfRnLgYzmtIbNRAAvNcsxU6ihv3AV0WFknN
|
||||
YbSzotEv1Xq/5wk309x8zCDe+sP0cQicvbXafXmUzPAZzeqFg+VLFn7F9MP1WGlW
|
||||
B1u7VIvBF1Mp9Nd3EAGBAoLRdRu+0dVWIjPTQuPIuD9cCatJA0wVaKUrjYbBMl88
|
||||
a12LixNVGeSFS9N7ADHx0/o7GNT6l88YbaLP6zggUHpUD/bR+cDN7vllIQARAQAB
|
||||
iQI2BBgBCAAgFiEEtywGVC8q4lAnY6Jo2QCFWqn1FOAFAmkSZAQCGwwACgkQ2QCF
|
||||
Wqn1FOAfAA/8CYq4p0p4bobY20CKEMsZrkBTFJyPDqzFwMeTjgpzqbD7Y3Qq5QCK
|
||||
OBbvY02GWdiIsNOzKdBxiuam2xYP9WHZj4y7/uWEvT0qlPVmDFu+HXjoJ43oxwFd
|
||||
CUp2gMuQ4cSL3X94VRJ3BkVL+tgBm8CNY0vnTLLOO3kum/R69VsGJS1JSGUWjNM+
|
||||
4qwS3mz+73xJu1HmERyN2RZF/DGIZI2PyONQQ6aH85G1Dd2ohu2/DBAkQAMBrPbj
|
||||
FrbDaBLyFhODxU3kTWqnfLlaElSm2EGdIU2yx7n4BggEa//NZRMm5kyeo4vzhtlQ
|
||||
YIVUMLAOLZvnEqDnsLKp+22FzNR/O+htBQC4lPywl53oYSALdhz1IQlcAC1ru5KR
|
||||
XPzhIXV6IIzkcx9xNkEclZxmsuy5ERXyKEmLbIHAlzFmnrldlt2ZgXDtzaorLmxj
|
||||
klKibxd5tF50qOpOivz+oPtFo7n+HmFa1nlVAMxlDCUdM0pEVeYDKI5zfVwalyhZ
|
||||
NnjpakdZSXMwgc7NP/hH9buF35hKDp7EckT2y3JNYwHsDdy1icXN2q40XZw5tSIn
|
||||
zkPWdu3OUY8PISohN6Pw4h0RH4ZmoX97E8sEfmdKaT58U4Hf2aAv5r9IWCSrAVqY
|
||||
u5jvac29CzQR9Kal0A+8phHAXHNFD83SwzIC0syaT9ficAguwGH8X6Q=
|
||||
=nGuD
|
||||
mQINBFUwGHYBEAC0wpGpBPkd8W1UdQjg9+cEFzeIEJRaoZoeuJD8mofwI5Ejnjdt
|
||||
kCpUYEDal0ygkKobu8SzOoATcDl18iCrScX39VpTm96vISFZMhmOryYCIp4QLJNN
|
||||
4HKc2ZdBj6W4igNi6vj5Qo6JMyGpLY2mz4CZskbt0TNuUxWrGood+UrCzpY8x7/N
|
||||
a93fcvNw+prgCr0rCH3hAPmAFfsOBbtGzNnmq7xf3jg5r4Z4sDiNIF1X1y53DAfV
|
||||
rWDx49IKsuCEJfPMp1MnBSvDvLaQ2hKXs+cOpx1BCZgHn3skouEUxxgqbtTzBLt1
|
||||
xXpmuijsaltWngPnGO7mOAzbpZSdBm82/Emrk9bPMuD0QaLQjWr7HkTSUs6ZsKt4
|
||||
7CLPdWqxyY/QVw9UaxeHEtWGQGMIQGgVJGh1fjtUr5O1sC9z9jXcQ0HuIHnRCTls
|
||||
GP7hklJmfH5V4SyAJQ06/hLuEhUJ7dn+BlqCsT0tLmYTgZYNzNcLHcqBFMEZHvHw
|
||||
9GENMx/tDXgajKql4bJnzuTK0iGU/YepanANLd1JHECJ4jzTtmKOus9SOGlB2/l1
|
||||
0t0ADDYAS3eqOdOcUvo9ElSLCI5vSVHhShSte/n2FMWU+kMUboTUisEG8CgQnrng
|
||||
g2CvvQvqDkeOtZeqMcC7HdiZS0q3LJUWtwA/ViwxrVlBDCxiTUXCotyBWwARAQAB
|
||||
tDBSaXBwbGUgTGFicyBCdWcgQm91bnR5IFByb2dyYW0gPGJ1Z3NAcmlwcGxlLmNv
|
||||
bT6JAjcEEwEKACEFAlUwGHYCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ
|
||||
zUmgr8V5Kb6R0g//SwY/mVJY59k87iL26/KayauSoOcz7xjcST26l4ZHVVX85gOY
|
||||
HYZl8k0+m8X3zxeYm9a3QAoAml8sfoaFRFQP8ynnefRrLUPaZ2MjbJ0SACMwZNef
|
||||
T6o7Mi8LBAaiNZdYVyIfX1oM6YXtqYkuJdav6ZCyvVYqc9OvMJPY2ZzJYuI/ZtvQ
|
||||
/lTndxCeg9ALNX/iezOLGdfMpf4HuIFVwcPPlwGi+HDlB9/bggDEHC8z434SXVFc
|
||||
aQatXAPcDkjMUweU7y0CZtYEj00HITd4pSX6MqGiHrxlDZTqinCOPs1Ieqp7qufs
|
||||
MzlM6irLGucxj1+wa16ieyYvEtGaPIsksUKkywx0O7cf8N2qKg+eIkUk6O0Uc6eO
|
||||
CszizmiXIXy4O6OiLlVHGKkXHMSW9Nwe9GE95O8G9WR8OZCEuDv+mHPAutO+IjdP
|
||||
PDAAUvy+3XnkceO+HGWRpVvJZfFP2YH4A33InFL5yqlJmSoR/yVingGLxk55bZDM
|
||||
+HYGR3VeMb8Xj1rf/02qERsZyccMCFdAvKDbTwmvglyHdVLu5sPmktxbBYiemfyJ
|
||||
qxMxmYXCc9S0hWrWZW7edktBa9NpE58z1mx+hRIrDNbS2sDHrib9PULYCySyVYcF
|
||||
P+PWEe1CAS5jqkR2ker5td2/pHNnJIycynBEs7l6zbc9fu+nktFJz0q2B+GJAhwE
|
||||
EAEKAAYFAlUwGaQACgkQ+tiY1qQ2QkjMFw//f2hNY3BPNe+1qbhzumMDCnbTnGif
|
||||
kLuAGl9OKt81VHG1f6RnaGiLpR696+6Ja45KzH15cQ5JJl5Bgs1YkR/noTGX8IAD
|
||||
c70eNwiFu8JXTaaeeJrsmFkF9Tueufb364risYkvPP8tNUD3InBFEZT3WN7JKwix
|
||||
coD4/BwekUwOZVDd/uCFEyhlhZsROxdKNisNo3VtAq2s+3tIBAmTrriFUl0K+ZC5
|
||||
zgavcpnPN57zMtW9aK+VO3wXqAKYLYmtgxkVzSLUZt2M7JuwOaAdyuYWAneKZPCu
|
||||
1AXkmyo+d84sd5mZaKOr5xArAFiNMWPUcZL4rkS1Fq4dKtGAqzzR7a7hWtA5o27T
|
||||
6vynuxZ1n0PPh0er2O/zF4znIjm5RhTlfjp/VmhZdQfpulFEQ/dMxxGkQ9z5IYbX
|
||||
mTlSDbCSb+FMsanRBJ7Drp5EmBIudVGY6SHI5Re1RQiEh7GoDfUMUwZO+TVDII5R
|
||||
Ra7WyuimYleJgDo/+7HyfuIyGDaUCVj6pwVtYtYIdOI3tTw1R1Mr0V8yaNVnJghL
|
||||
CHcEJQL+YHSmiMM3ySil3O6tm1By6lFz8bVe/rgG/5uklQrnjMR37jYboi1orCC4
|
||||
yeIoQeV0ItlxeTyBwYIV/o1DBNxDevTZvJabC93WiGLw2XFjpZ0q/9+zI2rJUZJh
|
||||
qxmKP+D4e27lCI65Ag0EVTAYdgEQAMvttYNqeRNBRpSX8fk45WVIV8Fb21fWdwk6
|
||||
2SkZnJURbiC0LxQnOi7wrtii7DeFZtwM2kFHihS1VHekBnIKKZQSgGoKuFAQMGyu
|
||||
a426H4ZsSmA9Ufd7kRbvdtEcp7/RTAanhrSL4lkBhaKJrXlxBJ27o3nd7/rh7r3a
|
||||
OszbPY6DJ5bWClX3KooPTDl/RF2lHn+fweFk58UvuunHIyo4BWJUdilSXIjLun+P
|
||||
Qaik4ZAsZVwNhdNz05d+vtai4AwbYoO7adboMLRkYaXSQwGytkm+fM6r7OpXHYuS
|
||||
cR4zB/OK5hxCVEpWfiwN71N2NMvnEMaWd/9uhqxJzyvYgkVUXV9274TUe16pzXnW
|
||||
ZLfmitjwc91e7mJBBfKNenDdhaLEIlDRwKTLj7k58f9srpMnyZFacntu5pUMNblB
|
||||
cjXwWxz5ZaQikLnKYhIvrIEwtWPyjqOzNXNvYfZamve/LJ8HmWGCKao3QHoAIDvB
|
||||
9XBxrDyTJDpxbog6Qu4SY8AdgVlan6c/PsLDc7EUegeYiNTzsOK+eq3G5/E92eIu
|
||||
TsUXlciypFcRm1q8vLRr+HYYe2mJDo4GetB1zLkAFBcYJm/x9iJQbu0hn5NxJvZO
|
||||
R0Y5nOJQdyi+muJzKYwhkuzaOlswzqVXkq/7+QCjg7QsycdcwDjiQh3OrsgXHrwl
|
||||
M7gyafL9ABEBAAGJAh8EGAEKAAkFAlUwGHYCGwwACgkQzUmgr8V5Kb50BxAAhj9T
|
||||
TwmNrgRldTHszj+Qc+v8RWqV6j+R+zc0cn5XlUa6XFaXI1OFFg71H4dhCPEiYeN0
|
||||
IrnocyMNvCol+eKIlPKbPTmoixjQ4udPTR1DC1Bx1MyW5FqOrsgBl5t0e1VwEViM
|
||||
NspSStxu5Hsr6oWz2GD48lXZWJOgoL1RLs+uxjcyjySD/em2fOKASwchYmI+ezRv
|
||||
plfhAFIMKTSCN2pgVTEOaaz13M0U+MoprThqF1LWzkGkkC7n/1V1f5tn83BWiagG
|
||||
2N2Q4tHLfyouzMUKnX28kQ9sXfxwmYb2sA9FNIgxy+TdKU2ofLxivoWT8zS189z/
|
||||
Yj9fErmiMjns2FzEDX+bipAw55X4D/RsaFgC+2x2PDbxeQh6JalRA2Wjq32Ouubx
|
||||
u+I4QhEDJIcVwt9x6LPDuos1F+M5QW0AiUhKrZJ17UrxOtaquh/nPUL9T3l2qPUn
|
||||
1ChrZEEEhHO6vA8+jn0+cV9n5xEz30Str9iHnDQ5QyR5LyV4UBPgTdWyQzNVKA69
|
||||
KsSr9lbHEtQFRzGuBKwt6UlSFv9vPWWJkJit5XDKAlcKuGXj0J8OlltToocGElkF
|
||||
+gEBZfoOWi/IBjRLrFW2cT3p36DTR5O1Ud/1DLnWRqgWNBLrbs2/KMKE6EnHttyD
|
||||
7Tz8SQkuxltX/yBXMV3Ddy0t6nWV2SZEfuxJAQI=
|
||||
=spg4
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
||||
```
|
||||
|
||||
@@ -29,18 +29,18 @@
|
||||
#
|
||||
# Purpose
|
||||
#
|
||||
# This file documents and provides examples of all xrpld server process
|
||||
# configuration options. When the xrpld server instance is launched, it
|
||||
# This file documents and provides examples of all rippled server process
|
||||
# configuration options. When the rippled server instance is launched, it
|
||||
# looks for a file with the following name:
|
||||
#
|
||||
# xrpld.cfg
|
||||
# rippled.cfg
|
||||
#
|
||||
# For more information on where the xrpld server instance searches for the
|
||||
# For more information on where the rippled server instance searches for the
|
||||
# file, visit:
|
||||
#
|
||||
# https://xrpl.org/commandline-usage.html#generic-options
|
||||
#
|
||||
# This file should be named xrpld.cfg. This file is UTF-8 with DOS, UNIX,
|
||||
# This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX,
|
||||
# or Mac style end of lines. Blank lines and lines beginning with '#' are
|
||||
# ignored. Undefined sections are reserved. No escapes are currently defined.
|
||||
#
|
||||
@@ -89,8 +89,8 @@
|
||||
#
|
||||
#
|
||||
#
|
||||
# xrpld offers various server protocols to clients making inbound
|
||||
# connections. The listening ports xrpld uses are "universal" ports
|
||||
# rippled offers various server protocols to clients making inbound
|
||||
# connections. The listening ports rippled uses are "universal" ports
|
||||
# which may be configured to handshake in one or more of the available
|
||||
# supported protocols. These universal ports simplify administration:
|
||||
# A single open port can be used for multiple protocols.
|
||||
@@ -103,7 +103,7 @@
|
||||
#
|
||||
# A list of port names and key/value pairs. A port name must start with a
|
||||
# letter and contain only letters and numbers. The name is not case-sensitive.
|
||||
# For each name in this list, xrpld will look for a configuration file
|
||||
# For each name in this list, rippled will look for a configuration file
|
||||
# section with the same name and use it to create a listening port. The
|
||||
# name is informational only; the choice of name does not affect the function
|
||||
# of the listening port.
|
||||
@@ -134,7 +134,7 @@
|
||||
# ip = 127.0.0.1
|
||||
# protocol = http
|
||||
#
|
||||
# When xrpld is used as a command line client (for example, issuing a
|
||||
# When rippled is used as a command line client (for example, issuing a
|
||||
# server stop command), the first port advertising the http or https
|
||||
# protocol will be used to make the connection.
|
||||
#
|
||||
@@ -175,7 +175,7 @@
|
||||
# same time. It is possible have both Websockets and Secure Websockets
|
||||
# together in one port.
|
||||
#
|
||||
# NOTE If no ports support the peer protocol, xrpld cannot
|
||||
# NOTE If no ports support the peer protocol, rippled cannot
|
||||
# receive incoming peer connections or become a superpeer.
|
||||
#
|
||||
# limit = <number>
|
||||
@@ -194,7 +194,7 @@
|
||||
# required. IP address restrictions, if any, will be checked in addition
|
||||
# to the credentials specified here.
|
||||
#
|
||||
# When acting in the client role, xrpld will supply these credentials
|
||||
# When acting in the client role, rippled will supply these credentials
|
||||
# using HTTP's Basic Authentication headers when making outbound HTTP/S
|
||||
# requests.
|
||||
#
|
||||
@@ -218,7 +218,7 @@
|
||||
# administrative commands.
|
||||
#
|
||||
# NOTE A common configuration value for the admin field is "localhost".
|
||||
# If you are listening on all IPv4/IPv6 addresses by specifying
|
||||
# If you are listening on all IPv4/IPv6 addresses by specifing
|
||||
# ip = :: then you can use admin = ::ffff:127.0.0.1,::1 to allow
|
||||
# administrative access from both IPv4 and IPv6 localhost
|
||||
# connections.
|
||||
@@ -237,7 +237,7 @@
|
||||
# WS, or WSS protocol interfaces. If administrative commands are
|
||||
# disabled for a port, these credentials have no effect.
|
||||
#
|
||||
# When acting in the client role, xrpld will supply these credentials
|
||||
# When acting in the client role, rippled will supply these credentials
|
||||
# in the submitted JSON for any administrative command requests when
|
||||
# invoking JSON-RPC commands on remote servers.
|
||||
#
|
||||
@@ -258,7 +258,7 @@
|
||||
# resource controls will default to those for non-administrative users.
|
||||
#
|
||||
# The secure_gateway IP addresses are intended to represent
|
||||
# proxies. Since xrpld trusts these hosts, they must be
|
||||
# proxies. Since rippled trusts these hosts, they must be
|
||||
# responsible for properly authenticating the remote user.
|
||||
#
|
||||
# If some IP addresses are included for both "admin" and
|
||||
@@ -272,7 +272,7 @@
|
||||
# Use the specified files when configuring SSL on the port.
|
||||
#
|
||||
# NOTE If no files are specified and secure protocols are selected,
|
||||
# xrpld will generate an internal self-signed certificate.
|
||||
# rippled will generate an internal self-signed certificate.
|
||||
#
|
||||
# The files have these meanings:
|
||||
#
|
||||
@@ -297,12 +297,12 @@
|
||||
# Control the ciphers which the server will support over SSL on the port,
|
||||
# specified using the OpenSSL "cipher list format".
|
||||
#
|
||||
# NOTE If unspecified, xrpld will automatically configure a modern
|
||||
# NOTE If unspecified, rippled will automatically configure a modern
|
||||
# cipher suite. This default suite should be widely supported.
|
||||
#
|
||||
# You should not modify this string unless you have a specific
|
||||
# reason and cryptographic expertise. Incorrect modification may
|
||||
# keep xrpld from connecting to other instances of xrpld or
|
||||
# keep rippled from connecting to other instances of rippled or
|
||||
# prevent RPC and WebSocket clients from connecting.
|
||||
#
|
||||
# send_queue_limit = [1..65535]
|
||||
@@ -382,7 +382,7 @@
|
||||
#-----------------
|
||||
#
|
||||
# These settings control security and access attributes of the Peer to Peer
|
||||
# server section of the xrpld process. Peer Protocol implements the
|
||||
# server section of the rippled process. Peer Protocol implements the
|
||||
# Ripple Payment protocol. It is over peer connections that transactions
|
||||
# and validations are passed from to machine to machine, to determine the
|
||||
# contents of validated ledgers.
|
||||
@@ -396,7 +396,7 @@
|
||||
# true - enables compression
|
||||
# false - disables compression [default].
|
||||
#
|
||||
# The xrpld server can save bandwidth by compressing its peer-to-peer communications,
|
||||
# The rippled server can save bandwidth by compressing its peer-to-peer communications,
|
||||
# at a cost of greater CPU usage. If you enable link compression,
|
||||
# the server automatically compresses communications with peer servers
|
||||
# that also have link compression enabled.
|
||||
@@ -432,7 +432,7 @@
|
||||
#
|
||||
# [ips_fixed]
|
||||
#
|
||||
# List of IP addresses or hostnames to which xrpld should always attempt to
|
||||
# List of IP addresses or hostnames to which rippled should always attempt to
|
||||
# maintain peer connections with. This is useful for manually forming private
|
||||
# networks, for example to configure a validation server that connects to the
|
||||
# Ripple network through a public-facing server, or for building a set
|
||||
@@ -573,7 +573,7 @@
|
||||
#
|
||||
# minimum_txn_in_ledger_standalone = <number>
|
||||
#
|
||||
# Like minimum_txn_in_ledger when xrpld is running in standalone
|
||||
# Like minimum_txn_in_ledger when rippled is running in standalone
|
||||
# mode. Default: 1000.
|
||||
#
|
||||
# target_txn_in_ledger = <number>
|
||||
@@ -710,7 +710,7 @@
|
||||
#
|
||||
# [validator_token]
|
||||
#
|
||||
# This is an alternative to [validation_seed] that allows xrpld to perform
|
||||
# This is an alternative to [validation_seed] that allows rippled to perform
|
||||
# validation without having to store the validator keys on the network
|
||||
# connected server. The field should contain a single token in the form of a
|
||||
# base64-encoded blob.
|
||||
@@ -745,7 +745,7 @@
|
||||
#
|
||||
# Specify the file by its name or path.
|
||||
# Unless an absolute path is specified, it will be considered relative to
|
||||
# the folder in which the xrpld.cfg file is located.
|
||||
# the folder in which the rippled.cfg file is located.
|
||||
#
|
||||
# Examples:
|
||||
# /home/ripple/validators.txt
|
||||
@@ -840,7 +840,7 @@
|
||||
#
|
||||
# 0: Disable the ledger replay feature [default]
|
||||
# 1: Enable the ledger replay feature. With this feature enabled, when
|
||||
# acquiring a ledger from the network, a xrpld node only downloads
|
||||
# acquiring a ledger from the network, a rippled node only downloads
|
||||
# the ledger header and the transactions instead of the whole ledger.
|
||||
# And the ledger is built by applying the transactions to the parent
|
||||
# ledger.
|
||||
@@ -851,7 +851,7 @@
|
||||
#
|
||||
#----------------
|
||||
#
|
||||
# The xrpld server instance uses HTTPS GET requests in a variety of
|
||||
# The rippled server instance uses HTTPS GET requests in a variety of
|
||||
# circumstances, including but not limited to contacting trusted domains to
|
||||
# fetch information such as mapping an email address to a Ripple Payment
|
||||
# Network address.
|
||||
@@ -891,7 +891,7 @@
|
||||
#
|
||||
#------------
|
||||
#
|
||||
# xrpld creates 4 SQLite database to hold bookkeeping information
|
||||
# rippled creates 4 SQLite database to hold bookkeeping information
|
||||
# about transactions, local credentials, and various other things.
|
||||
# It also creates the NodeDB, which holds all the objects that
|
||||
# make up the current and historical ledgers.
|
||||
@@ -902,7 +902,7 @@
|
||||
# the performance of the server.
|
||||
#
|
||||
# Partial pathnames will be considered relative to the location of
|
||||
# the xrpld.cfg file.
|
||||
# the rippled.cfg file.
|
||||
#
|
||||
# [node_db] Settings for the Node Database (required)
|
||||
#
|
||||
@@ -920,11 +920,11 @@
|
||||
# type = NuDB
|
||||
#
|
||||
# NuDB is a high-performance database written by Ripple Labs and optimized
|
||||
# for xrpld and solid-state drives.
|
||||
# for rippled and solid-state drives.
|
||||
#
|
||||
# NuDB maintains its high speed regardless of the amount of history
|
||||
# stored. Online delete may be selected, but is not required. NuDB is
|
||||
# available on all platforms that xrpld runs on.
|
||||
# available on all platforms that rippled runs on.
|
||||
#
|
||||
# type = RocksDB
|
||||
#
|
||||
@@ -940,7 +940,23 @@
|
||||
#
|
||||
# path Location to store the database
|
||||
#
|
||||
# Optional keys for NuDB and RocksDB:
|
||||
# Optional keys
|
||||
#
|
||||
# cache_size Size of cache for database records. Default is 16384.
|
||||
# Setting this value to 0 will use the default value.
|
||||
#
|
||||
# cache_age Length of time in minutes to keep database records
|
||||
# cached. Default is 5 minutes. Setting this value to
|
||||
# 0 will use the default value.
|
||||
#
|
||||
# Note: if neither cache_size nor cache_age is
|
||||
# specified, the cache for database records will not
|
||||
# be created. If only one of cache_size or cache_age
|
||||
# is specified, the cache will be created using the
|
||||
# default value for the unspecified parameter.
|
||||
#
|
||||
# Note: the cache will not be created if online_delete
|
||||
# is specified.
|
||||
#
|
||||
# fast_load Boolean. If set, load the last persisted ledger
|
||||
# from disk upon process start before syncing to
|
||||
@@ -948,6 +964,8 @@
|
||||
# if sufficient IOPS capacity is available.
|
||||
# Default 0.
|
||||
#
|
||||
# Optional keys for NuDB or RocksDB:
|
||||
#
|
||||
# earliest_seq The default is 32570 to match the XRP ledger
|
||||
# network's earliest allowed sequence. Alternate
|
||||
# networks may set this value. Minimum value of 1.
|
||||
@@ -1031,7 +1049,7 @@
|
||||
#
|
||||
# recovery_wait_seconds
|
||||
# The online delete process checks periodically
|
||||
# that xrpld is still in sync with the network,
|
||||
# that rippled is still in sync with the network,
|
||||
# and that the validated ledger is less than
|
||||
# 'age_threshold_seconds' old. If not, then continue
|
||||
# sleeping for this number of seconds and
|
||||
@@ -1051,8 +1069,8 @@
|
||||
# The server creates and maintains 4 to 5 bookkeeping SQLite databases in
|
||||
# the 'database_path' location. If you omit this configuration setting,
|
||||
# the server creates a directory called "db" located in the same place as
|
||||
# your xrpld.cfg file.
|
||||
# Partial pathnames are relative to the location of the xrpld executable.
|
||||
# your rippled.cfg file.
|
||||
# Partial pathnames are relative to the location of the rippled executable.
|
||||
#
|
||||
# [sqlite] Tuning settings for the SQLite databases (optional)
|
||||
#
|
||||
@@ -1102,7 +1120,7 @@
|
||||
# The default is "wal", which uses a write-ahead
|
||||
# log to implement database transactions.
|
||||
# Alternately, "memory" saves disk I/O, but if
|
||||
# xrpld crashes during a transaction, the
|
||||
# rippled crashes during a transaction, the
|
||||
# database is likely to be corrupted.
|
||||
# See https://www.sqlite.org/pragma.html#pragma_journal_mode
|
||||
# for more details about the available options.
|
||||
@@ -1112,7 +1130,7 @@
|
||||
# synchronous Valid values: off, normal, full, extra
|
||||
# The default is "normal", which works well with
|
||||
# the "wal" journal mode. Alternatively, "off"
|
||||
# allows xrpld to continue as soon as data is
|
||||
# allows rippled to continue as soon as data is
|
||||
# passed to the OS, which can significantly
|
||||
# increase speed, but risks data corruption if
|
||||
# the host computer crashes before writing that
|
||||
@@ -1126,7 +1144,7 @@
|
||||
# The default is "file", which will use files
|
||||
# for temporary database tables and indices.
|
||||
# Alternatively, "memory" may save I/O, but
|
||||
# xrpld does not currently use many, if any,
|
||||
# rippled does not currently use many, if any,
|
||||
# of these temporary objects.
|
||||
# See https://www.sqlite.org/pragma.html#pragma_temp_store
|
||||
# for more details about the available options.
|
||||
@@ -1155,7 +1173,7 @@
|
||||
#
|
||||
# These settings are designed to help server administrators diagnose
|
||||
# problems, and obtain detailed information about the activities being
|
||||
# performed by the xrpld process.
|
||||
# performed by the rippled process.
|
||||
#
|
||||
#
|
||||
#
|
||||
@@ -1172,7 +1190,7 @@
|
||||
#
|
||||
# Configuration parameters for the Beast. Insight stats collection module.
|
||||
#
|
||||
# Insight is a module that collects information from the areas of xrpld
|
||||
# Insight is a module that collects information from the areas of rippled
|
||||
# that have instrumentation. The configuration parameters control where the
|
||||
# collection metrics are sent. The parameters are expressed as key = value
|
||||
# pairs with no white space. The main parameter is the choice of server:
|
||||
@@ -1181,7 +1199,7 @@
|
||||
#
|
||||
# Choice of server to send metrics to. Currently the only choice is
|
||||
# "statsd" which sends UDP packets to a StatsD daemon, which must be
|
||||
# running while xrpld is running. More information on StatsD is
|
||||
# running while rippled is running. More information on StatsD is
|
||||
# available here:
|
||||
# https://github.com/b/statsd_spec
|
||||
#
|
||||
@@ -1191,7 +1209,7 @@
|
||||
# in the format, n.n.n.n:port.
|
||||
#
|
||||
# "prefix" A string prepended to each collected metric. This is used
|
||||
# to distinguish between different running instances of xrpld.
|
||||
# to distinguish between different running instances of rippled.
|
||||
#
|
||||
# If this section is missing, or the server type is unspecified or unknown,
|
||||
# statistics are not collected or reported.
|
||||
@@ -1218,7 +1236,7 @@
|
||||
#
|
||||
# Example:
|
||||
# [perf]
|
||||
# perf_log=/var/log/xrpld/perf.log
|
||||
# perf_log=/var/log/rippled/perf.log
|
||||
# log_interval=2
|
||||
#
|
||||
#-------------------------------------------------------------------------------
|
||||
@@ -1228,7 +1246,7 @@
|
||||
#----------
|
||||
#
|
||||
# The vote settings configure settings for the entire Ripple network.
|
||||
# While a single instance of xrpld cannot unilaterally enforce network-wide
|
||||
# While a single instance of rippled cannot unilaterally enforce network-wide
|
||||
# settings, these choices become part of the instance's vote during the
|
||||
# consensus process for each voting ledger.
|
||||
#
|
||||
@@ -1242,7 +1260,7 @@
|
||||
# The reference transaction is the simplest form of transaction.
|
||||
# It represents an XRP payment between two parties.
|
||||
#
|
||||
# If this parameter is unspecified, xrpld will use an internal
|
||||
# If this parameter is unspecified, rippled will use an internal
|
||||
# default. Don't change this without understanding the consequences.
|
||||
#
|
||||
# Example:
|
||||
@@ -1254,7 +1272,7 @@
|
||||
# account's XRP balance that is at or below the reserve may only be
|
||||
# spent on transaction fees, and not transferred out of the account.
|
||||
#
|
||||
# If this parameter is unspecified, xrpld will use an internal
|
||||
# If this parameter is unspecified, rippled will use an internal
|
||||
# default. Don't change this without understanding the consequences.
|
||||
#
|
||||
# Example:
|
||||
@@ -1266,7 +1284,7 @@
|
||||
# each ledger item owned by the account. Ledger items an account may
|
||||
# own include trust lines, open orders, and tickets.
|
||||
#
|
||||
# If this parameter is unspecified, xrpld will use an internal
|
||||
# If this parameter is unspecified, rippled will use an internal
|
||||
# default. Don't change this without understanding the consequences.
|
||||
#
|
||||
# Example:
|
||||
@@ -1308,7 +1326,7 @@
|
||||
# tool instead.
|
||||
#
|
||||
# This flag has no effect on the "sign" and "sign_for" command line options
|
||||
# that xrpld makes available.
|
||||
# that rippled makes available.
|
||||
#
|
||||
# The default value of this field is "false"
|
||||
#
|
||||
@@ -1387,7 +1405,7 @@
|
||||
#--------------------
|
||||
#
|
||||
# Administrators can use these values as a starting point for configuring
|
||||
# their instance of xrpld, but each value should be checked to make sure
|
||||
# their instance of rippled, but each value should be checked to make sure
|
||||
# it meets the business requirements for the organization.
|
||||
#
|
||||
# Server
|
||||
@@ -1397,7 +1415,7 @@
|
||||
# "peer"
|
||||
#
|
||||
# Peer protocol open to everyone. This is required to accept
|
||||
# incoming xrpld connections. This does not affect automatic
|
||||
# incoming rippled connections. This does not affect automatic
|
||||
# or manual outgoing Peer protocol connections.
|
||||
#
|
||||
# "rpc"
|
||||
@@ -1414,7 +1432,7 @@
|
||||
#
|
||||
# ETL commands for Clio. We recommend setting secure_gateway
|
||||
# in this section to a comma-separated list of the addresses
|
||||
# of your Clio servers, in order to bypass xrpld's rate limiting.
|
||||
# of your Clio servers, in order to bypass rippled's rate limiting.
|
||||
#
|
||||
# This port is commented out but can be enabled by removing
|
||||
# the '#' from each corresponding line including the entry under [server]
|
||||
@@ -1431,8 +1449,8 @@
|
||||
# NOTE
|
||||
#
|
||||
# To accept connections on well known ports such as 80 (HTTP) or
|
||||
# 443 (HTTPS), most operating systems will require xrpld to
|
||||
# run with administrator privileges, or else xrpld will not start.
|
||||
# 443 (HTTPS), most operating systems will require rippled to
|
||||
# run with administrator privileges, or else rippled will not start.
|
||||
|
||||
[server]
|
||||
port_rpc_admin_local
|
||||
@@ -1478,7 +1496,7 @@ secure_gateway = 127.0.0.1
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
# This is primary persistent datastore for xrpld. This includes transaction
|
||||
# This is primary persistent datastore for rippled. This includes transaction
|
||||
# metadata, account states, and ledger headers. Helpful information can be
|
||||
# found at https://xrpl.org/capacity-planning.html#node-db-type
|
||||
# type=NuDB is recommended for non-validators with fast SSDs. Validators or
|
||||
@@ -1493,19 +1511,19 @@ secure_gateway = 127.0.0.1
|
||||
# deletion.
|
||||
[node_db]
|
||||
type=NuDB
|
||||
path=/var/lib/xrpld/db/nudb
|
||||
path=/var/lib/rippled/db/nudb
|
||||
nudb_block_size=4096
|
||||
online_delete=512
|
||||
advisory_delete=0
|
||||
|
||||
[database_path]
|
||||
/var/lib/xrpld/db
|
||||
/var/lib/rippled/db
|
||||
|
||||
|
||||
# This needs to be an absolute directory reference, not a relative one.
|
||||
# Modify this value as required.
|
||||
[debug_logfile]
|
||||
/var/log/xrpld/debug.log
|
||||
/var/log/rippled/debug.log
|
||||
|
||||
# To use the XRP test network
|
||||
# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html),
|
||||
@@ -1515,7 +1533,7 @@ advisory_delete=0
|
||||
|
||||
# File containing trusted validator keys or validator list publishers.
|
||||
# Unless an absolute path is specified, it will be considered relative to the
|
||||
# folder in which the xrpld.cfg file is located.
|
||||
# folder in which the rippled.cfg file is located.
|
||||
[validators_file]
|
||||
validators.txt
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#
|
||||
# Default validators.txt
|
||||
#
|
||||
# This file is located in the same folder as your xrpld.cfg file
|
||||
# This file is located in the same folder as your rippled.cfg file
|
||||
# and defines which validators your server trusts not to collude.
|
||||
#
|
||||
# This file is UTF-8 with DOS, UNIX, or Mac style line endings.
|
||||
|
||||
@@ -1,29 +1,30 @@
|
||||
macro (exclude_from_default target_)
|
||||
set_target_properties(${target_} PROPERTIES EXCLUDE_FROM_ALL ON)
|
||||
set_target_properties(${target_} PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD ON)
|
||||
set_target_properties (${target_} PROPERTIES EXCLUDE_FROM_ALL ON)
|
||||
set_target_properties (${target_} PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD ON)
|
||||
endmacro ()
|
||||
|
||||
macro (exclude_if_included target_)
|
||||
get_directory_property(has_parent PARENT_DIRECTORY)
|
||||
if (has_parent)
|
||||
exclude_from_default(${target_})
|
||||
endif ()
|
||||
get_directory_property(has_parent PARENT_DIRECTORY)
|
||||
if (has_parent)
|
||||
exclude_from_default (${target_})
|
||||
endif ()
|
||||
endmacro ()
|
||||
|
||||
find_package(Git)
|
||||
|
||||
function (git_branch branch_val)
|
||||
if (NOT GIT_FOUND)
|
||||
return()
|
||||
endif ()
|
||||
set(_branch "")
|
||||
execute_process(COMMAND ${GIT_EXECUTABLE} "rev-parse" "--abbrev-ref" "HEAD"
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
RESULT_VARIABLE _git_exit_code
|
||||
OUTPUT_VARIABLE _temp_branch
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET)
|
||||
if (_git_exit_code EQUAL 0)
|
||||
set(_branch ${_temp_branch})
|
||||
endif ()
|
||||
set(${branch_val} "${_branch}" PARENT_SCOPE)
|
||||
if (NOT GIT_FOUND)
|
||||
return ()
|
||||
endif ()
|
||||
set (_branch "")
|
||||
execute_process (COMMAND ${GIT_EXECUTABLE} "rev-parse" "--abbrev-ref" "HEAD"
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
RESULT_VARIABLE _git_exit_code
|
||||
OUTPUT_VARIABLE _temp_branch
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
ERROR_QUIET)
|
||||
if (_git_exit_code EQUAL 0)
|
||||
set (_branch ${_temp_branch})
|
||||
endif ()
|
||||
set (${branch_val} "${_branch}" PARENT_SCOPE)
|
||||
endfunction ()
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
find_program(CCACHE_PATH "ccache")
|
||||
if (NOT CCACHE_PATH)
|
||||
return()
|
||||
endif ()
|
||||
|
||||
# For Linux and macOS we can use the ccache binary directly.
|
||||
if (NOT MSVC)
|
||||
set(CMAKE_C_COMPILER_LAUNCHER "${CCACHE_PATH}")
|
||||
set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PATH}")
|
||||
message(STATUS "Found ccache: ${CCACHE_PATH}")
|
||||
return()
|
||||
endif ()
|
||||
|
||||
# For Windows more effort is required. The code below is a modified version of
|
||||
# https://github.com/ccache/ccache/wiki/MS-Visual-Studio#usage-with-cmake.
|
||||
if ("${CCACHE_PATH}" MATCHES "chocolatey")
|
||||
message(DEBUG "Ccache path: ${CCACHE_PATH}")
|
||||
# Chocolatey uses a shim executable that we cannot use directly, in which case we have to find the executable it
|
||||
# points to. If we cannot find the target executable then we cannot use ccache.
|
||||
find_program(BASH_PATH "bash")
|
||||
if (NOT BASH_PATH)
|
||||
message(WARNING "Could not find bash.")
|
||||
return()
|
||||
endif ()
|
||||
|
||||
execute_process(COMMAND bash -c
|
||||
"export LC_ALL='en_US.UTF-8'; ${CCACHE_PATH} --shimgen-noop | grep -oP 'path to executable: \\K.+' | head -c -1"
|
||||
OUTPUT_VARIABLE CCACHE_PATH)
|
||||
|
||||
if (NOT CCACHE_PATH)
|
||||
message(WARNING "Could not find ccache target.")
|
||||
return()
|
||||
endif ()
|
||||
file(TO_CMAKE_PATH "${CCACHE_PATH}" CCACHE_PATH)
|
||||
endif ()
|
||||
message(STATUS "Found ccache: ${CCACHE_PATH}")
|
||||
|
||||
# Tell cmake to use ccache for compiling with Visual Studio.
|
||||
file(COPY_FILE ${CCACHE_PATH} ${CMAKE_BINARY_DIR}/cl.exe ONLY_IF_DIFFERENT)
|
||||
set(CMAKE_VS_GLOBALS "CLToolExe=cl.exe" "CLToolPath=${CMAKE_BINARY_DIR}" "TrackFileAccess=false"
|
||||
"UseMultiToolTask=true")
|
||||
|
||||
# By default Visual Studio generators will use /Zi to capture debug information, which is not compatible with ccache, so
|
||||
# tell it to use /Z7 instead.
|
||||
if (MSVC)
|
||||
foreach (var_ CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE)
|
||||
string(REPLACE "/Zi" "/Z7" ${var_} "${${var_}}")
|
||||
endforeach ()
|
||||
endif ()
|
||||
@@ -172,47 +172,51 @@ include(CMakeParseArguments)
|
||||
option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE)
|
||||
|
||||
# Check prereqs
|
||||
find_program(GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test)
|
||||
find_program( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test)
|
||||
|
||||
if (DEFINED CODE_COVERAGE_GCOV_TOOL)
|
||||
set(GCOV_TOOL "${CODE_COVERAGE_GCOV_TOOL}")
|
||||
elseif (DEFINED ENV{CODE_COVERAGE_GCOV_TOOL})
|
||||
set(GCOV_TOOL "$ENV{CODE_COVERAGE_GCOV_TOOL}")
|
||||
elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
|
||||
if (APPLE)
|
||||
execute_process(COMMAND xcrun -f llvm-cov OUTPUT_VARIABLE LLVMCOV_PATH OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
else ()
|
||||
find_program(LLVMCOV_PATH llvm-cov)
|
||||
endif ()
|
||||
if (LLVMCOV_PATH)
|
||||
set(GCOV_TOOL "${LLVMCOV_PATH} gcov")
|
||||
endif ()
|
||||
elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
|
||||
find_program(GCOV_PATH gcov)
|
||||
set(GCOV_TOOL "${GCOV_PATH}")
|
||||
endif ()
|
||||
if(DEFINED CODE_COVERAGE_GCOV_TOOL)
|
||||
set(GCOV_TOOL "${CODE_COVERAGE_GCOV_TOOL}")
|
||||
elseif(DEFINED ENV{CODE_COVERAGE_GCOV_TOOL})
|
||||
set(GCOV_TOOL "$ENV{CODE_COVERAGE_GCOV_TOOL}")
|
||||
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
|
||||
if(APPLE)
|
||||
execute_process( COMMAND xcrun -f llvm-cov
|
||||
OUTPUT_VARIABLE LLVMCOV_PATH
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
)
|
||||
else()
|
||||
find_program( LLVMCOV_PATH llvm-cov )
|
||||
endif()
|
||||
if(LLVMCOV_PATH)
|
||||
set(GCOV_TOOL "${LLVMCOV_PATH} gcov")
|
||||
endif()
|
||||
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
|
||||
find_program( GCOV_PATH gcov )
|
||||
set(GCOV_TOOL "${GCOV_PATH}")
|
||||
endif()
|
||||
|
||||
# Check supported compiler (Clang, GNU and Flang)
|
||||
get_property(LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
|
||||
foreach (LANG ${LANGUAGES})
|
||||
if ("${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
|
||||
if ("${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS 3)
|
||||
message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...")
|
||||
endif ()
|
||||
elseif (NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU" AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES
|
||||
"(LLVM)?[Ff]lang")
|
||||
message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
|
||||
endif ()
|
||||
endforeach ()
|
||||
foreach(LANG ${LANGUAGES})
|
||||
if("${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
|
||||
if("${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS 3)
|
||||
message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...")
|
||||
endif()
|
||||
elseif(NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU"
|
||||
AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(LLVM)?[Ff]lang")
|
||||
message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
set(COVERAGE_COMPILER_FLAGS "-g --coverage" CACHE INTERNAL "")
|
||||
set(COVERAGE_COMPILER_FLAGS "-g --coverage"
|
||||
CACHE INTERNAL "")
|
||||
|
||||
set(COVERAGE_CXX_COMPILER_FLAGS "")
|
||||
set(COVERAGE_C_COMPILER_FLAGS "")
|
||||
set(COVERAGE_CXX_LINKER_FLAGS "")
|
||||
set(COVERAGE_C_LINKER_FLAGS "")
|
||||
|
||||
if (CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
|
||||
if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
|
||||
include(CheckCXXCompilerFlag)
|
||||
include(CheckCCompilerFlag)
|
||||
include(CheckLinkerFlag)
|
||||
@@ -223,51 +227,51 @@ if (CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
|
||||
set(COVERAGE_C_LINKER_FLAGS ${COVERAGE_COMPILER_FLAGS})
|
||||
|
||||
check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path)
|
||||
if (HAVE_cxx_fprofile_abs_path)
|
||||
if(HAVE_cxx_fprofile_abs_path)
|
||||
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_CXX_COMPILER_FLAGS} -fprofile-abs-path")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path)
|
||||
if (HAVE_c_fprofile_abs_path)
|
||||
if(HAVE_c_fprofile_abs_path)
|
||||
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_C_COMPILER_FLAGS} -fprofile-abs-path")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
check_linker_flag(CXX -fprofile-abs-path HAVE_cxx_linker_fprofile_abs_path)
|
||||
if (HAVE_cxx_linker_fprofile_abs_path)
|
||||
if(HAVE_cxx_linker_fprofile_abs_path)
|
||||
set(COVERAGE_CXX_LINKER_FLAGS "${COVERAGE_CXX_LINKER_FLAGS} -fprofile-abs-path")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
check_linker_flag(C -fprofile-abs-path HAVE_c_linker_fprofile_abs_path)
|
||||
if (HAVE_c_linker_fprofile_abs_path)
|
||||
if(HAVE_c_linker_fprofile_abs_path)
|
||||
set(COVERAGE_C_LINKER_FLAGS "${COVERAGE_C_LINKER_FLAGS} -fprofile-abs-path")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
check_cxx_compiler_flag(-fprofile-update=atomic HAVE_cxx_fprofile_update)
|
||||
if (HAVE_cxx_fprofile_update)
|
||||
if(HAVE_cxx_fprofile_update)
|
||||
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_CXX_COMPILER_FLAGS} -fprofile-update=atomic")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
check_c_compiler_flag(-fprofile-update=atomic HAVE_c_fprofile_update)
|
||||
if (HAVE_c_fprofile_update)
|
||||
if(HAVE_c_fprofile_update)
|
||||
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_C_COMPILER_FLAGS} -fprofile-update=atomic")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
check_linker_flag(CXX -fprofile-update=atomic HAVE_cxx_linker_fprofile_update)
|
||||
if (HAVE_cxx_linker_fprofile_update)
|
||||
if(HAVE_cxx_linker_fprofile_update)
|
||||
set(COVERAGE_CXX_LINKER_FLAGS "${COVERAGE_CXX_LINKER_FLAGS} -fprofile-update=atomic")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
check_linker_flag(C -fprofile-update=atomic HAVE_c_linker_fprofile_update)
|
||||
if (HAVE_c_linker_fprofile_update)
|
||||
if(HAVE_c_linker_fprofile_update)
|
||||
set(COVERAGE_C_LINKER_FLAGS "${COVERAGE_C_LINKER_FLAGS} -fprofile-update=atomic")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
||||
if (NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
|
||||
if(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
|
||||
message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading")
|
||||
endif () # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)
|
||||
endif() # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)
|
||||
|
||||
# Defines a target for running and collection code coverage information
|
||||
# Builds dependencies, runs the given executable and outputs reports.
|
||||
@@ -291,181 +295,193 @@ endif () # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)
|
||||
# )
|
||||
# The user can set the variable GCOVR_ADDITIONAL_ARGS to supply additional flags to the
|
||||
# GCVOR command.
|
||||
function (setup_target_for_coverage_gcovr)
|
||||
function(setup_target_for_coverage_gcovr)
|
||||
set(options NONE)
|
||||
set(oneValueArgs BASE_DIRECTORY NAME FORMAT)
|
||||
set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
|
||||
cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
|
||||
|
||||
if (NOT GCOV_TOOL)
|
||||
if(NOT GCOV_TOOL)
|
||||
message(FATAL_ERROR "Could not find gcov or llvm-cov tool! Aborting...")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
if (NOT GCOVR_PATH)
|
||||
if(NOT GCOVR_PATH)
|
||||
message(FATAL_ERROR "Could not find gcovr tool! Aborting...")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
# Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
|
||||
if (DEFINED Coverage_BASE_DIRECTORY)
|
||||
if(DEFINED Coverage_BASE_DIRECTORY)
|
||||
get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
|
||||
else ()
|
||||
else()
|
||||
set(BASEDIR ${PROJECT_SOURCE_DIR})
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
if (NOT DEFINED Coverage_FORMAT)
|
||||
if(NOT DEFINED Coverage_FORMAT)
|
||||
set(Coverage_FORMAT xml)
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
if (NOT DEFINED Coverage_EXECUTABLE AND DEFINED Coverage_EXECUTABLE_ARGS)
|
||||
if(NOT DEFINED Coverage_EXECUTABLE AND DEFINED Coverage_EXECUTABLE_ARGS)
|
||||
message(FATAL_ERROR "EXECUTABLE_ARGS must not be set if EXECUTABLE is not set")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
if ("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
|
||||
if("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
|
||||
message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting...")
|
||||
else ()
|
||||
if ((Coverage_FORMAT STREQUAL "html-details") OR (Coverage_FORMAT STREQUAL "html-nested"))
|
||||
else()
|
||||
if((Coverage_FORMAT STREQUAL "html-details")
|
||||
OR (Coverage_FORMAT STREQUAL "html-nested"))
|
||||
set(GCOVR_OUTPUT_FILE ${PROJECT_BINARY_DIR}/${Coverage_NAME}/index.html)
|
||||
set(GCOVR_CREATE_FOLDER ${PROJECT_BINARY_DIR}/${Coverage_NAME})
|
||||
elseif (Coverage_FORMAT STREQUAL "html-single")
|
||||
elseif(Coverage_FORMAT STREQUAL "html-single")
|
||||
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.html)
|
||||
elseif ((Coverage_FORMAT STREQUAL "json-summary") OR (Coverage_FORMAT STREQUAL "json-details")
|
||||
OR (Coverage_FORMAT STREQUAL "coveralls"))
|
||||
elseif((Coverage_FORMAT STREQUAL "json-summary")
|
||||
OR (Coverage_FORMAT STREQUAL "json-details")
|
||||
OR (Coverage_FORMAT STREQUAL "coveralls"))
|
||||
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.json)
|
||||
elseif (Coverage_FORMAT STREQUAL "txt")
|
||||
elseif(Coverage_FORMAT STREQUAL "txt")
|
||||
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.txt)
|
||||
elseif (Coverage_FORMAT STREQUAL "csv")
|
||||
elseif(Coverage_FORMAT STREQUAL "csv")
|
||||
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.csv)
|
||||
elseif (Coverage_FORMAT STREQUAL "lcov")
|
||||
elseif(Coverage_FORMAT STREQUAL "lcov")
|
||||
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.lcov)
|
||||
else ()
|
||||
else()
|
||||
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.xml)
|
||||
endif ()
|
||||
endif ()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if ((Coverage_FORMAT STREQUAL "cobertura") OR (Coverage_FORMAT STREQUAL "xml"))
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura "${GCOVR_OUTPUT_FILE}")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura-pretty)
|
||||
if((Coverage_FORMAT STREQUAL "cobertura")
|
||||
OR (Coverage_FORMAT STREQUAL "xml"))
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura "${GCOVR_OUTPUT_FILE}" )
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura-pretty )
|
||||
set(Coverage_FORMAT cobertura) # overwrite xml
|
||||
elseif (Coverage_FORMAT STREQUAL "sonarqube")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --sonarqube "${GCOVR_OUTPUT_FILE}")
|
||||
elseif (Coverage_FORMAT STREQUAL "jacoco")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --jacoco "${GCOVR_OUTPUT_FILE}")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --jacoco-pretty)
|
||||
elseif (Coverage_FORMAT STREQUAL "clover")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --clover "${GCOVR_OUTPUT_FILE}")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --clover-pretty)
|
||||
elseif (Coverage_FORMAT STREQUAL "lcov")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --lcov "${GCOVR_OUTPUT_FILE}")
|
||||
elseif (Coverage_FORMAT STREQUAL "json-summary")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary "${GCOVR_OUTPUT_FILE}")
|
||||
elseif(Coverage_FORMAT STREQUAL "sonarqube")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --sonarqube "${GCOVR_OUTPUT_FILE}" )
|
||||
elseif(Coverage_FORMAT STREQUAL "jacoco")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --jacoco "${GCOVR_OUTPUT_FILE}" )
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --jacoco-pretty )
|
||||
elseif(Coverage_FORMAT STREQUAL "clover")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --clover "${GCOVR_OUTPUT_FILE}" )
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --clover-pretty )
|
||||
elseif(Coverage_FORMAT STREQUAL "lcov")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --lcov "${GCOVR_OUTPUT_FILE}" )
|
||||
elseif(Coverage_FORMAT STREQUAL "json-summary")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary "${GCOVR_OUTPUT_FILE}" )
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary-pretty)
|
||||
elseif (Coverage_FORMAT STREQUAL "json-details")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --json "${GCOVR_OUTPUT_FILE}")
|
||||
elseif(Coverage_FORMAT STREQUAL "json-details")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --json "${GCOVR_OUTPUT_FILE}" )
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --json-pretty)
|
||||
elseif (Coverage_FORMAT STREQUAL "coveralls")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls "${GCOVR_OUTPUT_FILE}")
|
||||
elseif(Coverage_FORMAT STREQUAL "coveralls")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls "${GCOVR_OUTPUT_FILE}" )
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls-pretty)
|
||||
elseif (Coverage_FORMAT STREQUAL "csv")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --csv "${GCOVR_OUTPUT_FILE}")
|
||||
elseif (Coverage_FORMAT STREQUAL "txt")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --txt "${GCOVR_OUTPUT_FILE}")
|
||||
elseif (Coverage_FORMAT STREQUAL "html-single")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --html "${GCOVR_OUTPUT_FILE}")
|
||||
elseif(Coverage_FORMAT STREQUAL "csv")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --csv "${GCOVR_OUTPUT_FILE}" )
|
||||
elseif(Coverage_FORMAT STREQUAL "txt")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --txt "${GCOVR_OUTPUT_FILE}" )
|
||||
elseif(Coverage_FORMAT STREQUAL "html-single")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --html "${GCOVR_OUTPUT_FILE}" )
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --html-self-contained)
|
||||
elseif (Coverage_FORMAT STREQUAL "html-nested")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --html-nested "${GCOVR_OUTPUT_FILE}")
|
||||
elseif (Coverage_FORMAT STREQUAL "html-details")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --html-details "${GCOVR_OUTPUT_FILE}")
|
||||
else ()
|
||||
elseif(Coverage_FORMAT STREQUAL "html-nested")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --html-nested "${GCOVR_OUTPUT_FILE}" )
|
||||
elseif(Coverage_FORMAT STREQUAL "html-details")
|
||||
list(APPEND GCOVR_ADDITIONAL_ARGS --html-details "${GCOVR_OUTPUT_FILE}" )
|
||||
else()
|
||||
message(FATAL_ERROR "Unsupported output style ${Coverage_FORMAT}! Aborting...")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
# Collect excludes (CMake 3.4+: Also compute absolute paths)
set(GCOVR_EXCLUDES "")
foreach (EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES})
if (CMAKE_VERSION VERSION_GREATER 3.4)
foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES})
if(CMAKE_VERSION VERSION_GREATER 3.4)
get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR})
endif ()
endif()
list(APPEND GCOVR_EXCLUDES "${EXCLUDE}")
endforeach ()
endforeach()
list(REMOVE_DUPLICATES GCOVR_EXCLUDES)

# Combine excludes to several -e arguments
set(GCOVR_EXCLUDE_ARGS "")
foreach (EXCLUDE ${GCOVR_EXCLUDES})
foreach(EXCLUDE ${GCOVR_EXCLUDES})
list(APPEND GCOVR_EXCLUDE_ARGS "-e")
list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}")
endforeach ()
endforeach()
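# A minimal sketch (illustration only, hypothetical values) of what the loops
# above produce: each exclude becomes a separate "-e <path>" pair that gcovr
# accepts as a repeated exclude filter.
set(_demo_excludes "src/test" "src/tests")
set(_demo_args "")
foreach (_demo_ex IN LISTS _demo_excludes)
    list(APPEND _demo_args "-e" "${_demo_ex}")
endforeach ()
# _demo_args now holds: -e;src/test;-e;src/tests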
# Set up commands which will be run to generate coverage data
# If EXECUTABLE is not set, the user is expected to run the tests manually
# before running the coverage target NAME
if (DEFINED Coverage_EXECUTABLE)
set(GCOVR_EXEC_TESTS_CMD ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS})
endif ()
if(DEFINED Coverage_EXECUTABLE)
set(GCOVR_EXEC_TESTS_CMD
${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
)
endif()
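# A hedged usage sketch (not part of the module): because EXECUTABLE is
# optional, a caller may omit it and run the test binaries by hand before
# building the named target. Target and path names below are placeholders.
setup_target_for_coverage_gcovr(
    NAME coverage_report
    FORMAT lcov
    EXCLUDE "tests/*"
    DEPENDENCIES my_tests
)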
|
||||
|
||||
# Create folder
|
||||
if (DEFINED GCOVR_CREATE_FOLDER)
|
||||
set(GCOVR_FOLDER_CMD ${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER})
|
||||
endif ()
|
||||
if(DEFINED GCOVR_CREATE_FOLDER)
|
||||
set(GCOVR_FOLDER_CMD
|
||||
${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER})
|
||||
endif()
|
||||
|
||||
# Running gcovr
|
||||
set(GCOVR_CMD
|
||||
${GCOVR_PATH}
|
||||
--gcov-executable
|
||||
${GCOV_TOOL}
|
||||
--gcov-executable ${GCOV_TOOL}
|
||||
--gcov-ignore-parse-errors=negative_hits.warn_once_per_file
|
||||
-r
|
||||
${BASEDIR}
|
||||
-r ${BASEDIR}
|
||||
${GCOVR_ADDITIONAL_ARGS}
|
||||
${GCOVR_EXCLUDE_ARGS}
|
||||
--object-directory=${PROJECT_BINARY_DIR})
|
||||
--object-directory=${PROJECT_BINARY_DIR}
|
||||
)
|
||||
|
||||
if (CODE_COVERAGE_VERBOSE)
|
||||
if(CODE_COVERAGE_VERBOSE)
|
||||
message(STATUS "Executed command report")
|
||||
|
||||
if (NOT "${GCOVR_EXEC_TESTS_CMD}" STREQUAL "")
|
||||
if(NOT "${GCOVR_EXEC_TESTS_CMD}" STREQUAL "")
|
||||
message(STATUS "Command to run tests: ")
|
||||
string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}")
|
||||
message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
if (NOT "${GCOVR_FOLDER_CMD}" STREQUAL "")
|
||||
if(NOT "${GCOVR_FOLDER_CMD}" STREQUAL "")
|
||||
message(STATUS "Command to create a folder: ")
|
||||
string(REPLACE ";" " " GCOVR_FOLDER_CMD_SPACED "${GCOVR_FOLDER_CMD}")
|
||||
message(STATUS "${GCOVR_FOLDER_CMD_SPACED}")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
message(STATUS "Command to generate gcovr coverage data: ")
|
||||
string(REPLACE ";" " " GCOVR_CMD_SPACED "${GCOVR_CMD}")
|
||||
message(STATUS "${GCOVR_CMD_SPACED}")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
add_custom_target(${Coverage_NAME}
COMMAND ${GCOVR_EXEC_TESTS_CMD}
COMMAND ${GCOVR_FOLDER_CMD}
COMMAND ${GCOVR_CMD}
BYPRODUCTS ${GCOVR_OUTPUT_FILE}
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
DEPENDS ${Coverage_DEPENDENCIES}
VERBATIM # Protect arguments to commands
COMMENT "Running gcovr to produce code coverage report.")
COMMAND ${GCOVR_EXEC_TESTS_CMD}
COMMAND ${GCOVR_FOLDER_CMD}
COMMAND ${GCOVR_CMD}

BYPRODUCTS ${GCOVR_OUTPUT_FILE}
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
DEPENDS ${Coverage_DEPENDENCIES}
VERBATIM # Protect arguments to commands
COMMENT "Running gcovr to produce code coverage report."
)

# Show info where to find the report
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD COMMAND echo
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}")
endfunction () # setup_target_for_coverage_gcovr
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
COMMAND echo
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
)
endfunction() # setup_target_for_coverage_gcovr
|
||||
|
||||
function (add_code_coverage_to_target name scope)
function(add_code_coverage_to_target name scope)
separate_arguments(COVERAGE_CXX_COMPILER_FLAGS NATIVE_COMMAND "${COVERAGE_CXX_COMPILER_FLAGS}")
separate_arguments(COVERAGE_C_COMPILER_FLAGS NATIVE_COMMAND "${COVERAGE_C_COMPILER_FLAGS}")
separate_arguments(COVERAGE_CXX_LINKER_FLAGS NATIVE_COMMAND "${COVERAGE_CXX_LINKER_FLAGS}")
separate_arguments(COVERAGE_C_LINKER_FLAGS NATIVE_COMMAND "${COVERAGE_C_LINKER_FLAGS}")

# Add compiler options to the target
target_compile_options(${name} ${scope} $<$<COMPILE_LANGUAGE:CXX>:${COVERAGE_CXX_COMPILER_FLAGS}>
$<$<COMPILE_LANGUAGE:C>:${COVERAGE_C_COMPILER_FLAGS}>)
target_compile_options(${name} ${scope}
$<$<COMPILE_LANGUAGE:CXX>:${COVERAGE_CXX_COMPILER_FLAGS}>
$<$<COMPILE_LANGUAGE:C>:${COVERAGE_C_COMPILER_FLAGS}>)

target_link_libraries(${name} ${scope} $<$<LINK_LANGUAGE:CXX>:${COVERAGE_CXX_LINKER_FLAGS}>
$<$<LINK_LANGUAGE:C>:${COVERAGE_C_LINKER_FLAGS}>)
endfunction () # add_code_coverage_to_target
target_link_libraries (${name} ${scope}
$<$<LINK_LANGUAGE:CXX>:${COVERAGE_CXX_LINKER_FLAGS} gcov>
$<$<LINK_LANGUAGE:C>:${COVERAGE_C_LINKER_FLAGS} gcov>
)
endfunction() # add_code_coverage_to_target
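# A hedged usage sketch: the function above applies the COVERAGE_* compiler
# and linker flags to an existing target with the given scope. Target and
# source names here are hypothetical.
add_library(example_lib STATIC example.cpp)
add_code_coverage_to_target(example_lib PRIVATE)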
|
||||
|
||||
@@ -1,58 +0,0 @@
|
||||
# Shared detection of compiler, operating system, and architecture.
#
# This module centralizes environment detection so that other CMake modules can use the same variables instead of
# repeating checks on CMAKE_* and built-in platform variables.

# Only run once per configure step.
include_guard(GLOBAL)

# --------------------------------------------------------------------
# Compiler detection (C++)
# --------------------------------------------------------------------
set(is_clang FALSE)
set(is_gcc FALSE)
set(is_msvc FALSE)
set(is_xcode FALSE)

if (CMAKE_CXX_COMPILER_ID MATCHES ".*Clang") # Clang or AppleClang
set(is_clang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(is_gcc TRUE)
elseif (MSVC)
set(is_msvc TRUE)
else ()
message(FATAL_ERROR "Unsupported C++ compiler: ${CMAKE_CXX_COMPILER_ID}")
endif ()

# Xcode generator detection
if (CMAKE_GENERATOR STREQUAL "Xcode")
set(is_xcode TRUE)
endif ()

# --------------------------------------------------------------------
# Operating system detection
# --------------------------------------------------------------------
set(is_linux FALSE)
set(is_windows FALSE)
set(is_macos FALSE)

if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
set(is_linux TRUE)
elseif (CMAKE_SYSTEM_NAME STREQUAL "Windows")
set(is_windows TRUE)
elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin")
set(is_macos TRUE)
endif ()

# --------------------------------------------------------------------
# Architecture
# --------------------------------------------------------------------
set(is_amd64 FALSE)
set(is_arm64 FALSE)
if (CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|AMD64")
set(is_amd64 TRUE)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64|ARM64")
set(is_arm64 TRUE)
else ()
message(FATAL_ERROR "Unknown architecture: ${CMAKE_SYSTEM_PROCESSOR}")
endif ()
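# A minimal sketch (illustration only) of how another module can consume the
# flags this file sets; TARGET_ARCH_AMD64 is a hypothetical definition, not
# part of the project.
include(CompilationEnv)
if (is_clang AND is_linux)
    message(STATUS "Clang on Linux detected (${CMAKE_SYSTEM_PROCESSOR})")
endif ()
if (is_amd64)
    add_compile_definitions(TARGET_ARCH_AMD64=1)
endif ()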
|
||||
@@ -1,13 +1,25 @@
|
||||
include(isolate_headers)

function (xrpl_add_test name)
set(target ${PROJECT_NAME}.test.${name})
function(xrpl_add_test name)
set(target ${PROJECT_NAME}.test.${name})

file(GLOB_RECURSE sources CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/${name}.cpp")
add_executable(${target} ${ARGN} ${sources})
file(GLOB_RECURSE sources CONFIGURE_DEPENDS
"${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/${name}.cpp"
)
add_executable(${target} ${ARGN} ${sources})

isolate_headers(${target} "${CMAKE_SOURCE_DIR}" "${CMAKE_SOURCE_DIR}/tests/${name}" PRIVATE)
isolate_headers(
${target}
"${CMAKE_SOURCE_DIR}"
"${CMAKE_SOURCE_DIR}/tests/${name}"
PRIVATE
)

add_test(NAME ${target} COMMAND ${target})
endfunction ()
# Make sure the test isn't optimized away in unity builds
set_target_properties(${target} PROPERTIES
UNITY_BUILD_MODE GROUP
UNITY_BUILD_BATCH_SIZE 0) # Adjust as needed

add_test(NAME ${target} COMMAND ${target})
endfunction()
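# A hedged usage sketch: a tests/CMakeLists.txt would typically call the
# helper once per test directory ("basics" is a placeholder name). The
# resulting CTest entry is named ${PROJECT_NAME}.test.basics.
xrpl_add_test(basics)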
|
||||
|
||||
@@ -2,164 +2,154 @@
|
||||
setup project-wide compiler settings
|
||||
#]===================================================================]
|
||||
|
||||
include(CompilationEnv)
|
||||
|
||||
#[=========================================================[
|
||||
TODO some/most of these common settings belong in a
|
||||
toolchain file, especially the ABI-impacting ones
|
||||
#]=========================================================]
|
||||
add_library(common INTERFACE)
|
||||
add_library(Xrpl::common ALIAS common)
|
||||
include(XrplSanitizers)
|
||||
add_library (common INTERFACE)
|
||||
add_library (Xrpl::common ALIAS common)
|
||||
# add a single global dependency on this interface lib
|
||||
link_libraries(Xrpl::common)
|
||||
# Respect CMAKE_POSITION_INDEPENDENT_CODE setting (may be set by Conan toolchain)
|
||||
if (NOT DEFINED CMAKE_POSITION_INDEPENDENT_CODE)
|
||||
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
||||
endif ()
|
||||
set_target_properties(common PROPERTIES INTERFACE_POSITION_INDEPENDENT_CODE ${CMAKE_POSITION_INDEPENDENT_CODE})
|
||||
link_libraries (Xrpl::common)
|
||||
set_target_properties (common
|
||||
PROPERTIES INTERFACE_POSITION_INDEPENDENT_CODE ON)
|
||||
set(CMAKE_CXX_EXTENSIONS OFF)
|
||||
target_compile_definitions(
|
||||
common
|
||||
INTERFACE $<$<CONFIG:Debug>:DEBUG
|
||||
_DEBUG>
|
||||
#[===[
|
||||
target_compile_definitions (common
|
||||
INTERFACE
|
||||
$<$<CONFIG:Debug>:DEBUG _DEBUG>
|
||||
#[===[
|
||||
NOTE: CMAKE release builds already have NDEBUG defined, so no need to add it
|
||||
explicitly except for the special case of (profile ON) and (assert OFF).
|
||||
Presumably this is because we don't want profile builds asserting unless
|
||||
asserts were specifically requested.
|
||||
]===]
|
||||
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>
|
||||
# TODO: Remove once we have migrated functions from OpenSSL 1.x to 3.x.
|
||||
OPENSSL_SUPPRESS_DEPRECATED)
|
||||
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>
|
||||
# TODO: Remove once we have migrated functions from OpenSSL 1.x to 3.x.
|
||||
OPENSSL_SUPPRESS_DEPRECATED
|
||||
)
|
||||
|
||||
if (MSVC)
|
||||
# remove existing exception flag since we set it to -EHa
|
||||
string(REGEX REPLACE "[-/]EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
|
||||
# remove existing exception flag since we set it to -EHa
|
||||
string (REGEX REPLACE "[-/]EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
|
||||
|
||||
foreach (var_ CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE)
|
||||
foreach (var_
|
||||
CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
|
||||
CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE)
|
||||
|
||||
# also remove dynamic runtime
|
||||
string(REGEX REPLACE "[-/]MD[d]*" " " ${var_} "${${var_}}")
|
||||
# also remove dynamic runtime
|
||||
string (REGEX REPLACE "[-/]MD[d]*" " " ${var_} "${${var_}}")
|
||||
|
||||
# /ZI (Edit & Continue debugging information) is incompatible with Gy-
|
||||
string(REPLACE "/ZI" "/Zi" ${var_} "${${var_}}")
|
||||
# /ZI (Edit & Continue debugging information) is incompatible with Gy-
|
||||
string (REPLACE "/ZI" "/Zi" ${var_} "${${var_}}")
|
||||
|
||||
# omit debug info completely under CI (not needed)
|
||||
if (is_ci)
|
||||
string(REPLACE "/Zi" " " ${var_} "${${var_}}")
|
||||
string(REPLACE "/Z7" " " ${var_} "${${var_}}")
|
||||
endif ()
|
||||
endforeach ()
|
||||
# omit debug info completely under CI (not needed)
|
||||
if (is_ci)
|
||||
string (REPLACE "/Zi" " " ${var_} "${${var_}}")
|
||||
endif ()
|
||||
endforeach ()
|
||||
|
||||
target_compile_options(
|
||||
common
|
||||
INTERFACE # Increase object file max size
|
||||
-bigobj
|
||||
# Floating point behavior
|
||||
-fp:precise
|
||||
# __cdecl calling convention
|
||||
-Gd
|
||||
# Minimal rebuild: disabled
|
||||
-Gm-
|
||||
# Function level linking: disabled
|
||||
-Gy-
|
||||
# Multiprocessor compilation
|
||||
-MP
|
||||
# pragma omp: disabled
|
||||
-openmp-
|
||||
# No error reporting to Internet
|
||||
-errorReport:none
|
||||
# Suppress login banner
|
||||
-nologo
|
||||
# Disable signed/unsigned comparison warnings
|
||||
-wd4018
|
||||
# Disable float to int possible loss of data warnings
|
||||
-wd4244
|
||||
# Disable size_t to T possible loss of data warnings
|
||||
-wd4267
|
||||
# Disable C4800(int to bool performance)
|
||||
-wd4800
|
||||
# Decorated name length exceeded, name was truncated
|
||||
-wd4503
|
||||
$<$<COMPILE_LANGUAGE:CXX>:
|
||||
-EHa
|
||||
-GR
|
||||
>
|
||||
$<$<CONFIG:Release>:-Ox>
|
||||
$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CONFIG:Debug>>:
|
||||
-GS
|
||||
-Zc:forScope
|
||||
>
|
||||
# static runtime
|
||||
$<$<CONFIG:Debug>:-MTd>
|
||||
$<$<NOT:$<CONFIG:Debug>>:-MT>
|
||||
$<$<BOOL:${werr}>:-WX>)
|
||||
target_compile_definitions(
|
||||
common
|
||||
INTERFACE _WIN32_WINNT=0x6000
|
||||
_SCL_SECURE_NO_WARNINGS
|
||||
_CRT_SECURE_NO_WARNINGS
|
||||
WIN32_CONSOLE
|
||||
WIN32_LEAN_AND_MEAN
|
||||
NOMINMAX
|
||||
# TODO: Resolve these warnings, don't just silence them
|
||||
_SILENCE_ALL_CXX17_DEPRECATION_WARNINGS
|
||||
$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CONFIG:Debug>>:_CRTDBG_MAP_ALLOC>)
|
||||
target_link_libraries(common INTERFACE -errorreport:none -machine:X64)
|
||||
target_compile_options (common
|
||||
INTERFACE
|
||||
-bigobj # Increase object file max size
|
||||
-fp:precise # Floating point behavior
|
||||
-Gd # __cdecl calling convention
|
||||
-Gm- # Minimal rebuild: disabled
|
||||
-Gy- # Function level linking: disabled
|
||||
-MP # Multiprocessor compilation
|
||||
-openmp- # pragma omp: disabled
|
||||
-errorReport:none # No error reporting to Internet
|
||||
-nologo # Suppress login banner
|
||||
-wd4018 # Disable signed/unsigned comparison warnings
|
||||
-wd4244 # Disable float to int possible loss of data warnings
|
||||
-wd4267 # Disable size_t to T possible loss of data warnings
|
||||
-wd4800 # Disable C4800(int to bool performance)
|
||||
-wd4503 # Decorated name length exceeded, name was truncated
|
||||
$<$<COMPILE_LANGUAGE:CXX>:
|
||||
-EHa
|
||||
-GR
|
||||
>
|
||||
$<$<CONFIG:Release>:-Ox>
|
||||
$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CONFIG:Debug>>:
|
||||
-GS
|
||||
-Zc:forScope
|
||||
>
|
||||
# static runtime
|
||||
$<$<CONFIG:Debug>:-MTd>
|
||||
$<$<NOT:$<CONFIG:Debug>>:-MT>
|
||||
$<$<BOOL:${werr}>:-WX>
|
||||
)
|
||||
target_compile_definitions (common
|
||||
INTERFACE
|
||||
_WIN32_WINNT=0x6000
|
||||
_SCL_SECURE_NO_WARNINGS
|
||||
_CRT_SECURE_NO_WARNINGS
|
||||
WIN32_CONSOLE
|
||||
WIN32_LEAN_AND_MEAN
|
||||
NOMINMAX
|
||||
# TODO: Resolve these warnings, don't just silence them
|
||||
_SILENCE_ALL_CXX17_DEPRECATION_WARNINGS
|
||||
$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CONFIG:Debug>>:_CRTDBG_MAP_ALLOC>)
|
||||
target_link_libraries (common
|
||||
INTERFACE
|
||||
-errorreport:none
|
||||
-machine:X64)
|
||||
else ()
|
||||
target_compile_options(
|
||||
common
|
||||
INTERFACE -Wall
|
||||
-Wdeprecated
|
||||
$<$<BOOL:${is_clang}>:-Wno-deprecated-declarations>
|
||||
$<$<BOOL:${wextra}>:-Wextra
|
||||
-Wno-unused-parameter>
|
||||
$<$<BOOL:${werr}>:-Werror>
|
||||
-fstack-protector
|
||||
-Wno-sign-compare
|
||||
-Wno-unused-but-set-variable
|
||||
$<$<NOT:$<CONFIG:Debug>>:-fno-strict-aliasing>
|
||||
# tweak gcc optimization for debug
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<CONFIG:Debug>>:-O0>
|
||||
# Add debug symbols to release config
|
||||
$<$<CONFIG:Release>:-g>)
|
||||
target_link_libraries(
|
||||
common
|
||||
INTERFACE -rdynamic
|
||||
$<$<BOOL:${is_linux}>:-Wl,-z,relro,-z,now,--build-id>
|
||||
# link to static libc/c++ iff: * static option set and * NOT APPLE (AppleClang does not support static
|
||||
# libc/c++) and * NOT SANITIZERS (sanitizers typically don't work with static libc/c++)
|
||||
$<$<AND:$<BOOL:${static}>,$<NOT:$<BOOL:${APPLE}>>,$<NOT:$<BOOL:${SANITIZERS_ENABLED}>>>:
|
||||
-static-libstdc++
|
||||
-static-libgcc
|
||||
>)
|
||||
target_compile_options (common
|
||||
INTERFACE
|
||||
-Wall
|
||||
-Wdeprecated
|
||||
$<$<BOOL:${is_clang}>:-Wno-deprecated-declarations>
|
||||
$<$<BOOL:${wextra}>:-Wextra -Wno-unused-parameter>
|
||||
$<$<BOOL:${werr}>:-Werror>
|
||||
-fstack-protector
|
||||
-Wno-sign-compare
|
||||
-Wno-unused-but-set-variable
|
||||
$<$<NOT:$<CONFIG:Debug>>:-fno-strict-aliasing>
|
||||
# tweak gcc optimization for debug
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<CONFIG:Debug>>:-O0>
|
||||
# Add debug symbols to release config
|
||||
$<$<CONFIG:Release>:-g>)
|
||||
target_link_libraries (common
|
||||
INTERFACE
|
||||
-rdynamic
|
||||
$<$<BOOL:${is_linux}>:-Wl,-z,relro,-z,now,--build-id>
|
||||
# link to static libc/c++ iff:
|
||||
# * static option set and
|
||||
# * NOT APPLE (AppleClang does not support static libc/c++) and
|
||||
# * NOT san (sanitizers typically don't work with static libc/c++)
|
||||
$<$<AND:$<BOOL:${static}>,$<NOT:$<BOOL:${APPLE}>>,$<NOT:$<BOOL:${san}>>>:
|
||||
-static-libstdc++
|
||||
-static-libgcc
|
||||
>)
|
||||
endif ()
|
||||
|
||||
# Antithesis instrumentation will only be built and deployed using machines running Linux.
|
||||
if (voidstar)
|
||||
if (NOT CMAKE_BUILD_TYPE STREQUAL "Debug")
|
||||
message(FATAL_ERROR "Antithesis instrumentation requires Debug build type, aborting...")
|
||||
elseif (NOT is_linux)
|
||||
message(FATAL_ERROR "Antithesis instrumentation requires Linux, aborting...")
|
||||
elseif (NOT (is_clang AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 16.0))
|
||||
message(FATAL_ERROR "Antithesis instrumentation requires Clang version 16 or later, aborting...")
|
||||
endif ()
|
||||
if (NOT CMAKE_BUILD_TYPE STREQUAL "Debug")
|
||||
message(FATAL_ERROR "Antithesis instrumentation requires Debug build type, aborting...")
|
||||
elseif (NOT is_linux)
|
||||
message(FATAL_ERROR "Antithesis instrumentation requires Linux, aborting...")
|
||||
elseif (NOT (is_clang AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 16.0))
|
||||
message(FATAL_ERROR "Antithesis instrumentation requires Clang version 16 or later, aborting...")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
if (use_mold)
|
||||
# use mold linker if available
|
||||
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=mold -Wl,--version ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
|
||||
if ("${LD_VERSION}" MATCHES "mold")
|
||||
target_link_libraries(common INTERFACE -fuse-ld=mold)
|
||||
endif ()
|
||||
unset(LD_VERSION)
|
||||
# use mold linker if available
|
||||
execute_process (
|
||||
COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=mold -Wl,--version
|
||||
ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
|
||||
if ("${LD_VERSION}" MATCHES "mold")
|
||||
target_link_libraries (common INTERFACE -fuse-ld=mold)
|
||||
endif ()
|
||||
unset (LD_VERSION)
|
||||
elseif (use_gold AND is_gcc)
|
||||
# use gold linker if available
|
||||
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=gold -Wl,--version ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
|
||||
# use gold linker if available
|
||||
execute_process (
|
||||
COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=gold -Wl,--version
|
||||
ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
|
||||
#[=========================================================[
|
||||
NOTE: THE gold linker inserts -rpath as DT_RUNPATH by
|
||||
default instead of DT_RPATH, so you might have slightly
|
||||
default instead of DT_RPATH, so you might have slightly
|
||||
unexpected runtime ld behavior if you were expecting
|
||||
DT_RPATH. Specify --disable-new-dtags to gold if you do
|
||||
not want the default DT_RUNPATH behavior. This rpath
|
||||
@@ -170,31 +160,34 @@ elseif (use_gold AND is_gcc)
|
||||
disabling would be to figure out all the settings
|
||||
required to make gold play nicely with jemalloc.
|
||||
#]=========================================================]
|
||||
if (("${LD_VERSION}" MATCHES "GNU gold") AND (NOT jemalloc))
|
||||
target_link_libraries(
|
||||
common
|
||||
INTERFACE -fuse-ld=gold
|
||||
-Wl,--no-as-needed
|
||||
#[=========================================================[
|
||||
if (("${LD_VERSION}" MATCHES "GNU gold") AND (NOT jemalloc))
|
||||
target_link_libraries (common
|
||||
INTERFACE
|
||||
-fuse-ld=gold
|
||||
-Wl,--no-as-needed
|
||||
#[=========================================================[
|
||||
see https://bugs.launchpad.net/ubuntu/+source/eglibc/+bug/1253638/comments/5
|
||||
DT_RUNPATH does not work great for transitive
|
||||
dependencies (of which boost has a few) - so just
|
||||
switch to DT_RPATH if doing dynamic linking with gold
|
||||
#]=========================================================]
|
||||
$<$<NOT:$<BOOL:${static}>>:-Wl,--disable-new-dtags>)
|
||||
endif ()
|
||||
unset(LD_VERSION)
|
||||
$<$<NOT:$<BOOL:${static}>>:-Wl,--disable-new-dtags>)
|
||||
endif ()
|
||||
unset (LD_VERSION)
|
||||
elseif (use_lld)
|
||||
# use lld linker if available
|
||||
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=lld -Wl,--version ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
|
||||
if ("${LD_VERSION}" MATCHES "LLD")
|
||||
target_link_libraries(common INTERFACE -fuse-ld=lld)
|
||||
endif ()
|
||||
unset(LD_VERSION)
|
||||
endif ()
|
||||
# use lld linker if available
|
||||
execute_process (
|
||||
COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=lld -Wl,--version
|
||||
ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
|
||||
if ("${LD_VERSION}" MATCHES "LLD")
|
||||
target_link_libraries (common INTERFACE -fuse-ld=lld)
|
||||
endif ()
|
||||
unset (LD_VERSION)
|
||||
endif()
|
||||
|
||||
|
||||
if (assert)
|
||||
foreach (var_ CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELEASE)
|
||||
string(REGEX REPLACE "[-/]DNDEBUG" "" ${var_} "${${var_}}")
|
||||
endforeach ()
|
||||
foreach (var_ CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELEASE)
|
||||
STRING (REGEX REPLACE "[-/]DNDEBUG" "" ${var_} "${${var_}}")
|
||||
endforeach ()
|
||||
endif ()
|
||||
|
||||
@@ -1,52 +1,54 @@
|
||||
include(CMakeFindDependencyMacro)
|
||||
include (CMakeFindDependencyMacro)
|
||||
# need to represent system dependencies of the lib here
|
||||
#[=========================================================[
|
||||
Boost
|
||||
#]=========================================================]
|
||||
if (static OR APPLE OR MSVC)
|
||||
set(Boost_USE_STATIC_LIBS ON)
|
||||
set (Boost_USE_STATIC_LIBS ON)
|
||||
endif ()
|
||||
set(Boost_USE_MULTITHREADED ON)
|
||||
set (Boost_USE_MULTITHREADED ON)
|
||||
if (static OR MSVC)
|
||||
set(Boost_USE_STATIC_RUNTIME ON)
|
||||
set (Boost_USE_STATIC_RUNTIME ON)
|
||||
else ()
|
||||
set(Boost_USE_STATIC_RUNTIME OFF)
|
||||
set (Boost_USE_STATIC_RUNTIME OFF)
|
||||
endif ()
|
||||
find_dependency(Boost
|
||||
COMPONENTS
|
||||
chrono
|
||||
container
|
||||
context
|
||||
coroutine
|
||||
date_time
|
||||
filesystem
|
||||
program_options
|
||||
regex
|
||||
system
|
||||
thread)
|
||||
find_dependency (Boost
|
||||
COMPONENTS
|
||||
chrono
|
||||
container
|
||||
context
|
||||
coroutine
|
||||
date_time
|
||||
filesystem
|
||||
program_options
|
||||
regex
|
||||
system
|
||||
thread)
|
||||
#[=========================================================[
|
||||
OpenSSL
|
||||
#]=========================================================]
|
||||
if (NOT DEFINED OPENSSL_ROOT_DIR)
|
||||
if (DEFINED ENV{OPENSSL_ROOT})
|
||||
set(OPENSSL_ROOT_DIR $ENV{OPENSSL_ROOT})
|
||||
elseif (APPLE)
|
||||
find_program(homebrew brew)
|
||||
if (homebrew)
|
||||
execute_process(COMMAND ${homebrew} --prefix openssl OUTPUT_VARIABLE OPENSSL_ROOT_DIR
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
endif ()
|
||||
if (DEFINED ENV{OPENSSL_ROOT})
|
||||
set (OPENSSL_ROOT_DIR $ENV{OPENSSL_ROOT})
|
||||
elseif (APPLE)
|
||||
find_program (homebrew brew)
|
||||
if (homebrew)
|
||||
execute_process (COMMAND ${homebrew} --prefix openssl
|
||||
OUTPUT_VARIABLE OPENSSL_ROOT_DIR
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
endif ()
|
||||
file(TO_CMAKE_PATH "${OPENSSL_ROOT_DIR}" OPENSSL_ROOT_DIR)
|
||||
endif ()
|
||||
file (TO_CMAKE_PATH "${OPENSSL_ROOT_DIR}" OPENSSL_ROOT_DIR)
|
||||
endif ()
|
||||
|
||||
if (static OR APPLE OR MSVC)
|
||||
set(OPENSSL_USE_STATIC_LIBS ON)
|
||||
set (OPENSSL_USE_STATIC_LIBS ON)
|
||||
endif ()
|
||||
set(OPENSSL_MSVC_STATIC_RT ON)
|
||||
find_dependency(OpenSSL REQUIRED)
|
||||
find_dependency(ZLIB)
|
||||
find_dependency(date)
|
||||
set (OPENSSL_MSVC_STATIC_RT ON)
|
||||
find_dependency (OpenSSL REQUIRED)
|
||||
find_dependency (ZLIB)
|
||||
find_dependency (date)
|
||||
if (TARGET ZLIB::ZLIB)
|
||||
set_target_properties(OpenSSL::Crypto PROPERTIES INTERFACE_LINK_LIBRARIES ZLIB::ZLIB)
|
||||
set_target_properties(OpenSSL::Crypto PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES ZLIB::ZLIB)
|
||||
endif ()
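# A hedged sketch of a downstream consumer of this package config file; the
# exact exported target name depends on the installed XrplExports set, so
# Xrpl::xrpl.libxrpl is an assumption here, as are the consumer names.
find_package(Xrpl REQUIRED)
add_executable(consumer main.cpp)
target_link_libraries(consumer PRIVATE Xrpl::xrpl.libxrpl)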
|
||||
|
||||
@@ -10,44 +10,63 @@ include(target_protobuf_sources)
|
||||
# so we just build them as a separate library.
|
||||
add_library(xrpl.libpb)
|
||||
set_target_properties(xrpl.libpb PROPERTIES UNITY_BUILD OFF)
|
||||
target_protobuf_sources(xrpl.libpb xrpl/proto LANGUAGE cpp IMPORT_DIRS include/xrpl/proto
|
||||
PROTOS include/xrpl/proto/xrpl.proto)
|
||||
target_protobuf_sources(xrpl.libpb xrpl/proto
|
||||
LANGUAGE cpp
|
||||
IMPORT_DIRS include/xrpl/proto
|
||||
PROTOS include/xrpl/proto/xrpl.proto
|
||||
)
|
||||
|
||||
file(GLOB_RECURSE protos "include/xrpl/proto/org/*.proto")
|
||||
target_protobuf_sources(xrpl.libpb xrpl/proto LANGUAGE cpp IMPORT_DIRS include/xrpl/proto PROTOS "${protos}")
|
||||
target_protobuf_sources(
|
||||
xrpl.libpb xrpl/proto
|
||||
LANGUAGE grpc
|
||||
IMPORT_DIRS include/xrpl/proto
|
||||
PROTOS "${protos}"
|
||||
PLUGIN protoc-gen-grpc=$<TARGET_FILE:gRPC::grpc_cpp_plugin>
|
||||
GENERATE_EXTENSIONS .grpc.pb.h .grpc.pb.cc)
|
||||
target_protobuf_sources(xrpl.libpb xrpl/proto
|
||||
LANGUAGE cpp
|
||||
IMPORT_DIRS include/xrpl/proto
|
||||
PROTOS "${protos}"
|
||||
)
|
||||
target_protobuf_sources(xrpl.libpb xrpl/proto
|
||||
LANGUAGE grpc
|
||||
IMPORT_DIRS include/xrpl/proto
|
||||
PROTOS "${protos}"
|
||||
PLUGIN protoc-gen-grpc=$<TARGET_FILE:gRPC::grpc_cpp_plugin>
|
||||
GENERATE_EXTENSIONS .grpc.pb.h .grpc.pb.cc
|
||||
)
|
||||
|
||||
target_compile_options(
|
||||
xrpl.libpb PUBLIC $<$<BOOL:${is_msvc}>:-wd4996> $<$<BOOL:${is_xcode}>: --system-header-prefix="google/protobuf"
|
||||
-Wno-deprecated-dynamic-exception-spec >
|
||||
PRIVATE $<$<BOOL:${is_msvc}>:-wd4065> $<$<NOT:$<BOOL:${is_msvc}>>:-Wno-deprecated-declarations>)
|
||||
target_compile_options(xrpl.libpb
|
||||
PUBLIC
|
||||
$<$<BOOL:${MSVC}>:-wd4996>
|
||||
$<$<BOOL:${XCODE}>:
|
||||
--system-header-prefix="google/protobuf"
|
||||
-Wno-deprecated-dynamic-exception-spec
|
||||
>
|
||||
PRIVATE
|
||||
$<$<BOOL:${MSVC}>:-wd4065>
|
||||
$<$<NOT:$<BOOL:${MSVC}>>:-Wno-deprecated-declarations>
|
||||
)
|
||||
|
||||
target_link_libraries(xrpl.libpb PUBLIC protobuf::libprotobuf gRPC::grpc++)
|
||||
target_link_libraries(xrpl.libpb
|
||||
PUBLIC
|
||||
protobuf::libprotobuf
|
||||
gRPC::grpc++
|
||||
)
|
||||
|
||||
# TODO: Clean up the number of library targets later.
|
||||
add_library(xrpl.imports.main INTERFACE)
|
||||
|
||||
target_link_libraries(
|
||||
xrpl.imports.main
|
||||
INTERFACE absl::random_random
|
||||
date::date
|
||||
ed25519::ed25519
|
||||
LibArchive::LibArchive
|
||||
OpenSSL::Crypto
|
||||
Xrpl::boost
|
||||
Xrpl::libs
|
||||
Xrpl::opts
|
||||
Xrpl::syslibs
|
||||
secp256k1::secp256k1
|
||||
xrpl.libpb
|
||||
xxHash::xxhash
|
||||
$<$<BOOL:${voidstar}>:antithesis-sdk-cpp>)
|
||||
target_link_libraries(xrpl.imports.main
|
||||
INTERFACE
|
||||
absl::random_random
|
||||
date::date
|
||||
ed25519::ed25519
|
||||
LibArchive::LibArchive
|
||||
OpenSSL::Crypto
|
||||
Xrpl::boost
|
||||
Xrpl::libs
|
||||
Xrpl::opts
|
||||
Xrpl::syslibs
|
||||
secp256k1::secp256k1
|
||||
xrpl.libpb
|
||||
xxHash::xxhash
|
||||
$<$<BOOL:${voidstar}>:antithesis-sdk-cpp>
|
||||
)
|
||||
|
||||
include(add_module)
|
||||
include(target_link_modules)
|
||||
@@ -69,11 +88,18 @@ target_link_libraries(xrpl.libxrpl.crypto PUBLIC xrpl.libxrpl.basics)
|
||||
|
||||
# Level 04
|
||||
add_module(xrpl protocol)
|
||||
target_link_libraries(xrpl.libxrpl.protocol PUBLIC xrpl.libxrpl.crypto xrpl.libxrpl.json)
|
||||
target_link_libraries(xrpl.libxrpl.protocol PUBLIC
|
||||
xrpl.libxrpl.crypto
|
||||
xrpl.libxrpl.json
|
||||
)
|
||||
|
||||
# Level 05
|
||||
add_module(xrpl core)
|
||||
target_link_libraries(xrpl.libxrpl.core PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json xrpl.libxrpl.protocol)
|
||||
target_link_libraries(xrpl.libxrpl.core PUBLIC
|
||||
xrpl.libxrpl.basics
|
||||
xrpl.libxrpl.json
|
||||
xrpl.libxrpl.protocol
|
||||
)
|
||||
|
||||
# Level 06
|
||||
add_module(xrpl resource)
|
||||
@@ -81,50 +107,62 @@ target_link_libraries(xrpl.libxrpl.resource PUBLIC xrpl.libxrpl.protocol)
|
||||
|
||||
# Level 07
|
||||
add_module(xrpl net)
|
||||
target_link_libraries(xrpl.libxrpl.net PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json xrpl.libxrpl.protocol
|
||||
xrpl.libxrpl.resource)
|
||||
|
||||
add_module(xrpl nodestore)
|
||||
target_link_libraries(xrpl.libxrpl.nodestore PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json xrpl.libxrpl.protocol)
|
||||
|
||||
add_module(xrpl shamap)
|
||||
target_link_libraries(xrpl.libxrpl.shamap PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.crypto xrpl.libxrpl.protocol
|
||||
xrpl.libxrpl.nodestore)
|
||||
|
||||
add_module(xrpl rdb)
|
||||
target_link_libraries(xrpl.libxrpl.rdb PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.core)
|
||||
target_link_libraries(xrpl.libxrpl.net PUBLIC
|
||||
xrpl.libxrpl.basics
|
||||
xrpl.libxrpl.json
|
||||
xrpl.libxrpl.protocol
|
||||
xrpl.libxrpl.resource
|
||||
)
|
||||
|
||||
add_module(xrpl server)
|
||||
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol xrpl.libxrpl.core xrpl.libxrpl.rdb)
|
||||
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol)
|
||||
|
||||
add_module(xrpl nodestore)
|
||||
target_link_libraries(xrpl.libxrpl.nodestore PUBLIC
|
||||
xrpl.libxrpl.basics
|
||||
xrpl.libxrpl.json
|
||||
xrpl.libxrpl.protocol
|
||||
)
|
||||
|
||||
add_module(xrpl shamap)
|
||||
target_link_libraries(xrpl.libxrpl.shamap PUBLIC
|
||||
xrpl.libxrpl.basics
|
||||
xrpl.libxrpl.crypto
|
||||
xrpl.libxrpl.protocol
|
||||
xrpl.libxrpl.nodestore
|
||||
)
|
||||
|
||||
add_module(xrpl ledger)
|
||||
target_link_libraries(xrpl.libxrpl.ledger PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json xrpl.libxrpl.protocol
|
||||
xrpl.libxrpl.rdb)
|
||||
target_link_libraries(xrpl.libxrpl.ledger PUBLIC
|
||||
xrpl.libxrpl.basics
|
||||
xrpl.libxrpl.json
|
||||
xrpl.libxrpl.protocol
|
||||
)
|
||||
|
||||
add_library(xrpl.libxrpl)
|
||||
set_target_properties(xrpl.libxrpl PROPERTIES OUTPUT_NAME xrpl)
|
||||
|
||||
add_library(xrpl::libxrpl ALIAS xrpl.libxrpl)
|
||||
|
||||
file(GLOB_RECURSE sources CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/src/libxrpl/*.cpp")
|
||||
file(GLOB_RECURSE sources CONFIGURE_DEPENDS
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/src/libxrpl/*.cpp"
|
||||
)
|
||||
target_sources(xrpl.libxrpl PRIVATE ${sources})
|
||||
|
||||
target_link_modules(
|
||||
xrpl
|
||||
PUBLIC
|
||||
basics
|
||||
beast
|
||||
core
|
||||
crypto
|
||||
json
|
||||
ledger
|
||||
net
|
||||
nodestore
|
||||
protocol
|
||||
rdb
|
||||
resource
|
||||
server
|
||||
shamap)
|
||||
target_link_modules(xrpl PUBLIC
|
||||
basics
|
||||
beast
|
||||
core
|
||||
crypto
|
||||
json
|
||||
protocol
|
||||
resource
|
||||
server
|
||||
nodestore
|
||||
shamap
|
||||
net
|
||||
ledger
|
||||
)
|
||||
|
||||
# All headers in libxrpl are in modules.
|
||||
# Uncomment this stanza if you have not yet moved new headers into a module.
|
||||
@@ -135,34 +173,63 @@ target_link_modules(
|
||||
# $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
|
||||
# $<INSTALL_INTERFACE:include>)
|
||||
|
||||
if (xrpld)
|
||||
add_executable(xrpld)
|
||||
if (tests)
|
||||
target_compile_definitions(xrpld PUBLIC ENABLE_TESTS)
|
||||
target_compile_definitions(xrpld PRIVATE UNIT_TEST_REFERENCE_FEE=${UNIT_TEST_REFERENCE_FEE})
|
||||
endif ()
|
||||
target_include_directories(xrpld PRIVATE $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>)
|
||||
if(xrpld)
|
||||
add_executable(xrpld)
|
||||
if(tests)
|
||||
target_compile_definitions(xrpld PUBLIC ENABLE_TESTS)
|
||||
target_compile_definitions(xrpld PRIVATE
|
||||
UNIT_TEST_REFERENCE_FEE=${UNIT_TEST_REFERENCE_FEE}
|
||||
)
|
||||
endif()
|
||||
target_include_directories(xrpld
|
||||
PRIVATE
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>
|
||||
)
|
||||
|
||||
file(GLOB_RECURSE sources CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/src/xrpld/*.cpp")
|
||||
file(GLOB_RECURSE sources CONFIGURE_DEPENDS
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/src/xrpld/*.cpp"
|
||||
)
|
||||
target_sources(xrpld PRIVATE ${sources})
|
||||
|
||||
if(tests)
|
||||
file(GLOB_RECURSE sources CONFIGURE_DEPENDS
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/src/test/*.cpp"
|
||||
)
|
||||
target_sources(xrpld PRIVATE ${sources})
|
||||
endif()
|
||||
|
||||
if (tests)
|
||||
file(GLOB_RECURSE sources CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/src/test/*.cpp")
|
||||
target_sources(xrpld PRIVATE ${sources})
|
||||
endif ()
|
||||
target_link_libraries(xrpld
|
||||
Xrpl::boost
|
||||
Xrpl::opts
|
||||
Xrpl::libs
|
||||
xrpl.libxrpl
|
||||
)
|
||||
exclude_if_included(xrpld)
|
||||
# define a macro for tests that might need to
|
||||
# be excluded or run differently in CI environment
|
||||
if(is_ci)
|
||||
target_compile_definitions(xrpld PRIVATE XRPL_RUNNING_IN_CI)
|
||||
endif ()
|
||||
|
||||
target_link_libraries(xrpld Xrpl::boost Xrpl::opts Xrpl::libs xrpl.libxrpl)
|
||||
exclude_if_included(xrpld)
|
||||
# define a macro for tests that might need to
|
||||
# be excluded or run differently in CI environment
|
||||
if (is_ci)
|
||||
target_compile_definitions(xrpld PRIVATE XRPL_RUNNING_IN_CI)
|
||||
endif ()
|
||||
if(voidstar)
|
||||
target_compile_options(xrpld
|
||||
PRIVATE
|
||||
-fsanitize-coverage=trace-pc-guard
|
||||
)
|
||||
# xrpld requires access to antithesis-sdk-cpp implementation file
|
||||
# antithesis_instrumentation.h, which is not exported as INTERFACE
|
||||
target_include_directories(xrpld
|
||||
PRIVATE
|
||||
${CMAKE_SOURCE_DIR}/external/antithesis-sdk
|
||||
)
|
||||
endif()
|
||||
|
||||
if (voidstar)
|
||||
target_compile_options(xrpld PRIVATE -fsanitize-coverage=trace-pc-guard)
|
||||
# xrpld requires access to antithesis-sdk-cpp implementation file
|
||||
# antithesis_instrumentation.h, which is not exported as INTERFACE
|
||||
target_include_directories(xrpld PRIVATE ${CMAKE_SOURCE_DIR}/external/antithesis-sdk)
|
||||
endif ()
|
||||
endif ()
|
||||
# any files that don't play well with unity should be added here
|
||||
if(tests)
|
||||
set_source_files_properties(
|
||||
# these two seem to produce conflicts in beast teardown template methods
|
||||
src/test/rpc/ValidatorRPC_test.cpp
|
||||
src/test/ledger/Invariants_test.cpp
|
||||
PROPERTIES SKIP_UNITY_BUILD_INCLUSION TRUE)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
@@ -2,51 +2,40 @@
|
||||
coverage report target
#]===================================================================]

if (NOT coverage)
message(FATAL_ERROR "Code coverage not enabled! Aborting ...")
endif ()
if(NOT coverage)
message(FATAL_ERROR "Code coverage not enabled! Aborting ...")
endif()

if (CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
message(WARNING "Code coverage on Windows is not supported, ignoring 'coverage' flag")
return()
endif ()
if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
message(WARNING "Code coverage on Windows is not supported, ignoring 'coverage' flag")
return()
endif()

include(ProcessorCount)
ProcessorCount(PROCESSOR_COUNT)

include(CodeCoverage)

# The instructions for these commands come from the `CodeCoverage` module, which was copied from
# https://github.com/bilke/cmake-modules, commit fb7d2a3, then locally changed (see CHANGES: section in
# `CodeCoverage.cmake`)
# The instructions for these commands come from the `CodeCoverage` module,
# which was copied from https://github.com/bilke/cmake-modules, commit fb7d2a3,
# then locally changed (see CHANGES: section in `CodeCoverage.cmake`)

set(GCOVR_ADDITIONAL_ARGS ${coverage_extra_args})
if (NOT GCOVR_ADDITIONAL_ARGS STREQUAL "")
separate_arguments(GCOVR_ADDITIONAL_ARGS)
endif ()
if(NOT GCOVR_ADDITIONAL_ARGS STREQUAL "")
separate_arguments(GCOVR_ADDITIONAL_ARGS)
endif()

list(APPEND
GCOVR_ADDITIONAL_ARGS
--exclude-throw-branches
--exclude-noncode-lines
--exclude-unreachable-branches
-s
-j
${PROCESSOR_COUNT})
list(APPEND GCOVR_ADDITIONAL_ARGS
--exclude-throw-branches
--exclude-noncode-lines
--exclude-unreachable-branches -s
-j ${PROCESSOR_COUNT})

setup_target_for_coverage_gcovr(
NAME
coverage
FORMAT
${coverage_format}
EXCLUDE
"src/test"
"src/tests"
"include/xrpl/beast/test"
"include/xrpl/beast/unit_test"
"${CMAKE_BINARY_DIR}/pb-xrpl.libpb"
DEPENDENCIES
xrpld
xrpl.tests)
NAME coverage
FORMAT ${coverage_format}
EXCLUDE "src/test" "src/tests" "include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb"
DEPENDENCIES xrpld xrpl.tests
)

add_code_coverage_to_target(opts INTERFACE)
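# A minimal sketch (illustration only) of the cache settings this file reads
# at configure time; the values shown are examples, not project defaults:
#   coverage            - must be ON or the file aborts above
#   coverage_format     - forwarded to FORMAT (e.g. html-details, lcov, clover)
#   coverage_extra_args - optional extra gcovr arguments
set(coverage ON CACHE BOOL "Enable coverage build" FORCE)
set(coverage_format "html-details" CACHE STRING "gcovr output format" FORCE)
set(coverage_extra_args "" CACHE STRING "Extra gcovr arguments" FORCE)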
|
||||
|
||||
@@ -2,44 +2,45 @@
|
||||
docs target (optional)
|
||||
#]===================================================================]
|
||||
|
||||
if (NOT only_docs)
|
||||
return()
|
||||
endif ()
|
||||
if(NOT only_docs)
|
||||
return()
|
||||
endif()
|
||||
|
||||
find_package(Doxygen)
|
||||
if (NOT TARGET Doxygen::doxygen)
|
||||
message(STATUS "doxygen executable not found -- skipping docs target")
|
||||
return()
|
||||
endif ()
|
||||
if(NOT TARGET Doxygen::doxygen)
|
||||
message(STATUS "doxygen executable not found -- skipping docs target")
|
||||
return()
|
||||
endif()
|
||||
|
||||
set(doxygen_output_directory "${CMAKE_BINARY_DIR}/docs")
|
||||
set(doxygen_include_path "${CMAKE_CURRENT_SOURCE_DIR}/src")
|
||||
set(doxygen_index_file "${doxygen_output_directory}/html/index.html")
|
||||
set(doxyfile "${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile")
|
||||
|
||||
file(GLOB_RECURSE
|
||||
doxygen_input
|
||||
docs/*.md
|
||||
include/*.h
|
||||
include/*.cpp
|
||||
include/*.md
|
||||
src/*.h
|
||||
src/*.cpp
|
||||
src/*.md
|
||||
Builds/*.md
|
||||
*.md)
|
||||
list(APPEND doxygen_input external/README.md)
|
||||
file(GLOB_RECURSE doxygen_input
|
||||
docs/*.md
|
||||
include/*.h
|
||||
include/*.cpp
|
||||
include/*.md
|
||||
src/*.h
|
||||
src/*.cpp
|
||||
src/*.md
|
||||
Builds/*.md
|
||||
*.md)
|
||||
list(APPEND doxygen_input
|
||||
external/README.md
|
||||
)
|
||||
set(dependencies "${doxygen_input}" "${doxyfile}")
|
||||
|
||||
function (verbose_find_path variable name)
|
||||
# find_path sets a CACHE variable, so don't try using a "local" variable.
|
||||
find_path(${variable} "${name}" ${ARGN})
|
||||
if (NOT ${variable})
|
||||
message(NOTICE "could not find ${name}")
|
||||
else ()
|
||||
message(STATUS "found ${name}: ${${variable}}/${name}")
|
||||
endif ()
|
||||
endfunction ()
|
||||
function(verbose_find_path variable name)
|
||||
# find_path sets a CACHE variable, so don't try using a "local" variable.
|
||||
find_path(${variable} "${name}" ${ARGN})
|
||||
if(NOT ${variable})
|
||||
message(NOTICE "could not find ${name}")
|
||||
else()
|
||||
message(STATUS "found ${name}: ${${variable}}/${name}")
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
verbose_find_path(doxygen_plantuml_jar_path plantuml.jar PATH_SUFFIXES share/plantuml)
|
||||
verbose_find_path(doxygen_dot_path dot)
|
||||
@@ -47,26 +48,36 @@ verbose_find_path(doxygen_dot_path dot)
|
||||
# https://en.cppreference.com/w/Cppreference:Archives
|
||||
# https://stackoverflow.com/questions/60822559/how-to-move-a-file-download-from-configure-step-to-build-step
|
||||
set(download_script "${CMAKE_BINARY_DIR}/docs/download-cppreference.cmake")
|
||||
file(WRITE "${download_script}"
|
||||
"file(DOWNLOAD \
|
||||
file(WRITE
|
||||
"${download_script}"
|
||||
"file(DOWNLOAD \
|
||||
https://github.com/PeterFeicht/cppreference-doc/releases/download/v20250209/html-book-20250209.zip \
|
||||
${CMAKE_BINARY_DIR}/docs/cppreference.zip \
|
||||
EXPECTED_HASH MD5=bda585f72fbca4b817b29a3d5746567b \
|
||||
)\n \
|
||||
execute_process( \
|
||||
COMMAND \"${CMAKE_COMMAND}\" -E tar -xf cppreference.zip \
|
||||
)\n")
|
||||
)\n"
|
||||
)
|
||||
set(tagfile "${CMAKE_BINARY_DIR}/docs/cppreference-doxygen-web.tag.xml")
|
||||
add_custom_command(OUTPUT "${tagfile}" COMMAND "${CMAKE_COMMAND}" -P "${download_script}"
|
||||
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/docs")
|
||||
add_custom_command(
|
||||
OUTPUT "${tagfile}"
|
||||
COMMAND "${CMAKE_COMMAND}" -P "${download_script}"
|
||||
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/docs"
|
||||
)
|
||||
set(doxygen_tagfiles "${tagfile}=http://en.cppreference.com/w/")
|
||||
|
||||
add_custom_command(
|
||||
OUTPUT "${doxygen_index_file}"
|
||||
COMMAND "${CMAKE_COMMAND}" -E env "DOXYGEN_OUTPUT_DIRECTORY=${doxygen_output_directory}"
|
||||
"DOXYGEN_INCLUDE_PATH=${doxygen_include_path}" "DOXYGEN_TAGFILES=${doxygen_tagfiles}"
|
||||
"DOXYGEN_PLANTUML_JAR_PATH=${doxygen_plantuml_jar_path}" "DOXYGEN_DOT_PATH=${doxygen_dot_path}"
|
||||
"${DOXYGEN_EXECUTABLE}" "${doxyfile}"
|
||||
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
|
||||
DEPENDS "${dependencies}" "${tagfile}")
|
||||
add_custom_target(docs DEPENDS "${doxygen_index_file}" SOURCES "${dependencies}")
|
||||
OUTPUT "${doxygen_index_file}"
|
||||
COMMAND "${CMAKE_COMMAND}" -E env
|
||||
"DOXYGEN_OUTPUT_DIRECTORY=${doxygen_output_directory}"
|
||||
"DOXYGEN_INCLUDE_PATH=${doxygen_include_path}"
|
||||
"DOXYGEN_TAGFILES=${doxygen_tagfiles}"
|
||||
"DOXYGEN_PLANTUML_JAR_PATH=${doxygen_plantuml_jar_path}"
|
||||
"DOXYGEN_DOT_PATH=${doxygen_dot_path}"
|
||||
"${DOXYGEN_EXECUTABLE}" "${doxyfile}"
|
||||
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
|
||||
DEPENDS "${dependencies}" "${tagfile}")
|
||||
add_custom_target(docs
|
||||
DEPENDS "${doxygen_index_file}"
|
||||
SOURCES "${dependencies}")
|
||||
|
||||
@@ -4,54 +4,57 @@
|
||||
|
||||
include(create_symbolic_link)
|
||||
|
||||
# If no suffix is defined for executables (e.g. Windows uses .exe but Linux
|
||||
# and macOS use none), then explicitly set it to the empty string.
|
||||
if (NOT DEFINED suffix)
|
||||
set(suffix "")
|
||||
endif ()
|
||||
install (
|
||||
TARGETS
|
||||
common
|
||||
opts
|
||||
xrpl_boost
|
||||
xrpl_libs
|
||||
xrpl_syslibs
|
||||
xrpl.imports.main
|
||||
xrpl.libpb
|
||||
xrpl.libxrpl
|
||||
xrpl.libxrpl.basics
|
||||
xrpl.libxrpl.beast
|
||||
xrpl.libxrpl.core
|
||||
xrpl.libxrpl.crypto
|
||||
xrpl.libxrpl.json
|
||||
xrpl.libxrpl.ledger
|
||||
xrpl.libxrpl.net
|
||||
xrpl.libxrpl.nodestore
|
||||
xrpl.libxrpl.protocol
|
||||
xrpl.libxrpl.resource
|
||||
xrpl.libxrpl.server
|
||||
xrpl.libxrpl.shamap
|
||||
antithesis-sdk-cpp
|
||||
EXPORT XrplExports
|
||||
LIBRARY DESTINATION lib
|
||||
ARCHIVE DESTINATION lib
|
||||
RUNTIME DESTINATION bin
|
||||
INCLUDES DESTINATION include)
|
||||
|
||||
install(TARGETS common
|
||||
opts
|
||||
xrpl_boost
|
||||
xrpl_libs
|
||||
xrpl_syslibs
|
||||
xrpl.imports.main
|
||||
xrpl.libpb
|
||||
xrpl.libxrpl
|
||||
xrpl.libxrpl.basics
|
||||
xrpl.libxrpl.beast
|
||||
xrpl.libxrpl.core
|
||||
xrpl.libxrpl.crypto
|
||||
xrpl.libxrpl.json
|
||||
xrpl.libxrpl.rdb
|
||||
xrpl.libxrpl.ledger
|
||||
xrpl.libxrpl.net
|
||||
xrpl.libxrpl.nodestore
|
||||
xrpl.libxrpl.protocol
|
||||
xrpl.libxrpl.resource
|
||||
xrpl.libxrpl.server
|
||||
xrpl.libxrpl.shamap
|
||||
antithesis-sdk-cpp
|
||||
EXPORT XrplExports
|
||||
LIBRARY DESTINATION lib
|
||||
ARCHIVE DESTINATION lib
|
||||
RUNTIME DESTINATION bin
|
||||
INCLUDES
|
||||
DESTINATION include)
|
||||
install(
|
||||
DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/xrpl"
|
||||
DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
|
||||
)
|
||||
|
||||
install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/xrpl" DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
|
||||
|
||||
install(EXPORT XrplExports FILE XrplTargets.cmake NAMESPACE Xrpl:: DESTINATION lib/cmake/xrpl)
|
||||
include(CMakePackageConfigHelpers)
|
||||
write_basic_package_version_file(XrplConfigVersion.cmake VERSION ${xrpld_version} COMPATIBILITY SameMajorVersion)
|
||||
install (EXPORT XrplExports
|
||||
FILE XrplTargets.cmake
|
||||
NAMESPACE Xrpl::
|
||||
DESTINATION lib/cmake/xrpl)
|
||||
include (CMakePackageConfigHelpers)
|
||||
write_basic_package_version_file (
|
||||
XrplConfigVersion.cmake
|
||||
VERSION ${xrpld_version}
|
||||
COMPATIBILITY SameMajorVersion)
|
||||
|
||||
if (is_root_project AND TARGET xrpld)
|
||||
install(TARGETS xrpld RUNTIME DESTINATION bin)
|
||||
set_target_properties(xrpld PROPERTIES INSTALL_RPATH_USE_LINK_PATH ON)
|
||||
# sample configs should not overwrite existing files
|
||||
# install if-not-exists workaround as suggested by
|
||||
# https://cmake.org/Bug/view.php?id=12646
|
||||
install(CODE "
|
||||
install (TARGETS xrpld RUNTIME DESTINATION bin)
|
||||
set_target_properties(xrpld PROPERTIES INSTALL_RPATH_USE_LINK_PATH ON)
|
||||
# sample configs should not overwrite existing files
|
||||
# install if-not-exists workaround as suggested by
|
||||
# https://cmake.org/Bug/view.php?id=12646
|
||||
install(CODE "
|
||||
macro (copy_if_not_exists SRC DEST NEWNAME)
|
||||
if (NOT EXISTS \"\$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/\${DEST}/\${NEWNAME}\")
|
||||
file (INSTALL FILE_PERMISSIONS OWNER_READ OWNER_WRITE DESTINATION \"\${CMAKE_INSTALL_PREFIX}/\${DEST}\" FILES \"\${SRC}\" RENAME \"\${NEWNAME}\")
|
||||
@@ -59,10 +62,10 @@ if (is_root_project AND TARGET xrpld)
|
||||
message (\"-- Skipping : \$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/\${DEST}/\${NEWNAME}\")
|
||||
endif ()
|
||||
endmacro()
|
||||
copy_if_not_exists(\"${CMAKE_CURRENT_SOURCE_DIR}/cfg/xrpld-example.cfg\" etc xrpld.cfg)
|
||||
copy_if_not_exists(\"${CMAKE_CURRENT_SOURCE_DIR}/cfg/rippled-example.cfg\" etc rippled.cfg)
|
||||
copy_if_not_exists(\"${CMAKE_CURRENT_SOURCE_DIR}/cfg/validators-example.txt\" etc validators.txt)
|
||||
")
|
||||
install(CODE "
|
||||
install(CODE "
|
||||
set(CMAKE_MODULE_PATH \"${CMAKE_MODULE_PATH}\")
|
||||
include(create_symbolic_link)
|
||||
create_symbolic_link(xrpld${suffix} \
|
||||
@@ -70,5 +73,8 @@ if (is_root_project AND TARGET xrpld)
|
||||
")
|
||||
endif ()
|
||||
|
||||
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/cmake/XrplConfig.cmake ${CMAKE_CURRENT_BINARY_DIR}/XrplConfigVersion.cmake
|
||||
DESTINATION lib/cmake/xrpl)
|
||||
install (
|
||||
FILES
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/cmake/XrplConfig.cmake
|
||||
${CMAKE_CURRENT_BINARY_DIR}/XrplConfigVersion.cmake
|
||||
DESTINATION lib/cmake/xrpl)
|
||||
|
||||
@@ -2,82 +2,96 @@
|
||||
xrpld compile options/settings via an interface library
|
||||
#]===================================================================]
|
||||
|
||||
include(CompilationEnv)
|
||||
add_library (opts INTERFACE)
|
||||
add_library (Xrpl::opts ALIAS opts)
|
||||
target_compile_definitions (opts
|
||||
INTERFACE
|
||||
BOOST_ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS
|
||||
BOOST_ASIO_USE_TS_EXECUTOR_AS_DEFAULT
|
||||
BOOST_CONTAINER_FWD_BAD_DEQUE
|
||||
HAS_UNCAUGHT_EXCEPTIONS=1
|
||||
$<$<BOOL:${boost_show_deprecated}>:
|
||||
BOOST_ASIO_NO_DEPRECATED
|
||||
BOOST_FILESYSTEM_NO_DEPRECATED
|
||||
>
|
||||
$<$<NOT:$<BOOL:${boost_show_deprecated}>>:
|
||||
BOOST_COROUTINES_NO_DEPRECATION_WARNING
|
||||
BOOST_BEAST_ALLOW_DEPRECATED
|
||||
BOOST_FILESYSTEM_DEPRECATED
|
||||
>
|
||||
$<$<BOOL:${beast_no_unit_test_inline}>:BEAST_NO_UNIT_TEST_INLINE=1>
|
||||
$<$<BOOL:${beast_disable_autolink}>:BEAST_DONT_AUTOLINK_TO_WIN32_LIBRARIES=1>
|
||||
$<$<BOOL:${single_io_service_thread}>:XRPL_SINGLE_IO_SERVICE_THREAD=1>
|
||||
$<$<BOOL:${voidstar}>:ENABLE_VOIDSTAR>)
|
||||
target_compile_options (opts
|
||||
INTERFACE
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<COMPILE_LANGUAGE:CXX>>:-Wsuggest-override>
|
||||
$<$<BOOL:${is_gcc}>:-Wno-maybe-uninitialized>
|
||||
$<$<BOOL:${perf}>:-fno-omit-frame-pointer>
|
||||
$<$<BOOL:${profile}>:-pg>
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
|
||||
|
||||
# Set defaults for optional variables to avoid uninitialized variable warnings
|
||||
if (NOT DEFINED voidstar)
|
||||
set(voidstar OFF)
|
||||
target_link_libraries (opts
|
||||
INTERFACE
|
||||
$<$<BOOL:${profile}>:-pg>
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
|
||||
|
||||
if(jemalloc)
|
||||
find_package(jemalloc REQUIRED)
|
||||
target_compile_definitions(opts INTERFACE PROFILE_JEMALLOC)
|
||||
target_link_libraries(opts INTERFACE jemalloc::jemalloc)
|
||||
endif ()
|
||||
|
||||
add_library(opts INTERFACE)
|
||||
add_library(Xrpl::opts ALIAS opts)
|
||||
target_compile_definitions(
|
||||
opts
|
||||
INTERFACE BOOST_ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS
|
||||
BOOST_ASIO_USE_TS_EXECUTOR_AS_DEFAULT
|
||||
BOOST_CONTAINER_FWD_BAD_DEQUE
|
||||
HAS_UNCAUGHT_EXCEPTIONS=1
|
||||
$<$<BOOL:${boost_show_deprecated}>:
|
||||
BOOST_ASIO_NO_DEPRECATED
|
||||
BOOST_FILESYSTEM_NO_DEPRECATED
|
||||
>
|
||||
$<$<NOT:$<BOOL:${boost_show_deprecated}>>:
|
||||
BOOST_COROUTINES_NO_DEPRECATION_WARNING
|
||||
BOOST_BEAST_ALLOW_DEPRECATED
|
||||
BOOST_FILESYSTEM_DEPRECATED
|
||||
>
|
||||
$<$<BOOL:${beast_no_unit_test_inline}>:BEAST_NO_UNIT_TEST_INLINE=1>
|
||||
$<$<BOOL:${beast_disable_autolink}>:BEAST_DONT_AUTOLINK_TO_WIN32_LIBRARIES=1>
|
||||
$<$<BOOL:${single_io_service_thread}>:XRPL_SINGLE_IO_SERVICE_THREAD=1>
|
||||
$<$<BOOL:${voidstar}>:ENABLE_VOIDSTAR>)
|
||||
target_compile_options(
|
||||
opts
|
||||
INTERFACE $<$<AND:$<BOOL:${is_gcc}>,$<COMPILE_LANGUAGE:CXX>>:-Wsuggest-override>
|
||||
$<$<BOOL:${is_gcc}>:-Wno-maybe-uninitialized> $<$<BOOL:${perf}>:-fno-omit-frame-pointer>
|
||||
$<$<BOOL:${profile}>:-pg> $<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
|
||||
|
||||
target_link_libraries(opts INTERFACE $<$<BOOL:${profile}>:-pg> $<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
|
||||
|
||||
if (jemalloc)
|
||||
find_package(jemalloc REQUIRED)
|
||||
target_compile_definitions(opts INTERFACE PROFILE_JEMALLOC)
|
||||
target_link_libraries(opts INTERFACE jemalloc::jemalloc)
|
||||
if (san)
|
||||
target_compile_options (opts
|
||||
INTERFACE
|
||||
# sanitizers recommend minimum of -O1 for reasonable performance
|
||||
$<$<CONFIG:Debug>:-O1>
|
||||
${SAN_FLAG}
|
||||
-fno-omit-frame-pointer)
|
||||
target_compile_definitions (opts
|
||||
INTERFACE
|
||||
$<$<STREQUAL:${san},address>:SANITIZER=ASAN>
|
||||
$<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
|
||||
$<$<STREQUAL:${san},memory>:SANITIZER=MSAN>
|
||||
$<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>)
|
||||
target_link_libraries (opts INTERFACE ${SAN_FLAG} ${SAN_LIB})
|
||||
endif ()
|
||||
|
||||
#[===================================================================[
|
||||
xrpld transitive library deps via an interface library
|
||||
#]===================================================================]
|
||||
|
||||
add_library(xrpl_syslibs INTERFACE)
|
||||
add_library(Xrpl::syslibs ALIAS xrpl_syslibs)
|
||||
target_link_libraries(
|
||||
xrpl_syslibs
|
||||
INTERFACE $<$<BOOL:${is_msvc}>:
|
||||
legacy_stdio_definitions.lib
|
||||
Shlwapi
|
||||
kernel32
|
||||
user32
|
||||
gdi32
|
||||
winspool
|
||||
comdlg32
|
||||
advapi32
|
||||
shell32
|
||||
ole32
|
||||
oleaut32
|
||||
uuid
|
||||
odbc32
|
||||
odbccp32
|
||||
crypt32
|
||||
>
|
||||
$<$<NOT:$<BOOL:${is_msvc}>>:dl>
|
||||
$<$<NOT:$<OR:$<BOOL:${is_msvc}>,$<BOOL:${is_macos}>>>:rt>)
|
||||
add_library (xrpl_syslibs INTERFACE)
|
||||
add_library (Xrpl::syslibs ALIAS xrpl_syslibs)
|
||||
target_link_libraries (xrpl_syslibs
|
||||
INTERFACE
|
||||
$<$<BOOL:${MSVC}>:
|
||||
legacy_stdio_definitions.lib
|
||||
Shlwapi
|
||||
kernel32
|
||||
user32
|
||||
gdi32
|
||||
winspool
|
||||
comdlg32
|
||||
advapi32
|
||||
shell32
|
||||
ole32
|
||||
oleaut32
|
||||
uuid
|
||||
odbc32
|
||||
odbccp32
|
||||
crypt32
|
||||
>
|
||||
$<$<NOT:$<BOOL:${MSVC}>>:dl>
|
||||
$<$<NOT:$<OR:$<BOOL:${MSVC}>,$<BOOL:${APPLE}>>>:rt>)
|
||||
|
||||
if (NOT is_msvc)
|
||||
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
||||
find_package(Threads)
|
||||
target_link_libraries(xrpl_syslibs INTERFACE Threads::Threads)
|
||||
if (NOT MSVC)
|
||||
set (THREADS_PREFER_PTHREAD_FLAG ON)
|
||||
find_package (Threads)
|
||||
target_link_libraries (xrpl_syslibs INTERFACE Threads::Threads)
|
||||
endif ()
|
||||
|
||||
add_library(xrpl_libs INTERFACE)
|
||||
add_library(Xrpl::libs ALIAS xrpl_libs)
|
||||
target_link_libraries(xrpl_libs INTERFACE Xrpl::syslibs)
|
||||
add_library (xrpl_libs INTERFACE)
|
||||
add_library (Xrpl::libs ALIAS xrpl_libs)
|
||||
target_link_libraries (xrpl_libs INTERFACE Xrpl::syslibs)
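# A hedged sketch (hypothetical target): executables in this build link the
# interface libraries defined above to inherit the common options and the
# transitive system dependencies.
add_executable(example_tool example_tool.cpp)
target_link_libraries(example_tool PRIVATE Xrpl::opts Xrpl::libs)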
|
||||
|
||||
@@ -1,197 +0,0 @@
|
||||
#[===================================================================[
Configure sanitizers based on environment variables.

This module reads the following environment variables:
- SANITIZERS: The sanitizers to enable. Possible values:
- "address"
- "address,undefinedbehavior"
- "thread"
- "thread,undefinedbehavior"
- "undefinedbehavior"

The compiler type and platform are detected in CompilationEnv.cmake.
The sanitizer compile options are applied to the 'common' interface library
which is linked to all targets in the project.

Internal flag variables set by this module:

- SANITIZER_TYPES: List of sanitizer types to enable (e.g., "address",
"thread", "undefined"). And two more flags for undefined behavior sanitizer (e.g., "float-divide-by-zero", "unsigned-integer-overflow").
This list is joined with commas and passed to -fsanitize=<list>.

- SANITIZERS_COMPILE_FLAGS: Compiler flags for sanitizer instrumentation.
Includes:
* -fno-omit-frame-pointer: Preserves frame pointers for stack traces
* -O1: Minimum optimization for reasonable performance
* -fsanitize=<types>: Enables sanitizer instrumentation
* -fsanitize-ignorelist=<path>: (Clang only) Compile-time ignorelist
* -mcmodel=large/medium: (GCC only) Code model for large binaries
* -Wno-stringop-overflow: (GCC only) Suppresses false positive warnings
* -Wno-tsan: (For GCC TSAN combination only) Suppresses atomic_thread_fence warnings

- SANITIZERS_LINK_FLAGS: Linker flags for sanitizer runtime libraries.
Includes:
* -fsanitize=<types>: Links sanitizer runtime libraries
* -mcmodel=large/medium: (GCC only) Matches compile-time code model

- SANITIZERS_RELOCATION_FLAGS: (GCC only) Code model flags for linking.
Used to handle large instrumented binaries on x86_64:
* -mcmodel=large: For AddressSanitizer (prevents relocation errors)
* -mcmodel=medium: For ThreadSanitizer (large model is incompatible)
#]===================================================================]
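# A hedged usage sketch: the module below is driven by the SANITIZERS
# environment variable described above, e.g. exporting
# SANITIZERS=address,undefinedbehavior before the configure step. The check
# below simply mirrors how the module detects that the variable is set.
if (DEFINED ENV{SANITIZERS} AND NOT "$ENV{SANITIZERS}" STREQUAL "")
    message(STATUS "Sanitizers requested via environment: $ENV{SANITIZERS}")
endif ()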
|
||||
|
||||
include(CompilationEnv)
|
||||
|
||||
# Read environment variable
|
||||
set(SANITIZERS "")
|
||||
if (DEFINED ENV{SANITIZERS})
|
||||
set(SANITIZERS "$ENV{SANITIZERS}")
|
||||
endif ()
|
||||
|
||||
# Set SANITIZERS_ENABLED flag for use in other modules
|
||||
if (SANITIZERS MATCHES "address|thread|undefinedbehavior")
|
||||
set(SANITIZERS_ENABLED TRUE)
|
||||
else ()
|
||||
set(SANITIZERS_ENABLED FALSE)
|
||||
return()
|
||||
endif ()
|
||||
|
||||
# Sanitizers are not supported on Windows/MSVC
|
||||
if (is_msvc)
|
||||
message(FATAL_ERROR "Sanitizers are not supported on Windows/MSVC. "
|
||||
"Please unset the SANITIZERS environment variable.")
|
||||
endif ()
|
||||
|
||||
message(STATUS "Configuring sanitizers: ${SANITIZERS}")
|
||||
|
||||
# Parse SANITIZERS value to determine which sanitizers to enable
|
||||
set(enable_asan FALSE)
|
||||
set(enable_tsan FALSE)
|
||||
set(enable_ubsan FALSE)
|
||||
|
||||
# Normalize SANITIZERS into a list
|
||||
set(san_list "${SANITIZERS}")
|
||||
string(REPLACE "," ";" san_list "${san_list}")
|
||||
separate_arguments(san_list)
|
||||
|
||||
foreach (san IN LISTS san_list)
|
||||
if (san STREQUAL "address")
|
||||
set(enable_asan TRUE)
|
||||
elseif (san STREQUAL "thread")
|
||||
set(enable_tsan TRUE)
|
||||
elseif (san STREQUAL "undefinedbehavior")
|
||||
set(enable_ubsan TRUE)
|
||||
else ()
|
||||
message(FATAL_ERROR "Unsupported sanitizer type: ${san}"
|
||||
"Supported: address, thread, undefinedbehavior and their combinations.")
|
||||
endif ()
|
||||
endforeach ()
|
||||
|
||||
# Validate sanitizer compatibility
|
||||
if (enable_asan AND enable_tsan)
|
||||
message(FATAL_ERROR "AddressSanitizer and ThreadSanitizer are incompatible and cannot be enabled simultaneously. "
|
||||
"Use 'address' or 'thread', optionally with 'undefinedbehavior'.")
|
||||
endif ()
|
||||
|
||||
# The frame pointer is required for meaningful stack traces. Sanitizers recommend a minimum of -O1 for reasonable performance.
|
||||
set(SANITIZERS_COMPILE_FLAGS "-fno-omit-frame-pointer" "-O1")
|
||||
|
||||
# Build the sanitizer flags list
|
||||
set(SANITIZER_TYPES)
|
||||
|
||||
if (enable_asan)
|
||||
list(APPEND SANITIZER_TYPES "address")
|
||||
elseif (enable_tsan)
|
||||
list(APPEND SANITIZER_TYPES "thread")
|
||||
endif ()
|
||||
|
||||
if (enable_ubsan)
|
||||
# UB sanitizer flags
|
||||
list(APPEND SANITIZER_TYPES "undefined" "float-divide-by-zero")
|
||||
if (is_clang)
|
||||
# Clang supports additional UB checks. More info here
|
||||
# https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html
|
||||
list(APPEND SANITIZER_TYPES "unsigned-integer-overflow")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
# Configure the code model for GCC on amd64. Use the large code model for ASAN to avoid relocation errors and the medium code
|
||||
# model for TSAN (the large model is not compatible with TSAN).
|
||||
set(SANITIZERS_RELOCATION_FLAGS)
|
||||
|
||||
# Compiler-specific configuration
|
||||
if (is_gcc)
|
||||
# Disable the mold, gold, and lld linkers for GCC with sanitizers. Use the default linker (bfd/ld), which is more lenient with
|
||||
# mixed code models. This is needed because the size of the instrumented binary exceeds the limits set by the mold, lld, and
|
||||
# gold linkers.
|
||||
set(use_mold OFF CACHE BOOL "Use mold linker" FORCE)
|
||||
set(use_gold OFF CACHE BOOL "Use gold linker" FORCE)
|
||||
set(use_lld OFF CACHE BOOL "Use lld linker" FORCE)
|
||||
message(STATUS " Disabled mold, gold, and lld linkers for GCC with sanitizers")
|
||||
|
||||
# Suppress false positive warnings in GCC with stringop-overflow
|
||||
list(APPEND SANITIZERS_COMPILE_FLAGS "-Wno-stringop-overflow")
|
||||
|
||||
if (is_amd64 AND enable_asan)
|
||||
message(STATUS " Using large code model (-mcmodel=large)")
|
||||
list(APPEND SANITIZERS_COMPILE_FLAGS "-mcmodel=large")
|
||||
list(APPEND SANITIZERS_RELOCATION_FLAGS "-mcmodel=large")
|
||||
elseif (enable_tsan)
|
||||
# GCC doesn't support atomic_thread_fence with tsan. Suppress warnings.
|
||||
list(APPEND SANITIZERS_COMPILE_FLAGS "-Wno-tsan")
|
||||
message(STATUS " Using medium code model (-mcmodel=medium)")
|
||||
list(APPEND SANITIZERS_COMPILE_FLAGS "-mcmodel=medium")
|
||||
list(APPEND SANITIZERS_RELOCATION_FLAGS "-mcmodel=medium")
|
||||
endif ()
|
||||
|
||||
# Join sanitizer flags with commas for -fsanitize option
|
||||
list(JOIN SANITIZER_TYPES "," SANITIZER_TYPES_STR)
|
||||
|
||||
# Add sanitizer to compile and link flags
|
||||
list(APPEND SANITIZERS_COMPILE_FLAGS "-fsanitize=${SANITIZER_TYPES_STR}")
|
||||
set(SANITIZERS_LINK_FLAGS "${SANITIZERS_RELOCATION_FLAGS}" "-fsanitize=${SANITIZER_TYPES_STR}")
|
||||
|
||||
elseif (is_clang)
|
||||
# Add the ignorelist for Clang (GCC doesn't support this). Use CMAKE_SOURCE_DIR to get the path to the ignorelist.
|
||||
set(IGNORELIST_PATH "${CMAKE_SOURCE_DIR}/sanitizers/suppressions/sanitizer-ignorelist.txt")
|
||||
if (NOT EXISTS "${IGNORELIST_PATH}")
|
||||
message(FATAL_ERROR "Sanitizer ignorelist not found: ${IGNORELIST_PATH}")
|
||||
endif ()
|
||||
|
||||
list(APPEND SANITIZERS_COMPILE_FLAGS "-fsanitize-ignorelist=${IGNORELIST_PATH}")
|
||||
message(STATUS " Using sanitizer ignorelist: ${IGNORELIST_PATH}")
|
||||
|
||||
# Join sanitizer flags with commas for -fsanitize option
|
||||
list(JOIN SANITIZER_TYPES "," SANITIZER_TYPES_STR)
|
||||
|
||||
# Add sanitizer to compile and link flags
|
||||
list(APPEND SANITIZERS_COMPILE_FLAGS "-fsanitize=${SANITIZER_TYPES_STR}")
|
||||
set(SANITIZERS_LINK_FLAGS "-fsanitize=${SANITIZER_TYPES_STR}")
|
||||
endif ()
|
||||
|
||||
message(STATUS " Compile flags: ${SANITIZERS_COMPILE_FLAGS}")
|
||||
message(STATUS " Link flags: ${SANITIZERS_LINK_FLAGS}")
|
||||
|
||||
# Apply the sanitizer flags to the 'common' interface library. This is the same library used by XrplCompiler.cmake.
|
||||
target_compile_options(common INTERFACE $<$<COMPILE_LANGUAGE:CXX>:${SANITIZERS_COMPILE_FLAGS}>
|
||||
$<$<COMPILE_LANGUAGE:C>:${SANITIZERS_COMPILE_FLAGS}>)
|
||||
|
||||
# Apply linker flags
|
||||
target_link_options(common INTERFACE ${SANITIZERS_LINK_FLAGS})
|
||||
|
||||
# Define SANITIZERS macro for BuildInfo.cpp
|
||||
set(sanitizers_list)
|
||||
if (enable_asan)
|
||||
list(APPEND sanitizers_list "ASAN")
|
||||
endif ()
|
||||
if (enable_tsan)
|
||||
list(APPEND sanitizers_list "TSAN")
|
||||
endif ()
|
||||
if (enable_ubsan)
|
||||
list(APPEND sanitizers_list "UBSAN")
|
||||
endif ()
|
||||
|
||||
if (sanitizers_list)
|
||||
list(JOIN sanitizers_list "." sanitizers_str)
|
||||
target_compile_definitions(common INTERFACE SANITIZERS=${sanitizers_str})
|
||||
endif ()
|
||||
@@ -2,43 +2,49 @@
|
||||
sanity checks
|
||||
#]===================================================================]
|
||||
|
||||
include(CompilationEnv)
|
||||
|
||||
get_property(is_multiconfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
||||
|
||||
set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "" FORCE)
|
||||
set (CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "" FORCE)
|
||||
if (NOT is_multiconfig)
|
||||
if (NOT CMAKE_BUILD_TYPE)
|
||||
message(STATUS "Build type not specified - defaulting to Release")
|
||||
set(CMAKE_BUILD_TYPE Release CACHE STRING "build type" FORCE)
|
||||
elseif (NOT (CMAKE_BUILD_TYPE STREQUAL Debug OR CMAKE_BUILD_TYPE STREQUAL Release))
|
||||
# for simplicity, these are the only two config types we care about. Limiting the build types simplifies dealing
|
||||
# with external project builds especially
|
||||
message(FATAL_ERROR " *** Only Debug or Release build types are currently supported ***")
|
||||
endif ()
|
||||
if (NOT CMAKE_BUILD_TYPE)
|
||||
message (STATUS "Build type not specified - defaulting to Release")
|
||||
set (CMAKE_BUILD_TYPE Release CACHE STRING "build type" FORCE)
|
||||
elseif (NOT (CMAKE_BUILD_TYPE STREQUAL Debug OR CMAKE_BUILD_TYPE STREQUAL Release))
|
||||
# for simplicity, these are the only two config types we care about. Limiting
|
||||
# the build types simplifies dealing with external project builds especially
|
||||
message (FATAL_ERROR " *** Only Debug or Release build types are currently supported ***")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
if (is_clang) # both Clang and AppleClang
|
||||
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16.0)
|
||||
message(FATAL_ERROR "This project requires clang 16 or later")
|
||||
endif ()
|
||||
elseif (is_gcc)
|
||||
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12.0)
|
||||
message(FATAL_ERROR "This project requires GCC 12 or later")
|
||||
endif ()
|
||||
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES ".*Clang") # both Clang and AppleClang
|
||||
set (is_clang TRUE)
|
||||
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND
|
||||
CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16.0)
|
||||
message (FATAL_ERROR "This project requires clang 16 or later")
|
||||
endif ()
|
||||
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
|
||||
set (is_gcc TRUE)
|
||||
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12.0)
|
||||
message (FATAL_ERROR "This project requires GCC 12 or later")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
# check for in-source build and fail
|
||||
if ("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
|
||||
message(FATAL_ERROR "Builds (in-source) are not allowed in "
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}. Please remove CMakeCache.txt and the CMakeFiles "
|
||||
"directory from ${CMAKE_CURRENT_SOURCE_DIR} and try building in a separate directory.")
|
||||
message (FATAL_ERROR "Builds (in-source) are not allowed in "
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}. Please remove CMakeCache.txt and the CMakeFiles "
|
||||
"directory from ${CMAKE_CURRENT_SOURCE_DIR} and try building in a separate directory.")
|
||||
endif ()
|
||||
|
||||
if (MSVC AND CMAKE_GENERATOR_PLATFORM STREQUAL "Win32")
|
||||
message(FATAL_ERROR "Visual Studio 32-bit build is not supported.")
|
||||
message (FATAL_ERROR "Visual Studio 32-bit build is not supported.")
|
||||
endif ()
|
||||
|
||||
if (NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
|
||||
message (FATAL_ERROR "Xrpld requires a 64 bit target architecture.\n"
|
||||
"The most likely cause of this warning is trying to build xrpld with a 32-bit OS.")
|
||||
endif ()
|
||||
|
||||
if (APPLE AND NOT HOMEBREW)
|
||||
find_program(HOMEBREW brew)
|
||||
find_program (HOMEBREW brew)
|
||||
endif ()
|
||||
|
||||
@@ -2,113 +2,143 @@
|
||||
declare options and variables
|
||||
#]===================================================================]
|
||||
|
||||
include(CompilationEnv)
|
||||
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||
set (is_linux TRUE)
|
||||
else()
|
||||
set(is_linux FALSE)
|
||||
endif()
|
||||
|
||||
set(is_ci FALSE)
|
||||
if (DEFINED ENV{CI})
|
||||
if ("$ENV{CI}" STREQUAL "true")
|
||||
set(is_ci TRUE)
|
||||
endif ()
|
||||
endif ()
|
||||
if("$ENV{CI}" STREQUAL "true" OR "$ENV{CONTINUOUS_INTEGRATION}" STREQUAL "true")
|
||||
set(is_ci TRUE)
|
||||
else()
|
||||
set(is_ci FALSE)
|
||||
endif()
|
||||
|
||||
get_directory_property(has_parent PARENT_DIRECTORY)
|
||||
if (has_parent)
|
||||
set(is_root_project OFF)
|
||||
else ()
|
||||
set(is_root_project ON)
|
||||
endif ()
|
||||
if(has_parent)
|
||||
set(is_root_project OFF)
|
||||
else()
|
||||
set(is_root_project ON)
|
||||
endif()
|
||||
|
||||
option(assert "Enables asserts, even in release builds" OFF)
|
||||
|
||||
option(xrpld "Build xrpld" ON)
|
||||
|
||||
option(tests "Build tests" ON)
|
||||
if (tests)
|
||||
# This setting allows making a separate workflow to test fees other than default 10
|
||||
if (NOT UNIT_TEST_REFERENCE_FEE)
|
||||
set(UNIT_TEST_REFERENCE_FEE "10" CACHE STRING "")
|
||||
endif ()
|
||||
endif ()
|
||||
if(tests)
|
||||
# This setting allows making a separate workflow to test fees other than default 10
|
||||
if(NOT UNIT_TEST_REFERENCE_FEE)
|
||||
set(UNIT_TEST_REFERENCE_FEE "10" CACHE STRING "")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
option(unity "Creates a build using UNITY support in cmake." OFF)
|
||||
if (unity)
|
||||
if (NOT is_ci)
|
||||
set(CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
|
||||
endif ()
|
||||
set(CMAKE_UNITY_BUILD ON CACHE BOOL "Do a unity build")
|
||||
endif ()
|
||||
if(unity)
|
||||
if(NOT is_ci)
|
||||
set(CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
|
||||
endif()
|
||||
set(CMAKE_UNITY_BUILD ON CACHE BOOL "Do a unity build")
|
||||
endif()
|
||||
|
||||
if (is_clang AND is_linux)
|
||||
option(voidstar "Enable Antithesis instrumentation." OFF)
|
||||
endif ()
|
||||
if(is_clang AND is_linux)
|
||||
option(voidstar "Enable Antithesis instrumentation." OFF)
|
||||
endif()
|
||||
|
||||
if (is_gcc OR is_clang)
|
||||
include(ProcessorCount)
|
||||
ProcessorCount(PROCESSOR_COUNT)
|
||||
if(is_gcc OR is_clang)
|
||||
include(ProcessorCount)
|
||||
ProcessorCount(PROCESSOR_COUNT)
|
||||
|
||||
option(coverage "Generates coverage info." OFF)
|
||||
option(profile "Add profiling flags" OFF)
|
||||
set(coverage_format "html-details" CACHE STRING "Output format of the coverage report.")
|
||||
set(coverage_extra_args "" CACHE STRING "Additional arguments to pass to gcovr.")
|
||||
option(wextra "compile with extra gcc/clang warnings enabled" ON)
|
||||
else ()
|
||||
set(profile OFF CACHE BOOL "gcc/clang only" FORCE)
|
||||
set(coverage OFF CACHE BOOL "gcc/clang only" FORCE)
|
||||
set(wextra OFF CACHE BOOL "gcc/clang only" FORCE)
|
||||
endif ()
|
||||
option(coverage "Generates coverage info." OFF)
|
||||
option(profile "Add profiling flags" OFF)
|
||||
set(coverage_format "html-details" CACHE STRING
|
||||
"Output format of the coverage report.")
|
||||
set(coverage_extra_args "" CACHE STRING
|
||||
"Additional arguments to pass to gcovr.")
|
||||
option(wextra "compile with extra gcc/clang warnings enabled" ON)
|
||||
else()
|
||||
set(profile OFF CACHE BOOL "gcc/clang only" FORCE)
|
||||
set(coverage OFF CACHE BOOL "gcc/clang only" FORCE)
|
||||
set(wextra OFF CACHE BOOL "gcc/clang only" FORCE)
|
||||
endif()
|
||||
|
||||
if (is_linux AND NOT SANITIZER)
|
||||
option(BUILD_SHARED_LIBS "build shared xrpl libraries" OFF)
|
||||
option(static "link protobuf, openssl, libc++, and boost statically" ON)
|
||||
option(perf "Enables flags that assist with perf recording" OFF)
|
||||
option(use_gold "enables detection of gold (binutils) linker" ON)
|
||||
option(use_mold "enables detection of mold (binutils) linker" ON)
|
||||
# Set a default value for the log flag based on the build type. This provides a sensible default (on for debug, off
|
||||
# for release) while still allowing the user to override it for any build.
|
||||
if (CMAKE_BUILD_TYPE STREQUAL "Debug")
|
||||
set(TRUNCATED_LOGS_DEFAULT ON)
|
||||
else ()
|
||||
set(TRUNCATED_LOGS_DEFAULT OFF)
|
||||
endif ()
|
||||
option(TRUNCATED_THREAD_NAME_LOGS "Show warnings about truncated thread names on Linux." ${TRUNCATED_LOGS_DEFAULT})
|
||||
if (TRUNCATED_THREAD_NAME_LOGS)
|
||||
add_compile_definitions(TRUNCATED_THREAD_NAME_LOGS)
|
||||
endif ()
|
||||
else ()
|
||||
# we are not ready to allow shared-libs on windows because it would require export declarations. On macos it's more
|
||||
# feasible, but static openssl produces odd linker errors, thus we disable shared lib builds for now.
|
||||
set(BUILD_SHARED_LIBS OFF CACHE BOOL "build shared xrpl libraries - OFF for win/macos" FORCE)
|
||||
set(static ON CACHE BOOL "static link, linux only. ON for WIN/macos" FORCE)
|
||||
set(perf OFF CACHE BOOL "perf flags, linux only" FORCE)
|
||||
set(use_gold OFF CACHE BOOL "gold linker, linux only" FORCE)
|
||||
set(use_mold OFF CACHE BOOL "mold linker, linux only" FORCE)
|
||||
endif ()
|
||||
if(is_linux)
|
||||
option(BUILD_SHARED_LIBS "build shared xrpl libraries" OFF)
|
||||
option(static "link protobuf, openssl, libc++, and boost statically" ON)
|
||||
option(perf "Enables flags that assist with perf recording" OFF)
|
||||
option(use_gold "enables detection of gold (binutils) linker" ON)
|
||||
option(use_mold "enables detection of mold (binutils) linker" ON)
|
||||
else()
|
||||
# we are not ready to allow shared-libs on windows because it would require
|
||||
# export declarations. On macos it's more feasible, but static openssl
|
||||
# produces odd linker errors, thus we disable shared lib builds for now.
|
||||
set(BUILD_SHARED_LIBS OFF CACHE BOOL "build shared xrpl libraries - OFF for win/macos" FORCE)
|
||||
set(static ON CACHE BOOL "static link, linux only. ON for WIN/macos" FORCE)
|
||||
set(perf OFF CACHE BOOL "perf flags, linux only" FORCE)
|
||||
set(use_gold OFF CACHE BOOL "gold linker, linux only" FORCE)
|
||||
set(use_mold OFF CACHE BOOL "mold linker, linux only" FORCE)
|
||||
endif()
|
||||
|
||||
if (is_clang)
|
||||
option(use_lld "enables detection of lld linker" ON)
|
||||
else ()
|
||||
set(use_lld OFF CACHE BOOL "try lld linker, clang only" FORCE)
|
||||
endif ()
|
||||
if(is_clang)
|
||||
option(use_lld "enables detection of lld linker" ON)
|
||||
else()
|
||||
set(use_lld OFF CACHE BOOL "try lld linker, clang only" FORCE)
|
||||
endif()
|
||||
|
||||
option(jemalloc "Enables jemalloc for heap profiling" OFF)
|
||||
option(werr "treat warnings as errors" OFF)
|
||||
option(local_protobuf "Force a local build of protobuf instead of looking for an installed version." OFF)
|
||||
option(local_grpc "Force a local build of gRPC instead of looking for an installed version." OFF)
|
||||
option(local_protobuf
|
||||
"Force a local build of protobuf instead of looking for an installed version." OFF)
|
||||
option(local_grpc
|
||||
"Force a local build of gRPC instead of looking for an installed version." OFF)
|
||||
|
||||
# this one is a string and therefore can't be an option
|
||||
set(san "" CACHE STRING "On gcc & clang, add sanitizer instrumentation")
|
||||
set_property(CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
|
||||
if(san)
|
||||
string(TOLOWER ${san} san)
|
||||
set(SAN_FLAG "-fsanitize=${san}")
|
||||
set(SAN_LIB "")
|
||||
if(is_gcc)
|
||||
if(san STREQUAL "address")
|
||||
set(SAN_LIB "asan")
|
||||
elseif(san STREQUAL "thread")
|
||||
set(SAN_LIB "tsan")
|
||||
elseif(san STREQUAL "memory")
|
||||
set(SAN_LIB "msan")
|
||||
elseif(san STREQUAL "undefined")
|
||||
set(SAN_LIB "ubsan")
|
||||
endif()
|
||||
endif()
|
||||
set(_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
|
||||
set(CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
|
||||
check_cxx_compiler_flag(${SAN_FLAG} COMPILER_SUPPORTS_SAN)
|
||||
set(CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
|
||||
if(NOT COMPILER_SUPPORTS_SAN)
|
||||
message(FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# the remaining options are obscure and rarely used
|
||||
option(beast_no_unit_test_inline "Prevents unit test definitions from being inserted into global table" OFF)
|
||||
option(single_io_service_thread "Restricts the number of threads calling io_context::run to one. \
|
||||
This can be useful when debugging." OFF)
|
||||
option(boost_show_deprecated "Allow boost to fail on deprecated usage. Only useful if you're trying\
|
||||
to find deprecated calls." OFF)
|
||||
option(beast_no_unit_test_inline
|
||||
"Prevents unit test definitions from being inserted into global table"
|
||||
OFF)
|
||||
option(single_io_service_thread
|
||||
"Restricts the number of threads calling io_context::run to one. \
|
||||
This can be useful when debugging."
|
||||
OFF)
|
||||
option(boost_show_deprecated
|
||||
"Allow boost to fail on deprecated usage. Only useful if you're trying\
|
||||
to find deprecated calls."
|
||||
OFF)
|
||||
|
||||
if (WIN32)
|
||||
option(beast_disable_autolink "Disables autolinking of system libraries on WIN32" OFF)
|
||||
else ()
|
||||
set(beast_disable_autolink OFF CACHE BOOL "WIN32 only" FORCE)
|
||||
endif ()
|
||||
if(WIN32)
|
||||
option(beast_disable_autolink "Disables autolinking of system libraries on WIN32" OFF)
|
||||
else()
|
||||
set(beast_disable_autolink OFF CACHE BOOL "WIN32 only" FORCE)
|
||||
endif()
|
||||
|
||||
if (coverage)
|
||||
message(STATUS "coverage build requested - forcing Debug build")
|
||||
set(CMAKE_BUILD_TYPE Debug CACHE STRING "build type" FORCE)
|
||||
endif ()
|
||||
if(coverage)
|
||||
message(STATUS "coverage build requested - forcing Debug build")
|
||||
set(CMAKE_BUILD_TYPE Debug CACHE STRING "build type" FORCE)
|
||||
endif()
|
||||
|
||||
@@ -1,17 +1,20 @@
|
||||
option(validator_keys "Enables building of validator-keys tool as a separate target (imported via FetchContent)" OFF)
|
||||
option (validator_keys "Enables building of validator-keys tool as a separate target (imported via FetchContent)" OFF)
|
||||
|
||||
if (validator_keys)
|
||||
git_branch(current_branch)
|
||||
# default to tracking VK master branch unless we are on release
|
||||
if (NOT (current_branch STREQUAL "release"))
|
||||
set(current_branch "master")
|
||||
endif ()
|
||||
message(STATUS "Tracking ValidatorKeys branch: ${current_branch}")
|
||||
git_branch (current_branch)
|
||||
# default to tracking VK master branch unless we are on release
|
||||
if (NOT (current_branch STREQUAL "release"))
|
||||
set (current_branch "master")
|
||||
endif ()
|
||||
message (STATUS "Tracking ValidatorKeys branch: ${current_branch}")
|
||||
|
||||
FetchContent_Declare(validator_keys GIT_REPOSITORY https://github.com/ripple/validator-keys-tool.git
|
||||
GIT_TAG "${current_branch}")
|
||||
FetchContent_MakeAvailable(validator_keys)
|
||||
set_target_properties(validator-keys PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}")
|
||||
install(TARGETS validator-keys RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
|
||||
FetchContent_Declare (
|
||||
validator_keys
|
||||
GIT_REPOSITORY https://github.com/ripple/validator-keys-tool.git
|
||||
GIT_TAG "${current_branch}"
|
||||
)
|
||||
FetchContent_MakeAvailable(validator_keys)
|
||||
set_target_properties(validator-keys PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}")
|
||||
install(TARGETS validator-keys RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
|
||||
|
||||
endif ()
|
||||
|
||||
@@ -3,13 +3,13 @@
|
||||
#]===================================================================]
|
||||
|
||||
file(STRINGS src/libxrpl/protocol/BuildInfo.cpp BUILD_INFO)
|
||||
foreach (line_ ${BUILD_INFO})
|
||||
if (line_ MATCHES "versionString[ ]*=[ ]*\"(.+)\"")
|
||||
set(xrpld_version ${CMAKE_MATCH_1})
|
||||
endif ()
|
||||
endforeach ()
|
||||
if (xrpld_version)
|
||||
message(STATUS "xrpld version: ${xrpld_version}")
|
||||
else ()
|
||||
message(FATAL_ERROR "unable to determine xrpld version")
|
||||
endif ()
|
||||
foreach(line_ ${BUILD_INFO})
|
||||
if(line_ MATCHES "versionString[ ]*=[ ]*\"(.+)\"")
|
||||
set(xrpld_version ${CMAKE_MATCH_1})
|
||||
endif()
|
||||
endforeach()
|
||||
if(xrpld_version)
|
||||
message(STATUS "xrpld version: ${xrpld_version}")
|
||||
else()
|
||||
message(FATAL_ERROR "unable to determine xrpld version")
|
||||
endif()
|
||||
|
||||
@@ -12,14 +12,26 @@ include(isolate_headers)
|
||||
# add_module(parent a)
|
||||
# add_module(parent b)
|
||||
# target_link_libraries(project.libparent.b PUBLIC project.libparent.a)
|
||||
function (add_module parent name)
|
||||
set(target ${PROJECT_NAME}.lib${parent}.${name})
|
||||
add_library(${target} OBJECT)
|
||||
file(GLOB_RECURSE sources CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/src/lib${parent}/${name}/*.cpp")
|
||||
target_sources(${target} PRIVATE ${sources})
|
||||
target_include_directories(${target} PUBLIC "$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>")
|
||||
isolate_headers(${target} "${CMAKE_CURRENT_SOURCE_DIR}/include"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/include/${parent}/${name}" PUBLIC)
|
||||
isolate_headers(${target} "${CMAKE_CURRENT_SOURCE_DIR}/src" "${CMAKE_CURRENT_SOURCE_DIR}/src/lib${parent}/${name}"
|
||||
PRIVATE)
|
||||
endfunction ()
|
||||
function(add_module parent name)
|
||||
set(target ${PROJECT_NAME}.lib${parent}.${name})
|
||||
add_library(${target} OBJECT)
|
||||
file(GLOB_RECURSE sources CONFIGURE_DEPENDS
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/src/lib${parent}/${name}/*.cpp"
|
||||
)
|
||||
target_sources(${target} PRIVATE ${sources})
|
||||
target_include_directories(${target} PUBLIC
|
||||
"$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>"
|
||||
)
|
||||
isolate_headers(
|
||||
${target}
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/include"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/include/${parent}/${name}"
|
||||
PUBLIC
|
||||
)
|
||||
isolate_headers(
|
||||
${target}
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/src"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/src/lib${parent}/${name}"
|
||||
PRIVATE
|
||||
)
|
||||
endfunction()
|
||||
|
||||
@@ -1,19 +1,20 @@
|
||||
# file(CREATE_SYMLINK) only works on Windows with administrator privileges. https://stackoverflow.com/a/61244115/618906
|
||||
function (create_symbolic_link target link)
|
||||
if (WIN32)
|
||||
if (NOT IS_SYMLINK "${link}")
|
||||
if (NOT IS_ABSOLUTE "${target}")
|
||||
# Relative links do not work on Windows.
|
||||
set(target "${link}/../${target}")
|
||||
endif ()
|
||||
file(TO_NATIVE_PATH "${target}" target)
|
||||
file(TO_NATIVE_PATH "${link}" link)
|
||||
execute_process(COMMAND cmd.exe /c mklink /J "${link}" "${target}")
|
||||
endif ()
|
||||
else ()
|
||||
file(CREATE_LINK "${target}" "${link}" SYMBOLIC)
|
||||
endif ()
|
||||
if (NOT IS_SYMLINK "${link}")
|
||||
message(ERROR "failed to create symlink: <${link}>")
|
||||
endif ()
|
||||
endfunction ()
|
||||
# file(CREATE_SYMLINK) only works on Windows with administrator privileges.
|
||||
# https://stackoverflow.com/a/61244115/618906
|
||||
function(create_symbolic_link target link)
|
||||
if(WIN32)
|
||||
if(NOT IS_SYMLINK "${link}")
|
||||
if(NOT IS_ABSOLUTE "${target}")
|
||||
# Relative links do not work on Windows.
|
||||
set(target "${link}/../${target}")
|
||||
endif()
|
||||
file(TO_NATIVE_PATH "${target}" target)
|
||||
file(TO_NATIVE_PATH "${link}" link)
|
||||
execute_process(COMMAND cmd.exe /c mklink /J "${link}" "${target}")
|
||||
endif()
|
||||
else()
|
||||
file(CREATE_LINK "${target}" "${link}" SYMBOLIC)
|
||||
endif()
|
||||
if(NOT IS_SYMLINK "${link}")
|
||||
message(ERROR "failed to create symlink: <${link}>")
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
@@ -1,44 +1,47 @@
|
||||
include(CompilationEnv)
|
||||
include(XrplSanitizers)
|
||||
|
||||
find_package(Boost REQUIRED
|
||||
COMPONENTS chrono
|
||||
container
|
||||
coroutine
|
||||
date_time
|
||||
filesystem
|
||||
json
|
||||
program_options
|
||||
regex
|
||||
system
|
||||
thread)
|
||||
find_package(Boost 1.82 REQUIRED
|
||||
COMPONENTS
|
||||
chrono
|
||||
container
|
||||
coroutine
|
||||
date_time
|
||||
filesystem
|
||||
json
|
||||
program_options
|
||||
regex
|
||||
system
|
||||
thread
|
||||
)
|
||||
|
||||
add_library(xrpl_boost INTERFACE)
|
||||
add_library(Xrpl::boost ALIAS xrpl_boost)
|
||||
|
||||
target_link_libraries(
|
||||
xrpl_boost
|
||||
INTERFACE Boost::headers
|
||||
Boost::chrono
|
||||
Boost::container
|
||||
Boost::coroutine
|
||||
Boost::date_time
|
||||
Boost::filesystem
|
||||
Boost::json
|
||||
Boost::process
|
||||
Boost::program_options
|
||||
Boost::regex
|
||||
Boost::thread)
|
||||
if (Boost_COMPILER)
|
||||
target_link_libraries(xrpl_boost INTERFACE Boost::disable_autolinking)
|
||||
endif ()
|
||||
if (SANITIZERS_ENABLED AND is_clang)
|
||||
# TODO: gcc does not support -fsanitize-blacklist...can we do something else for gcc ?
|
||||
if (NOT Boost_INCLUDE_DIRS AND TARGET Boost::headers)
|
||||
get_target_property(Boost_INCLUDE_DIRS Boost::headers INTERFACE_INCLUDE_DIRECTORIES)
|
||||
endif ()
|
||||
message(STATUS "Adding [${Boost_INCLUDE_DIRS}] to sanitizer blacklist")
|
||||
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt "src:${Boost_INCLUDE_DIRS}/*")
|
||||
target_compile_options(opts INTERFACE # ignore boost headers for sanitizing
|
||||
-fsanitize-blacklist=${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt)
|
||||
endif ()
|
||||
target_link_libraries(xrpl_boost
|
||||
INTERFACE
|
||||
Boost::headers
|
||||
Boost::chrono
|
||||
Boost::container
|
||||
Boost::coroutine
|
||||
Boost::date_time
|
||||
Boost::filesystem
|
||||
Boost::json
|
||||
Boost::process
|
||||
Boost::program_options
|
||||
Boost::regex
|
||||
Boost::system
|
||||
Boost::thread)
|
||||
if(Boost_COMPILER)
|
||||
target_link_libraries(xrpl_boost INTERFACE Boost::disable_autolinking)
|
||||
endif()
|
||||
if(san AND is_clang)
|
||||
# TODO: gcc does not support -fsanitize-blacklist...can we do something else
|
||||
# for gcc ?
|
||||
if(NOT Boost_INCLUDE_DIRS AND TARGET Boost::headers)
|
||||
get_target_property(Boost_INCLUDE_DIRS Boost::headers INTERFACE_INCLUDE_DIRECTORIES)
|
||||
endif()
|
||||
message(STATUS "Adding [${Boost_INCLUDE_DIRS}] to sanitizer blacklist")
|
||||
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt "src:${Boost_INCLUDE_DIRS}/*")
|
||||
target_compile_options(opts
|
||||
INTERFACE
|
||||
# ignore boost headers for sanitizing
|
||||
-fsanitize-blacklist=${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt)
|
||||
endif()
|
||||
|
||||
@@ -37,12 +37,12 @@ include(create_symbolic_link)
|
||||
# `${CMAKE_CURRENT_BINARY_DIR}/include/${target}`.
|
||||
#
|
||||
# isolate_headers(target A B scope)
|
||||
function (isolate_headers target A B scope)
|
||||
file(RELATIVE_PATH C "${A}" "${B}")
|
||||
set(X "${CMAKE_CURRENT_BINARY_DIR}/modules/${target}")
|
||||
set(Y "${X}/${C}")
|
||||
cmake_path(GET Y PARENT_PATH parent)
|
||||
file(MAKE_DIRECTORY "${parent}")
|
||||
create_symbolic_link("${B}" "${Y}")
|
||||
target_include_directories(${target} ${scope} "$<BUILD_INTERFACE:${X}>")
|
||||
endfunction ()
|
||||
function(isolate_headers target A B scope)
|
||||
file(RELATIVE_PATH C "${A}" "${B}")
|
||||
set(X "${CMAKE_CURRENT_BINARY_DIR}/modules/${target}")
|
||||
set(Y "${X}/${C}")
|
||||
cmake_path(GET Y PARENT_PATH parent)
|
||||
file(MAKE_DIRECTORY "${parent}")
|
||||
create_symbolic_link("${B}" "${Y}")
|
||||
target_include_directories(${target} ${scope} "$<BUILD_INTERFACE:${X}>")
|
||||
endfunction()
|
||||
|
||||
@@ -6,19 +6,19 @@
|
||||
# target_link_libraries(project.libparent.b PUBLIC project.libparent.a)
|
||||
# add_library(project.libparent)
|
||||
# target_link_modules(parent PUBLIC a b)
|
||||
function (target_link_modules parent scope)
|
||||
set(library ${PROJECT_NAME}.lib${parent})
|
||||
foreach (name ${ARGN})
|
||||
set(module ${library}.${name})
|
||||
get_target_property(sources ${library} SOURCES)
|
||||
list(LENGTH sources before)
|
||||
get_target_property(dupes ${module} SOURCES)
|
||||
list(LENGTH dupes expected)
|
||||
list(REMOVE_ITEM sources ${dupes})
|
||||
list(LENGTH sources after)
|
||||
math(EXPR actual "${before} - ${after}")
|
||||
message(STATUS "${module} with ${expected} sources took ${actual} sources from ${library}")
|
||||
set_target_properties(${library} PROPERTIES SOURCES "${sources}")
|
||||
target_link_libraries(${library} ${scope} ${module})
|
||||
endforeach ()
|
||||
endfunction ()
|
||||
function(target_link_modules parent scope)
|
||||
set(library ${PROJECT_NAME}.lib${parent})
|
||||
foreach(name ${ARGN})
|
||||
set(module ${library}.${name})
|
||||
get_target_property(sources ${library} SOURCES)
|
||||
list(LENGTH sources before)
|
||||
get_target_property(dupes ${module} SOURCES)
|
||||
list(LENGTH dupes expected)
|
||||
list(REMOVE_ITEM sources ${dupes})
|
||||
list(LENGTH sources after)
|
||||
math(EXPR actual "${before} - ${after}")
|
||||
message(STATUS "${module} with ${expected} sources took ${actual} sources from ${library}")
|
||||
set_target_properties(${library} PROPERTIES SOURCES "${sources}")
|
||||
target_link_libraries(${library} ${scope} ${module})
|
||||
endforeach()
|
||||
endfunction()
|
||||
|
||||
@@ -35,20 +35,28 @@ find_package(Protobuf REQUIRED)
|
||||
# This prefix should appear at the start of all your consumer includes.
|
||||
# ARGN:
|
||||
# A list of .proto files.
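#
# Hypothetical call (target and file names are illustrative, not taken from this repository):
#   target_protobuf_sources(myproj.libpb myproj/proto
#     PROTOS proto/example.proto)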
|
||||
function (target_protobuf_sources target prefix)
|
||||
set(dir "${CMAKE_CURRENT_BINARY_DIR}/pb-${target}")
|
||||
file(MAKE_DIRECTORY "${dir}/${prefix}")
|
||||
function(target_protobuf_sources target prefix)
|
||||
set(dir "${CMAKE_CURRENT_BINARY_DIR}/pb-${target}")
|
||||
file(MAKE_DIRECTORY "${dir}/${prefix}")
|
||||
|
||||
protobuf_generate(TARGET ${target} PROTOC_OUT_DIR "${dir}/${prefix}" "${ARGN}")
|
||||
target_include_directories(
|
||||
${target} SYSTEM
|
||||
PUBLIC # Allows #include <package/path/to/file.proto> used by consumer files.
|
||||
$<BUILD_INTERFACE:${dir}>
|
||||
# Allows #include "path/to/file.proto" used by generated files.
|
||||
$<BUILD_INTERFACE:${dir}/${prefix}>
|
||||
# Allows #include <package/path/to/file.proto> used by consumer files.
|
||||
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
|
||||
# Allows #include "path/to/file.proto" used by generated files.
|
||||
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/${prefix}>)
|
||||
install(DIRECTORY ${dir}/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} FILES_MATCHING PATTERN "*.h")
|
||||
endfunction ()
|
||||
protobuf_generate(
|
||||
TARGET ${target}
|
||||
PROTOC_OUT_DIR "${dir}/${prefix}"
|
||||
"${ARGN}"
|
||||
)
|
||||
target_include_directories(${target} SYSTEM PUBLIC
|
||||
# Allows #include <package/path/to/file.proto> used by consumer files.
|
||||
$<BUILD_INTERFACE:${dir}>
|
||||
# Allows #include "path/to/file.proto" used by generated files.
|
||||
$<BUILD_INTERFACE:${dir}/${prefix}>
|
||||
# Allows #include <package/path/to/file.proto> used by consumer files.
|
||||
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
|
||||
# Allows #include "path/to/file.proto" used by generated files.
|
||||
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/${prefix}>
|
||||
)
|
||||
install(
|
||||
DIRECTORY ${dir}/
|
||||
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
|
||||
FILES_MATCHING PATTERN "*.h"
|
||||
)
|
||||
endfunction()
|
||||
|
||||
conan.lock
@@ -1,63 +1,59 @@
|
||||
{
|
||||
"version": "0.5",
|
||||
"requires": [
|
||||
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1765850150.075",
|
||||
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1765850149.987",
|
||||
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1765850149.926",
|
||||
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1765850149.46",
|
||||
"snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1765850147.878",
|
||||
"secp256k1/0.7.1#3a61e95e220062ef32c48d019e9c81f7%1770306721.686",
|
||||
"rocksdb/10.5.1#4a197eca381a3e5ae8adf8cffa5aacd0%1765850186.86",
|
||||
"re2/20230301#ca3b241baec15bd31ea9187150e0b333%1765850148.103",
|
||||
"protobuf/6.32.1#f481fd276fc23a33b85a3ed1e898b693%1765850161.038",
|
||||
"openssl/3.5.5#05a4ac5b7323f7a329b2db1391d9941f%1769599205.414",
|
||||
"nudb/2.0.9#0432758a24204da08fee953ec9ea03cb%1769436073.32",
|
||||
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1765850143.914",
|
||||
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1765842973.492",
|
||||
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1765842973.03",
|
||||
"libarchive/3.8.1#ffee18995c706e02bf96e7a2f7042e0d%1765850144.736",
|
||||
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
|
||||
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
|
||||
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
|
||||
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
|
||||
"snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246",
|
||||
"secp256k1/0.7.0#9c4ab67bdc3860c16ea5b36aed8f74ea%1765202256.763",
|
||||
"rocksdb/10.5.1#4a197eca381a3e5ae8adf8cffa5aacd0%1762797952.535",
|
||||
"re2/20230301#ca3b241baec15bd31ea9187150e0b333%1764175362.029",
|
||||
"protobuf/6.32.1#f481fd276fc23a33b85a3ed1e898b693%1764863245.83",
|
||||
"openssl/3.5.4#a1d5835cc6ed5c5b8f3cd5b9b5d24205%1760106486.594",
|
||||
"nudb/2.0.9#fb8dfd1a5557f5e0528114c2da17721e%1763150366.909",
|
||||
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
|
||||
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
|
||||
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03",
|
||||
"libarchive/3.8.1#ffee18995c706e02bf96e7a2f7042e0d%1764175360.142",
|
||||
"jemalloc/5.3.0#e951da9cf599e956cebc117880d2d9f8%1729241615.244",
|
||||
"gtest/1.17.0#5224b3b3ff3b4ce1133cbdd27d53ee7d%1768312129.152",
|
||||
"grpc/1.72.0#f244a57bff01e708c55a1100b12e1589%1765850193.734",
|
||||
"ed25519/2015.03#ae761bdc52730a843f0809bdf6c1b1f6%1765850143.772",
|
||||
"date/3.0.4#862e11e80030356b53c2c38599ceb32b%1765850143.772",
|
||||
"c-ares/1.34.5#5581c2b62a608b40bb85d965ab3ec7c8%1765850144.336",
|
||||
"bzip2/1.0.8#c470882369c2d95c5c77e970c0c7e321%1765850143.837",
|
||||
"boost/1.90.0#d5e8defe7355494953be18524a7f135b%1769454080.269",
|
||||
"abseil/20250127.0#99262a368bd01c0ccca8790dfced9719%1766517936.993"
|
||||
"grpc/1.72.0#f244a57bff01e708c55a1100b12e1589%1763158050.628",
|
||||
"ed25519/2015.03#ae761bdc52730a843f0809bdf6c1b1f6%1764270189.893",
|
||||
"doctest/2.4.12#eb9fb352fb2fdfc8abb17ec270945165%1762797941.757",
|
||||
"date/3.0.4#862e11e80030356b53c2c38599ceb32b%1763584497.32",
|
||||
"c-ares/1.34.5#5581c2b62a608b40bb85d965ab3ec7c8%1764175359.429",
|
||||
"bzip2/1.0.8#c470882369c2d95c5c77e970c0c7e321%1764175359.429",
|
||||
"boost/1.88.0#8852c0b72ce8271fb8ff7c53456d4983%1756223752.326",
|
||||
"abseil/20250127.0#9e8e8cfc89a1324139fc0ee3bd4d8c8c%1753819045.301"
|
||||
],
|
||||
"build_requires": [
|
||||
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1765850150.075",
|
||||
"strawberryperl/5.32.1.1#707032463aa0620fa17ec0d887f5fe41%1765850165.196",
|
||||
"protobuf/6.32.1#f481fd276fc23a33b85a3ed1e898b693%1765850161.038",
|
||||
"nasm/2.16.01#31e26f2ee3c4346ecd347911bd126904%1765850144.707",
|
||||
"msys2/cci.latest#eea83308ad7e9023f7318c60d5a9e6cb%1770199879.083",
|
||||
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
|
||||
"strawberryperl/5.32.1.1#707032463aa0620fa17ec0d887f5fe41%1756234281.733",
|
||||
"protobuf/6.32.1#f481fd276fc23a33b85a3ed1e898b693%1764863245.83",
|
||||
"nasm/2.16.01#31e26f2ee3c4346ecd347911bd126904%1756234232.901",
|
||||
"msys2/cci.latest#1996656c3c98e5765b25b60ff5cf77b4%1764840888.758",
|
||||
"m4/1.4.19#70dc8bbb33e981d119d2acc0175cf381%1763158052.846",
|
||||
"cmake/4.2.0#ae0a44f44a1ef9ab68fd4b3e9a1f8671%1765850153.937",
|
||||
"cmake/3.31.10#313d16a1aa16bbdb2ca0792467214b76%1765850153.479",
|
||||
"b2/5.3.3#107c15377719889654eb9a162a673975%1765850144.355",
|
||||
"cmake/4.2.0#ae0a44f44a1ef9ab68fd4b3e9a1f8671%1764175359.44",
|
||||
"cmake/3.31.10#313d16a1aa16bbdb2ca0792467214b76%1764175359.429",
|
||||
"b2/5.3.3#107c15377719889654eb9a162a673975%1756234226.28",
|
||||
"automake/1.16.5#b91b7c384c3deaa9d535be02da14d04f%1755524470.56",
|
||||
"autoconf/2.71#51077f068e61700d65bb05541ea1e4b0%1731054366.86",
|
||||
"abseil/20250127.0#99262a368bd01c0ccca8790dfced9719%1766517936.993"
|
||||
"abseil/20250127.0#9e8e8cfc89a1324139fc0ee3bd4d8c8c%1753819045.301"
|
||||
],
|
||||
"python_requires": [],
|
||||
"overrides": {
|
||||
"boost/1.90.0#d5e8defe7355494953be18524a7f135b": [
|
||||
null,
|
||||
"boost/1.90.0"
|
||||
],
|
||||
"protobuf/5.27.0": [
|
||||
"protobuf/6.32.1"
|
||||
],
|
||||
"lz4/1.9.4": [
|
||||
"lz4/1.10.0"
|
||||
],
|
||||
"boost/1.83.0": [
|
||||
"boost/1.88.0"
|
||||
],
|
||||
"sqlite3/3.44.2": [
|
||||
"sqlite3/3.49.1"
|
||||
],
|
||||
"boost/1.83.0": [
|
||||
"boost/1.90.0"
|
||||
],
|
||||
"lz4/[>=1.9.4 <2]": [
|
||||
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504"
|
||||
]
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
include(sanitizers)
|
||||
@@ -1,59 +0,0 @@
|
||||
include(default)
|
||||
{% set compiler, version, compiler_exe = detect_api.detect_default_compiler() %}
|
||||
{% set sanitizers = os.getenv("SANITIZERS") %}
|
||||
|
||||
[conf]
|
||||
{% if sanitizers %}
|
||||
{% if compiler == "gcc" %}
|
||||
{% if "address" in sanitizers or "thread" in sanitizers or "undefinedbehavior" in sanitizers %}
|
||||
{% set sanitizer_list = [] %}
|
||||
{% set model_code = "" %}
|
||||
{% set extra_cxxflags = ["-fno-omit-frame-pointer", "-O1", "-Wno-stringop-overflow"] %}
|
||||
|
||||
{% if "address" in sanitizers %}
|
||||
{% set _ = sanitizer_list.append("address") %}
|
||||
{% set model_code = "-mcmodel=large" %}
|
||||
{% elif "thread" in sanitizers %}
|
||||
{% set _ = sanitizer_list.append("thread") %}
|
||||
{% set model_code = "-mcmodel=medium" %}
|
||||
{% set _ = extra_cxxflags.append("-Wno-tsan") %}
|
||||
{% endif %}
|
||||
|
||||
{% if "undefinedbehavior" in sanitizers %}
|
||||
{% set _ = sanitizer_list.append("undefined") %}
|
||||
{% set _ = sanitizer_list.append("float-divide-by-zero") %}
|
||||
{% endif %}
|
||||
|
||||
{% set sanitizer_flags = "-fsanitize=" ~ ",".join(sanitizer_list) ~ " " ~ model_code %}
|
||||
|
||||
tools.build:cxxflags+=['{{sanitizer_flags}} {{" ".join(extra_cxxflags)}}']
|
||||
tools.build:sharedlinkflags+=['{{sanitizer_flags}}']
|
||||
tools.build:exelinkflags+=['{{sanitizer_flags}}']
|
||||
{% endif %}
|
||||
{% elif compiler == "apple-clang" or compiler == "clang" %}
|
||||
{% if "address" in sanitizers or "thread" in sanitizers or "undefinedbehavior" in sanitizers %}
|
||||
{% set sanitizer_list = [] %}
|
||||
{% set extra_cxxflags = ["-fno-omit-frame-pointer", "-O1"] %}
|
||||
|
||||
{% if "address" in sanitizers %}
|
||||
{% set _ = sanitizer_list.append("address") %}
|
||||
{% elif "thread" in sanitizers %}
|
||||
{% set _ = sanitizer_list.append("thread") %}
|
||||
{% endif %}
|
||||
|
||||
{% if "undefinedbehavior" in sanitizers %}
|
||||
{% set _ = sanitizer_list.append("undefined") %}
|
||||
{% set _ = sanitizer_list.append("float-divide-by-zero") %}
|
||||
{% set _ = sanitizer_list.append("unsigned-integer-overflow") %}
|
||||
{% endif %}
|
||||
|
||||
{% set sanitizer_flags = "-fsanitize=" ~ ",".join(sanitizer_list) %}
|
||||
|
||||
tools.build:cxxflags+=['{{sanitizer_flags}} {{" ".join(extra_cxxflags)}}']
|
||||
tools.build:sharedlinkflags+=['{{sanitizer_flags}}']
|
||||
tools.build:exelinkflags+=['{{sanitizer_flags}}']
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
tools.info.package_id:confs+=["tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags"]
|
||||
conanfile.py
@@ -32,14 +32,14 @@ class Xrpl(ConanFile):
|
||||
"grpc/1.72.0",
|
||||
"libarchive/3.8.1",
|
||||
"nudb/2.0.9",
|
||||
"openssl/3.5.5",
|
||||
"secp256k1/0.7.1",
|
||||
"openssl/3.5.4",
|
||||
"secp256k1/0.7.0",
|
||||
"soci/4.0.3",
|
||||
"zlib/1.3.1",
|
||||
]
|
||||
|
||||
test_requires = [
|
||||
"gtest/1.17.0",
|
||||
"doctest/2.4.12",
|
||||
]
|
||||
|
||||
tool_requires = [
|
||||
@@ -87,13 +87,7 @@ class Xrpl(ConanFile):
|
||||
"libarchive/*:with_xattr": False,
|
||||
"libarchive/*:with_zlib": False,
|
||||
"lz4/*:shared": False,
|
||||
"openssl/*:no_dtls": True,
|
||||
"openssl/*:no_ssl": True,
|
||||
"openssl/*:no_ssl3": True,
|
||||
"openssl/*:no_tls1": True,
|
||||
"openssl/*:no_tls1_1": True,
|
||||
"openssl/*:shared": False,
|
||||
"openssl/*:tls_security_level": 2,
|
||||
"protobuf/*:shared": False,
|
||||
"protobuf/*:with_zlib": True,
|
||||
"rocksdb/*:enable_sse": False,
|
||||
@@ -131,7 +125,7 @@ class Xrpl(ConanFile):
|
||||
transitive_headers_opt = (
|
||||
{"transitive_headers": True} if conan_version.split(".")[0] == "2" else {}
|
||||
)
|
||||
self.requires("boost/1.90.0", force=True, **transitive_headers_opt)
|
||||
self.requires("boost/1.88.0", force=True, **transitive_headers_opt)
|
||||
self.requires("date/3.0.4", **transitive_headers_opt)
|
||||
self.requires("lz4/1.10.0", force=True)
|
||||
self.requires("protobuf/6.32.1", force=True)
|
||||
@@ -188,10 +182,12 @@ class Xrpl(ConanFile):
|
||||
libxrpl.libs = [
|
||||
"xrpl",
|
||||
"xrpl.libpb",
|
||||
"ed25519",
|
||||
"secp256k1",
|
||||
]
|
||||
# TODO: Fix the protobufs to include each other relative to
|
||||
# `include/`, not `include/xrpl/proto/`.
|
||||
libxrpl.includedirs = ["include", "include/xrpl/proto"]
|
||||
# `include/`, not `include/ripple/proto/`.
|
||||
libxrpl.includedirs = ["include", "include/ripple/proto"]
|
||||
libxrpl.requires = [
|
||||
"boost::headers",
|
||||
"boost::chrono",
|
||||
@@ -203,6 +199,7 @@ class Xrpl(ConanFile):
|
||||
"boost::program_options",
|
||||
"boost::process",
|
||||
"boost::regex",
|
||||
"boost::system",
|
||||
"boost::thread",
|
||||
"date::date",
|
||||
"ed25519::ed25519",
|
||||
|
||||
@@ -1,305 +0,0 @@
|
||||
ignorePaths:
|
||||
- build/**
|
||||
- src/libxrpl/crypto
|
||||
- CMakeUserPresets.json
|
||||
- Doxyfile
|
||||
- docs/**/*.puml
|
||||
- cmake/**
|
||||
- LICENSE.md
|
||||
language: en
|
||||
allowCompoundWords: true # TODO (#6334)
|
||||
ignoreRandomStrings: true
|
||||
minWordLength: 5
|
||||
dictionaries:
|
||||
- cpp
|
||||
- en_US
|
||||
- en_GB
|
||||
ignoreRegExpList:
|
||||
- /\b[rs][1-9A-HJ-NP-Za-km-z]{25,34}/g # addresses and seeds
|
||||
- /\bC[A-Z0-9]{15}/g # CTIDs
|
||||
- /\b(XRPL|BEAST)_[A-Z_0-9]+_H_INCLUDED+/g # include guards
|
||||
- /\b(XRPL|BEAST)_[A-Z_0-9]+_H+/g # include guards
|
||||
- /::[a-z:_]+/g # things from other namespaces
|
||||
- /\blib[a-z]+/g # libraries
|
||||
- /\b[0-9]{4}-[0-9]{2}-[0-9]{2}[,:][A-Za-zÀ-ÖØ-öø-ÿ.\s]+/g # copyright dates
|
||||
- /\b[0-9]{4}[,:]?\s*[A-Za-zÀ-ÖØ-öø-ÿ.\s]+/g # copyright years
|
||||
- /\[[A-Za-z0-9-]+\]\(https:\/\/github.com\/[A-Za-z0-9-]+\)/g # Github usernames
|
||||
- /-[DWw][a-zA-Z0-9_-]+=/g # compile flags
|
||||
- /[\['"`]-[DWw][a-zA-Z0-9_-]+['"`\]]/g # compile flags
|
||||
- ABCDEFGHIJKLMNOPQRSTUVWXYZ
|
||||
- ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
|
||||
overrides:
|
||||
- filename: "**/*_test.cpp" # all test files
|
||||
ignoreRegExpList:
|
||||
- /"[^"]*"/g # double-quoted strings
|
||||
- /'[^']*'/g # single-quoted strings
|
||||
- /`[^`]*`/g # backtick strings
|
||||
suggestWords:
|
||||
- xprl->xrpl
|
||||
- xprld->xrpld # cspell: disable-line not sure what this problem is....
|
||||
- unsynched->unsynced # cspell: disable-line not sure what this problem is....
|
||||
- synched->synced
|
||||
- synch->sync
|
||||
words:
|
||||
- abempty
|
||||
- AMMID
|
||||
- amt
|
||||
- amts
|
||||
- asnode
|
||||
- asynchrony
|
||||
- attestation
|
||||
- authorises
|
||||
- autobridge
|
||||
- autobridged
|
||||
- autobridging
|
||||
- bimap
|
||||
- bindir
|
||||
- bookdir
|
||||
- Bougalis
|
||||
- Britto
|
||||
- Btrfs
|
||||
- canonicality
|
||||
- changespq
|
||||
- checkme
|
||||
- choco
|
||||
- chrono
|
||||
- citardauq
|
||||
- clawback
|
||||
- clawbacks
|
||||
- coeffs
|
||||
- coldwallet
|
||||
- compr
|
||||
- conanfile
|
||||
- conanrun
|
||||
- confs
|
||||
- connectability
|
||||
- coro
|
||||
- coros
|
||||
- cowid
|
||||
- cryptocondition
|
||||
- cryptoconditional
|
||||
- cryptoconditions
|
||||
- csprng
|
||||
- ctest
|
||||
- ctid
|
||||
- currenttxhash
|
||||
- daria
|
||||
- dcmake
|
||||
- dearmor
|
||||
- deleteme
|
||||
- demultiplexer
|
||||
- deserializaton
|
||||
- desync
|
||||
- desynced
|
||||
- determ
|
||||
- distro
|
||||
- doxyfile
|
||||
- dxrpl
|
||||
- endmacro
|
||||
- exceptioned
|
||||
- Falco
|
||||
- finalizers
|
||||
- firewalled
|
||||
- fmtdur
|
||||
- fsanitize
|
||||
- funclets
|
||||
- gcov
|
||||
- gcovr
|
||||
- ghead
|
||||
- Gnutella
|
||||
- gpgcheck
|
||||
- gpgkey
|
||||
- hotwallet
|
||||
- hwrap
|
||||
- ifndef
|
||||
- inequation
|
||||
- insuf
|
||||
- insuff
|
||||
- invasively
|
||||
- iou
|
||||
- ious
|
||||
- isrdc
|
||||
- itype
|
||||
- jemalloc
|
||||
- jlog
|
||||
- jtnofill
|
||||
- keylet
|
||||
- keylets
|
||||
- keyvadb
|
||||
- kwarg
|
||||
- kwargs
|
||||
- ledgerentry
|
||||
- ledgerhash
|
||||
- ledgerindex
|
||||
- leftw
|
||||
- legleux
|
||||
- levelization
|
||||
- levelized
|
||||
- libpb
|
||||
- libxrpl
|
||||
- llection
|
||||
- LOCALGOOD
|
||||
- logwstream
|
||||
- lseq
|
||||
- lsmf
|
||||
- ltype
|
||||
- mcmodel
|
||||
- MEMORYSTATUSEX
|
||||
- Merkle
|
||||
- Metafuncton
|
||||
- misprediction
|
||||
- mptbalance
|
||||
- MPTDEX
|
||||
- mptflags
|
||||
- mptid
|
||||
- mptissuance
|
||||
- mptissuanceid
|
||||
- mptoken
|
||||
- mptokenid
|
||||
- mptokenissuance
|
||||
- mptokens
|
||||
- mpts
|
||||
- mtgox
|
||||
- multisig
|
||||
- multisign
|
||||
- multisigned
|
||||
- Nakamoto
|
||||
- nftid
|
||||
- nftoffer
|
||||
- nftoken
|
||||
- nftokenid
|
||||
- nftokenpages
|
||||
- nftokens
|
||||
- nftpage
|
||||
- nikb
|
||||
- nonxrp
|
||||
- noripple
|
||||
- nudb
|
||||
- nullptr
|
||||
- nunl
|
||||
- Nyffenegger
|
||||
- ostr
|
||||
- pargs
|
||||
- partitioner
|
||||
- paychan
|
||||
- paychans
|
||||
- permdex
|
||||
- perminute
|
||||
- permissioned
|
||||
- pointee
|
||||
- populator
|
||||
- preauth
|
||||
- preauthorization
|
||||
- preauthorize
|
||||
- preauthorizes
|
||||
- preclaim
|
||||
- protobuf
|
||||
- protos
|
||||
- ptrs
|
||||
- pushd
|
||||
- pyenv
|
||||
- qalloc
|
||||
- queuable
|
||||
- Raphson
|
||||
- replayer
|
||||
- rerere
|
||||
- retriable
|
||||
- RIPD
|
||||
- ripdtop
|
||||
- rippleci
|
||||
- rippled
|
||||
- ripplerpc
|
||||
- rippletest
|
||||
- RLUSD
|
||||
- rngfill
|
||||
- rocksdb
|
||||
- Rohrs
|
||||
- roundings
|
||||
- sahyadri
|
||||
- Satoshi
|
||||
- scons
|
||||
- secp
|
||||
- sendq
|
||||
- seqit
|
||||
- sf
|
||||
- SFIELD
|
||||
- shamap
|
||||
- shamapitem
|
||||
- sidechain
|
||||
- SIGGOOD
|
||||
- sle
|
||||
- sles
|
||||
- soci
|
||||
- socidb
|
||||
- sslws
|
||||
- statsd
|
||||
- STATSDCOLLECTOR
|
||||
- stissue
|
||||
- stnum
|
||||
- stobj
|
||||
- stobject
|
||||
- stpath
|
||||
- stpathset
|
||||
- sttx
|
||||
- stvar
|
||||
- stvector
|
||||
- stxchainattestations
|
||||
- superpeer
|
||||
- superpeers
|
||||
- takergets
|
||||
- takerpays
|
||||
- ters
|
||||
- TMEndpointv2
|
||||
- trixie
|
||||
- tx
|
||||
- txid
|
||||
- txids
|
||||
- txjson
|
||||
- txn
|
||||
- txns
|
||||
- txs
|
||||
- UBSAN
|
||||
- ubsan
|
||||
- umant
|
||||
- unacquired
|
||||
- unambiguity
|
||||
- unauthorizes
|
||||
- unauthorizing
|
||||
- unergonomic
|
||||
- unfetched
|
||||
- unflatten
|
||||
- unfund
|
||||
- unimpair
|
||||
- unroutable
|
||||
- unscalable
|
||||
- unserviced
|
||||
- unshareable
|
||||
- unshares
|
||||
- unsquelch
|
||||
- unsquelched
|
||||
- unsquelching
|
||||
- unvalidated
|
||||
- unveto
|
||||
- unvetoed
|
||||
- upvotes
|
||||
- USDB
|
||||
- variadics
|
||||
- venv
|
||||
- vfalco
|
||||
- vinnie
|
||||
- wextra
|
||||
- wptr
|
||||
- writeme
|
||||
- wsrch
|
||||
- wthread
|
||||
- xbridge
|
||||
- xchain
|
||||
- ximinez
|
||||
- EXPECT_STREQ
|
||||
- XMACRO
|
||||
- xrpkuwait
|
||||
- xrpl
|
||||
- xrpld
|
||||
- xrplf
|
||||
- xxhash
|
||||
- xxhasher
|
||||
@@ -134,7 +134,7 @@ validation messages (_PAV_) received from each validator on the node's UNL. Note
|
||||
that the node will only count the validation messages that agree with its own
|
||||
validations.
|
||||
|
||||
We define the **PAV** as the Percentage of Agreed Validation
|
||||
We define the **PAV** as the **P**ercentage of **A**greed **V**alidation
|
||||
messages received for the last N ledgers, where N = 256 by default.
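Stated as a formula (this only restates the definition above; $A_v$ is shorthand introduced here for the count of agreed validation messages received from validator $v$ over the last $N$ ledgers):

$$
\mathrm{PAV}_v = \frac{A_v}{N} \times 100\%, \qquad N = 256 \text{ by default.}
$$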
|
||||
|
||||
When the PAV drops below the **_low-water mark_**, the validator is considered
|
||||
|
||||
@@ -43,14 +43,14 @@ alt phase == OPEN
|
||||
alt sqn%256==0
|
||||
CA -[#green]> RM: <font color=green>getValidations
|
||||
CA -[#green]> CA: <font color=green>create UNLModify Tx
|
||||
hnote over CA#lightgreen: use validations of the last 256 ledgers\nto figure out UNLModify Tx candidates.\nIf any, create UNLModify Tx, and add to TxSet.
|
||||
hnote over CA#lightgreen: use validatations of the last 256 ledgers\nto figure out UNLModify Tx candidates.\nIf any, create UNLModify Tx, and add to TxSet.
|
||||
end
|
||||
CA -> GC
|
||||
GC -> CA: propose
|
||||
deactivate CA
|
||||
end
|
||||
else phase == ESTABLISH
|
||||
hnote over GC: receive peer positions
|
||||
hnote over GC: receive peer postions
|
||||
GC -> GC : update our position
|
||||
GC -> CA : propose \n(if position changed)
|
||||
GC -> GC : check if have consensus
|
||||
|
||||
docs/build/sanitizers.md (vendored)
@@ -1,207 +0,0 @@
|
||||
# Sanitizer Configuration for Rippled
|
||||
|
||||
This document explains how to configure and run sanitizers (AddressSanitizer, UndefinedBehaviorSanitizer, ThreadSanitizer) with the xrpld project.
|
||||
Corresponding suppression files are located in the `sanitizers/suppressions` directory.
|
||||
|
||||
- [Sanitizer Configuration for Rippled](#sanitizer-configuration-for-rippled)
|
||||
- [Building with Sanitizers](#building-with-sanitizers)
|
||||
- [Summary](#summary)
|
||||
- [Build steps:](#build-steps)
|
||||
- [Install dependencies](#install-dependencies)
|
||||
- [Call CMake](#call-cmake)
|
||||
- [Build](#build)
|
||||
- [Running Tests with Sanitizers](#running-tests-with-sanitizers)
|
||||
- [AddressSanitizer (ASAN)](#addresssanitizer-asan)
|
||||
- [ThreadSanitizer (TSan)](#threadsanitizer-tsan)
|
||||
- [LeakSanitizer (LSan)](#leaksanitizer-lsan)
|
||||
- [UndefinedBehaviorSanitizer (UBSan)](#undefinedbehaviorsanitizer-ubsan)
|
||||
- [Suppression Files](#suppression-files)
|
||||
- [`asan.supp`](#asansupp)
|
||||
- [`lsan.supp`](#lsansupp)
|
||||
- [`ubsan.supp`](#ubsansupp)
|
||||
- [`tsan.supp`](#tsansupp)
|
||||
- [`sanitizer-ignorelist.txt`](#sanitizer-ignorelisttxt)
|
||||
- [Troubleshooting](#troubleshooting)
|
||||
- ["ASAN is ignoring requested \_\_asan_handle_no_return" warnings](#asan-is-ignoring-requested-__asan_handle_no_return-warnings)
|
||||
- [Sanitizer Mismatch Errors](#sanitizer-mismatch-errors)
|
||||
- [References](#references)
|
||||
|
||||
## Building with Sanitizers
|
||||
|
||||
### Summary
|
||||
|
||||
Follow the same instructions as mentioned in [BUILD.md](../../BUILD.md) but with the following changes:
|
||||
|
||||
1. Make sure you have a clean build directory.
|
||||
2. Set the `SANITIZERS` environment variable before calling `conan install` and `cmake`. Set it only once, so that both Conan and CMake read the same value.
|
||||
Example: `export SANITIZERS=address,undefinedbehavior`
|
||||
3. Optionally use `--profile:all sanitizers` with Conan to build dependencies with sanitizer instrumentation. Note that building with sanitizer-instrumented dependencies is slower but produces fewer false positives.
|
||||
4. Set `ASAN_OPTIONS`, `LSAN_OPTIONS`, `UBSAN_OPTIONS` and `TSAN_OPTIONS` environment variables to configure sanitizer behavior when running executables. [More details below](#running-tests-with-sanitizers).
|
||||
|
||||
---
|
||||
|
||||
### Build steps:
|
||||
|
||||
```bash
|
||||
cd /path/to/rippled
|
||||
rm -rf .build
|
||||
mkdir .build
|
||||
cd .build
|
||||
```
|
||||
|
||||
#### Install dependencies
|
||||
|
||||
The `SANITIZERS` environment variable is used by both Conan and CMake.
|
||||
|
||||
```bash
|
||||
export SANITIZERS=address,undefinedbehavior
|
||||
# Standard build (without instrumenting dependencies)
|
||||
conan install .. --output-folder . --build missing --settings build_type=Debug
|
||||
|
||||
# Or with sanitizer-instrumented dependencies (takes longer but fewer false positives)
|
||||
conan install .. --output-folder . --profile:all sanitizers --build missing --settings build_type=Debug
|
||||
```
|
||||
|
||||
> [!CAUTION]
|
||||
> Do not mix Address and Thread sanitizers - they are incompatible.
|
||||
|
||||
Since you already set the `SANITIZERS` environment variable when running Conan, the same values will be read in the next step.
|
||||
|
||||
#### Call CMake
|
||||
|
||||
```bash
|
||||
cmake .. -G Ninja \
|
||||
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
|
||||
-DCMAKE_BUILD_TYPE=Debug \
|
||||
-Dtests=ON -Dxrpld=ON
|
||||
```
|
||||
|
||||
#### Build
|
||||
|
||||
```bash
|
||||
cmake --build . --parallel 4
|
||||
```
|
||||
|
||||
## Running Tests with Sanitizers
|
||||
|
||||
### AddressSanitizer (ASAN)
|
||||
|
||||
**IMPORTANT**: ASAN with Boost produces many false positives. Use these options:
|
||||
|
||||
```bash
|
||||
export ASAN_OPTIONS="print_stacktrace=1:detect_container_overflow=0:suppressions=path/to/asan.supp:halt_on_error=0:log_path=asan.log"
|
||||
export LSAN_OPTIONS="suppressions=path/to/lsan.supp:halt_on_error=0:log_path=lsan.log"
|
||||
|
||||
# Run tests
|
||||
./xrpld --unittest --unittest-jobs=5
|
||||
```
|
||||
|
||||
**Why `detect_container_overflow=0`?**
|
||||
|
||||
- Boost intrusive containers (used in `aged_unordered_container`) trigger false positives
|
||||
- Boost context switching (used in `Workers.cpp`) confuses ASAN's stack tracking
|
||||
- We usually do not build Boost with ASAN (we do not want to instrument Boost or chase issues in Boost code), yet Boost containers are used in ASAN-instrumented rippled code, which generates false positives.
|
||||
- Building dependencies with ASAN instrumentation reduces false positives, but we avoid instrumenting dependencies like Boost because it is slow (to compile as well as to run tests) and not necessary.
|
||||
- See: https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow
|
||||
- More such flags are detailed [here](https://github.com/google/sanitizers/wiki/AddressSanitizerFlags)
|
||||
|
||||
### ThreadSanitizer (TSan)
|
||||
|
||||
```bash
|
||||
export TSAN_OPTIONS="suppressions=path/to/tsan.supp halt_on_error=0 log_path=tsan.log"
|
||||
|
||||
# Run tests
|
||||
./xrpld --unittest --unittest-jobs=5
|
||||
```
|
||||
|
||||
More details [here](https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual).
|
||||
|
||||
### LeakSanitizer (LSan)
|
||||
|
||||
LSan is automatically enabled with ASAN. To disable it:
|
||||
|
||||
```bash
|
||||
export ASAN_OPTIONS="detect_leaks=0"
|
||||
```
|
||||
|
||||
More details [here](https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer).
|
||||
|
||||
### UndefinedBehaviorSanitizer (UBSan)

```bash
export UBSAN_OPTIONS="suppressions=path/to/ubsan.supp:print_stacktrace=1:halt_on_error=0:log_path=ubsan.log"

# Run tests
./xrpld --unittest --unittest-jobs=5
```

More details [here](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html).

## Suppression Files

> [!NOTE]
> The linked suppression files contain more details.

### [`asan.supp`](../../sanitizers/suppressions/asan.supp)

- **Purpose**: Suppress AddressSanitizer (ASAN) errors only
- **Format**: `<suppression_type>:<pattern>`, where the pattern matches a function, library, or symbol name depending on the suppression type (see the illustrative entries below). Supported suppression types are:
  - interceptor_name
  - interceptor_via_fun
  - interceptor_via_lib
  - odr_violation
- **More info**: [AddressSanitizer](https://github.com/google/sanitizers/wiki/AddressSanitizer)
- **Note**: Cannot suppress stack-buffer-overflow, container-overflow, etc.
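
To make the format concrete, here are a few hypothetical entries in the `asan.supp` style (illustrative only; the patterns are not taken from the repository's actual file):

```text
# Suppress reports from a specific intercepted C library function
interceptor_name:memcpy
# Suppress reports whose call stack goes through a matching function
interceptor_via_fun:*context*
# Suppress reports originating from a given library
interceptor_via_lib:libcrypto
# Suppress a one-definition-rule violation for a matching symbol
odr_violation:xrpl::*
```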

### [`lsan.supp`](../../sanitizers/suppressions/lsan.supp)

- **Purpose**: Suppress LeakSanitizer (LSan) errors only
- **Format**: `leak:<pattern>` where the pattern matches function/file names
- **More info**: [LeakSanitizer](https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer)
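
A couple of hypothetical `leak:` entries, just to show the shape of the file:

```text
# Ignore leak reports whose stack contains a matching function
leak:CRYPTO_malloc
# Ignore leak reports attributed to a matching module or file
leak:libprotobuf
```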

### [`ubsan.supp`](../../sanitizers/suppressions/ubsan.supp)

- **Purpose**: Suppress UndefinedBehaviorSanitizer (UBSan) errors
- **Format**: `<error_type>:<pattern>` (e.g., `unsigned-integer-overflow:protobuf`)
- **Covers**: Intentional overflows in third-party libraries (protobuf, gRPC, the standard library)
- **More info**: [UBSan suppressions](https://clang.llvm.org/docs/SanitizerSpecialCaseList.html)
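
For illustration, entries follow the sanitizer special-case-list syntax; these examples are hypothetical rather than copied from the repository's `ubsan.supp`:

```text
# Intentional unsigned wraparound inside protobuf
unsigned-integer-overflow:protobuf
# Shift overflows in third-party hashing code
shift:*xxhash*
```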

### [`tsan.supp`](../../sanitizers/suppressions/tsan.supp)

- **Purpose**: Suppress ThreadSanitizer data race warnings
- **Format**: `race:<pattern>` where the pattern matches function/file names
- **More info**: [ThreadSanitizer suppressions](https://github.com/google/sanitizers/wiki/ThreadSanitizerSuppressions)
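
Hypothetical examples of the format (TSan also accepts other prefixes such as `thread:`, `mutex:`, `signal:`, and `deadlock:`):

```text
# Ignore data races reported in a matching function
race:xrpl::TaggedCache*
# Ignore data races reported anywhere in a matching source file
race:Workers.cpp
```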

### [`sanitizer-ignorelist.txt`](../../sanitizers/suppressions/sanitizer-ignorelist.txt)

- **Purpose**: Compile-time ignorelist for all sanitizers
- **Usage**: Passed via `-fsanitize-ignorelist=absolute/path/to/sanitizer-ignorelist.txt`
- **Format**: `<level>:<pattern>` (e.g., `src:Workers.cpp`)
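
Unlike the runtime suppression files above, the ignorelist is consumed by the compiler, so matching code is not instrumented at all. Typical `<level>` values are `src:` (whole source files) and `fun:` (function names). Below is a hedged sketch of one way to pass it through CMake flags; the exact flag plumbing used by this repository may differ:

```bash
# Sketch only: supply the ignorelist as an extra compiler flag (adjust paths to your layout)
cmake .. -G Ninja \
    -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
    -DCMAKE_BUILD_TYPE=Debug \
    -DCMAKE_CXX_FLAGS="-fsanitize-ignorelist=$PWD/../sanitizers/suppressions/sanitizer-ignorelist.txt" \
    -Dtests=ON -Dxrpld=ON
```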

## Troubleshooting

### "ASAN is ignoring requested \_\_asan_handle_no_return" warnings

These warnings appear when using Boost context switching and are harmless. They do indicate, however, that ASAN's stack tracking is confused, which can lead to false positives.

### Sanitizer Mismatch Errors

If you see undefined symbols like `___tsan_atomic_load` when building with ASAN:

**Problem**: Dependencies were built with a different sanitizer than the main project.

**Solution**: Rebuild everything with the same sanitizer:

```bash
rm -rf .build
# Then follow the build instructions above
```
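
A quick way to check whether a prebuilt dependency was instrumented, and with which sanitizer, is to look for sanitizer symbols in its objects. This is a sketch using standard binutils; the Conan cache path and library name are placeholders, not specifics from this repository:

```bash
# Undefined __asan_* / __tsan_* / __ubsan_* symbols reveal which sanitizer instrumented the library
nm -C ~/.conan2/p/*/p/lib/libgrpc.a 2>/dev/null | grep -oE '__(a|t|ub)san[a-z_]*' | sort -u
```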

After rebuilding and re-running the tests, review the log files: `asan.log.*`, `ubsan.log.*`, `tsan.log.*`

## References

- [AddressSanitizer Wiki](https://github.com/google/sanitizers/wiki/AddressSanitizer)
- [AddressSanitizer Flags](https://github.com/google/sanitizers/wiki/AddressSanitizerFlags)
- [Container Overflow Detection](https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow)
- [UndefinedBehavior Sanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html)
- [ThreadSanitizer](https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual)
@@ -189,7 +189,7 @@ validations. It checks this on every call to `timerEntry`.
- _Wrong Ledger_ indicates the node is not working on the correct prior ledger
and does not have it available. It requests that ledger from the network, but
continues to work towards consensus this round while waiting. If it had been
_proposing_, it will send a special "bow-out" proposal to its peers to indicate
_proposing_, it will send a special "bowout" proposal to its peers to indicate
its change in mode for the rest of this round. For the duration of the round,
it defers to peer positions for determining the consensus outcome as if it
were just _observing_.
@@ -515,7 +515,7 @@ are excerpts of the generic consensus implementation and of helper types that wi
interact with the concrete implementing class.

```{.cpp}
// Represents a transaction under dispute this round
// Represents a transction under dispute this round
template <class Tx_t, class NodeID_t> class DisputedTx;

// Represents how the node participates in Consensus this round

@@ -1,4 +1,5 @@
#pragma once
#ifndef XRPL_BASICS_ARCHIVE_H_INCLUDED
#define XRPL_BASICS_ARCHIVE_H_INCLUDED

#include <boost/filesystem.hpp>

@@ -12,6 +13,10 @@ namespace xrpl {
@throws runtime_error
*/
void
extractTarLz4(boost::filesystem::path const& src, boost::filesystem::path const& dst);
extractTarLz4(
boost::filesystem::path const& src,
boost::filesystem::path const& dst);

} // namespace xrpl

#endif

@@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
#ifndef XRPL_BASICS_BASICCONFIG_H_INCLUDED
|
||||
#define XRPL_BASICS_BASICCONFIG_H_INCLUDED
|
||||
|
||||
#include <xrpl/basics/contract.h>
|
||||
|
||||
@@ -13,7 +14,8 @@
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
using IniFileSections = std::unordered_map<std::string, std::vector<std::string>>;
|
||||
using IniFileSections =
|
||||
std::unordered_map<std::string, std::vector<std::string>>;
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
@@ -84,7 +86,8 @@ public:
|
||||
if (lines_.empty())
|
||||
return "";
|
||||
if (lines_.size() > 1)
|
||||
Throw<std::runtime_error>("A legacy value must have exactly one line. Section: " + name_);
|
||||
Throw<std::runtime_error>(
|
||||
"A legacy value must have exactly one line. Section: " + name_);
|
||||
return lines_[0];
|
||||
}
|
||||
|
||||
@@ -230,7 +233,10 @@ public:
|
||||
The previous value, if any, is overwritten.
|
||||
*/
|
||||
void
|
||||
overwrite(std::string const& section, std::string const& key, std::string const& value);
|
||||
overwrite(
|
||||
std::string const& section,
|
||||
std::string const& key,
|
||||
std::string const& value);
|
||||
|
||||
/** Remove all the key/value pairs from the section.
|
||||
*/
|
||||
@@ -268,7 +274,9 @@ public:
|
||||
bool
|
||||
had_trailing_comments() const
|
||||
{
|
||||
return std::any_of(map_.cbegin(), map_.cend(), [](auto s) { return s.second.had_trailing_comments(); });
|
||||
return std::any_of(map_.cbegin(), map_.cend(), [](auto s) {
|
||||
return s.second.had_trailing_comments();
|
||||
});
|
||||
}
|
||||
|
||||
protected:
|
||||
@@ -307,7 +315,10 @@ set(T& target, std::string const& name, Section const& section)
|
||||
*/
|
||||
template <class T>
|
||||
bool
|
||||
set(T& target, T const& defaultValue, std::string const& name, Section const& section)
|
||||
set(T& target,
|
||||
T const& defaultValue,
|
||||
std::string const& name,
|
||||
Section const& section)
|
||||
{
|
||||
bool found_and_valid = set<T>(target, name, section);
|
||||
if (!found_and_valid)
|
||||
@@ -322,7 +333,9 @@ set(T& target, T const& defaultValue, std::string const& name, Section const& se
|
||||
// NOTE This routine might be more clumsy than the previous two
|
||||
template <class T = std::string>
|
||||
T
|
||||
get(Section const& section, std::string const& name, T const& defaultValue = T{})
|
||||
get(Section const& section,
|
||||
std::string const& name,
|
||||
T const& defaultValue = T{})
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -368,3 +381,5 @@ get_if_exists<bool>(Section const& section, std::string const& name, bool& v)
|
||||
}
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
#endif
|
||||
|
||||
@@ -1,4 +1,5 @@
#pragma once
#ifndef XRPL_BASICS_BLOB_H_INCLUDED
#define XRPL_BASICS_BLOB_H_INCLUDED

#include <vector>

@@ -10,3 +11,5 @@ namespace xrpl {
using Blob = std::vector<unsigned char>;

} // namespace xrpl

#endif

@@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
#ifndef XRPL_BASICS_BUFFER_H_INCLUDED
|
||||
#define XRPL_BASICS_BUFFER_H_INCLUDED
|
||||
|
||||
#include <xrpl/basics/Slice.h>
|
||||
#include <xrpl/beast/utility/instrumentation.h>
|
||||
@@ -24,7 +25,8 @@ public:
|
||||
Buffer() = default;
|
||||
|
||||
/** Create an uninitialized buffer with the given size. */
|
||||
explicit Buffer(std::size_t size) : p_(size ? new std::uint8_t[size] : nullptr), size_(size)
|
||||
explicit Buffer(std::size_t size)
|
||||
: p_(size ? new std::uint8_t[size] : nullptr), size_(size)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -60,7 +62,8 @@ public:
|
||||
/** Move-construct.
|
||||
The other buffer is reset.
|
||||
*/
|
||||
Buffer(Buffer&& other) noexcept : p_(std::move(other.p_)), size_(other.size_)
|
||||
Buffer(Buffer&& other) noexcept
|
||||
: p_(std::move(other.p_)), size_(other.size_)
|
||||
{
|
||||
other.size_ = 0;
|
||||
}
|
||||
@@ -91,7 +94,8 @@ public:
|
||||
{
|
||||
// Ensure the slice isn't a subset of the buffer.
|
||||
XRPL_ASSERT(
|
||||
s.size() == 0 || size_ == 0 || s.data() < p_.get() || s.data() >= p_.get() + size_,
|
||||
s.size() == 0 || size_ == 0 || s.data() < p_.get() ||
|
||||
s.data() >= p_.get() + size_,
|
||||
"xrpl::Buffer::operator=(Slice) : input not a subset");
|
||||
|
||||
if (auto p = alloc(s.size()))
|
||||
@@ -212,3 +216,5 @@ operator!=(Buffer const& lhs, Buffer const& rhs) noexcept
|
||||
}
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
#endif
|
||||
|
||||
@@ -1,4 +1,5 @@
#pragma once
#ifndef XRPL_BASICS_BYTEUTILITIES_H_INCLUDED
#define XRPL_BASICS_BYTEUTILITIES_H_INCLUDED

namespace xrpl {

@@ -19,3 +20,5 @@ megabytes(T value) noexcept
static_assert(kilobytes(2) == 2048, "kilobytes(2) == 2048");
static_assert(megabytes(3) == 3145728, "megabytes(3) == 3145728");
} // namespace xrpl

#endif

@@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
#ifndef XRPL_COMPRESSIONALGORITHMS_H_INCLUDED
|
||||
#define XRPL_COMPRESSIONALGORITHMS_H_INCLUDED
|
||||
|
||||
#include <xrpl/basics/contract.h>
|
||||
|
||||
@@ -35,7 +36,10 @@ lz4Compress(void const* in, std::size_t inSize, BufferFactory&& bf)
|
||||
auto compressed = bf(outCapacity);
|
||||
|
||||
auto compressedSize = LZ4_compress_default(
|
||||
reinterpret_cast<char const*>(in), reinterpret_cast<char*>(compressed), inSize, outCapacity);
|
||||
reinterpret_cast<char const*>(in),
|
||||
reinterpret_cast<char*>(compressed),
|
||||
inSize,
|
||||
outCapacity);
|
||||
if (compressedSize == 0)
|
||||
Throw<std::runtime_error>("lz4 compress: failed");
|
||||
|
||||
@@ -66,8 +70,10 @@ lz4Decompress(
|
||||
Throw<std::runtime_error>("lz4Decompress: integer overflow (output)");
|
||||
|
||||
if (LZ4_decompress_safe(
|
||||
reinterpret_cast<char const*>(in), reinterpret_cast<char*>(decompressed), inSize, decompressedSize) !=
|
||||
decompressedSize)
|
||||
reinterpret_cast<char const*>(in),
|
||||
reinterpret_cast<char*>(decompressed),
|
||||
inSize,
|
||||
decompressedSize) != decompressedSize)
|
||||
Throw<std::runtime_error>("lz4Decompress: failed");
|
||||
|
||||
return decompressedSize;
|
||||
@@ -83,7 +89,11 @@ lz4Decompress(
|
||||
*/
|
||||
template <typename InputStream>
|
||||
std::size_t
|
||||
lz4Decompress(InputStream& in, std::size_t inSize, std::uint8_t* decompressed, std::size_t decompressedSize)
|
||||
lz4Decompress(
|
||||
InputStream& in,
|
||||
std::size_t inSize,
|
||||
std::uint8_t* decompressed,
|
||||
std::size_t decompressedSize)
|
||||
{
|
||||
std::vector<std::uint8_t> compressed;
|
||||
std::uint8_t const* chunk = nullptr;
|
||||
@@ -106,7 +116,9 @@ lz4Decompress(InputStream& in, std::size_t inSize, std::uint8_t* decompressed, s
|
||||
compressed.resize(inSize);
|
||||
}
|
||||
|
||||
chunkSize = chunkSize < (inSize - copiedInSize) ? chunkSize : (inSize - copiedInSize);
|
||||
chunkSize = chunkSize < (inSize - copiedInSize)
|
||||
? chunkSize
|
||||
: (inSize - copiedInSize);
|
||||
|
||||
std::copy(chunk, chunk + chunkSize, compressed.data() + copiedInSize);
|
||||
|
||||
@@ -123,7 +135,8 @@ lz4Decompress(InputStream& in, std::size_t inSize, std::uint8_t* decompressed, s
|
||||
if (in.ByteCount() > (currentBytes + copiedInSize))
|
||||
in.BackUp(in.ByteCount() - currentBytes - copiedInSize);
|
||||
|
||||
if ((copiedInSize == 0 && chunkSize < inSize) || (copiedInSize > 0 && copiedInSize != inSize))
|
||||
if ((copiedInSize == 0 && chunkSize < inSize) ||
|
||||
(copiedInSize > 0 && copiedInSize != inSize))
|
||||
Throw<std::runtime_error>("lz4 decompress: insufficient input size");
|
||||
|
||||
return lz4Decompress(chunk, inSize, decompressed, decompressedSize);
|
||||
@@ -132,3 +145,5 @@ lz4Decompress(InputStream& in, std::size_t inSize, std::uint8_t* decompressed, s
|
||||
} // namespace compression_algorithms
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
#endif // XRPL_COMPRESSIONALGORITHMS_H_INCLUDED
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
#ifndef XRPL_BASICS_COUNTEDOBJECT_H_INCLUDED
|
||||
#define XRPL_BASICS_COUNTEDOBJECT_H_INCLUDED
|
||||
|
||||
#include <xrpl/beast/type_name.h>
|
||||
|
||||
@@ -133,3 +134,5 @@ public:
|
||||
};
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
#endif
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
#ifndef XRPL_BASICS_DECAYINGSAMPLE_H_INCLUDED
|
||||
#define XRPL_BASICS_DECAYINGSAMPLE_H_INCLUDED
|
||||
|
||||
#include <chrono>
|
||||
#include <cmath>
|
||||
@@ -55,7 +56,9 @@ private:
|
||||
|
||||
if (m_value != value_type())
|
||||
{
|
||||
std::size_t elapsed = std::chrono::duration_cast<std::chrono::seconds>(now - m_when).count();
|
||||
std::size_t elapsed =
|
||||
std::chrono::duration_cast<std::chrono::seconds>(now - m_when)
|
||||
.count();
|
||||
|
||||
// A span larger than four times the window decays the
|
||||
// value to an insignificant amount so just reset it.
|
||||
@@ -129,3 +132,5 @@ private:
|
||||
};
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
#endif
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
#ifndef XRPL_BASICS_EXPECTED_H_INCLUDED
|
||||
#define XRPL_BASICS_EXPECTED_H_INCLUDED
|
||||
|
||||
#include <xrpl/basics/contract.h>
|
||||
|
||||
@@ -107,20 +108,23 @@ Unexpected(E (&)[N]) -> Unexpected<E const*>;
|
||||
|
||||
// Definition of Expected. All of the machinery comes from boost::result.
|
||||
template <class T, class E>
|
||||
class [[nodiscard]] Expected : private boost::outcome_v2::result<T, E, detail::throw_policy>
|
||||
class [[nodiscard]] Expected
|
||||
: private boost::outcome_v2::result<T, E, detail::throw_policy>
|
||||
{
|
||||
using Base = boost::outcome_v2::result<T, E, detail::throw_policy>;
|
||||
|
||||
public:
|
||||
template <typename U>
|
||||
requires std::convertible_to<U, T>
|
||||
constexpr Expected(U&& r) : Base(boost::outcome_v2::in_place_type_t<T>{}, std::forward<U>(r))
|
||||
constexpr Expected(U&& r)
|
||||
: Base(boost::outcome_v2::in_place_type_t<T>{}, std::forward<U>(r))
|
||||
{
|
||||
}
|
||||
|
||||
template <typename U>
|
||||
requires std::convertible_to<U, E> && (!std::is_reference_v<U>)
|
||||
constexpr Expected(Unexpected<U> e) : Base(boost::outcome_v2::in_place_type_t<E>{}, std::move(e.value()))
|
||||
constexpr Expected(Unexpected<U> e)
|
||||
: Base(boost::outcome_v2::in_place_type_t<E>{}, std::move(e.value()))
|
||||
{
|
||||
}
|
||||
|
||||
@@ -191,7 +195,8 @@ public:
|
||||
// Specialization of Expected<void, E>. Allows returning either success
|
||||
// (without a value) or the reason for the failure.
|
||||
template <class E>
|
||||
class [[nodiscard]] Expected<void, E> : private boost::outcome_v2::result<void, E, detail::throw_policy>
|
||||
class [[nodiscard]] Expected<void, E>
|
||||
: private boost::outcome_v2::result<void, E, detail::throw_policy>
|
||||
{
|
||||
using Base = boost::outcome_v2::result<void, E, detail::throw_policy>;
|
||||
|
||||
@@ -228,3 +233,5 @@ public:
|
||||
};
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
#endif // XRPL_BASICS_EXPECTED_H_INCLUDED
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
#ifndef XRPL_BASICS_FILEUTILITIES_H_INCLUDED
|
||||
#define XRPL_BASICS_FILEUTILITIES_H_INCLUDED
|
||||
|
||||
#include <boost/filesystem.hpp>
|
||||
#include <boost/system/error_code.hpp>
|
||||
@@ -14,6 +15,11 @@ getFileContents(
|
||||
std::optional<std::size_t> maxSize = std::nullopt);
|
||||
|
||||
void
|
||||
writeFileContents(boost::system::error_code& ec, boost::filesystem::path const& destPath, std::string const& contents);
|
||||
writeFileContents(
|
||||
boost::system::error_code& ec,
|
||||
boost::filesystem::path const& destPath,
|
||||
std::string const& contents);
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
#endif
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
#ifndef XRPL_BASICS_INTRUSIVEPOINTER_H_INCLUDED
|
||||
#define XRPL_BASICS_INTRUSIVEPOINTER_H_INCLUDED
|
||||
|
||||
#include <concepts>
|
||||
#include <cstdint>
|
||||
@@ -44,8 +45,8 @@ struct SharedIntrusiveAdoptNoIncrementTag
|
||||
//
|
||||
|
||||
template <class T>
|
||||
concept CAdoptTag =
|
||||
std::is_same_v<T, SharedIntrusiveAdoptIncrementStrongTag> || std::is_same_v<T, SharedIntrusiveAdoptNoIncrementTag>;
|
||||
concept CAdoptTag = std::is_same_v<T, SharedIntrusiveAdoptIncrementStrongTag> ||
|
||||
std::is_same_v<T, SharedIntrusiveAdoptNoIncrementTag>;
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
@@ -57,7 +58,7 @@ concept CAdoptTag =
|
||||
When the strong pointer count goes to zero, the "partialDestructor" is
|
||||
called. This can be used to destroy as much of the object as possible while
|
||||
still retaining the reference counts. For example, for SHAMapInnerNodes the
|
||||
children may be reset in that function. Note that std::shared_pointer WILL
|
||||
children may be reset in that function. Note that std::shared_poiner WILL
|
||||
run the destructor when the strong count reaches zero, but may not free the
|
||||
memory used by the object until the weak count reaches zero. In rippled, we
|
||||
typically allocate shared pointers with the `make_shared` function. When
|
||||
@@ -121,7 +122,9 @@ public:
|
||||
controlled by the rhs param.
|
||||
*/
|
||||
template <class TT>
|
||||
SharedIntrusive(StaticCastTagSharedIntrusive, SharedIntrusive<TT> const& rhs);
|
||||
SharedIntrusive(
|
||||
StaticCastTagSharedIntrusive,
|
||||
SharedIntrusive<TT> const& rhs);
|
||||
|
||||
/** Create a new SharedIntrusive by statically casting the pointer
|
||||
controlled by the rhs param.
|
||||
@@ -133,7 +136,9 @@ public:
|
||||
controlled by the rhs param.
|
||||
*/
|
||||
template <class TT>
|
||||
SharedIntrusive(DynamicCastTagSharedIntrusive, SharedIntrusive<TT> const& rhs);
|
||||
SharedIntrusive(
|
||||
DynamicCastTagSharedIntrusive,
|
||||
SharedIntrusive<TT> const& rhs);
|
||||
|
||||
/** Create a new SharedIntrusive by dynamically casting the pointer
|
||||
controlled by the rhs param.
|
||||
@@ -299,7 +304,9 @@ class SharedWeakUnion
|
||||
// Tagged pointer. Low bit determines if this is a strong or a weak
|
||||
// pointer. The low bit must be masked to zero when converting back to a
|
||||
// pointer. If the low bit is '1', this is a weak pointer.
|
||||
static_assert(alignof(T) >= 2, "Bad alignment: Combo pointer requires low bit to be zero");
|
||||
static_assert(
|
||||
alignof(T) >= 2,
|
||||
"Bad alignment: Combo pointer requires low bit to be zero");
|
||||
|
||||
public:
|
||||
SharedWeakUnion() = default;
|
||||
@@ -443,7 +450,9 @@ make_SharedIntrusive(Args&&... args)
|
||||
auto p = new TT(std::forward<Args>(args)...);
|
||||
|
||||
static_assert(
|
||||
noexcept(SharedIntrusive<TT>(std::declval<TT*>(), std::declval<SharedIntrusiveAdoptNoIncrementTag>())),
|
||||
noexcept(SharedIntrusive<TT>(
|
||||
std::declval<TT*>(),
|
||||
std::declval<SharedIntrusiveAdoptNoIncrementTag>())),
|
||||
"SharedIntrusive constructor should not throw or this can leak "
|
||||
"memory");
|
||||
|
||||
@@ -484,3 +493,4 @@ dynamic_pointer_cast(TT const& v)
|
||||
}
|
||||
} // namespace intr_ptr
|
||||
} // namespace xrpl
|
||||
#endif
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
#ifndef XRPL_BASICS_INTRUSIVEPOINTER_IPP_INCLUDED
|
||||
#define XRPL_BASICS_INTRUSIVEPOINTER_IPP_INCLUDED
|
||||
|
||||
#include <xrpl/basics/IntrusivePointer.h>
|
||||
#include <xrpl/basics/IntrusiveRefCounts.h>
|
||||
@@ -11,7 +12,9 @@ template <class T>
|
||||
template <CAdoptTag TAdoptTag>
|
||||
SharedIntrusive<T>::SharedIntrusive(T* p, TAdoptTag) noexcept : ptr_{p}
|
||||
{
|
||||
if constexpr (std::is_same_v<TAdoptTag, SharedIntrusiveAdoptIncrementStrongTag>)
|
||||
if constexpr (std::is_same_v<
|
||||
TAdoptTag,
|
||||
SharedIntrusiveAdoptIncrementStrongTag>)
|
||||
{
|
||||
if (p)
|
||||
p->addStrongRef();
|
||||
@@ -43,14 +46,16 @@ SharedIntrusive<T>::SharedIntrusive(SharedIntrusive<TT> const& rhs)
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive&& rhs) : ptr_{rhs.unsafeExchange(nullptr)}
|
||||
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive&& rhs)
|
||||
: ptr_{rhs.unsafeExchange(nullptr)}
|
||||
{
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
requires std::convertible_to<TT*, T*>
|
||||
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive<TT>&& rhs) : ptr_{rhs.unsafeExchange(nullptr)}
|
||||
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive<TT>&& rhs)
|
||||
: ptr_{rhs.unsafeExchange(nullptr)}
|
||||
{
|
||||
}
|
||||
template <class T>
|
||||
@@ -107,7 +112,9 @@ requires std::convertible_to<TT*, T*>
|
||||
SharedIntrusive<T>&
|
||||
SharedIntrusive<T>::operator=(SharedIntrusive<TT>&& rhs)
|
||||
{
|
||||
static_assert(!std::is_same_v<T, TT>, "This overload should not be instantiated for T == TT");
|
||||
static_assert(
|
||||
!std::is_same_v<T, TT>,
|
||||
"This overload should not be instantiated for T == TT");
|
||||
|
||||
unsafeReleaseAndStore(rhs.unsafeExchange(nullptr));
|
||||
return *this;
|
||||
@@ -132,7 +139,9 @@ template <CAdoptTag TAdoptTag>
|
||||
void
|
||||
SharedIntrusive<T>::adopt(T* p)
|
||||
{
|
||||
if constexpr (std::is_same_v<TAdoptTag, SharedIntrusiveAdoptIncrementStrongTag>)
|
||||
if constexpr (std::is_same_v<
|
||||
TAdoptTag,
|
||||
SharedIntrusiveAdoptIncrementStrongTag>)
|
||||
{
|
||||
if (p)
|
||||
p->addStrongRef();
|
||||
@@ -148,7 +157,9 @@ SharedIntrusive<T>::~SharedIntrusive()
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
SharedIntrusive<T>::SharedIntrusive(StaticCastTagSharedIntrusive, SharedIntrusive<TT> const& rhs)
|
||||
SharedIntrusive<T>::SharedIntrusive(
|
||||
StaticCastTagSharedIntrusive,
|
||||
SharedIntrusive<TT> const& rhs)
|
||||
: ptr_{[&] {
|
||||
auto p = static_cast<T*>(rhs.unsafeGetRawPtr());
|
||||
if (p)
|
||||
@@ -160,14 +171,18 @@ SharedIntrusive<T>::SharedIntrusive(StaticCastTagSharedIntrusive, SharedIntrusiv
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
SharedIntrusive<T>::SharedIntrusive(StaticCastTagSharedIntrusive, SharedIntrusive<TT>&& rhs)
|
||||
SharedIntrusive<T>::SharedIntrusive(
|
||||
StaticCastTagSharedIntrusive,
|
||||
SharedIntrusive<TT>&& rhs)
|
||||
: ptr_{static_cast<T*>(rhs.unsafeExchange(nullptr))}
|
||||
{
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
SharedIntrusive<T>::SharedIntrusive(DynamicCastTagSharedIntrusive, SharedIntrusive<TT> const& rhs)
|
||||
SharedIntrusive<T>::SharedIntrusive(
|
||||
DynamicCastTagSharedIntrusive,
|
||||
SharedIntrusive<TT> const& rhs)
|
||||
: ptr_{[&] {
|
||||
auto p = dynamic_cast<T*>(rhs.unsafeGetRawPtr());
|
||||
if (p)
|
||||
@@ -179,7 +194,9 @@ SharedIntrusive<T>::SharedIntrusive(DynamicCastTagSharedIntrusive, SharedIntrusi
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
SharedIntrusive<T>::SharedIntrusive(DynamicCastTagSharedIntrusive, SharedIntrusive<TT>&& rhs)
|
||||
SharedIntrusive<T>::SharedIntrusive(
|
||||
DynamicCastTagSharedIntrusive,
|
||||
SharedIntrusive<TT>&& rhs)
|
||||
{
|
||||
// This can be simplified without the `exchange`, but the `exchange` is kept
|
||||
// in anticipation of supporting atomic operations.
|
||||
@@ -298,7 +315,8 @@ WeakIntrusive<T>::WeakIntrusive(WeakIntrusive&& rhs) : ptr_{rhs.ptr_}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
WeakIntrusive<T>::WeakIntrusive(SharedIntrusive<T> const& rhs) : ptr_{rhs.unsafeGetRawPtr()}
|
||||
WeakIntrusive<T>::WeakIntrusive(SharedIntrusive<T> const& rhs)
|
||||
: ptr_{rhs.unsafeGetRawPtr()}
|
||||
{
|
||||
if (ptr_)
|
||||
ptr_->addWeakRef();
|
||||
@@ -702,3 +720,4 @@ SharedWeakUnion<T>::unsafeReleaseNoStore()
|
||||
}
|
||||
|
||||
} // namespace xrpl
|
||||
#endif
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
#ifndef XRPL_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED
|
||||
#define XRPL_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED
|
||||
|
||||
#include <xrpl/beast/utility/instrumentation.h>
|
||||
|
||||
@@ -159,19 +160,22 @@ private:
|
||||
See description of the `refCounts` field for a fuller description of
|
||||
this field.
|
||||
*/
|
||||
static constexpr FieldType partialDestroyStartedMask = (one << (FieldTypeBits - 1));
|
||||
static constexpr FieldType partialDestroyStartedMask =
|
||||
(one << (FieldTypeBits - 1));
|
||||
|
||||
/** Flag that is set when the partialDestroy function has finished running
|
||||
|
||||
See description of the `refCounts` field for a fuller description of
|
||||
this field.
|
||||
*/
|
||||
static constexpr FieldType partialDestroyFinishedMask = (one << (FieldTypeBits - 2));
|
||||
static constexpr FieldType partialDestroyFinishedMask =
|
||||
(one << (FieldTypeBits - 2));
|
||||
|
||||
/** Mask that will zero out all the `count` bits and leave the tag bits
|
||||
unchanged.
|
||||
*/
|
||||
static constexpr FieldType tagMask = partialDestroyStartedMask | partialDestroyFinishedMask;
|
||||
static constexpr FieldType tagMask =
|
||||
partialDestroyStartedMask | partialDestroyFinishedMask;
|
||||
|
||||
/** Mask that will zero out the `tag` bits and leave the count bits
|
||||
unchanged.
|
||||
@@ -180,11 +184,13 @@ private:
|
||||
|
||||
/** Mask that will zero out everything except the strong count.
|
||||
*/
|
||||
static constexpr FieldType strongMask = ((one << StrongCountNumBits) - 1) & valueMask;
|
||||
static constexpr FieldType strongMask =
|
||||
((one << StrongCountNumBits) - 1) & valueMask;
|
||||
|
||||
/** Mask that will zero out everything except the weak count.
|
||||
*/
|
||||
static constexpr FieldType weakMask = (((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask;
|
||||
static constexpr FieldType weakMask =
|
||||
(((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask;
|
||||
|
||||
/** Unpack the count and tag fields from the packed atomic integer form. */
|
||||
struct RefCountPair
|
||||
@@ -209,8 +215,10 @@ private:
|
||||
FieldType
|
||||
combinedValue() const noexcept;
|
||||
|
||||
static constexpr CountType maxStrongValue = static_cast<CountType>((one << StrongCountNumBits) - 1);
|
||||
static constexpr CountType maxWeakValue = static_cast<CountType>((one << WeakCountNumBits) - 1);
|
||||
static constexpr CountType maxStrongValue =
|
||||
static_cast<CountType>((one << StrongCountNumBits) - 1);
|
||||
static constexpr CountType maxWeakValue =
|
||||
static_cast<CountType>((one << WeakCountNumBits) - 1);
|
||||
/** Put an extra margin to detect when running up against limits.
|
||||
This is only used in debug code, and is useful if we reduce the
|
||||
number of bits in the strong and weak counts (to 16 and 14 bits).
|
||||
@@ -266,7 +274,8 @@ IntrusiveRefCounts::releaseStrongRef() const
|
||||
}
|
||||
}
|
||||
|
||||
if (refCounts.compare_exchange_weak(prevIntVal, nextIntVal, std::memory_order_acq_rel))
|
||||
if (refCounts.compare_exchange_weak(
|
||||
prevIntVal, nextIntVal, std::memory_order_acq_rel))
|
||||
{
|
||||
// Can't be in partial destroy because only decrementing the strong
|
||||
// count to zero can start a partial destroy, and that can't happen
|
||||
@@ -292,7 +301,7 @@ IntrusiveRefCounts::addWeakReleaseStrongRef() const
|
||||
// change the counts and flags (the count could be atomically changed, but
|
||||
// the flags depend on the current value of the counts).
|
||||
//
|
||||
// Note: If this becomes a perf bottleneck, the `partialDestroyStartedMask`
|
||||
// Note: If this becomes a perf bottleneck, the `partialDestoryStartedMask`
|
||||
// may be able to be set non-atomically. But it is easier to reason about
|
||||
// the code if the flag is set atomically.
|
||||
while (1)
|
||||
@@ -322,7 +331,8 @@ IntrusiveRefCounts::addWeakReleaseStrongRef() const
|
||||
action = partialDestroy;
|
||||
}
|
||||
}
|
||||
if (refCounts.compare_exchange_weak(prevIntVal, nextIntVal, std::memory_order_acq_rel))
|
||||
if (refCounts.compare_exchange_weak(
|
||||
prevIntVal, nextIntVal, std::memory_order_acq_rel))
|
||||
{
|
||||
XRPL_ASSERT(
|
||||
(!(prevIntVal & partialDestroyStartedMask)),
|
||||
@@ -366,7 +376,8 @@ IntrusiveRefCounts::checkoutStrongRefFromWeak() const noexcept
|
||||
auto curValue = RefCountPair{1, 1}.combinedValue();
|
||||
auto desiredValue = RefCountPair{2, 1}.combinedValue();
|
||||
|
||||
while (!refCounts.compare_exchange_weak(curValue, desiredValue, std::memory_order_acq_rel))
|
||||
while (!refCounts.compare_exchange_weak(
|
||||
curValue, desiredValue, std::memory_order_acq_rel))
|
||||
{
|
||||
RefCountPair const prev{curValue};
|
||||
if (!prev.strong)
|
||||
@@ -395,15 +406,20 @@ inline IntrusiveRefCounts::~IntrusiveRefCounts() noexcept
|
||||
{
|
||||
#ifndef NDEBUG
|
||||
auto v = refCounts.load(std::memory_order_acquire);
|
||||
XRPL_ASSERT((!(v & valueMask)), "xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : count must be zero");
|
||||
XRPL_ASSERT(
|
||||
(!(v & valueMask)),
|
||||
"xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : count must be zero");
|
||||
auto t = v & tagMask;
|
||||
XRPL_ASSERT((!t || t == tagMask), "xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : valid tag");
|
||||
XRPL_ASSERT(
|
||||
(!t || t == tagMask),
|
||||
"xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : valid tag");
|
||||
#endif
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
inline IntrusiveRefCounts::RefCountPair::RefCountPair(IntrusiveRefCounts::FieldType v) noexcept
|
||||
inline IntrusiveRefCounts::RefCountPair::RefCountPair(
|
||||
IntrusiveRefCounts::FieldType v) noexcept
|
||||
: strong{static_cast<CountType>(v & strongMask)}
|
||||
, weak{static_cast<CountType>((v & weakMask) >> StrongCountNumBits)}
|
||||
, partialDestroyStartedBit{v & partialDestroyStartedMask}
|
||||
@@ -433,8 +449,10 @@ IntrusiveRefCounts::RefCountPair::combinedValue() const noexcept
|
||||
(strong < checkStrongMaxValue && weak < checkWeakMaxValue),
|
||||
"xrpl::IntrusiveRefCounts::RefCountPair::combinedValue : inputs "
|
||||
"inside range");
|
||||
return (static_cast<IntrusiveRefCounts::FieldType>(weak) << IntrusiveRefCounts::StrongCountNumBits) |
|
||||
static_cast<IntrusiveRefCounts::FieldType>(strong) | partialDestroyStartedBit | partialDestroyFinishedBit;
|
||||
return (static_cast<IntrusiveRefCounts::FieldType>(weak)
|
||||
<< IntrusiveRefCounts::StrongCountNumBits) |
|
||||
static_cast<IntrusiveRefCounts::FieldType>(strong) |
|
||||
partialDestroyStartedBit | partialDestroyFinishedBit;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
@@ -442,9 +460,11 @@ inline void
|
||||
partialDestructorFinished(T** o)
|
||||
{
|
||||
T& self = **o;
|
||||
IntrusiveRefCounts::RefCountPair p = self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask);
|
||||
IntrusiveRefCounts::RefCountPair p =
|
||||
self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask);
|
||||
XRPL_ASSERT(
|
||||
(!p.partialDestroyFinishedBit && p.partialDestroyStartedBit && !p.strong),
|
||||
(!p.partialDestroyFinishedBit && p.partialDestroyStartedBit &&
|
||||
!p.strong),
|
||||
"xrpl::partialDestructorFinished : not a weak ref");
|
||||
if (!p.weak)
|
||||
{
|
||||
@@ -460,3 +480,4 @@ partialDestructorFinished(T** o)
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
} // namespace xrpl
|
||||
#endif
|
||||
|
||||
@@ -1,4 +1,5 @@
#pragma once
#ifndef XRPL_BASICS_KEYCACHE_H
#define XRPL_BASICS_KEYCACHE_H

#include <xrpl/basics/TaggedCache.h>
#include <xrpl/basics/base_uint.h>
@@ -8,3 +9,5 @@ namespace xrpl {
using KeyCache = TaggedCache<uint256, int, true>;

} // namespace xrpl

#endif // XRPL_BASICS_KEYCACHE_H

@@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
#ifndef XRPL_BASICS_LOCALVALUE_H_INCLUDED
|
||||
#define XRPL_BASICS_LOCALVALUE_H_INCLUDED
|
||||
|
||||
#include <boost/thread/tss.hpp>
|
||||
|
||||
@@ -54,7 +55,8 @@ template <class = void>
|
||||
boost::thread_specific_ptr<detail::LocalValues>&
|
||||
getLocalValues()
|
||||
{
|
||||
static boost::thread_specific_ptr<detail::LocalValues> tsp(&detail::LocalValues::cleanup);
|
||||
static boost::thread_specific_ptr<detail::LocalValues> tsp(
|
||||
&detail::LocalValues::cleanup);
|
||||
return tsp;
|
||||
}
|
||||
|
||||
@@ -103,6 +105,10 @@ LocalValue<T>::operator*()
|
||||
}
|
||||
|
||||
return *reinterpret_cast<T*>(
|
||||
lvs->values.emplace(this, std::make_unique<detail::LocalValues::Value<T>>(t_)).first->second->get());
|
||||
lvs->values
|
||||
.emplace(this, std::make_unique<detail::LocalValues::Value<T>>(t_))
|
||||
.first->second->get());
|
||||
}
|
||||
} // namespace xrpl
|
||||
|
||||
#endif
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
#ifndef XRPL_BASICS_LOG_H_INCLUDED
|
||||
#define XRPL_BASICS_LOG_H_INCLUDED
|
||||
|
||||
#include <xrpl/basics/UnorderedContainers.h>
|
||||
#include <xrpl/beast/utility/Journal.h>
|
||||
@@ -38,17 +39,22 @@ private:
|
||||
std::string partition_;
|
||||
|
||||
public:
|
||||
Sink(std::string const& partition, beast::severities::Severity thresh, Logs& logs);
|
||||
Sink(
|
||||
std::string const& partition,
|
||||
beast::severities::Severity thresh,
|
||||
Logs& logs);
|
||||
|
||||
Sink(Sink const&) = delete;
|
||||
Sink&
|
||||
operator=(Sink const&) = delete;
|
||||
|
||||
void
|
||||
write(beast::severities::Severity level, std::string const& text) override;
|
||||
write(beast::severities::Severity level, std::string const& text)
|
||||
override;
|
||||
|
||||
void
|
||||
writeAlways(beast::severities::Severity level, std::string const& text) override;
|
||||
writeAlways(beast::severities::Severity level, std::string const& text)
|
||||
override;
|
||||
};
|
||||
|
||||
/** Manages a system file containing logged output.
|
||||
@@ -134,7 +140,11 @@ private:
|
||||
};
|
||||
|
||||
std::mutex mutable mutex_;
|
||||
std::map<std::string, std::unique_ptr<beast::Journal::Sink>, boost::beast::iless> sinks_;
|
||||
std::map<
|
||||
std::string,
|
||||
std::unique_ptr<beast::Journal::Sink>,
|
||||
boost::beast::iless>
|
||||
sinks_;
|
||||
beast::severities::Severity thresh_;
|
||||
File file_;
|
||||
bool silent_ = false;
|
||||
@@ -170,7 +180,11 @@ public:
|
||||
partition_severities() const;
|
||||
|
||||
void
|
||||
write(beast::severities::Severity level, std::string const& partition, std::string const& text, bool console);
|
||||
write(
|
||||
beast::severities::Severity level,
|
||||
std::string const& partition,
|
||||
std::string const& text,
|
||||
bool console);
|
||||
|
||||
std::string
|
||||
rotate();
|
||||
@@ -187,7 +201,9 @@ public:
|
||||
}
|
||||
|
||||
virtual std::unique_ptr<beast::Journal::Sink>
|
||||
makeSink(std::string const& partition, beast::severities::Severity startingLevel);
|
||||
makeSink(
|
||||
std::string const& partition,
|
||||
beast::severities::Severity startingLevel);
|
||||
|
||||
public:
|
||||
static LogSeverity
|
||||
@@ -205,8 +221,7 @@ public:
|
||||
private:
|
||||
enum {
|
||||
// Maximum line length for log messages.
|
||||
// If the message exceeds this length it will be truncated with
|
||||
// ellipses.
|
||||
// If the message exceeds this length it will be truncated with elipses.
|
||||
maximumMessageCharacters = 12 * 1024
|
||||
};
|
||||
|
||||
@@ -257,3 +272,5 @@ beast::Journal
|
||||
debugLog();
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
#endif
|
||||
|
||||
@@ -1,4 +1,5 @@
#pragma once
#ifndef XRPL_BASICS_MATHUTILITIES_H_INCLUDED
#define XRPL_BASICS_MATHUTILITIES_H_INCLUDED

#include <algorithm>
#include <cassert>
@@ -44,3 +45,5 @@ static_assert(calculatePercent(50'000'001, 100'000'000) == 51);
static_assert(calculatePercent(99'999'999, 100'000'000) == 100);

} // namespace xrpl

#endif
