Mirror of https://github.com/XRPLF/rippled.git (synced 2026-04-25 21:48:03 +00:00)

Compare commits: 9 commits, develop...pratik/ote
| Author | SHA1 | Date |
| --- | --- | --- |
|  | a01b274352 |  |
|  | 193f5b39cb |  |
|  | db8111ef7c |  |
|  | 913a4b794c |  |
|  | accea17e9d |  |
|  | c6fa00fbe3 |  |
|  | bfb8f4f01a |  |
|  | 4b745a86b7 |  |
|  | ddf894dcb0 |  |
32 .clang-tidy
@@ -66,14 +66,14 @@ Checks: "-*,
bugprone-terminating-continue,
bugprone-throw-keyword-missing,
bugprone-too-small-loop-variable,
bugprone-unchecked-optional-access,
# bugprone-unchecked-optional-access, # see https://github.com/XRPLF/rippled/pull/6502
bugprone-undefined-memory-manipulation,
bugprone-undelegated-constructor,
bugprone-unhandled-exception-at-new,
bugprone-unhandled-self-assignment,
bugprone-unique-ptr-array-mismatch,
bugprone-unsafe-functions,
bugprone-use-after-move,
bugprone-use-after-move, # has issues
bugprone-unused-raii,
bugprone-unused-return-value,
bugprone-unused-local-non-trivial-variable,
@@ -97,22 +97,9 @@ Checks: "-*,
misc-throw-by-value-catch-by-reference,
misc-unused-alias-decls,
misc-unused-using-decls,
modernize-concat-nested-namespaces,
modernize-deprecated-headers,
modernize-make-shared,
modernize-make-unique,
modernize-pass-by-value,
modernize-type-traits,
modernize-use-designated-initializers,
modernize-use-emplace,
modernize-use-equals-default,
modernize-use-equals-delete,
modernize-use-nodiscard,
modernize-use-override,
modernize-use-ranges,
modernize-use-starts-ends-with,
modernize-use-std-numbers,
modernize-use-using,
modernize-deprecated-headers,
llvm-namespace-comment,
performance-faster-string-find,
performance-for-range-copy,
@@ -154,6 +141,19 @@ Checks: "-*,
# readability-inconsistent-declaration-parameter-name, # in this codebase this check will break a lot of arg names
# readability-static-accessed-through-instance, # this check is probably unnecessary. it makes the code less readable
# readability-identifier-naming, # https://github.com/XRPLF/rippled/pull/6571
#
# modernize-concat-nested-namespaces,
# modernize-pass-by-value,
# modernize-type-traits,
# modernize-use-designated-initializers,
# modernize-use-emplace,
# modernize-use-equals-default,
# modernize-use-equals-delete,
# modernize-use-override,
# modernize-use-ranges,
# modernize-use-starts-ends-with,
# modernize-use-std-numbers,
# modernize-use-using,
# ---
#
CheckOptions:
@@ -93,7 +93,6 @@ test.core > xrpl.basics
test.core > xrpl.core
test.core > xrpld.core
test.core > xrpl.json
test.core > xrpl.protocol
test.core > xrpl.rdb
test.core > xrpl.server
test.csf > xrpl.basics
10 .github/scripts/rename/binary.sh vendored
@@ -6,11 +6,11 @@ set -e
# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
fi

# This script changes the binary name from `rippled` to `xrpld`, and reverses
38 .github/scripts/rename/cmake.sh vendored
@@ -8,16 +8,16 @@ set -e
SED_COMMAND=sed
HEAD_COMMAND=head
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
    if ! command -v ghead &> /dev/null; then
        echo "Error: ghead is not installed. Please install it using 'brew install coreutils'."
        exit 1
    fi
    HEAD_COMMAND=ghead
fi

# This script renames CMake files from `RippleXXX.cmake` or `RippledXXX.cmake`
@@ -44,10 +44,10 @@ pushd "${DIRECTORY}"
find cmake -type f -name 'Rippled*.cmake' -exec bash -c 'mv "${1}" "${1/Rippled/Xrpl}"' - {} \;
find cmake -type f -name 'Ripple*.cmake' -exec bash -c 'mv "${1}" "${1/Ripple/Xrpl}"' - {} \;
if [ -e cmake/xrpl_add_test.cmake ]; then
    mv cmake/xrpl_add_test.cmake cmake/XrplAddTest.cmake
fi
if [ -e include/xrpl/proto/ripple.proto ]; then
    mv include/xrpl/proto/ripple.proto include/xrpl/proto/xrpl.proto
fi

# Rename inside the files.
@@ -71,14 +71,14 @@ ${SED_COMMAND} -i 's@xrpl/validator-keys-tool@ripple/validator-keys-tool@' cmake
# Ensure the name of the binary and config remain 'rippled' for now.
${SED_COMMAND} -i -E 's/xrpld(-example)?\.cfg/rippled\1.cfg/g' cmake/XrplInstall.cmake
if grep -q '"xrpld"' cmake/XrplCore.cmake; then
    # The script has been rerun, so just restore the name of the binary.
    ${SED_COMMAND} -i 's/"xrpld"/"rippled"/' cmake/XrplCore.cmake
elif ! grep -q '"rippled"' cmake/XrplCore.cmake; then
    ${HEAD_COMMAND} -n -1 cmake/XrplCore.cmake > cmake.tmp
    echo ' # For the time being, we will keep the name of the binary as it was.' >> cmake.tmp
    echo ' set_target_properties(xrpld PROPERTIES OUTPUT_NAME "rippled")' >> cmake.tmp
    tail -1 cmake/XrplCore.cmake >> cmake.tmp
    mv cmake.tmp cmake/XrplCore.cmake
fi

# Restore the symlink from 'xrpld' to 'rippled'.
28 .github/scripts/rename/config.sh vendored
@@ -6,11 +6,11 @@ set -e
# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
fi

# This script renames the config from `rippled.cfg` to `xrpld.cfg`, and updates
@@ -32,28 +32,28 @@ pushd "${DIRECTORY}"
# Add the xrpld.cfg to the .gitignore.
if ! grep -q 'xrpld.cfg' .gitignore; then
    ${SED_COMMAND} -i '/rippled.cfg/a\
/xrpld.cfg' .gitignore
fi

# Rename the files.
if [ -e rippled.cfg ]; then
    mv rippled.cfg xrpld.cfg
fi
if [ -e cfg/rippled-example.cfg ]; then
    mv cfg/rippled-example.cfg cfg/xrpld-example.cfg
fi

# Rename inside the files.
DIRECTORIES=("cfg" "cmake" "include" "src")
for DIRECTORY in "${DIRECTORIES[@]}"; do
    echo "Processing directory: ${DIRECTORY}"

    find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" -o -name "*.cmake" -o -name "*.txt" -o -name "*.cfg" -o -name "*.md" \) | while read -r FILE; do
        echo "Processing file: ${FILE}"
        ${SED_COMMAND} -i -E 's/rippled(-example)?[ .]cfg/xrpld\1.cfg/g' "${FILE}"
        ${SED_COMMAND} -i 's/rippleConfig/xrpldConfig/g' "${FILE}"
    done
done
${SED_COMMAND} -i 's/rippled/xrpld/g' cfg/xrpld-example.cfg
${SED_COMMAND} -i 's/rippled/xrpld/g' src/test/core/Config_test.cpp
54 .github/scripts/rename/copyright.sh vendored
@@ -6,11 +6,11 @@ set -e
# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
fi

# This script removes superfluous copyright notices in source and header files
@@ -43,56 +43,56 @@ ${SED_COMMAND} -i -E "s@\\\t@${PLACEHOLDER_TAB}@g" src/test/rpc/ValidatorInfo_te
# Process the include/ and src/ directories.
DIRECTORIES=("include" "src")
for DIRECTORY in "${DIRECTORIES[@]}"; do
    echo "Processing directory: ${DIRECTORY}"

    find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" -o -name "*.macro" \) | while read -r FILE; do
        echo "Processing file: ${FILE}"
        # Handle the cases where the copyright notice is enclosed in /* ... */
        # and usually surrounded by //---- and //======.
        ${SED_COMMAND} -z -i -E 's@^//-------+\n+@@' "${FILE}"
        ${SED_COMMAND} -z -i -E 's@^.*Copyright.+(Ripple|Bougalis|Falco|Hinnant|Null|Ritchford|XRPLF).+PERFORMANCE OF THIS SOFTWARE\.\n\*/\n+@@' "${FILE}" # cspell: ignore Bougalis Falco Hinnant Ritchford
        ${SED_COMMAND} -z -i -E 's@^//=======+\n+@@' "${FILE}"

        # Handle the cases where the copyright notice is commented out with //.
        ${SED_COMMAND} -z -i -E 's@^//\n// Copyright.+Falco \(vinnie dot falco at gmail dot com\)\n//\n+@@' "${FILE}" # cspell: ignore Vinnie Falco
    done
done

# Restore copyright notices that were removed from specific files, without
# restoring the verbiage that is already present in LICENSE.md. Ensure that if
# the script is run multiple times, duplicate notices are not added.
if ! grep -q 'Raw Material Software' include/xrpl/beast/core/CurrentThreadName.h; then
    echo -e "// Portions of this file are from JUCE (http://www.juce.com).\n// Copyright (c) 2013 - Raw Material Software Ltd.\n// Please visit http://www.juce.com\n\n$(cat include/xrpl/beast/core/CurrentThreadName.h)" > include/xrpl/beast/core/CurrentThreadName.h
fi
if ! grep -q 'Dev Null' src/test/app/NetworkID_test.cpp; then
    echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/app/NetworkID_test.cpp)" > src/test/app/NetworkID_test.cpp
fi
if ! grep -q 'Dev Null' src/test/app/tx/apply_test.cpp; then
    echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/app/tx/apply_test.cpp)" > src/test/app/tx/apply_test.cpp
fi
if ! grep -q 'Dev Null' src/test/rpc/ManifestRPC_test.cpp; then
    echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/rpc/ManifestRPC_test.cpp)" > src/test/rpc/ManifestRPC_test.cpp
fi
if ! grep -q 'Dev Null' src/test/rpc/ValidatorInfo_test.cpp; then
    echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/rpc/ValidatorInfo_test.cpp)" > src/test/rpc/ValidatorInfo_test.cpp
fi
if ! grep -q 'Dev Null' src/xrpld/rpc/handlers/server_info/Manifest.cpp; then
    echo -e "// Copyright (c) 2019 Dev Null Productions\n\n$(cat src/xrpld/rpc/handlers/server_info/Manifest.cpp)" > src/xrpld/rpc/handlers/server_info/Manifest.cpp
fi
if ! grep -q 'Dev Null' src/xrpld/rpc/handlers/admin/status/ValidatorInfo.cpp; then
    echo -e "// Copyright (c) 2019 Dev Null Productions\n\n$(cat src/xrpld/rpc/handlers/admin/status/ValidatorInfo.cpp)" > src/xrpld/rpc/handlers/admin/status/ValidatorInfo.cpp
fi
if ! grep -q 'Bougalis' include/xrpl/basics/SlabAllocator.h; then
    echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/SlabAllocator.h)" > include/xrpl/basics/SlabAllocator.h # cspell: ignore Nikolaos Bougalis nikb
fi
if ! grep -q 'Bougalis' include/xrpl/basics/spinlock.h; then
    echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/spinlock.h)" > include/xrpl/basics/spinlock.h # cspell: ignore Nikolaos Bougalis nikb
fi
if ! grep -q 'Bougalis' include/xrpl/basics/tagged_integer.h; then
    echo -e "// Copyright (c) 2014, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/tagged_integer.h)" > include/xrpl/basics/tagged_integer.h # cspell: ignore Nikolaos Bougalis nikb
fi
if ! grep -q 'Ritchford' include/xrpl/beast/utility/Zero.h; then
    echo -e "// Copyright (c) 2014, Tom Ritchford <tom@swirly.com>\n\n$(cat include/xrpl/beast/utility/Zero.h)" > include/xrpl/beast/utility/Zero.h # cspell: ignore Ritchford
fi

# Restore newlines and tabs in string literals in the affected file.
10 .github/scripts/rename/definitions.sh vendored
@@ -6,11 +6,11 @@ set -e
# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
fi

# This script renames definitions, such as include guards, in this project.
10 .github/scripts/rename/docs.sh vendored
@@ -6,11 +6,11 @@ set -e
# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
fi

# This script renames all remaining references to `ripple` and `rippled` to
4 .github/scripts/rename/include.sh vendored
@@ -23,8 +23,8 @@ fi
find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" \) | while read -r FILE; do
    echo "Processing file: ${FILE}"
    if grep -q "#ifndef XRPL_" "${FILE}"; then
        echo "Please replace all include guards by #pragma once."
        exit 1
    fi
done
echo "Checking complete."
26 .github/scripts/rename/namespace.sh vendored
@@ -6,11 +6,11 @@ set -e
# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
fi

# This script renames the `ripple` namespace to `xrpl` in this project.
@@ -35,15 +35,15 @@ pushd "${DIRECTORY}"
DIRECTORIES=("include" "src" "tests")
for DIRECTORY in "${DIRECTORIES[@]}"; do
    echo "Processing directory: ${DIRECTORY}"

    find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" -o -name "*.macro" \) | while read -r FILE; do
        echo "Processing file: ${FILE}"
        ${SED_COMMAND} -i 's/namespace ripple/namespace xrpl/g' "${FILE}"
        ${SED_COMMAND} -i 's/ripple::/xrpl::/g' "${FILE}"
        ${SED_COMMAND} -i 's/"ripple:/"xrpl::/g' "${FILE}"
        ${SED_COMMAND} -i -E 's/(BEAST_DEFINE_TESTSUITE.+)ripple(.+)/\1xrpl\2/g' "${FILE}"
    done
done

# Special case for NuDBFactory that has ripple twice in the test suite name.
16 .github/workflows/reusable-build-test-config.yml vendored
@@ -210,22 +210,6 @@ jobs:
          retention-days: 3
          if-no-files-found: error

      - name: Export server definitions
        if: ${{ runner.os != 'Windows' && !inputs.build_only && env.VOIDSTAR_ENABLED != 'true' }}
        working-directory: ${{ env.BUILD_DIR }}
        run: |
          set -o pipefail
          ./xrpld --definitions | python3 -m json.tool > server_definitions.json

      - name: Upload server definitions
        if: ${{ github.event.repository.visibility == 'public' && inputs.config_name == 'debian-bookworm-gcc-13-amd64-release' }}
        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
        with:
          name: server-definitions
          path: ${{ env.BUILD_DIR }}/server_definitions.json
          retention-days: 3
          if-no-files-found: error

      - name: Check linking (Linux)
        if: ${{ runner.os == 'Linux' && env.SANITIZERS_ENABLED == 'false' }}
        working-directory: ${{ env.BUILD_DIR }}
19 .github/workflows/reusable-clang-tidy-files.yml vendored
@@ -80,7 +80,7 @@ jobs:
        env:
          TARGETS: ${{ inputs.files != '' && inputs.files || 'src tests' }}
        run: |
          run-clang-tidy -j ${{ steps.nproc.outputs.nproc }} -p "${BUILD_DIR}" -quiet -fix -allow-no-checks ${TARGETS} 2>&1 | tee clang-tidy-output.txt
          run-clang-tidy -j ${{ steps.nproc.outputs.nproc }} -p "${BUILD_DIR}" -quiet -allow-no-checks ${TARGETS} 2>&1 | tee clang-tidy-output.txt

      - name: Upload clang-tidy output
        if: ${{ github.event.repository.visibility == 'public' && steps.run_clang_tidy.outcome != 'success' }}
@@ -90,21 +90,8 @@ jobs:
          path: clang-tidy-output.txt
          retention-days: 30

      - name: Generate git diff
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
        run: |
          git diff | tee clang-tidy-git-diff.txt

      - name: Upload clang-tidy diff output
        if: ${{ github.event.repository.visibility == 'public' && steps.run_clang_tidy.outcome != 'success' }}
        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
        with:
          name: clang-tidy-git-diff
          path: clang-tidy-git-diff.txt
          retention-days: 30

      - name: Create an issue
        if: ${{ steps.run_clang_tidy.outcome != 'success' && inputs.create_issue_on_failure }}
        if: steps.run_clang_tidy.outcome != 'success' && inputs.create_issue_on_failure
        id: create_issue
        shell: bash
        env:
@@ -169,7 +156,7 @@ jobs:
          rm -f create_issue.log issue.md clang-tidy-output.txt

      - name: Fail the workflow if clang-tidy failed
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
        if: steps.run_clang_tidy.outcome != 'success'
        run: |
          echo "Clang-tidy check failed!"
          exit 1
@@ -20,22 +20,6 @@ repos:
      - id: check-merge-conflict
        args: [--assume-in-merge]

  - repo: local
    hooks:
      - id: clang-tidy
        name: "clang-tidy (enable with: TIDY=1)"
        entry: ./bin/pre-commit/clang_tidy_check.py
        language: python
        types_or: [c++, c]
        exclude: ^include/xrpl/protocol_autogen
        pass_filenames: false # script determines the staged files itself
      - id: fix-include-style
        name: fix include style
        entry: ./bin/pre-commit/fix_include_style.py
        language: python
        types_or: [c++, c]
        exclude: ^include/xrpl/protocol_autogen/(transactions|ledger_entries)/

  - repo: https://github.com/pre-commit/mirrors-clang-format
    rev: cd481d7b0bfb5c7b3090c21846317f9a8262e891 # frozen: v22.1.0
    hooks:
@@ -60,12 +44,6 @@ repos:
    hooks:
      - id: black

  - repo: https://github.com/openstack/bashate
    rev: 5798d24d571676fc407e81df574c1ef57b520f23 # frozen: 2.1.1
    hooks:
      - id: bashate
        args: ["--ignore=E006"]

  - repo: https://github.com/streetsidesoftware/cspell-cli
    rev: a42085ade523f591dca134379a595e7859986445 # frozen: v9.7.0
    hooks:
@@ -267,26 +267,6 @@ See the [environment setup guide](./docs/build/environment.md#clang-tidy) for pl
Before running clang-tidy, you must build the project to generate required files (particularly protobuf headers). Refer to [`BUILD.md`](./BUILD.md) for build instructions.

#### Via pre-commit (recommended)

If you have already installed the pre-commit hooks (see above), you can run clang-tidy on your staged files using:

```
TIDY=1 pre-commit run clang-tidy
```

This runs clang-tidy locally with the same configuration/flags as CI, scoped to your staged C++ files. The `TIDY=1` environment variable is required to opt in — without it the hook is skipped.

You can also have clang-tidy run automatically on every `git commit` by setting `TIDY=1` in your shell environment:

```
export TIDY=1
```

With this set, the hook will run as part of `git commit` alongside the other pre-commit checks.

#### Manually

Then run clang-tidy on your local changes:

```
567 OpenTelemetryPlan/00-tracing-fundamentals.md Normal file
@@ -0,0 +1,567 @@
# Distributed Tracing Fundamentals

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Next**: [Architecture Analysis](./01-architecture-analysis.md)

---

## What is Distributed Tracing?

Distributed tracing is a method for tracking requests and other data objects as they flow through distributed systems. In a network like XRP Ledger, a single transaction touches multiple independent nodes—each with no shared memory or shared logging. Distributed tracing connects these dots.

**Without tracing:** You see isolated logs on each node with no way to correlate them.

**With tracing:** You see the complete journey of a transaction or an event across all nodes it touched.

---

## Actors and Actions at a Glance

### Actors

| Who (Plain English) | Technical Term |
| ---------------------------------------------- | --------------- |
| A single unit of work being tracked | Span |
| The complete journey of a request | Trace |
| Data that links spans across services | Trace Context |
| Code that creates spans and propagates context | Instrumentation |
| Service that receives and processes traces | Collector |
| Storage and visualization system | Backend (Tempo) |
| Decision logic for which traces to keep | Sampler |

### Actions

| What Happens (Plain English) | Technical Term |
| --------------------------------------- | ----------------------- |
| Start tracking a new operation | Create a Span |
| Connect a child operation to its parent | Set `parent_span_id` |
| Group all related operations together | Share a `trace_id` |
| Pass tracking data between services | Context Propagation |
| Decide whether to record a trace | Sampling (Head or Tail) |
| Send completed traces to storage | Export (OTLP) |

---

## Core Concepts

### 1. Trace

A **trace** represents the entire journey of a request through the system. It has a unique `trace_id` that stays constant across all nodes.

```
Trace ID: abc123
├── Node A: received transaction
├── Node B: relayed transaction
├── Node C: included in consensus
└── Node D: applied to ledger
```

### 2. Span

A **span** represents a single unit of work within a trace. Each span has:

| Attribute | Description | Example |
| ---------------- | -------------------------------- | -------------------------- |
| `trace_id` | Identifies the trace | `event123` |
| `span_id` | Unique identifier | `span456` |
| `parent_span_id` | Parent span (if any) | `p_span123` |
| `name` | Operation name | `rpc.submit` |
| `start_time` | When work began (local time) | `2024-01-15T10:30:00Z` |
| `end_time` | When work completed (local time) | `2024-01-15T10:30:00.050Z` |
| `attributes` | Key-value metadata | `tx.hash=ABC...` |
| `status` | OK or ERROR, plus a message | `OK` |

### 3. Trace Context

**Trace context** is the data that propagates between services to link spans together. It contains:

- `trace_id` - The trace this span belongs to
- `span_id` - The current span (becomes parent for child spans)
- `trace_flags` - Sampling decisions
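
These concepts map naturally onto plain data. The following C++ sketch mirrors the fields listed above; the `TraceContext` and `Span` types are illustrative stand-ins for this document, not an existing rippled or OpenTelemetry API.

```cpp
#include <array>
#include <cstdint>
#include <map>
#include <optional>
#include <string>

// Hypothetical plain-data mirrors of the concepts above; real code would
// use the OpenTelemetry SDK types instead.
struct TraceContext
{
    std::array<std::uint8_t, 16> traceId{};  // constant across all nodes
    std::array<std::uint8_t, 8> spanId{};    // the current span
    std::uint8_t traceFlags = 0;             // bit 0 = sampled
};

struct Span
{
    TraceContext context;  // trace_id + this span's own span_id
    std::optional<std::array<std::uint8_t, 8>> parentSpanId;  // absent for roots
    std::string name;                               // e.g. "rpc.submit"
    std::uint64_t startUnixNanos = 0;               // local clock
    std::uint64_t endUnixNanos = 0;
    std::map<std::string, std::string> attributes;  // e.g. {"tx.hash", "ABC..."}
    bool errored = false;                           // status: OK or ERROR
};
```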
---
## How Spans Form a Trace

Spans have parent-child relationships forming a tree structure:

```mermaid
flowchart TB
    subgraph trace["Trace: abc123"]
        A["tx.submit<br/>span_id: 001<br/>50ms"] --> B["tx.validate<br/>span_id: 002<br/>5ms"]
        A --> C["tx.relay<br/>span_id: 003<br/>10ms"]
        A --> D["tx.apply<br/>span_id: 004<br/>30ms"]
        D --> E["ledger.update<br/>span_id: 005<br/>20ms"]
    end

    style A fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style B fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style C fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style D fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style E fill:#bf360c,stroke:#8c2809,color:#ffffff
```

**Reading the diagram:**

- **tx.submit (blue, root)**: The top-level span representing the entire transaction submission; all other spans are its descendants.
- **tx.validate, tx.relay, tx.apply (green)**: Direct children of tx.submit, representing the three main stages -- validation, relay to peers, and application to the ledger.
- **ledger.update (red)**: A grandchild span nested under tx.apply, representing the actual ledger state mutation triggered by applying the transaction.
- **Arrows (parent to child)**: Each arrow indicates a parent-child span relationship where the parent's completion depends on the child finishing.

The same trace visualized as a **timeline (Gantt chart)**:

```
Time →    0ms       10ms      20ms      30ms      40ms      50ms
          ├───────────────────────────────────────────┤
tx.submit │▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓│
          ├─────┤
tx.valid  │▓▓▓▓▓│
          │     ├──────────┤
tx.relay  │     │▓▓▓▓▓▓▓▓▓▓│
          │     ├────────────────────────────┤
tx.apply  │     │▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓│
          │               ├──────────────────┤
ledger    │               │▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓│
```
---
## Span Relationships

Spans don't always form simple parent-child trees. Distributed tracing defines several relationship types to capture different causal patterns:

### 1. Parent-Child (ChildOf)

The default relationship. The parent span **depends on** or **contains** the child span. The child runs within the scope of the parent.

```
tx.submit (parent)
  ├── tx.validate (child) ← parent waits for this
  ├── tx.relay (child)    ← parent waits for this
  └── tx.apply (child)    ← parent waits for this
```

**When to use:** Synchronous calls, nested operations, any case where the parent's completion depends on the child.

### 2. Follows-From

A causal relationship where the first span **triggers** the second, but does **not wait** for it. The originator fires and moves on.

```
Time →

tx.receive [=======]
               ↓ triggers (follows-from)
tx.relay           [===========] ← runs independently
```

**When to use:** Asynchronous jobs, queued work, fire-and-forget patterns. For example, a node receives a transaction and queues it for relay — the relay span _follows from_ the receive span but the receiver doesn't wait for relaying to complete.

> **OpenTracing** defined `FollowsFrom` as a first-class reference type alongside `ChildOf`.
> **OpenTelemetry** represents this using **Span Links** with descriptive attributes instead (see below).

### 3. Span Links (Cross-Trace and Non-Hierarchical)

Links connect spans that are **causally related but not in a parent-child hierarchy**. Unlike parent-child, links can cross trace boundaries.

```
Trace A                          Trace B
──────                           ──────
batch.schedule                   batch.execute
  ├─ item.enqueue (span X) ──┐
  ├─ item.enqueue (span Y) ──┼──► process.item
  ├─ item.enqueue (span Z) ──┘    (links to X, Y, Z)
```

**Use cases:**

| Pattern | Description |
| -------------------- | --------------------------------------------------------------------------- |
| **Batch processing** | A batch span links back to all individual spans that contributed to it |
| **Fan-in** | An aggregation span links to the multiple producer spans it merges |
| **Fan-out** | Multiple downstream spans link back to the single span that triggered them |
| **Async handoff** | A deferred job links back to the request that queued it (follows-from) |
| **Cross-trace** | Correlating spans across independent traces (e.g., retries, related events) |

**Link structure:** Each link carries the target span's context plus optional attributes:

```
Link {
  trace_id: <target trace>
  span_id: <target span>
  attributes: { "link.description": "triggered by batch scheduler" }
}
```

### Relationship Summary

```mermaid
flowchart LR
    subgraph parent_child["Parent-Child"]
        direction TB
        P["Parent"] --> C["Child"]
    end

    subgraph follows_from["Follows-From"]
        direction TB
        A["Span A"] -.->|triggers| B["Span B"]
    end

    subgraph links["Span Links"]
        direction TB
        X["Span X\n(Trace 1)"] -.-|link| Y["Span Y\n(Trace 2)"]
    end

    parent_child ~~~ follows_from ~~~ links

    style P fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style C fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style A fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style B fill:#bf360c,stroke:#8c2809,color:#ffffff
    style X fill:#4a148c,stroke:#38006b,color:#ffffff
    style Y fill:#4a148c,stroke:#38006b,color:#ffffff
```

| Relationship | Same Trace? | Dependency? | OTel Mechanism |
| ---------------- | ----------- | -------------------------- | ----------------- |
| **Parent-Child** | Yes | Parent depends on child | `parent_span_id` |
| **Follows-From** | Usually | Causal but no dependency | Link + attributes |
| **Span Link** | Either | Correlation, no dependency | Link + attributes |
---
## Trace ID Generation

A `trace_id` is a 128-bit (16-byte) identifier that groups all spans belonging to one logical operation. How it's generated determines how easily you can find and correlate traces later.

### General Approaches

#### 1. Random (W3C Default)

Generate a random 128-bit ID when a trace starts. Standard approach for most services.

```
trace_id = random_128_bits()
```

| Pros | Cons |
| --------------------------- | --------------------------------------------- |
| Simple, standard | No natural correlation to domain events |
| Guaranteed unique per trace | If propagation is lost, trace is broken |
| Works with all OTel tooling | "Find trace for TX abc" requires index lookup |

#### 2. Deterministic (Derived from Domain Data)

Compute the trace_id from a hash of a natural identifier. Every node independently derives the **same** trace_id for the same event.

```
trace_id = SHA-256(domain_identifier)[0:16] // truncate to 128 bits
```

| Pros | Cons |
| --------------------------------------------------- | ---------------------------------------------------------- |
| Propagation-resilient — same ID computed everywhere | Same event processed twice (retry) shares trace_id |
| Natural search — domain ID maps directly to trace | Non-standard (tooling assumes random) |
| No coordination needed between nodes | 256→128 bit truncation (collision risk negligible at ~2⁶⁴) |

#### 3. Hybrid (Deterministic Prefix + Random Suffix)

First 8 bytes derived from domain data, last 8 bytes random.

```
trace_id = SHA-256(domain_identifier)[0:8] || random_64_bits()
```

| Pros | Cons |
| ------------------------------------------- | ---------------------------------------- |
| Prefix search: "find all traces for TX abc" | Must propagate to maintain full trace_id |
| Unique per processing instance | More complex generation logic |
| Retries get distinct trace_ids | Partial correlation only (prefix match) |

### XRPL Workflow Analysis

XRPL has a unique advantage: its core workflows produce **globally unique 256-bit hashes** that are known on every node. This makes deterministic trace_id generation practical in ways most systems can't achieve.

#### Natural Identifiers by Workflow

| Workflow | Natural Identifier | Size | Known at Start? | Same on All Nodes? |
| ------------------- | --------------------------------- | ---------- | ----------------------------- | -------------------------------- |
| **Transaction** | Transaction hash (`tid_`) | 256-bit | Yes — computed before signing | Yes — hash of canonical tx data |
| **Consensus round** | Previous ledger hash + ledger seq | 256+32 bit | Yes — known when round opens | Yes — all validators agree |
| **Validation** | Ledger hash being validated | 256-bit | Yes — from consensus result | Yes — same closed ledger |
| **Ledger catch-up** | Target ledger hash | 256-bit | Yes — we know what to fetch | Yes — identifies ledger globally |

#### Where These Identifiers Live in Code

```
Transaction:  STTx::getTransactionID() → uint256 tid_
              TMTransaction::rawTransaction → recompute hash from bytes

Consensus:    ConsensusProposal::prevLedger_ → uint256 (previous ledger hash)
              ConsensusProposal::position_ → uint256 (TxSet hash)
              LedgerHeader::seq → uint32_t (ledger sequence)

Validation:   STValidation::getLedgerHash() → uint256
              STValidation::getNodeID() → NodeID (160-bit)

Ledger fetch: InboundLedger constructor → uint256 hash, uint32_t seq
              TMGetLedger::ledgerHash → bytes (uint256)
```

### Recommended Strategy: Workflow-Scoped Deterministic

Each workflow type derives its trace_id from its natural domain identifier:

```
Transaction trace: trace_id = SHA-256("tx" || tx_hash)[0:16]
Consensus trace:   trace_id = SHA-256("cons" || prev_ledger_hash || ledger_seq)[0:16]
Ledger catch-up:   trace_id = SHA-256("fetch" || target_ledger_hash)[0:16]
```

The string prefix (`"tx"`, `"cons"`, `"fetch"`) prevents collisions between workflows that might share underlying hashes.

**Why this works for XRPL:**

1. **Propagation-resilient** — Even if a P2P message drops trace context, every node independently computes the same trace_id from the same tx_hash or ledger_hash. Spans still correlate.

2. **Zero-cost search** — "Show me the trace for transaction ABC" becomes a direct lookup: compute `SHA-256("tx" || ABC)[0:16]` and query. No secondary index needed.

3. **Cross-workflow linking via Span Links** — A consensus trace links to individual transaction traces. A validation span links to the consensus trace. This connects the full picture without forcing everything into one giant trace.
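
For illustration, here is a minimal C++ sketch of this derivation (not code from this change) using OpenSSL's `SHA256`, which rippled already links against; the `deriveTraceId` helper and its call shape are assumptions of this document:

```cpp
#include <openssl/sha.h>

#include <algorithm>
#include <array>
#include <cstdint>
#include <string>
#include <vector>

using TraceId = std::array<std::uint8_t, 16>;

// Hypothetical helper: trace_id = SHA-256(prefix || identifier)[0:16].
// 'prefix' is the workflow tag ("tx", "cons", "fetch") that keeps
// different workflows from colliding on the same underlying hash.
TraceId
deriveTraceId(std::string const& prefix, std::vector<std::uint8_t> const& identifier)
{
    std::vector<std::uint8_t> input(prefix.begin(), prefix.end());
    input.insert(input.end(), identifier.begin(), identifier.end());

    std::array<std::uint8_t, SHA256_DIGEST_LENGTH> digest{};
    SHA256(input.data(), input.size(), digest.data());

    TraceId id{};
    std::copy_n(digest.begin(), id.size(), id.begin());  // truncate to 128 bits
    return id;
}
```

Calling `deriveTraceId("tx", txHashBytes)` on any node yields the same 16 bytes, which is exactly the propagation-resilience property claimed above.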
### Cross-Workflow Correlation

Each workflow gets its own trace. Span Links tie them together:

```mermaid
flowchart TB
    subgraph tx_trace["Transaction Trace"]
        direction LR
        Tn["trace_id = f(tx_hash)"]:::note --> T1["tx.receive"] --> T2["tx.validate"] --> T3["tx.relay"]
    end

    subgraph cons_trace["Consensus Trace"]
        direction LR
        Cn["trace_id = f(prev_ledger, seq)"]:::note --> C1["cons.open"] --> C2["cons.propose"] --> C3["cons.accept"]
    end

    subgraph val_trace["Validation"]
        direction LR
        Vn["spans within consensus trace"]:::note --> V1["val.create"] --> V2["val.broadcast"]
    end

    subgraph fetch_trace["Catch-Up Trace"]
        direction LR
        Fn["trace_id = f(ledger_hash)"]:::note --> F1["fetch.request"] --> F2["fetch.receive"] --> F3["fetch.apply"]
    end

    C1 -.-|"span link\n(tx traces)"| T3
    C3 --> V1
    F1 -.-|"span link\n(target ledger)"| C3

    classDef note fill:none,stroke:#888,stroke-dasharray:5 5,color:#333,font-style:italic
    style T1 fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style T2 fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style T3 fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style C1 fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style C2 fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style C3 fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style V1 fill:#bf360c,stroke:#8c2809,color:#ffffff
    style V2 fill:#bf360c,stroke:#8c2809,color:#ffffff
    style F1 fill:#4a148c,stroke:#38006b,color:#ffffff
    style F2 fill:#4a148c,stroke:#38006b,color:#ffffff
    style F3 fill:#4a148c,stroke:#38006b,color:#ffffff
```

**Reading the diagram:**

- **Transaction Trace (blue)**: An independent trace whose `trace_id` is deterministically derived from the transaction hash. Contains receive, validate, and relay spans.
- **Consensus Trace (green)**: An independent trace whose `trace_id` is derived from the previous ledger hash and sequence number. Covers the open, propose, and accept phases.
- **Validation (red)**: Validation spans live within the consensus trace (not a separate trace). They are created after the accept phase completes.
- **Catch-Up Trace (purple)**: An independent trace for ledger acquisition, derived from the target ledger hash. Used when a node is behind and fetching missing ledgers.
- **Dotted arrows (span links)**: Cross-trace correlations. Consensus links to transaction traces it included; catch-up links to the consensus trace that produced the target ledger.
- **Solid arrow (C3 to V1)**: A parent-child relationship -- validation spans are direct children of the consensus accept span within the same trace.

**How a query flows:**

```
"Why was TX abc slow?"
  1. Compute trace_id = SHA-256("tx" || abc)[0:16]
  2. Find transaction trace → see it was included in consensus round N
  3. Follow span link → consensus trace for round N
  4. See which phase was slow (propose? accept?)
  5. If a node was catching up, follow link → catch-up trace
```

### Trade-offs to Consider

| Concern | Mitigation |
| ----------------------------- | ------------------------------------------------------------------------------------------------------------------------------ |
| **Retries get same trace_id** | Add `attempt` attribute to root span; spans have unique span_ids and timestamps |
| **256→128 bit truncation** | Birthday-bound collision at ~2⁶⁴ operations — negligible for XRPL's throughput |
| **Non-standard generation** | OTel spec allows any 16-byte non-zero value; tooling works on the hex string |
| **Hash computation cost** | SHA-256 is ~0.3μs per call; XRPL already computes these hashes for other purposes |
| **Late-binding identifiers** | Ledger hash isn't known until after consensus — validation spans use ledger_seq as fallback, then link to the consensus trace |
---
## Distributed Traces Across Nodes

In distributed systems like rippled, traces span **multiple independent nodes**. The trace context must be propagated in network messages:

```mermaid
sequenceDiagram
    participant Client
    participant NodeA as Node A
    participant NodeB as Node B
    participant NodeC as Node C

    Client->>NodeA: Submit TX<br/>(no trace context)

    Note over NodeA: Creates new trace<br/>trace_id: abc123<br/>span: tx.receive

    NodeA->>NodeB: Relay TX<br/>(trace_id: abc123, parent: 001)

    Note over NodeB: Creates child span<br/>span: tx.relay<br/>parent_span_id: 001

    NodeA->>NodeC: Relay TX<br/>(trace_id: abc123, parent: 001)

    Note over NodeC: Creates child span<br/>span: tx.relay<br/>parent_span_id: 001

    Note over NodeA,NodeC: All spans share trace_id: abc123<br/>enabling correlation across nodes
```

**Reading the diagram:**

- **Client**: The external entity that submits a transaction. It does not carry trace context -- the trace originates at the first node.
- **Node A**: The entry point that creates a new trace (trace_id: abc123) and the root span `tx.receive`. It relays the transaction to peers with trace context attached.
- **Node B and Node C**: Peer nodes that receive the relayed transaction along with the propagated trace context. Each creates a child span under Node A's span, preserving the same `trace_id`.
- **Arrows with trace context**: The relay messages carry `trace_id` and `parent_span_id`, allowing each downstream node to link its spans back to the originating span on Node A.
---
## Context Propagation

For traces to work across nodes, **trace context must be propagated** in messages.

### What's in the Context (~26 bytes)

| Field | Size | Description |
| ------------- | -------- | ------------------------------------------------------- |
| `trace_id` | 16 bytes | Identifies the entire trace (constant across all nodes) |
| `span_id` | 8 bytes | The sender's current span (becomes parent on receiver) |
| `trace_flags` | 1 byte | Sampling decision (bit 0 = sampled; bits 1-7 reserved) |
| `trace_state` | variable | Optional vendor-specific data (typically omitted) |

### How span_id Changes at Each Hop

Only **one** `span_id` travels in the context - the sender's current span. Each node:

1. Extracts the received `span_id` and uses it as the `parent_span_id`
2. Creates a **new** `span_id` for its own span
3. Sends its own `span_id` as the parent when forwarding

```
Node A                       Node B                       Node C
──────                       ──────                       ──────

Span AAA                     Span BBB                     Span CCC
   │                            │                            │
   ▼                            ▼                            ▼
Context out:                 Context out:                 Context out:
├─ trace_id: abc123          ├─ trace_id: abc123          ├─ trace_id: abc123
├─ span_id: AAA ──────────►  ├─ span_id: BBB ──────────►  ├─ span_id: CCC ──────►
└─ flags: 01                 └─ flags: 01                 └─ flags: 01
                                │                            │
                             parent = AAA                 parent = BBB
```

The `trace_id` stays constant, but `span_id` **changes at every hop** to maintain the parent-child chain.
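
The hop rule can be sketched in a few lines of C++; `makeChildContext` is a hypothetical helper, shown only to pin down which field changes at each hop (real instrumentation would use the OpenTelemetry SDK's propagators):

```cpp
#include <array>
#include <cstdint>
#include <random>

struct PropagatedContext
{
    std::array<std::uint8_t, 16> traceId{};  // never changes
    std::array<std::uint8_t, 8> spanId{};    // sender's current span
    std::uint8_t traceFlags = 0;
};

// Hypothetical: what a receiving node does with an incoming context.
// The incoming span_id becomes this node's parent_span_id; a fresh
// span_id is generated for the local span and forwarded downstream.
PropagatedContext
makeChildContext(PropagatedContext const& incoming, std::mt19937_64& rng)
{
    PropagatedContext out = incoming;  // keep trace_id and flags
    std::uint64_t fresh = rng();       // sketch; real code wants a secure RNG
    for (std::size_t i = 0; i < out.spanId.size(); ++i)
        out.spanId[i] = static_cast<std::uint8_t>(fresh >> (8 * i));
    return out;  // parent_span_id of the local span = incoming.spanId
}
```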
### Propagation Formats

There are two patterns:

### HTTP/RPC Headers (W3C Trace Context)

```
traceparent: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
             │  │                                │                │
             │  │                                │                └── Flags (sampled)
             │  │                                └── Parent span ID (16 hex)
             │  └── Trace ID (32 hex)
             └── Version
```
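Rendering that header is plain string work. Below is an illustrative C++ helper (the name `toTraceparent` is an assumption, not an existing API) that formats a context in the W3C layout shown above:

```cpp
#include <array>
#include <cstdint>
#include <cstdio>
#include <string>

// Illustrative only: "00-<32 hex trace id>-<16 hex span id>-<2 hex flags>".
std::string
toTraceparent(
    std::array<std::uint8_t, 16> const& traceId,
    std::array<std::uint8_t, 8> const& spanId,
    std::uint8_t flags)
{
    auto hex = [](auto const& bytes) {
        std::string out;
        for (auto b : bytes)
        {
            char buf[3];
            std::snprintf(buf, sizeof(buf), "%02x", static_cast<unsigned>(b));
            out += buf;
        }
        return out;
    };
    char fl[3];
    std::snprintf(fl, sizeof(fl), "%02x", static_cast<unsigned>(flags));
    return "00-" + hex(traceId) + "-" + hex(spanId) + "-" + fl;
}
```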
### Protocol Buffers (rippled P2P messages)

```protobuf
message TMTransaction {
    bytes rawTransaction = 1;
    // ... existing fields ...

    // Trace context extension
    bytes trace_parent = 100; // W3C traceparent
    bytes trace_state = 101;  // W3C tracestate
}
```
---
## Sampling

Not every trace needs to be recorded. **Sampling** reduces overhead:

### Head Sampling (at trace start)

```
Request arrives → Random 10% chance → Record or skip entire trace
```

- ✅ Low overhead
- ❌ May miss interesting traces

### Tail Sampling (after trace completes)

```
Trace completes → Collector evaluates:
  - Error?  → KEEP
  - Slow?   → KEEP
  - Normal? → Sample 10%
```

- ✅ Never loses important traces
- ❌ Higher memory usage at collector
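
One common way to implement the head decision is to derive it from the trace_id itself, so every node keeps or drops the same traces without coordination. A sketch, with a hypothetical `shouldSample` helper and the 10% ratio used above:

```cpp
#include <array>
#include <cstdint>

// Hypothetical head sampler: keep a trace iff the first 8 bytes of its
// trace_id, read as an integer, fall below ratio * 2^64 (ratio in [0, 1)).
// Because the decision depends only on trace_id, all nodes agree.
bool
shouldSample(std::array<std::uint8_t, 16> const& traceId, double ratio)
{
    std::uint64_t prefix = 0;
    for (std::size_t i = 0; i < 8; ++i)
        prefix = (prefix << 8) | traceId[i];
    return prefix <
        static_cast<std::uint64_t>(ratio * 18446744073709551616.0);  // 2^64
}
```

For example, `shouldSample(id, 0.10)` keeps roughly 10% of traces when trace ids are uniformly distributed, which holds for both random and SHA-256-derived ids.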
---
## Key Benefits for rippled

| Challenge | How Tracing Helps |
| ---------------------------------- | ---------------------------------------- |
| "Where is my transaction?" | Follow trace across all nodes it touched |
| "Why was consensus slow?" | See timing breakdown of each phase |
| "Which node is the bottleneck?" | Compare span durations across nodes |
| "What happened during the outage?" | Correlate errors across the network |
---
## Glossary

| Term | Definition |
| -------------------- | ------------------------------------------------------------------- |
| **Trace** | Complete journey of a request, identified by `trace_id` |
| **Span** | Single operation within a trace |
| **Parent-Child** | Span relationship where the parent depends on the child |
| **Follows-From** | Causal relationship where originator doesn't wait for the result |
| **Span Link** | Non-hierarchical connection between spans, possibly across traces |
| **Deterministic ID** | Trace ID derived from domain data (e.g., tx_hash) instead of random |
| **Context** | Data propagated between services (`trace_id`, `span_id`, flags) |
| **Instrumentation** | Code that creates spans and propagates context |
| **Collector** | Service that receives, processes, and exports traces |
| **Backend** | Storage/visualization system (Tempo) |
| **Head Sampling** | Sampling decision at trace start |
| **Tail Sampling** | Sampling decision after trace completes |
---

_Next: [Architecture Analysis](./01-architecture-analysis.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_
467 OpenTelemetryPlan/01-architecture-analysis.md Normal file
@@ -0,0 +1,467 @@
# Architecture Analysis

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Design Decisions](./02-design-decisions.md) | [Implementation Strategy](./03-implementation-strategy.md)

---

## 1.1 Current rippled Architecture Overview

> **WS** = WebSocket | **UNL** = Unique Node List | **TxQ** = Transaction Queue | **StatsD** = Statistics Daemon

The rippled node software consists of several interconnected components that need instrumentation for distributed tracing:

```mermaid
flowchart TB
    subgraph rippled["rippled Node"]
        subgraph services["Core Services"]
            RPC["RPC Server<br/>(HTTP/WS/gRPC)"]
            Overlay["Overlay<br/>(P2P Network)"]
            Consensus["Consensus<br/>(RCLConsensus)"]
            ValidatorList["ValidatorList<br/>(UNL Mgmt)"]
        end

        JobQueue["JobQueue<br/>(Thread Pool)"]

        subgraph processing["Processing Layer"]
            NetworkOPs["NetworkOPs<br/>(Tx Processing)"]
            LedgerMaster["LedgerMaster<br/>(Ledger Mgmt)"]
            NodeStore["NodeStore<br/>(Database)"]
            InboundLedgers["InboundLedgers<br/>(Ledger Sync)"]
        end

        subgraph appservices["Application Services"]
            PathFind["PathFinding<br/>(Payment Paths)"]
            TxQ["TxQ<br/>(Fee Escalation)"]
            LoadMgr["LoadManager<br/>(Fee/Load)"]
        end

        subgraph observability["Existing Observability"]
            PerfLog["PerfLog<br/>(JSON)"]
            Insight["Insight<br/>(StatsD)"]
            Logging["Logging<br/>(Journal)"]
        end

        services --> JobQueue
        JobQueue --> processing
        JobQueue --> appservices
    end

    style rippled fill:#424242,stroke:#212121,color:#ffffff
    style services fill:#1565c0,stroke:#0d47a1,color:#ffffff
    style processing fill:#2e7d32,stroke:#1b5e20,color:#ffffff
    style appservices fill:#6a1b9a,stroke:#4a148c,color:#ffffff
    style observability fill:#e65100,stroke:#bf360c,color:#ffffff
```

**Reading the diagram:**

- **Core Services (blue)**: The entry points into rippled -- RPC Server handles client requests, Overlay manages peer-to-peer networking, Consensus drives agreement, and ValidatorList manages trusted validators.
- **JobQueue (center)**: The asynchronous thread pool that decouples Core Services from the Processing and Application layers. All work flows through it.
- **Processing Layer (green)**: Core business logic -- NetworkOPs processes transactions, LedgerMaster manages ledger state, NodeStore handles persistence, and InboundLedgers synchronizes missing data.
- **Application Services (purple)**: Higher-level features -- PathFinding computes payment routes, TxQ manages fee-based queuing, and LoadManager tracks server load.
- **Existing Observability (orange)**: The current monitoring stack (PerfLog, Insight, Journal logging) that OpenTelemetry will complement, not replace.
- **Arrows (Services to JobQueue to layers)**: Work originates at Core Services, is enqueued onto the JobQueue, and dispatched to Processing or Application layers for execution.
---
## 1.1.1 Actors and Actions

### Actors

| Who (Plain English) | Technical Term |
| ----------------------------------------- | -------------------------- |
| Network node running XRPL software | rippled node |
| External client submitting requests | RPC Client |
| Network neighbor sharing data | Peer (PeerImp) |
| Request handler for client queries | RPC Server (ServerHandler) |
| Command executor for specific RPC methods | RPCHandler |
| Agreement process between nodes | Consensus (RCLConsensus) |
| Transaction processing coordinator | NetworkOPs |
| Background task scheduler | JobQueue |
| Ledger state manager | LedgerMaster |
| Payment route calculator | PathFinding (Pathfinder) |
| Transaction waiting room | TxQ (Transaction Queue) |
| Fee adjustment system | LoadManager |
| Trusted validator list manager | ValidatorList |
| Protocol upgrade tracker | AmendmentTable |
| Ledger state hash tree | SHAMap |
| Persistent key-value storage | NodeStore |

### Actions

| What Happens (Plain English) | Technical Term |
| ---------------------------------------------- | ---------------------- |
| Client sends a request to a node | `rpc.request` |
| Node executes a specific RPC command | `rpc.command.*` |
| Node receives a transaction from a peer | `tx.receive` |
| Node checks if a transaction is valid | `tx.validate` |
| Node forwards a transaction to neighbors | `tx.relay` |
| Nodes agree on which transactions to include | `consensus.round` |
| Consensus progresses through phases | `consensus.phase.*` |
| Node builds a new confirmed ledger | `ledger.build` |
| Node fetches missing ledger data from peers | `ledger.acquire` |
| Node computes payment routes | `pathfind.compute` |
| Node queues a transaction for later processing | `txq.enqueue` |
| Node increases fees due to high load | `fee.escalate` |
| Node fetches the latest trusted validator list | `validator.list.fetch` |
| Node votes on a protocol amendment | `amendment.vote` |
| Node synchronizes state tree data | `shamap.sync` |
---
|
||||
|
||||
## 1.2 Key Components for Instrumentation
|
||||
|
||||
> **TxQ** = Transaction Queue | **UNL** = Unique Node List
|
||||
|
||||
| Component | Location | Purpose | Trace Value |
|
||||
| ------------------ | ------------------------------------------ | ------------------------ | -------------------------------- |
|
||||
| **Overlay** | `src/xrpld/overlay/` | P2P communication | Message propagation timing |
|
||||
| **PeerImp** | `src/xrpld/overlay/detail/PeerImp.cpp` | Individual peer handling | Per-peer latency |
|
||||
| **RCLConsensus** | `src/xrpld/app/consensus/RCLConsensus.cpp` | Consensus algorithm | Round timing, phase analysis |
|
||||
| **NetworkOPs** | `src/xrpld/app/misc/NetworkOPs.cpp` | Transaction processing | Tx lifecycle tracking |
|
||||
| **ServerHandler** | `src/xrpld/rpc/detail/ServerHandler.cpp` | RPC entry point | Request latency |
|
||||
| **RPCHandler** | `src/xrpld/rpc/detail/RPCHandler.cpp` | Command execution | Per-command timing |
|
||||
| **JobQueue** | `src/xrpl/core/JobQueue.h` | Async task execution | Queue wait times |
|
||||
| **PathFinding** | `src/xrpld/app/paths/` | Payment path computation | Path latency, cache hits |
|
||||
| **TxQ** | `src/xrpld/app/misc/TxQ.cpp` | Transaction queue/fees | Queue depth, eviction rates |
|
||||
| **LoadManager** | `src/xrpld/app/main/LoadManager.cpp` | Fee escalation/load | Fee levels, load factors |
|
||||
| **InboundLedgers** | `src/xrpld/app/ledger/InboundLedgers.cpp` | Ledger acquisition | Sync time, peer reliability |
|
||||
| **ValidatorList** | `src/xrpld/app/misc/ValidatorList.cpp` | UNL management | List freshness, fetch failures |
|
||||
| **AmendmentTable** | `src/xrpld/app/misc/AmendmentTable.cpp` | Protocol amendments | Voting status, activation events |
|
||||
| **SHAMap** | `src/xrpld/shamap/` | State hash tree | Sync speed, missing nodes |
|
||||
|
||||
---
|
||||
|
||||
## 1.3 Transaction Flow Diagram
|
||||
|
||||
Transaction flow spans multiple nodes in the network. Each node creates linked spans to form a distributed trace:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant Client
|
||||
participant PeerA as Peer A (Receive)
|
||||
participant PeerB as Peer B (Relay)
|
||||
participant PeerC as Peer C (Validate)
|
||||
|
||||
Client->>PeerA: 1. Submit TX
|
||||
|
||||
rect rgb(230, 245, 255)
|
||||
Note over PeerA: tx.receive SPAN START
|
||||
PeerA->>PeerA: HashRouter Deduplication
|
||||
PeerA->>PeerA: tx.validate (child span)
|
||||
end
|
||||
|
||||
PeerA->>PeerB: 2. Relay TX (with trace ctx)
|
||||
|
||||
rect rgb(230, 245, 255)
|
||||
Note over PeerB: tx.receive (linked span)
|
||||
end
|
||||
|
||||
PeerB->>PeerC: 3. Relay TX
|
||||
|
||||
rect rgb(230, 245, 255)
|
||||
Note over PeerC: tx.receive (linked span)
|
||||
PeerC->>PeerC: tx.process
|
||||
end
|
||||
|
||||
Note over Client,PeerC: DISTRIBUTED TRACE (same trace_id: abc123)
|
||||
```
|
||||
|
||||
**Reading the diagram:**
|
||||
|
||||
- **Client**: The external entity that submits a transaction to Peer A. It has no trace context -- the trace starts at the first node.
|
||||
- **Peer A (Receive)**: The entry node that creates the root span `tx.receive`, runs HashRouter deduplication to avoid processing duplicates, and creates a child `tx.validate` span.
|
||||
- **Peer A to Peer B arrow**: The relay message carries trace context (trace_id + parent span_id), enabling Peer B to create a linked span under the same trace.
|
||||
- **Peer B (Relay)**: Receives the transaction and trace context, creates a `tx.receive` span linked to Peer A's trace, then relays onward.
|
||||
- **Peer C (Validate)**: Final hop in this example. Creates a linked `tx.receive` span and runs `tx.process` to fully process the transaction.
|
||||
- **Blue rectangles**: Highlight the span boundaries on each node, showing where instrumentation creates and closes spans.
|
||||
|
||||
### Trace Structure
|
||||
|
||||
```
|
||||
trace_id: abc123
|
||||
├── span: tx.receive (Peer A)
|
||||
│ ├── span: tx.validate
|
||||
│ └── span: tx.relay
|
||||
├── span: tx.receive (Peer B) [parent: Peer A]
|
||||
│ └── span: tx.relay
|
||||
└── span: tx.receive (Peer C) [parent: Peer B]
|
||||
└── span: tx.process
|
||||
```
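
Conceptually, each node opens its `tx.receive` span with the remote span context from the relay message as parent -- that is what stitches the per-node spans into a single trace. A minimal sketch of the receive side using the OTel C++ API; `extractTraceContext()` and `txHashHex()` are hypothetical helpers, not existing rippled functions:

```cpp
#include <opentelemetry/trace/provider.h>

namespace trace_api = opentelemetry::trace;

void
onTransactionReceived(protocol::TMTransaction const& m)
{
    auto tracer =
        trace_api::Provider::GetTracerProvider()->GetTracer("rippled");

    // Parent context deserialized from the relay message (see Section 2.5).
    trace_api::StartSpanOptions opts;
    opts.parent = extractTraceContext(m);  // hypothetical: bytes -> SpanContext

    auto span = tracer->StartSpan("tx.receive", opts);
    span->SetAttribute("xrpl.tx.hash", txHashHex(m));  // hypothetical helper
    // ... deduplicate, validate, relay ...
    span->End();
}
```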

---

## 1.4 Consensus Round Flow

Consensus rounds are multi-phase operations that benefit significantly from tracing:

```mermaid
flowchart TB
    subgraph round["consensus.round (root span)"]
        attrs["Attributes:<br/>xrpl.consensus.ledger.seq = 12345678<br/>xrpl.consensus.mode = proposing<br/>xrpl.consensus.proposers = 35"]

        subgraph open["consensus.phase.open"]
            open_desc["Duration: ~3s<br/>Waiting for transactions"]
        end

        subgraph establish["consensus.phase.establish"]
            est_attrs["proposals_received = 28<br/>disputes_resolved = 3"]
            est_children["├── consensus.proposal.receive (×28)<br/>├── consensus.proposal.send (×1)<br/>└── consensus.dispute.resolve (×3)"]
        end

        subgraph accept["consensus.phase.accept"]
            acc_attrs["transactions_applied = 150<br/>ledger.hash = DEF456..."]
            acc_children["├── ledger.build<br/>└── ledger.validate"]
        end

        attrs --> open
        open --> establish
        establish --> accept
    end

    style round fill:#f57f17,stroke:#e65100,color:#ffffff
    style open fill:#1565c0,stroke:#0d47a1,color:#ffffff
    style establish fill:#2e7d32,stroke:#1b5e20,color:#ffffff
    style accept fill:#c2185b,stroke:#880e4f,color:#ffffff
```

**Reading the diagram:**

- **consensus.round (orange, root span)**: The top-level span encompassing the entire consensus round, with attributes like ledger sequence, mode, and proposer count.
- **consensus.phase.open (blue)**: The first phase where the node waits (~3s) to collect incoming transactions before proposing.
- **consensus.phase.establish (green)**: The negotiation phase where validators exchange proposals, resolve disputes, and converge on a transaction set. Child spans track each proposal received/sent and each dispute resolved.
- **consensus.phase.accept (pink)**: The final phase where the agreed transaction set is applied, a new ledger is built, and the ledger is validated. Child spans cover `ledger.build` and `ledger.validate`.
- **Arrows (open to establish to accept)**: The sequential flow through the three consensus phases. Each phase must complete before the next begins.
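
Because the phases strictly nest inside the round, the parent-child relationships fall out naturally from scoped spans. A minimal sketch with the OTel C++ API, assuming the span names from Section 2.3 (the surrounding consensus logic is elided):

```cpp
#include <opentelemetry/trace/provider.h>

namespace trace_api = opentelemetry::trace;

void
runConsensusRound()
{
    auto tracer =
        trace_api::Provider::GetTracerProvider()->GetTracer("rippled");

    auto round = tracer->StartSpan("consensus.round");
    {
        // While the round span is active, new spans parent under it.
        auto scope = trace_api::Tracer::WithActiveSpan(round);

        auto open = tracer->StartSpan("consensus.phase.open");
        // ... collect transactions ...
        open->End();

        auto establish = tracer->StartSpan("consensus.phase.establish");
        // ... exchange proposals, resolve disputes ...
        establish->End();

        auto accept = tracer->StartSpan("consensus.phase.accept");
        // ... build and validate the new ledger ...
        accept->End();
    }
    round->End();
}
```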

---

## 1.5 RPC Request Flow

> **WS** = WebSocket

RPC requests support W3C Trace Context headers for distributed tracing across services:

```mermaid
flowchart TB
    subgraph request["rpc.request (root span)"]
        http["HTTP Request — POST /<br/>traceparent:<br/>00-abc123...-def456...-01"]

        attrs["Attributes:<br/>http.method = POST<br/>net.peer.ip = 192.168.1.100<br/>xrpl.rpc.command = submit"]

        subgraph enqueue["jobqueue.enqueue"]
            job_attr["xrpl.job.type = jtCLIENT_RPC"]
        end

        subgraph command["rpc.command.submit"]
            cmd_attrs["xrpl.rpc.version = 2<br/>xrpl.rpc.role = user"]
            cmd_children["├── tx.deserialize<br/>├── tx.validate_local<br/>└── tx.submit_to_network"]
        end

        response["Response: 200 OK<br/>Duration: 45ms"]

        http --> attrs
        attrs --> enqueue
        enqueue --> command
        command --> response
    end

    style request fill:#2e7d32,stroke:#1b5e20,color:#ffffff
    style enqueue fill:#1565c0,stroke:#0d47a1,color:#ffffff
    style command fill:#e65100,stroke:#bf360c,color:#ffffff
```

**Reading the diagram:**

- **rpc.request (green, root span)**: The outermost span representing the full RPC request lifecycle, from HTTP receipt to response. Carries the W3C `traceparent` header for distributed tracing.
- **HTTP Request node**: Shows the incoming POST request with its `traceparent` header and extracted attributes (method, peer IP, command name).
- **jobqueue.enqueue (blue)**: The span covering the asynchronous handoff from the RPC thread to the JobQueue worker thread. The trace context is preserved across this async boundary.
- **rpc.command.submit (orange)**: The span for the actual command execution, with child spans for deserialization, local validation, and network submission.
- **Response node**: The final output with HTTP status and total duration, marking the end of the root span.
- **Arrows (top to bottom)**: The sequential processing pipeline -- receive request, extract attributes, enqueue job, execute command, return response.
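
Extracting the inbound `traceparent` header amounts to one propagator call. A minimal sketch using the W3C propagator shipped with opentelemetry-cpp; `HttpRequest` and its `header()` accessor are illustrative stand-ins for rippled's actual request type:

```cpp
#include <opentelemetry/context/runtime_context.h>
#include <opentelemetry/trace/propagation/http_trace_context.h>

namespace ctx = opentelemetry::context;
namespace nostd = opentelemetry::nostd;

// Adapter exposing request headers to the propagator.
struct HeaderCarrier : ctx::propagation::TextMapCarrier
{
    HttpRequest const& req;  // hypothetical request type
    explicit HeaderCarrier(HttpRequest const& r) : req(r) {}

    nostd::string_view
    Get(nostd::string_view key) const noexcept override
    {
        return req.header(key);  // returns "" when the header is absent
    }

    void
    Set(nostd::string_view, nostd::string_view) noexcept override
    {
        // Extraction only; nothing to inject on the inbound path.
    }
};

ctx::Context
extractRpcContext(HttpRequest const& req)
{
    opentelemetry::trace::propagation::HttpTraceContext propagator;
    HeaderCarrier carrier{req};
    auto current = ctx::RuntimeContext::GetCurrent();
    // The returned context carries the remote span; use it as the
    // parent when starting the rpc.request span.
    return propagator.Extract(carrier, current);
}
```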

---

## 1.6 Key Trace Points

> **TxQ** = Transaction Queue

The following table identifies priority instrumentation points across the codebase:

| Category        | Span Name              | File                   | Method                  | Priority |
| --------------- | ---------------------- | ---------------------- | ----------------------- | -------- |
| **Transaction** | `tx.receive`           | `PeerImp.cpp`          | `handleTransaction()`   | High     |
| **Transaction** | `tx.validate`          | `NetworkOPs.cpp`       | `processTransaction()`  | High     |
| **Transaction** | `tx.process`           | `NetworkOPs.cpp`       | `doTransactionSync()`   | High     |
| **Transaction** | `tx.relay`             | `OverlayImpl.cpp`      | `relay()`               | Medium   |
| **Consensus**   | `consensus.round`      | `RCLConsensus.cpp`     | `startRound()`          | High     |
| **Consensus**   | `consensus.phase.*`    | `Consensus.h`          | `timerEntry()`          | High     |
| **Consensus**   | `consensus.proposal.*` | `RCLConsensus.cpp`     | `peerProposal()`        | Medium   |
| **RPC**         | `rpc.request`          | `ServerHandler.cpp`    | `onRequest()`           | High     |
| **RPC**         | `rpc.command.*`        | `RPCHandler.cpp`       | `doCommand()`           | High     |
| **Peer**        | `peer.connect`         | `OverlayImpl.cpp`      | `onHandoff()`           | Low      |
| **Peer**        | `peer.message.*`       | `PeerImp.cpp`          | `onMessage()`           | Low      |
| **Ledger**      | `ledger.acquire`       | `InboundLedgers.cpp`   | `acquire()`             | Medium   |
| **Ledger**      | `ledger.build`         | `RCLConsensus.cpp`     | `buildLCL()`            | High     |
| **PathFinding** | `pathfind.request`     | `PathRequest.cpp`      | `doUpdate()`            | High     |
| **PathFinding** | `pathfind.compute`     | `Pathfinder.cpp`       | `findPaths()`           | High     |
| **TxQ**         | `txq.enqueue`          | `TxQ.cpp`              | `apply()`               | High     |
| **TxQ**         | `txq.apply`            | `TxQ.cpp`              | `processClosedLedger()` | High     |
| **Fee**         | `fee.escalate`         | `LoadManager.cpp`      | `raiseLocalFee()`       | Medium   |
| **Ledger**      | `ledger.replay`        | `LedgerReplayer.h`     | `replay()`              | Medium   |
| **Ledger**      | `ledger.delta`         | `LedgerDeltaAcquire.h` | `processData()`         | Medium   |
| **Validator**   | `validator.list.fetch` | `ValidatorList.cpp`    | `verify()`              | Medium   |
| **Validator**   | `validator.manifest`   | `Manifest.cpp`         | `applyManifest()`       | Low      |
| **Amendment**   | `amendment.vote`       | `AmendmentTable.cpp`   | `doVoting()`            | Low      |
| **SHAMap**      | `shamap.sync`          | `SHAMap.cpp`           | `fetchRoot()`           | Medium   |

---

## 1.7 Instrumentation Priority

> **TxQ** = Transaction Queue

```mermaid
quadrantChart
    title Instrumentation Priority Matrix
    x-axis Low Complexity --> High Complexity
    y-axis Low Value --> High Value
    quadrant-1 Implement First
    quadrant-2 Plan Carefully
    quadrant-3 Quick Wins
    quadrant-4 Consider Later

    RPC Tracing: [0.2, 0.92]
    Transaction Tracing: [0.55, 0.88]
    Consensus Tracing: [0.78, 0.82]
    PathFinding: [0.38, 0.75]
    TxQ and Fees: [0.25, 0.65]
    Ledger Sync: [0.62, 0.58]
    Peer Message Tracing: [0.35, 0.25]
    JobQueue Tracing: [0.2, 0.48]
    Validator Mgmt: [0.48, 0.42]
    Amendment Tracking: [0.15, 0.32]
    SHAMap Operations: [0.72, 0.45]
```

---

## 1.8 Observable Outcomes

> **TxQ** = Transaction Queue | **UNL** = Unique Node List

After implementing OpenTelemetry, operators and developers will gain visibility into the following:

### 1.8.1 What You Will See: Traces

| Trace Type                 | Description                                                                                  | Example Query in Grafana/Tempo                         |
| -------------------------- | -------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
| **Transaction Lifecycle**  | Full journey from RPC submission through validation, relay, consensus, and ledger inclusion  | `{service.name="rippled" && xrpl.tx.hash="ABC123..."}` |
| **Cross-Node Propagation** | Transaction path across multiple rippled nodes with timing                                   | `{xrpl.tx.relay_count > 0}`                            |
| **Consensus Rounds**       | Complete round with all phases (open, establish, accept)                                     | `{span.name=~"consensus.round.*"}`                     |
| **RPC Request Processing** | Individual command execution with timing breakdown                                           | `{xrpl.rpc.command="account_info"}`                    |
| **Ledger Acquisition**     | Peer-to-peer ledger data requests and responses                                              | `{span.name="ledger.acquire"}`                         |
| **PathFinding Latency**    | Path computation time and cache effectiveness for payment RPCs                               | `{span.name="pathfind.compute"}`                       |
| **TxQ Behavior**           | Queue depth, eviction patterns, fee escalation during congestion                             | `{span.name=~"txq.*"}`                                 |
| **Ledger Sync**            | Full acquisition timeline including delta and transaction fetches                            | `{span.name=~"ledger.acquire.*"}`                      |
| **Validator Health**       | UNL fetch success, manifest updates, stale list detection                                    | `{span.name=~"validator.*"}`                           |

### 1.8.2 What You Will See: Metrics (Derived from Traces)

| Metric                        | Description                              | Dashboard Panel             |
| ----------------------------- | ---------------------------------------- | --------------------------- |
| **RPC Latency (p50/p95/p99)** | Response time distribution per command   | Heatmap by command          |
| **Transaction Throughput**    | Transactions processed per second        | Time series graph           |
| **Consensus Round Duration**  | Time to complete consensus phases        | Histogram                   |
| **Cross-Node Latency**        | Time for transaction to reach N nodes    | Line chart with percentiles |
| **Error Rate**                | Failed transactions/RPC calls by type    | Stacked bar chart           |
| **PathFinding Latency**       | Path computation time per currency pair  | Heatmap by currency         |
| **TxQ Depth**                 | Queued transactions over time            | Time series with thresholds |
| **Fee Escalation Level**      | Current fee multiplier                   | Gauge with alert thresholds |
| **Ledger Sync Duration**      | Time to acquire missing ledgers          | Histogram                   |

### 1.8.3 Concrete Dashboard Examples

**Transaction Trace View (Tempo):**

```
┌────────────────────────────────────────────────────────────────────────────────┐
│ Trace: abc123... (Transaction Submission)             Duration: 847ms          │
├────────────────────────────────────────────────────────────────────────────────┤
│ ├── rpc.request [ServerHandler]                       ████░░░░░░ 45ms          │
│ │   └── rpc.command.submit [RPCHandler]               ████░░░░░░ 42ms          │
│ │       └── tx.receive [NetworkOPs]                   ███░░░░░░░ 35ms          │
│ │           ├── tx.validate [TxQ]                     █░░░░░░░░░ 8ms           │
│ │           └── tx.relay [Overlay]                    ██░░░░░░░░ 15ms          │
│ │               ├── tx.receive [Node-B]               █████░░░░░ 52ms          │
│ │               │   └── tx.relay [Node-B]             ██░░░░░░░░ 18ms          │
│ │               └── tx.receive [Node-C]               ██████░░░░ 65ms          │
│ └── consensus.round [RCLConsensus]                    ████████░░ 720ms         │
│     ├── consensus.phase.open                          ██░░░░░░░░ 180ms         │
│     ├── consensus.phase.establish                     █████░░░░░ 480ms         │
│     └── consensus.phase.accept                        █░░░░░░░░░ 60ms          │
└────────────────────────────────────────────────────────────────────────────────┘
```

**RPC Performance Dashboard Panel:**

```
┌─────────────────────────────────────────────────────────────┐
│ RPC Command Latency (Last 1 Hour)                           │
├─────────────────────────────────────────────────────────────┤
│ Command          │ p50    │ p95    │ p99    │ Errors │ Rate │
│──────────────────┼────────┼────────┼────────┼────────┼──────│
│ account_info     │ 12ms   │ 45ms   │ 89ms   │ 0.1%   │ 150/s│
│ submit           │ 35ms   │ 120ms  │ 250ms  │ 2.3%   │  45/s│
│ ledger           │ 8ms    │ 25ms   │ 55ms   │ 0.0%   │  80/s│
│ tx               │ 15ms   │ 50ms   │ 100ms  │ 0.5%   │  60/s│
│ server_info      │ 5ms    │ 12ms   │ 20ms   │ 0.0%   │ 200/s│
└─────────────────────────────────────────────────────────────┘
```

**Consensus Health Dashboard Panel:**

```mermaid
---
config:
  xyChart:
    width: 1200
    height: 400
    plotReservedSpacePercent: 50
    chartOrientation: vertical
  themeVariables:
    xyChart:
      plotColorPalette: "#3498db"
---
xychart-beta
    title "Consensus Round Duration (Last 24 Hours)"
    x-axis "Time of Day (Hours)" [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
    y-axis "Duration (seconds)" 1 --> 5
    line [2.1, 2.4, 2.8, 3.2, 3.8, 4.3, 4.5, 5.0, 4.7, 4.0, 3.2, 2.6, 2.0]
```

### 1.8.4 Operator Actionable Insights

| Scenario                  | What You'll See                                                                            | Action                                           |
| ------------------------- | ------------------------------------------------------------------------------------------ | ------------------------------------------------ |
| **Slow RPC**              | Span showing which phase is slow (parsing, execution, serialization)                       | Optimize specific code path                      |
| **Transaction Stuck**     | Trace stops at validation; error attribute shows reason                                    | Fix transaction parameters                       |
| **Consensus Delay**       | `consensus.phase.establish` taking too long; proposer attribute shows missing validators   | Investigate network connectivity                 |
| **Memory Spike**          | Large batch of spans correlating with memory increase                                      | Tune batch_size or sampling                      |
| **Network Partition**     | Traces missing cross-node links for specific peer                                          | Check peer connectivity                          |
| **Path Computation Slow** | `pathfind.compute` span shows high latency; cache miss rate in attributes                  | Warm the RippleLineCache, check order book depth |
| **TxQ Full**              | `txq.enqueue` spans show evictions; `fee.escalate` spans increasing                        | Monitor fee levels, alert operators              |
| **Ledger Sync Stalled**   | `ledger.acquire` spans timing out; peer reliability attributes show issues                 | Check peer connectivity, add trusted peers       |
| **UNL Stale**             | `validator.list.fetch` spans failing; last_update attribute aging                          | Verify validator site URLs, check DNS            |

### 1.8.5 Developer Debugging Workflow

1. **Find Transaction**: Query by `xrpl.tx.hash` to get full trace
2. **Identify Bottleneck**: Look at span durations to find slowest component
3. **Check Attributes**: Review `xrpl.tx.validity`, `xrpl.rpc.status` for errors
4. **Correlate Logs**: Use `trace_id` to find related PerfLog entries
5. **Compare Nodes**: Filter by `service.instance.id` to compare behavior across nodes

---

_Next: [Design Decisions](./02-design-decisions.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_
611
OpenTelemetryPlan/02-design-decisions.md
Normal file
@@ -0,0 +1,611 @@

# Design Decisions

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Architecture Analysis](./01-architecture-analysis.md) | [Code Samples](./04-code-samples.md)

---

## 2.1 OpenTelemetry Components

> **OTLP** = OpenTelemetry Protocol

### 2.1.1 SDK Selection

**Primary Choice**: OpenTelemetry C++ SDK (`opentelemetry-cpp`)

| Component                                | Purpose                | Required    |
| ---------------------------------------- | ---------------------- | ----------- |
| `opentelemetry-cpp::api`                 | Tracing API headers    | Yes         |
| `opentelemetry-cpp::sdk`                 | SDK implementation     | Yes         |
| `opentelemetry-cpp::ext`                 | Extensions (exporters) | Yes         |
| `opentelemetry-cpp::otlp_grpc_exporter`  | OTLP/gRPC export       | Recommended |
| `opentelemetry-cpp::otlp_http_exporter`  | OTLP/HTTP export       | Alternative |

### 2.1.2 Instrumentation Strategy

**Manual Instrumentation** (recommended):

| Approach   | Pros                                                               | Cons                                                     |
| ---------- | ------------------------------------------------------------------ | -------------------------------------------------------- |
| **Manual** | Precise control, optimized placement, rippled-specific attributes  | More development effort                                  |
| **Auto**   | Less code, automatic coverage                                      | Less control, potential overhead, limited customization  |

---

## 2.2 Exporter Configuration

> **OTLP** = OpenTelemetry Protocol

```mermaid
flowchart TB
    subgraph nodes["rippled Nodes"]
        node1["rippled<br/>Node 1"]
        node2["rippled<br/>Node 2"]
        node3["rippled<br/>Node 3"]
    end

    collector["OpenTelemetry<br/>Collector<br/>(sidecar or standalone)"]

    subgraph backends["Observability Backends"]
        tempo["Tempo"]
        elastic["Elastic<br/>APM"]
    end

    node1 -->|"OTLP/gRPC<br/>:4317"| collector
    node2 -->|"OTLP/gRPC<br/>:4317"| collector
    node3 -->|"OTLP/gRPC<br/>:4317"| collector

    collector --> tempo
    collector --> elastic

    style nodes fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style backends fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style collector fill:#bf360c,stroke:#8c2809,color:#ffffff
```

**Reading the diagram:**

- **rippled Nodes (blue)**: The source of telemetry data. Each rippled node exports spans via OTLP/gRPC on port 4317.
- **OpenTelemetry Collector (red)**: The central aggregation point that receives spans from all nodes. Can run as a sidecar (per-node) or standalone (shared). Handles batching, filtering, and routing.
- **Observability Backends (green)**: The storage and visualization destinations. Tempo is the recommended backend for both development and production, and Elastic APM is an alternative. The Collector routes to one or more backends.
- **Arrows (nodes to collector to backends)**: The data pipeline -- spans flow from nodes to the Collector over gRPC, then the Collector fans out to the configured backends.

### 2.2.1 OTLP/gRPC (Recommended)

```cpp
// Configuration for OTLP over gRPC
namespace otlp = opentelemetry::exporter::otlp;

otlp::OtlpGrpcExporterOptions opts;
opts.endpoint = "localhost:4317";
// TLS fields as named in opentelemetry-cpp's gRPC client options
opts.use_ssl_credentials = true;
opts.ssl_credentials_cacert_path = "/path/to/ca.crt";
```

### 2.2.2 OTLP/HTTP (Alternative)

```cpp
// Configuration for OTLP over HTTP
namespace otlp = opentelemetry::exporter::otlp;

otlp::OtlpHttpExporterOptions opts;
opts.url = "http://localhost:4318/v1/traces";
opts.content_type = otlp::HttpRequestContentType::kJson; // or kBinary
```
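
Either exporter then needs to be wired into a `TracerProvider` at startup. A minimal sketch of that wiring, assuming the factory helpers available in recent opentelemetry-cpp releases (exact header paths and factory names vary by SDK version):

```cpp
#include <memory>
#include <opentelemetry/exporters/otlp/otlp_grpc_exporter_factory.h>
#include <opentelemetry/sdk/trace/batch_span_processor_factory.h>
#include <opentelemetry/sdk/trace/batch_span_processor_options.h>
#include <opentelemetry/sdk/trace/tracer_provider_factory.h>
#include <opentelemetry/trace/provider.h>

namespace otlp = opentelemetry::exporter::otlp;
namespace sdktrace = opentelemetry::sdk::trace;

void
initTracing(otlp::OtlpGrpcExporterOptions const& opts)
{
    auto exporter = otlp::OtlpGrpcExporterFactory::Create(opts);

    // Defaults: queue of 2048 spans, batches of 512 (see Section 3.5).
    sdktrace::BatchSpanProcessorOptions bopts;
    auto processor =
        sdktrace::BatchSpanProcessorFactory::Create(std::move(exporter), bopts);

    std::shared_ptr<opentelemetry::trace::TracerProvider> provider =
        sdktrace::TracerProviderFactory::Create(std::move(processor));

    // Register globally so instrumentation sites can look up the tracer.
    opentelemetry::trace::Provider::SetTracerProvider(provider);
}
```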

---

## 2.3 Span Naming Conventions

> **TxQ** = Transaction Queue | **UNL** = Unique Node List | **WS** = WebSocket

### 2.3.1 Naming Schema

```
<component>.<operation>[.<sub-operation>]
```

**Examples**:

- `tx.receive` - Transaction received from peer
- `consensus.phase.establish` - Consensus establish phase
- `rpc.command.server_info` - server_info RPC command

### 2.3.2 Complete Span Catalog

```yaml
# Transaction Spans
tx:
  receive: "Transaction received from network"
  validate: "Transaction signature/format validation"
  process: "Full transaction processing"
  relay: "Transaction relay to peers"
  apply: "Apply transaction to ledger"

# Consensus Spans
consensus:
  round: "Complete consensus round"
  phase:
    open: "Open phase - collecting transactions"
    establish: "Establish phase - reaching agreement"
    accept: "Accept phase - applying consensus"
  proposal:
    receive: "Receive peer proposal"
    send: "Send our proposal"
  validation:
    receive: "Receive peer validation"
    send: "Send our validation"

# RPC Spans
rpc:
  request: "HTTP/WebSocket request handling"
  command:
    "*": "Specific RPC command (dynamic)"

# Peer Spans
peer:
  connect: "Peer connection establishment"
  disconnect: "Peer disconnection"
  message:
    send: "Send protocol message"
    receive: "Receive protocol message"

# Ledger Spans
ledger:
  acquire: "Ledger acquisition from network"
  build: "Build new ledger"
  validate: "Ledger validation"
  close: "Close ledger"
  replay: "Ledger replay executed"
  delta: "Delta-based ledger acquired"

# PathFinding Spans
pathfind:
  request: "Path request initiated"
  compute: "Path computation executed"

# TxQ Spans
txq:
  enqueue: "Transaction queued"
  apply: "Queued transaction applied"

# Fee/Load Spans
fee:
  escalate: "Fee escalation triggered"

# Validator Spans
validator:
  list:
    fetch: "UNL list fetched"
  manifest: "Manifest update processed"

# Amendment Spans
amendment:
  vote: "Amendment voting executed"

# SHAMap Spans
shamap:
  sync: "State tree synchronization"

# Job Spans
job:
  enqueue: "Job added to queue"
  execute: "Job execution"
```

---

## 2.4 Attribute Schema

> **TxQ** = Transaction Queue | **UNL** = Unique Node List | **OTLP** = OpenTelemetry Protocol

### 2.4.1 Resource Attributes (Set Once at Startup)

```cpp
// Standard OpenTelemetry semantic conventions
resource::SemanticConventions::SERVICE_NAME = "rippled"
resource::SemanticConventions::SERVICE_VERSION = BuildInfo::getVersionString()
resource::SemanticConventions::SERVICE_INSTANCE_ID = <node_public_key_base58>

// Custom rippled attributes
"xrpl.network.id" = <network_id>      // e.g., 0 for mainnet
"xrpl.network.type" = "mainnet" | "testnet" | "devnet" | "standalone"
"xrpl.node.type" = "validator" | "stock" | "reporting"
"xrpl.node.cluster" = <cluster_name>  // If clustered
```
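
At startup these attributes map directly onto an SDK `Resource`. A brief sketch, assuming the network and node values have already been computed from configuration (a `Resource`-accepting overload of the provider factory exists in recent opentelemetry-cpp releases):

```cpp
#include <cstdint>
#include <string>
#include <opentelemetry/sdk/resource/resource.h>

auto
makeRippledResource(std::uint32_t networkId, std::string const& nodePubKey)
{
    return opentelemetry::sdk::resource::Resource::Create({
        {"service.name", "rippled"},
        {"service.version", BuildInfo::getVersionString()},
        {"service.instance.id", nodePubKey},
        {"xrpl.network.id", static_cast<int64_t>(networkId)},
        {"xrpl.network.type", "mainnet"},  // assumption: derived from config
        {"xrpl.node.type", "validator"},   // assumption: derived from config
    });
}

// The resource is attached when building the provider, e.g.:
// TracerProviderFactory::Create(std::move(processor), resource);
```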

### 2.4.2 Span Attributes by Category

#### Transaction Attributes

```cpp
"xrpl.tx.hash" = string          // Transaction hash (hex)
"xrpl.tx.type" = string          // "Payment", "OfferCreate", etc.
"xrpl.tx.account" = string       // Source account (redacted in prod)
"xrpl.tx.sequence" = int64       // Account sequence number
"xrpl.tx.fee" = int64            // Fee in drops
"xrpl.tx.result" = string        // "tesSUCCESS", "tecPATH_DRY", etc.
"xrpl.tx.ledger_index" = int64   // Ledger containing transaction
```

#### Consensus Attributes

```cpp
"xrpl.consensus.round" = int64          // Round number
"xrpl.consensus.phase" = string         // "open", "establish", "accept"
"xrpl.consensus.mode" = string          // "proposing", "observing", etc.
"xrpl.consensus.proposers" = int64      // Number of proposers
"xrpl.consensus.ledger.prev" = string   // Previous ledger hash
"xrpl.consensus.ledger.seq" = int64     // Ledger sequence
"xrpl.consensus.tx_count" = int64       // Transactions in consensus set
"xrpl.consensus.duration_ms" = float64  // Round duration
```

#### RPC Attributes

```cpp
"xrpl.rpc.command" = string  // Command name
"xrpl.rpc.version" = int64   // API version
"xrpl.rpc.role" = string     // "admin" or "user"
"xrpl.rpc.params" = string   // Sanitized parameters (optional)
```

#### Peer & Message Attributes

```cpp
"xrpl.peer.id" = string            // Peer public key (base58)
"xrpl.peer.address" = string       // IP:port
"xrpl.peer.latency_ms" = float64   // Measured latency
"xrpl.peer.cluster" = string       // Cluster name if clustered
"xrpl.message.type" = string       // Protocol message type name
"xrpl.message.size_bytes" = int64  // Message size
"xrpl.message.compressed" = bool   // Whether compressed
```

#### Ledger & Job Attributes

```cpp
"xrpl.ledger.hash" = string       // Ledger hash
"xrpl.ledger.index" = int64       // Ledger sequence/index
"xrpl.ledger.close_time" = int64  // Close time (epoch)
"xrpl.ledger.tx_count" = int64    // Transaction count
"xrpl.job.type" = string          // Job type name
"xrpl.job.queue_ms" = float64     // Time spent in queue
"xrpl.job.worker" = int64         // Worker thread ID
```

#### PathFinding Attributes

```cpp
"xrpl.pathfind.source_currency" = string  // Source currency code
"xrpl.pathfind.dest_currency" = string    // Destination currency code
"xrpl.pathfind.path_count" = int64        // Number of paths found
"xrpl.pathfind.cache_hit" = bool          // RippleLineCache hit
```

#### TxQ Attributes

```cpp
"xrpl.txq.queue_depth" = int64       // Current queue depth
"xrpl.txq.fee_level" = int64         // Fee level of transaction
"xrpl.txq.eviction_reason" = string  // Why transaction was evicted
```

#### Fee Attributes

```cpp
"xrpl.fee.load_factor" = int64       // Current load factor
"xrpl.fee.escalation_level" = int64  // Fee escalation multiplier
```

#### Validator Attributes

```cpp
"xrpl.validator.list_size" = int64     // UNL size
"xrpl.validator.list_age_sec" = int64  // Seconds since last update
```

#### Amendment Attributes

```cpp
"xrpl.amendment.name" = string    // Amendment name
"xrpl.amendment.status" = string  // "enabled", "vetoed", "supported"
```

#### SHAMap Attributes

```cpp
"xrpl.shamap.type" = string          // "transaction", "state", "account_state"
"xrpl.shamap.missing_nodes" = int64  // Number of missing nodes during sync
"xrpl.shamap.duration_ms" = float64  // Sync duration
```

### 2.4.3 Data Collection Summary

The following table summarizes what data is collected by category:

| Category        | Attributes Collected                                                    | Purpose                      |
| --------------- | ----------------------------------------------------------------------- | ---------------------------- |
| **Transaction** | `tx.hash`, `tx.type`, `tx.result`, `tx.fee`, `ledger_index`             | Trace transaction lifecycle  |
| **Consensus**   | `round`, `phase`, `mode`, `proposers` (public keys), `duration_ms`      | Analyze consensus timing     |
| **RPC**         | `command`, `version`, `status`, `duration_ms`                           | Monitor RPC performance      |
| **Peer**        | `peer.id` (public key), `latency_ms`, `message.type`, `message.size`    | Network topology analysis    |
| **Ledger**      | `ledger.hash`, `ledger.index`, `close_time`, `tx_count`                 | Ledger progression tracking  |
| **Job**         | `job.type`, `queue_ms`, `worker`                                        | JobQueue performance         |
| **PathFinding** | `pathfind.source_currency`, `dest_currency`, `path_count`, `cache_hit`  | Payment path analysis        |
| **TxQ**         | `txq.queue_depth`, `fee_level`, `eviction_reason`                       | Queue depth and fee tracking |
| **Fee**         | `fee.load_factor`, `escalation_level`                                   | Fee escalation monitoring    |
| **Validator**   | `validator.list_size`, `list_age_sec`                                   | UNL health monitoring        |
| **Amendment**   | `amendment.name`, `status`                                              | Protocol upgrade tracking    |
| **SHAMap**      | `shamap.type`, `missing_nodes`, `duration_ms`                           | State tree sync performance  |

### 2.4.4 Privacy & Sensitive Data Policy

> **PII** = Personally Identifiable Information

OpenTelemetry instrumentation is designed to collect **operational metadata only**, never sensitive content.

#### Data NOT Collected

The following data is explicitly **excluded** from telemetry collection:

| Excluded Data           | Reason                                    |
| ----------------------- | ----------------------------------------- |
| **Private Keys**        | Never exposed; not relevant to tracing    |
| **Account Balances**    | Financial data; privacy sensitive         |
| **Transaction Amounts** | Financial data; privacy sensitive         |
| **Raw TX Payloads**     | May contain sensitive memo/data fields    |
| **Personal Data**       | No PII collected                          |
| **IP Addresses**        | Configurable; excluded by default in prod |

#### Privacy Protection Mechanisms

| Mechanism                     | Description                                                                |
| ----------------------------- | -------------------------------------------------------------------------- |
| **Account Hashing**           | `xrpl.tx.account` is hashed at collector level before storage              |
| **Configurable Redaction**    | Sensitive fields can be excluded via `[telemetry]` config section          |
| **Sampling**                  | Only 10% of traces recorded by default, reducing data exposure             |
| **Local Control**             | Node operators have full control over what gets exported                   |
| **No Raw Payloads**           | Transaction content is never recorded, only metadata (hash, type, result)  |
| **Collector-Level Filtering** | Additional redaction/hashing can be configured at OTel Collector           |

#### Collector-Level Data Protection

The OpenTelemetry Collector can be configured to hash or redact sensitive attributes before export:

```yaml
processors:
  attributes:
    actions:
      # Hash account addresses before storage
      - key: xrpl.tx.account
        action: hash
      # Remove IP addresses entirely
      - key: xrpl.peer.address
        action: delete
      # Redact specific fields
      - key: xrpl.rpc.params
        action: delete
```

#### Configuration Options for Privacy

In `rippled.cfg`, operators can control data collection granularity:

```ini
[telemetry]
enabled=1

# Disable collection of specific components
trace_transactions=1
trace_consensus=1
trace_rpc=1
trace_peer=0          # Disable peer tracing (high volume, includes addresses)

# Redact specific attributes
redact_account=1      # Hash account addresses before export
redact_peer_address=1 # Remove peer IP addresses
```

> **Note**: The `redact_account` configuration in `rippled.cfg` controls SDK-level redaction before export, while collector-level filtering (see [Collector-Level Data Protection](#collector-level-data-protection) above) provides an additional defense-in-depth layer. Both can operate independently.

> **Key Principle**: Telemetry collects **operational metadata** (timing, counts, hashes) — never **sensitive content** (keys, balances, amounts, raw payloads).

---

## 2.5 Context Propagation Design

> **WS** = WebSocket

### 2.5.1 Propagation Boundaries

```mermaid
flowchart TB
    subgraph http["HTTP/WebSocket (RPC)"]
        w3c["W3C Trace Context Headers:<br/>traceparent:<br/>00-trace_id-span_id-flags<br/>tracestate: rippled=..."]
    end

    subgraph protobuf["Protocol Buffers (P2P)"]
        proto["message TraceContext {<br/> bytes trace_id = 1; // 16 bytes<br/> bytes span_id = 2; // 8 bytes<br/> uint32 trace_flags = 3;<br/> string trace_state = 4;<br/>}"]
    end

    subgraph jobqueue["JobQueue (Internal Async)"]
        job["Context captured at job creation,<br/>restored at execution<br/><br/>class Job {<br/> otel::context::Context<br/> traceContext_;<br/>};"]
    end

    style http fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style protobuf fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style jobqueue fill:#bf360c,stroke:#8c2809,color:#ffffff
```

**Reading the diagram:**

- **HTTP/WebSocket - RPC (blue)**: For client-facing RPC requests, trace context is propagated using the W3C `traceparent` header. This is the standard approach and works with any OTel-compatible client.
- **Protocol Buffers - P2P (green)**: For peer-to-peer messages between rippled nodes, trace context is embedded as a protobuf `TraceContext` message carrying trace_id, span_id, flags, and optional trace_state.
- **JobQueue - Internal Async (red)**: For asynchronous work within a single node, the OTel context is captured when a job is created and restored when the job executes on a worker thread. This bridges the async gap so spans remain linked.
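
For the JobQueue boundary, capture-and-restore is a few lines with the OTel C++ `RuntimeContext` API. A minimal sketch; the `TracedJob` type is illustrative, not rippled's actual `Job` class:

```cpp
#include <functional>
#include <opentelemetry/context/runtime_context.h>

namespace ctx = opentelemetry::context;

struct TracedJob
{
    // Captured on the enqueuing thread, at job creation.
    ctx::Context traceContext_ = ctx::RuntimeContext::GetCurrent();
    std::function<void()> work_;

    void
    run()
    {
        // Restore the captured context on the worker thread; the token
        // detaches it again when it goes out of scope.
        auto token = ctx::RuntimeContext::Attach(traceContext_);
        work_();  // spans started here parent under the original trace
    }
};
```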

---

## 2.6 Integration with Existing Observability

> **OTLP** = OpenTelemetry Protocol | **WS** = WebSocket

### 2.6.1 Existing Frameworks Comparison

rippled already has two observability mechanisms. OpenTelemetry complements (not replaces) them:

| Aspect                | PerfLog                       | Beast Insight (StatsD)       | OpenTelemetry             |
| --------------------- | ----------------------------- | ---------------------------- | ------------------------- |
| **Type**              | Logging                       | Metrics                      | Distributed Tracing       |
| **Data**              | JSON log entries              | Counters, gauges, histograms | Spans with context        |
| **Scope**             | Single node                   | Single node                  | **Cross-node**            |
| **Output**            | `perf.log` file               | StatsD server                | OTLP Collector            |
| **Question answered** | "What happened on this node?" | "How many? How fast?"        | "What was the journey?"   |
| **Correlation**       | By timestamp                  | By metric name               | By `trace_id`             |
| **Overhead**          | Low (file I/O)                | Low (UDP packets)            | Low-Medium (configurable) |

### 2.6.2 What Each Framework Does Best

#### PerfLog

- **Purpose**: Detailed local event logging for RPC and job execution
- **Strengths**:
  - Rich JSON output with timing data
  - Already integrated in RPC handlers
  - File-based, no external dependencies
- **Limitations**:
  - Single-node only (no cross-node correlation)
  - No parent-child relationships between events
  - Manual log parsing required

```json
// Example PerfLog entry
{
  "time": "2024-01-15T10:30:00.123Z",
  "method": "submit",
  "duration_us": 1523,
  "result": "tesSUCCESS"
}
```

#### Beast Insight (StatsD)

- **Purpose**: Real-time metrics for monitoring dashboards
- **Strengths**:
  - Aggregated metrics (counters, gauges, histograms)
  - Low overhead (UDP, fire-and-forget)
  - Good for alerting thresholds
- **Limitations**:
  - No request-level detail
  - No causal relationships
  - Single-node perspective

```cpp
// Example StatsD usage in rippled
insight.increment("rpc.submit.count");
insight.gauge("ledger.age", age);
insight.timing("consensus.round", duration);
```

#### OpenTelemetry (NEW)

- **Purpose**: Distributed request tracing across nodes
- **Strengths**:
  - **Cross-node correlation** via `trace_id`
  - Parent-child span relationships
  - Rich attributes per span
  - Industry standard (CNCF)
- **Limitations**:
  - Requires collector infrastructure
  - Higher complexity than logging

```cpp
// Example OpenTelemetry span
auto span = telemetry.startSpan("tx.relay");
span->SetAttribute("tx.hash", hash);
span->SetAttribute("peer.id", peerId);
// Span automatically linked to parent via context
```

### 2.6.3 When to Use Each

| Scenario                                 | PerfLog    | StatsD | OpenTelemetry |
| ---------------------------------------- | ---------- | ------ | ------------- |
| "How many TXs per second?"               | ❌         | ✅     | ✅            |
| "What's the p99 RPC latency?"            | ❌         | ✅     | ✅            |
| "Why was this specific TX slow?"         | ⚠️ partial | ❌     | ✅            |
| "Which node delayed consensus?"          | ❌         | ❌     | ✅            |
| "What happened on node X at time T?"     | ✅         | ❌     | ✅            |
| "Show me the TX journey across 5 nodes"  | ❌         | ❌     | ✅            |

### 2.6.4 Coexistence Strategy

```mermaid
flowchart TB
    subgraph rippled["rippled Process"]
        perflog["PerfLog<br/>(JSON to file)"]
        insight["Beast Insight<br/>(StatsD)"]
        otel["OpenTelemetry<br/>(Tracing)"]
    end

    perflog --> perffile["perf.log"]
    insight --> statsd["StatsD Server"]
    otel --> collector["OTLP Collector"]

    perffile --> grafana["Grafana<br/>(Unified UI)"]
    statsd --> grafana
    collector --> grafana

    style rippled fill:#212121,stroke:#0a0a0a,color:#ffffff
    style grafana fill:#bf360c,stroke:#8c2809,color:#ffffff
```

**Reading the diagram:**

- **rippled Process (dark gray)**: The single rippled node running all three observability frameworks side by side. Each framework operates independently with no interference.
- **PerfLog to perf.log**: PerfLog writes JSON-formatted event logs to a local file. Grafana can ingest these via Loki or a file-based datasource.
- **Beast Insight to StatsD Server**: Insight sends aggregated metrics (counters, gauges) over UDP to a StatsD server. Grafana reads from StatsD-compatible backends like Graphite or Prometheus (via StatsD exporter).
- **OpenTelemetry to OTLP Collector**: OTel exports spans over OTLP/gRPC to a Collector, which then forwards to a trace backend (Tempo).
- **Grafana (red, unified UI)**: All three data streams converge in Grafana, enabling operators to correlate logs, metrics, and traces in a single dashboard.

### 2.6.5 Correlation with PerfLog

Trace IDs can be correlated with existing PerfLog entries for comprehensive debugging:

```cpp
// In RPCHandler.cpp - correlate trace with PerfLog
Status doCommand(RPC::JsonContext& context, Json::Value& result)
{
    // Start OpenTelemetry span
    auto span = context.app.getTelemetry().startSpan(
        "rpc.command." + context.method);

    // Get trace ID for correlation
    auto traceId = span->GetContext().trace_id().IsValid()
        ? toHex(span->GetContext().trace_id())
        : "";

    // Use existing PerfLog with trace correlation
    auto const curId = context.app.getPerfLog().currentId();
    context.app.getPerfLog().rpcStart(context.method, curId);

    // Future: Add trace ID to PerfLog entry
    // context.app.getPerfLog().setTraceId(curId, traceId);

    try {
        auto ret = handler(context, result);
        context.app.getPerfLog().rpcFinish(context.method, curId);
        span->SetStatus(opentelemetry::trace::StatusCode::kOk);
        return ret;
    } catch (std::exception const& e) {
        context.app.getPerfLog().rpcError(context.method, curId);
        span->RecordException(e);
        span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what());
        throw;
    }
}
```

---

_Previous: [Architecture Analysis](./01-architecture-analysis.md)_ | _Next: [Implementation Strategy](./03-implementation-strategy.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_

528
OpenTelemetryPlan/03-implementation-strategy.md
Normal file

@@ -0,0 +1,528 @@

# Implementation Strategy

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Code Samples](./04-code-samples.md) | [Configuration Reference](./05-configuration-reference.md)

---

## 3.1 Directory Structure

The telemetry implementation follows rippled's existing code organization pattern:

```
include/xrpl/
├── telemetry/
│   ├── Telemetry.h         # Main telemetry interface
│   ├── TelemetryConfig.h   # Configuration structures
│   ├── TraceContext.h      # Context propagation utilities
│   ├── SpanGuard.h         # RAII span management
│   └── SpanAttributes.h    # Attribute helper functions

src/libxrpl/
├── telemetry/
│   ├── Telemetry.cpp       # Implementation
│   ├── TelemetryConfig.cpp # Config parsing
│   ├── TraceContext.cpp    # Context serialization
│   └── NullTelemetry.cpp   # No-op implementation

src/xrpld/
├── telemetry/
│   ├── TracingInstrumentation.h  # Instrumentation macros
│   └── TracingInstrumentation.cpp
```
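
`SpanGuard.h` is planned as an RAII wrapper so spans are always ended, even on exception paths. A sketch of what that interface could look like -- an assumption about the planned design, not existing rippled code:

```cpp
#include <exception>
#include <utility>
#include <opentelemetry/nostd/shared_ptr.h>
#include <opentelemetry/trace/span.h>

class SpanGuard
{
public:
    explicit SpanGuard(
        opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span> span)
        : span_(std::move(span))
    {
    }

    SpanGuard(SpanGuard const&) = delete;
    SpanGuard&
    operator=(SpanGuard const&) = delete;

    ~SpanGuard()
    {
        // If we are unwinding, mark the span as failed before ending it.
        if (std::uncaught_exceptions() > 0)
            span_->SetStatus(
                opentelemetry::trace::StatusCode::kError, "exception unwind");
        span_->End();
    }

    opentelemetry::trace::Span&
    operator*() const
    {
        return *span_;
    }

private:
    opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span> span_;
};
```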

---

## 3.2 Implementation Approach

<div align="center">

```mermaid
%%{init: {'flowchart': {'nodeSpacing': 20, 'rankSpacing': 30}}}%%
flowchart TB
    subgraph phase1["Phase 1: Core"]
        direction LR
        sdk["SDK Integration"] ~~~ interface["Telemetry Interface"] ~~~ config["Configuration"]
    end

    subgraph phase2["Phase 2: RPC"]
        direction LR
        http["HTTP Context"] ~~~ rpc["RPC Handlers"]
    end

    subgraph phase3["Phase 3: P2P"]
        direction LR
        proto["Protobuf Context"] ~~~ tx["Transaction Relay"]
    end

    subgraph phase4["Phase 4: Consensus"]
        direction LR
        consensus["Consensus Rounds"] ~~~ proposals["Proposals"]
    end

    phase1 --> phase2 --> phase3 --> phase4

    style phase1 fill:#1565c0,stroke:#0d47a1,color:#ffffff
    style phase2 fill:#2e7d32,stroke:#1b5e20,color:#ffffff
    style phase3 fill:#e65100,stroke:#bf360c,color:#ffffff
    style phase4 fill:#c2185b,stroke:#880e4f,color:#ffffff
```

</div>

### Key Principles

1. **Minimal Intrusion**: Instrumentation should not alter existing control flow
2. **Zero-Cost When Disabled**: Use compile-time flags and no-op implementations (see the sketch after this list)
3. **Backward Compatibility**: Protocol Buffer extensions use high field numbers
4. **Graceful Degradation**: Tracing failures must not affect node operation
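
A minimal sketch of principle 2: callers program against a small interface, and `NullTelemetry` (see Section 3.1) turns every call into a no-op, so disabled builds pay at most one virtual call per instrumentation site. The exact interface shown is an assumption; the real one lives in `Telemetry.h`:

```cpp
#include <memory>
#include <string_view>

// Minimal span handle; the real implementation would wrap an OTel span.
struct Span
{
    virtual ~Span() = default;
    virtual void
    setAttribute(std::string_view, std::string_view)
    {
    }
    virtual void
    end()
    {
    }
};

struct Telemetry
{
    virtual ~Telemetry() = default;
    virtual std::unique_ptr<Span>
    startSpan(std::string_view name) = 0;
};

// Selected when [telemetry] enabled=0: returns inert spans whose
// attribute setters and end() do nothing.
struct NullTelemetry final : Telemetry
{
    std::unique_ptr<Span>
    startSpan(std::string_view) override
    {
        return std::make_unique<Span>();
    }
};
```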
|
||||
|
||||
---
|
||||
|
||||
## 3.3 Performance Overhead Summary
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
| Metric | Overhead | Notes |
|
||||
| ------------- | ---------- | ------------------------------------------------ |
|
||||
| CPU | 1-3% | Of per-transaction CPU cost (~200μs baseline) |
|
||||
| Memory | ~10 MB | SDK statics + batch buffer + worker thread stack |
|
||||
| Network | 10-50 KB/s | Compressed OTLP export to collector |
|
||||
| Latency (p99) | <2% | With proper sampling configuration |
|
||||
|
||||
---
|
||||
|
||||
## 3.4 Detailed CPU Overhead Analysis
|
||||
|
||||
### 3.4.1 Per-Operation Costs
|
||||
|
||||
> **Note on hardware assumptions**: The costs below are based on the official OTel C++ SDK CI benchmarks
|
||||
> (969 runs on GitHub Actions 2-core shared runners). On production server hardware (3+ GHz Xeon),
|
||||
> expect costs at the **lower end** of each range (~30-50% improvement over CI hardware).
|
||||
|
||||
| Operation | Time (ns) | Frequency | Impact |
|
||||
| --------------------- | --------- | ---------------------- | ---------- |
|
||||
| Span creation | 500-1000 | Every traced operation | Low |
|
||||
| Span end | 100-200 | Every traced operation | Low |
|
||||
| SetAttribute (string) | 80-120 | 3-5 per span | Low |
|
||||
| SetAttribute (int) | 40-60 | 2-3 per span | Negligible |
|
||||
| AddEvent | 100-200 | 0-2 per span | Low |
|
||||
| Context injection | 150-250 | Per outgoing message | Low |
|
||||
| Context extraction | 100-180 | Per incoming message | Low |
|
||||
| GetCurrent context | 10-20 | Thread-local access | Negligible |
|
||||
|
||||
**Source**: Span creation based on OTel C++ SDK `BM_SpanCreation` benchmark (AlwaysOnSampler +
|
||||
SimpleSpanProcessor + InMemoryExporter), median ~1,000 ns on CI hardware. AddEvent includes
|
||||
timestamp read + string copy + vector push + mutex acquisition. Context injection/extraction
|
||||
confirmed by `BM_SpanCreationWithScope` benchmark delta (~160 ns).
|
||||
|
||||
### 3.4.2 Transaction Processing Overhead
|
||||
|
||||
<div align="center">
|
||||
|
||||
```mermaid
|
||||
%%{init: {'pie': {'textPosition': 0.75}}}%%
|
||||
pie showData
|
||||
"tx.receive (1400ns)" : 1400
|
||||
"tx.validate (1200ns)" : 1200
|
||||
"tx.relay (1200ns)" : 1200
|
||||
"Context inject (200ns)" : 200
|
||||
```
|
||||
|
||||
**Transaction Tracing Overhead (~4.0μs total)**
|
||||
|
||||
</div>
|
||||
|
||||
**Overhead percentage**: 4.0 μs / 200 μs (avg tx processing) = **~2.0%**
|
||||
|
||||
> **Breakdown**: Each span (tx.receive, tx.validate, tx.relay) costs ~1,000 ns for creation plus
|
||||
> ~200-400 ns for 3-5 attribute sets. Context injection is ~200 ns (confirmed by benchmarks).
|
||||
> On production hardware, expect ~2.6 μs total (~1.3% overhead) due to faster span creation (~500-600 ns).
|
||||
|
||||
### 3.4.3 Consensus Round Overhead
|
||||
|
||||
| Operation | Count | Cost (ns) | Total |
|
||||
| ---------------------- | ----- | --------- | ---------- |
|
||||
| consensus.round span | 1 | ~1200 | ~1.2 μs |
|
||||
| consensus.phase spans | 3 | ~1100 | ~3.3 μs |
|
||||
| proposal.receive spans | ~20 | ~1100 | ~22 μs |
|
||||
| proposal.send spans | ~3 | ~1100 | ~3.3 μs |
|
||||
| Context operations | ~30 | ~200 | ~6 μs |
|
||||
| **TOTAL** | | | **~36 μs** |
|
||||
|
||||
> **Why higher**: Each span costs ~1,000 ns creation + ~100-200 ns for 1-2 attributes, totaling ~1,100-1,200 ns.
|
||||
> Context operations remain ~200 ns (confirmed by benchmarks). On production hardware, expect ~24 μs total.
|
||||
|
||||
**Overhead percentage**: 36 μs / 3s (typical round) = **~0.001%** (negligible)
|
||||
|
||||
### 3.4.4 RPC Request Overhead
|
||||
|
||||
| Operation | Cost (ns) |
|
||||
| ---------------- | ------------ |
|
||||
| rpc.request span | ~1200 |
|
||||
| rpc.command span | ~1100 |
|
||||
| Context extract | ~250 |
|
||||
| Context inject | ~200 |
|
||||
| **TOTAL** | **~2.75 μs** |
|
||||
|
||||
> **Why higher**: Each span costs ~1,000 ns creation + ~100-200 ns for attributes (command name,
|
||||
> version, role). Context extract/inject costs are confirmed by OTel C++ benchmarks.
|
||||
|
||||
- Fast RPC (1ms): 2.75 μs / 1ms = **~0.275%**
|
||||
- Slow RPC (100ms): 2.75 μs / 100ms = **~0.003%**
|
||||
|
||||
---
|
||||
|
||||
## 3.5 Memory Overhead Analysis
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
### 3.5.1 Static Memory
|
||||
|
||||
| Component | Size | Allocated |
|
||||
| ------------------------------------ | ----------- | ---------- |
|
||||
| TracerProvider singleton | ~64 KB | At startup |
|
||||
| BatchSpanProcessor (circular buffer) | ~16 KB | At startup |
|
||||
| BatchSpanProcessor (worker thread) | ~8 MB | At startup |
|
||||
| OTLP exporter (gRPC channel init) | ~256 KB | At startup |
|
||||
| Propagator registry | ~8 KB | At startup |
|
||||
| **Total static** | **~8.3 MB** | |
|
||||
|
||||
> **Why higher than earlier estimate**: The BatchSpanProcessor's circular buffer itself is only ~16 KB
|
||||
> (2049 x 8-byte `AtomicUniquePtr` entries), but it spawns a dedicated worker thread whose default
|
||||
> stack size on Linux is ~8 MB. The OTLP gRPC exporter allocates memory for channel stubs and TLS
|
||||
> initialization. The worker thread stack dominates the static footprint.
|
||||
|
||||
### 3.5.2 Dynamic Memory
|
||||
|
||||
| Component | Size per unit | Max units | Peak |
|
||||
| -------------------- | -------------- | ---------- | --------------- |
|
||||
| Active span | ~500-800 bytes | 1000 | ~500-800 KB |
|
||||
| Queued span (export) | ~500 bytes | 2048 | ~1 MB |
|
||||
| Attribute storage | ~80 bytes | 5 per span | Included |
|
||||
| Context storage | ~64 bytes | Per thread | ~6.4 KB |
|
||||
| **Total dynamic** | | | **~1.5-1.8 MB** |
|
||||
|
||||
> **Why active spans are larger**: An active `Span` object includes the wrapper (~88 bytes: shared_ptr,
|
||||
> mutex, unique_ptr to Recordable) plus `SpanData` (~250 bytes: SpanContext, timestamps, name, status,
|
||||
> empty containers) plus attribute storage (~200-500 bytes for 3-5 string attributes in a `std::map`).
|
||||
> Source: `sdk/src/trace/span.h` and `sdk/include/opentelemetry/sdk/trace/span_data.h`.
|
||||
> Queued spans release the wrapper, keeping only `SpanData` + attributes (~500 bytes).
|
||||
|
||||
### 3.5.3 Memory Growth Characteristics
|
||||
|
||||
```mermaid
|
||||
---
|
||||
config:
|
||||
xyChart:
|
||||
width: 700
|
||||
height: 400
|
||||
---
|
||||
xychart-beta
|
||||
title "Memory Usage vs Span Rate (bounded by queue limit)"
|
||||
x-axis "Spans/second" [0, 200, 400, 600, 800, 1000]
|
||||
y-axis "Memory (MB)" 0 --> 12
|
||||
line [8.5, 9.2, 9.6, 9.9, 10.0, 10.0]
|
||||
```
|
||||
|
||||
**Notes**:
|
||||
|
||||
- Memory increases with span rate but **plateaus at queue capacity** (default 2048 spans)
|
||||
- Batch export prevents unbounded growth
|
||||
- At queue limit, oldest spans are dropped (not blocked)
|
||||
- Maximum memory is bounded: ~8.3 MB static (dominated by worker thread stack) + 2048 queued spans x ~500 bytes (~1 MB) + active spans (~0.8 MB) ≈ **~10 MB ceiling**
|
||||
- The worker thread stack (~8 MB) is virtual memory; actual RSS depends on stack usage (typically much less)
|
||||
|
||||
### 3.5.4 Performance Data Sources

The overhead estimates in Sections 3.3-3.5 are derived from the following sources:

| Source                                           | What it covers                                        | URL                                                                                                                                        |
| ------------------------------------------------ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| OTel C++ SDK CI benchmarks (969 runs)            | Span creation, context activation, sampler overhead   | [Benchmark Dashboard](https://open-telemetry.github.io/opentelemetry-cpp/benchmarks/)                                                       |
| `api/test/trace/span_benchmark.cc`               | API-level span creation (~22 ns no-op)                | [Source](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/api/test/trace/span_benchmark.cc)                                    |
| `sdk/test/trace/sampler_benchmark.cc`            | SDK span creation with samplers (~1,000 ns AlwaysOn)  | [Source](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/sdk/test/trace/sampler_benchmark.cc)                                 |
| `sdk/include/.../span_data.h`                    | SpanData memory layout (~250 bytes base)              | [Source](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/sdk/include/opentelemetry/sdk/trace/span_data.h)                     |
| `sdk/src/trace/span.h`                           | Span wrapper memory layout (~88 bytes)                | [Source](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/sdk/src/trace/span.h)                                                |
| `sdk/include/.../batch_span_processor_options.h` | Default queue size (2048), batch size (512)           | [Source](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/sdk/include/opentelemetry/sdk/trace/batch_span_processor_options.h)  |
| `sdk/include/.../circular_buffer.h`              | CircularBuffer implementation (AtomicUniquePtr array) | [Source](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/sdk/include/opentelemetry/sdk/common/circular_buffer.h)              |
| OTLP proto definition                            | Serialized span size estimation                       | [Proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto)                           |

---

## 3.6 Network Overhead Analysis

### 3.6.1 Export Bandwidth

> **Bytes per span**: Estimates use ~500 bytes/span (conservative upper bound). OTLP protobuf analysis
> shows a typical span with 3-5 string attributes serializes to ~200-300 bytes raw; with gzip
> compression (~60-70% of raw) and batching (amortized headers), ~350 bytes/span is more realistic.
> The table uses the conservative estimate for capacity planning.

| Sampling Rate | Spans/sec | Bandwidth | Notes            |
| ------------- | --------- | --------- | ---------------- |
| 100%          | ~500      | ~250 KB/s | Development only |
| 10%           | ~50       | ~25 KB/s  | Staging          |
| 1%            | ~5        | ~2.5 KB/s | Production       |
| Error-only    | ~1        | ~0.5 KB/s | Minimal overhead |

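As a sanity check, each bandwidth figure is simply spans/sec times the ~500-byte conservative estimate from the note above:

```cpp
#include <cstdio>

// Bandwidth = spans/sec x ~500 bytes (the conservative estimate above).
constexpr double bytesPerSpan = 500.0;

constexpr double
bandwidthKBperSec(double spansPerSec)
{
    return spansPerSec * bytesPerSpan / 1000.0;
}

static_assert(bandwidthKBperSec(500) == 250.0, "100% sampling: ~250 KB/s");
static_assert(bandwidthKBperSec(50) == 25.0, "10% sampling: ~25 KB/s");
static_assert(bandwidthKBperSec(5) == 2.5, "1% sampling: ~2.5 KB/s");

int main()
{
    std::printf("ok\n");
    return 0;
}
```
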
### 3.6.2 Trace Context Propagation

| Message Type           | Context Size | Messages/sec | Overhead    |
| ---------------------- | ------------ | ------------ | ----------- |
| TMTransaction          | 25 bytes     | ~100         | ~2.5 KB/s   |
| TMProposeSet           | 25 bytes     | ~10          | ~250 B/s    |
| TMValidation           | 25 bytes     | ~50          | ~1.25 KB/s  |
| **Total P2P overhead** |              |              | **~4 KB/s** |

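The 25-byte figure is consistent with a binary trace context made of a 16-byte trace ID, an 8-byte parent span ID, and a 1-byte flags field (the W3C Trace Context field sizes). An illustrative wire-layout sketch; the struct is hypothetical, not rippled's actual protobuf message:

```cpp
#include <array>
#include <cstdint>

// Hypothetical wire layout for a propagated trace context.
struct TraceContextBytes
{
    std::array<std::uint8_t, 16> traceId;  // 128-bit trace ID
    std::array<std::uint8_t, 8> spanId;    // 64-bit parent span ID
    std::uint8_t flags;                    // sampled bit, etc.
};

static_assert(16 + 8 + 1 == 25, "trace ID + span ID + flags = 25 bytes per message");
```
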
---

## 3.7 Optimization Strategies

### 3.7.1 Sampling Strategies

#### Tail Sampling

```mermaid
flowchart TD
    trace["New Trace"]

    trace --> errors{"Is Error?"}
    errors -->|Yes| sample["SAMPLE"]
    errors -->|No| consensus{"Is Consensus?"}

    consensus -->|Yes| sample
    consensus -->|No| slow{"Is Slow?"}

    slow -->|Yes| sample
    slow -->|No| prob{"Random < 10%?"}

    prob -->|Yes| sample
    prob -->|No| drop["DROP"]

    style sample fill:#4caf50,stroke:#388e3c,color:#fff
    style drop fill:#f44336,stroke:#c62828,color:#fff
```

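A minimal sketch of the decision logic in the flowchart, as it might look in a test harness (the types and the 1-second slow threshold are illustrative; in the actual design this policy lives in the collector's `tail_sampling` processor, shown in Section 5.5.2):

```cpp
#include <random>

// Illustrative trace summary; the real decision operates on span data
// buffered by the collector before the sampling verdict.
struct TraceSummary
{
    bool hasError = false;
    bool isConsensus = false;
    double durationMs = 0.0;
};

bool
shouldSample(TraceSummary const& t, std::mt19937& rng)
{
    if (t.hasError)
        return true;  // always keep error traces
    if (t.isConsensus)
        return true;  // always keep consensus rounds
    if (t.durationMs > 1000.0)
        return true;  // keep slow traces
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    return dist(rng) < 0.10;  // 10% probabilistic tail
}
```
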
### 3.7.2 Batch Tuning Recommendations

| Environment        | Batch Size | Batch Delay | Max Queue |
| ------------------ | ---------- | ----------- | --------- |
| Low-latency        | 128        | 1000ms      | 512       |
| High-throughput    | 1024       | 10000ms     | 8192      |
| Memory-constrained | 256        | 2000ms      | 512       |

### 3.7.3 Conditional Instrumentation

```cpp
// Compile-time feature flag
#ifndef XRPL_ENABLE_TELEMETRY
// Zero-cost when disabled
#define XRPL_TRACE_SPAN(t, n) ((void)0)
#endif

// Runtime component filtering
if (telemetry.shouldTracePeer())
{
    XRPL_TRACE_SPAN(telemetry, "peer.message.receive");
    // ... instrumentation
}
// No overhead when component tracing disabled
```

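For contrast, a hedged sketch of what the enabled branch could expand to, assuming the `SpanGuard` RAII wrapper listed in Section 3.9.2 (the real macro body may differ; `startSpan()` is an assumed factory):

```cpp
// Hypothetical enabled-path expansion; the actual macro body may differ.
// startSpan() is an assumed factory returning a pointer-like RAII guard,
// which would make the `_xrpl_guard_->context()` access in Section 3.9.7 work.
#ifdef XRPL_ENABLE_TELEMETRY
#define XRPL_TRACE_SPAN(t, n) \
    auto _xrpl_guard_ = (t).startSpan(n)
#endif
```
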
---

## 3.8 Links to Detailed Documentation

- **[Code Samples](./04-code-samples.md)**: Complete implementation code for all components
- **[Configuration Reference](./05-configuration-reference.md)**: Configuration options and collector setup
- **[Implementation Phases](./06-implementation-phases.md)**: Detailed timeline and milestones

---

## 3.9 Code Intrusiveness Assessment

> **TxQ** = Transaction Queue

This section provides a detailed assessment of how intrusive the OpenTelemetry integration is to the existing rippled codebase.

### 3.9.1 Files Modified Summary

| Component             | Files Modified | Lines Added | Lines Changed | Architectural Impact |
| --------------------- | -------------- | ----------- | ------------- | -------------------- |
| **Core Telemetry**    | 7 new files    | ~800        | 0             | None (new module)    |
| **Application Init**  | 2 files        | ~30         | ~5            | Minimal              |
| **RPC Layer**         | 3 files        | ~80         | ~20           | Minimal              |
| **Transaction Relay** | 4 files        | ~120        | ~40           | Low                  |
| **Consensus**         | 3 files        | ~100        | ~30           | Low-Medium           |
| **Protocol Buffers**  | 1 file         | ~25         | 0             | Low                  |
| **CMake/Build**       | 3 files        | ~50         | ~10           | Minimal              |
| **PathFinding**       | 2 files        | ~80         | ~5            | Minimal              |
| **TxQ/Fee**           | 2 files        | ~60         | ~5            | Minimal              |
| **Validator/Amend**   | 3 files        | ~40         | ~5            | Minimal              |
| **Total**             | **~30 files**  | **~1,385**  | **~120**      | **Low**              |

### 3.9.2 Detailed File Impact

```mermaid
pie title Code Changes by Component
    "New Telemetry Module" : 800
    "Transaction Relay" : 160
    "Consensus" : 130
    "RPC Layer" : 100
    "PathFinding" : 80
    "TxQ/Fee" : 60
    "Validator/Amendment" : 40
    "Application Init" : 35
    "Protocol Buffers" : 25
    "Build System" : 60
```

#### New Files (No Impact on Existing Code)

| File                                           | Lines | Purpose              |
| ---------------------------------------------- | ----- | -------------------- |
| `include/xrpl/telemetry/Telemetry.h`           | ~160  | Main interface       |
| `include/xrpl/telemetry/SpanGuard.h`           | ~120  | RAII wrapper         |
| `include/xrpl/telemetry/TraceContext.h`        | ~80   | Context propagation  |
| `src/xrpld/telemetry/TracingInstrumentation.h` | ~60   | Macros               |
| `src/libxrpl/telemetry/Telemetry.cpp`          | ~200  | Implementation       |
| `src/libxrpl/telemetry/TelemetryConfig.cpp`    | ~60   | Config parsing       |
| `src/libxrpl/telemetry/NullTelemetry.cpp`      | ~40   | No-op implementation |

#### Modified Files (Existing Rippled Code)

| File                                              | Lines Added | Lines Changed | Risk Level |
| ------------------------------------------------- | ----------- | ------------- | ---------- |
| `src/xrpld/app/main/Application.cpp`              | ~15         | ~3            | Low        |
| `include/xrpl/core/ServiceRegistry.h`             | ~5          | ~2            | Low        |
| `src/xrpld/rpc/detail/ServerHandler.cpp`          | ~40         | ~10           | Low        |
| `src/xrpld/rpc/handlers/*.cpp`                    | ~30         | ~8            | Low        |
| `src/xrpld/overlay/detail/PeerImp.cpp`            | ~60         | ~15           | Medium     |
| `src/xrpld/overlay/detail/OverlayImpl.cpp`        | ~30         | ~10           | Medium     |
| `src/xrpld/app/consensus/RCLConsensus.cpp`        | ~50         | ~15           | Medium     |
| `src/xrpld/app/consensus/RCLConsensusAdaptor.cpp` | ~40         | ~12           | Medium     |
| `src/xrpld/core/JobQueue.cpp`                     | ~20         | ~5            | Low        |
| `src/xrpld/app/paths/PathRequest.cpp`             | ~40         | ~3            | Low        |
| `src/xrpld/app/paths/Pathfinder.cpp`              | ~40         | ~2            | Low        |
| `src/xrpld/app/misc/TxQ.cpp`                      | ~40         | ~3            | Low        |
| `src/xrpld/app/main/LoadManager.cpp`              | ~20         | ~2            | Low        |
| `src/xrpld/app/misc/ValidatorList.cpp`            | ~20         | ~2            | Low        |
| `src/xrpld/app/misc/AmendmentTable.cpp`           | ~10         | ~2            | Low        |
| `src/xrpld/app/misc/Manifest.cpp`                 | ~10         | ~1            | Low        |
| `src/xrpld/shamap/SHAMap.cpp`                     | ~20         | ~3            | Low        |
| `src/xrpld/overlay/detail/ripple.proto`           | ~25         | 0             | Low        |
| `CMakeLists.txt`                                  | ~40         | ~8            | Low        |
| `cmake/FindOpenTelemetry.cmake`                   | ~50         | 0             | None (new) |

### 3.9.3 Risk Assessment by Component

<div align="center">

**Do First** ↖ ↗ **Plan Carefully**

```mermaid
quadrantChart
    title Code Intrusiveness Risk Matrix
    x-axis Low Risk --> High Risk
    y-axis Low Value --> High Value

    RPC Tracing: [0.2, 0.55]
    Transaction Relay: [0.55, 0.85]
    Consensus Tracing: [0.75, 0.92]
    Peer Message Tracing: [0.85, 0.35]
    JobQueue Context: [0.3, 0.42]
    Ledger Acquisition: [0.48, 0.65]
    PathFinding: [0.38, 0.72]
    TxQ and Fees: [0.25, 0.62]
    Validator Mgmt: [0.15, 0.35]
```

**Optional** ↙ ↘ **Avoid**

</div>

#### Risk Level Definitions

| Risk Level | Definition                                                        | Mitigation                         |
| ---------- | ----------------------------------------------------------------- | ---------------------------------- |
| **Low**    | Additive changes only; no modification to existing logic          | Standard code review               |
| **Medium** | Minor modifications to existing functions; clear boundaries       | Comprehensive unit tests           |
| **High**   | Changes to core logic or data structures; potential side effects  | Integration tests + staged rollout |

### 3.9.4 Architectural Impact Assessment

| Aspect               | Impact  | Justification                                                                     |
| -------------------- | ------- | --------------------------------------------------------------------------------- |
| **Data Flow**        | Minimal | Read-only instrumentation; no modification to consensus or transaction data flow  |
| **Threading Model**  | Minimal | Context propagation uses thread-local storage (standard OTel pattern)             |
| **Memory Model**     | Low     | Bounded queues prevent unbounded growth; RAII ensures cleanup                      |
| **Network Protocol** | Low     | Optional fields in protobuf (high field numbers); backward compatible             |
| **Configuration**    | None    | New config section; existing configs unaffected                                   |
| **Build System**     | Low     | Optional CMake flag; builds work without OpenTelemetry                             |
| **Dependencies**     | Low     | OpenTelemetry SDK is optional; null implementation when disabled                   |

### 3.9.5 Backward Compatibility

| Compatibility   | Status  | Notes                                                 |
| --------------- | ------- | ----------------------------------------------------- |
| **Config File** | ✅ Full | New `[telemetry]` section is optional                 |
| **Protocol**    | ✅ Full | Optional protobuf fields with high field numbers      |
| **Build**       | ✅ Full | `XRPL_ENABLE_TELEMETRY=OFF` produces identical binary |
| **Runtime**     | ✅ Full | `enabled=0` produces zero overhead                    |
| **API**         | ✅ Full | No changes to public RPC or P2P APIs                  |

### 3.9.6 Rollback Strategy

If issues are discovered after deployment:

1. **Immediate**: Set `enabled=0` in config and restart (zero code change)
2. **Quick**: Rebuild with `XRPL_ENABLE_TELEMETRY=OFF`
3. **Complete**: Revert telemetry commits (clean separation makes this easy)

### 3.9.7 Code Change Examples

**Minimal RPC Instrumentation (Low Intrusiveness):**

```cpp
// Before
void ServerHandler::onRequest(...) {
    auto result = processRequest(req);
    send(result);
}

// After (only ~3 lines added to this handler)
void ServerHandler::onRequest(...) {
    XRPL_TRACE_RPC(app_.getTelemetry(), "rpc.request");  // +1 line
    XRPL_TRACE_SET_ATTR("xrpl.rpc.command", command);    // +1 line

    auto result = processRequest(req);

    XRPL_TRACE_SET_ATTR("xrpl.rpc.status", status);      // +1 line
    send(result);
}
```

**Consensus Instrumentation (Medium Intrusiveness):**

```cpp
// Before
void RCLConsensusAdaptor::startRound(...) {
    // ... existing logic
}

// After (context storage required)
void RCLConsensusAdaptor::startRound(...) {
    XRPL_TRACE_CONSENSUS(app_.getTelemetry(), "consensus.round");
    XRPL_TRACE_SET_ATTR("xrpl.consensus.ledger.seq", seq);

    // Store context for child spans in phase transitions.
    // _xrpl_guard_ is the span guard declared by the tracing macro above;
    // currentRoundContext_ is a new member variable.
    currentRoundContext_ = _xrpl_guard_->context();

    // ... existing logic unchanged
}
```

---

_Previous: [Design Decisions](./02-design-decisions.md)_ | _Next: [Code Samples](./04-code-samples.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_

1127 OpenTelemetryPlan/04-code-samples.md (new file; diff suppressed because it is too large)

964 OpenTelemetryPlan/05-configuration-reference.md (new file)
@@ -0,0 +1,964 @@

# Configuration Reference

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Code Samples](./04-code-samples.md) | [Implementation Phases](./06-implementation-phases.md)

---

## 5.1 rippled Configuration

> **OTLP** = OpenTelemetry Protocol | **TxQ** = Transaction Queue

### 5.1.1 Configuration File Section

Add to `cfg/xrpld-example.cfg`:

```ini
# ═══════════════════════════════════════════════════════════════════════════════
# TELEMETRY (OpenTelemetry Distributed Tracing)
# ═══════════════════════════════════════════════════════════════════════════════
#
# Enables distributed tracing for transaction flow, consensus, and RPC calls.
# Traces are exported to an OpenTelemetry Collector using the OTLP protocol.
#
# [telemetry]
#
# # Enable/disable telemetry (default: 0 = disabled)
# enabled=1
#
# # Exporter type: "otlp_grpc" (default), "otlp_http", or "none"
# exporter=otlp_grpc
#
# # OTLP endpoint (default: localhost:4317 for gRPC, localhost:4318 for HTTP)
# endpoint=localhost:4317
#
# # Use TLS for exporter connection (default: 0)
# use_tls=0
#
# # Path to CA certificate for TLS (optional)
# # tls_ca_cert=/path/to/ca.crt
#
# # Sampling ratio: 0.0-1.0 (default: 1.0 = 100% sampling).
# # For production deployments with high throughput, 0.1 (10%) is
# # recommended to reduce overhead. See Section 7.4.2 for sampling
# # strategy details.
# sampling_ratio=0.1
#
# # Batch processor settings
# batch_size=512        # Spans per batch (default: 512)
# batch_delay_ms=5000   # Max delay before sending batch (default: 5000)
# max_queue_size=2048   # Max queued spans (default: 2048)
#
# # Component-specific tracing (default: all enabled except peer)
# trace_transactions=1  # Transaction relay and processing
# trace_consensus=1     # Consensus rounds and proposals
# trace_rpc=1           # RPC request handling
# trace_peer=0          # Peer messages (high volume, disabled by default)
# trace_ledger=1        # Ledger acquisition and building
# trace_pathfind=1      # Path computation (can be expensive)
# trace_txq=1           # Transaction queue and fee escalation
# trace_validator=0     # Validator list and manifest updates (low volume)
# trace_amendment=0     # Amendment voting (very low volume)
#
# # Service identification (automatically detected if not specified)
# # service_name=rippled
# # service_instance_id=<node_public_key>

[telemetry]
enabled=0
```

### 5.1.2 Configuration Options Summary

| Option                | Type   | Default          | Description                               |
| --------------------- | ------ | ---------------- | ----------------------------------------- |
| `enabled`             | bool   | `false`          | Enable/disable telemetry                  |
| `exporter`            | string | `"otlp_grpc"`    | Exporter type: otlp_grpc, otlp_http, none |
| `endpoint`            | string | `localhost:4317` | OTLP collector endpoint                   |
| `use_tls`             | bool   | `false`          | Enable TLS for exporter connection        |
| `tls_ca_cert`         | string | `""`             | Path to CA certificate file               |
| `sampling_ratio`      | float  | `1.0`            | Sampling ratio (0.0-1.0)                  |
| `batch_size`          | uint   | `512`            | Spans per export batch                    |
| `batch_delay_ms`      | uint   | `5000`           | Max delay before sending batch (ms)       |
| `max_queue_size`      | uint   | `2048`           | Maximum queued spans                      |
| `trace_transactions`  | bool   | `true`           | Enable transaction tracing                |
| `trace_consensus`     | bool   | `true`           | Enable consensus tracing                  |
| `trace_rpc`           | bool   | `true`           | Enable RPC tracing                        |
| `trace_peer`          | bool   | `false`          | Enable peer message tracing (high volume) |
| `trace_ledger`        | bool   | `true`           | Enable ledger tracing                     |
| `trace_pathfind`      | bool   | `true`           | Enable path computation tracing           |
| `trace_txq`           | bool   | `true`           | Enable transaction queue tracing          |
| `trace_validator`     | bool   | `false`          | Enable validator list/manifest tracing    |
| `trace_amendment`     | bool   | `false`          | Enable amendment voting tracing           |
| `service_name`        | string | `"rippled"`      | Service name for traces                   |
| `service_instance_id` | string | `<node_pubkey>`  | Instance identifier                       |

---

## 5.2 Configuration Parser

> **TxQ** = Transaction Queue

```cpp
// src/libxrpl/telemetry/TelemetryConfig.cpp

#include <xrpl/telemetry/Telemetry.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/contract.h>  // Throw<>

namespace xrpl {
namespace telemetry {

Telemetry::Setup
setup_Telemetry(
    Section const& section,
    std::string const& nodePublicKey,
    std::string const& version)
{
    Telemetry::Setup setup;

    // Basic settings
    setup.enabled = section.value_or("enabled", false);
    setup.serviceName = section.value_or("service_name", "rippled");
    setup.serviceVersion = version;
    setup.serviceInstanceId = section.value_or(
        "service_instance_id", nodePublicKey);

    // Exporter settings
    setup.exporterType = section.value_or("exporter", "otlp_grpc");

    if (setup.exporterType == "otlp_grpc")
        setup.exporterEndpoint = section.value_or("endpoint", "localhost:4317");
    else if (setup.exporterType == "otlp_http")
        setup.exporterEndpoint = section.value_or("endpoint", "localhost:4318");

    setup.useTls = section.value_or("use_tls", false);
    setup.tlsCertPath = section.value_or("tls_ca_cert", "");

    // Sampling
    setup.samplingRatio = section.value_or("sampling_ratio", 1.0);
    if (setup.samplingRatio < 0.0 || setup.samplingRatio > 1.0)
    {
        Throw<std::runtime_error>(
            "telemetry.sampling_ratio must be between 0.0 and 1.0");
    }

    // Batch processor
    setup.batchSize = section.value_or("batch_size", 512u);
    setup.batchDelay = std::chrono::milliseconds{
        section.value_or("batch_delay_ms", 5000u)};
    setup.maxQueueSize = section.value_or("max_queue_size", 2048u);

    // Component filtering
    setup.traceTransactions = section.value_or("trace_transactions", true);
    setup.traceConsensus = section.value_or("trace_consensus", true);
    setup.traceRpc = section.value_or("trace_rpc", true);
    setup.tracePeer = section.value_or("trace_peer", false);
    setup.traceLedger = section.value_or("trace_ledger", true);
    setup.tracePathfind = section.value_or("trace_pathfind", true);
    setup.traceTxQ = section.value_or("trace_txq", true);
    setup.traceValidator = section.value_or("trace_validator", false);
    setup.traceAmendment = section.value_or("trace_amendment", false);

    return setup;
}

}  // namespace telemetry
}  // namespace xrpl
```

---

## 5.3 Application Integration

### 5.3.1 ApplicationImp Changes

> **Deferred identity**: The node public key (`nodeIdentity_`) is not
> available during `ApplicationImp`'s member initializer list — it is
> resolved later in `setup()`. The `Telemetry` object is therefore
> constructed with an empty `serviceInstanceId` and patched via
> `setServiceInstanceId()` once `setup()` has called `getNodeIdentity()`.

```cpp
// src/xrpld/app/main/Application.cpp (modified)

#include <xrpl/telemetry/Telemetry.h>

class ApplicationImp : public Application, public BasicApp
{
    // ... existing members (perfLog_, etc.) ...

    // Telemetry — constructed in the member initializer list with
    // an empty serviceInstanceId, patched in setup().
    std::unique_ptr<telemetry::Telemetry> telemetry_;

    // Member initializer list (excerpt):
    // ...
    // , telemetry_(
    //       telemetry::make_Telemetry(
    //           telemetry::setup_Telemetry(
    //               config_->section("telemetry"),
    //               "",  // Updated later via setServiceInstanceId()
    //               BuildInfo::getVersionString()),
    //           logs_->journal("Telemetry")))
    // ...

    bool setup(...) override
    {
        // ... existing setup code ...

        nodeIdentity_ = getNodeIdentity(*this, cmdline);

        // Inject node identity into telemetry resource attributes,
        // unless the user already set a custom service_instance_id.
        if (!config_->section("telemetry").exists("service_instance_id"))
            telemetry_->setServiceInstanceId(
                toBase58(TokenType::NodePublic, nodeIdentity_->first));

        // ... rest of setup ...
    }

    void start(bool withTimers) override
    {
        // ... existing start code ...
        telemetry_->start();
    }

    void run() override
    {
        // ... existing run/shutdown code ...
        telemetry_->stop();
    }

    telemetry::Telemetry&
    getTelemetry() override
    {
        return *telemetry_;
    }
};
```

### 5.3.2 ServiceRegistry Interface Addition

```cpp
// include/xrpl/core/ServiceRegistry.h (modified)

namespace telemetry {
class Telemetry;
}  // namespace telemetry

class ServiceRegistry
{
public:
    // ... existing virtual methods ...

    /** Get the telemetry system for distributed tracing. */
    virtual telemetry::Telemetry&
    getTelemetry() = 0;
};
```

> **Note:** `Application` extends `ServiceRegistry`, so `getTelemetry()` is
> available on both. Components that hold a `ServiceRegistry&` (e.g.
> `NetworkOPsImp`) call `registry_.get().getTelemetry()`. Components that
> still hold an `Application&` (e.g. `ServerHandler`, `PeerImp`,
> `RCLConsensusAdaptor`) call `app_.getTelemetry()` directly.

---

## 5.4 CMake Integration

> **OTLP** = OpenTelemetry Protocol

### 5.4.1 Find OpenTelemetry Module

```cmake
# cmake/FindOpenTelemetry.cmake

# Find OpenTelemetry C++ SDK
#
# This module defines:
#   OpenTelemetry_FOUND               - System has OpenTelemetry
#   OpenTelemetry::api                - API library target
#   OpenTelemetry::sdk                - SDK library target
#   OpenTelemetry::otlp_grpc_exporter - OTLP gRPC exporter target
#   OpenTelemetry::otlp_http_exporter - OTLP HTTP exporter target

find_package(opentelemetry-cpp CONFIG QUIET)

if(opentelemetry-cpp_FOUND)
  set(OpenTelemetry_FOUND TRUE)

  # Create imported targets if not already created by config
  if(NOT TARGET OpenTelemetry::api)
    add_library(OpenTelemetry::api ALIAS opentelemetry-cpp::api)
  endif()
  if(NOT TARGET OpenTelemetry::sdk)
    add_library(OpenTelemetry::sdk ALIAS opentelemetry-cpp::sdk)
  endif()
  if(NOT TARGET OpenTelemetry::otlp_grpc_exporter)
    add_library(OpenTelemetry::otlp_grpc_exporter ALIAS
      opentelemetry-cpp::otlp_grpc_exporter)
  endif()
else()
  # Try pkg-config fallback
  find_package(PkgConfig QUIET)
  if(PKG_CONFIG_FOUND)
    pkg_check_modules(OTEL opentelemetry-cpp QUIET)
    if(OTEL_FOUND)
      set(OpenTelemetry_FOUND TRUE)
      # Create imported targets from pkg-config
      add_library(OpenTelemetry::api INTERFACE IMPORTED)
      target_include_directories(OpenTelemetry::api INTERFACE
        ${OTEL_INCLUDE_DIRS})
    endif()
  endif()
endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(OpenTelemetry
  REQUIRED_VARS OpenTelemetry_FOUND)
```

### 5.4.2 CMakeLists.txt Changes

```cmake
# CMakeLists.txt (additions)

# ═══════════════════════════════════════════════════════════════════════════════
# TELEMETRY OPTIONS
# ═══════════════════════════════════════════════════════════════════════════════

option(XRPL_ENABLE_TELEMETRY
  "Enable OpenTelemetry distributed tracing support" OFF)

if(XRPL_ENABLE_TELEMETRY)
  find_package(OpenTelemetry REQUIRED)

  # Define compile-time flag
  add_compile_definitions(XRPL_ENABLE_TELEMETRY)

  message(STATUS "OpenTelemetry tracing: ENABLED")
else()
  message(STATUS "OpenTelemetry tracing: DISABLED")
endif()

# ═══════════════════════════════════════════════════════════════════════════════
# TELEMETRY LIBRARY
# ═══════════════════════════════════════════════════════════════════════════════

if(XRPL_ENABLE_TELEMETRY)
  add_library(xrpl_telemetry
    src/libxrpl/telemetry/Telemetry.cpp
    src/libxrpl/telemetry/TelemetryConfig.cpp
    src/libxrpl/telemetry/TraceContext.cpp
  )

  target_include_directories(xrpl_telemetry
    PUBLIC
      ${CMAKE_CURRENT_SOURCE_DIR}/include
  )

  target_link_libraries(xrpl_telemetry
    PUBLIC
      OpenTelemetry::api
      OpenTelemetry::sdk
      OpenTelemetry::otlp_grpc_exporter
    PRIVATE
      xrpl_basics
  )

  # Add to main library dependencies
  target_link_libraries(xrpld PRIVATE xrpl_telemetry)
else()
  # Create null implementation library
  add_library(xrpl_telemetry
    src/libxrpl/telemetry/NullTelemetry.cpp
  )
  target_include_directories(xrpl_telemetry
    PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include
  )
endif()
```

---

## 5.5 OpenTelemetry Collector Configuration

> **OTLP** = OpenTelemetry Protocol | **APM** = Application Performance Monitoring

### 5.5.1 Development Configuration

```yaml
# otel-collector-dev.yaml
# Minimal configuration for local development

receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318

processors:
  batch:
    timeout: 1s
    send_batch_size: 100

exporters:
  # Console output for debugging
  logging:
    verbosity: detailed
    sampling_initial: 5
    sampling_thereafter: 200

  # Tempo for trace visualization
  otlp/tempo:
    endpoint: tempo:4317
    tls:
      insecure: true

service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [batch]
      exporters: [logging, otlp/tempo]
```

### 5.5.2 Production Configuration

```yaml
# otel-collector-prod.yaml
# Production configuration with filtering, sampling, and multiple backends

receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
        tls:
          cert_file: /etc/otel/server.crt
          key_file: /etc/otel/server.key
          ca_file: /etc/otel/ca.crt

processors:
  # Memory limiter to prevent OOM
  memory_limiter:
    check_interval: 1s
    limit_mib: 1000
    spike_limit_mib: 200

  # Batch processing for efficiency
  batch:
    timeout: 5s
    send_batch_size: 512
    send_batch_max_size: 1024

  # Tail-based sampling (keep errors and slow traces)
  tail_sampling:
    decision_wait: 10s
    num_traces: 100000
    expected_new_traces_per_sec: 1000
    policies:
      # Always keep error traces
      - name: errors
        type: status_code
        status_code:
          status_codes: [ERROR]
      # Keep slow consensus rounds (>5s)
      - name: slow-consensus
        type: latency
        latency:
          threshold_ms: 5000
      # Keep slow RPC requests (>1s)
      - name: slow-rpc
        type: and
        and:
          and_sub_policy:
            - name: rpc-spans
              type: string_attribute
              string_attribute:
                key: xrpl.rpc.command
                values: [".*"]
                enabled_regex_matching: true
            - name: latency
              type: latency
              latency:
                threshold_ms: 1000
      # Probabilistic sampling for the rest
      - name: probabilistic
        type: probabilistic
        probabilistic:
          sampling_percentage: 10

  # Attribute processing
  attributes:
    actions:
      # Hash sensitive data
      - key: xrpl.tx.account
        action: hash
      # Add deployment info
      - key: deployment.environment
        value: production
        action: upsert

exporters:
  # Grafana Tempo for long-term storage
  otlp/tempo:
    endpoint: tempo.monitoring:4317
    tls:
      insecure: false
      ca_file: /etc/otel/tempo-ca.crt

  # Elastic APM for correlation with logs
  otlp/elastic:
    endpoint: apm.elastic:8200
    headers:
      Authorization: "Bearer ${ELASTIC_APM_TOKEN}"

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679

service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [otlp]
      processors: [memory_limiter, tail_sampling, attributes, batch]
      exporters: [otlp/tempo, otlp/elastic]
```

---

## 5.6 Docker Compose Development Environment

> **OTLP** = OpenTelemetry Protocol

```yaml
# docker-compose-telemetry.yaml
version: "3.8"

services:
  # OpenTelemetry Collector
  otel-collector:
    image: otel/opentelemetry-collector-contrib:0.92.0
    container_name: otel-collector
    command: ["--config=/etc/otel-collector-config.yaml"]
    volumes:
      - ./otel-collector-dev.yaml:/etc/otel-collector-config.yaml:ro
    ports:
      - "4317:4317" # OTLP gRPC
      - "4318:4318" # OTLP HTTP
      - "13133:13133" # Health check
    depends_on:
      - tempo

  # Tempo for trace visualization
  tempo:
    image: grafana/tempo:2.6.1
    container_name: tempo
    ports:
      - "3200:3200" # Tempo HTTP API
      - "4317" # OTLP gRPC (internal)

  # Grafana for dashboards
  grafana:
    image: grafana/grafana:10.2.3
    container_name: grafana
    environment:
      - GF_AUTH_ANONYMOUS_ENABLED=true
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
    volumes:
      - ./grafana/provisioning:/etc/grafana/provisioning:ro
      - ./grafana/dashboards:/var/lib/grafana/dashboards:ro
    ports:
      - "3000:3000"
    depends_on:
      - tempo

  # Prometheus for metrics (optional, for correlation)
  prometheus:
    image: prom/prometheus:v2.48.1
    container_name: prometheus
    volumes:
      - ./prometheus.yaml:/etc/prometheus/prometheus.yml:ro
    ports:
      - "9090:9090"

networks:
  default:
    name: rippled-telemetry
```

---

## 5.7 Configuration Architecture

> **OTLP** = OpenTelemetry Protocol

```mermaid
flowchart TB
    subgraph config["Configuration Sources"]
        cfgFile["xrpld.cfg<br/>[telemetry] section"]
        cmake["CMake<br/>XRPL_ENABLE_TELEMETRY"]
    end

    subgraph init["Initialization"]
        parse["setup_Telemetry()"]
        factory["make_Telemetry()"]
    end

    subgraph runtime["Runtime Components"]
        tracer["TracerProvider"]
        exporter["OTLP Exporter"]
        processor["BatchProcessor"]
    end

    subgraph collector["Collector Pipeline"]
        recv["Receivers"]
        proc["Processors"]
        exp["Exporters"]
    end

    cfgFile --> parse
    cmake -->|"compile flag"| parse
    parse --> factory
    factory --> tracer
    tracer --> processor
    processor --> exporter
    exporter -->|"OTLP"| recv
    recv --> proc
    proc --> exp

    style config fill:#e3f2fd,stroke:#1976d2
    style runtime fill:#e8f5e9,stroke:#388e3c
    style collector fill:#fff3e0,stroke:#ff9800
```

**Reading the diagram:**

- **Configuration Sources**: `xrpld.cfg` provides runtime settings (endpoint, sampling) while the CMake flag controls whether telemetry is compiled in at all.
- **Initialization**: `setup_Telemetry()` parses config values, then `make_Telemetry()` constructs the provider, processor, and exporter objects.
- **Runtime Components**: The `TracerProvider` creates spans, the `BatchProcessor` buffers them, and the `OTLP Exporter` serializes and sends them over the wire.
- **OTLP arrow to Collector**: Trace data leaves the rippled process via OTLP (gRPC or HTTP) and enters the external Collector pipeline.
- **Collector Pipeline**: `Receivers` ingest OTLP data, `Processors` apply sampling/filtering/enrichment, and `Exporters` forward traces to storage backends (Tempo, etc.).

---

## 5.8 Grafana Integration

> **APM** = Application Performance Monitoring

Step-by-step instructions for integrating rippled traces with Grafana.

### 5.8.1 Data Source Configuration

#### Tempo (Recommended)

```yaml
# grafana/provisioning/datasources/tempo.yaml
apiVersion: 1

datasources:
  - name: Tempo
    type: tempo
    access: proxy
    url: http://tempo:3200
    jsonData:
      httpMethod: GET
      tracesToLogs:
        datasourceUid: loki
        tags: ["service.name", "xrpl.tx.hash"]
        mappedTags: [{ key: "trace_id", value: "traceID" }]
        mapTagNamesEnabled: true
        filterByTraceID: true
      serviceMap:
        datasourceUid: prometheus
      nodeGraph:
        enabled: true
      search:
        hide: false
      lokiSearch:
        datasourceUid: loki
```

#### Elastic APM

```yaml
# grafana/provisioning/datasources/elastic-apm.yaml
apiVersion: 1

datasources:
  - name: Elasticsearch-APM
    type: elasticsearch
    access: proxy
    url: http://elasticsearch:9200
    database: "apm-*"
    jsonData:
      esVersion: "8.0.0"
      timeField: "@timestamp"
      logMessageField: message
      logLevelField: log.level
```

### 5.8.2 Dashboard Provisioning

```yaml
# grafana/provisioning/dashboards/dashboards.yaml
apiVersion: 1

providers:
  - name: "rippled-dashboards"
    orgId: 1
    folder: "rippled"
    folderUid: "rippled"
    type: file
    disableDeletion: false
    updateIntervalSeconds: 30
    options:
      path: /var/lib/grafana/dashboards/rippled
```

### 5.8.3 Example Dashboard: RPC Performance

```json
{
  "title": "rippled RPC Performance",
  "uid": "rippled-rpc-performance",
  "panels": [
    {
      "title": "RPC Latency by Command",
      "type": "heatmap",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && span.xrpl.rpc.command != \"\"} | histogram_over_time(duration) by (span.xrpl.rpc.command)"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 }
    },
    {
      "title": "RPC Error Rate",
      "type": "timeseries",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && status.code=error} | rate() by (span.xrpl.rpc.command)"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 }
    },
    {
      "title": "Top 10 Slowest RPC Commands",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && span.xrpl.rpc.command != \"\"} | avg(duration) by (span.xrpl.rpc.command) | topk(10)"
        }
      ],
      "gridPos": { "h": 8, "w": 24, "x": 0, "y": 8 }
    },
    {
      "title": "Recent Traces",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\"}"
        }
      ],
      "gridPos": { "h": 8, "w": 24, "x": 0, "y": 16 }
    }
  ]
}
```

### 5.8.4 Example Dashboard: Transaction Tracing

```json
{
  "title": "rippled Transaction Tracing",
  "uid": "rippled-tx-tracing",
  "panels": [
    {
      "title": "Transaction Throughput",
      "type": "stat",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.receive\"} | rate()"
        }
      ],
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 0 }
    },
    {
      "title": "Cross-Node Relay Count",
      "type": "timeseries",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.relay\"} | avg(span.xrpl.tx.relay_count)"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 4 }
    },
    {
      "title": "Transaction Validation Errors",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.validate\" && status.code=error}"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 4 }
    }
  ]
}
```

### 5.8.5 TraceQL Query Examples

Common queries for rippled traces:

```
# Find all traces for a specific transaction hash
{resource.service.name="rippled" && span.xrpl.tx.hash="ABC123..."}

# Find slow RPC commands (>100ms)
{resource.service.name="rippled" && name=~"rpc.command.*"} | duration > 100ms

# Find consensus rounds taking >5 seconds
{resource.service.name="rippled" && name="consensus.round"} | duration > 5s

# Find failed transactions with error details
{resource.service.name="rippled" && name="tx.validate" && status.code=error}

# Find transactions relayed to many peers
{resource.service.name="rippled" && name="tx.relay"} | span.xrpl.tx.relay_count > 10

# Compare latency across nodes
{resource.service.name="rippled" && name="rpc.command.account_info"} | avg(duration) by (resource.service.instance.id)
```

### 5.8.6 Correlation with PerfLog

To correlate OpenTelemetry traces with existing PerfLog data:

**Step 1: Configure Loki to ingest PerfLog**

```yaml
# promtail-config.yaml
scrape_configs:
  - job_name: rippled-perflog
    static_configs:
      - targets:
          - localhost
        labels:
          job: rippled
          __path__: /var/log/rippled/perf*.log
    pipeline_stages:
      - json:
          expressions:
            trace_id: trace_id
            ledger_seq: ledger_seq
            tx_hash: tx_hash
      - labels:
          trace_id:
          ledger_seq:
          tx_hash:
```

**Step 2: Add trace_id to PerfLog entries**

Modify PerfLog to include trace_id when available:

```cpp
// In PerfLog output, add trace_id from current span context
void logPerf(Json::Value& entry) {
    auto span = opentelemetry::trace::GetSpan(
        opentelemetry::context::RuntimeContext::GetCurrent());
    if (span && span->GetContext().IsValid()) {
        // TraceId::ToLowerBase16 fills a 32-char buffer (16 bytes, hex-encoded).
        char traceIdHex[32];
        span->GetContext().trace_id().ToLowerBase16(traceIdHex);
        entry["trace_id"] = std::string(traceIdHex, 32);
    }
    // ... existing logging
}
```

**Step 3: Configure Grafana trace-to-logs link**

In Tempo data source configuration, set up the derived field:

```yaml
jsonData:
  tracesToLogs:
    datasourceUid: loki
    tags: ["trace_id", "xrpl.tx.hash"]
    filterByTraceID: true
    filterBySpanID: false
```

### 5.8.7 Correlation with Insight/StatsD Metrics

To correlate traces with existing Beast Insight metrics:

**Step 1: Export Insight metrics to Prometheus**

```yaml
# prometheus.yaml
scrape_configs:
  - job_name: "rippled-statsd"
    static_configs:
      - targets: ["statsd-exporter:9102"]
```

**Step 2: Add exemplars to metrics**

OpenTelemetry SDK automatically adds exemplars (trace IDs) to metrics when using the Prometheus exporter. This links metrics spikes to specific traces.

**Step 3: Configure Grafana metric-to-trace link**

```yaml
# In Prometheus data source
jsonData:
  exemplarTraceIdDestinations:
    - name: trace_id
      datasourceUid: tempo
```

**Step 4: Dashboard panel with exemplars**

```json
{
  "title": "RPC Latency with Trace Links",
  "type": "timeseries",
  "datasource": "Prometheus",
  "targets": [
    {
      "expr": "histogram_quantile(0.99, rate(rippled_rpc_duration_seconds_bucket[5m]))",
      "exemplar": true
    }
  ]
}
```

This allows clicking on metric data points to jump directly to the related trace.

---

_Previous: [Code Samples](./04-code-samples.md)_ | _Next: [Implementation Phases](./06-implementation-phases.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_

575 OpenTelemetryPlan/06-implementation-phases.md (new file)
@@ -0,0 +1,575 @@

# Implementation Phases

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Configuration Reference](./05-configuration-reference.md) | [Observability Backends](./07-observability-backends.md)

---

## 6.1 Phase Overview

> **TxQ** = Transaction Queue

```mermaid
gantt
    title OpenTelemetry Implementation Timeline
    dateFormat YYYY-MM-DD
    axisFormat Week %W

    section Phase 1
    Core Infrastructure :p1, 2024-01-01, 2w
    SDK Integration :p1a, 2024-01-01, 4d
    Telemetry Interface :p1b, after p1a, 3d
    Configuration & CMake :p1c, after p1b, 3d
    Unit Tests :p1d, after p1c, 2d
    Buffer & Integration :p1e, after p1d, 2d

    section Phase 2
    RPC Tracing :p2, after p1, 2w
    HTTP Context Extraction :p2a, after p1, 2d
    RPC Handler Instrumentation :p2b, after p2a, 4d
    PathFinding Instrumentation :p2f, after p2b, 2d
    TxQ Instrumentation :p2g, after p2f, 2d
    WebSocket Support :p2c, after p2g, 2d
    Integration Tests :p2d, after p2c, 2d
    Buffer & Review :p2e, after p2d, 4d

    section Phase 3
    Transaction Tracing :p3, after p2, 2w
    Protocol Buffer Extension :p3a, after p2, 2d
    PeerImp Instrumentation :p3b, after p3a, 3d
    Fee Escalation Instrumentation :p3f, after p3b, 2d
    Relay Context Propagation :p3c, after p3f, 3d
    Multi-node Tests :p3d, after p3c, 2d
    Buffer & Review :p3e, after p3d, 4d

    section Phase 4
    Consensus Tracing :p4, after p3, 2w
    Consensus Round Spans :p4a, after p3, 3d
    Proposal Handling :p4b, after p4a, 3d
    Validator List & Manifest Tracing :p4f, after p4b, 2d
    Amendment Voting Tracing :p4g, after p4f, 2d
    SHAMap Sync Tracing :p4h, after p4g, 2d
    Validation Tests :p4c, after p4h, 4d
    Buffer & Review :p4e, after p4c, 4d

    section Phase 5
    Documentation & Deploy :p5, after p4, 1w
```

---

## 6.2 Phase 1: Core Infrastructure (Weeks 1-2)

**Objective**: Establish foundational telemetry infrastructure

### Tasks

| Task | Description                                           |
| ---- | ----------------------------------------------------- |
| 1.1  | Add OpenTelemetry C++ SDK to Conan/CMake              |
| 1.2  | Implement `Telemetry` interface and factory           |
| 1.3  | Implement `SpanGuard` RAII wrapper                    |
| 1.4  | Implement configuration parser                        |
| 1.5  | Integrate into `ApplicationImp`                       |
| 1.6  | Add conditional compilation (`XRPL_ENABLE_TELEMETRY`) |
| 1.7  | Create `NullTelemetry` no-op implementation           |
| 1.8  | Unit tests for core infrastructure                    |

### Exit Criteria

- [ ] OpenTelemetry SDK compiles and links
- [ ] Telemetry can be enabled/disabled via config
- [ ] Basic span creation works
- [ ] No performance regression when disabled (see the `NullTelemetry` sketch below)
- [ ] Unit tests passing

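Task 1.7's `NullTelemetry` is what makes the "no regression when disabled" criterion cheap to meet. A minimal sketch of the shape it might take; the `Telemetry` base-class methods shown are assumptions for illustration, not the final interface:

```cpp
#include <string>

// Hypothetical interface excerpt; the real Telemetry API may differ.
class Telemetry
{
public:
    virtual ~Telemetry() = default;
    virtual void start() = 0;
    virtual void stop() = 0;
    virtual void setServiceInstanceId(std::string const&) = 0;
};

// No-op implementation used when telemetry is disabled or compiled out.
// Every method body is empty, so calls optimize away to nothing.
class NullTelemetry final : public Telemetry
{
public:
    void start() override {}
    void stop() override {}
    void setServiceInstanceId(std::string const&) override {}
};
```
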
---

## 6.3 Phase 2: RPC Tracing (Weeks 3-4)

> **TxQ** = Transaction Queue

**Objective**: Complete tracing for all RPC operations

### Tasks

| Task | Description                                                                |
| ---- | -------------------------------------------------------------------------- |
| 2.1  | Implement W3C Trace Context HTTP header extraction                         |
| 2.2  | Instrument `ServerHandler::onRequest()`                                    |
| 2.3  | Instrument `RPCHandler::doCommand()`                                       |
| 2.4  | Add RPC-specific attributes                                                |
| 2.5  | Instrument WebSocket handler                                               |
| 2.6  | PathFinding instrumentation (`pathfind.request`, `pathfind.compute` spans) |
| 2.7  | TxQ instrumentation (`txq.enqueue`, `txq.apply` spans)                     |
| 2.8  | Integration tests for RPC tracing                                          |
| 2.9  | Performance benchmarks                                                     |
| 2.10 | Documentation                                                              |

### Exit Criteria

- [ ] All RPC commands traced
- [ ] Trace context propagates from HTTP headers
- [ ] WebSocket and HTTP both instrumented
- [ ] <1ms overhead per RPC call
- [ ] Integration tests passing

---

## 6.4 Phase 3: Transaction Tracing (Weeks 5-6)

**Objective**: Trace transaction lifecycle across network

### Tasks

| Task | Description                                          |
| ---- | ---------------------------------------------------- |
| 3.1  | Define `TraceContext` Protocol Buffer message        |
| 3.2  | Implement protobuf context serialization             |
| 3.3  | Instrument `PeerImp::handleTransaction()`            |
| 3.4  | Instrument `NetworkOPs::submitTransaction()`         |
| 3.5  | Instrument HashRouter integration                    |
| 3.6  | Fee escalation instrumentation (`fee.escalate` span) |
| 3.7  | Implement relay context propagation                  |
| 3.8  | Integration tests (multi-node)                       |
| 3.9  | Performance benchmarks                               |

### Exit Criteria

- [ ] Transaction traces span across nodes
- [ ] Trace context in Protocol Buffer messages
- [ ] HashRouter deduplication visible in traces
- [ ] Multi-node integration tests passing
- [ ] <5% overhead on transaction throughput

---

## 6.5 Phase 4: Consensus Tracing (Weeks 7-8)

**Objective**: Full observability into consensus rounds

### Tasks

| Task | Description                                    |
| ---- | ---------------------------------------------- |
| 4.1  | Instrument `RCLConsensusAdaptor::startRound()` |
| 4.2  | Instrument phase transitions                   |
| 4.3  | Instrument proposal handling                   |
| 4.4  | Instrument validation handling                 |
| 4.5  | Add consensus-specific attributes              |
| 4.6  | Correlate with transaction traces              |
| 4.7  | Validator list and manifest tracing            |
| 4.8  | Amendment voting tracing                       |
| 4.9  | SHAMap sync tracing                            |
| 4.10 | Multi-validator integration tests              |
| 4.11 | Performance validation                         |

### Exit Criteria

- [x] Complete consensus round traces
- [x] Phase transitions visible
- [x] Proposals and validations traced
- [x] No impact on consensus timing
- [ ] Multi-validator test network validated

### Implementation Status — Phase 4a Complete

Phase 4a (establish-phase gap fill & cross-node correlation) adds:

- **Deterministic trace ID** derived from `previousLedger.id()` so all validators
  in the same round share the same `trace_id` (switchable via the
  `consensus_trace_strategy` config option: `"deterministic"` or `"attribute"`).
  The option will be documented in the [Configuration Reference](./05-configuration-reference.md)
  as part of the Phase 4a implementation. A sketch of the derivation follows this list.
- **Round lifecycle spans**: `consensus.round` with round-to-round span links.
- **Establish phase**: `consensus.establish`, `consensus.update_positions` (with
  `dispute.resolve` events), `consensus.check` (with threshold tracking).
- **Mode changes**: `consensus.mode_change` spans.
- **Validation**: `consensus.validation.send` with span link to round span
  (thread-safe cross-thread access via `roundSpanContext_` snapshot).
- **Separation of concerns**: telemetry extracted to private helpers
  (`startRoundTracing`, `createValidationSpan`, `startEstablishTracing`,
  `updateEstablishTracing`, `endEstablishTracing`).

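A hedged sketch of how a deterministic trace ID could be derived from the previous ledger hash (the OpenTelemetry `TraceId` construction is the SDK's real API; truncating the 256-bit ledger hash to its first 16 bytes is an assumption about the implementation, not confirmed code):

```cpp
#include <opentelemetry/trace/trace_id.h>

#include <array>
#include <cstdint>

// Illustrative: derive a 128-bit trace ID from a 256-bit ledger hash so
// every validator in the round computes the same trace_id independently.
// Taking the first 16 bytes is an assumption, not confirmed code.
opentelemetry::trace::TraceId
traceIdFromLedgerHash(std::array<std::uint8_t, 32> const& ledgerHash)
{
    // TraceId is constructed from a 16-byte span.
    return opentelemetry::trace::TraceId(
        opentelemetry::nostd::span<const std::uint8_t, 16>(
            ledgerHash.data(), 16));
}
```
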
See [Phase4_taskList.md](./Phase4_taskList.md) for the full spec and implementation notes.

---

## 6.6 Phase 5: Documentation & Deployment (Week 9)

**Objective**: Production readiness

### Tasks

| Task | Description                   |
| ---- | ----------------------------- |
| 5.1  | Operator runbook              |
| 5.2  | Grafana dashboards            |
| 5.3  | Alert definitions             |
| 5.4  | Collector deployment examples |
| 5.5  | Developer documentation       |
| 5.6  | Training materials            |
| 5.7  | Final integration testing     |

---

## 6.7 Risk Assessment

```mermaid
quadrantChart
    title Risk Assessment Matrix
    x-axis Low Impact --> High Impact
    y-axis Low Likelihood --> High Likelihood
    quadrant-1 Mitigate Immediately
    quadrant-2 Plan Mitigation
    quadrant-3 Accept Risk
    quadrant-4 Monitor Closely

    SDK Compat: [0.2, 0.18]
    Protocol Chg: [0.75, 0.72]
    Perf Overhead: [0.58, 0.42]
    Context Prop: [0.4, 0.55]
    Memory Leaks: [0.85, 0.25]
```

### Risk Details

| Risk                                 | Likelihood | Impact | Mitigation                               |
| ------------------------------------ | ---------- | ------ | ---------------------------------------- |
| Protocol changes break compatibility | Medium     | High   | Use high field numbers, optional fields  |
| Performance overhead unacceptable    | Medium     | Medium | Sampling, conditional compilation        |
| Context propagation complexity       | Medium     | Medium | Phased rollout, extensive testing        |
| SDK compatibility issues             | Low        | Medium | Pin SDK version, fallback to no-op       |
| Memory leaks in long-running nodes   | Low        | High   | Memory profiling, bounded queues         |

---

## 6.8 Success Metrics

| Metric                   | Target                                                         | Measurement           |
| ------------------------ | -------------------------------------------------------------- | --------------------- |
| Trace coverage           | >95% of transaction code paths (independent of sampling ratio) | Sampling verification |
| CPU overhead             | <3%                                                            | Benchmark tests       |
| Memory overhead          | <10 MB                                                         | Memory profiling      |
| Latency impact (p99)     | <2%                                                            | Performance tests     |
| Trace completeness       | >99% spans with required attrs                                 | Validation script     |
| Cross-node trace linkage | >90% of multi-hop transactions                                 | Integration tests     |

---
|
||||
|
||||
## 6.9 Quick Wins and Crawl-Walk-Run Strategy
|
||||
|
||||
> **TxQ** = Transaction Queue

This section outlines a prioritized approach to maximize ROI with minimal initial investment.

### 6.9.1 Crawl-Walk-Run Overview

<div align="center">

```mermaid
flowchart TB
    subgraph crawl["🐢 CRAWL (Weeks 1-2)"]
        direction LR
        c1[Core SDK Setup] ~~~ c2[RPC Tracing Only] ~~~ c3[PathFinding + TxQ Tracing] ~~~ c4[Single Node]
    end

    subgraph walk["🚶 WALK (Weeks 3-5)"]
        direction LR
        w1[Transaction Tracing] ~~~ w2[Fee Escalation Tracing] ~~~ w3[Cross-Node Context] ~~~ w4[Basic Dashboards]
    end

    subgraph run["🏃 RUN (Weeks 6-9)"]
        direction LR
        r1[Consensus Tracing] ~~~ r2[Validator, Amendment,<br/>SHAMap Tracing] ~~~ r3[Full Correlation] ~~~ r4[Production Deploy]
    end

    crawl --> walk --> run

    style crawl fill:#1b5e20,stroke:#0d3d14,color:#fff
    style walk fill:#bf360c,stroke:#8c2809,color:#fff
    style run fill:#0d47a1,stroke:#082f6a,color:#fff
    style c1 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style c2 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style c3 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style c4 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style w1 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
    style w2 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
    style w3 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
    style w4 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
    style r1 fill:#0d47a1,stroke:#082f6a,color:#fff
    style r2 fill:#0d47a1,stroke:#082f6a,color:#fff
    style r3 fill:#0d47a1,stroke:#082f6a,color:#fff
    style r4 fill:#0d47a1,stroke:#082f6a,color:#fff
```

</div>

**Reading the diagram:**

- **CRAWL (Weeks 1-2)**: Minimal investment -- set up the SDK, instrument RPC and PathFinding/TxQ handlers, and verify on a single node. Delivers immediate latency visibility.
- **WALK (Weeks 3-5)**: Expand to transaction lifecycle tracing, fee escalation, cross-node context propagation, and basic Grafana dashboards. This is where tracing becomes truly distributed.
- **RUN (Weeks 6-9)**: Full consensus instrumentation, validator/amendment/SHAMap tracing, end-to-end correlation, and production deployment with sampling and alerting.
- **Arrows (crawl → walk → run)**: Each phase builds on the prior one; you cannot skip ahead because later phases depend on infrastructure established earlier.

### 6.9.2 Quick Wins (Immediate Value)

| Quick Win | Value | When to Deploy |
| ------------------------------ | ------ | -------------- |
| **RPC Command Tracing** | High | Week 2 |
| **RPC Latency Histograms** | High | Week 2 |
| **Error Rate Dashboard** | Medium | Week 2 |
| **Transaction Submit Tracing** | High | Week 3 |
| **Consensus Round Duration** | Medium | Week 6 |

### 6.9.3 CRAWL Phase (Weeks 1-2)

**Goal**: Get basic tracing working with minimal code changes.

**What You Get**:

- RPC request/response traces for all commands
- Latency breakdown per RPC command
- PathFinding and TxQ tracing (directly impacts RPC latency)
- Error visibility with stack traces
- Basic Grafana dashboard

**Code Changes**: ~15 lines in `ServerHandler.cpp`, ~40 lines in new telemetry module

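To make the "~15 lines" estimate concrete, here is a minimal sketch of the kind of hook involved. The `SpanGuard`/`telemetry` names follow the interfaces proposed in [04-code-samples.md](./04-code-samples.md); the handler shape below is simplified for illustration and is not the actual `ServerHandler` code.

```cpp
// Illustrative sketch only -- simplified handler shape, assuming the
// SpanGuard/telemetry interfaces proposed in 04-code-samples.md.
Json::Value
processRequest(Json::Value const& request)
{
    // One span per RPC command, named per the <component>.<operation>
    // convention (e.g. "rpc.command.submit").
    telemetry::SpanGuard span(
        "rpc.command." + request["command"].asString());
    span.setAttribute("rpc.method", request["command"].asString());

    Json::Value result = invokeHandler(request);  // existing dispatch path

    // Mark failures so error-rate dashboards can filter on status.
    if (result.isMember("error"))
        span.setError(result["error"].asString());
    return result;
}
```
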
**Why Start Here**:

- RPC is the lowest-risk, highest-visibility component
- PathFinding and TxQ are RPC-adjacent and directly affect latency
- Immediate value for debugging client issues
- No cross-node complexity
- Single file modification to existing code

### 6.9.4 WALK Phase (Weeks 3-5)

**Goal**: Add transaction lifecycle tracing across nodes.

**What You Get**:

- End-to-end transaction traces from submit to relay
- Fee escalation tracing within the transaction pipeline
- Cross-node correlation (follow a transaction's path across nodes)
- HashRouter deduplication visibility
- Relay latency metrics

**Code Changes**: ~120 lines across 4 files, plus protobuf extension

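The protobuf extension is what makes cross-node correlation possible. As a rough sketch of the sending side -- the `trace_context` field and the `telemetry` helpers below are placeholders for the actual extension defined in [04-code-samples.md](./04-code-samples.md):

```cpp
// Illustrative sketch only: "trace_context" is a hypothetical protobuf
// field, and activeSpan()/toTraceparent() are placeholder helpers.
void
relayToPeers(protocol::TMTransaction& msg)
{
    if (auto span = telemetry::activeSpan())
    {
        // Serialize the active context in W3C traceparent form
        // ("00-<trace_id>-<span_id>-<flags>") so the receiving node
        // can continue the same trace when it handles the relay.
        msg.set_trace_context(telemetry::toTraceparent(span->context()));
    }
    sendToAllPeers(msg);
}
```
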
**Why Do This Second**:

- Builds on RPC tracing (transactions submitted via RPC)
- Fee escalation is integral to the transaction processing pipeline
- Moderate complexity (requires context propagation)
- High value for debugging transaction issues

### 6.9.5 RUN Phase (Weeks 6-9)

**Goal**: Full observability including consensus.

**What You Get**:

- Complete consensus round visibility
- Phase transition timing
- Validator proposal tracking
- Validator list and manifest tracing
- Amendment voting tracing
- SHAMap sync tracing
- Full end-to-end traces (client → RPC → TX → consensus → ledger)

**Code Changes**: ~100 lines across 3 consensus files, plus validator/amendment/SHAMap modules

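For orientation, the intended span shape for consensus is a `consensus.round` root with one child per phase -- the same names the dashboards in [07-observability-backends.md](./07-observability-backends.md) query. A rough sketch; the `telemetry` helpers are placeholders, and the real `RCLConsensus` integration points are covered in [04-code-samples.md](./04-code-samples.md):

```cpp
// Illustrative sketch only: placeholder telemetry helpers, not the
// actual RCLConsensus integration.
void
onRoundStart(std::uint32_t ledgerSeq)
{
    // Root span for the round; phase spans below become its children.
    roundSpan_ = telemetry::startSpan("consensus.round");
    roundSpan_.setAttribute("xrpl.consensus.ledger.seq", ledgerSeq);
}

void
onPhaseChange(std::string const& phase)  // "open", "establish", "accepted"
{
    // e.g. "consensus.phase.establish" -- matching the consensus.phase.*
    // TraceQL queries used by the dashboard examples.
    phaseSpan_ =
        telemetry::startChildSpan(roundSpan_, "consensus.phase." + phase);
}
```
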
**Why Do This Last**:

- Highest complexity (consensus is critical path)
- Validator, amendment, and SHAMap components are lower priority
- Requires thorough testing
- Lower relative value (consensus issues are rarer)

### 6.9.6 ROI Prioritization Matrix

```mermaid
quadrantChart
    title Implementation ROI Matrix
    x-axis Low Effort --> High Effort
    y-axis Low Value --> High Value
    quadrant-1 Quick Wins - Do First
    quadrant-2 Major Projects - Plan Carefully
    quadrant-3 Nice to Have - Optional
    quadrant-4 Time Sinks - Avoid

    RPC Tracing: [0.15, 0.92]
    TX Submit Trace: [0.3, 0.78]
    TX Relay Trace: [0.5, 0.88]
    Consensus Trace: [0.72, 0.72]
    Peer Msg Trace: [0.85, 0.3]
    Ledger Acquire: [0.55, 0.52]
```

---

## 6.10 Definition of Done

> **TxQ** = Transaction Queue | **HA** = High Availability

Clear, measurable criteria for each phase.

### 6.10.1 Phase 1: Core Infrastructure

| Criterion | Measurement | Target |
| --------------- | ---------------------------------------------------------- | ---------------------------- |
| SDK Integration | `cmake --build` succeeds with `-DXRPL_ENABLE_TELEMETRY=ON` | ✅ Compiles |
| Runtime Toggle | `enabled=0` produces zero overhead | <0.1% CPU difference |
| Span Creation | Unit test creates and exports span | Span appears in Tempo |
| Configuration | All config options parsed correctly | Config validation tests pass |
| Documentation | Developer guide exists | PR approved |

**Definition of Done**: All criteria met, PR merged, no regressions in CI.

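The "zero overhead" criterion is usually met with a two-level guard: the build flag compiles instrumentation out entirely, and a runtime check reduces it to a single branch when built in but disabled. A hypothetical shape for the macros in `TracingInstrumentation.h` -- the real definitions live in [04-code-samples.md](./04-code-samples.md):

```cpp
// Hypothetical macro shape, not the actual TracingInstrumentation.h.
#if defined(XRPL_ENABLE_TELEMETRY)
// Built in: costs one predictable branch when [telemetry] enabled=0.
#define XRPL_TRACE_SPAN(var, name)    \
    telemetry::SpanGuard var =        \
        telemetry::isEnabled()        \
        ? telemetry::startSpan(name)  \
        : telemetry::SpanGuard::noop()
#else
// Built out: the macro expands to nothing, so callers pay nothing.
#define XRPL_TRACE_SPAN(var, name)
#endif
```
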
### 6.10.2 Phase 2: RPC Tracing

| Criterion | Measurement | Target |
| ------------------ | ---------------------------------- | -------------------------- |
| Coverage | All RPC commands instrumented | 100% of commands |
| Context Extraction | traceparent header propagates | Integration test passes |
| Attributes | Command, status, duration recorded | Validation script confirms |
| Performance | RPC latency overhead | <1ms p99 |
| Dashboard | Grafana dashboard deployed | Screenshot in docs |

**Definition of Done**: RPC traces visible in Tempo for all commands, dashboard shows latency distribution.

### 6.10.3 Phase 3: Transaction Tracing

| Criterion | Measurement | Target |
| ---------------- | ------------------------------- | ---------------------------------- |
| Local Trace | Submit → validate → TxQ traced | Single-node test passes |
| Cross-Node | Context propagates via protobuf | Multi-node test passes |
| Relay Visibility | relay_count attribute correct | Spot check 100 txs |
| HashRouter | Deduplication visible in trace | Duplicate txs show suppressed=true |
| Performance | TX throughput overhead | <5% degradation |

**Definition of Done**: Transaction traces span 3+ nodes in test network, performance within bounds.

### 6.10.4 Phase 4: Consensus Tracing

| Criterion | Measurement | Target |
| -------------------- | ----------------------------- | ------------------------- |
| Round Tracing | startRound creates root span | Unit test passes |
| Phase Visibility | All phases have child spans | Integration test confirms |
| Proposer Attribution | Proposer ID in attributes | Spot check 50 rounds |
| Timing Accuracy | Phase durations match PerfLog | <5% variance |
| No Consensus Impact | Round timing unchanged | Performance test passes |

**Definition of Done**: Consensus rounds fully traceable, no impact on consensus timing.

### 6.10.5 Phase 5: Production Deployment

| Criterion | Measurement | Target |
| ------------ | ---------------------------- | -------------------------- |
| Collector HA | Multiple collectors deployed | No single point of failure |
| Sampling | Tail sampling configured | 10% base + errors + slow |
| Retention | Data retained per policy | 7 days hot, 30 days warm |
| Alerting | Alerts configured | Error spike, high latency |
| Runbook | Operator documentation | Approved by ops team |
| Training | Team trained | Session completed |

**Definition of Done**: Telemetry running in production, operators trained, alerts active.

### 6.10.6 Success Metrics Summary

| Phase | Primary Metric | Secondary Metric | Deadline |
| ------- | ---------------------- | --------------------------- | ------------- |
| Phase 1 | SDK compiles and runs | Zero overhead when disabled | End of Week 2 |
| Phase 2 | 100% RPC coverage | <1ms latency overhead | End of Week 4 |
| Phase 3 | Cross-node traces work | <5% throughput impact | End of Week 6 |
| Phase 4 | Consensus fully traced | No consensus timing impact | End of Week 8 |
| Phase 5 | Production deployment | Operators trained | End of Week 9 |

---

## 6.11 Recommended Implementation Order

Based on ROI analysis, implement in this exact order:

```mermaid
flowchart TB
    subgraph week1["Week 1"]
        t1[1. OpenTelemetry SDK<br/>Conan/CMake integration]
        t2[2. Telemetry interface<br/>SpanGuard, config]
    end

    subgraph week2["Week 2"]
        t3[3. RPC ServerHandler<br/>instrumentation]
        t4[4. Basic Tempo setup<br/>for testing]
    end

    subgraph week3["Week 3"]
        t5[5. Transaction submit<br/>tracing]
        t6[6. Grafana dashboard<br/>v1]
    end

    subgraph week4["Week 4"]
        t7[7. Protobuf context<br/>extension]
        t8[8. PeerImp tx.relay<br/>instrumentation]
    end

    subgraph week5["Week 5"]
        t9[9. Multi-node<br/>integration tests]
        t10[10. Performance<br/>benchmarks]
    end

    subgraph week6_8["Weeks 6-8"]
        t11[11. Consensus<br/>instrumentation]
        t12[12. Full integration<br/>testing]
    end

    subgraph week9["Week 9"]
        t13[13. Production<br/>deployment]
        t14[14. Documentation<br/>& training]
    end

    t1 --> t2 --> t3 --> t4
    t4 --> t5 --> t6
    t6 --> t7 --> t8
    t8 --> t9 --> t10
    t10 --> t11 --> t12
    t12 --> t13 --> t14

    style week1 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style week2 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style week3 fill:#bf360c,stroke:#8c2809,color:#fff
    style week4 fill:#bf360c,stroke:#8c2809,color:#fff
    style week5 fill:#bf360c,stroke:#8c2809,color:#fff
    style week6_8 fill:#0d47a1,stroke:#082f6a,color:#fff
    style week9 fill:#4a148c,stroke:#2e0d57,color:#fff
    style t1 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style t2 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style t3 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style t4 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style t5 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
    style t6 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
    style t7 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
    style t8 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
    style t9 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
    style t10 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
    style t11 fill:#0d47a1,stroke:#082f6a,color:#fff
    style t12 fill:#0d47a1,stroke:#082f6a,color:#fff
    style t13 fill:#4a148c,stroke:#2e0d57,color:#fff
    style t14 fill:#4a148c,stroke:#2e0d57,color:#fff
```

**Reading the diagram:**

- **Week 1 (tasks 1-2)**: Foundation work -- integrate the OpenTelemetry SDK via Conan/CMake and build the `Telemetry` interface with `SpanGuard` and config parsing.
- **Week 2 (tasks 3-4)**: First observable output -- instrument `ServerHandler` for RPC tracing and stand up Tempo so developers can see traces immediately.
- **Weeks 3-5 (tasks 5-10)**: Transaction lifecycle -- add submit tracing, build the first Grafana dashboard, extend protobuf for cross-node context, instrument `PeerImp` relay, then validate with multi-node integration tests and performance benchmarks.
- **Weeks 6-8 (tasks 11-12)**: Consensus deep-dive -- instrument consensus rounds and phases, then run full integration testing across all instrumented paths.
- **Week 9 (tasks 13-14)**: Go-live -- deploy to production with sampling/alerting configured, and deliver documentation and operator training.
- **Arrow chain (t1 → ... → t14)**: Strict sequential dependency; each task's output is a prerequisite for the next.

---

_Previous: [Configuration Reference](./05-configuration-reference.md)_ | _Next: [Observability Backends](./07-observability-backends.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_

641
OpenTelemetryPlan/07-observability-backends.md
Normal file
@@ -0,0 +1,641 @@
# Observability Backend Recommendations

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Implementation Phases](./06-implementation-phases.md) | [Appendix](./08-appendix.md)

---

## 7.1 Development/Testing Backends

> **OTLP** = OpenTelemetry Protocol

| Backend | Pros | Cons | Use Case |
| ---------- | ----------------------------------- | ---------------------- | ------------------- |
| **Tempo** | Cost-effective, Grafana integration | Requires Grafana stack | Local dev, CI, Prod |
| **Zipkin** | Simple, lightweight | Basic features | Quick prototyping |

### Quick Start with Tempo

```bash
# Start Tempo with OTLP support
docker run -d --name tempo \
  -p 3200:3200 \
  -p 4317:4317 \
  -p 4318:4318 \
  grafana/tempo:2.6.1
```

---

## 7.2 Production Backends

> **APM** = Application Performance Monitoring

| Backend | Pros | Cons | Use Case |
| ----------------- | ----------------------------------------- | ---------------------- | --------------------------- |
| **Grafana Tempo** | Cost-effective, Grafana integration | Requires Grafana stack | Most production deployments |
| **Elastic APM** | Full observability stack, log correlation | Resource intensive | Existing Elastic users |
| **Honeycomb** | Excellent queries, high-cardinality support | SaaS cost | Deep debugging needs |
| **Datadog APM** | Full platform, easy setup | SaaS cost | Enterprise with budget |

### Backend Selection Flowchart

```mermaid
flowchart TD
    start[Select Backend] --> budget{Budget<br/>Constraints?}

    budget -->|Yes| oss[Open Source]
    budget -->|No| saas{Prefer<br/>SaaS?}

    oss --> existing{Existing<br/>Stack?}
    existing -->|Grafana| tempo[Grafana Tempo]
    existing -->|Elastic| elastic[Elastic APM]
    existing -->|None| tempo

    saas -->|Yes| enterprise{Enterprise<br/>Support?}
    saas -->|No| oss

    enterprise -->|Yes| datadog[Datadog APM]
    enterprise -->|No| honeycomb[Honeycomb]

    tempo --> final[Configure Collector]
    elastic --> final
    honeycomb --> final
    datadog --> final

    style start fill:#0f172a,stroke:#020617,color:#fff
    style budget fill:#334155,stroke:#1e293b,color:#fff
    style oss fill:#1e293b,stroke:#0f172a,color:#fff
    style existing fill:#334155,stroke:#1e293b,color:#fff
    style saas fill:#334155,stroke:#1e293b,color:#fff
    style enterprise fill:#334155,stroke:#1e293b,color:#fff
    style final fill:#0f172a,stroke:#020617,color:#fff
    style tempo fill:#1b5e20,stroke:#0d3d14,color:#fff
    style elastic fill:#bf360c,stroke:#8c2809,color:#fff
    style honeycomb fill:#0d47a1,stroke:#082f6a,color:#fff
    style datadog fill:#4a148c,stroke:#2e0d57,color:#fff
```

**Reading the diagram:**

- **Budget Constraints? (Yes)**: Leads to open-source options. If you already run Grafana or Elastic, pick the matching backend; otherwise default to Grafana Tempo.
- **Budget Constraints? (No) → Prefer SaaS?**: If you want a managed service, choose between Datadog (enterprise support) and Honeycomb (developer-focused). If not, fall back to open-source.
- **Terminal nodes (Tempo / Elastic / Honeycomb / Datadog)**: Each represents a concrete backend choice, all of which feed into the same final step.
- **Configure Collector**: Regardless of backend, you always finish by configuring the OTel Collector to export to your chosen destination.

---

## 7.3 Recommended Production Architecture

> **OTLP** = OpenTelemetry Protocol | **APM** = Application Performance Monitoring | **HA** = High Availability

```mermaid
flowchart TB
    subgraph validators["Validator Nodes"]
        v1[rippled<br/>Validator 1]
        v2[rippled<br/>Validator 2]
    end

    subgraph stock["Stock Nodes"]
        s1[rippled<br/>Stock 1]
        s2[rippled<br/>Stock 2]
    end

    subgraph collector["OTel Collector Cluster"]
        c1[Collector<br/>DC1]
        c2[Collector<br/>DC2]
    end

    subgraph backends["Storage Backends"]
        tempo[(Grafana<br/>Tempo)]
        elastic[(Elastic<br/>APM)]
        archive[(S3/GCS<br/>Archive)]
    end

    subgraph ui["Visualization"]
        grafana[Grafana<br/>Dashboards]
    end

    v1 -->|OTLP| c1
    v2 -->|OTLP| c1
    s1 -->|OTLP| c2
    s2 -->|OTLP| c2

    c1 --> tempo
    c1 --> elastic
    c2 --> tempo
    c2 --> archive

    tempo --> grafana
    elastic --> grafana

    %% Note: simplified single-collector-per-DC topology shown for clarity

    style validators fill:#b71c1c,stroke:#7f1d1d,color:#ffffff
    style stock fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style collector fill:#bf360c,stroke:#8c2809,color:#ffffff
    style backends fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style ui fill:#4a148c,stroke:#2e0d57,color:#ffffff
```

**Reading the diagram:**

- **Validator / Stock Nodes**: All rippled nodes emit trace data via OTLP. Validators and stock nodes are grouped separately because they may reside in different network zones.
- **Collector Cluster (DC1, DC2)**: Regional collectors receive OTLP from nodes in their datacenter, apply processing (sampling, enrichment), and fan out to multiple backends.
- **Storage Backends**: Tempo and Elastic provide queryable trace storage; S3/GCS Archive provides long-term cold storage for compliance or post-incident analysis.
- **Grafana Dashboards**: The single visualization layer that queries both Tempo and Elastic, giving operators a unified view of all traces.
- **Data flow direction**: Nodes → Collectors → Storage → Grafana. Each arrow represents a network hop; minimizing collector-to-backend hops reduces latency.

> **Note**: Production deployments should use multiple collector instances behind a load balancer for high availability. The diagram shows a simplified single-collector topology for clarity.

---

## 7.4 Architecture Considerations

### 7.4.1 Collector Placement

| Strategy | Description | Pros | Cons |
| ------------- | -------------------- | ------------------------ | ----------------------- |
| **Sidecar** | Collector per node | Isolation, simple config | Resource overhead |
| **DaemonSet** | Collector per host | Shared resources | Complexity |
| **Gateway** | Central collector(s) | Centralized processing | Single point of failure |

**Recommendation**: Use the **Gateway** pattern with regional collectors for rippled networks:

- One collector cluster per datacenter/region
- Tail-based sampling at collector level
- Multiple export destinations for redundancy

### 7.4.2 Sampling Strategy

```mermaid
flowchart LR
    subgraph head["Head Sampling (Node)"]
        hs[Node-level head sampling<br/>configurable, default: 100%<br/>recommended production: 10%]
    end

    subgraph tail["Tail Sampling (Collector)"]
        ts1[Keep all errors]
        ts2[Keep slow >5s]
        ts3[Keep 10% rest]
    end

    head --> tail

    ts1 --> final[Final Traces]
    ts2 --> final
    ts3 --> final

    style head fill:#0d47a1,stroke:#082f6a,color:#fff
    style tail fill:#1b5e20,stroke:#0d3d14,color:#fff
    style hs fill:#0d47a1,stroke:#082f6a,color:#fff
    style ts1 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style ts2 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style ts3 fill:#1b5e20,stroke:#0d3d14,color:#fff
    style final fill:#bf360c,stroke:#8c2809,color:#fff
```

**Reading the diagram:**

- **Head Sampling (Node)**: The first filter -- each rippled node decides whether to sample a trace at creation time (default 100%, recommended 10% in production). This controls the volume leaving the node.
- **Tail Sampling (Collector)**: The second filter -- the collector inspects completed traces and applies rules: keep all errors, keep anything slower than 5 seconds, and keep 10% of the remainder.
- **Arrow head → tail**: All head-sampled traces flow to the collector, where tail sampling further reduces volume while preserving the most valuable data.
- **Final Traces**: The output after both sampling stages; this is what gets stored and queried. The two-stage approach balances cost with debuggability.

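On the node side, the head-sampling stage maps directly onto the OpenTelemetry C++ SDK's built-in samplers. A minimal sketch, assuming the ratio is passed in from the `[telemetry]` configuration:

```cpp
#include <opentelemetry/sdk/trace/samplers/parent.h>
#include <opentelemetry/sdk/trace/samplers/trace_id_ratio.h>

#include <memory>

// ParentBased keeps decisions consistent across node boundaries: a span
// whose (possibly remote) parent was sampled stays sampled, while new
// root spans are sampled at the configured ratio (e.g. 0.10 in production).
std::unique_ptr<opentelemetry::sdk::trace::Sampler>
makeHeadSampler(double ratio)
{
    using namespace opentelemetry::sdk::trace;
    return std::make_unique<ParentBasedSampler>(
        std::make_shared<TraceIdRatioBasedSampler>(ratio));
}
```

The tail-sampling rules (errors, >5s, 10% of the rest) live in the collector, not in rippled; see the collector configurations in [05-configuration-reference.md](./05-configuration-reference.md).
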
### 7.4.3 Data Retention

| Environment | Hot Storage | Warm Storage | Cold Archive |
| ----------- | ----------- | ------------ | ------------ |
| Development | 24 hours | N/A | N/A |
| Staging | 7 days | N/A | N/A |
| Production | 7 days | 30 days | many years |

---

## 7.5 Integration Checklist

- [ ] Choose primary backend (Tempo recommended for cost/features)
- [ ] Deploy collector cluster with high availability
- [ ] Configure tail-based sampling for error/latency traces
- [ ] Set up Grafana dashboards for trace visualization
- [ ] Configure alerts for trace anomalies
- [ ] Establish data retention policies
- [ ] Test trace correlation with logs and metrics

---

## 7.6 Grafana Dashboard Examples

Pre-built dashboards for rippled observability.

### 7.6.1 Consensus Health Dashboard

```json
{
  "title": "rippled Consensus Health",
  "uid": "rippled-consensus-health",
  "tags": ["rippled", "consensus", "tracing"],
  "panels": [
    {
      "title": "Consensus Round Duration",
      "type": "timeseries",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"consensus.round\"} | avg(duration) by (resource.service.instance.id)"
        }
      ],
      "fieldConfig": {
        "defaults": {
          "unit": "ms",
          "thresholds": {
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 4000 },
              { "color": "red", "value": 5000 }
            ]
          }
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 }
    },
    {
      "title": "Phase Duration Breakdown",
      "type": "barchart",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=~\"consensus.phase.*\"} | avg(duration) by (name)"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 }
    },
    {
      "title": "Proposers per Round",
      "type": "stat",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"consensus.round\"} | avg(span.xrpl.consensus.proposers)"
        }
      ],
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 8 }
    },
    {
      "title": "Recent Slow Rounds (>5s)",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"consensus.round\"} | duration > 5s"
        }
      ],
      "gridPos": { "h": 8, "w": 24, "x": 0, "y": 12 }
    }
  ]
}
```

### 7.6.2 Node Overview Dashboard

```json
{
  "title": "rippled Node Overview",
  "uid": "rippled-node-overview",
  "panels": [
    {
      "title": "Active Nodes",
      "type": "stat",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\"} | count_over_time() by (resource.service.instance.id) | count()"
        }
      ],
      "gridPos": { "h": 4, "w": 4, "x": 0, "y": 0 }
    },
    {
      "title": "Total Transactions (1h)",
      "type": "stat",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.receive\"} | count()"
        }
      ],
      "gridPos": { "h": 4, "w": 4, "x": 4, "y": 0 }
    },
    {
      "title": "Error Rate",
      "type": "gauge",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && status.code=error} | rate() / {resource.service.name=\"rippled\"} | rate() * 100"
        }
      ],
      "fieldConfig": {
        "defaults": {
          "unit": "percent",
          "max": 10,
          "thresholds": {
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 1 },
              { "color": "red", "value": 5 }
            ]
          }
        }
      },
      "gridPos": { "h": 4, "w": 4, "x": 8, "y": 0 }
    },
    {
      "title": "Service Map",
      "type": "nodeGraph",
      "datasource": "Tempo",
      "gridPos": { "h": 12, "w": 12, "x": 12, "y": 0 }
    }
  ]
}
```

### 7.6.3 Alert Rules

```yaml
# grafana/provisioning/alerting/rippled-alerts.yaml
apiVersion: 1

groups:
  - name: rippled-tracing-alerts
    folder: rippled
    interval: 1m
    rules:
      - uid: consensus-slow
        title: Consensus Round Slow
        condition: A
        data:
          - refId: A
            datasourceUid: tempo
            model:
              queryType: traceql
              query: '{resource.service.name="rippled" && name="consensus.round"} | avg(duration) > 5s'
              # Note: Verify TraceQL aggregate queries are supported by your
              # Tempo version. Aggregate alerting (e.g., avg(duration)) requires
              # Tempo 2.3+ with TraceQL metrics enabled.
        for: 5m
        annotations:
          summary: Consensus rounds taking >5 seconds
          description: "Consensus duration: {{ $value }}ms"
        labels:
          severity: warning

      - uid: rpc-error-spike
        title: RPC Error Rate Spike
        condition: B
        data:
          - refId: B
            datasourceUid: tempo
            model:
              queryType: traceql
              query: '{resource.service.name="rippled" && name=~"rpc.command.*" && status.code=error} | rate() > 0.05'
              # Note: Verify TraceQL aggregate queries are supported by your
              # Tempo version. Aggregate alerting (e.g., rate()) requires
              # Tempo 2.3+ with TraceQL metrics enabled.
        for: 2m
        annotations:
          summary: RPC error rate >5%
        labels:
          severity: critical

      - uid: tx-throughput-drop
        title: Transaction Throughput Drop
        condition: C
        data:
          - refId: C
            datasourceUid: tempo
            model:
              queryType: traceql
              query: '{resource.service.name="rippled" && name="tx.receive"} | rate() < 10'
        for: 10m
        annotations:
          summary: Transaction throughput below threshold
        labels:
          severity: warning
```

---

## 7.7 PerfLog and Insight Correlation

> **OTLP** = OpenTelemetry Protocol

How to correlate OpenTelemetry traces with existing rippled observability.

### 7.7.1 Correlation Architecture

```mermaid
flowchart TB
    subgraph rippled["rippled Node"]
        otel[OpenTelemetry<br/>Spans]
        perflog[PerfLog<br/>JSON Logs]
        insight[Beast Insight<br/>StatsD Metrics]
    end

    subgraph collectors["Data Collection"]
        otelc[OTel Collector]
        promtail[Promtail/Fluentd]
        statsd[StatsD Exporter]
    end

    subgraph storage["Storage"]
        tempo[(Tempo)]
        loki[(Loki)]
        prom[(Prometheus)]
    end

    subgraph grafana["Grafana"]
        traces[Trace View]
        logs[Log View]
        metrics[Metrics View]
        corr[Correlation<br/>Panel]
    end

    otel -->|OTLP| otelc --> tempo
    perflog -->|JSON| promtail --> loki
    insight -->|StatsD| statsd --> prom

    tempo --> traces
    loki --> logs
    prom --> metrics

    traces --> corr
    logs --> corr
    metrics --> corr

    style rippled fill:#0d47a1,stroke:#082f6a,color:#fff
    style collectors fill:#bf360c,stroke:#8c2809,color:#fff
    style storage fill:#1b5e20,stroke:#0d3d14,color:#fff
    style grafana fill:#4a148c,stroke:#2e0d57,color:#fff
    style otel fill:#0d47a1,stroke:#082f6a,color:#fff
    style perflog fill:#0d47a1,stroke:#082f6a,color:#fff
    style insight fill:#0d47a1,stroke:#082f6a,color:#fff
    style otelc fill:#bf360c,stroke:#8c2809,color:#fff
    style promtail fill:#bf360c,stroke:#8c2809,color:#fff
    style statsd fill:#bf360c,stroke:#8c2809,color:#fff
    style tempo fill:#1b5e20,stroke:#0d3d14,color:#fff
    style loki fill:#1b5e20,stroke:#0d3d14,color:#fff
    style prom fill:#1b5e20,stroke:#0d3d14,color:#fff
    style traces fill:#4a148c,stroke:#2e0d57,color:#fff
    style logs fill:#4a148c,stroke:#2e0d57,color:#fff
    style metrics fill:#4a148c,stroke:#2e0d57,color:#fff
    style corr fill:#4a148c,stroke:#2e0d57,color:#fff
```

**Reading the diagram:**

- **rippled Node (three sources)**: A single node emits three independent data streams -- OpenTelemetry spans, PerfLog JSON logs, and Beast Insight StatsD metrics.
- **Data Collection layer**: Each stream has its own collector -- OTel Collector for spans, Promtail/Fluentd for logs, and a StatsD exporter for metrics. They operate independently.
- **Storage layer (Tempo, Loki, Prometheus)**: Each data type lands in a purpose-built store optimized for its query patterns (trace search, log grep, metric aggregation).
- **Grafana Correlation Panel**: The key integration point -- Grafana queries all three stores and links them via shared fields (`trace_id`, `xrpl.tx.hash`, `ledger_seq`), enabling a single-pane debugging experience.

### 7.7.2 Correlation Fields

| Source | Field | Link To | Purpose |
| ----------- | --------------------------- | ------------- | -------------------------- |
| **Trace** | `trace_id` | Logs | Find log entries for trace |
| **Trace** | `xrpl.tx.hash` | Logs, Metrics | Find TX-related data |
| **Trace** | `xrpl.consensus.ledger.seq` | Logs | Find ledger-related logs |
| **PerfLog** | `trace_id` (new) | Traces | Jump to trace from log |
| **PerfLog** | `ledger_seq` | Traces | Find consensus trace |
| **Insight** | `exemplar.trace_id` | Traces | Jump from metric spike |

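The `trace_id` (new) row is the only field that requires a rippled-side change: PerfLog rows would need to embed the active trace id. A minimal sketch of producing that value with the OpenTelemetry C++ API (the PerfLog wiring itself is not shown):

```cpp
#include <opentelemetry/trace/tracer.h>

#include <string>

// Render the active span's trace id as 32 lowercase hex characters --
// the same form Tempo and Loki queries match on. Returns an empty
// string when no trace is active, so the field can simply be omitted.
std::string
currentTraceIdHex()
{
    auto span = opentelemetry::trace::Tracer::GetCurrentSpan();
    auto const ctx = span->GetContext();
    if (!ctx.IsValid())
        return {};
    char hex[2 * opentelemetry::trace::TraceId::kSize];
    ctx.trace_id().ToLowerBase16(hex);
    return {hex, sizeof(hex)};
}
```
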
### 7.7.3 Example: Debugging a Slow Transaction

**Step 1: Find the trace**

```
# In Grafana Explore with Tempo
{resource.service.name="rippled" && span.xrpl.tx.hash="ABC123..."}
```

**Step 2: Get the trace_id from the trace view**

```
Trace ID: 4bf92f3577b34da6a3ce929d0e0e4736
```

**Step 3: Find related PerfLog entries**

```
# In Grafana Explore with Loki
{job="rippled"} |= "4bf92f3577b34da6a3ce929d0e0e4736"
```

**Step 4: Check Insight metrics for the time window**

```
# In Grafana with Prometheus; the @ modifier takes a unix timestamp,
# so substitute the start time read from the trace
rate(rippled_tx_applied_total[1m] @ <trace start timestamp>)
```

### 7.7.4 Unified Dashboard Example

```json
{
  "title": "rippled Unified Observability",
  "uid": "rippled-unified",
  "panels": [
    {
      "title": "Transaction Latency (Traces)",
      "type": "timeseries",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.receive\"} | histogram_over_time(duration)"
        }
      ],
      "gridPos": { "h": 6, "w": 8, "x": 0, "y": 0 }
    },
    {
      "title": "Transaction Rate (Metrics)",
      "type": "timeseries",
      "datasource": "Prometheus",
      "targets": [
        {
          "expr": "rate(rippled_tx_received_total[5m])",
          "legendFormat": "{{ instance }}"
        }
      ],
      "fieldConfig": {
        "defaults": {
          "links": [
            {
              "title": "View traces",
              "url": "/explore?left={\"datasource\":\"Tempo\",\"query\":\"{resource.service.name=\\\"rippled\\\" && name=\\\"tx.receive\\\"}\"}"
            }
          ]
        }
      },
      "gridPos": { "h": 6, "w": 8, "x": 8, "y": 0 }
    },
    {
      "title": "Recent Logs",
      "type": "logs",
      "datasource": "Loki",
      "targets": [
        {
          "expr": "{job=\"rippled\"} | json"
        }
      ],
      "gridPos": { "h": 6, "w": 8, "x": 16, "y": 0 }
    },
    {
      "title": "Trace Search",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\"}"
        }
      ],
      "fieldConfig": {
        "overrides": [
          {
            "matcher": { "id": "byName", "options": "traceID" },
            "properties": [
              {
                "id": "links",
                "value": [
                  {
                    "title": "View trace",
                    "url": "/explore?left={\"datasource\":\"Tempo\",\"query\":\"${__value.raw}\"}"
                  },
                  {
                    "title": "View logs",
                    "url": "/explore?left={\"datasource\":\"Loki\",\"query\":\"{job=\\\"rippled\\\"} |= \\\"${__value.raw}\\\"\"}"
                  }
                ]
              }
            ]
          }
        ]
      },
      "gridPos": { "h": 12, "w": 24, "x": 0, "y": 6 }
    }
  ]
}
```

---

_Previous: [Implementation Phases](./06-implementation-phases.md)_ | _Next: [Appendix](./08-appendix.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_

195
OpenTelemetryPlan/08-appendix.md
Normal file
@@ -0,0 +1,195 @@
# Appendix

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Observability Backends](./07-observability-backends.md)

---

## 8.1 Glossary

> **OTLP** = OpenTelemetry Protocol | **TxQ** = Transaction Queue

| Term | Definition |
| --------------------- | ---------------------------------------------------------- |
| **Span** | A unit of work with start/end time, name, and attributes |
| **Trace** | A collection of spans representing a complete request flow |
| **Trace ID** | 128-bit unique identifier for a trace |
| **Span ID** | 64-bit unique identifier for a span within a trace |
| **Context** | Carrier for trace/span IDs across boundaries |
| **Propagator** | Component that injects/extracts context |
| **Sampler** | Decides which traces to record |
| **Exporter** | Sends spans to a backend |
| **Collector** | Receives, processes, and forwards telemetry |
| **OTLP** | OpenTelemetry Protocol (wire format) |
| **W3C Trace Context** | Standard HTTP headers for trace propagation |
| **Baggage** | Key-value pairs propagated across service boundaries |
| **Resource** | Entity producing telemetry (service, host, etc.) |
| **Instrumentation** | Code that creates telemetry data |

### rippled-Specific Terms

| Term | Definition |
| ----------------- | ------------------------------------------------------------- |
| **Overlay** | P2P network layer managing peer connections |
| **Consensus** | XRP Ledger consensus algorithm (RCL) |
| **Proposal** | Validator's suggested transaction set for a ledger |
| **Validation** | Validator's signature on a closed ledger |
| **HashRouter** | Component for transaction deduplication |
| **JobQueue** | Thread pool for asynchronous task execution |
| **PerfLog** | Existing performance logging system in rippled |
| **Beast Insight** | Existing metrics framework in rippled |
| **PathFinding** | Payment path computation engine for cross-currency payments |
| **TxQ** | Transaction queue managing fee-based prioritization |
| **LoadManager** | Dynamic fee escalation based on network load |
| **SHAMap** | SHA-256 hash-based map (Merkle trie variant) for ledger state |

---

## 8.2 Span Hierarchy Visualization

> **TxQ** = Transaction Queue

```mermaid
flowchart TB
    subgraph trace["Trace: Transaction Lifecycle"]
        rpc["rpc.request<br/>(entry point)"]
        validate["tx.validate"]
        relay["tx.relay<br/>(parent span)"]

        subgraph peers["Peer Spans"]
            p1["peer.send<br/>Peer A"]
            p2["peer.send<br/>Peer B"]
            p3["peer.send<br/>Peer C"]
        end

        subgraph pathfinding["PathFinding Spans"]
            pathfind["pathfind.request"]
            pathcomp["pathfind.compute"]
        end

        consensus["consensus.round"]
        apply["tx.apply"]

        subgraph txqueue["TxQ Spans"]
            txq["txq.enqueue"]
            txqApply["txq.apply"]
        end

        feeCalc["fee.escalate"]
    end

    subgraph validators["Validator Spans"]
        valFetch["validator.list.fetch"]
        valManifest["validator.manifest"]
    end

    rpc --> validate
    rpc --> pathfind
    pathfind --> pathcomp
    validate --> relay
    relay --> p1
    relay --> p2
    relay --> p3
    p1 -.->|"context propagation"| consensus
    consensus --> apply
    apply --> txq
    txq --> txqApply
    txq --> feeCalc

    style trace fill:#0f172a,stroke:#020617,color:#fff
    style peers fill:#1e3a8a,stroke:#172554,color:#fff
    style pathfinding fill:#134e4a,stroke:#0f766e,color:#fff
    style txqueue fill:#064e3b,stroke:#047857,color:#fff
    style validators fill:#4c1d95,stroke:#6d28d9,color:#fff
    style rpc fill:#1d4ed8,stroke:#1e40af,color:#fff
    style validate fill:#047857,stroke:#064e3b,color:#fff
    style relay fill:#047857,stroke:#064e3b,color:#fff
    style p1 fill:#0e7490,stroke:#155e75,color:#fff
    style p2 fill:#0e7490,stroke:#155e75,color:#fff
    style p3 fill:#0e7490,stroke:#155e75,color:#fff
    style consensus fill:#fef3c7,stroke:#fde68a,color:#1e293b
    style apply fill:#047857,stroke:#064e3b,color:#fff
    style pathfind fill:#0e7490,stroke:#155e75,color:#fff
    style pathcomp fill:#0e7490,stroke:#155e75,color:#fff
    style txq fill:#047857,stroke:#064e3b,color:#fff
    style txqApply fill:#047857,stroke:#064e3b,color:#fff
    style feeCalc fill:#047857,stroke:#064e3b,color:#fff
    style valFetch fill:#6d28d9,stroke:#4c1d95,color:#fff
    style valManifest fill:#6d28d9,stroke:#4c1d95,color:#fff
```

**Reading the diagram:**

- **rpc.request (blue, top)**: The entry point -- every traced transaction starts as an RPC call; this root span is the parent of all downstream work.
- **tx.validate and pathfind.request (green/teal, first fork)**: The RPC request fans out into transaction validation and, for cross-currency payments, a PathFinding branch (`pathfind.request` -> `pathfind.compute`).
- **tx.relay -> Peer Spans (teal, middle)**: After validation, the transaction is relayed to peers A, B, and C in parallel; each `peer.send` is a sibling child span showing fan-out across the network.
- **context propagation (dashed arrow)**: The dotted line from `peer.send Peer A` to `consensus.round` represents the trace context crossing a node boundary -- the receiving validator picks up the same `trace_id` and continues the trace.
- **consensus.round -> tx.apply -> TxQ Spans (green, lower)**: Once consensus accepts the transaction, it is applied to the ledger; the TxQ spans (`txq.enqueue`, `txq.apply`, `fee.escalate`) capture queue depth and fee escalation behavior.
- **Validator Spans (purple, detached)**: `validator.list.fetch` and `validator.manifest` are independent workflows for UNL management -- they run on their own traces and are linked to consensus via Span Links, not parent-child relationships.

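The "linked, not parent-child" relationship in the last bullet is expressed with Span Links at span creation time. A sketch using the OpenTelemetry C++ API's `StartSpan` overload that accepts links; `consensusRoundContext`, `siteUri`, and the attribute names here are illustrative, not part of the plan's fixed schema:

```cpp
// The link carries the consensus round's SpanContext alongside the new
// span without making it the parent, so each workflow keeps its own trace.
auto span = tracer->StartSpan(
    "validator.list.fetch",
    {{"xrpl.validator.site", siteUri}},  // span attributes
    {{consensusRoundContext,             // span link target...
      {{"xrpl.link.reason", "unl-refresh"}}}});  // ...with link attributes
```
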
---

## 8.3 References

> **OTLP** = OpenTelemetry Protocol

### OpenTelemetry Resources

1. [OpenTelemetry C++ SDK](https://github.com/open-telemetry/opentelemetry-cpp)
2. [OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel/)
3. [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/)
4. [OTLP Protocol Specification](https://opentelemetry.io/docs/specs/otlp/)

### Standards

5. [W3C Trace Context](https://www.w3.org/TR/trace-context/)
6. [W3C Baggage](https://www.w3.org/TR/baggage/)
7. [Protocol Buffers](https://protobuf.dev/)

### rippled Resources

8. [rippled Source Code](https://github.com/XRPLF/rippled)
9. [XRP Ledger Documentation](https://xrpl.org/docs/)
10. [rippled Overlay README](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/overlay/README.md)
11. [rippled RPC README](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/rpc/README.md)
12. [rippled Consensus README](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/app/consensus/README.md)

---

## 8.4 Version History

| Version | Date | Author | Changes |
| ------- | ---------- | ------ | -------------------------------------------------------------- |
| 1.0 | 2026-02-12 | - | Initial implementation plan |
| 1.1 | 2026-02-13 | - | Refactored into modular documents |
| 1.2 | 2026-03-24 | - | Review fixes: accuracy corrections, cross-document consistency |

---

## 8.5 Document Index

### Plan Documents

| Document | Description |
| ---------------------------------------------------------------- | -------------------------------------------- |
| [OpenTelemetryPlan.md](./OpenTelemetryPlan.md) | Master overview and executive summary |
| [00-tracing-fundamentals.md](./00-tracing-fundamentals.md) | Distributed tracing concepts and OTel primer |
| [01-architecture-analysis.md](./01-architecture-analysis.md) | rippled architecture and trace points |
| [02-design-decisions.md](./02-design-decisions.md) | SDK selection, exporters, span conventions |
| [03-implementation-strategy.md](./03-implementation-strategy.md) | Directory structure, performance analysis |
| [04-code-samples.md](./04-code-samples.md) | C++ code examples for all components |
| [05-configuration-reference.md](./05-configuration-reference.md) | rippled config, CMake, Collector configs |
| [06-implementation-phases.md](./06-implementation-phases.md) | Timeline, tasks, risks, success metrics |
| [07-observability-backends.md](./07-observability-backends.md) | Backend selection and architecture |
| [08-appendix.md](./08-appendix.md) | Glossary, references, version history |

### Task Lists

| Document | Description |
| ------------------------------------ | --------------------------------------------------- |
| [POC_taskList.md](./POC_taskList.md) | Proof-of-concept telemetry integration |
| [presentation.md](./presentation.md) | Presentation slides for OpenTelemetry plan overview |

---

_Previous: [Observability Backends](./07-observability-backends.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_

230
OpenTelemetryPlan/OpenTelemetryPlan.md
Normal file
@@ -0,0 +1,230 @@
|
||||
# [OpenTelemetry](00-tracing-fundamentals.md) Distributed Tracing Implementation Plan for rippled (xrpld)
|
||||
|
||||
## Executive Summary
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
This document provides a comprehensive implementation plan for integrating OpenTelemetry distributed tracing into the rippled XRP Ledger node software. The plan addresses the unique challenges of a decentralized peer-to-peer system where trace context must propagate across network boundaries between independent nodes.
|
||||
|
||||
### Key Benefits
|
||||
|
||||
- **End-to-end transaction visibility**: Track transactions from submission through consensus to ledger inclusion
|
||||
- **Consensus round analysis**: Understand timing and behavior of consensus phases across validators
|
||||
- **RPC performance insights**: Identify slow handlers and optimize response times
|
||||
- **Network topology understanding**: Visualize message propagation patterns between peers
|
||||
- **Incident debugging**: Correlate events across distributed nodes during issues
|
||||
|
||||
### Estimated Performance Overhead
|
||||
|
||||
| Metric | Overhead | Notes |
|
||||
| ------------- | ---------- | ----------------------------------- |
|
||||
| CPU | 1-3% | Span creation and attribute setting |
|
||||
| Memory | 2-5 MB | Batch buffer for pending spans |
|
||||
| Network | 10-50 KB/s | Compressed OTLP export to collector |
|
||||
| Latency (p99) | <2% | With proper sampling configuration |
|
||||
|
||||
---
|
||||
|
||||
## Document Structure
|
||||
|
||||
This implementation plan is organized into modular documents for easier navigation:
|
||||
|
||||
<div align="center">
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
overview["📋 OpenTelemetryPlan.md<br/>(This Document)"]
|
||||
|
||||
subgraph fundamentals["Fundamentals"]
|
||||
fund["00-tracing-fundamentals.md"]
|
||||
end
|
||||
|
||||
subgraph analysis["Analysis & Design"]
|
||||
arch["01-architecture-analysis.md"]
|
||||
design["02-design-decisions.md"]
|
||||
end
|
||||
|
||||
subgraph impl["Implementation"]
|
||||
strategy["03-implementation-strategy.md"]
|
||||
code["04-code-samples.md"]
|
||||
config["05-configuration-reference.md"]
|
||||
end
|
||||
|
||||
subgraph deploy["Deployment & Planning"]
|
||||
phases["06-implementation-phases.md"]
|
||||
backends["07-observability-backends.md"]
|
||||
appendix["08-appendix.md"]
|
||||
poc["POC_taskList.md"]
|
||||
end
|
||||
|
||||
overview --> fundamentals
|
||||
overview --> analysis
|
||||
overview --> impl
|
||||
overview --> deploy
|
||||
|
||||
fund --> arch
|
||||
arch --> design
|
||||
design --> strategy
|
||||
strategy --> code
|
||||
code --> config
|
||||
config --> phases
|
||||
phases --> backends
|
||||
backends --> appendix
|
||||
phases --> poc
|
||||
|
||||
style overview fill:#1b5e20,stroke:#0d3d14,color:#fff,stroke-width:2px
|
||||
style fundamentals fill:#00695c,stroke:#004d40,color:#fff
|
||||
style fund fill:#00695c,stroke:#004d40,color:#fff
|
||||
style analysis fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style impl fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style deploy fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
style arch fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style design fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style strategy fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style code fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style config fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style phases fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
style backends fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
style appendix fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
style poc fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
```
|
||||
|
||||
</div>
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
| Section | Document | Description |
|
||||
| ------- | ---------------------------------------------------------- | ---------------------------------------------------------------------- |
|
||||
| **0** | [Tracing Fundamentals](./00-tracing-fundamentals.md) | Distributed tracing concepts, span relationships, context propagation |
|
||||
| **1** | [Architecture Analysis](./01-architecture-analysis.md) | rippled component analysis, trace points, instrumentation priorities |
|
||||
| **2** | [Design Decisions](./02-design-decisions.md) | SDK selection, exporters, span naming, attributes, context propagation |
|
||||
| **3** | [Implementation Strategy](./03-implementation-strategy.md) | Directory structure, key principles, performance optimization |
|
||||
| **4** | [Code Samples](./04-code-samples.md) | C++ implementation examples for core infrastructure and key modules |
|
||||
| **5** | [Configuration Reference](./05-configuration-reference.md) | rippled config, CMake integration, Collector configurations |
|
||||
| **6** | [Implementation Phases](./06-implementation-phases.md) | 5-phase timeline, tasks, risks, success metrics |
|
||||
| **7** | [Observability Backends](./07-observability-backends.md) | Backend selection guide and production architecture |
|
||||
| **8** | [Appendix](./08-appendix.md) | Glossary, references, version history |
|
||||
| **POC** | [POC Task List](./POC_taskList.md) | Proof of concept tasks for RPC tracing end-to-end demo |
|
||||
|
||||
---
|
||||
|
||||
## 0. Tracing Fundamentals
|
||||
|
||||
This document introduces distributed tracing concepts for readers unfamiliar with the domain. It covers what traces and spans are, how parent-child and follows-from relationships model causality, how context propagates across service boundaries, and how sampling controls data volume. It also maps these concepts to rippled-specific scenarios like transaction relay and consensus.
|
||||
|
||||
➡️ **[Read Tracing Fundamentals](./00-tracing-fundamentals.md)**
|
||||
|
||||
---
|
||||
|
||||
## 1. Architecture Analysis
|
||||
|
||||
> **WS** = WebSocket | **TxQ** = Transaction Queue
|
||||
|
||||
The rippled node consists of several key components that require instrumentation for comprehensive distributed tracing. The main areas include the RPC server (HTTP/WebSocket), Overlay P2P network, Consensus mechanism (RCLConsensus), JobQueue for async task execution, PathFinding, Transaction Queue (TxQ), fee escalation (LoadManager), ledger acquisition, validator management, and existing observability infrastructure (PerfLog, Insight/StatsD, Journal logging).
|
||||
|
||||
Key trace points span across transaction submission via RPC, peer-to-peer message propagation, consensus round execution, ledger building, path computation, transaction queue behavior, fee escalation, and validator health. The implementation prioritizes high-value, low-risk components first: RPC handlers provide immediate value with minimal risk, while consensus tracing requires careful implementation to avoid timing impacts.
|
||||
|
||||
➡️ **[Read full Architecture Analysis](./01-architecture-analysis.md)**
|
||||
|
||||
---
|
||||
|
||||
## 2. Design Decisions
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol | **CNCF** = Cloud Native Computing Foundation
|
||||
|
||||
The OpenTelemetry C++ SDK is selected for its CNCF backing, active development, and native performance characteristics. Traces are exported via OTLP/gRPC (primary) or OTLP/HTTP (fallback) to an OpenTelemetry Collector, which provides flexible routing and sampling.
|
||||
|
||||
Span naming follows a hierarchical `<component>.<operation>` convention (e.g., `rpc.submit`, `tx.relay`, `consensus.round`). Context propagation uses W3C Trace Context headers for HTTP and embedded Protocol Buffer fields for P2P messages. The implementation coexists with existing PerfLog and Insight observability systems through correlation IDs.
|
||||
|
||||
**Data Collection & Privacy**: Telemetry collects only operational metadata (timing, counts, hashes) — never sensitive content (private keys, balances, amounts, raw payloads). Privacy protection includes account hashing, configurable redaction, sampling, and collector-level filtering. Node operators retain full control over telemetry configuration.
|
||||
|
||||
➡️ **[Read full Design Decisions](./02-design-decisions.md)**
|
||||
|
||||
---
|
||||
|
||||
## 3. Implementation Strategy
|
||||
|
||||
The telemetry code is organized under `include/xrpl/telemetry/` for headers and `src/libxrpl/telemetry/` for implementation. Key principles include RAII-based span management via `SpanGuard`, conditional compilation with `XRPL_ENABLE_TELEMETRY`, and minimal runtime overhead through batch processing and efficient sampling.
|
||||
|
||||
Performance optimization strategies include probabilistic head sampling (10% default), tail-based sampling at the collector for errors and slow traces, batch export to reduce network overhead, and conditional instrumentation that compiles to no-ops when disabled.
|
||||
|
||||
➡️ **[Read full Implementation Strategy](./03-implementation-strategy.md)**
|
||||
|
||||
---
|
||||
|
||||
## 4. Code Samples
|
||||
|
||||
C++ implementation examples are provided for the core telemetry infrastructure and key modules:
|
||||
|
||||
- `Telemetry.h` - Core interface for tracer access and span creation
|
||||
- `SpanGuard.h` - RAII wrapper for automatic span lifecycle management
|
||||
- `TracingInstrumentation.h` - Macros for conditional instrumentation
|
||||
- Protocol Buffer extensions for trace context propagation
|
||||
- Module-specific instrumentation (RPC, Consensus, P2P, JobQueue)
|
||||
- Remaining modules (PathFinding, TxQ, Validator, etc.) follow the same patterns
|
||||
|
||||
➡️ **[View all Code Samples](./04-code-samples.md)**
|
||||
|
||||
---
|
||||
|
||||
## 5. Configuration Reference
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol | **APM** = Application Performance Monitoring
|
||||
|
||||
Configuration is handled through the `[telemetry]` section in `xrpld.cfg` with options for enabling/disabling, exporter selection, endpoint configuration, sampling ratios, and component-level filtering. CMake integration includes a `XRPL_ENABLE_TELEMETRY` option for compile-time control.
|
||||
|
||||
OpenTelemetry Collector configurations are provided for development and production (with tail-based sampling, Tempo, and Elastic APM). Docker Compose examples enable quick local development environment setup.
|
||||
|
||||
➡️ **[View full Configuration Reference](./05-configuration-reference.md)**
|
||||
|
||||
---
|
||||
|
||||
## 6. Implementation Phases
|
||||
|
||||
The implementation spans 9 weeks across 5 phases:
|
||||
|
||||
| Phase | Duration | Focus | Key Deliverables |
|
||||
| ----- | --------- | ------------------- | --------------------------------------------------- |
|
||||
| 1 | Weeks 1-2 | Core Infrastructure | SDK integration, Telemetry interface, Configuration |
|
||||
| 2 | Weeks 3-4 | RPC Tracing | HTTP context extraction, Handler instrumentation |
|
||||
| 3 | Weeks 5-6 | Transaction Tracing | Protocol Buffer context, Relay propagation |
|
||||
| 4 | Weeks 7-8 | Consensus Tracing | Round spans, Proposal/validation tracing |
|
||||
| 5 | Week 9 | Documentation | Runbook, Dashboards, Training |
|
||||
|
||||
**Total Effort**: 47 person-days (2 developers working in parallel)

➡️ **[View full Implementation Phases](./06-implementation-phases.md)**

---

## 7. Observability Backends

> **APM** = Application Performance Monitoring | **GCS** = Google Cloud Storage

Grafana Tempo is recommended for all environments due to its cost-effectiveness and Grafana integration, while Elastic APM is ideal for organizations with existing Elastic infrastructure.

The recommended production architecture uses a gateway collector pattern with regional collectors performing tail-based sampling, routing traces to multiple backends (Tempo for primary storage, Elastic for log correlation, S3/GCS for long-term archive).

➡️ **[View Observability Backend Recommendations](./07-observability-backends.md)**

---

## 8. Appendix

The appendix contains a glossary of OpenTelemetry and rippled-specific terms, references to external documentation and specifications, version history for this implementation plan, and a complete document index.

➡️ **[View Appendix](./08-appendix.md)**

---

## POC Task List

A step-by-step task list for building a minimal end-to-end proof of concept that demonstrates distributed tracing in rippled. The POC scope is limited to RPC tracing — showing request traces flowing from rippled through an OpenTelemetry Collector into Tempo, viewable in Grafana.

➡️ **[View POC Task List](./POC_taskList.md)**

---

_This document provides a comprehensive implementation plan for integrating OpenTelemetry distributed tracing into the rippled XRP Ledger node software. For detailed information on any section, follow the links to the corresponding sub-documents._

636
OpenTelemetryPlan/POC_taskList.md
Normal file

@@ -0,0 +1,636 @@

# OpenTelemetry POC Task List

> **Goal**: Build a minimal end-to-end proof of concept that demonstrates distributed tracing in rippled. A successful POC will show RPC request traces flowing from rippled through an OTel Collector into Tempo, viewable in Grafana.
>
> **Scope**: RPC tracing only (highest value, lowest risk per the [CRAWL phase](./06-implementation-phases.md#6102-quick-wins-immediate-value) in the implementation phases). No cross-node P2P context propagation or consensus tracing in the POC.

### Related Plan Documents

| Document                                                         | Relevance to POC                                                                                                                                          |
| ---------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [00-tracing-fundamentals.md](./00-tracing-fundamentals.md)       | Core concepts: traces, spans, context propagation, sampling                                                                                                 |
| [01-architecture-analysis.md](./01-architecture-analysis.md)     | RPC request flow (§1.5), key trace points (§1.6), instrumentation priority (§1.7)                                                                           |
| [02-design-decisions.md](./02-design-decisions.md)               | SDK selection (§2.1), exporter config (§2.2), span naming (§2.3), attribute schema (§2.4), coexistence with PerfLog/Insight (§2.6)                          |
| [03-implementation-strategy.md](./03-implementation-strategy.md) | Directory structure (§3.1), key principles (§3.2), performance overhead (§3.3-3.6), conditional compilation (§3.7.3), code intrusiveness (§3.9)             |
| [04-code-samples.md](./04-code-samples.md)                       | Telemetry interface (§4.1), SpanGuard (§4.2), macros (§4.3), RPC instrumentation (§4.5.3)                                                                   |
| [05-configuration-reference.md](./05-configuration-reference.md) | rippled config (§5.1), config parser (§5.2), Application integration (§5.3), CMake (§5.4), Collector config (§5.5), Docker Compose (§5.6), Grafana (§5.8)   |
| [06-implementation-phases.md](./06-implementation-phases.md)     | Phase 1 core tasks (§6.2), Phase 2 RPC tasks (§6.3), quick wins (§6.10), definition of done (§6.11)                                                         |
| [07-observability-backends.md](./07-observability-backends.md)   | Tempo dev setup (§7.1), Grafana dashboards (§7.6), alert rules (§7.6.3)                                                                                     |

---

## Task 0: Docker Observability Stack Setup

> **OTLP** = OpenTelemetry Protocol

**Objective**: Stand up the backend infrastructure to receive, store, and display traces.

**What to do**:

- Create `docker/telemetry/docker-compose.yml` in the repo with three services:

  1. **OpenTelemetry Collector** (`otel/opentelemetry-collector-contrib:0.92.0`)
     - Expose ports `4317` (OTLP gRPC) and `4318` (OTLP HTTP)
     - Expose port `13133` (health check)
     - Mount a config file `docker/telemetry/otel-collector-config.yaml`
  2. **Tempo** (`grafana/tempo:2.6.1`)
     - Expose port `3200` (HTTP API) and `4317` (OTLP gRPC, internal)
  3. **Grafana** (`grafana/grafana:latest`) — optional but useful
     - Expose port `3000`
     - Enable anonymous admin access for local dev (`GF_AUTH_ANONYMOUS_ENABLED=true`, `GF_AUTH_ANONYMOUS_ORG_ROLE=Admin`)
     - Provision Tempo as a data source via `docker/telemetry/grafana/provisioning/datasources/tempo.yaml`

- Create `docker/telemetry/otel-collector-config.yaml` (the `debug` exporter replaces the deprecated `logging` exporter; see POC Lessons Learned below):

  ```yaml
  receivers:
    otlp:
      protocols:
        grpc:
          endpoint: 0.0.0.0:4317
        http:
          endpoint: 0.0.0.0:4318

  processors:
    batch:
      timeout: 1s
      send_batch_size: 100

  exporters:
    debug:
      verbosity: detailed
    otlp/tempo:
      endpoint: tempo:4317
      tls:
        insecure: true

  service:
    pipelines:
      traces:
        receivers: [otlp]
        processors: [batch]
        exporters: [debug, otlp/tempo]
  ```

- Create the Grafana Tempo datasource provisioning file at `docker/telemetry/grafana/provisioning/datasources/tempo.yaml`:

  ```yaml
  apiVersion: 1
  datasources:
    - name: Tempo
      type: tempo
      access: proxy
      url: http://tempo:3200
  ```

**Verification**: Run `docker compose -f docker/telemetry/docker-compose.yml up -d`, then:

- `curl http://localhost:13133` returns healthy (Collector)
- `http://localhost:3000` opens Grafana (Tempo datasource available, no traces yet)

**Reference**:

- [05-configuration-reference.md §5.5](./05-configuration-reference.md) — Collector config (dev YAML with Tempo exporter)
- [05-configuration-reference.md §5.6](./05-configuration-reference.md) — Docker Compose development environment
- [07-observability-backends.md §7.1](./07-observability-backends.md) — Tempo quick start and backend selection
- [05-configuration-reference.md §5.8](./05-configuration-reference.md) — Grafana datasource provisioning and dashboards

---

## Task 1: Add OpenTelemetry C++ SDK Dependency

**Objective**: Make `opentelemetry-cpp` available to the build system.

**What to do**:

- Edit `conanfile.py` to add `opentelemetry-cpp` as an **optional** dependency. The gRPC otel plugin flag (`"grpc/*:otel_plugin": False`) in the existing conanfile may need to remain false — we pull the OTel SDK separately.
  - Add a Conan option: `with_telemetry = [True, False]` defaulting to `False`
  - When `with_telemetry` is `True`, add `opentelemetry-cpp` to `self.requires()`
  - Required OTel Conan components: `opentelemetry-cpp` (which bundles api, sdk, and exporters). If the package isn't in Conan Center, consider using `FetchContent` in CMake or building from source as a fallback.
- Edit `CMakeLists.txt`:
  - Add option: `option(XRPL_ENABLE_TELEMETRY "Enable OpenTelemetry tracing" OFF)`
  - When ON, `find_package(opentelemetry-cpp CONFIG REQUIRED)` and add compile definition `XRPL_ENABLE_TELEMETRY`
  - When OFF, do nothing (zero build impact)
- Verify the build succeeds with `-DXRPL_ENABLE_TELEMETRY=OFF` (no regressions) and with `-DXRPL_ENABLE_TELEMETRY=ON` (SDK links successfully).

**Key files**:

- `conanfile.py`
- `CMakeLists.txt`

**Reference**:

- [05-configuration-reference.md §5.4](./05-configuration-reference.md) — CMake integration, `FindOpenTelemetry.cmake`, `XRPL_ENABLE_TELEMETRY` option
- [03-implementation-strategy.md §3.2](./03-implementation-strategy.md) — Key principle: zero-cost when disabled via compile-time flags
- [02-design-decisions.md §2.1](./02-design-decisions.md) — SDK selection rationale and required OTel components

---

## Task 2: Create Core Telemetry Interface and NullTelemetry

**Objective**: Define the `Telemetry` abstract interface and a no-op implementation so the rest of the codebase can reference telemetry without hard-depending on the OTel SDK.

**What to do**:

- Create `include/xrpl/telemetry/Telemetry.h`:
  - Define `namespace xrpl::telemetry`
  - Define `struct Telemetry::Setup` holding: `enabled`, `exporterEndpoint`, `samplingRatio`, `serviceName`, `serviceVersion`, `serviceInstanceId`, `traceRpc`, `traceTransactions`, `traceConsensus`, `tracePeer`
  - Define abstract `class Telemetry` with:
    - `virtual void start() = 0;`
    - `virtual void stop() = 0;`
    - `virtual bool isEnabled() const = 0;`
    - `virtual nostd::shared_ptr<Tracer> getTracer(string_view name = "rippled") = 0;`
    - `virtual nostd::shared_ptr<Span> startSpan(string_view name, SpanKind kind = kInternal) = 0;`
    - `virtual nostd::shared_ptr<Span> startSpan(string_view name, Context const& parentContext, SpanKind kind = kInternal) = 0;`
    - `virtual bool shouldTraceRpc() const = 0;`
    - `virtual bool shouldTraceTransactions() const = 0;`
    - `virtual bool shouldTraceConsensus() const = 0;`
    - A setter such as `setServiceInstanceId()` (used in Task 4 once the node identity is known)
  - Factory: `std::unique_ptr<Telemetry> make_Telemetry(Setup const&, beast::Journal);`
  - Config parser: `Telemetry::Setup setup_Telemetry(Section const&, std::string const& nodePublicKey, std::string const& version);`
- Create `include/xrpl/telemetry/SpanGuard.h`:
  - RAII guard that takes a `nostd::shared_ptr<Span>`, creates a `Scope`, and calls `span->End()` in the destructor.
  - Convenience: `setAttribute()`, `setOk()`, `setStatus()`, `addEvent()`, `recordException()`, `context()`
  - See [04-code-samples.md](./04-code-samples.md) §4.2 for the full implementation; a minimal sketch follows this list.
- Create `src/libxrpl/telemetry/NullTelemetry.cpp`:
  - Implements `Telemetry` with all no-ops.
  - `isEnabled()` returns `false`, `startSpan()` returns a noop span.
  - This is used when `XRPL_ENABLE_TELEMETRY` is OFF or `enabled=0` in config.
- Guard all OTel SDK headers behind `#ifdef XRPL_ENABLE_TELEMETRY`. The `NullTelemetry` implementation should compile without the OTel SDK present.
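
For orientation, here is a minimal sketch of the shape these two headers might take. It is a sketch only, not the final code (see §4.1/§4.2 for that): the `trace_api`/`nostd` aliases, the trimmed-down `Setup`, and the reduced member set are assumptions for illustration.

```cpp
// Minimal sketch of Telemetry.h / SpanGuard.h; illustrative only.
#include <opentelemetry/trace/scope.h>
#include <opentelemetry/trace/span.h>
#include <opentelemetry/trace/tracer.h>

#include <string>
#include <string_view>
#include <utility>

namespace xrpl::telemetry {

namespace trace_api = opentelemetry::trace;
namespace nostd = opentelemetry::nostd;

class Telemetry
{
public:
    struct Setup
    {
        bool enabled = false;
        std::string exporterEndpoint;
        double samplingRatio = 1.0;
        std::string serviceName = "rippled";
        // ... serviceVersion, serviceInstanceId, traceRpc, etc.
    };

    virtual ~Telemetry() = default;
    virtual void start() = 0;
    virtual void stop() = 0;
    virtual bool isEnabled() const = 0;
    virtual nostd::shared_ptr<trace_api::Tracer>
    getTracer(std::string_view name = "rippled") = 0;
    virtual nostd::shared_ptr<trace_api::Span>
    startSpan(std::string_view name) = 0;
    virtual bool shouldTraceRpc() const = 0;
};

// RAII guard: makes the span current on construction, ends it on destruction.
class SpanGuard
{
public:
    explicit SpanGuard(nostd::shared_ptr<trace_api::Span> span)
        : span_(std::move(span))
        , scope_(span_)  // created once here; see the Scope lesson learned
    {
    }

    ~SpanGuard()
    {
        if (span_)
            span_->End();
    }

    SpanGuard(SpanGuard const&) = delete;
    SpanGuard&
    operator=(SpanGuard const&) = delete;

    template <typename T>
    void
    setAttribute(char const* key, T&& value)
    {
        if (span_)
            span_->SetAttribute(key, std::forward<T>(value));
    }

private:
    nostd::shared_ptr<trace_api::Span> span_;
    trace_api::Scope scope_;  // pops the span from the context on destruction
};

}  // namespace xrpl::telemetry
```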

**Key new files**:

- `include/xrpl/telemetry/Telemetry.h`
- `include/xrpl/telemetry/SpanGuard.h`
- `src/libxrpl/telemetry/NullTelemetry.cpp`

**Reference**:

- [04-code-samples.md §4.1](./04-code-samples.md) — Full `Telemetry` interface with `Setup` struct, lifecycle, tracer access, span creation, and component filtering methods
- [04-code-samples.md §4.2](./04-code-samples.md) — Full `SpanGuard` RAII implementation and `NullSpanGuard` no-op class
- [03-implementation-strategy.md §3.1](./03-implementation-strategy.md) — Directory structure: `include/xrpl/telemetry/` for headers, `src/libxrpl/telemetry/` for implementation
- [03-implementation-strategy.md §3.7.3](./03-implementation-strategy.md) — Conditional instrumentation and zero-cost compile-time disabled pattern

---

## Task 3: Implement OTel-Backed Telemetry

> **OTLP** = OpenTelemetry Protocol

**Objective**: Implement the real `Telemetry` class that initializes the OTel SDK, configures the OTLP exporter and batch processor, and creates tracers/spans.

**What to do**:

- Create `src/libxrpl/telemetry/Telemetry.cpp` (compiled only when `XRPL_ENABLE_TELEMETRY=ON`; a `start()` sketch follows this list):
  - `class TelemetryImpl : public Telemetry` that:
    - In `start()`: creates a `TracerProvider` with:
      - Resource attributes: `service.name`, `service.version`, `service.instance.id`
      - An `OtlpHttpExporter` pointed at `setup.exporterEndpoint` (default `localhost:4318`)
      - A `BatchSpanProcessor` with configurable batch size and delay
      - A `TraceIdRatioBasedSampler` using `setup.samplingRatio`
      - Sets the global `TracerProvider`
    - In `stop()`: calls `ForceFlush()` then shuts down the provider
    - In `startSpan()`: delegates to `getTracer()->StartSpan(name, ...)`
    - `shouldTraceRpc()` etc. read from `Setup` fields
- Create `src/libxrpl/telemetry/TelemetryConfig.cpp`:
  - `setup_Telemetry()` parses the `[telemetry]` config section from `xrpld.cfg`
  - Maps config keys: `enabled`, `exporter`, `endpoint`, `sampling_ratio`, `trace_rpc`, `trace_transactions`, `trace_consensus`, `trace_peer`
- Wire the `make_Telemetry()` factory:
  - If `setup.enabled` is true AND `XRPL_ENABLE_TELEMETRY` is defined: return `TelemetryImpl`
  - Otherwise: return `NullTelemetry`
- Add telemetry source files to CMake. When `XRPL_ENABLE_TELEMETRY=ON`, compile `Telemetry.cpp` and `TelemetryConfig.cpp` and link against the `opentelemetry-cpp` targets (per the POC Lessons Learned below, the Conan package exposes the umbrella target `opentelemetry-cpp::opentelemetry-cpp` rather than per-component targets such as `opentelemetry-cpp::api`, and it builds the OTLP HTTP exporter rather than the gRPC one). When OFF, compile only `NullTelemetry.cpp`.
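
A rough sketch of the `start()` wiring, using the OTel SDK factory APIs (the HTTP exporter choice and the `std::shared_ptr`/`nostd::shared_ptr` wrapping follow the POC Lessons Learned at the end of this document; the `setup_` and `provider_` members and the exact option fields are assumptions):

```cpp
// Sketch of TelemetryImpl::start(); illustrative, not the final code.
#include <opentelemetry/exporters/otlp/otlp_http_exporter_factory.h>
#include <opentelemetry/sdk/resource/resource.h>
#include <opentelemetry/sdk/trace/batch_span_processor_factory.h>
#include <opentelemetry/sdk/trace/batch_span_processor_options.h>
#include <opentelemetry/sdk/trace/samplers/trace_id_ratio_factory.h>
#include <opentelemetry/sdk/trace/tracer_provider_factory.h>
#include <opentelemetry/trace/provider.h>

namespace otlp = opentelemetry::exporter::otlp;
namespace trace_sdk = opentelemetry::sdk::trace;
namespace sdk_resource = opentelemetry::sdk::resource;

void
TelemetryImpl::start()
{
    // OTLP/HTTP exporter (the Conan package builds this one by default).
    otlp::OtlpHttpExporterOptions exporterOpts;
    exporterOpts.url = setup_.exporterEndpoint;  // e.g. http://localhost:4318/v1/traces

    trace_sdk::BatchSpanProcessorOptions batchOpts;
    batchOpts.max_queue_size = 2048;  // bounds memory growth

    auto res = sdk_resource::Resource::Create(
        {{"service.name", setup_.serviceName},
         {"service.version", setup_.serviceVersion},
         {"service.instance.id", setup_.serviceInstanceId}});

    // The factory returns unique_ptr<sdk::TracerProvider>; keep it in a
    // std::shared_ptr member and hand a nostd::shared_ptr to the global
    // API (see lessons learned).
    provider_ = trace_sdk::TracerProviderFactory::Create(
        trace_sdk::BatchSpanProcessorFactory::Create(
            otlp::OtlpHttpExporterFactory::Create(exporterOpts), batchOpts),
        res,
        trace_sdk::TraceIdRatioBasedSamplerFactory::Create(
            setup_.samplingRatio));

    std::shared_ptr<opentelemetry::trace::TracerProvider> apiProvider =
        provider_;
    opentelemetry::trace::Provider::SetTracerProvider(
        opentelemetry::nostd::shared_ptr<opentelemetry::trace::TracerProvider>(
            apiProvider));
}
```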

**Key new files**:

- `src/libxrpl/telemetry/Telemetry.cpp`
- `src/libxrpl/telemetry/TelemetryConfig.cpp`

**Key modified files**:

- `CMakeLists.txt` (add telemetry library target)

**Reference**:

- [04-code-samples.md §4.1](./04-code-samples.md) — `Telemetry` interface that `TelemetryImpl` must implement
- [05-configuration-reference.md §5.2](./05-configuration-reference.md) — `setup_Telemetry()` config parser implementation
- [02-design-decisions.md §2.2](./02-design-decisions.md) — OTLP/gRPC exporter config (endpoint, TLS options)
- [02-design-decisions.md §2.4.1](./02-design-decisions.md) — Resource attributes: `service.name`, `service.version`, `service.instance.id`, `xrpl.network.id`
- [03-implementation-strategy.md §3.4](./03-implementation-strategy.md) — Per-operation CPU costs and overhead budget for span creation
- [03-implementation-strategy.md §3.5](./03-implementation-strategy.md) — Memory overhead: static (~456 KB) and dynamic (~1.2 MB) budgets

---

## Task 4: Integrate Telemetry into Application Lifecycle

**Objective**: Wire the `Telemetry` object into the `ServiceRegistry` / `Application` so all components can access it.

**What to do**:

- Edit `include/xrpl/core/ServiceRegistry.h`:
  - Forward-declare `namespace telemetry { class Telemetry; }` inside `namespace xrpl`
  - Add pure virtual method: `virtual telemetry::Telemetry& getTelemetry() = 0;`
  - (`Application` extends `ServiceRegistry`, so this is automatically available on `Application` too)
- Edit `src/xrpld/app/main/Application.cpp` (the `ApplicationImp` class):
  - Add member: `std::unique_ptr<telemetry::Telemetry> telemetry_;`
  - In the member initializer list, construct telemetry with an empty `serviceInstanceId` (node identity is not yet known):

    ```cpp
    , telemetry_(telemetry::make_Telemetry(
          telemetry::setup_Telemetry(
              config_->section("telemetry"),
              "", // Updated later via setServiceInstanceId()
              BuildInfo::getVersionString()),
          logs_->journal("Telemetry")))
    ```

  - In `setup()`, after `nodeIdentity_` is resolved, inject the node public key as the service instance ID:

    ```cpp
    if (!config_->section("telemetry").exists("service_instance_id"))
        telemetry_->setServiceInstanceId(
            toBase58(TokenType::NodePublic, nodeIdentity_->first));
    ```

  - In `start()`: call `telemetry_->start()`
  - In `run()` (shutdown path): call `telemetry_->stop()` (to flush pending spans)
  - Implement the `getTelemetry()` override: return `*telemetry_`
- Add a `[telemetry]` section to the example config `cfg/xrpld-example.cfg`:

  ```ini
  # [telemetry]
  # enabled=1
  # endpoint=http://localhost:4318/v1/traces
  # sampling_ratio=1.0
  # trace_rpc=1
  ```

> **Access patterns**: Components holding `ServiceRegistry&` (e.g. `NetworkOPsImp`) call `registry_.get().getTelemetry()`. Components holding `Application&` (e.g. `ServerHandler`, `PeerImp`, `RCLConsensusAdaptor`) call `app_.getTelemetry()` directly. Both resolve to the same `Telemetry` instance.
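
For illustration, a component holding an `Application&` might then instrument a method like this (hypothetical call site; the macro comes from Task 5):

```cpp
// Hypothetical call site: assumes a member `app_` of type Application&
// and that TracingInstrumentation.h (Task 5) is included.
void
SomeComponent::doWork()
{
    XRPL_TRACE_SPAN(app_.getTelemetry(), "component.work");
    // ... everything here runs under the "component.work" span,
    // which ends automatically when the guard leaves scope.
}
```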

**Key modified files**:

- `include/xrpl/core/ServiceRegistry.h`
- `src/xrpld/app/main/Application.cpp`
- `cfg/xrpld-example.cfg` (example config)

**Reference**:

- [05-configuration-reference.md §5.3](./05-configuration-reference.md) — `ApplicationImp` changes: member declaration, constructor init, `start()`/`stop()` wiring, `getTelemetry()` override
- [05-configuration-reference.md §5.1](./05-configuration-reference.md) — `[telemetry]` config section format and all option defaults
- [03-implementation-strategy.md §3.9.2](./03-implementation-strategy.md) — File impact assessment: `Application.cpp` ~15 lines added, ~3 changed (Low risk)

---

## Task 5: Create Instrumentation Macros

**Objective**: Define convenience macros that make instrumenting code one-liners, and that compile to zero-cost no-ops when telemetry is disabled.

**What to do**:

- Create `src/xrpld/telemetry/TracingInstrumentation.h`:
  - When `XRPL_ENABLE_TELEMETRY` is defined:

    ```cpp
    // Parameter names deliberately avoid the word "telemetry": a macro
    // parameter named `telemetry` would be substituted into the token
    // sequence `::xrpl::telemetry::SpanGuard` and break the expansion
    // (see POC Lessons Learned below).
    #define XRPL_TRACE_SPAN(_tel_obj_, _span_name_)                   \
        auto _xrpl_span_ = (_tel_obj_).startSpan(_span_name_);        \
        ::xrpl::telemetry::SpanGuard _xrpl_guard_(_xrpl_span_)

    #define XRPL_TRACE_RPC(_tel_obj_, _span_name_)                    \
        std::optional<::xrpl::telemetry::SpanGuard> _xrpl_guard_;     \
        if ((_tel_obj_).shouldTraceRpc()) {                           \
            _xrpl_guard_.emplace((_tel_obj_).startSpan(_span_name_)); \
        }

    // The two macros below pair with the std::optional guard created
    // by XRPL_TRACE_RPC.
    #define XRPL_TRACE_SET_ATTR(key, value)                           \
        if (_xrpl_guard_.has_value()) {                               \
            _xrpl_guard_->setAttribute(key, value);                   \
        }

    #define XRPL_TRACE_EXCEPTION(e)                                   \
        if (_xrpl_guard_.has_value()) {                               \
            _xrpl_guard_->recordException(e);                         \
        }
    ```

  - When `XRPL_ENABLE_TELEMETRY` is NOT defined, all macros expand to `((void)0)` (sketched below).
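
The disabled branch might look like the following sketch (the exact spelling of the no-op is a matter of taste, as long as it swallows its arguments and still requires the trailing semicolon):

```cpp
#else  // XRPL_ENABLE_TELEMETRY not defined: every macro is a no-op
#define XRPL_TRACE_SPAN(_tel_obj_, _span_name_) ((void)0)
#define XRPL_TRACE_RPC(_tel_obj_, _span_name_) ((void)0)
#define XRPL_TRACE_SET_ATTR(key, value) ((void)0)
#define XRPL_TRACE_EXCEPTION(e) ((void)0)
#endif  // XRPL_ENABLE_TELEMETRY
```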

**Key new file**:

- `src/xrpld/telemetry/TracingInstrumentation.h`

**Reference**:

- [04-code-samples.md §4.3](./04-code-samples.md) — Full macro definitions for `XRPL_TRACE_SPAN`, `XRPL_TRACE_RPC`, `XRPL_TRACE_CONSENSUS`, `XRPL_TRACE_SET_ATTR`, `XRPL_TRACE_EXCEPTION` with both enabled and disabled branches
- [03-implementation-strategy.md §3.7.3](./03-implementation-strategy.md) — Conditional instrumentation pattern: compile-time `#ifndef` and runtime `shouldTrace*()` checks
- [03-implementation-strategy.md §3.9.7](./03-implementation-strategy.md) — Before/after code examples showing minimal intrusiveness (~1-3 lines per instrumentation point)

---
---
|
||||
|
||||
## Task 6: Instrument RPC ServerHandler
|
||||
|
||||
> **WS** = WebSocket
|
||||
|
||||
**Objective**: Add tracing to the HTTP RPC entry point so every incoming RPC request creates a span.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Edit `src/xrpld/rpc/detail/ServerHandler.cpp`:
|
||||
- `#include` the `TracingInstrumentation.h` header
|
||||
- In `ServerHandler::onRequest(Session& session)`:
|
||||
- At the top of the method, add: `XRPL_TRACE_RPC(app_.getTelemetry(), "rpc.request");`
|
||||
- After the RPC command name is extracted, set attribute: `XRPL_TRACE_SET_ATTR("xrpl.rpc.command", command);`
|
||||
- After the response status is known, set: `XRPL_TRACE_SET_ATTR("http.status_code", static_cast<int64_t>(statusCode));`
|
||||
- Wrap error paths with: `XRPL_TRACE_EXCEPTION(e);`
|
||||
- In `ServerHandler::processRequest(...)`:
|
||||
- Add a child span: `XRPL_TRACE_RPC(app_.getTelemetry(), "rpc.process");`
|
||||
- Set method attribute: `XRPL_TRACE_SET_ATTR("xrpl.rpc.method", request_method);`
|
||||
- In `ServerHandler::onWSMessage(...)` (WebSocket path):
|
||||
- Add: `XRPL_TRACE_RPC(app_.getTelemetry(), "rpc.ws.message");`
|
||||
|
||||
- The goal is to see spans like:
|
||||
```
|
||||
rpc.request
|
||||
└── rpc.process
|
||||
```
|
||||
in Tempo/Grafana for every HTTP RPC call.
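
Put together, the instrumented entry point might look roughly like this (a sketch, not the real `ServerHandler` code; `extractCommand` and `dispatch` are hypothetical stand-ins for the existing request handling):

```cpp
// Illustrative sketch only; the real onRequest() control flow differs.
void
ServerHandler::onRequest(Session& session)
{
    XRPL_TRACE_RPC(app_.getTelemetry(), "rpc.request");

    auto const command = extractCommand(session);  // hypothetical helper
    XRPL_TRACE_SET_ATTR("xrpl.rpc.command", command);

    try
    {
        auto const statusCode = dispatch(session);  // hypothetical helper
        XRPL_TRACE_SET_ATTR(
            "http.status_code", static_cast<int64_t>(statusCode));
    }
    catch (std::exception const& e)
    {
        XRPL_TRACE_EXCEPTION(e);
        throw;
    }
}
```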

**Key modified file**:

- `src/xrpld/rpc/detail/ServerHandler.cpp` (~15-25 lines added)

**Reference**:

- [04-code-samples.md §4.5.3](./04-code-samples.md) — Complete `ServerHandler::onRequest()` instrumented code sample with W3C header extraction, span creation, attribute setting, and error handling
- [01-architecture-analysis.md §1.5](./01-architecture-analysis.md) — RPC request flow diagram: HTTP request -> attributes -> jobqueue.enqueue -> rpc.command -> response
- [01-architecture-analysis.md §1.6](./01-architecture-analysis.md) — Key trace points table: `rpc.request` in `ServerHandler.cpp::onRequest()` (Priority: High)
- [02-design-decisions.md §2.3](./02-design-decisions.md) — Span naming convention: `rpc.request`, `rpc.command.*`
- [02-design-decisions.md §2.4.2](./02-design-decisions.md) — RPC span attributes: `xrpl.rpc.command`, `xrpl.rpc.version`, `xrpl.rpc.role`, `xrpl.rpc.params`
- [03-implementation-strategy.md §3.9.2](./03-implementation-strategy.md) — File impact: `ServerHandler.cpp` ~40 lines added, ~10 changed (Low risk)

---

## Task 7: Instrument RPC Command Execution

**Objective**: Add per-command tracing inside the RPC handler so each command (e.g., `submit`, `account_info`, `server_info`) gets its own child span.

**What to do**:

- Edit `src/xrpld/rpc/detail/RPCHandler.cpp`:
  - `#include` the `TracingInstrumentation.h` header
  - In `doCommand(RPC::JsonContext& context, Json::Value& result)`:
    - At the top: `XRPL_TRACE_RPC(context.app.getTelemetry(), "rpc.command." + context.method);`
    - Set attributes:
      - `XRPL_TRACE_SET_ATTR("xrpl.rpc.command", context.method);`
      - `XRPL_TRACE_SET_ATTR("xrpl.rpc.version", static_cast<int64_t>(context.apiVersion));`
      - `XRPL_TRACE_SET_ATTR("xrpl.rpc.role", (context.role == Role::ADMIN) ? "admin" : "user");`
    - On success: `XRPL_TRACE_SET_ATTR("xrpl.rpc.status", "success");`
    - On error: `XRPL_TRACE_SET_ATTR("xrpl.rpc.status", "error");` and set the error message
- After this, traces in Tempo/Grafana should look like (a sketch of the instrumented `doCommand()` follows):

  ```
  rpc.request (xrpl.rpc.command=account_info)
  └── rpc.process
      └── rpc.command.account_info (xrpl.rpc.version=2, xrpl.rpc.role=user, xrpl.rpc.status=success)
  ```
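
In sketch form (illustrative only; `invokeHandler` stands in for the real handler dispatch, and `doCommand`'s actual signature and control flow are more involved):

```cpp
// Illustrative sketch of the doCommand() instrumentation points.
error_code_i
doCommand(RPC::JsonContext& context, Json::Value& result)
{
    XRPL_TRACE_RPC(
        context.app.getTelemetry(), "rpc.command." + context.method);
    XRPL_TRACE_SET_ATTR("xrpl.rpc.command", context.method);
    XRPL_TRACE_SET_ATTR(
        "xrpl.rpc.version", static_cast<int64_t>(context.apiVersion));
    XRPL_TRACE_SET_ATTR(
        "xrpl.rpc.role", (context.role == Role::ADMIN) ? "admin" : "user");

    auto const err = invokeHandler(context, result);  // hypothetical helper
    XRPL_TRACE_SET_ATTR("xrpl.rpc.status", err ? "error" : "success");
    return err;
}
```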

**Key modified file**:

- `src/xrpld/rpc/detail/RPCHandler.cpp` (~15-20 lines added)

**Reference**:

- [04-code-samples.md §4.5.3](./04-code-samples.md) — `ServerHandler::onRequest()` code sample (includes child span pattern for `rpc.command.*`)
- [02-design-decisions.md §2.3](./02-design-decisions.md) — Span naming: `rpc.command.*` pattern with dynamic command name (e.g., `rpc.command.server_info`)
- [02-design-decisions.md §2.4.2](./02-design-decisions.md) — RPC attribute schema: `xrpl.rpc.command`, `xrpl.rpc.version`, `xrpl.rpc.role`, `xrpl.rpc.status`
- [01-architecture-analysis.md §1.6](./01-architecture-analysis.md) — Key trace points table: `rpc.command.*` in `RPCHandler.cpp::doCommand()` (Priority: High)
- [02-design-decisions.md §2.6.5](./02-design-decisions.md) — Correlation with PerfLog: how `doCommand()` can link trace_id with existing PerfLog entries
- [03-implementation-strategy.md §3.4.4](./03-implementation-strategy.md) — RPC request overhead budget: ~1.75 μs total per request

---

## Task 8: Build, Run, and Verify End-to-End

> **OTLP** = OpenTelemetry Protocol

**Objective**: Prove the full pipeline works: rippled emits traces -> OTel Collector receives them -> Tempo stores them for Grafana visualization.

**What to do**:

1. **Start the Docker stack**:

   ```bash
   docker compose -f docker/telemetry/docker-compose.yml up -d
   ```

   Verify Collector health: `curl http://localhost:13133`

2. **Build rippled with telemetry**:

   ```bash
   # Adjust for your actual build workflow
   conan install . --build=missing -o with_telemetry=True
   cmake --preset default -DXRPL_ENABLE_TELEMETRY=ON
   cmake --build --preset default
   ```

3. **Configure rippled**:
   Add to `rippled.cfg` (or your local test config); the endpoint matches the OTLP HTTP exporter default from Task 3:

   ```ini
   [telemetry]
   enabled=1
   endpoint=http://localhost:4318/v1/traces
   sampling_ratio=1.0
   trace_rpc=1
   ```

4. **Start rippled** in standalone mode:

   ```bash
   ./rippled --conf rippled.cfg -a --start
   ```

5. **Generate RPC traffic**:

   ```bash
   # server_info
   curl -s -X POST http://localhost:5005 \
     -H "Content-Type: application/json" \
     -d '{"method":"server_info","params":[{}]}'

   # ledger
   curl -s -X POST http://localhost:5005 \
     -H "Content-Type: application/json" \
     -d '{"method":"ledger","params":[{"ledger_index":"current"}]}'

   # account_info (will error in standalone, that's fine — we trace errors too)
   curl -s -X POST http://localhost:5005 \
     -H "Content-Type: application/json" \
     -d '{"method":"account_info","params":[{"account":"rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"}]}'
   ```

6. **Verify in Grafana (Tempo)**:
   - Open `http://localhost:3000`
   - Navigate to Explore → select the Tempo datasource
   - Search for service `rippled`
   - Confirm you see traces with spans: `rpc.request` -> `rpc.process` -> `rpc.command.server_info`
   - Click into a trace and verify attributes: `xrpl.rpc.command`, `xrpl.rpc.status`, `xrpl.rpc.version`

7. **Verify zero-overhead when disabled**:
   - Rebuild with `XRPL_ENABLE_TELEMETRY=OFF`, or set `enabled=0` in config
   - Run the same RPC calls
   - Confirm no new traces appear and no errors in rippled logs

**Verification Checklist**:

- [ ] Docker stack starts without errors
- [ ] rippled builds with `-DXRPL_ENABLE_TELEMETRY=ON`
- [ ] rippled starts and connects to the OTel Collector (check rippled logs for telemetry messages)
- [ ] Traces appear in Grafana/Tempo under service "rippled"
- [ ] Span hierarchy is correct (parent-child relationships)
- [ ] Span attributes are populated (`xrpl.rpc.command`, `xrpl.rpc.status`, etc.)
- [ ] Error spans show error status and message
- [ ] Building with `XRPL_ENABLE_TELEMETRY=OFF` produces no regressions
- [ ] Setting `enabled=0` at runtime produces no traces and no errors

**Reference**:

- [06-implementation-phases.md §6.11.1](./06-implementation-phases.md) — Phase 1 definition of done: SDK compiles, runtime toggle works, span creation verified in Tempo, config validation passes
- [06-implementation-phases.md §6.11.2](./06-implementation-phases.md#6112-phase-2-rpc-tracing) — Phase 2 definition of done: 100% RPC coverage, traceparent propagation, <1ms p99 overhead, dashboard deployed
- [06-implementation-phases.md §6.8](./06-implementation-phases.md) — Success metrics: trace coverage >95%, CPU overhead <3%, memory <5 MB, latency impact <2%
- [03-implementation-strategy.md §3.9.5](./03-implementation-strategy.md) — Backward compatibility: config optional, protocol unchanged, `XRPL_ENABLE_TELEMETRY=OFF` produces identical binary
- [01-architecture-analysis.md §1.8](./01-architecture-analysis.md) — Observable outcomes: what traces, metrics, and dashboards to expect

---

## Task 9: Document POC Results and Next Steps

> **OTLP** = OpenTelemetry Protocol | **WS** = WebSocket

**Objective**: Capture findings, screenshots, and remaining work for the team.

**What to do**:

- Take screenshots of Grafana/Tempo showing:
  - The service list with "rippled"
  - A trace with the full span tree
  - Span detail view showing attributes
- Document any issues encountered (build issues, SDK quirks, missing attributes)
- Note performance observations (build time impact, any noticeable runtime overhead)
- Write a short summary of what the POC proves and what it doesn't cover yet:
  - **Proves**: OTel SDK integrates with rippled, OTLP export works, RPC traces visible
  - **Doesn't cover**: Cross-node P2P context propagation, consensus tracing, protobuf trace context, W3C traceparent header extraction, tail-based sampling, production deployment
- Outline next steps (mapping to the full plan phases):
  - [Phase 2](./06-implementation-phases.md) completion: [W3C header extraction](./02-design-decisions.md) (§2.5), WebSocket tracing, all [RPC handlers](./01-architecture-analysis.md) (§1.6)
  - [Phase 3](./06-implementation-phases.md): [Protobuf `TraceContext` message](./04-code-samples.md) (§4.4), [transaction relay tracing](./04-code-samples.md) (§4.5.1) across nodes
  - [Phase 4](./06-implementation-phases.md): [Consensus round and phase tracing](./04-code-samples.md) (§4.5.2)
  - [Phase 5](./06-implementation-phases.md): [Production collector config](./05-configuration-reference.md) (§5.5.2), [Grafana dashboards](./07-observability-backends.md) (§7.6), [alerting](./07-observability-backends.md) (§7.6.3)

**Reference**:

- [06-implementation-phases.md §6.1](./06-implementation-phases.md) — Full 5-phase timeline overview and Gantt chart
- [06-implementation-phases.md §6.10](./06-implementation-phases.md) — Crawl-Walk-Run strategy: POC is the CRAWL phase, next steps are WALK and RUN
- [06-implementation-phases.md §6.12](./06-implementation-phases.md) — Recommended implementation order (14 steps across 9 weeks)
- [03-implementation-strategy.md §3.9](./03-implementation-strategy.md) — Code intrusiveness assessment and risk matrix for each remaining component
- [07-observability-backends.md §7.2](./07-observability-backends.md) — Production backend selection (Tempo, Elastic APM, Honeycomb, Datadog)
- [02-design-decisions.md §2.5](./02-design-decisions.md) — Context propagation design: W3C HTTP headers, protobuf P2P, JobQueue internal
- [00-tracing-fundamentals.md](./00-tracing-fundamentals.md) — Reference for team onboarding on distributed tracing concepts

---

## Summary

| Task | Description                          | New Files | Modified Files | Depends On |
| ---- | ------------------------------------ | --------- | -------------- | ---------- |
| 0    | Docker observability stack           | 4         | 0              | —          |
| 1    | OTel C++ SDK dependency              | 0         | 2              | —          |
| 2    | Core Telemetry interface + NullImpl  | 3         | 0              | 1          |
| 3    | OTel-backed Telemetry implementation | 2         | 1              | 1, 2       |
| 4    | Application lifecycle integration    | 0         | 3              | 2, 3       |
| 5    | Instrumentation macros               | 1         | 0              | 2          |
| 6    | Instrument RPC ServerHandler         | 0         | 1              | 4, 5       |
| 7    | Instrument RPC command execution     | 0         | 1              | 4, 5       |
| 8    | End-to-end verification              | 0         | 0              | 0-7        |
| 9    | Document results and next steps      | 1         | 0              | 8          |

**Parallel work**: Tasks 0 and 1 can run in parallel. Task 5 depends only on Task 2, so it can proceed before Tasks 3 and 4 land. Tasks 6 and 7 can be done in parallel once Tasks 4 and 5 are complete.

---

## Next Steps (Post-POC)

> **OTLP** = OpenTelemetry Protocol | **WS** = WebSocket

### Metrics Pipeline for Grafana Dashboards

The current POC exports **traces only**. Grafana's Explore view can query Tempo for individual traces, but time-series charts (latency histograms, request throughput, error rates) require a **metrics pipeline**. To enable this:

1. **Add a `spanmetrics` connector** to the OTel Collector config that derives RED metrics (Rate, Errors, Duration) from trace spans automatically:

   ```yaml
   connectors:
     spanmetrics:
       histogram:
         explicit:
           buckets: [1ms, 5ms, 10ms, 25ms, 50ms, 100ms, 250ms, 500ms, 1s, 5s]
       dimensions:
         - name: xrpl.rpc.command
         - name: xrpl.rpc.status

   exporters:
     prometheus:
       endpoint: 0.0.0.0:8889

   service:
     pipelines:
       traces:
         receivers: [otlp]
         processors: [batch]
         exporters: [debug, otlp/tempo, spanmetrics]
       metrics:
         receivers: [spanmetrics]
         exporters: [prometheus]
   ```

2. **Add Prometheus** to the Docker Compose stack to scrape the collector's metrics endpoint.

3. **Add Prometheus as a Grafana datasource** and build dashboards for:
   - RPC request latency (p50/p95/p99) by command
   - RPC throughput (requests/sec) by command
   - Error rate by command
   - Span duration distribution

### Additional Instrumentation

- **W3C `traceparent` header extraction** in `ServerHandler` to support cross-service context propagation from external callers (a parsing sketch follows this list)
- **WebSocket RPC tracing** in `ServerHandler::onWSMessage()`
- **Transaction relay tracing** across nodes using protobuf `TraceContext` messages
- **Consensus round and phase tracing** for validator coordination visibility
- **Ledger close tracing** to measure close-to-validated latency
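
As a starting point for that header extraction, a W3C `traceparent` value has the fixed form `00-<32 hex trace-id>-<16 hex span-id>-<2 hex flags>`. A minimal validating parser might look like this (standalone sketch; a real implementation would hand the fields to the OTel propagator API instead of hand-rolling the parse):

```cpp
#include <cctype>
#include <optional>
#include <string_view>

struct TraceParent
{
    std::string_view traceId;  // 32 hex chars
    std::string_view spanId;   // 16 hex chars
    bool sampled = false;
};

// Returns the parsed fields, or nullopt if the header is malformed.
inline std::optional<TraceParent>
parseTraceparent(std::string_view header)
{
    // Expected layout: "00-" + 32 hex + "-" + 16 hex + "-" + 2 hex = 55 chars.
    if (header.size() != 55 || header.substr(0, 3) != "00-" ||
        header[35] != '-' || header[52] != '-')
        return std::nullopt;

    auto const allHex = [](std::string_view s) {
        for (unsigned char c : s)
            if (!std::isxdigit(c))
                return false;
        return true;
    };

    auto const traceId = header.substr(3, 32);
    auto const spanId = header.substr(36, 16);
    auto const flags = header.substr(53, 2);
    if (!allHex(traceId) || !allHex(spanId) || !allHex(flags))
        return std::nullopt;

    // All-zero trace-id / span-id values are invalid per the W3C spec.
    if (traceId.find_first_not_of('0') == std::string_view::npos ||
        spanId.find_first_not_of('0') == std::string_view::npos)
        return std::nullopt;

    // Simplified: only the common "01" (sampled) flag value is recognized.
    return TraceParent{traceId, spanId, flags == "01"};
}
```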

### Production Hardening

- **Tail-based sampling** in the OTel Collector to reduce volume while retaining error/slow traces
- **TLS configuration** for the OTLP exporter in production deployments
- **Resource limits** on the batch processor queue to prevent unbounded memory growth
- **Health monitoring** for the telemetry pipeline itself (collector lag, export failures)

### POC Lessons Learned

Issues encountered during POC implementation that inform future work:

| Issue | Resolution | Impact on Future Work |
| ----- | ---------- | --------------------- |
| Conan lockfile rejected `opentelemetry-cpp/1.18.0` | Used `--lockfile=""` to bypass | Lockfile must be regenerated when adding new dependencies |
| Conan package only builds the OTLP HTTP exporter, not gRPC | Switched from gRPC to HTTP exporter (`localhost:4318/v1/traces`) | HTTP exporter is the default; gRPC requires a custom Conan profile |
| CMake targets `opentelemetry-cpp::api` etc. don't exist in the Conan package | Use umbrella target `opentelemetry-cpp::opentelemetry-cpp` | Conan targets differ from upstream CMake targets |
| OTel Collector `logging` exporter deprecated | Renamed to `debug` exporter | Use `debug` in all collector configs going forward |
| Macro parameter `telemetry` collided with the `::xrpl::telemetry::` namespace | Renamed macro params to `_tel_obj_`, `_span_name_` | Avoid common words as macro parameter names |
| `opentelemetry::trace::Scope` creates a new context on move | Store the scope as a member, created once in the constructor | SpanGuard move semantics need care with Scope lifecycle |
| `TracerProviderFactory::Create` returns `unique_ptr<sdk::TracerProvider>`, not `nostd::shared_ptr` | Use a `std::shared_ptr` member, wrap in `nostd::shared_ptr` for the global provider | OTel SDK factory return types don't match API provider types |
673
OpenTelemetryPlan/presentation.md
Normal file
@@ -0,0 +1,673 @@

# OpenTelemetry Distributed Tracing for rippled

---

## Slide 1: Introduction

> **CNCF** = Cloud Native Computing Foundation

### What is OpenTelemetry?

OpenTelemetry is an open-source, CNCF-backed observability framework for distributed tracing, metrics, and logs.

### Why OpenTelemetry for rippled?

- **End-to-End Transaction Visibility**: Track transactions from submission → consensus → ledger inclusion
- **Cross-Node Correlation**: Follow requests across multiple independent nodes using a unique `trace_id`
- **Consensus Round Analysis**: Understand timing and behavior across validators
- **Incident Debugging**: Correlate events across distributed nodes during issues

```mermaid
flowchart LR
    A["Node A<br/>tx.receive<br/>trace_id: abc123"] --> B["Node B<br/>tx.relay<br/>trace_id: abc123"] --> C["Node C<br/>tx.validate<br/>trace_id: abc123"] --> D["Node D<br/>ledger.apply<br/>trace_id: abc123"]

    style A fill:#1565c0,stroke:#0d47a1,color:#fff
    style B fill:#2e7d32,stroke:#1b5e20,color:#fff
    style C fill:#2e7d32,stroke:#1b5e20,color:#fff
    style D fill:#e65100,stroke:#bf360c,color:#fff
```

**Reading the diagram:**

- **Node A (blue, leftmost)**: The originating node that first receives the transaction and assigns a new `trace_id: abc123`; this ID becomes the correlation key for the entire distributed trace.
- **Node B and Node C (green, middle)**: Relay and validation nodes — each creates its own span but carries the same `trace_id`, so their work is linked to the original submission without any central coordinator.
- **Node D (orange, rightmost)**: The final node that applies the transaction to the ledger; the trace now spans the full lifecycle from submission to ledger inclusion.
- **Left-to-right flow**: The horizontal progression shows the real-world message path — a transaction hops from node to node, and the shared `trace_id` stitches all hops into a single queryable trace.

> **Trace ID: abc123** — All nodes share the same trace, enabling cross-node correlation.

---

## Slide 2: OpenTelemetry vs Open Source Alternatives

> **CNCF** = Cloud Native Computing Foundation

| Feature             | OpenTelemetry    | Jaeger           | Zipkin             | SkyWalking | Pinpoint   | Prometheus |
| ------------------- | ---------------- | ---------------- | ------------------ | ---------- | ---------- | ---------- |
| **Tracing**         | YES              | YES              | YES                | YES        | YES        | NO         |
| **Metrics**         | YES              | NO               | NO                 | YES        | YES        | YES        |
| **Logs**            | YES              | NO               | NO                 | YES        | NO         | NO         |
| **C++ SDK**         | YES Official     | YES (Deprecated) | YES (Unmaintained) | NO         | NO         | YES        |
| **Vendor Neutral**  | YES Primary goal | NO               | NO                 | NO         | NO         | NO         |
| **Instrumentation** | Manual + Auto    | Manual           | Manual             | Auto-first | Auto-first | Manual     |
| **Backend**         | Any (exporters)  | Self             | Self               | Self       | Self       | Self       |
| **CNCF Status**     | Incubating       | Graduated        | NO                 | Incubating | NO         | Graduated  |

> **Why OpenTelemetry?** It's the only actively maintained, full-featured C++ option with vendor neutrality — allowing export to Tempo, Prometheus, Grafana, or any commercial backend without changing instrumentation.

---

## Slide 3: Adoption Scope — Traces Only (Current Plan)

OpenTelemetry supports three signal types: **Traces**, **Metrics**, and **Logs**. rippled already captures metrics (StatsD via Beast Insight) and logs (Journal/PerfLog). The question is: how much of OTel do we adopt?

> **Scenario A**: Add distributed tracing. Keep StatsD for metrics and Journal for logs.

```mermaid
flowchart LR
    subgraph rippled["rippled Process"]
        direction TB
        OTel["OTel SDK<br/>(Traces)"]
        Insight["Beast Insight<br/>(StatsD Metrics)"]
        Journal["Journal + PerfLog<br/>(Logging)"]
    end

    OTel -->|"OTLP"| Collector["OTel Collector"]
    Insight -->|"UDP"| StatsD["StatsD Server"]
    Journal -->|"File I/O"| LogFile["perf.log / debug.log"]

    Collector --> Tempo["Tempo"]
    StatsD --> Graphite["Graphite / Grafana"]
    LogFile --> Loki["Loki (optional)"]

    style rippled fill:#424242,stroke:#212121,color:#fff
    style OTel fill:#2e7d32,stroke:#1b5e20,color:#fff
    style Insight fill:#1565c0,stroke:#0d47a1,color:#fff
    style Journal fill:#e65100,stroke:#bf360c,color:#fff
    style Collector fill:#2e7d32,stroke:#1b5e20,color:#fff
```

| Aspect                         | Details                                                                                                         |
| ------------------------------ | --------------------------------------------------------------------------------------------------------------- |
| **What changes for operators** | Deploy OTel Collector + trace backend. Existing StatsD and log pipelines stay as-is.                            |
| **Codebase impact**            | New `Telemetry` module (~1500 LOC). Beast Insight and Journal untouched.                                        |
| **New capabilities**           | Cross-node trace correlation, span-based debugging, request lifecycle visibility.                               |
| **What we still can't do**     | Correlate metrics with specific traces natively. StatsD metrics remain fire-and-forget with no trace exemplars. |
| **Maintenance burden**         | Three separate observability systems to maintain (OTel + StatsD + Journal).                                     |
| **Risk**                       | Lowest — additive change, no existing systems disturbed.                                                        |

---

## Slide 4: Future Adoption — Metrics & Logs via OTel

### Scenario B: + OTel Metrics (Replace StatsD)

> Migrate StatsD to the OTel Metrics API, exposing Prometheus-compatible metrics. Remove Beast Insight.

```mermaid
flowchart LR
    subgraph rippled["rippled Process"]
        direction TB
        OTel["OTel SDK<br/>(Traces + Metrics)"]
        Journal["Journal + PerfLog<br/>(Logging)"]
    end

    OTel -->|"OTLP"| Collector["OTel Collector"]
    Journal -->|"File I/O"| LogFile["perf.log / debug.log"]

    Collector --> Tempo["Tempo<br/>(Traces)"]
    Collector --> Prom["Prometheus<br/>(Metrics)"]
    LogFile --> Loki["Loki (optional)"]

    style rippled fill:#424242,stroke:#212121,color:#fff
    style OTel fill:#2e7d32,stroke:#1b5e20,color:#fff
    style Journal fill:#e65100,stroke:#bf360c,color:#fff
    style Collector fill:#2e7d32,stroke:#1b5e20,color:#fff
```

- **Better metrics?** Yes — Prometheus gives native histograms (p50/p95/p99), multi-dimensional labels, and exemplars linking metric spikes to traces.
- **Codebase**: Remove `Beast::Insight` + `StatsDCollector` (~2000 LOC). Single SDK for traces and metrics.
- **Operator effort**: Rewrite dashboards from StatsD/Graphite queries to PromQL. Run both in parallel during the transition.
- **Risk**: Medium — operators must migrate monitoring infrastructure.

### Scenario C: + OTel Logs (Full Stack)

> Also replace Journal logging with the OTel Logs API. Single SDK for everything.

```mermaid
flowchart LR
    subgraph rippled["rippled Process"]
        OTel["OTel SDK<br/>(Traces + Metrics + Logs)"]
    end

    OTel -->|"OTLP"| Collector["OTel Collector"]

    Collector --> Tempo["Tempo<br/>(Traces)"]
    Collector --> Prom["Prometheus<br/>(Metrics)"]
    Collector --> Loki["Loki / Elastic<br/>(Logs)"]

    style rippled fill:#424242,stroke:#212121,color:#fff
    style OTel fill:#2e7d32,stroke:#1b5e20,color:#fff
    style Collector fill:#2e7d32,stroke:#1b5e20,color:#fff
```

- **Structured logging**: The OTel Logs API outputs structured records with `trace_id`, `span_id`, severity, and attributes by design.
- **Full correlation**: Every log line carries `trace_id`. Click trace → see logs. Click metric spike → see trace → see logs.
- **Codebase**: Remove Beast Insight (~2000 LOC) + simplify Journal/PerfLog (~3000 LOC). One dependency instead of three.
- **Risk**: Highest — `beast::Journal` is deeply embedded in every component. Large refactor. The OTel C++ Logs API is newer (stable since v1.11, less battle-tested).

### Recommendation

```mermaid
flowchart LR
    A["Phase 1<br/><b>Traces Only</b><br/>(Current Plan)"] --> B["Phase 2<br/><b>+ Metrics</b><br/>(Replace StatsD)"] --> C["Phase 3<br/><b>+ Logs</b><br/>(Full OTel)"]

    style A fill:#2e7d32,stroke:#1b5e20,color:#fff
    style B fill:#1565c0,stroke:#0d47a1,color:#fff
    style C fill:#e65100,stroke:#bf360c,color:#fff
```

| Phase                | Signal    | Strategy                                                       | Risk   |
| -------------------- | --------- | -------------------------------------------------------------- | ------ |
| **Phase 1** (now)    | Traces    | Add OTel traces. Keep StatsD and Journal. Prove value.         | Low    |
| **Phase 2** (future) | + Metrics | Migrate StatsD → Prometheus via OTel. Remove Beast Insight.    | Medium |
| **Phase 3** (future) | + Logs    | Adopt OTel Logs API. Align with structured logging initiative. | High   |

> **Key Takeaway**: Start with traces (unique value, lowest risk), then incrementally adopt metrics and logs as the OTel infrastructure proves itself.

---

## Slide 5: Comparison with rippled's Existing Solutions

### Current Observability Stack

| Aspect                | PerfLog (JSON)        | StatsD (Metrics)      | OpenTelemetry (NEW)         |
| --------------------- | --------------------- | --------------------- | --------------------------- |
| **Type**              | Logging               | Metrics               | Distributed Tracing         |
| **Scope**             | Single node           | Single node           | **Cross-node**              |
| **Data**              | JSON log entries      | Counters, gauges      | Spans with context          |
| **Correlation**       | By timestamp          | By metric name        | By `trace_id`               |
| **Overhead**          | Low (file I/O)        | Low (UDP)             | Low-Medium (configurable)   |
| **Question Answered** | "What happened here?" | "How many? How fast?" | **"What was the journey?"** |

### Use Case Matrix

| Scenario                         | PerfLog | StatsD | OpenTelemetry |
| -------------------------------- | ------- | ------ | ------------- |
| "How many TXs per second?"       | ❌      | ✅     | ❌            |
| "Why was this specific TX slow?" | ⚠️      | ❌     | ✅            |
| "Which node delayed consensus?"  | ❌      | ❌     | ✅            |
| "Show TX journey across 5 nodes" | ❌      | ❌     | ✅            |

> **Key Insight**: In the **traces-only** approach (Phase 1), OpenTelemetry **complements** existing systems. In future phases, OTel metrics and logs could **replace** StatsD and Journal respectively — see Slides 3-4 for the full adoption roadmap.

---

## Slide 6: Architecture

> **OTLP** = OpenTelemetry Protocol | **WS** = WebSocket

### High-Level Integration Architecture

```mermaid
flowchart TB
    subgraph rippled["rippled Node"]
        subgraph services["Core Services"]
            direction LR
            RPC["RPC Server<br/>(HTTP/WS)"] ~~~ Overlay["Overlay<br/>(P2P Network)"] ~~~ Consensus["Consensus<br/>(RCLConsensus)"]
        end

        Telemetry["Telemetry Module<br/>(OpenTelemetry SDK)"]

        services --> Telemetry
    end

    Telemetry -->|OTLP/gRPC| Collector["OTel Collector"]

    Collector --> Tempo["Grafana Tempo"]
    Collector --> Elastic["Elastic APM"]

    style rippled fill:#424242,stroke:#212121,color:#fff
    style services fill:#1565c0,stroke:#0d47a1,color:#fff
    style Telemetry fill:#2e7d32,stroke:#1b5e20,color:#fff
    style Collector fill:#e65100,stroke:#bf360c,color:#fff
```

**Reading the diagram:**

- **Core Services (blue, top)**: RPC Server, Overlay, and Consensus are the three primary components that generate trace data — they represent the entry points for client requests, peer messages, and consensus rounds respectively.
- **Telemetry Module (green, middle)**: The OpenTelemetry SDK sits below the core services and receives span data from all three; it acts as a single collection point within the rippled process.
- **OTel Collector (orange, center)**: An external process that receives spans over OTLP/gRPC from the Telemetry Module; it decouples rippled from backend choices and handles batching, sampling, and routing.
- **Backends (bottom row)**: Tempo and Elastic APM are interchangeable — the Collector fans out to any combination, so operators can switch backends without modifying rippled code.
- **Top-to-bottom flow**: Data flows from instrumented code down through the SDK, out over the network to the Collector, and finally into storage/visualization backends.

### Context Propagation

```mermaid
sequenceDiagram
    participant Client
    participant NodeA as Node A
    participant NodeB as Node B

    Client->>NodeA: Submit TX (no context)
    Note over NodeA: Creates trace_id: abc123<br/>span: tx.receive
    NodeA->>NodeB: Relay TX<br/>(traceparent: abc123)
    Note over NodeB: Links to trace_id: abc123<br/>span: tx.relay
```

- **HTTP/RPC**: W3C Trace Context headers (`traceparent`)
- **P2P Messages**: Protocol Buffer extension fields

---

## Slide 7: Implementation Plan

### 5-Phase Rollout (9 Weeks)

> **Note**: Dates shown are relative to project start, not calendar dates.

```mermaid
gantt
    title Implementation Timeline
    dateFormat YYYY-MM-DD
    axisFormat Week %W

    section Phase 1
    Core Infrastructure :p1, 2024-01-01, 2w

    section Phase 2
    RPC Tracing :p2, after p1, 2w

    section Phase 3
    Transaction Tracing :p3, after p2, 2w

    section Phase 4
    Consensus Tracing :p4, after p3, 2w

    section Phase 5
    Documentation :p5, after p4, 1w
```

### Phase Details

| Phase | Focus               | Key Deliverables                             | Effort  |
| ----- | ------------------- | -------------------------------------------- | ------- |
| 1     | Core Infrastructure | SDK integration, Telemetry interface, Config | 10 days |
| 2     | RPC Tracing         | HTTP context extraction, Handler spans       | 10 days |
| 3     | Transaction Tracing | Protobuf context, P2P relay propagation      | 10 days |
| 4     | Consensus Tracing   | Round spans, Proposal/validation tracing     | 10 days |
| 5     | Documentation       | Runbook, Dashboards, Training                | 7 days  |

**Total Effort**: ~47 developer-days (2 developers)

> **Future Phases** (not in current scope): After traces are stable, OTel metrics can replace StatsD (~3 weeks), and OTel logs can replace Journal (~4 weeks, aligned with the structured logging initiative). See Slides 3-4 for the full adoption roadmap.

---

## Slide 8: Performance Overhead

> **OTLP** = OpenTelemetry Protocol

### Estimated System Impact

| Metric            | Overhead   | Notes                                            |
| ----------------- | ---------- | ------------------------------------------------ |
| **CPU**           | 1-3%       | Span creation and attribute setting              |
| **Memory**        | ~10 MB     | SDK statics + batch buffer + worker thread stack |
| **Network**       | 10-50 KB/s | Compressed OTLP export to collector              |
| **Latency (p99)** | <2%        | With proper sampling configuration               |

#### How We Arrived at These Numbers

**Assumptions (XRPL mainnet baseline)**:

| Parameter                 | Value                  | Source                                                                                              |
| ------------------------- | ---------------------- | --------------------------------------------------------------------------------------------------- |
| Transaction throughput    | ~25 TPS (peaks to ~50) | Mainnet average                                                                                      |
| Default peers per node    | 21                     | `peerfinder/detail/Tuning.h` (`defaultMaxPeers`)                                                     |
| Consensus round frequency | ~1 round / 3-4 seconds | `ConsensusParms.h` (`ledgerMIN_CONSENSUS=1950ms`)                                                    |
| Proposers per round       | ~20-35                 | Mainnet UNL size                                                                                     |
| P2P message rate          | ~160 msgs/sec          | See message breakdown below                                                                          |
| Avg TX processing time    | ~200 μs                | Profiled baseline                                                                                    |
| Single span creation cost | 500-1000 ns            | OTel C++ SDK benchmarks (see [3.5.4](./03-implementation-strategy.md#354-performance-data-sources))  |

**P2P message breakdown** (per node, mainnet):

| Message Type  | Rate         | Derivation                                                                  |
| ------------- | ------------ | ---------------------------------------------------------------------------- |
| TMTransaction | ~100/sec     | ~25 TPS × ~4 relay hops per TX, deduplicated by HashRouter                    |
| TMValidation  | ~50/sec      | ~35 validators × ~1 validation/3s round ≈ ~12/sec, plus relay fan-out         |
| TMProposeSet  | ~10/sec      | ~35 proposers × ~1 proposal/3s round ≈ ~12/sec, clustered in establish phase  |
| **Total**     | **~160/sec** | **Only traced message types counted**                                         |

**CPU (1-3%) — Calculation**:

Per-transaction tracing cost breakdown:

| Operation                                       | Cost        | Notes                                      |
| ----------------------------------------------- | ----------- | ------------------------------------------ |
| `tx.receive` span (create + end + 4 attributes) | ~1400 ns    | ~1000ns create + ~200ns end + 4×50ns attrs |
| `tx.validate` span                              | ~1200 ns    | ~1000ns create + ~200ns for 2 attributes   |
| `tx.relay` span                                 | ~1200 ns    | ~1000ns create + ~200ns for 2 attributes   |
| Context injection into P2P message              | ~200 ns     | Serialize trace_id + span_id into protobuf |
| **Total per TX**                                | **~4.0 μs** |                                            |

> **CPU overhead**: 4.0 μs / 200 μs baseline = **~2.0% per transaction**. Under high load with consensus + RPC spans overlapping, reaches ~3%. Consensus itself adds only ~36 μs per 3-second round (~0.001%), so the TX path dominates. On production server hardware (3+ GHz Xeon), span creation drops to ~500-600 ns, bringing per-TX cost to ~2.6 μs (~1.3%). See [Section 3.5.4](./03-implementation-strategy.md#354-performance-data-sources) for benchmark sources.
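
As a quick sanity check, the headline figures are just the per-transaction tracing cost divided by the profiled baseline:

```math
\text{CPU overhead per TX} = \frac{4.0\ \mu\text{s}}{200\ \mu\text{s}} = 2.0\%
\qquad
\frac{2.6\ \mu\text{s}}{200\ \mu\text{s}} = 1.3\%\ \text{(server hardware)}
```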

**Memory (~10 MB) — Calculation**:

| Component                                     | Size               | Notes                                 |
| --------------------------------------------- | ------------------ | ------------------------------------- |
| TracerProvider + Exporter (gRPC channel init) | ~320 KB            | Allocated once at startup             |
| BatchSpanProcessor (circular buffer)          | ~16 KB             | 2049 × 8-byte AtomicUniquePtr entries |
| BatchSpanProcessor (worker thread stack)      | ~8 MB              | Default Linux thread stack size       |
| Active spans (in-flight, max ~1000)           | ~500-800 KB        | ~500-800 bytes/span × 1000 concurrent |
| Export queue (batch buffer, max 2048 spans)   | ~1 MB              | ~500 bytes/span × 2048 queue depth    |
| Thread-local context storage (~100 threads)   | ~6.4 KB            | ~64 bytes/thread                      |
| **Total**                                     | **~10 MB ceiling** |                                       |

> Memory plateaus once the export queue fills — the `max_queue_size=2048` config bounds growth. The worker thread stack (~8 MB) dominates the static footprint but is virtual memory; actual RSS depends on stack usage (typically much less). Active spans are larger than originally estimated (~500-800 bytes) because the OTel SDK `Span` object includes a mutex (~40 bytes), a `SpanData` recordable (~250 bytes base), and `std::map`-based attribute storage (~200-500 bytes for 3-5 string attributes). See [Section 3.5.4](./03-implementation-strategy.md#354-performance-data-sources) for source references.

**Network (10-50 KB/s) — Calculation**:

Two sources of network overhead:

**(A) OTLP span export to Collector:**

| Sampling Rate              | Effective Spans/sec | Avg Span Size (compressed) | Bandwidth    |
| -------------------------- | ------------------- | -------------------------- | ------------ |
| 100% (dev only)            | ~500                | ~500 bytes                 | ~250 KB/s    |
| **10% (recommended prod)** | **~50**             | **~500 bytes**             | **~25 KB/s** |
| 1% (minimal)               | ~5                  | ~500 bytes                 | ~2.5 KB/s    |

> The ~500 spans/sec at 100% comes from: ~300 TX spans (~100 relayed TMTransaction messages/sec × 3 spans each) + ~160 P2P context spans + ~23 consensus spans per ~3s round (~8/sec) + ~50 RPC spans ≈ ~500/sec. OTLP protobuf with gzip compression yields ~500 bytes/span average.
|
||||
|
||||
**(B) P2P trace context overhead** (added to existing messages, always-on regardless of sampling):

| Message Type  | Rate     | Context Size | Bandwidth     |
| ------------- | -------- | ------------ | ------------- |
| TMTransaction | ~100/sec | 29 bytes     | ~2.9 KB/s     |
| TMValidation  | ~50/sec  | 29 bytes     | ~1.5 KB/s     |
| TMProposeSet  | ~10/sec  | 29 bytes     | ~0.3 KB/s     |
| **Total P2P** |          |              | **~4.7 KB/s** |

> **Combined**: 25 KB/s (OTLP export at 10%) + 5 KB/s (P2P context) ≈ **~30 KB/s typical**. The 10-50 KB/s range covers 10-20% sampling under normal to peak mainnet load.

**Latency (<2%) — Calculation**:

| Path                           | Tracing Cost | Baseline | Overhead |
| ------------------------------ | ------------ | -------- | -------- |
| Fast RPC (e.g., `server_info`) | 2.75 μs      | ~1 ms    | 0.275%   |
| Slow RPC (e.g., `path_find`)   | 2.75 μs      | ~100 ms  | 0.003%   |
| Transaction processing         | 4.0 μs       | ~200 μs  | 2.0%     |
| Consensus round                | 36 μs        | ~3 sec   | 0.001%   |

> At p99, even the worst case (TX processing at 2.0%) is within the 1-3% range. RPC and consensus overhead are negligible. On production hardware, TX overhead drops to ~1.3%.

### Per-Message Overhead (Context Propagation)

Each P2P message carries trace context with the following overhead:

| Field         | Size          | Description                               |
| ------------- | ------------- | ----------------------------------------- |
| `trace_id`    | 16 bytes      | Unique identifier for the entire trace    |
| `span_id`     | 8 bytes       | Current span (becomes parent on receiver) |
| `trace_flags` | 1 byte        | Sampling decision flags                   |
| `trace_state` | 0-4 bytes     | Optional vendor-specific data             |
| **Total**     | **~29 bytes** | **Added per traced P2P message**          |

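The ~29-byte total can be seen from a minimal sketch of the context block. The struct below is illustrative only — field names and sizes follow the table, but the actual rippled protobuf schema is not shown in this deck:

```cpp
#include <array>
#include <cstddef>
#include <cstdint>
#include <vector>

// W3C-style trace context carried on each traced P2P message.
// Sizes match the table above (assumed layout, not the real schema).
struct TraceContext
{
    std::array<std::uint8_t, 16> traceId;  // 16 bytes
    std::array<std::uint8_t, 8> spanId;    //  8 bytes
    std::uint8_t traceFlags;               //  1 byte (bit 0 = sampled)
    std::vector<std::uint8_t> traceState;  //  0-4 bytes, usually empty
};

std::size_t
wireSize(TraceContext const& ctx)
{
    return 16 + 8 + 1 + ctx.traceState.size();  // 25-29 bytes per message
}
```
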
```mermaid
flowchart LR
    subgraph msg["P2P Message with Trace Context"]
        A["Original Message<br/>(variable size)"] --> B["+ TraceContext<br/>(~29 bytes)"]
    end

    subgraph breakdown["Context Breakdown"]
        C["trace_id<br/>16 bytes"]
        D["span_id<br/>8 bytes"]
        E["flags<br/>1 byte"]
        F["state<br/>0-4 bytes"]
    end

    B --> breakdown

    style A fill:#424242,stroke:#212121,color:#fff
    style B fill:#2e7d32,stroke:#1b5e20,color:#fff
    style C fill:#1565c0,stroke:#0d47a1,color:#fff
    style D fill:#1565c0,stroke:#0d47a1,color:#fff
    style E fill:#e65100,stroke:#bf360c,color:#fff
    style F fill:#4a148c,stroke:#2e0d57,color:#fff
```

**Reading the diagram:**

- **Original Message (gray, left)**: The existing P2P message payload of variable size — this is unchanged; trace context is appended, never modifying the original data.
- **+ TraceContext (green, right of message)**: The additional 29-byte context block attached to each traced message; the arrow from the original message shows it is a pure addition.
- **Context Breakdown (right subgraph)**: The four fields — `trace_id` (16 bytes), `span_id` (8 bytes), `flags` (1 byte), and `state` (0-4 bytes) — show exactly what is added and their individual sizes.
- **Color coding**: Blue fields (`trace_id`, `span_id`) are the core identifiers required for trace correlation; orange (`flags`) controls sampling decisions; purple (`state`) is optional vendor data typically omitted.

> **Note**: 29 bytes represents ~0.6-6% overhead depending on message size (from a 5KB proposal down to a 500B simple TX), which is acceptable for the observability benefits provided.

### Mitigation Strategies

```mermaid
flowchart LR
    A["Head Sampling<br/>10% default"] --> B["Tail Sampling<br/>Keep errors/slow"] --> C["Batch Export<br/>Reduce I/O"] --> D["Conditional Compile<br/>XRPL_ENABLE_TELEMETRY"]

    style A fill:#1565c0,stroke:#0d47a1,color:#fff
    style B fill:#2e7d32,stroke:#1b5e20,color:#fff
    style C fill:#e65100,stroke:#bf360c,color:#fff
    style D fill:#4a148c,stroke:#2e0d57,color:#fff
```

> For a detailed explanation of head vs. tail sampling, see Slide 9.

### Kill Switches (Rollback Options)

1. **Config Disable**: Set `enabled=0` in config → instant disable, no restart needed for sampling (see the sketch after this list)
2. **Rebuild**: Compile with `XRPL_ENABLE_TELEMETRY=OFF` → zero overhead (no-op)
3. **Full Revert**: Clean separation allows easy commit reversion

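A minimal sketch of option 1, assuming the same `[telemetry]` stanza that Slide 9 uses for `sampling_ratio` (the exact key layout is not confirmed by this deck):

```ini
[telemetry]
# Kill switch: stop recording and exporting spans at runtime.
enabled=0
# Alternatively, sampling can be throttled to zero without a full disable:
# sampling_ratio=0.0
```
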
---

## Slide 9: Sampling Strategies — Head vs. Tail

> Sampling controls **which traces are recorded and exported**. Without sampling, every operation generates a trace — at 500+ spans/sec, this overwhelms storage and network. Sampling lets you keep the signal, discard the noise.

### Head Sampling (Decision at Start)

The sampling decision is made **when a trace begins**, before any work is done. A random number is generated; if it falls within the configured ratio, the entire trace is recorded. Otherwise, the trace is silently dropped.

```mermaid
flowchart LR
    A["New Request<br/>Arrives"] --> B{"Random < 10%?"}
    B -->|"Yes (1 in 10)"| C["Record Entire Trace<br/>(all spans)"]
    B -->|"No (9 in 10)"| D["Drop Entire Trace<br/>(zero overhead)"]

    style C fill:#2e7d32,stroke:#1b5e20,color:#fff
    style D fill:#c62828,stroke:#8c2809,color:#fff
    style B fill:#1565c0,stroke:#0d47a1,color:#fff
```

| Aspect                        | Details |
| ----------------------------- | ------- |
| **Where it runs**             | Inside rippled (SDK-level). Configured via `sampling_ratio` in `rippled.cfg`. |
| **When the decision happens** | At trace creation time — before the first span is even populated. |
| **How it works**              | `sampling_ratio=0.1` means each trace has a 10% probability of being recorded. Dropped traces incur near-zero overhead (no spans created, no attributes set, no export). |
| **Propagation**               | Once a trace is sampled, the `trace_flags` field (1 byte in the context header) tells downstream nodes to also sample it. Unsampled traces propagate `trace_flags=0`, so downstream nodes skip them too. |
| **Pros**                      | Lowest overhead. Simple to configure. Predictable resource usage. |
| **Cons**                      | **Blind** — it doesn't know if the trace will be interesting. A rare error or slow consensus round has only a 10% chance of being captured. |
| **Best for**                  | High-volume, steady-state traffic where most traces look similar (e.g., routine RPC requests). |

**rippled configuration**:

```ini
[telemetry]
# Record 10% of traces (recommended for production)
sampling_ratio=0.1
```

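Conceptually, head sampling is one cheap draw per new trace. A minimal sketch of the decision (illustrative only — in practice the OTel SDK makes this call inside its sampler interface; `samplingRatio` stands in for the config value above):

```cpp
#include <random>

// Head sampling: decide once, at trace creation, before any span work.
// Returning false means the whole trace is a no-op downstream.
bool
shouldSampleTrace(double samplingRatio)
{
    thread_local std::mt19937_64 rng{std::random_device{}()};
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    return dist(rng) < samplingRatio;  // sampling_ratio=0.1 -> ~10% recorded
}
```
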
### Tail Sampling (Decision at End)

The sampling decision is made **after the trace completes**, based on its actual content — was it slow? Did it error? Was it a consensus round? This requires buffering complete traces before deciding.

```mermaid
flowchart TB
    A["All Traces<br/>Buffered (100%)"] --> B["OTel Collector<br/>Evaluates Rules"]

    B --> C{"Error?"}
    C -->|Yes| K["KEEP"]

    C -->|No| D{"Slow?<br/>(>5s consensus,<br/>>1s RPC)"}
    D -->|Yes| K

    D -->|No| E{"Random < 10%?"}
    E -->|Yes| K
    E -->|No| F["DROP"]

    style K fill:#2e7d32,stroke:#1b5e20,color:#fff
    style F fill:#c62828,stroke:#8c2809,color:#fff
    style B fill:#1565c0,stroke:#0d47a1,color:#fff
    style C fill:#e65100,stroke:#bf360c,color:#fff
    style D fill:#e65100,stroke:#bf360c,color:#fff
    style E fill:#4a148c,stroke:#2e0d57,color:#fff
```

| Aspect                        | Details |
| ----------------------------- | ------- |
| **Where it runs**             | In the **OTel Collector** (external process), not inside rippled. rippled exports 100% of traces; the Collector decides what to keep. |
| **When the decision happens** | After the Collector has received all spans for a trace (waits `decision_wait=10s` for stragglers). |
| **How it works**              | Policy rules evaluate the completed trace: keep all errors, keep slow operations above a threshold, keep all consensus rounds, then probabilistically sample the rest at 10%. |
| **Pros**                      | **Never misses important traces**. Errors, slow requests, and consensus anomalies are always captured regardless of probability. |
| **Cons**                      | Higher resource usage — rippled must export 100% of spans to the Collector, which buffers them in memory before deciding. The Collector needs more RAM (configured via `num_traces` and `decision_wait`). |
| **Best for**                  | Production troubleshooting where you can't afford to miss errors or anomalies. |

**Collector configuration** (tail sampling rules for rippled):

```yaml
processors:
  tail_sampling:
    decision_wait: 10s # Wait for all spans in a trace
    num_traces: 100000 # Buffer up to 100K concurrent traces
    policies:
      - name: errors # Always keep error traces
        type: status_code
        status_code: { status_codes: [ERROR] }

      - name: slow-consensus # Keep consensus rounds >5s
        type: latency
        latency: { threshold_ms: 5000 }

      - name: slow-rpc # Keep slow RPC requests >1s
        type: latency
        latency: { threshold_ms: 1000 }

      - name: probabilistic # Sample 10% of everything else
        type: probabilistic
        probabilistic: { sampling_percentage: 10 }
```

### Head vs. Tail — Side-by-Side

|                               | Head Sampling                            | Tail Sampling                                    |
| ----------------------------- | ---------------------------------------- | ------------------------------------------------ |
| **Decision point**            | Trace start (inside rippled)             | Trace end (in OTel Collector)                    |
| **Knows trace content?**      | No (random coin flip)                    | Yes (evaluates completed trace)                  |
| **Overhead on rippled**       | Lowest (dropped traces = no-op)          | Higher (must export 100% to Collector)           |
| **Collector resource usage**  | Low (receives only sampled traces)       | Higher (buffers all traces before deciding)      |
| **Captures all errors?**      | No (only if trace was randomly selected) | **Yes** (error policy catches them)              |
| **Captures slow operations?** | No (random)                              | **Yes** (latency policy catches them)            |
| **Configuration**             | `rippled.cfg`: `sampling_ratio=0.1`      | `otel-collector.yaml`: `tail_sampling` processor |
| **Best for**                  | High-throughput steady-state             | Troubleshooting & anomaly detection              |

### Recommended Strategy for rippled

Use **both** in a layered approach:

```mermaid
flowchart LR
    subgraph rippled["rippled (Head Sampling)"]
        HS["sampling_ratio=1.0<br/>(export everything)"]
    end

    subgraph collector["OTel Collector (Tail Sampling)"]
        TS["Keep: errors + slow + 10% random<br/>Drop: routine traces"]
    end

    subgraph storage["Backend Storage"]
        ST["Only interesting traces<br/>stored long-term"]
    end

    rippled -->|"100% of spans"| collector -->|"~15-20% kept"| storage

    style rippled fill:#424242,stroke:#212121,color:#fff
    style collector fill:#1565c0,stroke:#0d47a1,color:#fff
    style storage fill:#2e7d32,stroke:#1b5e20,color:#fff
```

> **Why this works**: rippled exports everything (no blind drops), the Collector applies intelligent filtering (keep errors/slow/anomalies, sample the rest), and only ~15-20% of traces reach storage. If Collector resource usage becomes a concern, add head sampling at `sampling_ratio=0.5` to halve the export volume while still giving the Collector enough data for good tail-sampling decisions.

---

## Slide 10: Data Collection & Privacy

### What Data is Collected

| Category        | Attributes Collected                                                                  | Purpose                     |
| --------------- | ------------------------------------------------------------------------------------- | --------------------------- |
| **Transaction** | `tx.hash`, `tx.type`, `tx.result`, `tx.fee`, `ledger_index`                           | Trace transaction lifecycle |
| **Consensus**   | `round`, `phase`, `mode`, `proposers` (count of proposing validators), `duration_ms`  | Analyze consensus timing    |
| **RPC**         | `command`, `version`, `status`, `duration_ms`                                         | Monitor RPC performance     |
| **Peer**        | `peer.id` (public key), `latency_ms`, `message.type`, `message.size`                  | Network topology analysis   |
| **Ledger**      | `ledger.hash`, `ledger.index`, `close_time`, `tx_count`                               | Ledger progression tracking |
| **Job**         | `job.type`, `queue_ms`, `worker`                                                      | JobQueue performance        |

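As an illustration of this attribute surface, a hedged sketch of recording the Transaction row's fields — `recordTxAttributes` and its parameters are hypothetical; only the attribute keys come from the table, and the span is assumed to expose an OTel-style `SetAttribute()` as in opentelemetry-cpp:

```cpp
#include <cstdint>
#include <string>

// Sketch only: attach the Transaction-row attributes to a span.
// Note every value is metadata (hash, type, result code) -- no
// amounts, balances, or raw payloads, per the exclusions below.
template <class Span>
void
recordTxAttributes(
    Span& span,
    std::string const& txHash,
    std::string const& txType,
    std::string const& txResult,
    std::uint64_t fee,
    std::uint32_t ledgerIndex)
{
    span.SetAttribute("tx.hash", txHash);  // identifies, never contains, the TX
    span.SetAttribute("tx.type", txType);
    span.SetAttribute("tx.result", txResult);
    span.SetAttribute("tx.fee", fee);
    span.SetAttribute("ledger_index", ledgerIndex);
}
```
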
### What is NOT Collected (Privacy Guarantees)

```mermaid
flowchart LR
    subgraph notCollected["❌ NOT Collected"]
        direction LR
        A["Private Keys"] ~~~ B["Account Balances"] ~~~ C["Transaction Amounts"]
    end

    subgraph alsoNot["❌ Also Excluded"]
        direction LR
        D["IP Addresses<br/>(configurable)"] ~~~ E["Personal Data"] ~~~ F["Raw TX Payloads"]
    end

    style A fill:#c62828,stroke:#8c2809,color:#fff
    style B fill:#c62828,stroke:#8c2809,color:#fff
    style C fill:#c62828,stroke:#8c2809,color:#fff
    style D fill:#c62828,stroke:#8c2809,color:#fff
    style E fill:#c62828,stroke:#8c2809,color:#fff
    style F fill:#c62828,stroke:#8c2809,color:#fff
```

**Reading the diagram:**

- **NOT Collected (top row, red)**: Private Keys, Account Balances, and Transaction Amounts are explicitly excluded — these are financial/security-sensitive fields that telemetry never touches.
- **Also Excluded (bottom row, red)**: IP Addresses (configurable per deployment), Personal Data, and Raw TX Payloads are also excluded — these protect operator and user privacy.
- **All-red styling**: Every box is styled in red to visually reinforce that these are hard exclusions, not optional — the telemetry system has no code path to collect any of these fields.
- **Two-row layout**: The split between "NOT Collected" and "Also Excluded" distinguishes between financial data (top) and operational/personal data (bottom), making the privacy boundaries clear to auditors.

### Privacy Protection Mechanisms

| Mechanism                  | Description                                                                  |
| -------------------------- | ---------------------------------------------------------------------------- |
| **Account Hashing**        | `xrpl.tx.account` is hashed at collector level before storage (sketch below) |
| **Configurable Redaction** | Sensitive fields can be excluded via config                                  |
| **Sampling**               | Only 10% of traces recorded by default (reduces exposure)                    |
| **Local Control**          | Node operators control what gets exported                                    |
| **No Raw Payloads**        | Transaction content is never recorded, only metadata                         |

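Collector-level hashing maps onto the Collector's standard `attributes` processor. A minimal sketch — the attribute key follows the table above, and the processor still has to be wired into a traces pipeline, which is omitted here:

```yaml
processors:
  attributes/privacy:
    actions:
      # Replace the raw account identifier with its hash before storage
      - key: xrpl.tx.account
        action: hash
```
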
> **Key Principle**: Telemetry collects **operational metadata** (timing, counts, hashes) — never **sensitive content** (keys, balances, amounts).

---

_End of Presentation_

113
SECURITY.md
113
SECURITY.md
@@ -22,10 +22,117 @@ Responsible investigation includes, but isn't limited to, the following:

- Not targeting physical security measures, or attempting to use social engineering, spam, distributed denial of service (DDOS) attacks, etc.
- Investigating bugs in a way that makes a reasonable, good faith effort not to be disruptive or harmful to the XRP Ledger and the broader ecosystem.

### Responsible Disclosure

If you discover a vulnerability or potential threat, or if you _think_
you have, please reach out by dropping an email using the contact
information below.

Your report should include the following:

- Your contact information (typically, an email address);
- The description of the vulnerability;
- The attack scenario (if any);
- The steps to reproduce the vulnerability;
- Any other relevant details or artifacts, including code, scripts or patches.

In your email, please describe the issue or potential threat. If possible, include a "repro" (code that can reproduce the issue) or describe the best way to reproduce and replicate the issue. Please make your report as detailed and comprehensive as possible.

For more information on responsible disclosure, please read this [Wikipedia article](https://en.wikipedia.org/wiki/Responsible_disclosure).

## Report Handling Process

Please report the bug directly to us and limit further disclosure. If you want to prove that you knew the bug as of a given time, consider using a cryptographic pre-commitment: hash the content of your report and publish the hash on a medium of your choice (e.g. on Twitter or as a memo in a transaction) as "proof" that you had written the text at a given point in time.

Once we receive a report, we:

1. Assign two people to independently evaluate the report;
2. Consider their recommendations;
3. If action is necessary, formulate a plan to address the issue;
4. Communicate privately with the reporter to explain our plan;
5. Prepare, test and release a version which fixes the issue; and
6. Announce the vulnerability publicly.

We will triage and respond to your disclosure within 24 hours. Beyond that, we will work to analyze the issue in more detail, and to formulate, develop and test a fix.

While we commit to responding within 24 hours of your initial report with our triage assessment, we cannot guarantee a response time for the remaining steps. We will communicate with you throughout this process, letting you know where we are and keeping you updated on the timeframe.

## Bug Bounty Program

[Ripple](https://ripple.com) is generously sponsoring a bug bounty program for vulnerabilities in [`xrpld`](https://github.com/XRPLF/rippled) (and other related projects, like [`Clio`](https://github.com/XRPLF/clio), [`xrpl.js`](https://github.com/XRPLF/xrpl.js), [`xrpl-py`](https://github.com/XRPLF/xrpl-py), [`xrpl4j`](https://github.com/XRPLF/xrpl4j)).
[Ripple](https://ripple.com) is generously sponsoring a bug bounty program for vulnerabilities in [`xrpld`](https://github.com/XRPLF/rippled) (and other related projects, like [`xrpl.js`](https://github.com/XRPLF/xrpl.js), [`xrpl-py`](https://github.com/XRPLF/xrpl-py), [`xrpl4j`](https://github.com/XRPLF/xrpl4j)).

This program allows us to recognize and reward individuals or groups that identify and report bugs.
This program allows us to recognize and reward individuals or groups that identify and report bugs. In summary, in order to qualify for a bounty, the bug must be:

We have partnered with Bugcrowd to manage this program. It is a private program, and security researchers can participate based on invitation. If you need access to the program, please email bugs@ripple.com with your Bugcrowd handle or Bugcrowd registered email, and we will get you added to the program. Once you have been added, please submit vulnerability reports through Bugcrowd, not by email. The detailed bug bounty policy is available on the Bugcrowd website.
1. **In scope**. Only bugs in software under the scope of the program qualify. Currently, that means `xrpld`, `xrpl.js`, `xrpl-py`, `xrpl4j`.
2. **Relevant**. A security issue, posing a danger to user funds, privacy, or the operation of the XRP Ledger.
3. **Original and previously unknown**. Bugs that are already known and discussed in public do not qualify. Previously reported bugs, even if publicly unknown, are not eligible.
4. **Specific**. We welcome general security advice or recommendations, but we cannot pay bounties for that.
5. **Fixable**. There has to be something we can do to permanently fix the problem. Note that bugs in other people’s software may still qualify in some cases. For example, if you find a bug in a library that we use which can compromise the security of software that is in scope and we can get it fixed, you may qualify for a bounty.
6. **Unused**. If you use the exploit to attack the XRP Ledger, you do not qualify for a bounty. If you report a vulnerability used in an ongoing or past attack and there is specific, concrete evidence that suggests you are the attacker we reserve the right not to pay a bounty.

The amount paid varies dramatically. Vulnerabilities that are harmless on their own, but could form part of a critical exploit will usually receive a bounty. Full-blown exploits can receive much higher bounties. Please don’t hold back partial vulnerabilities while trying to construct a full-blown exploit. We will pay a bounty to anyone who reports a complete chain of vulnerabilities even if they have reported each component of the exploit separately and those vulnerabilities have been fixed in the meantime. However, to qualify for the full bounty, you must have been the first to report each of the partial exploits.

### Contacting Us

To report a qualifying bug, please send a detailed report to:

| Email Address | bugs@ripple.com                                      |
| :-----------: | :--------------------------------------------------- |
| Short Key ID  | `0xA9F514E0`                                         |
| Long Key ID   | `0xD900855AA9F514E0`                                 |
| Fingerprint   | `B72C 0654 2F2A E250 2763 A268 D900 855A A9F5 14E0`  |

The full PGP key for this address, which is also available on several key servers (e.g. on [keyserver.ubuntu.com](https://keyserver.ubuntu.com)), is:

```
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBGkSZAQBEACprU199OhgdsOsygNjiQV4msuN3vDOUooehL+NwfsGfW79Tbqq
Q2u7uQ3NZjW+M2T4nsDwuhkr7pe7xSReR5W8ssaczvtUyxkvbMClilcgZ2OSCAuC
N9tzJsqOqkwBvXoNXkn//T2jnPz0ZU2wSF+NrEibq5FeuyGdoX3yXXBxq9pW9HzK
HkQll63QSl6BzVSGRQq+B6lGgaZGLwf3mzmIND9Z5VGLNK2jKynyz9z091whNG/M
kV+E7/r/bujHk7WIVId07G5/COTXmSr7kFnNEkd2Umw42dkgfiNKvlmJ9M7c1wLK
KbL9Eb4ADuW6rRc5k4s1e6GT8R4/VPliWbCl9SE32hXH8uTkqVIFZP2eyM5WRRHs
aKzitkQG9UK9gcb0kdgUkxOvvgPHAe5IuZlcHFzU4y0dBbU1VEFWVpiLU0q+IuNw
5BRemeHc59YNsngkmAZ+/9zouoShRusZmC8Wzotv75C2qVBcjijPvmjWAUz0Zunm
Lsr+O71vqHE73pERjD07wuD/ISjiYRYYE/bVrXtXLZijC7qAH4RE3nID+2ojcZyO
/2jMQvt7un56RsGH4UBHi3aBHi9bUoDGCXKiQY981cEuNaOxpou7Mh3x/ONzzSvk
sTV6nl1LOZHykN1JyKwaNbTSAiuyoN+7lOBqbV04DNYAHL88PrT21P83aQARAQAB
tB1SaXBwbGUgTGFicyA8YnVnc0ByaXBwbGUuY29tPokCTgQTAQgAOBYhBLcsBlQv
KuJQJ2OiaNkAhVqp9RTgBQJpEmQEAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheA
AAoJENkAhVqp9RTgBzgP/i7y+aDWl1maig1XMdyb+o0UGusumFSW4Hmj278wlKVv
usgLPihYgHE0PKrv6WRyKOMC1tQEcYYN93M+OeQ1vFhS2YyURq6RCMmh4zq/awXG
uZbG36OURB5NH8lGBOHiN/7O+nY0CgenBT2JWm+GW3nEOAVOVm4+r5GlpPlv+Dp1
NPBThcKXFMnH73++NpSQoDzTfRYHPxhDAX3jkLi/moXfSanOLlR6l94XNNN0jBHW
Quao0rzf4WSXq9g6AS224xhAA5JyIcFl8TX7hzj5HaFn3VWo3COoDu4U7H+BM0fl
85yqiMQypp7EhN2gxpMMWaHY5TFM85U/bFXFYfEgihZ4/gt4uoIzsNI9jlX7mYvG
KFdDij+oTlRsuOxdIy60B3dKcwOH9nZZCz0SPsN/zlRWgKzK4gDKdGhFkU9OlvPu
94ZqscanoiWKDoZkF96+sjgfjkuHsDK7Lwc1Xi+T4drHG/3aVpkYabXox+lrKB/S
yxZjeqOIQzWPhnLgCaLyvsKo5hxKzL0w3eURu8F3IS7RgOOlljv4M+Me9sEVcdNV
aN3/tQwbaomSX1X5D5YXqhBwC3rU3wXwamsscRTGEpkV+JCX6KUqGP7nWmxCpAly
FL05XuOd5SVHJjXLeuje0JqLUpN514uL+bThWwDbDTdAdwW3oK/2WbXz7IfJRLBj
uQINBGkSZAQBEADdI3SL2F72qkrgFqXWE6HSRBu9bsAvTE5QrRPWk7ux6at537r4
S4sIw2dOwLvbyIrDgKNq3LQ5wCK88NO/NeCOFm4AiCJSl3pJHXYnTDoUxTrrxx+o
vSRI4I3fHEql/MqzgiAb0YUezjgFdh3vYheMPp/309PFbOLhiFqEcx80Mx5h06UH
gDzu1qNj3Ec+31NLic5zwkrAkvFvD54d6bqYR3SEgMau6aYEewpGHbWBi2pLqSi2
lQcAeOFixqGpTwDmAnYR8YtjBYepy0MojEAdTHcQQlOYSDk4q4elG+io2N8vECfU
rD6ORecN48GXdZINYWTAdslrUeanmBdgQrYkSpce8TSghgT9P01SNaXxmyaehVUO
lqI4pcg5G2oojAE8ncNS3TwDtt7daTaTC3bAdr4PXDVAzNAiewjMNZPB7xidkDGQ
Y4W1LxTMXyJVWxehYOH7tsbBRKninlfRnLgYzmtIbNRAAvNcsxU6ihv3AV0WFknN
YbSzotEv1Xq/5wk309x8zCDe+sP0cQicvbXafXmUzPAZzeqFg+VLFn7F9MP1WGlW
B1u7VIvBF1Mp9Nd3EAGBAoLRdRu+0dVWIjPTQuPIuD9cCatJA0wVaKUrjYbBMl88
a12LixNVGeSFS9N7ADHx0/o7GNT6l88YbaLP6zggUHpUD/bR+cDN7vllIQARAQAB
iQI2BBgBCAAgFiEEtywGVC8q4lAnY6Jo2QCFWqn1FOAFAmkSZAQCGwwACgkQ2QCF
Wqn1FOAfAA/8CYq4p0p4bobY20CKEMsZrkBTFJyPDqzFwMeTjgpzqbD7Y3Qq5QCK
OBbvY02GWdiIsNOzKdBxiuam2xYP9WHZj4y7/uWEvT0qlPVmDFu+HXjoJ43oxwFd
CUp2gMuQ4cSL3X94VRJ3BkVL+tgBm8CNY0vnTLLOO3kum/R69VsGJS1JSGUWjNM+
4qwS3mz+73xJu1HmERyN2RZF/DGIZI2PyONQQ6aH85G1Dd2ohu2/DBAkQAMBrPbj
FrbDaBLyFhODxU3kTWqnfLlaElSm2EGdIU2yx7n4BggEa//NZRMm5kyeo4vzhtlQ
YIVUMLAOLZvnEqDnsLKp+22FzNR/O+htBQC4lPywl53oYSALdhz1IQlcAC1ru5KR
XPzhIXV6IIzkcx9xNkEclZxmsuy5ERXyKEmLbIHAlzFmnrldlt2ZgXDtzaorLmxj
klKibxd5tF50qOpOivz+oPtFo7n+HmFa1nlVAMxlDCUdM0pEVeYDKI5zfVwalyhZ
NnjpakdZSXMwgc7NP/hH9buF35hKDp7EckT2y3JNYwHsDdy1icXN2q40XZw5tSIn
zkPWdu3OUY8PISohN6Pw4h0RH4ZmoX97E8sEfmdKaT58U4Hf2aAv5r9IWCSrAVqY
u5jvac29CzQR9Kal0A+8phHAXHNFD83SwzIC0syaT9ficAguwGH8X6Q=
=nGuD
-----END PGP PUBLIC KEY BLOCK-----
```

@@ -1,13 +1,14 @@
#!/bin/bash

if [[ $# -ne 1 || "$1" == "--help" || "$1" == "-h" ]]; then
name=$( basename $0 )
cat <<- USAGE
Usage: $name <username>
if [[ $# -ne 1 || "$1" == "--help" || "$1" == "-h" ]]
then
name=$( basename $0 )
cat <<- USAGE
Usage: $name <username>

Where <username> is the Github username of the upstream repo. e.g. XRPLF
Where <username> is the Github username of the upstream repo. e.g. XRPLF
USAGE
exit 0
exit 0
fi

# Create upstream remotes based on origin
@@ -15,9 +16,10 @@ shift
user="$1"
# Get the origin URL. Expect it to be an SSH-style URL
origin=$( git remote get-url origin )
if [[ "${origin}" == "" ]]; then
echo Invalid origin remote >&2
exit 1
if [[ "${origin}" == "" ]]
then
echo Invalid origin remote >&2
exit 1
fi
# echo "Origin: ${origin}"
# Parse the origin
@@ -28,9 +30,11 @@ IFS='@' read sshuser server <<< "${remote}"
# echo "SSHUser: ${sshuser}, Server: ${server}"
IFS='/' read originuser repo <<< "${originpath}"
# echo "Originuser: ${originuser}, Repo: ${repo}"
if [[ "${sshuser}" == "" || "${server}" == "" || "${originuser}" == "" || "${repo}" == "" ]]; then
echo "Can't parse origin URL: ${origin}" >&2
exit 1
if [[ "${sshuser}" == "" || "${server}" == "" || "${originuser}" == ""
|| "${repo}" == "" ]]
then
echo "Can't parse origin URL: ${origin}" >&2
exit 1
fi
upstream="https://${server}/${user}/${repo}"
upstreampush="${remote}:${user}/${repo}"
@@ -38,34 +42,42 @@ upstreamgroup="upstream upstream-push"
current=$( git remote get-url upstream 2>/dev/null )
currentpush=$( git remote get-url upstream-push 2>/dev/null )
currentgroup=$( git config remotes.upstreams )
if [[ "${current}" == "${upstream}" ]]; then
echo "Upstream already set up correctly. Skip"
elif [[ -n "${current}" && "${current}" != "${upstream}" && "${current}" != "${upstreampush}" ]]; then
echo "Upstream already set up as: ${current}. Skip"
if [[ "${current}" == "${upstream}" ]]
then
echo "Upstream already set up correctly. Skip"
elif [[ -n "${current}" && "${current}" != "${upstream}" &&
"${current}" != "${upstreampush}" ]]
then
echo "Upstream already set up as: ${current}. Skip"
else
if [[ "${current}" == "${upstreampush}" ]]; then
echo "Upstream set to dangerous push URL. Update."
_run git remote rename upstream upstream-push || \
_run git remote remove upstream
currentpush=$( git remote get-url upstream-push 2>/dev/null )
fi
_run git remote add upstream "${upstream}"
if [[ "${current}" == "${upstreampush}" ]]
then
echo "Upstream set to dangerous push URL. Update."
_run git remote rename upstream upstream-push || \
_run git remote remove upstream
currentpush=$( git remote get-url upstream-push 2>/dev/null )
fi
_run git remote add upstream "${upstream}"
fi

if [[ "${currentpush}" == "${upstreampush}" ]]; then
|
||||
echo "upstream-push already set up correctly. Skip"
|
||||
elif [[ -n "${currentpush}" && "${currentpush}" != "${upstreampush}" ]]; then
|
||||
echo "upstream-push already set up as: ${currentpush}. Skip"
|
||||
if [[ "${currentpush}" == "${upstreampush}" ]]
|
||||
then
|
||||
echo "upstream-push already set up correctly. Skip"
|
||||
elif [[ -n "${currentpush}" && "${currentpush}" != "${upstreampush}" ]]
|
||||
then
|
||||
echo "upstream-push already set up as: ${currentpush}. Skip"
|
||||
else
|
||||
_run git remote add upstream-push "${upstreampush}"
|
||||
_run git remote add upstream-push "${upstreampush}"
|
||||
fi
|
||||
|
||||
if [[ "${currentgroup}" == "${upstreamgroup}" ]]; then
|
||||
echo "Upstreams group already set up correctly. Skip"
|
||||
elif [[ -n "${currentgroup}" && "${currentgroup}" != "${upstreamgroup}" ]]; then
|
||||
echo "Upstreams group already set up as: ${currentgroup}. Skip"
|
||||
if [[ "${currentgroup}" == "${upstreamgroup}" ]]
|
||||
then
|
||||
echo "Upstreams group already set up correctly. Skip"
|
||||
elif [[ -n "${currentgroup}" && "${currentgroup}" != "${upstreamgroup}" ]]
|
||||
then
|
||||
echo "Upstreams group already set up as: ${currentgroup}. Skip"
|
||||
else
|
||||
_run git config --add remotes.upstreams "${upstreamgroup}"
|
||||
_run git config --add remotes.upstreams "${upstreamgroup}"
|
||||
fi
|
||||
|
||||
_run git fetch --jobs=$(nproc) upstreams
|
||||
|
||||
@@ -1,16 +1,17 @@
#!/bin/bash

if [[ $# -lt 3 || "$1" == "--help" || "$1" = "-h" ]]; then
name=$( basename $0 )
cat <<- USAGE
Usage: $name workbranch base/branch user/branch [user/branch [...]]
if [[ $# -lt 3 || "$1" == "--help" || "$1" = "-h" ]]
then
name=$( basename $0 )
cat <<- USAGE
Usage: $name workbranch base/branch user/branch [user/branch [...]]

* workbranch will be created locally from base/branch
* base/branch and user/branch may be specified as user:branch to allow
easy copying from Github PRs
* Remotes for each user must already be set up
* workbranch will be created locally from base/branch
* base/branch and user/branch may be specified as user:branch to allow
easy copying from Github PRs
* Remotes for each user must already be set up
USAGE
exit 0
exit 0
fi

work="$1"
|
||||
@@ -23,8 +24,9 @@ unset branches[0]
|
||||
set -e
|
||||
|
||||
users=()
|
||||
for b in "${branches[@]}"; do
|
||||
users+=( $( echo $b | cut -d/ -f1 ) )
|
||||
for b in "${branches[@]}"
|
||||
do
|
||||
users+=( $( echo $b | cut -d/ -f1 ) )
|
||||
done
|
||||
|
||||
users=( $( printf '%s\n' "${users[@]}" | sort -u ) )
|
||||
@@ -32,9 +34,10 @@ users=( $( printf '%s\n' "${users[@]}" | sort -u ) )
git fetch --multiple upstreams "${users[@]}"
git checkout -B "$work" --no-track "$base"

for b in "${branches[@]}"; do
git merge --squash "${b}"
git commit -S # Use the commit message provided on the PR
for b in "${branches[@]}"
do
git merge --squash "${b}"
git commit -S # Use the commit message provided on the PR
done

# Make sure the commits look right
@@ -44,11 +47,13 @@ parts=( $( echo $base | sed "s/\// /" ) )
repo="${parts[0]}"
b="${parts[1]}"
push=$repo
if [[ "$push" == "upstream" ]]; then
push="upstream-push"
if [[ "$push" == "upstream" ]]
then
push="upstream-push"
fi
if [[ "$repo" == "upstream" ]]; then
repo="upstreams"
if [[ "$repo" == "upstream" ]]
then
repo="upstreams"
fi
cat << PUSH

@@ -1,16 +1,17 @@
#!/bin/bash

if [[ $# -ne 3 || "$1" == "--help" || "$1" = "-h" ]]; then
name=$( basename $0 )
cat <<- USAGE
Usage: $name workbranch base/branch version
if [[ $# -ne 3 || "$1" == "--help" || "$1" = "-h" ]]
then
name=$( basename $0 )
cat <<- USAGE
Usage: $name workbranch base/branch version

* workbranch will be created locally from base/branch. If it exists,
it will be reused, so make sure you don't overwrite any work.
* base/branch may be specified as user:branch to allow easy copying
from Github PRs.
* workbranch will be created locally from base/branch. If it exists,
it will be reused, so make sure you don't overwrite any work.
* base/branch may be specified as user:branch to allow easy copying
from Github PRs.
USAGE
exit 0
exit 0
fi

work="$1"
|
||||
@@ -29,9 +30,10 @@ git fetch upstreams
|
||||
git checkout -B "${work}" --no-track "${base}"
|
||||
|
||||
push=$( git rev-parse --abbrev-ref --symbolic-full-name '@{push}' \
|
||||
2>/dev/null ) || true
|
||||
if [[ "${push}" != "" ]]; then
|
||||
echo "Warning: ${push} may already exist."
|
||||
2>/dev/null ) || true
|
||||
if [[ "${push}" != "" ]]
|
||||
then
|
||||
echo "Warning: ${push} may already exist."
|
||||
fi
|
||||
|
||||
build=$( find -name BuildInfo.cpp )
|
||||
|
||||
@@ -1,206 +0,0 @@
#!/usr/bin/env python3
"""Pre-commit hook that runs clang-tidy on changed files using run-clang-tidy."""

from __future__ import annotations

import json
import os
import re
import shutil
import subprocess
import sys
from collections import defaultdict
from pathlib import Path

HEADER_EXTENSIONS = {".h", ".hpp", ".ipp"}
SOURCE_EXTENSIONS = {".cpp"}
INCLUDE_RE = re.compile(r"^\s*#\s*include\s*[<\"]([^>\"]+)[>\"]")


def find_run_clang_tidy() -> str | None:
    for candidate in ("run-clang-tidy-21", "run-clang-tidy"):
        if path := shutil.which(candidate):
            return path
    return None


def find_build_dir(repo_root: Path) -> Path | None:
    for name in (".build", "build"):
        candidate = repo_root / name
        if (candidate / "compile_commands.json").exists():
            return candidate
    return None


def build_include_graph(build_dir: Path, repo_root: Path) -> tuple[dict, set]:
    """
    Scan all files reachable from compile_commands.json and build an inverted include graph.

    Returns:
        inverted: header_path -> set of files that include it
        source_files: set of all TU paths from compile_commands.json
    """
    with open(build_dir / "compile_commands.json") as f:
        db = json.load(f)

    source_files = {Path(e["file"]).resolve() for e in db}
    include_roots = [repo_root / "include", repo_root / "src"]
    inverted: dict[Path, set[Path]] = defaultdict(set)

    to_scan: set[Path] = set(source_files)
    scanned: set[Path] = set()

    while to_scan:
        file = to_scan.pop()
        if file in scanned or not file.exists():
            continue
        scanned.add(file)

        content = file.read_text()

        for line in content.splitlines():
            m = INCLUDE_RE.match(line)
            if not m:
                continue
            for root in include_roots:
                candidate = (root / m.group(1)).resolve()
                if candidate.exists():
                    inverted[candidate].add(file)
                    if candidate not in scanned:
                        to_scan.add(candidate)
                    break

    return inverted, source_files


def find_tus_for_headers(
    headers: list[Path],
    inverted: dict[Path, set[Path]],
    source_files: set[Path],
) -> set[Path]:
    """
    For each header, pick one TU that transitively includes it.
    Prefers a TU whose stem matches the header's stem, otherwise picks the first found.
    """
    result: set[Path] = set()

    for header in headers:
        preferred: Path | None = None
        visited: set[Path] = {header}
        stack: list[Path] = [header]

        while stack:
            h = stack.pop()
            for inc in inverted.get(h, ()):
                if inc in source_files:
                    if inc.stem == header.stem:
                        preferred = inc
                        break
                    if preferred is None:
                        preferred = inc
                if inc not in visited:
                    visited.add(inc)
                    stack.append(inc)
            if preferred is not None and preferred.stem == header.stem:
                break

        if preferred is not None:
            result.add(preferred)

    return result


def resolve_files(
    input_files: list[str], build_dir: Path, repo_root: Path
) -> list[str]:
    """
    Split input into source files and headers. Source files are passed through;
    headers are resolved to the TUs that transitively include them.
    """
    sources: list[Path] = []
    headers: list[Path] = []

    for f in input_files:
        p = Path(f).resolve()
        if p.suffix in SOURCE_EXTENSIONS:
            sources.append(p)
        elif p.suffix in HEADER_EXTENSIONS:
            headers.append(p)

    if not headers:
        return [str(p) for p in sources]

    print(
        f"Resolving {len(headers)} header(s) to compilation units...", file=sys.stderr
    )
    inverted, source_files = build_include_graph(build_dir, repo_root)
    tus = find_tus_for_headers(headers, inverted, source_files)

    if not tus:
        print(
            "Warning: no compilation units found that include the modified headers; "
            "skipping clang-tidy for headers.",
            file=sys.stderr,
        )

    return sorted({str(p) for p in (*sources, *tus)})


def staged_files(repo_root: Path) -> list[str]:
    result = subprocess.run(
        ["git", "diff", "--staged", "--name-only", "--diff-filter=d"],
        capture_output=True,
        text=True,
        cwd=repo_root,
    )
    if result.returncode != 0:
        print(
            "clang-tidy check failed: 'git diff --staged' command failed.",
            file=sys.stderr,
        )
        if result.stderr:
            print(result.stderr, file=sys.stderr)
        sys.exit(result.returncode or 1)
    return [str(repo_root / p) for p in result.stdout.splitlines() if p]


def main():
    if not os.environ.get("TIDY"):
        return 0

    repo_root = Path(__file__).parent.parent
    files = staged_files(repo_root)
    if not files:
        return 0

    run_clang_tidy = find_run_clang_tidy()
    if not run_clang_tidy:
        print(
            "clang-tidy check failed: TIDY is enabled but neither "
            "'run-clang-tidy-21' nor 'run-clang-tidy' was found in PATH.",
            file=sys.stderr,
        )
        return 1

    build_dir = find_build_dir(repo_root)
    if not build_dir:
        print(
            "clang-tidy check failed: no build directory with compile_commands.json found "
            "(looked for .build/ and build/)",
            file=sys.stderr,
        )
        return 1

    tidy_files = resolve_files(files, build_dir, repo_root)
    if not tidy_files:
        return 0

    result = subprocess.run(
        [run_clang_tidy, "-quiet", "-p", str(build_dir), "-fix", "-allow-no-checks"]
        + tidy_files
    )
    return result.returncode


if __name__ == "__main__":
    sys.exit(main())
@@ -1,37 +0,0 @@
#!/usr/bin/env python3

"""
Converts quoted includes (#include "...") to angle-bracket includes
(#include <...>), which is the required style in this project.

Usage: ./bin/pre-commit/fix_include_style.py <file1> <file2> ...
"""

import re
import sys
from pathlib import Path

PATTERN = re.compile(r'^(\s*#include\s*)"([^"]+)"', re.MULTILINE)


def fix_includes(path: Path) -> bool:
    original = path.read_text(encoding="utf-8")
    fixed = PATTERN.sub(r"\1<\2>", original)
    if fixed != original:
        path.write_text(fixed, encoding="utf-8")
        return False
    return True


def main() -> int:
    files = [Path(f) for f in sys.argv[1:]]
    success = True

    for path in files:
        success &= fix_includes(path)

    return 0 if success else 1


if __name__ == "__main__":
    sys.exit(main())
@@ -1416,12 +1416,6 @@
# in this section to a comma-separated list of the addresses
# of your Clio servers, in order to bypass xrpld's rate limiting.
#
# TLS/SSL can be enabled for gRPC by specifying ssl_cert and ssl_key.
# Both parameters must be provided together. The ssl_cert_chain parameter
# is optional and provides intermediate CA certificates for the certificate
# chain. The ssl_client_ca parameter is optional and enables mutual TLS
# (client certificate verification).
#
# This port is commented out but can be enabled by removing
# the '#' from each corresponding line including the entry under [server]
#
@@ -1471,74 +1465,11 @@ admin = 127.0.0.1
protocol = ws
send_queue_limit = 500

# gRPC TLS/SSL Configuration
#
# The gRPC port supports optional TLS/SSL encryption. When TLS is not
# configured, the gRPC server will accept unencrypted connections.
#
# ssl_cert = <filename>
# ssl_key = <filename>
#
# To enable TLS for gRPC, both ssl_cert and ssl_key must be specified.
# If only one is provided, xrpld will fail to start.
#
# ssl_cert: Path to the server's SSL certificate file in PEM format.
# ssl_key: Path to the server's SSL private key file in PEM format.
#
# When configured, the gRPC server will only accept TLS-encrypted
# connections. Clients must use TLS (secure) channel credentials rather
# than plaintext / insecure connections.
#
# ssl_cert_chain = <filename>
#
# Optional. Path to intermediate CA certificate(s) in PEM format that
# complete the server's certificate chain.
#
# This file should contain the intermediate CA certificate(s) needed
# to build a trust chain from the server certificate (ssl_cert) to a
# root CA that clients trust. Multiple certificates should be
# concatenated in PEM format.
#
# This is needed when your server certificate was signed by an
# intermediate CA rather than directly by a root CA. Without this,
# clients may fail to verify your server certificate.
#
# If not specified, only the server certificate from ssl_cert will be
# presented to clients.
#
# ssl_client_ca = <filename>
#
# Optional. Path to a CA certificate file in PEM format for verifying
# client certificates (mutual TLS / mTLS).
#
# When specified, the gRPC server will verify client certificates
# against this CA. This enables mutual authentication where both the
# server and client verify each other's identity.
#
# This is typically NOT needed for public-facing gRPC servers. Only
# use this if you want to restrict access to clients with valid
# certificates signed by the specified CA.
#
# If not specified, the server will use one-way TLS (server
# authentication only) and will accept connections from any client.
#
[port_grpc]
port = 50051
ip = 127.0.0.1
secure_gateway = 127.0.0.1

# Optional TLS/SSL configuration for gRPC
# To enable TLS, uncomment and configure both ssl_cert and ssl_key:
#ssl_cert = /etc/ssl/certs/grpc-server.crt
#ssl_key = /etc/ssl/private/grpc-server.key

# Optional: Include intermediate CA certificates for complete certificate chain
#ssl_cert_chain = /etc/ssl/certs/grpc-intermediate-ca.crt

# Optional: Enable mutual TLS (client certificate verification)
# Uncomment to require and verify client certificates:
#ssl_client_ca = /etc/ssl/certs/grpc-client-ca.crt

#[port_ws_public]
#port = 6005
#ip = 127.0.0.1

@@ -118,7 +118,7 @@ if(MSVC)
NOMINMAX
# TODO: Resolve these warnings, don't just silence them
_SILENCE_ALL_CXX17_DEPRECATION_WARNINGS
$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CONFIG:Debug>>:_CRTDBG_MAP_ALLOC>
$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CONFIG:Debug>,$<NOT:$<BOOL:${is_ci}>>>:_CRTDBG_MAP_ALLOC>
)
target_link_libraries(common INTERFACE -errorreport:none -machine:X64)
else()

@@ -23,14 +23,14 @@ rm -f conan.lock
# first create command will create a new lockfile, while the subsequent create
# commands will merge any additional dependencies into the created lockfile.
conan lock create . \
--options '&:jemalloc=True' \
--options '&:rocksdb=True' \
--profile:all=conan/lockfile/linux.profile
--options '&:jemalloc=True' \
--options '&:rocksdb=True' \
--profile:all=conan/lockfile/linux.profile
conan lock create . \
--options '&:jemalloc=True' \
--options '&:rocksdb=True' \
--profile:all=conan/lockfile/macos.profile
--options '&:jemalloc=True' \
--options '&:rocksdb=True' \
--profile:all=conan/lockfile/macos.profile
conan lock create . \
--options '&:jemalloc=True' \
--options '&:rocksdb=True' \
--profile:all=conan/lockfile/windows.profile
--options '&:jemalloc=True' \
--options '&:rocksdb=True' \
--profile:all=conan/lockfile/windows.profile

@@ -190,6 +190,7 @@ words:
  - NOLINTNEXTLINE
  - nonxrp
  - noripple
  - nostd
  - nudb
  - nullptr
  - nunl
@@ -275,6 +276,7 @@ words:
  - txjson
  - txn
  - txns
  - txqueue
  - txs
  - UBSAN
  - ubsan
@@ -322,3 +324,9 @@ words:
  - xrplf
  - xxhash
  - xxhasher
  - xychart
  - otelc
  - zpages
  - traceql
  - Gantt
  - gantt

@@ -33,10 +33,10 @@ private:

public:
    /** Create an empty section. */
    explicit Section(std::string name = "");
    explicit Section(std::string const& name = "");

    /** Returns the name of this section. */
    [[nodiscard]] std::string const&
    std::string const&
    name() const
    {
        return name_;
@@ -45,7 +45,7 @@ public:
    /** Returns all the lines in the section.
        This includes everything.
    */
    [[nodiscard]] std::vector<std::string> const&
    std::vector<std::string> const&
    lines() const
    {
        return lines_;
@@ -54,7 +54,7 @@ public:
    /** Returns all the values in the section.
        Values are non-empty lines which are not key/value pairs.
    */
    [[nodiscard]] std::vector<std::string> const&
    std::vector<std::string> const&
    values() const
    {
        return values_;
@@ -82,7 +82,7 @@ public:
     * @return The retrieved value. A section with an empty legacy value returns
       an empty string.
     */
    [[nodiscard]] std::string
    std::string
    legacy() const
    {
        if (lines_.empty())
@@ -117,11 +117,11 @@ public:
    }

    /** Returns `true` if a key with the given name exists. */
    [[nodiscard]] bool
    bool
    exists(std::string const& name) const;

    template <class T = std::string>
    [[nodiscard]] std::optional<T>
    std::optional<T>
    get(std::string const& name) const
    {
        auto const iter = lookup_.find(name);
@@ -132,7 +132,7 @@ public:

    /// Returns a value if present, else another value.
    template <class T>
    [[nodiscard]] T
    T
    value_or(std::string const& name, T const& other) const
    {
        auto const v = get<T>(name);
@@ -141,7 +141,7 @@ public:

    // indicates if trailing comments were seen
    // during the appending of any lines/values
    [[nodiscard]] bool
    bool
    had_trailing_comments() const
    {
        return had_trailing_comments_;
@@ -151,42 +151,42 @@
    operator<<(std::ostream&, Section const& section);

    // Returns `true` if there are no key/value pairs.
    [[nodiscard]] bool
    bool
    empty() const
    {
        return lookup_.empty();
    }

    // Returns the number of key/value pairs.
    [[nodiscard]] std::size_t
    std::size_t
    size() const
    {
        return lookup_.size();
    }

    // For iteration of key/value pairs.
    [[nodiscard]] const_iterator
    const_iterator
    begin() const
    {
        return lookup_.cbegin();
    }

    // For iteration of key/value pairs.
    [[nodiscard]] const_iterator
    const_iterator
    cbegin() const
    {
        return lookup_.cbegin();
    }

    // For iteration of key/value pairs.
    [[nodiscard]] const_iterator
    const_iterator
    end() const
    {
        return lookup_.cend();
    }

    // For iteration of key/value pairs.
    [[nodiscard]] const_iterator
    const_iterator
    cend() const
    {
        return lookup_.cend();
@@ -206,7 +206,7 @@ private:

public:
    /** Returns `true` if a section with the given name exists. */
    [[nodiscard]] bool
    bool
    exists(std::string const& name) const;

    /** Returns the section with the given name.
@@ -216,7 +216,7 @@ public:
    Section&
    section(std::string const& name);

    [[nodiscard]] Section const&
    Section const&
    section(std::string const& name) const;

    Section const&
@@ -264,7 +264,7 @@ public:
     * legacy value.
     * @return Contents of the legacy value.
     */
    [[nodiscard]] std::string
    std::string
    legacy(std::string const& sectionName) const;

    friend std::ostream&
@@ -272,10 +272,11 @@ public:

    // indicates if trailing comments were seen
    // in any loaded Sections
    [[nodiscard]] bool
    bool
    had_trailing_comments() const
    {
        return std::ranges::any_of(map_, [](auto s) { return s.second.had_trailing_comments(); });
        return std::any_of(
            map_.cbegin(), map_.cend(), [](auto s) { return s.second.had_trailing_comments(); });
    }

protected:

@@ -92,7 +92,7 @@ public:
    {
        // Ensure the slice isn't a subset of the buffer.
        XRPL_ASSERT(
            s.empty() || size_ == 0 || s.data() < p_.get() || s.data() >= p_.get() + size_,
            s.size() == 0 || size_ == 0 || s.data() < p_.get() || s.data() >= p_.get() + size_,
            "xrpl::Buffer::operator=(Slice) : input not a subset");

        if (auto p = alloc(s.size()))
@@ -101,13 +101,13 @@ public:
    }

    /** Returns the number of bytes in the buffer. */
    [[nodiscard]] std::size_t
    std::size_t
    size() const noexcept
    {
        return size_;
    }

    [[nodiscard]] bool
    bool
    empty() const noexcept
    {
        return 0 == size_;
@@ -125,7 +125,7 @@ public:
        to a single byte, to facilitate pointer arithmetic.
    */
    /** @{ */
    [[nodiscard]] std::uint8_t const*
    std::uint8_t const*
    data() const noexcept
    {
        return p_.get();
@@ -169,25 +169,25 @@ public:
        return alloc(n);
    }

    [[nodiscard]] const_iterator
    const_iterator
    begin() const noexcept
    {
        return p_.get();
    }

    [[nodiscard]] const_iterator
    const_iterator
    cbegin() const noexcept
    {
        return p_.get();
    }

    [[nodiscard]] const_iterator
    const_iterator
    end() const noexcept
    {
        return p_.get() + size_;
    }

    [[nodiscard]] const_iterator
    const_iterator
    cend() const noexcept
    {
        return p_.get() + size_;

@@ -9,7 +9,9 @@
#include <stdexcept>
#include <vector>

namespace xrpl::compression_algorithms {
namespace xrpl {

namespace compression_algorithms {

/** LZ4 block compression.
 * @tparam BufferFactory Callable object or lambda.
@@ -139,4 +141,6 @@ lz4Decompress(
    return lz4Decompress(chunk, inSize, decompressed, decompressedSize);
}

} // namespace xrpl::compression_algorithms
} // namespace compression_algorithms

} // namespace xrpl

@@ -19,7 +19,7 @@ public:
    using Entry = std::pair<std::string, int>;
    using List = std::vector<Entry>;

    [[nodiscard]] List
    List
    getCounts(int minimumThreshold) const;

public:
@@ -59,19 +59,19 @@ public:
        return --count_;
    }

    [[nodiscard]] int
    int
    getCount() const noexcept
    {
        return count_.load();
    }

    [[nodiscard]] Counter*
    Counter*
    getNext() const noexcept
    {
        return next_;
    }

    [[nodiscard]] std::string const&
    std::string const&
    getName() const noexcept
    {
        return name_;

@@ -61,7 +61,7 @@ template <class E>
class Unexpected
{
public:
    static_assert(!std::is_same_v<E, void>, "E must not be void");
    static_assert(!std::is_same<E, void>::value, "E must not be void");

    Unexpected() = delete;

@@ -73,7 +73,7 @@ public:
    {
    }

    [[nodiscard]] constexpr E const&
    constexpr E const&
    value() const&
    {
        return val_;
@@ -91,7 +91,7 @@ public:
        return std::move(val_);
    }

    [[nodiscard]] constexpr E const&&
    constexpr E const&&
    value() const&&
    {
        return std::move(val_);
@@ -125,13 +125,13 @@ public:
    {
    }

    [[nodiscard]] constexpr bool
    constexpr bool
    has_value() const
    {
        return Base::has_value();
    }

    [[nodiscard]] constexpr T const&
    constexpr T const&
    value() const
    {
        return Base::value();
@@ -143,7 +143,7 @@ public:
        return Base::value();
    }

    [[nodiscard]] constexpr E const&
    constexpr E const&
    error() const
    {
        return Base::error();
@@ -210,7 +210,7 @@ public:
    {
    }

    [[nodiscard]] constexpr E const&
    constexpr E const&
    error() const
    {
        return Base::error();

@@ -159,11 +159,11 @@ public:
    reset();

    /** Get the raw pointer */
    [[nodiscard]] T*
    T*
    get() const;

    /** Return the strong count */
    [[nodiscard]] std::size_t
    std::size_t
    use_count() const;

    template <class TT, class... Args>
@@ -181,7 +181,7 @@ public:

private:
    /** Return the raw pointer held by this object. */
    [[nodiscard]] T*
    T*
    unsafeGetRawPtr() const;

    /** Exchange the current raw pointer held by this object with the given
@@ -260,7 +260,7 @@ public:
    lock() const;

    /** Return true if the strong count is zero. */
    [[nodiscard]] bool
    bool
    expired() const;

    /** Set the pointer to null and decrement the weak count.
@@ -339,7 +339,7 @@ public:
        don't lock the weak pointer. Use the `lock` method if that's what's
        needed)
    */
    [[nodiscard]] SharedIntrusive<T>
    SharedIntrusive<T>
    getStrong() const;

    /** Return true if this is a strong pointer and the strong pointer is
@@ -357,31 +357,31 @@ public:
    /** If this is a strong pointer, return the raw pointer. Otherwise
        return null.
    */
    [[nodiscard]] T*
    T*
    get() const;

    /** If this is a strong pointer, return the strong count. Otherwise
     * return 0
     */
    [[nodiscard]] std::size_t
    std::size_t
    use_count() const;

    /** Return true if there is a non-zero strong count. */
    [[nodiscard]] bool
    bool
    expired() const;

    /** If this is a strong pointer, return the strong pointer. Otherwise
        attempt to lock the weak pointer.
    */
    [[nodiscard]] SharedIntrusive<T>
    SharedIntrusive<T>
    lock() const;

    /** Return true if this represents a strong pointer. */
    [[nodiscard]] bool
    bool
    isStrong() const;

    /** Return true if this represents a weak pointer. */
    [[nodiscard]] bool
    bool
    isWeak() const;

    /** If this is a weak pointer, attempt to convert it to a strong
@@ -412,7 +412,7 @@ private:
private:
    /** Return the raw pointer held by this object.
    */
    [[nodiscard]] T*
    T*
    unsafeGetRawPtr() const;

    enum class RefStrength { strong, weak };

@@ -207,7 +207,7 @@ private:
    RefCountPair(CountType s, CountType w) noexcept;

    /** Convert back to the packed integer form. */
    [[nodiscard]] FieldType
    FieldType
    combinedValue() const noexcept;

    static constexpr CountType maxStrongValue =

@@ -4,7 +4,6 @@

#include <memory>
#include <unordered_map>
#include <utility>

namespace xrpl {

@@ -29,7 +28,7 @@ struct LocalValues
T t_;

Value() = default;
explicit Value(T t) : t_(std::move(t))
explicit Value(T const& t) : t_(t)
{
}


@@ -38,7 +38,7 @@ private:
std::string partition_;

public:
Sink(std::string partition, beast::severities::Severity thresh, Logs& logs);
Sink(std::string const& partition, beast::severities::Severity thresh, Logs& logs);

Sink(Sink const&) = delete;
Sink&
@@ -76,7 +76,7 @@ private:
@return `true` if a system file is associated and opened for
writing.
*/
[[nodiscard]] bool
bool
isOpen() const noexcept;

/** Associate a system file with the log.

@@ -44,7 +44,7 @@ public:
return data_;
}

[[nodiscard]] ProtectedDataType const&
ProtectedDataType const&
get() const
{
return data_;

@@ -252,9 +252,9 @@ public:
// Assume unsigned values are... unsigned. i.e. positive
explicit Number(internalrep mantissa, int exponent, normalized);

[[nodiscard]] constexpr rep
constexpr rep
mantissa() const noexcept;
[[nodiscard]] constexpr int
constexpr int
exponent() const noexcept;

constexpr Number
@@ -339,7 +339,7 @@ public:
}

/** Return the sign of the amount */
[[nodiscard]] constexpr int
constexpr int
signum() const noexcept
{
if (negative_)
@@ -347,7 +347,7 @@ public:
return (mantissa_ != 0u) ? 1 : 0;
}

[[nodiscard]] Number
Number
truncate() const noexcept;

friend constexpr bool
@@ -490,13 +490,13 @@ private:
MantissaRange::rep const& minMantissa,
MantissaRange::rep const& maxMantissa);

[[nodiscard]] bool
bool
isnormal() const noexcept;

// Copy the number, but modify the exponent by "exponentDelta". Because the
// mantissa doesn't change, the result will be "mostly" normalized, but the
// exponent could go out of range, so it will be checked.
[[nodiscard]] Number
Number
shiftExponent(int exponentDelta) const;

// Safely convert rep (int64) mantissa to internalrep (uint64). If the rep

@@ -20,7 +20,7 @@ public:
{
}

[[nodiscard]] uint256 const&
uint256 const&
as_uint256() const
{
return hash_;
@@ -30,17 +30,17 @@ public:
{
return hash_;
}
[[nodiscard]] bool
bool
isZero() const
{
return hash_.isZero();
}
[[nodiscard]] bool
bool
isNonZero() const
{
return hash_.isNonZero();
}
[[nodiscard]] int
int
signum() const
{
return hash_.signum();

@@ -49,7 +49,7 @@ public:
/** Return a strong pointer if this is already a strong pointer (i.e. don't
lock the weak pointer. Use the `lock` method if that's what's needed)
*/
[[nodiscard]] std::shared_ptr<T> const&
std::shared_ptr<T> const&
getStrong() const;

/** Return true if this is a strong pointer and the strong pointer is
@@ -67,30 +67,30 @@ public:
/** If this is a strong pointer, return the raw pointer. Otherwise return
null.
*/
[[nodiscard]] T*
T*
get() const;

/** If this is a strong pointer, return the strong count. Otherwise return 0
*/
[[nodiscard]] std::size_t
std::size_t
use_count() const;

/** Return true if there is a non-zero strong count. */
[[nodiscard]] bool
bool
expired() const;

/** If this is a strong pointer, return the strong pointer. Otherwise
attempt to lock the weak pointer.
*/
[[nodiscard]] std::shared_ptr<T>
std::shared_ptr<T>
lock() const;

/** Return true is this represents a strong pointer. */
[[nodiscard]] bool
bool
isStrong() const;

/** Return true is this represents a weak pointer. */
[[nodiscard]] bool
bool
isWeak() const;

/** If this is a weak pointer, attempt to convert it to a strong pointer.

@@ -66,10 +66,12 @@ class SlabAllocator
}
}

// Calling this destructor will release the allocated memory but
// will not properly destroy any objects that are constructed in
// the block itself.
~SlabBlock() = default;
~SlabBlock()
{
// Calling this destructor will release the allocated memory but
// will not properly destroy any objects that are constructed in
// the block itself.
}

SlabBlock(SlabBlock const& other) = delete;
SlabBlock&
@@ -174,13 +176,15 @@ public:
SlabAllocator&
operator=(SlabAllocator&& other) = delete;

// FIXME: We can't destroy the memory blocks we've allocated, because
// we can't be sure that they are not being used. Cleaning the
// shutdown process up could make this possible.
~SlabAllocator() = default;
~SlabAllocator()
{
// FIXME: We can't destroy the memory blocks we've allocated, because
// we can't be sure that they are not being used. Cleaning the
// shutdown process up could make this possible.
}

/** Returns the size of the memory block this allocator returns. */
[[nodiscard]] constexpr std::size_t
constexpr std::size_t
size() const noexcept
{
return itemSize_;
@@ -284,7 +288,7 @@ class SlabAllocatorSet
{
private:
// The list of allocators that belong to this set
boost::container::static_vector<SlabAllocator<Type>, 64> allocators_{};
boost::container::static_vector<SlabAllocator<Type>, 64> allocators_;

std::size_t maxSize_ = 0;

@@ -343,7 +347,9 @@ public:
SlabAllocatorSet&
operator=(SlabAllocatorSet&& other) = delete;

~SlabAllocatorSet() = default;
~SlabAllocatorSet()
{
}

/** Returns a suitably aligned pointer, if one is available.


@@ -74,7 +74,7 @@ public:
@note The return type is guaranteed to be a pointer
to a single byte, to facilitate pointer arithmetic.
*/
[[nodiscard]] std::uint8_t const*
std::uint8_t const*
data() const noexcept
{
return data_;
@@ -123,25 +123,25 @@ public:
size_ -= n;
}

[[nodiscard]] const_iterator
const_iterator
begin() const noexcept
{
return data_;
}

[[nodiscard]] const_iterator
const_iterator
cbegin() const noexcept
{
return data_;
}

[[nodiscard]] const_iterator
const_iterator
end() const noexcept
{
return data_ + size_;
}

[[nodiscard]] const_iterator
const_iterator
cend() const noexcept
{
return data_ + size_;
@@ -158,7 +158,7 @@ public:
@returns The requested subslice, if the request is valid.
@throws std::out_of_range if pos > size()
*/
[[nodiscard]] Slice
Slice
substr(std::size_t pos, std::size_t count = std::numeric_limits<std::size_t>::max()) const
{
if (pos > size())
@@ -211,14 +211,14 @@ operator<<(Stream& s, Slice const& v)
}

template <class T, std::size_t N>
std::enable_if_t<std::is_same_v<T, char> || std::is_same_v<T, unsigned char>, Slice>
std::enable_if_t<std::is_same<T, char>::value || std::is_same<T, unsigned char>::value, Slice>
makeSlice(std::array<T, N> const& a)
{
return Slice(a.data(), a.size());
}

template <class T, class Alloc>
std::enable_if_t<std::is_same_v<T, char> || std::is_same_v<T, unsigned char>, Slice>
std::enable_if_t<std::is_same<T, char>::value || std::is_same<T, unsigned char>::value, Slice>
makeSlice(std::vector<T, Alloc> const& v)
{
return Slice(v.data(), v.size());

@@ -222,19 +222,19 @@ private:
{
}

[[nodiscard]] bool
bool
isWeak() const
{
if (!ptr)
return true;
return ptr.isWeak();
}
[[nodiscard]] bool
bool
isCached() const
{
return ptr && ptr.isStrong();
}
[[nodiscard]] bool
bool
isExpired() const
{
return ptr.expired();
@@ -251,7 +251,7 @@ private:
}
};

using Entry = std::conditional_t<IsKeyCache, KeyOnlyEntry, ValueEntry>;
typedef typename std::conditional<IsKeyCache, KeyOnlyEntry, ValueEntry>::type Entry;

using KeyOnlyCacheType = hardened_partitioned_hash_map<key_type, KeyOnlyEntry, Hash, KeyEqual>;


@@ -12,7 +12,7 @@ namespace xrpl {
*/

template <class T>
std::enable_if_t<std::is_arithmetic_v<T>, std::string>
typename std::enable_if<std::is_arithmetic<T>::value, std::string>::type
to_string(T t)
{
return std::to_string(t);

@@ -102,7 +102,7 @@ public:
{
return reinterpret_cast<pointer>(data_.data());
}
[[nodiscard]] const_pointer
const_pointer
data() const
{
return reinterpret_cast<const_pointer>(data_.data());
@@ -118,22 +118,22 @@ public:
{
return data() + bytes;
}
[[nodiscard]] const_iterator
const_iterator
begin() const
{
return data();
}
[[nodiscard]] const_iterator
const_iterator
end() const
{
return data() + bytes;
}
[[nodiscard]] const_iterator
const_iterator
cbegin() const
{
return data();
}
[[nodiscard]] const_iterator
const_iterator
cend() const
{
return data() + bytes;
@@ -269,7 +269,7 @@ public:
class Container,
class = std::enable_if_t<
detail::is_contiguous_container<Container>::value &&
std::is_trivially_copyable_v<typename Container::value_type>>>
std::is_trivially_copyable<typename Container::value_type>::value>>
explicit base_uint(Container const& c)
{
XRPL_ASSERT(
@@ -281,7 +281,7 @@ public:
template <class Container>
std::enable_if_t<
detail::is_contiguous_container<Container>::value &&
std::is_trivially_copyable_v<typename Container::value_type>,
std::is_trivially_copyable<typename Container::value_type>::value,
base_uint&>
operator=(Container const& c)
{
@@ -310,7 +310,7 @@ public:
return fromVoid(from.data());
}

[[nodiscard]] constexpr int
constexpr int
signum() const
{
for (int i = 0; i < WIDTH; i++)
@@ -433,14 +433,14 @@ public:
return ret;
}

[[nodiscard]] base_uint
base_uint
next() const
{
auto ret = *this;
return ++ret;
}

[[nodiscard]] base_uint
base_uint
prev() const
{
auto ret = *this;
@@ -517,12 +517,12 @@ public:
}

// Deprecated.
[[nodiscard]] bool
bool
isZero() const
{
return *this == beast::zero;
}
[[nodiscard]] bool
bool
isNonZero() const
{
return *this != beast::zero;

@@ -49,7 +49,8 @@ template <class E, class... Args>
Throw(Args&&... args)
{
static_assert(
std::is_convertible_v<E*, std::exception*>, "Exception must derive from std::exception.");
std::is_convertible<E*, std::exception*>::value,
"Exception must derive from std::exception.");

E e(std::forward<Args>(args)...);
LogThrow(std::string("Throwing exception of type " + beast::type_name<E>() + ": ") + e.what());

@@ -72,12 +72,14 @@ template <class HashAlgorithm = beast::xxhasher>
class hardened_hash
{
private:
detail::seed_pair m_seeds{detail::make_seed_pair<>()};
detail::seed_pair m_seeds;

public:
using result_type = typename HashAlgorithm::result_type;

hardened_hash() = default;
hardened_hash() : m_seeds(detail::make_seed_pair<>())
{
}

template <class T>
result_type

@@ -57,7 +57,7 @@ public:
{
using iterator_category = std::forward_iterator_tag;
partition_map_type* map_{nullptr};
typename partition_map_type::iterator ait_{};
typename partition_map_type::iterator ait_;
typename map_type::iterator mit_;

iterator() = default;
@@ -126,7 +126,7 @@ public:
using iterator_category = std::forward_iterator_tag;

partition_map_type* map_{nullptr};
typename partition_map_type::iterator ait_{};
typename partition_map_type::iterator ait_;
typename map_type::iterator mit_;

const_iterator() = default;

@@ -15,8 +15,8 @@ namespace xrpl {
#ifndef __INTELLISENSE__
static_assert(
// NOLINTNEXTLINE(misc-redundant-expression)
std::is_integral_v<beast::xor_shift_engine::result_type> &&
std::is_unsigned_v<beast::xor_shift_engine::result_type>,
std::is_integral<beast::xor_shift_engine::result_type>::value &&
std::is_unsigned<beast::xor_shift_engine::result_type>::value,
"The XRPL default PRNG engine must return an unsigned integral type.");

static_assert(
@@ -91,7 +91,7 @@ default_prng()
*/
/** @{ */
template <class Engine, class Integral>
std::enable_if_t<std::is_integral_v<Integral> && detail::is_engine<Engine>::value, Integral>
std::enable_if_t<std::is_integral<Integral>::value && detail::is_engine<Engine>::value, Integral>
rand_int(Engine& engine, Integral min, Integral max)
{
XRPL_ASSERT(max > min, "xrpl::rand_int : max over min inputs");
@@ -103,35 +103,35 @@ rand_int(Engine& engine, Integral min, Integral max)
}

template <class Integral>
std::enable_if_t<std::is_integral_v<Integral>, Integral>
std::enable_if_t<std::is_integral<Integral>::value, Integral>
rand_int(Integral min, Integral max)
{
return rand_int(default_prng(), min, max);
}

template <class Engine, class Integral>
std::enable_if_t<std::is_integral_v<Integral> && detail::is_engine<Engine>::value, Integral>
std::enable_if_t<std::is_integral<Integral>::value && detail::is_engine<Engine>::value, Integral>
rand_int(Engine& engine, Integral max)
{
return rand_int(engine, Integral(0), max);
}

template <class Integral>
std::enable_if_t<std::is_integral_v<Integral>, Integral>
std::enable_if_t<std::is_integral<Integral>::value, Integral>
rand_int(Integral max)
{
return rand_int(default_prng(), max);
}

template <class Integral, class Engine>
std::enable_if_t<std::is_integral_v<Integral> && detail::is_engine<Engine>::value, Integral>
std::enable_if_t<std::is_integral<Integral>::value && detail::is_engine<Engine>::value, Integral>
rand_int(Engine& engine)
{
return rand_int(engine, std::numeric_limits<Integral>::max());
}

template <class Integral = int>
std::enable_if_t<std::is_integral_v<Integral>, Integral>
std::enable_if_t<std::is_integral<Integral>::value, Integral>
rand_int()
{
return rand_int(default_prng(), std::numeric_limits<Integral>::max());
@@ -142,7 +142,7 @@ rand_int()
/** @{ */
template <class Byte, class Engine>
std::enable_if_t<
(std::is_same_v<Byte, unsigned char> || std::is_same_v<Byte, std::uint8_t>) &&
(std::is_same<Byte, unsigned char>::value || std::is_same<Byte, std::uint8_t>::value) &&
detail::is_engine<Engine>::value,
Byte>
rand_byte(Engine& engine)
@@ -152,7 +152,9 @@ rand_byte(Engine& engine)
}

template <class Byte = std::uint8_t>
std::enable_if_t<(std::is_same_v<Byte, unsigned char> || std::is_same_v<Byte, std::uint8_t>), Byte>
std::enable_if_t<
(std::is_same<Byte, unsigned char>::value || std::is_same<Byte, std::uint8_t>::value),
Byte>
rand_byte()
{
return rand_byte<Byte>(default_prng());

@@ -12,9 +12,9 @@ namespace xrpl {

template <class Src, class Dest>
concept SafeToCast = (std::is_integral_v<Src> && std::is_integral_v<Dest>) &&
(std::is_signed_v<Src> || std::is_unsigned_v<Dest>) &&
(std::is_signed_v<Src> != std::is_signed_v<Dest> ? sizeof(Dest) > sizeof(Src)
: sizeof(Dest) >= sizeof(Src));
(std::is_signed<Src>::value || std::is_unsigned<Dest>::value) &&
(std::is_signed<Src>::value != std::is_signed<Dest>::value ? sizeof(Dest) > sizeof(Src)
: sizeof(Dest) >= sizeof(Src));

template <class Dest, class Src>
constexpr std::enable_if_t<std::is_integral_v<Dest> && std::is_integral_v<Src>, Dest>

@@ -10,9 +10,9 @@ std::string
strHex(FwdIt begin, FwdIt end)
{
static_assert(
std::is_convertible_v<
std::is_convertible<
typename std::iterator_traits<FwdIt>::iterator_category,
std::forward_iterator_tag>,
std::forward_iterator_tag>::value,
"FwdIt must be a forward iterator");
std::string result;
result.reserve(2 * std::distance(begin, end));

@@ -44,7 +44,8 @@ public:

template <
class OtherInt,
class = std::enable_if_t<std::is_integral_v<OtherInt> && sizeof(OtherInt) <= sizeof(Int)>>
class = typename std::enable_if<
std::is_integral<OtherInt>::value && sizeof(OtherInt) <= sizeof(Int)>::type>
explicit constexpr tagged_integer(OtherInt value) noexcept : m_value(value)
{
static_assert(sizeof(tagged_integer) == sizeof(Int), "tagged_integer is adding padding");

@@ -49,7 +49,7 @@ public:
return m_ios;
}

[[nodiscard]] boost::asio::io_context const&
boost::asio::io_context const&
get_io_context() const
{
return m_ios;

@@ -63,7 +63,7 @@ struct abstract_clock_wrapper : public abstract_clock<Facade>
using typename abstract_clock<Facade>::duration;
using typename abstract_clock<Facade>::time_point;

[[nodiscard]] time_point
time_point
now() const override
{
return Clock::now();

@@ -32,7 +32,7 @@ public:
{
}

[[nodiscard]] time_point
time_point
now() const override
{
return now_;

@@ -9,7 +9,7 @@ namespace beast {

/** Expire aged container items past the specified age. */
template <class AgedContainer, class Rep, class Period>
std::enable_if_t<is_aged_container<AgedContainer>::value, std::size_t>
typename std::enable_if<is_aged_container<AgedContainer>::value, std::size_t>::type
expire(AgedContainer& c, std::chrono::duration<Rep, Period> const& age)
{
std::size_t n(0);

@@ -1,6 +1,7 @@
#pragma once

namespace beast::detail {
namespace beast {
namespace detail {

// Extracts the key portion of value
template <bool maybe_map>
@@ -29,4 +30,5 @@ struct aged_associative_container_extract_t<false>
}
};

} // namespace beast::detail
} // namespace detail
} // namespace beast

@@ -2,7 +2,6 @@

#include <iterator>
#include <type_traits>
#include <utility>

namespace beast {

@@ -115,7 +114,7 @@ public:
return &m_iter->value;
}

[[nodiscard]] time_point const&
time_point const&
when() const
{
return m_iter->when;
@@ -132,11 +131,11 @@ private:
friend class aged_container_iterator;

template <class OtherIterator>
aged_container_iterator(OtherIterator iter) : m_iter(std::move(iter))
aged_container_iterator(OtherIterator const& iter) : m_iter(iter)
{
}

[[nodiscard]] Iterator const&
Iterator const&
iterator() const
{
return m_iter;

@@ -57,7 +57,8 @@ template <
class T,
class Clock = std::chrono::steady_clock,
class Compare = std::less<Key>,
class Allocator = std::allocator<std::conditional_t<IsMap, std::pair<Key const, T>, Key>>>
class Allocator =
std::allocator<typename std::conditional<IsMap, std::pair<Key const, T>, Key>::type>>
class aged_ordered_container
{
public:
@@ -66,7 +67,7 @@ public:
using duration = typename clock_type::duration;
using key_type = Key;
using mapped_type = T;
using value_type = std::conditional_t<IsMap, std::pair<Key const, T>, Key>;
using value_type = typename std::conditional<IsMap, std::pair<Key const, T>, Key>::type;
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;

@@ -109,7 +110,8 @@ private:

template <
class... Args,
class = std::enable_if_t<std::is_constructible_v<value_type, Args...>>>
class =
typename std::enable_if<std::is_constructible<value_type, Args...>::value>::type>
element(time_point const& when_, Args&&... args)
: value(std::forward<Args>(args)...), when(when_)
{
@@ -133,7 +135,9 @@ private:
return Compare::operator()(lhs.first, rhs.first);
}

pair_value_compare() = default;
pair_value_compare()
{
}

pair_value_compare(pair_value_compare const& other) : Compare(other)
{
@@ -186,7 +190,7 @@ private:
return *this;
}

[[nodiscard]] Compare const&
Compare const&
compare() const
{
return *this;
@@ -196,7 +200,7 @@ private:
using list_type = typename boost::intrusive::
make_list<element, boost::intrusive::constant_time_size<false>>::type;

using cont_type = std::conditional_t<
using cont_type = typename std::conditional<
IsMulti,
typename boost::intrusive::make_multiset<
element,
@@ -205,7 +209,7 @@ private:
typename boost::intrusive::make_set<
element,
boost::intrusive::constant_time_size<true>,
boost::intrusive::compare<KeyValueCompare>>::type>;
boost::intrusive::compare<KeyValueCompare>>::type>::type;

using ElementAllocator =
typename std::allocator_traits<Allocator>::template rebind_alloc<element>;
@@ -295,7 +299,7 @@ private:
return KeyValueCompare::compare();
}

[[nodiscard]] Compare const&
Compare const&
compare() const
{
return KeyValueCompare::compare();
@@ -307,7 +311,7 @@ private:
return *this;
}

[[nodiscard]] KeyValueCompare const&
KeyValueCompare const&
key_compare() const
{
return *this;
@@ -319,7 +323,7 @@ private:
return beast::detail::empty_base_optimization<ElementAllocator>::member();
}

[[nodiscard]] ElementAllocator const&
ElementAllocator const&
alloc() const
{
return beast::detail::empty_base_optimization<ElementAllocator>::member();
@@ -370,7 +374,7 @@ private:

public:
using key_compare = Compare;
using value_compare = std::conditional_t<IsMap, pair_value_compare, Compare>;
using value_compare = typename std::conditional<IsMap, pair_value_compare, Compare>::type;
using allocator_type = Allocator;
using reference = value_type&;
using const_reference = value_type const&;
@@ -398,8 +402,6 @@ public:

class chronological_t
{
chronological_t() = default;

public:
// A set iterator (IsMap==false) is always const
// because the elements of a set are immutable.
@@ -487,7 +489,7 @@ public:
iterator
iterator_to(value_type& value)
{
static_assert(std::is_standard_layout_v<element>, "must be standard layout");
static_assert(std::is_standard_layout<element>::value, "must be standard layout");
return list.iterator_to(*reinterpret_cast<element*>(
reinterpret_cast<uint8_t*>(&value) -
((std::size_t)std::addressof(((element*)0)->member))));
@@ -496,16 +498,20 @@ public:
const_iterator
iterator_to(value_type const& value) const
{
static_assert(std::is_standard_layout_v<element>, "must be standard layout");
static_assert(std::is_standard_layout<element>::value, "must be standard layout");
return list.iterator_to(*reinterpret_cast<element const*>(
reinterpret_cast<uint8_t const*>(&value) -
((std::size_t)std::addressof(((element*)0)->member))));
}

private:
chronological_t()
{
}

chronological_t(chronological_t const&) = delete;
chronological_t(chronological_t&&) = delete;

private:
friend class aged_ordered_container;
list_type mutable list;
} chronological;
@@ -611,30 +617,30 @@ public:
class K,
bool maybe_multi = IsMulti,
bool maybe_map = IsMap,
class = std::enable_if_t<maybe_map && !maybe_multi>>
std::conditional_t<IsMap, T, void*>&
class = typename std::enable_if<maybe_map && !maybe_multi>::type>
typename std::conditional<IsMap, T, void*>::type&
at(K const& k);

template <
class K,
bool maybe_multi = IsMulti,
bool maybe_map = IsMap,
class = std::enable_if_t<maybe_map && !maybe_multi>>
class = typename std::enable_if<maybe_map && !maybe_multi>::type>
typename std::conditional<IsMap, T, void*>::type const&
at(K const& k) const;

template <
bool maybe_multi = IsMulti,
bool maybe_map = IsMap,
class = std::enable_if_t<maybe_map && !maybe_multi>>
std::conditional_t<IsMap, T, void*>&
class = typename std::enable_if<maybe_map && !maybe_multi>::type>
typename std::conditional<IsMap, T, void*>::type&
operator[](Key const& key);

template <
bool maybe_multi = IsMulti,
bool maybe_map = IsMap,
class = std::enable_if_t<maybe_map && !maybe_multi>>
std::conditional_t<IsMap, T, void*>&
class = typename std::enable_if<maybe_map && !maybe_multi>::type>
typename std::conditional<IsMap, T, void*>::type&
operator[](Key&& key);

//--------------------------------------------------------------------------
@@ -718,7 +724,7 @@ public:
iterator
iterator_to(value_type& value)
{
static_assert(std::is_standard_layout_v<element>, "must be standard layout");
static_assert(std::is_standard_layout<element>::value, "must be standard layout");
return m_cont.iterator_to(*reinterpret_cast<element*>(
reinterpret_cast<uint8_t*>(&value) -
((std::size_t)std::addressof(((element*)0)->member))));
@@ -727,7 +733,7 @@ public:
const_iterator
iterator_to(value_type const& value) const
{
static_assert(std::is_standard_layout_v<element>, "must be standard layout");
static_assert(std::is_standard_layout<element>::value, "must be standard layout");
return m_cont.iterator_to(*reinterpret_cast<element const*>(
reinterpret_cast<uint8_t const*>(&value) -
((std::size_t)std::addressof(((element*)0)->member))));
@@ -769,35 +775,37 @@ public:
// map, set
template <bool maybe_multi = IsMulti>
auto
insert(value_type const& value) -> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>;
insert(value_type const& value) ->
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type;

// multimap, multiset
template <bool maybe_multi = IsMulti>
auto
insert(value_type const& value) -> std::enable_if_t<maybe_multi, iterator>;
insert(value_type const& value) -> typename std::enable_if<maybe_multi, iterator>::type;

// set
template <bool maybe_multi = IsMulti, bool maybe_map = IsMap>
auto
insert(value_type&& value)
-> std::enable_if_t<!maybe_multi && !maybe_map, std::pair<iterator, bool>>;
insert(value_type&& value) ->
typename std::enable_if<!maybe_multi && !maybe_map, std::pair<iterator, bool>>::type;

// multiset
template <bool maybe_multi = IsMulti, bool maybe_map = IsMap>
auto
insert(value_type&& value) -> std::enable_if_t<maybe_multi && !maybe_map, iterator>;
insert(value_type&& value) ->
typename std::enable_if<maybe_multi && !maybe_map, iterator>::type;

//---

// map, set
template <bool maybe_multi = IsMulti>
auto
insert(const_iterator hint, value_type const& value)
-> std::enable_if_t<!maybe_multi, iterator>;
insert(const_iterator hint, value_type const& value) ->
typename std::enable_if<!maybe_multi, iterator>::type;

// multimap, multiset
template <bool maybe_multi = IsMulti>
std::enable_if_t<maybe_multi, iterator>
typename std::enable_if<maybe_multi, iterator>::type
insert(const_iterator /*hint*/, value_type const& value)
{
// VFALCO TODO Figure out how to utilize 'hint'
@@ -807,11 +815,12 @@ public:
// map, set
template <bool maybe_multi = IsMulti>
auto
insert(const_iterator hint, value_type&& value) -> std::enable_if_t<!maybe_multi, iterator>;
insert(const_iterator hint, value_type&& value) ->
typename std::enable_if<!maybe_multi, iterator>::type;

// multimap, multiset
template <bool maybe_multi = IsMulti>
std::enable_if_t<maybe_multi, iterator>
typename std::enable_if<maybe_multi, iterator>::type
insert(const_iterator /*hint*/, value_type&& value)
{
// VFALCO TODO Figure out how to utilize 'hint'
@@ -820,9 +829,9 @@ public:

// map, multimap
template <class P, bool maybe_map = IsMap>
std::enable_if_t<
maybe_map && std::is_constructible_v<value_type, P&&>,
std::conditional_t<IsMulti, iterator, std::pair<iterator, bool>>>
typename std::enable_if<
maybe_map && std::is_constructible<value_type, P&&>::value,
typename std::conditional<IsMulti, iterator, std::pair<iterator, bool>>::type>::type
insert(P&& value)
{
return emplace(std::forward<P>(value));
@@ -830,9 +839,9 @@ public:

// map, multimap
template <class P, bool maybe_map = IsMap>
std::enable_if_t<
maybe_map && std::is_constructible_v<value_type, P&&>,
std::conditional_t<IsMulti, iterator, std::pair<iterator, bool>>>
typename std::enable_if<
maybe_map && std::is_constructible<value_type, P&&>::value,
typename std::conditional<IsMulti, iterator, std::pair<iterator, bool>>::type>::type
insert(const_iterator hint, P&& value)
{
return emplace_hint(hint, std::forward<P>(value));
@@ -855,22 +864,23 @@ public:
// map, set
template <bool maybe_multi = IsMulti, class... Args>
auto
emplace(Args&&... args) -> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>;
emplace(Args&&... args) ->
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type;

// multiset, multimap
template <bool maybe_multi = IsMulti, class... Args>
auto
emplace(Args&&... args) -> std::enable_if_t<maybe_multi, iterator>;
emplace(Args&&... args) -> typename std::enable_if<maybe_multi, iterator>::type;

// map, set
template <bool maybe_multi = IsMulti, class... Args>
auto
emplace_hint(const_iterator hint, Args&&... args)
-> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>;
emplace_hint(const_iterator hint, Args&&... args) ->
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type;

// multiset, multimap
template <bool maybe_multi = IsMulti, class... Args>
std::enable_if_t<maybe_multi, iterator>
typename std::enable_if<maybe_multi, iterator>::type
emplace_hint(const_iterator /*hint*/, Args&&... args)
{
// VFALCO TODO Figure out how to utilize 'hint'
@@ -1154,12 +1164,12 @@ private:

template <
bool maybe_propagate = std::allocator_traits<Allocator>::propagate_on_container_swap::value>
std::enable_if_t<maybe_propagate>
typename std::enable_if<maybe_propagate>::type
swap_data(aged_ordered_container& other) noexcept;

template <
bool maybe_propagate = std::allocator_traits<Allocator>::propagate_on_container_swap::value>
std::enable_if_t<!maybe_propagate>
typename std::enable_if<!maybe_propagate>::type
swap_data(aged_ordered_container& other) noexcept;

private:
@@ -1386,7 +1396,7 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::opera

template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <class K, bool maybe_multi, bool maybe_map, class>
std::conditional_t<IsMap, T, void*>&
typename std::conditional<IsMap, T, void*>::type&
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::at(K const& k)
{
auto const iter(m_cont.find(k, std::cref(m_config.key_compare())));
@@ -1408,7 +1418,7 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::at(K

template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <bool maybe_multi, bool maybe_map, class>
std::conditional_t<IsMap, T, void*>&
typename std::conditional<IsMap, T, void*>::type&
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::operator[](
Key const& key)
{
@@ -1427,7 +1437,7 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::opera

template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <bool maybe_multi, bool maybe_map, class>
std::conditional_t<IsMap, T, void*>&
typename std::conditional<IsMap, T, void*>::type&
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::operator[](Key&& key)
{
typename cont_type::insert_commit_data d;
@@ -1462,7 +1472,8 @@ template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compa
template <bool maybe_multi>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(
value_type const& value) -> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>
value_type const& value) ->
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type
{
typename cont_type::insert_commit_data d;
auto const result(m_cont.insert_check(extract(value), std::cref(m_config.key_compare()), d));
@@ -1481,7 +1492,7 @@ template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compa
template <bool maybe_multi>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(
value_type const& value) -> std::enable_if_t<maybe_multi, iterator>
value_type const& value) -> typename std::enable_if<maybe_multi, iterator>::type
{
auto const before(m_cont.upper_bound(extract(value), std::cref(m_config.key_compare())));
element* const p(new_element(value));
@@ -1495,7 +1506,8 @@ template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compa
template <bool maybe_multi, bool maybe_map>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(
value_type&& value) -> std::enable_if_t<!maybe_multi && !maybe_map, std::pair<iterator, bool>>
value_type&& value) ->
typename std::enable_if<!maybe_multi && !maybe_map, std::pair<iterator, bool>>::type
{
typename cont_type::insert_commit_data d;
auto const result(m_cont.insert_check(extract(value), std::cref(m_config.key_compare()), d));
@@ -1514,7 +1526,7 @@ template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compa
template <bool maybe_multi, bool maybe_map>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(
value_type&& value) -> std::enable_if_t<maybe_multi && !maybe_map, iterator>
value_type&& value) -> typename std::enable_if<maybe_multi && !maybe_map, iterator>::type
{
auto const before(m_cont.upper_bound(extract(value), std::cref(m_config.key_compare())));
element* const p(new_element(std::move(value)));
@@ -1531,7 +1543,7 @@ template <bool maybe_multi>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(
const_iterator hint,
value_type const& value) -> std::enable_if_t<!maybe_multi, iterator>
value_type const& value) -> typename std::enable_if<!maybe_multi, iterator>::type
{
typename cont_type::insert_commit_data d;
auto const result(
@@ -1552,7 +1564,7 @@ template <bool maybe_multi>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(
const_iterator hint,
value_type&& value) -> std::enable_if_t<!maybe_multi, iterator>
value_type&& value) -> typename std::enable_if<!maybe_multi, iterator>::type
{
typename cont_type::insert_commit_data d;
auto const result(
@@ -1572,7 +1584,7 @@ template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compa
template <bool maybe_multi, class... Args>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::emplace(Args&&... args)
-> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>
-> typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type
{
// VFALCO NOTE Its unfortunate that we need to
// construct element here
@@ -1594,7 +1606,7 @@ template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compa
template <bool maybe_multi, class... Args>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::emplace(Args&&... args)
-> std::enable_if_t<maybe_multi, iterator>
-> typename std::enable_if<maybe_multi, iterator>::type
{
element* const p(new_element(std::forward<Args>(args)...));
auto const before(m_cont.upper_bound(extract(p->value), std::cref(m_config.key_compare())));
@@ -1609,7 +1621,7 @@ template <bool maybe_multi, class... Args>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::emplace_hint(
const_iterator hint,
Args&&... args) -> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>
Args&&... args) -> typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type
{
// VFALCO NOTE Its unfortunate that we need to
// construct element here
@@ -1759,7 +1771,7 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::touch

template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <bool maybe_propagate>
std::enable_if_t<maybe_propagate>
typename std::enable_if<maybe_propagate>::type
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::swap_data(
aged_ordered_container& other) noexcept
{
@@ -1770,7 +1782,7 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::swap_

template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <bool maybe_propagate>
std::enable_if_t<!maybe_propagate>
typename std::enable_if<!maybe_propagate>::type
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::swap_data(
aged_ordered_container& other) noexcept
{

@@ -62,7 +62,8 @@ template <
class Clock = std::chrono::steady_clock,
class Hash = std::hash<Key>,
class KeyEqual = std::equal_to<Key>,
class Allocator = std::allocator<std::conditional_t<IsMap, std::pair<Key const, T>, Key>>>
class Allocator =
std::allocator<typename std::conditional<IsMap, std::pair<Key const, T>, Key>::type>>
class aged_unordered_container
{
public:
@@ -71,7 +72,7 @@ public:
using duration = typename clock_type::duration;
using key_type = Key;
using mapped_type = T;
using value_type = std::conditional_t<IsMap, std::pair<Key const, T>, Key>;
using value_type = typename std::conditional<IsMap, std::pair<Key const, T>, Key>::type;
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;

@@ -114,7 +115,8 @@ private:

template <
class... Args,
class = std::enable_if_t<std::is_constructible_v<value_type, Args...>>>
class =
typename std::enable_if<std::is_constructible<value_type, Args...>::value>::type>
element(time_point const& when_, Args&&... args)
: value(std::forward<Args>(args)...), when(when_)
{
@@ -131,7 +133,9 @@ private:
using argument_type = element;
using result_type = size_t;

ValueHash() = default;
ValueHash()
{
}

ValueHash(Hash const& h) : Hash(h)
{
@@ -149,7 +153,7 @@ private:
return *this;
}

[[nodiscard]] Hash const&
Hash const&
hash_function() const
{
return *this;
@@ -165,7 +169,9 @@ private:
using second_argument_type = element;
using result_type = bool;

KeyValueEqual() = default;
KeyValueEqual()
{
}

KeyValueEqual(KeyEqual const& keyEqual) : KeyEqual(keyEqual)
{
@@ -195,7 +201,7 @@ private:
return *this;
}

[[nodiscard]] KeyEqual const&
KeyEqual const&
key_eq() const
{
return *this;
@@ -205,7 +211,7 @@ private:
using list_type = typename boost::intrusive::
make_list<element, boost::intrusive::constant_time_size<false>>::type;

using cont_type = std::conditional_t<
using cont_type = typename std::conditional<
IsMulti,
typename boost::intrusive::make_unordered_multiset<
element,
@@ -218,7 +224,7 @@ private:
boost::intrusive::constant_time_size<true>,
boost::intrusive::hash<ValueHash>,
boost::intrusive::equal<KeyValueEqual>,
boost::intrusive::cache_begin<true>>::type>;
boost::intrusive::cache_begin<true>>::type>::type;

using bucket_type = typename cont_type::bucket_type;
using bucket_traits = typename cont_type::bucket_traits;
@@ -348,7 +354,7 @@ private:
return *this;
}

[[nodiscard]] ValueHash const&
ValueHash const&
value_hash() const
{
return *this;
@@ -360,7 +366,7 @@ private:
return ValueHash::hash_function();
}

[[nodiscard]] Hash const&
Hash const&
hash_function() const
{
return ValueHash::hash_function();
@@ -372,7 +378,7 @@ private:
return *this;
}

[[nodiscard]] KeyValueEqual const&
KeyValueEqual const&
key_value_equal() const
{
return *this;
@@ -384,7 +390,7 @@ private:
return key_value_equal().key_eq();
}

[[nodiscard]] KeyEqual const&
KeyEqual const&
key_eq() const
{
return key_value_equal().key_eq();
@@ -396,7 +402,7 @@ private:
return beast::detail::empty_base_optimization<ElementAllocator>::member();
}

[[nodiscard]] ElementAllocator const&
ElementAllocator const&
alloc() const
{
return beast::detail::empty_base_optimization<ElementAllocator>::member();
@@ -433,7 +439,7 @@ private:
m_vec.clear();
}

[[nodiscard]] size_type
size_type
max_bucket_count() const
{
return m_vec.max_size();
@@ -445,7 +451,7 @@ private:
return m_max_load_factor;
}

[[nodiscard]] float const&
float const&
max_load_factor() const
{
return m_max_load_factor;
@@ -656,7 +662,7 @@ public:
iterator
iterator_to(value_type& value)
{
static_assert(std::is_standard_layout_v<element>, "must be standard layout");
static_assert(std::is_standard_layout<element>::value, "must be standard layout");
return list.iterator_to(*reinterpret_cast<element*>(
reinterpret_cast<uint8_t*>(&value) -
((std::size_t)std::addressof(((element*)0)->member))));
@@ -665,17 +671,20 @@ public:
const_iterator
iterator_to(value_type const& value) const
{
static_assert(std::is_standard_layout_v<element>, "must be standard layout");
static_assert(std::is_standard_layout<element>::value, "must be standard layout");
return list.iterator_to(*reinterpret_cast<element const*>(
reinterpret_cast<uint8_t const*>(&value) -
((std::size_t)std::addressof(((element*)0)->member))));
}

private:
chronological_t()
{
}

chronological_t(chronological_t const&) = delete;
chronological_t(chronological_t&&) = delete;
chronological_t() = default;

private:
friend class aged_unordered_container;
list_type mutable list;
} chronological;
@@ -853,30 +862,30 @@ public:
class K,
bool maybe_multi = IsMulti,
bool maybe_map = IsMap,
class = std::enable_if_t<maybe_map && !maybe_multi>>
std::conditional_t<IsMap, T, void*>&
class = typename std::enable_if<maybe_map && !maybe_multi>::type>
typename std::conditional<IsMap, T, void*>::type&
at(K const& k);

template <
class K,
bool maybe_multi = IsMulti,
bool maybe_map = IsMap,
class = std::enable_if_t<maybe_map && !maybe_multi>>
class = typename std::enable_if<maybe_map && !maybe_multi>::type>
typename std::conditional<IsMap, T, void*>::type const&
at(K const& k) const;

template <
bool maybe_multi = IsMulti,
bool maybe_map = IsMap,
class = std::enable_if_t<maybe_map && !maybe_multi>>
std::conditional_t<IsMap, T, void*>&
class = typename std::enable_if<maybe_map && !maybe_multi>::type>
typename std::conditional<IsMap, T, void*>::type&
operator[](Key const& key);

template <
bool maybe_multi = IsMulti,
bool maybe_map = IsMap,
class = std::enable_if_t<maybe_map && !maybe_multi>>
std::conditional_t<IsMap, T, void*>&
class = typename std::enable_if<maybe_map && !maybe_multi>::type>
typename std::conditional<IsMap, T, void*>::type&
operator[](Key&& key);

//--------------------------------------------------------------------------
@@ -924,7 +933,7 @@ public:
iterator
iterator_to(value_type& value)
{
static_assert(std::is_standard_layout_v<element>, "must be standard layout");
static_assert(std::is_standard_layout<element>::value, "must be standard layout");
return m_cont.iterator_to(*reinterpret_cast<element*>(
reinterpret_cast<uint8_t*>(&value) -
((std::size_t)std::addressof(((element*)0)->member))));
@@ -933,7 +942,7 @@ public:
const_iterator
iterator_to(value_type const& value) const
{
static_assert(std::is_standard_layout_v<element>, "must be standard layout");
static_assert(std::is_standard_layout<element>::value, "must be standard layout");
return m_cont.iterator_to(*reinterpret_cast<element const*>(
reinterpret_cast<uint8_t const*>(&value) -
((std::size_t)std::addressof(((element*)0)->member))));
@@ -975,27 +984,29 @@ public:
// map, set
template <bool maybe_multi = IsMulti>
auto
insert(value_type const& value) -> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>;
insert(value_type const& value) ->
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type;

// multimap, multiset
template <bool maybe_multi = IsMulti>
auto
insert(value_type const& value) -> std::enable_if_t<maybe_multi, iterator>;
insert(value_type const& value) -> typename std::enable_if<maybe_multi, iterator>::type;

// map, set
template <bool maybe_multi = IsMulti, bool maybe_map = IsMap>
auto
insert(value_type&& value)
-> std::enable_if_t<!maybe_multi && !maybe_map, std::pair<iterator, bool>>;
insert(value_type&& value) ->
typename std::enable_if<!maybe_multi && !maybe_map, std::pair<iterator, bool>>::type;

// multimap, multiset
template <bool maybe_multi = IsMulti, bool maybe_map = IsMap>
auto
insert(value_type&& value) -> std::enable_if_t<maybe_multi && !maybe_map, iterator>;
insert(value_type&& value) ->
typename std::enable_if<maybe_multi && !maybe_map, iterator>::type;

// map, set
template <bool maybe_multi = IsMulti>
std::enable_if_t<!maybe_multi, iterator>
typename std::enable_if<!maybe_multi, iterator>::type
insert(const_iterator /*hint*/, value_type const& value)
{
// Hint is ignored but we provide the interface so
@@ -1005,7 +1016,7 @@ public:

// multimap, multiset
template <bool maybe_multi = IsMulti>
std::enable_if_t<maybe_multi, iterator>
typename std::enable_if<maybe_multi, iterator>::type
insert(const_iterator /*hint*/, value_type const& value)
{
// VFALCO TODO The hint could be used to let
@@ -1015,7 +1026,7 @@ public:

// map, set
template <bool maybe_multi = IsMulti>
std::enable_if_t<!maybe_multi, iterator>
typename std::enable_if<!maybe_multi, iterator>::type
insert(const_iterator /*hint*/, value_type&& value)
{
// Hint is ignored but we provide the interface so
@@ -1025,7 +1036,7 @@ public:

// multimap, multiset
template <bool maybe_multi = IsMulti>
std::enable_if_t<maybe_multi, iterator>
typename std::enable_if<maybe_multi, iterator>::type
insert(const_iterator /*hint*/, value_type&& value)
{
// VFALCO TODO The hint could be used to let
@@ -1035,9 +1046,9 @@ public:

// map, multimap
template <class P, bool maybe_map = IsMap>
std::enable_if_t<
maybe_map && std::is_constructible_v<value_type, P&&>,
std::conditional_t<IsMulti, iterator, std::pair<iterator, bool>>>
typename std::enable_if<
maybe_map && std::is_constructible<value_type, P&&>::value,
typename std::conditional<IsMulti, iterator, std::pair<iterator, bool>>::type>::type
insert(P&& value)
{
return emplace(std::forward<P>(value));
@@ -1045,9 +1056,9 @@ public:

// map, multimap
template <class P, bool maybe_map = IsMap>
std::enable_if_t<
maybe_map && std::is_constructible_v<value_type, P&&>,
std::conditional_t<IsMulti, iterator, std::pair<iterator, bool>>>
typename std::enable_if<
maybe_map && std::is_constructible<value_type, P&&>::value,
typename std::conditional<IsMulti, iterator, std::pair<iterator, bool>>::type>::type
insert(const_iterator hint, P&& value)
{
return emplace_hint(hint, std::forward<P>(value));
@@ -1069,22 +1080,23 @@ public:
// set, map
template <bool maybe_multi = IsMulti, class... Args>
auto
emplace(Args&&... args) -> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>;
emplace(Args&&... args) ->
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type;

// multiset, multimap
template <bool maybe_multi = IsMulti, class... Args>
auto
emplace(Args&&... args) -> std::enable_if_t<maybe_multi, iterator>;
emplace(Args&&... args) -> typename std::enable_if<maybe_multi, iterator>::type;

// set, map
template <bool maybe_multi = IsMulti, class... Args>
auto
emplace_hint(const_iterator /*hint*/, Args&&... args)
-> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>;
emplace_hint(const_iterator /*hint*/, Args&&... args) ->
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type;

// multiset, multimap
template <bool maybe_multi = IsMulti, class... Args>
std::enable_if_t<maybe_multi, iterator>
typename std::enable_if<maybe_multi, iterator>::type
emplace_hint(const_iterator /*hint*/, Args&&... args)
{
// VFALCO TODO The hint could be used for multi, to let
@@ -1316,7 +1328,7 @@ public:
class OtherHash,
class OtherAllocator,
bool maybe_multi = IsMulti>
std::enable_if_t<!maybe_multi, bool>
typename std::enable_if<!maybe_multi, bool>::type
operator==(aged_unordered_container<
false,
OtherIsMap,
@@ -1335,7 +1347,7 @@ public:
class OtherHash,
class OtherAllocator,
bool maybe_multi = IsMulti>
std::enable_if_t<maybe_multi, bool>
typename std::enable_if<maybe_multi, bool>::type
operator==(aged_unordered_container<
true,
OtherIsMap,
@@ -1389,13 +1401,14 @@ private:
// map, set
template <bool maybe_multi = IsMulti>
auto
insert_unchecked(value_type const& value)
-> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>;
insert_unchecked(value_type const& value) ->
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type;

// multimap, multiset
template <bool maybe_multi = IsMulti>
auto
insert_unchecked(value_type const& value) -> std::enable_if_t<maybe_multi, iterator>;
insert_unchecked(value_type const& value) ->
typename std::enable_if<maybe_multi, iterator>::type;

template <class InputIt>
void
@@ -1436,7 +1449,7 @@ private:

template <
bool maybe_propagate = std::allocator_traits<Allocator>::propagate_on_container_swap::value>
std::enable_if_t<maybe_propagate>
typename std::enable_if<maybe_propagate>::type
swap_data(aged_unordered_container& other) noexcept
{
std::swap(m_config.key_compare(), other.m_config.key_compare());
@@ -1446,7 +1459,7 @@ private:

template <
bool maybe_propagate = std::allocator_traits<Allocator>::propagate_on_container_swap::value>
std::enable_if_t<!maybe_propagate>
typename std::enable_if<!maybe_propagate>::type
swap_data(aged_unordered_container& other) noexcept
{
std::swap(m_config.key_compare(), other.m_config.key_compare());
@@ -2101,7 +2114,7 @@ template <
class KeyEqual,
class Allocator>
template <class K, bool maybe_multi, bool maybe_map, class>
std::conditional_t<IsMap, T, void*>&
typename std::conditional<IsMap, T, void*>::type&
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::at(K const& k)
{
auto const iter(
@@ -2142,7 +2155,7 @@ template <
class KeyEqual,
class Allocator>
template <bool maybe_multi, bool maybe_map, class>
std::conditional_t<IsMap, T, void*>&
typename std::conditional<IsMap, T, void*>::type&
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::operator[](
Key const& key)
{
@@ -2171,7 +2184,7 @@ template <
class KeyEqual,
class Allocator>
template <bool maybe_multi, bool maybe_map, class>
std::conditional_t<IsMap, T, void*>&
|
||||
typename std::conditional<IsMap, T, void*>::type&
|
||||
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::operator[](
|
||||
Key&& key)
|
||||
{
|
||||
@@ -2226,7 +2239,8 @@ template <
|
||||
template <bool maybe_multi>
|
||||
auto
|
||||
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::insert(
|
||||
value_type const& value) -> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>
|
||||
value_type const& value) ->
|
||||
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type
|
||||
{
|
||||
maybe_rehash(1);
|
||||
typename cont_type::insert_commit_data d;
|
||||
@@ -2258,7 +2272,7 @@ template <
|
||||
template <bool maybe_multi>
|
||||
auto
|
||||
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::insert(
|
||||
value_type const& value) -> std::enable_if_t<maybe_multi, iterator>
|
||||
value_type const& value) -> typename std::enable_if<maybe_multi, iterator>::type
|
||||
{
|
||||
maybe_rehash(1);
|
||||
element* const p(new_element(value));
|
||||
@@ -2280,7 +2294,8 @@ template <
|
||||
template <bool maybe_multi, bool maybe_map>
|
||||
auto
|
||||
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::insert(
|
||||
value_type&& value) -> std::enable_if_t<!maybe_multi && !maybe_map, std::pair<iterator, bool>>
|
||||
value_type&& value) ->
|
||||
typename std::enable_if<!maybe_multi && !maybe_map, std::pair<iterator, bool>>::type
|
||||
{
|
||||
maybe_rehash(1);
|
||||
typename cont_type::insert_commit_data d;
|
||||
@@ -2312,7 +2327,7 @@ template <
|
||||
template <bool maybe_multi, bool maybe_map>
|
||||
auto
|
||||
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::insert(
|
||||
value_type&& value) -> std::enable_if_t<maybe_multi && !maybe_map, iterator>
|
||||
value_type&& value) -> typename std::enable_if<maybe_multi && !maybe_map, iterator>::type
|
||||
{
|
||||
maybe_rehash(1);
|
||||
element* const p(new_element(std::move(value)));
|
||||
@@ -2335,7 +2350,7 @@ template <
|
||||
template <bool maybe_multi, class... Args>
|
||||
auto
|
||||
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::emplace(
|
||||
Args&&... args) -> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>
|
||||
Args&&... args) -> typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type
|
||||
{
|
||||
maybe_rehash(1);
|
||||
// VFALCO NOTE Its unfortunate that we need to
|
||||
@@ -2400,7 +2415,7 @@ template <
|
||||
template <bool maybe_multi, class... Args>
|
||||
auto
|
||||
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::emplace(
|
||||
Args&&... args) -> std::enable_if_t<maybe_multi, iterator>
|
||||
Args&&... args) -> typename std::enable_if<maybe_multi, iterator>::type
|
||||
{
|
||||
maybe_rehash(1);
|
||||
element* const p(new_element(std::forward<Args>(args)...));
|
||||
@@ -2423,7 +2438,7 @@ template <bool maybe_multi, class... Args>
|
||||
auto
|
||||
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::emplace_hint(
|
||||
const_iterator /*hint*/,
|
||||
Args&&... args) -> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>
|
||||
Args&&... args) -> typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type
|
||||
{
|
||||
maybe_rehash(1);
|
||||
// VFALCO NOTE Its unfortunate that we need to
|
||||
@@ -2575,7 +2590,7 @@ template <
|
||||
class OtherHash,
|
||||
class OtherAllocator,
|
||||
bool maybe_multi>
|
||||
std::enable_if_t<!maybe_multi, bool>
|
||||
typename std::enable_if<!maybe_multi, bool>::type
|
||||
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::operator==(
|
||||
aged_unordered_container<
|
||||
false,
|
||||
@@ -2615,7 +2630,7 @@ template <
|
||||
class OtherHash,
|
||||
class OtherAllocator,
|
||||
bool maybe_multi>
|
||||
std::enable_if_t<maybe_multi, bool>
|
||||
typename std::enable_if<maybe_multi, bool>::type
|
||||
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::operator==(
|
||||
aged_unordered_container<
|
||||
true,
|
||||
@@ -2662,8 +2677,8 @@ template <
|
||||
template <bool maybe_multi>
|
||||
auto
|
||||
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::
|
||||
insert_unchecked(value_type const& value)
|
||||
-> std::enable_if_t<!maybe_multi, std::pair<iterator, bool>>
|
||||
insert_unchecked(value_type const& value) ->
|
||||
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type
|
||||
{
|
||||
typename cont_type::insert_commit_data d;
|
||||
auto const result(m_cont.insert_check(
|
||||
@@ -2694,7 +2709,8 @@ template <
|
||||
template <bool maybe_multi>
|
||||
auto
|
||||
aged_unordered_container<IsMulti, IsMap, Key, T, Clock, Hash, KeyEqual, Allocator>::
|
||||
insert_unchecked(value_type const& value) -> std::enable_if_t<maybe_multi, iterator>
|
||||
insert_unchecked(value_type const& value) ->
|
||||
typename std::enable_if<maybe_multi, iterator>::type
|
||||
{
|
||||
element* const p(new_element(value));
|
||||
chronological.list.push_back(*p);
|
||||
|
||||
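For context on the pattern swapped throughout the hunks above: since C++14, std::enable_if_t<B, T> is defined as an alias template for typename std::enable_if<B, T>::type, so exchanging the two spellings changes no overload-resolution behavior. A minimal sketch of the equivalence follows; the container name and member functions here are illustrative only, not from the diff:

    #include <type_traits>

    // Both spellings declare the same SFINAE-constrained overloads.
    template <bool IsMulti>
    struct container
    {
        // C++14 alias spelling (the form being removed in this diff):
        template <bool M = IsMulti>
        std::enable_if_t<M, int>
        insert_multi(int v)
        {
            return v;
        }

        // Pre-C++14 spelling (the form being added in this diff):
        template <bool M = IsMulti>
        typename std::enable_if<!M, int>::type
        insert_unique(int v)
        {
            return v;
        }
    };

    int main()
    {
        container<true> c;
        // insert_unique is SFINAE'd out for this instantiation.
        return c.insert_multi(0);
    }
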
@@ -11,11 +11,12 @@
#include <type_traits>
#include <utility>

namespace beast::detail {
namespace beast {
namespace detail {

template <class T>
struct is_empty_base_optimization_derived
: std::integral_constant<bool, std::is_empty_v<T> && !boost::is_final<T>::value>
: std::integral_constant<bool, std::is_empty<T>::value && !boost::is_final<T>::value>
{
};

@@ -43,7 +44,7 @@ public:
return *this;
}

[[nodiscard]] T const&
T const&
member() const noexcept
{
return *this;
@@ -85,4 +86,5 @@ public:
}
};

} // namespace beast::detail
} // namespace detail
} // namespace beast

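The namespace rewrite in this hunk, repeated in many files below, trades the C++17 compact form for the pre-C++17 nested form; both declare the same namespace hierarchy, consistent with modernize-concat-nested-namespaces being commented out of .clang-tidy in this branch. A small sketch (function names illustrative only):

    // C++17 compact form (the removed lines):
    namespace beast::detail {
    inline int
    answer()
    {
        return 42;
    }
    } // namespace beast::detail

    // Pre-C++17 nested form (the added lines) reopens the same namespaces:
    namespace beast {
    namespace detail {
    inline int
    answer2()
    {
        return answer(); // same scope, so unqualified lookup finds it
    }
    } // namespace detail
    } // namespace beast

    int main()
    {
        return beast::detail::answer2() == 42 ? 0 : 1;
    }
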
@@ -16,7 +16,7 @@ struct CopyConst
{
explicit CopyConst() = default;

using type = std::remove_const_t<U>;
using type = typename std::remove_const<U>::type;
};

template <typename T, typename U>
@@ -128,7 +128,7 @@ public:
}

private:
[[nodiscard]] reference
reference
dereference() const noexcept
{
return static_cast<reference>(*m_node);
@@ -287,14 +287,14 @@ public:
/** Determine if the list is empty.
@return `true` if the list is empty.
*/
[[nodiscard]] bool
bool
empty() const noexcept
{
return size() == 0;
}

/** Returns the number of elements in the list. */
[[nodiscard]] size_type
size_type
size() const noexcept
{
return m_size;
@@ -314,7 +314,7 @@ public:
@invariant The list may not be empty.
@return A const reference to the first element.
*/
[[nodiscard]] const_reference
const_reference
front() const noexcept
{
return element_from(m_head.m_next);
@@ -334,7 +334,7 @@ public:
@invariant The list may not be empty.
@return A const reference to the last element.
*/
[[nodiscard]] const_reference
const_reference
back() const noexcept
{
return element_from(m_tail.m_prev);
@@ -352,7 +352,7 @@ public:
/** Obtain a const iterator to the beginning of the list.
@return A const iterator pointing to the beginning of the list.
*/
[[nodiscard]] const_iterator
const_iterator
begin() const noexcept
{
return const_iterator(m_head.m_next);
@@ -361,7 +361,7 @@ public:
/** Obtain a const iterator to the beginning of the list.
@return A const iterator pointing to the beginning of the list.
*/
[[nodiscard]] const_iterator
const_iterator
cbegin() const noexcept
{
return const_iterator(m_head.m_next);
@@ -379,7 +379,7 @@ public:
/** Obtain a const iterator to the end of the list.
@return A const iterator pointing to the end of the list.
*/
[[nodiscard]] const_iterator
const_iterator
end() const noexcept
{
return const_iterator(&m_tail);
@@ -388,7 +388,7 @@ public:
/** Obtain a const iterator to the end of the list
@return A const iterator pointing to the end of the list.
*/
[[nodiscard]] const_iterator
const_iterator
cend() const noexcept
{
return const_iterator(&m_tail);
@@ -549,7 +549,7 @@ public:
@param element The element to obtain an iterator for.
@return A const iterator to the element.
*/
[[nodiscard]] const_iterator
const_iterator
const_iterator_to(T const& element) const noexcept
{
return const_iterator(static_cast<Node const*>(&element));

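The accessor hunks above and in the hunks that follow drop [[nodiscard]]. The attribute affects diagnostics only: removing it silences the "return value discarded" warning for callers, but every call site compiles the same way either way. A minimal sketch (function names illustrative only):

    // What dropping [[nodiscard]] changes for callers.
    [[nodiscard]] bool
    checked_empty()
    {
        return true;
    }

    bool
    unchecked_empty()
    {
        return true;
    }

    int main()
    {
        checked_empty();   // typically warns: discarded return value
        unchecked_empty(); // no diagnostic once the attribute is removed
        return 0;
    }
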
@@ -13,18 +13,22 @@ class LockFreeStackIterator
{
protected:
using Node = typename Container::Node;
using NodePtr = std::conditional_t<IsConst, Node const*, Node*>;
using NodePtr = typename std::conditional<IsConst, Node const*, Node*>::type;

public:
using iterator_category = std::forward_iterator_tag;
using value_type = typename Container::value_type;
using difference_type = typename Container::difference_type;
using pointer =
std::conditional_t<IsConst, typename Container::const_pointer, typename Container::pointer>;
using reference = std::
conditional_t<IsConst, typename Container::const_reference, typename Container::reference>;
using pointer = typename std::
conditional<IsConst, typename Container::const_pointer, typename Container::pointer>::type;
using reference = typename std::conditional<
IsConst,
typename Container::const_reference,
typename Container::reference>::type;

LockFreeStackIterator() = default;
LockFreeStackIterator() : m_node()
{
}

LockFreeStackIterator(NodePtr node) : m_node(node)
{
@@ -77,7 +81,7 @@ public:
}

private:
NodePtr m_node{};
NodePtr m_node;
};

//------------------------------------------------------------------------------
@@ -162,7 +166,7 @@ public:
operator=(LockFreeStack const&) = delete;

/** Returns true if the stack is empty. */
[[nodiscard]] bool
bool
empty() const
{
return m_head.load() == &m_end;
@@ -237,25 +241,25 @@ public:
return iterator(&m_end);
}

[[nodiscard]] const_iterator
const_iterator
begin() const
{
return const_iterator(m_head.load());
}

[[nodiscard]] const_iterator
const_iterator
end() const
{
return const_iterator(&m_end);
}

[[nodiscard]] const_iterator
const_iterator
cbegin() const
{
return const_iterator(m_head.load());
}

[[nodiscard]] const_iterator
const_iterator
cend() const
{
return const_iterator(&m_end);

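In the iterator hunk above, the default member initializer plus defaulted constructor (NodePtr m_node{}; with LockFreeStackIterator() = default;) is replaced by a hand-written constructor whose mem-initializer value-initializes the member. Both leave the pointer null. A sketch of the two styles (type names illustrative only):

    #include <cassert>

    struct IterA
    {
        int* m_node{};     // default member initializer
        IterA() = default; // the defaulted ctor uses it
    };

    struct IterB
    {
        int* m_node;
        IterB() : m_node() // mem-initializer value-initializes to nullptr
        {
        }
    };

    int main()
    {
        assert(IterA{}.m_node == nullptr);
        assert(IterB{}.m_node == nullptr);
        return 0;
    }
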
@@ -37,15 +37,15 @@ public:
parse(std::string_view input);

/** Produce a string from semantic version components. */
[[nodiscard]] std::string
std::string
print() const;

[[nodiscard]] bool
bool
isRelease() const noexcept
{
return preReleaseIdentifiers.empty();
}
[[nodiscard]] bool
bool
isPreRelease() const noexcept
{
return !isRelease();

@@ -69,7 +69,7 @@ template <class T>
struct is_uniquely_represented
: public std::integral_constant<
bool,
std::is_integral_v<T> || std::is_enum_v<T> || std::is_pointer_v<T>>
std::is_integral<T>::value || std::is_enum<T>::value || std::is_pointer<T>::value>
{
explicit is_uniquely_represented() = default;
};
@@ -210,7 +210,7 @@ hash_append(Hasher& h, T const& t) noexcept
template <class Hasher, class T>
inline std::enable_if_t<
!is_contiguously_hashable<T, Hasher>::value &&
(std::is_integral_v<T> || std::is_pointer_v<T> || std::is_enum_v<T>)>
(std::is_integral<T>::value || std::is_pointer<T>::value || std::is_enum<T>::value)>
hash_append(Hasher& h, T t) noexcept
{
detail::reverse_bytes(t);
@@ -218,7 +218,7 @@ hash_append(Hasher& h, T t) noexcept
}

template <class Hasher, class T>
inline std::enable_if_t<std::is_floating_point_v<T>>
inline std::enable_if_t<std::is_floating_point<T>::value>
hash_append(Hasher& h, T t) noexcept
{
if (t == 0)

@@ -121,13 +121,13 @@ public:
}
}

template <class Seed, std::enable_if_t<std::is_unsigned_v<Seed>>* = nullptr>
template <class Seed, std::enable_if_t<std::is_unsigned<Seed>::value>* = nullptr>
explicit xxhasher(Seed seed) : seed_(seed)
{
resetBuffers();
}

template <class Seed, std::enable_if_t<std::is_unsigned_v<Seed>>* = nullptr>
template <class Seed, std::enable_if_t<std::is_unsigned<Seed>::value>* = nullptr>
xxhasher(Seed seed, Seed) : seed_(seed)
{
resetBuffers();

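The trait rewrites above follow the same equivalence as the enable_if changes: the C++17 _v variable templates (std::is_integral_v and friends) are defined as constexpr bool constants equal to the corresponding trait's ::value member, so the swapped spellings always agree. A minimal sketch:

    #include <type_traits>

    // The _v variable templates are constexpr bool aliases for trait::value,
    // so these hold for any type, not just the ones shown.
    static_assert(std::is_integral_v<int> == std::is_integral<int>::value);
    static_assert(std::is_unsigned_v<unsigned> == std::is_unsigned<unsigned>::value);
    static_assert(
        std::is_floating_point_v<double> == std::is_floating_point<double>::value);

    int main()
    {
        return 0;
    }
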
@@ -8,7 +8,8 @@

#include <string>

namespace beast::insight {
namespace beast {
namespace insight {

/** Interface for a manager that allows collection of metrics.

@@ -116,4 +117,5 @@ public:
/** @} */
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -4,7 +4,8 @@

#include <memory>

namespace beast::insight {
namespace beast {
namespace insight {

/** A metric for measuring an integral value.

@@ -22,7 +23,9 @@ public:
/** Create a null metric.
A null metric reports no information.
*/
Counter() = default;
Counter()
{
}

/** Create the metric referencing the specified implementation.
Normally this won't be called directly. Instead, call the appropriate
@@ -88,4 +91,5 @@ private:
std::shared_ptr<CounterImpl> m_impl;
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -3,7 +3,8 @@
#include <cstdint>
#include <memory>

namespace beast::insight {
namespace beast {
namespace insight {

class Counter;

@@ -17,4 +18,5 @@ public:
increment(value_type amount) = 0;
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -5,7 +5,8 @@
#include <chrono>
#include <memory>

namespace beast::insight {
namespace beast {
namespace insight {

/** A metric for reporting event timing.

@@ -24,7 +25,9 @@ public:
/** Create a null metric.
A null metric reports no information.
*/
Event() = default;
Event()
{
}

/** Create the metric referencing the specified implementation.
Normally this won't be called directly. Instead, call the appropriate
@@ -45,7 +48,7 @@ public:
m_impl->notify(ceil<value_type>(value));
}

[[nodiscard]] std::shared_ptr<EventImpl> const&
std::shared_ptr<EventImpl> const&
impl() const
{
return m_impl;
@@ -55,4 +58,5 @@ private:
std::shared_ptr<EventImpl> m_impl;
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -3,7 +3,8 @@
#include <chrono>
#include <memory>

namespace beast::insight {
namespace beast {
namespace insight {

class Event;

@@ -17,4 +18,5 @@ public:
notify(value_type const& value) = 0;
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -4,7 +4,8 @@

#include <memory>

namespace beast::insight {
namespace beast {
namespace insight {

/** A metric for measuring an integral value.

@@ -24,7 +25,9 @@ public:
/** Create a null metric.
A null metric reports no information.
*/
Gauge() = default;
Gauge()
{
}

/** Create the metric referencing the specified implementation.
Normally this won't be called directly. Instead, call the appropriate
@@ -108,7 +111,7 @@ public:
}
/** @} */

[[nodiscard]] std::shared_ptr<GaugeImpl> const&
std::shared_ptr<GaugeImpl> const&
impl() const
{
return m_impl;
@@ -118,4 +121,5 @@ private:
std::shared_ptr<GaugeImpl> m_impl;
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -3,7 +3,8 @@
#include <cstdint>
#include <memory>

namespace beast::insight {
namespace beast {
namespace insight {

class Gauge;

@@ -20,4 +21,5 @@ public:
increment(difference_type amount) = 0;
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -5,7 +5,8 @@
#include <memory>
#include <string>

namespace beast::insight {
namespace beast {
namespace insight {

/** A collector front-end that manages a group of metrics. */
class Group : public Collector
@@ -14,8 +15,9 @@ public:
using ptr = std::shared_ptr<Group>;

/** Returns the name of this group, for diagnostics. */
[[nodiscard]] virtual std::string const&
virtual std::string const&
name() const = 0;
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -6,7 +6,8 @@
#include <memory>
#include <string>

namespace beast::insight {
namespace beast {
namespace insight {

/** A container for managing a set of metric groups. */
class Groups
@@ -31,4 +32,5 @@ public:
std::unique_ptr<Groups>
make_Groups(Collector::ptr const& collector);

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -4,7 +4,8 @@

#include <memory>

namespace beast::insight {
namespace beast {
namespace insight {

/** A reference to a handler for performing polled collection. */
class Hook final
@@ -13,7 +14,9 @@ public:
/** Create a null hook.
A null hook has no associated handler.
*/
Hook() = default;
Hook()
{
}

/** Create a hook referencing the specified implementation.
Normally this won't be called directly. Instead, call the appropriate
@@ -24,7 +27,7 @@ public:
{
}

[[nodiscard]] std::shared_ptr<HookImpl> const&
std::shared_ptr<HookImpl> const&
impl() const
{
return m_impl;
@@ -34,4 +37,5 @@ private:
std::shared_ptr<HookImpl> m_impl;
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -3,7 +3,8 @@
#include <functional>
#include <memory>

namespace beast::insight {
namespace beast {
namespace insight {

class HookImpl : public std::enable_shared_from_this<HookImpl>
{
@@ -13,4 +14,5 @@ public:
virtual ~HookImpl() = 0;
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -4,7 +4,8 @@

#include <memory>

namespace beast::insight {
namespace beast {
namespace insight {

/** A metric for measuring an integral value.

@@ -21,7 +22,9 @@ public:
/** Create a null metric.
A null metric reports no information.
*/
Meter() = default;
Meter()
{
}

/** Create the metric referencing the specified implementation.
Normally this won't be called directly. Instead, call the appropriate
@@ -63,7 +66,7 @@ public:
}
/** @} */

[[nodiscard]] std::shared_ptr<MeterImpl> const&
std::shared_ptr<MeterImpl> const&
impl() const
{
return m_impl;
@@ -73,4 +76,5 @@ private:
std::shared_ptr<MeterImpl> m_impl;
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -3,7 +3,8 @@
#include <cstdint>
#include <memory>

namespace beast::insight {
namespace beast {
namespace insight {

class Meter;

@@ -17,4 +18,5 @@ public:
increment(value_type amount) = 0;
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -2,7 +2,8 @@

#include <xrpl/beast/insight/Collector.h>

namespace beast::insight {
namespace beast {
namespace insight {

/** A Collector which does not collect metrics. */
class NullCollector : public Collector
@@ -14,4 +15,5 @@ public:
New();
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

@@ -4,7 +4,8 @@
#include <xrpl/beast/net/IPEndpoint.h>
#include <xrpl/beast/utility/Journal.h>

namespace beast::insight {
namespace beast {
namespace insight {

/** A Collector that reports metrics to a StatsD server.
Reference:
@@ -24,4 +25,5 @@ public:
New(IP::Endpoint const& address, std::string const& prefix, Journal journal);
};

} // namespace beast::insight
} // namespace insight
} // namespace beast

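The metric handle classes above (Counter, Event, Gauge, Hook, Meter) each replace a defaulted constructor with a user-provided empty one. For these types the observable result is the same, since the shared_ptr member default-constructs to null either way; "= default" mainly matters for triviality, which a shared_ptr member already rules out. A sketch of the two forms (class names illustrative only):

    #include <memory>

    struct CounterImpl;

    class CounterA
    {
    public:
        CounterA() = default; // removed form: compiler-generated
    private:
        std::shared_ptr<CounterImpl> m_impl;
    };

    class CounterB
    {
    public:
        CounterB() // added form: user-provided empty body
        {
        }
    private:
        std::shared_ptr<CounterImpl> m_impl;
    };

    int main()
    {
        CounterA a; // m_impl is null in both cases
        CounterB b;
        return 0;
    }
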
@@ -4,7 +4,8 @@

#include <boost/asio.hpp>

namespace beast::IP {
namespace beast {
namespace IP {

/** Convert to Endpoint.
The port is set to zero.
@@ -26,7 +27,8 @@ to_asio_address(Endpoint const& endpoint);
boost::asio::ip::tcp::endpoint
to_asio_endpoint(Endpoint const& endpoint);

} // namespace beast::IP
} // namespace IP
} // namespace beast

namespace beast {

@@ -4,7 +4,8 @@

#include <boost/asio/ip/address_v4.hpp>

namespace beast::IP {
namespace beast {
namespace IP {

using AddressV4 = boost::asio::ip::address_v4;

@@ -22,4 +23,5 @@ is_public(AddressV4 const& addr);
char
get_class(AddressV4 const& address);

} // namespace beast::IP
} // namespace IP
} // namespace beast

@@ -4,7 +4,8 @@

#include <boost/asio/ip/address_v6.hpp>

namespace beast::IP {
namespace beast {
namespace IP {

using AddressV6 = boost::asio::ip::address_v6;

@@ -16,4 +17,5 @@ is_private(AddressV6 const& addr);
bool
is_public(AddressV6 const& addr);

} // namespace beast::IP
} // namespace IP
} // namespace beast

@@ -8,7 +8,8 @@
#include <optional>
#include <string>

namespace beast::IP {
namespace beast {
namespace IP {

using Port = std::uint16_t;

@@ -20,7 +21,7 @@ public:
Endpoint();

/** Create an endpoint from the address and optional port. */
explicit Endpoint(Address addr, Port port = 0);
explicit Endpoint(Address const& addr, Port port = 0);

/** Create an Endpoint from a string.
If the port is omitted, the endpoint will have a zero port.
@@ -32,25 +33,25 @@ public:
from_string(std::string const& s);

/** Returns a string representing the endpoint. */
[[nodiscard]] std::string
std::string
to_string() const;

/** Returns the port number on the endpoint. */
[[nodiscard]] Port
Port
port() const
{
return m_port;
}

/** Returns a new Endpoint with a different port. */
[[nodiscard]] Endpoint
Endpoint
at_port(Port port) const
{
return Endpoint(m_addr, port);
}

/** Returns the address portion of this endpoint. */
[[nodiscard]] Address const&
Address const&
address() const
{
return m_addr;
@@ -58,22 +59,22 @@ public:

/** Convenience accessors for the address part. */
/** @{ */
[[nodiscard]] bool
bool
is_v4() const
{
return m_addr.is_v4();
}
[[nodiscard]] bool
bool
is_v6() const
{
return m_addr.is_v6();
}
[[nodiscard]] AddressV4
AddressV4
to_v4() const
{
return m_addr.to_v4();
}
[[nodiscard]] AddressV6
AddressV6
to_v6() const
{
return m_addr.to_v6();
@@ -183,7 +184,8 @@ operator<<(OutputStream& os, Endpoint const& endpoint)
std::istream&
operator>>(std::istream& is, Endpoint& endpoint);

} // namespace beast::IP
} // namespace IP
} // namespace beast

//------------------------------------------------------------------------------

@@ -12,7 +12,8 @@
#include <string>
#include <vector>

namespace beast::rfc2616 {
namespace beast {
namespace rfc2616 {

namespace detail {

@@ -369,4 +370,5 @@ is_keep_alive(boost::beast::http::message<isRequest, Body, Fields> const& m)
"close");
}

} // namespace beast::rfc2616
} // namespace rfc2616
} // namespace beast

@@ -15,7 +15,8 @@
#include <thread>
#include <vector>

namespace beast::test {
namespace beast {
namespace test {

/** Mix-in to support tests using asio coroutines.

@@ -124,4 +125,5 @@ enable_yield_to::spawn(F0&& f, FN&&... fn)
spawn(fn...);
}

} // namespace beast::test
} // namespace test
} // namespace beast

@@ -15,7 +15,7 @@ template <typename T>
std::string
type_name()
{
using TR = std::remove_reference_t<T>;
using TR = typename std::remove_reference<T>::type;

std::string name = typeid(TR).name();

@@ -27,15 +27,15 @@ type_name()
}
#endif

if (std::is_const_v<TR>)
if (std::is_const<TR>::value)
name += " const";
if (std::is_volatile_v<TR>)
if (std::is_volatile<TR>::value)
name += " volatile";
if (std::is_lvalue_reference_v<T>)
if (std::is_lvalue_reference<T>::value)
{
name += "&";
}
else if (std::is_rvalue_reference_v<T>)
else if (std::is_rvalue_reference<T>::value)
{
name += "&&";
}

@@ -8,7 +8,8 @@
#include <ostream>
#include <string>

namespace beast::unit_test {
namespace beast {
namespace unit_test {

/** Utility for producing nicely composed output of amounts with units. */
class amount
@@ -41,4 +42,5 @@ operator<<(std::ostream& s, amount const& t)
return s;
}

} // namespace beast::unit_test
} // namespace unit_test
} // namespace beast

@@ -4,7 +4,9 @@

#pragma once

namespace beast::unit_test::detail {
namespace beast {
namespace unit_test {
namespace detail {

/** Adapter to constrain a container interface.
The interface allows for limited read only operations. Derived classes
@@ -25,7 +27,7 @@ protected:
return m_cont;
}

[[nodiscard]] cont_type const&
cont_type const&
cont() const
{
return m_cont;
@@ -39,14 +41,14 @@ public:
using const_iterator = typename cont_type::const_iterator;

/** Returns `true` if the container is empty. */
[[nodiscard]] bool
bool
empty() const
{
return m_cont.empty();
}

/** Returns the number of items in the container. */
[[nodiscard]] size_type
size_type
size() const
{
return m_cont.size();
@@ -54,25 +56,25 @@ public:

/** Returns forward iterators for traversal. */
/** @{ */
[[nodiscard]] const_iterator
const_iterator
begin() const
{
return m_cont.cbegin();
}

[[nodiscard]] const_iterator
const_iterator
cbegin() const
{
return m_cont.cbegin();
}

[[nodiscard]] const_iterator
const_iterator
end() const
{
return m_cont.cend();
}

[[nodiscard]] const_iterator
const_iterator
cend() const
{
return m_cont.cend();
@@ -80,4 +82,6 @@ public:
/** @} */
};

} // namespace beast::unit_test::detail
} // namespace detail
} // namespace unit_test
} // namespace beast

@@ -6,7 +6,8 @@

#include <xrpl/beast/unit_test/suite_list.h>

namespace beast::unit_test {
namespace beast {
namespace unit_test {

namespace detail {

@@ -41,4 +42,5 @@ global_suites()
return detail::global_suites();
}

} // namespace beast::unit_test
} // namespace unit_test
} // namespace beast

Some files were not shown because too many files have changed in this diff.