Mirror of https://github.com/XRPLF/rippled.git, synced 2026-02-05 06:25:36 +00:00

Compare commits (34 commits): a1q123456/... pratik/Fix
Commits in this range (SHA1):
bbb03e153e
63b6ec98ea
7ef9fb4290
49dcb6b60b
26dd1fafe3
4b99771021
e6664fe4cf
b5001bc258
5dacfa1938
394b256f02
5bfa38f6c5
7c6c49bf98
6334be1ff0
a9c3bb84ba
ca99e40290
7612c1af0c
67e40be1ab
0132174a7b
8773cc4bbf
dabdadfff5
bfe2cd7893
0584c20f36
3ced0b27b7
f83b27f7dd
cdb41b5376
f223c89a9f
efe07c09f3
79cde8b199
2078ce01cf
2770a9cdf3
05ef3b1ad8
7dd4dbe285
b32a5f2c08
df76002a44
@@ -89,6 +89,7 @@ words:
- endmacro
- exceptioned
- Falco
- fcontext
- finalizers
- firewalled
- fmtdur
@@ -101,6 +102,7 @@ words:
- gpgcheck
- gpgkey
- hotwallet
- hwaddress
- hwrap
- ifndef
- inequation
@@ -217,6 +219,7 @@ words:
- soci
- socidb
- sslws
- stackful
- statsd
- STATSDCOLLECTOR
- stissue
.github/scripts/levelization/README.md (vendored): 6 lines changed
@@ -70,7 +70,7 @@ that `test` code should _never_ be included in `xrpl` or `xrpld` code.)

## Validation

-The [levelization](generate.py) script takes no parameters,
+The [levelization](generate.sh) script takes no parameters,
reads no environment variables, and can be run from any directory,
as long as it is in the expected location in the rippled repo.
It can be run at any time from within a checked out repo, and will
@@ -104,7 +104,7 @@ It generates many files of [results](results):
Github Actions workflow to test that levelization loops haven't
changed. Unfortunately, if changes are detected, it can't tell if
they are improvements or not, so if you have resolved any issues or
-done anything else to improve levelization, run `generate.py`,
+done anything else to improve levelization, run `levelization.sh`,
and commit the updated results.

The `loops.txt` and `ordering.txt` files relate the modules
@@ -128,7 +128,7 @@ The committed files hide the detailed values intentionally, to
prevent false alarms and merging issues, and because it's easy to
get those details locally.

-1. Run `generate.py`
+1. Run `levelization.sh`
2. Grep the modules in `paths.txt`.
   - For example, if a cycle is found `A ~= B`, simply `grep -w
     A .github/scripts/levelization/results/paths.txt | grep -w B`
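For readers unfamiliar with what a levelization loop looks like in source, here is a hypothetical two-module sketch of the kind of cycle that would show up as `a ~= b` in `loops.txt`. The module and file names here are invented for illustration; they are not from the rippled codebase.

```cpp
// --- a/widget.h (module "a") ---
#pragma once
#include "b/gadget.h"  // module a depends on module b

struct Widget
{
    Gadget gadget;  // needs b's type
};

// --- b/factory.h (a different header in module "b") ---
#pragma once
#include "a/widget.h"  // module b depends back on module a, closing the loop

Widget makeWidget();  // needs a's type
```

Each individual header compiles, but at the module level `a` includes `b` and `b` includes `a`, so neither level is strictly lower than the other; that is the edge pair the loop search reports.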
.github/scripts/levelization/generate.py (vendored, deleted): 369 lines
@@ -1,369 +0,0 @@
#!/usr/bin/env python3

"""
Usage: generate.py
This script takes no parameters, reads no environment variables,
and can be run from any directory, as long as it is in the expected
location in the repo.
"""

import os
import re
import subprocess
import sys
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple, Set, Optional


# Compile regex patterns once at module level
INCLUDE_PATTERN = re.compile(r"^\s*#include.*/.*\.h")
INCLUDE_PATH_PATTERN = re.compile(r'[<"]([^>"]+)[>"]')


def dictionary_sort_key(s: str) -> str:
    """
    Create a sort key that mimics 'sort -d' (dictionary order).
    Dictionary order only considers blanks and alphanumeric characters.
    This means punctuation like '.' is ignored during sorting.
    """
    # Keep only alphanumeric characters and spaces
    return "".join(c for c in s if c.isalnum() or c.isspace())


def get_level(file_path: str) -> str:
    """
    Extract the level from a file path (second and third directory components).
    Equivalent to bash: cut -d/ -f 2,3

    Examples:
        src/xrpld/app/main.cpp -> xrpld.app
        src/libxrpl/protocol/STObject.cpp -> libxrpl.protocol
        include/xrpl/basics/base_uint.h -> xrpl.basics
    """
    parts = file_path.split("/")

    # Get fields 2 and 3 (indices 1 and 2 in 0-based indexing)
    if len(parts) >= 3:
        level = f"{parts[1]}/{parts[2]}"
    elif len(parts) >= 2:
        level = f"{parts[1]}/toplevel"
    else:
        level = file_path

    # If the "level" indicates a file, cut off the filename
    if "." in level.split("/")[-1]:  # Avoid Path object creation
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        level = level.rsplit("/", 1)[0] + "/toplevel"

    return level.replace("/", ".")


def extract_include_level(include_line: str) -> Optional[str]:
    """
    Extract the include path from an #include directive.
    Gets the first two directory components from the include path.
    Equivalent to bash: cut -d/ -f 1,2

    Examples:
        #include <xrpl/basics/base_uint.h> -> xrpl.basics
        #include "xrpld/app/main/Application.h" -> xrpld.app
    """
    # Remove everything before the quote or angle bracket
    match = INCLUDE_PATH_PATTERN.search(include_line)
    if not match:
        return None

    include_path = match.group(1)
    parts = include_path.split("/")

    # Get first two fields (indices 0 and 1)
    if len(parts) >= 2:
        include_level = f"{parts[0]}/{parts[1]}"
    else:
        include_level = include_path

    # If the "includelevel" indicates a file, cut off the filename
    if "." in include_level.split("/")[-1]:  # Avoid Path object creation
        include_level = include_level.rsplit("/", 1)[0] + "/toplevel"

    return include_level.replace("/", ".")


def find_repo_root(start_path: Path, depth_limit: int = 10) -> Path:
    """
    Find the repository root by looking for .git directory or src/include folders.
    Walks up the directory tree from the start path.
    """
    current = start_path.resolve()

    # Walk up the directory tree
    for _ in range(depth_limit):  # Limit search depth to prevent infinite loops
        # Check if this directory has src or include folders
        has_src = (current / "src").exists()
        has_include = (current / "include").exists()

        if has_src or has_include:
            return current

        # Check if this is a git repository root
        if (current / ".git").exists():
            # Check if it has src or include nearby
            if has_src or has_include:
                return current

        # Move up one level
        parent = current.parent
        if parent == current:  # Reached filesystem root
            break
        current = parent

    # If we couldn't find it, raise an error
    raise RuntimeError(
        "Could not find repository root. "
        "Expected to find a directory containing 'src' and/or 'include' folders."
    )


def get_scan_directories(repo_root: Path) -> List[Path]:
    """
    Get the list of directories to scan for include files.
    Returns paths that actually exist.
    """
    directories = []

    for dir_name in ["include", "src"]:
        dir_path = repo_root / dir_name
        if dir_path.exists() and dir_path.is_dir():
            directories.append(dir_path)

    if not directories:
        raise RuntimeError(f"No 'src' or 'include' directories found in {repo_root}")

    return directories


def main():
    # Change to the script's directory
    script_dir = Path(__file__).parent.resolve()
    os.chdir(script_dir)

    # If the shell is interactive, clean up any flotsam before analyzing
    # Match bash behavior: check if PS1 is set (indicates interactive shell)
    # When running a script, PS1 is not set even if stdin/stdout are TTYs
    if os.environ.get("PS1"):
        try:
            subprocess.run(["git", "clean", "-ix"], check=False, timeout=30)
        except (subprocess.TimeoutExpired, KeyboardInterrupt):
            print("Skipping git clean...")
        except Exception:
            # If git clean fails for any reason, just continue
            pass

    # Clean up and create results directory
    results_dir = script_dir / "results"
    if results_dir.exists():
        import shutil

        shutil.rmtree(results_dir)
    results_dir.mkdir()

    # Find the repository root by searching for src and include directories
    try:
        repo_root = find_repo_root(script_dir)
        scan_dirs = get_scan_directories(repo_root)

        print(f"Found repository root: {repo_root}")
        print("Scanning directories:")
        for scan_dir in scan_dirs:
            print(f"  - {scan_dir.relative_to(repo_root)}")
    except RuntimeError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)

    print("\nScanning for raw includes...")
    # Find all #include directives
    raw_includes: List[Tuple[str, str]] = []
    rawincludes_file = results_dir / "rawincludes.txt"

    # Write to file as we go to avoid storing everything in memory
    with open(rawincludes_file, "w", buffering=8192) as raw_f:
        for dir_path in scan_dirs:
            print(f"  Scanning {dir_path.relative_to(repo_root)}...")

            for file_path in dir_path.rglob("*"):
                if not file_path.is_file():
                    continue

                try:
                    rel_path_str = str(file_path.relative_to(repo_root))

                    # Read file with larger buffer for better performance
                    with open(
                        file_path,
                        "r",
                        encoding="utf-8",
                        errors="ignore",
                        buffering=8192,
                    ) as f:
                        for line in f:
                            # Quick check before regex
                            if "#include" not in line or "boost" in line:
                                continue

                            if INCLUDE_PATTERN.match(line):
                                line_stripped = line.strip()
                                entry = f"{rel_path_str}:{line_stripped}\n"
                                print(entry, end="")
                                raw_f.write(entry)
                                raw_includes.append((rel_path_str, line_stripped))
                except Exception as e:
                    print(f"Error reading {file_path}: {e}", file=sys.stderr)

    # Build levelization paths and count directly (no need to sort first)
    print("Build levelization paths")
    path_counts: Dict[Tuple[str, str], int] = defaultdict(int)

    for file_path, include_line in raw_includes:
        level = get_level(file_path)
        include_level = extract_include_level(include_line)

        if include_level and level != include_level:
            path_counts[(level, include_level)] += 1

    # Sort and deduplicate paths (using dictionary order like bash 'sort -d')
    print("Sort and deduplicate paths")

    paths_file = results_dir / "paths.txt"
    with open(paths_file, "w") as f:
        # Sort using dictionary order: only alphanumeric and spaces matter
        sorted_items = sorted(
            path_counts.items(),
            key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
        )
        for (level, include_level), count in sorted_items:
            line = f"{count:7} {level} {include_level}\n"
            print(line.rstrip())
            f.write(line)

    # Split into flat-file database
    print("Split into flat-file database")
    includes_dir = results_dir / "includes"
    included_by_dir = results_dir / "included_by"
    includes_dir.mkdir()
    included_by_dir.mkdir()

    # Batch writes by grouping data first to avoid repeated file opens
    includes_data: Dict[str, List[Tuple[str, int]]] = defaultdict(list)
    included_by_data: Dict[str, List[Tuple[str, int]]] = defaultdict(list)

    # Process in sorted order to match bash script behavior (dictionary order)
    sorted_items = sorted(
        path_counts.items(),
        key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
    )
    for (level, include_level), count in sorted_items:
        includes_data[level].append((include_level, count))
        included_by_data[include_level].append((level, count))

    # Write all includes files in sorted order (dictionary order)
    for level in sorted(includes_data.keys(), key=dictionary_sort_key):
        entries = includes_data[level]
        with open(includes_dir / level, "w") as f:
            for include_level, count in entries:
                line = f"{include_level} {count}\n"
                print(line.rstrip())
                f.write(line)

    # Write all included_by files in sorted order (dictionary order)
    for include_level in sorted(included_by_data.keys(), key=dictionary_sort_key):
        entries = included_by_data[include_level]
        with open(included_by_dir / include_level, "w") as f:
            for level, count in entries:
                line = f"{level} {count}\n"
                print(line.rstrip())
                f.write(line)

    # Search for loops
    print("Search for loops")
    loops_file = results_dir / "loops.txt"
    ordering_file = results_dir / "ordering.txt"

    loops_found: Set[Tuple[str, str]] = set()

    # Pre-load all include files into memory to avoid repeated I/O
    # This is the biggest optimization - we were reading files repeatedly in nested loops
    # Use list of tuples to preserve file order
    includes_cache: Dict[str, List[Tuple[str, int]]] = {}
    includes_lookup: Dict[str, Dict[str, int]] = {}  # For fast lookup

    # Note: bash script uses 'for source in *' which uses standard glob sorting,
    # NOT dictionary order. So we use standard sorted() here, not dictionary_sort_key.
    for include_file in sorted(includes_dir.iterdir(), key=lambda p: p.name):
        if not include_file.is_file():
            continue

        includes_cache[include_file.name] = []
        includes_lookup[include_file.name] = {}
        with open(include_file, "r") as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) >= 2:
                    include_name = parts[0]
                    include_count = int(parts[1])
                    includes_cache[include_file.name].append(
                        (include_name, include_count)
                    )
                    includes_lookup[include_file.name][include_name] = include_count

    with open(loops_file, "w", buffering=8192) as loops_f, open(
        ordering_file, "w", buffering=8192
    ) as ordering_f:

        # Use standard sorting to match bash glob expansion 'for source in *'
        for source in sorted(includes_cache.keys()):
            source_includes = includes_cache[source]

            for include, include_freq in source_includes:
                # Check if include file exists and references source
                if include not in includes_lookup:
                    continue

                source_freq = includes_lookup[include].get(source)

                if source_freq is not None:
                    # Found a loop
                    loop_key = tuple(sorted([source, include]))
                    if loop_key in loops_found:
                        continue
                    loops_found.add(loop_key)

                    loops_f.write(f"Loop: {source} {include}\n")

                    # If the counts are close, indicate that the two modules are
                    # on the same level, though they shouldn't be
                    diff = include_freq - source_freq
                    if diff > 3:
                        loops_f.write(f"  {source} > {include}\n\n")
                    elif diff < -3:
                        loops_f.write(f"  {include} > {source}\n\n")
                    elif source_freq == include_freq:
                        loops_f.write(f"  {include} == {source}\n\n")
                    else:
                        loops_f.write(f"  {include} ~= {source}\n\n")
                else:
                    ordering_f.write(f"{source} > {include}\n")

    # Print results
    print("\nOrdering:")
    with open(ordering_file, "r") as f:
        print(f.read(), end="")

    print("\nLoops:")
    with open(loops_file, "r") as f:
        print(f.read(), end="")


if __name__ == "__main__":
    main()
.github/scripts/levelization/generate.sh (vendored, new executable file): 130 lines
@@ -0,0 +1,130 @@
#!/bin/bash

# Usage: generate.sh
# This script takes no parameters, reads no environment variables,
# and can be run from any directory, as long as it is in the expected
# location in the repo.

pushd $( dirname $0 )

if [ -v PS1 ]
then
    # if the shell is interactive, clean up any flotsam before analyzing
    git clean -ix
fi

# Ensure all sorting is ASCII-order consistently across platforms.
export LANG=C

rm -rfv results
mkdir results
includes="$( pwd )/results/rawincludes.txt"
pushd ../../..
echo Raw includes:
grep -r '^[ ]*#include.*/.*\.h' include src | \
    grep -v boost | tee ${includes}
popd
pushd results

oldifs=${IFS}
IFS=:
mkdir includes
mkdir included_by
echo Build levelization paths
exec 3< ${includes} # open rawincludes.txt for input
while read -r -u 3 file include
do
    level=$( echo ${file} | cut -d/ -f 2,3 )
    # If the "level" indicates a file, cut off the filename
    if [[ "${level##*.}" != "${level}" ]]
    then
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        level="$( dirname ${level} )/toplevel"
    fi
    level=$( echo ${level} | tr '/' '.' )

    includelevel=$( echo ${include} | sed 's/.*["<]//; s/[">].*//' | \
        cut -d/ -f 1,2 )
    if [[ "${includelevel##*.}" != "${includelevel}" ]]
    then
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        includelevel="$( dirname ${includelevel} )/toplevel"
    fi
    includelevel=$( echo ${includelevel} | tr '/' '.' )

    if [[ "$level" != "$includelevel" ]]
    then
        echo $level $includelevel | tee -a paths.txt
    fi
done
echo Sort and deduplicate paths
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
mv sortedpaths.txt paths.txt
exec 3>&- # close fd 3
IFS=${oldifs}
unset oldifs

echo Split into flat-file database
exec 4<paths.txt # open paths.txt for input
while read -r -u 4 count level include
do
    echo ${include} ${count} | tee -a includes/${level}
    echo ${level} ${count} | tee -a included_by/${include}
done
exec 4>&- # close fd 4

loops="$( pwd )/loops.txt"
ordering="$( pwd )/ordering.txt"
pushd includes
echo Search for loops
# Redirect stdout to a file
exec 4>&1
exec 1>"${loops}"
for source in *
do
    if [[ -f "$source" ]]
    then
        exec 5<"${source}" # open for input
        while read -r -u 5 include includefreq
        do
            if [[ -f $include ]]
            then
                if grep -q -w $source $include
                then
                    if grep -q -w "Loop: $include $source" "${loops}"
                    then
                        continue
                    fi
                    sourcefreq=$( grep -w $source $include | cut -d\  -f2 )
                    echo "Loop: $source $include"
                    # If the counts are close, indicate that the two modules are
                    # on the same level, though they shouldn't be
                    if [[ $(( $includefreq - $sourcefreq )) -gt 3 ]]
                    then
                        echo -e "  $source > $include\n"
                    elif [[ $(( $sourcefreq - $includefreq )) -gt 3 ]]
                    then
                        echo -e "  $include > $source\n"
                    elif [[ $sourcefreq -eq $includefreq ]]
                    then
                        echo -e "  $include == $source\n"
                    else
                        echo -e "  $include ~= $source\n"
                    fi
                else
                    echo "$source > $include" >> "${ordering}"
                fi
            fi
        done
        exec 5>&- # close fd 5
    fi
done
exec 1>&4 # restore stdout
exec 4>&- # close fd 4
cat "${ordering}"
cat "${loops}"
popd
popd
popd
@@ -153,7 +153,6 @@ tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.net
xrpl.core > xrpl.basics
xrpl.core > xrpl.json
-xrpl.core > xrpl.ledger
xrpl.json > xrpl.basics
xrpl.ledger > xrpl.basics
xrpl.ledger > xrpl.protocol
.github/scripts/strategy-matrix/generate.py (vendored): 71 lines changed
@@ -51,20 +51,22 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
    # Only generate a subset of configurations in PRs.
    if not all:
        # Debian:
-        # - Bookworm using GCC 13: Release on linux/amd64, set the reference
-        #   fee to 500.
-        # - Bookworm using GCC 15: Debug on linux/amd64, enable code
-        #   coverage (which will be done below).
-        # - Bookworm using Clang 16: Debug on linux/arm64, enable voidstar.
-        # - Bookworm using Clang 17: Release on linux/amd64, set the
-        #   reference fee to 1000.
-        # - Bookworm using Clang 20: Debug on linux/amd64.
+        # - Bookworm using GCC 13: Release and Unity on linux/amd64, set
+        #   the reference fee to 500.
+        # - Bookworm using GCC 15: Debug and no Unity on linux/amd64, enable
+        #   code coverage (which will be done below).
+        # - Bookworm using Clang 16: Debug and no Unity on linux/arm64,
+        #   enable voidstar.
+        # - Bookworm using Clang 17: Release and no Unity on linux/amd64,
+        #   set the reference fee to 1000.
+        # - Bookworm using Clang 20: Debug and Unity on linux/amd64.
        if os["distro_name"] == "debian":
            skip = True
            if os["distro_version"] == "bookworm":
                if (
                    f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-13"
                    and build_type == "Release"
+                    and "-Dunity=ON" in cmake_args
                    and architecture["platform"] == "linux/amd64"
                ):
                    cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}"
@@ -72,12 +74,14 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                if (
                    f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
                    and build_type == "Debug"
+                    and "-Dunity=OFF" in cmake_args
                    and architecture["platform"] == "linux/amd64"
                ):
                    skip = False
                if (
                    f"{os['compiler_name']}-{os['compiler_version']}" == "clang-16"
                    and build_type == "Debug"
+                    and "-Dunity=OFF" in cmake_args
                    and architecture["platform"] == "linux/arm64"
                ):
                    cmake_args = f"-Dvoidstar=ON {cmake_args}"
@@ -85,6 +89,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                if (
                    f"{os['compiler_name']}-{os['compiler_version']}" == "clang-17"
                    and build_type == "Release"
+                    and "-Dunity=ON" in cmake_args
                    and architecture["platform"] == "linux/amd64"
                ):
                    cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=1000 {cmake_args}"
@@ -92,6 +97,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                if (
                    f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
                    and build_type == "Debug"
+                    and "-Dunity=ON" in cmake_args
                    and architecture["platform"] == "linux/amd64"
                ):
                    skip = False
@@ -99,14 +105,15 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                continue

        # RHEL:
-        # - 9 using GCC 12: Debug on linux/amd64.
-        # - 10 using Clang: Release on linux/amd64.
+        # - 9 using GCC 12: Debug and Unity on linux/amd64.
+        # - 10 using Clang: Release and no Unity on linux/amd64.
        if os["distro_name"] == "rhel":
            skip = True
            if os["distro_version"] == "9":
                if (
                    f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
                    and build_type == "Debug"
+                    and "-Dunity=ON" in cmake_args
                    and architecture["platform"] == "linux/amd64"
                ):
                    skip = False
@@ -114,6 +121,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                if (
                    f"{os['compiler_name']}-{os['compiler_version']}" == "clang-any"
                    and build_type == "Release"
+                    and "-Dunity=OFF" in cmake_args
                    and architecture["platform"] == "linux/amd64"
                ):
                    skip = False
@@ -121,16 +129,17 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                continue

        # Ubuntu:
-        # - Jammy using GCC 12: Debug on linux/arm64.
-        # - Noble using GCC 14: Release on linux/amd64.
-        # - Noble using Clang 18: Debug on linux/amd64.
-        # - Noble using Clang 19: Release on linux/arm64.
+        # - Jammy using GCC 12: Debug and no Unity on linux/arm64.
+        # - Noble using GCC 14: Release and Unity on linux/amd64.
+        # - Noble using Clang 18: Debug and no Unity on linux/amd64.
+        # - Noble using Clang 19: Release and Unity on linux/arm64.
        if os["distro_name"] == "ubuntu":
            skip = True
            if os["distro_version"] == "jammy":
                if (
                    f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
                    and build_type == "Debug"
+                    and "-Dunity=OFF" in cmake_args
                    and architecture["platform"] == "linux/arm64"
                ):
                    skip = False
@@ -138,18 +147,21 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                if (
                    f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-14"
                    and build_type == "Release"
+                    and "-Dunity=ON" in cmake_args
                    and architecture["platform"] == "linux/amd64"
                ):
                    skip = False
                if (
                    f"{os['compiler_name']}-{os['compiler_version']}" == "clang-18"
                    and build_type == "Debug"
+                    and "-Dunity=OFF" in cmake_args
                    and architecture["platform"] == "linux/amd64"
                ):
                    skip = False
                if (
                    f"{os['compiler_name']}-{os['compiler_version']}" == "clang-19"
                    and build_type == "Release"
+                    and "-Dunity=ON" in cmake_args
                    and architecture["platform"] == "linux/arm64"
                ):
                    skip = False
@@ -157,16 +169,20 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                continue

        # MacOS:
-        # - Debug on macos/arm64.
+        # - Debug and no Unity on macos/arm64.
        if os["distro_name"] == "macos" and not (
-            build_type == "Debug" and architecture["platform"] == "macos/arm64"
+            build_type == "Debug"
+            and "-Dunity=OFF" in cmake_args
+            and architecture["platform"] == "macos/arm64"
        ):
            continue

        # Windows:
-        # - Release on windows/amd64.
+        # - Release and Unity on windows/amd64.
        if os["distro_name"] == "windows" and not (
-            build_type == "Release" and architecture["platform"] == "windows/amd64"
+            build_type == "Release"
+            and "-Dunity=ON" in cmake_args
+            and architecture["platform"] == "windows/amd64"
        ):
            continue

@@ -193,17 +209,18 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
    ):
        continue

-    # Enable code coverage for Debian Bookworm using GCC 15 in Debug on
-    # linux/amd64
+    # Enable code coverage for Debian Bookworm using GCC 15 in Debug and no
+    # Unity on linux/amd64
    if (
        f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
        and build_type == "Debug"
+        and "-Dunity=OFF" in cmake_args
        and architecture["platform"] == "linux/amd64"
    ):
        cmake_args = f"-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}"

    # Generate a unique name for the configuration, e.g. macos-arm64-debug
-    # or debian-bookworm-gcc-12-amd64-release.
+    # or debian-bookworm-gcc-12-amd64-release-unity.
    config_name = os["distro_name"]
    if (n := os["distro_version"]) != "":
        config_name += f"-{n}"
@@ -217,16 +234,20 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
    config_name += f"-{build_type.lower()}"
    if "-Dcoverage=ON" in cmake_args:
        config_name += "-coverage"
+    if "-Dunity=ON" in cmake_args:
+        config_name += "-unity"

    # Add the configuration to the list, with the most unique fields first,
    # so that they are easier to identify in the GitHub Actions UI, as long
    # names get truncated.
    # Add Address and Thread (both coupled with UB) sanitizers for specific bookworm distros.
    # GCC-Asan rippled-embedded tests are failing because of https://github.com/google/sanitizers/issues/856
-    if (
-        os["distro_version"] == "bookworm"
-        and f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
-    ):
+    if os[
+        "distro_version"
+    ] == "bookworm" and f"{os['compiler_name']}-{os['compiler_version']}" in [
+        "clang-20",
+        "gcc-13",
+    ]:
        # Add ASAN + UBSAN configuration.
        configurations.append(
            {
.github/scripts/strategy-matrix/linux.json (vendored): 2 lines changed
@@ -208,5 +208,5 @@
    }
  ],
  "build_type": ["Debug", "Release"],
-  "cmake_args": [""]
+  "cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
}
.github/scripts/strategy-matrix/macos.json (vendored): 5 lines changed
@@ -15,5 +15,8 @@
    }
  ],
  "build_type": ["Debug", "Release"],
-  "cmake_args": ["-DCMAKE_POLICY_VERSION_MINIMUM=3.5"]
+  "cmake_args": [
+    "-Dunity=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5",
+    "-Dunity=ON -DCMAKE_POLICY_VERSION_MINIMUM=3.5"
+  ]
}
.github/scripts/strategy-matrix/windows.json (vendored): 2 lines changed
@@ -15,5 +15,5 @@
    }
  ],
  "build_type": ["Debug", "Release"],
-  "cmake_args": [""]
+  "cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
}
.github/workflows/reusable-build-test-config.yml (vendored): 20 lines changed
@@ -205,14 +205,18 @@ jobs:
      - name: Set sanitizer options
        if: ${{ !inputs.build_only && env.SANITIZERS_ENABLED == 'true' }}
        run: |
-          echo "ASAN_OPTIONS=print_stacktrace=1:detect_container_overflow=0:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/asan.supp" >> ${GITHUB_ENV}
-          echo "TSAN_OPTIONS=second_deadlock_stack=1:halt_on_error=0:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/tsan.supp" >> ${GITHUB_ENV}
-          echo "UBSAN_OPTIONS=suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/ubsan.supp" >> ${GITHUB_ENV}
-          echo "LSAN_OPTIONS=suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/lsan.supp" >> ${GITHUB_ENV}
+          echo "ASAN_OPTIONS=include=${GITHUB_WORKSPACE}/sanitizers/suppressions/runtime-asan-options.txt:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/asan.supp" >> ${GITHUB_ENV}
+          echo "TSAN_OPTIONS=include=${GITHUB_WORKSPACE}/sanitizers/suppressions/runtime-tsan-options.txt:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/tsan.supp" >> ${GITHUB_ENV}
+          echo "UBSAN_OPTIONS=include=${GITHUB_WORKSPACE}/sanitizers/suppressions/runtime-ubsan-options.txt:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/ubsan.supp" >> ${GITHUB_ENV}
+          echo "LSAN_OPTIONS=include=${GITHUB_WORKSPACE}/sanitizers/suppressions/runtime-lsan-options.txt:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/lsan.supp" >> ${GITHUB_ENV}

      - name: Run the separate tests
        # We continue on error here because we want to try the Embedded tests before
        # failing. This will give us details on all the failures at once.
        continue-on-error: true
        if: ${{ !inputs.build_only }}
        working-directory: ${{ env.BUILD_DIR }}
        id: separate_tests
        # Windows locks some of the build files while running tests, and parallel jobs can collide
        env:
          BUILD_TYPE: ${{ inputs.build_type }}
@@ -228,8 +232,14 @@ jobs:
        working-directory: ${{ runner.os == 'Windows' && format('{0}/{1}', env.BUILD_DIR, inputs.build_type) || env.BUILD_DIR }}
        env:
-          BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
+          PARALLELISM: ${{ env.SANITIZERS_ENABLED == 'true' && '1' || steps.nproc.outputs.nproc }}
        run: |
-          ./xrpld --unittest --unittest-jobs "${BUILD_NPROC}"
+          ./xrpld --unittest --unittest-jobs "${PARALLELISM}"

      # Pipeline should fail if the separate tests failed.
      - name: Check results of the SeparateTests
        if: ${{ !inputs.build_only && steps.separate_tests.outcome == 'failure' }}
        run: exit 1

      - name: Debug failure (Linux)
        if: ${{ failure() && runner.os == 'Linux' && !inputs.build_only }}
@@ -20,7 +20,7 @@ jobs:
      - name: Checkout repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
      - name: Check levelization
-        run: python .github/scripts/levelization/generate.py
+        run: .github/scripts/levelization/generate.sh
      - name: Check for differences
        env:
          MESSAGE: |
@@ -32,7 +32,7 @@ jobs:
            removed from loops.txt, it's probably an improvement, while if
            something was added, it's probably a regression.

-            Run '.github/scripts/levelization/generate.py' in your repo, commit
+            Run '.github/scripts/levelization/generate.sh' in your repo, commit
            and push the changes. See .github/scripts/levelization/README.md for
            more info.
        run: |
.gitignore (vendored): 3 lines changed
@@ -71,6 +71,3 @@ DerivedData
/.augment
/.claude
/CLAUDE.md
-
-# Python
-__pycache__
BUILD.md: 37 lines changed
@@ -368,36 +368,6 @@ The workaround for this error is to add two lines to your profile:
   tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
   ```

-### Set Up Ccache
-
-To speed up repeated compilations, we recommend that you install
-[ccache](https://ccache.dev), a tool that wraps your compiler so that it can
-cache build objects locally.
-
-#### Linux
-
-You can install it using the package manager, e.g. `sudo apt install ccache`
-(Ubuntu) or `sudo dnf install ccache` (RHEL).
-
-#### macOS
-
-You can install it using Homebrew, i.e. `brew install ccache`.
-
-#### Windows
-
-You can install it using Chocolatey, i.e. `choco install ccache`. If you already
-have Ccache installed, then `choco upgrade ccache` will update it to the latest
-version. However, if you see an error such as:
-
-```
-terminate called after throwing an instance of 'std::bad_alloc'
-  what():  std::bad_alloc
-C:\Program Files\Microsoft Visual Studio\2022\Community\MSBuild\Microsoft\VC\v170\Microsoft.CppCommon.targets(617,5): error MSB6006: "cl.exe" exited with code 3.
-```
-
-then please install a specific version of Ccache that we know works, via: `choco
-install ccache --version 4.11.3 --allow-downgrade`.
-
### Build and Test

1. Create a build directory and move into it.
@@ -575,10 +545,16 @@ See [Sanitizers docs](./docs/build/sanitizers.md) for more details.
| `assert`   | OFF | Enable assertions. |
| `coverage` | OFF | Prepare the coverage report. |
| `tests`    | OFF | Build tests. |
+| `unity`    | OFF | Configure a unity build. |
| `xrpld`    | OFF | Build the xrpld application, and not just the libxrpl library. |
| `werr`     | OFF | Treat compilation warnings as errors |
| `wextra`   | OFF | Enable additional compilation warnings |

+[Unity builds][5] may be faster for the first build
+(at the cost of much more memory) since they concatenate sources into fewer
+translation units. Non-unity builds may be faster for incremental builds,
+and can be helpful for detecting `#include` omissions.
+
## Troubleshooting

### Conan
@@ -645,6 +621,7 @@ If you want to experiment with a new package, follow these steps:

[1]: https://github.com/conan-io/conan-center-index/issues/13168
[2]: https://en.cppreference.com/w/cpp/compiler_support/20
[3]: https://docs.conan.io/en/latest/getting_started.html
+[5]: https://en.wikipedia.org/wiki/Unity_build
[6]: https://github.com/boostorg/beast/issues/2648
[7]: https://github.com/boostorg/beast/issues/2661
[gcovr]: https://gcovr.com/en/stable/getting-started.html
@@ -9,5 +9,8 @@ function (xrpl_add_test name)

  isolate_headers(${target} "${CMAKE_SOURCE_DIR}" "${CMAKE_SOURCE_DIR}/tests/${name}" PRIVATE)

+  # Make sure the test isn't optimized away in unity builds
+  set_target_properties(${target} PROPERTIES UNITY_BUILD_MODE GROUP UNITY_BUILD_BATCH_SIZE 0) # Adjust as needed
+
  add_test(NAME ${target} COMMAND ${target})
endfunction ()
@@ -4,7 +4,12 @@

include(target_protobuf_sources)

+# Protocol buffers cannot participate in a unity build,
+# because all the generated sources
+# define a bunch of `static const` variables with the same names,
+# so we just build them as a separate library.
add_library(xrpl.libpb)
+set_target_properties(xrpl.libpb PROPERTIES UNITY_BUILD OFF)
target_protobuf_sources(xrpl.libpb xrpl/proto LANGUAGE cpp IMPORT_DIRS include/xrpl/proto
                        PROTOS include/xrpl/proto/xrpl.proto)
@@ -155,4 +160,12 @@ if (xrpld)
    # antithesis_instrumentation.h, which is not exported as INTERFACE
    target_include_directories(xrpld PRIVATE ${CMAKE_SOURCE_DIR}/external/antithesis-sdk)
  endif ()
+
+  # any files that don't play well with unity should be added here
+  if (tests)
+    set_source_files_properties(
+      # these two seem to produce conflicts in beast teardown template methods
+      src/test/rpc/ValidatorRPC_test.cpp src/test/ledger/Invariants_test.cpp PROPERTIES SKIP_UNITY_BUILD_INCLUSION
+      TRUE)
+  endif ()
endif ()
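The new comment above names the failure mode; here is a hypothetical two-file illustration (not actual protobuf output) of why concatenating such sources breaks. Each file-scope `static const` has internal linkage and is legal in its own translation unit, but a unity build pastes both files into one unit:

```cpp
// --- foo.pb.cc (hypothetical generated source) ---
static const int kIndexInFileMessages = 0;  // fine: internal linkage, own TU

// --- bar.pb.cc (hypothetical generated source) ---
static const int kIndexInFileMessages = 1;  // also fine on its own

// --- unity_0.cxx (what a unity build effectively compiles) ---
// #include "foo.pb.cc"
// #include "bar.pb.cc"
// error: redefinition of 'kIndexInFileMessages'
```

Building `xrpl.libpb` with `UNITY_BUILD OFF` sidesteps this without giving up unity builds elsewhere.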
@@ -30,6 +30,14 @@ if (tests)
  endif ()
endif ()

+option(unity "Creates a build using UNITY support in cmake." OFF)
+if (unity)
+  if (NOT is_ci)
+    set(CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
+  endif ()
+  set(CMAKE_UNITY_BUILD ON CACHE BOOL "Do a unity build")
+endif ()
+
if (is_clang AND is_linux)
  option(voidstar "Enable Antithesis instrumentation." OFF)
endif ()
@@ -32,13 +32,13 @@ target_link_libraries(
if (Boost_COMPILER)
  target_link_libraries(xrpl_boost INTERFACE Boost::disable_autolinking)
endif ()
-if (SANITIZERS_ENABLED AND is_clang)
-  # TODO: gcc does not support -fsanitize-blacklist...can we do something else for gcc ?
-  if (NOT Boost_INCLUDE_DIRS AND TARGET Boost::headers)
-    get_target_property(Boost_INCLUDE_DIRS Boost::headers INTERFACE_INCLUDE_DIRECTORIES)
-  endif ()
-  message(STATUS "Adding [${Boost_INCLUDE_DIRS}] to sanitizer blacklist")
-  file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt "src:${Boost_INCLUDE_DIRS}/*")
-  target_compile_options(opts INTERFACE # ignore boost headers for sanitizing
-                         -fsanitize-blacklist=${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt)
-endif ()
+# if (SANITIZERS_ENABLED AND is_clang)
+#   # TODO: gcc does not support -fsanitize-blacklist...can we do something else for gcc ?
+#   if (NOT Boost_INCLUDE_DIRS AND TARGET Boost::headers)
+#     get_target_property(Boost_INCLUDE_DIRS Boost::headers INTERFACE_INCLUDE_DIRECTORIES)
+#   endif ()
+#   message(STATUS "Adding [${Boost_INCLUDE_DIRS}] to sanitizer blacklist")
+#   file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt "src:${Boost_INCLUDE_DIRS}/*")
+#   target_compile_options(opts INTERFACE # ignore boost headers for sanitizing
+#                          -fsanitize-blacklist=${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt)
+# endif ()
conanfile.py: 10 lines changed
@@ -1,4 +1,5 @@
import re
+import os

from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout

@@ -23,6 +24,7 @@ class Xrpl(ConanFile):
        "shared": [True, False],
        "static": [True, False],
        "tests": [True, False],
+        "unity": [True, False],
        "xrpld": [True, False],
    }

@@ -54,6 +56,7 @@ class Xrpl(ConanFile):
        "shared": False,
        "static": True,
        "tests": False,
+        "unity": False,
        "xrpld": False,
        "date/*:header_only": True,
        "ed25519/*:shared": False,
@@ -124,6 +127,12 @@ class Xrpl(ConanFile):
        if self.settings.compiler in ["clang", "gcc"]:
            self.options["boost"].without_cobalt = True

+        # Check if environment variable exists
+        if "SANITIZERS" in os.environ:
+            sanitizers = os.environ["SANITIZERS"]
+            if "Address" in sanitizers:
+                self.default_options["fPIC"] = False
+
    def requirements(self):
        # Conan 2 requires transitive headers to be specified
        transitive_headers_opt = (
@@ -166,6 +175,7 @@ class Xrpl(ConanFile):
        tc.variables["rocksdb"] = self.options.rocksdb
        tc.variables["BUILD_SHARED_LIBS"] = self.options.shared
        tc.variables["static"] = self.options.static
+        tc.variables["unity"] = self.options.unity
        tc.variables["xrpld"] = self.options.xrpld
        tc.generate()
docs/build/sanitizers.md (vendored): 8 lines changed
@@ -89,8 +89,8 @@ cmake --build . --parallel 4
**IMPORTANT**: ASAN with Boost produces many false positives. Use these options:

```bash
-export ASAN_OPTIONS="print_stacktrace=1:detect_container_overflow=0:suppressions=path/to/asan.supp:halt_on_error=0:log_path=asan.log"
-export LSAN_OPTIONS="suppressions=path/to/lsan.supp:halt_on_error=0:log_path=lsan.log"
+export ASAN_OPTIONS="include=sanitizers/suppressions/runtime-asan-options.txt:suppressions=sanitizers/suppressions/asan.supp"
+export LSAN_OPTIONS="include=sanitizers/suppressions/runtime-lsan-options.txt:suppressions=sanitizers/suppressions/lsan.supp"

# Run tests
./xrpld --unittest --unittest-jobs=5
@@ -108,7 +108,7 @@ export LSAN_OPTIONS="suppressions=path/to/lsan.supp:halt_on_error=0:log_path=lsa
### ThreadSanitizer (TSan)

```bash
-export TSAN_OPTIONS="suppressions=path/to/tsan.supp halt_on_error=0 log_path=tsan.log"
+export TSAN_OPTIONS="include=sanitizers/suppressions/runtime-tsan-options.txt:suppressions=sanitizers/suppressions/tsan.supp"

# Run tests
./xrpld --unittest --unittest-jobs=5
@@ -129,7 +129,7 @@ More details [here](https://github.com/google/sanitizers/wiki/AddressSanitizerLe
### UndefinedBehaviorSanitizer (UBSan)

```bash
-export UBSAN_OPTIONS="suppressions=path/to/ubsan.supp:print_stacktrace=1:halt_on_error=0:log_path=ubsan.log"
+export UBSAN_OPTIONS="include=sanitizers/suppressions/runtime-ubsan-options.txt:suppressions=sanitizers/suppressions/ubsan.supp"

# Run tests
./xrpld --unittest --unittest-jobs=5
@@ -1,8 +1,6 @@
#ifndef XRPL_BASICS_LOCALVALUE_H_INCLUDED
#define XRPL_BASICS_LOCALVALUE_H_INCLUDED

-#include <boost/thread/tss.hpp>
-
#include <memory>
#include <unordered_map>

@@ -42,21 +40,63 @@ struct LocalValues

    // Keys are the address of a LocalValue.
    std::unordered_map<void const*, std::unique_ptr<BasicValue>> values;
};

-static inline void
-cleanup(LocalValues* lvs)
-{
-    if (lvs && !lvs->onCoro)
-        delete lvs;
-}
+// Wrapper to ensure proper cleanup when thread exits
+struct LocalValuesHolder
+{
+    LocalValues* ptr = nullptr;
+
+    ~LocalValuesHolder()
+    {
+        if (ptr && !ptr->onCoro)
+            delete ptr;
+    }
+};

-template <class = void>
-boost::thread_specific_ptr<detail::LocalValues>&
-getLocalValues()
-{
-    static boost::thread_specific_ptr<detail::LocalValues> tsp(&detail::LocalValues::cleanup);
-    return tsp;
-}
+inline LocalValuesHolder&
+getLocalValuesHolder()
+{
+    thread_local LocalValuesHolder holder;
+    return holder;
+}
+
+inline LocalValues*&
+getLocalValuesPtr()
+{
+    return getLocalValuesHolder().ptr;
+}
+
+inline LocalValues*
+getOrCreateLocalValues()
+{
+    auto& ptr = getLocalValuesPtr();
+    if (!ptr)
+    {
+        ptr = new LocalValues();
+        ptr->onCoro = false;
+    }
+    return ptr;
+}
+
+// For coroutine support, we need explicit swap functions
+inline LocalValues*
+releaseLocalValues()
+{
+    auto& ptr = getLocalValuesPtr();
+    auto* result = ptr;
+    ptr = nullptr;
+    return result;
+}
+
+inline void
+resetLocalValues(LocalValues* lvs)
+{
+    auto& ptr = getLocalValuesPtr();
+    // Clean up old value if it's not a coroutine's LocalValues
+    if (ptr && !ptr->onCoro)
+        delete ptr;
+    ptr = lvs;
+}

} // namespace detail
@@ -89,19 +129,10 @@ template <class T>
T&
LocalValue<T>::operator*()
{
-    auto lvs = detail::getLocalValues().get();
-    if (!lvs)
-    {
-        lvs = new detail::LocalValues();
-        lvs->onCoro = false;
-        detail::getLocalValues().reset(lvs);
-    }
-    else
-    {
-        auto const iter = lvs->values.find(this);
-        if (iter != lvs->values.end())
-            return *reinterpret_cast<T*>(iter->second->get());
-    }
+    auto lvs = detail::getOrCreateLocalValues();
+    auto const iter = lvs->values.find(this);
+    if (iter != lvs->values.end())
+        return *reinterpret_cast<T*>(iter->second->get());

    return *reinterpret_cast<T*>(
        lvs->values.emplace(this, std::make_unique<detail::LocalValues::Value<T>>(t_)).first->second->get());
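A minimal sketch of the save/install/restore choreography these swap functions enable, wrapped in a hypothetical `CoroRunner` type for illustration; the real call site is the `JobQueue::Coro::resume` hunk further down in this diff.

```cpp
#include <functional>

struct CoroRunner
{
    detail::LocalValues lvs_;        // coroutine-owned; lvs_.onCoro == true
    std::function<void()> coro_;     // the coroutine body to run

    void resume()
    {
        auto* saved = detail::releaseLocalValues();  // detach the thread's values
        detail::resetLocalValues(&lvs_);             // install the coroutine's values
        coro_();                                     // run on the coroutine's values
        detail::releaseLocalValues();                // detach; lvs_ is not deleted (onCoro)
        detail::resetLocalValues(saved);             // restore the thread's own values
    }
};
```

The key property is that `resetLocalValues` only deletes a previous pointer when `onCoro` is false, so a coroutine's `LocalValues` survives being swapped in and out across threads.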
@@ -699,8 +699,12 @@ Number::normalizeToRange(T minMantissa, T maxMantissa) const
    XRPL_ASSERT_PARTS(!negative, "xrpl::Number::normalizeToRange", "Number is non-negative for unsigned range.");
    Number::normalize(negative, mantissa, exponent, minMantissa, maxMantissa);

-    auto const sign = negative ? -1 : 1;
-    return std::make_pair(static_cast<T>(sign * mantissa), exponent);
+    // Cast mantissa to signed type first to avoid unsigned integer overflow
+    // when multiplying by negative sign
+    T signedMantissa = static_cast<T>(mantissa);
+    if (negative)
+        signedMantissa = -signedMantissa;
+    return std::make_pair(signedMantissa, exponent);
}

inline constexpr Number
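A standalone illustration (not rippled code) of the pitfall the new comment in `normalizeToRange` describes. With the old `sign * mantissa` pattern, the signed `sign` is converted to the unsigned mantissa type before the multiply, so the product wraps modulo 2^64; the bits usually come out right after the cast back, but clang's `-fsanitize=unsigned-integer-overflow` flags the wrap. Casting to the signed type first and then negating avoids the unsigned arithmetic entirely.

```cpp
#include <cstdint>
#include <iostream>

int main()
{
    std::uint64_t mantissa = 5;
    int sign = -1;

    // Old pattern: sign is converted to uint64_t (0xFFFFFFFFFFFFFFFF), and
    // the multiply wraps to 0xFFFFFFFFFFFFFFFB before the cast back to a
    // signed type. Correct bits, but reported as unsigned overflow.
    auto wrapped = static_cast<std::int64_t>(sign * mantissa);

    // New pattern: convert to the signed type first, then negate.
    auto signedMantissa = static_cast<std::int64_t>(mantissa);
    signedMantissa = -signedMantissa;

    std::cout << wrapped << " " << signedMantissa << "\n";  // prints: -5 -5
    return 0;
}
```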
@@ -358,6 +358,7 @@ public:
    base_uint&
    operator&=(base_uint const& b)
    {
+        XRPL_ASSERT(WIDTH == b.WIDTH, "input size mismatch");
        for (int i = 0; i < WIDTH; i++)
            data_[i] &= b.data_[i];
@@ -1,6 +1,7 @@
#ifndef XRPL_BASICS_CONTRACT_H_INCLUDED
#define XRPL_BASICS_CONTRACT_H_INCLUDED

+#include <xrpl/basics/sanitizers.h>
#include <xrpl/beast/type_name.h>

#include <exception>
@@ -25,7 +26,7 @@ LogThrow(std::string const& title);
    control to the next matching exception handler, if any.
    Otherwise, std::terminate will be called.
*/
-[[noreturn]] inline void
+[[noreturn]] XRPL_NO_SANITIZE_ADDRESS inline void
Rethrow()
{
    LogThrow("Re-throwing exception");
@@ -33,7 +34,7 @@ Rethrow()
}

template <class E, class... Args>
-[[noreturn]] inline void
+[[noreturn]] XRPL_NO_SANITIZE_ADDRESS inline void
Throw(Args&&... args)
{
    static_assert(std::is_convertible<E*, std::exception*>::value, "Exception must derive from std::exception.");
@@ -1,5 +1,5 @@
-#ifndef XRPL_BASICS_ROCKSDB_H_INCLUDED
-#define XRPL_BASICS_ROCKSDB_H_INCLUDED
+#ifndef XRPL_UNITY_ROCKSDB_H_INCLUDED
+#define XRPL_UNITY_ROCKSDB_H_INCLUDED

#if XRPL_ROCKSDB_AVAILABLE
// #include <rocksdb2/port/port_posix.h>
include/xrpl/basics/sanitizers.h (new file): 6 lines
@@ -0,0 +1,6 @@
+// Helper to disable ASan/HwASan for specific functions
+#if defined(__GNUC__) || defined(__clang__)
+#define XRPL_NO_SANITIZE_ADDRESS __attribute__((no_sanitize("address", "hwaddress")))
+#else
+#define XRPL_NO_SANITIZE_ADDRESS
+#endif
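A sketch of how the macro is meant to be applied, mirroring the `Contract.h` hunk above: the attribute sits with the other declaration specifiers, and expands to nothing on compilers without the GNU attribute. The function name below is hypothetical; the real call sites in this diff are `Throw()` and `Rethrow()`.

```cpp
#include <xrpl/basics/sanitizers.h>

// Hypothetical example: exempt one function from ASan/HwASan checking,
// e.g. because it runs across a Boost coroutine stack switch that ASan
// cannot track and would misreport.
XRPL_NO_SANITIZE_ADDRESS inline void
crossCoroBoundary()
{
    // ... work that would otherwise trigger a false positive ...
}
```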
@@ -73,13 +73,16 @@ JobQueue::Coro::resume()
        std::lock_guard lock(jq_.m_mutex);
        --jq_.nSuspend_;
    }
-    auto saved = detail::getLocalValues().release();
-    detail::getLocalValues().reset(&lvs_);
+    auto saved = detail::releaseLocalValues();
+    detail::resetLocalValues(&lvs_);
    std::lock_guard lock(mutex_);
    XRPL_ASSERT(static_cast<bool>(coro_), "xrpl::JobQueue::Coro::resume : is runnable");
    coro_();
-    detail::getLocalValues().release();
-    detail::getLocalValues().reset(saved);
+
+    // Restore the thread's original LocalValues
+    detail::releaseLocalValues();
+    detail::resetLocalValues(saved);

    std::lock_guard lk(mutex_run_);
    running_ = false;
    cv_.notify_all();
@@ -1,202 +0,0 @@
#ifndef XRPL_CORE_SERVICEREGISTRY_H_INCLUDED
#define XRPL_CORE_SERVICEREGISTRY_H_INCLUDED

#include <xrpl/basics/Blob.h>
#include <xrpl/basics/SHAMapHash.h>
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/ledger/CachedSLEs.h>

namespace xrpl {

// Forward declarations
namespace NodeStore {
class Database;
}
namespace Resource {
class Manager;
}
namespace perf {
class PerfLog;
}

class AcceptedLedger;
class AmendmentTable;
class Cluster;
class CollectorManager;
class DatabaseCon;
class Family;
class HashRouter;
class InboundLedgers;
class InboundTransactions;
class JobQueue;
class LedgerCleaner;
class LedgerMaster;
class LedgerReplayer;
class LoadFeeTrack;
class LoadManager;
class ManifestCache;
class NetworkOPs;
class OpenLedger;
class OrderBookDB;
class Overlay;
class PathRequests;
class PeerReservationTable;
class PendingSaves;
class RelationalDatabase;
class ServerHandler;
class SHAMapStore;
class TimeKeeper;
class TransactionMaster;
class TxQ;
class ValidatorList;
class ValidatorSite;

template <class Adaptor>
class Validations;
class RCLValidationsAdaptor;
using RCLValidations = Validations<RCLValidationsAdaptor>;

using NodeCache = TaggedCache<SHAMapHash, Blob>;

/** Service registry for dependency injection.

    This abstract interface provides access to various services and components
    used throughout the application. It separates the service locator pattern
    from the Application lifecycle management.

    Components that need access to services can hold a reference to
    ServiceRegistry rather than Application when they only need service
    access and not lifecycle management.
*/
class ServiceRegistry
{
public:
    ServiceRegistry() = default;
    virtual ~ServiceRegistry() = default;

    // Core infrastructure services
    virtual CollectorManager&
    getCollectorManager() = 0;

    virtual Family&
    getNodeFamily() = 0;

    virtual TimeKeeper&
    timeKeeper() = 0;

    virtual JobQueue&
    getJobQueue() = 0;

    virtual NodeCache&
    getTempNodeCache() = 0;

    virtual CachedSLEs&
    cachedSLEs() = 0;

    // Protocol and validation services
    virtual AmendmentTable&
    getAmendmentTable() = 0;

    virtual HashRouter&
    getHashRouter() = 0;

    virtual LoadFeeTrack&
    getFeeTrack() = 0;

    virtual LoadManager&
    getLoadManager() = 0;

    virtual RCLValidations&
    getValidations() = 0;

    virtual ValidatorList&
    validators() = 0;

    virtual ValidatorSite&
    validatorSites() = 0;

    virtual ManifestCache&
    validatorManifests() = 0;

    virtual ManifestCache&
    publisherManifests() = 0;

    // Network services
    virtual Overlay&
    overlay() = 0;

    virtual Cluster&
    cluster() = 0;

    virtual PeerReservationTable&
    peerReservations() = 0;

    virtual Resource::Manager&
    getResourceManager() = 0;

    // Storage services
    virtual NodeStore::Database&
    getNodeStore() = 0;

    virtual SHAMapStore&
    getSHAMapStore() = 0;

    virtual RelationalDatabase&
    getRelationalDatabase() = 0;

    // Ledger services
    virtual InboundLedgers&
    getInboundLedgers() = 0;

    virtual InboundTransactions&
    getInboundTransactions() = 0;

    virtual TaggedCache<uint256, AcceptedLedger>&
    getAcceptedLedgerCache() = 0;

    virtual LedgerMaster&
    getLedgerMaster() = 0;

    virtual LedgerCleaner&
    getLedgerCleaner() = 0;

    virtual LedgerReplayer&
    getLedgerReplayer() = 0;

    virtual PendingSaves&
    pendingSaves() = 0;

    virtual OpenLedger&
    openLedger() = 0;

    virtual OpenLedger const&
    openLedger() const = 0;

    // Transaction and operation services
    virtual NetworkOPs&
    getOPs() = 0;

    virtual OrderBookDB&
    getOrderBookDB() = 0;

    virtual TransactionMaster&
    getMasterTransaction() = 0;

    virtual TxQ&
    getTxQ() = 0;

    virtual PathRequests&
    getPathRequests() = 0;

    // Server services
    virtual ServerHandler&
    getServerHandler() = 0;

    virtual perf::PerfLog&
    getPerfLog() = 0;
};

} // namespace xrpl

#endif
@@ -30,6 +30,9 @@ public:
        bool sslVerify,
        beast::Journal j);

+    static void
+    cleanupSSLContext();
+
    static void
    get(bool bSSL,
        boost::asio::io_context& io_context,
@@ -230,7 +230,7 @@ missing_field_error(std::string const& name)
}

inline Json::Value
missing_field_error(Json::StaticString name)
missing_field_error(Json::StaticString const& name)
{
    return missing_field_error(std::string(name));
}
@@ -248,7 +248,7 @@ object_field_error(std::string const& name)
}

inline Json::Value
object_field_error(Json::StaticString name)
object_field_error(Json::StaticString const& name)
{
    return object_field_error(std::string(name));
}
@@ -260,7 +260,7 @@ invalid_field_message(std::string const& name)
}

inline std::string
invalid_field_message(Json::StaticString name)
invalid_field_message(Json::StaticString const& name)
{
    return invalid_field_message(std::string(name));
}
@@ -272,7 +272,7 @@ invalid_field_error(std::string const& name)
}

inline Json::Value
invalid_field_error(Json::StaticString name)
invalid_field_error(Json::StaticString const& name)
{
    return invalid_field_error(std::string(name));
}
@@ -284,7 +284,7 @@ expected_field_message(std::string const& name, std::string const& type)
}

inline std::string
expected_field_message(Json::StaticString name, std::string const& type)
expected_field_message(Json::StaticString const& name, std::string const& type)
{
    return expected_field_message(std::string(name), type);
}
@@ -296,7 +296,7 @@ expected_field_error(std::string const& name, std::string const& type)
}

inline Json::Value
expected_field_error(Json::StaticString name, std::string const& type)
expected_field_error(Json::StaticString const& name, std::string const& type)
{
    return expected_field_error(std::string(name), type);
}

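The hunks above switch these overloads from taking Json::StaticString by value to const reference. A minimal self-contained sketch of why the overload pair exists at all (toy types standing in for the jsoncpp ones): a StaticString wraps a string literal, so the std::string is only constructed inside the forwarding overload, exactly as in the diff:

#include <iostream>
#include <string>

struct StaticString  // stand-in for Json::StaticString
{
    constexpr explicit StaticString(char const* s) : str_(s) {}
    constexpr char const* c_str() const { return str_; }
    operator std::string() const { return str_; }
    char const* str_;
};

std::string missing_field_message(std::string const& name)
{
    return "Missing field '" + name + "'.";
}

// Forwarding overload, passed by const& as in the diff above.
std::string missing_field_message(StaticString const& name)
{
    return missing_field_message(std::string(name));
}

int main()
{
    static constexpr StaticString amount{"Amount"};
    std::cout << missing_field_message(amount) << '\n';
}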
@@ -47,6 +47,17 @@ public:
        return id_;
    }

    /**
     * Get the SHAMapNodeID of a child node at the specified branch.
     *
     * @param m The branch number (0-15) indicating which child to descend to.
     *          In the SHAMap's 16-way radix tree, each inner node has up to
     *          16 children, indexed by the corresponding nibble (4 bits) of
     *          the key at the current depth.
     * @return SHAMapNodeID of the child node at branch m.
     * @throws std::logic_error if this node is at the maximum leaf depth (64)
     *         or if the node's id doesn't match its depth mask.
     */
    SHAMapNodeID
    getChildNodeID(unsigned int m) const;

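The new doc comment above describes branch selection in the 16-way radix tree. A minimal self-contained sketch of that nibble arithmetic (simplified types; rippled's real key type is uint256, not this toy array):

#include <array>
#include <cstdint>
#include <iostream>

using Key = std::array<std::uint8_t, 32>;  // 32 bytes = 64 nibbles = leaf depth 64

// Branch number (0-15) a key selects at a given depth: the key's nibble at
// that depth, taking the high nibble first within each byte.
unsigned int selectBranch(Key const& key, unsigned int depth)
{
    std::uint8_t const byte = key[depth / 2];
    return (depth % 2 == 0) ? (byte >> 4) : (byte & 0x0F);
}

int main()
{
    Key key{};
    key[0] = 0xAB;  // depth 0 -> branch 0xA, depth 1 -> branch 0xB
    std::cout << selectBranch(key, 0) << ' ' << selectBranch(key, 1) << '\n';
}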
@@ -1,29 +1,25 @@
# The idea is to empty this file gradually by fixing the underlying issues and removing suppressions.
# The idea is to empty this file gradually by fixing the underlying issues and removing suppresions.
#
# ASAN_OPTIONS="print_stacktrace=1:detect_container_overflow=0:suppressions=sanitizers/suppressions/asan.supp:halt_on_error=0"
# ASAN_OPTIONS="suppressions=sanitizers/suppressions/asan.supp:halt_on_error=0:detect_stack_use_after_return=0"
#
# The detect_container_overflow=0 option disables false positives from:
# - Boost intrusive containers (slist_iterator.hpp, hashtable.hpp, aged_unordered_container.h)
# - Boost context/coroutine stack switching (Workers.cpp, thread.h)
# Boost coroutines cause multiple ASAN false positives due to swapcontext/fiber stack switching.
# ASAN cannot correctly track stack memory across coroutine context switches, leading to:
# - stack-use-after-return errors
# - stack-use-after-scope errors
# - stack-buffer-overflow errors in seemingly unrelated code (e.g., std::chrono::steady_clock::now())
# - stack-buffer-underflow errors in seemingly unrelated code (e.g., xxhasher::retrieveHash(), clock_gettime)
#
# See: https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow
# These are suppressed via:
# 1. Runtime option: detect_stack_use_after_return=0 (in ASAN_OPTIONS in CI workflow)
# 2. Compile-time flag: -fno-sanitize-address-use-after-scope (in cmake/XrplSanitizers.cmake)
#
# Note: stack-buffer-overflow false positives from coroutines cannot be fully suppressed
# without disabling ASAN entirely for Boost. Clang builds use -fsanitize-blacklist to
# exclude Boost headers, but GCC does not support this feature.
#
# See: https://github.com/google/sanitizers/issues/189

# Boost
interceptor_name:boost/asio

# Leaks in Doctest tests: xrpl.test.*
interceptor_name:src/libxrpl/net/HTTPClient.cpp
interceptor_name:src/libxrpl/net/RegisterSSLCerts.cpp
interceptor_name:src/tests/libxrpl/net/HTTPClient.cpp
interceptor_name:xrpl/net/AutoSocket.h
interceptor_name:xrpl/net/HTTPClient.h
interceptor_name:xrpl/net/HTTPClientSSLContext.h
interceptor_name:xrpl/net/RegisterSSLCerts.h

# Suppress false positive stack-buffer errors in thread stack allocation
# Related to ASan's __asan_handle_no_return warnings (github.com/google/sanitizers/issues/189)
# These occur during multi-threaded test initialization on macOS
interceptor_name:memcpy
# Boost - false positives from stackful coroutines
interceptor_name:clock_gettime
interceptor_name:__bzero
interceptor_name:__asan_memset
interceptor_name:__asan_memcpy
interceptor_name:nudb

@@ -1,16 +1,13 @@
# The idea is to empty this file gradually by fixing the underlying issues and removing suppresions.

# Suppress leaks detected by asan in rippled code.
leak:src/libxrpl/net/HTTPClient.cpp
leak:src/libxrpl/net/RegisterSSLCerts.cpp
leak:src/tests/libxrpl/net/HTTPClient.cpp
leak:xrpl/net/AutoSocket.h
leak:xrpl/net/HTTPClient.h
leak:xrpl/net/HTTPClientSSLContext.h
leak:xrpl/net/RegisterSSLCerts.h
leak:ripple::HTTPClient
leak:ripple::HTTPClientImp

# Suppress leaks detected by asan in boost code.
leak:boost::asio
leak:boost/asio
# These are false positives from Boost.Asio SSL internals that use OpenSSL BIO structures.
# The BIO structures are managed by OpenSSL's internal reference counting and freed at process exit.

#leak:boost::asio
#leak:boost/asio

# OpenSSL BIO memory is managed internally and freed at process exit
leak:CRYPTO_malloc
leak:bio_make_pair
leak:BIO_new_bio_pair

4 sanitizers/suppressions/runtime-asan-options.txt Normal file
4 sanitizers/suppressions/runtime-asan-options.txt Normal file
@@ -0,0 +1,4 @@
detect_container_overflow=0
detect_stack_use_after_return=0
debug=true
halt_on_error=false
1 sanitizers/suppressions/runtime-lsan-options.txt Normal file
1 sanitizers/suppressions/runtime-lsan-options.txt Normal file
@@ -0,0 +1 @@
halt_on_error=false
3 sanitizers/suppressions/runtime-tsan-options.txt Normal file
3 sanitizers/suppressions/runtime-tsan-options.txt Normal file
@@ -0,0 +1,3 @@
halt_on_error=false
verbosity=1
second_deadlock_stack=1
1 sanitizers/suppressions/runtime-ubsan-options.txt Normal file
1 sanitizers/suppressions/runtime-ubsan-options.txt Normal file
@@ -0,0 +1 @@
halt_on_error=false
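The four new runtime-*-options.txt files above hold sanitizer runtime options, presumably fed to the corresponding *_OPTIONS environment variables in CI. As a hedged aside, the same ASan defaults can also be baked into a binary via the documented __asan_default_options() hook; the sketch below mirrors runtime-asan-options.txt and is illustrative, not part of this diff:

// Compile this into the instrumented binary; ASan calls the hook at startup.
// Anything set in the ASAN_OPTIONS environment variable still overrides it.
extern "C" char const*
__asan_default_options()
{
    return "detect_container_overflow=0:"
           "detect_stack_use_after_return=0:"
           "halt_on_error=false";
}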
@@ -27,3 +27,8 @@ src:core/JobQueue.cpp
src:libxrpl/beast/utility/beast_Journal.cpp
src:test/beast/beast_PropertyStream_test.cpp
src:src/test/app/Invariants_test.cpp

# Boost coroutines cause false positive stack-buffer-underflow in xxhasher
# This is a known ASAN limitation with stackful coroutines
# See: https://github.com/google/sanitizers/issues/189
src:beast/hash/xxhasher.h

@@ -140,6 +140,7 @@ unsigned-integer-overflow:src/libxrpl/protocol/tokens.cpp
unsigned-integer-overflow:src/libxrpl/shamap/SHAMap.cpp
unsigned-integer-overflow:src/test/app/Batch_test.cpp
unsigned-integer-overflow:src/test/app/Invariants_test.cpp
unsigned-integer-overflow:src/test/app/Loan_test.cpp
unsigned-integer-overflow:src/test/app/NFToken_test.cpp
unsigned-integer-overflow:src/test/app/Offer_test.cpp
unsigned-integer-overflow:src/test/app/Path_test.cpp

@@ -11,6 +11,7 @@
#include <numeric>
#include <stdexcept>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>

@@ -107,7 +108,7 @@ public:
        int& exponent,
        internalrep const& minMantissa,
        internalrep const& maxMantissa,
        std::string location);
        std::string_view location);

    // Modify the result to the correctly rounded value
    template <UnsignedMantissa T>
@@ -116,7 +117,7 @@ public:

    // Modify the result to the correctly rounded value
    void
    doRound(rep& drops, std::string location);
    doRound(rep& drops, std::string_view location);

private:
    void
@@ -238,7 +239,7 @@ Number::Guard::doRoundUp(
    int& exponent,
    internalrep const& minMantissa,
    internalrep const& maxMantissa,
    std::string location)
    std::string_view location)
{
    auto r = round();
    if (r == 1 || (r == 0 && (mantissa & 1) == 1))
@@ -254,7 +255,7 @@ Number::Guard::doRoundUp(
    }
    bringIntoRange(negative, mantissa, exponent, minMantissa);
    if (exponent > maxExponent)
        throw std::overflow_error(location);
        Throw<std::overflow_error>(std::string(location));
}

template <UnsignedMantissa T>
@@ -276,7 +277,7 @@ Number::Guard::doRoundDown(bool& negative, T& mantissa, int& exponent, internalr
// Modify the result to the correctly rounded value
void
Number::Guard::doRound(rep& drops, std::string location)
Number::Guard::doRound(rep& drops, std::string_view location)
{
    auto r = round();
    if (r == 1 || (r == 0 && (drops & 1) == 1))
@@ -290,7 +291,7 @@ Number::Guard::doRound(rep& drops, std::string location)
        // or "(maxRep + 1) / 10", neither of which will round up when
        // converting to rep, though the latter might overflow _before_
        // rounding.
        throw std::overflow_error(location); // LCOV_EXCL_LINE
        throw std::overflow_error(std::string(location)); // LCOV_EXCL_LINE
    }
    ++drops;
}
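The doRound/doRoundUp hunks above change the location parameter from std::string to std::string_view, avoiding an allocation on every call; the explicit std::string(location) conversion is then required on the throwing path, because the std::exception hierarchy has no string_view constructors. A minimal standalone sketch of that rationale:

#include <iostream>
#include <stdexcept>
#include <string>
#include <string_view>

// The view is cheap to pass; the owning string is built only when throwing.
[[noreturn]] void
throwOverflow(std::string_view location)
{
    // std::overflow_error(location) would not compile: the conversion
    // from string_view to string must be explicit.
    throw std::overflow_error(std::string(location));
}

int main()
{
    try
    {
        throwOverflow("Number::normalize 2");
    }
    catch (std::overflow_error const& e)
    {
        std::cout << e.what() << '\n';
    }
}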
@@ -427,8 +428,8 @@ doNormalize(
    mantissa_ = m;

    g.doRoundUp(negative, mantissa_, exponent_, minMantissa, maxMantissa, "Number::normalize 2");
    XRPL_ASSERT_PARTS(
        mantissa_ >= minMantissa && mantissa_ <= maxMantissa, "xrpl::doNormalize", "final mantissa fits in range");
    // XRPL_ASSERT_PARTS(
    //     mantissa_ >= minMantissa && mantissa_ <= maxMantissa, "xrpl::doNormalize", "final mantissa fits in range");
}

template <>

@@ -191,17 +191,17 @@ Value::Value(ValueType type) : type_(type), allocated_(0)
    }
}

Value::Value(Int value) : type_(intValue)
Value::Value(Int value) : type_(intValue), allocated_(0)
{
    value_.int_ = value;
}

Value::Value(UInt value) : type_(uintValue)
Value::Value(UInt value) : type_(uintValue), allocated_(0)
{
    value_.uint_ = value;
}

Value::Value(double value) : type_(realValue)
Value::Value(double value) : type_(realValue), allocated_(0)
{
    value_.real_ = value;
}
@@ -227,7 +227,7 @@ Value::Value(StaticString const& value) : type_(stringValue), allocated_(false)
    value_.string_ = const_cast<char*>(value.c_str());
}

Value::Value(bool value) : type_(booleanValue)
Value::Value(bool value) : type_(booleanValue), allocated_(0)
{
    value_.bool_ = value;
}

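The constructor hunks above add allocated_(0) to the member-initializer lists. Without it, the member is left indeterminate in the numeric and boolean constructors, and any later read of it (for example by a copy constructor or destructor that branches on allocated_) is undefined behavior. A self-contained toy illustration (not the jsoncpp class itself):

#include <iostream>

struct Toy  // stand-in for Json::Value's bookkeeping
{
    explicit Toy(int v) : value(v), allocated(0) {}  // drop allocated(0)
                                                     // to reintroduce the bug
    int value;
    unsigned allocated : 1;  // mirrors Value::allocated_
};

int main()
{
    Toy t{42};
    // With the initializer present this read is well-defined; without it,
    // MemorySanitizer reports a use-of-uninitialized-value here.
    std::cout << t.value << ' ' << t.allocated << '\n';
}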
@@ -26,6 +26,12 @@ HTTPClient::initializeSSLContext(
    httpClientSSLContext.emplace(sslVerifyDir, sslVerifyFile, sslVerify, j);
}

void
HTTPClient::cleanupSSLContext()
{
    httpClientSSLContext.reset();
}

//------------------------------------------------------------------------------
//
// Fetch a web page via http or https.

@@ -85,8 +85,7 @@ registerSSLCerts(boost::asio::ssl::context& ctx, boost::system::error_code& ec,
    // There is a very unpleasant interaction between <wincrypt> and
    // openssl x509 types (namely the former has macros that stomp
    // on the latter), these undefs allow this TU to be safely used in
    // unity builds without messing up subsequent TUs. Although we
    // no longer use unity builds, leaving the undefs here does no harm.
    // unity builds without messing up subsequent TUs.
#if BOOST_OS_WINDOWS
#undef X509_NAME
#undef X509_EXTENSIONS

@@ -69,7 +69,7 @@ make_name(std::string const& object, std::string const& field)
    if (field.empty())
        return object;

    return object + "." + field;
    return {object + "." + field};
}

static inline Json::Value

@@ -67,7 +67,8 @@ SHAMapNodeID::getChildNodeID(unsigned int m) const
    if (depth_ >= SHAMap::leafDepth)
        Throw<std::logic_error>("Request for child node ID of " + to_string(*this));

    if (id_ != (id_ & depthMask(depth_)))
    auto const idAtDepth = id_ & depthMask(depth_);
    if (id_ != idAtDepth)
        Throw<std::logic_error>("Incorrect mask for " + to_string(*this));

    SHAMapNodeID node{depth_ + 1, id_};

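The hunk above hoists id_ & depthMask(depth_) into idAtDepth before the consistency check. As a hedged sketch of what such a depth mask means (simplified to a 64-bit key here; rippled's keys are 256-bit, so this is not the real depthMask), the mask keeps only the nibbles an inner node at that depth is allowed to fix, so a well-formed id satisfies id == (id & mask):

#include <cstdint>
#include <iostream>

// Keep the top `depth` nibbles of a 64-bit key; everything below must be
// zero for a well-formed inner-node id.
std::uint64_t depthMask64(unsigned int depth)
{
    return depth >= 16 ? ~0ULL : ~(~0ULL >> (4 * depth));
}

int main()
{
    std::cout << std::hex << depthMask64(0) << ' '   // 0
              << depthMask64(1) << ' '               // f000000000000000
              << depthMask64(3) << '\n';             // fff0000000000000
}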
@@ -148,7 +148,7 @@ private:
    std::vector<std::string> emptyCfgKeys;
    struct publisher
    {
        publisher(FetchListConfig const& c) : cfg{c}
        publisher(FetchListConfig const& c) : cfg{c}, isRetry{false}
        {
        }
        std::shared_ptr<TrustedPublisherServer> server;

@@ -183,6 +183,9 @@ private:
};

// Helper function to run HTTP client test
// Note: Caller must ensure HTTPClient::initializeSSLContext has been called
// before this function, and HTTPClient::cleanupSSLContext is called after
// all tests are completed.
bool
runHTTPTest(
    TestHTTPServer& server,
@@ -190,14 +193,9 @@ runHTTPTest(
    bool& completed,
    int& resultStatus,
    std::string& resultData,
    boost::system::error_code& resultError)
    boost::system::error_code& resultError,
    beast::Journal& j)
{
    // Create a null journal for testing
    beast::Journal j{TestSink::instance()};

    // Initialize HTTPClient SSL context
    HTTPClient::initializeSSLContext("", "", false, j);

    HTTPClient::get(
        false, // no SSL
        server.ioc(),
@@ -230,6 +228,9 @@ runHTTPTest(
        }
    }

    // Drain any remaining handlers to ensure proper cleanup of HTTPClientImp
    server.ioc().poll();

    return completed;
}

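The new comment above makes the initializeSSLContext/cleanupSSLContext pairing the caller's responsibility. As a hedged aside (hypothetical helper, not part of this diff), a small RAII guard built on the two functions the PR adds would make the cleanup impossible to forget on early returns or test failures:

// Hypothetical scope guard; HTTPClient::initializeSSLContext and
// HTTPClient::cleanupSSLContext are the functions shown in this diff,
// and the ("", "", false, j) arguments mirror the tests below.
struct SSLContextGuard
{
    explicit SSLContextGuard(beast::Journal j)
    {
        HTTPClient::initializeSSLContext("", "", false, j);
    }
    ~SSLContextGuard()
    {
        HTTPClient::cleanupSSLContext();
    }
    SSLContextGuard(SSLContextGuard const&) = delete;
    SSLContextGuard& operator=(SSLContextGuard const&) = delete;
};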
@@ -258,18 +259,27 @@ TEST(HTTPClient, case_insensitive_content_length)
    std::string resultData;
    boost::system::error_code resultError;

    bool testCompleted = runHTTPTest(server, "/test", completed, resultStatus, resultData, resultError);
    beast::Journal j{TestSink::instance()};
    HTTPClient::initializeSSLContext("", "", false, j);

    bool testCompleted = runHTTPTest(server, "/test", completed, resultStatus, resultData, resultError, j);
    // Verify results
    EXPECT_TRUE(testCompleted);
    EXPECT_FALSE(resultError);
    EXPECT_EQ(resultStatus, 200);
    EXPECT_EQ(resultData, testBody);
}

    // Clean up SSL context to prevent memory leaks
    HTTPClient::cleanupSSLContext();
}

TEST(HTTPClient, basic_http_request)
{
    // Initialize SSL context once for the entire test
    beast::Journal j{TestSink::instance()};
    HTTPClient::initializeSSLContext("", "", false, j);

    TestHTTPServer server;
    std::string testBody = "Test response body";
    server.setResponseBody(testBody);
@@ -280,16 +290,23 @@ TEST(HTTPClient, basic_http_request)
    std::string resultData;
    boost::system::error_code resultError;

    bool testCompleted = runHTTPTest(server, "/basic", completed, resultStatus, resultData, resultError);
    bool testCompleted = runHTTPTest(server, "/basic", completed, resultStatus, resultData, resultError, j);

    EXPECT_TRUE(testCompleted);
    EXPECT_FALSE(resultError);
    EXPECT_EQ(resultStatus, 200);
    EXPECT_EQ(resultData, testBody);

    // Clean up SSL context to prevent memory leaks
    HTTPClient::cleanupSSLContext();
}

TEST(HTTPClient, empty_response)
{
    // Initialize SSL context once for the entire test
    beast::Journal j{TestSink::instance()};
    HTTPClient::initializeSSLContext("", "", false, j);

    TestHTTPServer server;
    server.setResponseBody(""); // Empty body
    server.setHeader("Content-Length", "0");
@@ -299,16 +316,23 @@ TEST(HTTPClient, empty_response)
    std::string resultData;
    boost::system::error_code resultError;

    bool testCompleted = runHTTPTest(server, "/empty", completed, resultStatus, resultData, resultError);
    bool testCompleted = runHTTPTest(server, "/empty", completed, resultStatus, resultData, resultError, j);

    EXPECT_TRUE(testCompleted);
    EXPECT_FALSE(resultError);
    EXPECT_EQ(resultStatus, 200);
    EXPECT_TRUE(resultData.empty());

    // Clean up SSL context to prevent memory leaks
    HTTPClient::cleanupSSLContext();
}

TEST(HTTPClient, different_status_codes)
{
    // Initialize SSL context once for the entire test
    beast::Journal j{TestSink::instance()};
    HTTPClient::initializeSSLContext("", "", false, j);

    std::vector<unsigned int> statusCodes = {200, 404, 500};

    for (auto status : statusCodes)
@@ -322,10 +346,13 @@ TEST(HTTPClient, different_status_codes)
        std::string resultData;
        boost::system::error_code resultError;

        bool testCompleted = runHTTPTest(server, "/status", completed, resultStatus, resultData, resultError);
        bool testCompleted = runHTTPTest(server, "/status", completed, resultStatus, resultData, resultError, j);

        EXPECT_TRUE(testCompleted);
        EXPECT_FALSE(resultError);
        EXPECT_EQ(resultStatus, static_cast<int>(status));
    }

    // Clean up SSL context to prevent memory leaks
    HTTPClient::cleanupSSLContext();
}

@@ -6,7 +6,6 @@

#include <xrpl/basics/TaggedCache.h>
#include <xrpl/beast/utility/PropertyStream.h>
#include <xrpl/core/ServiceRegistry.h>
#include <xrpl/protocol/Protocol.h>
#include <xrpl/shamap/TreeNodeCache.h>

@@ -92,7 +91,7 @@ class Validations;
class RCLValidationsAdaptor;
using RCLValidations = Validations<RCLValidationsAdaptor>;

class Application : public ServiceRegistry, public beast::PropertyStream::Source
class Application : public beast::PropertyStream::Source
{
public:
    /* VFALCO NOTE
@@ -147,12 +146,92 @@ public:
    virtual boost::asio::io_context&
    getIOContext() = 0;

    virtual CollectorManager&
    getCollectorManager() = 0;
    virtual Family&
    getNodeFamily() = 0;
    virtual TimeKeeper&
    timeKeeper() = 0;
    virtual JobQueue&
    getJobQueue() = 0;
    virtual NodeCache&
    getTempNodeCache() = 0;
    virtual CachedSLEs&
    cachedSLEs() = 0;
    virtual AmendmentTable&
    getAmendmentTable() = 0;
    virtual HashRouter&
    getHashRouter() = 0;
    virtual LoadFeeTrack&
    getFeeTrack() = 0;
    virtual LoadManager&
    getLoadManager() = 0;
    virtual Overlay&
    overlay() = 0;
    virtual TxQ&
    getTxQ() = 0;
    virtual ValidatorList&
    validators() = 0;
    virtual ValidatorSite&
    validatorSites() = 0;
    virtual ManifestCache&
    validatorManifests() = 0;
    virtual ManifestCache&
    publisherManifests() = 0;
    virtual Cluster&
    cluster() = 0;
    virtual PeerReservationTable&
    peerReservations() = 0;
    virtual RCLValidations&
    getValidations() = 0;
    virtual NodeStore::Database&
    getNodeStore() = 0;
    virtual InboundLedgers&
    getInboundLedgers() = 0;
    virtual InboundTransactions&
    getInboundTransactions() = 0;

    virtual TaggedCache<uint256, AcceptedLedger>&
    getAcceptedLedgerCache() = 0;

    virtual LedgerMaster&
    getLedgerMaster() = 0;
    virtual LedgerCleaner&
    getLedgerCleaner() = 0;
    virtual LedgerReplayer&
    getLedgerReplayer() = 0;
    virtual NetworkOPs&
    getOPs() = 0;
    virtual OrderBookDB&
    getOrderBookDB() = 0;
    virtual ServerHandler&
    getServerHandler() = 0;
    virtual TransactionMaster&
    getMasterTransaction() = 0;
    virtual perf::PerfLog&
    getPerfLog() = 0;

    virtual std::pair<PublicKey, SecretKey> const&
    nodeIdentity() = 0;

    virtual std::optional<PublicKey const>
    getValidationPublicKey() const = 0;

    virtual Resource::Manager&
    getResourceManager() = 0;
    virtual PathRequests&
    getPathRequests() = 0;
    virtual SHAMapStore&
    getSHAMapStore() = 0;
    virtual PendingSaves&
    pendingSaves() = 0;
    virtual OpenLedger&
    openLedger() = 0;
    virtual OpenLedger const&
    openLedger() const = 0;
    virtual RelationalDatabase&
    getRelationalDatabase() = 0;

    virtual std::chrono::milliseconds
    getIOLatency() = 0;
