Compare commits

...

5 Commits

Author SHA1 Message Date
Jingchen
4796ed57a4 Update README.md 2026-02-04 20:00:20 +00:00
Jingchen
73e5323859 Update README.md 2026-02-04 19:57:25 +00:00
JCW
3d6c575f5c Replace the levelization shell script with a Python version to improve performance
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2026-02-04 14:37:57 +00:00
Bart
3a172301ce chore: Remove unity builds (#6300)
Unity builds were intended to speed up builds by bundling multiple source files into fewer compilation units. However, now that ccache is available on all platforms, unity builds are no longer needed, as ccache caches individual compiled objects for reuse. This change therefore removes support for unity builds.
2026-02-03 22:55:22 +00:00
Jingchen
6c1a92fe93 refactor: Add ServiceRegistry to help modularization (#6222)
Currently we pass the `Application` object around, even though the `Application` class acts more like a service registry that gives other classes access to various services. To allow modularization, we should replace `Application` with a service registry class, so that modules that depend on `Application` only to reach other services can be moved easily. This change adds the `ServiceRegistry` class.
2026-02-03 19:08:27 +00:00
19 changed files with 640 additions and 299 deletions

View File

@@ -70,7 +70,7 @@ that `test` code should _never_ be included in `xrpl` or `xrpld` code.)
## Validation
The [levelization](generate.sh) script takes no parameters,
The [levelization](generate.py) script takes no parameters,
reads no environment variables, and can be run from any directory,
as long as it is in the expected location in the rippled repo.
It can be run at any time from within a checked out repo, and will
@@ -104,7 +104,7 @@ It generates many files of [results](results):
GitHub Actions workflow to test that levelization loops haven't
changed. Unfortunately, if changes are detected, it can't tell if
they are improvements or not, so if you have resolved any issues or
done anything else to improve levelization, run `levelization.sh`,
done anything else to improve levelization, run `generate.py`,
and commit the updated results.
The `loops.txt` and `ordering.txt` files relate the modules
@@ -128,7 +128,7 @@ The committed files hide the detailed values intentionally, to
prevent false alarms and merging issues, and because it's easy to
get those details locally.
1. Run `levelization.sh`
1. Run `generate.py`
2. Grep the modules in `paths.txt`.
- For example, if a cycle is found `A ~= B`, simply `grep -w
A .github/scripts/levelization/results/paths.txt | grep -w B`
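For illustration, here is a minimal Python sketch that performs the same lookup as the grep pipeline above. `"A"` and `"B"` are placeholder module names, the results path is the one written by `generate.py`, and the helper itself is not part of the repo.

```python
from pathlib import Path

def loop_details(a: str, b: str,
                 paths: Path = Path(".github/scripts/levelization/results/paths.txt")) -> None:
    """Print the per-direction include counts for a reported cycle A ~= B."""
    # Each line of paths.txt is "<count> <level> <include_level>".
    for line in paths.read_text().splitlines():
        count, level, include_level = line.split()
        if {level, include_level} == {a, b}:
            print(f"{level} includes {include_level} {count} time(s)")

loop_details("A", "B")
```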

369
.github/scripts/levelization/generate.py vendored Normal file
View File

@@ -0,0 +1,369 @@
#!/usr/bin/env python3
"""
Usage: generate.py
This script takes no parameters, reads no environment variables,
and can be run from any directory, as long as it is in the expected
location in the repo.
"""
import os
import re
import subprocess
import sys
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple, Set, Optional
# Compile regex patterns once at module level
INCLUDE_PATTERN = re.compile(r"^\s*#include.*/.*\.h")
INCLUDE_PATH_PATTERN = re.compile(r'[<"]([^>"]+)[>"]')
def dictionary_sort_key(s: str) -> str:
"""
Create a sort key that mimics 'sort -d' (dictionary order).
Dictionary order only considers blanks and alphanumeric characters.
This means punctuation like '.' is ignored during sorting.
"""
# Keep only alphanumeric characters and spaces
return "".join(c for c in s if c.isalnum() or c.isspace())
def get_level(file_path: str) -> str:
"""
Extract the level from a file path (second and third directory components).
Equivalent to bash: cut -d/ -f 2,3
Examples:
src/xrpld/app/main.cpp -> xrpld.app
src/libxrpl/protocol/STObject.cpp -> libxrpl.protocol
include/xrpl/basics/base_uint.h -> xrpl.basics
"""
parts = file_path.split("/")
# Get fields 2 and 3 (indices 1 and 2 in 0-based indexing)
if len(parts) >= 3:
level = f"{parts[1]}/{parts[2]}"
elif len(parts) >= 2:
level = f"{parts[1]}/toplevel"
else:
level = file_path
# If the "level" indicates a file, cut off the filename
if "." in level.split("/")[-1]: # Avoid Path object creation
# Use the "toplevel" label as a workaround for `sort`
# inconsistencies between different utility versions
level = level.rsplit("/", 1)[0] + "/toplevel"
return level.replace("/", ".")
def extract_include_level(include_line: str) -> Optional[str]:
"""
Extract the include path from an #include directive.
Gets the first two directory components from the include path.
Equivalent to bash: cut -d/ -f 1,2
Examples:
#include <xrpl/basics/base_uint.h> -> xrpl.basics
#include "xrpld/app/main/Application.h" -> xrpld.app
"""
# Remove everything before the quote or angle bracket
match = INCLUDE_PATH_PATTERN.search(include_line)
if not match:
return None
include_path = match.group(1)
parts = include_path.split("/")
# Get first two fields (indices 0 and 1)
if len(parts) >= 2:
include_level = f"{parts[0]}/{parts[1]}"
else:
include_level = include_path
# If the "includelevel" indicates a file, cut off the filename
if "." in include_level.split("/")[-1]: # Avoid Path object creation
include_level = include_level.rsplit("/", 1)[0] + "/toplevel"
return include_level.replace("/", ".")
def find_repo_root(start_path: Path, depth_limit: int = 10) -> Path:
"""
Find the repository root by looking for .git directory or src/include folders.
Walks up the directory tree from the start path.
"""
current = start_path.resolve()
# Walk up the directory tree
for _ in range(depth_limit): # Limit search depth to prevent infinite loops
# Check if this directory has src or include folders
has_src = (current / "src").exists()
has_include = (current / "include").exists()
if has_src or has_include:
return current
# Check if this is a git repository root
if (current / ".git").exists():
# Check if it has src or include nearby
if has_src or has_include:
return current
# Move up one level
parent = current.parent
if parent == current: # Reached filesystem root
break
current = parent
# If we couldn't find it, raise an error
raise RuntimeError(
"Could not find repository root. "
"Expected to find a directory containing 'src' and/or 'include' folders."
)
def get_scan_directories(repo_root: Path) -> List[Path]:
"""
Get the list of directories to scan for include files.
Returns paths that actually exist.
"""
directories = []
for dir_name in ["include", "src"]:
dir_path = repo_root / dir_name
if dir_path.exists() and dir_path.is_dir():
directories.append(dir_path)
if not directories:
raise RuntimeError(f"No 'src' or 'include' directories found in {repo_root}")
return directories
def main():
# Change to the script's directory
script_dir = Path(__file__).parent.resolve()
os.chdir(script_dir)
# If the shell is interactive, clean up any flotsam before analyzing
# Match bash behavior: check if PS1 is set (indicates interactive shell)
# When running a script, PS1 is not set even if stdin/stdout are TTYs
if os.environ.get("PS1"):
try:
subprocess.run(["git", "clean", "-ix"], check=False, timeout=30)
except (subprocess.TimeoutExpired, KeyboardInterrupt):
print("Skipping git clean...")
except Exception:
# If git clean fails for any reason, just continue
pass
# Clean up and create results directory
results_dir = script_dir / "results"
if results_dir.exists():
import shutil
shutil.rmtree(results_dir)
results_dir.mkdir()
# Find the repository root by searching for src and include directories
try:
repo_root = find_repo_root(script_dir)
scan_dirs = get_scan_directories(repo_root)
print(f"Found repository root: {repo_root}")
print(f"Scanning directories:")
for scan_dir in scan_dirs:
print(f" - {scan_dir.relative_to(repo_root)}")
except RuntimeError as e:
print(f"Error: {e}", file=sys.stderr)
sys.exit(1)
print("\nScanning for raw includes...")
# Find all #include directives
raw_includes: List[Tuple[str, str]] = []
rawincludes_file = results_dir / "rawincludes.txt"
    # Write each entry to rawincludes.txt as we go; the (file, include) pairs are also kept in memory for the levelization pass below
with open(rawincludes_file, "w", buffering=8192) as raw_f:
for dir_path in scan_dirs:
print(f" Scanning {dir_path.relative_to(repo_root)}...")
for file_path in dir_path.rglob("*"):
if not file_path.is_file():
continue
try:
rel_path_str = str(file_path.relative_to(repo_root))
# Read file with larger buffer for better performance
with open(
file_path,
"r",
encoding="utf-8",
errors="ignore",
buffering=8192,
) as f:
for line in f:
# Quick check before regex
if "#include" not in line or "boost" in line:
continue
if INCLUDE_PATTERN.match(line):
line_stripped = line.strip()
entry = f"{rel_path_str}:{line_stripped}\n"
print(entry, end="")
raw_f.write(entry)
raw_includes.append((rel_path_str, line_stripped))
except Exception as e:
print(f"Error reading {file_path}: {e}", file=sys.stderr)
# Build levelization paths and count directly (no need to sort first)
print("Build levelization paths")
path_counts: Dict[Tuple[str, str], int] = defaultdict(int)
for file_path, include_line in raw_includes:
level = get_level(file_path)
include_level = extract_include_level(include_line)
if include_level and level != include_level:
path_counts[(level, include_level)] += 1
# Sort and deduplicate paths (using dictionary order like bash 'sort -d')
print("Sort and deduplicate paths")
paths_file = results_dir / "paths.txt"
with open(paths_file, "w") as f:
# Sort using dictionary order: only alphanumeric and spaces matter
sorted_items = sorted(
path_counts.items(),
key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
)
for (level, include_level), count in sorted_items:
line = f"{count:7} {level} {include_level}\n"
print(line.rstrip())
f.write(line)
# Split into flat-file database
print("Split into flat-file database")
includes_dir = results_dir / "includes"
included_by_dir = results_dir / "included_by"
includes_dir.mkdir()
included_by_dir.mkdir()
# Batch writes by grouping data first to avoid repeated file opens
includes_data: Dict[str, List[Tuple[str, int]]] = defaultdict(list)
included_by_data: Dict[str, List[Tuple[str, int]]] = defaultdict(list)
# Process in sorted order to match bash script behavior (dictionary order)
sorted_items = sorted(
path_counts.items(),
key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
)
for (level, include_level), count in sorted_items:
includes_data[level].append((include_level, count))
included_by_data[include_level].append((level, count))
# Write all includes files in sorted order (dictionary order)
for level in sorted(includes_data.keys(), key=dictionary_sort_key):
entries = includes_data[level]
with open(includes_dir / level, "w") as f:
for include_level, count in entries:
line = f"{include_level} {count}\n"
print(line.rstrip())
f.write(line)
# Write all included_by files in sorted order (dictionary order)
for include_level in sorted(included_by_data.keys(), key=dictionary_sort_key):
entries = included_by_data[include_level]
with open(included_by_dir / include_level, "w") as f:
for level, count in entries:
line = f"{level} {count}\n"
print(line.rstrip())
f.write(line)
# Search for loops
print("Search for loops")
loops_file = results_dir / "loops.txt"
ordering_file = results_dir / "ordering.txt"
loops_found: Set[Tuple[str, str]] = set()
# Pre-load all include files into memory to avoid repeated I/O
# This is the biggest optimization - we were reading files repeatedly in nested loops
# Use list of tuples to preserve file order
includes_cache: Dict[str, List[Tuple[str, int]]] = {}
includes_lookup: Dict[str, Dict[str, int]] = {} # For fast lookup
# Note: bash script uses 'for source in *' which uses standard glob sorting,
# NOT dictionary order. So we use standard sorted() here, not dictionary_sort_key.
for include_file in sorted(includes_dir.iterdir(), key=lambda p: p.name):
if not include_file.is_file():
continue
includes_cache[include_file.name] = []
includes_lookup[include_file.name] = {}
with open(include_file, "r") as f:
for line in f:
parts = line.strip().split()
if len(parts) >= 2:
include_name = parts[0]
include_count = int(parts[1])
includes_cache[include_file.name].append(
(include_name, include_count)
)
includes_lookup[include_file.name][include_name] = include_count
with open(loops_file, "w", buffering=8192) as loops_f, open(
ordering_file, "w", buffering=8192
) as ordering_f:
# Use standard sorting to match bash glob expansion 'for source in *'
for source in sorted(includes_cache.keys()):
source_includes = includes_cache[source]
for include, include_freq in source_includes:
# Check if include file exists and references source
if include not in includes_lookup:
continue
source_freq = includes_lookup[include].get(source)
if source_freq is not None:
# Found a loop
loop_key = tuple(sorted([source, include]))
if loop_key in loops_found:
continue
loops_found.add(loop_key)
loops_f.write(f"Loop: {source} {include}\n")
# If the counts are close, indicate that the two modules are
# on the same level, though they shouldn't be
diff = include_freq - source_freq
if diff > 3:
loops_f.write(f" {source} > {include}\n\n")
elif diff < -3:
loops_f.write(f" {include} > {source}\n\n")
elif source_freq == include_freq:
loops_f.write(f" {include} == {source}\n\n")
else:
loops_f.write(f" {include} ~= {source}\n\n")
else:
ordering_f.write(f"{source} > {include}\n")
# Print results
print("\nOrdering:")
with open(ordering_file, "r") as f:
print(f.read(), end="")
print("\nLoops:")
with open(loops_file, "r") as f:
print(f.read(), end="")
if __name__ == "__main__":
main()
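As a quick sanity check, the snippet below evaluates the helpers defined above against the examples from their docstrings, and mirrors the loop-classification thresholds in `main()` (a count difference greater than 3 in either direction is reported as a strict ordering; otherwise the two modules are flagged as being on roughly the same level). It is illustrative only and not part of the script.

```python
assert get_level("src/xrpld/app/main.cpp") == "xrpld.app"
assert get_level("include/xrpl/basics/base_uint.h") == "xrpl.basics"
assert extract_include_level("#include <xrpl/basics/base_uint.h>") == "xrpl.basics"
assert extract_include_level('#include "xrpld/app/main/Application.h"') == "xrpld.app"

# Loop classification when two modules include each other: a difference above 3
# suggests one module sits above the other, equal counts are reported as "==",
# and anything else as "~=".
for include_freq, source_freq, expected in [(10, 2, ">"), (5, 5, "=="), (4, 2, "~=")]:
    diff = include_freq - source_freq
    label = ">" if abs(diff) > 3 else ("==" if diff == 0 else "~=")
    assert label == expected
```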

View File

@@ -1,130 +0,0 @@
#!/bin/bash
# Usage: generate.sh
# This script takes no parameters, reads no environment variables,
# and can be run from any directory, as long as it is in the expected
# location in the repo.
pushd $( dirname $0 )
if [ -v PS1 ]
then
# if the shell is interactive, clean up any flotsam before analyzing
git clean -ix
fi
# Ensure all sorting is ASCII-order consistently across platforms.
export LANG=C
rm -rfv results
mkdir results
includes="$( pwd )/results/rawincludes.txt"
pushd ../../..
echo Raw includes:
grep -r '^[ ]*#include.*/.*\.h' include src | \
grep -v boost | tee ${includes}
popd
pushd results
oldifs=${IFS}
IFS=:
mkdir includes
mkdir included_by
echo Build levelization paths
exec 3< ${includes} # open rawincludes.txt for input
while read -r -u 3 file include
do
level=$( echo ${file} | cut -d/ -f 2,3 )
# If the "level" indicates a file, cut off the filename
if [[ "${level##*.}" != "${level}" ]]
then
# Use the "toplevel" label as a workaround for `sort`
# inconsistencies between different utility versions
level="$( dirname ${level} )/toplevel"
fi
level=$( echo ${level} | tr '/' '.' )
includelevel=$( echo ${include} | sed 's/.*["<]//; s/[">].*//' | \
cut -d/ -f 1,2 )
if [[ "${includelevel##*.}" != "${includelevel}" ]]
then
# Use the "toplevel" label as a workaround for `sort`
# inconsistencies between different utility versions
includelevel="$( dirname ${includelevel} )/toplevel"
fi
includelevel=$( echo ${includelevel} | tr '/' '.' )
if [[ "$level" != "$includelevel" ]]
then
echo $level $includelevel | tee -a paths.txt
fi
done
echo Sort and deduplicate paths
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
mv sortedpaths.txt paths.txt
exec 3>&- #close fd 3
IFS=${oldifs}
unset oldifs
echo Split into flat-file database
exec 4<paths.txt # open paths.txt for input
while read -r -u 4 count level include
do
echo ${include} ${count} | tee -a includes/${level}
echo ${level} ${count} | tee -a included_by/${include}
done
exec 4>&- #close fd 4
loops="$( pwd )/loops.txt"
ordering="$( pwd )/ordering.txt"
pushd includes
echo Search for loops
# Redirect stdout to a file
exec 4>&1
exec 1>"${loops}"
for source in *
do
if [[ -f "$source" ]]
then
exec 5<"${source}" # open for input
while read -r -u 5 include includefreq
do
if [[ -f $include ]]
then
if grep -q -w $source $include
then
if grep -q -w "Loop: $include $source" "${loops}"
then
continue
fi
sourcefreq=$( grep -w $source $include | cut -d\ -f2 )
echo "Loop: $source $include"
# If the counts are close, indicate that the two modules are
# on the same level, though they shouldn't be
if [[ $(( $includefreq - $sourcefreq )) -gt 3 ]]
then
echo -e " $source > $include\n"
elif [[ $(( $sourcefreq - $includefreq )) -gt 3 ]]
then
echo -e " $include > $source\n"
elif [[ $sourcefreq -eq $includefreq ]]
then
echo -e " $include == $source\n"
else
echo -e " $include ~= $source\n"
fi
else
echo "$source > $include" >> "${ordering}"
fi
fi
done
exec 5>&- #close fd 5
fi
done
exec 1>&4 #close fd 1
exec 4>&- #close fd 4
cat "${ordering}"
cat "${loops}"
popd
popd
popd

View File

@@ -153,6 +153,7 @@ tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.net
xrpl.core > xrpl.basics
xrpl.core > xrpl.json
xrpl.core > xrpl.ledger
xrpl.json > xrpl.basics
xrpl.ledger > xrpl.basics
xrpl.ledger > xrpl.protocol

View File

@@ -51,22 +51,20 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
# Only generate a subset of configurations in PRs.
if not all:
# Debian:
# - Bookworm using GCC 13: Release and Unity on linux/amd64, set
# the reference fee to 500.
# - Bookworm using GCC 15: Debug and no Unity on linux/amd64, enable
# code coverage (which will be done below).
# - Bookworm using Clang 16: Debug and no Unity on linux/arm64,
# enable voidstar.
# - Bookworm using Clang 17: Release and no Unity on linux/amd64,
# set the reference fee to 1000.
# - Bookworm using Clang 20: Debug and Unity on linux/amd64.
# - Bookworm using GCC 13: Release on linux/amd64, set the reference
# fee to 500.
# - Bookworm using GCC 15: Debug on linux/amd64, enable code
# coverage (which will be done below).
# - Bookworm using Clang 16: Debug on linux/arm64, enable voidstar.
# - Bookworm using Clang 17: Release on linux/amd64, set the
# reference fee to 1000.
# - Bookworm using Clang 20: Debug on linux/amd64.
if os["distro_name"] == "debian":
skip = True
if os["distro_version"] == "bookworm":
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-13"
and build_type == "Release"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "linux/amd64"
):
cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}"
@@ -74,14 +72,12 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
and build_type == "Debug"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "linux/amd64"
):
skip = False
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-16"
and build_type == "Debug"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "linux/arm64"
):
cmake_args = f"-Dvoidstar=ON {cmake_args}"
@@ -89,7 +85,6 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-17"
and build_type == "Release"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "linux/amd64"
):
cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=1000 {cmake_args}"
@@ -97,7 +92,6 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
and build_type == "Debug"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "linux/amd64"
):
skip = False
@@ -105,15 +99,14 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
continue
# RHEL:
# - 9 using GCC 12: Debug and Unity on linux/amd64.
# - 10 using Clang: Release and no Unity on linux/amd64.
# - 9 using GCC 12: Debug on linux/amd64.
# - 10 using Clang: Release on linux/amd64.
if os["distro_name"] == "rhel":
skip = True
if os["distro_version"] == "9":
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
and build_type == "Debug"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "linux/amd64"
):
skip = False
@@ -121,7 +114,6 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-any"
and build_type == "Release"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "linux/amd64"
):
skip = False
@@ -129,17 +121,16 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
continue
# Ubuntu:
# - Jammy using GCC 12: Debug and no Unity on linux/arm64.
# - Noble using GCC 14: Release and Unity on linux/amd64.
# - Noble using Clang 18: Debug and no Unity on linux/amd64.
# - Noble using Clang 19: Release and Unity on linux/arm64.
# - Jammy using GCC 12: Debug on linux/arm64.
# - Noble using GCC 14: Release on linux/amd64.
# - Noble using Clang 18: Debug on linux/amd64.
# - Noble using Clang 19: Release on linux/arm64.
if os["distro_name"] == "ubuntu":
skip = True
if os["distro_version"] == "jammy":
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
and build_type == "Debug"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "linux/arm64"
):
skip = False
@@ -147,21 +138,18 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-14"
and build_type == "Release"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "linux/amd64"
):
skip = False
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-18"
and build_type == "Debug"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "linux/amd64"
):
skip = False
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-19"
and build_type == "Release"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "linux/arm64"
):
skip = False
@@ -169,20 +157,16 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
continue
# MacOS:
# - Debug and no Unity on macos/arm64.
# - Debug on macos/arm64.
if os["distro_name"] == "macos" and not (
build_type == "Debug"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "macos/arm64"
build_type == "Debug" and architecture["platform"] == "macos/arm64"
):
continue
# Windows:
# - Release and Unity on windows/amd64.
# - Release on windows/amd64.
if os["distro_name"] == "windows" and not (
build_type == "Release"
and "-Dunity=ON" in cmake_args
and architecture["platform"] == "windows/amd64"
build_type == "Release" and architecture["platform"] == "windows/amd64"
):
continue
@@ -209,18 +193,17 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
):
continue
# Enable code coverage for Debian Bookworm using GCC 15 in Debug and no
# Unity on linux/amd64
# Enable code coverage for Debian Bookworm using GCC 15 in Debug on
# linux/amd64
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
and build_type == "Debug"
and "-Dunity=OFF" in cmake_args
and architecture["platform"] == "linux/amd64"
):
cmake_args = f"-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}"
# Generate a unique name for the configuration, e.g. macos-arm64-debug
# or debian-bookworm-gcc-12-amd64-release-unity.
# or debian-bookworm-gcc-12-amd64-release.
config_name = os["distro_name"]
if (n := os["distro_version"]) != "":
config_name += f"-{n}"
@@ -234,8 +217,6 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
config_name += f"-{build_type.lower()}"
if "-Dcoverage=ON" in cmake_args:
config_name += "-coverage"
if "-Dunity=ON" in cmake_args:
config_name += "-unity"
# Add the configuration to the list, with the most unique fields first,
# so that they are easier to identify in the GitHub Actions UI, as long
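As a rough illustration of the naming scheme mentioned in the comment above, the hypothetical helper below assembles names like `debian-bookworm-gcc-12-amd64-release` or `macos-arm64-debug`. It is not the actual code, and with unity builds removed there is no longer a `-unity` suffix.

```python
def make_config_name(distro: str, version: str, compiler: str,
                     compiler_version: str, arch: str, build_type: str,
                     coverage: bool = False) -> str:
    # Hypothetical approximation of how the configuration name is assembled.
    parts = [distro]
    if version:
        parts.append(version)
    if compiler:
        parts.append(f"{compiler}-{compiler_version}")
    parts += [arch, build_type.lower()]
    if coverage:
        parts.append("coverage")
    return "-".join(parts)

assert make_config_name("debian", "bookworm", "gcc", "12", "amd64", "Release") \
    == "debian-bookworm-gcc-12-amd64-release"
assert make_config_name("macos", "", "", "", "arm64", "Debug") == "macos-arm64-debug"
```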

View File

@@ -208,5 +208,5 @@
}
],
"build_type": ["Debug", "Release"],
"cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
"cmake_args": [""]
}

View File

@@ -15,8 +15,5 @@
}
],
"build_type": ["Debug", "Release"],
"cmake_args": [
"-Dunity=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5",
"-Dunity=ON -DCMAKE_POLICY_VERSION_MINIMUM=3.5"
]
"cmake_args": ["-DCMAKE_POLICY_VERSION_MINIMUM=3.5"]
}

View File

@@ -15,5 +15,5 @@
}
],
"build_type": ["Debug", "Release"],
"cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
"cmake_args": [""]
}

View File

@@ -20,7 +20,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Check levelization
run: .github/scripts/levelization/generate.sh
run: python .github/scripts/levelization/generate.py
- name: Check for differences
env:
MESSAGE: |
@@ -32,7 +32,7 @@ jobs:
removed from loops.txt, it's probably an improvement, while if
something was added, it's probably a regression.
Run '.github/scripts/levelization/generate.sh' in your repo, commit
Run '.github/scripts/levelization/generate.py' in your repo, commit
and push the changes. See .github/scripts/levelization/README.md for
more info.
run: |

3
.gitignore vendored
View File

@@ -71,3 +71,6 @@ DerivedData
/.augment
/.claude
/CLAUDE.md
# Python
__pycache__

View File

@@ -368,6 +368,36 @@ The workaround for this error is to add two lines to your profile:
tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
```
### Set Up Ccache
To speed up repeated compilations, we recommend that you install
[ccache](https://ccache.dev), a tool that wraps your compiler so that it can
cache build objects locally.
#### Linux
You can install it using the package manager, e.g. `sudo apt install ccache`
(Ubuntu) or `sudo dnf install ccache` (RHEL).
#### macOS
You can install it using Homebrew, i.e. `brew install ccache`.
#### Windows
You can install it using Chocolatey, i.e. `choco install ccache`. If you already
have Ccache installed, then `choco upgrade ccache` will update it to the latest
version. However, if you see an error such as:
```
terminate called after throwing an instance of 'std::bad_alloc'
what(): std::bad_alloc
C:\Program Files\Microsoft Visual Studio\2022\Community\MSBuild\Microsoft\VC\v170\Microsoft.CppCommon.targets(617,5): error MSB6006: "cl.exe" exited with code 3.
```
then please install a specific version of Ccache that we know works, via: `choco
install ccache --version 4.11.3 --allow-downgrade`.
### Build and Test
1. Create a build directory and move into it.
@@ -545,16 +575,10 @@ See [Sanitizers docs](./docs/build/sanitizers.md) for more details.
| `assert` | OFF | Enable assertions. |
| `coverage` | OFF | Prepare the coverage report. |
| `tests` | OFF | Build tests. |
| `unity` | OFF | Configure a unity build. |
| `xrpld` | OFF | Build the xrpld application, and not just the libxrpl library. |
| `werr` | OFF | Treat compilation warnings as errors |
| `wextra` | OFF | Enable additional compilation warnings |
[Unity builds][5] may be faster for the first build
(at the cost of much more memory) since they concatenate sources into fewer
translation units. Non-unity builds may be faster for incremental builds,
and can be helpful for detecting `#include` omissions.
## Troubleshooting
### Conan
@@ -621,7 +645,6 @@ If you want to experiment with a new package, follow these steps:
[1]: https://github.com/conan-io/conan-center-index/issues/13168
[2]: https://en.cppreference.com/w/cpp/compiler_support/20
[3]: https://docs.conan.io/en/latest/getting_started.html
[5]: https://en.wikipedia.org/wiki/Unity_build
[6]: https://github.com/boostorg/beast/issues/2648
[7]: https://github.com/boostorg/beast/issues/2661
[gcovr]: https://gcovr.com/en/stable/getting-started.html

View File

@@ -9,8 +9,5 @@ function (xrpl_add_test name)
isolate_headers(${target} "${CMAKE_SOURCE_DIR}" "${CMAKE_SOURCE_DIR}/tests/${name}" PRIVATE)
# Make sure the test isn't optimized away in unity builds
set_target_properties(${target} PROPERTIES UNITY_BUILD_MODE GROUP UNITY_BUILD_BATCH_SIZE 0) # Adjust as needed
add_test(NAME ${target} COMMAND ${target})
endfunction ()

View File

@@ -4,12 +4,7 @@
include(target_protobuf_sources)
# Protocol buffers cannot participate in a unity build,
# because all the generated sources
# define a bunch of `static const` variables with the same names,
# so we just build them as a separate library.
add_library(xrpl.libpb)
set_target_properties(xrpl.libpb PROPERTIES UNITY_BUILD OFF)
target_protobuf_sources(xrpl.libpb xrpl/proto LANGUAGE cpp IMPORT_DIRS include/xrpl/proto
PROTOS include/xrpl/proto/xrpl.proto)
@@ -160,12 +155,4 @@ if (xrpld)
# antithesis_instrumentation.h, which is not exported as INTERFACE
target_include_directories(xrpld PRIVATE ${CMAKE_SOURCE_DIR}/external/antithesis-sdk)
endif ()
# any files that don't play well with unity should be added here
if (tests)
set_source_files_properties(
# these two seem to produce conflicts in beast teardown template methods
src/test/rpc/ValidatorRPC_test.cpp src/test/ledger/Invariants_test.cpp PROPERTIES SKIP_UNITY_BUILD_INCLUSION
TRUE)
endif ()
endif ()

View File

@@ -30,14 +30,6 @@ if (tests)
endif ()
endif ()
option(unity "Creates a build using UNITY support in cmake." OFF)
if (unity)
if (NOT is_ci)
set(CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
endif ()
set(CMAKE_UNITY_BUILD ON CACHE BOOL "Do a unity build")
endif ()
if (is_clang AND is_linux)
option(voidstar "Enable Antithesis instrumentation." OFF)
endif ()

View File

@@ -23,7 +23,6 @@ class Xrpl(ConanFile):
"shared": [True, False],
"static": [True, False],
"tests": [True, False],
"unity": [True, False],
"xrpld": [True, False],
}
@@ -55,7 +54,6 @@ class Xrpl(ConanFile):
"shared": False,
"static": True,
"tests": False,
"unity": False,
"xrpld": False,
"date/*:header_only": True,
"ed25519/*:shared": False,
@@ -168,7 +166,6 @@ class Xrpl(ConanFile):
tc.variables["rocksdb"] = self.options.rocksdb
tc.variables["BUILD_SHARED_LIBS"] = self.options.shared
tc.variables["static"] = self.options.static
tc.variables["unity"] = self.options.unity
tc.variables["xrpld"] = self.options.xrpld
tc.generate()

View File

@@ -1,5 +1,5 @@
#ifndef XRPL_UNITY_ROCKSDB_H_INCLUDED
#define XRPL_UNITY_ROCKSDB_H_INCLUDED
#ifndef XRPL_BASICS_ROCKSDB_H_INCLUDED
#define XRPL_BASICS_ROCKSDB_H_INCLUDED
#if XRPL_ROCKSDB_AVAILABLE
// #include <rocksdb2/port/port_posix.h>

View File

@@ -0,0 +1,202 @@
#ifndef XRPL_CORE_SERVICEREGISTRY_H_INCLUDED
#define XRPL_CORE_SERVICEREGISTRY_H_INCLUDED
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/SHAMapHash.h>
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/ledger/CachedSLEs.h>
namespace xrpl {
// Forward declarations
namespace NodeStore {
class Database;
}
namespace Resource {
class Manager;
}
namespace perf {
class PerfLog;
}
class AcceptedLedger;
class AmendmentTable;
class Cluster;
class CollectorManager;
class DatabaseCon;
class Family;
class HashRouter;
class InboundLedgers;
class InboundTransactions;
class JobQueue;
class LedgerCleaner;
class LedgerMaster;
class LedgerReplayer;
class LoadFeeTrack;
class LoadManager;
class ManifestCache;
class NetworkOPs;
class OpenLedger;
class OrderBookDB;
class Overlay;
class PathRequests;
class PeerReservationTable;
class PendingSaves;
class RelationalDatabase;
class ServerHandler;
class SHAMapStore;
class TimeKeeper;
class TransactionMaster;
class TxQ;
class ValidatorList;
class ValidatorSite;
template <class Adaptor>
class Validations;
class RCLValidationsAdaptor;
using RCLValidations = Validations<RCLValidationsAdaptor>;
using NodeCache = TaggedCache<SHAMapHash, Blob>;
/** Service registry for dependency injection.
This abstract interface provides access to various services and components
used throughout the application. It separates the service locator pattern
from the Application lifecycle management.
Components that need access to services can hold a reference to
ServiceRegistry rather than Application when they only need service
access and not lifecycle management.
*/
class ServiceRegistry
{
public:
ServiceRegistry() = default;
virtual ~ServiceRegistry() = default;
// Core infrastructure services
virtual CollectorManager&
getCollectorManager() = 0;
virtual Family&
getNodeFamily() = 0;
virtual TimeKeeper&
timeKeeper() = 0;
virtual JobQueue&
getJobQueue() = 0;
virtual NodeCache&
getTempNodeCache() = 0;
virtual CachedSLEs&
cachedSLEs() = 0;
// Protocol and validation services
virtual AmendmentTable&
getAmendmentTable() = 0;
virtual HashRouter&
getHashRouter() = 0;
virtual LoadFeeTrack&
getFeeTrack() = 0;
virtual LoadManager&
getLoadManager() = 0;
virtual RCLValidations&
getValidations() = 0;
virtual ValidatorList&
validators() = 0;
virtual ValidatorSite&
validatorSites() = 0;
virtual ManifestCache&
validatorManifests() = 0;
virtual ManifestCache&
publisherManifests() = 0;
// Network services
virtual Overlay&
overlay() = 0;
virtual Cluster&
cluster() = 0;
virtual PeerReservationTable&
peerReservations() = 0;
virtual Resource::Manager&
getResourceManager() = 0;
// Storage services
virtual NodeStore::Database&
getNodeStore() = 0;
virtual SHAMapStore&
getSHAMapStore() = 0;
virtual RelationalDatabase&
getRelationalDatabase() = 0;
// Ledger services
virtual InboundLedgers&
getInboundLedgers() = 0;
virtual InboundTransactions&
getInboundTransactions() = 0;
virtual TaggedCache<uint256, AcceptedLedger>&
getAcceptedLedgerCache() = 0;
virtual LedgerMaster&
getLedgerMaster() = 0;
virtual LedgerCleaner&
getLedgerCleaner() = 0;
virtual LedgerReplayer&
getLedgerReplayer() = 0;
virtual PendingSaves&
pendingSaves() = 0;
virtual OpenLedger&
openLedger() = 0;
virtual OpenLedger const&
openLedger() const = 0;
// Transaction and operation services
virtual NetworkOPs&
getOPs() = 0;
virtual OrderBookDB&
getOrderBookDB() = 0;
virtual TransactionMaster&
getMasterTransaction() = 0;
virtual TxQ&
getTxQ() = 0;
virtual PathRequests&
getPathRequests() = 0;
// Server services
virtual ServerHandler&
getServerHandler() = 0;
virtual perf::PerfLog&
getPerfLog() = 0;
};
} // namespace xrpl
#endif

View File

@@ -85,7 +85,8 @@ registerSSLCerts(boost::asio::ssl::context& ctx, boost::system::error_code& ec,
// There is a very unpleasant interaction between <wincrypt> and
// openssl x509 types (namely the former has macros that stomp
// on the latter), these undefs allow this TU to be safely used in
// unity builds without messing up subsequent TUs.
// unity builds without messing up subsequent TUs. Although we
// no longer use unity builds, leaving the undefs here does no harm.
#if BOOST_OS_WINDOWS
#undef X509_NAME
#undef X509_EXTENSIONS

View File

@@ -6,6 +6,7 @@
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/beast/utility/PropertyStream.h>
#include <xrpl/core/ServiceRegistry.h>
#include <xrpl/protocol/Protocol.h>
#include <xrpl/shamap/TreeNodeCache.h>
@@ -91,7 +92,7 @@ class Validations;
class RCLValidationsAdaptor;
using RCLValidations = Validations<RCLValidationsAdaptor>;
class Application : public beast::PropertyStream::Source
class Application : public ServiceRegistry, public beast::PropertyStream::Source
{
public:
/* VFALCO NOTE
@@ -146,92 +147,12 @@ public:
virtual boost::asio::io_context&
getIOContext() = 0;
virtual CollectorManager&
getCollectorManager() = 0;
virtual Family&
getNodeFamily() = 0;
virtual TimeKeeper&
timeKeeper() = 0;
virtual JobQueue&
getJobQueue() = 0;
virtual NodeCache&
getTempNodeCache() = 0;
virtual CachedSLEs&
cachedSLEs() = 0;
virtual AmendmentTable&
getAmendmentTable() = 0;
virtual HashRouter&
getHashRouter() = 0;
virtual LoadFeeTrack&
getFeeTrack() = 0;
virtual LoadManager&
getLoadManager() = 0;
virtual Overlay&
overlay() = 0;
virtual TxQ&
getTxQ() = 0;
virtual ValidatorList&
validators() = 0;
virtual ValidatorSite&
validatorSites() = 0;
virtual ManifestCache&
validatorManifests() = 0;
virtual ManifestCache&
publisherManifests() = 0;
virtual Cluster&
cluster() = 0;
virtual PeerReservationTable&
peerReservations() = 0;
virtual RCLValidations&
getValidations() = 0;
virtual NodeStore::Database&
getNodeStore() = 0;
virtual InboundLedgers&
getInboundLedgers() = 0;
virtual InboundTransactions&
getInboundTransactions() = 0;
virtual TaggedCache<uint256, AcceptedLedger>&
getAcceptedLedgerCache() = 0;
virtual LedgerMaster&
getLedgerMaster() = 0;
virtual LedgerCleaner&
getLedgerCleaner() = 0;
virtual LedgerReplayer&
getLedgerReplayer() = 0;
virtual NetworkOPs&
getOPs() = 0;
virtual OrderBookDB&
getOrderBookDB() = 0;
virtual ServerHandler&
getServerHandler() = 0;
virtual TransactionMaster&
getMasterTransaction() = 0;
virtual perf::PerfLog&
getPerfLog() = 0;
virtual std::pair<PublicKey, SecretKey> const&
nodeIdentity() = 0;
virtual std::optional<PublicKey const>
getValidationPublicKey() const = 0;
virtual Resource::Manager&
getResourceManager() = 0;
virtual PathRequests&
getPathRequests() = 0;
virtual SHAMapStore&
getSHAMapStore() = 0;
virtual PendingSaves&
pendingSaves() = 0;
virtual OpenLedger&
openLedger() = 0;
virtual OpenLedger const&
openLedger() const = 0;
virtual RelationalDatabase&
getRelationalDatabase() = 0;
virtual std::chrono::milliseconds
getIOLatency() = 0;