Compare commits


3 Commits

Author SHA1 Message Date
Jingchen 058b38a488 Merge branch 'develop' into a1q123456/add-levelization-python-script 2026-03-06 11:40:39 +00:00
JCW f638cbae3e Fixed errors (Signed-off-by: JCW <a1q123456@users.noreply.github.com>) 2026-02-17 17:08:37 +00:00
JCW 94737f399a Replace levelization shell script with the python version to optimise the performance 2026-02-17 17:08:03 +00:00
18 changed files with 423 additions and 317 deletions

View File

@@ -1,98 +0,0 @@
# Custom CMake command definitions for gersemi formatting.
# These stubs teach gersemi the signatures of project-specific commands
# so it can format their invocations correctly.
function(git_branch branch_val)
endfunction()
function(isolate_headers target A B scope)
endfunction()
function(create_symbolic_link target link)
endfunction()
function(xrpl_add_test name)
endfunction()
macro(exclude_from_default target_)
endmacro()
macro(exclude_if_included target_)
endmacro()
function(target_protobuf_sources target prefix)
set(options APPEND_PATH DESCRIPTORS)
set(oneValueArgs
LANGUAGE
OUT_VAR
EXPORT_MACRO
TARGET
PROTOC_OUT_DIR
PLUGIN
PLUGIN_OPTIONS
PROTOC_EXE
)
set(multiValueArgs
PROTOS
IMPORT_DIRS
GENERATE_EXTENSIONS
PROTOC_OPTIONS
DEPENDENCIES
)
cmake_parse_arguments(
THIS_FUNCTION_PREFIX
"${options}"
"${oneValueArgs}"
"${multiValueArgs}"
${ARGN}
)
endfunction()
function(add_module parent name)
endfunction()
function(target_link_modules parent scope)
endfunction()
function(setup_target_for_coverage_gcovr)
set(options NONE)
set(oneValueArgs BASE_DIRECTORY NAME FORMAT)
set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
cmake_parse_arguments(
THIS_FUNCTION_PREFIX
"${options}"
"${oneValueArgs}"
"${multiValueArgs}"
${ARGN}
)
endfunction()
function(add_code_coverage_to_target name scope)
endfunction()
function(verbose_find_path variable name)
set(options
NO_CACHE
REQUIRED
OPTIONAL
NO_DEFAULT_PATH
NO_PACKAGE_ROOT_PATH
NO_CMAKE_PATH
NO_CMAKE_ENVIRONMENT_PATH
NO_SYSTEM_ENVIRONMENT_PATH
NO_CMAKE_SYSTEM_PATH
NO_CMAKE_INSTALL_PREFIX
CMAKE_FIND_ROOT_PATH_BOTH
ONLY_CMAKE_FIND_ROOT_PATH
NO_CMAKE_FIND_ROOT_PATH
)
set(oneValueArgs REGISTRY_VIEW VALIDATOR DOC)
set(multiValueArgs NAMES HINTS PATHS PATH_SUFFIXES)
cmake_parse_arguments(
THIS_FUNCTION_PREFIX
"${options}"
"${oneValueArgs}"
"${multiValueArgs}"
${ARGN}
)
endfunction()

View File

@@ -1 +0,0 @@
definitions: [.gersemi]

View File

@@ -70,7 +70,7 @@ that `test` code should _never_ be included in `xrpl` or `xrpld` code.)
## Validation
The [levelization](generate.sh) script takes no parameters,
The [levelization](generate.py) script takes no parameters,
reads no environment variables, and can be run from any directory,
as long as it is in the expected location in the rippled repo.
It can be run at any time from within a checked out repo, and will
@@ -104,7 +104,7 @@ It generates many files of [results](results):
Github Actions workflow to test that levelization loops haven't
changed. Unfortunately, if changes are detected, it can't tell if
they are improvements or not, so if you have resolved any issues or
done anything else to improve levelization, run `levelization.sh`,
done anything else to improve levelization, run `generate.py`,
and commit the updated results.
The `loops.txt` and `ordering.txt` files relate the modules
@@ -128,7 +128,7 @@ The committed files hide the detailed values intentionally, to
prevent false alarms and merging issues, and because it's easy to
get those details locally.
1. Run `levelization.sh`
1. Run `generate.py`
2. Grep the modules in `paths.txt`.
- For example, if a cycle is found `A ~= B`, simply `grep -w
A .github/scripts/levelization/results/paths.txt | grep -w B`
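
As a minimal illustration (not part of this diff; the module names are placeholders), the same lookup can be scripted in Python:

import re
from pathlib import Path

# Whole-word match on both modules, like `grep -w A ... | grep -w B`.
paths = Path(".github/scripts/levelization/results/paths.txt")
a, b = "xrpld.app", "xrpl.basics"  # hypothetical cycle members
for line in paths.read_text().splitlines():
    if re.search(rf"\b{re.escape(a)}\b", line) and re.search(rf"\b{re.escape(b)}\b", line):
        print(line)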

369
.github/scripts/levelization/generate.py vendored Normal file
View File

@@ -0,0 +1,369 @@
#!/usr/bin/env python3
"""
Usage: generate.py
This script takes no parameters, and can be run from any directory,
as long as it is in the expected location in the repo.
"""
import os
import re
import subprocess
import sys
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple, Set, Optional
# Compile regex patterns once at module level
INCLUDE_PATTERN = re.compile(r"^\s*#include.*/.*\.h")
INCLUDE_PATH_PATTERN = re.compile(r'[<"]([^>"]+)[>"]')
def dictionary_sort_key(s: str) -> str:
"""
Create a sort key that mimics 'sort -d' (dictionary order).
Dictionary order only considers blanks and alphanumeric characters.
This means punctuation like '.' is ignored during sorting.
"""
# Keep only alphanumeric characters and spaces
return "".join(c for c in s if c.isalnum() or c.isspace())
def get_level(file_path: str) -> str:
"""
Extract the level from a file path (second and third directory components).
Equivalent to bash: cut -d/ -f 2,3
Examples:
src/xrpld/app/main.cpp -> xrpld.app
src/libxrpl/protocol/STObject.cpp -> libxrpl.protocol
include/xrpl/basics/base_uint.h -> xrpl.basics
"""
parts = file_path.split("/")
# Get fields 2 and 3 (indices 1 and 2 in 0-based indexing)
if len(parts) >= 3:
level = f"{parts[1]}/{parts[2]}"
elif len(parts) >= 2:
level = f"{parts[1]}/toplevel"
else:
level = file_path
# If the "level" indicates a file, cut off the filename
if "." in level.split("/")[-1]: # Avoid Path object creation
# Use the "toplevel" label as a workaround for `sort`
# inconsistencies between different utility versions
level = level.rsplit("/", 1)[0] + "/toplevel"
return level.replace("/", ".")
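# Illustrative (not part of the commit): "src/xrpld/main.cpp" splits into
# ["src", "xrpld", "main.cpp"], giving "xrpld/main.cpp"; the filename check
# then rewrites it to "xrpld/toplevel", which is returned as "xrpld.toplevel".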
def extract_include_level(include_line: str) -> Optional[str]:
"""
Extract the include path from an #include directive.
Gets the first two directory components from the include path.
Equivalent to bash: cut -d/ -f 1,2
Examples:
#include <xrpl/basics/base_uint.h> -> xrpl.basics
#include "xrpld/app/main/Application.h" -> xrpld.app
"""
# Remove everything before the quote or angle bracket
match = INCLUDE_PATH_PATTERN.search(include_line)
if not match:
return None
include_path = match.group(1)
parts = include_path.split("/")
# Get first two fields (indices 0 and 1)
if len(parts) >= 2:
include_level = f"{parts[0]}/{parts[1]}"
else:
include_level = include_path
# If the "includelevel" indicates a file, cut off the filename
if "." in include_level.split("/")[-1]: # Avoid Path object creation
include_level = include_level.rsplit("/", 1)[0] + "/toplevel"
return include_level.replace("/", ".")
def find_repo_root(start_path: Path, depth_limit: int = 10) -> Path:
"""
Find the repository root by looking for .git directory or src/include folders.
Walks up the directory tree from the start path.
"""
current = start_path.resolve()
# Walk up the directory tree
for _ in range(depth_limit): # Limit search depth to prevent infinite loops
# Check if this directory has src or include folders
has_src = (current / "src").exists()
has_include = (current / "include").exists()
if has_src or has_include:
return current
# Check if this is a git repository root
if (current / ".git").exists():
# Check if it has src or include nearby
if has_src or has_include:
return current
# Move up one level
parent = current.parent
if parent == current: # Reached filesystem root
break
current = parent
# If we couldn't find it, raise an error
raise RuntimeError(
"Could not find repository root. "
"Expected to find a directory containing 'src' and/or 'include' folders."
)
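# Illustrative (not part of the commit): starting from
# .github/scripts/levelization, the walk ascends through scripts/ and
# .github/ until it reaches the repo root, which contains src/ and include/.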
def get_scan_directories(repo_root: Path) -> List[Path]:
"""
Get the list of directories to scan for include files.
Returns paths that actually exist.
"""
directories = []
for dir_name in ["include", "src"]:
dir_path = repo_root / dir_name
if dir_path.exists() and dir_path.is_dir():
directories.append(dir_path)
if not directories:
raise RuntimeError(f"No 'src' or 'include' directories found in {repo_root}")
return directories
def main():
# Change to the script's directory
script_dir = Path(__file__).parent.resolve()
os.chdir(script_dir)
# If the shell is interactive, clean up any flotsam before analyzing
# Match bash behavior: check if PS1 is set (indicates interactive shell)
# When running a script, PS1 is not set even if stdin/stdout are TTYs
if os.environ.get("PS1"):
try:
subprocess.run(["git", "clean", "-ix"], check=False, timeout=30)
except (subprocess.TimeoutExpired, KeyboardInterrupt):
print("Skipping git clean...")
except Exception:
# If git clean fails for any reason, just continue
pass
# Clean up and create results directory
results_dir = script_dir / "results"
if results_dir.exists():
import shutil
shutil.rmtree(results_dir)
results_dir.mkdir()
# Find the repository root by searching for src and include directories
try:
repo_root = find_repo_root(script_dir)
scan_dirs = get_scan_directories(repo_root)
print(f"Found repository root: {repo_root}")
print(f"Scanning directories:")
for scan_dir in scan_dirs:
print(f" - {scan_dir.relative_to(repo_root)}")
except RuntimeError as e:
print(f"Error: {e}", file=sys.stderr)
sys.exit(1)
print("\nScanning for raw includes...")
# Find all #include directives
raw_includes: List[Tuple[str, str]] = []
rawincludes_file = results_dir / "rawincludes.txt"
# Write to the file as we go; entries are also kept in memory for the later passes
with open(rawincludes_file, "w", buffering=8192) as raw_f:
for dir_path in scan_dirs:
print(f" Scanning {dir_path.relative_to(repo_root)}...")
for file_path in dir_path.rglob("*"):
if not file_path.is_file():
continue
try:
rel_path_str = str(file_path.relative_to(repo_root))
# Read file with larger buffer for better performance
with open(
file_path,
"r",
encoding="utf-8",
errors="ignore",
buffering=8192,
) as f:
for line in f:
# Quick check before regex
if "#include" not in line or "boost" in line:
continue
if INCLUDE_PATTERN.match(line):
line_stripped = line.strip()
entry = f"{rel_path_str}:{line_stripped}\n"
print(entry, end="")
raw_f.write(entry)
raw_includes.append((rel_path_str, line_stripped))
except Exception as e:
print(f"Error reading {file_path}: {e}", file=sys.stderr)
# Build levelization paths and count directly (no need to sort first)
print("Build levelization paths")
path_counts: Dict[Tuple[str, str], int] = defaultdict(int)
for file_path, include_line in raw_includes:
level = get_level(file_path)
include_level = extract_include_level(include_line)
if include_level and level != include_level:
path_counts[(level, include_level)] += 1
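# Illustrative (not part of the commit): an entry such as
# path_counts[("xrpld.app", "xrpl.basics")] == 42 would mean that files under
# src/xrpld/app included xrpl/basics headers 42 times (count hypothetical).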
# Sort and deduplicate paths (using dictionary order like bash 'sort -d')
print("Sort and deduplicate paths")
paths_file = results_dir / "paths.txt"
with open(paths_file, "w") as f:
# Sort using dictionary order: only alphanumeric and spaces matter
sorted_items = sorted(
path_counts.items(),
key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
)
for (level, include_level), count in sorted_items:
line = f"{count:7} {level} {include_level}\n"
print(line.rstrip())
f.write(line)
# Split into flat-file database
print("Split into flat-file database")
includes_dir = results_dir / "includes"
included_by_dir = results_dir / "included_by"
includes_dir.mkdir()
included_by_dir.mkdir()
# Batch writes by grouping data first to avoid repeated file opens
includes_data: Dict[str, List[Tuple[str, int]]] = defaultdict(list)
included_by_data: Dict[str, List[Tuple[str, int]]] = defaultdict(list)
# Process in sorted order to match bash script behavior (dictionary order)
sorted_items = sorted(
path_counts.items(),
key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
)
for (level, include_level), count in sorted_items:
includes_data[level].append((include_level, count))
included_by_data[include_level].append((level, count))
# Write all includes files in sorted order (dictionary order)
for level in sorted(includes_data.keys(), key=dictionary_sort_key):
entries = includes_data[level]
with open(includes_dir / level, "w") as f:
for include_level, count in entries:
line = f"{include_level} {count}\n"
print(line.rstrip())
f.write(line)
# Write all included_by files in sorted order (dictionary order)
for include_level in sorted(included_by_data.keys(), key=dictionary_sort_key):
entries = included_by_data[include_level]
with open(included_by_dir / include_level, "w") as f:
for level, count in entries:
line = f"{level} {count}\n"
print(line.rstrip())
f.write(line)
# Search for loops
print("Search for loops")
loops_file = results_dir / "loops.txt"
ordering_file = results_dir / "ordering.txt"
loops_found: Set[Tuple[str, str]] = set()
# Pre-load all include files into memory to avoid repeated I/O
# This is the biggest optimization: the shell version re-read these files inside nested loops
# Use list of tuples to preserve file order
includes_cache: Dict[str, List[Tuple[str, int]]] = {}
includes_lookup: Dict[str, Dict[str, int]] = {} # For fast lookup
# Note: bash script uses 'for source in *' which uses standard glob sorting,
# NOT dictionary order. So we use standard sorted() here, not dictionary_sort_key.
for include_file in sorted(includes_dir.iterdir(), key=lambda p: p.name):
if not include_file.is_file():
continue
includes_cache[include_file.name] = []
includes_lookup[include_file.name] = {}
with open(include_file, "r") as f:
for line in f:
parts = line.strip().split()
if len(parts) >= 2:
include_name = parts[0]
include_count = int(parts[1])
includes_cache[include_file.name].append(
(include_name, include_count)
)
includes_lookup[include_file.name][include_name] = include_count
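# Illustrative (not part of the commit): for an includes file "xrpld.app"
# containing the line "xrpl.basics 80", the two structures would hold
# includes_cache["xrpld.app"] == [("xrpl.basics", 80)] and
# includes_lookup["xrpld.app"] == {"xrpl.basics": 80} (values hypothetical).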
with open(loops_file, "w", buffering=8192) as loops_f, open(
ordering_file, "w", buffering=8192
) as ordering_f:
# Use standard sorting to match bash glob expansion 'for source in *'
for source in sorted(includes_cache.keys()):
source_includes = includes_cache[source]
for include, include_freq in source_includes:
# Check if include file exists and references source
if include not in includes_lookup:
continue
source_freq = includes_lookup[include].get(source)
if source_freq is not None:
# Found a loop
loop_key = tuple(sorted([source, include]))
if loop_key in loops_found:
continue
loops_found.add(loop_key)
loops_f.write(f"Loop: {source} {include}\n")
# If the counts are close, indicate that the two modules are
# on the same level, though they shouldn't be
diff = include_freq - source_freq
if diff > 3:
loops_f.write(f" {source} > {include}\n\n")
elif diff < -3:
loops_f.write(f" {include} > {source}\n\n")
elif source_freq == include_freq:
loops_f.write(f" {include} == {source}\n\n")
else:
loops_f.write(f" {include} ~= {source}\n\n")
else:
ordering_f.write(f"{source} > {include}\n")
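# Illustrative (not part of the commit): if xrpld.app includes xrpld.core
# headers 10 times (include_freq) while xrpld.core includes xrpld.app twice
# (source_freq), diff == 8 > 3 and "xrpld.app > xrpld.core" is recorded;
# equal counts yield "==", and counts within 3 of each other yield "~="
# (same level). Names and counts are hypothetical.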
# Print results
print("\nOrdering:")
with open(ordering_file, "r") as f:
print(f.read(), end="")
print("\nLoops:")
with open(loops_file, "r") as f:
print(f.read(), end="")
if __name__ == "__main__":
main()
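
As a rough sketch of how the generated results might be checked for drift (not part of this commit; the CI workflow further below effectively does the same with git diff):

import subprocess

# Regenerate the levelization results, then ask git whether the committed
# copies changed; a non-zero exit code from --exit-code means they did.
subprocess.run(["python", ".github/scripts/levelization/generate.py"], check=True)
diff = subprocess.run(
    ["git", "diff", "--exit-code", ".github/scripts/levelization/results"],
    capture_output=True,
    text=True,
)
if diff.returncode != 0:
    print("Levelization results changed:")
    print(diff.stdout)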

View File

@@ -1,130 +0,0 @@
#!/bin/bash
# Usage: generate.sh
# This script takes no parameters, reads no environment variables,
# and can be run from any directory, as long as it is in the expected
# location in the repo.
pushd $( dirname $0 )
if [ -v PS1 ]
then
# if the shell is interactive, clean up any flotsam before analyzing
git clean -ix
fi
# Ensure all sorting is ASCII-order consistently across platforms.
export LANG=C
rm -rfv results
mkdir results
includes="$( pwd )/results/rawincludes.txt"
pushd ../../..
echo Raw includes:
grep -r '^[ ]*#include.*/.*\.h' include src | \
grep -v boost | tee ${includes}
popd
pushd results
oldifs=${IFS}
IFS=:
mkdir includes
mkdir included_by
echo Build levelization paths
exec 3< ${includes} # open rawincludes.txt for input
while read -r -u 3 file include
do
level=$( echo ${file} | cut -d/ -f 2,3 )
# If the "level" indicates a file, cut off the filename
if [[ "${level##*.}" != "${level}" ]]
then
# Use the "toplevel" label as a workaround for `sort`
# inconsistencies between different utility versions
level="$( dirname ${level} )/toplevel"
fi
level=$( echo ${level} | tr '/' '.' )
includelevel=$( echo ${include} | sed 's/.*["<]//; s/[">].*//' | \
cut -d/ -f 1,2 )
if [[ "${includelevel##*.}" != "${includelevel}" ]]
then
# Use the "toplevel" label as a workaround for `sort`
# inconsistencies between different utility versions
includelevel="$( dirname ${includelevel} )/toplevel"
fi
includelevel=$( echo ${includelevel} | tr '/' '.' )
if [[ "$level" != "$includelevel" ]]
then
echo $level $includelevel | tee -a paths.txt
fi
done
echo Sort and deduplicate paths
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
mv sortedpaths.txt paths.txt
exec 3>&- #close fd 3
IFS=${oldifs}
unset oldifs
echo Split into flat-file database
exec 4<paths.txt # open paths.txt for input
while read -r -u 4 count level include
do
echo ${include} ${count} | tee -a includes/${level}
echo ${level} ${count} | tee -a included_by/${include}
done
exec 4>&- #close fd 4
loops="$( pwd )/loops.txt"
ordering="$( pwd )/ordering.txt"
pushd includes
echo Search for loops
# Redirect stdout to a file
exec 4>&1
exec 1>"${loops}"
for source in *
do
if [[ -f "$source" ]]
then
exec 5<"${source}" # open for input
while read -r -u 5 include includefreq
do
if [[ -f $include ]]
then
if grep -q -w $source $include
then
if grep -q -w "Loop: $include $source" "${loops}"
then
continue
fi
sourcefreq=$( grep -w $source $include | cut -d\ -f2 )
echo "Loop: $source $include"
# If the counts are close, indicate that the two modules are
# on the same level, though they shouldn't be
if [[ $(( $includefreq - $sourcefreq )) -gt 3 ]]
then
echo -e " $source > $include\n"
elif [[ $(( $sourcefreq - $includefreq )) -gt 3 ]]
then
echo -e " $include > $source\n"
elif [[ $sourcefreq -eq $includefreq ]]
then
echo -e " $include == $source\n"
else
echo -e " $include ~= $source\n"
fi
else
echo "$source > $include" >> "${ordering}"
fi
fi
done
exec 5>&- #close fd 5
fi
done
exec 1>&4 #close fd 1
exec 4>&- #close fd 4
cat "${ordering}"
cat "${loops}"
popd
popd
popd

View File

@@ -134,7 +134,6 @@ test.peerfinder > xrpld.core
test.peerfinder > xrpld.peerfinder
test.peerfinder > xrpl.protocol
test.protocol > test.toplevel
test.protocol > test.unit_test
test.protocol > xrpl.basics
test.protocol > xrpl.json
test.protocol > xrpl.protocol
@@ -172,7 +171,6 @@ test.shamap > xrpl.shamap
test.toplevel > test.csf
test.toplevel > xrpl.json
test.unit_test > xrpl.basics
test.unit_test > xrpl.protocol
tests.libxrpl > xrpl.basics
tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.net

View File

@@ -20,7 +20,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Check levelization
run: .github/scripts/levelization/generate.sh
run: python .github/scripts/levelization/generate.py
- name: Check for differences
env:
MESSAGE: |
@@ -32,7 +32,7 @@ jobs:
removed from loops.txt, it's probably an improvement, while if
something was added, it's probably a regression.
Run '.github/scripts/levelization/generate.sh' in your repo, commit
Run '.github/scripts/levelization/generate.py' in your repo, commit
and push the changes. See .github/scripts/levelization/README.md for
more info.
run: |

3
.gitignore vendored
View File

@@ -75,6 +75,9 @@ DerivedData
/.claude
/CLAUDE.md
# Python
__pycache__
# Direnv's directory
/.direnv

View File

@@ -11,12 +11,7 @@ function(xrpl_add_test name)
)
add_executable(${target} ${ARGN} ${sources})
isolate_headers(
${target}
"${CMAKE_SOURCE_DIR}"
"${CMAKE_SOURCE_DIR}/tests/${name}"
PRIVATE
)
isolate_headers(${target} "${CMAKE_SOURCE_DIR}" "${CMAKE_SOURCE_DIR}/tests/${name}" PRIVATE)
add_test(NAME ${target} COMMAND ${target})
endfunction()

View File

@@ -10,25 +10,16 @@ include(target_protobuf_sources)
# so we just build them as a separate library.
add_library(xrpl.libpb)
set_target_properties(xrpl.libpb PROPERTIES UNITY_BUILD OFF)
target_protobuf_sources(
xrpl.libpb
xrpl/proto
LANGUAGE cpp
IMPORT_DIRS include/xrpl/proto
PROTOS include/xrpl/proto/xrpl.proto
target_protobuf_sources(xrpl.libpb xrpl/proto LANGUAGE cpp IMPORT_DIRS include/xrpl/proto
PROTOS include/xrpl/proto/xrpl.proto
)
file(GLOB_RECURSE protos "include/xrpl/proto/org/*.proto")
target_protobuf_sources(
xrpl.libpb
xrpl/proto
LANGUAGE cpp
IMPORT_DIRS include/xrpl/proto
PROTOS "${protos}"
target_protobuf_sources(xrpl.libpb xrpl/proto LANGUAGE cpp IMPORT_DIRS include/xrpl/proto
PROTOS "${protos}"
)
target_protobuf_sources(
xrpl.libpb
xrpl/proto
xrpl.libpb xrpl/proto
LANGUAGE grpc
IMPORT_DIRS include/xrpl/proto
PROTOS "${protos}"

View File

@@ -39,15 +39,19 @@ list(
)
setup_target_for_coverage_gcovr(
NAME coverage
FORMAT ${coverage_format}
NAME
coverage
FORMAT
${coverage_format}
EXCLUDE
"src/test"
"src/tests"
"include/xrpl/beast/test"
"include/xrpl/beast/unit_test"
"${CMAKE_BINARY_DIR}/pb-xrpl.libpb"
DEPENDENCIES xrpld xrpl.tests
"src/test"
"src/tests"
"include/xrpl/beast/test"
"include/xrpl/beast/unit_test"
"${CMAKE_BINARY_DIR}/pb-xrpl.libpb"
DEPENDENCIES
xrpld
xrpl.tests
)
add_code_coverage_to_target(opts INTERFACE)

View File

@@ -42,11 +42,7 @@ function(verbose_find_path variable name)
endif()
endfunction()
verbose_find_path(
doxygen_plantuml_jar_path
plantuml.jar
PATH_SUFFIXES share/plantuml
)
verbose_find_path(doxygen_plantuml_jar_path plantuml.jar PATH_SUFFIXES share/plantuml)
verbose_find_path(doxygen_dot_path dot)
# https://en.cppreference.com/w/Cppreference:Archives

View File

@@ -25,16 +25,10 @@ function(add_module parent name)
${target}
PUBLIC "$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>"
)
isolate_headers(
${target}
"${CMAKE_CURRENT_SOURCE_DIR}/include"
"${CMAKE_CURRENT_SOURCE_DIR}/include/${parent}/${name}"
PUBLIC
isolate_headers(${target} "${CMAKE_CURRENT_SOURCE_DIR}/include"
"${CMAKE_CURRENT_SOURCE_DIR}/include/${parent}/${name}" PUBLIC
)
isolate_headers(
${target}
"${CMAKE_CURRENT_SOURCE_DIR}/src"
"${CMAKE_CURRENT_SOURCE_DIR}/src/lib${parent}/${name}"
PRIVATE
isolate_headers(${target} "${CMAKE_CURRENT_SOURCE_DIR}/src"
"${CMAKE_CURRENT_SOURCE_DIR}/src/lib${parent}/${name}" PRIVATE
)
endfunction()

View File

@@ -27,11 +27,6 @@ public:
SecretKey&
operator=(SecretKey const&) = default;
bool
operator==(SecretKey const&) = delete;
bool
operator!=(SecretKey const&) = delete;
~SecretKey();
SecretKey(std::array<std::uint8_t, 32> const& data);
@@ -82,6 +77,18 @@ public:
}
};
inline bool
operator==(SecretKey const& lhs, SecretKey const& rhs)
{
return lhs.size() == rhs.size() && std::memcmp(lhs.data(), rhs.data(), rhs.size()) == 0;
}
inline bool
operator!=(SecretKey const& lhs, SecretKey const& rhs)
{
return !(lhs == rhs);
}
//------------------------------------------------------------------------------
/** Parse a secret key */

View File

@@ -1,5 +1,4 @@
#include <test/jtx.h>
#include <test/unit_test/utils.h>
#include <xrpld/app/misc/ValidatorList.h>
@@ -457,7 +456,7 @@ public:
auto const token = loadValidatorToken(tokenBlob);
BEAST_EXPECT(token);
BEAST_EXPECT(test::equal(token->validationSecret, *valSecret));
BEAST_EXPECT(token->validationSecret == *valSecret);
BEAST_EXPECT(token->manifest == manifest);
}
{

View File

@@ -1,5 +1,4 @@
#include <test/jtx/Env.h>
#include <test/unit_test/utils.h>
#include <xrpld/app/misc/ValidatorKeys.h>
#include <xrpld/core/Config.h>
@@ -96,7 +95,7 @@ public:
if (BEAST_EXPECT(k.keys))
{
BEAST_EXPECT(k.keys->publicKey == seedPublicKey);
BEAST_EXPECT(test::equal(k.keys->secretKey, seedSecretKey));
BEAST_EXPECT(k.keys->secretKey == seedSecretKey);
}
BEAST_EXPECT(k.nodeID == seedNodeID);
BEAST_EXPECT(k.manifest.empty());
@@ -123,7 +122,7 @@ public:
if (BEAST_EXPECT(k.keys))
{
BEAST_EXPECT(k.keys->publicKey == tokenPublicKey);
BEAST_EXPECT(test::equal(k.keys->secretKey, tokenSecretKey));
BEAST_EXPECT(k.keys->secretKey == tokenSecretKey);
}
BEAST_EXPECT(k.nodeID == tokenNodeID);
BEAST_EXPECT(k.manifest == tokenManifest);

View File

@@ -1,5 +1,3 @@
#include <test/unit_test/utils.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/beast/utility/rngfill.h>
#include <xrpl/crypto/csprng.h>
@@ -185,7 +183,7 @@ public:
TokenType::NodePrivate, "pnen77YEeUd4fFKG7iycBWcwKpTaeFRkW2WFostaATy1DSupwXe");
BEAST_EXPECT(sk2);
BEAST_EXPECT(test::equal(sk1, *sk2));
BEAST_EXPECT(sk1 == *sk2);
}
{
@@ -195,7 +193,7 @@ public:
TokenType::NodePrivate, "paKv46LztLqK3GaKz1rG2nQGN6M4JLyRtxFBYFTw4wAVHtGys36");
BEAST_EXPECT(sk2);
BEAST_EXPECT(test::equal(sk1, *sk2));
BEAST_EXPECT(sk1 == *sk2);
}
// Try converting short, long and malformed data
@@ -263,20 +261,20 @@ public:
BEAST_EXPECT(!si.empty());
auto const ski = parseBase58<SecretKey>(TokenType::NodePrivate, si);
BEAST_EXPECT(ski && test::equal(keys[i], *ski));
BEAST_EXPECT(ski && keys[i] == *ski);
for (std::size_t j = i; j != keys.size(); ++j)
{
BEAST_EXPECT(test::equal(keys[i], keys[j]) == (i == j));
BEAST_EXPECT((keys[i] == keys[j]) == (i == j));
auto const sj = toBase58(TokenType::NodePrivate, keys[j]);
BEAST_EXPECT((si == sj) == (i == j));
auto const skj = parseBase58<SecretKey>(TokenType::NodePrivate, sj);
BEAST_EXPECT(skj && test::equal(keys[j], *skj));
BEAST_EXPECT(skj && keys[j] == *skj);
BEAST_EXPECT(test::equal(*ski, *skj) == (i == j));
BEAST_EXPECT((*ski == *skj) == (i == j));
}
}
}
@@ -294,7 +292,7 @@ public:
auto kp = generateKeyPair(KeyType::secp256k1, Seed{makeSlice(test.seed)});
BEAST_EXPECT(kp.first == PublicKey{makeSlice(test.pubkey)});
BEAST_EXPECT(test::equal(kp.second, SecretKey{makeSlice(test.seckey)}));
BEAST_EXPECT(kp.second == SecretKey{makeSlice(test.seckey)});
BEAST_EXPECT(calcAccountID(kp.first) == *id);
}
}
@@ -312,7 +310,7 @@ public:
auto kp = generateKeyPair(KeyType::ed25519, Seed{makeSlice(test.seed)});
BEAST_EXPECT(kp.first == PublicKey{makeSlice(test.pubkey)});
BEAST_EXPECT(test::equal(kp.second, SecretKey{makeSlice(test.seckey)}));
BEAST_EXPECT(kp.second == SecretKey{makeSlice(test.seckey)});
BEAST_EXPECT(calcAccountID(kp.first) == *id);
}
}

View File

@@ -1,18 +0,0 @@
#include <xrpl/protocol/SecretKey.h>
#include <cstring>
namespace xrpl {
namespace test {
/// Compare two SecretKey objects for equality.
/// SecretKey::operator== is deleted, so a named function is used
/// to avoid member-function lookup shadowing free-function overloads.
inline bool
equal(SecretKey const& lhs, SecretKey const& rhs)
{
return lhs.size() == rhs.size() && std::memcmp(lhs.data(), rhs.data(), rhs.size()) == 0;
}
} // namespace test
} // namespace xrpl