Mirror of https://github.com/Xahau/xahaud.git

Compare commits: test-hook-... vs. fix-enhanc... (12 commits)
Commits (SHA1):
62d7b120b8
66f7294120
7f6ac75617
4150f0383c
25123b370a
f90ed41802
8c4c158d3a
2d2951875d
9bfca63574
1ba444ae7f
f96d9b6e51
c7e5801a35
.github/workflows/levelization.yml (vendored, 4 lines changed)
@@ -10,7 +10,7 @@ jobs:
 steps:
 - uses: actions/checkout@v3
 - name: Check levelization
-run: Builds/levelization/levelization.sh
+run: python Builds/levelization/levelization.py
 - name: Check for differences
 id: assert
 run: |
@@ -40,7 +40,7 @@ jobs:
 To fix it, you can do one of two things:
 1. Download and apply the patch generated as an artifact of this
 job to your repo, commit, and push.
-2. Run './Builds/levelization/levelization.sh' in your repo,
+2. Run 'python Builds/levelization/levelization.py' in your repo,
 commit, and push.
 
 See Builds/levelization/README.md for more info.
.gitignore (vendored, 3 lines changed)
@@ -53,6 +53,9 @@ Builds/levelization/results/paths.txt
 Builds/levelization/results/includes/
 Builds/levelization/results/includedby/
 
+# Python
+__pycache__
+
 # Ignore tmp directory.
 tmp
 
Builds/levelization/README.md
@@ -50,7 +50,7 @@ that `test` code should *never* be included in `ripple` code.)
 
 ## Validation
 
-The [levelization.sh](levelization.sh) script takes no parameters,
+The [levelization.py](levelization.py) script takes no parameters,
 reads no environment variables, and can be run from any directory,
 as long as it is in the expected location in the rippled repo.
 It can be run at any time from within a checked out repo, and will
@@ -84,7 +84,7 @@ It generates many files of [results](results):
 Github Actions workflow to test that levelization loops haven't
 changed. Unfortunately, if changes are detected, it can't tell if
 they are improvements or not, so if you have resolved any issues or
-done anything else to improve levelization, run `levelization.sh`,
+done anything else to improve levelization, run `levelization.py`,
 and commit the updated results.
 
 The `loops.txt` and `ordering.txt` files relate the modules
@@ -108,7 +108,7 @@ The committed files hide the detailed values intentionally, to
 prevent false alarms and merging issues, and because it's easy to
 get those details locally.
 
-1. Run `levelization.sh`
+1. Run `levelization.py`
 2. Grep the modules in `paths.txt`.
 * For example, if a cycle is found `A ~= B`, simply `grep -w
 A Builds/levelization/results/paths.txt | grep -w B`
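As a side note, the cycle lookup that the README describes with grep can also be done with a few lines of Python; the sketch below is only illustrative, and the module names and results path are placeholders rather than part of this change.

    # Hypothetical helper: list every paths.txt row mentioning both modules,
    # in the spirit of `grep -w A ... | grep -w B` from the README.
    from pathlib import Path

    def cycle_rows(a, b, paths_file="Builds/levelization/results/paths.txt"):
        rows = []
        for line in Path(paths_file).read_text().splitlines():
            fields = line.split()  # "<count> <level> <includelevel>"
            if len(fields) == 3 and a in fields[1:] and b in fields[1:]:
                rows.append(line)
        return rows

    # Example with placeholder module names:
    # print("\n".join(cycle_rows("ripple.app", "ripple.core")))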
Builds/levelization/levelization.py (new executable file, 283 lines)
@@ -0,0 +1,283 @@
#!/usr/bin/env python3

"""
Usage: levelization.py
This script takes no parameters, and can be called from any directory in the file system.
"""

import os
import re
import sys
from collections import defaultdict
from pathlib import Path

# Compile regex patterns once at module level
INCLUDE_PATTERN = re.compile(r"^\s*#include.*/.*\.h")
INCLUDE_PATH_PATTERN = re.compile(r'[<"]([^>"]+)[>"]')


def dictionary_sort_key(s):
    """
    Create a sort key that mimics 'sort -d' (dictionary order).
    Dictionary order only considers blanks and alphanumeric characters.
    """
    return "".join(c for c in s if c.isalnum() or c.isspace())


def get_level(file_path):
    """
    Extract the level from a file path (second and third directory components).
    Equivalent to bash: cut -d/ -f 2,3

    Examples:
        src/ripple/app/main.cpp -> ripple.app
        src/test/app/Import_test.cpp -> test.app
    """
    parts = file_path.split("/")

    if len(parts) >= 3:
        level = f"{parts[1]}/{parts[2]}"
    elif len(parts) >= 2:
        level = f"{parts[1]}/toplevel"
    else:
        level = file_path

    # If the "level" indicates a file, cut off the filename
    if "." in level.split("/")[-1]:
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        level = level.rsplit("/", 1)[0] + "/toplevel"

    return level.replace("/", ".")


def extract_include_level(include_line):
    """
    Extract the include path from an #include directive.
    Gets the first two directory components from the include path.
    Equivalent to bash: cut -d/ -f 1,2

    Examples:
        #include <ripple/basics/base_uint.h> -> ripple.basics
        #include "ripple/app/main/Application.h" -> ripple.app
    """
    match = INCLUDE_PATH_PATTERN.search(include_line)
    if not match:
        return None

    include_path = match.group(1)
    parts = include_path.split("/")

    if len(parts) >= 2:
        include_level = f"{parts[0]}/{parts[1]}"
    else:
        include_level = include_path

    # If the "includelevel" indicates a file, cut off the filename
    if "." in include_level.split("/")[-1]:
        include_level = include_level.rsplit("/", 1)[0] + "/toplevel"

    return include_level.replace("/", ".")


def find_repository_directories(start_path, depth_limit=10):
    """
    Find the repository root by looking for src or include folders.
    Walks up the directory tree from the start path.
    """
    current = start_path.resolve()

    for _ in range(depth_limit):
        src_path = current / "src"
        include_path = current / "include"
        has_src = src_path.exists()
        has_include = include_path.exists()

        if has_src or has_include:
            dirs = []
            if has_src:
                dirs.append(src_path)
            if has_include:
                dirs.append(include_path)
            return current, dirs

        parent = current.parent
        if parent == current:
            break
        current = parent

    raise RuntimeError(
        "Could not find repository root. "
        "Expected to find a directory containing 'src' and/or 'include' folders."
    )


def main():
    script_dir = Path(__file__).parent.resolve()
    os.chdir(script_dir)

    # Clean up and create results directory.
    results_dir = script_dir / "results"
    if results_dir.exists():
        import shutil

        shutil.rmtree(results_dir)
    results_dir.mkdir()

    # Find the repository root.
    try:
        repo_root, scan_dirs = find_repository_directories(script_dir)
        print(f"Found repository root: {repo_root}")
        for scan_dir in scan_dirs:
            print(f"  Scanning: {scan_dir.relative_to(repo_root)}")
    except RuntimeError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)

    # Find all #include directives.
    print("\nScanning for raw includes...")
    raw_includes = []
    rawincludes_file = results_dir / "rawincludes.txt"

    with open(rawincludes_file, "w", buffering=8192) as raw_f:
        for dir_path in scan_dirs:
            for file_path in dir_path.rglob("*"):
                if not file_path.is_file():
                    continue
                try:
                    rel_path_str = str(file_path.relative_to(repo_root))
                    with open(
                        file_path, "r", encoding="utf-8", errors="ignore", buffering=8192
                    ) as f:
                        for line in f:
                            if "#include" not in line or "boost" in line:
                                continue
                            if INCLUDE_PATTERN.match(line):
                                line_stripped = line.strip()
                                entry = f"{rel_path_str}:{line_stripped}\n"
                                print(entry, end="")
                                raw_f.write(entry)
                                raw_includes.append((rel_path_str, line_stripped))
                except Exception as e:
                    print(f"Error reading {file_path}: {e}", file=sys.stderr)

    # Build levelization paths and count directly.
    print("Build levelization paths")
    path_counts = defaultdict(int)

    for file_path, include_line in raw_includes:
        include_level = extract_include_level(include_line)
        if not include_level:
            continue
        level = get_level(file_path)
        if level != include_level:
            path_counts[(level, include_level)] += 1

    # Sort and deduplicate paths.
    print("Sort and deduplicate paths")
    sorted_items = sorted(
        path_counts.items(),
        key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
    )

    paths_file = results_dir / "paths.txt"
    with open(paths_file, "w") as f:
        for (level, include_level), count in sorted_items:
            line = f"{count:7} {level} {include_level}\n"
            print(line.rstrip())
            f.write(line)

    # Split into flat-file database.
    print("Split into flat-file database")
    includes_dir = results_dir / "includes"
    includedby_dir = results_dir / "includedby"
    includes_dir.mkdir()
    includedby_dir.mkdir()

    includes_data = defaultdict(list)
    includedby_data = defaultdict(list)

    for (level, include_level), count in sorted_items:
        includes_data[level].append((include_level, count))
        includedby_data[include_level].append((level, count))

    for level in sorted(includes_data.keys(), key=dictionary_sort_key):
        with open(includes_dir / level, "w") as f:
            for include_level, count in includes_data[level]:
                line = f"{include_level} {count}\n"
                print(line.rstrip())
                f.write(line)

    for include_level in sorted(includedby_data.keys(), key=dictionary_sort_key):
        with open(includedby_dir / include_level, "w") as f:
            for level, count in includedby_data[include_level]:
                line = f"{level} {count}\n"
                print(line.rstrip())
                f.write(line)

    # Search for loops.
    print("Search for loops")
    loops_file = results_dir / "loops.txt"
    ordering_file = results_dir / "ordering.txt"

    # Pre-load all include files into memory for fast lookup.
    includes_cache = {}
    includes_lookup = {}

    for include_file in sorted(includes_dir.iterdir(), key=lambda p: p.name):
        if not include_file.is_file():
            continue
        includes_cache[include_file.name] = []
        includes_lookup[include_file.name] = {}
        with open(include_file, "r") as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) >= 2:
                    name, count = parts[0], int(parts[1])
                    includes_cache[include_file.name].append((name, count))
                    includes_lookup[include_file.name][name] = count

    loops_found = set()

    with open(loops_file, "w", buffering=8192) as loops_f, open(
        ordering_file, "w", buffering=8192
    ) as ordering_f:
        for source in sorted(includes_cache.keys()):
            for include, include_freq in includes_cache[source]:
                if include not in includes_lookup:
                    continue

                source_freq = includes_lookup[include].get(source)

                if source_freq is not None:
                    loop_key = tuple(sorted([source, include]))
                    if loop_key in loops_found:
                        continue
                    loops_found.add(loop_key)

                    loops_f.write(f"Loop: {source} {include}\n")

                    diff = include_freq - source_freq
                    if diff > 3:
                        loops_f.write(f" {source} > {include}\n\n")
                    elif diff < -3:
                        loops_f.write(f" {include} > {source}\n\n")
                    elif source_freq == include_freq:
                        loops_f.write(f" {include} == {source}\n\n")
                    else:
                        loops_f.write(f" {include} ~= {source}\n\n")
                else:
                    ordering_f.write(f"{source} > {include}\n")

    # Print results.
    print("\nOrdering:")
    with open(ordering_file, "r") as f:
        print(f.read(), end="")

    print("\nLoops:")
    with open(loops_file, "r") as f:
        print(f.read(), end="")


if __name__ == "__main__":
    main()
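A quick way to sanity-check the helpers above is to load the script as a module and replay the examples from its own docstrings. This is only an illustrative sketch; the relative path to the script is an assumption about where it is run from.

    # Illustrative only: load levelization.py from its path and exercise the
    # documented helper functions (main() is not run thanks to the __main__ guard).
    import importlib.util

    spec = importlib.util.spec_from_file_location(
        "levelization", "Builds/levelization/levelization.py")
    levelization = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(levelization)

    # The examples from the get_level() and extract_include_level() docstrings:
    assert levelization.get_level("src/ripple/app/main.cpp") == "ripple.app"
    assert levelization.get_level("src/test/app/Import_test.cpp") == "test.app"
    assert levelization.extract_include_level(
        "#include <ripple/basics/base_uint.h>") == "ripple.basics"
    assert levelization.extract_include_level(
        '#include "ripple/app/main/Application.h"') == "ripple.app"

    # dictionary_sort_key keeps only alphanumerics and blanks, mimicking `sort -d`:
    assert levelization.dictionary_sort_key("ripple.app") == "rippleapp"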
Builds/levelization/levelization.sh (deleted, 130 lines)
@@ -1,130 +0,0 @@
#!/bin/bash

# Usage: levelization.sh
# This script takes no parameters, reads no environment variables,
# and can be run from any directory, as long as it is in the expected
# location in the repo.

pushd $( dirname $0 )

if [ -v PS1 ]
then
  # if the shell is interactive, clean up any flotsam before analyzing
  git clean -ix
fi

# Ensure all sorting is ASCII-order consistently across platforms.
export LANG=C

rm -rfv results
mkdir results
includes="$( pwd )/results/rawincludes.txt"
pushd ../..
echo Raw includes:
grep -r '^[ ]*#include.*/.*\.h' include src | \
  grep -v boost | tee ${includes}
popd
pushd results

oldifs=${IFS}
IFS=:
mkdir includes
mkdir includedby
echo Build levelization paths
exec 3< ${includes} # open rawincludes.txt for input
while read -r -u 3 file include
do
  level=$( echo ${file} | cut -d/ -f 2,3 )
  # If the "level" indicates a file, cut off the filename
  if [[ "${level##*.}" != "${level}" ]]
  then
    # Use the "toplevel" label as a workaround for `sort`
    # inconsistencies between different utility versions
    level="$( dirname ${level} )/toplevel"
  fi
  level=$( echo ${level} | tr '/' '.' )

  includelevel=$( echo ${include} | sed 's/.*["<]//; s/[">].*//' | \
    cut -d/ -f 1,2 )
  if [[ "${includelevel##*.}" != "${includelevel}" ]]
  then
    # Use the "toplevel" label as a workaround for `sort`
    # inconsistencies between different utility versions
    includelevel="$( dirname ${includelevel} )/toplevel"
  fi
  includelevel=$( echo ${includelevel} | tr '/' '.' )

  if [[ "$level" != "$includelevel" ]]
  then
    echo $level $includelevel | tee -a paths.txt
  fi
done
echo Sort and dedup paths
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
mv sortedpaths.txt paths.txt
exec 3>&- #close fd 3
IFS=${oldifs}
unset oldifs

echo Split into flat-file database
exec 4<paths.txt # open paths.txt for input
while read -r -u 4 count level include
do
  echo ${include} ${count} | tee -a includes/${level}
  echo ${level} ${count} | tee -a includedby/${include}
done
exec 4>&- #close fd 4

loops="$( pwd )/loops.txt"
ordering="$( pwd )/ordering.txt"
pushd includes
echo Search for loops
# Redirect stdout to a file
exec 4>&1
exec 1>"${loops}"
for source in *
do
  if [[ -f "$source" ]]
  then
    exec 5<"${source}" # open for input
    while read -r -u 5 include includefreq
    do
      if [[ -f $include ]]
      then
        if grep -q -w $source $include
        then
          if grep -q -w "Loop: $include $source" "${loops}"
          then
            continue
          fi
          sourcefreq=$( grep -w $source $include | cut -d\  -f2 )
          echo "Loop: $source $include"
          # If the counts are close, indicate that the two modules are
          # on the same level, though they shouldn't be
          if [[ $(( $includefreq - $sourcefreq )) -gt 3 ]]
          then
            echo -e " $source > $include\n"
          elif [[ $(( $sourcefreq - $includefreq )) -gt 3 ]]
          then
            echo -e " $include > $source\n"
          elif [[ $sourcefreq -eq $includefreq ]]
          then
            echo -e " $include == $source\n"
          else
            echo -e " $include ~= $source\n"
          fi
        else
          echo "$source > $include" >> "${ordering}"
        fi
      fi
    done
    exec 5>&- #close fd 5
  fi
done
exec 1>&4 #close fd 1
exec 4>&- #close fd 4
cat "${ordering}"
cat "${loops}"
popd
popd
popd
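Both the removed shell script and its Python replacement classify a detected include loop by how lopsided the two counts are: a difference of more than 3 yields a definite ordering, otherwise the two modules are flagged as effectively sitting on the same level. A minimal sketch of that rule, with made-up module names and counts purely for illustration:

    # Same classification rule as in levelization.py and the old levelization.sh.
    def classify(source, include, source_freq, include_freq):
        diff = include_freq - source_freq
        if diff > 3:
            return f"{source} > {include}"
        if diff < -3:
            return f"{include} > {source}"
        if source_freq == include_freq:
            return f"{include} == {source}"
        return f"{include} ~= {source}"

    # Hypothetical counts: "ripple.app" includes "ripple.core" 10 times,
    # while "ripple.core" includes "ripple.app" only twice.
    print(classify("ripple.app", "ripple.core", source_freq=2, include_freq=10))
    # -> ripple.app > ripple.core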
@@ -71,6 +71,7 @@ cmake .. -G Ninja \
-Dxrpld=TRUE \
-Dtests=TRUE &&
ccache -z &&
ccache -p &&
ninja -j $3 && echo "=== Re-running final link with verbose output ===" && rm -f rippled && ninja -v rippled &&
ccache -s &&
strip -s rippled &&
@@ -68,6 +68,17 @@ target_link_libraries(xrpl.imports.main
 $<$<BOOL:${voidstar}>:antithesis-sdk-cpp>
 )
 
+# date-tz for enhanced logging (always linked, code is #ifdef guarded)
+if(TARGET date::date-tz)
+  target_link_libraries(xrpl.imports.main INTERFACE date::date-tz)
+endif()
+
+# BEAST_ENHANCED_LOGGING: enable for Debug builds OR when explicitly requested
+# Uses generator expression so it works with multi-config generators (Xcode, VS, Ninja Multi-Config)
+target_compile_definitions(xrpl.imports.main INTERFACE
+  $<$<OR:$<CONFIG:Debug>,$<BOOL:${BEAST_ENHANCED_LOGGING}>>:BEAST_ENHANCED_LOGGING=1>
+)
+
 include(add_module)
 include(target_link_modules)
 
@@ -196,6 +196,7 @@ ENV PATH=/usr/local/bin:$PATH
RUN /hbb_exe/activate-exec bash -c "ccache -M 100G && \
ccache -o cache_dir=/cache/ccache && \
ccache -o compiler_check=content && \
ccache -o direct_mode=true && \
mkdir -p ~/.conan2 /cache/conan2 /cache/conan2_download /cache/conan2_sources && \
echo 'core.cache:storage_path=/cache/conan2' > ~/.conan2/global.conf && \
echo 'core.download:download_cache=/cache/conan2_download' >> ~/.conan2/global.conf && \
@@ -582,9 +582,9 @@ class BaseFee_test : public beast::unit_test::suite
 }
 
 void
-testSignersListSet(FeatureBitset features)
+testSignerListSet(FeatureBitset features)
 {
-testcase("signers list set w/ hook params");
+testcase("signer list set w/ hook params");
 
 using namespace test::jtx;
 using namespace std::literals;
@@ -810,7 +810,7 @@ class BaseFee_test : public beast::unit_test::suite
 testPaymentChannelFund(features);
 testSetHook(features);
 testSetRegularKey(features);
-testSignersListSet(features);
+testSignerListSet(features);
 testTicketCreate(features);
 testTrustSet(features);
 testURITokenBurnFee(features);
@@ -879,7 +879,7 @@ inline std::string ImportTCSetRegularKey::w_signers = R"json({
 }
 })json";
 
-class ImportTCSignersListSet
+class ImportTCSignerListSet
 {
 public:
 static std::string w_seed_bad_fee;
@@ -891,7 +891,7 @@ public:
 static std::string w_signers_empty;
 };
 
-inline std::string ImportTCSignersListSet::w_seed_bad_fee = R"json({
+inline std::string ImportTCSignerListSet::w_seed_bad_fee = R"json({
 "ledger": {
 "acroot": "64F75A08037D9F8ED8A103893401EB2AD726E7D6AAC3EAA249005916A9354892",
 "close": 743008501,
@@ -952,7 +952,7 @@ inline std::string ImportTCSignersListSet::w_seed_bad_fee = R"json({
 }
 }
 })json";
-inline std::string ImportTCSignersListSet::w_seed = R"json({
+inline std::string ImportTCSignerListSet::w_seed = R"json({
 "ledger": {
 "acroot": "8112FF5F3FEEA34894A16CCCD64A24D552521F2E699780A587A9E6F5F5117CE5",
 "close": 743008510,
@@ -993,7 +993,7 @@ inline std::string ImportTCSignersListSet::w_seed = R"json({
 }
 }
 })json";
-inline std::string ImportTCSignersListSet::w_regular_key = R"json({
+inline std::string ImportTCSignerListSet::w_regular_key = R"json({
 "ledger": {
 "acroot": "2A25CA219781A3144C72FD5FB6EB62763214E050050DA6176624A046C51EECBD",
 "close": 743015350,
@@ -1034,7 +1034,7 @@ inline std::string ImportTCSignersListSet::w_regular_key = R"json({
 }
 }
 })json";
-inline std::string ImportTCSignersListSet::w_signers = R"json({
+inline std::string ImportTCSignerListSet::w_signers = R"json({
 "ledger": {
 "acroot": "BC35E65B52724CF258BDAC8B8E0D3B9CA0F012F5B243F6AAD1B671EDABD5188E",
 "close": 745594953,
@@ -1075,7 +1075,7 @@ inline std::string ImportTCSignersListSet::w_signers = R"json({
 }
 }
 })json";
-inline std::string ImportTCSignersListSet::w_seed_empty = R"json({
+inline std::string ImportTCSignerListSet::w_seed_empty = R"json({
 "ledger": {
 "acroot": "ECCAFDE52A6D5F1E36EB82EAA5247FF1D8ADE51FCF1ED0842850193018A510F7",
 "close": 743056482,
@@ -1116,7 +1116,7 @@ inline std::string ImportTCSignersListSet::w_seed_empty = R"json({
 }
 }
 })json";
-inline std::string ImportTCSignersListSet::w_regular_key_empty = R"json({
+inline std::string ImportTCSignerListSet::w_regular_key_empty = R"json({
 "ledger": {
 "acroot": "E222F46D5F35C79FDA3BB98973E2024EF9F6FA7B26471CC9CEF2CE033FA0E6E7",
 "close": 743169800,
@@ -1157,7 +1157,7 @@ inline std::string ImportTCSignersListSet::w_regular_key_empty = R"json({
 }
 }
 })json";
-inline std::string ImportTCSignersListSet::w_signers_empty = R"json({
+inline std::string ImportTCSignerListSet::w_signers_empty = R"json({
 "ledger": {
 "acroot": "987438A87AD998B7D7ED04A280FB5414C76E8475D621A55FB8463F15CEEEAD49",
 "close": 743172592,
@@ -1261,4 +1261,4 @@ inline std::string ImportTCHalving::base_genesis = R"json({
 
 } // namespace test
 } // namespace ripple
-#endif
+#endif
@@ -1898,7 +1898,7 @@ class Import_test : public beast::unit_test::suite
 // different keys.
 {
 auto const xpopJson =
-import::loadXpop(ImportTCSignersListSet::w_signers);
+import::loadXpop(ImportTCSignerListSet::w_signers);
 env(import::import(alice, xpopJson),
 msig(bob, dave),
 fee((3 * feeDrops) * 10),
@@ -1910,7 +1910,7 @@ class Import_test : public beast::unit_test::suite
 // different keys. - empty innerSigners
 {
 Json::Value xpopJson =
-import::loadXpop(ImportTCSignersListSet::w_signers);
+import::loadXpop(ImportTCSignerListSet::w_signers);
 xpopJson[jss::transaction][jss::blob] =
 "12000C22000000002400000014201B0000002B201D00005359202300000002"
 "6840000000001E84B073008114AE123A8556F3CF91154711376AFB0F894F83"
@@ -1927,7 +1927,7 @@ class Import_test : public beast::unit_test::suite
 // different keys.
 {
 Json::Value xpopJson =
-import::loadXpop(ImportTCSignersListSet::w_signers);
+import::loadXpop(ImportTCSignerListSet::w_signers);
 xpopJson[jss::transaction][jss::blob] =
 "12000C22000000002400000014201B0000002B201D00005359202300000002"
 "6840000000001E84B073008114AE123A8556F3CF91154711376AFB0F894F83"
@@ -1953,7 +1953,7 @@ class Import_test : public beast::unit_test::suite
 // temMALFORMED - Import: inner txn signature verify failed
 {
 Json::Value xpopJson =
-import::loadXpop(ImportTCSignersListSet::w_signers);
+import::loadXpop(ImportTCSignerListSet::w_signers);
 xpopJson[jss::transaction][jss::blob] =
 "12000C2200000008240000001A201B000003B9201D00005359202300000000"
 "6840000000001E84B073008114AE123A8556F3CF91154711376AFB0F894F83"
@@ -2768,7 +2768,7 @@ class Import_test : public beast::unit_test::suite
 env.close();
 }
 
-// tefIMPORT_BLACKHOLED - SignersListSet (w/seed)
+// tefIMPORT_BLACKHOLED - SignerListSet (w/seed)
 {
 test::jtx::Env env{
 *this, network::makeNetworkVLConfig(21337, keys)};
@@ -2792,7 +2792,7 @@ class Import_test : public beast::unit_test::suite
 
 // Import with Master Key
 Json::Value tmpXpop =
-import::loadXpop(ImportTCSignersListSet::w_seed);
+import::loadXpop(ImportTCSignerListSet::w_seed);
 env(import::import(alice, tmpXpop),
 ter(tefIMPORT_BLACKHOLED),
 fee(feeDrops * 10),
@@ -3244,7 +3244,7 @@ class Import_test : public beast::unit_test::suite
 env(noop(alice), sig(bob), fee(feeDrops), ter(tefBAD_AUTH));
 }
 
-// w/ signers list -> dne
+// w/ signer list -> dne
 {
 test::jtx::Env env{
 *this, network::makeNetworkVLConfig(21337, keys)};
@@ -3975,7 +3975,7 @@ class Import_test : public beast::unit_test::suite
 env(noop(alice), sig(carol), fee(feeDrops), ter(tesSUCCESS));
 }
 
-// w/ signers list -> funded (update regular key)
+// w/ signer list -> funded (update regular key)
 {
 test::jtx::Env env{
 *this, network::makeNetworkVLConfig(21337, keys)};
@@ -4049,7 +4049,7 @@ class Import_test : public beast::unit_test::suite
 BEAST_EXPECT(acctSle->getAccountID(sfRegularKey) == dave.id());
 env(noop(alice), sig(dave), fee(feeDrops), ter(tesSUCCESS));
 
-// confirm signers list not set
+// confirm signer list not set
 auto const k = keylet::signers(alice);
 BEAST_EXPECT(env.current()->read(k) == nullptr);
 }
@@ -4351,9 +4351,9 @@ class Import_test : public beast::unit_test::suite
 }
 
 void
-testSignersListSet(FeatureBitset features)
+testSignerListSet(FeatureBitset features)
 {
-testcase("signers list set tx");
+testcase("signer list set tx");
 
 using namespace test::jtx;
 using namespace std::literals;
@@ -4394,7 +4394,7 @@ class Import_test : public beast::unit_test::suite
 
 // import tx
 auto const xpopJson =
-import::loadXpop(ImportTCSignersListSet::w_seed_bad_fee);
+import::loadXpop(ImportTCSignerListSet::w_seed_bad_fee);
 Json::Value tx = import::import(alice, xpopJson);
 tx[jss::Sequence] = 0;
 // tx[jss::Fee] = 0;
@@ -4438,7 +4438,7 @@ class Import_test : public beast::unit_test::suite
 
 // import tx
 auto const xpopJson =
-import::loadXpop(ImportTCSignersListSet::w_seed);
+import::loadXpop(ImportTCSignerListSet::w_seed);
 Json::Value tx = import::import(alice, xpopJson);
 tx[jss::Sequence] = 0;
 tx[jss::Fee] = 0;
@@ -4523,7 +4523,7 @@ class Import_test : public beast::unit_test::suite
 // import tx
 auto const burnAmt = XRP(2);
 auto const xpopJson =
-import::loadXpop(ImportTCSignersListSet::w_regular_key);
+import::loadXpop(ImportTCSignerListSet::w_regular_key);
 Json::Value tx = import::import(alice, xpopJson);
 tx[jss::Sequence] = 0;
 tx[jss::Fee] = 0;
@@ -4614,7 +4614,7 @@ class Import_test : public beast::unit_test::suite
 
 // import tx
 auto const xpopJson =
-import::loadXpop(ImportTCSignersListSet::w_signers);
+import::loadXpop(ImportTCSignerListSet::w_signers);
 Json::Value tx = import::import(alice, xpopJson);
 tx[jss::Sequence] = 0;
 tx[jss::Fee] = 0;
@@ -4685,7 +4685,7 @@ class Import_test : public beast::unit_test::suite
 
 // import tx
 auto const xpopJson =
-import::loadXpop(ImportTCSignersListSet::w_seed);
+import::loadXpop(ImportTCSignerListSet::w_seed);
 env(import::import(alice, xpopJson),
 fee(feeDrops * 10),
 ter(tesSUCCESS));
@@ -4771,7 +4771,7 @@ class Import_test : public beast::unit_test::suite
 auto const envAlice = env.balance(alice);
 BEAST_EXPECT(envAlice == XRP(1000));
 
-// set the signers list
+// set the signer list
 env(signers(alice, 2, {{bob, 1}, {carol, 1}}));
 env(noop(alice),
 msig(bob, carol),
@@ -4787,7 +4787,7 @@ class Import_test : public beast::unit_test::suite
 
 // import tx
 auto const xpopJson =
-import::loadXpop(ImportTCSignersListSet::w_seed_empty);
+import::loadXpop(ImportTCSignerListSet::w_seed_empty);
 env(import::import(alice, xpopJson),
 fee(feeDrops * 10),
 ter(tesSUCCESS));
@@ -4852,7 +4852,7 @@ class Import_test : public beast::unit_test::suite
 env(noop(alice), sig(bob), fee(feeDrops), ter(tesSUCCESS));
 env.close();
 
-// set the signers list
+// set the signer list
 env(signers(alice, 2, {{bob, 1}, {carol, 1}}));
 env(noop(alice),
 msig(bob, carol),
@@ -4868,7 +4868,7 @@ class Import_test : public beast::unit_test::suite
 
 // import tx
 auto const xpopJson =
-import::loadXpop(ImportTCSignersListSet::w_regular_key_empty);
+import::loadXpop(ImportTCSignerListSet::w_regular_key_empty);
 env(import::import(alice, xpopJson),
 fee(feeDrops * 10),
 sig(bob),
@@ -4935,7 +4935,7 @@ class Import_test : public beast::unit_test::suite
 auto const envAlice = env.balance(alice);
 BEAST_EXPECT(envAlice == XRP(1000));
 
-// set the signers list
+// set the signer list
 env(signers(alice, 2, {{bob, 1}, {carol, 1}}));
 env(noop(alice),
 msig(bob, carol),
@@ -4951,7 +4951,7 @@ class Import_test : public beast::unit_test::suite
 
 // import tx
 auto const xpopJson =
-import::loadXpop(ImportTCSignersListSet::w_signers_empty);
+import::loadXpop(ImportTCSignerListSet::w_signers_empty);
 env(import::import(alice, xpopJson),
 msig(bob, carol),
 fee((3 * feeDrops) * 10),
@@ -6228,7 +6228,7 @@ public:
 testAccountSetFlags(features);
 testSetRegularKey(features);
 testSetRegularKeyFlags(features);
-testSignersListSet(features);
+testSignerListSet(features);
 testUsingTickets(features);
 testAccountIndex(features);
 testHookIssuer(features);
@@ -4492,14 +4492,14 @@ private:
 }
 }
 
-// SignersListSet
+// SignerListSet
 // | otxn | tsh | sls |
 // | A | A | S |
 // | A | S | S |
 void
-testSignersListSetTSH(FeatureBitset features)
+testSignerListSetTSH(FeatureBitset features)
 {
-testcase("signers list set tsh");
+testcase("signer list set tsh");
 
 using namespace test::jtx;
 using namespace std::literals;
@@ -4527,7 +4527,7 @@ private:
 // set tsh hook
 setTSHHook(env, account, testStrong);
 
-// signers list set
+// signer list set
 env(signers(account, 2, {{signer1, 1}, {signer2, 1}}),
 fee(XRP(1)),
 ter(tesSUCCESS));
@@ -4566,7 +4566,7 @@ private:
 // set tsh hook
 setTSHHook(env, signer2, testStrong);
 
-// signers list set
+// signer list set
 env(signers(account, 2, {{signer1, 1}, {signer2, 1}}),
 fee(XRP(1)),
 ter(tesSUCCESS));
@@ -6914,7 +6914,7 @@ private:
 testPaymentChannelFundTSH(features);
 testSetHookTSH(features);
 testSetRegularKeyTSH(features);
-testSignersListSetTSH(features);
+testSignerListSetTSH(features);
 testTicketCreateTSH(features);
 testTrustSetTSH(features);
 testURITokenMintTSH(features);
File diff suppressed because it is too large
@@ -880,9 +880,9 @@ private:
 }
 
 void
-testSignersListSet(FeatureBitset features)
+testSignerListSet(FeatureBitset features)
 {
-testcase("signers list set");
+testcase("signer list set");
 
 using namespace test::jtx;
 using namespace std::literals;
@@ -895,7 +895,7 @@ private:
 env.fund(XRP(1000), alice, signer1, signer2);
 env.close();
 
-// signers list set
+// signer list set
 env(signers(alice, 2, {{signer1, 1}, {signer2, 1}}), ter(tesSUCCESS));
 env.close();
 
@@ -1384,7 +1384,7 @@ private:
 testPaymentChannelFund(features);
 testSetHook(features);
 testSetRegularKey(features);
-testSignersListSet(features);
+testSignerListSet(features);
 testTicketCreate(features);
 testTrustSet(features);
 testURITokenMint(features);
@@ -65,29 +65,16 @@ hso_delete(void (*f)(Json::Value& jv))
 Json::Value
 hso(std::vector<uint8_t> const& wasmBytes, void (*f)(Json::Value& jv))
 {
-if (wasmBytes.size() == 0)
-throw std::runtime_error("empty hook wasm passed to hso()");
-
-Json::Value jv;
-jv[jss::CreateCode] = strHex(wasmBytes);
-{
-jv[jss::HookOn] =
-"0000000000000000000000000000000000000000000000000000000000000000";
-jv[jss::HookNamespace] = to_string(uint256{beast::zero});
-jv[jss::HookApiVersion] = Json::Value{0};
-}
-
-if (f)
-f(jv);
-
-return jv;
+return hso(strHex(wasmBytes), f);
 }
 
 Json::Value
 hso(std::string const& wasmHex, void (*f)(Json::Value& jv))
 {
 if (wasmHex.size() == 0)
-throw std::runtime_error("empty hook wasm passed to hso()");
+throw std::runtime_error(
+"empty hook wasm passed to hso(): run "
+"src/test/app/build_test_hooks.sh to generate the hook wasm");
 
 Json::Value jv;
 jv[jss::CreateCode] = wasmHex;