Mirror of https://github.com/XRPLF/rippled.git
Synced 2026-02-05 06:25:36 +00:00

Compare commits: a1q123456/... vlntb/numb
3 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | a86a90edd7 |  |
|  | ed13af878e |  |
|  | 3c1505a29d |  |
.github/scripts/levelization/README.md (vendored, 6 changes)
@@ -70,7 +70,7 @@ that `test` code should _never_ be included in `xrpl` or `xrpld` code.)

 ## Validation

-The [levelization](generate.py) script takes no parameters,
+The [levelization](generate.sh) script takes no parameters,
 reads no environment variables, and can be run from any directory,
 as long as it is in the expected location in the rippled repo.
 It can be run at any time from within a checked out repo, and will
@@ -104,7 +104,7 @@ It generates many files of [results](results):
 Github Actions workflow to test that levelization loops haven't
 changed. Unfortunately, if changes are detected, it can't tell if
 they are improvements or not, so if you have resolved any issues or
-done anything else to improve levelization, run `generate.py`,
+done anything else to improve levelization, run `levelization.sh`,
 and commit the updated results.

 The `loops.txt` and `ordering.txt` files relate the modules
@@ -128,7 +128,7 @@ The committed files hide the detailed values intentionally, to
 prevent false alarms and merging issues, and because it's easy to
 get those details locally.

-1. Run `generate.py`
+1. Run `levelization.sh`
 2. Grep the modules in `paths.txt`.
    - For example, if a cycle is found `A ~= B`, simply `grep -w
      A .github/scripts/levelization/results/paths.txt | grep -w B`
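For readers following the README's suggested grep workflow, a minimal Python sketch of the same diagnosis, assuming `paths.txt` lines have the `count level include_level` shape produced by the script (module names `A`/`B` are placeholders for a reported cycle):

```python
# Print every dependency edge between two modules, mirroring
# `grep -w A results/paths.txt | grep -w B`.
from pathlib import Path

def show_cycle(a: str, b: str, paths: Path) -> None:
    for line in paths.read_text().splitlines():
        fields = line.split()
        # fields = [count, level, include_level]
        if len(fields) == 3 and {a, b} <= {fields[1], fields[2]}:
            print(line)

show_cycle("A", "B", Path(".github/scripts/levelization/results/paths.txt"))
```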
.github/scripts/levelization/generate.py (vendored, 369 changes)
@@ -1,369 +0,0 @@
#!/usr/bin/env python3

"""
Usage: generate.py

This script takes no parameters, reads no environment variables,
and can be run from any directory, as long as it is in the expected
location in the repo.
"""

import os
import re
import subprocess
import sys
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple, Set, Optional


# Compile regex patterns once at module level
INCLUDE_PATTERN = re.compile(r"^\s*#include.*/.*\.h")
INCLUDE_PATH_PATTERN = re.compile(r'[<"]([^>"]+)[>"]')


def dictionary_sort_key(s: str) -> str:
    """
    Create a sort key that mimics 'sort -d' (dictionary order).
    Dictionary order only considers blanks and alphanumeric characters.
    This means punctuation like '.' is ignored during sorting.
    """
    # Keep only alphanumeric characters and spaces
    return "".join(c for c in s if c.isalnum() or c.isspace())
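As an aside (not part of the deleted file): the dots in module names are ignored by this key, so the ordering matches GNU `sort -d`. A quick illustration with made-up module names:

```python
# Keys compared: "xrplbasics", "xrpldapp", "xrpldcore", "xrpljson"
names = ["xrpld.app", "xrpl-json", "xrpl.basics", "xrpld.core"]
print(sorted(names, key=dictionary_sort_key))
# -> ['xrpl.basics', 'xrpld.app', 'xrpld.core', 'xrpl-json']
```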
def get_level(file_path: str) -> str:
    """
    Extract the level from a file path (second and third directory components).
    Equivalent to bash: cut -d/ -f 2,3

    Examples:
        src/xrpld/app/main.cpp -> xrpld.app
        src/libxrpl/protocol/STObject.cpp -> libxrpl.protocol
        include/xrpl/basics/base_uint.h -> xrpl.basics
    """
    parts = file_path.split("/")

    # Get fields 2 and 3 (indices 1 and 2 in 0-based indexing)
    if len(parts) >= 3:
        level = f"{parts[1]}/{parts[2]}"
    elif len(parts) >= 2:
        level = f"{parts[1]}/toplevel"
    else:
        level = file_path

    # If the "level" indicates a file, cut off the filename
    if "." in level.split("/")[-1]:  # Avoid Path object creation
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        level = level.rsplit("/", 1)[0] + "/toplevel"

    return level.replace("/", ".")


def extract_include_level(include_line: str) -> Optional[str]:
    """
    Extract the include path from an #include directive.
    Gets the first two directory components from the include path.
    Equivalent to bash: cut -d/ -f 1,2

    Examples:
        #include <xrpl/basics/base_uint.h> -> xrpl.basics
        #include "xrpld/app/main/Application.h" -> xrpld.app
    """
    # Remove everything before the quote or angle bracket
    match = INCLUDE_PATH_PATTERN.search(include_line)
    if not match:
        return None

    include_path = match.group(1)
    parts = include_path.split("/")

    # Get first two fields (indices 0 and 1)
    if len(parts) >= 2:
        include_level = f"{parts[0]}/{parts[1]}"
    else:
        include_level = include_path

    # If the "includelevel" indicates a file, cut off the filename
    if "." in include_level.split("/")[-1]:  # Avoid Path object creation
        include_level = include_level.rsplit("/", 1)[0] + "/toplevel"

    return include_level.replace("/", ".")
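The docstring examples above, restated as executable assertions (an editorial check, not part of the deleted file):

```python
assert get_level("src/xrpld/app/main.cpp") == "xrpld.app"
assert get_level("include/xrpl/basics/base_uint.h") == "xrpl.basics"
assert extract_include_level("#include <xrpl/basics/base_uint.h>") == "xrpl.basics"
assert extract_include_level('#include "xrpld/app/main/Application.h"') == "xrpld.app"
```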
def find_repo_root(start_path: Path, depth_limit: int = 10) -> Path:
    """
    Find the repository root by looking for .git directory or src/include folders.
    Walks up the directory tree from the start path.
    """
    current = start_path.resolve()

    # Walk up the directory tree
    for _ in range(depth_limit):  # Limit search depth to prevent infinite loops
        # Check if this directory has src or include folders
        has_src = (current / "src").exists()
        has_include = (current / "include").exists()

        if has_src or has_include:
            return current

        # Check if this is a git repository root
        if (current / ".git").exists():
            # Check if it has src or include nearby
            if has_src or has_include:
                return current

        # Move up one level
        parent = current.parent
        if parent == current:  # Reached filesystem root
            break
        current = parent

    # If we couldn't find it, raise an error
    raise RuntimeError(
        "Could not find repository root. "
        "Expected to find a directory containing 'src' and/or 'include' folders."
    )


def get_scan_directories(repo_root: Path) -> List[Path]:
    """
    Get the list of directories to scan for include files.
    Returns paths that actually exist.
    """
    directories = []

    for dir_name in ["include", "src"]:
        dir_path = repo_root / dir_name
        if dir_path.exists() and dir_path.is_dir():
            directories.append(dir_path)

    if not directories:
        raise RuntimeError(f"No 'src' or 'include' directories found in {repo_root}")

    return directories
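A minimal usage sketch of these two helpers (editorial illustration, mirroring what `main()` does below — runnable from anywhere inside the checkout):

```python
root = find_repo_root(Path(__file__).parent)
for d in get_scan_directories(root):
    print(d.relative_to(root))  # expected: "include" and/or "src"
```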
def main():
    # Change to the script's directory
    script_dir = Path(__file__).parent.resolve()
    os.chdir(script_dir)

    # If the shell is interactive, clean up any flotsam before analyzing
    # Match bash behavior: check if PS1 is set (indicates interactive shell)
    # When running a script, PS1 is not set even if stdin/stdout are TTYs
    if os.environ.get("PS1"):
        try:
            subprocess.run(["git", "clean", "-ix"], check=False, timeout=30)
        except (subprocess.TimeoutExpired, KeyboardInterrupt):
            print("Skipping git clean...")
        except Exception:
            # If git clean fails for any reason, just continue
            pass

    # Clean up and create results directory
    results_dir = script_dir / "results"
    if results_dir.exists():
        import shutil

        shutil.rmtree(results_dir)
    results_dir.mkdir()

    # Find the repository root by searching for src and include directories
    try:
        repo_root = find_repo_root(script_dir)
        scan_dirs = get_scan_directories(repo_root)

        print(f"Found repository root: {repo_root}")
        print(f"Scanning directories:")
        for scan_dir in scan_dirs:
            print(f"  - {scan_dir.relative_to(repo_root)}")
    except RuntimeError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)

    print("\nScanning for raw includes...")
    # Find all #include directives
    raw_includes: List[Tuple[str, str]] = []
    rawincludes_file = results_dir / "rawincludes.txt"

    # Write to file as we go to avoid storing everything in memory
    with open(rawincludes_file, "w", buffering=8192) as raw_f:
        for dir_path in scan_dirs:
            print(f"  Scanning {dir_path.relative_to(repo_root)}...")

            for file_path in dir_path.rglob("*"):
                if not file_path.is_file():
                    continue

                try:
                    rel_path_str = str(file_path.relative_to(repo_root))

                    # Read file with larger buffer for better performance
                    with open(
                        file_path,
                        "r",
                        encoding="utf-8",
                        errors="ignore",
                        buffering=8192,
                    ) as f:
                        for line in f:
                            # Quick check before regex
                            if "#include" not in line or "boost" in line:
                                continue

                            if INCLUDE_PATTERN.match(line):
                                line_stripped = line.strip()
                                entry = f"{rel_path_str}:{line_stripped}\n"
                                print(entry, end="")
                                raw_f.write(entry)
                                raw_includes.append((rel_path_str, line_stripped))
                except Exception as e:
                    print(f"Error reading {file_path}: {e}", file=sys.stderr)
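The cheap substring test above acts as a pre-filter; the regex has the final say. A small editorial demonstration with hypothetical lines:

```python
for line in ('#include <xrpl/basics/Log.h>',
             '#include <boost/asio.hpp>',
             '// #include mentioned in a comment'):
    interesting = "#include" in line and "boost" not in line
    print(line, "->", bool(interesting and INCLUDE_PATTERN.match(line)))
# Only the first line matches: it names a '.h' file in a subdirectory
# and is not a boost header or a commented-out directive.
```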
    # Build levelization paths and count directly (no need to sort first)
    print("Build levelization paths")
    path_counts: Dict[Tuple[str, str], int] = defaultdict(int)

    for file_path, include_line in raw_includes:
        level = get_level(file_path)
        include_level = extract_include_level(include_line)

        if include_level and level != include_level:
            path_counts[(level, include_level)] += 1

    # Sort and deduplicate paths (using dictionary order like bash 'sort -d')
    print("Sort and deduplicate paths")

    paths_file = results_dir / "paths.txt"
    with open(paths_file, "w") as f:
        # Sort using dictionary order: only alphanumeric and spaces matter
        sorted_items = sorted(
            path_counts.items(),
            key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
        )
        for (level, include_level), count in sorted_items:
            line = f"{count:7} {level} {include_level}\n"
            print(line.rstrip())
            f.write(line)

    # Split into flat-file database
    print("Split into flat-file database")
    includes_dir = results_dir / "includes"
    included_by_dir = results_dir / "included_by"
    includes_dir.mkdir()
    included_by_dir.mkdir()

    # Batch writes by grouping data first to avoid repeated file opens
    includes_data: Dict[str, List[Tuple[str, int]]] = defaultdict(list)
    included_by_data: Dict[str, List[Tuple[str, int]]] = defaultdict(list)

    # Process in sorted order to match bash script behavior (dictionary order)
    sorted_items = sorted(
        path_counts.items(),
        key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
    )
    for (level, include_level), count in sorted_items:
        includes_data[level].append((include_level, count))
        included_by_data[include_level].append((level, count))

    # Write all includes files in sorted order (dictionary order)
    for level in sorted(includes_data.keys(), key=dictionary_sort_key):
        entries = includes_data[level]
        with open(includes_dir / level, "w") as f:
            for include_level, count in entries:
                line = f"{include_level} {count}\n"
                print(line.rstrip())
                f.write(line)

    # Write all included_by files in sorted order (dictionary order)
    for include_level in sorted(included_by_data.keys(), key=dictionary_sort_key):
        entries = included_by_data[include_level]
        with open(included_by_dir / include_level, "w") as f:
            for level, count in entries:
                line = f"{level} {count}\n"
                print(line.rstrip())
                f.write(line)
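The shape of the flat-file database, with made-up numbers (editorial illustration): `paths.txt` holds `count level include_level`, while `includes/<level>` and `included_by/<include_level>` each hold the other endpoint plus the count.

```python
path_counts_example = {("xrpld.app", "xrpl.protocol"): 42}
for (level, include_level), count in path_counts_example.items():
    print(f"{count:7} {level} {include_level}")  # paths.txt line
    print(f"{include_level} {count}")            # line in includes/xrpld.app
    print(f"{level} {count}")                    # line in included_by/xrpl.protocol
```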
    # Search for loops
    print("Search for loops")
    loops_file = results_dir / "loops.txt"
    ordering_file = results_dir / "ordering.txt"

    loops_found: Set[Tuple[str, str]] = set()

    # Pre-load all include files into memory to avoid repeated I/O
    # This is the biggest optimization - we were reading files repeatedly in nested loops
    # Use list of tuples to preserve file order
    includes_cache: Dict[str, List[Tuple[str, int]]] = {}
    includes_lookup: Dict[str, Dict[str, int]] = {}  # For fast lookup

    # Note: bash script uses 'for source in *' which uses standard glob sorting,
    # NOT dictionary order. So we use standard sorted() here, not dictionary_sort_key.
    for include_file in sorted(includes_dir.iterdir(), key=lambda p: p.name):
        if not include_file.is_file():
            continue

        includes_cache[include_file.name] = []
        includes_lookup[include_file.name] = {}
        with open(include_file, "r") as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) >= 2:
                    include_name = parts[0]
                    include_count = int(parts[1])
                    includes_cache[include_file.name].append(
                        (include_name, include_count)
                    )
                    includes_lookup[include_file.name][include_name] = include_count

    with open(loops_file, "w", buffering=8192) as loops_f, open(
        ordering_file, "w", buffering=8192
    ) as ordering_f:

        # Use standard sorting to match bash glob expansion 'for source in *'
        for source in sorted(includes_cache.keys()):
            source_includes = includes_cache[source]

            for include, include_freq in source_includes:
                # Check if include file exists and references source
                if include not in includes_lookup:
                    continue

                source_freq = includes_lookup[include].get(source)

                if source_freq is not None:
                    # Found a loop
                    loop_key = tuple(sorted([source, include]))
                    if loop_key in loops_found:
                        continue
                    loops_found.add(loop_key)

                    loops_f.write(f"Loop: {source} {include}\n")

                    # If the counts are close, indicate that the two modules are
                    # on the same level, though they shouldn't be
                    diff = include_freq - source_freq
                    if diff > 3:
                        loops_f.write(f"  {source} > {include}\n\n")
                    elif diff < -3:
                        loops_f.write(f"  {include} > {source}\n\n")
                    elif source_freq == include_freq:
                        loops_f.write(f"  {include} == {source}\n\n")
                    else:
                        loops_f.write(f"  {include} ~= {source}\n\n")
                else:
                    ordering_f.write(f"{source} > {include}\n")

    # Print results
    print("\nOrdering:")
    with open(ordering_file, "r") as f:
        print(f.read(), end="")

    print("\nLoops:")
    with open(loops_file, "r") as f:
        print(f.read(), end="")


if __name__ == "__main__":
    main()
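The loop classifier above, isolated for clarity (editorial sketch, illustrative counts only): edge counts within ±3 of each other are flagged as "same level" (`~=` or `==`); otherwise the heavier direction wins.

```python
def classify(source: str, include: str, include_freq: int, source_freq: int) -> str:
    diff = include_freq - source_freq
    if diff > 3:
        return f"{source} > {include}"
    if diff < -3:
        return f"{include} > {source}"
    if source_freq == include_freq:
        return f"{include} == {source}"
    return f"{include} ~= {source}"

print(classify("A", "B", 10, 2))  # A > B   (clear winner)
print(classify("A", "B", 5, 4))   # B ~= A  (too close to call)
```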
.github/scripts/levelization/generate.sh (vendored, new executable file, 130 changes)
@@ -0,0 +1,130 @@
#!/bin/bash

# Usage: generate.sh
# This script takes no parameters, reads no environment variables,
# and can be run from any directory, as long as it is in the expected
# location in the repo.

pushd $( dirname $0 )

if [ -v PS1 ]
then
    # if the shell is interactive, clean up any flotsam before analyzing
    git clean -ix
fi

# Ensure all sorting is ASCII-order consistently across platforms.
export LANG=C

rm -rfv results
mkdir results
includes="$( pwd )/results/rawincludes.txt"
pushd ../../..
echo Raw includes:
grep -r '^[ ]*#include.*/.*\.h' include src | \
    grep -v boost | tee ${includes}
popd
pushd results

oldifs=${IFS}
IFS=:
mkdir includes
mkdir included_by
echo Build levelization paths
exec 3< ${includes} # open rawincludes.txt for input
while read -r -u 3 file include
do
    level=$( echo ${file} | cut -d/ -f 2,3 )
    # If the "level" indicates a file, cut off the filename
    if [[ "${level##*.}" != "${level}" ]]
    then
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        level="$( dirname ${level} )/toplevel"
    fi
    level=$( echo ${level} | tr '/' '.' )

    includelevel=$( echo ${include} | sed 's/.*["<]//; s/[">].*//' | \
        cut -d/ -f 1,2 )
    if [[ "${includelevel##*.}" != "${includelevel}" ]]
    then
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        includelevel="$( dirname ${includelevel} )/toplevel"
    fi
    includelevel=$( echo ${includelevel} | tr '/' '.' )

    if [[ "$level" != "$includelevel" ]]
    then
        echo $level $includelevel | tee -a paths.txt
    fi
done
echo Sort and deduplicate paths
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
mv sortedpaths.txt paths.txt
exec 3>&- # close fd 3
IFS=${oldifs}
unset oldifs

echo Split into flat-file database
exec 4<paths.txt # open paths.txt for input
while read -r -u 4 count level include
do
    echo ${include} ${count} | tee -a includes/${level}
    echo ${level} ${count} | tee -a included_by/${include}
done
exec 4>&- # close fd 4

loops="$( pwd )/loops.txt"
ordering="$( pwd )/ordering.txt"
pushd includes
echo Search for loops
# Redirect stdout to a file
exec 4>&1
exec 1>"${loops}"
for source in *
do
    if [[ -f "$source" ]]
    then
        exec 5<"${source}" # open for input
        while read -r -u 5 include includefreq
        do
            if [[ -f $include ]]
            then
                if grep -q -w $source $include
                then
                    if grep -q -w "Loop: $include $source" "${loops}"
                    then
                        continue
                    fi
                    sourcefreq=$( grep -w $source $include | cut -d\  -f2 )
                    echo "Loop: $source $include"
                    # If the counts are close, indicate that the two modules are
                    # on the same level, though they shouldn't be
                    if [[ $(( $includefreq - $sourcefreq )) -gt 3 ]]
                    then
                        echo -e "  $source > $include\n"
                    elif [[ $(( $sourcefreq - $includefreq )) -gt 3 ]]
                    then
                        echo -e "  $include > $source\n"
                    elif [[ $sourcefreq -eq $includefreq ]]
                    then
                        echo -e "  $include == $source\n"
                    else
                        echo -e "  $include ~= $source\n"
                    fi
                else
                    echo "$source > $include" >> "${ordering}"
                fi
            fi
        done
        exec 5>&- # close fd 5
    fi
done
exec 1>&4 # restore stdout
exec 4>&- # close fd 4
cat "${ordering}"
cat "${loops}"
popd
popd
popd
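Since the shell script replaces the Python one, a reviewer may want to confirm the two produce the same results tree. A hedged sketch (the `results-python` path is hypothetical — a saved copy of the removed script's output):

```python
import filecmp
from pathlib import Path

old = Path("results-python")  # saved output of the removed generate.py
new = Path(".github/scripts/levelization/results")
cmp = filecmp.dircmp(old, new)
print("only in old:", cmp.left_only)
print("only in new:", cmp.right_only)
print("differing:", cmp.diff_files)
```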
@@ -153,7 +153,6 @@ tests.libxrpl > xrpl.json
 tests.libxrpl > xrpl.net
 xrpl.core > xrpl.basics
 xrpl.core > xrpl.json
-xrpl.core > xrpl.ledger
 xrpl.json > xrpl.basics
 xrpl.ledger > xrpl.basics
 xrpl.ledger > xrpl.protocol
.github/scripts/strategy-matrix/generate.py (vendored, 61 changes)
@@ -51,20 +51,22 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
     # Only generate a subset of configurations in PRs.
     if not all:
         # Debian:
-        #   - Bookworm using GCC 13: Release on linux/amd64, set the reference
-        #     fee to 500.
-        #   - Bookworm using GCC 15: Debug on linux/amd64, enable code
-        #     coverage (which will be done below).
-        #   - Bookworm using Clang 16: Debug on linux/arm64, enable voidstar.
-        #   - Bookworm using Clang 17: Release on linux/amd64, set the
-        #     reference fee to 1000.
-        #   - Bookworm using Clang 20: Debug on linux/amd64.
+        #   - Bookworm using GCC 13: Release and Unity on linux/amd64, set
+        #     the reference fee to 500.
+        #   - Bookworm using GCC 15: Debug and no Unity on linux/amd64, enable
+        #     code coverage (which will be done below).
+        #   - Bookworm using Clang 16: Debug and no Unity on linux/arm64,
+        #     enable voidstar.
+        #   - Bookworm using Clang 17: Release and no Unity on linux/amd64,
+        #     set the reference fee to 1000.
+        #   - Bookworm using Clang 20: Debug and Unity on linux/amd64.
         if os["distro_name"] == "debian":
             skip = True
             if os["distro_version"] == "bookworm":
                 if (
                     f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-13"
                     and build_type == "Release"
+                    and "-Dunity=ON" in cmake_args
                     and architecture["platform"] == "linux/amd64"
                 ):
                     cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}"
@@ -72,12 +74,14 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                 if (
                     f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
                     and build_type == "Debug"
+                    and "-Dunity=OFF" in cmake_args
                     and architecture["platform"] == "linux/amd64"
                 ):
                     skip = False
                 if (
                     f"{os['compiler_name']}-{os['compiler_version']}" == "clang-16"
                     and build_type == "Debug"
+                    and "-Dunity=OFF" in cmake_args
                     and architecture["platform"] == "linux/arm64"
                 ):
                     cmake_args = f"-Dvoidstar=ON {cmake_args}"
@@ -85,6 +89,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                 if (
                     f"{os['compiler_name']}-{os['compiler_version']}" == "clang-17"
                     and build_type == "Release"
+                    and "-Dunity=ON" in cmake_args
                     and architecture["platform"] == "linux/amd64"
                 ):
                     cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=1000 {cmake_args}"
@@ -92,6 +97,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                 if (
                     f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
                     and build_type == "Debug"
+                    and "-Dunity=ON" in cmake_args
                     and architecture["platform"] == "linux/amd64"
                 ):
                     skip = False
@@ -99,14 +105,15 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                 continue

         # RHEL:
-        #   - 9 using GCC 12: Debug on linux/amd64.
-        #   - 10 using Clang: Release on linux/amd64.
+        #   - 9 using GCC 12: Debug and Unity on linux/amd64.
+        #   - 10 using Clang: Release and no Unity on linux/amd64.
         if os["distro_name"] == "rhel":
             skip = True
             if os["distro_version"] == "9":
                 if (
                     f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
                     and build_type == "Debug"
+                    and "-Dunity=ON" in cmake_args
                     and architecture["platform"] == "linux/amd64"
                 ):
                     skip = False
@@ -114,6 +121,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                 if (
                     f"{os['compiler_name']}-{os['compiler_version']}" == "clang-any"
                     and build_type == "Release"
+                    and "-Dunity=OFF" in cmake_args
                     and architecture["platform"] == "linux/amd64"
                 ):
                     skip = False
@@ -121,16 +129,17 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                 continue

         # Ubuntu:
-        #   - Jammy using GCC 12: Debug on linux/arm64.
-        #   - Noble using GCC 14: Release on linux/amd64.
-        #   - Noble using Clang 18: Debug on linux/amd64.
-        #   - Noble using Clang 19: Release on linux/arm64.
+        #   - Jammy using GCC 12: Debug and no Unity on linux/arm64.
+        #   - Noble using GCC 14: Release and Unity on linux/amd64.
+        #   - Noble using Clang 18: Debug and no Unity on linux/amd64.
+        #   - Noble using Clang 19: Release and Unity on linux/arm64.
         if os["distro_name"] == "ubuntu":
             skip = True
             if os["distro_version"] == "jammy":
                 if (
                     f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
                     and build_type == "Debug"
+                    and "-Dunity=OFF" in cmake_args
                     and architecture["platform"] == "linux/arm64"
                 ):
                     skip = False
@@ -138,18 +147,21 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                 if (
                     f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-14"
                     and build_type == "Release"
+                    and "-Dunity=ON" in cmake_args
                     and architecture["platform"] == "linux/amd64"
                 ):
                     skip = False
                 if (
                     f"{os['compiler_name']}-{os['compiler_version']}" == "clang-18"
                     and build_type == "Debug"
+                    and "-Dunity=OFF" in cmake_args
                     and architecture["platform"] == "linux/amd64"
                 ):
                     skip = False
                 if (
                     f"{os['compiler_name']}-{os['compiler_version']}" == "clang-19"
                     and build_type == "Release"
+                    and "-Dunity=ON" in cmake_args
                     and architecture["platform"] == "linux/arm64"
                 ):
                     skip = False
@@ -157,16 +169,20 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                 continue

         # MacOS:
-        #   - Debug on macos/arm64.
+        #   - Debug and no Unity on macos/arm64.
         if os["distro_name"] == "macos" and not (
-            build_type == "Debug" and architecture["platform"] == "macos/arm64"
+            build_type == "Debug"
+            and "-Dunity=OFF" in cmake_args
+            and architecture["platform"] == "macos/arm64"
         ):
             continue

         # Windows:
-        #   - Release on windows/amd64.
+        #   - Release and Unity on windows/amd64.
         if os["distro_name"] == "windows" and not (
-            build_type == "Release" and architecture["platform"] == "windows/amd64"
+            build_type == "Release"
+            and "-Dunity=ON" in cmake_args
+            and architecture["platform"] == "windows/amd64"
        ):
             continue

@@ -193,17 +209,18 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
         ):
             continue

-        # Enable code coverage for Debian Bookworm using GCC 15 in Debug on
-        # linux/amd64
+        # Enable code coverage for Debian Bookworm using GCC 15 in Debug and no
+        # Unity on linux/amd64
         if (
             f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
             and build_type == "Debug"
+            and "-Dunity=OFF" in cmake_args
             and architecture["platform"] == "linux/amd64"
         ):
             cmake_args = f"-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}"

         # Generate a unique name for the configuration, e.g. macos-arm64-debug
-        # or debian-bookworm-gcc-12-amd64-release.
+        # or debian-bookworm-gcc-12-amd64-release-unity.
         config_name = os["distro_name"]
         if (n := os["distro_version"]) != "":
             config_name += f"-{n}"
@@ -217,6 +234,8 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
         config_name += f"-{build_type.lower()}"
         if "-Dcoverage=ON" in cmake_args:
             config_name += "-coverage"
+        if "-Dunity=ON" in cmake_args:
+            config_name += "-unity"

         # Add the configuration to the list, with the most unique fields first,
         # so that they are easier to identify in the GitHub Actions UI, as long
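How a matrix entry's name is assembled after this change, simplified into a standalone sketch (field spellings assumed from the hunks above; the architecture string is passed in directly here for brevity):

```python
def config_name(os: dict, arch: str, build_type: str, cmake_args: str) -> str:
    name = os["distro_name"]
    if os["distro_version"]:
        name += f"-{os['distro_version']}"
    name += f"-{os['compiler_name']}-{os['compiler_version']}-{arch}-{build_type.lower()}"
    if "-Dcoverage=ON" in cmake_args:
        name += "-coverage"
    if "-Dunity=ON" in cmake_args:
        name += "-unity"
    return name

print(config_name(
    {"distro_name": "debian", "distro_version": "bookworm",
     "compiler_name": "gcc", "compiler_version": "13"},
    "amd64", "Release", "-Dunity=ON"))
# -> debian-bookworm-gcc-13-amd64-release-unity
```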
.github/scripts/strategy-matrix/linux.json (vendored, 2 changes)

@@ -208,5 +208,5 @@
     }
   ],
   "build_type": ["Debug", "Release"],
-  "cmake_args": [""]
+  "cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
 }
.github/scripts/strategy-matrix/macos.json (vendored, 5 changes)

@@ -15,5 +15,8 @@
     }
   ],
   "build_type": ["Debug", "Release"],
-  "cmake_args": ["-DCMAKE_POLICY_VERSION_MINIMUM=3.5"]
+  "cmake_args": [
+    "-Dunity=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5",
+    "-Dunity=ON -DCMAKE_POLICY_VERSION_MINIMUM=3.5"
+  ]
 }
.github/scripts/strategy-matrix/windows.json (vendored, 2 changes)

@@ -15,5 +15,5 @@
     }
   ],
   "build_type": ["Debug", "Release"],
-  "cmake_args": [""]
+  "cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
 }
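With each platform JSON now listing two `cmake_args` entries, every OS/compiler/architecture entry effectively fans out over the cross product below before the PR-subset filter in `generate.py` prunes it (illustrative sketch):

```python
from itertools import product

build_types = ["Debug", "Release"]
cmake_args = ["-Dunity=OFF", "-Dunity=ON"]
for bt, args in product(build_types, cmake_args):
    print(bt, args)
# 4 combinations per entry, double the previous 2.
```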
@@ -20,7 +20,7 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
       - name: Check levelization
-        run: python .github/scripts/levelization/generate.py
+        run: .github/scripts/levelization/generate.sh
       - name: Check for differences
         env:
           MESSAGE: |
@@ -32,7 +32,7 @@ jobs:
             removed from loops.txt, it's probably an improvement, while if
             something was added, it's probably a regression.

-            Run '.github/scripts/levelization/generate.py' in your repo, commit
+            Run '.github/scripts/levelization/generate.sh' in your repo, commit
             and push the changes. See .github/scripts/levelization/README.md for
             more info.
         run: |
.gitignore (vendored, 6 changes)

@@ -64,13 +64,7 @@ DerivedData
 /.vs/
 /.vscode/

-# zed IDE.
-/.zed/

-# AI tools.
-/.augment
-/.claude
-/CLAUDE.md

 # Python
 __pycache__
BUILD.md (37 changes)

@@ -368,36 +368,6 @@ The workaround for this error is to add two lines to your profile:
 tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
 ```

-### Set Up Ccache
-
-To speed up repeated compilations, we recommend that you install
-[ccache](https://ccache.dev), a tool that wraps your compiler so that it can
-cache build objects locally.
-
-#### Linux
-
-You can install it using the package manager, e.g. `sudo apt install ccache`
-(Ubuntu) or `sudo dnf install ccache` (RHEL).
-
-#### macOS
-
-You can install it using Homebrew, i.e. `brew install ccache`.
-
-#### Windows
-
-You can install it using Chocolatey, i.e. `choco install ccache`. If you already
-have Ccache installed, then `choco upgrade ccache` will update it to the latest
-version. However, if you see an error such as:
-
-```
-terminate called after throwing an instance of 'std::bad_alloc'
-  what():  std::bad_alloc
-C:\Program Files\Microsoft Visual Studio\2022\Community\MSBuild\Microsoft\VC\v170\Microsoft.CppCommon.targets(617,5): error MSB6006: "cl.exe" exited with code 3.
-```
-
-then please install a specific version of Ccache that we know works, via: `choco
-install ccache --version 4.11.3 --allow-downgrade`.
-
 ### Build and Test

 1. Create a build directory and move into it.
@@ -575,10 +545,16 @@ See [Sanitizers docs](./docs/build/sanitizers.md) for more details.
 | `assert`   | OFF | Enable assertions.                                             |
 | `coverage` | OFF | Prepare the coverage report.                                   |
 | `tests`    | OFF | Build tests.                                                   |
+| `unity`    | OFF | Configure a unity build.                                       |
 | `xrpld`    | OFF | Build the xrpld application, and not just the libxrpl library. |
 | `werr`     | OFF | Treat compilation warnings as errors                          |
 | `wextra`   | OFF | Enable additional compilation warnings                        |

+[Unity builds][5] may be faster for the first build
+(at the cost of much more memory) since they concatenate sources into fewer
+translation units. Non-unity builds may be faster for incremental builds,
+and can be helpful for detecting `#include` omissions.
+
 ## Troubleshooting

 ### Conan
@@ -645,6 +621,7 @@ If you want to experiment with a new package, follow these steps:
 [1]: https://github.com/conan-io/conan-center-index/issues/13168
 [2]: https://en.cppreference.com/w/cpp/compiler_support/20
 [3]: https://docs.conan.io/en/latest/getting_started.html
+[5]: https://en.wikipedia.org/wiki/Unity_build
 [6]: https://github.com/boostorg/beast/issues/2648
 [7]: https://github.com/boostorg/beast/issues/2661
 [gcovr]: https://gcovr.com/en/stable/getting-started.html
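Conceptually, a unity build concatenates source files into jumbo translation units so the compiler front end runs fewer times — which is also why the protobuf sources and the two test files in the CMake hunks below must opt out (their file-level statics and templates collide when merged). A toy editorial illustration of the mechanism, not CMake's actual implementation:

```python
from pathlib import Path

def make_unity_sources(sources: list[Path], batch_size: int = 15) -> list[str]:
    """Group sources into batches of #include lines, like CMake's UNITY_BUILD."""
    batches = [sources[i:i + batch_size] for i in range(0, len(sources), batch_size)]
    return ["\n".join(f'#include "{s}"' for s in batch) for batch in batches]

for unit in make_unity_sources([Path("a.cpp"), Path("b.cpp"), Path("c.cpp")], 2):
    print(unit, end="\n---\n")
```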
@@ -9,5 +9,8 @@ function (xrpl_add_test name)

   isolate_headers(${target} "${CMAKE_SOURCE_DIR}" "${CMAKE_SOURCE_DIR}/tests/${name}" PRIVATE)

+  # Make sure the test isn't optimized away in unity builds
+  set_target_properties(${target} PROPERTIES UNITY_BUILD_MODE GROUP UNITY_BUILD_BATCH_SIZE 0) # Adjust as needed
+
   add_test(NAME ${target} COMMAND ${target})
 endfunction ()
@@ -4,7 +4,12 @@

 include(target_protobuf_sources)

+# Protocol buffers cannot participate in a unity build,
+# because all the generated sources
+# define a bunch of `static const` variables with the same names,
+# so we just build them as a separate library.
 add_library(xrpl.libpb)
+set_target_properties(xrpl.libpb PROPERTIES UNITY_BUILD OFF)
 target_protobuf_sources(xrpl.libpb xrpl/proto LANGUAGE cpp IMPORT_DIRS include/xrpl/proto
   PROTOS include/xrpl/proto/xrpl.proto)
@@ -155,4 +160,12 @@ if (xrpld)
     # antithesis_instrumentation.h, which is not exported as INTERFACE
     target_include_directories(xrpld PRIVATE ${CMAKE_SOURCE_DIR}/external/antithesis-sdk)
   endif ()
+
+  # any files that don't play well with unity should be added here
+  if (tests)
+    set_source_files_properties(
+      # these two seem to produce conflicts in beast teardown template methods
+      src/test/rpc/ValidatorRPC_test.cpp src/test/ledger/Invariants_test.cpp PROPERTIES SKIP_UNITY_BUILD_INCLUSION
+      TRUE)
+  endif ()
 endif ()
@@ -30,6 +30,14 @@ if (tests)
   endif ()
 endif ()

+option(unity "Creates a build using UNITY support in cmake." OFF)
+if (unity)
+  if (NOT is_ci)
+    set(CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
+  endif ()
+  set(CMAKE_UNITY_BUILD ON CACHE BOOL "Do a unity build")
+endif ()
+
 if (is_clang AND is_linux)
   option(voidstar "Enable Antithesis instrumentation." OFF)
 endif ()
@@ -23,6 +23,7 @@ class Xrpl(ConanFile):
         "shared": [True, False],
         "static": [True, False],
         "tests": [True, False],
+        "unity": [True, False],
         "xrpld": [True, False],
     }

@@ -54,6 +55,7 @@ class Xrpl(ConanFile):
         "shared": False,
         "static": True,
         "tests": False,
+        "unity": False,
         "xrpld": False,
         "date/*:header_only": True,
         "ed25519/*:shared": False,
@@ -166,6 +168,7 @@ class Xrpl(ConanFile):
         tc.variables["rocksdb"] = self.options.rocksdb
         tc.variables["BUILD_SHARED_LIBS"] = self.options.shared
         tc.variables["static"] = self.options.static
+        tc.variables["unity"] = self.options.unity
         tc.variables["xrpld"] = self.options.xrpld
         tc.generate()
@@ -1,5 +1,5 @@
-#ifndef XRPL_BASICS_ROCKSDB_H_INCLUDED
-#define XRPL_BASICS_ROCKSDB_H_INCLUDED
+#ifndef XRPL_UNITY_ROCKSDB_H_INCLUDED
+#define XRPL_UNITY_ROCKSDB_H_INCLUDED

 #if XRPL_ROCKSDB_AVAILABLE
 // #include <rocksdb2/port/port_posix.h>
@@ -1,202 +0,0 @@
#ifndef XRPL_CORE_SERVICEREGISTRY_H_INCLUDED
#define XRPL_CORE_SERVICEREGISTRY_H_INCLUDED

#include <xrpl/basics/Blob.h>
#include <xrpl/basics/SHAMapHash.h>
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/ledger/CachedSLEs.h>

namespace xrpl {

// Forward declarations
namespace NodeStore {
class Database;
}
namespace Resource {
class Manager;
}
namespace perf {
class PerfLog;
}

class AcceptedLedger;
class AmendmentTable;
class Cluster;
class CollectorManager;
class DatabaseCon;
class Family;
class HashRouter;
class InboundLedgers;
class InboundTransactions;
class JobQueue;
class LedgerCleaner;
class LedgerMaster;
class LedgerReplayer;
class LoadFeeTrack;
class LoadManager;
class ManifestCache;
class NetworkOPs;
class OpenLedger;
class OrderBookDB;
class Overlay;
class PathRequests;
class PeerReservationTable;
class PendingSaves;
class RelationalDatabase;
class ServerHandler;
class SHAMapStore;
class TimeKeeper;
class TransactionMaster;
class TxQ;
class ValidatorList;
class ValidatorSite;

template <class Adaptor>
class Validations;
class RCLValidationsAdaptor;
using RCLValidations = Validations<RCLValidationsAdaptor>;

using NodeCache = TaggedCache<SHAMapHash, Blob>;

/** Service registry for dependency injection.

    This abstract interface provides access to various services and components
    used throughout the application. It separates the service locator pattern
    from the Application lifecycle management.

    Components that need access to services can hold a reference to
    ServiceRegistry rather than Application when they only need service
    access and not lifecycle management.
*/
class ServiceRegistry
{
public:
    ServiceRegistry() = default;
    virtual ~ServiceRegistry() = default;

    // Core infrastructure services
    virtual CollectorManager&
    getCollectorManager() = 0;

    virtual Family&
    getNodeFamily() = 0;

    virtual TimeKeeper&
    timeKeeper() = 0;

    virtual JobQueue&
    getJobQueue() = 0;

    virtual NodeCache&
    getTempNodeCache() = 0;

    virtual CachedSLEs&
    cachedSLEs() = 0;

    // Protocol and validation services
    virtual AmendmentTable&
    getAmendmentTable() = 0;

    virtual HashRouter&
    getHashRouter() = 0;

    virtual LoadFeeTrack&
    getFeeTrack() = 0;

    virtual LoadManager&
    getLoadManager() = 0;

    virtual RCLValidations&
    getValidations() = 0;

    virtual ValidatorList&
    validators() = 0;

    virtual ValidatorSite&
    validatorSites() = 0;

    virtual ManifestCache&
    validatorManifests() = 0;

    virtual ManifestCache&
    publisherManifests() = 0;

    // Network services
    virtual Overlay&
    overlay() = 0;

    virtual Cluster&
    cluster() = 0;

    virtual PeerReservationTable&
    peerReservations() = 0;

    virtual Resource::Manager&
    getResourceManager() = 0;

    // Storage services
    virtual NodeStore::Database&
    getNodeStore() = 0;

    virtual SHAMapStore&
    getSHAMapStore() = 0;

    virtual RelationalDatabase&
    getRelationalDatabase() = 0;

    // Ledger services
    virtual InboundLedgers&
    getInboundLedgers() = 0;

    virtual InboundTransactions&
    getInboundTransactions() = 0;

    virtual TaggedCache<uint256, AcceptedLedger>&
    getAcceptedLedgerCache() = 0;

    virtual LedgerMaster&
    getLedgerMaster() = 0;

    virtual LedgerCleaner&
    getLedgerCleaner() = 0;

    virtual LedgerReplayer&
    getLedgerReplayer() = 0;

    virtual PendingSaves&
    pendingSaves() = 0;

    virtual OpenLedger&
    openLedger() = 0;

    virtual OpenLedger const&
    openLedger() const = 0;

    // Transaction and operation services
    virtual NetworkOPs&
    getOPs() = 0;

    virtual OrderBookDB&
    getOrderBookDB() = 0;

    virtual TransactionMaster&
    getMasterTransaction() = 0;

    virtual TxQ&
    getTxQ() = 0;

    virtual PathRequests&
    getPathRequests() = 0;

    // Server services
    virtual ServerHandler&
    getServerHandler() = 0;

    virtual perf::PerfLog&
    getPerfLog() = 0;
};

} // namespace xrpl

#endif
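For context on what the deleted header expressed: a service registry is a narrow lookup interface that components depend on instead of the whole Application. A minimal Python analogue of the pattern (an editorial sketch, not rippled code):

```python
from typing import Protocol

class Clock(Protocol):
    def now(self) -> float: ...

class ServiceRegistry(Protocol):
    def time_keeper(self) -> Clock: ...

def needs_only_time(services: ServiceRegistry) -> float:
    # Depends on the narrow registry interface, not on app lifecycle.
    return services.time_keeper().now()
```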
@@ -45,6 +45,9 @@ public:

     static int const cMinOffset = -96;
     static int const cMaxOffset = 80;
+    // The -100 is used to allow 0 to sort less than small positive values
+    // which will have a large negative exponent.
+    static int const cZeroOffset = -100;

     // Maximum native value supported by the code
     constexpr static std::uint64_t cMinValue = 1'000'000'000'000'000ull;
@@ -524,7 +527,11 @@ STAmount::fromNumber(A const& a, Number const& number)

     auto const [mantissa, exponent] = working.normalizeToRange(cMinValue, cMaxValue);

-    return STAmount{asset, mantissa, exponent, negative};
+    // normalizeToRange produces values in canonical mantissa range [cMinValue, cMaxValue],
+    // but may produce out-of-range exponents for overflow/underflow cases.
+    // Use the regular constructor - canonicalize() will detect already-normalized mantissa
+    // and skip redundant scaling loops, while still handling overflow/underflow.
+    return STAmount{asset, static_cast<std::uint64_t>(mantissa), exponent, negative};
 }

 inline void
@@ -537,9 +544,7 @@ STAmount::negate()
 inline void
 STAmount::clear()
 {
-    // The -100 is used to allow 0 to sort less than a small positive values
-    // which have a negative exponent.
-    mOffset = integral() ? 0 : -100;
+    mOffset = integral() ? 0 : cZeroOffset;
     mValue = 0;
     mIsNegative = false;
 }
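Why `cZeroOffset` is -100: canonical nonzero amounts keep their exponent in [-96, 80], so giving zero an exponent below that range makes (exponent, mantissa) ordering place zero under every positive value. An illustrative Python model, not the C++ comparison code:

```python
amounts = [(-100, 0), (-96, 10**15), (-80, 10**15)]  # (offset, mantissa)
assert sorted(amounts)[0] == (-100, 0)               # zero sorts first
```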
@@ -16,7 +16,6 @@
 // Add new amendments to the top of this list.
 // Keep it sorted in reverse chronological order.

-XRPL_FIX    (ExpiredNFTokenOfferRemoval, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX    (BatchInnerSigs,             Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FEATURE(LendingProtocol,            Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FEATURE(PermissionDelegationV1_1,   Supported::no,  VoteBehavior::DefaultNo)
@@ -38,7 +38,10 @@ Number::getround()
 Number::rounding_mode
 Number::setround(rounding_mode mode)
 {
-    return std::exchange(mode_, mode);
+    auto const old = mode_;
+    if (old != mode)
+        mode_ = mode;
+    return old;
 }

 MantissaRange::mantissa_scale
@@ -52,6 +55,8 @@ Number::setMantissaScale(MantissaRange::mantissa_scale scale)
 {
     if (scale != MantissaRange::small && scale != MantissaRange::large)
         LogicError("Unknown mantissa scale");
+    if (range_.get().scale == scale)
+        return;
     range_ = scale == MantissaRange::small ? smallRange : largeRange;
 }
@@ -85,8 +85,7 @@ registerSSLCerts(boost::asio::ssl::context& ctx, boost::system::error_code& ec,
     // There is a very unpleasant interaction between <wincrypt> and
     // openssl x509 types (namely the former has macros that stomp
     // on the latter), these undefs allow this TU to be safely used in
-    // unity builds without messing up subsequent TUs. Although we
-    // no longer use unity builds, leaving the undefs here does no harm.
+    // unity builds without messing up subsequent TUs.
 #if BOOST_OS_WINDOWS
 #undef X509_NAME
 #undef X509_EXTENSIONS
@@ -866,34 +866,44 @@ STAmount::canonicalize()

     if (mValue == 0)
     {
-        mOffset = -100;
+        mOffset = cZeroOffset;
         mIsNegative = false;
         return;
     }

-    while ((mValue < cMinValue) && (mOffset > cMinOffset))
+    // Fast path: if mantissa is already in canonical range, skip scaling loops.
+    // This handles values from normalizeToRange that only need overflow/underflow checks.
+    bool const mantissaCanonical = (mValue >= cMinValue) && (mValue <= cMaxValue);
+
+    if (!mantissaCanonical)
     {
-        mValue *= 10;
-        --mOffset;
-    }
-
-    while (mValue > cMaxValue)
-    {
-        if (mOffset >= cMaxOffset)
-            Throw<std::runtime_error>("value overflow");
-
-        mValue /= 10;
-        ++mOffset;
+        // Mantissa needs normalization
+        while ((mValue < cMinValue) && (mOffset > cMinOffset))
+        {
+            mValue *= 10;
+            --mOffset;
+        }
+
+        while (mValue > cMaxValue)
+        {
+            if (mOffset >= cMaxOffset)
+                Throw<std::runtime_error>("value overflow");
+
+            mValue /= 10;
+            ++mOffset;
+        }
     }

+    // Check for underflow (applies whether we scaled or not)
     if ((mOffset < cMinOffset) || (mValue < cMinValue))
     {
         mValue = 0;
         mIsNegative = false;
-        mOffset = -100;
+        mOffset = cZeroOffset;
         return;
     }

+    // Check for overflow (applies whether we scaled or not)
     if (mOffset > cMaxOffset)
         Throw<std::runtime_error>("value overflow");

@@ -903,7 +913,7 @@ STAmount::canonicalize()
     XRPL_ASSERT(
         (mValue == 0) || ((mOffset >= cMinOffset) && (mOffset <= cMaxOffset)),
         "xrpl::STAmount::canonicalize : offset inside range");
-    XRPL_ASSERT((mValue != 0) || (mOffset != -100), "xrpl::STAmount::canonicalize : value or offset set");
+    XRPL_ASSERT((mValue != 0) || (mOffset == cZeroOffset), "xrpl::STAmount::canonicalize : value or offset set");
 }

 void
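A Python model of the fast path added above (an editorial sketch, not the C++ code; it assumes `cMaxValue` is 9'999'999'999'999'999 as in rippled's STAmount): when the mantissa is already in [cMinValue, cMaxValue], the scaling loops are skipped and only the offset range checks run.

```python
C_MIN_VALUE, C_MAX_VALUE = 10**15, 10**16 - 1
C_MIN_OFFSET, C_MAX_OFFSET = -96, 80

def canonicalize(value: int, offset: int) -> tuple[int, int]:
    if value == 0:
        return 0, -100
    if not (C_MIN_VALUE <= value <= C_MAX_VALUE):     # slow path: rescale
        while value < C_MIN_VALUE and offset > C_MIN_OFFSET:
            value, offset = value * 10, offset - 1
        while value > C_MAX_VALUE:
            if offset >= C_MAX_OFFSET:
                raise OverflowError("value overflow")
            value, offset = value // 10, offset + 1
    if offset < C_MIN_OFFSET or value < C_MIN_VALUE:  # underflow -> zero
        return 0, -100
    if offset > C_MAX_OFFSET:
        raise OverflowError("value overflow")
    return value, offset

assert canonicalize(5 * 10**15, 0) == (5 * 10**15, 0)  # fast path: untouched
assert canonicalize(5, 0) == (5 * 10**15, -15)         # slow path: scaled up
assert canonicalize(1, -95) == (0, -100)               # underflow becomes zero
```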
@@ -876,48 +876,42 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
|
||||
env.fund(XRP(1000), alice, buyer, gw);
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, alice) == 0);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
|
||||
uint256 const nftAlice0ID = token::getNextID(env, alice, 0, tfTransferable);
|
||||
env(token::mint(alice, 0u), txflags(tfTransferable));
|
||||
env.close();
|
||||
uint8_t aliceCount = 1;
|
||||
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
|
||||
BEAST_EXPECT(ownerCount(env, alice) == 1);
|
||||
|
||||
uint256 const nftXrpOnlyID = token::getNextID(env, alice, 0, tfOnlyXRP | tfTransferable);
|
||||
env(token::mint(alice, 0), txflags(tfOnlyXRP | tfTransferable));
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
|
||||
BEAST_EXPECT(ownerCount(env, alice) == 1);
|
||||
|
||||
uint256 nftNoXferID = token::getNextID(env, alice, 0);
|
||||
env(token::mint(alice, 0));
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
|
||||
BEAST_EXPECT(ownerCount(env, alice) == 1);
|
||||
|
||||
// alice creates sell offers for her nfts.
|
||||
uint256 const plainOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
|
||||
env(token::createOffer(alice, nftAlice0ID, XRP(10)), txflags(tfSellNFToken));
|
||||
env.close();
|
||||
aliceCount++;
|
||||
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
|
||||
BEAST_EXPECT(ownerCount(env, alice) == 2);
|
||||
|
||||
uint256 const audOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
|
||||
env(token::createOffer(alice, nftAlice0ID, gwAUD(30)), txflags(tfSellNFToken));
|
||||
env.close();
|
||||
aliceCount++;
|
||||
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
|
||||
BEAST_EXPECT(ownerCount(env, alice) == 3);
|
||||
|
||||
uint256 const xrpOnlyOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
|
||||
env(token::createOffer(alice, nftXrpOnlyID, XRP(20)), txflags(tfSellNFToken));
|
||||
env.close();
|
||||
aliceCount++;
|
||||
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
|
||||
BEAST_EXPECT(ownerCount(env, alice) == 4);
|
||||
|
||||
uint256 const noXferOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
|
||||
env(token::createOffer(alice, nftNoXferID, XRP(30)), txflags(tfSellNFToken));
|
||||
env.close();
|
||||
aliceCount++;
|
||||
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
|
||||
BEAST_EXPECT(ownerCount(env, alice) == 5);
|
||||
|
||||
// alice creates a sell offer that will expire soon.
|
||||
uint256 const aliceExpOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
|
||||
@@ -925,17 +919,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
|
||||
txflags(tfSellNFToken),
|
||||
token::expiration(lastClose(env) + 5));
|
||||
env.close();
|
||||
aliceCount++;
|
||||
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
|
||||
|
||||
// buyer creates a Buy offer that will expire soon.
|
||||
uint256 const buyerExpOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
|
||||
env(token::createOffer(buyer, nftAlice0ID, XRP(40)),
|
||||
token::owner(alice),
|
||||
token::expiration(lastClose(env) + 5));
|
||||
env.close();
|
||||
uint8_t buyerCount = 1;
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, alice) == 6);
|
||||
|
||||
//----------------------------------------------------------------------
|
||||
// preflight
|
||||
@@ -943,12 +927,12 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
|
||||
// Set a negative fee.
|
||||
env(token::acceptSellOffer(buyer, noXferOfferIndex), fee(STAmount(10ull, true)), ter(temBAD_FEE));
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
|
||||
// Set an invalid flag.
|
||||
env(token::acceptSellOffer(buyer, noXferOfferIndex), txflags(0x00008000), ter(temINVALID_FLAG));
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
|
||||
// Supply nether an sfNFTokenBuyOffer nor an sfNFTokenSellOffer field.
|
||||
{
|
||||
@@ -956,7 +940,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
|
||||
jv.removeMember(sfNFTokenSellOffer.jsonName);
|
||||
env(jv, ter(temMALFORMED));
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
}
|
||||
|
||||
// A buy offer may not contain a sfNFTokenBrokerFee field.
|
||||
@@ -965,7 +949,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
|
||||
jv[sfNFTokenBrokerFee.jsonName] = STAmount(500000).getJson(JsonOptions::none);
|
||||
env(jv, ter(temMALFORMED));
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
}
|
||||
|
||||
// A sell offer may not contain a sfNFTokenBrokerFee field.
|
||||
@@ -974,7 +958,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
|
||||
jv[sfNFTokenBrokerFee.jsonName] = STAmount(500000).getJson(JsonOptions::none);
|
||||
env(jv, ter(temMALFORMED));
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
}
|
||||
|
||||
// A brokered offer may not contain a negative or zero brokerFee.
|
||||
@@ -982,7 +966,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
|
||||
token::brokerFee(gwAUD(0)),
|
||||
ter(temMALFORMED));
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
|
||||
//----------------------------------------------------------------------
|
||||
// preclaim
|
||||
@@ -990,48 +974,33 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
|
||||
// The buy offer must be non-zero.
|
||||
env(token::acceptBuyOffer(buyer, beast::zero), ter(tecOBJECT_NOT_FOUND));
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
|
||||
// The buy offer must be present in the ledger.
|
||||
uint256 const missingOfferIndex = keylet::nftoffer(alice, 1).key;
|
||||
env(token::acceptBuyOffer(buyer, missingOfferIndex), ter(tecOBJECT_NOT_FOUND));
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
|
||||
// The buy offer must not have expired.
|
||||
// NOTE: this is only a preclaim check with the
|
||||
// fixExpiredNFTokenOfferRemoval amendment disabled.
|
||||
env(token::acceptBuyOffer(alice, buyerExpOfferIndex), ter(tecEXPIRED));
|
||||
env(token::acceptBuyOffer(buyer, aliceExpOfferIndex), ter(tecEXPIRED));
|
||||
env.close();
|
||||
if (features[fixExpiredNFTokenOfferRemoval])
|
||||
{
|
||||
buyerCount--;
|
||||
}
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
|
||||
// The sell offer must be non-zero.
|
||||
env(token::acceptSellOffer(buyer, beast::zero), ter(tecOBJECT_NOT_FOUND));
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
|
||||
// The sell offer must be present in the ledger.
|
||||
env(token::acceptSellOffer(buyer, missingOfferIndex), ter(tecOBJECT_NOT_FOUND));
|
||||
env.close();
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
|
||||
// The sell offer must not have expired.
|
||||
// NOTE: this is only a preclaim check with the
|
||||
// fixExpiredNFTokenOfferRemoval amendment disabled.
|
||||
env(token::acceptSellOffer(buyer, aliceExpOfferIndex), ter(tecEXPIRED));
|
||||
env.close();
|
||||
// Alice's count is decremented by one when the expired offer is
|
||||
// removed.
|
||||
if (features[fixExpiredNFTokenOfferRemoval])
|
||||
{
|
||||
aliceCount--;
|
||||
}
|
||||
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
|
||||
BEAST_EXPECT(ownerCount(env, buyer) == 0);
|
||||
|
||||
//----------------------------------------------------------------------
|
||||
// preclaim brokered
|
||||
@@ -1043,13 +1012,8 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(pay(gw, buyer, gwAUD(30)));
env.close();
aliceCount++;
buyerCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);

BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, alice) == 7);
BEAST_EXPECT(ownerCount(env, buyer) == 1);

// We're about to exercise offer brokering, so we need
// corresponding buy and sell offers.
@@ -1058,38 +1022,35 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const buyerOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftAlice0ID, gwAUD(29)), token::owner(alice));
env.close();
buyerCount++;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// gw attempts to broker offers that are not for the same token.
env(token::brokerOffers(gw, buyerOfferIndex, xrpOnlyOfferIndex), ter(tecNFTOKEN_BUY_SELL_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// gw attempts to broker offers that are not for the same currency.
env(token::brokerOffers(gw, buyerOfferIndex, plainOfferIndex), ter(tecNFTOKEN_BUY_SELL_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// In a brokered offer, the buyer must offer greater than or
// equal to the selling price.
env(token::brokerOffers(gw, buyerOfferIndex, audOfferIndex), ter(tecINSUFFICIENT_PAYMENT));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// Remove buyer's offer.
env(token::cancelOffer(buyer, {buyerOfferIndex}));
env.close();
buyerCount--;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
}
{
// buyer creates a buy offer for one of alice's nfts.
uint256 const buyerOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftAlice0ID, gwAUD(31)), token::owner(alice));
env.close();
buyerCount++;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// Broker sets their fee in a denomination other than the one
// used by the offers
@@ -1097,14 +1058,14 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
    token::brokerFee(XRP(40)),
    ter(tecNFTOKEN_BUY_SELL_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// Broker fee way too big.
env(token::brokerOffers(gw, buyerOfferIndex, audOfferIndex),
    token::brokerFee(gwAUD(31)),
    ter(tecINSUFFICIENT_PAYMENT));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// Broker fee is smaller, but still too big once the offer
// seller's minimum is taken into account.
@@ -1112,13 +1073,12 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
    token::brokerFee(gwAUD(1.5)),
    ter(tecINSUFFICIENT_PAYMENT));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// Remove buyer's offer.
env(token::cancelOffer(buyer, {buyerOfferIndex}));
env.close();
buyerCount--;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
}
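The brokered-fee hunks above encode three distinct failure modes. Restated
compactly (these lines only summarize calls already shown; no new API):

    // Fee denominated in a different asset than the offers -> tecNFTOKEN_BUY_SELL_MISMATCH
    // Fee at or above the buyer's full bid                 -> tecINSUFFICIENT_PAYMENT
    // Fee leaving less than the seller's minimum           -> tecINSUFFICIENT_PAYMENT
    env(token::brokerOffers(gw, buyerOfferIndex, audOfferIndex),
        token::brokerFee(gwAUD(1.5)),
        ter(tecINSUFFICIENT_PAYMENT));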
//----------------------------------------------------------------------
// preclaim buy
@@ -1127,18 +1087,17 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const buyerOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftAlice0ID, gwAUD(30)), token::owner(alice));
env.close();
buyerCount++;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// Don't accept a buy offer if the sell flag is set.
env(token::acceptBuyOffer(buyer, plainOfferIndex), ter(tecNFTOKEN_OFFER_TYPE_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
BEAST_EXPECT(ownerCount(env, alice) == 7);

// An account can't accept its own offer.
env(token::acceptBuyOffer(buyer, buyerOfferIndex), ter(tecCANT_ACCEPT_OWN_NFTOKEN_OFFER));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// An offer acceptor must have enough funds to pay for the offer.
env(pay(buyer, gw, gwAUD(30)));
@@ -1146,7 +1105,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
BEAST_EXPECT(env.balance(buyer, gwAUD) == gwAUD(0));
env(token::acceptBuyOffer(alice, buyerOfferIndex), ter(tecINSUFFICIENT_FUNDS));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// alice gives her NFT to gw, so alice no longer owns nftAlice0.
{
@@ -1155,7 +1114,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
    env.close();
    env(token::acceptSellOffer(gw, offerIndex));
    env.close();
    BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
    BEAST_EXPECT(ownerCount(env, alice) == 7);
}
env(pay(gw, buyer, gwAUD(30)));
env.close();
@@ -1163,13 +1122,12 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
// alice can't accept a buy offer for an NFT she no longer owns.
env(token::acceptBuyOffer(alice, buyerOfferIndex), ter(tecNO_PERMISSION));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// Remove buyer's offer.
env(token::cancelOffer(buyer, {buyerOfferIndex}));
env.close();
buyerCount--;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
}

//----------------------------------------------------------------------
// preclaim sell
@@ -1178,24 +1136,23 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const buyerOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftXrpOnlyID, XRP(30)), token::owner(alice));
env.close();
buyerCount++;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// Don't accept a sell offer without the sell flag set.
env(token::acceptSellOffer(alice, buyerOfferIndex), ter(tecNFTOKEN_OFFER_TYPE_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
BEAST_EXPECT(ownerCount(env, alice) == 7);

// An account can't accept its own offer.
env(token::acceptSellOffer(alice, plainOfferIndex), ter(tecCANT_ACCEPT_OWN_NFTOKEN_OFFER));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// The seller must currently be in possession of the token they
// are selling. alice gave nftAlice0ID to gw.
env(token::acceptSellOffer(buyer, plainOfferIndex), ter(tecNO_PERMISSION));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// gw gives nftAlice0ID back to alice. That allows us to check
// buyer attempting to accept one of alice's offers with
@@ -1206,14 +1163,14 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
    env.close();
    env(token::acceptSellOffer(alice, offerIndex));
    env.close();
    BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
    BEAST_EXPECT(ownerCount(env, alice) == 7);
}
env(pay(buyer, gw, gwAUD(30)));
env.close();
BEAST_EXPECT(env.balance(buyer, gwAUD) == gwAUD(0));
env(token::acceptSellOffer(buyer, audOfferIndex), ter(tecINSUFFICIENT_FUNDS));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
}

//----------------------------------------------------------------------
@@ -2812,7 +2769,6 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const nftokenID1 = token::getNextID(env, issuer, 0, tfTransferable);
env(token::mint(minter, 0), token::issuer(issuer), txflags(tfTransferable));
env.close();
uint8_t issuerCount, minterCount, buyerCount;

// Test how adding an Expiration field to an offer affects permissions
// for cancelling offers.
@@ -2836,12 +2792,9 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const offerBuyerToMinter = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID0, drops(1)), token::owner(minter), token::expiration(expiration));
env.close();
issuerCount = 1;
minterCount = 3;
buyerCount = 1;
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 1);
BEAST_EXPECT(ownerCount(env, minter) == 3);
BEAST_EXPECT(ownerCount(env, buyer) == 1);

// Test who gets to cancel the offers. Anyone outside of the
// offer-owner/destination pair should not be able to cancel
@@ -2853,36 +2806,32 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env(token::cancelOffer(buyer, {offerIssuerToMinter}), ter(tecNO_PERMISSION));
env.close();
BEAST_EXPECT(lastClose(env) < expiration);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 1);
BEAST_EXPECT(ownerCount(env, minter) == 3);
BEAST_EXPECT(ownerCount(env, buyer) == 1);

// The offer creator can cancel their own unexpired offer.
env(token::cancelOffer(minter, {offerMinterToAnyone}));
minterCount--;

// The destination of a sell offer can cancel the NFT owner's
// unexpired offer.
env(token::cancelOffer(issuer, {offerMinterToIssuer}));
minterCount--;

// Close enough ledgers to get past the expiration.
while (lastClose(env) < expiration)
    env.close();

BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 1);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1);

// Anyone can cancel expired offers.
env(token::cancelOffer(issuer, {offerBuyerToMinter}));
buyerCount--;
env(token::cancelOffer(buyer, {offerIssuerToMinter}));
issuerCount--;
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
}
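The cancellation rules exercised above form a small permission matrix.
Restating the calls from the test, with the outcome each one asserts:

    // Unexpired, caller neither owner nor destination -> tecNO_PERMISSION
    env(token::cancelOffer(buyer, {offerIssuerToMinter}), ter(tecNO_PERMISSION));
    // Unexpired, caller is the offer's creator -> allowed
    env(token::cancelOffer(minter, {offerMinterToAnyone}));
    // Unexpired, caller is the offer's destination -> allowed
    env(token::cancelOffer(issuer, {offerMinterToIssuer}));
    // Expired, any caller -> allowed
    env(token::cancelOffer(issuer, {offerBuyerToMinter}));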
// Show that:
// 1. An unexpired sell offer with an expiration can be accepted.
@@ -2895,70 +2844,44 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env(token::createOffer(minter, nftokenID0, drops(1)),
    token::expiration(expiration),
    txflags(tfSellNFToken));
minterCount++;

uint256 const offer1 = keylet::nftoffer(minter, env.seq(minter)).key;
env(token::createOffer(minter, nftokenID1, drops(1)),
    token::expiration(expiration),
    txflags(tfSellNFToken));
minterCount++;
env.close();
BEAST_EXPECT(lastClose(env) < expiration);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 3);
BEAST_EXPECT(ownerCount(env, buyer) == 0);

// Anyone can accept an unexpired sell offer.
env(token::acceptSellOffer(buyer, offer0));
minterCount--;
buyerCount++;

// Close enough ledgers to get past the expiration.
while (lastClose(env) < expiration)
    env.close();

BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 1);

// No one can accept an expired sell offer.
env(token::acceptSellOffer(buyer, offer1), ter(tecEXPIRED));

// With fixExpiredNFTokenOfferRemoval amendment, the first accept
// attempt deletes the expired offer. Without the amendment,
// the offer remains and we can try to accept it again.
if (features[fixExpiredNFTokenOfferRemoval])
{
    // After amendment: offer was deleted by first accept attempt
    minterCount--;
    env(token::acceptSellOffer(issuer, offer1), ter(tecOBJECT_NOT_FOUND));
}
else
{
    // Before amendment: offer still exists, second accept also
    // fails
    env(token::acceptSellOffer(issuer, offer1), ter(tecEXPIRED));
}
env(token::acceptSellOffer(issuer, offer1), ter(tecEXPIRED));
env.close();

// Check if the expired sell offer behavior matches amendment status
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The expired sell offer is still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 1);

if (!features[fixExpiredNFTokenOfferRemoval])
{
    // Before amendment: expired offer still exists and needs to be
    // cancelled
    env(token::cancelOffer(issuer, {offer1}));
    env.close();
    minterCount--;
}
// Ensure that owner counts are correct with and without the
// amendment
BEAST_EXPECT(ownerCount(env, issuer) == 0 && issuerCount == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1 && minterCount == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1 && buyerCount == 1);
// Anyone can cancel the expired sell offer.
env(token::cancelOffer(issuer, {offer1}));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1);

// Transfer nftokenID0 back to minter so we start the next test in
// a simple place.
@@ -2966,11 +2889,10 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env(token::createOffer(buyer, nftokenID0, XRP(0)), txflags(tfSellNFToken), token::destination(minter));
env.close();
env(token::acceptSellOffer(minter, offerSellBack));
buyerCount--;
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
}
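The amendment changes what a failed accept leaves behind, not its result
code. The observable sequence, assuming offer1 has already expired:

    env(token::acceptSellOffer(buyer, offer1), ter(tecEXPIRED));  // both configurations
    if (features[fixExpiredNFTokenOfferRemoval])
        // the first attempt deleted the offer
        env(token::acceptSellOffer(issuer, offer1), ter(tecOBJECT_NOT_FOUND));
    else
        // the offer is still on ledger and must be cancelled explicitly
        env(token::acceptSellOffer(issuer, offer1), ter(tecEXPIRED));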
// Show that:
// 1. An unexpired buy offer with an expiration can be accepted.
@@ -2981,16 +2903,14 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite

uint256 const offer0 = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID0, drops(1)), token::owner(minter), token::expiration(expiration));
buyerCount++;

uint256 const offer1 = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID1, drops(1)), token::owner(minter), token::expiration(expiration));
buyerCount++;
env.close();
BEAST_EXPECT(lastClose(env) < expiration);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// An unexpired buy offer can be accepted.
env(token::acceptBuyOffer(minter, offer0));
@@ -2999,48 +2919,26 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
while (lastClose(env) < expiration)
    env.close();

BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// An expired buy offer cannot be accepted.
env(token::acceptBuyOffer(minter, offer1), ter(tecEXPIRED));

// With fixExpiredNFTokenOfferRemoval amendment, the first accept
// attempt deletes the expired offer. Without the amendment,
// the offer remains and we can try to accept it again.
if (features[fixExpiredNFTokenOfferRemoval])
{
    // After amendment: offer was deleted by first accept attempt
    buyerCount--;
    env(token::acceptBuyOffer(issuer, offer1), ter(tecOBJECT_NOT_FOUND));
}
else
{
    // Before amendment: offer still exists, second accept also
    // fails
    env(token::acceptBuyOffer(issuer, offer1), ter(tecEXPIRED));
}
env(token::acceptBuyOffer(issuer, offer1), ter(tecEXPIRED));
env.close();

// Check if the expired buy offer behavior matches amendment status
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The expired buy offer is still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

if (!features[fixExpiredNFTokenOfferRemoval])
{
    // Before amendment: expired offer still exists and can be
    // cancelled
    env(token::cancelOffer(issuer, {offer1}));
    env.close();
    buyerCount--;
}
// Ensure that owner counts are the same with and without the
// amendment
BEAST_EXPECT(ownerCount(env, issuer) == 0 && issuerCount == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1 && minterCount == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1 && buyerCount == 1);
// Anyone can cancel the expired buy offer.
env(token::cancelOffer(issuer, {offer1}));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1);

// Transfer nftokenID0 back to minter so we start the next test in
// a simple place.
@@ -3049,10 +2947,9 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(token::acceptSellOffer(minter, offerSellBack));
env.close();
buyerCount--;
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
}
// Show that in brokered mode:
// 1. An unexpired sell offer with an expiration can be accepted.
@@ -3065,74 +2962,50 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env(token::createOffer(minter, nftokenID0, drops(1)),
    token::expiration(expiration),
    txflags(tfSellNFToken));
minterCount++;

uint256 const sellOffer1 = keylet::nftoffer(minter, env.seq(minter)).key;
env(token::createOffer(minter, nftokenID1, drops(1)),
    token::expiration(expiration),
    txflags(tfSellNFToken));
minterCount++;

uint256 const buyOffer0 = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID0, drops(1)), token::owner(minter));
buyerCount++;

uint256 const buyOffer1 = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID1, drops(1)), token::owner(minter));
buyerCount++;

env.close();
BEAST_EXPECT(lastClose(env) < expiration);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 3);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// An unexpired offer can be brokered.
env(token::brokerOffers(issuer, buyOffer0, sellOffer0));
minterCount--;

// Close enough ledgers to get past the expiration.
while (lastClose(env) < expiration)
    env.close();

BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// If the sell offer is expired it cannot be brokered.
env(token::brokerOffers(issuer, buyOffer1, sellOffer1), ter(tecEXPIRED));
env.close();

if (features[fixExpiredNFTokenOfferRemoval])
{
    // With amendment: expired offers are deleted
    minterCount--;
}
// The expired sell offer is still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);

if (features[fixExpiredNFTokenOfferRemoval])
{
    // The sell offer was deleted, so no need to cancel it.
    // The buy offer still exists, so we can cancel it.
    env(token::cancelOffer(buyer, {buyOffer1}));
    buyerCount--;
}
else
{
    // Anyone can cancel the expired offers
    env(token::cancelOffer(buyer, {buyOffer1, sellOffer1}));
    minterCount--;
    buyerCount--;
}
// Anyone can cancel the expired sell offer.
env(token::cancelOffer(buyer, {buyOffer1, sellOffer1}));
env.close();
// Ensure that owner counts are the same with and without the
// amendment
BEAST_EXPECT(ownerCount(env, issuer) == 0 && issuerCount == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1 && minterCount == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1 && buyerCount == 1);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1);

// Transfer nftokenID0 back to minter so we start the next test in
// a simple place.
@@ -3141,10 +3014,9 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(token::acceptSellOffer(minter, offerSellBack));
env.close();
buyerCount--;
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
}
// Show that in brokered mode:
// 1. An unexpired buy offer with an expiration can be accepted.
@@ -3182,28 +3054,17 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// If the buy offer is expired it cannot be brokered.
env(token::brokerOffers(issuer, buyOffer1, sellOffer1), ter(tecEXPIRED));
env.close();

// The expired buy offer is still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
if (features[fixExpiredNFTokenOfferRemoval])
{
    // After amendment: expired offers were deleted during broker
    // attempt
    BEAST_EXPECT(ownerCount(env, minter) == 2);
    BEAST_EXPECT(ownerCount(env, buyer) == 1);
    // The buy offer was deleted, so no need to cancel it
    // The sell offer still exists, so we can cancel it
    env(token::cancelOffer(minter, {sellOffer1}));
}
else
{
    // Before amendment: expired offers still exist in ledger
    BEAST_EXPECT(ownerCount(env, minter) == 2);
    BEAST_EXPECT(ownerCount(env, buyer) == 2);
    // Anyone can cancel the expired offers
    env(token::cancelOffer(minter, {buyOffer1, sellOffer1}));
}
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// Anyone can cancel the expired buy offer.
env(token::cancelOffer(minter, {buyOffer1, sellOffer1}));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
@@ -3261,19 +3122,17 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// If the offers are expired they cannot be brokered.
env(token::brokerOffers(issuer, buyOffer1, sellOffer1), ter(tecEXPIRED));
env.close();

// The expired offers are still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
if (!features[fixExpiredNFTokenOfferRemoval])
{
    // Before amendment: expired offers still exist in ledger
    BEAST_EXPECT(ownerCount(env, minter) == 2);
    BEAST_EXPECT(ownerCount(env, buyer) == 2);
    // Anyone can cancel the expired offers
    env(token::cancelOffer(issuer, {buyOffer1, sellOffer1}));
}
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);

// Anyone can cancel the expired offers.
env(token::cancelOffer(issuer, {buyOffer1, sellOffer1}));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
@@ -6877,9 +6736,7 @@ public:
void
run() override
{
    testWithFeats(
        allFeatures - fixNFTokenReserve - featureNFTokenMintOffer - featureDynamicNFT -
        fixExpiredNFTokenOfferRemoval);
    testWithFeats(allFeatures - fixNFTokenReserve - featureNFTokenMintOffer - featureDynamicNFT);
}
};

@@ -6910,15 +6767,6 @@ class NFTokenWOModify_test : public NFTokenBaseUtil_test
}
};

class NFTokenWOExpiredOfferRemoval_test : public NFTokenBaseUtil_test
{
    void
    run() override
    {
        testWithFeats(allFeatures - fixExpiredNFTokenOfferRemoval);
    }
};

class NFTokenAllFeatures_test : public NFTokenBaseUtil_test
{
    void
@@ -1,7 +1,9 @@
#include <test/jtx.h>

#include <xrpl/basics/Number.h>
#include <xrpl/basics/random.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/protocol/IOUAmount.h>
#include <xrpl/protocol/STAmount.h>
#include <xrpl/protocol/XRPAmount.h>

@@ -1143,6 +1145,148 @@ public:
    }
}

void
testNumberConversion()
{
    testcase("Number to STAmount conversions");

    Issue const usd{Currency(0x5553440000000000), AccountID(0x4985601)};
    NumberSO stNumberSO{true};

    // Test zero conversion
    {
        Number const zero{};
        STAmount const result{usd, zero};
        BEAST_EXPECT(result.mantissa() == 0);
        BEAST_EXPECT(result.exponent() == STAmount::cZeroOffset);
        BEAST_EXPECT(!result.negative());
    }

    // Test positive zero
    {
        Number const zero{0, 0};
        STAmount const result{usd, zero};
        BEAST_EXPECT(result.mantissa() == 0);
        BEAST_EXPECT(result.exponent() == STAmount::cZeroOffset);
    }

    // Test negative zero (should become positive zero)
    {
        Number const negZero{-0, 0};
        STAmount const result{usd, negZero};
        BEAST_EXPECT(result.mantissa() == 0);
        BEAST_EXPECT(!result.negative());
    }

    // Test minimum positive IOU amount
    {
        Number const minPos{STAmount::cMinValue, STAmount::cMinOffset};
        STAmount const result{usd, minPos};
        BEAST_EXPECT(result.mantissa() == STAmount::cMinValue);
        BEAST_EXPECT(result.exponent() == STAmount::cMinOffset);
        BEAST_EXPECT(!result.negative());
    }

    // Test maximum positive IOU amount
    {
        Number const maxPos{STAmount::cMaxValue, STAmount::cMaxOffset};
        STAmount const result{usd, maxPos};
        BEAST_EXPECT(result.mantissa() == STAmount::cMaxValue);
        BEAST_EXPECT(result.exponent() == STAmount::cMaxOffset);
        BEAST_EXPECT(!result.negative());
    }

    // Test negative amounts
    {
        Number const neg{-static_cast<std::int64_t>(STAmount::cMinValue), STAmount::cMinOffset};
        STAmount const result{usd, neg};
        BEAST_EXPECT(result.mantissa() == STAmount::cMinValue);
        BEAST_EXPECT(result.exponent() == STAmount::cMinOffset);
        BEAST_EXPECT(result.negative());
    }

    // Test value requiring scale up (mantissa too small)
    {
        Number const small{1000000000000000ull / 10, -95}; // Will scale up
        STAmount const result{usd, small};
        BEAST_EXPECT(result.mantissa() >= STAmount::cMinValue);
        BEAST_EXPECT(result.mantissa() <= STAmount::cMaxValue);
        BEAST_EXPECT(result.exponent() >= STAmount::cMinOffset);
        BEAST_EXPECT(result.exponent() <= STAmount::cMaxOffset);
    }

    // Test value requiring scale down (mantissa too large)
    {
        Number const large{9999999999999999ull * 10, 79}; // Will scale down
        STAmount const result{usd, large};
        BEAST_EXPECT(result.mantissa() >= STAmount::cMinValue);
        BEAST_EXPECT(result.mantissa() <= STAmount::cMaxValue);
        BEAST_EXPECT(result.exponent() >= STAmount::cMinOffset);
        BEAST_EXPECT(result.exponent() <= STAmount::cMaxOffset);
    }

    // Test boundary mantissa values
    {
        Number const atMin{STAmount::cMinValue, 0};
        STAmount const result{usd, atMin};
        BEAST_EXPECT(result.mantissa() == STAmount::cMinValue);
    }

    {
        Number const atMax{STAmount::cMaxValue, 0};
        STAmount const result{usd, atMax};
        BEAST_EXPECT(result.mantissa() == STAmount::cMaxValue);
    }

    // Test typical amounts
    {
        Number const typical{1234567890123456ull, -10};
        STAmount const result{usd, typical};
        BEAST_EXPECT(result.mantissa() >= STAmount::cMinValue);
        BEAST_EXPECT(result.mantissa() <= STAmount::cMaxValue);
        BEAST_EXPECT(result.exponent() >= STAmount::cMinOffset);
        BEAST_EXPECT(result.exponent() <= STAmount::cMaxOffset);
    }

    // Test round-trip conversion (Number -> STAmount -> Number)
    {
        Number const original{5000000000000000ull, 5};
        STAmount const st{usd, original};
        Number const recovered{st};
        BEAST_EXPECT(original == recovered);
    }

    // Test various exponents
    for (int exp = STAmount::cMinOffset; exp <= STAmount::cMaxOffset; exp += 10)
    {
        Number const n{STAmount::cMinValue, exp};
        STAmount const result{usd, n};
        BEAST_EXPECT(result.mantissa() >= STAmount::cMinValue);
        BEAST_EXPECT(result.mantissa() <= STAmount::cMaxValue);
        BEAST_EXPECT(result.exponent() >= STAmount::cMinOffset);
        BEAST_EXPECT(result.exponent() <= STAmount::cMaxOffset);
    }

    // Test both mantissa scales (if applicable)
    {
        // Small mantissa scale test
        NumberMantissaScaleGuard guard{MantissaRange::small};
        Number const n{5000000000000000ull, 5};
        STAmount const result{usd, n};
        BEAST_EXPECT(result.mantissa() >= STAmount::cMinValue);
        BEAST_EXPECT(result.mantissa() <= STAmount::cMaxValue);
    }

    {
        // Large mantissa scale test
        NumberMantissaScaleGuard guard{MantissaRange::large};
        Number const n{5000000000000000ull, 5};
        STAmount const result{usd, n};
        BEAST_EXPECT(result.mantissa() >= STAmount::cMinValue);
        BEAST_EXPECT(result.mantissa() <= STAmount::cMaxValue);
    }
}
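For reference, the conversions tested above normalize a Number's mantissa
and exponent into STAmount's IOU range. A minimal usage sketch, reusing the
same issue and one of the values from the test:

    Issue const usd{Currency(0x5553440000000000), AccountID(0x4985601)};
    Number const n{1234567890123456ull, -10};
    STAmount const amt{usd, n};
    // amt.mantissa() lands in [cMinValue, cMaxValue] and amt.exponent()
    // in [cMinOffset, cMaxOffset]; zero maps to the canonical encoding
    // with exponent cZeroOffset, as the zero cases above assert.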
//--------------------------------------------------------------------------

void
@@ -1157,6 +1301,7 @@ public:
testParseJson();
testConvertXRP();
testConvertIOU();
testNumberConversion();
testCanAddXRP();
testCanAddIOU();
testCanAddMPT();

@@ -6,7 +6,6 @@

#include <xrpl/basics/TaggedCache.h>
#include <xrpl/beast/utility/PropertyStream.h>
#include <xrpl/core/ServiceRegistry.h>
#include <xrpl/protocol/Protocol.h>
#include <xrpl/shamap/TreeNodeCache.h>

@@ -92,7 +91,7 @@ class Validations;
class RCLValidationsAdaptor;
using RCLValidations = Validations<RCLValidationsAdaptor>;

class Application : public ServiceRegistry, public beast::PropertyStream::Source
class Application : public beast::PropertyStream::Source
{
public:
/* VFALCO NOTE
@@ -147,12 +146,92 @@ public:
virtual boost::asio::io_context&
getIOContext() = 0;

virtual CollectorManager&
getCollectorManager() = 0;
virtual Family&
getNodeFamily() = 0;
virtual TimeKeeper&
timeKeeper() = 0;
virtual JobQueue&
getJobQueue() = 0;
virtual NodeCache&
getTempNodeCache() = 0;
virtual CachedSLEs&
cachedSLEs() = 0;
virtual AmendmentTable&
getAmendmentTable() = 0;
virtual HashRouter&
getHashRouter() = 0;
virtual LoadFeeTrack&
getFeeTrack() = 0;
virtual LoadManager&
getLoadManager() = 0;
virtual Overlay&
overlay() = 0;
virtual TxQ&
getTxQ() = 0;
virtual ValidatorList&
validators() = 0;
virtual ValidatorSite&
validatorSites() = 0;
virtual ManifestCache&
validatorManifests() = 0;
virtual ManifestCache&
publisherManifests() = 0;
virtual Cluster&
cluster() = 0;
virtual PeerReservationTable&
peerReservations() = 0;
virtual RCLValidations&
getValidations() = 0;
virtual NodeStore::Database&
getNodeStore() = 0;
virtual InboundLedgers&
getInboundLedgers() = 0;
virtual InboundTransactions&
getInboundTransactions() = 0;

virtual TaggedCache<uint256, AcceptedLedger>&
getAcceptedLedgerCache() = 0;

virtual LedgerMaster&
getLedgerMaster() = 0;
virtual LedgerCleaner&
getLedgerCleaner() = 0;
virtual LedgerReplayer&
getLedgerReplayer() = 0;
virtual NetworkOPs&
getOPs() = 0;
virtual OrderBookDB&
getOrderBookDB() = 0;
virtual ServerHandler&
getServerHandler() = 0;
virtual TransactionMaster&
getMasterTransaction() = 0;
virtual perf::PerfLog&
getPerfLog() = 0;

virtual std::pair<PublicKey, SecretKey> const&
nodeIdentity() = 0;

virtual std::optional<PublicKey const>
getValidationPublicKey() const = 0;

virtual Resource::Manager&
getResourceManager() = 0;
virtual PathRequests&
getPathRequests() = 0;
virtual SHAMapStore&
getSHAMapStore() = 0;
virtual PendingSaves&
pendingSaves() = 0;
virtual OpenLedger&
openLedger() = 0;
virtual OpenLedger const&
openLedger() const = 0;
virtual RelationalDatabase&
getRelationalDatabase() = 0;

virtual std::chrono::milliseconds
getIOLatency() = 0;
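These hunks remove the ServiceRegistry base and declare the subsystem
accessors directly on Application again. A usage sketch (hypothetical call
site, not part of the diff) showing how consumers reach dependencies through
the interface:

    void
    example(Application& app)
    {
        // Pull dependencies through the Application interface rather
        // than a separate registry.
        auto& amendments = app.getAmendmentTable();
        auto const closeTime = app.timeKeeper().closeTime();
        (void)amendments;
        (void)closeTime;
    }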
@@ -53,17 +53,7 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx)
return {nullptr, tecOBJECT_NOT_FOUND};

if (hasExpired(ctx.view, (*offerSLE)[~sfExpiration]))
{
    // Before fixExpiredNFTokenOfferRemoval amendment, expired
    // offers caused tecEXPIRED in preclaim, leaving them on ledger
    // forever. After the amendment, we allow expired offers to
    // reach doApply() where they get deleted and tecEXPIRED is
    // returned.
    if (!ctx.view.rules().enabled(fixExpiredNFTokenOfferRemoval))
        return {nullptr, tecEXPIRED};
    // Amendment enabled: return the expired offer to be handled in
    // doApply
}
return {nullptr, tecEXPIRED};

if ((*offerSLE)[sfAmount].negative())
    return {nullptr, temBAD_OFFER};
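The gate above collapses to a single condition; a condensed sketch of the
same logic (summary only, not the committed code):

    // amendment off: expired offer fails preclaim with tecEXPIRED and
    //                stays on the ledger indefinitely
    // amendment on:  the expired offer passes preclaim so that doApply
    //                can delete it and return tecEXPIRED
    if (hasExpired(ctx.view, (*offerSLE)[~sfExpiration]) &&
        !ctx.view.rules().enabled(fixExpiredNFTokenOfferRemoval))
        return {nullptr, tecEXPIRED};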
@@ -309,7 +299,7 @@ NFTokenAcceptOffer::pay(AccountID const& from, AccountID const& to, STAmount con
{
    // This should never happen, but it's easy and quick to check.
    if (amount < beast::zero)
        return tecINTERNAL; // LCOV_EXCL_LINE
        return tecINTERNAL;

    auto const result = accountSend(view(), from, to, amount, j_);

@@ -420,39 +410,6 @@ NFTokenAcceptOffer::doApply()
auto bo = loadToken(ctx_.tx[~sfNFTokenBuyOffer]);
auto so = loadToken(ctx_.tx[~sfNFTokenSellOffer]);

// With fixExpiredNFTokenOfferRemoval amendment, check for expired offers
// and delete them, returning tecEXPIRED. This ensures expired offers
// are properly cleaned up from the ledger.
if (view().rules().enabled(fixExpiredNFTokenOfferRemoval))
{
    bool foundExpired = false;

    auto const deleteOfferIfExpired = [this, &foundExpired](std::shared_ptr<SLE> const& offer) -> TER {
        if (offer && hasExpired(view(), (*offer)[~sfExpiration]))
        {
            JLOG(j_.trace()) << "Offer is expired, deleting: " << offer->key();
            if (!nft::deleteTokenOffer(view(), offer))
            {
                // LCOV_EXCL_START
                JLOG(j_.fatal()) << "Unable to delete expired offer '" << offer->key() << "': ignoring";
                return tecINTERNAL;
                // LCOV_EXCL_STOP
            }
            JLOG(j_.trace()) << "Deleted offer " << offer->key();
            foundExpired = true;
        }
        return tesSUCCESS;
    };

    if (auto const r = deleteOfferIfExpired(bo); !isTesSuccess(r))
        return r;
    if (auto const r = deleteOfferIfExpired(so); !isTesSuccess(r))
        return r;

    if (foundExpired)
        return tecEXPIRED;
}
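// Note on the block above (summary only, not part of the diff): both
// offers are inspected before returning, so a single brokered transaction
// can clean up an expired buy offer and an expired sell offer in one pass.
// tecEXPIRED is reported once, after any deletions, and because it is a
// tec-class result the transaction claims a fee and the deletions are
// preserved in the ledger.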
if (bo && !nft::deleteTokenOffer(view(), bo))
{
    // LCOV_EXCL_START