Compare commits

...

12 Commits

Author SHA1 Message Date
tequ
60bb552780 clang-format 2026-04-27 14:37:05 +09:00
tequ
8cfd5c3eec Merge remote-tracking branch 'upstream/dev' into HookAdministrator 2026-04-27 14:29:23 +09:00
Alloy Networks
cd00ed72d8 change build instructions url 2026-04-24 11:12:28 +10:00
tequ
05a3e04f2d Fix BEAST_ENHANCED_LOGGING not working and restore original behavior 2026-04-24 11:11:40 +10:00
tequ
66f7294120 Test: hint build_test_hooks.sh when hook wasm is empty in hso() 2026-04-24 11:10:46 +10:00
Nicholas Dudfield
7f6ac75617 Revert "chore: use improved levelization script with threading and argparse"
This reverts commit 5c1d7d9ae9.
2026-04-24 11:09:19 +10:00
Nicholas Dudfield
4150f0383c chore: use improved levelization script with threading and argparse 2026-04-24 11:09:19 +10:00
Nicholas Dudfield
25123b370a chore: replace levelization shell script with python
Backport of XRPLF/rippled#6325. The python version runs ~80x faster.
2026-04-24 11:09:19 +10:00
tequ
0825bddd87 Enhance new account creation at SetHook
- Add owner count and account index to AccountRoot
- Increment account count at FeeSetting
2026-01-10 11:42:06 +09:00
tequ
07008da032 Update sfcodes.h 2026-01-05 19:15:51 +09:00
tequ
90b009d63c Merge remote-tracking branch 'upstream/dev' into HookAdministrator 2026-01-05 19:14:07 +09:00
tequ
19e2036115 HookAdministrator Amendment 2026-01-05 19:13:57 +09:00
18 changed files with 453 additions and 160 deletions

View File

@@ -10,7 +10,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Check levelization
run: Builds/levelization/levelization.sh
run: python Builds/levelization/levelization.py
- name: Check for differences
id: assert
run: |
@@ -40,7 +40,7 @@ jobs:
To fix it, you can do one of two things:
1. Download and apply the patch generated as an artifact of this
job to your repo, commit, and push.
2. Run './Builds/levelization/levelization.sh' in your repo,
2. Run 'python Builds/levelization/levelization.py' in your repo,
commit, and push.
See Builds/levelization/README.md for more info.

3
.gitignore vendored
View File

@@ -53,6 +53,9 @@ Builds/levelization/results/paths.txt
Builds/levelization/results/includes/
Builds/levelization/results/includedby/
# Python
__pycache__
# Ignore tmp directory.
tmp

View File

@@ -50,7 +50,7 @@ that `test` code should *never* be included in `ripple` code.)
## Validation
The [levelization.sh](levelization.sh) script takes no parameters,
The [levelization.py](levelization.py) script takes no parameters,
reads no environment variables, and can be run from any directory,
as long as it is in the expected location in the rippled repo.
It can be run at any time from within a checked out repo, and will
@@ -84,7 +84,7 @@ It generates many files of [results](results):
Github Actions workflow to test that levelization loops haven't
changed. Unfortunately, if changes are detected, it can't tell if
they are improvements or not, so if you have resolved any issues or
done anything else to improve levelization, run `levelization.sh`,
done anything else to improve levelization, run `levelization.py`,
and commit the updated results.
The `loops.txt` and `ordering.txt` files relate the modules
@@ -108,7 +108,7 @@ The committed files hide the detailed values intentionally, to
prevent false alarms and merging issues, and because it's easy to
get those details locally.
1. Run `levelization.sh`
1. Run `levelization.py`
2. Grep the modules in `paths.txt`.
* For example, if a cycle is found `A ~= B`, simply `grep -w
A Builds/levelization/results/paths.txt | grep -w B`

View File

@@ -0,0 +1,283 @@
#!/usr/bin/env python3
"""
Usage: levelization.py
This script takes no parameters, and can be called from any directory in the file system.
"""
import os
import re
import sys
from collections import defaultdict
from pathlib import Path

# Compile regex patterns once at module level
# Matches #include directives whose path contains a '/' and a ".h"
# (i.e. project-style includes rather than bare system headers).
INCLUDE_PATTERN = re.compile(r"^\s*#include.*/.*\.h")
# Captures the path between <...> or "..." in an #include directive.
INCLUDE_PATH_PATTERN = re.compile(r'[<"]([^>"]+)[>"]')
def dictionary_sort_key(s):
    """
    Emulate `sort -d` (dictionary order): build a comparison key that
    keeps only blanks and alphanumeric characters from the input.
    """
    kept = filter(lambda ch: ch.isalnum() or ch.isspace(), s)
    return "".join(kept)
def get_level(file_path):
    """
    Reduce a source file path to its module "level": the second and
    third path components joined with '.', mirroring the old bash
    `cut -d/ -f 2,3`.

    Examples:
        src/ripple/app/main.cpp -> ripple.app
        src/test/app/Import_test.cpp -> test.app
    """
    segments = file_path.split("/")
    if len(segments) > 2:
        level = "/".join(segments[1:3])
    elif len(segments) == 2:
        level = segments[1] + "/toplevel"
    else:
        level = file_path
    # If the last component is a filename (contains a dot), swap it for
    # the "toplevel" label as a workaround for `sort` inconsistencies
    # between different utility versions.
    head, sep, tail = level.rpartition("/")
    if "." in tail:
        level = (head if sep else tail) + "/toplevel"
    return level.replace("/", ".")
def extract_include_level(include_line):
    """
    Reduce the path of an ``#include`` directive to its first two
    directory components joined with '.', mirroring the old bash
    `cut -d/ -f 1,2`.

    Examples:
        #include <ripple/basics/base_uint.h> -> ripple.basics
        #include "ripple/app/main/Application.h" -> ripple.app

    Returns None when no <...> or "..." path is present.
    """
    found = INCLUDE_PATH_PATTERN.search(include_line)
    if found is None:
        return None
    segments = found.group(1).split("/")
    include_level = "/".join(segments[:2])
    # If the last component is a filename, swap it for the "toplevel"
    # label (same `sort`-stability workaround as get_level()).
    prefix, sep, tail = include_level.rpartition("/")
    if "." in tail:
        include_level = (prefix if sep else tail) + "/toplevel"
    return include_level.replace("/", ".")
def find_repository_directories(start_path, depth_limit=10):
    """
    Walk upward from ``start_path`` looking for a directory containing
    ``src`` and/or ``include``; that directory is treated as the
    repository root.

    Returns a (root, [directories to scan]) pair, with ``src`` listed
    before ``include`` when both exist.
    Raises RuntimeError if nothing is found within ``depth_limit``
    levels or before reaching the filesystem root.
    """
    probe = start_path.resolve()
    for _ in range(depth_limit):
        scan_dirs = [
            candidate
            for candidate in (probe / "src", probe / "include")
            if candidate.exists()
        ]
        if scan_dirs:
            return probe, scan_dirs
        if probe.parent == probe:  # hit the filesystem root
            break
        probe = probe.parent
    raise RuntimeError(
        "Could not find repository root. "
        "Expected to find a directory containing 'src' and/or 'include' folders."
    )
def main():
    """
    Run the full levelization analysis and write all results under
    Builds/levelization/results/: rawincludes.txt, paths.txt, the
    includes/ and includedby/ flat-file databases, loops.txt, and
    ordering.txt. Output is also echoed to stdout (tee-style, matching
    the bash script this replaces).
    """
    script_dir = Path(__file__).parent.resolve()
    # Work from the script's own directory so relative paths resolve.
    os.chdir(script_dir)
    # Clean up and create results directory.
    results_dir = script_dir / "results"
    if results_dir.exists():
        import shutil
        shutil.rmtree(results_dir)
    results_dir.mkdir()
    # Find the repository root.
    try:
        repo_root, scan_dirs = find_repository_directories(script_dir)
        print(f"Found repository root: {repo_root}")
        for scan_dir in scan_dirs:
            print(f" Scanning: {scan_dir.relative_to(repo_root)}")
    except RuntimeError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
    # Find all #include directives.
    print("\nScanning for raw includes...")
    raw_includes = []
    rawincludes_file = results_dir / "rawincludes.txt"
    with open(rawincludes_file, "w", buffering=8192) as raw_f:
        for dir_path in scan_dirs:
            for file_path in dir_path.rglob("*"):
                if not file_path.is_file():
                    continue
                try:
                    rel_path_str = str(file_path.relative_to(repo_root))
                    with open(
                        file_path, "r", encoding="utf-8", errors="ignore", buffering=8192
                    ) as f:
                        for line in f:
                            # Cheap substring pre-filter before the regex;
                            # boost includes are excluded, as in the bash
                            # version's `grep -v boost`.
                            if "#include" not in line or "boost" in line:
                                continue
                            if INCLUDE_PATTERN.match(line):
                                line_stripped = line.strip()
                                entry = f"{rel_path_str}:{line_stripped}\n"
                                print(entry, end="")
                                raw_f.write(entry)
                                raw_includes.append((rel_path_str, line_stripped))
                except Exception as e:
                    # Log and keep scanning; one unreadable file should not
                    # abort the whole analysis.
                    print(f"Error reading {file_path}: {e}", file=sys.stderr)
    # Build levelization paths and count directly.
    print("Build levelization paths")
    # Keyed by (including module, included module); value is the number
    # of cross-module includes between them.
    path_counts = defaultdict(int)
    for file_path, include_line in raw_includes:
        include_level = extract_include_level(include_line)
        if not include_level:
            continue
        level = get_level(file_path)
        # Only record includes that cross module boundaries.
        if level != include_level:
            path_counts[(level, include_level)] += 1
    # Sort and deduplicate paths.
    print("Sort and deduplicate paths")
    # dictionary_sort_key mimics `sort -d` so committed results stay
    # stable relative to the old shell pipeline.
    sorted_items = sorted(
        path_counts.items(),
        key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
    )
    paths_file = results_dir / "paths.txt"
    with open(paths_file, "w") as f:
        for (level, include_level), count in sorted_items:
            # 7-wide count column matches `uniq -c` output formatting.
            line = f"{count:7} {level} {include_level}\n"
            print(line.rstrip())
            f.write(line)
    # Split into flat-file database.
    print("Split into flat-file database")
    includes_dir = results_dir / "includes"
    includedby_dir = results_dir / "includedby"
    includes_dir.mkdir()
    includedby_dir.mkdir()
    # One file per module in each direction: what it includes, and who
    # includes it.
    includes_data = defaultdict(list)
    includedby_data = defaultdict(list)
    for (level, include_level), count in sorted_items:
        includes_data[level].append((include_level, count))
        includedby_data[include_level].append((level, count))
    for level in sorted(includes_data.keys(), key=dictionary_sort_key):
        with open(includes_dir / level, "w") as f:
            for include_level, count in includes_data[level]:
                line = f"{include_level} {count}\n"
                print(line.rstrip())
                f.write(line)
    for include_level in sorted(includedby_data.keys(), key=dictionary_sort_key):
        with open(includedby_dir / include_level, "w") as f:
            for level, count in includedby_data[include_level]:
                line = f"{level} {count}\n"
                print(line.rstrip())
                f.write(line)
    # Search for loops.
    print("Search for loops")
    loops_file = results_dir / "loops.txt"
    ordering_file = results_dir / "ordering.txt"
    # Pre-load all include files into memory for fast lookup.
    # includes_cache preserves file order; includes_lookup adds O(1)
    # reverse lookups (replacing the bash version's repeated greps).
    includes_cache = {}
    includes_lookup = {}
    for include_file in sorted(includes_dir.iterdir(), key=lambda p: p.name):
        if not include_file.is_file():
            continue
        includes_cache[include_file.name] = []
        includes_lookup[include_file.name] = {}
        with open(include_file, "r") as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) >= 2:
                    name, count = parts[0], int(parts[1])
                    includes_cache[include_file.name].append((name, count))
                    includes_lookup[include_file.name][name] = count
    # Unordered pairs already reported, so each loop appears once.
    loops_found = set()
    with open(loops_file, "w", buffering=8192) as loops_f, open(
        ordering_file, "w", buffering=8192
    ) as ordering_f:
        for source in sorted(includes_cache.keys()):
            for include, include_freq in includes_cache[source]:
                if include not in includes_lookup:
                    continue
                source_freq = includes_lookup[include].get(source)
                if source_freq is not None:
                    # Mutual inclusion: a levelization loop.
                    loop_key = tuple(sorted([source, include]))
                    if loop_key in loops_found:
                        continue
                    loops_found.add(loop_key)
                    loops_f.write(f"Loop: {source} {include}\n")
                    # If the counts are close (within 3), the two modules
                    # sit on the same level, though they shouldn't.
                    diff = include_freq - source_freq
                    if diff > 3:
                        loops_f.write(f" {source} > {include}\n\n")
                    elif diff < -3:
                        loops_f.write(f" {include} > {source}\n\n")
                    elif source_freq == include_freq:
                        loops_f.write(f" {include} == {source}\n\n")
                    else:
                        loops_f.write(f" {include} ~= {source}\n\n")
                else:
                    # One-way dependency: record the ordering.
                    ordering_f.write(f"{source} > {include}\n")
    # Print results.
    print("\nOrdering:")
    with open(ordering_file, "r") as f:
        print(f.read(), end="")
    print("\nLoops:")
    with open(loops_file, "r") as f:
        print(f.read(), end="")


if __name__ == "__main__":
    main()

View File

@@ -1,130 +0,0 @@
#!/bin/bash

# Usage: levelization.sh
# This script takes no parameters, reads no environment variables,
# and can be run from any directory, as long as it is in the expected
# location in the repo.
#
# Results are written under ./results/:
#   rawincludes.txt  - every non-boost project #include found in include/ and src/
#   paths.txt        - deduplicated "count level includelevel" records
#   includes/<mod>   - what each module includes, with counts
#   includedby/<mod> - who includes each module, with counts
#   loops.txt        - mutually-including module pairs
#   ordering.txt     - one-way "A > B" include relationships

# Work from the script's own directory so relative paths resolve.
pushd $( dirname $0 )

if [ -v PS1 ]
then
    # if the shell is interactive, clean up any flotsam before analyzing
    git clean -ix
fi

# Ensure all sorting is ASCII-order consistently across platforms.
export LANG=C

# Start from a clean results tree.
rm -rfv results
mkdir results
includes="$( pwd )/results/rawincludes.txt"

# Collect every project-style #include (ignoring boost) from the repo root.
pushd ../..
echo Raw includes:
grep -r '^[ ]*#include.*/.*\.h' include src | \
    grep -v boost | tee ${includes}
popd
pushd results

# rawincludes.txt lines look like "path/to/file:#include <...>", so
# read them back splitting fields on ':'.
oldifs=${IFS}
IFS=:
mkdir includes
mkdir includedby
echo Build levelization paths
exec 3< ${includes} # open rawincludes.txt for input
while read -r -u 3 file include
do
    # level: second and third path components of the including file
    level=$( echo ${file} | cut -d/ -f 2,3 )
    # If the "level" indicates a file, cut off the filename
    if [[ "${level##*.}" != "${level}" ]]
    then
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        level="$( dirname ${level} )/toplevel"
    fi
    level=$( echo ${level} | tr '/' '.' )
    # includelevel: first two path components of the included header
    includelevel=$( echo ${include} | sed 's/.*["<]//; s/[">].*//' | \
        cut -d/ -f 1,2 )
    if [[ "${includelevel##*.}" != "${includelevel}" ]]
    then
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        includelevel="$( dirname ${includelevel} )/toplevel"
    fi
    includelevel=$( echo ${includelevel} | tr '/' '.' )
    # Only record includes that cross module boundaries.
    if [[ "$level" != "$includelevel" ]]
    then
        echo $level $includelevel | tee -a paths.txt
    fi
done
echo Sort and dedup paths
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
mv sortedpaths.txt paths.txt
exec 3>&- #close fd 3
IFS=${oldifs}
unset oldifs

# Fan the counted pairs out into one file per module, in both
# directions (includes/ and includedby/).
echo Split into flat-file database
exec 4<paths.txt # open paths.txt for input
while read -r -u 4 count level include
do
    echo ${include} ${count} | tee -a includes/${level}
    echo ${level} ${count} | tee -a includedby/${include}
done
exec 4>&- #close fd 4

loops="$( pwd )/loops.txt"
ordering="$( pwd )/ordering.txt"
pushd includes
echo Search for loops
# Redirect stdout to a file
exec 4>&1
exec 1>"${loops}"
for source in *
do
    if [[ -f "$source" ]]
    then
        exec 5<"${source}" # open for input
        while read -r -u 5 include includefreq
        do
            # Only modules with their own includes/ file can loop back.
            if [[ -f $include ]]
            then
                if grep -q -w $source $include
                then
                    # Skip pairs already reported in the other direction.
                    if grep -q -w "Loop: $include $source" "${loops}"
                    then
                        continue
                    fi
                    sourcefreq=$( grep -w $source $include | cut -d\ -f2 )
                    echo "Loop: $source $include"
                    # If the counts are close, indicate that the two modules are
                    # on the same level, though they shouldn't be
                    if [[ $(( $includefreq - $sourcefreq )) -gt 3 ]]
                    then
                        echo -e " $source > $include\n"
                    elif [[ $(( $sourcefreq - $includefreq )) -gt 3 ]]
                    then
                        echo -e " $include > $source\n"
                    elif [[ $sourcefreq -eq $includefreq ]]
                    then
                        echo -e " $include == $source\n"
                    else
                        echo -e " $include ~= $source\n"
                    fi
                else
                    # One-way dependency: record the ordering.
                    echo "$source > $include" >> "${ordering}"
                fi
            fi
        done
        exec 5>&- #close fd 5
    fi
done
exec 1>&4 #close fd 1
exec 4>&- #close fd 4
cat "${ordering}"
cat "${loops}"
popd
popd
popd

View File

@@ -12,7 +12,7 @@ The server software that powers Xahau is called `xahaud` and is available in thi
### Build from Source
* [Read the build instructions in our documentation](https://xahau.network/infrastructure/building-xahau)
* [Read the build instructions in our documentation](https://xahau.network/docs/infrastructure/build-xahaud/)
* If you encounter any issues, please [open an issue](https://github.com/xahau/xahaud/issues)
## Highlights of Xahau

View File

@@ -68,6 +68,17 @@ target_link_libraries(xrpl.imports.main
$<$<BOOL:${voidstar}>:antithesis-sdk-cpp>
)
# date-tz for enhanced logging (always linked, code is #ifdef guarded)
if(TARGET date::date-tz)
target_link_libraries(xrpl.imports.main INTERFACE date::date-tz)
endif()
# BEAST_ENHANCED_LOGGING: enable for Debug builds OR when explicitly requested
# Uses generator expression so it works with multi-config generators (Xcode, VS, Ninja Multi-Config)
target_compile_definitions(xrpl.imports.main INTERFACE
$<$<OR:$<CONFIG:Debug>,$<BOOL:${BEAST_ENHANCED_LOGGING}>>:BEAST_ENHANCED_LOGGING=1>
)
include(add_module)
include(target_link_modules)

View File

@@ -240,6 +240,7 @@
#define sfLockingChainDoor ((8U << 16U) + 22U)
#define sfIssuingChainDoor ((8U << 16U) + 23U)
#define sfSubject ((8U << 16U) + 24U)
#define sfHookAdministrator ((8U << 16U) + 98U)
#define sfInform ((8U << 16U) + 99U)
#define sfIndexes ((19U << 16U) + 1U)
#define sfHashes ((19U << 16U) + 2U)

View File

@@ -80,7 +80,7 @@ namespace detail {
// Feature.cpp. Because it's only used to reserve storage, and determine how
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
// the actual number of amendments. A LogicError on startup will verify this.
static constexpr std::size_t numFeatures = 113;
static constexpr std::size_t numFeatures = 114;
/** Amendments that this server supports and the default voting behavior.
Whether they are enabled depends on the Rules defined in the validated

View File

@@ -223,6 +223,11 @@ constexpr std::uint32_t const tfNFTokenCancelOfferMask = ~tfUniversal;
// NFTokenAcceptOffer flags:
constexpr std::uint32_t const tfNFTokenAcceptOfferMask = ~tfUniversal;
enum SetHookFlags : uint32_t {
tfNewAccount = 0x00000001,
};
constexpr std::uint32_t const tfSetHookMask = ~(tfUniversal | tfNewAccount);
// URIToken mask
constexpr std::uint32_t const tfURITokenMintMask = ~(tfUniversal | tfBurnable);
constexpr std::uint32_t const tfURITokenNonMintMask = ~tfUniversal;

View File

@@ -31,6 +31,7 @@
// If you add an amendment here, then do not forget to increment `numFeatures`
// in include/xrpl/protocol/Feature.h.
XRPL_FEATURE(HookAdministrator, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(HookAPISerializedType240, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionedDomains, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(DynamicNFT, Supported::no, VoteBehavior::DefaultNo)

View File

@@ -262,6 +262,7 @@ LEDGER_ENTRY(ltACCOUNT_ROOT, 0x0061, AccountRoot, account, ({
{sfHookStateScale, soeOPTIONAL},
{sfCron, soeOPTIONAL},
{sfAMMID, soeOPTIONAL},
{sfHookAdministrator, soeOPTIONAL},
}))
/** A ledger object which contains a list of object identifiers.

View File

@@ -317,6 +317,7 @@ TYPED_SFIELD(sfAttestationRewardAccount, ACCOUNT, 21)
TYPED_SFIELD(sfLockingChainDoor, ACCOUNT, 22)
TYPED_SFIELD(sfIssuingChainDoor, ACCOUNT, 23)
TYPED_SFIELD(sfSubject, ACCOUNT, 24)
TYPED_SFIELD(sfHookAdministrator, ACCOUNT, 98)
TYPED_SFIELD(sfInform, ACCOUNT, 99)
// vector of 256-bit

View File

@@ -195,6 +195,7 @@ TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete, ({
/** This transaction type installs a hook. */
TRANSACTION(ttHOOK_SET, 22, SetHook, ({
{sfHooks, soeREQUIRED},
{sfDestination, soeOPTIONAL},
}))
/** This transaction mints a new NFT. */

View File

@@ -65,29 +65,16 @@ hso_delete(void (*f)(Json::Value& jv))
Json::Value
hso(std::vector<uint8_t> const& wasmBytes, void (*f)(Json::Value& jv))
{
if (wasmBytes.size() == 0)
throw std::runtime_error("empty hook wasm passed to hso()");
Json::Value jv;
jv[jss::CreateCode] = strHex(wasmBytes);
{
jv[jss::HookOn] =
"0000000000000000000000000000000000000000000000000000000000000000";
jv[jss::HookNamespace] = to_string(uint256{beast::zero});
jv[jss::HookApiVersion] = Json::Value{0};
}
if (f)
f(jv);
return jv;
return hso(strHex(wasmBytes), f);
}
Json::Value
hso(std::string const& wasmHex, void (*f)(Json::Value& jv))
{
if (wasmHex.size() == 0)
throw std::runtime_error("empty hook wasm passed to hso()");
throw std::runtime_error(
"empty hook wasm passed to hso(): run "
"src/test/app/build_test_hooks.sh to generate the hook wasm");
Json::Value jv;
jv[jss::CreateCode] = wasmHex;

View File

@@ -330,6 +330,9 @@ DeleteAccount::preclaim(PreclaimContext const& ctx)
if (sleAccount->isFieldPresent(sfHookNamespaces) ||
sleAccount->isFieldPresent(sfHooks))
return tecHAS_OBLIGATIONS;
if (sleAccount->isFieldPresent(sfHookAdministrator))
return tecHAS_OBLIGATIONS;
}
// When fixNFTokenRemint is enabled, we don't allow an account to be

View File

@@ -1037,7 +1037,9 @@ ValidNewAccountRoot::finalize(
if ((tt == ttPAYMENT || tt == ttIMPORT || tt == ttGENESIS_MINT ||
tt == ttREMIT || tt == ttAMM_CREATE ||
tt == ttXCHAIN_ADD_CLAIM_ATTESTATION ||
tt == ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION) &&
tt == ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION ||
(tt == ttHOOK_SET &&
view.rules().enabled(featureHookAdministrator))) &&
isTesSuccess(result))
{
std::uint32_t const startingSeq{

View File

@@ -675,6 +675,21 @@ SetHook::calculateBaseFee(ReadView const& view, STTx const& tx)
TER
SetHook::preclaim(ripple::PreclaimContext const& ctx)
{
if (ctx.tx.isFieldPresent(sfHookAdministrator))
{
auto const& administrator = ctx.tx.getAccountID(sfHookAdministrator);
auto const& sle = ctx.view.read(keylet::account(administrator));
if (!sle)
return tecNO_DST;
if (!sle->isFieldPresent(sfHookAdministrator))
return tecNO_PERMISSION;
if (sle->getAccountID(sfHookAdministrator) !=
ctx.tx.getAccountID(sfAccount))
return tecNO_PERMISSION;
}
auto const& hookSets = ctx.tx.getFieldArray(sfHooks);
for (auto const& hookSetObj : hookSets)
@@ -714,12 +729,46 @@ SetHook::preflight(PreflightContext const& ctx)
return ret;
if (ctx.rules.enabled(fixInvalidTxFlags) &&
ctx.tx.getFlags() & tfUniversalMask)
ctx.tx.getFlags() & tfSetHookMask)
{
JLOG(ctx.j.trace()) << "SetHook: Invalid flags set.";
return temINVALID_FLAG;
}
if (ctx.tx.isFlag(tfNewAccount) &&
!ctx.rules.enabled(featureHookAdministrator))
{
JLOG(ctx.j.trace()) << "SetHook: New account flag set but hook "
"administrator amendment is not enabled.";
return temDISABLED;
}
if (ctx.tx.isFieldPresent(sfDestination))
{
if (!ctx.rules.enabled(featureHookAdministrator))
{
JLOG(ctx.j.trace())
<< "HookSet: Hook administrator amendment not enabled.";
return temDISABLED;
}
if (ctx.tx.isFlag(tfNewAccount))
{
JLOG(ctx.j.trace())
<< "HookSet: Both new account flag and destination set. "
"New account flag and destination cannot be set at the same "
"time.";
return temMALFORMED;
}
if (ctx.tx.getAccountID(sfDestination) ==
ctx.tx.getAccountID(sfAccount))
{
JLOG(ctx.j.trace()) << "HookSet: Redundant hook administrator.";
return temREDUNDANT;
}
}
if (!ctx.tx.isFieldPresent(sfHooks))
{
JLOG(ctx.j.trace())
@@ -1255,6 +1304,23 @@ struct KeyletComparator
}
};
/** Derive an unused AccountID for a hook-created account.

    Each candidate is sha512Half(attempt counter, parent ledger hash,
    pseudoOwnerKey), fed through ripesha_hasher to produce an AccountID.
    The first candidate with no existing AccountRoot in `view` is
    returned. The derivation is deterministic for a given ledger, hence
    the amendment warning on the attempt limit below.

    @param view           Ledger view used to check for existing accounts.
    @param pseudoOwnerKey Extra entropy mixed into the hash (callers may
                          pass uint256{}; see SetHook::setHook).
    @return An AccountID with no AccountRoot in `view`, or beast::zero
            if all attempts collided — callers treat zero as failure.
*/
AccountID
randomAccountAddress(ReadView const& view, uint256 const& pseudoOwnerKey)
{
    // This number must not be changed without an amendment
    constexpr std::uint16_t maxAccountAttempts = 256;
    for (std::uint16_t i = 0; i < maxAccountAttempts; ++i)
    {
        ripesha_hasher rsh;
        // Mix the attempt index in so each iteration yields a new candidate.
        auto const hash = sha512Half(i, view.info().parentHash, pseudoOwnerKey);
        rsh(hash.data(), hash.size());
        AccountID const ret{static_cast<ripesha_hasher::result_type>(rsh)};
        // Accept the first ID that does not already exist in the ledger.
        if (!view.read(keylet::account(ret)))
            return ret;
    }
    // Exhausted all attempts (astronomically unlikely).
    return beast::zero;
}
TER
SetHook::setHook()
{
@@ -1274,11 +1340,69 @@ SetHook::setHook()
.app = ctx_.app,
.rules = ctx_.view().rules()};
const int blobMax = hook::maxHookWasmSize();
auto const accountKeylet = keylet::account(account_);
auto const hookKeylet = keylet::hook(account_);
auto targetAccount = ctx.tx[~sfDestination].value_or(account_);
if (ctx_.tx.isFlag(tfNewAccount))
{
// create the new account
auto const newAccount = randomAccountAddress(ctx_.view(), uint256{});
if (newAccount == beast::zero)
return tecDUPLICATE;
auto accountSLE = view().peek(accountKeylet);
auto sleNewAccount = std::make_shared<SLE>(keylet::account(newAccount));
sleNewAccount->setAccountID(sfAccount, newAccount);
sleNewAccount->setFieldAmount(sfBalance, STAmount{});
sleNewAccount->setFieldU32(sfOwnerCount, 1); // ltHook
std::uint32_t const seqno{
ctx_.view().rules().enabled(featureXahauGenesis)
? ctx_.view().info().parentCloseTime.time_since_epoch().count()
: ctx_.view().rules().enabled(featureDeletableAccounts)
? ctx_.view().seq()
: 1};
sleNewAccount->setFieldU32(sfSequence, seqno);
sleNewAccount->setFieldU32(sfFlags, lsfDisableMaster);
sleNewAccount->setAccountID(sfHookAdministrator, account_);
auto sleFees = view().peek(keylet::fees());
if (sleFees && view().rules().enabled(featureXahauGenesis))
{
auto actIdx = sleFees->isFieldPresent(sfAccountCount)
? sleFees->getFieldU64(sfAccountCount)
: 0;
sleNewAccount->setFieldU64(sfAccountIndex, actIdx);
sleFees->setFieldU64(sfAccountCount, actIdx + 1);
view().update(sleFees);
}
// fund AccountReserve + ObjectReserve (ltHook)
auto const requiredDrops = ctx_.view().fees().accountReserve(1);
auto sourceSle = ctx_.view().peek(keylet::account(account_));
if (!sourceSle)
return tefINTERNAL;
auto const sourceCurrentReserve = ctx_.view().fees().accountReserve(
sourceSle->getFieldU32(sfOwnerCount));
auto const sourceBalance = sourceSle->getFieldAmount(sfBalance).xrp();
if (sourceBalance < sourceCurrentReserve + requiredDrops)
return tecUNFUNDED;
sourceSle->setFieldAmount(sfBalance, sourceBalance - requiredDrops);
ctx_.view().update(sourceSle);
sleNewAccount->setFieldAmount(sfBalance, requiredDrops);
ctx_.view().insert(sleNewAccount);
targetAccount = newAccount;
}
const int blobMax = hook::maxHookWasmSize();
auto const hookKeylet = keylet::hook(targetAccount);
auto accountSLE = view().peek(keylet::account(targetAccount));
ripple::STArray newHooks{sfHooks, 8};
auto newHookSLE = std::make_shared<SLE>(hookKeylet);