Compare commits

..

22 Commits

Author SHA1 Message Date
Richard Holland
b350df0eda Merge branch 'dev' into switch-statement-700 2026-04-29 10:35:43 +10:00
tequ
ef7a03ec10 fix owner count assertion at XahauGenesis_test (#688) 2026-04-29 10:32:43 +10:00
tequ
6eb6c41ec6 fix: use OIDC token for Codecov uploads from fork PRs (#736) 2026-04-29 09:50:47 +10:00
tequ
9651f68b2e Add coverage workflow (#661)
Co-authored-by: Bronek Kozicki <brok@incorrekt.com>
Co-authored-by: Niq Dudfield <ndudfield@gmail.com>
2026-04-28 18:29:09 +10:00
tequ
f1f44ae232 Build xahau.h from source and check in workflow https://github.com/Xahau/xahaud/pull/674 2026-04-28 18:28:59 +10:00
Alloy Networks
c461dd9055 change build instructions url 2026-04-28 18:23:32 +10:00
tequ
73cf6d34cd Fix BEAST_ENHANCED_LOGGING not working and restore original behavior 2026-04-28 18:23:32 +10:00
tequ
ea92477d21 Test: hint build_test_hooks.sh when hook wasm is empty in hso() 2026-04-28 18:23:32 +10:00
Nicholas Dudfield
e4c7893bf0 Revert "chore: use improved levelization script with threading and argparse"
This reverts commit 5c1d7d9ae9.
2026-04-28 18:23:32 +10:00
Nicholas Dudfield
ad9d6a6eb3 chore: use improved levelization script with threading and argparse 2026-04-28 18:23:32 +10:00
Nicholas Dudfield
dbbffd917e chore: replace levelization shell script with python
Backport of XRPLF/rippled#6325. The python version runs ~80x faster.
2026-04-28 18:23:31 +10:00
tequ
1d7c7e5603 enable ccache direct_mode 2026-04-28 18:23:31 +10:00
tequ
52843e2321 output ccache configuration in release-builder 2026-04-28 18:23:31 +10:00
tequ
6aabbc940b fix: typo SignersListSet 2026-04-28 18:23:31 +10:00
tequ
3111ecea52 Update util_keylet fee test 2026-04-28 18:23:31 +10:00
tequ
1008508c9b Updated tests to align with the changes merged into the dev branch. 2026-04-28 18:23:31 +10:00
tequ
58e278289b Add tests for Hooks fee 2026-04-28 18:23:31 +10:00
tequ
d3d24f781b Merge fixAMMClawbackRounding amendment into featureAMMClawback amendment 2026-04-28 18:23:31 +10:00
yinyiqian1
131d659032 fixAMMClawbackRounding: adjust last holder's LPToken balance (#5513)
Due to rounding, the LPTokenBalance of the last LP might not match the LP's trustline balance. This was fixed for `AMMWithdraw` in `fixAMMv1_1` by adjusting the LPTokenBalance to be the same as the trustline balance. Since `AMMClawback` is also performing a withdrawal, we need to adjust LPTokenBalance as well in `AMMClawback`.

This change includes:
1. Refactored `verifyAndAdjustLPTokenBalance` function in `AMMUtils`, which both `AMMWithdraw` and `AMMClawback` call to adjust LPTokenBalance.
2. Added the unit test `testLastHolderLPTokenBalance` to test the scenario.
3. Modify the existing unit tests for `fixAMMClawbackRounding`.
2026-04-28 18:23:31 +10:00
tequ
503dee619a Merge fixAMMv1_3 amendment into featureAMM amendment 2026-04-28 18:23:31 +10:00
Gregory Tsipenyuk
1703d96a48 fix: Add AMMv1_3 amendment (#5203)
* Add AMM bid/create/deposit/swap/withdraw/vote invariants:
  - Deposit, Withdrawal invariants: `sqrt(asset1Balance * asset2Balance) >= LPTokens`.
  - Bid: `sqrt(asset1Balance * asset2Balance) > LPTokens` and the pool balances don't change.
  - Create: `sqrt(asset1Balance * asset2Balance) == LPTokens`.
  - Swap: `asset1BalanceAfter * asset2BalanceAfter >= asset1BalanceBefore * asset2BalanceBefore`
     and `LPTokens` don't change.
  - Vote: `LPTokens` and pool balances don't change.
  - All AMM and swap transactions: amounts and tokens are greater than zero, except on withdrawal if all tokens
    are withdrawn.
* Add AMM deposit and withdraw rounding to ensure AMM invariant:
  - On deposit, tokens out are rounded downward and deposit amount is rounded upward.
  - On withdrawal, tokens in are rounded upward and withdrawal amount is rounded downward.
* Add Order Book Offer invariant to verify consumed amounts. Consumed amounts are less than the offer.
* Fix Bid validation. `AuthAccount` can't have duplicate accounts or the submitter account.
2026-04-28 18:23:31 +10:00
Nicholas Dudfield
b960026701 fix: resolve switch fall-through in util_keylet unimplemented cases (#700) 2026-03-04 11:26:38 +07:00
155 changed files with 3055 additions and 18975 deletions

6
.codecov.yml Normal file
View File

@@ -0,0 +1,6 @@
coverage:
status:
project:
default:
target: 60%
threshold: 2%

View File

@@ -2,6 +2,14 @@ name: build
description: 'Builds the project with ccache integration'
inputs:
cmake-target:
description: 'CMake target to build'
required: false
default: all
cmake-args:
description: 'Additional CMake arguments'
required: false
default: null
generator:
description: 'CMake generator to use'
required: true
@@ -20,6 +28,10 @@ inputs:
description: 'C++ compiler to use'
required: false
default: ''
gcov:
description: 'Gcov to use'
required: false
default: ''
compiler-id:
description: 'Unique identifier: compiler-version-stdlib[-gccversion] (e.g. clang-14-libstdcxx-gcc11, gcc-13-libstdcxx)'
required: false
@@ -41,10 +53,11 @@ inputs:
required: false
default: 'dev'
stdlib:
description: 'C++ standard library to use'
description: 'C++ standard library to use (default = compiler default, e.g. GCC always uses libstdc++)'
required: true
type: choice
options:
- default
- libstdcxx
- libcxx
clang_gcc_toolchain:
@@ -87,11 +100,6 @@ runs:
export CCACHE_CONFIGPATH="$HOME/.config/ccache/ccache.conf"
echo "CCACHE_CONFIGPATH=$CCACHE_CONFIGPATH" >> $GITHUB_ENV
# Keep config separate from cache_dir so configs aren't swapped when CCACHE_DIR changes between steps
mkdir -p ~/.config/ccache
export CCACHE_CONFIGPATH="$HOME/.config/ccache/ccache.conf"
echo "CCACHE_CONFIGPATH=$CCACHE_CONFIGPATH" >> $GITHUB_ENV
# Configure ccache settings AFTER cache restore (prevents stale cached config)
ccache --set-config=max_size=${{ inputs.ccache_max_size }}
ccache --set-config=hash_dir=${{ inputs.ccache_hash_dir }}
@@ -122,6 +130,10 @@ runs:
export CXX="${{ inputs.cxx }}"
fi
if [ -n "${{ inputs.gcov }}" ]; then
ln -sf /usr/bin/${{ inputs.gcov }} /usr/local/bin/gcov
fi
# Create wrapper toolchain that overlays ccache on top of Conan's toolchain
# This enables ccache for the main app build without affecting Conan dependency builds
if [ "${{ inputs.ccache_enabled }}" = "true" ]; then
@@ -185,7 +197,8 @@ runs:
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=${TOOLCHAIN_FILE} \
-DCMAKE_BUILD_TYPE=${{ inputs.configuration }} \
-Dtests=TRUE \
-Dxrpld=TRUE
-Dxrpld=TRUE \
${{ inputs.cmake-args }}
- name: Show ccache config before build
if: inputs.ccache_enabled == 'true'
@@ -209,7 +222,7 @@ runs:
VERBOSE_FLAG="-- -v"
fi
cmake --build . --config ${{ inputs.configuration }} --parallel $(nproc) ${VERBOSE_FLAG}
cmake --build . --config ${{ inputs.configuration }} --parallel $(nproc) --target ${{ inputs.cmake-target }} ${VERBOSE_FLAG}
- name: Show ccache statistics
if: inputs.ccache_enabled == 'true'

View File

@@ -0,0 +1,107 @@
name: Check Genesis Hooks
on:
push:
pull_request:
jobs:
check-genesis-hooks:
runs-on: ubuntu-24.04
env:
CLANG_VERSION: 18
name: Verify xahau.h is in sync with genesis hooks
steps:
- name: Checkout repository
uses: actions/checkout@v6
# Install binaryen from GitHub Releases (pinned to version 100)
- name: Install binaryen (version 100)
run: |
curl -LO https://github.com/WebAssembly/binaryen/releases/download/version_100/binaryen-version_100-x86_64-linux.tar.gz
tar -xzf binaryen-version_100-x86_64-linux.tar.gz
sudo cp binaryen-version_100/bin/* /usr/local/bin/
wasm-opt --version
- name: Install clang-format
run: |
codename=$( lsb_release --codename --short )
sudo tee /etc/apt/sources.list.d/llvm.list >/dev/null <<EOF
deb http://apt.llvm.org/${codename}/ llvm-toolchain-${codename}-${CLANG_VERSION} main
deb-src http://apt.llvm.org/${codename}/ llvm-toolchain-${codename}-${CLANG_VERSION} main
EOF
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add
sudo apt-get update
sudo apt-get install clang-format-${CLANG_VERSION}
clang-format --version
# Install wasienv (WebAssembly SDK)
- name: Install wasienv
run: |
# Download install.sh
curl -o /tmp/wasienv-install.sh https://raw.githubusercontent.com/wasienv/wasienv/master/install.sh
# Replace /bin with /local/bin
sed -i 's|/bin|/local/bin|g' /tmp/wasienv-install.sh
# Execute the downloaded install script
bash /tmp/wasienv-install.sh
# Add wasienv to PATH for subsequent steps
- name: Setup wasienv
run: |
echo "$HOME/.wasienv/bin" >> $GITHUB_PATH
wasmcc -v || true
# Build and install hook-cleaner tool
- name: Build and install hook-cleaner
run: |
git clone https://github.com/richardah/hook-cleaner-c.git /tmp/hook-cleaner
cd /tmp/hook-cleaner
make
cp hook-cleaner /usr/local/bin/
chmod +x /usr/local/bin/hook-cleaner
# Build and install guard_checker tool
- name: Build and install guard_checker
run: |
cd include/xrpl/hook
make
cp guard_checker /usr/local/bin/
chmod +x /usr/local/bin/guard_checker
# Verify all required tools are available
- name: Verify required tools
run: |
echo "Checking tool availability..."
command -v wasmcc || (echo "Error: wasmcc not found" && exit 1)
command -v wasm-opt || (echo "Error: wasm-opt not found" && exit 1)
command -v hook-cleaner || (echo "Error: hook-cleaner not found" && exit 1)
command -v guard_checker || (echo "Error: guard_checker not found" && exit 1)
command -v xxd || (echo "Error: xxd not found" && exit 1)
command -v clang-format || (echo "Error: clang-format not found" && exit 1)
echo "All tools verified successfully"
# Execute build script to regenerate xahau.h
- name: Run build_xahau_h.sh
run: |
cd hook/genesis
./build_xahau_h.sh
# Check if xahau.h has changed (fail if out of sync)
- name: Verify xahau.h is in sync
run: |
if ! git diff --exit-code include/xrpl/hook/xahau.h; then
echo ""
echo "❌ ERROR: xahau.h is out of sync with genesis hooks"
echo ""
echo "The generated xahau.h differs from the committed version."
echo "Please run the following command and commit the changes:"
echo ""
echo " cd hook/genesis && ./build_xahau_h.sh"
echo ""
echo "Diff:"
git diff include/xrpl/hook/xahau.h
exit 1
fi
echo "✅ xahau.h is in sync with genesis hooks"

View File

@@ -57,8 +57,9 @@ jobs:
"cc": "gcc-11",
"cxx": "g++-11",
"compiler_version": 11,
"stdlib": "libstdcxx",
"configuration": "Debug"
"stdlib": "default",
"configuration": "Debug",
"job_type": "build"
},
{
"compiler_id": "gcc-13-libstdcxx",
@@ -66,8 +67,20 @@ jobs:
"cc": "gcc-13",
"cxx": "g++-13",
"compiler_version": 13,
"stdlib": "libstdcxx",
"configuration": "Debug"
"stdlib": "default",
"configuration": "Debug",
"job_type": "build"
},
{
"compiler_id": "gcc-13-libstdcxx",
"compiler": "gcc",
"cc": "gcc-13",
"cxx": "g++-13",
"gcov": "gcov-13",
"compiler_version": 13,
"stdlib": "default",
"configuration": "Debug",
"job_type": "coverage"
},
{
"compiler_id": "clang-14-libstdcxx-gcc11",
@@ -77,7 +90,8 @@ jobs:
"compiler_version": 14,
"stdlib": "libstdcxx",
"clang_gcc_toolchain": 11,
"configuration": "Debug"
"configuration": "Debug",
"job_type": "build"
},
{
"compiler_id": "clang-16-libstdcxx-gcc13",
@@ -87,7 +101,8 @@ jobs:
"compiler_version": 16,
"stdlib": "libstdcxx",
"clang_gcc_toolchain": 13,
"configuration": "Debug"
"configuration": "Debug",
"job_type": "build"
},
{
"compiler_id": "clang-17-libcxx",
@@ -96,7 +111,8 @@ jobs:
"cxx": "clang++-17",
"compiler_version": 17,
"stdlib": "libcxx",
"configuration": "Debug"
"configuration": "Debug",
"job_type": "build"
},
{
# Clang 18 - testing if it's faster than Clang 17 with libc++
@@ -107,14 +123,16 @@ jobs:
"cxx": "clang++-18",
"compiler_version": 18,
"stdlib": "libcxx",
"configuration": "Debug"
"configuration": "Debug",
"job_type": "build"
}
]
# Minimal matrix for PRs and feature branches
minimal_matrix = [
full_matrix[1], # gcc-13 (middle-ground gcc)
full_matrix[2] # clang-14 (mature, stable clang)
full_matrix[2], # gcc-13 coverage
full_matrix[3] # clang-14 (mature, stable clang)
]
# Determine which matrix to use based on the target branch
@@ -189,14 +207,21 @@ jobs:
# Select the appropriate matrix
if use_full:
if force_full:
print(f"Using FULL matrix (6 configs) - forced by [ci-nix-full-matrix] tag")
print(f"Using FULL matrix (7 configs) - forced by [ci-nix-full-matrix] tag")
else:
print(f"Using FULL matrix (6 configs) - targeting main branch")
print(f"Using FULL matrix (7 configs) - targeting main branch")
matrix = full_matrix
else:
print(f"Using MINIMAL matrix (2 configs) - feature branch/PR")
print(f"Using MINIMAL matrix (3 configs) - feature branch/PR")
matrix = minimal_matrix
# Add runs_on based on job_type
for entry in matrix:
if entry.get("job_type") == "coverage":
entry["runs_on"] = '["self-hosted", "generic", 24.04]'
else:
entry["runs_on"] = '["self-hosted", "generic", 20.04]'
# Output the matrix as JSON
output = json.dumps({"include": matrix})
with open(os.environ['GITHUB_OUTPUT'], 'a') as f:
@@ -204,7 +229,10 @@ jobs:
build:
needs: matrix-setup
runs-on: [self-hosted, generic, 20.04]
runs-on: ${{ fromJSON(matrix.runs_on) }}
permissions:
id-token: write
contents: read
container:
image: ubuntu:24.04
volumes:
@@ -233,7 +261,7 @@ jobs:
apt-get install -y software-properties-common
add-apt-repository ppa:ubuntu-toolchain-r/test -y
apt-get update
apt-get install -y python3 python-is-python3 pipx
apt-get install -y git python3 python-is-python3 pipx
pipx ensurepath
apt-get install -y cmake ninja-build ${{ matrix.cc }} ${{ matrix.cxx }} ccache
apt-get install -y perl # for openssl build
@@ -304,6 +332,12 @@ jobs:
pipx install "conan>=2.0,<3"
echo "$HOME/.local/bin" >> $GITHUB_PATH
# Install gcovr for coverage jobs
if [ "${{ matrix.job_type }}" = "coverage" ]; then
pipx install "gcovr>=7,<9"
apt-get install -y curl lcov
fi
- name: Check environment
run: |
echo "PATH:"
@@ -313,6 +347,13 @@ jobs:
which ${{ matrix.cc }} && ${{ matrix.cc }} --version || echo "${{ matrix.cc }} not found"
which ${{ matrix.cxx }} && ${{ matrix.cxx }} --version || echo "${{ matrix.cxx }} not found"
which ccache && ccache --version || echo "ccache not found"
# Check gcovr for coverage jobs
if [ "${{ matrix.job_type }}" = "coverage" ]; then
which gcov && gcov --version || echo "gcov not found"
which gcovr && gcovr --version || echo "gcovr not found"
fi
echo "---- Full Environment ----"
env
@@ -340,6 +381,7 @@ jobs:
gha_cache_enabled: 'false' # Disable caching for self hosted runner
- name: Build
if: matrix.job_type == 'build'
uses: ./.github/actions/xahau-ga-build
with:
generator: Ninja
@@ -354,7 +396,27 @@ jobs:
clang_gcc_toolchain: ${{ matrix.clang_gcc_toolchain || '' }}
ccache_max_size: '100G'
- name: Build (Coverage)
if: matrix.job_type == 'coverage'
uses: ./.github/actions/xahau-ga-build
with:
generator: Ninja
configuration: ${{ matrix.configuration }}
build_dir: ${{ env.build_dir }}
cc: ${{ matrix.cc }}
cxx: ${{ matrix.cxx }}
gcov: ${{ matrix.gcov }}
compiler-id: ${{ matrix.compiler_id }}
cache_version: ${{ env.CACHE_VERSION }}
main_branch: ${{ env.MAIN_BRANCH_NAME }}
stdlib: ${{ matrix.stdlib }}
# Coverage builds are slower due to instrumentation; use fewer parallel jobs to avoid flakiness
cmake-args: '-Dcoverage=ON -Dcoverage_format=xml -Dcoverage_test_parallelism=$(($(nproc)/2)) -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_CXX_FLAGS="-O0" -DCMAKE_C_FLAGS="-O0"'
cmake-target: 'coverage'
ccache_max_size: '100G'
- name: Set artifact name
if: matrix.job_type == 'build'
id: set-artifact-name
run: |
ARTIFACT_NAME="build-output-nix-${{ github.run_id }}-${{ matrix.compiler }}-${{ matrix.configuration }}"
@@ -367,6 +429,7 @@ jobs:
ls -la ${{ env.build_dir }} || echo "Build directory not found or empty"
- name: Run tests
if: matrix.job_type == 'build'
run: |
# Ensure the binary exists before trying to run
if [ -f "${{ env.build_dir }}/rippled" ]; then
@@ -375,3 +438,29 @@ jobs:
echo "Error: rippled executable not found in ${{ env.build_dir }}"
exit 1
fi
# Coverage-specific steps
- name: Move coverage report
if: matrix.job_type == 'coverage'
shell: bash
run: |
mv "${{ env.build_dir }}/coverage.xml" ./
- name: Archive coverage report
if: matrix.job_type == 'coverage'
uses: actions/upload-artifact@v4
with:
name: coverage.xml
path: coverage.xml
retention-days: 30
- name: Upload coverage report
if: matrix.job_type == 'coverage'
uses: codecov/codecov-action@v5
with:
files: coverage.xml
fail_ci_if_error: true
disable_search: true
verbose: true
plugins: noop
use_oidc: true

3
.gitignore vendored
View File

@@ -127,8 +127,5 @@ bld.rippled/
generated
.vscode
# AI docs (local working documents)
.ai-docs/
# Suggested in-tree build directory
/.build/

4
.testnet/.gitignore vendored
View File

@@ -1,4 +0,0 @@
output/
__pycache__/
scenarios/odd-cases/
scenarios/suite-experiments.yml

View File

@@ -1,27 +0,0 @@
"""Scenario: ConsensusEntropy amendment crashes non-supporting node.
Votes ConsensusEntropy accept on all nodes except n4, then waits for n4
to crash as the amendment activates without its support.
x-testnet run --scenario-script consensus_entropy_crash.py
"""
async def scenario(ctx, log):
"""Drive ConsensusEntropy activation and verify the excluded node n4 halts.

ctx is the testnet harness; log is the scenario logger.
"""
# Let the network close its first ledger before changing feature votes.
await ctx.wait_for_ledger_close()
# Clear the ConsensusEntropy veto on every node except n4 (excluded).
ctx.feature("ConsensusEntropy", vetoed=False, exclude_nodes=[4])
log("Waiting for ConsensusEntropy to be voted for...")
# Block until all non-excluded nodes report the amendment un-vetoed.
await ctx.wait_for_feature(
"ConsensusEntropy",
check=lambda s: not s.get("vetoed"),
exclude_nodes=[4],
timeout=60,
)
log("Waiting for n4 to crash...")
# n4 does not support the amendment; expect it to go down after activation.
op = await ctx.wait_for_nodes_down(nodes=[4], timeout=600)
# The shutdown must be the deliberate "unsupported amendments" path
# with a clean exit status, not an arbitrary crash.
ctx.assert_log("unsupported amendments activated", since=op.started, nodes=[4])
ctx.assert_exit_status(0, nodes=[4])
log("PASS: n4 shut down due to unsupported amendment")

View File

@@ -1,52 +0,0 @@
""":descr: entropy stays valid under transaction load"""
from __future__ import annotations
from helpers import require_entropy, get_entropy_tx, assert_valid_entropy
# Parameter sets the suite runner expands into separate test invocations;
# min_txns/max_txns bound the per-ledger transaction load.
variants = [
{"label": "light", "min_txns": 5, "max_txns": 10},
{"label": "heavy", "min_txns": 50, "max_txns": 60},
{"label": "super_heavy", "min_txns": 90, "max_txns": 120},
]
async def scenario(ctx, log, *, min_txns=5, max_txns=10, **_):
"""Verify entropy stays valid and unique while user txn load is applied.

min_txns/max_txns: per-ledger load bounds fed to the txn generator.
Extra variant keys (e.g. "label") are absorbed by **_.
"""
await require_entropy(ctx, log)
# Start background transaction load before sampling ledgers.
gen = ctx.txn_generator(min_txns=min_txns, max_txns=max_txns)
await gen.start()
await gen.wait_until_ready()
log(f"Transaction generator ready ({min_txns}-{max_txns} txns/ledger)")
# Wait for pipeline warmup + a few txn-bearing ledgers.
await ctx.wait_for_ledgers(3, node_id=0, timeout=60)
start_seq = ctx.validated_ledger_index(0)
await ctx.wait_for_ledgers(10, node_id=0, timeout=120)
end_seq = ctx.validated_ledger_index(0)
log(f"Inspecting ledgers {start_seq + 1}–{end_seq}")
# seen_digests accumulates across ledgers so duplicates are caught.
digests = set()
total_user_txns = 0
for seq in range(start_seq + 1, end_seq + 1):
ce, user_txns = get_entropy_tx(ctx, seq)
digest, count = assert_valid_entropy(ce, seq, seen_digests=digests)
total_user_txns += len(user_txns)
log(
f" Ledger {seq}: EntropyCount={count} "
f"user_txns={len(user_txns)} Digest={digest[:16]}..."
)
await gen.stop()
log(
f"Verified {end_seq - start_seq} ledgers: {total_user_txns} user txns, "
f"all entropy valid and unique"
)
# Guard against a silently idle generator: the test is meaningless
# unless at least one user transaction actually landed in a ledger.
if total_user_txns == 0:
raise AssertionError("No user transactions were included in any ledger")
log("PASS")

View File

@@ -1,117 +0,0 @@
""":descr: 4/5 liveness, 3/5 zero-entropy fallback, recovery"""
from __future__ import annotations
from helpers import require_entropy, get_entropy_tx, entropy_fields
async def scenario(ctx, log):
"""Exercise 4/5 liveness, the 3/5 zero-entropy window, and recovery.

Stops nodes 4 then 3, asserts every ledger formed at sub-quorum carries
zero entropy, checks RNG/stall diagnostics in logs, then restarts both
nodes and verifies the ledger advances with quorum-met entropy again.
"""
await require_entropy(ctx, log)
# Baseline: wait 1 ledger to confirm network is healthy.
await ctx.wait_for_ledgers(1, node_id=0, timeout=30)
# --- 4/5 liveness ---
ctx.stop_node(4)
await ctx.wait_for_nodes_down(nodes=[4], timeout=30)
await ctx.wait_for_ledgers(1, node_id=0, timeout=30)
log("4/5: liveness OK")
# Snapshot validated seq before dropping to 3/5.
val_before = ctx.validated_ledger_index(0)
# --- 3/5 degraded window ---
ctx.stop_node(3)
await ctx.wait_for_nodes_down(nodes=[3], timeout=30)
# 10s ≈ 3 rounds at 3s cadence.
await ctx.sleep(10)
val_after = ctx.validated_ledger_index(0)
log(f"3/5: validated ledger {val_before}→{val_after}")
# Accepted/built ledgers may still later appear as validated once the full
# network rejoins. For ConsensusEntropy the key invariant is that every
# ledger created during this sub-quorum window carries ZERO entropy.
degraded_zero = 0
# val_after may be falsy if nothing validated; fall back to val_before.
degraded_end = val_after or val_before
if val_before and degraded_end and degraded_end > val_before:
for seq in range(val_before + 1, degraded_end + 1):
ce, _ = get_entropy_tx(ctx, seq)
digest, entropy_count, is_zero = entropy_fields(ce)
if not is_zero:
raise AssertionError(
f"Ledger {seq}: expected ZERO entropy during 3/5 window, "
f"got Digest={digest[:16]}... EntropyCount={entropy_count}"
)
degraded_zero += 1
log(f" Degraded ledger {seq}: EntropyCount={entropy_count} ZERO")
log(f"3/5 entropy summary: {degraded_zero} zero")
# Log checks tied to actual transition mechanics:
# - seq=1 proposals are emitted once commit-set phase is entered
# - ConvergingCommit transition is the gateway out of seq=0-only behavior
# - establish gate blocked indicates tx-consensus/pause prevented accept
ctx.log_level("LedgerConsensus", "trace")
op = await ctx.sleep(6, name="stall_window")
# At sub-quorum these RNG transitions must NOT occur on the live nodes.
ctx.assert_not_log(
r"RNG: transitioned to ConvergingCommit", within=op.window, nodes=[0, 1, 2]
)
ctx.assert_not_log(r"RNG: propose seq=1", within=op.window, nodes=[0, 1, 2])
gate_blocked = ctx.search_logs(
r"STALLDIAG: establish gate blocked reason=(pause|no-tx-consensus)",
within=op.window,
nodes=[0, 1, 2],
)
log(f"3/5: establish gate-blocked logs in 6s: {gate_blocked.count}")
skips = ctx.search_logs(r"RNG: bootstrap skip", within=op.window, nodes=[0, 1, 2])
log(f"3/5: RNG bootstrap skips in 6s: {skips.count}")
# --- Recovery: restart nodes, verify ledger advancement ---
ctx.start_node(3)
ctx.start_node(4)
await ctx.wait_for_ledgers(1, node_id=0, timeout=120)
val_recovered = ctx.validated_ledger_index(0)
pre_recovery = max(v for v in [val_before, val_after] if v is not None)
log(f"Recovered: validated seq {pre_recovery}→{val_recovered}")
if not val_recovered or val_recovered <= pre_recovery:
raise AssertionError(
f"Validated ledger did not advance after recovery "
f"({pre_recovery}→{val_recovered})"
)
# Inspect post-recovery ledgers separately from the degraded window above.
# Once the network is back at quorum, non-zero entropy is valid again but
# must still be quorum-met.
zero_count = 0
nonzero_count = 0
for seq in range(pre_recovery + 1, val_recovered + 1):
ce, _ = get_entropy_tx(ctx, seq)
digest, entropy_count, is_zero = entropy_fields(ce)
if is_zero:
zero_count += 1
else:
nonzero_count += 1
# 4 = quorum on this 5-node network (see scenario description).
if entropy_count < 4:
raise AssertionError(
f"Ledger {seq}: non-zero entropy with sub-quorum "
f"EntropyCount={entropy_count} (need >= 4)"
)
log(
f" Ledger {seq}: EntropyCount={entropy_count} "
f"{'ZERO' if is_zero else 'REAL'}"
)
log(f"Entropy summary: {zero_count} zero, {nonzero_count} non-zero")
log("PASS")

View File

@@ -1,46 +0,0 @@
""":descr: drop 2 nodes (3/5 stall), restart both, verify recovery"""
from __future__ import annotations
async def scenario(ctx, log):
"""Drop 2 of 5 nodes to stall validation, restart them, verify recovery.

Asserts ConsensusEntropy is enabled, then checks that the validated
ledger sequence advances again after nodes 3 and 4 come back.
"""
await ctx.wait_for_ledger_close(timeout=120)
# Inline feature check (this scenario does not use the shared helper).
feature = ctx.feature_check("ConsensusEntropy", node_id=0)
if not feature or not feature.get("enabled", False):
raise AssertionError(f"ConsensusEntropy not enabled: {feature}")
await ctx.wait_for_ledgers(1, node_id=0, timeout=60)
log("Baseline OK")
# Drop 2 nodes → validation stall.
ctx.stop_node(3)
ctx.stop_node(4)
await ctx.wait_for_nodes_down(nodes=[3, 4], timeout=30)
info = ctx.rpc.server_info(node_id=0)
val_before = info.get("info", {}).get("validated_ledger", {}).get("seq", 0)
log(f"Stalled at validated seq {val_before}")
# Let it sit for a few rounds in degraded state.
await ctx.sleep(6)
# Bring both nodes back.
ctx.start_node(3)
ctx.start_node(4)
log("Restarted n3 and n4, waiting for recovery...")
# Recovery: wait for ANY validated ledger advance on n0.
await ctx.wait_for_ledger_close(node_id=0, timeout=60)
info = ctx.rpc.server_info(node_id=0)
val_after = info.get("info", {}).get("validated_ledger", {}).get("seq", 0)
log(f"Recovered: validated seq {val_before}→{val_after}")
if val_after <= val_before:
raise AssertionError(
f"Validated ledger did not advance after recovery "
f"({val_before}→{val_after})"
)
log("PASS")

View File

@@ -1,27 +0,0 @@
""":descr: all 5 nodes healthy, every ledger has valid unique quorum-met entropy"""
from __future__ import annotations
from helpers import require_entropy, get_entropy_tx, assert_valid_entropy
async def scenario(ctx, log):
"""Steady state: with all 5 nodes healthy, every inspected ledger must
carry valid, unique, quorum-met entropy."""
await require_entropy(ctx, log)
# Wait for RNG pipeline to warm up past bootstrap skip.
await ctx.wait_for_ledgers(3, node_id=0, timeout=60)
log("Pipeline warmed up")
# Sample a window of 10 validated ledgers.
start_seq = ctx.validated_ledger_index(0)
await ctx.wait_for_ledgers(10, node_id=0, timeout=120)
end_seq = ctx.validated_ledger_index(0)
log(f"Inspecting ledgers {start_seq + 1}–{end_seq}")
# seen_digests accumulates so repeated digests across ledgers fail.
digests = set()
for seq in range(start_seq + 1, end_seq + 1):
ce, _ = get_entropy_tx(ctx, seq)
digest, count = assert_valid_entropy(ce, seq, seen_digests=digests)
log(f" Ledger {seq}: EntropyCount={count} Digest={digest[:16]}...")
log(f"Verified {end_seq - start_seq} ledgers: all quorum entropy, all unique")
log("PASS")

View File

@@ -1,86 +0,0 @@
defaults:
network:
node_count: 5
launcher: tmux
slave_delay: 0.2
features:
- ConsensusEntropy
- Export
track_features:
- ConsensusEntropy
- Export
log_levels:
TxQ: info
Protocol: debug
Peer: debug
LedgerConsensus: debug
NetworkOPs: info
env:
XAHAU_RESOURCE_PER_PORT: "1"
XAHAU_RNG_POLL_MS: "333"
tests:
# --- CE + Export (80% quorum, SHAMap convergence) ---
- name: steady_state_export_ce
script: .testnet/scenarios/export/steady_state_export.py
- name: retriable_export_ce
script: .testnet/scenarios/export/retriable_export.py
- name: export_degradation_ce
script: .testnet/scenarios/export/export_degradation.py
network:
node_env:
3:
XAHAUD_NO_EXPORT_SIG: "1"
4:
XAHAUD_NO_EXPORT_SIG: "1"
# CE + Export: 1 node suppressed, 4/5 = 80% quorum, should succeed
- name: export_ce_one_node_down
script: .testnet/scenarios/export/export_quorum.py
params:
expect_success: true
network:
node_env:
4:
XAHAUD_NO_EXPORT_SIG: "1"
# --- Export only, no CE (80% active-view quorum) ---
- name: export_only_all_up
script: .testnet/scenarios/export/export_quorum.py
params:
expect_success: true
network:
features:
- Export
track_features:
- Export
- name: export_only_one_node_down
script: .testnet/scenarios/export/export_quorum.py
params:
expect_success: true
network:
features:
- Export
track_features:
- Export
node_env:
4:
XAHAUD_NO_EXPORT_SIG: "1"
- name: export_only_two_nodes_down
script: .testnet/scenarios/export/export_quorum.py
params:
expect_success: false
network:
features:
- Export
track_features:
- Export
node_env:
3:
XAHAUD_NO_EXPORT_SIG: "1"
4:
XAHAUD_NO_EXPORT_SIG: "1"

View File

@@ -1,102 +0,0 @@
""":descr: Submit ttEXPORT with 2 nodes suppressing export sigs, verify it
retries via terRETRY_EXPORT until LLS expiry (not enough sigs for quorum).
Nodes 3 and 4 have XAHAUD_NO_EXPORT_SIG=1, so only 3/5 nodes provide
export signatures. With 80% quorum = ceil(5*0.8) = 4 required, the
export cannot reach quorum and should expire via tecEXPORT_EXPIRED.
Flow:
1. Fund alice and bob
2. alice submits ttEXPORT with tight LLS
3. Export retries (only 3/5 sigs available, need 4)
4. Verify export expires with tecEXPORT_EXPIRED
5. Verify subsequent payment still works (sequence not permanently blocked)
"""
from __future__ import annotations
from export_helpers import require_export, assert_shadow_ticket
async def scenario(ctx, log):
"""Submit a ttEXPORT that cannot reach signature quorum and verify it
expires (tecEXPORT_EXPIRED) without leaving a shadow ticket or
blocking the account's sequence (see module docstring for the flow).
"""
await require_export(ctx, log)
# --- Setup ---
await ctx.fund_accounts({"alice": 10000, "bob": 1000})
log("Accounts funded")
alice = ctx.account("alice")
bob = ctx.account("bob")
current_seq = ctx.validated_ledger_index(0)
log(f"Current ledger: {current_seq}")
log("Nodes 3,4 have XAHAUD_NO_EXPORT_SIG=1 (3/5 sigs, need 4)")
# --- Submit ttEXPORT (should retry then expire -- only 3/5 sigs) ---
# Tight LastLedgerSequence windows keep the retry/expiry cycle short.
result = await ctx.submit_and_wait(
{
"TransactionType": "Export",
"LastLedgerSequence": current_seq + 8,
"Fee": "1000000",
"ExportedTxn": {
"TransactionType": "Payment",
"Account": alice.address,
"Destination": bob.address,
"Amount": "1000000",
"Fee": "10",
"Sequence": 0,
"TicketSequence": 1,
"FirstLedgerSequence": current_seq + 1,
"LastLedgerSequence": current_seq + 6,
"Flags": 2147483648,
"SigningPubKey": "",
},
},
alice.wallet,
timeout=60,
)
final_seq = ctx.validated_ledger_index(0)
engine_result = result.get("engine_result", "")
log(f"Export completed at ledger {final_seq}, result: {engine_result}")
# With only 3/5 sigs and 80% quorum (4 required), export MUST fail
if engine_result == "tesSUCCESS":
raise AssertionError(
"Export should NOT have succeeded with only 3/5 sigs "
"(need 4 for 80% quorum) -- check XAHAUD_NO_EXPORT_SIG config"
)
# Should be tecEXPORT_EXPIRED (LLS reached without quorum)
# Any other failure code is logged but tolerated; the hard
# requirement is only that the export did not succeed.
if engine_result != "tecEXPORT_EXPIRED":
log(f"WARNING: expected tecEXPORT_EXPIRED, got {engine_result}")
log(f"Export failed as expected ({engine_result})")
# No shadow ticket should exist (export never reached quorum)
assert_shadow_ticket(ctx, alice.address, log, expect_exists=False)
# --- Verify subsequent payment works regardless ---
log("Submitting payment from alice to bob...")
pay_result = await ctx.submit_and_wait(
{
"TransactionType": "Payment",
"Destination": bob.address,
"Amount": "1000000",
"Fee": "12",
},
alice.wallet,
timeout=30,
)
pay_engine = pay_result.get("engine_result", "")
log(f"Payment result: {pay_engine}")
if pay_engine != "tesSUCCESS":
raise AssertionError(
f"Payment failed after expired export: {pay_engine} "
f"-- sequence may be blocked"
)
log("Payment succeeded -- account not permanently blocked")
log("PASS")

View File

@@ -1,144 +0,0 @@
"""Shared helpers for Export scenario tests."""
from __future__ import annotations
async def require_export(ctx, log):
    """Block until the first ledger closes, then assert Export is enabled.

    Raises AssertionError when node 0 reports the Export amendment as
    missing or disabled.
    """
    await ctx.wait_for_ledger_close(timeout=120)
    status = ctx.feature_check("Export", node_id=0)
    enabled = bool(status) and status.get("enabled", False)
    if not enabled:
        raise AssertionError(f"Export not enabled: {status}")
    log("Export amendment enabled")
def find_export_txns(ctx, seq):
    """Return every Export transaction present in ledger ``seq``.

    Fetches the ledger (transactions expanded) from the harness and keeps
    only entries whose TransactionType is "Export". An unavailable ledger
    yields an empty list.
    """
    ledger_result = ctx.ledger(seq, transactions=True)
    if not ledger_result:
        return []
    exports = []
    for tx in ledger_result.get("ledger", {}).get("transactions", []):
        if tx.get("TransactionType") == "Export":
            exports.append(tx)
    return exports
def dst_param(address):
    """Build the DST HookParameter entry for a classic address.

    The parameter name is the hex encoding of "DST"; the value is the
    decoded account-ID bytes of ``address`` as uppercase hex.
    """
    # Imported lazily so merely importing this helpers module does not
    # require xrpl-py.
    from xrpl.core.addresscodec import decode_classic_address

    value_hex = decode_classic_address(address).hex().upper()
    return {
        "HookParameter": {
            "HookParameterName": "445354",  # "DST"
            "HookParameterValue": value_hex,
        }
    }
def assert_hook_accepted(meta, log, *, expected_emits=1):
    """Verify the first hook execution ACCEPTed with the expected emits.

    Reads HookExecutions from transaction metadata, raises AssertionError
    on any mismatch, and returns the execution entry so callers can
    inspect further fields.
    """
    executions = meta.get("HookExecutions", [])
    if not executions:
        raise AssertionError("No HookExecutions in metadata")
    entry = executions[0].get("HookExecution", {})
    result = entry.get("HookResult", -1)
    emits = entry.get("HookEmitCount", -1)
    rc = entry.get("HookReturnCode", "")
    log(f" HookResult={result} EmitCount={emits} ReturnCode={rc}")
    # ExitType::ACCEPT is encoded as HookResult == 3.
    if result != 3:
        raise AssertionError(
            f"Hook did not ACCEPT: HookResult={result} ReturnCode={rc}"
        )
    if emits != expected_emits:
        raise AssertionError(f"Expected {expected_emits} emits, got {emits}")
    # ReturnCode 0 = success; non-zero = ASSERT line number in hook
    if rc and str(rc) != "0":
        raise AssertionError(
            f"Hook returned error code {rc} (likely ASSERT failure at that line)"
        )
    return entry
def assert_export_result(meta, log, *, require_signers=True):
    """Assert ExportResult is present and well-formed in metadata.

    Validates the outer ExportResult fields, the inner ExportedTxn
    object, and (optionally) that multisig Signers were attached.
    Returns the ExportResult dict.
    """
    export_result = meta.get("ExportResult", {})
    if not export_result:
        raise AssertionError("ExportResult not found in metadata")
    # Outer object must anchor the export to a ledger and a tx hash.
    for field in ("LedgerSequence", "TransactionHash"):
        if field not in export_result:
            raise AssertionError(f"ExportResult missing {field}")
    inner = export_result.get("ExportedTxn", {})
    if not inner:
        raise AssertionError("ExportResult missing ExportedTxn (multisigned blob)")
    log(f" ExportResult: seq={export_result['LedgerSequence']} "
        f"hash={export_result['TransactionHash'][:16]}...")
    # The inner transaction must at least identify its account and type.
    if "Account" not in inner:
        raise AssertionError("ExportedTxn missing Account")
    if "TransactionType" not in inner:
        raise AssertionError("ExportedTxn missing TransactionType")
    # A multisigned blob must carry an empty SigningPubKey.
    if inner.get("SigningPubKey", "NOT_EMPTY") != "":
        raise AssertionError(
            f"ExportedTxn SigningPubKey should be empty, "
            f"got '{inner.get('SigningPubKey')}'"
        )
    if require_signers:
        signers = inner.get("Signers", [])
        if not signers:
            raise AssertionError("ExportedTxn has no Signers (multisig not applied)")
        log(f" Signers: {len(signers)} validator(s)")
    return export_result
def assert_shadow_ticket(ctx, account_address, log, *, expect_exists=True):
    """Assert a ShadowTicket ledger object exists (or not) for the account."""
    response = ctx.rpc.request(
        0, "account_objects", {"account": account_address}
    )
    owned = (response or {}).get("account_objects", [])
    shadow_tickets = [
        entry for entry in owned
        if entry.get("LedgerEntryType") == "ShadowTicket"
    ]
    log(f" Shadow tickets: {len(shadow_tickets)}")
    found = bool(shadow_tickets)
    if expect_exists and not found:
        raise AssertionError("Expected shadow ticket but none found")
    if found and not expect_exists:
        raise AssertionError(
            f"Expected no shadow tickets but found {len(shadow_tickets)}"
        )
    return shadow_tickets

View File

@@ -1,112 +0,0 @@
""":descr: Test Export quorum behavior. When enough active validators sign,
the export should succeed whether or not CE is enabled. When fewer than the
active-view quorum sign, the export should expire.
Parameterized via `expect_success` kwarg from suite.yml.
Flow:
1. Fund alice and bob
2. alice submits ttEXPORT
3. Verify result matches expectation (tesSUCCESS or tecEXPORT_EXPIRED)
4. Verify ExportResult + shadow ticket on success, absence on failure
5. Verify subsequent payment works regardless
"""
from __future__ import annotations
from export_helpers import (
require_export,
assert_export_result,
assert_shadow_ticket,
)
async def scenario(ctx, log, expect_success=True):
await require_export(ctx, log)
# --- Setup ---
await ctx.fund_accounts({"alice": 10000, "bob": 1000})
log("Accounts funded")
alice = ctx.account("alice")
bob = ctx.account("bob")
current_seq = ctx.validated_ledger_index(0)
log(f"Current ledger: {current_seq}")
outcome = "success" if expect_success else "failure (below quorum)"
log(f"Expecting export {outcome}")
# --- Submit ttEXPORT ---
result = await ctx.submit_and_wait(
{
"TransactionType": "Export",
"LastLedgerSequence": current_seq + 10,
"Fee": "1000000",
"ExportedTxn": {
"TransactionType": "Payment",
"Account": alice.address,
"Destination": bob.address,
"Amount": "1000000",
"Fee": "10",
"Sequence": 0,
"TicketSequence": 1,
"FirstLedgerSequence": current_seq + 1,
"LastLedgerSequence": current_seq + 8,
"Flags": 2147483648,
"SigningPubKey": "",
},
},
alice.wallet,
timeout=60,
)
final_seq = ctx.validated_ledger_index(0)
engine_result = result.get("engine_result", "")
meta = result.get("meta", {})
log(f"Export at ledger {final_seq}, result: {engine_result}")
if expect_success:
if engine_result != "tesSUCCESS":
raise AssertionError(
f"Expected tesSUCCESS, got {engine_result}"
)
# Assert ExportResult is well-formed with signers
assert_export_result(meta, log, require_signers=True)
# Assert shadow ticket was created
assert_shadow_ticket(ctx, alice.address, log, expect_exists=True)
log("Export succeeded as expected (active-view quorum reached)")
else:
if engine_result == "tesSUCCESS":
raise AssertionError(
"Export should NOT have succeeded below active-view quorum"
)
log(f"Export failed as expected ({engine_result})")
# No shadow ticket should exist
assert_shadow_ticket(ctx, alice.address, log, expect_exists=False)
# --- Verify subsequent payment works ---
log("Submitting payment from alice to bob...")
pay_result = await ctx.submit_and_wait(
{
"TransactionType": "Payment",
"Destination": bob.address,
"Amount": "1000000",
"Fee": "12",
},
alice.wallet,
timeout=30,
)
pay_engine = pay_result.get("engine_result", "")
log(f"Payment result: {pay_engine}")
if pay_engine != "tesSUCCESS":
raise AssertionError(f"Payment failed: {pay_engine}")
log("Payment succeeded -- account not blocked")
log("PASS")

View File

@@ -1,94 +0,0 @@
""":descr: Submit ttEXPORT directly (no hook), verify it succeeds with
ExportResult in metadata. Then submit a payment from the same account
to verify sequence handling doesn't block subsequent transactions.
Flow:
1. Fund alice and bob
2. alice submits ttEXPORT with inner payment -> tesSUCCESS (provisional)
3. Validators attach sigs via proposals -> quorum -> ExportResult in metadata
4. alice submits a Payment to bob -> should succeed (sequence not blocked)
"""
from __future__ import annotations
from export_helpers import require_export, assert_export_result, assert_shadow_ticket
async def scenario(ctx, log):
await require_export(ctx, log)
# --- Setup ---
await ctx.fund_accounts({"alice": 10000, "bob": 1000})
log("Accounts funded")
alice = ctx.account("alice")
bob = ctx.account("bob")
current_seq = ctx.validated_ledger_index(0)
log(f"Current ledger: {current_seq}")
# --- 1. Submit ttEXPORT ---
result = await ctx.submit_and_wait(
{
"TransactionType": "Export",
"LastLedgerSequence": current_seq + 15,
"Fee": "1000000",
"ExportedTxn": {
"TransactionType": "Payment",
"Account": alice.address,
"Destination": bob.address,
"Amount": "1000000",
"Fee": "10",
"Sequence": 0,
"TicketSequence": 1,
"FirstLedgerSequence": current_seq + 1,
"LastLedgerSequence": current_seq + 10,
"Flags": 2147483648,
"SigningPubKey": "",
},
},
alice.wallet,
timeout=60,
)
export_seq = ctx.validated_ledger_index(0)
engine_result = result.get("engine_result", "")
log(f"Export completed at ledger {export_seq}, result: {engine_result}")
if engine_result != "tesSUCCESS":
raise AssertionError(
f"Expected tesSUCCESS for export, got {engine_result}"
)
# Assert ExportResult is well-formed with signers
meta = result.get("meta", {})
assert_export_result(meta, log, require_signers=True)
# Assert shadow ticket was created
assert_shadow_ticket(ctx, alice.address, log, expect_exists=True)
# --- 2. Submit Payment from same account ---
log("Submitting payment from alice to bob...")
pay_result = await ctx.submit_and_wait(
{
"TransactionType": "Payment",
"Destination": bob.address,
"Amount": "1000000",
"Fee": "12",
},
alice.wallet,
timeout=30,
)
pay_engine = pay_result.get("engine_result", "")
log(f"Payment result: {pay_engine}")
if pay_engine != "tesSUCCESS":
raise AssertionError(f"Payment failed: {pay_engine}")
log(
f"Both transactions succeeded: "
f"Export at ledger {export_seq}, Payment at ledger {ctx.validated_ledger_index(0)}"
)
log("Sequence handling OK - export didn't block subsequent txns")
log("PASS")

View File

@@ -1,211 +0,0 @@
""":descr: install xport hook, trigger export, verify emitted ttEXPORT lifecycle
1. Fund alice (hook holder), bob (trigger), carol (export destination)
2. Install xport hook on alice
3. bob pays alice with DST=carol → hook calls xport() → emits ttEXPORT
4. Emitted ttEXPORT enters open ledger, validators attach sigs via proposals
5. Verify Export transaction appears in a subsequent ledger
"""
from __future__ import annotations
from export_helpers import (
require_export,
find_export_txns,
dst_param,
assert_hook_accepted,
assert_export_result,
assert_shadow_ticket,
)
# C source for the xport hook — verbatim from src/test/app/Export_test_hooks.h
# On Payment to the hook account, exports a 1 XAH payment to the DST param.
XPORT_HOOK_C = r"""
#include <stdint.h>
extern int32_t _g(uint32_t id, uint32_t maxiter);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t xport(uint32_t write_ptr, uint32_t write_len, uint32_t read_ptr, uint32_t read_len);
extern int64_t xport_reserve(uint32_t count);
extern int64_t hook_account(uint32_t write_ptr, uint32_t write_len);
extern int64_t otxn_param(uint32_t write_ptr, uint32_t write_len, uint32_t name_ptr, uint32_t name_len);
extern int64_t otxn_type(void);
extern int64_t ledger_seq(void);
#define SBUF(x) (uint32_t)(x), sizeof(x)
#define ASSERT(x) if (!(x)) rollback((uint32_t)#x, sizeof(#x), __LINE__)
#define ttPAYMENT 0
#define tfCANONICAL 0x80000000UL
#define amAMOUNT 1
#define amFEE 8
#define atACCOUNT 1
#define atDESTINATION 3
#define ENCODE_TT(buf_out, tt) \
buf_out[0] = 0x12U; buf_out[1] = (tt >> 8) & 0xFFU; buf_out[2] = tt & 0xFFU; buf_out += 3;
#define ENCODE_FLAGS(buf_out, flags) \
buf_out[0] = 0x22U; buf_out[1] = (flags >> 24) & 0xFFU; buf_out[2] = (flags >> 16) & 0xFFU; \
buf_out[3] = (flags >> 8) & 0xFFU; buf_out[4] = flags & 0xFFU; buf_out += 5;
#define ENCODE_SEQUENCE(buf_out, seq) \
buf_out[0] = 0x24U; buf_out[1] = (seq >> 24) & 0xFFU; buf_out[2] = (seq >> 16) & 0xFFU; \
buf_out[3] = (seq >> 8) & 0xFFU; buf_out[4] = seq & 0xFFU; buf_out += 5;
#define ENCODE_FLS(buf_out, fls) \
buf_out[0] = 0x20U; buf_out[1] = 0x1AU; buf_out[2] = (fls >> 24) & 0xFFU; \
buf_out[3] = (fls >> 16) & 0xFFU; buf_out[4] = (fls >> 8) & 0xFFU; \
buf_out[5] = fls & 0xFFU; buf_out += 6;
#define ENCODE_LLS(buf_out, lls) \
buf_out[0] = 0x20U; buf_out[1] = 0x1BU; buf_out[2] = (lls >> 24) & 0xFFU; \
buf_out[3] = (lls >> 16) & 0xFFU; buf_out[4] = (lls >> 8) & 0xFFU; \
buf_out[5] = lls & 0xFFU; buf_out += 6;
#define ENCODE_DROPS(buf_out, drops, amt_type) \
buf_out[0] = 0x60U + amt_type; buf_out[1] = 0x40U + ((drops >> 56) & 0x3FU); \
buf_out[2] = (drops >> 48) & 0xFFU; buf_out[3] = (drops >> 40) & 0xFFU; \
buf_out[4] = (drops >> 32) & 0xFFU; buf_out[5] = (drops >> 24) & 0xFFU; \
buf_out[6] = (drops >> 16) & 0xFFU; buf_out[7] = (drops >> 8) & 0xFFU; \
buf_out[8] = drops & 0xFFU; buf_out += 9;
#define ENCODE_SIGNING_PUBKEY_EMPTY(buf_out) \
buf_out[0] = 0x73U; buf_out[1] = 0x00U; buf_out += 2;
#define ENCODE_ACCOUNT(buf_out, acc, acc_type) \
buf_out[0] = 0x80U + acc_type; buf_out[1] = 0x14U; \
for (int i = 0; i < 20; ++i) buf_out[2+i] = acc[i]; buf_out += 22;
#define PREPARE_PAYMENT_SIMPLE_SIZE 270U
int64_t hook(uint32_t reserved) {
_g(1, 1);
if (otxn_type() != ttPAYMENT)
return accept(0, 0, 0);
ASSERT(xport_reserve(1) == 1);
uint8_t dst[20];
int64_t dst_len = otxn_param(SBUF(dst), "DST", 3);
ASSERT(dst_len == 20);
uint8_t acc[20];
ASSERT(hook_account(SBUF(acc)) == 20);
uint32_t cls = (uint32_t)ledger_seq();
uint8_t tx[PREPARE_PAYMENT_SIMPLE_SIZE];
uint8_t* buf = tx;
ENCODE_TT(buf, ttPAYMENT);
ENCODE_FLAGS(buf, tfCANONICAL);
ENCODE_SEQUENCE(buf, 0);
ENCODE_FLS(buf, cls + 1);
ENCODE_LLS(buf, cls + 5);
// sfTicketSequence = UINT32 field 41 = 0x20 0x29
buf[0] = 0x20U; buf[1] = 0x29U;
buf[2] = 0; buf[3] = 0; buf[4] = 0; buf[5] = 1;
buf += 6;
uint64_t drops = 1000000;
ENCODE_DROPS(buf, drops, amAMOUNT);
ENCODE_DROPS(buf, 10, amFEE);
ENCODE_SIGNING_PUBKEY_EMPTY(buf);
ENCODE_ACCOUNT(buf, acc, atACCOUNT);
ENCODE_ACCOUNT(buf, dst, atDESTINATION);
uint8_t hash[32];
int64_t xport_result = xport(SBUF(hash), (uint32_t)tx, buf - tx);
ASSERT(xport_result == 32);
return accept(0, 0, 0);
}
"""
async def scenario(ctx, log):
    # Wait for network to start and amendments to activate
    await require_export(ctx, log)
    # --- Setup ---
    await ctx.fund_accounts({"alice": 10000, "bob": 10000, "carol": 1000})
    log("Accounts funded")
    alice = ctx.account("alice")
    carol = ctx.account("carol")
    # Compile and install xport hook on alice
    wasm = ctx.compile_hook(XPORT_HOOK_C, label="xport")
    await ctx.submit_and_wait(
        {
            "TransactionType": "SetHook",
            "Hooks": [
                {
                    "Hook": {
                        "CreateCode": wasm.hex().upper(),
                        # NOTE(review): all-zero HookOn mask — presumably
                        # fires on Payments here; confirm mask semantics.
                        "HookOn": "0" * 64,
                        "HookNamespace": "0" * 64,
                        "HookApiVersion": 0,
                        "Flags": 1,  # hsfOVERRIDE
                    }
                }
            ],
            "Fee": "100000000",
        },
        alice.wallet,
    )
    log(
        f"Hook installed on alice ({alice.address[:12]}...) "
        f"ledger {ctx.validated_ledger_index(0)}"
    )
    # --- Trigger ---
    # bob pays alice → hook calls xport() → emits ttEXPORT
    trigger_result = await ctx.submit_and_wait(
        {
            "TransactionType": "Payment",
            "Destination": alice.address,
            "Amount": "100000000",
            "Fee": "1000000",
            "HookParameters": [dst_param(carol.address)],
        },
        ctx.account("bob").wallet,
    )
    trigger_seq = ctx.validated_ledger_index(0)
    log(f"Export triggered at ledger {trigger_seq}")
    # Assert hook fired with ACCEPT and emitted 1 tx
    trigger_meta = trigger_result.get("meta", {})
    assert_hook_accepted(trigger_meta, log, expected_emits=1)
    # --- Verify: check each ledger close for the Export transaction ---
    max_ledgers = 10
    for i in range(max_ledgers):
        await ctx.wait_for_ledgers(1, node_id=0, timeout=30)
        seq = ctx.validated_ledger_index(0)
        exports = find_export_txns(ctx, seq)
        if exports:
            export_tx = exports[0]
            # Ledger responses may carry either "meta" or "metaData".
            meta = export_tx.get("meta", export_tx.get("metaData", {}))
            result = meta.get("TransactionResult", "")
            log(f"Ledger {seq}: Export txn found, result={result}")
            if result != "tesSUCCESS":
                raise AssertionError(f"Export did not succeed: {result}")
            # Assert ExportResult is well-formed with signers and inner tx
            assert_export_result(meta, log, require_signers=True)
            # Assert shadow ticket was created
            assert_shadow_ticket(ctx, alice.address, log, expect_exists=True)
            log("PASS")
            return
        log(f"Ledger {seq}: no Export txn yet")
    raise AssertionError(
        f"No Export transaction found after {max_ledgers} ledger closes"
    )

View File

@@ -1,60 +0,0 @@
"""Shared helpers for ConsensusEntropy scenario tests."""
from __future__ import annotations
ZERO_DIGEST = "0" * 64
async def require_entropy(ctx, log):
    """Block until the first ledger closes, then require the
    ConsensusEntropy amendment to be enabled on node 0.
    """
    await ctx.wait_for_ledger_close(timeout=120)
    status = ctx.feature_check("ConsensusEntropy", node_id=0)
    enabled = bool(status) and status.get("enabled", False)
    if not enabled:
        raise AssertionError(f"ConsensusEntropy not enabled: {status}")
    log("ConsensusEntropy enabled")
def get_entropy_tx(ctx, seq):
    """Fetch ledger `seq` and split its transactions.

    Returns (ce_tx, user_txns) where ce_tx is the single
    ConsensusEntropy transaction. Raises AssertionError when the
    ledger cannot be fetched or the CE count is not exactly one.
    """
    result = ctx.ledger(seq, transactions=True)
    if not result:
        raise AssertionError(f"Ledger {seq}: fetch failed")
    entropy_txns, user_txns = [], []
    for tx in result.get("ledger", {}).get("transactions", []):
        if tx.get("TransactionType") == "ConsensusEntropy":
            entropy_txns.append(tx)
        else:
            user_txns.append(tx)
    if len(entropy_txns) != 1:
        raise AssertionError(
            f"Ledger {seq}: expected 1 ConsensusEntropy txn, got {len(entropy_txns)}"
        )
    return entropy_txns[0], user_txns
def entropy_fields(ce_tx):
    """Return (digest, entropy_count, is_zero) from a ConsensusEntropy tx.

    `is_zero` is True only for the placeholder case: an all-zero digest
    together with an EntropyCount of exactly 0.
    """
    digest = ce_tx.get("Digest", "")
    count = ce_tx.get("EntropyCount", -1)
    zeroed = (count == 0) and (digest == ZERO_DIGEST)
    return digest, count, zeroed
def assert_valid_entropy(ce_tx, seq, seen_digests=None):
    """Assert non-zero, quorum-met entropy; optionally check uniqueness.

    Returns (digest, entropy_count). Raises AssertionError on a zero or
    empty digest, a sub-quorum EntropyCount (< 4), or — when
    `seen_digests` is provided — a digest already seen (the set is
    updated in place).
    """
    digest, entropy_count, is_zero = entropy_fields(ce_tx)
    if is_zero or not digest:
        raise AssertionError(f"Ledger {seq}: zero/empty Digest")
    if entropy_count < 4:
        raise AssertionError(
            f"Ledger {seq}: EntropyCount={entropy_count} < 4 (sub-quorum)"
        )
    if seen_digests is None:
        return digest, entropy_count
    if digest in seen_digests:
        raise AssertionError(f"Ledger {seq}: duplicate Digest {digest[:16]}...")
    seen_digests.add(digest)
    return digest, entropy_count

View File

@@ -1,42 +0,0 @@
# Suite configuration for ConsensusEntropy testnet scenarios.
defaults:
  network:
    # Five-node network; quorum-related scenarios depend on this count.
    node_count: 5
    launcher: tmux
    slave_delay: 0.2
    features:
      - ConsensusEntropy
    track_features:
      - ConsensusEntropy
    log_levels:
      TxQ: info
      Protocol: debug
      Peer: debug
      LedgerConsensus: debug
      NetworkOPs: info
    env:
      XAHAU_RESOURCE_PER_PORT: "1"
      # Entropy polling interval in milliseconds.
      XAHAU_RNG_POLL_MS: "333"
tests:
  - name: steady_state_entropy
    script: .testnet/scenarios/entropy/steady_state_entropy.py
  # Same scenario re-run with fast bootstrap enabled.
  - name: steady_state_entropy_fast_start
    script: .testnet/scenarios/entropy/steady_state_entropy.py
    network:
      env:
        XAHAUD_BOOTSTRAP_FAST_START: "1"
  - name: entropy_with_transactions
    script: .testnet/scenarios/entropy/entropy_with_transactions.py
  - name: quorum_recovery_smoke
    script: .testnet/scenarios/entropy/quorum_recovery_smoke.py
  - name: quorum_degradation_smoke
    script: .testnet/scenarios/entropy/quorum_degradation_smoke.py
    network:
      log_levels:
        LedgerConsensus: trace
# Export scenarios: see export-suite.yml

View File

@@ -26,7 +26,7 @@ Loop: xrpld.app xrpld.nodestore
xrpld.app > xrpld.nodestore
Loop: xrpld.app xrpld.overlay
xrpld.overlay == xrpld.app
xrpld.overlay ~= xrpld.app
Loop: xrpld.app xrpld.peerfinder
xrpld.app > xrpld.peerfinder
@@ -47,7 +47,7 @@ Loop: xrpld.net xrpld.rpc
xrpld.rpc > xrpld.net
Loop: xrpld.overlay xrpld.rpc
xrpld.rpc > xrpld.overlay
xrpld.rpc ~= xrpld.overlay
Loop: xrpld.perflog xrpld.rpc
xrpld.rpc ~= xrpld.perflog

View File

@@ -43,7 +43,6 @@ test.consensus > xrpld.app
test.consensus > xrpld.consensus
test.consensus > xrpld.core
test.consensus > xrpld.ledger
test.consensus > xrpl.json
test.consensus > xrpl.protocol
test.core > test.jtx
test.core > test.toplevel

View File

@@ -22,9 +22,6 @@ target_compile_definitions (opts
$<$<BOOL:${beast_no_unit_test_inline}>:BEAST_NO_UNIT_TEST_INLINE=1>
$<$<BOOL:${beast_disable_autolink}>:BEAST_DONT_AUTOLINK_TO_WIN32_LIBRARIES=1>
$<$<BOOL:${single_io_service_thread}>:RIPPLE_SINGLE_IO_SERVICE_THREAD=1>
# Enhanced logging is enabled for Debug builds, or explicitly via
# -DBEAST_ENHANCED_LOGGING=ON for other build types.
$<$<OR:$<CONFIG:Debug>,$<BOOL:${BEAST_ENHANCED_LOGGING}>>:BEAST_ENHANCED_LOGGING=1>
$<$<BOOL:${voidstar}>:ENABLE_VOIDSTAR>)
target_compile_options (opts
INTERFACE

View File

@@ -47,8 +47,5 @@
#define MEM_OVERLAP -43
#define TOO_MANY_STATE_MODIFICATIONS -44
#define TOO_MANY_NAMESPACES -45
#define EXPORT_FAILURE -46
#define TOO_MANY_EXPORTED_TXN -47
#define TOO_LITTLE_ENTROPY -48
#define HOOK_ERROR_CODES
#endif //HOOK_ERROR_CODES

View File

@@ -336,24 +336,5 @@ prepare(
uint32_t read_ptr,
uint32_t read_len);
extern int64_t
xport_reserve(uint32_t count);
extern int64_t
xport(
uint32_t write_ptr,
uint32_t write_len,
uint32_t read_ptr,
uint32_t read_len);
extern int64_t
xport_cancel(uint32_t ticket_seq);
extern int64_t
dice(uint32_t sides);
extern int64_t
random(uint32_t write_ptr, uint32_t write_len);
#define HOOK_EXTERN
#endif // HOOK_EXTERN

203
hook/genesis/build_xahau_h.sh Executable file
View File

@@ -0,0 +1,203 @@
#!/bin/bash
# build_xahau_h.sh
# Builds genesis hook WASMs and updates xahau.h with hex arrays
set -euo pipefail

# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Script directory and path constants
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
XAHAU_H="${SCRIPT_DIR}/../../include/xrpl/hook/xahau.h"
TEMP_DIR="${SCRIPT_DIR}/.temp"

# Hook file mappings (colon-separated: ArrayName:wasm-file)
HOOK_FILES=(
    "GovernanceHook:govern.wasm"
    "RewardHook:reward.wasm"
    # "MintHook:mint.wasm"
)

# Cleanup function: remove temp dir on success, keep it for debugging on failure.
cleanup() {
    local exit_code=$?
    if [ ${exit_code} -eq 0 ] && [ -d "${TEMP_DIR}" ]; then
        rm -rf "${TEMP_DIR}"
    elif [ ${exit_code} -ne 0 ]; then
        echo -e "${RED}Error: Script failed with exit code ${exit_code}${NC}" >&2
        if [ -d "${TEMP_DIR}" ]; then
            echo -e "${YELLOW}Temp files preserved at: ${TEMP_DIR}${NC}" >&2
        fi
    fi
    exit ${exit_code}
}
trap cleanup EXIT INT TERM

# Tool verification
echo -e "${BLUE}==> Checking required tools...${NC}"
REQUIRED_TOOLS=("make" "xxd" "sed" "clang-format" "wasm-opt")
for tool in "${REQUIRED_TOOLS[@]}"; do
    if ! command -v "${tool}" &> /dev/null; then
        echo -e "${RED}Error: Required tool '${tool}' not found${NC}" >&2
        exit 1
    fi
    echo -e "${GREEN}${tool}${NC}"
done

# Verify wasm-opt version is exactly 100 (other versions produce different
# optimized output and would churn xahau.h).
WASM_OPT_VERSION=$(wasm-opt --version | grep -oE '[0-9]+' | head -1)
if [ "${WASM_OPT_VERSION}" != "100" ]; then
    echo -e "${RED}Error: wasm-opt version must be 100, but found ${WASM_OPT_VERSION}${NC}" >&2
    exit 1
fi
echo -e "${GREEN} ✓ wasm-opt version 100${NC}"

# Verify xahau.h exists
if [ ! -f "${XAHAU_H}" ]; then
    echo -e "${RED}Error: xahau.h not found at ${XAHAU_H}${NC}" >&2
    exit 1
fi

# Create temp directory
mkdir -p "${TEMP_DIR}"

# Build all WASM files
echo -e "${BLUE}==> Building WASM files with 'make all'...${NC}"
cd "${SCRIPT_DIR}"
make all
echo -e "${GREEN} Build completed successfully${NC}"

# Convert a WASM file to a C++-style hex array body (10 bytes per line,
# "0x..U," elements, indented, no trailing comma on the last line).
wasm_to_hex_array() {
    local wasm_file="$1"
    local indent=" "
    if [ ! -f "${wasm_file}" ]; then
        echo -e "${RED}Error: WASM file not found: ${wasm_file}${NC}" >&2
        return 1
    fi
    # Convert to hex with xxd, format with sed
    xxd -p -u -c 10 "${wasm_file}" | \
        sed 's/../0x&U,/g' | \
        sed "s/^/${indent}/g" | \
        sed '$ s/,$//'
}

# Replace (or append) the named std::vector<uint8_t> array in xahau.h.
update_hook_array() {
    local hook_name="$1"
    local hex_array="$2"
    local temp_file="${TEMP_DIR}/xahau.h.tmp"
    echo -e "${BLUE}==> Updating ${hook_name}...${NC}"
    # Check if hook already exists
    if grep -q "static const std::vector<uint8_t> ${hook_name} = {" "${XAHAU_H}"; then
        echo -e "${YELLOW} Replacing existing ${hook_name}${NC}"
        # Use awk to replace the array content between the opening line
        # and the closing "};".
        awk -v hook="${hook_name}" -v hex="${hex_array}" '
        BEGIN { in_array=0 }
        {
            if ($0 ~ "static const std::vector<uint8_t> " hook " = {") {
                print $0
                print hex
                in_array=1
                next
            }
            if (in_array && $0 ~ /};/) {
                print "};"
                in_array=0
                next
            }
            if (!in_array) {
                print $0
            }
        }
        ' "${XAHAU_H}" > "${temp_file}"
        mv "${temp_file}" "${XAHAU_H}"
    else
        echo -e "${YELLOW} Adding new ${hook_name}${NC}"
        # Find the position before #endif and add the new hook
        awk -v hook="${hook_name}" -v hex="${hex_array}" '
        {
            if ($0 ~ /#endif.*XAHAU_GENESIS_HOOKS/) {
                print ""
                print "static const std::vector<uint8_t> " hook " = {"
                print hex
                print "};"
                print ""
                print $0
            } else {
                print $0
            }
        }
        ' "${XAHAU_H}" > "${temp_file}"
        mv "${temp_file}" "${XAHAU_H}"
    fi
    echo -e "${GREEN}${hook_name} updated${NC}"
}

# Process each hook
for hook_entry in "${HOOK_FILES[@]}"; do
    hook_name="${hook_entry%%:*}"
    wasm_file="${SCRIPT_DIR}/${hook_entry##*:}"
    echo -e "${BLUE}==> Converting ${wasm_file} to hex array...${NC}"
    # NOTE: under `set -e`, `var=$(cmd)` followed by `[ $? -ne 0 ]` never
    # reaches the check on failure (the shell exits first). Guarding the
    # assignment directly keeps this error path reachable.
    if ! hex_array=$(wasm_to_hex_array "${wasm_file}"); then
        echo -e "${RED}Error: Failed to convert ${wasm_file}${NC}" >&2
        exit 1
    fi
    echo -e "${GREEN} Conversion successful ($(echo "${hex_array}" | wc -l) lines)${NC}"
    update_hook_array "${hook_name}" "${hex_array}"
done

# Format with clang-format (keep a pre-format copy for debugging)
echo -e "${BLUE}==> Formatting with clang-format...${NC}"
cp "${XAHAU_H}" "${TEMP_DIR}/xahau.h.before_format"
clang-format -i "${XAHAU_H}"
echo -e "${GREEN} Formatting completed${NC}"

# Verification
echo -e "${BLUE}==> Verifying changes...${NC}"
for hook_entry in "${HOOK_FILES[@]}"; do
    hook_name="${hook_entry%%:*}"
    if grep -q "static const std::vector<uint8_t> ${hook_name} = {" "${XAHAU_H}"; then
        echo -e "${GREEN}${hook_name} found in xahau.h${NC}"
    else
        echo -e "${RED}${hook_name} NOT found in xahau.h${NC}" >&2
        exit 1
    fi
done

# Show summary
echo ""
echo -e "${GREEN}========================================${NC}"
echo -e "${GREEN}Successfully updated xahau.h${NC}"
echo -e "${GREEN}========================================${NC}"
echo -e "Updated hooks:"
for hook_entry in "${HOOK_FILES[@]}"; do
    hook_name="${hook_entry%%:*}"
    wasm_file="${SCRIPT_DIR}/${hook_entry##*:}"
    size=$(wc -c < "${wasm_file}" | tr -d ' ')
    echo -e " - ${hook_name}: ${size} bytes"
done
echo ""
echo -e "File location: ${XAHAU_H}"
echo ""

View File

@@ -0,0 +1,46 @@
// For documentation please see: https://xrpl-hooks.readme.io/reference/
// Generated using generate_error.sh
// Hook API return codes: 0 is success, all failures are negative.
#ifndef HOOK_ERROR_CODES
#define SUCCESS 0
#define OUT_OF_BOUNDS -1
#define INTERNAL_ERROR -2
#define TOO_BIG -3
#define TOO_SMALL -4
#define DOESNT_EXIST -5
#define NO_FREE_SLOTS -6
#define INVALID_ARGUMENT -7
#define ALREADY_SET -8
#define PREREQUISITE_NOT_MET -9
#define FEE_TOO_LARGE -10
#define EMISSION_FAILURE -11
#define TOO_MANY_NONCES -12
#define TOO_MANY_EMITTED_TXN -13
#define NOT_IMPLEMENTED -14
#define INVALID_ACCOUNT -15
#define GUARD_VIOLATION -16
#define INVALID_FIELD -17
#define PARSE_ERROR -18
#define RC_ROLLBACK -19
#define RC_ACCEPT -20
#define NO_SUCH_KEYLET -21
#define NOT_AN_ARRAY -22
#define NOT_AN_OBJECT -23
// NOTE(review): -10024, not -24, breaking the otherwise sequential
// numbering — presumably intentional in the upstream hook API; confirm
// against the generated reference before "fixing".
#define INVALID_FLOAT -10024
#define DIVISION_BY_ZERO -25
#define MANTISSA_OVERSIZED -26
#define MANTISSA_UNDERSIZED -27
#define EXPONENT_OVERSIZED -28
#define EXPONENT_UNDERSIZED -29
// NOTE(review): OVERFLOW can collide with the legacy OVERFLOW macro from
// <math.h> if both headers end up in one translation unit.
#define OVERFLOW -30
#define NOT_IOU_AMOUNT -31
#define NOT_AN_AMOUNT -32
#define CANT_RETURN_NEGATIVE -33
#define NOT_AUTHORIZED -34
#define PREVIOUS_FAILURE_PREVENTS_RETRY -35
#define TOO_MANY_PARAMS -36
#define INVALID_TXN -37
#define RESERVE_INSUFFICIENT -38
#define COMPLEX_NOT_SUPPORTED -39
#define DOES_NOT_MATCH -40
#define HOOK_ERROR_CODES
#endif //HOOK_ERROR_CODES

View File

@@ -0,0 +1,352 @@
// For documentation please see: https://xrpl-hooks.readme.io/reference/
// Generated using generate_extern.sh
// Declarations for the host functions a hook may import. Pointer
// arguments are passed as uint32_t offsets into WASM linear memory.
#include <stdint.h>
#ifndef HOOK_EXTERN
// Guard function: must be called at the top of every loop.
extern int32_t __attribute__((noduplicate))
_g(uint32_t guard_id, uint32_t maxiter);
// Terminate hook execution with success.
extern int64_t
accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
// --- Emitted-transaction API ---
extern int64_t
emit(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t read_ptr,
    uint32_t read_len);
extern int64_t
etxn_burden(void);
extern int64_t
etxn_details(uint32_t write_ptr, uint32_t write_len);
extern int64_t
etxn_fee_base(uint32_t read_ptr, uint32_t read_len);
extern int64_t
etxn_generation(void);
extern int64_t
etxn_nonce(uint32_t write_ptr, uint32_t write_len);
extern int64_t
etxn_reserve(uint32_t count);
extern int64_t
fee_base(void);
// --- XFL floating point API (values packed into int64_t) ---
extern int64_t
float_compare(int64_t float1, int64_t float2, uint32_t mode);
extern int64_t
float_divide(int64_t float1, int64_t float2);
extern int64_t
float_exponent(int64_t float1);
extern int64_t
float_exponent_set(int64_t float1, int32_t exponent);
extern int64_t
float_int(int64_t float1, uint32_t decimal_places, uint32_t abs);
extern int64_t
float_invert(int64_t float1);
extern int64_t
float_log(int64_t float1);
extern int64_t
float_mantissa(int64_t float1);
extern int64_t
float_mantissa_set(int64_t float1, int64_t mantissa);
extern int64_t
float_mulratio(
    int64_t float1,
    uint32_t round_up,
    uint32_t numerator,
    uint32_t denominator);
extern int64_t
float_multiply(int64_t float1, int64_t float2);
extern int64_t
float_negate(int64_t float1);
extern int64_t
float_one(void);
extern int64_t
float_root(int64_t float1, uint32_t n);
extern int64_t
float_set(int32_t exponent, int64_t mantissa);
extern int64_t
float_sign(int64_t float1);
extern int64_t
float_sign_set(int64_t float1, uint32_t negative);
extern int64_t
float_sto(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t cread_ptr,
    uint32_t cread_len,
    uint32_t iread_ptr,
    uint32_t iread_len,
    int64_t float1,
    uint32_t field_code);
extern int64_t
float_sto_set(uint32_t read_ptr, uint32_t read_len);
extern int64_t
float_sum(int64_t float1, int64_t float2);
// --- Hook/installation introspection ---
extern int64_t
hook_account(uint32_t write_ptr, uint32_t write_len);
extern int64_t
hook_again(void);
extern int64_t
hook_hash(uint32_t write_ptr, uint32_t write_len, int32_t hook_no);
extern int64_t
hook_param(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t read_ptr,
    uint32_t read_len);
extern int64_t
otxn_param(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t read_ptr,
    uint32_t read_len);
extern int64_t
hook_param_set(
    uint32_t read_ptr,
    uint32_t read_len,
    uint32_t kread_ptr,
    uint32_t kread_len,
    uint32_t hread_ptr,
    uint32_t hread_len);
extern int64_t
hook_pos(void);
extern int64_t
hook_skip(uint32_t read_ptr, uint32_t read_len, uint32_t flags);
// --- Ledger data ---
extern int64_t
ledger_keylet(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t lread_ptr,
    uint32_t lread_len,
    uint32_t hread_ptr,
    uint32_t hread_len);
extern int64_t
ledger_last_hash(uint32_t write_ptr, uint32_t write_len);
extern int64_t
ledger_last_time(void);
extern int64_t
ledger_nonce(uint32_t write_ptr, uint32_t write_len);
extern int64_t
ledger_seq(void);
extern int64_t
meta_slot(uint32_t slot_no);
// --- Originating-transaction accessors ---
extern int64_t
otxn_burden(void);
extern int64_t
otxn_field(uint32_t write_ptr, uint32_t write_len, uint32_t field_id);
extern int64_t
otxn_field_txt(uint32_t write_ptr, uint32_t write_len, uint32_t field_id);
extern int64_t
otxn_generation(void);
extern int64_t
otxn_id(uint32_t write_ptr, uint32_t write_len, uint32_t flags);
extern int64_t
otxn_slot(uint32_t slot_no);
extern int64_t
otxn_type(void);
// Terminate hook execution with failure (reverts state changes).
extern int64_t
rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
// --- Slot API ---
extern int64_t
slot(uint32_t write_ptr, uint32_t write_len, uint32_t slot);
extern int64_t
slot_clear(uint32_t slot);
extern int64_t
slot_count(uint32_t slot);
extern int64_t
slot_float(uint32_t slot_no);
extern int64_t
slot_id(uint32_t write_ptr, uint32_t write_len, uint32_t slot);
extern int64_t
slot_set(uint32_t read_ptr, uint32_t read_len, uint32_t slot);
extern int64_t
slot_size(uint32_t slot);
extern int64_t
slot_subarray(uint32_t parent_slot, uint32_t array_id, uint32_t new_slot);
extern int64_t
slot_subfield(uint32_t parent_slot, uint32_t field_id, uint32_t new_slot);
extern int64_t
slot_type(uint32_t slot_no, uint32_t flags);
// --- Hook state (persistent key/value) API ---
extern int64_t
state(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t kread_ptr,
    uint32_t kread_len);
extern int64_t
state_foreign(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t kread_ptr,
    uint32_t kread_len,
    uint32_t nread_ptr,
    uint32_t nread_len,
    uint32_t aread_ptr,
    uint32_t aread_len);
extern int64_t
state_foreign_set(
    uint32_t read_ptr,
    uint32_t read_len,
    uint32_t kread_ptr,
    uint32_t kread_len,
    uint32_t nread_ptr,
    uint32_t nread_len,
    uint32_t aread_ptr,
    uint32_t aread_len);
extern int64_t
state_set(
    uint32_t read_ptr,
    uint32_t read_len,
    uint32_t kread_ptr,
    uint32_t kread_len);
// --- Serialized-object (STO) manipulation ---
extern int64_t
sto_emplace(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t sread_ptr,
    uint32_t sread_len,
    uint32_t fread_ptr,
    uint32_t fread_len,
    uint32_t field_id);
extern int64_t
sto_erase(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t read_ptr,
    uint32_t read_len,
    uint32_t field_id);
extern int64_t
sto_subarray(uint32_t read_ptr, uint32_t read_len, uint32_t array_id);
extern int64_t
sto_subfield(uint32_t read_ptr, uint32_t read_len, uint32_t field_id);
extern int64_t
sto_validate(uint32_t tread_ptr, uint32_t tread_len);
// --- Debug tracing ---
extern int64_t
trace(
    uint32_t mread_ptr,
    uint32_t mread_len,
    uint32_t dread_ptr,
    uint32_t dread_len,
    uint32_t as_hex);
extern int64_t
trace_float(uint32_t read_ptr, uint32_t read_len, int64_t float1);
extern int64_t
trace_num(uint32_t read_ptr, uint32_t read_len, int64_t number);
extern int64_t
trace_slot(uint32_t read_ptr, uint32_t read_len, uint32_t slot);
// --- Utility functions ---
extern int64_t
util_accid(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t read_ptr,
    uint32_t read_len);
extern int64_t
util_keylet(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t keylet_type,
    uint32_t a,
    uint32_t b,
    uint32_t c,
    uint32_t d,
    uint32_t e,
    uint32_t f);
extern int64_t
util_raddr(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t read_ptr,
    uint32_t read_len);
extern int64_t
util_sha512h(
    uint32_t write_ptr,
    uint32_t write_len,
    uint32_t read_ptr,
    uint32_t read_len);
extern int64_t
util_verify(
    uint32_t dread_ptr,
    uint32_t dread_len,
    uint32_t sread_ptr,
    uint32_t sread_len,
    uint32_t kread_ptr,
    uint32_t kread_len);
extern int64_t xpop_slot(uint32_t, uint32_t);
#define HOOK_EXTERN
#endif // HOOK_EXTERN

View File

@@ -0,0 +1,50 @@
/**
 * Hook API include file
 *
 * Note to the reader:
 * This include defines two types of things: external functions and macros
 * Functions are used sparingly because a non-inlining compiler may produce
 * undesirable output.
 *
 * Find documentation here: https://xrpl-hooks.readme.io/reference/
 */
#ifndef HOOKAPI_INCLUDED
#define HOOKAPI_INCLUDED 1
// Keylet type selectors for util_keylet() / ledger_keylet().
#define KEYLET_HOOK 1
#define KEYLET_HOOK_STATE 2
#define KEYLET_ACCOUNT 3
#define KEYLET_AMENDMENTS 4
#define KEYLET_CHILD 5
#define KEYLET_SKIP 6
#define KEYLET_FEES 7
#define KEYLET_NEGATIVE_UNL 8
#define KEYLET_LINE 9
#define KEYLET_OFFER 10
#define KEYLET_QUALITY 11
#define KEYLET_EMITTED_DIR 12
#define KEYLET_TICKET 13
#define KEYLET_SIGNERS 14
#define KEYLET_CHECK 15
#define KEYLET_DEPOSIT_PREAUTH 16
#define KEYLET_UNCHECKED 17
#define KEYLET_OWNER_DIR 18
#define KEYLET_PAGE 19
#define KEYLET_ESCROW 20
#define KEYLET_PAYCHAN 21
#define KEYLET_EMITTED 22
#define KEYLET_NFT_OFFER 23
#define KEYLET_HOOK_DEFINITION 24
// Bit flags for float_compare()'s comparison mode (may be OR'd together).
#define COMPARE_EQUAL 1U
#define COMPARE_LESS 2U
#define COMPARE_GREATER 4U
// Pull in the rest of the hook API surface.
#include "error.h"
#include "extern.h"
#include "sfcodes.h"
#include "macro.h"
#include "types.h"
#endif

View File

@@ -0,0 +1,671 @@
/**
 * These are helper macros for writing hooks, all of them are optional as is including hookmacro.h at all
 */
#include <stdint.h>
#include "hookapi.h"
#include "sfcodes.h"
#ifndef HOOKMACROS_INCLUDED
#define HOOKMACROS_INCLUDED 1
// DEBUG follows the standard NDEBUG convention: in release (NDEBUG)
// builds the trace calls below become `if (0) ...` and are removed by
// the compiler.
#ifdef NDEBUG
#define DEBUG 0
#else
#define DEBUG 1
#endif
// Debug tracing helpers: log a variable's name (via # stringization) and
// its value.  The (uint32_t) casts on the string literals assume 32-bit
// pointers (the wasm32 hook environment).
// NOTE(review): each expands to a bare `if` with a trailing ';' -- do not
// use these as the un-braced body of an if/else; keep them on their own
// statement line.
#define TRACEVAR(v) if (DEBUG) trace_num((uint32_t)(#v), (uint32_t)(sizeof(#v) - 1), (int64_t)v);
#define TRACEHEX(v) if (DEBUG) trace((uint32_t)(#v), (uint32_t)(sizeof(#v) - 1), (uint32_t)(v), (uint32_t)(sizeof(v)), 1);
#define TRACEXFL(v) if (DEBUG) trace_float((uint32_t)(#v), (uint32_t)(sizeof(#v) - 1), (int64_t)v);
#define TRACESTR(v) if (DEBUG) trace((uint32_t)(#v), (uint32_t)(sizeof(#v) - 1), (uint32_t)(v), sizeof(v), 0);
// hook developers should use this guard macro, simply GUARD(<maximum iterations>)
// Every loop must call the host guard _g() with an id unique to the loop
// and an upper bound on iterations; GUARD derives the id from __LINE__.
// Use GUARDM with a distinct n when multiple guarded loops can share one
// source line (e.g. a macro expanded more than once).
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
#define GUARDM(maxiter, n) _g(( (1ULL << 31U) + (__LINE__ << 16) + n), (maxiter)+1)
// Expand a static array/string into the (pointer, length) argument pair
// the hook API expects.  Only valid for arrays (sizeof), not pointers.
#define SBUF(str) (uint32_t)(str), sizeof(str)
// Roll back the transaction with message `str` when cond is false.
#define REQUIRE(cond, str)\
{\
if (!(cond))\
rollback(SBUF(str), __LINE__);\
}
// make a report buffer as a c-string
// provide a name for a buffer to declare (buf)
// provide a static string
// provide an integer to print after the string
// Declares `buf` (string plus up to 21 chars of signed decimal) and
// `out_len`, then appends the decimal rendering of num after str.
// Uses GUARDM ids 1 and 2, so invoke RBUF at most once per source line.
// NOTE(review): (num) * -1 overflows for INT64_MIN -- assumed not passed.
#define RBUF(buf, out_len, str, num)\
unsigned char buf[sizeof(str) + 21];\
int out_len = 0;\
{\
int i = 0;\
for (; GUARDM(sizeof(str),1),i < sizeof(str); ++i)\
(buf)[i] = str[i];\
if ((buf)[sizeof(str)-1] == 0) i--;\
if ((num) < 0) (buf)[i++] = '-';\
uint64_t unsigned_num = (uint64_t)( (num) < 0 ? (num) * -1 : (num) );\
uint64_t j = 10000000000000000000ULL;\
int start = 1;\
for (; GUARDM(20,2), unsigned_num > 0 && j > 0; j /= 10)\
{\
unsigned char digit = ( unsigned_num / j ) % 10;\
if (digit == 0 && start)\
continue;\
start = 0;\
(buf)[i++] = '0' + digit;\
}\
(buf)[i] = '\0';\
out_len = i;\
}
// Two-segment variant of RBUF: declares `buff` and `out_len`, writing
// str, num, str2, num2 in sequence.  Uses GUARDM ids 1-4, so invoke at
// most once per source line.  Same INT64_MIN caveat as RBUF.
#define RBUF2(buff, out_len, str, num, str2, num2)\
unsigned char buff[sizeof(str) + sizeof(str2) + 42];\
int out_len = 0;\
{\
unsigned char* buf = buff;\
int i = 0;\
for (; GUARDM(sizeof(str),1),i < sizeof(str); ++i)\
(buf)[i] = str[i];\
if ((buf)[sizeof(str)-1] == 0) i--;\
if ((num) < 0) (buf)[i++] = '-';\
uint64_t unsigned_num = (uint64_t)( (num) < 0 ? (num) * -1 : (num) );\
uint64_t j = 10000000000000000000ULL;\
int start = 1;\
for (; GUARDM(20,2), unsigned_num > 0 && j > 0; j /= 10)\
{\
unsigned char digit = ( unsigned_num / j ) % 10;\
if (digit == 0 && start)\
continue;\
start = 0;\
(buf)[i++] = '0' + digit;\
}\
buf += i;\
out_len += i;\
i = 0;\
for (; GUARDM(sizeof(str2),3),i < sizeof(str2); ++i)\
(buf)[i] = str2[i];\
if ((buf)[sizeof(str2)-1] == 0) i--;\
if ((num2) < 0) (buf)[i++] = '-';\
unsigned_num = (uint64_t)( (num2) < 0 ? (num2) * -1 : (num2) );\
j = 10000000000000000000ULL;\
start = 1;\
for (; GUARDM(20,4), unsigned_num > 0 && j > 0; j /= 10)\
{\
unsigned char digit = ( unsigned_num / j ) % 10;\
if (digit == 0 && start)\
continue;\
start = 0;\
(buf)[i++] = '0' + digit;\
}\
(buf)[i] = '\0';\
out_len += i;\
}
// Zero every byte of a statically-sized buffer b (guarded loop; b must
// be an array so sizeof gives its length).
#define CLEARBUF(b)\
{\
for (int x = 0; GUARD(sizeof(b)), x < sizeof(b); ++x)\
b[x] = 0;\
}
// Decode a serialized native-XRP Amount field into drops.
// Returns an int64_t: -2 when byte 0 has its top bit set (IOU or
// negative amount), otherwise the non-negative drops value.  Byte 0's
// top two bits are flags; only its low 6 bits carry magnitude.
// BUG FIX: the magnitude mask was written as HEX 0xb00111111 instead of
// the intended BINARY literal 0b00111111 (0x3F); the effective mask was
// 0x11, silently corrupting the top byte of any large drops value.
#define AMOUNT_TO_DROPS(amount_buffer)\
    (((amount_buffer)[0] >> 7) ? -2 : (\
     ((((uint64_t)((amount_buffer)[0])) & 0b00111111) << 56) +\
     (((uint64_t)((amount_buffer)[1])) << 48) +\
     (((uint64_t)((amount_buffer)[2])) << 40) +\
     (((uint64_t)((amount_buffer)[3])) << 32) +\
     (((uint64_t)((amount_buffer)[4])) << 24) +\
     (((uint64_t)((amount_buffer)[5])) << 16) +\
     (((uint64_t)((amount_buffer)[6])) << 8) +\
     (((uint64_t)((amount_buffer)[7])))))
// Unpack the (offset << 32 | length) results returned by sto_subfield /
// sto_subarray.  BUG FIX: the macro argument is now parenthesized so an
// expression argument (e.g. SUB_OFFSET(a | b)) groups correctly instead
// of being torn apart by operator precedence.
#define SUB_OFFSET(x) ((int32_t)((x) >> 32))
#define SUB_LENGTH(x) ((int32_t)((x) & 0xFFFFFFFFULL))
// Fast fixed-size buffer equality using 64/32-bit word loads.
// NOTE(review): these cast byte buffers to uint64_t*/uint32_t*, assuming
// suitably aligned inputs and permissive aliasing (works in the wasm
// hook toolchain; technically UB under strict ISO C).
#define BUFFER_EQUAL_20(buf1, buf2)\
(\
*(((uint64_t*)(buf1)) + 0) == *(((uint64_t*)(buf2)) + 0) &&\
*(((uint64_t*)(buf1)) + 1) == *(((uint64_t*)(buf2)) + 1) &&\
*(((uint32_t*)(buf1)) + 4) == *(((uint32_t*)(buf2)) + 4))
#define BUFFER_EQUAL_32(buf1, buf2)\
(\
*(((uint64_t*)(buf1)) + 0) == *(((uint64_t*)(buf2)) + 0) &&\
*(((uint64_t*)(buf1)) + 1) == *(((uint64_t*)(buf2)) + 1) &&\
*(((uint64_t*)(buf1)) + 2) == *(((uint64_t*)(buf2)) + 2) &&\
*(((uint64_t*)(buf1)) + 3) == *(((uint64_t*)(buf2)) + 3))
// when using this macro buf1len may be dynamic but buf2len must be static
// provide n >= 1 to indicate how many times the macro will be hit on the line of code
// e.g. if it is in a loop that loops 10 times n = 10
// Sets `output` to 1 if the buffers have equal length and bytes, else 0
// (short-circuits on the first mismatching byte).
#define BUFFER_EQUAL_GUARD(output, buf1, buf1len, buf2, buf2len, n)\
{\
output = ((buf1len) == (buf2len) ? 1 : 0);\
for (int x = 0; GUARDM( (buf2len) * (n), 1 ), output && x < (buf2len);\
++x)\
output = *(((uint8_t*)(buf1)) + x) == *(((uint8_t*)(buf2)) + x);\
}
// Exchange two uint8_t* buffer pointers.  Only the pointers are swapped;
// the pointed-to bytes are untouched.
#define BUFFER_SWAP(x,y)\
{\
    uint8_t* swap_tmp = (x);\
    (x) = (y);\
    (y) = swap_tmp;\
}
// Lexicographic (big-endian byte order) compare of two 20-byte account
// IDs.  Sets compare_result to 1 if buf1 > buf2, -1 if buf1 < buf2,
// 0 if equal.  Loop is guarded; use at most once per source line.
#define ACCOUNT_COMPARE(compare_result, buf1, buf2)\
{\
compare_result = 0;\
for (int i = 0; GUARD(20), i < 20; ++i)\
{\
if (buf1[i] > buf2[i])\
{\
compare_result = 1;\
break;\
}\
else if (buf1[i] < buf2[i])\
{\
compare_result = -1;\
break;\
}\
}\
}
// Compare a buffer against a string literal (the literal's trailing NUL
// is excluded via sizeof(str)-1).
#define BUFFER_EQUAL_STR_GUARD(output, buf1, buf1len, str, n)\
BUFFER_EQUAL_GUARD(output, buf1, buf1len, str, (sizeof(str)-1), n)
#define BUFFER_EQUAL_STR(output, buf1, buf1len, str)\
BUFFER_EQUAL_GUARD(output, buf1, buf1len, str, (sizeof(str)-1), 1)
// Compare two buffers over a statically-known length.
#define BUFFER_EQUAL(output, buf1, buf2, compare_len)\
BUFFER_EQUAL_GUARD(output, buf1, compare_len, buf2, compare_len, 1)
// Serialize a 16-bit value big-endian into the first 2 bytes at buf_raw.
#define UINT16_TO_BUF(buf_raw, i)\
{\
    unsigned char* out_b = (unsigned char*)buf_raw;\
    out_b[0] = (((uint64_t)i) >> 8) & 0xFFUL;\
    out_b[1] = ((uint64_t)i) & 0xFFUL;\
}
// Read a big-endian 16-bit value from buf (result has type uint64_t).
#define UINT16_FROM_BUF(buf)\
    (((uint64_t)((buf)[0]) << 8) +\
     ((uint64_t)((buf)[1])))
// Serialize / deserialize a 32-bit value as 4 big-endian bytes.
#define UINT32_TO_BUF(buf_raw, i)\
{\
unsigned char* buf = (unsigned char*)buf_raw;\
buf[0] = (((uint64_t)i) >> 24) & 0xFFUL;\
buf[1] = (((uint64_t)i) >> 16) & 0xFFUL;\
buf[2] = (((uint64_t)i) >> 8) & 0xFFUL;\
buf[3] = (((uint64_t)i) >> 0) & 0xFFUL;\
}
#define UINT32_FROM_BUF(buf)\
(((uint64_t)((buf)[0]) << 24) +\
((uint64_t)((buf)[1]) << 16) +\
((uint64_t)((buf)[2]) << 8) +\
((uint64_t)((buf)[3]) << 0))
// Serialize / deserialize a 64-bit value as 8 big-endian bytes.
#define UINT64_TO_BUF(buf_raw, i)\
{\
unsigned char* buf = (unsigned char*)buf_raw;\
buf[0] = (((uint64_t)i) >> 56) & 0xFFUL;\
buf[1] = (((uint64_t)i) >> 48) & 0xFFUL;\
buf[2] = (((uint64_t)i) >> 40) & 0xFFUL;\
buf[3] = (((uint64_t)i) >> 32) & 0xFFUL;\
buf[4] = (((uint64_t)i) >> 24) & 0xFFUL;\
buf[5] = (((uint64_t)i) >> 16) & 0xFFUL;\
buf[6] = (((uint64_t)i) >> 8) & 0xFFUL;\
buf[7] = (((uint64_t)i) >> 0) & 0xFFUL;\
}
#define UINT64_FROM_BUF(buf)\
(((uint64_t)((buf)[0]) << 56) +\
((uint64_t)((buf)[1]) << 48) +\
((uint64_t)((buf)[2]) << 40) +\
((uint64_t)((buf)[3]) << 32) +\
((uint64_t)((buf)[4]) << 24) +\
((uint64_t)((buf)[5]) << 16) +\
((uint64_t)((buf)[6]) << 8) +\
((uint64_t)((buf)[7]) << 0))
// Signed 64-bit <-> 8-byte buffer, sign-magnitude style: the top bit of
// byte 0 is the sign flag, the remaining 63 bits the magnitude.
// NOTE(review): for negative i, INT64_TO_BUF stores two's-complement
// byte values (with the top bit replaced by the sign flag) while
// INT64_FROM_BUF decodes magnitude * sign -- the round-trip only holds
// for non-negative values.  Confirm intended before relying on negatives.
#define INT64_FROM_BUF(buf)\
((((uint64_t)((buf)[0] & 0x7FU) << 56) +\
((uint64_t)((buf)[1]) << 48) +\
((uint64_t)((buf)[2]) << 40) +\
((uint64_t)((buf)[3]) << 32) +\
((uint64_t)((buf)[4]) << 24) +\
((uint64_t)((buf)[5]) << 16) +\
((uint64_t)((buf)[6]) << 8) +\
((uint64_t)((buf)[7]) << 0)) * (buf[0] & 0x80U ? -1 : 1))
#define INT64_TO_BUF(buf_raw, i)\
{\
unsigned char* buf = (unsigned char*)buf_raw;\
buf[0] = (((uint64_t)i) >> 56) & 0x7FUL;\
buf[1] = (((uint64_t)i) >> 48) & 0xFFUL;\
buf[2] = (((uint64_t)i) >> 40) & 0xFFUL;\
buf[3] = (((uint64_t)i) >> 32) & 0xFFUL;\
buf[4] = (((uint64_t)i) >> 24) & 0xFFUL;\
buf[5] = (((uint64_t)i) >> 16) & 0xFFUL;\
buf[6] = (((uint64_t)i) >> 8) & 0xFFUL;\
buf[7] = (((uint64_t)i) >> 0) & 0xFFUL;\
if (i < 0) buf[0] |= 0x80U;\
}
// TransactionType codes (sfTransactionType) -- pass to ENCODE_TT.
#define ttPAYMENT 0
#define ttESCROW_CREATE 1
#define ttESCROW_FINISH 2
#define ttACCOUNT_SET 3
#define ttESCROW_CANCEL 4
#define ttREGULAR_KEY_SET 5
#define ttOFFER_CREATE 7
#define ttOFFER_CANCEL 8
#define ttTICKET_CREATE 10
#define ttSIGNER_LIST_SET 12
#define ttPAYCHAN_CREATE 13
#define ttPAYCHAN_FUND 14
#define ttPAYCHAN_CLAIM 15
#define ttCHECK_CREATE 16
#define ttCHECK_CASH 17
#define ttCHECK_CANCEL 18
#define ttDEPOSIT_PREAUTH 19
#define ttTRUST_SET 20
#define ttACCOUNT_DELETE 21
#define ttHOOK_SET 22
#define ttNFTOKEN_MINT 25
#define ttNFTOKEN_BURN 26
#define ttNFTOKEN_CREATE_OFFER 27
#define ttNFTOKEN_CANCEL_OFFER 28
#define ttNFTOKEN_ACCEPT_OFFER 29
#define ttURITOKEN_MINT 45
#define ttURITOKEN_BURN 46
#define ttURITOKEN_BUY 47
#define ttURITOKEN_CREATE_SELL_OFFER 48
#define ttURITOKEN_CANCEL_SELL_OFFER 49
#define ttCLAIM_REWARD 98
#define ttINVOKE 99
// Pseudo-transaction types (100+).
#define ttAMENDMENT 100
#define ttFEE 101
#define ttUNL_MODIFY 102
#define ttEMIT_FAILURE 103
// Universal transaction flag: fully-canonical signature (bit 31).
#define tfCANONICAL 0x80000000UL
// AccountID field codes (serialized type 8) -- pass to ENCODE_ACCOUNT.
#define atACCOUNT 1U
#define atOWNER 2U
#define atDESTINATION 3U
#define atISSUER 4U
#define atAUTHORIZE 5U
#define atUNAUTHORIZE 6U
#define atTARGET 7U
#define atREGULARKEY 8U
#define atPSEUDOCALLBACK 9U
// Amount field codes (serialized type 6) -- pass to ENCODE_DROPS /
// ENCODE_TL.
#define amAMOUNT 1U
#define amBALANCE 2U
#define amLIMITAMOUNT 3U
#define amTAKERPAYS 4U
#define amTAKERGETS 5U
#define amLOWLIMIT 6U
#define amHIGHLIMIT 7U
#define amFEE 8U
#define amSENDMAX 9U
#define amDELIVERMIN 10U
#define amMINIMUMOFFER 16U
#define amRIPPLEESCROW 17U
#define amDELIVEREDAMOUNT 18U
/**
* RH NOTE -- PAY ATTENTION
*
* ALL 'ENCODE' MACROS INCREMENT BUF_OUT
* THIS IS TO MAKE CHAINING EASY
* BUF_OUT IS A SACRIFICIAL POINTER
*
* 'ENCODE' MACROS WITH CONSTANTS HAVE
* ALIASING TO ASSIST YOU WITH ORDER
* _TYPECODE_FIELDCODE_ENCODE_MACRO
* TO PRODUCE A SERIALIZED OBJECT
* IN CANONICAL FORMAT YOU MUST ORDER
* FIRST BY TYPE CODE THEN BY FIELD CODE
*
* ALL 'PREPARE' MACROS PRESERVE POINTERS
*
**/
// Serialize a trust-line (IOU) Amount field: header byte 0x60 + field
// code, followed by the 48-byte pre-serialized amount in tlamt.
// Advances buf_out by ENCODE_TL_SIZE.  Guarded loop uses GUARDM id 1.
#define ENCODE_TL_SIZE 49
#define ENCODE_TL(buf_out, tlamt, amount_type)\
{\
uint8_t uat = amount_type; \
buf_out[0] = 0x60U +(uat & 0x0FU ); \
for (int i = 1; GUARDM(48, 1), i < 49; ++i)\
buf_out[i] = tlamt[i-1];\
buf_out += ENCODE_TL_SIZE;\
}
#define _06_XX_ENCODE_TL(buf_out, drops, amount_type )\
ENCODE_TL(buf_out, drops, amount_type );
#define ENCODE_TL_AMOUNT(buf_out, drops )\
ENCODE_TL(buf_out, drops, amAMOUNT );
#define _06_01_ENCODE_TL_AMOUNT(buf_out, drops )\
ENCODE_TL_AMOUNT(buf_out, drops );
// Encode drops to serialization format
// consumes 9 bytes
// Native XRP amount: header 0x60 + field code, then 8 bytes where the
// top byte carries the 0b01000000 "positive native" flag plus the top
// 6 bits of the drops magnitude.  Advances buf_out.
#define ENCODE_DROPS_SIZE 9
#define ENCODE_DROPS(buf_out, drops, amount_type ) \
{\
uint8_t uat = amount_type; \
uint64_t udrops = drops; \
buf_out[0] = 0x60U +(uat & 0x0FU ); \
buf_out[1] = 0b01000000 + (( udrops >> 56 ) & 0b00111111 ); \
buf_out[2] = (udrops >> 48) & 0xFFU; \
buf_out[3] = (udrops >> 40) & 0xFFU; \
buf_out[4] = (udrops >> 32) & 0xFFU; \
buf_out[5] = (udrops >> 24) & 0xFFU; \
buf_out[6] = (udrops >> 16) & 0xFFU; \
buf_out[7] = (udrops >> 8) & 0xFFU; \
buf_out[8] = (udrops >> 0) & 0xFFU; \
buf_out += ENCODE_DROPS_SIZE; \
}
#define _06_XX_ENCODE_DROPS(buf_out, drops, amount_type )\
ENCODE_DROPS(buf_out, drops, amount_type );
#define ENCODE_DROPS_AMOUNT(buf_out, drops )\
ENCODE_DROPS(buf_out, drops, amAMOUNT );
#define _06_01_ENCODE_DROPS_AMOUNT(buf_out, drops )\
ENCODE_DROPS_AMOUNT(buf_out, drops );
#define ENCODE_DROPS_FEE(buf_out, drops )\
ENCODE_DROPS(buf_out, drops, amFEE );
#define _06_08_ENCODE_DROPS_FEE(buf_out, drops )\
ENCODE_DROPS_FEE(buf_out, drops );
// Serialize the TransactionType field (sfTransactionType, a UInt16):
// header byte 0x12 followed by the 2-byte big-endian code.  Advances
// buf_out by ENCODE_TT_SIZE.
// BUG FIX: tt was previously narrowed through uint8_t, so the high byte
// of any type code >= 256 was silently encoded as zero; the local is now
// uint16_t to match the field width (identical output for codes <= 255).
#define ENCODE_TT_SIZE 3
#define ENCODE_TT(buf_out, tt )\
{\
    uint16_t utt = tt;\
    buf_out[0] = 0x12U;\
    buf_out[1] =(utt >> 8 ) & 0xFFU;\
    buf_out[2] =(utt >> 0 ) & 0xFFU;\
    buf_out += ENCODE_TT_SIZE; \
}
#define _01_02_ENCODE_TT(buf_out, tt)\
    ENCODE_TT(buf_out, tt);
// Serialize an AccountID field: header 0x80 + field code (at*), length
// byte 0x14 (20), then the 20-byte account ID.  Advances buf_out.
// NOTE(review): copies via uint64_t/uint32_t loads -- assumes readable,
// suitably aligned 20-byte input (fine in the wasm hook environment).
#define ENCODE_ACCOUNT_SIZE 22
#define ENCODE_ACCOUNT(buf_out, account_id, account_type)\
{\
uint8_t uat = account_type;\
buf_out[0] = 0x80U + uat;\
buf_out[1] = 0x14U;\
*(uint64_t*)(buf_out + 2) = *(uint64_t*)(account_id + 0);\
*(uint64_t*)(buf_out + 10) = *(uint64_t*)(account_id + 8);\
*(uint32_t*)(buf_out + 18) = *(uint32_t*)(account_id + 16);\
buf_out += ENCODE_ACCOUNT_SIZE;\
}
#define _08_XX_ENCODE_ACCOUNT(buf_out, account_id, account_type)\
ENCODE_ACCOUNT(buf_out, account_id, account_type);
#define ENCODE_ACCOUNT_SRC_SIZE 22
#define ENCODE_ACCOUNT_SRC(buf_out, account_id)\
ENCODE_ACCOUNT(buf_out, account_id, atACCOUNT);
#define _08_01_ENCODE_ACCOUNT_SRC(buf_out, account_id)\
ENCODE_ACCOUNT_SRC(buf_out, account_id);
#define ENCODE_ACCOUNT_DST_SIZE 22
#define ENCODE_ACCOUNT_DST(buf_out, account_id)\
ENCODE_ACCOUNT(buf_out, account_id, atDESTINATION);
#define _08_03_ENCODE_ACCOUNT_DST(buf_out, account_id)\
ENCODE_ACCOUNT_DST(buf_out, account_id);
#define ENCODE_ACCOUNT_OWNER_SIZE 22
#define ENCODE_ACCOUNT_OWNER(buf_out, account_id) \
ENCODE_ACCOUNT(buf_out, account_id, atOWNER);
#define _08_02_ENCODE_ACCOUNT_OWNER(buf_out, account_id) \
ENCODE_ACCOUNT_OWNER(buf_out, account_id);
// Common-range UInt32 field (field code < 16): 1 header byte
// (0x20 | code) + 4 big-endian value bytes.  Advances buf_out.
#define ENCODE_UINT32_COMMON_SIZE 5U
#define ENCODE_UINT32_COMMON(buf_out, i, field)\
{\
    uint32_t ui = i; \
    uint8_t uf = field; \
    buf_out[0] = 0x20U +(uf & 0x0FU); \
    buf_out[1] =(ui >> 24 ) & 0xFFU; \
    buf_out[2] =(ui >> 16 ) & 0xFFU; \
    buf_out[3] =(ui >> 8 ) & 0xFFU; \
    buf_out[4] =(ui >> 0 ) & 0xFFU; \
    buf_out += ENCODE_UINT32_COMMON_SIZE; \
}
// BUG FIX: this alias (and _02_XX_ENCODE_UINT32_UNCOMMON below) ended
// with a stray trailing '\', which line-spliced the NEXT '#define' into
// the macro replacement list and broke preprocessing ('#' is not
// followed by a macro parameter).  The trailing backslashes are removed.
#define _02_XX_ENCODE_UINT32_COMMON(buf_out, i, field)\
    ENCODE_UINT32_COMMON(buf_out, i, field)
// Uncommon-range UInt32 field (field code >= 16): 2 header bytes
// (0x20, code) + 4 big-endian value bytes.  Advances buf_out.
#define ENCODE_UINT32_UNCOMMON_SIZE 6U
#define ENCODE_UINT32_UNCOMMON(buf_out, i, field)\
{\
    uint32_t ui = i; \
    uint8_t uf = field; \
    buf_out[0] = 0x20U; \
    buf_out[1] = uf; \
    buf_out[2] =(ui >> 24 ) & 0xFFU; \
    buf_out[3] =(ui >> 16 ) & 0xFFU; \
    buf_out[4] =(ui >> 8 ) & 0xFFU; \
    buf_out[5] =(ui >> 0 ) & 0xFFU; \
    buf_out += ENCODE_UINT32_UNCOMMON_SIZE; \
}
#define _02_XX_ENCODE_UINT32_UNCOMMON(buf_out, i, field)\
    ENCODE_UINT32_UNCOMMON(buf_out, i, field)
// LastLedgerSequence (field code 0x1B) and FirstLedgerSequence (0x1A).
#define ENCODE_LLS_SIZE 6U
#define ENCODE_LLS(buf_out, lls )\
    ENCODE_UINT32_UNCOMMON(buf_out, lls, 0x1B );
#define _02_27_ENCODE_LLS(buf_out, lls )\
    ENCODE_LLS(buf_out, lls );
#define ENCODE_FLS_SIZE 6U
#define ENCODE_FLS(buf_out, fls )\
    ENCODE_UINT32_UNCOMMON(buf_out, fls, 0x1A );
#define _02_26_ENCODE_FLS(buf_out, fls )\
    ENCODE_FLS(buf_out, fls );
// Convenience wrappers over ENCODE_UINT32_COMMON with the fixed field
// codes: SourceTag 0x3, DestinationTag 0xE, Sequence 0x4, Flags 0x2.
#define ENCODE_TAG_SRC_SIZE 5
#define ENCODE_TAG_SRC(buf_out, tag )\
ENCODE_UINT32_COMMON(buf_out, tag, 0x3U );
#define _02_03_ENCODE_TAG_SRC(buf_out, tag )\
ENCODE_TAG_SRC(buf_out, tag );
#define ENCODE_TAG_DST_SIZE 5
#define ENCODE_TAG_DST(buf_out, tag )\
ENCODE_UINT32_COMMON(buf_out, tag, 0xEU );
#define _02_14_ENCODE_TAG_DST(buf_out, tag )\
ENCODE_TAG_DST(buf_out, tag );
#define ENCODE_SEQUENCE_SIZE 5
#define ENCODE_SEQUENCE(buf_out, sequence )\
ENCODE_UINT32_COMMON(buf_out, sequence, 0x4U );
#define _02_04_ENCODE_SEQUENCE(buf_out, sequence )\
ENCODE_SEQUENCE(buf_out, sequence );
#define ENCODE_FLAGS_SIZE 5
#define ENCODE_FLAGS(buf_out, tag )\
ENCODE_UINT32_COMMON(buf_out, tag, 0x2U );
#define _02_02_ENCODE_FLAGS(buf_out, tag )\
ENCODE_FLAGS(buf_out, tag );
// Serialize sfSigningPubKey: header 0x73, length 0x21 (33), then the
// 33-byte public key.  Advances buf_out by ENCODE_SIGNING_PUBKEY_SIZE.
// Assumes pkey points at 33 readable bytes (word-copied, so suitably
// aligned in the wasm hook environment).
// BUG FIX: the final byte was written through an undeclared identifier
// `buf` (a compile error at any use site); it now writes buf_out[34].
#define ENCODE_SIGNING_PUBKEY_SIZE 35
#define ENCODE_SIGNING_PUBKEY(buf_out, pkey )\
{\
    buf_out[0] = 0x73U;\
    buf_out[1] = 0x21U;\
    *(uint64_t*)(buf_out + 2) = *(uint64_t*)(pkey + 0);\
    *(uint64_t*)(buf_out + 10) = *(uint64_t*)(pkey + 8);\
    *(uint64_t*)(buf_out + 18) = *(uint64_t*)(pkey + 16);\
    *(uint64_t*)(buf_out + 26) = *(uint64_t*)(pkey + 24);\
    buf_out[34] = pkey[32];\
    buf_out += ENCODE_SIGNING_PUBKEY_SIZE;\
}
#define _07_03_ENCODE_SIGNING_PUBKEY(buf_out, pkey )\
    ENCODE_SIGNING_PUBKEY(buf_out, pkey );
// Zero-length SigningPubKey (emitted transactions carry no signature):
// header 0x73 followed by length byte 0.
#define ENCODE_SIGNING_PUBKEY_NULL_SIZE 2
#define ENCODE_SIGNING_PUBKEY_NULL(buf_out )\
{\
    *buf_out++ = 0x73U;\
    *buf_out++ = 0x00U;\
}
#define _07_03_ENCODE_SIGNING_PUBKEY_NULL(buf_out )\
    ENCODE_SIGNING_PUBKEY_NULL(buf_out );
// Serialize one entry of the Hooks array for a SetHook transaction:
//   hhash == 0           -> empty hook object (leave that position alone)
//   hhash == 0xFFFFFFFF  -> delete operation (override flag + empty
//                           CreateCode)
//   otherwise            -> install by 32-byte HookHash with the
//                           override flag set
// NOTE(review): `hook0 == 0xFFFFFFFFUL` compares a pointer against an
// integer sentinel -- nonstandard C, relies on the wasm32 toolchain
// accepting it; confirm before porting.
#define _0E_0E_ENCODE_HOOKOBJ(buf_out, hhash)\
{\
uint8_t* hook0 = (hhash);\
*buf_out++ = 0xEEU; /* hook obj start */ \
if (hook0 == 0) /* noop */\
{\
/* do nothing */ \
}\
else\
{\
*buf_out++ = 0x22U; /* flags = override */\
*buf_out++ = 0x00U;\
*buf_out++ = 0x00U;\
*buf_out++ = 0x00U;\
*buf_out++ = 0x01U;\
if (hook0 == 0xFFFFFFFFUL) /* delete operation */ \
{\
*buf_out++ = 0x7BU; /* empty createcode */ \
*buf_out++ = 0x00U;\
}\
else\
{\
*buf_out++ = 0x50U; /* HookHash */\
*buf_out++ = 0x1FU;\
uint64_t* d = (uint64_t*)buf_out;\
uint64_t* s = (uint64_t*)hook0;\
*d++ = *s++;\
*d++ = *s++;\
*d++ = *s++;\
*d++ = *s++;\
buf_out+=32;\
}\
}\
*buf_out++ = 0xE1U;\
}
// Assemble a complete SetHook transaction into buf_out_master for up to
// ten hook positions h[0..9] (each entry interpreted by
// _0E_0E_ENCODE_HOOKOBJ above).  sizeout receives the byte count.
// The Fee field is first encoded as 0 and then backpatched through
// fee_ptr once etxn_fee_base() has measured the assembled transaction.
// FirstLedgerSequence/LastLedgerSequence give a cls+1 .. cls+5 validity
// window around the current ledger.
#define PREPARE_HOOKSET(buf_out_master, maxlen, h, sizeout)\
{\
uint8_t* buf_out = (buf_out_master); \
uint8_t acc[20]; \
uint32_t cls = (uint32_t)ledger_seq(); \
hook_account(SBUF(acc)); \
_01_02_ENCODE_TT (buf_out, ttHOOK_SET ); \
_02_02_ENCODE_FLAGS (buf_out, tfCANONICAL ); \
_02_04_ENCODE_SEQUENCE (buf_out, 0 ); \
_02_26_ENCODE_FLS (buf_out, cls + 1 ); \
_02_27_ENCODE_LLS (buf_out, cls + 5 ); \
uint8_t* fee_ptr = buf_out; \
_06_08_ENCODE_DROPS_FEE (buf_out, 0 ); \
_07_03_ENCODE_SIGNING_PUBKEY_NULL (buf_out ); \
_08_01_ENCODE_ACCOUNT_SRC (buf_out, acc ); \
uint32_t remaining_size = (maxlen) - (buf_out - (buf_out_master)); \
int64_t edlen = etxn_details((uint32_t)buf_out, remaining_size); \
buf_out += edlen; \
*buf_out++ = 0xFBU; /* hook array start */ \
_0E_0E_ENCODE_HOOKOBJ (buf_out, h[0]); \
_0E_0E_ENCODE_HOOKOBJ (buf_out, h[1]); \
_0E_0E_ENCODE_HOOKOBJ (buf_out, h[2]); \
_0E_0E_ENCODE_HOOKOBJ (buf_out, h[3]); \
_0E_0E_ENCODE_HOOKOBJ (buf_out, h[4]); \
_0E_0E_ENCODE_HOOKOBJ (buf_out, h[5]); \
_0E_0E_ENCODE_HOOKOBJ (buf_out, h[6]); \
_0E_0E_ENCODE_HOOKOBJ (buf_out, h[7]); \
_0E_0E_ENCODE_HOOKOBJ (buf_out, h[8]); \
_0E_0E_ENCODE_HOOKOBJ (buf_out, h[9]); \
*buf_out++ = 0xF1U; /* hook array end */ \
sizeout = (buf_out - (buf_out_master)); \
int64_t fee = etxn_fee_base(buf_out_master, sizeout); \
_06_08_ENCODE_DROPS_FEE (fee_ptr, fee ); \
}
// Total serialized size differs because the emit-details blob is larger
// when the hook has a callback; define HAS_CALLBACK before inclusion
// accordingly.
#ifdef HAS_CALLBACK
#define PREPARE_PAYMENT_SIMPLE_SIZE 270U
#else
#define PREPARE_PAYMENT_SIMPLE_SIZE 248U
#endif
// Assemble a complete native-XRP Payment (for emit) into buf_out_master,
// which must hold PREPARE_PAYMENT_SIMPLE_SIZE bytes.  The Fee field is
// written as 0 and backpatched through fee_ptr after etxn_fee_base()
// measures the assembled transaction.
// NOTE(review): edlen is computed but buf_out is not advanced afterwards
// -- harmless here since nothing further is appended past the details.
#define PREPARE_PAYMENT_SIMPLE(buf_out_master, drops_amount_raw, to_address, dest_tag_raw, src_tag_raw)\
{\
uint8_t* buf_out = buf_out_master;\
uint8_t acc[20];\
uint64_t drops_amount = (drops_amount_raw);\
uint32_t dest_tag = (dest_tag_raw);\
uint32_t src_tag = (src_tag_raw);\
uint32_t cls = (uint32_t)ledger_seq();\
hook_account(SBUF(acc));\
_01_02_ENCODE_TT (buf_out, ttPAYMENT ); /* uint16 | size 3 */ \
_02_02_ENCODE_FLAGS (buf_out, tfCANONICAL ); /* uint32 | size 5 */ \
_02_03_ENCODE_TAG_SRC (buf_out, src_tag ); /* uint32 | size 5 */ \
_02_04_ENCODE_SEQUENCE (buf_out, 0 ); /* uint32 | size 5 */ \
_02_14_ENCODE_TAG_DST (buf_out, dest_tag ); /* uint32 | size 5 */ \
_02_26_ENCODE_FLS (buf_out, cls + 1 ); /* uint32 | size 6 */ \
_02_27_ENCODE_LLS (buf_out, cls + 5 ); /* uint32 | size 6 */ \
_06_01_ENCODE_DROPS_AMOUNT (buf_out, drops_amount ); /* amount | size 9 */ \
uint8_t* fee_ptr = buf_out;\
_06_08_ENCODE_DROPS_FEE (buf_out, 0 ); /* amount | size 9 */ \
_07_03_ENCODE_SIGNING_PUBKEY_NULL (buf_out ); /* pk | size 35 */ \
_08_01_ENCODE_ACCOUNT_SRC (buf_out, acc ); /* account | size 22 */ \
_08_03_ENCODE_ACCOUNT_DST (buf_out, to_address ); /* account | size 22 */ \
int64_t edlen = etxn_details((uint32_t)buf_out, PREPARE_PAYMENT_SIMPLE_SIZE); /* emitdet | size 1?? */ \
int64_t fee = etxn_fee_base(buf_out_master, PREPARE_PAYMENT_SIMPLE_SIZE); \
_06_08_ENCODE_DROPS_FEE (fee_ptr, fee ); \
}
// Trust-line (IOU) variant of PREPARE_PAYMENT_SIMPLE: tlamt is the
// 48-byte pre-serialized IOU amount (see ENCODE_TL).  Sizes again depend
// on whether the hook has a callback (larger emit details).
#ifdef HAS_CALLBACK
#define PREPARE_PAYMENT_SIMPLE_TRUSTLINE_SIZE 309
#else
#define PREPARE_PAYMENT_SIMPLE_TRUSTLINE_SIZE 287
#endif
// Assembles the full Payment into buf_out_master; Fee is written as 0
// and backpatched via fee_ptr after etxn_fee_base().
#define PREPARE_PAYMENT_SIMPLE_TRUSTLINE(buf_out_master, tlamt, to_address, dest_tag_raw, src_tag_raw)\
{\
uint8_t* buf_out = buf_out_master;\
uint8_t acc[20];\
uint32_t dest_tag = (dest_tag_raw);\
uint32_t src_tag = (src_tag_raw);\
uint32_t cls = (uint32_t)ledger_seq();\
hook_account(SBUF(acc));\
_01_02_ENCODE_TT (buf_out, ttPAYMENT ); /* uint16 | size 3 */ \
_02_02_ENCODE_FLAGS (buf_out, tfCANONICAL ); /* uint32 | size 5 */ \
_02_03_ENCODE_TAG_SRC (buf_out, src_tag ); /* uint32 | size 5 */ \
_02_04_ENCODE_SEQUENCE (buf_out, 0 ); /* uint32 | size 5 */ \
_02_14_ENCODE_TAG_DST (buf_out, dest_tag ); /* uint32 | size 5 */ \
_02_26_ENCODE_FLS (buf_out, cls + 1 ); /* uint32 | size 6 */ \
_02_27_ENCODE_LLS (buf_out, cls + 5 ); /* uint32 | size 6 */ \
_06_01_ENCODE_TL_AMOUNT (buf_out, tlamt ); /* amount | size 48 */ \
uint8_t* fee_ptr = buf_out;\
_06_08_ENCODE_DROPS_FEE (buf_out, 0 ); /* amount | size 9 */ \
_07_03_ENCODE_SIGNING_PUBKEY_NULL (buf_out ); /* pk | size 35 */ \
_08_01_ENCODE_ACCOUNT_SRC (buf_out, acc ); /* account | size 22 */ \
_08_03_ENCODE_ACCOUNT_DST (buf_out, to_address ); /* account | size 22 */ \
etxn_details((uint32_t)buf_out, PREPARE_PAYMENT_SIMPLE_TRUSTLINE_SIZE); /* emitdet | size 1?? */ \
int64_t fee = etxn_fee_base(buf_out_master, PREPARE_PAYMENT_SIMPLE_TRUSTLINE_SIZE); \
_06_08_ENCODE_DROPS_FEE (fee_ptr, fee ); \
}
// end of HOOKMACROS_INCLUDED guard
#endif

View File

@@ -0,0 +1,215 @@
// For documentation please see: https://xrpl-hooks.readme.io/reference/
// Generated using generate_sfcodes.sh
#define sfCloseResolution ((16U << 16U) + 1U)
#define sfMethod ((16U << 16U) + 2U)
#define sfTransactionResult ((16U << 16U) + 3U)
#define sfTickSize ((16U << 16U) + 16U)
#define sfUNLModifyDisabling ((16U << 16U) + 17U)
#define sfHookResult ((16U << 16U) + 18U)
#define sfLedgerEntryType ((1U << 16U) + 1U)
#define sfTransactionType ((1U << 16U) + 2U)
#define sfSignerWeight ((1U << 16U) + 3U)
#define sfTransferFee ((1U << 16U) + 4U)
#define sfVersion ((1U << 16U) + 16U)
#define sfHookStateChangeCount ((1U << 16U) + 17U)
#define sfHookEmitCount ((1U << 16U) + 18U)
#define sfHookExecutionIndex ((1U << 16U) + 19U)
#define sfHookApiVersion ((1U << 16U) + 20U)
#define sfNetworkID ((2U << 16U) + 1U)
#define sfFlags ((2U << 16U) + 2U)
#define sfSourceTag ((2U << 16U) + 3U)
#define sfSequence ((2U << 16U) + 4U)
#define sfPreviousTxnLgrSeq ((2U << 16U) + 5U)
#define sfLedgerSequence ((2U << 16U) + 6U)
#define sfCloseTime ((2U << 16U) + 7U)
#define sfParentCloseTime ((2U << 16U) + 8U)
#define sfSigningTime ((2U << 16U) + 9U)
#define sfExpiration ((2U << 16U) + 10U)
#define sfTransferRate ((2U << 16U) + 11U)
#define sfWalletSize ((2U << 16U) + 12U)
#define sfOwnerCount ((2U << 16U) + 13U)
#define sfDestinationTag ((2U << 16U) + 14U)
#define sfHighQualityIn ((2U << 16U) + 16U)
#define sfHighQualityOut ((2U << 16U) + 17U)
#define sfLowQualityIn ((2U << 16U) + 18U)
#define sfLowQualityOut ((2U << 16U) + 19U)
#define sfQualityIn ((2U << 16U) + 20U)
#define sfQualityOut ((2U << 16U) + 21U)
#define sfStampEscrow ((2U << 16U) + 22U)
#define sfBondAmount ((2U << 16U) + 23U)
#define sfLoadFee ((2U << 16U) + 24U)
#define sfOfferSequence ((2U << 16U) + 25U)
#define sfFirstLedgerSequence ((2U << 16U) + 26U)
#define sfLastLedgerSequence ((2U << 16U) + 27U)
#define sfTransactionIndex ((2U << 16U) + 28U)
#define sfOperationLimit ((2U << 16U) + 29U)
#define sfReferenceFeeUnits ((2U << 16U) + 30U)
#define sfReserveBase ((2U << 16U) + 31U)
#define sfReserveIncrement ((2U << 16U) + 32U)
#define sfSetFlag ((2U << 16U) + 33U)
#define sfClearFlag ((2U << 16U) + 34U)
#define sfSignerQuorum ((2U << 16U) + 35U)
#define sfCancelAfter ((2U << 16U) + 36U)
#define sfFinishAfter ((2U << 16U) + 37U)
#define sfSignerListID ((2U << 16U) + 38U)
#define sfSettleDelay ((2U << 16U) + 39U)
#define sfTicketCount ((2U << 16U) + 40U)
#define sfTicketSequence ((2U << 16U) + 41U)
#define sfNFTokenTaxon ((2U << 16U) + 42U)
#define sfMintedNFTokens ((2U << 16U) + 43U)
#define sfBurnedNFTokens ((2U << 16U) + 44U)
#define sfHookStateCount ((2U << 16U) + 45U)
#define sfEmitGeneration ((2U << 16U) + 46U)
#define sfLockCount ((2U << 16U) + 47U)
#define sfRewardTime ((2U << 16U) + 98U)
#define sfRewardLgrFirst ((2U << 16U) + 99U)
#define sfRewardLgrLast ((2U << 16U) + 100U)
#define sfIndexNext ((3U << 16U) + 1U)
#define sfIndexPrevious ((3U << 16U) + 2U)
#define sfBookNode ((3U << 16U) + 3U)
#define sfOwnerNode ((3U << 16U) + 4U)
#define sfBaseFee ((3U << 16U) + 5U)
#define sfExchangeRate ((3U << 16U) + 6U)
#define sfLowNode ((3U << 16U) + 7U)
#define sfHighNode ((3U << 16U) + 8U)
#define sfDestinationNode ((3U << 16U) + 9U)
#define sfCookie ((3U << 16U) + 10U)
#define sfServerVersion ((3U << 16U) + 11U)
#define sfNFTokenOfferNode ((3U << 16U) + 12U)
#define sfEmitBurden ((3U << 16U) + 13U)
#define sfHookInstructionCount ((3U << 16U) + 17U)
#define sfHookReturnCode ((3U << 16U) + 18U)
#define sfReferenceCount ((3U << 16U) + 19U)
#define sfRewardAccumulator ((3U << 16U) + 100U)
#define sfEmailHash ((4U << 16U) + 1U)
#define sfTakerPaysCurrency ((10U << 16U) + 1U)
#define sfTakerPaysIssuer ((10U << 16U) + 2U)
#define sfTakerGetsCurrency ((10U << 16U) + 3U)
#define sfTakerGetsIssuer ((10U << 16U) + 4U)
#define sfLedgerHash ((5U << 16U) + 1U)
#define sfParentHash ((5U << 16U) + 2U)
#define sfTransactionHash ((5U << 16U) + 3U)
#define sfAccountHash ((5U << 16U) + 4U)
#define sfPreviousTxnID ((5U << 16U) + 5U)
#define sfLedgerIndex ((5U << 16U) + 6U)
#define sfWalletLocator ((5U << 16U) + 7U)
#define sfRootIndex ((5U << 16U) + 8U)
#define sfAccountTxnID ((5U << 16U) + 9U)
#define sfNFTokenID ((5U << 16U) + 10U)
#define sfEmitParentTxnID ((5U << 16U) + 11U)
#define sfEmitNonce ((5U << 16U) + 12U)
#define sfEmitHookHash ((5U << 16U) + 13U)
#define sfBookDirectory ((5U << 16U) + 16U)
#define sfInvoiceID ((5U << 16U) + 17U)
#define sfNickname ((5U << 16U) + 18U)
#define sfAmendment ((5U << 16U) + 19U)
#define sfHookOn ((5U << 16U) + 20U)
#define sfDigest ((5U << 16U) + 21U)
#define sfChannel ((5U << 16U) + 22U)
#define sfConsensusHash ((5U << 16U) + 23U)
#define sfCheckID ((5U << 16U) + 24U)
#define sfValidatedHash ((5U << 16U) + 25U)
#define sfPreviousPageMin ((5U << 16U) + 26U)
#define sfNextPageMin ((5U << 16U) + 27U)
#define sfNFTokenBuyOffer ((5U << 16U) + 28U)
#define sfNFTokenSellOffer ((5U << 16U) + 29U)
#define sfHookStateKey ((5U << 16U) + 30U)
#define sfHookHash ((5U << 16U) + 31U)
#define sfHookNamespace ((5U << 16U) + 32U)
#define sfHookSetTxnID ((5U << 16U) + 33U)
#define sfOfferID ((5U << 16U) + 34U)
#define sfEscrowID ((5U << 16U) + 35U)
#define sfURITokenID ((5U << 16U) + 36U)
#define sfAmount ((6U << 16U) + 1U)
#define sfBalance ((6U << 16U) + 2U)
#define sfLimitAmount ((6U << 16U) + 3U)
#define sfTakerPays ((6U << 16U) + 4U)
#define sfTakerGets ((6U << 16U) + 5U)
#define sfLowLimit ((6U << 16U) + 6U)
#define sfHighLimit ((6U << 16U) + 7U)
#define sfFee ((6U << 16U) + 8U)
#define sfSendMax ((6U << 16U) + 9U)
#define sfDeliverMin ((6U << 16U) + 10U)
#define sfMinimumOffer ((6U << 16U) + 16U)
#define sfRippleEscrow ((6U << 16U) + 17U)
#define sfDeliveredAmount ((6U << 16U) + 18U)
#define sfNFTokenBrokerFee ((6U << 16U) + 19U)
#define sfHookCallbackFee ((6U << 16U) + 20U)
#define sfLockedBalance ((6U << 16U) + 21U)
#define sfPublicKey ((7U << 16U) + 1U)
#define sfMessageKey ((7U << 16U) + 2U)
#define sfSigningPubKey ((7U << 16U) + 3U)
#define sfTxnSignature ((7U << 16U) + 4U)
#define sfURI ((7U << 16U) + 5U)
#define sfSignature ((7U << 16U) + 6U)
#define sfDomain ((7U << 16U) + 7U)
#define sfFundCode ((7U << 16U) + 8U)
#define sfRemoveCode ((7U << 16U) + 9U)
#define sfExpireCode ((7U << 16U) + 10U)
#define sfCreateCode ((7U << 16U) + 11U)
#define sfMemoType ((7U << 16U) + 12U)
#define sfMemoData ((7U << 16U) + 13U)
#define sfMemoFormat ((7U << 16U) + 14U)
#define sfFulfillment ((7U << 16U) + 16U)
#define sfCondition ((7U << 16U) + 17U)
#define sfMasterSignature ((7U << 16U) + 18U)
#define sfUNLModifyValidator ((7U << 16U) + 19U)
#define sfValidatorToDisable ((7U << 16U) + 20U)
#define sfValidatorToReEnable ((7U << 16U) + 21U)
#define sfHookStateData ((7U << 16U) + 22U)
#define sfHookReturnString ((7U << 16U) + 23U)
#define sfHookParameterName ((7U << 16U) + 24U)
#define sfHookParameterValue ((7U << 16U) + 25U)
#define sfBlob ((7U << 16U) + 26U)
#define sfAccount ((8U << 16U) + 1U)
#define sfOwner ((8U << 16U) + 2U)
#define sfDestination ((8U << 16U) + 3U)
#define sfIssuer ((8U << 16U) + 4U)
#define sfAuthorize ((8U << 16U) + 5U)
#define sfUnauthorize ((8U << 16U) + 6U)
#define sfRegularKey ((8U << 16U) + 8U)
#define sfNFTokenMinter ((8U << 16U) + 9U)
#define sfEmitCallback ((8U << 16U) + 10U)
#define sfHookAccount ((8U << 16U) + 16U)
#define sfIndexes ((19U << 16U) + 1U)
#define sfHashes ((19U << 16U) + 2U)
#define sfAmendments ((19U << 16U) + 3U)
#define sfNFTokenOffers ((19U << 16U) + 4U)
#define sfHookNamespaces ((19U << 16U) + 5U)
#define sfPaths ((18U << 16U) + 1U)
#define sfTransactionMetaData ((14U << 16U) + 2U)
#define sfCreatedNode ((14U << 16U) + 3U)
#define sfDeletedNode ((14U << 16U) + 4U)
#define sfModifiedNode ((14U << 16U) + 5U)
#define sfPreviousFields ((14U << 16U) + 6U)
#define sfFinalFields ((14U << 16U) + 7U)
#define sfNewFields ((14U << 16U) + 8U)
#define sfTemplateEntry ((14U << 16U) + 9U)
#define sfMemo ((14U << 16U) + 10U)
#define sfSignerEntry ((14U << 16U) + 11U)
#define sfNFToken ((14U << 16U) + 12U)
#define sfEmitDetails ((14U << 16U) + 13U)
#define sfHook ((14U << 16U) + 14U)
#define sfSigner ((14U << 16U) + 16U)
#define sfMajority ((14U << 16U) + 18U)
#define sfDisabledValidator ((14U << 16U) + 19U)
#define sfEmittedTxn ((14U << 16U) + 20U)
#define sfHookExecution ((14U << 16U) + 21U)
#define sfHookDefinition ((14U << 16U) + 22U)
#define sfHookParameter ((14U << 16U) + 23U)
#define sfHookGrant ((14U << 16U) + 24U)
#define sfSigners ((15U << 16U) + 3U)
#define sfSignerEntries ((15U << 16U) + 4U)
#define sfTemplate ((15U << 16U) + 5U)
#define sfNecessary ((15U << 16U) + 6U)
#define sfSufficient ((15U << 16U) + 7U)
#define sfAffectedNodes ((15U << 16U) + 8U)
#define sfMemos ((15U << 16U) + 9U)
#define sfNFTokens ((15U << 16U) + 10U)
#define sfHooks ((15U << 16U) + 11U)
#define sfMajorities ((15U << 16U) + 16U)
#define sfDisabledValidators ((15U << 16U) + 17U)
#define sfHookExecutions ((15U << 16U) + 18U)
#define sfHookParameters ((15U << 16U) + 19U)
#define sfHookGrants ((15U << 16U) + 20U)
#define sfActiveValidators ((15U << 16U) + 95U)

View File

@@ -0,0 +1,239 @@
#include <stdint.h>
// 8-bit int = 1 byte
#define SFL_CLOSERESOLUTION 1
#define SFL_METHOD 1
#define SFL_TRANSACTIONRESULT 1
#define SFL_TICKSIZE 1
#define SFL_UNLMODIFYDISABLING 1
#define SFL_HOOKRESULT 1
// 16-bit int = 2 bytes
#define SFL_LEDGERENTRYTYPE 2
#define SFL_TRANSACTIONTYPE 2
#define SFL_SIGNERWEIGHT 2
#define SFL_TRANSFERFEE 2
#define SFL_VERSION 2
#define SFL_HOOKSTATECHANGECOUNT 2
#define SFL_HOOKEMITCOUNT 2
#define SFL_HOOKEXECUTIONINDEX 2
#define SFL_HOOKAPIVERSION 2
// 32-bit int = 4 bytes
#define SFL_NETWORKID 4
#define SFL_FLAGS 4
#define SFL_SOURCETAG 4
#define SFL_SEQUENCE 4
#define SFL_PREVIOUSTXNLGRSEQ 4
#define SFL_LEDGERSEQUENCE 4
#define SFL_CLOSETIME 4
#define SFL_PARENTCLOSETIME 4
#define SFL_SIGNINGTIME 4
#define SFL_EXPIRATION 4
#define SFL_TRANSFERRATE 4
#define SFL_WALLETSIZE 4
#define SFL_OWNERCOUNT 4
#define SFL_DESTINATIONTAG 4
#define SFL_HIGHQUALITYIN 4
#define SFL_HIGHQUALITYOUT 4
#define SFL_LOWQUALITYIN 4
#define SFL_LOWQUALITYOUT 4
#define SFL_QUALITYIN 4
#define SFL_QUALITYOUT 4
#define SFL_STAMPESCROW 4
#define SFL_BONDAMOUNT 4
#define SFL_LOADFEE 4
#define SFL_OFFERSEQUENCE 4
#define SFL_FIRSTLEDGERSEQUENCE 4
#define SFL_LASTLEDGERSEQUENCE 4
#define SFL_TRANSACTIONINDEX 4
#define SFL_OPERATIONLIMIT 4
#define SFL_REFERENCEFEEUNITS 4
#define SFL_RESERVEBASE 4
#define SFL_RESERVEINCREMENT 4
#define SFL_SETFLAG 4
#define SFL_CLEARFLAG 4
#define SFL_SIGNERQUORUM 4
#define SFL_CANCELAFTER 4
#define SFL_FINISHAFTER 4
#define SFL_SIGNERLISTID 4
#define SFL_SETTLEDELAY 4
#define SFL_TICKETCOUNT 4
#define SFL_TICKETSEQUENCE 4
#define SFL_NFTOKENTAXON 4
#define SFL_MINTEDNFTOKENS 4
#define SFL_BURNEDNFTOKENS 4
#define SFL_HOOKSTATECOUNT 4
#define SFL_EMITGENERATION 4
#define SFL_LOCKCOUNT 4
#define SFL_REWARDTIME 4
#define SFL_REWARDLGRFIRST 4
#define SFL_REWARDLGRLAST 4
#define SFL_FIRSTNFTOKENSEQUENCE 4
// 64 byte-int = 8 bytes
#define SFL_INDEX_NEXT 8
#define SFL_INDEX_PREVIOUS 8
#define SFL_BOOK_NODE 8
#define SFL_OWNER_NODE 8
#define SFL_BASE_FEE 8
#define SFL_EXCHANGE_RATE 8
#define SFL_LOW_NODE 8
#define SFL_HIGH_NODE 8
#define SFL_DESTINATION_NODE 8
#define SFL_COOKIE 8
#define SFL_SERVER_VERSION 8
#define SFL_EMIT_BURDEN 8
#define SFL_NFTOKEN_OFFER_NODE 8
#define SFL_HOOK_INSTRUCTION_COUNT 8
#define SFL_HOOK_RETURN_CODE 8
#define SFL_REFERENCE_COUNT 8
#define SFL_REWARD_ACCUMULATOR 8
// 128 byte-int = 4 bytes
#define SFL_EMAIL_HASH 128
// 160 byte-int = 4 bytes
#define SFL_TAKER_PAYS_CURRENCY 160
#define SFL_TAKER_PAYS_ISSUER 160
#define SFL_TAKER_GETS_CURRENCY 160
#define SFL_TAKER_GETS_ISSUER 160
// 256 byte-int = ??? bytes
#define SFL_LEDGER_HASH 256
#define SFL_PARENT_HASH 256
#define SFL_TRANSACTION_HASH 256
#define SFL_ACCOUNT_HASH 256
#define SFL_HOOK_ON 256
#define SFL_PREVIOUS_TXN_ID 256
#define SFL_LEDGER_INDEX 256
#define SFL_WALLET_LOCATOR 256
#define SFL_ROOT_INDEX 256
#define SFL_ACCOUNT_TXN_ID 256
#define SFL_NFTOKEN_ID 256
#define SFL_EMIT_PARENT_TXN_ID 256
#define SFL_EMIT_NONCE 256
#define SFL_EMIT_HOOK_HASH 256
// 256 byte-int = ??? bytes
#define SFL_BOOK_DIRECTORY 256
#define SFL_INVOICE_ID 256
#define SFL_NICKNAME 256
#define SFL_AMENDMENT 256
#define SFL_DIGEST 256
#define SFL_CHANNEL 256
#define SFL_CONSENSUS_HASH 256
#define SFL_CHECK_ID 256
#define SFL_VALIDATED_HASH 256
#define SFL_PREVIOUS_PAGE_MIN 256
#define SFL_NEXT_PAGE_MIN 256
#define SFL_NFTOKEN_BUY_OFFER 256
#define SFL_NFTOKEN_SELL_OFFER 256
#define SFL_HOOK_STATE_KEY 256
#define SFL_HOOK_HASH 256
#define SFL_HOOK_NAMESPACE 256
#define SFL_HOOK_SET_TXN_ID 256
#define SFL_OFFER_ID 256
#define SFL_ESCROW_ID 256
#define SFL_URITOKEN_ID 256
// 20 bytes
#define SFL_AMOUNT 20
#define SFL_BALANCE 20
#define SFL_LIMIT_AMOUNT 20
#define SFL_TAKER_PAYS 20
#define SFL_TAKER_GETS 20
#define SFL_LOW_LIMIT 20
#define SFL_HIGH_LIMIT 20
#define SFL_FEE 20
#define SFL_SEND_MAX 20
#define SFL_DELIVER_MIN 20
#define SFL_LOCKED_BALANCE 20
// Unimplemented
#define SFL_AMOUNT_MINIMUM_OFFER 8
#define SFL_AMOUNT_RIPPLE_ESCROW 8
#define SFL_AMOUNT_DELIVERED_AMOUNT 8
#define SFL_AMOUNT_NFTOKEN_BROKER_FEE 8
#define SFL_AMOUNT_HOOK_CALLBACK_FEE 8
#define SFL_AMOUNT_BASE_FEE_DROPS 8
#define SFL_AMOUNT_RESERVE_BASE_DROPS 8
#define SFL_AMOUNT_RESERVE_INCREMENT_DROPS 8
// Unimplemented
#define SFL_VL_PUBLIC_KEY 64
#define SFL_VL_MESSAGE_KEY 64
#define SFL_VL_SIGNING_PUB_KEY 64
// Unimplemented
#define SFL_VL_TXN_SIGNATURE 96
// Unimplemented
#define SFL_VL_URI 256
// Unimplemented
#define SFL_VL_SIGNATURE 96
// Unimplemented
#define SFL_VL_DOMAIN 256
#define SFL_VL_FUND_CODE 256
#define SFL_VL_REMOVE_CODE 256
#define SFL_VL_EXPIRE_CODE 256
#define SFL_VL_CREATE_CODE 256
#define SFL_VL_MEMO_TYPE 256
#define SFL_VL_MEMO_DATA 256
#define SFL_VL_MEMO_FORMAT 256
#define SFL_VL_FULFILLMENT 256
#define SFL_VL_CONDITION 256
// Unimplemented
#define SFL_VL_MASTER_SIGNATURE 96
// Unimplemented
#define SFL_VL_UNL_MODIFY_VALIDATOR 256
#define SFL_VL_VALIDATOR_TO_DISABLE 256
#define SFL_VL_VALIDATOR_TO_RE_ENABLE 256
#define SFL_VL_HOOK_STATE_DATA 256
#define SFL_VL_HOOK_RETURN_STRING 256
#define SFL_VL_HOOK_PARAMETER_NAME 256
#define SFL_VL_HOOK_PARAMETER_VALUE 256
#define SFL_VL_BLOB 256
// 20 bytes
#define SFL_ACCOUNT 20
#define SFL_OWNER 20
#define SFL_DESTINATION 20
#define SFL_ISSUER 20
#define SFL_AUTHORIZE 20
#define SFL_UNAUTHORIZE 20
#define SFL_REGULAR_KEY 20
#define SFL_NFTOKEN_MINTER 20
#define SFL_EMIT_CALLBACK 20
#define SFL_HOOK_ACCOUNT 20
#define SFL_NFTOKEN_MINTER 20
// Unimplemented
#define SFL_PATHS 1
// Unimplemented
#define SFL_VECTOR256_INDEXES 32
#define SFL_VECTOR256_HASHES 32
#define SFL_VECTOR256_AMENDMENTS 32
#define SFL_VECTOR256_NFTOKEN_OFFERS 32
#define SFL_VECTOR256_HOOK_NAMESPACES 32
// Unimplemented
#define SFL_TRANSACTION_META_DATA 1
#define SFL_CREATED_NODE 1
#define SFL_DELETED_NODE 1
#define SFL_MODIFIED_NODE 1
#define SFL_PREVIOUS_FIELDS 1
#define SFL_FINAL_FIELDS 1
#define SFL_NEW_FIELDS 1
#define SFL_TEMPLATE_ENTRY 1
#define SFL_MEMO 1
#define SFL_SIGNER_ENTRY 1
#define SFL_NFTOKEN 1
#define SFL_EMIT_DETAILS 1
#define SFL_HOOK 1
#define SFL_SIGNER 1
#define SFL_MAJORITY 1
#define SFL_DISABLED_VALIDATOR 1
#define SFL_EMITTED_TXN 1
#define SFL_HOOK_EXECUTION 1
#define SFL_HOOK_DEFINITION 1
#define SFL_HOOK_PARAMETER 1
#define SFL_HOOK_GRANT 1
#define SFL_SIGNERS 1
#define SFL_SIGNER_ENTRIES 1
#define SFL_TEMPLATE 1
#define SFL_NECESSARY 1
#define SFL_SUFFICIENT 1
#define SFL_AFFECTED_NODES 1
#define SFL_MEMOS 1
#define SFL_NFTOKENS 1
#define SFL_HOOKS 1
#define SFL_MAJORITIES 1
#define SFL_DISABLED_VALIDATORS 1
#define SFL_HOOK_EXECUTIONS 1
#define SFL_HOOK_EXECUTION 1

View File

@@ -1,9 +1,9 @@
all: reward govern mint
accept:
wasmcc accept.c -o accept.wasm -Oz -Wl,--allow-undefined -I../
wasmcc accept.c -o accept.wasm -Oz -Wl,--allow-undefined -I./headers
hook-cleaner accept.wasm
reward:
wasmcc reward.c -o reward.wasm -Oz -Wl,--allow-undefined -I../
wasmcc reward.c -o reward.wasm -Oz -Wl,--allow-undefined -I./headers
wasm-opt reward.wasm -o reward.wasm \
--shrink-level=100000000 \
--coalesce-locals-learning \
@@ -58,7 +58,7 @@ reward:
hook-cleaner reward.wasm
guard_checker reward.wasm
govern:
wasmcc govern.c -o govern.wasm -Oz -Wl,--allow-undefined -I../
wasmcc govern.c -o govern.wasm -Oz -Wl,--allow-undefined -I./headers
wasm-opt govern.wasm -o govern.wasm \
--shrink-level=100000000 \
--coalesce-locals-learning \
@@ -113,7 +113,7 @@ govern:
hook-cleaner govern.wasm
guard_checker govern.wasm
mint:
wasmcc mint.c -o mint.wasm -Oz -Wl,--allow-undefined -I../
wasmcc mint.c -o mint.wasm -Oz -Wl,--allow-undefined -I./headers
wasm-opt mint.wasm -o mint.wasm \
--shrink-level=100000000 \
--coalesce-locals-learning \
@@ -142,5 +142,5 @@ mint:
hook-cleaner mint.wasm
guard_checker mint.wasm
nftoken:
wasmcc nftoken.c -o nftoken.wasm -Oz -Wl,--allow-undefined -I../
wasmcc nftoken.c -o nftoken.wasm -Oz -Wl,--allow-undefined -I./headers
hook-cleaner nftoken.wasm

View File

@@ -9,7 +9,6 @@
#define sfUNLModifyDisabling ((16U << 16U) + 17U)
#define sfHookResult ((16U << 16U) + 18U)
#define sfWasLockingChainSend ((16U << 16U) + 19U)
#define sfSidecarType ((16U << 16U) + 20U)
#define sfLedgerEntryType ((1U << 16U) + 1U)
#define sfTransactionType ((1U << 16U) + 2U)
#define sfSignerWeight ((1U << 16U) + 3U)
@@ -23,8 +22,6 @@
#define sfHookApiVersion ((1U << 16U) + 20U)
#define sfHookStateScale ((1U << 16U) + 21U)
#define sfLedgerFixType ((1U << 16U) + 22U)
#define sfHookExportCount ((1U << 16U) + 98U)
#define sfEntropyCount ((1U << 16U) + 99U)
#define sfNetworkID ((2U << 16U) + 1U)
#define sfFlags ((2U << 16U) + 2U)
#define sfSourceTag ((2U << 16U) + 3U)
@@ -83,7 +80,6 @@
#define sfRewardTime ((2U << 16U) + 98U)
#define sfRewardLgrFirst ((2U << 16U) + 99U)
#define sfRewardLgrLast ((2U << 16U) + 100U)
#define sfCancelTicketSequence ((2U << 16U) + 101U)
#define sfIndexNext ((3U << 16U) + 1U)
#define sfIndexPrevious ((3U << 16U) + 2U)
#define sfBookNode ((3U << 16U) + 3U)
@@ -163,7 +159,6 @@
#define sfEmittedTxnID ((5U << 16U) + 97U)
#define sfGovernanceMarks ((5U << 16U) + 98U)
#define sfGovernanceFlags ((5U << 16U) + 99U)
#define sfEntropyDigest ((5U << 16U) + 100U)
#define sfNumber ((9U << 16U) + 1U)
#define sfAmount ((6U << 16U) + 1U)
#define sfBalance ((6U << 16U) + 2U)
@@ -291,7 +286,6 @@
#define sfXChainCreateAccountAttestationCollectionElement ((14U << 16U) + 31U)
#define sfPriceData ((14U << 16U) + 32U)
#define sfCredential ((14U << 16U) + 33U)
#define sfExportedTxn ((14U << 16U) + 90U)
#define sfAmountEntry ((14U << 16U) + 91U)
#define sfMintURIToken ((14U << 16U) + 92U)
#define sfHookEmission ((14U << 16U) + 93U)
@@ -299,7 +293,6 @@
#define sfActiveValidator ((14U << 16U) + 95U)
#define sfGenesisMint ((14U << 16U) + 96U)
#define sfRemark ((14U << 16U) + 97U)
#define sfExportResult ((14U << 16U) + 98U)
#define sfSigners ((15U << 16U) + 3U)
#define sfSignerEntries ((15U << 16U) + 4U)
#define sfTemplate ((15U << 16U) + 5U)

View File

@@ -61,7 +61,6 @@
#define ttNFTOKEN_MODIFY 70
#define ttPERMISSIONED_DOMAIN_SET 71
#define ttPERMISSIONED_DOMAIN_DELETE 72
#define ttEXPORT 91
#define ttCRON 92
#define ttCRON_SET 93
#define ttREMARKS_SET 94
@@ -75,4 +74,3 @@
#define ttUNL_MODIFY 102
#define ttEMIT_FAILURE 103
#define ttUNL_REPORT 104
#define ttCONSENSUS_ENTROPY 105

View File

@@ -15,8 +15,6 @@
#define uint256 std::string
#define featureHooksUpdate1 "1"
#define featureHooksUpdate2 "1"
#define featureExport "1"
#define featureConsensusEntropy "1"
#define fix20250131 "1"
namespace hook_api {
struct Rules
@@ -385,10 +383,7 @@ enum hook_return_code : int64_t {
MEM_OVERLAP = -43, // one or more specified buffers are the same memory
TOO_MANY_STATE_MODIFICATIONS = -44, // more than 5000 modified state
// entires in the combined hook chains
TOO_MANY_NAMESPACES = -45,
EXPORT_FAILURE = -46,
TOO_MANY_EXPORTED_TXN = -47,
TOO_LITTLE_ENTROPY = -48,
TOO_MANY_NAMESPACES = -45
};
enum ExitType : uint8_t {
@@ -402,7 +397,6 @@ const uint16_t max_state_modifications = 256;
const uint8_t max_slots = 255;
const uint8_t max_nonce = 255;
const uint8_t max_emit = 255;
const uint8_t max_export = 2;
const uint8_t max_params = 16;
const double fee_base_multiplier = 1.1f;
@@ -443,6 +437,10 @@ getImportWhitelist(Rules const& rules)
return whitelist;
}
#undef HOOK_API_DEFINITION
#undef I32
#undef I64
enum GuardRulesVersion : uint64_t {
GuardRuleFix20250131 = 0x00000001,
};

View File

@@ -372,28 +372,3 @@ HOOK_API_DEFINITION(
HOOK_API_DEFINITION(
int64_t, prepare, (uint32_t, uint32_t, uint32_t, uint32_t),
featureHooksUpdate2)
// int64_t xport_reserve(uint32_t count);
HOOK_API_DEFINITION(
int64_t, xport_reserve, (uint32_t),
featureExport)
// int64_t xport(uint32_t write_ptr, uint32_t write_len, uint32_t read_ptr, uint32_t read_len);
HOOK_API_DEFINITION(
int64_t, xport, (uint32_t, uint32_t, uint32_t, uint32_t),
featureExport)
// int64_t xport_cancel(uint32_t ticket_seq);
HOOK_API_DEFINITION(
int64_t, xport_cancel, (uint32_t),
featureExport)
// int64_t dice(uint32_t sides);
HOOK_API_DEFINITION(
int64_t, dice, (uint32_t),
featureConsensusEntropy)
// int64_t random(uint32_t write_ptr, uint32_t write_len);
HOOK_API_DEFINITION(
int64_t, random, (uint32_t, uint32_t),
featureConsensusEntropy)

File diff suppressed because it is too large Load Diff

View File

@@ -1,2 +0,0 @@
---
DisableFormat: true

View File

@@ -166,14 +166,6 @@ message TMProposeSet
// Number of hops traveled
optional uint32 hops = 12 [deprecated=true];
// Export signatures for pending exports seen in the proposal set. The
// proposal's ExtendedPosition includes a digest of this repeated field, so
// these side-channel blobs are covered by the proposal signature.
// Each entry is: txnHash (32 bytes) + validator pubkey (33 bytes)
// + multisign signature (variable length). Validators attach these
// so export quorum can be reached within the same consensus round.
repeated bytes exportSignatures = 13;
}
enum TxSetStatus
@@ -392,3 +384,4 @@ message TMHaveTransactions
{
repeated bytes hashes = 1;
}

View File

@@ -1,33 +0,0 @@
#ifndef RIPPLE_PROTOCOL_EXPORT_LIMITS_H_INCLUDED
#define RIPPLE_PROTOCOL_EXPORT_LIMITS_H_INCLUDED
#include <cstdint>
namespace ripple {
// Export system caps.
//
// These limits bound the DoS surface of the export signature system:
// - Each pending export requires every validator to sign it every round
// (sign-once, attach once via TMProposeSet)
// - Inbound signature processing involves crypto verification per sig
// - The open-ledger cap (maxPendingExports) is the root constraint;
// signing throughput and inbound processing are transitively bounded by it
struct ExportLimits
{
// Maximum exports a single hook execution may produce
// (also enforced by hook_api::max_export in Enum.h)
static constexpr std::uint8_t maxExportsPerHook = 2;
// Maximum pending export transactions in an open/apply ledger.
// Hook-emitted export backlog drains into the open ledger at this cap.
// This transitively caps:
// - signatures per TMProposeSet message (1 per pending export)
// - inbound proposal signature processing (clamped to this)
// - validator signing work per round
static constexpr std::uint8_t maxPendingExports = 8;
};
} // namespace ripple
#endif

View File

@@ -80,7 +80,7 @@ namespace detail {
// Feature.cpp. Because it's only used to reserve storage, and determine how
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
// the actual number of amendments. A LogicError on startup will verify this.
static constexpr std::size_t numFeatures = 115;
static constexpr std::size_t numFeatures = 113;
/** Amendments that this server supports and the default voting behavior.
Whether they are enabled depends on the Rules defined in the validated

View File

@@ -96,9 +96,6 @@ enum class HashPrefix : std::uint32_t {
/** Credentials signature */
credential = detail::make_hash_prefix('C', 'R', 'D'),
/** consensus extension sidecar object */
sidecar = detail::make_hash_prefix('S', 'C', 'R'),
};
template <class Hasher>

View File

@@ -62,9 +62,6 @@ emittedDir() noexcept;
Keylet
emittedTxn(uint256 const& id) noexcept;
Keylet
shadowTicket(AccountID const& account, std::uint32_t ticketSeq) noexcept;
Keylet
hookDefinition(uint256 const& hash) noexcept;
@@ -121,10 +118,6 @@ negativeUNL() noexcept;
Keylet const&
UNLReport() noexcept;
/** The (fixed) index of the object containing consensus-derived entropy. */
Keylet const&
consensusEntropy() noexcept;
/** The beginning of an order book */
struct book_t
{

View File

@@ -1,21 +0,0 @@
#ifndef RIPPLE_PROTOCOL_SIDECAR_TYPE_H_INCLUDED
#define RIPPLE_PROTOCOL_SIDECAR_TYPE_H_INCLUDED
#include <cstdint>
namespace ripple {
/// Discriminator for sidecar set entries (SHAMap leaves used for
/// consensus extension data: RNG commit/reveal, export signatures).
///
/// Stored in sfSidecarType (UINT8) on each STObject entry.
/// Makes sidecar sets self-describing — no content-sniffing needed.
enum SidecarType : std::uint8_t {
sidecarRngCommit = 1,
sidecarRngReveal = 2,
sidecarExportSig = 3,
};
} // namespace ripple
#endif

View File

@@ -68,7 +68,6 @@ enum TELcodes : TERUnderlyingType {
telNON_LOCAL_EMITTED_TXN,
telIMPORT_VL_KEY_NOT_RECOGNISED,
telCAN_NOT_QUEUE_IMPORT,
telSHADOW_TICKET_REQUIRED,
telENV_RPC_FAILED,
};
@@ -234,10 +233,8 @@ enum TERcodes : TERUnderlyingType {
terQUEUED, // Transaction is being held in TxQ until fee drops
terPRE_TICKET, // Ticket is not yet in ledger but might be on its way
terNO_AMM, // AMM doesn't exist for the asset pair
terNO_HOOK, // Transaction requires a non-existent hook definition
terNO_HOOK // Transaction requires a non-existent hook definition
// (referenced by sfHookHash)
terRETRY_EXPORT // Export does not yet have enough validator signatures.
// Retained in retriable set for next ledger.
};
//------------------------------------------------------------------------------
@@ -365,7 +362,6 @@ enum TECcodes : TERUnderlyingType {
tecARRAY_TOO_LARGE = 197,
tecLOCKED = 198,
tecBAD_CREDENTIALS = 199,
tecEXPORT_EXPIRED = 200,
tecLAST_POSSIBLE_ENTRY = 255,
};

View File

@@ -274,13 +274,6 @@ enum BridgeModifyFlags : uint32_t {
tfClearAccountCreateAmount = 0x00010000,
};
constexpr std::uint32_t tfBridgeModifyMask = ~(tfUniversal | tfClearAccountCreateAmount);
// ConsensusEntropy flags (used on ttCONSENSUS_ENTROPY SHAMap entries):
enum ConsensusEntropyFlags : uint32_t {
tfEntropyCommit = 0x00000001, // entry is a commitment in commitSet
tfEntropyReveal = 0x00000002, // entry is a reveal in entropySet
};
// flag=0 (no tfEntropyCommit/tfEntropyReveal) = final injected pseudo-tx
// clang-format on
} // namespace ripple

View File

@@ -140,12 +140,6 @@ public:
mHookEmissions = hookEmissions;
}
void
setExportResult(STObject const& exportResult)
{
mExportResult = exportResult;
}
bool
hasHookExecutions() const
{
@@ -158,12 +152,6 @@ public:
return static_cast<bool>(mHookEmissions);
}
bool
hasExportResult() const
{
return static_cast<bool>(mExportResult);
}
STAmount
getDeliveredAmount() const
{
@@ -188,7 +176,6 @@ private:
std::optional<STAmount> mDelivered;
std::optional<STArray> mHookExecutions;
std::optional<STArray> mHookEmissions;
std::optional<STObject> mExportResult;
STArray mNodes;
};

View File

@@ -56,8 +56,6 @@ XRPL_FEATURE(AMM, Supported::yes, VoteBehavior::DefaultNo
XRPL_FIX (ReducedOffersV1, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(HooksUpdate2, Supported::yes, VoteBehavior::DefaultNo);
XRPL_FEATURE(HookOnV2, Supported::yes, VoteBehavior::DefaultNo);
XRPL_FEATURE(Export, Supported::yes, VoteBehavior::DefaultNo);
XRPL_FEATURE(ConsensusEntropy, Supported::yes, VoteBehavior::DefaultNo);
XRPL_FIX (HookAPI20251128, Supported::yes, VoteBehavior::DefaultYes);
XRPL_FIX (CronStacking, Supported::yes, VoteBehavior::DefaultYes);
XRPL_FEATURE(ExtendedHookState, Supported::yes, VoteBehavior::DefaultNo);

View File

@@ -223,20 +223,6 @@ LEDGER_ENTRY(ltURI_TOKEN, 0x0055, URIToken, uri_token, ({
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
/** The ledger object which stores consensus-derived entropy.
\note This is a singleton: only one such object exists in the ledger.
\sa keylet::consensusEntropy
*/
LEDGER_ENTRY_DUPLICATE(ltCONSENSUS_ENTROPY, 0x0058, ConsensusEntropy, consensus_entropy, ({
{sfDigest, soeREQUIRED},
{sfEntropyCount, soeREQUIRED},
{sfLedgerSequence, soeREQUIRED},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
/** A ledger object which describes an account.
\sa keylet::account
@@ -604,22 +590,6 @@ LEDGER_ENTRY(ltDID, 0x008D, DID, did, ({
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))
//@@start shadow-ticket-ledger-entry
/** A shadow ticket for export replay protection.
Created when a transaction is exported. Consumed when
proof-of-execution is imported back. Account-owned (pays reserve).
\sa keylet::shadowTicket
*/
LEDGER_ENTRY(ltSHADOW_TICKET, 0x5374, ShadowTicket, shadow_ticket, ({
{sfAccount, soeREQUIRED},
{sfTicketSequence, soeREQUIRED},
{sfTransactionHash, soeREQUIRED},
{sfLedgerSequence, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
}))
//@@end shadow-ticket-ledger-entry
#undef EXPAND
#undef LEDGER_ENTRY_DUPLICATE

View File

@@ -42,7 +42,6 @@ TYPED_SFIELD(sfTickSize, UINT8, 16)
TYPED_SFIELD(sfUNLModifyDisabling, UINT8, 17)
TYPED_SFIELD(sfHookResult, UINT8, 18)
TYPED_SFIELD(sfWasLockingChainSend, UINT8, 19)
TYPED_SFIELD(sfSidecarType, UINT8, 20)
// 16-bit integers (common)
TYPED_SFIELD(sfLedgerEntryType, UINT16, 1, SField::sMD_Never)
@@ -60,8 +59,6 @@ TYPED_SFIELD(sfHookExecutionIndex, UINT16, 19)
TYPED_SFIELD(sfHookApiVersion, UINT16, 20)
TYPED_SFIELD(sfHookStateScale, UINT16, 21)
TYPED_SFIELD(sfLedgerFixType, UINT16, 22)
TYPED_SFIELD(sfHookExportCount, UINT16, 98)
TYPED_SFIELD(sfEntropyCount, UINT16, 99)
// 32-bit integers (common)
TYPED_SFIELD(sfNetworkID, UINT32, 1)
@@ -126,7 +123,6 @@ TYPED_SFIELD(sfImportSequence, UINT32, 97)
TYPED_SFIELD(sfRewardTime, UINT32, 98)
TYPED_SFIELD(sfRewardLgrFirst, UINT32, 99)
TYPED_SFIELD(sfRewardLgrLast, UINT32, 100)
TYPED_SFIELD(sfCancelTicketSequence, UINT32, 101)
// 64-bit integers (common)
TYPED_SFIELD(sfIndexNext, UINT64, 1)
@@ -221,7 +217,6 @@ TYPED_SFIELD(sfHookCanEmit, UINT256, 96)
TYPED_SFIELD(sfEmittedTxnID, UINT256, 97)
TYPED_SFIELD(sfGovernanceMarks, UINT256, 98)
TYPED_SFIELD(sfGovernanceFlags, UINT256, 99)
TYPED_SFIELD(sfEntropyDigest, UINT256, 100)
// number (common)
TYPED_SFIELD(sfNumber, NUMBER, 1)
@@ -384,7 +379,6 @@ UNTYPED_SFIELD(sfXChainClaimAttestationCollectionElement, OBJECT, 30)
UNTYPED_SFIELD(sfXChainCreateAccountAttestationCollectionElement, OBJECT, 31)
UNTYPED_SFIELD(sfPriceData, OBJECT, 32)
UNTYPED_SFIELD(sfCredential, OBJECT, 33)
UNTYPED_SFIELD(sfExportedTxn, OBJECT, 90)
UNTYPED_SFIELD(sfAmountEntry, OBJECT, 91)
UNTYPED_SFIELD(sfMintURIToken, OBJECT, 92)
UNTYPED_SFIELD(sfHookEmission, OBJECT, 93)
@@ -392,7 +386,6 @@ UNTYPED_SFIELD(sfImportVLKey, OBJECT, 94)
UNTYPED_SFIELD(sfActiveValidator, OBJECT, 95)
UNTYPED_SFIELD(sfGenesisMint, OBJECT, 96)
UNTYPED_SFIELD(sfRemark, OBJECT, 97)
UNTYPED_SFIELD(sfExportResult, OBJECT, 98)
// array of objects (common)
// ARRAY/1 is reserved for end of array

View File

@@ -500,17 +500,6 @@ TRANSACTION(ttPERMISSIONED_DOMAIN_DELETE, 72, PermissionedDomainDelete, ({
{sfDomainID, soeREQUIRED},
}))
//@@start export-transaction-types
/* User-submittable export: creates a cross-chain transaction for
validator signing. Retries via terRETRY_EXPORT until quorum.
Also supports shadow ticket cancellation via sfCancelTicketSequence.
At least one of sfExportedTxn or sfCancelTicketSequence must be present. */
TRANSACTION(ttEXPORT, 91, Export, ({
{sfExportedTxn, soeOPTIONAL},
{sfCancelTicketSequence, soeOPTIONAL},
}))
//@@end export-transaction-types
/* A pseudo-txn alarm signal for invoking a hook, emitted by validators after alarm set conditions are met */
TRANSACTION(ttCRON, 92, Cron, ({
{sfOwner, soeREQUIRED},
@@ -616,10 +605,3 @@ TRANSACTION(ttUNL_REPORT, 104, UNLReport, ({
{sfActiveValidator, soeOPTIONAL},
{sfImportVLKey, soeOPTIONAL},
}))
TRANSACTION(ttCONSENSUS_ENTROPY, 105, ConsensusEntropy, ({
{sfLedgerSequence, soeREQUIRED},
{sfDigest, soeREQUIRED},
{sfEntropyCount, soeREQUIRED},
{sfBlob, soeOPTIONAL},
}))

View File

@@ -109,22 +109,14 @@ public:
Consumer
newInboundEndpoint(beast::IP::Endpoint const& address)
{
//@@start rng-local-testnet-resource-bucket
// Inbound connections from the same IP normally share one
// resource bucket (port stripped) for DoS protection. For
// loopback addresses, preserve the port so local testnet nodes
// each get their own bucket instead of all sharing one.
auto const key = is_loopback(address) ? address : address.at_port(0);
//@@end rng-local-testnet-resource-bucket
Entry* entry(nullptr);
{
std::lock_guard _(lock_);
auto [resultIt, resultInserted] = table_.emplace(
std::piecewise_construct,
std::make_tuple(kindInbound, key),
std::make_tuple(m_clock.now()));
std::make_tuple(kindInbound, address.at_port(0)), // Key
std::make_tuple(m_clock.now())); // Entry
entry = &resultIt->second;
entry->key = &resultIt->first;

View File

@@ -31,7 +31,6 @@
#include <cassert>
#include <cstring>
#include <ctime>
#include <exception>
#include <fstream>
#include <functional>
#include <iostream>
@@ -352,18 +351,9 @@ Logs::format(
if (useLocalTime)
{
try
{
auto now = std::chrono::system_clock::now();
auto local = date::make_zoned(date::current_zone(), now);
output = date::format(fmt, local);
}
catch (std::exception const&)
{
// Enhanced logging should not make startup fatal if tzdb lookup is
// unavailable or misconfigured. Fall back to UTC formatting.
output = date::format(fmt, std::chrono::system_clock::now());
}
auto now = std::chrono::system_clock::now();
auto local = date::make_zoned(date::current_zone(), now);
output = date::format(fmt, local);
}
else
{

View File

@@ -72,7 +72,6 @@ enum class LedgerNameSpace : std::uint16_t {
HOOK_DEFINITION = 'D',
EMITTED_TXN = 'E',
EMITTED_DIR = 'F',
SHADOW_TICKET = 0x5374, // St
NFTOKEN_OFFER = 'q',
NFTOKEN_BUY_OFFERS = 'h',
NFTOKEN_SELL_OFFERS = 'i',
@@ -80,7 +79,6 @@ enum class LedgerNameSpace : std::uint16_t {
IMPORT_VLSEQ = 'I',
UNL_REPORT = 'R',
CRON = 'L',
CONSENSUS_ENTROPY = 'X',
AMM = 'A',
BRIDGE = 'H',
XCHAIN_CLAIM_ID = 'Q',
@@ -188,15 +186,6 @@ emittedTxn(uint256 const& id) noexcept
return {ltEMITTED_TXN, indexHash(LedgerNameSpace::EMITTED_TXN, id)};
}
Keylet
shadowTicket(AccountID const& account, std::uint32_t ticketSeq) noexcept
{
return {
ltSHADOW_TICKET,
indexHash(
LedgerNameSpace::SHADOW_TICKET, account, std::uint32_t(ticketSeq))};
}
Keylet
hook(AccountID const& id) noexcept
{
@@ -555,14 +544,6 @@ cron(uint32_t timestamp, std::optional<AccountID> const& id)
return {ltCRON, uint256::fromVoid(h)};
}
Keylet const&
consensusEntropy() noexcept
{
static Keylet const ret{
ltCONSENSUS_ENTROPY, indexHash(LedgerNameSpace::CONSENSUS_ENTROPY)};
return ret;
}
Keylet
amm(Asset const& issue1, Asset const& issue2) noexcept
{

View File

@@ -78,7 +78,6 @@ InnerObjectFormats::InnerObjectFormats()
{sfHookExecutionIndex, soeREQUIRED},
{sfHookStateChangeCount, soeREQUIRED},
{sfHookEmitCount, soeREQUIRED},
{sfHookExportCount, soeOPTIONAL},
{sfFlags, soeOPTIONAL}});
add(sfHookEmission.jsonName,

View File

@@ -684,8 +684,7 @@ isPseudoTx(STObject const& tx)
auto tt = safe_cast<TxType>(*t);
return tt == ttAMENDMENT || tt == ttFEE || tt == ttUNL_MODIFY ||
tt == ttEMIT_FAILURE || tt == ttUNL_REPORT || tt == ttCRON ||
tt == ttCONSENSUS_ENTROPY;
tt == ttEMIT_FAILURE || tt == ttUNL_REPORT || tt == ttCRON;
}
} // namespace ripple

View File

@@ -124,7 +124,6 @@ transResults()
MAKE_ERROR(tecARRAY_TOO_LARGE, "Array is too large."),
MAKE_ERROR(tecLOCKED, "Fund is locked."),
MAKE_ERROR(tecBAD_CREDENTIALS, "Bad credentials."),
MAKE_ERROR(tecEXPORT_EXPIRED, "Export expired without reaching signature quorum."),
MAKE_ERROR(tefALREADY, "The exact transaction was already in this ledger."),
MAKE_ERROR(tefBAD_ADD_AUTH, "Not authorized to add account."),
@@ -172,7 +171,6 @@ transResults()
MAKE_ERROR(telNON_LOCAL_EMITTED_TXN, "Emitted transaction cannot be applied because it was not generated locally."),
MAKE_ERROR(telIMPORT_VL_KEY_NOT_RECOGNISED, "Import vl key was not recognized."),
MAKE_ERROR(telCAN_NOT_QUEUE_IMPORT, "Import transaction was not able to be directly applied and cannot be queued."),
MAKE_ERROR(telSHADOW_TICKET_REQUIRED, "The imported transaction uses a TicketSequence but no shadow ticket exists."),
MAKE_ERROR(telENV_RPC_FAILED, "Unit test RPC failure."),
MAKE_ERROR(temMALFORMED, "Malformed transaction."),
@@ -240,7 +238,6 @@ transResults()
MAKE_ERROR(terPRE_TICKET, "Ticket is not yet in ledger."),
MAKE_ERROR(terNO_HOOK, "No hook with that hash exists on the ledger."),
MAKE_ERROR(terNO_AMM, "AMM doesn't exist for the asset pair."),
MAKE_ERROR(terRETRY_EXPORT, "Export awaiting validator signatures."),
MAKE_ERROR(tesSUCCESS, "The transaction was applied. Only final in a validated ledger."),
MAKE_ERROR(tesPARTIAL, "The transaction was applied but should be submitted again until returning tesSUCCESS."),

View File

@@ -49,11 +49,6 @@ TxMeta::TxMeta(
if (obj.isFieldPresent(sfHookEmissions))
setHookEmissions(obj.getFieldArray(sfHookEmissions));
if (obj.isFieldPresent(sfExportResult))
setExportResult(const_cast<STObject&>(obj)
.getField(sfExportResult)
.downcast<STObject>());
}
TxMeta::TxMeta(uint256 const& txid, std::uint32_t ledger, STObject const& obj)
@@ -80,11 +75,6 @@ TxMeta::TxMeta(uint256 const& txid, std::uint32_t ledger, STObject const& obj)
if (obj.isFieldPresent(sfHookEmissions))
setHookEmissions(obj.getFieldArray(sfHookEmissions));
if (obj.isFieldPresent(sfExportResult))
setExportResult(const_cast<STObject&>(obj)
.getField(sfExportResult)
.downcast<STObject>());
}
TxMeta::TxMeta(uint256 const& txid, std::uint32_t ledger, Blob const& vec)
@@ -255,14 +245,6 @@ TxMeta::getAsObject() const
if (hasHookEmissions())
metaData.setFieldArray(sfHookEmissions, getHookEmissions());
if (hasExportResult())
{
Serializer s;
mExportResult->add(s);
SerialIter sit(s.slice());
metaData.emplace_back(STObject(sit, sfExportResult));
}
return metaData;
}

View File

@@ -1,437 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2026 XRPL Labs
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <test/app/ConsensusEntropy_test_hooks.h>
#include <test/jtx.h>
#include <test/jtx/hook.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/hook/Enum.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/TxFlags.h>
#include <xrpl/protocol/jss.h>
namespace ripple {
namespace test {
using TestHook = std::vector<uint8_t> const&;
#define BEAST_REQUIRE(x) \
{ \
BEAST_EXPECT(!!(x)); \
if (!(x)) \
return; \
}
#define HSFEE fee(100'000'000)
#define M(m) memo(m, "", "")
class ConsensusEntropy_test : public beast::unit_test::suite
{
static void
overrideFlag(Json::Value& jv)
{
jv[jss::Flags] = hsfOVERRIDE;
}
void
testSLECreated()
{
testcase("SLE created on ledger close");
using namespace jtx;
Env env{
*this,
envconfig(),
supported_amendments() | featureConsensusEntropy,
nullptr};
BEAST_EXPECT(!env.le(keylet::consensusEntropy()));
env.close();
auto const sle = env.le(keylet::consensusEntropy());
BEAST_REQUIRE(sle);
auto const digest = sle->getFieldH256(sfDigest);
BEAST_EXPECT(digest != uint256{});
auto const count = sle->getFieldU16(sfEntropyCount);
BEAST_EXPECT(count >= 5);
auto const sleSeq = sle->getFieldU32(sfLedgerSequence);
BEAST_EXPECT(sleSeq == env.closed()->seq());
}
void
testSLEUpdatedOnSubsequentClose()
{
testcase("SLE updated on subsequent ledger close");
using namespace jtx;
Env env{
*this,
envconfig(),
supported_amendments() | featureConsensusEntropy,
nullptr};
env.close();
auto const sle1 = env.le(keylet::consensusEntropy());
BEAST_REQUIRE(sle1);
auto const digest1 = sle1->getFieldH256(sfDigest);
auto const seq1 = sle1->getFieldU32(sfLedgerSequence);
env.close();
auto const sle2 = env.le(keylet::consensusEntropy());
BEAST_REQUIRE(sle2);
auto const digest2 = sle2->getFieldH256(sfDigest);
auto const seq2 = sle2->getFieldU32(sfLedgerSequence);
BEAST_EXPECT(digest2 != digest1);
BEAST_EXPECT(seq2 == seq1 + 1);
}
void
testNoSLEWithoutAmendment()
{
testcase("No SLE without amendment");
using namespace jtx;
Env env{*this};
env.close();
env.close();
BEAST_EXPECT(!env.le(keylet::consensusEntropy()));
}
// End-to-end coverage of the dice() hook API: install a hook that rolls
// dice(6) and surfaces the roll via accept(), then read the roll back out
// of the sfHookExecutions metadata and check it lies in [0, 6).
void
testDice()
{
testcase("Hook dice() API");
using namespace jtx;
// featureConsensusEntropy must be enabled so the entropy ledger object
// (and therefore the dice() API) is available to hooks.
Env env{
*this,
envconfig(),
supported_amendments() | featureConsensusEntropy,
nullptr};
auto const alice = Account{"alice"};
env.fund(XRP(10000), alice);
env.close();
// Entropy SLE must exist before hook can use dice()
BEAST_REQUIRE(env.le(keylet::consensusEntropy()));
// Set the hook
// NOTE: the raw string below is the lookup key into the generated
// consensusentropy_test_wasm map; it must stay byte-identical to the
// key emitted by build_test_hooks.py or the lookup yields empty wasm.
TestHook hook = consensusentropy_test_wasm[R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t dice(uint32_t sides);
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
int64_t hook(uint32_t r)
{
_g(1,1);
// dice(6) should return 0..5
int64_t result = dice(6);
// negative means error
if (result < 0)
rollback(0, 0, result);
if (result >= 6)
rollback(0, 0, -1);
// return the dice result as the accept code
return accept(0, 0, result);
}
)[test.hook]"];
env(ripple::test::jtx::hook(alice, {{hso(hook, overrideFlag)}}, 0),
M("set dice hook"),
HSFEE);
env.close();
// Invoke the hook
Json::Value invoke;
invoke[jss::TransactionType] = "Invoke";
invoke[jss::Account] = alice.human();
env(invoke, M("test dice"), fee(XRP(1)));
// The hook's accept() error_code is surfaced as sfHookReturnCode in the
// transaction metadata of the invoking transaction.
auto meta = env.meta();
BEAST_REQUIRE(meta);
BEAST_REQUIRE(meta->isFieldPresent(sfHookExecutions));
auto const hookExecutions = meta->getFieldArray(sfHookExecutions);
BEAST_REQUIRE(hookExecutions.size() == 1);
auto const returnCode = hookExecutions[0].getFieldU64(sfHookReturnCode);
std::cerr << " dice(6) returnCode = " << returnCode << " (hex 0x"
<< std::hex << returnCode << std::dec << ")\n";
// dice(6) returns 0..5
BEAST_EXPECT(returnCode <= 5);
// Result should be 3 (accept)
BEAST_EXPECT(hookExecutions[0].getFieldU8(sfHookResult) == 3);
}
// Coverage of the random() hook API: a hook zeroes a 32-byte buffer, asks
// random() to fill it, and rolls back unless exactly 32 bytes were written
// and at least one byte is nonzero. (A genuinely random buffer is all-zero
// with probability 2^-256, so the nonzero check cannot realistically flake.)
void
testRandom()
{
testcase("Hook random() API");
using namespace jtx;
// featureConsensusEntropy must be enabled so the entropy ledger object
// (and therefore the random() API) is available to hooks.
Env env{
*this,
envconfig(),
supported_amendments() | featureConsensusEntropy,
nullptr};
auto const alice = Account{"alice"};
env.fund(XRP(10000), alice);
env.close();
BEAST_REQUIRE(env.le(keylet::consensusEntropy()));
// Hook calls random() to fill a 32-byte buffer, then checks
// the buffer is not all zeroes.
// NOTE: the raw string below is the lookup key into the generated
// consensusentropy_test_wasm map; keep it byte-identical to the key
// emitted by build_test_hooks.py.
TestHook hook = consensusentropy_test_wasm[R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t random(uint32_t write_ptr, uint32_t write_len);
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
int64_t hook(uint32_t r)
{
_g(1,1);
uint8_t buf[32];
for (int i = 0; GUARD(32), i < 32; ++i)
buf[i] = 0;
int64_t result = random((uint32_t)buf, 32);
// Should return 32 (bytes written)
if (result != 32)
rollback(0, 0, result);
// Verify buffer is not all zeroes
int nonzero = 0;
for (int i = 0; GUARD(32), i < 32; ++i)
if (buf[i] != 0) nonzero = 1;
if (!nonzero)
rollback(0, 0, -2);
return accept(0, 0, 0);
}
)[test.hook]"];
env(ripple::test::jtx::hook(alice, {{hso(hook, overrideFlag)}}, 0),
M("set random hook"),
HSFEE);
env.close();
Json::Value invoke;
invoke[jss::TransactionType] = "Invoke";
invoke[jss::Account] = alice.human();
env(invoke, M("test random"), fee(XRP(1)));
// All verification happens inside the hook; the test only checks that
// the hook accepted with code 0.
auto meta = env.meta();
BEAST_REQUIRE(meta);
BEAST_REQUIRE(meta->isFieldPresent(sfHookExecutions));
auto const hookExecutions = meta->getFieldArray(sfHookExecutions);
BEAST_REQUIRE(hookExecutions.size() == 1);
// Return code 0 = all checks passed in the hook
BEAST_EXPECT(hookExecutions[0].getFieldU64(sfHookReturnCode) == 0);
BEAST_EXPECT(hookExecutions[0].getFieldU8(sfHookResult) == 3);
}
// Verifies that two consecutive dice() calls inside one hook execution
// return different values (i.e. the RNG advances between calls). With
// dice(1000000) the chance of an honest collision is 1e-6 per run; the
// hook deliberately rolls back in that case, which would flip sfHookResult
// and fail the expectations below.
void
testDiceConsecutiveCallsDiffer()
{
testcase("Hook dice() consecutive calls return different values");
using namespace jtx;
Env env{
*this,
envconfig(),
supported_amendments() | featureConsensusEntropy,
nullptr};
auto const alice = Account{"alice"};
env.fund(XRP(10000), alice);
env.close();
BEAST_REQUIRE(env.le(keylet::consensusEntropy()));
// dice(1000000) twice — large range makes collision near-impossible
// encode r1 in low 20 bits, r2 in high bits
// NOTE: the raw string below is the lookup key into the generated
// consensusentropy_test_wasm map; keep it byte-identical to the key
// emitted by build_test_hooks.py.
TestHook hook = consensusentropy_test_wasm[R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t dice(uint32_t sides);
int64_t hook(uint32_t r)
{
_g(1,1);
int64_t r1 = dice(1000000);
if (r1 < 0)
rollback(0, 0, r1);
int64_t r2 = dice(1000000);
if (r2 < 0)
rollback(0, 0, r2);
// consecutive calls should differ (rngCallCounter)
if (r1 == r2)
rollback(0, 0, -1);
return accept(0, 0, r1 | (r2 << 20));
}
)[test.hook]"];
env(ripple::test::jtx::hook(alice, {{hso(hook, overrideFlag)}}, 0),
M("set dice hook"),
HSFEE);
env.close();
Json::Value invoke;
invoke[jss::TransactionType] = "Invoke";
invoke[jss::Account] = alice.human();
env(invoke, M("test dice consecutive"), fee(XRP(1)));
auto meta = env.meta();
BEAST_REQUIRE(meta);
BEAST_REQUIRE(meta->isFieldPresent(sfHookExecutions));
auto const hookExecutions = meta->getFieldArray(sfHookExecutions);
BEAST_REQUIRE(hookExecutions.size() == 1);
// Unpack the two rolls from the accept code: r1 in the low 20 bits,
// r2 in the next 20 bits (matching the hook's r1 | (r2 << 20)).
auto const rc = hookExecutions[0].getFieldU64(sfHookReturnCode);
auto const r1 = rc & 0xFFFFF;
auto const r2 = (rc >> 20) & 0xFFFFF;
std::cerr << " two-call dice(1000000): returnCode=" << rc << " hex=0x"
<< std::hex << rc << std::dec << " r1=" << r1 << " r2=" << r2
<< "\n";
// hookResult 3 = accept (would be 1 if r1==r2 triggered rollback)
BEAST_EXPECT(hookExecutions[0].getFieldU8(sfHookResult) == 3);
BEAST_EXPECT(r1 < 1000000);
BEAST_EXPECT(r2 < 1000000);
BEAST_EXPECT(r1 != r2);
}
// Error-path coverage: dice(0) is a degenerate request and must fail with
// INVALID_ARGUMENT (-7). The hook forwards dice()'s return value through
// accept() so the test can decode it from the metadata.
void
testDiceZeroSides()
{
testcase("Hook dice(0) returns INVALID_ARGUMENT");
using namespace jtx;
Env env{
*this,
envconfig(),
supported_amendments() | featureConsensusEntropy,
nullptr};
auto const alice = Account{"alice"};
env.fund(XRP(10000), alice);
env.close();
BEAST_REQUIRE(env.le(keylet::consensusEntropy()));
// Hook calls dice(0) and returns whatever dice returns.
// dice(0) should return INVALID_ARGUMENT (-7).
// NOTE: the raw string below is the lookup key into the generated
// consensusentropy_test_wasm map; keep it byte-identical to the key
// emitted by build_test_hooks.py.
TestHook hook = consensusentropy_test_wasm[R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t dice(uint32_t sides);
int64_t hook(uint32_t r)
{
_g(1,1);
int64_t result = dice(0);
// dice(0) should return negative error code, pass it through
return accept(0, 0, result);
}
)[test.hook]"];
env(ripple::test::jtx::hook(alice, {{hso(hook, overrideFlag)}}, 0),
M("set dice0 hook"),
HSFEE);
env.close();
Json::Value invoke;
invoke[jss::TransactionType] = "Invoke";
invoke[jss::Account] = alice.human();
env(invoke, M("test dice(0)"), fee(XRP(1)));
auto meta = env.meta();
BEAST_REQUIRE(meta);
BEAST_REQUIRE(meta->isFieldPresent(sfHookExecutions));
auto const hookExecutions = meta->getFieldArray(sfHookExecutions);
BEAST_REQUIRE(hookExecutions.size() == 1);
// INVALID_ARGUMENT = -7, encoded as 0x8000000000000000 + abs(code)
// (see applyHook.cpp unsigned_exit_code encoding)
auto const rawCode = hookExecutions[0].getFieldU64(sfHookReturnCode);
// Decode the sign-bit encoding back into a signed return code.
int64_t returnCode = (rawCode & 0x8000000000000000ULL)
? -static_cast<int64_t>(rawCode & 0x7FFFFFFFFFFFFFFFULL)
: static_cast<int64_t>(rawCode);
std::cerr << " dice(0) returnCode = " << returnCode << " (raw 0x"
<< std::hex << rawCode << std::dec << ")\n";
BEAST_EXPECT(returnCode == -7);
BEAST_EXPECT(hookExecutions[0].getFieldU8(sfHookResult) == 3);
}
// Suite entry point. Groups the ledger-object lifecycle tests first, then
// the hook-facing entropy APIs (dice/random). Each hook test constructs
// its own Env, so the relative order carries no shared state.
void
run() override
{
// Ledger-object lifecycle.
testSLECreated();
testSLEUpdatedOnSubsequentClose();
testNoSLEWithoutAmendment();
// Hook API surface: dice() happy path, dice() error path, random(),
// and RNG advancement across consecutive calls.
testDice();
testDiceZeroSides();
testRandom();
testDiceConsecutiveCallsDiffer();
}
};
BEAST_DEFINE_TESTSUITE(ConsensusEntropy, app, ripple);
} // namespace test
} // namespace ripple

View File

@@ -1,235 +0,0 @@
// This file is generated by build_test_hooks.py
#ifndef CONSENSUSENTROPY_TEST_WASM_INCLUDED
#define CONSENSUSENTROPY_TEST_WASM_INCLUDED
#include <map>
#include <stdint.h>
#include <string>
#include <vector>
namespace ripple {
namespace test {
// Map from a hook's C source text to its precompiled wasm bytes.
// The source text is used verbatim as the lookup key by the
// ConsensusEntropy test suite, so every key must stay byte-identical to
// the raw string in the test file. Do not edit by hand: regenerate with
// build_test_hooks.py.
std::map<std::string, std::vector<uint8_t>> consensusentropy_test_wasm = {
/* ==== WASM: 0 ==== */
// dice(6) happy-path hook (used by testDice).
{R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t dice(uint32_t sides);
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
int64_t hook(uint32_t r)
{
_g(1,1);
// dice(6) should return 0..5
int64_t result = dice(6);
// negative means error
if (result < 0)
rollback(0, 0, result);
if (result >= 6)
rollback(0, 0, -1);
// return the dice result as the accept code
return accept(0, 0, result);
}
)[test.hook]",
{
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x13U,
0x03U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x01U, 0x7FU,
0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x02U,
0x31U, 0x04U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U,
0x00U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x04U, 0x64U, 0x69U, 0x63U, 0x65U,
0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU,
0x6CU, 0x62U, 0x61U, 0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU,
0x76U, 0x06U, 0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U,
0x03U, 0x02U, 0x01U, 0x01U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U, 0x06U,
0x21U, 0x05U, 0x7FU, 0x01U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU,
0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U,
0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U,
0x41U, 0x80U, 0x08U, 0x0BU, 0x07U, 0x08U, 0x01U, 0x04U, 0x68U, 0x6FU,
0x6FU, 0x6BU, 0x00U, 0x04U, 0x0AU, 0xD0U, 0x80U, 0x00U, 0x01U, 0xCCU,
0x80U, 0x00U, 0x01U, 0x02U, 0x7EU, 0x41U, 0x01U, 0x41U, 0x01U, 0x10U,
0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x41U, 0x06U, 0x10U, 0x81U,
0x80U, 0x80U, 0x80U, 0x00U, 0x22U, 0x01U, 0x21U, 0x02U, 0x02U, 0x40U,
0x02U, 0x40U, 0x20U, 0x01U, 0x42U, 0x00U, 0x53U, 0x0DU, 0x00U, 0x42U,
0x7FU, 0x21U, 0x02U, 0x20U, 0x01U, 0x42U, 0x06U, 0x53U, 0x0DU, 0x01U,
0x0BU, 0x41U, 0x00U, 0x41U, 0x00U, 0x20U, 0x02U, 0x10U, 0x82U, 0x80U,
0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x41U, 0x00U, 0x41U, 0x00U, 0x20U,
0x01U, 0x10U, 0x83U, 0x80U, 0x80U, 0x80U, 0x00U, 0x0BU,
}},
/* ==== WASM: 1 ==== */
// random() buffer-fill hook (used by testRandom).
{R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t random(uint32_t write_ptr, uint32_t write_len);
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
int64_t hook(uint32_t r)
{
_g(1,1);
uint8_t buf[32];
for (int i = 0; GUARD(32), i < 32; ++i)
buf[i] = 0;
int64_t result = random((uint32_t)buf, 32);
// Should return 32 (bytes written)
if (result != 32)
rollback(0, 0, result);
// Verify buffer is not all zeroes
int nonzero = 0;
for (int i = 0; GUARD(32), i < 32; ++i)
if (buf[i] != 0) nonzero = 1;
if (!nonzero)
rollback(0, 0, -2);
return accept(0, 0, 0);
}
)[test.hook]",
{
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x19U,
0x04U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x02U, 0x7FU,
0x7FU, 0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU,
0x60U, 0x01U, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x33U, 0x04U, 0x03U, 0x65U,
0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U, 0x00U, 0x03U, 0x65U, 0x6EU,
0x76U, 0x06U, 0x72U, 0x61U, 0x6EU, 0x64U, 0x6FU, 0x6DU, 0x00U, 0x01U,
0x03U, 0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU, 0x6CU, 0x62U,
0x61U, 0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x06U,
0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U, 0x03U, 0x02U,
0x01U, 0x03U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U, 0x06U, 0x21U, 0x05U,
0x7FU, 0x01U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U,
0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU,
0x00U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U,
0x08U, 0x0BU, 0x07U, 0x08U, 0x01U, 0x04U, 0x68U, 0x6FU, 0x6FU, 0x6BU,
0x00U, 0x04U, 0x0AU, 0x86U, 0x82U, 0x00U, 0x01U, 0x82U, 0x82U, 0x00U,
0x03U, 0x02U, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x7FU, 0x23U, 0x80U, 0x80U,
0x80U, 0x80U, 0x00U, 0x41U, 0x20U, 0x6BU, 0x22U, 0x01U, 0x24U, 0x80U,
0x80U, 0x80U, 0x80U, 0x00U, 0x41U, 0x01U, 0x41U, 0x01U, 0x10U, 0x80U,
0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x41U, 0x8EU, 0x80U, 0x80U, 0x80U,
0x78U, 0x41U, 0x21U, 0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU,
0x41U, 0x00U, 0x21U, 0x02U, 0x03U, 0x40U, 0x41U, 0x8EU, 0x80U, 0x80U,
0x80U, 0x78U, 0x41U, 0x21U, 0x10U, 0x00U, 0x1AU, 0x20U, 0x01U, 0x20U,
0x02U, 0x6AU, 0x41U, 0x00U, 0x3AU, 0x00U, 0x00U, 0x41U, 0x8EU, 0x80U,
0x80U, 0x80U, 0x78U, 0x41U, 0x21U, 0x1AU, 0x01U, 0x01U, 0x01U, 0x01U,
0x01U, 0x1AU, 0x20U, 0x02U, 0x41U, 0x01U, 0x6AU, 0x22U, 0x02U, 0x41U,
0x20U, 0x47U, 0x0DU, 0x00U, 0x0BU, 0x02U, 0x40U, 0x20U, 0x01U, 0x41U,
0x20U, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U, 0x00U, 0x22U, 0x03U, 0x42U,
0x20U, 0x51U, 0x0DU, 0x00U, 0x41U, 0x00U, 0x41U, 0x00U, 0x20U, 0x03U,
0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x41U, 0x99U,
0x80U, 0x80U, 0x80U, 0x78U, 0x41U, 0x21U, 0x10U, 0x80U, 0x80U, 0x80U,
0x80U, 0x00U, 0x1AU, 0x41U, 0x00U, 0x21U, 0x02U, 0x41U, 0x00U, 0x21U,
0x04U, 0x03U, 0x40U, 0x41U, 0x99U, 0x80U, 0x80U, 0x80U, 0x78U, 0x41U,
0x21U, 0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x20U, 0x01U,
0x20U, 0x02U, 0x6AU, 0x2DU, 0x00U, 0x00U, 0x21U, 0x05U, 0x41U, 0x01U,
0x20U, 0x04U, 0x20U, 0x05U, 0x1BU, 0x21U, 0x04U, 0x20U, 0x02U, 0x41U,
0x01U, 0x6AU, 0x22U, 0x02U, 0x41U, 0x20U, 0x47U, 0x0DU, 0x00U, 0x0BU,
0x02U, 0x40U, 0x20U, 0x04U, 0x0DU, 0x00U, 0x41U, 0x00U, 0x41U, 0x00U,
0x42U, 0x7EU, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU,
0x41U, 0x00U, 0x41U, 0x00U, 0x42U, 0x00U, 0x10U, 0x83U, 0x80U, 0x80U,
0x80U, 0x00U, 0x21U, 0x03U, 0x20U, 0x01U, 0x41U, 0x20U, 0x6AU, 0x24U,
0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x20U, 0x03U, 0x0BU,
}},
/* ==== WASM: 2 ==== */
// Two consecutive dice(1000000) rolls (used by
// testDiceConsecutiveCallsDiffer).
{R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t dice(uint32_t sides);
int64_t hook(uint32_t r)
{
_g(1,1);
int64_t r1 = dice(1000000);
if (r1 < 0)
rollback(0, 0, r1);
int64_t r2 = dice(1000000);
if (r2 < 0)
rollback(0, 0, r2);
// consecutive calls should differ (rngCallCounter)
if (r1 == r2)
rollback(0, 0, -1);
return accept(0, 0, r1 | (r2 << 20));
}
)[test.hook]",
{
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x13U,
0x03U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x01U, 0x7FU,
0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x02U,
0x31U, 0x04U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U,
0x00U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x04U, 0x64U, 0x69U, 0x63U, 0x65U,
0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU,
0x6CU, 0x62U, 0x61U, 0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU,
0x76U, 0x06U, 0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U,
0x03U, 0x02U, 0x01U, 0x01U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U, 0x06U,
0x21U, 0x05U, 0x7FU, 0x01U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU,
0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U,
0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U,
0x41U, 0x80U, 0x08U, 0x0BU, 0x07U, 0x08U, 0x01U, 0x04U, 0x68U, 0x6FU,
0x6FU, 0x6BU, 0x00U, 0x04U, 0x0AU, 0xFEU, 0x80U, 0x00U, 0x01U, 0xFAU,
0x80U, 0x00U, 0x01U, 0x02U, 0x7EU, 0x41U, 0x01U, 0x41U, 0x01U, 0x10U,
0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x02U, 0x40U, 0x41U, 0xC0U,
0x84U, 0x3DU, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U, 0x00U, 0x22U, 0x01U,
0x42U, 0x7FU, 0x55U, 0x0DU, 0x00U, 0x41U, 0x00U, 0x41U, 0x00U, 0x20U,
0x01U, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x02U,
0x40U, 0x41U, 0xC0U, 0x84U, 0x3DU, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U,
0x00U, 0x22U, 0x02U, 0x42U, 0x7FU, 0x55U, 0x0DU, 0x00U, 0x41U, 0x00U,
0x41U, 0x00U, 0x20U, 0x02U, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U,
0x1AU, 0x0BU, 0x02U, 0x40U, 0x20U, 0x01U, 0x20U, 0x02U, 0x52U, 0x0DU,
0x00U, 0x41U, 0x00U, 0x41U, 0x00U, 0x42U, 0x7FU, 0x10U, 0x82U, 0x80U,
0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x41U, 0x00U, 0x41U, 0x00U, 0x20U,
0x02U, 0x42U, 0x14U, 0x86U, 0x20U, 0x01U, 0x84U, 0x10U, 0x83U, 0x80U,
0x80U, 0x80U, 0x00U, 0x0BU,
}},
/* ==== WASM: 3 ==== */
// dice(0) error-path hook (used by testDiceZeroSides).
{R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t dice(uint32_t sides);
int64_t hook(uint32_t r)
{
_g(1,1);
int64_t result = dice(0);
// dice(0) should return negative error code, pass it through
return accept(0, 0, result);
}
)[test.hook]",
{
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x13U,
0x03U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x01U, 0x7FU,
0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x02U,
0x22U, 0x03U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U,
0x00U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x04U, 0x64U, 0x69U, 0x63U, 0x65U,
0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x06U, 0x61U, 0x63U, 0x63U,
0x65U, 0x70U, 0x74U, 0x00U, 0x02U, 0x03U, 0x02U, 0x01U, 0x01U, 0x05U,
0x03U, 0x01U, 0x00U, 0x02U, 0x06U, 0x21U, 0x05U, 0x7FU, 0x01U, 0x41U,
0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU,
0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U,
0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x07U,
0x08U, 0x01U, 0x04U, 0x68U, 0x6FU, 0x6FU, 0x6BU, 0x00U, 0x03U, 0x0AU,
0xA3U, 0x80U, 0x00U, 0x01U, 0x9FU, 0x80U, 0x00U, 0x00U, 0x41U, 0x01U,
0x41U, 0x01U, 0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x41U,
0x00U, 0x41U, 0x00U, 0x41U, 0x00U, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U,
0x00U, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x0BU,
}},
};
}
} // namespace ripple
#endif

View File

@@ -1,162 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpld/app/misc/ExportSigCollector.h>
#include <xrpl/basics/StringUtilities.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/protocol/digest.h>
#include <cstring>
#include <stdexcept>
namespace ripple {
namespace test {
namespace {
// Derive a deterministic 256-bit transaction id from a short text label.
uint256
makeHash(char const* label)
{
    auto const len = std::strlen(label);
    return sha512Half(Slice(label, len));
}
// Decode a hex-encoded serialized public key into a PublicKey.
//
// @param hex  hex string of a serialized public key (33 bytes of
//             compressed secp256k1 in this suite).
// @return     the decoded PublicKey.
// @throws std::runtime_error if `hex` is not valid hexadecimal.
PublicKey
makePublicKey(char const* hex)
{
    auto const raw = strUnHex(hex);
    // The original code dereferenced the optional unconditionally, which
    // is undefined behavior when strUnHex rejects the input. Fail loudly
    // instead so a bad test fixture is diagnosed immediately.
    if (!raw)
        throw std::runtime_error("makePublicKey: invalid hex input");
    return PublicKey{makeSlice(*raw)};
}
// Build a tiny 3-byte placeholder signature: {seed, seed+1, seed+2}.
// Distinct seeds therefore yield distinct signature blobs.
Buffer
makeSignature(std::uint8_t seed)
{
    std::uint8_t bytes[3];
    for (std::size_t i = 0; i < sizeof(bytes); ++i)
        bytes[i] = static_cast<std::uint8_t>(seed + i);
    return Buffer(bytes, sizeof(bytes));
}
} // namespace
class ExportSigCollector_test : public beast::unit_test::suite
{
PublicKey const validator_ = makePublicKey(
"0388935426E0D08083314842EDFBB2D517BD47699F9A4527318A8E10468C97C05"
"2");
public:
void
testCleanupUsesFirstSeenSeq()
{
testcase("cleanup uses first seen sequence");
ExportSigCollector collector;
auto const tx = makeHash("cleanup-verified");
auto const sig = makeSignature(1);
collector.addVerifiedSignature(tx, validator_, sig, 10);
BEAST_EXPECT(collector.signatureCount(tx) == 1);
collector.cleanupStale(266);
BEAST_EXPECT(collector.signatureCount(tx) == 1);
collector.cleanupStale(267);
BEAST_EXPECT(collector.signatureCount(tx) == 0);
}
void
testUpgradeSetsFirstSeenSeq()
{
testcase("upgrade sets first seen sequence");
ExportSigCollector collector;
auto const tx = makeHash("cleanup-upgraded");
auto const sig = makeSignature(5);
collector.addUnverifiedSignature(tx, validator_, sig);
BEAST_EXPECT(collector.hasUnverifiedSignatures());
collector.upgradeSignature(tx, validator_, sig, 10);
BEAST_EXPECT(!collector.hasUnverifiedSignatures());
BEAST_EXPECT(collector.signatureCount(tx) == 1);
collector.cleanupStale(266);
BEAST_EXPECT(collector.signatureCount(tx) == 1);
collector.cleanupStale(267);
BEAST_EXPECT(collector.signatureCount(tx) == 0);
}
void
testRemoveInvalidUnverifiedSignature()
{
testcase("remove invalid unverified signature");
ExportSigCollector collector;
auto const tx = makeHash("remove-invalid");
auto const sig = makeSignature(9);
auto const otherSig = makeSignature(10);
collector.addUnverifiedSignature(tx, validator_, sig, 10);
BEAST_EXPECT(collector.hasUnverifiedSignatures());
BEAST_EXPECT(!collector.removeSignature(tx, validator_, otherSig));
BEAST_EXPECT(collector.hasUnverifiedSignatures());
BEAST_EXPECT(collector.removeSignature(tx, validator_, sig));
BEAST_EXPECT(!collector.hasUnverifiedSignatures());
BEAST_EXPECT(collector.signatureCount(tx) == 0);
}
void
testClearAll()
{
testcase("clear all signatures and round state");
ExportSigCollector collector;
auto const verifiedTx = makeHash("clear-all-verified");
auto const unverifiedTx = makeHash("clear-all-unverified");
auto const sig = makeSignature(12);
collector.addVerifiedSignature(verifiedTx, validator_, sig, 10);
collector.addUnverifiedSignature(unverifiedTx, validator_, sig, 10);
BEAST_EXPECT(collector.signatureCount(verifiedTx) == 1);
BEAST_EXPECT(collector.hasUnverifiedSignatures());
BEAST_EXPECT(collector.markSent(verifiedTx));
BEAST_EXPECT(!collector.markSent(verifiedTx));
collector.clearAll();
BEAST_EXPECT(collector.signatureCount(verifiedTx) == 0);
BEAST_EXPECT(!collector.hasUnverifiedSignatures());
BEAST_EXPECT(collector.markSent(verifiedTx));
}
void
run() override
{
testCleanupUsesFirstSeenSeq();
testUpgradeSetsFirstSeenSeq();
testRemoveInvalidUnverifiedSignature();
testClearAll();
}
};
BEAST_DEFINE_TESTSUITE(ExportSigCollector, app, ripple);
} // namespace test
} // namespace ripple

File diff suppressed because it is too large Load Diff

View File

@@ -1,483 +0,0 @@
// This file is generated by build_test_hooks.py
#ifndef EXPORT_TEST_WASM_INCLUDED
#define EXPORT_TEST_WASM_INCLUDED
#include <map>
#include <stdint.h>
#include <string>
#include <vector>
namespace ripple {
namespace test {
std::map<std::string, std::vector<uint8_t>> export_test_wasm = {
/* ==== WASM: 0 ==== */
{R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t id, uint32_t maxiter);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t xport(uint32_t write_ptr, uint32_t write_len, uint32_t read_ptr, uint32_t read_len);
extern int64_t xport_reserve(uint32_t count);
extern int64_t hook_account(uint32_t write_ptr, uint32_t write_len);
extern int64_t otxn_param(uint32_t write_ptr, uint32_t write_len, uint32_t name_ptr, uint32_t name_len);
extern int64_t otxn_type(void);
extern int64_t ledger_seq(void);
#define SBUF(x) (uint32_t)(x), sizeof(x)
#define ASSERT(x) if (!(x)) rollback((uint32_t)#x, sizeof(#x), __LINE__)
#define ttPAYMENT 0
#define tfCANONICAL 0x80000000UL
#define amAMOUNT 1
#define amFEE 8
#define atACCOUNT 1
#define atDESTINATION 3
#define ENCODE_TT(buf_out, tt) \
buf_out[0] = 0x12U; \
buf_out[1] = (tt >> 8) & 0xFFU; \
buf_out[2] = tt & 0xFFU; \
buf_out += 3;
#define ENCODE_FLAGS(buf_out, flags) \
buf_out[0] = 0x22U; \
buf_out[1] = (flags >> 24) & 0xFFU; \
buf_out[2] = (flags >> 16) & 0xFFU; \
buf_out[3] = (flags >> 8) & 0xFFU; \
buf_out[4] = flags & 0xFFU; \
buf_out += 5;
#define ENCODE_SEQUENCE(buf_out, seq) \
buf_out[0] = 0x24U; \
buf_out[1] = (seq >> 24) & 0xFFU; \
buf_out[2] = (seq >> 16) & 0xFFU; \
buf_out[3] = (seq >> 8) & 0xFFU; \
buf_out[4] = seq & 0xFFU; \
buf_out += 5;
#define ENCODE_FLS(buf_out, fls) \
buf_out[0] = 0x20U; \
buf_out[1] = 0x1AU; \
buf_out[2] = (fls >> 24) & 0xFFU; \
buf_out[3] = (fls >> 16) & 0xFFU; \
buf_out[4] = (fls >> 8) & 0xFFU; \
buf_out[5] = fls & 0xFFU; \
buf_out += 6;
#define ENCODE_LLS(buf_out, lls) \
buf_out[0] = 0x20U; \
buf_out[1] = 0x1BU; \
buf_out[2] = (lls >> 24) & 0xFFU; \
buf_out[3] = (lls >> 16) & 0xFFU; \
buf_out[4] = (lls >> 8) & 0xFFU; \
buf_out[5] = lls & 0xFFU; \
buf_out += 6;
#define ENCODE_DROPS(buf_out, drops, amt_type) \
buf_out[0] = 0x60U + amt_type; \
buf_out[1] = 0x40U + ((drops >> 56) & 0x3FU); \
buf_out[2] = (drops >> 48) & 0xFFU; \
buf_out[3] = (drops >> 40) & 0xFFU; \
buf_out[4] = (drops >> 32) & 0xFFU; \
buf_out[5] = (drops >> 24) & 0xFFU; \
buf_out[6] = (drops >> 16) & 0xFFU; \
buf_out[7] = (drops >> 8) & 0xFFU; \
buf_out[8] = drops & 0xFFU; \
buf_out += 9;
#define ENCODE_SIGNING_PUBKEY_EMPTY(buf_out) \
buf_out[0] = 0x73U; \
buf_out[1] = 0x00U; \
buf_out += 2;
#define ENCODE_ACCOUNT(buf_out, acc, acc_type) \
buf_out[0] = 0x80U + acc_type; \
buf_out[1] = 0x14U; \
for (int i = 0; i < 20; ++i) buf_out[2+i] = acc[i]; \
buf_out += 22;
#define PREPARE_PAYMENT_SIMPLE_SIZE 270U
int64_t hook(uint32_t reserved) {
_g(1, 1);
if (otxn_type() != ttPAYMENT)
return accept(0, 0, 0);
ASSERT(xport_reserve(1) == 1);
uint8_t dst[20];
int64_t dst_len = otxn_param(SBUF(dst), "DST", 3);
ASSERT(dst_len == 20);
uint8_t acc[20];
ASSERT(hook_account(SBUF(acc)) == 20);
uint32_t cls = (uint32_t)ledger_seq();
uint8_t tx[PREPARE_PAYMENT_SIMPLE_SIZE];
uint8_t* buf = tx;
ENCODE_TT(buf, ttPAYMENT);
ENCODE_FLAGS(buf, tfCANONICAL);
ENCODE_SEQUENCE(buf, 0);
ENCODE_FLS(buf, cls + 1);
ENCODE_LLS(buf, cls + 5);
// sfTicketSequence = UINT32 field 41 = 0x20 0x29
buf[0] = 0x20U; buf[1] = 0x29U;
buf[2] = 0; buf[3] = 0; buf[4] = 0; buf[5] = 1;
buf += 6;
uint64_t drops = 1000000;
ENCODE_DROPS(buf, drops, amAMOUNT);
ENCODE_DROPS(buf, 10, amFEE);
ENCODE_SIGNING_PUBKEY_EMPTY(buf);
ENCODE_ACCOUNT(buf, acc, atACCOUNT);
ENCODE_ACCOUNT(buf, dst, atDESTINATION);
uint8_t hash[32];
int64_t xport_result = xport(SBUF(hash), (uint32_t)tx, buf - tx);
ASSERT(xport_result == 32);
return accept(0, 0, 0);
}
)[test.hook]",
{
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x25U,
0x06U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x00U, 0x01U,
0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x60U, 0x01U,
0x7FU, 0x01U, 0x7EU, 0x60U, 0x04U, 0x7FU, 0x7FU, 0x7FU, 0x7FU, 0x01U,
0x7EU, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x8BU, 0x01U,
0x09U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U, 0x00U,
0x03U, 0x65U, 0x6EU, 0x76U, 0x09U, 0x6FU, 0x74U, 0x78U, 0x6EU, 0x5FU,
0x74U, 0x79U, 0x70U, 0x65U, 0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U,
0x06U, 0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U, 0x03U,
0x65U, 0x6EU, 0x76U, 0x0DU, 0x78U, 0x70U, 0x6FU, 0x72U, 0x74U, 0x5FU,
0x72U, 0x65U, 0x73U, 0x65U, 0x72U, 0x76U, 0x65U, 0x00U, 0x03U, 0x03U,
0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU, 0x6CU, 0x62U, 0x61U,
0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x0AU, 0x6FU,
0x74U, 0x78U, 0x6EU, 0x5FU, 0x70U, 0x61U, 0x72U, 0x61U, 0x6DU, 0x00U,
0x04U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x0CU, 0x68U, 0x6FU, 0x6FU, 0x6BU,
0x5FU, 0x61U, 0x63U, 0x63U, 0x6FU, 0x75U, 0x6EU, 0x74U, 0x00U, 0x05U,
0x03U, 0x65U, 0x6EU, 0x76U, 0x0AU, 0x6CU, 0x65U, 0x64U, 0x67U, 0x65U,
0x72U, 0x5FU, 0x73U, 0x65U, 0x71U, 0x00U, 0x01U, 0x03U, 0x65U, 0x6EU,
0x76U, 0x05U, 0x78U, 0x70U, 0x6FU, 0x72U, 0x74U, 0x00U, 0x04U, 0x03U,
0x02U, 0x01U, 0x03U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U, 0x06U, 0x21U,
0x05U, 0x7FU, 0x01U, 0x41U, 0xE0U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U,
0x41U, 0xD9U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU,
0x7FU, 0x00U, 0x41U, 0xE0U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U,
0x80U, 0x08U, 0x0BU, 0x07U, 0x08U, 0x01U, 0x04U, 0x68U, 0x6FU, 0x6FU,
0x6BU, 0x00U, 0x09U, 0x0AU, 0xC5U, 0x84U, 0x00U, 0x01U, 0xC1U, 0x84U,
0x00U, 0x03U, 0x01U, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x7FU, 0x23U, 0x80U,
0x80U, 0x80U, 0x80U, 0x00U, 0x41U, 0xF0U, 0x02U, 0x6BU, 0x22U, 0x01U,
0x24U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x41U, 0x01U, 0x41U, 0x01U,
0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x02U, 0x40U, 0x02U,
0x40U, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U, 0x00U, 0x50U, 0x0DU, 0x00U,
0x41U, 0x00U, 0x41U, 0x00U, 0x42U, 0x00U, 0x10U, 0x82U, 0x80U, 0x80U,
0x80U, 0x00U, 0x21U, 0x02U, 0x0CU, 0x01U, 0x0BU, 0x02U, 0x40U, 0x41U,
0x01U, 0x10U, 0x83U, 0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x01U, 0x51U,
0x0DU, 0x00U, 0x41U, 0x80U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x16U,
0x42U, 0xDFU, 0x00U, 0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU,
0x0BU, 0x02U, 0x40U, 0x20U, 0x01U, 0x41U, 0xD0U, 0x02U, 0x6AU, 0x41U,
0x14U, 0x41U, 0x96U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x03U, 0x10U,
0x85U, 0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x14U, 0x51U, 0x0DU, 0x00U,
0x41U, 0x9AU, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x0EU, 0x42U, 0xE3U,
0x00U, 0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x02U,
0x40U, 0x20U, 0x01U, 0x41U, 0xB0U, 0x02U, 0x6AU, 0x41U, 0x14U, 0x10U,
0x86U, 0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x14U, 0x51U, 0x0DU, 0x00U,
0x41U, 0xA8U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x1EU, 0x42U, 0xE6U,
0x00U, 0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x10U,
0x87U, 0x80U, 0x80U, 0x80U, 0x00U, 0x21U, 0x02U, 0x20U, 0x01U, 0x41U,
0xCEU, 0x00U, 0x6AU, 0x41U, 0x00U, 0x3BU, 0x01U, 0x00U, 0x20U, 0x01U,
0x41U, 0xC0U, 0x00U, 0x3AU, 0x00U, 0x49U, 0x20U, 0x01U, 0x42U, 0x80U,
0x80U, 0x80U, 0x80U, 0xF0U, 0xC1U, 0x90U, 0xA0U, 0xE8U, 0x00U, 0x37U,
0x00U, 0x41U, 0x20U, 0x01U, 0x42U, 0xA0U, 0xD2U, 0x80U, 0x80U, 0x80U,
0xA0U, 0xC0U, 0xB0U, 0xC0U, 0x00U, 0x37U, 0x00U, 0x39U, 0x20U, 0x01U,
0x41U, 0xA0U, 0x36U, 0x3BU, 0x00U, 0x33U, 0x20U, 0x01U, 0x41U, 0xA0U,
0x34U, 0x3BU, 0x00U, 0x2DU, 0x20U, 0x01U, 0x41U, 0x00U, 0x36U, 0x00U,
0x29U, 0x20U, 0x01U, 0x41U, 0x24U, 0x3AU, 0x00U, 0x28U, 0x20U, 0x01U,
0x42U, 0x92U, 0x80U, 0x80U, 0x90U, 0x82U, 0x10U, 0x37U, 0x03U, 0x20U,
0x20U, 0x01U, 0x41U, 0x00U, 0x36U, 0x01U, 0x4AU, 0x20U, 0x01U, 0x20U,
0x02U, 0xA7U, 0x22U, 0x03U, 0x41U, 0x05U, 0x6AU, 0x22U, 0x04U, 0x3AU,
0x00U, 0x38U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x08U, 0x76U, 0x3AU,
0x00U, 0x37U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x10U, 0x76U, 0x3AU,
0x00U, 0x36U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x18U, 0x76U, 0x3AU,
0x00U, 0x35U, 0x20U, 0x01U, 0x20U, 0x03U, 0x41U, 0x01U, 0x6AU, 0x22U,
0x04U, 0x3AU, 0x00U, 0x32U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x08U,
0x76U, 0x3AU, 0x00U, 0x31U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x10U,
0x76U, 0x3AU, 0x00U, 0x30U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x18U,
0x76U, 0x3AU, 0x00U, 0x2FU, 0x20U, 0x01U, 0x41U, 0xDDU, 0x00U, 0x6AU,
0x20U, 0x01U, 0x29U, 0x03U, 0xB8U, 0x02U, 0x37U, 0x00U, 0x00U, 0x20U,
0x01U, 0x41U, 0xE5U, 0x00U, 0x6AU, 0x20U, 0x01U, 0x41U, 0xB0U, 0x02U,
0x6AU, 0x41U, 0x10U, 0x6AU, 0x28U, 0x02U, 0x00U, 0x36U, 0x00U, 0x00U,
0x20U, 0x01U, 0x41U, 0xF3U, 0x00U, 0x6AU, 0x20U, 0x01U, 0x29U, 0x03U,
0xD8U, 0x02U, 0x37U, 0x00U, 0x00U, 0x20U, 0x01U, 0x41U, 0xFBU, 0x00U,
0x6AU, 0x20U, 0x01U, 0x41U, 0xD0U, 0x02U, 0x6AU, 0x41U, 0x10U, 0x6AU,
0x28U, 0x02U, 0x00U, 0x36U, 0x00U, 0x00U, 0x20U, 0x01U, 0x41U, 0x14U,
0x3AU, 0x00U, 0x54U, 0x20U, 0x01U, 0x41U, 0x8AU, 0xE6U, 0x81U, 0x88U,
0x78U, 0x36U, 0x02U, 0x50U, 0x20U, 0x01U, 0x41U, 0x83U, 0x29U, 0x3BU,
0x00U, 0x69U, 0x20U, 0x01U, 0x20U, 0x01U, 0x29U, 0x03U, 0xB0U, 0x02U,
0x37U, 0x00U, 0x55U, 0x20U, 0x01U, 0x20U, 0x01U, 0x29U, 0x03U, 0xD0U,
0x02U, 0x37U, 0x00U, 0x6BU, 0x02U, 0x40U, 0x20U, 0x01U, 0x41U, 0x20U,
0x20U, 0x01U, 0x41U, 0x20U, 0x6AU, 0x41U, 0xDFU, 0x00U, 0x10U, 0x88U,
0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x20U, 0x51U, 0x0DU, 0x00U, 0x41U,
0xC6U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x13U, 0x42U, 0x81U, 0x01U,
0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x41U, 0x00U,
0x41U, 0x00U, 0x42U, 0x00U, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U,
0x21U, 0x02U, 0x0BU, 0x20U, 0x01U, 0x41U, 0xF0U, 0x02U, 0x6AU, 0x24U,
0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x20U, 0x02U, 0x0BU, 0x0BU, 0x60U,
0x01U, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x59U, 0x78U, 0x70U, 0x6FU,
0x72U, 0x74U, 0x5FU, 0x72U, 0x65U, 0x73U, 0x65U, 0x72U, 0x76U, 0x65U,
0x28U, 0x31U, 0x29U, 0x20U, 0x3DU, 0x3DU, 0x20U, 0x31U, 0x00U, 0x44U,
0x53U, 0x54U, 0x00U, 0x64U, 0x73U, 0x74U, 0x5FU, 0x6CU, 0x65U, 0x6EU,
0x20U, 0x3DU, 0x3DU, 0x20U, 0x32U, 0x30U, 0x00U, 0x68U, 0x6FU, 0x6FU,
0x6BU, 0x5FU, 0x61U, 0x63U, 0x63U, 0x6FU, 0x75U, 0x6EU, 0x74U, 0x28U,
0x53U, 0x42U, 0x55U, 0x46U, 0x28U, 0x61U, 0x63U, 0x63U, 0x29U, 0x29U,
0x20U, 0x3DU, 0x3DU, 0x20U, 0x32U, 0x30U, 0x00U, 0x78U, 0x70U, 0x6FU,
0x72U, 0x74U, 0x5FU, 0x72U, 0x65U, 0x73U, 0x75U, 0x6CU, 0x74U, 0x20U,
0x3DU, 0x3DU, 0x20U, 0x33U, 0x32U, 0x00U,
}},
/* ==== WASM: 1 ==== */
{R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t id, uint32_t maxiter);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t xport(uint32_t write_ptr, uint32_t write_len, uint32_t read_ptr, uint32_t read_len);
extern int64_t xport_reserve(uint32_t count);
extern int64_t hook_account(uint32_t write_ptr, uint32_t write_len);
extern int64_t otxn_param(uint32_t write_ptr, uint32_t write_len, uint32_t name_ptr, uint32_t name_len);
extern int64_t otxn_type(void);
extern int64_t ledger_seq(void);
#define SBUF(x) (uint32_t)(x), sizeof(x)
#define ASSERT(x) if (!(x)) rollback((uint32_t)#x, sizeof(#x), __LINE__)
#define ttPAYMENT 0
#define tfCANONICAL 0x80000000UL
#define amAMOUNT 1
#define amFEE 8
#define atACCOUNT 1
#define atDESTINATION 3
#define ENCODE_TT(buf_out, tt) \
buf_out[0] = 0x12U; \
buf_out[1] = (tt >> 8) & 0xFFU; \
buf_out[2] = tt & 0xFFU; \
buf_out += 3;
#define ENCODE_FLAGS(buf_out, flags) \
buf_out[0] = 0x22U; \
buf_out[1] = (flags >> 24) & 0xFFU; \
buf_out[2] = (flags >> 16) & 0xFFU; \
buf_out[3] = (flags >> 8) & 0xFFU; \
buf_out[4] = flags & 0xFFU; \
buf_out += 5;
#define ENCODE_SEQUENCE(buf_out, seq) \
buf_out[0] = 0x24U; \
buf_out[1] = (seq >> 24) & 0xFFU; \
buf_out[2] = (seq >> 16) & 0xFFU; \
buf_out[3] = (seq >> 8) & 0xFFU; \
buf_out[4] = seq & 0xFFU; \
buf_out += 5;
// sfNetworkID = UINT32 field 1 = 0x21
#define ENCODE_NETWORK_ID(buf_out, id) \
buf_out[0] = 0x21U; \
buf_out[1] = (id >> 24) & 0xFFU; \
buf_out[2] = (id >> 16) & 0xFFU; \
buf_out[3] = (id >> 8) & 0xFFU; \
buf_out[4] = id & 0xFFU; \
buf_out += 5;
#define ENCODE_FLS(buf_out, fls) \
buf_out[0] = 0x20U; \
buf_out[1] = 0x1AU; \
buf_out[2] = (fls >> 24) & 0xFFU; \
buf_out[3] = (fls >> 16) & 0xFFU; \
buf_out[4] = (fls >> 8) & 0xFFU; \
buf_out[5] = fls & 0xFFU; \
buf_out += 6;
#define ENCODE_LLS(buf_out, lls) \
buf_out[0] = 0x20U; \
buf_out[1] = 0x1BU; \
buf_out[2] = (lls >> 24) & 0xFFU; \
buf_out[3] = (lls >> 16) & 0xFFU; \
buf_out[4] = (lls >> 8) & 0xFFU; \
buf_out[5] = lls & 0xFFU; \
buf_out += 6;
#define ENCODE_DROPS(buf_out, drops, amt_type) \
buf_out[0] = 0x60U + amt_type; \
buf_out[1] = 0x40U + ((drops >> 56) & 0x3FU); \
buf_out[2] = (drops >> 48) & 0xFFU; \
buf_out[3] = (drops >> 40) & 0xFFU; \
buf_out[4] = (drops >> 32) & 0xFFU; \
buf_out[5] = (drops >> 24) & 0xFFU; \
buf_out[6] = (drops >> 16) & 0xFFU; \
buf_out[7] = (drops >> 8) & 0xFFU; \
buf_out[8] = drops & 0xFFU; \
buf_out += 9;
#define ENCODE_SIGNING_PUBKEY_EMPTY(buf_out) \
buf_out[0] = 0x73U; \
buf_out[1] = 0x00U; \
buf_out += 2;
#define ENCODE_ACCOUNT(buf_out, acc, acc_type) \
buf_out[0] = 0x80U + acc_type; \
buf_out[1] = 0x14U; \
for (int i = 0; i < 20; ++i) buf_out[2+i] = acc[i]; \
buf_out += 22;
#define PREPARE_PAYMENT_SIMPLE_SIZE 270U
int64_t hook(uint32_t reserved) {
_g(1, 1);
if (otxn_type() != ttPAYMENT)
return accept(0, 0, 0);
ASSERT(xport_reserve(1) == 1);
uint8_t dst[20];
int64_t dst_len = otxn_param(SBUF(dst), "DST", 3);
ASSERT(dst_len == 20);
uint8_t acc[20];
ASSERT(hook_account(SBUF(acc)) == 20);
uint32_t cls = (uint32_t)ledger_seq();
uint8_t tx[PREPARE_PAYMENT_SIMPLE_SIZE];
uint8_t* buf = tx;
ENCODE_TT(buf, ttPAYMENT);
ENCODE_NETWORK_ID(buf, 21337); // must precede Sequence (canonical order)
ENCODE_FLAGS(buf, tfCANONICAL);
ENCODE_SEQUENCE(buf, 0);
ENCODE_FLS(buf, cls + 1);
ENCODE_LLS(buf, cls + 5);
uint64_t drops = 1000000;
ENCODE_DROPS(buf, drops, amAMOUNT);
ENCODE_DROPS(buf, 10, amFEE);
ENCODE_SIGNING_PUBKEY_EMPTY(buf);
ENCODE_ACCOUNT(buf, acc, atACCOUNT);
ENCODE_ACCOUNT(buf, dst, atDESTINATION);
uint8_t hash[32];
int64_t xport_result = xport(SBUF(hash), (uint32_t)tx, buf - tx);
// xport should return EXPORT_FAILURE (-46), ASSERT will rollback
ASSERT(xport_result == 32);
return accept(0, 0, 0);
}
)[test.hook]",
{
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x25U,
0x06U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x00U, 0x01U,
0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x60U, 0x01U,
0x7FU, 0x01U, 0x7EU, 0x60U, 0x04U, 0x7FU, 0x7FU, 0x7FU, 0x7FU, 0x01U,
0x7EU, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x8BU, 0x01U,
0x09U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U, 0x00U,
0x03U, 0x65U, 0x6EU, 0x76U, 0x09U, 0x6FU, 0x74U, 0x78U, 0x6EU, 0x5FU,
0x74U, 0x79U, 0x70U, 0x65U, 0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U,
0x06U, 0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U, 0x03U,
0x65U, 0x6EU, 0x76U, 0x0DU, 0x78U, 0x70U, 0x6FU, 0x72U, 0x74U, 0x5FU,
0x72U, 0x65U, 0x73U, 0x65U, 0x72U, 0x76U, 0x65U, 0x00U, 0x03U, 0x03U,
0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU, 0x6CU, 0x62U, 0x61U,
0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x0AU, 0x6FU,
0x74U, 0x78U, 0x6EU, 0x5FU, 0x70U, 0x61U, 0x72U, 0x61U, 0x6DU, 0x00U,
0x04U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x0CU, 0x68U, 0x6FU, 0x6FU, 0x6BU,
0x5FU, 0x61U, 0x63U, 0x63U, 0x6FU, 0x75U, 0x6EU, 0x74U, 0x00U, 0x05U,
0x03U, 0x65U, 0x6EU, 0x76U, 0x0AU, 0x6CU, 0x65U, 0x64U, 0x67U, 0x65U,
0x72U, 0x5FU, 0x73U, 0x65U, 0x71U, 0x00U, 0x01U, 0x03U, 0x65U, 0x6EU,
0x76U, 0x05U, 0x78U, 0x70U, 0x6FU, 0x72U, 0x74U, 0x00U, 0x04U, 0x03U,
0x02U, 0x01U, 0x03U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U, 0x06U, 0x21U,
0x05U, 0x7FU, 0x01U, 0x41U, 0xE0U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U,
0x41U, 0xD9U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU,
0x7FU, 0x00U, 0x41U, 0xE0U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U,
0x80U, 0x08U, 0x0BU, 0x07U, 0x08U, 0x01U, 0x04U, 0x68U, 0x6FU, 0x6FU,
0x6BU, 0x00U, 0x09U, 0x0AU, 0xCDU, 0x84U, 0x00U, 0x01U, 0xC9U, 0x84U,
0x00U, 0x03U, 0x01U, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x7FU, 0x23U, 0x80U,
0x80U, 0x80U, 0x80U, 0x00U, 0x41U, 0xF0U, 0x02U, 0x6BU, 0x22U, 0x01U,
0x24U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x41U, 0x01U, 0x41U, 0x01U,
0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x02U, 0x40U, 0x02U,
0x40U, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U, 0x00U, 0x50U, 0x0DU, 0x00U,
0x41U, 0x00U, 0x41U, 0x00U, 0x42U, 0x00U, 0x10U, 0x82U, 0x80U, 0x80U,
0x80U, 0x00U, 0x21U, 0x02U, 0x0CU, 0x01U, 0x0BU, 0x02U, 0x40U, 0x41U,
0x01U, 0x10U, 0x83U, 0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x01U, 0x51U,
0x0DU, 0x00U, 0x41U, 0x80U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x16U,
0x42U, 0xE8U, 0x00U, 0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU,
0x0BU, 0x02U, 0x40U, 0x20U, 0x01U, 0x41U, 0xD0U, 0x02U, 0x6AU, 0x41U,
0x14U, 0x41U, 0x96U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x03U, 0x10U,
0x85U, 0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x14U, 0x51U, 0x0DU, 0x00U,
0x41U, 0x9AU, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x0EU, 0x42U, 0xECU,
0x00U, 0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x02U,
0x40U, 0x20U, 0x01U, 0x41U, 0xB0U, 0x02U, 0x6AU, 0x41U, 0x14U, 0x10U,
0x86U, 0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x14U, 0x51U, 0x0DU, 0x00U,
0x41U, 0xA8U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x1EU, 0x42U, 0xEFU,
0x00U, 0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x10U,
0x87U, 0x80U, 0x80U, 0x80U, 0x00U, 0x21U, 0x02U, 0x20U, 0x01U, 0x41U,
0xC0U, 0x00U, 0x3AU, 0x00U, 0x48U, 0x20U, 0x01U, 0x42U, 0x80U, 0x80U,
0x80U, 0x80U, 0xF0U, 0xC1U, 0x90U, 0xA0U, 0xE8U, 0x00U, 0x37U, 0x03U,
0x40U, 0x20U, 0x01U, 0x41U, 0xE1U, 0x80U, 0x01U, 0x3BU, 0x01U, 0x3EU,
0x20U, 0x01U, 0x41U, 0xA0U, 0x36U, 0x3BU, 0x01U, 0x38U, 0x20U, 0x01U,
0x41U, 0xA0U, 0x34U, 0x3BU, 0x01U, 0x32U, 0x20U, 0x01U, 0x41U, 0x00U,
0x36U, 0x01U, 0x2EU, 0x20U, 0x01U, 0x41U, 0x80U, 0xC8U, 0x00U, 0x3BU,
0x01U, 0x2CU, 0x20U, 0x01U, 0x41U, 0xA2U, 0x80U, 0x02U, 0x36U, 0x02U,
0x28U, 0x20U, 0x01U, 0x42U, 0x92U, 0x80U, 0x80U, 0x88U, 0x82U, 0x80U,
0xC0U, 0xA9U, 0xD9U, 0x00U, 0x37U, 0x03U, 0x20U, 0x20U, 0x01U, 0x20U,
0x02U, 0xA7U, 0x22U, 0x03U, 0x41U, 0x05U, 0x6AU, 0x22U, 0x04U, 0x3AU,
0x00U, 0x3DU, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x08U, 0x76U, 0x3AU,
0x00U, 0x3CU, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x10U, 0x76U, 0x3AU,
0x00U, 0x3BU, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x18U, 0x76U, 0x3AU,
0x00U, 0x3AU, 0x20U, 0x01U, 0x20U, 0x03U, 0x41U, 0x01U, 0x6AU, 0x22U,
0x04U, 0x3AU, 0x00U, 0x37U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x08U,
0x76U, 0x3AU, 0x00U, 0x36U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x10U,
0x76U, 0x3AU, 0x00U, 0x35U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x18U,
0x76U, 0x3AU, 0x00U, 0x34U, 0x20U, 0x01U, 0x41U, 0xCDU, 0x00U, 0x6AU,
0x41U, 0x00U, 0x3BU, 0x00U, 0x00U, 0x20U, 0x01U, 0x41U, 0xDCU, 0x00U,
0x6AU, 0x20U, 0x01U, 0x29U, 0x03U, 0xB8U, 0x02U, 0x37U, 0x02U, 0x00U,
0x20U, 0x01U, 0x41U, 0xE4U, 0x00U, 0x6AU, 0x20U, 0x01U, 0x41U, 0xB0U,
0x02U, 0x6AU, 0x41U, 0x10U, 0x6AU, 0x28U, 0x02U, 0x00U, 0x36U, 0x02U,
0x00U, 0x20U, 0x01U, 0x41U, 0xF2U, 0x00U, 0x6AU, 0x20U, 0x01U, 0x29U,
0x03U, 0xD8U, 0x02U, 0x37U, 0x01U, 0x00U, 0x20U, 0x01U, 0x41U, 0xFAU,
0x00U, 0x6AU, 0x20U, 0x01U, 0x41U, 0xD0U, 0x02U, 0x6AU, 0x41U, 0x10U,
0x6AU, 0x28U, 0x02U, 0x00U, 0x36U, 0x01U, 0x00U, 0x20U, 0x01U, 0x41U,
0x00U, 0x36U, 0x00U, 0x49U, 0x20U, 0x01U, 0x41U, 0x8AU, 0xE6U, 0x81U,
0x88U, 0x78U, 0x36U, 0x00U, 0x4FU, 0x20U, 0x01U, 0x41U, 0x14U, 0x3AU,
0x00U, 0x53U, 0x20U, 0x01U, 0x41U, 0x83U, 0x29U, 0x3BU, 0x01U, 0x68U,
0x20U, 0x01U, 0x20U, 0x01U, 0x29U, 0x03U, 0xB0U, 0x02U, 0x37U, 0x02U,
0x54U, 0x20U, 0x01U, 0x20U, 0x01U, 0x29U, 0x03U, 0xD0U, 0x02U, 0x37U,
0x01U, 0x6AU, 0x02U, 0x40U, 0x20U, 0x01U, 0x41U, 0x20U, 0x20U, 0x01U,
0x41U, 0x20U, 0x6AU, 0x41U, 0xDEU, 0x00U, 0x10U, 0x88U, 0x80U, 0x80U,
0x80U, 0x00U, 0x42U, 0x20U, 0x51U, 0x0DU, 0x00U, 0x41U, 0xC6U, 0x88U,
0x80U, 0x80U, 0x00U, 0x41U, 0x13U, 0x42U, 0x88U, 0x01U, 0x10U, 0x84U,
0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x41U, 0x00U, 0x41U, 0x00U,
0x42U, 0x00U, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x21U, 0x02U,
0x0BU, 0x20U, 0x01U, 0x41U, 0xF0U, 0x02U, 0x6AU, 0x24U, 0x80U, 0x80U,
0x80U, 0x80U, 0x00U, 0x20U, 0x02U, 0x0BU, 0x0BU, 0x60U, 0x01U, 0x00U,
0x41U, 0x80U, 0x08U, 0x0BU, 0x59U, 0x78U, 0x70U, 0x6FU, 0x72U, 0x74U,
0x5FU, 0x72U, 0x65U, 0x73U, 0x65U, 0x72U, 0x76U, 0x65U, 0x28U, 0x31U,
0x29U, 0x20U, 0x3DU, 0x3DU, 0x20U, 0x31U, 0x00U, 0x44U, 0x53U, 0x54U,
0x00U, 0x64U, 0x73U, 0x74U, 0x5FU, 0x6CU, 0x65U, 0x6EU, 0x20U, 0x3DU,
0x3DU, 0x20U, 0x32U, 0x30U, 0x00U, 0x68U, 0x6FU, 0x6FU, 0x6BU, 0x5FU,
0x61U, 0x63U, 0x63U, 0x6FU, 0x75U, 0x6EU, 0x74U, 0x28U, 0x53U, 0x42U,
0x55U, 0x46U, 0x28U, 0x61U, 0x63U, 0x63U, 0x29U, 0x29U, 0x20U, 0x3DU,
0x3DU, 0x20U, 0x32U, 0x30U, 0x00U, 0x78U, 0x70U, 0x6FU, 0x72U, 0x74U,
0x5FU, 0x72U, 0x65U, 0x73U, 0x75U, 0x6CU, 0x74U, 0x20U, 0x3DU, 0x3DU,
0x20U, 0x33U, 0x32U, 0x00U,
}},
};
}
} // namespace ripple
#endif

View File

@@ -1,301 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2026 XRPL Labs
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <test/jtx.h>
#include <test/jtx/import.h>
#include <test/jtx/xpop.h>
#include <xrpld/app/ledger/LedgerMaster.h>
#include <xrpld/app/proof/LedgerProof.h>
#include <xrpld/app/proof/ProofBuilder.h>
#include <xrpld/app/proof/XPOPv1.h>
#include <xrpl/protocol/Import.h>
#include <xrpl/protocol/jss.h>
namespace ripple {
namespace test {
struct XPOP_test : public beast::unit_test::suite
{
void
testBuildLedgerProof()
{
testcase("Build LedgerProof from a payment");
using namespace jtx;
Env env{*this};
Account const alice{"alice"};
Account const bob{"bob"};
env.fund(XRP(10000), alice, bob);
env.close();
// Submit a payment and close the ledger.
env(pay(alice, bob, XRP(100)));
env.close();
// Get the tx hash from the last closed ledger.
auto const lcl = env.app().getLedgerMaster().getClosedLedger();
BEAST_EXPECT(lcl);
// Find a payment tx in the ledger.
uint256 paymentHash;
bool found = false;
lcl->txMap().visitLeaves(
[&](boost::intrusive_ptr<SHAMapItem const> const& item) {
if (!found)
{
paymentHash = item->key();
found = true;
}
});
BEAST_EXPECT(found);
// Build the proof.
auto const lp = proof::buildLedgerProof(*lcl, paymentHash);
BEAST_EXPECT(lp.has_value());
if (lp)
{
// Verify header fields are populated.
BEAST_EXPECT(lp->ledgerIndex > 0);
BEAST_EXPECT(lp->totalCoins > 0);
BEAST_EXPECT(lp->parentHash != uint256{});
BEAST_EXPECT(lp->txRoot != uint256{});
BEAST_EXPECT(lp->accountRoot != uint256{});
// Verify tx blob is non-empty.
BEAST_EXPECT(!lp->txBlob.empty());
BEAST_EXPECT(!lp->metaBlob.empty());
// Verify merkle proof exists and is valid.
BEAST_EXPECT(lp->txProof.has_value());
if (lp->txProof)
{
auto const computedRoot = lp->txProof->computeRoot();
BEAST_EXPECT(computedRoot.has_value());
if (computedRoot)
BEAST_EXPECT(*computedRoot == lp->txRoot);
}
// Verify ledger hash reconstruction.
auto const computedHash = lp->computeLedgerHash();
BEAST_EXPECT(computedHash == lcl->info().hash);
}
}
void
testBuildXPOPv1()
{
testcase("Build XPOP v1 JSON from a payment");
using namespace jtx;
Env env{*this};
Account const alice{"alice"};
Account const bob{"bob"};
env.fund(XRP(10000), alice, bob);
env.close();
env(pay(alice, bob, XRP(100)));
env.close();
auto const lcl = env.app().getLedgerMaster().getClosedLedger();
BEAST_EXPECT(lcl);
// Find a tx.
uint256 txHash;
lcl->txMap().visitLeaves(
[&](boost::intrusive_ptr<SHAMapItem const> const& item) {
txHash = item->key();
});
// Build XPOP using the test helper.
auto const xpop = xpop::buildTestXPOP(env, txHash, 3);
BEAST_EXPECT(!xpop.isNull());
// Verify structure.
BEAST_EXPECT(xpop.isMember(jss::ledger));
BEAST_EXPECT(xpop.isMember(jss::transaction));
BEAST_EXPECT(xpop.isMember(jss::validation));
// Ledger section.
auto const& lgr = xpop[jss::ledger];
BEAST_EXPECT(lgr.isMember(jss::index));
BEAST_EXPECT(lgr.isMember(jss::coins));
BEAST_EXPECT(lgr.isMember(jss::phash));
BEAST_EXPECT(lgr.isMember(jss::txroot));
BEAST_EXPECT(lgr.isMember(jss::acroot));
BEAST_EXPECT(lgr.isMember(jss::close));
BEAST_EXPECT(lgr.isMember(jss::pclose));
BEAST_EXPECT(lgr.isMember(jss::cres));
BEAST_EXPECT(lgr.isMember(jss::flags));
// Transaction section.
auto const& txn = xpop[jss::transaction];
BEAST_EXPECT(txn.isMember(jss::blob));
BEAST_EXPECT(txn.isMember(jss::meta));
BEAST_EXPECT(txn.isMember(jss::proof));
BEAST_EXPECT(txn[jss::blob].asString().size() > 0);
BEAST_EXPECT(txn[jss::meta].asString().size() > 0);
// Validation section.
auto const& val = xpop[jss::validation];
BEAST_EXPECT(val.isMember(jss::data));
BEAST_EXPECT(val.isMember(jss::unl));
BEAST_EXPECT(val[jss::data].size() == 3); // 3 validators
auto const& unl = val[jss::unl];
BEAST_EXPECT(unl.isMember(jss::public_key));
BEAST_EXPECT(unl.isMember(jss::manifest));
BEAST_EXPECT(unl.isMember(jss::blob));
BEAST_EXPECT(unl.isMember(jss::signature));
BEAST_EXPECT(unl.isMember(jss::version));
}
void
testMerkleProofVerification()
{
testcase("Merkle proof verifies against tx root");
using namespace jtx;
Env env{*this};
Account const alice{"alice"};
Account const bob{"bob"};
Account const carol{"carol"};
env.fund(XRP(10000), alice, bob, carol);
env.close();
// Multiple transactions to create a deeper trie.
env(pay(alice, bob, XRP(10)));
env(pay(bob, carol, XRP(5)));
env(pay(carol, alice, XRP(1)));
env.close();
auto const lcl = env.app().getLedgerMaster().getClosedLedger();
BEAST_EXPECT(lcl);
// Verify proof for each transaction in the ledger.
int proofCount = 0;
lcl->txMap().visitLeaves(
[&](boost::intrusive_ptr<SHAMapItem const> const& item) {
auto const lp = proof::buildLedgerProof(*lcl, item->key());
BEAST_EXPECT(lp.has_value());
if (lp && lp->txProof)
{
// Proof must verify against the ledger's tx root.
BEAST_EXPECT(lp->txProof->verify(lp->txRoot));
// JSON v1 serialization must round-trip.
auto const json = lp->txProof->toJsonV1();
BEAST_EXPECT(!json.isNull());
BEAST_EXPECT(json.isArray());
++proofCount;
}
});
// We should have proven at least 3 transactions.
BEAST_EXPECT(proofCount >= 3);
}
void
testImportWithGeneratedXPOP()
{
testcase("Import accepts dynamically generated XPOP");
using namespace jtx;
// Create XPOP context (VL publisher + validators).
auto const xpopCtx = xpop::TestXPOPContext::create(3);
// --- Source "network": generate a payment and build XPOP ---
Env srcEnv{*this};
Account const alice{"alice"};
Account const bob{"bob"};
srcEnv.fund(XRP(10000), alice, bob);
srcEnv.close();
// Import requires: no sfNetworkID + sfOperationLimit = dest NETWORK_ID.
Json::Value payTx;
payTx[jss::TransactionType] = jss::Payment;
payTx[jss::Account] = alice.human();
payTx[jss::Destination] = bob.human();
payTx[jss::Amount] = "100000000";
payTx[sfOperationLimit.jsonName] = 21337;
srcEnv(payTx, fee(XRP(1)));
srcEnv.close();
// Find the tx hash and build the XPOP.
auto const srcLcl = srcEnv.app().getLedgerMaster().getClosedLedger();
BEAST_EXPECT(srcLcl);
uint256 paymentHash;
srcLcl->txMap().visitLeaves(
[&](boost::intrusive_ptr<SHAMapItem const> const& item) {
paymentHash = item->key();
});
auto const xpopJson = xpopCtx.buildXPOP(*srcLcl, paymentHash);
BEAST_EXPECT(!xpopJson.isNull());
// --- Destination "network": import the XPOP ---
Env dstEnv{*this, xpopCtx.makeEnvConfig(21337)};
// Burn some XRP so B2M can credit.
auto const master = Account("masterpassphrase");
dstEnv(noop(master), fee(10'000'000'000), ter(tesSUCCESS));
dstEnv.close();
Account const importAlice{"alice"};
dstEnv.fund(XRP(1000), importAlice);
dstEnv.close();
auto const feeDrops = dstEnv.current()->fees().base;
// Submit the import — should succeed (B2M path).
dstEnv(
import::import(importAlice, xpopJson),
fee(feeDrops * 10),
ter(tesSUCCESS));
dstEnv.close();
}
void
run() override
{
testBuildLedgerProof();
testBuildXPOPv1();
testMerkleProofVerification();
testImportWithGeneratedXPOP();
}
};
BEAST_DEFINE_TESTSUITE(XPOP, app, ripple);
} // namespace test
} // namespace ripple

View File

@@ -140,8 +140,7 @@ struct XahauGenesis_test : public beast::unit_test::suite
bool skipTests = false,
bool const testFlag = false,
bool const badNetID = false,
uint32_t const expectedOwnerCount =
10 /** testFlag ? 10 : 14 (default) */)
uint32_t const expectedOwnerCount = 14 /** case for testFlag=false */)
{
using namespace jtx;
@@ -250,9 +249,7 @@ struct XahauGenesis_test : public beast::unit_test::suite
genesisAccRoot->getFieldAmount(sfBalance) ==
XahauGenesis::GenesisAmount);
BEAST_EXPECT(
genesisAccRoot->getFieldU32(sfOwnerCount) == !testFlag
? expectedOwnerCount
: 14);
genesisAccRoot->getFieldU32(sfOwnerCount) == expectedOwnerCount);
// ensure the definitions are correctly set
{
@@ -595,7 +592,8 @@ struct XahauGenesis_test : public beast::unit_test::suite
false,
true,
{},
3 /* IRR,IRD,IMC */ + members.size() + tables.size());
2 /*Hook objects *2 */ + 3 /* IRR,IRD,IMC HookStates */ +
members.size());
env.close();
env.close();
@@ -2327,7 +2325,7 @@ struct XahauGenesis_test : public beast::unit_test::suite
{
BEAST_EXPECT(
root->getFieldU32(sfOwnerCount) ==
mc * 2 + 2 + paramsCount);
(mc * 2 + 2 + paramsCount));
BEAST_EXPECT(root->getFieldU32(sfFlags) & lsfDisableMaster);
BEAST_EXPECT(root->getAccountID(sfRegularKey) == noAccount());
}

View File

@@ -1,732 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <test/jtx.h>
#include <xrpld/app/consensus/ConsensusExtensions.h>
#include <xrpld/app/ledger/Ledger.h>
#include <xrpld/app/misc/ValidatorKeys.h>
#include <xrpld/consensus/ConsensusExtensionsTick.h>
#include <xrpld/consensus/ConsensusProposal.h>
#include <xrpl/basics/StringUtilities.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/digest.h>
#include <cstring>
namespace ripple {
namespace test {
namespace {
uint256
makeHash(char const* label)
{
return sha512Half(Slice(label, std::strlen(label)));
}
NodeID
makeNode(std::uint8_t id)
{
NodeID node;
node.zero();
node.data()[NodeID::size() - 1] = id;
return node;
}
std::string
makeExportSigBlob(uint256 const& txHash, PublicKey const& publicKey)
{
std::string blob;
blob.append(reinterpret_cast<char const*>(txHash.data()), uint256::size());
blob.append(
reinterpret_cast<char const*>(publicKey.data()), publicKey.size());
blob.push_back('\x30');
return blob;
}
struct FakeTxSet
{
using ID = uint256;
uint256 hash;
uint256
id() const
{
return hash;
}
};
class FakePeerPosition
{
public:
using Proposal = ConsensusProposal<NodeID, uint256, ExtendedPosition>;
FakePeerPosition(NodeID const& nodeId, ExtendedPosition const& position)
: proposal_(
uint256{},
Proposal::seqJoin,
position,
NetClock::time_point{},
NetClock::time_point{},
nodeId)
{
}
Proposal const&
proposal() const
{
return proposal_;
}
private:
Proposal proposal_;
};
struct FakeExtensions
{
enum class SidecarKind : uint8_t { commit, reveal, exportSig };
beast::Journal j_{beast::Journal::getNullSink()};
EstablishState estState_{EstablishState::ConvergingTx};
std::chrono::steady_clock::time_point revealPhaseStart_{};
std::chrono::steady_clock::time_point commitHashConflictStart_{};
bool explicitFinalProposalSent_{false};
bool entropySetPublished_{false};
std::chrono::steady_clock::time_point entropyPublishStart_{};
bool exportSigGateStarted_{false};
std::chrono::steady_clock::time_point exportSigGateStart_{};
bool exportSigConvergenceFailed_{false};
bool rngOn{false};
bool localExportSigs{true};
bool consensusExportTxns{false};
bool exportOn{true};
bool entropyFailed{false};
std::size_t exportQuorum{4};
uint256 exportHash{makeHash("local-export-sig-set")};
uint256 entropyHash{makeHash("local-entropy-set")};
std::vector<uint256> fetchedExportSets;
std::vector<uint256> fetchedEntropySets;
int exportBuilds = 0;
int entropyBuilds = 0;
bool
rngEnabled() const
{
return rngOn;
}
bool
exportEnabled() const
{
return exportOn;
}
std::size_t
quorumThreshold() const
{
return exportQuorum;
}
std::size_t
exportSigQuorumThreshold() const
{
return exportQuorum;
}
std::size_t
pendingCommitCount() const
{
return rngOn ? exportQuorum : 0;
}
std::size_t
pendingRevealCount() const
{
return rngOn ? exportQuorum : 0;
}
std::size_t
expectedProposerCount() const
{
return 0;
}
bool
hasQuorumOfCommits() const
{
return rngOn;
}
bool
hasMinimumReveals() const
{
return rngOn;
}
bool
hasAnyReveals() const
{
return rngOn;
}
uint256
buildCommitSet(LedgerIndex)
{
return makeHash("commit-set");
}
uint256
buildEntropySet(LedgerIndex)
{
++entropyBuilds;
return entropyHash;
}
uint256
getEntropySecret() const
{
return makeHash("entropy-secret");
}
void
selfSeedReveal()
{
}
void
setEntropyFailed()
{
entropyFailed = true;
}
void
fetchRngSetIfNeeded(std::optional<uint256> const& hash, SidecarKind kind)
{
if (kind == SidecarKind::reveal && hash)
fetchedEntropySets.push_back(*hash);
else if (kind == SidecarKind::exportSig && hash)
fetchedExportSets.push_back(*hash);
}
bool
shouldSendExplicitFinalProposal() const
{
return false;
}
std::optional<FakeTxSet>
buildExplicitFinalProposalTxSet(FakeTxSet const&, LedgerIndex)
{
return std::nullopt;
}
bool
hasPendingExportSigs() const
{
return localExportSigs;
}
bool
hasConsensusExportTxns() const
{
return consensusExportTxns;
}
uint256
buildExportSigSet(LedgerIndex)
{
++exportBuilds;
return exportHash;
}
void
setExportSigConvergenceFailed()
{
exportSigConvergenceFailed_ = true;
}
};
struct ExportTickHarness
{
ExtendedPosition position{makeHash("tx-set")};
FakeTxSet txns{position.txSetHash};
hash_map<NodeID, FakePeerPosition> peers;
ConsensusParms parms;
NetClock::time_point netNow{NetClock::duration{123}};
std::chrono::steady_clock::time_point start{};
std::size_t prevProposers = 4;
int updates = 0;
int proposes = 0;
void
addPeer(
std::uint8_t id,
std::optional<uint256> exportSigSetHash,
uint256 txSetHash = makeHash("tx-set"))
{
ExtendedPosition peerPosition{txSetHash};
peerPosition.exportSigSetHash = exportSigSetHash;
peers.emplace(
makeNode(id), FakePeerPosition{makeNode(id), peerPosition});
}
void
addEntropyPeer(
std::uint8_t id,
std::optional<uint256> entropySetHash,
uint256 txSetHash = makeHash("tx-set"))
{
ExtendedPosition peerPosition{txSetHash};
peerPosition.entropySetHash = entropySetHash;
peers.emplace(
makeNode(id), FakePeerPosition{makeNode(id), peerPosition});
}
ExtensionTickResult
tick(FakeExtensions& ext, std::chrono::milliseconds elapsed = {})
{
ConsensusTick<ExtendedPosition, FakePeerPosition, FakeTxSet> ctx{
.buildSeq = 2,
.now = netNow,
.nowSteady = start + elapsed,
.roundTime = elapsed,
.mode = ConsensusMode::proposing,
.prevProposers = prevProposers,
.peerPositions = peers,
.parms = parms,
.haveCloseTimeConsensus = true,
.convergePercent = 100,
.j = beast::Journal{beast::Journal::getNullSink()},
.getPosition = [&]() -> ExtendedPosition const& {
return position;
},
.updatePosition =
[&](ExtendedPosition const& newPosition) {
position = newPosition;
++updates;
},
.propose = [&]() { ++proposes; },
.haveConsensus = []() { return true; },
.cacheAndShareTxSet = [](FakeTxSet const&) {},
.getTxns = [&]() -> FakeTxSet const& { return txns; }};
return extensionsTick(ext, ctx);
}
};
} // namespace
class ConsensusExtensions_test : public beast::unit_test::suite
{
std::vector<PublicKey>
makeValidatorKeys() const
{
std::vector<std::string> const rawKeys = {
"0388935426E0D08083314842EDFBB2D517BD47699F9A4527318A8E10468C97C05"
"2",
"02691AC5AE1C4C333AE5DF8A93BDC495F0EEBFC6DB0DA7EB6EF808F3AFC006E3F"
"E"};
std::vector<PublicKey> keys;
keys.reserve(rawKeys.size());
for (auto const& rawKey : rawKeys)
{
auto const pkHex = strUnHex(rawKey);
keys.emplace_back(makeSlice(*pkHex));
}
return keys;
}
void
testActiveValidatorViewAppliesNegativeUNL()
{
testcase("Active validator view applies NegativeUNL");
using namespace jtx;
Env env{
*this,
envconfig(),
supported_amendments() | featureNegativeUNL,
nullptr};
auto const vlKeys = makeValidatorKeys();
auto const genesis = std::make_shared<Ledger>(
create_genesis,
env.app().config(),
std::vector<uint256>{},
env.app().getNodeFamily());
auto l = std::make_shared<Ledger>(
*genesis, env.app().timeKeeper().closeTime());
BEAST_EXPECT(l->rules().enabled(featureNegativeUNL));
auto report = std::make_shared<SLE>(keylet::UNLReport());
std::vector<STObject> activeValidators;
for (auto const& pk : vlKeys)
{
activeValidators.push_back(
STObject::makeInnerObject(sfActiveValidator));
activeValidators.back().setFieldVL(sfPublicKey, pk);
}
report->setFieldArray(
sfActiveValidators, STArray(activeValidators, sfActiveValidators));
auto negUnl = std::make_shared<SLE>(keylet::negativeUNL());
std::vector<STObject> disabledValidators;
disabledValidators.push_back(
STObject::makeInnerObject(sfDisabledValidator));
disabledValidators.back().setFieldVL(sfPublicKey, vlKeys[0]);
disabledValidators.back().setFieldU32(sfFirstLedgerSequence, l->seq());
negUnl->setFieldArray(
sfDisabledValidators,
STArray(disabledValidators, sfDisabledValidators));
OpenView accum(&*l);
accum.rawInsert(report);
accum.rawInsert(negUnl);
accum.apply(*l);
ConsensusExtensions ce{env.app(), env.journal};
auto const view = ce.makeActiveValidatorView(l);
BEAST_EXPECT(view->fromUNLReport);
BEAST_EXPECT(view->size() == 1);
BEAST_EXPECT(!view->containsMaster(vlKeys[0]));
BEAST_EXPECT(!view->containsNode(calcNodeID(vlKeys[0])));
BEAST_EXPECT(view->containsMaster(vlKeys[1]));
BEAST_EXPECT(view->containsNode(calcNodeID(vlKeys[1])));
}
void
testExportSigGateRequiresQuorumAlignment()
{
testcase("Export sig gate requires quorum alignment");
FakeExtensions ext;
ExportTickHarness harness;
auto const localHash = ext.exportHash;
harness.addPeer(1, localHash);
harness.addPeer(2, localHash);
auto result = harness.tick(ext);
BEAST_EXPECT(!result.readyForAccept);
BEAST_EXPECT(harness.position.exportSigSetHash == localHash);
BEAST_EXPECT(ext.exportSigGateStarted_);
result = harness.tick(ext, std::chrono::milliseconds{100});
BEAST_EXPECT(!result.readyForAccept);
BEAST_EXPECT(!ext.exportSigConvergenceFailed_);
result = harness.tick(
ext,
harness.parms.rngREVEAL_TIMEOUT * 2 + std::chrono::milliseconds{1});
BEAST_EXPECT(result.readyForAccept);
BEAST_EXPECT(ext.exportSigConvergenceFailed_);
}
void
testRngEntropyGateRequiresFullObservation()
{
testcase("RNG entropy gate requires full sidecar observation");
FakeExtensions ext;
ext.rngOn = true;
ext.exportOn = false;
ext.estState_ = EstablishState::ConvergingReveal;
ExportTickHarness harness;
auto const localHash = ext.entropyHash;
harness.addEntropyPeer(1, localHash);
harness.addEntropyPeer(2, localHash);
harness.addEntropyPeer(3, localHash);
harness.addEntropyPeer(4, std::nullopt);
auto result = harness.tick(ext);
BEAST_EXPECT(!result.readyForAccept);
BEAST_EXPECT(harness.position.entropySetHash == localHash);
BEAST_EXPECT(ext.entropySetPublished_);
// Quorum alignment is not safe if a tx-converged peer has not
// advertised any entropySetHash. Otherwise local observation order
// can split non-zero entropy from deterministic zero fallback.
result = harness.tick(ext, std::chrono::milliseconds{100});
BEAST_EXPECT(!result.readyForAccept);
BEAST_EXPECT(!ext.entropyFailed);
BEAST_EXPECT(harness.position.entropySetHash == localHash);
result = harness.tick(
ext,
harness.parms.rngREVEAL_TIMEOUT * 2 + std::chrono::milliseconds{1});
BEAST_EXPECT(result.readyForAccept);
BEAST_EXPECT(ext.entropyFailed);
BEAST_EXPECT(!harness.position.entropySetHash);
}
void
testRngFastPathWaitsAfterEntropyPublish()
{
testcase("RNG fast path waits after entropy publish");
FakeExtensions ext;
ext.rngOn = true;
ext.exportOn = false;
ext.estState_ = EstablishState::ConvergingCommit;
ExportTickHarness harness;
auto const localHash = ext.entropyHash;
harness.addEntropyPeer(1, localHash);
harness.addEntropyPeer(2, localHash);
harness.addEntropyPeer(3, localHash);
harness.addEntropyPeer(4, localHash);
auto result = harness.tick(ext);
BEAST_EXPECT(!result.readyForAccept);
BEAST_EXPECT(ext.estState_ == EstablishState::ConvergingReveal);
BEAST_EXPECT(ext.entropySetPublished_);
BEAST_EXPECT(harness.position.entropySetHash == localHash);
result = harness.tick(ext, std::chrono::milliseconds{100});
BEAST_EXPECT(result.readyForAccept);
BEAST_EXPECT(!ext.entropyFailed);
BEAST_EXPECT(harness.position.entropySetHash == localHash);
}
void
testExportSigGateAllowsAlignedQuorumDespiteMinorityConflict()
{
testcase("Export sig gate ignores minority conflict after quorum");
FakeExtensions ext;
ExportTickHarness harness;
auto const localHash = ext.exportHash;
auto const conflictHash = makeHash("conflicting-export-sig-set");
harness.addPeer(1, localHash);
harness.addPeer(2, localHash);
harness.addPeer(3, localHash);
harness.addPeer(4, conflictHash);
auto result = harness.tick(ext);
BEAST_EXPECT(!result.readyForAccept);
result = harness.tick(ext, std::chrono::milliseconds{100});
BEAST_EXPECT(result.readyForAccept);
BEAST_EXPECT(!ext.exportSigConvergenceFailed_);
BEAST_EXPECT(ext.fetchedExportSets.size() == 1);
BEAST_EXPECT(ext.fetchedExportSets.front() == conflictHash);
}
void
testExportSigGateRequiresFullObservation()
{
    testcase("Export sig gate requires full sidecar observation");
    FakeExtensions ext;
    ExportTickHarness harness;
    // Three peers align with the local export-sig set hash...
    auto const localHash = ext.exportHash;
    harness.addPeer(1, localHash);
    harness.addPeer(2, localHash);
    harness.addPeer(3, localHash);
    // ...but peer 4 has not advertised any exportSigSetHash at all.
    harness.addPeer(4, std::nullopt);
    auto result = harness.tick(ext);
    BEAST_EXPECT(!result.readyForAccept);
    BEAST_EXPECT(harness.position.exportSigSetHash == localHash);
    BEAST_EXPECT(ext.exportSigGateStarted_);
    // Local quorum alignment is not enough if a tx-converged peer has
    // not advertised any exportSigSetHash yet.
    result = harness.tick(ext, std::chrono::milliseconds{100});
    BEAST_EXPECT(!result.readyForAccept);
    BEAST_EXPECT(!ext.exportSigConvergenceFailed_);
    // Past the bounded observation window (2x rngREVEAL_TIMEOUT) the
    // round is allowed to proceed, but convergence is marked failed.
    result = harness.tick(
        ext,
        harness.parms.rngREVEAL_TIMEOUT * 2 + std::chrono::milliseconds{1});
    BEAST_EXPECT(result.readyForAccept);
    BEAST_EXPECT(ext.exportSigConvergenceFailed_);
}
void
testExportSigGateFetchesAdvertisedPeerSets()
{
    testcase("Export sig gate fetches advertised peer sets");
    // No locally produced export signatures this round; a single peer
    // advertises its own export-sig set hash.
    FakeExtensions ext;
    ext.localExportSigs = false;
    ExportTickHarness harness;
    auto const advertised = makeHash("peer-export-sig-set");
    harness.addPeer(1, advertised);
    // First tick: the gate opens, no local position is published, and
    // the peer-advertised set is fetched exactly once.
    auto outcome = harness.tick(ext);
    BEAST_EXPECT(!outcome.readyForAccept);
    BEAST_EXPECT(ext.exportSigGateStarted_);
    BEAST_EXPECT(!harness.position.exportSigSetHash);
    BEAST_EXPECT(ext.fetchedExportSets.size() == 1);
    BEAST_EXPECT(ext.fetchedExportSets.front() == advertised);
    // Past the bounded window the round proceeds with convergence
    // flagged as failed.
    outcome = harness.tick(
        ext,
        harness.parms.rngREVEAL_TIMEOUT * 2 + std::chrono::milliseconds{1});
    BEAST_EXPECT(outcome.readyForAccept);
    BEAST_EXPECT(ext.exportSigConvergenceFailed_);
}
void
testExportSigGateBoundsCandidateObservationWindow()
{
    testcase("Export sig gate bounds candidate observation window");
    // No local export signatures, but consensus expects export txns;
    // no peer ever advertises a candidate set.
    FakeExtensions ext;
    ext.localExportSigs = false;
    ext.consensusExportTxns = true;
    ExportTickHarness harness;
    auto result = harness.tick(ext);
    BEAST_EXPECT(!result.readyForAccept);
    BEAST_EXPECT(ext.exportSigGateStarted_);
    BEAST_EXPECT(!harness.position.exportSigSetHash);
    // Nothing advertised means nothing fetched and no failure yet.
    BEAST_EXPECT(ext.fetchedExportSets.empty());
    BEAST_EXPECT(!ext.exportSigConvergenceFailed_);
    // Within the window the gate keeps waiting for a candidate.
    result = harness.tick(ext, std::chrono::milliseconds{100});
    BEAST_EXPECT(!result.readyForAccept);
    BEAST_EXPECT(!ext.exportSigConvergenceFailed_);
    // Past 2x rngREVEAL_TIMEOUT the wait is bounded: the round
    // proceeds with convergence marked failed.
    result = harness.tick(
        ext,
        harness.parms.rngREVEAL_TIMEOUT * 2 + std::chrono::milliseconds{1});
    BEAST_EXPECT(result.readyForAccept);
    BEAST_EXPECT(ext.exportSigConvergenceFailed_);
}
void
testExportSigGateSkipsWhenExportDisabled()
{
    testcase("Export sig gate skips when Export disabled");
    // With Export turned off, an advertising peer must not trigger the
    // gate at all.
    FakeExtensions ext;
    ext.exportOn = false;
    ExportTickHarness harness;
    harness.addPeer(1, ext.exportHash);
    auto const firstTick = harness.tick(ext);
    // The very first tick is already accept-ready: no gate, no local
    // position, no export builds, nothing fetched.
    BEAST_EXPECT(firstTick.readyForAccept);
    BEAST_EXPECT(!ext.exportSigGateStarted_);
    BEAST_EXPECT(!harness.position.exportSigSetHash);
    BEAST_EXPECT(ext.exportBuilds == 0);
    BEAST_EXPECT(ext.fetchedExportSets.empty());
}
void
testExportDisabledRoundClearsCollector()
{
    testcase("Export disabled round clears collector");
    using namespace jtx;
    Env env{*this, envconfig(), supported_amendments(), nullptr};
    ConsensusExtensions ce{env.app(), env.journal};
    auto const tx = makeHash("export-disabled-clears-collector");
    auto const pk = makeValidatorKeys().front();
    // Arbitrary signature bytes; content is irrelevant to this test.
    std::uint8_t const sigBytes[] = {1, 2, 3};
    Buffer const sig{sigBytes, sizeof(sigBytes)};
    // While export is enabled, clearing RNG state preserves the
    // collected signature.
    ce.setExportEnabledThisRound(true);
    ce.exportSigCollector().addVerifiedSignature(tx, pk, sig, 10);
    ce.clearRngState();
    BEAST_EXPECT(ce.exportSigCollector().signatureCount(tx) == 1);
    // Once export is disabled for the round, the same clear drops it.
    ce.setExportEnabledThisRound(false);
    ce.clearRngState();
    BEAST_EXPECT(ce.exportSigCollector().signatureCount(tx) == 0);
}
void
testReplayedProposalHarvestsExportSigs()
{
    testcase("Replayed proposal harvests export signatures");
    using namespace jtx;
    // Env configured as a validator so local validator keys exist.
    Env env{
        *this, envconfig(validator, ""), supported_amendments(), nullptr};
    auto const& valKeys = env.app().getValidatorKeys();
    BEAST_EXPECT(valKeys.keys);
    if (!valKeys.keys)
        return;
    ConsensusExtensions ce{env.app(), env.journal};
    ce.setExportEnabledThisRound(true);
    ce.cacheUNLReport();
    auto const activeView = ce.activeValidatorView();
    BEAST_EXPECT(activeView->sourceLedgerHash);
    if (!activeView->sourceLedgerHash)
        return;
    // The proposal sender must be an active validator in the view,
    // otherwise harvesting would be rejected upstream.
    auto const senderPK = valKeys.keys->publicKey;
    BEAST_EXPECT(ce.isActiveValidator(senderPK, *activeView));
    if (!ce.isActiveValidator(senderPK, *activeView))
        return;
    auto const tx = makeHash("replayed-export-sig-tx");
    auto const blob = makeExportSigBlob(tx, senderPK);
    // Build a position whose exportSignaturesHash matches the sidecar
    // blob we attach, as a replayed proposal would carry.
    ExtendedPosition position{makeHash("replayed-position")};
    position.exportSignaturesHash =
        proposalExportSignaturesHash(std::vector<std::string>{blob});
    ce.onTrustedPeerProposal(
        calcNodeID(senderPK),
        senderPK,
        position,
        0,
        NetClock::time_point{},
        *activeView->sourceLedgerHash,
        Slice{},
        std::vector<std::string>{blob});
    // The attached blob should land in the collector as unverified.
    BEAST_EXPECT(ce.exportSigCollector().hasUnverifiedSignatures());
}
public:
void
run() override
{
    // Execute every ConsensusExtensions sub-test in declaration order.
    testActiveValidatorViewAppliesNegativeUNL();
    testExportSigGateRequiresQuorumAlignment();
    testRngEntropyGateRequiresFullObservation();
    testRngFastPathWaitsAfterEntropyPublish();
    testExportSigGateAllowsAlignedQuorumDespiteMinorityConflict();
    testExportSigGateRequiresFullObservation();
    testExportSigGateFetchesAdvertisedPeerSets();
    testExportSigGateBoundsCandidateObservationWindow();
    testExportSigGateSkipsWhenExportDisabled();
    testExportDisabledRoundClearsCollector();
    testReplayedProposalHarvestsExportSigs();
}
};
BEAST_DEFINE_TESTSUITE(ConsensusExtensions, consensus, ripple);
} // namespace test
} // namespace ripple

File diff suppressed because it is too large Load Diff

View File

@@ -22,8 +22,6 @@
#include <xrpld/consensus/ConsensusProposal.h>
#include <xrpl/beast/clock/manual_clock.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/json/to_string.h>
#include <optional>
#include <utility>
namespace ripple {
@@ -42,7 +40,6 @@ public:
testShouldCloseLedger()
{
using namespace std::chrono_literals;
testcase("should close ledger");
// Use default parameters
ConsensusParms const p{};
@@ -81,102 +78,46 @@ public:
testCheckConsensus()
{
using namespace std::chrono_literals;
testcase("check consensus");
// Use default parameterss
ConsensusParms const p{};
///////////////
// Disputes still in doubt
//
// Not enough time has elapsed
BEAST_EXPECT(
ConsensusState::No ==
checkConsensus(10, 2, 2, 0, 3s, 2s, false, p, true, journal_));
checkConsensus(10, 2, 2, 0, 3s, 2s, p, true, journal_));
// If not enough peers have proposed, ensure
// more time for proposals
BEAST_EXPECT(
ConsensusState::No ==
checkConsensus(10, 2, 2, 0, 3s, 4s, false, p, true, journal_));
checkConsensus(10, 2, 2, 0, 3s, 4s, p, true, journal_));
// Enough time has elapsed and we all agree
BEAST_EXPECT(
ConsensusState::Yes ==
checkConsensus(10, 2, 2, 0, 3s, 10s, false, p, true, journal_));
checkConsensus(10, 2, 2, 0, 3s, 10s, p, true, journal_));
// Enough time has elapsed and we don't yet agree
BEAST_EXPECT(
ConsensusState::No ==
checkConsensus(10, 2, 1, 0, 3s, 10s, false, p, true, journal_));
checkConsensus(10, 2, 1, 0, 3s, 10s, p, true, journal_));
// Our peers have moved on
// Enough time has elapsed and we all agree
BEAST_EXPECT(
ConsensusState::MovedOn ==
checkConsensus(10, 2, 1, 8, 3s, 10s, false, p, true, journal_));
checkConsensus(10, 2, 1, 8, 3s, 10s, p, true, journal_));
// If no peers, don't agree until time has passed.
BEAST_EXPECT(
ConsensusState::No ==
checkConsensus(0, 0, 0, 0, 3s, 10s, false, p, true, journal_));
checkConsensus(0, 0, 0, 0, 3s, 10s, p, true, journal_));
// Agree if no peers and enough time has passed.
BEAST_EXPECT(
ConsensusState::Yes ==
checkConsensus(0, 0, 0, 0, 3s, 16s, false, p, true, journal_));
// Expire if too much time has passed without agreement
BEAST_EXPECT(
ConsensusState::Expired ==
checkConsensus(10, 8, 1, 0, 1s, 19s, false, p, true, journal_));
///////////////
// Stalled
//
// Not enough time has elapsed
BEAST_EXPECT(
ConsensusState::No ==
checkConsensus(10, 2, 2, 0, 3s, 2s, true, p, true, journal_));
// If not enough peers have proposed, ensure
// more time for proposals
BEAST_EXPECT(
ConsensusState::No ==
checkConsensus(10, 2, 2, 0, 3s, 4s, true, p, true, journal_));
// Enough time has elapsed and we all agree
BEAST_EXPECT(
ConsensusState::Yes ==
checkConsensus(10, 2, 2, 0, 3s, 10s, true, p, true, journal_));
// Enough time has elapsed and we don't yet agree, but there's nothing
// left to dispute
BEAST_EXPECT(
ConsensusState::Yes ==
checkConsensus(10, 2, 1, 0, 3s, 10s, true, p, true, journal_));
// Our peers have moved on
// Enough time has elapsed and we all agree, nothing left to dispute
BEAST_EXPECT(
ConsensusState::Yes ==
checkConsensus(10, 2, 1, 8, 3s, 10s, true, p, true, journal_));
// If no peers, don't agree until time has passed.
BEAST_EXPECT(
ConsensusState::No ==
checkConsensus(0, 0, 0, 0, 3s, 10s, true, p, true, journal_));
// Agree if no peers and enough time has passed.
BEAST_EXPECT(
ConsensusState::Yes ==
checkConsensus(0, 0, 0, 0, 3s, 16s, true, p, true, journal_));
// We are done if there's nothing left to dispute, no matter how much
// time has passed
BEAST_EXPECT(
ConsensusState::Yes ==
checkConsensus(10, 8, 1, 0, 1s, 19s, true, p, true, journal_));
checkConsensus(0, 0, 0, 0, 3s, 16s, p, true, journal_));
}
void
@@ -184,7 +125,6 @@ public:
{
using namespace std::chrono_literals;
using namespace csf;
testcase("standalone");
Sim s;
PeerGroup peers = s.createGroup(1);
@@ -209,9 +149,7 @@ public:
{
using namespace csf;
using namespace std::chrono;
testcase("peers agree");
//@@start peers-agree
ConsensusParms const parms{};
Sim sim;
PeerGroup peers = sim.createGroup(5);
@@ -241,7 +179,6 @@ public:
BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
}
}
//@@end peers-agree
}
void
@@ -249,13 +186,11 @@ public:
{
using namespace csf;
using namespace std::chrono;
testcase("slow peers");
// Several tests of a complete trust graph with a subset of peers
// that have significantly longer network delays to the rest of the
// network
//@@start slow-peer-scenario
// Test when a slow peer doesn't delay a consensus quorum (4/5 agree)
{
ConsensusParms const parms{};
@@ -294,18 +229,16 @@ public:
BEAST_EXPECT(
peer->prevRoundTime == network[0]->prevRoundTime);
// Slow peer's transaction (Tx{0}) didn't make it in time
BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
for (std::uint32_t i = 2; i < network.size(); ++i)
BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
// Tx 0 is still in the open transaction set for next round
// Tx 0 didn't make it
BEAST_EXPECT(
peer->openTxs.find(Tx{0}) != peer->openTxs.end());
}
}
}
//@@end slow-peer-scenario
// Test when the slow peers delay a consensus quorum (4/6 agree)
{
@@ -418,7 +351,6 @@ public:
{
using namespace csf;
using namespace std::chrono;
testcase("close time disagree");
// This is a very specialized test to get ledgers to disagree on
// the close time. It unfortunately assumes knowledge about current
@@ -485,8 +417,6 @@ public:
{
using namespace csf;
using namespace std::chrono;
testcase("wrong LCL");
// Specialized test to exercise a temporary fork in which some peers
// are working on an incorrect prior ledger.
@@ -496,7 +426,6 @@ public:
// the wrong LCL at different phases of consensus
for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
{
//@@start wrong-lcl-scenario
// Consider 10 peers:
// 0 1 2 3 4 5 6 7 8 9
// minority majorityA majorityB
@@ -517,7 +446,6 @@ public:
// This topology can potentially fork with the above trust relations
// but that is intended for this test.
//@@end wrong-lcl-scenario
Sim sim;
@@ -661,7 +589,6 @@ public:
{
using namespace csf;
using namespace std::chrono;
testcase("consensus close time rounding");
// This is a specialized test engineered to yield ledgers with different
// close times even though the peers believe they had close time
@@ -677,6 +604,9 @@ public:
PeerGroup fast = sim.createGroup(4);
PeerGroup network = fast + slow;
for (Peer* peer : network)
peer->consensusParms = parms;
// Connected trust graph
network.trust(network);
@@ -762,7 +692,6 @@ public:
{
using namespace csf;
using namespace std::chrono;
testcase("fork");
std::uint32_t numPeers = 10;
// Vary overlap between two UNLs
@@ -800,7 +729,6 @@ public:
}
sim.run(1);
//@@start fork-threshold
// Fork should not happen for 40% or greater overlap
// Since the overlapped nodes have a UNL that is the union of the
// two cliques, the maximum sized UNL list is the number of peers
@@ -812,7 +740,6 @@ public:
// One for cliqueA, one for cliqueB and one for nodes in both
BEAST_EXPECT(sim.branches() <= 3);
}
//@@end fork-threshold
}
}
@@ -821,7 +748,6 @@ public:
{
using namespace csf;
using namespace std::chrono;
testcase("hub network");
// Simulate a set of 5 validators that aren't directly connected but
// rely on a single hub node for communication
@@ -909,7 +835,6 @@ public:
{
using namespace csf;
using namespace std::chrono;
testcase("preferred by branch");
// Simulate network splits that are prevented from forking when using
// preferred ledger by trie. This is a contrived example that involves
@@ -1042,7 +967,6 @@ public:
{
using namespace csf;
using namespace std::chrono;
testcase("pause for laggards");
// Test that validators that jump ahead of the network slow
// down.
@@ -1128,410 +1052,6 @@ public:
BEAST_EXPECT(sim.synchronized());
}
// RNG consensus tests in ConsensusRng_test.cpp
// MERGE NOTE (sync-2.5.0): upstream testDisputes() is already present
// below with j/clog stalled() params from 86ef16dbeb. If upstream
// auto-merges a duplicate, delete it — keep only this version.
void
testDisputes()
{
testcase("disputes");
using namespace csf;
// Test dispute objects directly
using Dispute = DisputedTx<Tx, PeerID>;
Tx const txTrue{99};
Tx const txFalse{98};
Tx const txFollowingTrue{97};
Tx const txFollowingFalse{96};
int const numPeers = 100;
ConsensusParms p;
std::size_t peersUnchanged = 0;
auto logs = std::make_unique<Logs>(beast::severities::kError);
auto j = logs->journal("Test");
auto clog = std::make_unique<std::stringstream>();
// Three cases:
// 1 proposing, initial vote yes
// 2 proposing, initial vote no
// 3 not proposing, initial vote doesn't matter after the first update,
// use yes
{
Dispute proposingTrue{txTrue.id(), true, numPeers, journal_};
Dispute proposingFalse{txFalse.id(), false, numPeers, journal_};
Dispute followingTrue{
txFollowingTrue.id(), true, numPeers, journal_};
Dispute followingFalse{
txFollowingFalse.id(), false, numPeers, journal_};
BEAST_EXPECT(proposingTrue.ID() == 99);
BEAST_EXPECT(proposingFalse.ID() == 98);
BEAST_EXPECT(followingTrue.ID() == 97);
BEAST_EXPECT(followingFalse.ID() == 96);
// Create an even split in the peer votes
for (int i = 0; i < numPeers; ++i)
{
BEAST_EXPECT(proposingTrue.setVote(PeerID(i), i < 50));
BEAST_EXPECT(proposingFalse.setVote(PeerID(i), i < 50));
BEAST_EXPECT(followingTrue.setVote(PeerID(i), i < 50));
BEAST_EXPECT(followingFalse.setVote(PeerID(i), i < 50));
}
// Switch the middle vote to match mine
BEAST_EXPECT(proposingTrue.setVote(PeerID(50), true));
BEAST_EXPECT(proposingFalse.setVote(PeerID(49), false));
BEAST_EXPECT(followingTrue.setVote(PeerID(50), true));
BEAST_EXPECT(followingFalse.setVote(PeerID(49), false));
// no changes yet
BEAST_EXPECT(proposingTrue.getOurVote() == true);
BEAST_EXPECT(proposingFalse.getOurVote() == false);
BEAST_EXPECT(followingTrue.getOurVote() == true);
BEAST_EXPECT(followingFalse.getOurVote() == false);
BEAST_EXPECT(
!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
BEAST_EXPECT(
!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
BEAST_EXPECT(
!followingTrue.stalled(p, false, peersUnchanged, j, clog));
BEAST_EXPECT(
!followingFalse.stalled(p, false, peersUnchanged, j, clog));
BEAST_EXPECT(clog->str() == "");
// I'm in the majority, my vote should not change
BEAST_EXPECT(!proposingTrue.updateVote(5, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(5, true, p));
BEAST_EXPECT(!followingTrue.updateVote(5, false, p));
BEAST_EXPECT(!followingFalse.updateVote(5, false, p));
BEAST_EXPECT(!proposingTrue.updateVote(10, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(10, true, p));
BEAST_EXPECT(!followingTrue.updateVote(10, false, p));
BEAST_EXPECT(!followingFalse.updateVote(10, false, p));
peersUnchanged = 2;
BEAST_EXPECT(
!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
BEAST_EXPECT(
!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
BEAST_EXPECT(
!followingTrue.stalled(p, false, peersUnchanged, j, clog));
BEAST_EXPECT(
!followingFalse.stalled(p, false, peersUnchanged, j, clog));
BEAST_EXPECT(clog->str() == "");
// Right now, the vote is 51%. The requirement is about to jump to
// 65%
BEAST_EXPECT(proposingTrue.updateVote(55, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(55, true, p));
BEAST_EXPECT(!followingTrue.updateVote(55, false, p));
BEAST_EXPECT(!followingFalse.updateVote(55, false, p));
BEAST_EXPECT(proposingTrue.getOurVote() == false);
BEAST_EXPECT(proposingFalse.getOurVote() == false);
BEAST_EXPECT(followingTrue.getOurVote() == true);
BEAST_EXPECT(followingFalse.getOurVote() == false);
// 16 validators change their vote to match my original vote
for (int i = 0; i < 16; ++i)
{
auto pTrue = PeerID(numPeers - i - 1);
auto pFalse = PeerID(i);
BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
BEAST_EXPECT(followingTrue.setVote(pTrue, true));
BEAST_EXPECT(followingFalse.setVote(pFalse, false));
}
// The vote should now be 66%, threshold is 65%
BEAST_EXPECT(proposingTrue.updateVote(60, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(60, true, p));
BEAST_EXPECT(!followingTrue.updateVote(60, false, p));
BEAST_EXPECT(!followingFalse.updateVote(60, false, p));
BEAST_EXPECT(proposingTrue.getOurVote() == true);
BEAST_EXPECT(proposingFalse.getOurVote() == false);
BEAST_EXPECT(followingTrue.getOurVote() == true);
BEAST_EXPECT(followingFalse.getOurVote() == false);
// Threshold jumps to 70%
BEAST_EXPECT(proposingTrue.updateVote(86, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(86, true, p));
BEAST_EXPECT(!followingTrue.updateVote(86, false, p));
BEAST_EXPECT(!followingFalse.updateVote(86, false, p));
BEAST_EXPECT(proposingTrue.getOurVote() == false);
BEAST_EXPECT(proposingFalse.getOurVote() == false);
BEAST_EXPECT(followingTrue.getOurVote() == true);
BEAST_EXPECT(followingFalse.getOurVote() == false);
// 5 more validators change their vote to match my original vote
for (int i = 16; i < 21; ++i)
{
auto pTrue = PeerID(numPeers - i - 1);
auto pFalse = PeerID(i);
BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
BEAST_EXPECT(followingTrue.setVote(pTrue, true));
BEAST_EXPECT(followingFalse.setVote(pFalse, false));
}
// The vote should now be 71%, threshold is 70%
BEAST_EXPECT(proposingTrue.updateVote(90, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(90, true, p));
BEAST_EXPECT(!followingTrue.updateVote(90, false, p));
BEAST_EXPECT(!followingFalse.updateVote(90, false, p));
BEAST_EXPECT(proposingTrue.getOurVote() == true);
BEAST_EXPECT(proposingFalse.getOurVote() == false);
BEAST_EXPECT(followingTrue.getOurVote() == true);
BEAST_EXPECT(followingFalse.getOurVote() == false);
// The vote should now be 71%, threshold is 70%
BEAST_EXPECT(!proposingTrue.updateVote(150, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(150, true, p));
BEAST_EXPECT(!followingTrue.updateVote(150, false, p));
BEAST_EXPECT(!followingFalse.updateVote(150, false, p));
BEAST_EXPECT(proposingTrue.getOurVote() == true);
BEAST_EXPECT(proposingFalse.getOurVote() == false);
BEAST_EXPECT(followingTrue.getOurVote() == true);
BEAST_EXPECT(followingFalse.getOurVote() == false);
// The vote should now be 71%, threshold is 70%
BEAST_EXPECT(!proposingTrue.updateVote(190, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(190, true, p));
BEAST_EXPECT(!followingTrue.updateVote(190, false, p));
BEAST_EXPECT(!followingFalse.updateVote(190, false, p));
BEAST_EXPECT(proposingTrue.getOurVote() == true);
BEAST_EXPECT(proposingFalse.getOurVote() == false);
BEAST_EXPECT(followingTrue.getOurVote() == true);
BEAST_EXPECT(followingFalse.getOurVote() == false);
peersUnchanged = 3;
BEAST_EXPECT(
!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
BEAST_EXPECT(
!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
BEAST_EXPECT(
!followingTrue.stalled(p, false, peersUnchanged, j, clog));
BEAST_EXPECT(
!followingFalse.stalled(p, false, peersUnchanged, j, clog));
BEAST_EXPECT(clog->str() == "");
// Threshold jumps to 95%
BEAST_EXPECT(proposingTrue.updateVote(220, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(220, true, p));
BEAST_EXPECT(!followingTrue.updateVote(220, false, p));
BEAST_EXPECT(!followingFalse.updateVote(220, false, p));
BEAST_EXPECT(proposingTrue.getOurVote() == false);
BEAST_EXPECT(proposingFalse.getOurVote() == false);
BEAST_EXPECT(followingTrue.getOurVote() == true);
BEAST_EXPECT(followingFalse.getOurVote() == false);
// 25 more validators change their vote to match my original vote
for (int i = 21; i < 46; ++i)
{
auto pTrue = PeerID(numPeers - i - 1);
auto pFalse = PeerID(i);
BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
BEAST_EXPECT(followingTrue.setVote(pTrue, true));
BEAST_EXPECT(followingFalse.setVote(pFalse, false));
}
// The vote should now be 96%, threshold is 95%
BEAST_EXPECT(proposingTrue.updateVote(250, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(250, true, p));
BEAST_EXPECT(!followingTrue.updateVote(250, false, p));
BEAST_EXPECT(!followingFalse.updateVote(250, false, p));
BEAST_EXPECT(proposingTrue.getOurVote() == true);
BEAST_EXPECT(proposingFalse.getOurVote() == false);
BEAST_EXPECT(followingTrue.getOurVote() == true);
BEAST_EXPECT(followingFalse.getOurVote() == false);
for (peersUnchanged = 0; peersUnchanged < 6; ++peersUnchanged)
{
BEAST_EXPECT(
!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
BEAST_EXPECT(
!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
BEAST_EXPECT(
!followingTrue.stalled(p, false, peersUnchanged, j, clog));
BEAST_EXPECT(
!followingFalse.stalled(p, false, peersUnchanged, j, clog));
BEAST_EXPECT(clog->str() == "");
}
auto expectStalled = [this, &clog](
int txid,
bool ourVote,
int ourTime,
int peerTime,
int support,
std::uint32_t line) {
using namespace std::string_literals;
auto const s = clog->str();
expect(s.find("stalled"), s, __FILE__, line);
expect(
s.starts_with("Transaction "s + std::to_string(txid)),
s,
__FILE__,
line);
expect(
s.find("voting "s + (ourVote ? "YES" : "NO")) != s.npos,
s,
__FILE__,
line);
expect(
s.find("for "s + std::to_string(ourTime) + " rounds."s) !=
s.npos,
s,
__FILE__,
line);
expect(
s.find(
"votes in "s + std::to_string(peerTime) + " rounds.") !=
s.npos,
s,
__FILE__,
line);
expect(
s.ends_with(
"has "s + std::to_string(support) + "% support. "s),
s,
__FILE__,
line);
clog = std::make_unique<std::stringstream>();
};
for (int i = 0; i < 1; ++i)
{
BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
BEAST_EXPECT(
!followingFalse.updateVote(250 + 10 * i, false, p));
BEAST_EXPECT(proposingTrue.getOurVote() == true);
BEAST_EXPECT(proposingFalse.getOurVote() == false);
BEAST_EXPECT(followingTrue.getOurVote() == true);
BEAST_EXPECT(followingFalse.getOurVote() == false);
// true vote has changed recently, so not stalled
BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog));
BEAST_EXPECT(clog->str() == "");
// remaining votes have been unchanged in so long that we only
// need to hit the second round at 95% to be stalled, regardless
// of peers
BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
expectStalled(98, false, 11, 0, 2, __LINE__);
BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
expectStalled(97, true, 11, 0, 97, __LINE__);
BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
expectStalled(96, false, 11, 0, 3, __LINE__);
// true vote has changed recently, so not stalled
BEAST_EXPECT(
!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
BEAST_EXPECTS(clog->str() == "", clog->str());
// remaining votes have been unchanged in so long that we only
// need to hit the second round at 95% to be stalled, regardless
// of peers
BEAST_EXPECT(
proposingFalse.stalled(p, true, peersUnchanged, j, clog));
expectStalled(98, false, 11, 6, 2, __LINE__);
BEAST_EXPECT(
followingTrue.stalled(p, false, peersUnchanged, j, clog));
expectStalled(97, true, 11, 6, 97, __LINE__);
BEAST_EXPECT(
followingFalse.stalled(p, false, peersUnchanged, j, clog));
expectStalled(96, false, 11, 6, 3, __LINE__);
}
for (int i = 1; i < 3; ++i)
{
BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
BEAST_EXPECT(
!followingFalse.updateVote(250 + 10 * i, false, p));
BEAST_EXPECT(proposingTrue.getOurVote() == true);
BEAST_EXPECT(proposingFalse.getOurVote() == false);
BEAST_EXPECT(followingTrue.getOurVote() == true);
BEAST_EXPECT(followingFalse.getOurVote() == false);
// true vote changed 2 rounds ago, and peers are changing, so
// not stalled
BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog));
BEAST_EXPECTS(clog->str() == "", clog->str());
// still stalled
BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
expectStalled(98, false, 11 + i, 0, 2, __LINE__);
BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
expectStalled(97, true, 11 + i, 0, 97, __LINE__);
BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
expectStalled(96, false, 11 + i, 0, 3, __LINE__);
// true vote changed 2 rounds ago, and peers are NOT changing,
// so stalled
BEAST_EXPECT(
proposingTrue.stalled(p, true, peersUnchanged, j, clog));
expectStalled(99, true, 1 + i, 6, 97, __LINE__);
// still stalled
BEAST_EXPECT(
proposingFalse.stalled(p, true, peersUnchanged, j, clog));
expectStalled(98, false, 11 + i, 6, 2, __LINE__);
BEAST_EXPECT(
followingTrue.stalled(p, false, peersUnchanged, j, clog));
expectStalled(97, true, 11 + i, 6, 97, __LINE__);
BEAST_EXPECT(
followingFalse.stalled(p, false, peersUnchanged, j, clog));
expectStalled(96, false, 11 + i, 6, 3, __LINE__);
}
for (int i = 3; i < 5; ++i)
{
BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
BEAST_EXPECT(
!followingFalse.updateVote(250 + 10 * i, false, p));
BEAST_EXPECT(proposingTrue.getOurVote() == true);
BEAST_EXPECT(proposingFalse.getOurVote() == false);
BEAST_EXPECT(followingTrue.getOurVote() == true);
BEAST_EXPECT(followingFalse.getOurVote() == false);
BEAST_EXPECT(proposingTrue.stalled(p, true, 0, j, clog));
expectStalled(99, true, 1 + i, 0, 97, __LINE__);
BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
expectStalled(98, false, 11 + i, 0, 2, __LINE__);
BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
expectStalled(97, true, 11 + i, 0, 97, __LINE__);
BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
expectStalled(96, false, 11 + i, 0, 3, __LINE__);
BEAST_EXPECT(
proposingTrue.stalled(p, true, peersUnchanged, j, clog));
expectStalled(99, true, 1 + i, 6, 97, __LINE__);
BEAST_EXPECT(
proposingFalse.stalled(p, true, peersUnchanged, j, clog));
expectStalled(98, false, 11 + i, 6, 2, __LINE__);
BEAST_EXPECT(
followingTrue.stalled(p, false, peersUnchanged, j, clog));
expectStalled(97, true, 11 + i, 6, 97, __LINE__);
BEAST_EXPECT(
followingFalse.stalled(p, false, peersUnchanged, j, clog));
expectStalled(96, false, 11 + i, 6, 3, __LINE__);
}
}
}
void
run() override
{
@@ -1548,8 +1068,6 @@ public:
testHubNetwork();
testPreferredByBranch();
testPauseForLaggards();
// RNG consensus tests moved to ConsensusRng_test.cpp
testDisputes();
}
};

View File

@@ -1,478 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2026 XRPL Labs
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpld/app/consensus/RCLCxPeerPos.h>
#include <xrpld/consensus/ConsensusProposal.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/protocol/SecretKey.h>
#include <xrpl/protocol/digest.h>
#include <cstring>
namespace ripple {
namespace test {
class ExtendedPosition_test : public beast::unit_test::suite
{
/// Derive a deterministic 256-bit test hash from a short text label.
static uint256
makeHash(char const* label)
{
    auto const len = std::strlen(label);
    return sha512Half(Slice(label, len));
}
void
testSerializationRoundTrip()
{
    testcase("Serialization round-trip");
    // Empty position (legacy compat)
    {
        auto const txSet = makeHash("txset-a");
        ExtendedPosition pos{txSet};
        Serializer s;
        pos.add(s);
        // Should be exactly 32 bytes (no flags byte)
        BEAST_EXPECT(s.getDataLength() == 32);
        SerialIter sit(s.slice());
        auto deserialized =
            ExtendedPosition::fromSerialIter(sit, s.getDataLength());
        BEAST_EXPECT(deserialized.has_value());
        if (!deserialized)
            return;
        // All optional fields must come back unset.
        BEAST_EXPECT(deserialized->txSetHash == txSet);
        BEAST_EXPECT(!deserialized->myCommitment);
        BEAST_EXPECT(!deserialized->myReveal);
        BEAST_EXPECT(!deserialized->commitSetHash);
        BEAST_EXPECT(!deserialized->entropySetHash);
        BEAST_EXPECT(!deserialized->exportSigSetHash);
        BEAST_EXPECT(!deserialized->exportSignaturesHash);
    }
    // Position with commitment
    {
        auto const txSet = makeHash("txset-b");
        auto const commit = makeHash("commit-b");
        ExtendedPosition pos{txSet};
        pos.myCommitment = commit;
        Serializer s;
        pos.add(s);
        // 32 (txSet) + 1 (flags) + 32 (commitment) = 65
        BEAST_EXPECT(s.getDataLength() == 65);
        SerialIter sit(s.slice());
        auto deserialized =
            ExtendedPosition::fromSerialIter(sit, s.getDataLength());
        BEAST_EXPECT(deserialized.has_value());
        if (!deserialized)
            return;
        BEAST_EXPECT(deserialized->txSetHash == txSet);
        BEAST_EXPECT(deserialized->myCommitment == commit);
        BEAST_EXPECT(!deserialized->myReveal);
    }
    // Position with all fields
    {
        auto const txSet = makeHash("txset-c");
        auto const commitSet = makeHash("commitset-c");
        auto const entropySet = makeHash("entropyset-c");
        auto const exportSigSet = makeHash("exportsigset-c");
        auto const exportSigs = makeHash("exportsigs-c");
        auto const commit = makeHash("commit-c");
        auto const reveal = makeHash("reveal-c");
        ExtendedPosition pos{txSet};
        pos.commitSetHash = commitSet;
        pos.entropySetHash = entropySet;
        pos.exportSigSetHash = exportSigSet;
        pos.exportSignaturesHash = exportSigs;
        pos.myCommitment = commit;
        pos.myReveal = reveal;
        Serializer s;
        pos.add(s);
        // 32 + 1 + 6*32 = 225
        BEAST_EXPECT(s.getDataLength() == 225);
        SerialIter sit(s.slice());
        auto deserialized =
            ExtendedPosition::fromSerialIter(sit, s.getDataLength());
        BEAST_EXPECT(deserialized.has_value());
        if (!deserialized)
            return;
        // Every field must survive the round-trip unchanged.
        BEAST_EXPECT(deserialized->txSetHash == txSet);
        BEAST_EXPECT(deserialized->commitSetHash == commitSet);
        BEAST_EXPECT(deserialized->entropySetHash == entropySet);
        BEAST_EXPECT(deserialized->exportSigSetHash == exportSigSet);
        BEAST_EXPECT(deserialized->exportSignaturesHash == exportSigs);
        BEAST_EXPECT(deserialized->myCommitment == commit);
        BEAST_EXPECT(deserialized->myReveal == reveal);
    }
}
void
testSigningConsistency()
{
    testcase("Signing hash consistency");
    // The signing hash from ConsensusProposal::signingHash() must match
    // what a receiver would compute via the same function after
    // deserializing the ExtendedPosition from the wire.
    auto const [pk, sk] = randomKeyPair(KeyType::secp256k1);
    auto const nodeId = calcNodeID(pk);
    auto const prevLedger = makeHash("prevledger");
    auto const closeTime =
        NetClock::time_point{NetClock::duration{1234567}};
    // Test with commitment (the case that was failing)
    {
        auto const txSet = makeHash("txset-sign");
        auto const commit = makeHash("commitment-sign");
        ExtendedPosition pos{txSet};
        pos.myCommitment = commit;
        using Proposal =
            ConsensusProposal<NodeID, uint256, ExtendedPosition>;
        Proposal prop{
            prevLedger,
            Proposal::seqJoin,
            pos,
            closeTime,
            NetClock::time_point{},
            nodeId};
        // Sign it (same as propose() does)
        auto const signingHash = prop.signingHash();
        auto sig = signDigest(pk, sk, signingHash);
        // Serialize position to wire format
        Serializer positionData;
        pos.add(positionData);
        auto const posSlice = positionData.slice();
        // Deserialize (same as PeerImp::onMessage does)
        SerialIter sit(posSlice);
        auto const maybeReceivedPos =
            ExtendedPosition::fromSerialIter(sit, posSlice.size());
        BEAST_EXPECT(maybeReceivedPos.has_value());
        if (!maybeReceivedPos)
            return;
        // Reconstruct proposal on receiver side
        Proposal receivedProp{
            prevLedger,
            Proposal::seqJoin,
            *maybeReceivedPos,
            closeTime,
            NetClock::time_point{},
            nodeId};
        // The signing hash must match
        BEAST_EXPECT(receivedProp.signingHash() == signingHash);
        // Verify signature (same as checkSign does)
        BEAST_EXPECT(
            verifyDigest(pk, receivedProp.signingHash(), sig, false));
    }
    // Test without commitment (legacy case)
    {
        auto const txSet = makeHash("txset-legacy");
        ExtendedPosition pos{txSet};
        using Proposal =
            ConsensusProposal<NodeID, uint256, ExtendedPosition>;
        Proposal prop{
            prevLedger,
            Proposal::seqJoin,
            pos,
            closeTime,
            NetClock::time_point{},
            nodeId};
        auto const signingHash = prop.signingHash();
        auto sig = signDigest(pk, sk, signingHash);
        Serializer positionData;
        pos.add(positionData);
        SerialIter sit(positionData.slice());
        auto const maybeReceivedPos = ExtendedPosition::fromSerialIter(
            sit, positionData.getDataLength());
        BEAST_EXPECT(maybeReceivedPos.has_value());
        if (!maybeReceivedPos)
            return;
        // Receiver-side reconstruction must yield the identical signing
        // hash so the original signature still verifies.
        Proposal receivedProp{
            prevLedger,
            Proposal::seqJoin,
            *maybeReceivedPos,
            closeTime,
            NetClock::time_point{},
            nodeId};
        BEAST_EXPECT(receivedProp.signingHash() == signingHash);
        BEAST_EXPECT(
            verifyDigest(pk, receivedProp.signingHash(), sig, false));
    }
}
/// Verifies that proposalUniqueId (the relay-suppression key) is
/// identical on sender and receiver when computed from the same
/// ExtendedPosition data after a wire round trip. A mismatch here
/// would cause duplicate relaying or wrongly-suppressed proposals.
void
testSuppressionConsistency()
{
testcase("Suppression hash consistency");
// proposalUniqueId must produce the same result on sender and
// receiver when given the same ExtendedPosition data.
auto const [pk, sk] = randomKeyPair(KeyType::secp256k1);
auto const prevLedger = makeHash("prevledger-supp");
auto const closeTime =
NetClock::time_point{NetClock::duration{1234567}};
std::uint32_t const proposeSeq = 0;
auto const txSet = makeHash("txset-supp");
auto const commit = makeHash("commitment-supp");
ExtendedPosition pos{txSet};
pos.myCommitment = commit;
// Sign (to get a real signature for suppression)
using Proposal = ConsensusProposal<NodeID, uint256, ExtendedPosition>;
Proposal prop{
prevLedger,
proposeSeq,
pos,
closeTime,
NetClock::time_point{},
calcNodeID(pk)};
auto sig = signDigest(pk, sk, prop.signingHash());
// Sender computes suppression
auto const senderSuppression =
proposalUniqueId(pos, prevLedger, proposeSeq, closeTime, pk, sig);
// Simulate wire: serialize and deserialize
Serializer positionData;
pos.add(positionData);
SerialIter sit(positionData.slice());
auto const maybeReceivedPos =
ExtendedPosition::fromSerialIter(sit, positionData.getDataLength());
BEAST_EXPECT(maybeReceivedPos.has_value());
if (!maybeReceivedPos)
return;
// Receiver computes suppression
auto const receiverSuppression = proposalUniqueId(
*maybeReceivedPos, prevLedger, proposeSeq, closeTime, pk, sig);
// Both sides must agree on the suppression key.
BEAST_EXPECT(senderSuppression == receiverSuppression);
}
void
testMalformedPayload()
{
testcase("Malformed payload rejected");
// Too short (< 32 bytes)
{
Serializer s;
s.add32(0xDEADBEEF); // only 4 bytes
SerialIter sit(s.slice());
auto result =
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
BEAST_EXPECT(!result.has_value());
}
// Empty payload
{
Serializer s;
SerialIter sit(s.slice());
auto result = ExtendedPosition::fromSerialIter(sit, 0);
BEAST_EXPECT(!result.has_value());
}
// Flags claim fields that aren't present (truncated)
{
auto const txSet = makeHash("txset-malformed");
Serializer s;
s.addBitString(txSet);
// flags = 0x0F (all 4 fields), but no field data follows
s.add8(0x0F);
SerialIter sit(s.slice());
auto result =
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
BEAST_EXPECT(!result.has_value());
}
// Flags claim 2 fields but only 1 field's worth of data
{
auto const txSet = makeHash("txset-malformed2");
auto const commit = makeHash("commit-malformed2");
Serializer s;
s.addBitString(txSet);
// flags = 0x03 (commitSetHash + entropySetHash), but only
// provide commitSetHash data
s.add8(0x03);
s.addBitString(commit);
SerialIter sit(s.slice());
auto result =
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
BEAST_EXPECT(!result.has_value());
}
// Unknown flag bits above known extension fields (wire malleability)
{
auto const txSet = makeHash("txset-unkflags");
Serializer s;
s.addBitString(txSet);
s.add8(0x41); // bit 6 is unknown, bit 0 = commitSetHash
s.addBitString(makeHash("commitset-unkflags"));
SerialIter sit(s.slice());
auto result =
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
BEAST_EXPECT(!result.has_value());
}
// Trailing extra bytes after valid fields
{
auto const txSet = makeHash("txset-trailing");
auto const commitSet = makeHash("commitset-trailing");
Serializer s;
s.addBitString(txSet);
s.add8(0x01); // commitSetHash only
s.addBitString(commitSet);
s.add32(0xDEADBEEF); // 4 extra trailing bytes
SerialIter sit(s.slice());
auto result =
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
BEAST_EXPECT(!result.has_value());
}
// Valid flags with exactly the right amount of data (should succeed)
{
auto const txSet = makeHash("txset-ok");
auto const commitSet = makeHash("commitset-ok");
Serializer s;
s.addBitString(txSet);
s.add8(0x01); // commitSetHash only
s.addBitString(commitSet);
SerialIter sit(s.slice());
auto result =
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
BEAST_EXPECT(result.has_value());
if (result)
{
BEAST_EXPECT(result->txSetHash == txSet);
BEAST_EXPECT(result->commitSetHash == commitSet);
BEAST_EXPECT(!result->entropySetHash);
}
}
}
void
testEquality()
{
testcase("Equality is txSetHash only");
auto const txSet = makeHash("txset-eq");
auto const txSet2 = makeHash("txset-eq-2");
ExtendedPosition a{txSet};
a.myCommitment = makeHash("commit1-eq");
ExtendedPosition b{txSet};
b.myCommitment = makeHash("commit2-eq");
// Same txSetHash, different leaves -> equal
BEAST_EXPECT(a == b);
// Same txSetHash, different commitSetHash -> still equal
// (sub-state quorum handles commitSetHash agreement)
b.commitSetHash = makeHash("cs-eq");
BEAST_EXPECT(a == b);
// Same txSetHash, different entropySetHash -> still equal
b.entropySetHash = makeHash("es-eq");
BEAST_EXPECT(a == b);
// Same txSetHash, different export signature digest -> still equal
b.exportSignaturesHash = makeHash("export-sigs-eq");
BEAST_EXPECT(a == b);
// Different txSetHash -> not equal
ExtendedPosition c{txSet2};
BEAST_EXPECT(a != c);
}
/// proposalExportSignaturesHash must be deterministic and sensitive to
/// both the order of the signature blobs and their contents.
void
testExportSignatureDigest()
{
    testcase("Export signature digest");
    std::vector<std::string> payloads{
        "txhash-pubkey-sig-a", "txhash-pubkey-sig-b"};
    auto const baseline = proposalExportSignaturesHash(payloads);
    // Deterministic: hashing the same input twice yields one digest.
    BEAST_EXPECT(baseline == proposalExportSignaturesHash(payloads));
    // Order-sensitive: swapping entries changes the digest.
    auto swapped = payloads;
    std::swap(swapped[0], swapped[1]);
    BEAST_EXPECT(baseline != proposalExportSignaturesHash(swapped));
    // Content-sensitive: appending a byte to one blob changes it.
    auto altered = payloads;
    altered[1].push_back('x');
    BEAST_EXPECT(baseline != proposalExportSignaturesHash(altered));
}
public:
// Entry point invoked by the beast unit-test framework; dispatches to
// each sub-test in sequence.
void
run() override
{
testSerializationRoundTrip();
testSigningConsistency();
testSuppressionConsistency();
testMalformedPayload();
testEquality();
testExportSignatureDigest();
}
};
BEAST_DEFINE_TESTSUITE(ExtendedPosition, consensus, ripple);
} // namespace test
} // namespace ripple

View File

@@ -22,7 +22,6 @@
#include <test/csf/Histogram.h>
#include <test/csf/Peer.h>
#include <test/csf/PeerGroup.h>
#include <test/csf/PeerTick.h>
#include <test/csf/Proposal.h>
#include <test/csf/Scheduler.h>
#include <test/csf/Sim.h>

View File

@@ -20,7 +20,6 @@
#define RIPPLE_TEST_CSF_PEER_H_INCLUDED
#include <test/csf/CollectorRef.h>
#include <test/csf/Proposal.h>
#include <test/csf/Scheduler.h>
#include <test/csf/TrustGraph.h>
#include <test/csf/Tx.h>
@@ -29,14 +28,11 @@
#include <test/csf/ledgers.h>
#include <xrpld/consensus/Consensus.h>
#include <xrpld/consensus/Validations.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/beast/utility/WrappedSink.h>
#include <xrpl/protocol/PublicKey.h>
#include <boost/container/flat_map.hpp>
#include <boost/container/flat_set.hpp>
#include <algorithm>
#include <string>
#include <vector>
namespace ripple {
namespace test {
@@ -55,41 +51,6 @@ namespace bc = boost::container;
by Collectors
- Exposes most internal state for forcibly simulating arbitrary scenarios
*/
/// Content-addressed sidecar set store, simulating InboundTransactions.
/// Shared across all peers in a simulation — peers publish sets by hash
/// and fetch them by hash, just like the real SHAMap fetch pipeline.
///
/// Each entry is tagged with its type so fetchRngSetIfNeeded can merge
/// into the correct local set without content-sniffing heuristics.
struct SidecarStore
{
enum class Type { commit, reveal, exportSig };
using EntrySet = hash_map<PeerID, uint256>;
struct TaggedSet
{
Type type;
EntrySet entries;
};
void
publish(uint256 const& hash, Type type, EntrySet const& entries)
{
sets_[hash] = {type, entries};
}
TaggedSet const*
fetch(uint256 const& hash) const
{
auto it = sets_.find(hash);
return it != sets_.end() ? &it->second : nullptr;
}
private:
std::map<uint256, TaggedSet> sets_;
};
struct Peer
{
/** Basic wrapper of a proposed position taken by a peer.
@@ -100,8 +61,6 @@ struct Peer
class Position
{
public:
using Proposal = csf::Proposal;
Position(Proposal const& p) : proposal_(p)
{
}
@@ -118,18 +77,6 @@ struct Peer
return proposal_.getJson();
}
PeerKey
publicKey() const
{
return {proposal_.nodeID(), 0};
}
std::uint64_t
signature() const
{
return 0;
}
std::string
render() const
{
@@ -222,7 +169,6 @@ struct Peer
using NodeKey_t = PeerKey;
using TxSet_t = TxSet;
using PeerPosition_t = Position;
using Position_t = ProposalPosition;
using Result = ConsensusResult<Peer>;
using NodeKey = Validation::NodeKey;
@@ -242,9 +188,6 @@ struct Peer
//! The oracle that manages unique ledgers
LedgerOracle& oracle;
//! Shared sidecar store (simulates InboundTransactions)
SidecarStore& sidecarStore;
//! Scheduler of events
Scheduler& scheduler;
@@ -314,686 +257,6 @@ struct Peer
// Simulation parameters
ConsensusParms consensusParms;
/// RNG consensus extensions for CSF. Owns all RNG state and methods,
/// same pattern as ConsensusExtensions for production.
struct Extensions
{
Peer& peer;
beast::Journal j_;
// Sub-state machine
EstablishState estState_{EstablishState::ConvergingTx};
std::chrono::steady_clock::time_point revealPhaseStart_{};
std::chrono::steady_clock::time_point commitHashConflictStart_{};
bool explicitFinalProposalSent_{false};
bool entropySetPublished_{false};
std::chrono::steady_clock::time_point entropyPublishStart_{};
bool exportSigGateStarted_{false};
std::chrono::steady_clock::time_point exportSigGateStart_{};
bool exportSigConvergenceFailed_{false};
// RNG state
bool enableRngConsensus_ = false;
bool enableExportConsensus_ = false;
hash_set<PeerID> unlNodes_;
hash_set<PeerID> likelyParticipants_;
hash_map<PeerID, uint256> pendingCommits_;
hash_map<PeerID, uint256> pendingReveals_;
hash_map<PeerID, uint256> pendingExportSigs_;
hash_map<PeerID, PeerKey> nodeKeys_;
uint256 myEntropySecret_;
bool entropyFailed_ = false;
// Last round summary (for test assertions)
uint256 lastEntropyDigest_;
std::uint16_t lastEntropyCount_ = 0;
bool lastEntropyWasFallback_ = true;
bool lastExportSucceeded_ = false;
bool lastExportRetried_ = false;
std::size_t exportSigFetchMerges_ = 0;
// Optional test hook: force a specific commit-set hash
std::optional<uint256> forcedCommitSetHash_;
// Optional test hook: force a specific entropy-set hash
std::optional<uint256> forcedEntropySetHash_;
// Optional test hook: force a specific export sig-set hash
std::optional<uint256> forcedExportSigSetHash_;
// Optional test hook: drop reveals from specific peers
// (simulates asymmetric reveal delivery / packet loss)
hash_set<PeerID> dropRevealFrom_;
// Optional test hook: drop proposal-carried export signatures.
hash_set<PeerID> dropExportSigFrom_;
// Optional test hook: stay an active proposer but do not originate an
// export signature, so tests can force sidecar-fetch-only convergence.
bool suppressOwnExportSig_ = false;
explicit Extensions(Peer& p) : peer(p), j_(p.j)
{
}
// --- RNG methods ---
bool
rngEnabled() const
{
return enableRngConsensus_;
}
bool
exportEnabled() const
{
return enableExportConsensus_;
}
std::size_t
quorumThreshold() const
{
if (!enableRngConsensus_)
return (std::numeric_limits<std::size_t>::max)() / 4;
auto const base = unlNodes_.size();
return calculateQuorumThreshold(base == 0 ? 1 : base);
}
std::size_t
exportSigQuorumThreshold() const
{
if (!enableExportConsensus_)
return (std::numeric_limits<std::size_t>::max)() / 4;
auto const base =
unlNodes_.empty() ? std::size_t{1} : unlNodes_.size();
return calculateQuorumThreshold(base);
}
std::size_t
pendingCommitCount() const
{
return pendingCommits_.size();
}
std::size_t
pendingRevealCount() const
{
return pendingReveals_.size();
}
std::size_t
expectedProposerCount() const
{
return likelyParticipants_.size();
}
bool
hasQuorumOfCommits() const
{
if (!enableRngConsensus_)
return false;
return pendingCommits_.size() >= quorumThreshold();
}
bool
hasMinimumReveals() const
{
if (!enableRngConsensus_)
return false;
return pendingReveals_.size() >= pendingCommits_.size();
}
bool
hasAnyReveals() const
{
if (!enableRngConsensus_)
return false;
return !pendingReveals_.empty();
}
bool
shouldZeroEntropy() const
{
if (entropyFailed_ || pendingReveals_.empty())
return true;
// Match production: zero when reveals < quorum threshold.
auto const threshold = unlNodes_.empty()
? std::size_t{1}
: calculateQuorumThreshold(unlNodes_.size());
return pendingReveals_.size() < threshold;
}
uint256
buildCommitSet(Ledger::Seq seq)
{
if (forcedCommitSetHash_)
return *forcedCommitSetHash_;
auto const hash = hashRngSet(pendingCommits_, seq, "commit");
peer.sidecarStore.publish(
hash, SidecarStore::Type::commit, pendingCommits_);
return hash;
}
uint256
buildEntropySet(Ledger::Seq seq)
{
if (forcedEntropySetHash_)
return *forcedEntropySetHash_;
auto const hash = hashRngSet(pendingReveals_, seq, "reveal");
peer.sidecarStore.publish(
hash, SidecarStore::Type::reveal, pendingReveals_);
return hash;
}
uint256
buildExportSigSet(Ledger::Seq seq)
{
if (forcedExportSigSetHash_)
return *forcedExportSigSetHash_;
auto const hash = hashRngSet(pendingExportSigs_, seq, "export-sig");
peer.sidecarStore.publish(
hash, SidecarStore::Type::exportSig, pendingExportSigs_);
return hash;
}
void
generateEntropySecret()
{
if (!enableRngConsensus_)
return;
auto const seq =
static_cast<std::uint32_t>(peer.lastClosedLedger.seq()) + 1;
myEntropySecret_ = sha512Half(
std::string("csf-rng-secret"),
static_cast<std::uint32_t>(peer.id),
peer.key.second,
seq,
peer.completedLedgers);
}
uint256
getEntropySecret() const
{
return myEntropySecret_;
}
void
selfSeedReveal()
{
if (!enableRngConsensus_)
return;
// Self-seed our own reveal into pendingReveals_ so it
// counts toward reveal quorum. The real code does this
// in decorateMessage; the CSF does it here since it has
// no equivalent serialization hook.
if (myEntropySecret_ != uint256{})
pendingReveals_[peer.id] = myEntropySecret_;
}
void
setEntropyFailed()
{
if (!enableRngConsensus_)
return;
entropyFailed_ = true;
}
enum class SidecarKind : uint8_t { commit, reveal, exportSig };
void
fetchRngSetIfNeeded(
std::optional<uint256> const& hash,
SidecarKind kind = SidecarKind::commit)
{
if (!hash)
return;
auto const* fetched = peer.sidecarStore.fetch(*hash);
if (!fetched)
return;
// Union merge into the correct local set based on type.
auto& target = [&]() -> hash_map<PeerID, uint256>& {
switch (fetched->type)
{
case SidecarStore::Type::commit:
return pendingCommits_;
case SidecarStore::Type::reveal:
return pendingReveals_;
case SidecarStore::Type::exportSig:
return pendingExportSigs_;
}
return pendingCommits_;
}();
for (auto const& [nodeId, digest] : fetched->entries)
{
auto const [_, inserted] = target.emplace(nodeId, digest);
if (fetched->type == SidecarStore::Type::exportSig && inserted)
++exportSigFetchMerges_;
}
}
void
fetchSidecarsIfNeeded(ProposalPosition const& pos)
{
fetchRngSetIfNeeded(pos.commitSetHash, SidecarKind::commit);
fetchRngSetIfNeeded(pos.entropySetHash, SidecarKind::reveal);
fetchRngSetIfNeeded(pos.exportSigSetHash, SidecarKind::exportSig);
}
void
clearRngState()
{
pendingCommits_.clear();
pendingReveals_.clear();
pendingExportSigs_.clear();
nodeKeys_.clear();
likelyParticipants_.clear();
myEntropySecret_.zero();
entropyFailed_ = false;
exportSigGateStarted_ = false;
exportSigGateStart_ = {};
exportSigConvergenceFailed_ = false;
}
void
cacheUNLReport()
{
unlNodes_.clear();
for (auto const* p : peer.trustGraph.trustedPeers(&peer))
{
if (!peer.runAsValidator && p->id == peer.id)
continue;
unlNodes_.insert(p->id);
}
if (peer.runAsValidator)
unlNodes_.insert(peer.id);
}
void
setExpectedProposers(hash_set<PeerID> proposers)
{
bool const includeSelf = peer.runAsValidator;
if (!proposers.empty())
{
hash_set<PeerID> filtered;
for (auto const& nid : proposers)
{
if (!includeSelf && nid == peer.id)
continue;
if (isUNLReportMember(nid))
filtered.insert(nid);
}
if (includeSelf)
filtered.insert(peer.id);
likelyParticipants_ = std::move(filtered);
return;
}
likelyParticipants_.clear();
if (!unlNodes_.empty())
likelyParticipants_ = unlNodes_;
}
void
harvestRngData(
PeerID const& nodeId,
PeerKey const& publicKey,
ProposalPosition const& position,
std::uint32_t,
NetClock::time_point,
Ledger::ID const& prevLedger,
std::uint64_t)
{
if (!enableRngConsensus_ && !enableExportConsensus_)
return;
if (!isUNLReportMember(nodeId))
return;
nodeKeys_.insert_or_assign(nodeId, publicKey);
if (enableRngConsensus_ && position.myCommitment)
{
auto [it, inserted] =
pendingCommits_.emplace(nodeId, *position.myCommitment);
if (!inserted && it->second != *position.myCommitment)
{
it->second = *position.myCommitment;
pendingReveals_.erase(nodeId);
}
}
if (!enableRngConsensus_ || !position.myReveal)
{
if (enableExportConsensus_ && position.myExportSignature &&
dropExportSigFrom_.count(nodeId) == 0)
pendingExportSigs_[nodeId] = *position.myExportSignature;
return;
}
// Test hook: drop reveals from specific peers
if (dropRevealFrom_.count(nodeId) == 0)
{
auto const commitIt = pendingCommits_.find(nodeId);
if (commitIt != pendingCommits_.end())
{
auto const prevIt = peer.ledgers.find(prevLedger);
if (prevIt != peer.ledgers.end())
{
auto const seq =
static_cast<std::uint32_t>(prevIt->second.seq()) +
1;
auto const expected = sha512Half(
*position.myReveal,
static_cast<std::uint32_t>(publicKey.first),
publicKey.second,
seq);
if (expected == commitIt->second)
pendingReveals_[nodeId] = *position.myReveal;
}
}
}
if (enableExportConsensus_ && position.myExportSignature &&
dropExportSigFrom_.count(nodeId) == 0)
pendingExportSigs_[nodeId] = *position.myExportSignature;
}
bool
isUNLReportMember(PeerID const& nodeId) const
{
return unlNodes_.count(nodeId) > 0;
}
void
finalizeRoundEntropy(std::uint32_t seq)
{
if (!enableRngConsensus_)
{
lastEntropyDigest_.zero();
lastEntropyCount_ = 0;
lastEntropyWasFallback_ = true;
return;
}
if (shouldZeroEntropy())
{
lastEntropyDigest_.zero();
lastEntropyCount_ = 0;
lastEntropyWasFallback_ = true;
return;
}
std::vector<std::pair<PeerKey, uint256>> ordered;
ordered.reserve(pendingReveals_.size());
for (auto const& [nodeId, reveal] : pendingReveals_)
{
auto const it = nodeKeys_.find(nodeId);
if (it == nodeKeys_.end())
continue;
ordered.emplace_back(it->second, reveal);
}
if (ordered.empty())
{
lastEntropyDigest_.zero();
lastEntropyCount_ = 0;
lastEntropyWasFallback_ = true;
return;
}
std::sort(
ordered.begin(),
ordered.end(),
[](auto const& a, auto const& b) {
if (a.first.first != b.first.first)
return a.first.first < b.first.first;
return a.first.second < b.first.second;
});
uint256 digest = sha512Half(
std::string("csf-rng-entropy"),
static_cast<std::uint32_t>(seq));
for (auto const& [keyId, reveal] : ordered)
{
digest = sha512Half(
digest,
static_cast<std::uint32_t>(keyId.first),
keyId.second,
reveal);
}
lastEntropyDigest_ = digest;
lastEntropyCount_ = static_cast<std::uint16_t>(ordered.size());
lastEntropyWasFallback_ = false;
}
void
finalizeRoundExport()
{
if (!enableExportConsensus_)
{
lastExportSucceeded_ = false;
lastExportRetried_ = false;
return;
}
auto const activeSigCount = std::count_if(
pendingExportSigs_.begin(),
pendingExportSigs_.end(),
[&](auto const& entry) {
return isUNLReportMember(entry.first);
});
lastExportSucceeded_ = !exportSigConvergenceFailed_ &&
static_cast<std::size_t>(activeSigCount) >=
exportSigQuorumThreshold();
lastExportRetried_ = !lastExportSucceeded_;
}
// --- Lifecycle hooks (matching design doc) ---
template <class Ledger_t>
void
onRoundStart(
Ledger_t const& /* prevLedger */,
hash_set<PeerID> lastProposers)
{
clearRngState();
cacheUNLReport();
setExpectedProposers(std::move(lastProposers));
resetSubState();
}
void
onTrustedPeerProposal(
PeerID const& nodeId,
PeerKey const& publicKey,
ProposalPosition const& position,
std::uint32_t proposeSeq,
NetClock::time_point closeTime,
Ledger::ID const& prevLedger,
std::uint64_t signature)
{
harvestRngData(
nodeId,
publicKey,
position,
proposeSeq,
closeTime,
prevLedger,
signature);
}
void
onAcceptComplete()
{
}
template <class Ledger_t>
void
decoratePosition(
ProposalPosition& pos,
Ledger_t const& prevLedger,
bool proposing)
{
decorateExportPosition(pos, prevLedger, proposing);
if (!enableRngConsensus_ || !proposing || !peer.runAsValidator)
return;
generateEntropySecret();
auto const seq = static_cast<std::uint32_t>(prevLedger.seq()) + 1;
auto const commitment = sha512Half(
myEntropySecret_,
static_cast<std::uint32_t>(peer.id),
peer.key.second,
seq);
pos.myCommitment = commitment;
pendingCommits_[peer.id] = commitment;
nodeKeys_.insert_or_assign(peer.id, peer.key);
}
template <class Ledger_t>
void
decorateExportPosition(
ProposalPosition& pos,
Ledger_t const& prevLedger,
bool proposing)
{
if (!enableExportConsensus_ || !proposing || !peer.runAsValidator)
return;
auto const seq = static_cast<std::uint32_t>(prevLedger.seq()) + 1;
auto const sig = sha512Half(
std::string("csf-export-sig"),
static_cast<std::uint32_t>(peer.id),
peer.key.second,
seq);
if (!suppressOwnExportSig_)
{
pos.myExportSignature = sig;
pendingExportSigs_[peer.id] = sig;
}
nodeKeys_.insert_or_assign(peer.id, peer.key);
}
void
appendJson(Json::Value&) const
{
}
template <class Pos>
void
logPosition(
Pos const&,
beast::Journal,
beast::severities::Severity = beast::severities::kTrace) const
{
}
// --- Stubs for features CSF doesn't model ---
bool
bootstrapFastStartEnabled() const
{
return false;
}
bool
shouldSendExplicitFinalProposal() const
{
return false;
}
std::optional<TxSet>
buildExplicitFinalProposalTxSet(TxSet const&, Ledger::Seq)
{
return std::nullopt;
}
bool
hasPendingExportSigs() const
{
return enableExportConsensus_ && !pendingExportSigs_.empty();
}
bool
hasConsensusExportTxns() const
{
return enableExportConsensus_;
}
void
setExportSigConvergenceFailed()
{
if (enableExportConsensus_)
exportSigConvergenceFailed_ = true;
}
// --- Sub-state accessors ---
bool
extensionsBusy() const
{
return estState_ != EstablishState::ConvergingTx ||
(exportEnabled() &&
(exportSigGateStarted_ || hasPendingExportSigs()));
}
EstablishState
estState() const
{
return estState_;
}
void
resetSubState()
{
estState_ = EstablishState::ConvergingTx;
revealPhaseStart_ = {};
commitHashConflictStart_ = {};
explicitFinalProposalSent_ = false;
entropySetPublished_ = false;
entropyPublishStart_ = {};
exportSigGateStarted_ = false;
exportSigGateStart_ = {};
exportSigConvergenceFailed_ = false;
}
/// Defined in test/csf/PeerTick.h (keeps xrpld/app dependency
/// out of this header).
template <class Ctx>
ExtensionTickResult
onTick(Ctx const& ctx);
private:
uint256
hashRngSet(
hash_map<PeerID, uint256> const& entries,
Ledger::Seq seq,
std::string const& domain) const
{
std::vector<std::pair<std::uint32_t, uint256>> ordered;
ordered.reserve(entries.size());
for (auto const& [nodeId, digest] : entries)
{
if (!isUNLReportMember(nodeId))
continue;
ordered.emplace_back(
static_cast<std::uint32_t>(nodeId), digest);
}
if (ordered.empty())
return uint256{};
std::sort(
ordered.begin(),
ordered.end(),
[](auto const& a, auto const& b) { return a.first < b.first; });
uint256 out = sha512Half(
std::string("csf-rng-set"),
domain,
static_cast<std::uint32_t>(seq));
for (auto const& [nodeId, digest] : ordered)
out = sha512Half(out, nodeId, digest);
return out;
}
};
Extensions extensions_{*this};
Extensions&
ce()
{
return extensions_;
}
Extensions const&
ce() const
{
return extensions_;
}
//! The collectors to report events to
CollectorRefs& collectors;
@@ -1015,15 +278,13 @@ struct Peer
BasicNetwork<Peer*>& n,
TrustGraph<Peer*>& tg,
CollectorRefs& c,
beast::Journal jIn,
SidecarStore& sc)
beast::Journal jIn)
: sink(jIn, "Peer " + to_string(i) + ": ")
, j(sink)
, consensus(s.clock(), *this, j)
, id{i}
, key{id, 0}
, oracle{o}
, sidecarStore{sc}
, scheduler{s}
, net{n}
, trustGraph(tg)
@@ -1249,15 +510,15 @@ struct Peer
{
issue(CloseLedger{prevLedger, openTxs});
Position_t pos{TxSet::calcID(openTxs)};
ce().decoratePosition(
pos, prevLedger, mode == ConsensusMode::proposing);
return Result(
TxSet{openTxs},
Proposal(
prevLedger.id(), Proposal::seqJoin, pos, closeTime, now(), id));
prevLedger.id(),
Proposal::seqJoin,
TxSet::calcID(openTxs),
closeTime,
now(),
id));
}
void
@@ -1292,10 +553,6 @@ struct Peer
schedule(delays.ledgerAccept, [=, this]() {
const bool proposing = mode == ConsensusMode::proposing;
const bool consensusFail = result.state == ConsensusState::MovedOn;
auto const seq = static_cast<std::uint32_t>(prevLedger.seq()) + 1;
ce().finalizeRoundEntropy(seq);
ce().finalizeRoundExport();
TxSet const acceptedTxs = injectTxs(prevLedger, result.txns);
Ledger const newLedger = oracle.accept(

View File

@@ -1,14 +0,0 @@
#ifndef RIPPLE_TEST_CSF_PEERTICK_H_INCLUDED
#define RIPPLE_TEST_CSF_PEERTICK_H_INCLUDED
#include <test/csf/Peer.h>
#include <xrpld/consensus/ConsensusExtensionsTick.h>
template <class Ctx>
ripple::ExtensionTickResult
ripple::test::csf::Peer::Extensions::onTick(Ctx const& ctx)
{
return ripple::extensionsTick(*this, ctx);
}
#endif

View File

@@ -23,123 +23,17 @@
#include <test/csf/Validation.h>
#include <test/csf/ledgers.h>
#include <xrpld/consensus/ConsensusProposal.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/beast/hash/hash_append.h>
#include <cstdint>
#include <optional>
#include <ostream>
#include <string>
namespace ripple {
namespace test {
namespace csf {
/** Position sidecar for CSF that can model RNG commit/reveal fields.
Core tx-set convergence remains keyed on txSetHash only, matching
production's ExtendedPosition behavior.
/** Proposal is a position taken in the consensus process and is represented
directly from the generic types.
*/
struct RngPosition
{
TxSet::ID txSetHash{};
std::optional<uint256> commitSetHash;
std::optional<uint256> entropySetHash;
std::optional<uint256> exportSigSetHash;
std::optional<uint256> myCommitment;
std::optional<uint256> myReveal;
std::optional<uint256> myExportSignature;
RngPosition() = default;
explicit RngPosition(TxSet::ID txSet) : txSetHash(txSet)
{
}
operator TxSet::ID() const
{
return txSetHash;
}
void
updateTxSet(TxSet::ID txSet)
{
txSetHash = txSet;
}
bool
operator==(RngPosition const& other) const
{
return txSetHash == other.txSetHash;
}
bool
operator!=(RngPosition const& other) const
{
return !(*this == other);
}
bool
operator==(TxSet::ID txSet) const
{
return txSetHash == txSet;
}
bool
operator!=(TxSet::ID txSet) const
{
return txSetHash != txSet;
}
};
inline bool
operator==(TxSet::ID txSet, RngPosition const& pos)
{
return pos == txSet;
}
inline bool
operator!=(TxSet::ID txSet, RngPosition const& pos)
{
return pos != txSet;
}
inline std::string
to_string(RngPosition const& pos)
{
return std::to_string(pos.txSetHash);
}
inline std::ostream&
operator<<(std::ostream& os, RngPosition const& pos)
{
return os << pos.txSetHash;
}
template <class Hasher>
void
hash_append(Hasher& h, RngPosition const& pos)
{
using beast::hash_append;
auto appendOpt = [&](std::optional<uint256> const& o) {
hash_append(h, static_cast<std::uint8_t>(o.has_value() ? 1 : 0));
if (o)
hash_append(h, *o);
};
hash_append(h, pos.txSetHash);
appendOpt(pos.commitSetHash);
appendOpt(pos.entropySetHash);
appendOpt(pos.exportSigSetHash);
appendOpt(pos.myCommitment);
appendOpt(pos.myReveal);
appendOpt(pos.myExportSignature);
}
/** Proposal is a position taken in the consensus process.
*/
using Proposal = ConsensusProposal<PeerID, Ledger::ID, RngPosition>;
using ProposalPosition = RngPosition;
using Proposal = ConsensusProposal<PeerID, Ledger::ID, TxSet::ID>;
} // namespace csf
} // namespace test
} // namespace ripple
#endif
#endif

View File

@@ -25,7 +25,6 @@
#include <test/csf/Digraph.h>
#include <test/csf/Peer.h>
#include <test/csf/PeerGroup.h>
#include <test/csf/PeerTick.h>
#include <test/csf/Scheduler.h>
#include <test/csf/SimTime.h>
#include <test/csf/TrustGraph.h>
@@ -84,7 +83,6 @@ public:
BasicNetwork<Peer*> net;
TrustGraph<Peer*> trustGraph;
CollectorRefs collectors;
SidecarStore sidecarStore;
/** Create a simulation
@@ -121,8 +119,7 @@ public:
net,
trustGraph,
collectors,
j,
sidecarStore);
j);
newPeers.emplace_back(&peers.back());
}
PeerGroup res{newPeers};

View File

@@ -48,7 +48,7 @@ public:
{
}
ID const&
ID
id() const
{
return id_;

View File

@@ -82,12 +82,7 @@ supported_amendments()
Throw<std::runtime_error>(
"Unknown feature: " + s + " in supportedAmendments.");
}
//@@start rng-test-environment-gating
// TODO: ConsensusEntropy injects a pseudo-tx every ledger which
// breaks existing test transaction count assumptions. Exclude from
// default test set until dedicated tests are written.
return FeatureBitset(feats) - featureConsensusEntropy;
//@@end rng-test-environment-gating
return FeatureBitset(feats);
}();
return ids;
}

View File

@@ -130,8 +130,7 @@ Env::close(
// Go through the rpc interface unless we need to simulate
// a specific consensus delay.
if (consensusDelay)
app().getOPs().acceptLedger(
consensusDelay, "Env::close(consensusDelay)");
app().getOPs().acceptLedger(consensusDelay);
else
{
auto resp = rpc("ledger_accept");

View File

@@ -1,212 +0,0 @@
#ifndef RIPPLE_TEST_JTX_XPOP_H_INCLUDED
#define RIPPLE_TEST_JTX_XPOP_H_INCLUDED
#include <test/jtx/Env.h>
#include <xrpld/app/ledger/LedgerMaster.h>
#include <xrpld/app/proof/LedgerProof.h>
#include <xrpld/app/proof/XPOPv1.h>
#include <xrpl/basics/StringUtilities.h>
#include <xrpl/basics/base64.h>
#include <xrpl/protocol/PublicKey.h>
#include <xrpl/protocol/SecretKey.h>
#include <xrpl/protocol/Sign.h>
#include <xrpl/protocol/digest.h>
namespace ripple {
namespace test {
namespace jtx {
namespace xpop {
/// Build a manifest string (binary, not base64).
inline std::string
makeManifestRaw(
PublicKey const& masterPub,
SecretKey const& masterSec,
PublicKey const& signingPub,
SecretKey const& signingSec,
int seq = 1)
{
STObject st(sfGeneric);
st[sfSequence] = seq;
st[sfPublicKey] = masterPub;
st[sfSigningPubKey] = signingPub;
sign(st, HashPrefix::manifest, *publicKeyType(signingPub), signingSec);
sign(
st,
HashPrefix::manifest,
*publicKeyType(masterPub),
masterSec,
sfMasterSignature);
Serializer s;
st.add(s);
return std::string(static_cast<char const*>(s.data()), s.size());
}
/// A complete test validator with all keys and manifest.
struct TestValidator
{
PublicKey masterPublic;
SecretKey masterSecret;
PublicKey signingPublic;
SecretKey signingSecret;
std::string manifestRaw;
std::string manifestBase64;
static TestValidator
create()
{
auto const ms = randomSecretKey();
auto const mp = derivePublicKey(KeyType::ed25519, ms);
auto const [sp, ss] = randomKeyPair(KeyType::secp256k1);
auto raw = makeManifestRaw(mp, ms, sp, ss, 1);
return {mp, ms, sp, ss, raw, base64_encode(raw)};
}
proof::ValidatorKeys
toValidatorKeys() const
{
return {
masterPublic,
masterSecret,
signingPublic,
signingSecret,
manifestBase64};
}
};
/// A complete test VL publisher with keys and manifest.
struct TestVLPublisher
{
PublicKey masterPublic;
SecretKey masterSecret;
PublicKey signingPublic;
SecretKey signingSecret;
std::string manifestBase64;
static TestVLPublisher
create()
{
auto const ms = randomSecretKey();
auto const mp = derivePublicKey(KeyType::ed25519, ms);
auto const [sp, ss] = randomKeyPair(KeyType::secp256k1);
return {
mp, ms, sp, ss, base64_encode(makeManifestRaw(mp, ms, sp, ss, 1))};
}
/// Build VL data for these validators.
proof::VLData
buildVLData(
std::vector<TestValidator> const& validators,
std::uint32_t sequence = 1,
std::uint32_t expiration = 767784645) const
{
// Build the JSON blob
std::string data = "{\"sequence\":" + std::to_string(sequence) +
",\"expiration\":" + std::to_string(expiration) +
",\"validators\":[";
for (std::size_t i = 0; i < validators.size(); ++i)
{
if (i > 0)
data += ",";
data += "{\"validation_public_key\":\"" +
strHex(validators[i].masterPublic) + "\",\"manifest\":\"" +
validators[i].manifestBase64 + "\"}";
}
data += "]}";
auto const blob = base64_encode(data);
auto const sig =
strHex(sign(signingPublic, signingSecret, makeSlice(data)));
return proof::VLData{
masterPublic, masterSecret, manifestBase64, blob, sig, 1};
}
};
/// Everything needed to build and import XPOPs in tests.
struct TestXPOPContext
{
    std::vector<TestValidator> validators;
    TestVLPublisher publisher;
    proof::VLData vlData;

    /// Create a context with `validatorCount` fresh validators and one
    /// publisher whose VL covers all of them.
    static TestXPOPContext
    create(int validatorCount = 5)
    {
        auto pub = TestVLPublisher::create();
        std::vector<TestValidator> vals;
        for (int n = 0; n < validatorCount; ++n)
            vals.push_back(TestValidator::create());
        auto vl = pub.buildVLData(vals);
        return {std::move(vals), std::move(pub), std::move(vl)};
    }

    /// Get the VL master public key hex for IMPORT_VL_KEYS config.
    std::string
    vlKeyHex() const
    {
        return strHex(publisher.masterPublic);
    }

    /// Build an Env config with NETWORK_ID and IMPORT_VL_KEYS set.
    std::unique_ptr<Config>
    makeEnvConfig(std::uint32_t networkID = 21337) const
    {
        auto cfg = envconfig(jtx::validator, "");
        cfg->NETWORK_ID = networkID;
        auto const keyHex = vlKeyHex();
        // strUnHex yields nothing on malformed hex; only register the
        // key when decoding succeeds.
        if (auto const pkBytes = strUnHex(keyHex))
            cfg->IMPORT_VL_KEYS.emplace(keyHex, makeSlice(*pkBytes));
        return cfg;
    }

    /// Build XPOP from a closed ledger for a specific tx.
    Json::Value
    buildXPOP(Ledger const& ledger, uint256 const& txHash) const
    {
        std::vector<proof::ValidatorKeys> keys;
        for (auto const& val : validators)
            keys.push_back(val.toValidatorKeys());
        return proof::buildXPOPv1(ledger, txHash, keys, vlData);
    }

    /// Build XPOP from an Env's last closed ledger.
    /// Returns a null JSON value when no closed ledger is available.
    Json::Value
    buildXPOP(Env& env, uint256 const& txHash) const
    {
        if (auto const closed = env.app().getLedgerMaster().getClosedLedger())
            return buildXPOP(*closed, txHash);
        return {};
    }
};
/// Build a complete XPOP v1 JSON from an Env's last closed ledger.
/// Creates fresh validator keys and VL publisher for each call.
inline Json::Value
buildTestXPOP(Env& env, uint256 const& txHash, int validatorCount = 5)
{
    return TestXPOPContext::create(validatorCount).buildXPOP(env, txHash);
}
/// Get the hex-encoded XPOP blob suitable for sfBlob in ttIMPORT.
/// Returns an empty string when the XPOP could not be built.
inline std::string
buildTestXPOPHex(Env& env, uint256 const& txHash, int validatorCount = 5)
{
    auto const xpopJson = buildTestXPOP(env, txHash, validatorCount);
    return xpopJson.isNull() ? std::string{} : proof::xpopToHex(xpopJson);
}
} // namespace xpop
} // namespace jtx
} // namespace test
} // namespace ripple
#endif

View File

@@ -1,530 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2026 XRPL Labs
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <test/jtx.h>
#include <xrpld/app/misc/RuntimeConfig.h>
#include <xrpld/overlay/detail/TrafficCount.h>
#include <xrpl/protocol/jss.h>
namespace ripple {
/** Unit tests for the `runtime_config` admin RPC and the RuntimeConfig
    service behind it: global ("*") versus per-peer targets, merge and
    fallback semantics, clearing, message-type filtering, and clamping of
    percentage values. Each test drives the RPC and then inspects the
    resulting state through Application::getRuntimeConfig(). */
class RuntimeConfig_test : public beast::unit_test::suite
{
    // Helper to call runtime_config RPC with JSON params
    Json::Value
    runtimeConfig(test::jtx::Env& env, Json::Value const& params)
    {
        return env.rpc(
            "json", "runtime_config", to_string(params))[jss::result];
    }

    // Helper to call runtime_config RPC with no params (GET)
    Json::Value
    runtimeConfig(test::jtx::Env& env)
    {
        return env.rpc("runtime_config")[jss::result];
    }

    // A fresh Env reports an empty config map and an inactive service.
    void
    testGetEmpty()
    {
        testcase("GET empty config");
        using namespace test::jtx;
        Env env{*this};
        auto result = runtimeConfig(env);
        BEAST_EXPECT(result.isMember("configs"));
        BEAST_EXPECT(result["configs"].size() == 0);
        BEAST_EXPECT(!env.app().getRuntimeConfig().active());
    }

    // Setting the "*" target round-trips through the RPC response and is
    // returned by getConfig() for any peer.
    void
    testSetGlobal()
    {
        testcase("SET global config");
        using namespace test::jtx;
        Env env{*this};
        Json::Value params;
        params["set"] = Json::objectValue;
        params["set"]["*"] = Json::objectValue;
        params["set"]["*"]["send_delay_ms"] = 100;
        params["set"]["*"]["send_delay_jitter_ms"] = 20;
        params["set"]["*"]["send_drop_pct"] = 5.5;
        auto result = runtimeConfig(env, params);
        BEAST_EXPECT(result.isMember("configs"));
        auto const& configs = result["configs"];
        if (!BEAST_EXPECT(configs.isMember("*")))
            return;
        auto const& global = configs["*"];
        BEAST_EXPECT(global["send_delay_ms"].asInt() == 100);
        BEAST_EXPECT(global["send_delay_jitter_ms"].asInt() == 20);
        BEAST_EXPECT(global["send_drop_pct"].asDouble() == 5.5);
        // Verify active state via RuntimeConfig directly
        BEAST_EXPECT(env.app().getRuntimeConfig().active());
        // Verify getConfig returns the global for any peer
        auto cfg = env.app().getRuntimeConfig().getConfig("10.0.0.1:51235");
        BEAST_EXPECT(cfg.has_value());
        BEAST_EXPECT(cfg->sendDelayMs == 100);
        BEAST_EXPECT(cfg->sendDelayJitterMs == 20);
        // Percentages are stored as hundredths: 5.5% -> 550.
        BEAST_EXPECT(cfg->sendDropPctX100 == 550);
    }

    // A per-peer entry overrides only the fields it sets; unset fields
    // are inherited from the "*" entry.
    void
    testSetPerPeer()
    {
        testcase("SET per-peer config with merge");
        using namespace test::jtx;
        Env env{*this};
        // Set global first
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["*"] = Json::objectValue;
            params["set"]["*"]["send_delay_ms"] = 100;
            params["set"]["*"]["send_drop_pct"] = 10.0;
            runtimeConfig(env, params);
        }
        // Set per-peer override (only delay, no drop)
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["10.0.0.2:51235"] = Json::objectValue;
            params["set"]["10.0.0.2:51235"]["send_delay_ms"] = 500;
            runtimeConfig(env, params);
        }
        auto& rc = env.app().getRuntimeConfig();
        // Per-peer should have merged values: delay from override, drop from *
        auto peerCfg = rc.getConfig("10.0.0.2:51235");
        if (!BEAST_EXPECT(peerCfg.has_value()))
            return;
        BEAST_EXPECT(peerCfg->sendDelayMs == 500);  // overridden
        BEAST_EXPECT(peerCfg->sendDropPctX100 == 1000);  // inherited from *
        // Other peers still get the global
        auto otherCfg = rc.getConfig("10.0.0.3:51235");
        if (!BEAST_EXPECT(otherCfg.has_value()))
            return;
        BEAST_EXPECT(otherCfg->sendDelayMs == 100);
        BEAST_EXPECT(otherCfg->sendDropPctX100 == 1000);
    }

    // Clearing one target leaves the others intact; the cleared peer
    // falls back to the global entry.
    void
    testClear()
    {
        testcase("CLEAR specific target");
        using namespace test::jtx;
        Env env{*this};
        // Set global + per-peer
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["*"] = Json::objectValue;
            params["set"]["*"]["send_delay_ms"] = 50;
            params["set"]["10.0.0.2:51235"] = Json::objectValue;
            params["set"]["10.0.0.2:51235"]["send_delay_ms"] = 200;
            runtimeConfig(env, params);
        }
        // Clear per-peer
        {
            Json::Value params;
            params["clear"] = Json::arrayValue;
            params["clear"].append("10.0.0.2:51235");
            auto result = runtimeConfig(env, params);
            // Should still have "*"
            BEAST_EXPECT(result["configs"].isMember("*"));
            BEAST_EXPECT(!result["configs"].isMember("10.0.0.2:51235"));
        }
        // Per-peer now falls back to global
        auto cfg = env.app().getRuntimeConfig().getConfig("10.0.0.2:51235");
        BEAST_EXPECT(cfg.has_value());
        BEAST_EXPECT(cfg->sendDelayMs == 50);
    }

    // clear_all removes every entry and deactivates the service.
    void
    testClearAll()
    {
        testcase("CLEAR_ALL");
        using namespace test::jtx;
        Env env{*this};
        // Set some configs
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["*"] = Json::objectValue;
            params["set"]["*"]["send_delay_ms"] = 100;
            params["set"]["10.0.0.2:51235"] = Json::objectValue;
            params["set"]["10.0.0.2:51235"]["send_drop_pct"] = 50.0;
            runtimeConfig(env, params);
        }
        BEAST_EXPECT(env.app().getRuntimeConfig().active());
        // Clear all
        {
            Json::Value params;
            params["clear_all"] = true;
            auto result = runtimeConfig(env, params);
            BEAST_EXPECT(result["configs"].size() == 0);
        }
        BEAST_EXPECT(!env.app().getRuntimeConfig().active());
        BEAST_EXPECT(!env.app().getRuntimeConfig().getConfig("*").has_value());
    }

    // A per-peer entry with no "*" entry applies only to that peer.
    void
    testPerPeerWithoutGlobal()
    {
        testcase("Per-peer config without global");
        using namespace test::jtx;
        Env env{*this};
        // Set only per-peer, no global
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["10.0.0.2:51235"] = Json::objectValue;
            params["set"]["10.0.0.2:51235"]["send_delay_ms"] = 300;
            runtimeConfig(env, params);
        }
        auto& rc = env.app().getRuntimeConfig();
        BEAST_EXPECT(rc.active());
        // Targeted peer gets the config
        auto peerCfg = rc.getConfig("10.0.0.2:51235");
        BEAST_EXPECT(peerCfg.has_value());
        BEAST_EXPECT(peerCfg->sendDelayMs == 300);
        // Other peers get nothing
        BEAST_EXPECT(!rc.getConfig("10.0.0.3:51235").has_value());
    }

    // message_types restricts a config to the named traffic categories.
    void
    testMessageTypeFilter()
    {
        testcase("Message type filter");
        using namespace test::jtx;
        Env env{*this};
        // Set with message_types filter
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["*"] = Json::objectValue;
            params["set"]["*"]["send_delay_ms"] = 100;
            params["set"]["*"]["message_types"] = Json::arrayValue;
            params["set"]["*"]["message_types"].append("proposal");
            params["set"]["*"]["message_types"].append("validation");
            auto result = runtimeConfig(env, params);
            // Verify response includes message_types
            auto const& global = result["configs"]["*"];
            BEAST_EXPECT(global.isMember("message_types"));
            BEAST_EXPECT(global["message_types"].size() == 2);
        }
        auto& rc = env.app().getRuntimeConfig();
        auto cfg = rc.getConfig("10.0.0.1:51235");
        if (!BEAST_EXPECT(cfg.has_value()))
            return;
        // Applies to proposal and validation categories
        BEAST_EXPECT(cfg->appliesTo(TrafficCount::category::proposal));
        BEAST_EXPECT(cfg->appliesTo(TrafficCount::category::validation));
        // Does NOT apply to other categories
        BEAST_EXPECT(!cfg->appliesTo(TrafficCount::category::transaction));
        BEAST_EXPECT(!cfg->appliesTo(TrafficCount::category::base));
    }

    // Omitting message_types means the config applies to every category.
    void
    testMessageTypeFilterEmpty()
    {
        testcase("No message type filter means all");
        using namespace test::jtx;
        Env env{*this};
        // Set without message_types — applies to all
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["*"] = Json::objectValue;
            params["set"]["*"]["send_delay_ms"] = 100;
            runtimeConfig(env, params);
        }
        auto cfg = env.app().getRuntimeConfig().getConfig("*");
        if (!BEAST_EXPECT(cfg.has_value()))
            return;
        BEAST_EXPECT(!cfg->messageCategories.has_value());
        BEAST_EXPECT(cfg->appliesTo(TrafficCount::category::proposal));
        BEAST_EXPECT(cfg->appliesTo(TrafficCount::category::validation));
        BEAST_EXPECT(cfg->appliesTo(TrafficCount::category::transaction));
        BEAST_EXPECT(cfg->appliesTo(TrafficCount::category::base));
    }

    // An unrecognized message type rejects the whole request atomically:
    // nothing is applied.
    void
    testInvalidMessageType()
    {
        testcase("Invalid message type returns error");
        using namespace test::jtx;
        Env env{*this};
        Json::Value params;
        params["set"] = Json::objectValue;
        params["set"]["*"] = Json::objectValue;
        params["set"]["*"]["send_delay_ms"] = 100;
        params["set"]["*"]["message_types"] = Json::arrayValue;
        params["set"]["*"]["message_types"].append("proposals");  // typo
        auto result = runtimeConfig(env, params);
        BEAST_EXPECT(result.isMember("error"));
        BEAST_EXPECT(result["error"].asString() == "invalidParams");
        // Config should NOT have been applied
        BEAST_EXPECT(!env.app().getRuntimeConfig().active());
    }

    // Out-of-range drop percentages are clamped, not rejected.
    void
    testDropPctClamping()
    {
        testcase("send_drop_pct clamped to 0-100");
        using namespace test::jtx;
        Env env{*this};
        // Over 100
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["*"] = Json::objectValue;
            params["set"]["*"]["send_drop_pct"] = 200.0;
            runtimeConfig(env, params);
        }
        auto cfg = env.app().getRuntimeConfig().getConfig("*");
        BEAST_EXPECT(cfg.has_value());
        BEAST_EXPECT(cfg->sendDropPctX100 == 10000);  // clamped to 100%
        // Negative
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["*"] = Json::objectValue;
            params["set"]["*"]["send_drop_pct"] = -50.0;
            runtimeConfig(env, params);
        }
        cfg = env.app().getRuntimeConfig().getConfig("*");
        BEAST_EXPECT(cfg.has_value());
        BEAST_EXPECT(cfg->sendDropPctX100 == 0);  // clamped to 0%
    }

    // rng_claim_drop_pct survives a set/get round-trip and is removed
    // by clear_all.
    void
    testRngClaimDropPct()
    {
        testcase("rng_claim_drop_pct round-trips");
        using namespace test::jtx;
        Env env{*this};
        // Set rng_claim_drop_pct
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["*"] = Json::objectValue;
            params["set"]["*"]["rng_claim_drop_pct"] = 50.0;
            auto result = runtimeConfig(env, params);
            auto const& global = result["configs"]["*"];
            BEAST_EXPECT(global["rng_claim_drop_pct"].asDouble() == 50.0);
        }
        BEAST_EXPECT(env.app().getRuntimeConfig().active());
        // Verify via getConfig
        auto cfg = env.app().getRuntimeConfig().getConfig("*");
        BEAST_EXPECT(cfg.has_value());
        BEAST_EXPECT(cfg->rngClaimDropPctX100 == 5000);
        // Clear and verify removal
        {
            Json::Value params;
            params["clear_all"] = true;
            auto result = runtimeConfig(env, params);
            BEAST_EXPECT(result["configs"].size() == 0);
        }
        BEAST_EXPECT(!env.app().getRuntimeConfig().active());
    }

    // rng_claim_drop_pct uses the same clamping rules as send_drop_pct.
    void
    testRngClaimDropPctClamping()
    {
        testcase("rng_claim_drop_pct clamped to 0-100");
        using namespace test::jtx;
        Env env{*this};
        // Over 100
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["*"] = Json::objectValue;
            params["set"]["*"]["rng_claim_drop_pct"] = 150.0;
            runtimeConfig(env, params);
        }
        auto cfg = env.app().getRuntimeConfig().getConfig("*");
        BEAST_EXPECT(cfg.has_value());
        BEAST_EXPECT(cfg->rngClaimDropPctX100 == 10000);  // clamped to 100%
        // Negative
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["*"] = Json::objectValue;
            params["set"]["*"]["rng_claim_drop_pct"] = -10.0;
            runtimeConfig(env, params);
        }
        cfg = env.app().getRuntimeConfig().getConfig("*");
        BEAST_EXPECT(cfg.has_value());
        BEAST_EXPECT(cfg->rngClaimDropPctX100 == 0);  // clamped to 0%
    }

    // The boolean explicit_final_proposal flag round-trips and can be
    // re-enabled per-peer over a global false.
    void
    testExplicitFinalProposalToggle()
    {
        testcase("explicit_final_proposal round-trips and merges");
        using namespace test::jtx;
        Env env{*this};
        // Global default for this node: skip explicit final proposal.
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["*"] = Json::objectValue;
            params["set"]["*"]["explicit_final_proposal"] = false;
            auto result = runtimeConfig(env, params);
            auto const& global = result["configs"]["*"];
            BEAST_EXPECT(global["explicit_final_proposal"].asBool() == false);
        }
        auto& rc = env.app().getRuntimeConfig();
        BEAST_EXPECT(rc.active());
        // Global view is false.
        auto globalCfg = rc.getConfig("*");
        BEAST_EXPECT(globalCfg.has_value());
        BEAST_EXPECT(globalCfg->explicitFinalProposal.has_value());
        BEAST_EXPECT(*globalCfg->explicitFinalProposal == false);
        // Per-peer override can re-enable.
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["10.0.0.2:51235"] = Json::objectValue;
            params["set"]["10.0.0.2:51235"]["explicit_final_proposal"] = true;
            runtimeConfig(env, params);
        }
        auto peerCfg = rc.getConfig("10.0.0.2:51235");
        BEAST_EXPECT(peerCfg.has_value());
        BEAST_EXPECT(peerCfg->explicitFinalProposal.has_value());
        BEAST_EXPECT(*peerCfg->explicitFinalProposal == true);
        auto otherCfg = rc.getConfig("10.0.0.3:51235");
        BEAST_EXPECT(otherCfg.has_value());
        BEAST_EXPECT(otherCfg->explicitFinalProposal.has_value());
        BEAST_EXPECT(*otherCfg->explicitFinalProposal == false);
    }

    // An explicitly empty per-peer message_types array overrides an
    // inherited global filter, meaning "all categories".
    void
    testPerPeerClearInheritedFilter()
    {
        testcase("Per-peer can override global filter to all");
        using namespace test::jtx;
        Env env{*this};
        // Global: only proposals
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["*"] = Json::objectValue;
            params["set"]["*"]["send_delay_ms"] = 100;
            params["set"]["*"]["message_types"] = Json::arrayValue;
            params["set"]["*"]["message_types"].append("proposal");
            runtimeConfig(env, params);
        }
        // Per-peer: message_types = [] (explicitly all)
        {
            Json::Value params;
            params["set"] = Json::objectValue;
            params["set"]["10.0.0.2:51235"] = Json::objectValue;
            params["set"]["10.0.0.2:51235"]["message_types"] = Json::arrayValue;
            runtimeConfig(env, params);
        }
        auto& rc = env.app().getRuntimeConfig();
        // Per-peer should apply to all categories (empty set override)
        auto peerCfg = rc.getConfig("10.0.0.2:51235");
        BEAST_EXPECT(peerCfg.has_value());
        BEAST_EXPECT(peerCfg->appliesTo(TrafficCount::category::proposal));
        BEAST_EXPECT(peerCfg->appliesTo(TrafficCount::category::validation));
        BEAST_EXPECT(peerCfg->appliesTo(TrafficCount::category::transaction));
        // Other peers still only get proposal filter from global
        auto otherCfg = rc.getConfig("10.0.0.3:51235");
        BEAST_EXPECT(otherCfg.has_value());
        BEAST_EXPECT(otherCfg->appliesTo(TrafficCount::category::proposal));
        BEAST_EXPECT(!otherCfg->appliesTo(TrafficCount::category::validation));
    }

public:
    void
    run() override
    {
        testGetEmpty();
        testSetGlobal();
        testSetPerPeer();
        testClear();
        testClearAll();
        testPerPeerWithoutGlobal();
        testMessageTypeFilter();
        testMessageTypeFilterEmpty();
        testInvalidMessageType();
        testDropPctClamping();
        testRngClaimDropPct();
        testRngClaimDropPctClamping();
        testExplicitFinalProposalToggle();
        testPerPeerClearInheritedFilter();
    }
};
BEAST_DEFINE_TESTSUITE(RuntimeConfig, rpc, ripple);
} // namespace ripple

View File

@@ -1,131 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2026 XRPL Labs
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef TEST_UNIT_TEST_SUITE_LOGS_WITH_OVERRIDES_H
#define TEST_UNIT_TEST_SUITE_LOGS_WITH_OVERRIDES_H
#include <test/unit_test/SuiteJournal.h>
#include <xrpl/basics/Log.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/beast/utility/Journal.h>
#include <iostream>
#include <mutex>
#include <set>
#include <string>
namespace ripple {
namespace test {
/** A Journal::Sink that writes directly to stderr.
 *
 * Unlike SuiteJournalSink (which writes to suite_.log and is only
 * visible when tests fail), this always produces visible output.
 */
class StderrJournalSink : public beast::Journal::Sink
{
    std::string partition_;

public:
    StderrJournalSink(
        std::string const& partition,
        beast::severities::Severity threshold)
        : Sink(threshold, false), partition_(partition)
    {
    }

    bool
    active(beast::severities::Severity level) const override
    {
        return level >= threshold();
    }

    void
    write(beast::severities::Severity level, std::string const& text) override
    {
        // Suppress anything below the configured threshold.
        if (active(level))
            writeAlways(level, text);
    }

    void
    writeAlways(beast::severities::Severity level, std::string const& text)
        override
    {
        // Serialize writers so concurrent log lines don't interleave.
        static std::mutex mtx;
        std::lock_guard lock(mtx);
        std::cerr << partition_ << ":" << text << std::endl;
    }
};
/** SuiteLogs with per-partition severity overrides written to stderr.
 *
 * Overridden partitions write to stderr (always visible).
 * All other partitions use SuiteJournalSink (suite_.log, only on failure).
 *
 * Usage:
 *     #include <test/unit_test/SuiteLogsWithOverrides.h>
 *
 *     using Sev = beast::severities::Severity;
 *     Env env{*this, cfg, features,
 *         std::make_unique<SuiteLogsWithOverrides>(
 *             *this,
 *             SuiteLogsWithOverrides::Overrides{
 *                 {"Export", Sev::kTrace},
 *                 {"TxQ", Sev::kInfo},
 *                 {"View", Sev::kDebug},
 *             })};
 */
class SuiteLogsWithOverrides : public Logs
{
    beast::unit_test::suite& suite_;
    std::set<std::string> overridden_;

public:
    using Overrides = std::initializer_list<
        std::pair<std::string, beast::severities::Severity>>;

    SuiteLogsWithOverrides(
        beast::unit_test::suite& suite,
        Overrides overrides,
        beast::severities::Severity defaultThresh = beast::severities::kError)
        : Logs(defaultThresh), suite_(suite)
    {
        // Record each overridden partition and apply its threshold now so
        // later makeSink calls route it to stderr.
        for (auto const& [partition, severity] : overrides)
        {
            overridden_.insert(partition);
            get(partition).threshold(severity);
        }
    }

    ~SuiteLogsWithOverrides() override = default;

    std::unique_ptr<beast::Journal::Sink>
    makeSink(
        std::string const& partition,
        beast::severities::Severity threshold) override
    {
        // Overridden partitions go straight to stderr; all others use the
        // suite sink, whose output is visible only on test failure.
        if (overridden_.find(partition) != overridden_.end())
            return std::make_unique<StderrJournalSink>(partition, threshold);
        return std::make_unique<SuiteJournalSink>(partition, threshold, suite_);
    }
};
} // namespace test
} // namespace ripple
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -1,458 +0,0 @@
#ifndef RIPPLE_APP_CONSENSUS_CONSENSUSEXTENSIONS_H_INCLUDED
#define RIPPLE_APP_CONSENSUS_CONSENSUSEXTENSIONS_H_INCLUDED
#include <xrpld/app/consensus/RCLCxLedger.h>
#include <xrpld/app/consensus/RCLCxPeerPos.h>
#include <xrpld/app/consensus/RCLCxTx.h>
#include <xrpld/app/misc/ExportSigCollector.h>
#include <xrpld/consensus/ConsensusParms.h>
#include <xrpld/consensus/ConsensusTypes.h>
#include <xrpld/overlay/Message.h>
#include <xrpld/shamap/SHAMap.h>
#include <xrpl/basics/Log.h>
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/protocol/PublicKey.h>
#include <chrono>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <vector>
namespace ripple {
class Application;
class CanonicalTXSet;
class Ledger;
/// Concrete alias for the consensus tick context.
using TickContext = ConsensusTick<ExtendedPosition, RCLCxPeerPos, RCLTxSet>;
/// Concrete Xahau-owned manager for consensus extensions (RNG + Export).
///
/// Owns all RNG/Export state that was previously scattered across
/// RCLCxAdaptor and Consensus.h. Lifecycle hooks are grouped by
/// caller/threading context.
class ConsensusExtensions
{
Application& app_;
ExportSigCollector exportSigCollector_;
public:
beast::Journal j_; // public: accessed by extensionsTick template
// Type of sidecar set, known at fetch time from proposal context.
enum class SidecarKind : uint8_t { commit, reveal, exportSig };
struct ActiveValidatorView
{
hash_set<PublicKey> masterKeys;
hash_set<NodeID> nodeIds;
std::optional<uint256> sourceLedgerHash;
bool fromUNLReport = false;
// Export paths receive validator keys; RNG sidecars identify
// validators by NodeID. Keep both indexes in lockstep.
void
insertMaster(PublicKey const& masterKey)
{
masterKeys.insert(masterKey);
nodeIds.insert(calcNodeID(masterKey));
}
void
eraseMaster(PublicKey const& masterKey)
{
masterKeys.erase(masterKey);
nodeIds.erase(calcNodeID(masterKey));
}
std::size_t
size() const
{
return masterKeys.size();
}
bool
containsMaster(PublicKey const& masterKey) const
{
return masterKeys.count(masterKey) > 0;
}
bool
containsNode(NodeID const& nodeId) const
{
return nodeIds.count(nodeId) > 0;
}
};
using ActiveValidatorViewPtr = std::shared_ptr<ActiveValidatorView const>;
private:
// --- RNG Pipelined Storage ---
hash_map<NodeID, uint256> pendingCommits_;
hash_map<NodeID, uint256> pendingReveals_;
hash_map<NodeID, PublicKey> nodeIdToKey_;
// Ephemeral entropy secret (in-memory only, crash = non-revealer)
uint256 myEntropySecret_;
bool entropyFailed_ = false;
bool rngEnabledThisRound_ = false;
bool exportEnabledThisRound_ = false;
// Real SHAMaps for the current round (unbacked, ephemeral)
std::shared_ptr<SHAMap> commitSetMap_;
std::shared_ptr<SHAMap> entropySetMap_;
std::shared_ptr<SHAMap> exportSigSetMap_;
std::optional<LedgerIndex> rngRoundSeq_;
std::shared_ptr<SHAMap const> consensusTxSetMap_;
hash_map<uint256, std::shared_ptr<STTx const>> consensusExportTxns_;
std::optional<uint256> consensusTxSetHash_;
// Track pending sidecar set fetches by hash → kind.
// Kind is known at fetch time (call site context), so
// onAcquiredSidecarSet can dispatch without content-sniffing.
hash_map<uint256, SidecarKind> pendingRngFetches_;
// Parent-ledger validator view used by RNG and Export quorum logic.
ActiveValidatorViewPtr activeValidatorView_ =
std::make_shared<ActiveValidatorView const>();
mutable std::mutex activeValidatorViewMutex_;
// Recent proposers intersected with the active UNL (liveness hint)
hash_set<NodeID> likelyParticipants_;
// Current consensus mode (set by adaptor at round start)
ConsensusMode mode_{ConsensusMode::observing};
public:
// --- RNG Sub-state Machine (accessed by extensionsTick template) ---
EstablishState estState_{EstablishState::ConvergingTx};
std::chrono::steady_clock::time_point revealPhaseStart_{};
std::chrono::steady_clock::time_point commitHashConflictStart_{};
bool explicitFinalProposalSent_{false};
bool entropySetPublished_{false};
std::chrono::steady_clock::time_point entropyPublishStart_{};
bool exportSigGateStarted_{false};
std::chrono::steady_clock::time_point exportSigGateStart_{};
bool exportSigConvergenceFailed_{false};
/** Proof data from a proposal signature, for embedding in SHAMap
entries. Contains everything needed to independently verify
that a validator committed/revealed a specific value. */
struct ProposalProof
{
std::uint32_t proposeSeq;
std::uint32_t closeTime;
uint256 prevLedger;
Serializer positionData; // serialized ExtendedPosition
Buffer signature;
};
private:
// Proposal proofs keyed by NodeID.
// commitProofs_: only seq=0 proofs (deterministic across all nodes).
// proposalProofs_: latest proof with reveal (for entropySet).
hash_map<NodeID, ProposalProof> commitProofs_;
hash_map<NodeID, ProposalProof> proposalProofs_;
public:
ConsensusExtensions(Application& app, beast::Journal j);
ExportSigCollector&
exportSigCollector()
{
return exportSigCollector_;
}
ExportSigCollector const&
exportSigCollector() const
{
return exportSigCollector_;
}
/// Set the current consensus mode (called by adaptor).
void
setMode(ConsensusMode m)
{
mode_ = m;
}
// --- RNG Helper Methods ---
std::size_t
quorumThreshold() const;
std::size_t
exportSigQuorumThreshold() const;
void
setExpectedProposers(hash_set<NodeID> proposers);
std::size_t
pendingCommitCount() const;
std::size_t
pendingRevealCount() const;
std::size_t
expectedProposerCount() const;
bool
hasQuorumOfCommits() const;
bool
hasMinimumReveals() const;
bool
hasAnyReveals() const;
bool
shouldZeroEntropy() const;
bool
rngEnabled() const;
bool
exportEnabled() const;
bool
bootstrapFastStartEnabled() const;
bool
shouldSendExplicitFinalProposal() const;
std::optional<RCLTxSet>
buildExplicitFinalProposalTxSet(RCLTxSet const& txns, LedgerIndex seq);
uint256
buildCommitSet(LedgerIndex seq);
uint256
buildEntropySet(LedgerIndex seq);
uint256
buildExportSigSet(LedgerIndex seq);
bool
hasPendingExportSigs() const;
bool
hasConsensusExportTxns() const;
void
setExportSigConvergenceFailed();
bool
exportSigConvergenceFailed() const;
bool
isSidecarSet(uint256 const& hash) const;
ActiveValidatorViewPtr
activeValidatorView() const;
ActiveValidatorViewPtr
makeActiveValidatorView(
std::shared_ptr<Ledger const> const& prevLedger) const;
bool
isActiveValidator(PublicKey const& validationKey) const;
bool
isActiveValidator(
PublicKey const& validationKey,
ActiveValidatorView const& view) const;
void
onAcquiredSidecarSet(std::shared_ptr<SHAMap> const& map);
void
fetchRngSetIfNeeded(
std::optional<uint256> const& hash,
SidecarKind kind = SidecarKind::commit);
/// Fetch any sidecar sets from a peer's position if needed.
void
fetchSidecarsIfNeeded(ExtendedPosition const& peerPos);
void
cacheConsensusTxSet(RCLTxSet const& txns);
std::size_t
verifyPendingExportSigs(RCLTxSet const& txns, LedgerIndex seq);
void
cacheUNLReport(std::shared_ptr<Ledger const> const& prevLedger = {});
bool
isUNLReportMember(NodeID const& nodeId) const;
void
generateEntropySecret();
uint256
getEntropySecret() const;
void
setEntropyFailed();
/// Self-seed our own reveal into pendingReveals_.
/// Called from extensionsTick at reveal transition.
/// In production, decorateMessage also self-seeds (belt + suspenders).
void
selfSeedReveal();
void
clearRngState();
void
onPreBuild(CanonicalTXSet& retriableTxs, LedgerIndex seq);
void
harvestRngData(
NodeID const& nodeId,
PublicKey const& publicKey,
ExtendedPosition const& position,
std::uint32_t proposeSeq,
NetClock::time_point closeTime,
uint256 const& prevLedger,
Slice const& signature);
static Blob
serializeProof(ProposalProof const& proof);
static std::optional<ProposalProof>
deserializeProof(Blob const& proofBlob);
static bool
verifyProof(
Blob const& proofBlob,
PublicKey const& publicKey,
uint256 const& expectedDigest,
bool isCommit);
/// Append extension diagnostics to consensus JSON.
void
appendJson(Json::Value& ret) const;
/// Log extension-specific position fields at trace level.
void
logPosition(
ExtendedPosition const& pos,
beast::Journal j,
beast::severities::Severity level = beast::severities::kTrace) const;
// --- Consensus/adaptor lifecycle hooks ---
/** Reset per-round extension state.
Called from startRoundInternal under RCLConsensus::mutex_. */
void
onRoundStart(RCLCxLedger const& prevLedger, hash_set<NodeID> lastProposers);
/** Extract extension data from the parsed proposal.
Called from peerProposalInternal under RCLConsensus::mutex_.
`exportSignatures` defaults to empty for proposals whose wire message
carried no export signature blobs. */
void
onTrustedPeerProposal(
NodeID const& nodeId,
PublicKey const& publicKey,
ExtendedPosition const& position,
std::uint32_t proposeSeq,
NetClock::time_point closeTime,
uint256 const& prevLedger,
Slice const& signature,
std::vector<std::string> const& exportSignatures = {});
/** Harvest proposal-carried export signatures after the proposal payload is
known to be signed by `publicKey`.
`source` is presumably a caller tag for diagnostics, and the return is
presumably the number of signatures taken — confirm at the definition. */
std::size_t
harvestExportSignatures(
PublicKey const& publicKey,
uint256 const& prevLedger,
std::vector<std::string> const& exportSignatures,
char const* source);
/** Signal that the accept/build path finished successfully.
Called from doAccept (frozen state, no consensus mutex). */
void
onAcceptComplete();
/** Extract export signatures from the raw protobuf wire message.
Called from PeerImp overlay ingress (outside consensus mutex).
Only touches the independently synchronized ExportSigCollector. */
void
onTrustedPeerMessage(::protocol::TMProposeSet const& wireMsg);
/** Attach RNG commitment to the initial proposal position.
Called from onClose BEFORE signing. Affects proposal identity.
Generates entropy secret, caches UNL, seeds own commitment. */
void
decoratePosition(
ExtendedPosition& pos,
std::shared_ptr<Ledger const> const& prevLedger,
bool proposing);
/** Attach export signatures before proposal signing.
The caller hashes the resulting blobs into ExtendedPosition so the
proposal signature authenticates the side-channel protobuf field. */
void
attachExportSignatures(
protocol::TMProposeSet& prop,
RCLCxPeerPos::Proposal const& proposal);
/** Record post-signature RNG state for the outgoing protobuf.
Self-seeds own reveal and stores proposal proofs. */
void
decorateMessage(
protocol::TMProposeSet& prop,
RCLCxPeerPos::Proposal const& proposal,
ExtendedPosition const& signedPosition,
Buffer const& proposalSig);
/** Drive per-tick extension sub-state work during establish.
NOTE(review): the ExtensionTickResult presumably tells the caller
whether more work is pending — confirm at its definition. */
ExtensionTickResult
onTick(TickContext const& ctx);
// --- Accessors for adaptor forwarding ---
/// Record whether featureConsensusEntropy is enabled for this round
/// (set from preStartRound using the previous ledger's rules).
void
setRngEnabledThisRound(bool v)
{
rngEnabledThisRound_ = v;
}
/// Record whether featureExport is enabled for this round
/// (set from preStartRound using the previous ledger's rules).
void
setExportEnabledThisRound(bool v)
{
exportEnabledThisRound_ = v;
}
/** Whether establish-phase extension work is still outstanding.
True while the sub-state machine has moved past plain tx-set
convergence, or while the export signature gate is open / verified
export signatures are still pending (export feature only). */
bool
extensionsBusy() const
{
    // Any sub-state beyond ConvergingTx means RNG work remains.
    if (estState_ != EstablishState::ConvergingTx)
        return true;
    // Export-side work only counts when the feature is on this round.
    if (!exportEnabled())
        return false;
    return exportSigGateStarted_ || hasPendingExportSigs();
}
/// Current establish-phase sub-state (ConvergingTx is the initial
/// state; see resetSubState).
EstablishState
estState() const
{
return estState_;
}
/** Return the establish sub-state machine to its initial state.
Clears every per-round flag and timestamp so the next establish
phase begins from plain transaction-set convergence. All members
reset here are independent; order is grouping only. */
void
resetSubState()
{
    // Back to ordinary transaction-set convergence.
    estState_ = EstablishState::ConvergingTx;
    // RNG commit/reveal bookkeeping.
    revealPhaseStart_ = {};
    commitHashConflictStart_ = {};
    entropyPublishStart_ = {};
    entropySetPublished_ = false;
    explicitFinalProposalSent_ = false;
    // Export signature gate bookkeeping.
    exportSigGateStart_ = {};
    exportSigGateStarted_ = false;
    exportSigConvergenceFailed_ = false;
}
};
} // namespace ripple
#endif

View File

@@ -1,250 +0,0 @@
# Consensus Extension Design Principles
This note captures the principles behind the Xahau consensus extensions:
ConsensusEntropy/RNG, proposal sidecars, and export signature convergence.
Read this before changing `ConsensusExtensions`, `ConsensusExtensionsTick`,
`ExtendedPosition`, sidecar SHAMap handling, or the related CSF tests.
The short version: extension data may coordinate extra same-ledger features,
but it must not redefine ordinary transaction-set consensus. When extension
state cannot be made safe in time, the extension degrades deterministically
and the ledger still closes.
The priority order for consensus extensions is: safe, fast, works. Safety means
extension timing must not create divergent closed-ledger effects when a bounded
coordination step can avoid it. Fast means those coordination steps stay short
and conditional, never becoming an open-ended wait for an extension feature to
succeed. Works means missed or late extension material follows that feature's
deterministic fallback, such as zero entropy for RNG or normal Export
retry/expiry, rather than blocking core consensus.
## Core Invariants
1. Core consensus remains keyed by the transaction set.
`ExtendedPosition::operator==` intentionally compares only `txSetHash`.
RNG, export sig, commit-set, and entropy-set hashes are proposal sidecars.
They are coordinated during establish, but they do not define whether peers
agree on the ordinary transaction set.
2. Extension waits are bounded.
RNG and export sidecar convergence may wait briefly inside establish, but
they must not block ledger close indefinitely. If RNG cannot establish a
safe non-zero entropy value, it injects the deterministic zero-entropy path.
If export signatures cannot converge, export retries or expires according
to transaction rules.
3. Safety is in validation; extension logic is deliberation.
ConsensusEntropy is materialized during accept/buildLCL as a deterministic
pseudo-transaction. Nodes agree on the base transaction set first, then
derive the entropy transaction from agreed sidecar inputs. Any local fault
still has to survive normal validation/LCL agreement.
4. Converge signed inputs, not just derived outputs.
RNG commits, RNG reveals, and export signatures are the verifiable inputs.
The design converges on those input sets using sidecar SHAMaps. The final
entropy digest and export quorum result are derived from the converged
inputs.
5. Sidecars are not transactions.
Commit, reveal, and export signature entries are `STObject(sfGeneric)`
leaves in ephemeral `SHAMapType::SIDECAR` maps. They use `sfSidecarType`
to distinguish payloads and `HashPrefix::sidecar` for item hashes. They
are fetched through sidecar sync, not parsed or submitted as transactions.
6. Proposal-visible or validation-visible extension data must be signed.
Do not attach behavior-changing sidecar payloads as unsigned out-of-band
proposal or validation wrapper data. If stripping or changing a field would
alter RNG or export behavior, that field must be covered by the relevant
signed payload and by the identity used for duplicate suppression/replay
checks on that path.
Today ConsensusExtensions uses signed proposal sidecars, not validation
sidecars. If a future design carries extension material through
validations, the same rule applies: the behavior-changing data, or a digest
of it, must be inside the signed validation payload and bound to the
validating key and ledger. A protobuf field outside the signed validation is
only transport metadata; it must not affect consensus-extension behavior.
## Validator Set And Quorum
The active validator view is the shared denominator for RNG and export:
- Prefer `UNLReport.sfActiveValidators` from the consensus parent ledger.
- If no report is available, fall back to configured trusted validators so
early ledgers and dev/test networks can make progress.
- If `featureNegativeUNL` is enabled, subtract the parent ledger's Negative
UNL from whichever source produced the view.
- Use the same snapshot throughout the round.
`quorumThreshold()` is 80% of that active validator view. Recent or expected
proposers are liveness hints only; they do not shrink the quorum denominator.
Be careful with `prevProposers`: in the generic consensus code it counts
peers only and excludes this node. When checking whether the previous round
had enough active participants, add our own proposer slot to that count if
this node is proposing.
## RNG Commit/Reveal Principles
RNG proceeds through establish sub-states:
1. `ConvergingTx`: ordinary transaction-set convergence while harvesting
commitments.
2. `ConvergingCommit`: after proofed commit quorum, publish the commit sidecar
hash and reveal the same secret that produced the original commitment.
3. `ConvergingReveal`: collect reveals, publish the entropy sidecar hash, and
wait for sidecar agreement or deterministic fallback.
Commit quorum counts only proofed commits from active validators. A commit that
cannot be emitted as a verifiable sidecar leaf does not count.
Reveal collection targets all known committers, because the commit sidecar set
defines who is expected to reveal. The reveal wait is still bounded. A node
that crashes, withholds, or partitions after committing must not stop the
ledger forever.
Final entropy is computed from the agreed entropy sidecar SHAMap, not from a
node's opportunistic local `pendingReveals_` map. This prevents different
local reveal subsets at timeout boundaries from producing different entropy.
## Entropy Alignment Rules
Non-zero entropy requires quorum alignment on the entropy sidecar hash.
The alignment count is:
```
1 (for our own published entropySetHash)
  + the number of tx-converged peers advertising that same entropySetHash
```
If that count reaches `quorumThreshold()`, the node may proceed with non-zero
entropy even if a below-quorum minority advertises a conflicting or
unacquirable entropy hash.
If no entropy hash reaches quorum alignment before the bounded deadline, the
round must fall back to zero entropy. This is the safe degradation path, not a
consensus failure.
Examples with five active validators and threshold four:
- Four honest validators align on one entropy hash and one validator advertises
a bogus hash: proceed with non-zero entropy for the honest quorum.
- Two validators advertise different bogus hashes and only three align on the
honest hash: fall back to zero entropy.
- No peer entropy hash is observed in time: fall back to zero entropy.
Zero entropy means unavailable entropy. The pseudo-transaction is still
deterministic, with zero digest and zero entropy count, so hooks can detect
the unavailable path.
## Sidecar Convergence Rules
Sidecar SHAMaps use union convergence:
- Every valid active-validator contribution belongs in the set.
- Sets only grow during fetch/merge.
- Fetch/merge is a safety net for missed proposals, not the normal transport.
- Rebuild and republish the sidecar hash after merging missing leaves.
Do not use avalanche-style transaction inclusion logic for sidecar inputs.
For RNG and export sidecars, the disagreement to resolve is usually timing or
delivery, not whether a valid contribution should be included.
The entropy sidecar gate always gives peers at least one observation tick after
publishing `entropySetHash`. Publishing and accepting in the same tick can hide
conflicts and produce asymmetric zero/non-zero outcomes.
## Export Principles
`featureExport` and `featureConsensusEntropy` are independently amendment
gated.
Export can run without ConsensusEntropy and still uses the active validator
view's 80% quorum threshold. Verified export signature sidecars converge
through `ExtendedPosition`, and the `exportSigSetHash` is signed by proposals
whether or not RNG is enabled. Do not make Export liveness depend on unanimity:
one active validator with a missing, delayed, or conflicting sidecar must not
veto an otherwise quorum-aligned export round.
The extended proposal machinery is enabled when either feature needs signed
sidecar fields. Do not make Export depend on RNG availability just because RNG
was the first consumer of `ExtendedPosition`.
When `featureExport` is disabled, the export sidecar gate is disabled too. Stale
collector entries must not keep a stopped amendment active.
Only verified export signatures count toward quorum or enter export sidecar
SHAMaps. Proposal-ingress signatures are sender-bound to the trusted proposal
validator and may be stored as unverified until the matching export transaction
is available for cryptographic verification.
The consensus candidate transaction set is the authority for export signature
verification. The open ledger may be used for early proposal ingestion, but
once a candidate tx set exists, only signatures verified against the `ttEXPORT`
in that candidate set may become quorum material or enter `exportSigSetHash`.
Export sidecar publication is local-material only. A node may publish only the
verified export signatures it actually has locally, and only for `ttEXPORT`
transactions in the consensus candidate set. A fetched export sidecar is not a
separate apply input: on merge, each leaf must be active-view checked, verified
against the candidate transaction, and promoted into `ExportSigCollector`.
Closed-ledger apply snapshots that collector, so the sidecar convergence state
and the signer set used by `ttEXPORT` stay on the same path.
If the consensus candidate contains a `ttEXPORT` but the node has no eligible
local export signatures yet, the export sidecar gate opens only a bounded
safety window for tx-converged peers to advertise `exportSigSetHash`. This is
not a wait-for-Export-success mechanism; it is a short opportunity to avoid
closing a minority ledger while sidecar convergence is already reachable. If no
advertised sidecar appears by the deadline, the gate stops waiting and the
export retries or expires through normal transaction rules.
Export success requires quorum alignment on `exportSigSetHash`, not merely a
local collector quorum. If a quorum of tx-converged participants advertises the
same export signature sidecar hash, that hash is aligned and below-quorum
conflicts are ignored. If no export signature hash reaches quorum alignment by
the bounded deadline, do not choose the largest non-quorum set; the export
retries or expires according to normal transaction rules.
Closed-ledger apply must not promote unverified proposal-carried signatures into
current-round quorum material. It may verify and retain them for a future retry,
where they can be published in a sidecar set and converged before use.
Export sig convergence runs in parallel with RNG. An export-side convergence
failure must not change RNG semantics; an RNG fallback must not make export
unsafe. Each feature has its own gate and fallback.
Accept-time cleanup must preserve Export state through `buildLCL` whenever
`featureExport` is enabled. RNG-disabled does not mean extensions-disabled:
`ttEXPORT` still needs the round's export sidecar convergence state when it
applies.
CSF consensus tests model the export sidecar gate directly. Testnet scenarios
under `.testnet/scenarios/export/` cover live-node Export+CE behavior and
Export-only quorum behavior.
## Review Checklist
When changing consensus extension code, check these questions:
- Does this preserve transaction-set equality as the core consensus identity?
- Does every extension wait have a bounded fallback?
- Does non-zero entropy require active-validator quorum alignment?
- Can one bad validator deny entropy to an honest quorum? It must not.
- Can a sub-quorum set produce non-zero entropy? It must not.
- Are quorum calculations using the active validator view, not recent
proposers as the denominator?
- Are sidecar entries typed as sidecars, not pseudo-transactions?
- Are proposal-visible or validation-visible sidecar fields covered by the
relevant signature and duplicate/replay identity?
- Are export signatures verified before they count?
- Does export success require `exportSigSetHash` alignment, not just local
collector quorum?
- Can one bad validator deny Export to an honest quorum? It must not.
- Can timeout select a largest-but-below-quorum export sidecar set? It must not.
- Are CE and Export still independently gated and independently stoppable?

View File

@@ -17,7 +17,6 @@
*/
//==============================================================================
#include <xrpld/app/consensus/ConsensusExtensions.h>
#include <xrpld/app/consensus/RCLConsensus.h>
#include <xrpld/app/consensus/RCLValidations.h>
#include <xrpld/app/ledger/BuildLedger.h>
@@ -28,39 +27,27 @@
#include <xrpld/app/ledger/LocalTxs.h>
#include <xrpld/app/ledger/OpenLedger.h>
#include <xrpld/app/misc/AmendmentTable.h>
#include <xrpld/app/misc/CanonicalTXSet.h>
#include <xrpld/app/misc/HashRouter.h>
#include <xrpld/app/misc/LoadFeeTrack.h>
#include <xrpld/app/misc/NegativeUNLVote.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/misc/RuntimeConfig.h>
#include <xrpld/app/misc/Transaction.h>
#include <xrpld/app/misc/TxQ.h>
#include <xrpld/app/misc/ValidatorKeys.h>
#include <xrpld/app/misc/ValidatorList.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/consensus/Consensus.h>
#include <xrpld/consensus/LedgerTiming.h>
#include <xrpld/overlay/Overlay.h>
#include <xrpld/overlay/predicates.h>
#include <xrpl/basics/random.h>
#include <xrpl/beast/core/LexicalCast.h>
#include <xrpl/crypto/csprng.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/protocol/BuildInfo.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/SecretKey.h>
#include <xrpl/protocol/Sign.h>
#include <xrpl/protocol/TxFlags.h>
#include <xrpl/protocol/TxFormats.h>
#include <xrpl/protocol/digest.h>
#include <boost/algorithm/string.hpp>
#include <algorithm>
#include <cstring>
#include <iomanip>
#include <mutex>
#include <random>
namespace ripple {
@@ -70,7 +57,7 @@ RCLConsensus::RCLConsensus(
LedgerMaster& ledgerMaster,
LocalTxs& localTxs,
InboundTransactions& inboundTransactions,
clock_type const& clock,
Consensus<Adaptor>::clock_type const& clock,
ValidatorKeys const& validatorKeys,
beast::Journal journal)
: adaptor_(
@@ -81,13 +68,11 @@ RCLConsensus::RCLConsensus(
inboundTransactions,
validatorKeys,
journal)
, consensus_(std::make_unique<Consensus<Adaptor>>(clock, adaptor_, journal))
, consensus_(clock, adaptor_, journal)
, j_(journal)
{
}
RCLConsensus::~RCLConsensus() = default;
RCLConsensus::Adaptor::Adaptor(
Application& app,
std::unique_ptr<FeeVote>&& feeVote,
@@ -137,22 +122,6 @@ RCLConsensus::Adaptor::Adaptor(
}
}
// --- ConsensusExtensions helpers ---
ConsensusExtensions&
RCLConsensus::Adaptor::ce()
{
return app_.getConsensusExtensions();
}
ConsensusExtensions const&
RCLConsensus::Adaptor::ce() const
{
return app_.getConsensusExtensions();
}
// --- End ConsensusExtensions helpers ---
std::optional<RCLCxLedger>
RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash)
{
@@ -204,14 +173,10 @@ RCLConsensus::Adaptor::share(RCLCxPeerPos const& peerPos)
prop.set_proposeseq(proposal.proposeSeq());
prop.set_closetime(proposal.closeTime().time_since_epoch().count());
// Serialize full ExtendedPosition
Serializer positionData;
proposal.position().add(positionData);
auto const posSlice = positionData.slice();
prop.set_currenttxhash(posSlice.data(), posSlice.size());
prop.set_currenttxhash(
proposal.position().begin(), proposal.position().size());
prop.set_previousledger(
proposal.prevLedger().begin(), proposal.prevLedger().size());
proposal.prevLedger().begin(), proposal.position().size());
auto const pk = peerPos.publicKey().slice();
prop.set_nodepubkey(pk.data(), pk.size());
@@ -219,9 +184,6 @@ RCLConsensus::Adaptor::share(RCLCxPeerPos const& peerPos)
auto const sig = peerPos.signature();
prop.set_signature(sig.data(), sig.size());
for (auto const& exportSig : peerPos.exportSignatures())
prop.add_exportsignatures(exportSig.data(), exportSig.size());
app_.overlay().relay(prop, peerPos.suppressionID(), peerPos.publicKey());
}
@@ -255,52 +217,39 @@ RCLConsensus::Adaptor::propose(RCLCxPeerPos::Proposal const& proposal)
protocol::TMProposeSet prop;
auto wirePosition = proposal.position();
ce().attachExportSignatures(prop, proposal);
if (prop.exportsignatures_size() > 0)
wirePosition.exportSignaturesHash =
proposalExportSignaturesHash(prop.exportsignatures());
// Serialize full ExtendedPosition (includes RNG leaves and export
// signature digest)
Serializer positionData;
wirePosition.add(positionData);
auto const posSlice = positionData.slice();
prop.set_currenttxhash(posSlice.data(), posSlice.size());
prop.set_currenttxhash(
proposal.position().begin(), proposal.position().size());
prop.set_previousledger(
proposal.prevLedger().begin(), proposal.prevLedger().size());
prop.set_proposeseq(proposal.proposeSeq());
prop.set_closetime(proposal.closeTime().time_since_epoch().count());
prop.set_nodepubkey(
validatorKeys_.keys->publicKey.data(),
validatorKeys_.keys->publicKey.size());
auto sig = signDigest(
validatorKeys_.keys->publicKey,
validatorKeys_.keys->secretKey,
sha512Half(
HashPrefix::proposal,
std::uint32_t(proposal.proposeSeq()),
proposal.closeTime().time_since_epoch().count(),
proposal.prevLedger(),
wirePosition));
if (!validatorKeys_.keys)
{
JLOG(j_.warn()) << "RCLConsensus::Adaptor::propose: ValidatorKeys "
"not set: \n";
return;
}
auto const& keys = *validatorKeys_.keys;
prop.set_nodepubkey(keys.publicKey.data(), keys.publicKey.size());
auto sig =
signDigest(keys.publicKey, keys.secretKey, proposal.signingHash());
prop.set_signature(sig.data(), sig.size());
auto const suppression = proposalUniqueId(
wirePosition,
proposal.position(),
proposal.prevLedger(),
proposal.proposeSeq(),
proposal.closeTime(),
validatorKeys_.keys->publicKey,
keys.publicKey,
sig);
app_.getHashRouter().addSuppression(suppression);
ce().decorateMessage(prop, proposal, wirePosition, sig);
app_.overlay().broadcast(prop);
}
@@ -451,16 +400,12 @@ RCLConsensus::Adaptor::onClose(
// Needed because of the move below.
auto const setHash = initialSet->getHash().as_uint256();
ExtendedPosition pos{setHash};
ce().decoratePosition(pos, prevLedger, proposing);
return Result{
std::move(initialSet),
RCLCxPeerPos::Proposal{
initialLedger->info().parentHash,
RCLCxPeerPos::Proposal::seqJoin,
std::move(pos),
setHash,
closeTime,
app_.timeKeeper().closeTime(),
validatorKeys_.nodeID}};
@@ -498,13 +443,11 @@ RCLConsensus::Adaptor::onAccept(
jtACCEPT,
"acceptLedger",
[=, this, cj = std::move(consensusJson)]() mutable {
//@@start do-accept-freeze-contract
// Note that no lock is held or acquired during this job.
// This is because generic Consensus guarantees that once a ledger
// is accepted, the consensus results and capture by reference state
// will not change until startRound is called (which happens via
// endConsensus).
//@@end do-accept-freeze-contract
RclConsensusLogger clog("onAccept", validating, j_);
this->doAccept(
result,
@@ -586,17 +529,6 @@ RCLConsensus::Adaptor::doAccept(
}
}
//@@start auxiliary-pre-build-injection
// Inject consensus entropy pseudo-transaction (if amendment enabled).
// Export-only rounds still need extension state preserved through buildLCL
// so ttEXPORT can observe exportSigSetHash convergence at apply time.
//@@start accept-time-cleanup-disabled
if (ce().rngEnabled())
ce().onPreBuild(retriableTxs, prevLedger.seq() + 1);
else if (!ce().exportEnabled())
ce().clearRngState();
//@@end accept-time-cleanup-disabled
auto built = buildLCL(
prevLedger,
retriableTxs,
@@ -605,7 +537,6 @@ RCLConsensus::Adaptor::doAccept(
closeResolution,
result.roundTime.read(),
failed);
//@@end auxiliary-pre-build-injection
auto const newLCLHash = built.id();
JLOG(j_.debug()) << "Built ledger #" << built.seq() << ": " << newLCLHash;
@@ -798,8 +729,6 @@ RCLConsensus::Adaptor::doAccept(
app_.timeKeeper().adjustCloseTime(offset);
}
ce().onAcceptComplete();
}
void
@@ -897,10 +826,19 @@ RCLConsensus::Adaptor::validate(
validationTime = lastValidationTime_ + 1s;
lastValidationTime_ = validationTime;
if (!validatorKeys_.keys)
{
JLOG(j_.warn()) << "RCLConsensus::Adaptor::validate: ValidatorKeys "
"not set\n";
return;
}
auto const& keys = *validatorKeys_.keys;
auto v = std::make_shared<STValidation>(
lastValidationTime_,
validatorKeys_.keys->publicKey,
validatorKeys_.keys->secretKey,
keys.publicKey,
keys.secretKey,
validatorKeys_.nodeID,
[&](STValidation& v) {
v.setFieldH256(sfLedgerHash, ledger.id());
@@ -962,7 +900,7 @@ RCLConsensus::Adaptor::validate(
handleNewValidation(app_, v, "local");
// Broadcast validation to all peers.
// Broadcast to all our peers:
protocol::TMValidation val;
val.set_validation(serialized.data(), serialized.size());
app_.overlay().broadcast(val);
@@ -985,26 +923,6 @@ RCLConsensus::Adaptor::onModeChange(ConsensusMode before, ConsensusMode after)
censorshipDetector_.reset();
mode_ = after;
ce().setMode(after);
}
ConsensusPhase
RCLConsensus::phase() const
{
return consensus_->phase();
}
bool
RCLConsensus::extensionsBusy() const
{
return consensus_->extensionsBusy();
}
RCLCxLedger::ID
RCLConsensus::prevLedgerID() const
{
std::lock_guard _{mutex_};
return consensus_->prevLedgerID();
}
Json::Value
@@ -1013,7 +931,7 @@ RCLConsensus::getJson(bool full) const
Json::Value ret;
{
std::lock_guard _{mutex_};
ret = consensus_->getJson(full);
ret = consensus_.getJson(full);
}
ret["validating"] = adaptor_.validating();
return ret;
@@ -1027,7 +945,7 @@ RCLConsensus::timerEntry(
try
{
std::lock_guard _{mutex_};
consensus_->timerEntry(now, clog);
consensus_.timerEntry(now, clog);
}
catch (SHAMapMissingNode const& mn)
{
@@ -1046,7 +964,7 @@ RCLConsensus::gotTxSet(NetClock::time_point const& now, RCLTxSet const& txSet)
try
{
std::lock_guard _{mutex_};
consensus_->gotTxSet(now, txSet);
consensus_.gotTxSet(now, txSet);
}
catch (SHAMapMissingNode const& mn)
{
@@ -1064,7 +982,7 @@ RCLConsensus::simulate(
std::optional<std::chrono::milliseconds> consensusDelay)
{
std::lock_guard _{mutex_};
consensus_->simulate(now, consensusDelay);
consensus_.simulate(now, consensusDelay);
}
bool
@@ -1073,24 +991,14 @@ RCLConsensus::peerProposal(
RCLCxPeerPos const& newProposal)
{
std::lock_guard _{mutex_};
return consensus_->peerProposal(now, newProposal);
return consensus_.peerProposal(now, newProposal);
}
//@@start pre-start-round
bool
RCLConsensus::Adaptor::preStartRound(
RCLCxLedger const& prevLgr,
hash_set<NodeID> const& nowTrusted)
{
ce().setRngEnabledThisRound(
prevLgr.ledger_->rules().enabled(featureConsensusEntropy));
ce().setExportEnabledThisRound(
prevLgr.ledger_->rules().enabled(featureExport));
JLOG(j_.trace()) << "RNGGATE: preStartRound prevSeq=" << prevLgr.seq()
<< " rulesEnabled=" << ce().rngEnabled()
<< " exportEnabled=" << ce().exportEnabled();
// We have a key, we do not want out of sync validations after a restart
// and are not amendment blocked.
validating_ = validatorKeys_.keys &&
@@ -1133,19 +1041,9 @@ RCLConsensus::Adaptor::preStartRound(
!nowTrusted.empty())
nUnlVote_.newValidators(prevLgr.seq() + 1, nowTrusted);
bool const proposing = validating_ && synced;
JLOG(j_.info()) << "STARTDIAG: preStartRound"
<< " mode=" << app_.getOPs().strOperatingMode()
<< " synced=" << (synced ? "yes" : "no")
<< " validating=" << (validating_ ? "yes" : "no")
<< " proposing=" << (proposing ? "yes" : "no")
<< " seq=" << (prevLgr.seq() + 1);
// propose only if we're in sync with the network (and validating)
return proposing;
return validating_ && synced;
}
//@@end pre-start-round
bool
RCLConsensus::Adaptor::haveValidated() const
@@ -1183,11 +1081,7 @@ void
RCLConsensus::Adaptor::updateOperatingMode(std::size_t const positions) const
{
if (!positions && app_.getOPs().isFull())
{
JLOG(j_.warn()) << "STARTDIAG: updateOperatingMode demoting"
<< " FULL->CONNECTED positions=" << positions;
app_.getOPs().setMode(OperatingMode::CONNECTED);
}
}
void
@@ -1200,7 +1094,7 @@ RCLConsensus::startRound(
std::unique_ptr<std::stringstream> const& clog)
{
std::lock_guard _{mutex_};
consensus_->startRound(
consensus_.startRound(
now,
prevLgrId,
prevLgr,
@@ -1230,16 +1124,11 @@ RclConsensusLogger::~RclConsensusLogger()
return;
auto const duration = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - start_);
ss_->seekg(0, std::ios::beg);
std::string line;
while (std::getline(*ss_, line, '.'))
{
boost::algorithm::trim(line);
if (!line.empty())
JLOG(j_.debug()) << header_ << line << ".";
}
JLOG(j_.debug()) << header_ << "Total duration: " << duration.count()
<< "ms.";
std::stringstream outSs;
outSs << header_ << "duration " << (duration.count() / 1000) << '.'
<< std::setw(3) << std::setfill('0') << (duration.count() % 1000)
<< "s. " << ss_->str();
j_.sink().writeAlways(beast::severities::kInfo, outSs.str());
}
} // namespace ripple

View File

@@ -20,26 +20,22 @@
#ifndef RIPPLE_APP_CONSENSUS_RCLCONSENSUS_H_INCLUDED
#define RIPPLE_APP_CONSENSUS_RCLCONSENSUS_H_INCLUDED
#include <xrpld/app/consensus/ConsensusExtensions.h>
#include <xrpld/app/consensus/RCLCensorshipDetector.h>
#include <xrpld/app/consensus/RCLCxLedger.h>
#include <xrpld/app/consensus/RCLCxPeerPos.h>
#include <xrpld/app/consensus/RCLCxTx.h>
#include <xrpld/app/misc/FeeVote.h>
#include <xrpld/app/misc/NegativeUNLVote.h>
#include <xrpld/consensus/ConsensusParms.h>
#include <xrpld/consensus/ConsensusTypes.h>
#include <xrpld/consensus/Consensus.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/overlay/Message.h>
#include <xrpld/shamap/SHAMap.h>
#include <xrpl/basics/CountedObject.h>
#include <xrpl/basics/Log.h>
#include <xrpl/beast/clock/abstract_clock.h>
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/protocol/RippleLedgerHash.h>
#include <xrpl/protocol/STValidation.h>
#include <atomic>
#include <chrono>
#include <memory>
#include <mutex>
#include <set>
@@ -48,13 +44,10 @@
namespace ripple {
class CanonicalTXSet;
class InboundTransactions;
class LocalTxs;
class LedgerMaster;
class ValidatorKeys;
template <class Adaptor>
class Consensus;
/** Manages the generic consensus algorithm for use by the RCL.
*/
@@ -98,16 +91,12 @@ class RCLConsensus
RCLCensorshipDetector<TxID, LedgerIndex> censorshipDetector_;
NegativeUNLVote nUnlVote_;
// RNG/Export state has moved to ConsensusExtensions
// (owned by Application, accessible via app_.getConsensusExtensions())
public:
using Ledger_t = RCLCxLedger;
using NodeID_t = NodeID;
using NodeKey_t = PublicKey;
using TxSet_t = RCLTxSet;
using PeerPosition_t = RCLCxPeerPos;
using Position_t = ExtendedPosition;
using Result = ConsensusResult<Adaptor>;
@@ -193,14 +182,6 @@ class RCLConsensus
return parms_;
}
// --- ConsensusExtensions access ---
ConsensusExtensions&
ce();
ConsensusExtensions const&
ce() const;
private:
//---------------------------------------------------------------------
// The following members implement the generic Consensus requirements
@@ -439,8 +420,6 @@ class RCLConsensus
};
public:
using clock_type = beast::abstract_clock<std::chrono::steady_clock>;
//! Constructor
RCLConsensus(
Application& app,
@@ -448,12 +427,10 @@ public:
LedgerMaster& ledgerMaster,
LocalTxs& localTxs,
InboundTransactions& inboundTransactions,
clock_type const& clock,
Consensus<Adaptor>::clock_type const& clock,
ValidatorKeys const& validatorKeys,
beast::Journal journal);
~RCLConsensus();
RCLConsensus(RCLConsensus const&) = delete;
RCLConsensus&
@@ -495,26 +472,9 @@ public:
}
ConsensusPhase
phase() const;
//! Whether extensions have pending sub-state work in establish
bool
extensionsBusy() const;
//! Check if hash is a known extension sidecar set (under mutex)
bool
isExtensionSet(uint256 const& hash) const
phase() const
{
std::lock_guard _{mutex_};
return adaptor_.ce().isSidecarSet(hash);
}
//! Route acquired extension sidecar set (under mutex)
void
gotExtensionSet(std::shared_ptr<SHAMap> const& map)
{
std::lock_guard _{mutex_};
adaptor_.ce().onAcquiredSidecarSet(map);
return consensus_.phase();
}
//! @see Consensus::getJson
@@ -545,7 +505,11 @@ public:
// @see Consensus::prevLedgerID
RCLCxLedger::ID
prevLedgerID() const;
prevLedgerID() const
{
std::lock_guard _{mutex_};
return consensus_.prevLedgerID();
}
//! @see Consensus::simulate
void
@@ -572,7 +536,7 @@ private:
mutable std::recursive_mutex mutex_;
Adaptor adaptor_;
std::unique_ptr<Consensus<Adaptor>> consensus_;
Consensus<Adaptor> consensus_;
beast::Journal const j_;
};

View File

@@ -31,12 +31,10 @@ RCLCxPeerPos::RCLCxPeerPos(
PublicKey const& publicKey,
Slice const& signature,
uint256 const& suppression,
Proposal&& proposal,
std::vector<std::string> exportSignatures)
Proposal&& proposal)
: publicKey_(publicKey)
, suppression_(suppression)
, proposal_(std::move(proposal))
, exportSignatures_(std::move(exportSignatures))
{
// The maximum allowed size of a signature is 72 bytes; we verify
// this elsewhere, but we want to be extra careful here:
@@ -68,17 +66,15 @@ RCLCxPeerPos::getJson() const
uint256
proposalUniqueId(
ExtendedPosition const& position,
uint256 const& proposeHash,
uint256 const& previousLedger,
std::uint32_t proposeSeq,
NetClock::time_point closeTime,
Slice const& publicKey,
Slice const& signature)
{
// This is for suppression/dedup only, NOT for signing.
// Must include all fields that distinguish proposals.
Serializer s(512);
position.add(s);
s.addBitString(proposeHash);
s.addBitString(previousLedger);
s.add32(proposeSeq);
s.add32(closeTime.time_since_epoch().count());

View File

@@ -28,294 +28,13 @@
#include <xrpl/protocol/HashPrefix.h>
#include <xrpl/protocol/PublicKey.h>
#include <xrpl/protocol/SecretKey.h>
#include <xrpl/protocol/Serializer.h>
#include <boost/container/static_vector.hpp>
#include <chrono>
#include <cstdint>
#include <optional>
#include <ostream>
#include <string>
#include <vector>
namespace ripple {
/** Extended position for consensus with RNG entropy support.
Carries the tx-set hash (the core convergence target), RNG set hashes
(agreed via sub-state quorum, not via operator==), and per-validator
leaves (unique to each proposer, piggybacked on proposals).
Critical design:
- operator== compares txSetHash ONLY (sub-states handle the rest)
- add() includes ALL fields for signing (prevents stripping attacks)
*/
struct ExtendedPosition
{
// === Core Convergence Target ===
uint256 txSetHash;
// === Set Hashes (sub-state quorum, not in operator==) ===
std::optional<uint256> commitSetHash;
std::optional<uint256> entropySetHash;
std::optional<uint256> exportSigSetHash;
std::optional<uint256> exportSignaturesHash;
// === Per-Validator Leaves (unique per proposer) ===
std::optional<uint256> myCommitment;
std::optional<uint256> myReveal;
ExtendedPosition() = default;
explicit ExtendedPosition(uint256 const& txSet) : txSetHash(txSet)
{
}
// Implicit conversion for legacy compatibility
operator uint256() const
{
return txSetHash;
}
// Helper to update TxSet while preserving sidecar data
void
updateTxSet(uint256 const& set)
{
txSetHash = set;
}
// TODO: replace operator== with a named method (e.g. txSetMatches())
// so call sites read as intent, not as "full equality". Overloading
// operator== to ignore most fields is surprising and fragile.
//
// CRITICAL: Only compare txSetHash for consensus convergence.
//
// Why not commitSetHash / entropySetHash?
// Nodes transition through sub-states (ConvergingTx → ConvergingCommit
// → ConvergingReveal) at slightly different times. If we included
// commitSetHash here, a node that transitions first would set it,
// making its position "different" from peers who haven't transitioned
// yet — deadlocking haveConsensus() for everyone.
//
// Instead, the sub-state machine in phaseEstablish handles agreement
// on those fields via quorum checks (hasQuorumOfCommits, etc.).
//
// Implications to consider:
// - Two nodes with the same txSetHash but different commitSetHash
// will appear to "agree" from the convergence engine's perspective.
// This is intentional: tx consensus must not be blocked by RNG.
// - A malicious node could propose a different commitSetHash without
// affecting tx convergence. This is safe because commitSetHash
// disagreement is caught by the sub-state quorum checks, and the
// entropy result is verified deterministically from collected reveals.
// - Leaves (myCommitment, myReveal) are also excluded — they are
// per-validator data unique to each proposer.
//@@start rng-extended-position-equality
bool
operator==(ExtendedPosition const& other) const
{
return txSetHash == other.txSetHash;
}
bool
operator!=(ExtendedPosition const& other) const
{
return !(*this == other);
}
// Comparison with uint256 (compares txSetHash only)
bool
operator==(uint256 const& hash) const
{
return txSetHash == hash;
}
bool
operator!=(uint256 const& hash) const
{
return txSetHash != hash;
}
friend bool
operator==(uint256 const& hash, ExtendedPosition const& pos)
{
return pos.txSetHash == hash;
}
friend bool
operator!=(uint256 const& hash, ExtendedPosition const& pos)
{
return pos.txSetHash != hash;
}
//@@end rng-extended-position-equality
// CRITICAL: Include ALL fields for signing (prevents stripping attacks)
//
// Compatibility note:
// - New code accepts both legacy 32-byte tx-set hashes and the extended
// payload with RNG sidecars.
// - Older binaries that only understand a raw uint256 proposal position
// will reject extended payloads as malformed.
// - Therefore ConsensusEntropy requires an all-upgraded validator set
// before activation; this format is backward-compatible, not
// forward-compatible.
//@@start rng-extended-position-serialize
void
add(Serializer& s) const
{
s.addBitString(txSetHash);
// Wire compatibility: if no extensions, emit exactly 32 bytes
// so legacy nodes that expect a plain uint256 work unchanged.
if (!commitSetHash && !entropySetHash && !exportSigSetHash &&
!exportSignaturesHash && !myCommitment && !myReveal)
return;
std::uint8_t flags = 0;
if (commitSetHash)
flags |= 0x01;
if (entropySetHash)
flags |= 0x02;
if (myCommitment)
flags |= 0x04;
if (myReveal)
flags |= 0x08;
if (exportSigSetHash)
flags |= 0x10;
if (exportSignaturesHash)
flags |= 0x20;
s.add8(flags);
if (commitSetHash)
s.addBitString(*commitSetHash);
if (entropySetHash)
s.addBitString(*entropySetHash);
if (myCommitment)
s.addBitString(*myCommitment);
if (myReveal)
s.addBitString(*myReveal);
if (exportSigSetHash)
s.addBitString(*exportSigSetHash);
if (exportSignaturesHash)
s.addBitString(*exportSignaturesHash);
}
//@@end rng-extended-position-serialize
Json::Value
getJson() const
{
Json::Value ret = Json::objectValue;
ret["tx_set"] = to_string(txSetHash);
if (commitSetHash)
ret["commit_set"] = to_string(*commitSetHash);
if (entropySetHash)
ret["entropy_set"] = to_string(*entropySetHash);
if (exportSigSetHash)
ret["export_sig_set"] = to_string(*exportSigSetHash);
if (exportSignaturesHash)
ret["export_signatures"] = to_string(*exportSignaturesHash);
return ret;
}
/** Deserialize from wire format.
Handles both legacy 32-byte hash and new extended format.
Returns nullopt if the payload is malformed (truncated for the
flags advertised).
*/
//@@start rng-extended-position-deserialize
static std::optional<ExtendedPosition>
fromSerialIter(SerialIter& sit, std::size_t totalSize)
{
if (totalSize < 32)
return std::nullopt;
ExtendedPosition pos;
pos.txSetHash = sit.get256();
// Legacy format: exactly 32 bytes
if (totalSize == 32)
return pos;
// Extended format: flags byte + optional uint256 fields
if (sit.empty())
return pos;
std::uint8_t flags = sit.get8();
// Reject unknown flag bits (reduces wire malleability)
if (flags & 0xC0)
return std::nullopt;
// Validate exact byte count for the flagged fields.
// Each flag bit indicates a 32-byte uint256.
int fieldCount = 0;
for (int i = 0; i < 6; ++i)
if (flags & (1 << i))
++fieldCount;
if (sit.getBytesLeft() != static_cast<std::size_t>(fieldCount * 32))
return std::nullopt;
if (flags & 0x01)
pos.commitSetHash = sit.get256();
if (flags & 0x02)
pos.entropySetHash = sit.get256();
if (flags & 0x04)
pos.myCommitment = sit.get256();
if (flags & 0x08)
pos.myReveal = sit.get256();
if (flags & 0x10)
pos.exportSigSetHash = sit.get256();
if (flags & 0x20)
pos.exportSignaturesHash = sit.get256();
return pos;
}
//@@end rng-extended-position-deserialize
};
// For logging/debugging - returns txSetHash as string
inline std::string
to_string(ExtendedPosition const& pos)
{
return to_string(pos.txSetHash);
}
// Stream output for logging
inline std::ostream&
operator<<(std::ostream& os, ExtendedPosition const& pos)
{
return os << pos.txSetHash;
}
/** Hash the raw export-signature blobs carried alongside a proposal.
The resulting digest is embedded in ExtendedPosition and therefore covered
by the normal proposal signature. The raw protobuf field remains outside
consensus equality, but stripping or mutating it invalidates the signed
digest before duplicate suppression.
*/
template <class ExportSignatures>
uint256
proposalExportSignaturesHash(ExportSignatures const& exportSignatures)
{
Serializer s(512);
s.add32(static_cast<std::uint32_t>(exportSignatures.size()));
for (auto const& blob : exportSignatures)
s.addVL(Slice(blob.data(), blob.size()));
return s.getSHA512Half();
}
// For hash_append (used in sha512Half and similar)
template <class Hasher>
void
hash_append(Hasher& h, ExtendedPosition const& pos)
{
using beast::hash_append;
// Serialize full position including all fields
Serializer s;
pos.add(s);
hash_append(h, s.slice());
}
/** A peer's signed, proposed position for use in RCLConsensus.
Carries a ConsensusProposal signed by a peer. Provides value semantics
@@ -324,9 +43,8 @@ hash_append(Hasher& h, ExtendedPosition const& pos)
class RCLCxPeerPos
{
public:
//< The type of the proposed position (uses ExtendedPosition for RNG
// support)
using Proposal = ConsensusProposal<NodeID, uint256, ExtendedPosition>;
//< The type of the proposed position
using Proposal = ConsensusProposal<NodeID, uint256, uint256>;
/** Constructor
@@ -342,8 +60,7 @@ public:
PublicKey const& publicKey,
Slice const& signature,
uint256 const& suppress,
Proposal&& proposal,
std::vector<std::string> exportSignatures = {});
Proposal&& proposal);
//! Verify the signing hash of the proposal
bool
@@ -376,12 +93,6 @@ public:
return proposal_;
}
std::vector<std::string> const&
exportSignatures() const
{
return exportSignatures_;
}
//! JSON representation of proposal
Json::Value
getJson() const;
@@ -396,7 +107,6 @@ private:
PublicKey publicKey_;
uint256 suppression_;
Proposal proposal_;
std::vector<std::string> exportSignatures_;
boost::container::static_vector<std::uint8_t, 72> signature_;
template <class Hasher>
@@ -408,10 +118,7 @@ private:
hash_append(h, std::uint32_t(proposal().proposeSeq()));
hash_append(h, proposal().closeTime());
hash_append(h, proposal().prevLedger());
// Serialize full ExtendedPosition for hashing
Serializer s;
proposal().position().add(s);
hash_append(h, s.slice());
hash_append(h, proposal().position());
}
};
@@ -424,7 +131,7 @@ private:
order to validate the signature. If the last closed ledger is left out, then
it is considered as all zeroes for the purposes of signing.
@param position The extended position (includes entropy fields)
@param proposeHash The hash of the proposed position
@param previousLedger The hash of the ledger the proposal is based upon
@param proposeSeq Sequence number of the proposal
@param closeTime Close time of the proposal
@@ -433,7 +140,7 @@ private:
*/
uint256
proposalUniqueId(
ExtendedPosition const& position,
uint256 const& proposeHash,
uint256 const& previousLedger,
std::uint32_t proposeSeq,
NetClock::time_point closeTime,

View File

@@ -139,8 +139,7 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash)
if (!ledger)
{
// MERGE NOTE (upstream 86ef16dbeb): promoted from debug to warn.
JLOG(j_.warn())
JLOG(j_.debug())
<< "Need validated ledger for preferred ledger analysis " << hash;
Application* pApp = &app_;

View File

@@ -1,4 +1,4 @@
# RCL Consensus
# RCL Consensus
This directory holds the types and classes needed
to connect the generic consensus algorithm to the
@@ -7,11 +7,7 @@ rippled-specific instance of consensus.
* `RCLCxTx` adapts a `SHAMapItem` transaction.
* `RCLCxTxSet` adapts a `SHAMap` to represent a set of transactions.
* `RCLCxLedger` adapts a `Ledger`.
* `RCLConsensus` implements the requirements of the generic
* `RCLConsensus` is implements the requirements of the generic
`Consensus` class by connecting to the rest of the `rippled`
application.
application.
Xahau-specific proposal sidecars, ConsensusEntropy/RNG, and export signature
convergence follow the invariants in
[`ConsensusExtensionsDesign.md`](ConsensusExtensionsDesign.md). Read that note
before changing extension quorum, sidecar sync, or fallback behavior.

View File

@@ -82,16 +82,6 @@ public:
Expected<uint256, HookReturnCode>
etxn_nonce() const;
/// xport APIs
Expected<uint64_t, HookReturnCode>
xport_reserve(uint64_t count) const;
Expected<uint256, HookReturnCode>
xport(Slice const& txBlob) const;
Expected<uint64_t, HookReturnCode>
xport_cancel(uint32_t ticketSeq) const;
/// float APIs
Expected<uint64_t, HookReturnCode>
float_set(int32_t exponent, int64_t mantissa) const;

View File

@@ -145,7 +145,7 @@ struct HookResult
ripple::uint256 const hookNamespace;
std::queue<std::shared_ptr<ripple::Transaction>>
emittedTxn{}; // etx stored here until accept/rollback (includes xport)
emittedTxn{}; // etx stored here until accept/rollback
HookStateMap& stateMap;
uint16_t changedStateCount = 0;
std::map<
@@ -174,8 +174,6 @@ struct HookResult
false; // hook_again allows strong pre-apply to nominate
// additional weak post-apply execution
std::shared_ptr<STObject const> provisionalMeta;
uint64_t rngCallCounter{
0}; // used to ensure conseq. rng calls don't return same data
};
class HookExecutor;
@@ -204,8 +202,6 @@ struct HookContext
uint16_t ledger_nonce_counter{0};
int64_t expected_etxn_count{-1}; // make this a 64bit int so the uint32
// from the hookapi cant overflow it
int64_t expected_export_count{-1};
int64_t export_count{0}; // how many xport() calls succeeded
std::map<ripple::uint256, bool> nonce_used{};
uint32_t generation =
0; // used for caching, only generated when txn_generation is called

View File

@@ -3,7 +3,6 @@
#include <xrpld/app/hook/HookAPI.h>
#include <xrpld/app/ledger/OpenLedger.h>
#include <xrpld/app/ledger/TransactionMaster.h>
#include <xrpld/app/tx/detail/ExportLedgerOps.h>
#include <xrpld/app/tx/detail/Import.h>
#include <xrpl/protocol/STParsedJSON.h>
#include <cfenv>
@@ -1224,193 +1223,6 @@ HookAPI::etxn_reserve(uint64_t count) const
return count;
}
Expected<uint64_t, HookReturnCode>
HookAPI::xport_reserve(uint64_t count) const
{
if (hookCtx.expected_export_count > -1)
return Unexpected(ALREADY_SET);
if (count < 1)
return Unexpected(TOO_SMALL);
if (count > hook_api::max_export)
return Unexpected(TOO_BIG);
hookCtx.expected_export_count = count;
// Also reserve emit slots so the wrapper ttEXPORT can flow
// through the normal emitted txn path.
if (hookCtx.expected_etxn_count < 0)
hookCtx.expected_etxn_count = 0;
hookCtx.expected_etxn_count += count;
return count;
}
Expected<uint256, HookReturnCode>
HookAPI::xport(Slice const& txBlob) const
{
auto& applyCtx = hookCtx.applyCtx;
auto& app = applyCtx.app;
auto j = app.journal("View");
auto& view = applyCtx.view();
if (hookCtx.expected_export_count < 0)
return Unexpected(PREREQUISITE_NOT_MET);
if (hookCtx.export_count >= hookCtx.expected_export_count)
return Unexpected(TOO_MANY_EXPORTED_TXN);
// Parse and validate the inner (cross-chain) transaction.
std::shared_ptr<STTx const> innerTx;
try
{
SerialIter sit(txBlob);
innerTx = std::make_shared<STTx const>(sit);
}
catch (std::exception const& e)
{
JLOG(j.trace()) << "HookExport[" << HC_ACC() << "]: Failed "
<< e.what();
return Unexpected(EXPORT_FAILURE);
}
if (auto ter = ExportLedgerOps::validateExportAccount(
*innerTx, hookCtx.result.account, j);
!isTesSuccess(ter))
return Unexpected(EXPORT_FAILURE);
if (auto ter = ExportLedgerOps::validateNetworkID(
*innerTx, app.config().NETWORK_ID, j);
!isTesSuccess(ter))
return Unexpected(EXPORT_FAILURE);
if (auto ter = ExportLedgerOps::validateTicketSequence(*innerTx, j);
!isTesSuccess(ter))
return Unexpected(EXPORT_FAILURE);
// Construct a ttEXPORT wrapping the inner tx, with EmitDetails,
// and push onto the emitted txn queue. This flows through the
// normal emitted txn path (emitted dir → TxQ injection → open
// ledger → retriable Export transactor).
uint32_t const ledgerSeq = view.info().seq;
// Generate a nonce for the emitted ttEXPORT wrapper.
auto nonce = etxn_nonce();
if (!nonce.has_value())
return Unexpected(INTERNAL_ERROR);
// Serialize inner tx as sfExportedTxn object.
Serializer innerSer;
innerTx->add(innerSer);
// Build the ttEXPORT wrapper as an STObject first so we can
// compute the fee, set it, then construct the STTx from the
// final serialised bytes. This avoids mutating the STTx after
// construction (which would leave a stale cached txid — see
// the tefNONDIR_EMIT check in Transactor::preclaim).
//
// The fee field is a fixed 9 bytes regardless of value, so
// patching it on the STObject doesn't change the serialised size.
STObject exportObj(sfGeneric);
{
exportObj.setFieldU16(sfTransactionType, ttEXPORT);
exportObj[sfAccount] = hookCtx.result.account;
exportObj[sfSequence] = 0u;
exportObj.setFieldVL(sfSigningPubKey, Blob{});
exportObj[sfFirstLedgerSequence] = ledgerSeq + 1;
exportObj[sfLastLedgerSequence] = ledgerSeq + 5;
exportObj[sfFee] = STAmount{0};
// sfExportedTxn inner object
SerialIter sit(innerSer.slice());
exportObj.set(std::make_unique<STObject>(sit, sfExportedTxn));
// sfEmitDetails
STObject emitDetails(sfEmitDetails);
emitDetails.setFieldU32(
sfEmitGeneration, static_cast<uint32_t>(etxn_generation()));
{
auto const burdenResult = etxn_burden();
emitDetails.setFieldU64(
sfEmitBurden,
burdenResult ? static_cast<uint64_t>(*burdenResult) : 1ULL);
}
emitDetails.setFieldH256(
sfEmitParentTxnID, applyCtx.tx.getTransactionID());
emitDetails.setFieldH256(sfEmitNonce, *nonce);
emitDetails.setFieldH256(sfEmitHookHash, hookCtx.result.hookHash);
if (hookCtx.result.hasCallback)
emitDetails.setAccountID(sfEmitCallback, hookCtx.result.account);
exportObj.set(std::move(emitDetails));
// Compute fee from serialised size and patch it in.
Serializer feeSer;
exportObj.add(feeSer);
auto feeResult = etxn_fee_base(feeSer.slice());
if (!feeResult)
{
JLOG(j.trace()) << "HookExport[" << HC_ACC()
<< "]: Fee calculation failed for ttEXPORT wrapper";
return Unexpected(EXPORT_FAILURE);
}
exportObj[sfFee] = STAmount{static_cast<uint64_t>(*feeResult)};
}
// Construct the STTx from the finalised STObject bytes.
Serializer exportSer;
exportObj.add(exportSer);
STTx exportStx(SerialIter{exportSer.slice()});
// Preflight the wrapper.
auto preflightResult = ripple::preflight(
app, view.rules(), exportStx, ripple::ApplyFlags::tapPREFLIGHT_EMIT, j);
if (!isTesSuccess(preflightResult.ter))
{
JLOG(j.trace()) << "HookExport[" << HC_ACC()
<< "]: ttEXPORT wrapper preflight failure: "
<< transHuman(preflightResult.ter);
return Unexpected(EXPORT_FAILURE);
}
// Wrap in Transaction and push to emittedTxn queue.
auto stpExport = std::make_shared<STTx const>(std::move(exportStx));
std::string reason;
auto tpTrans = std::make_shared<Transaction>(stpExport, reason, app);
if (tpTrans->getStatus() != NEW)
{
JLOG(j.trace()) << "HookExport[" << HC_ACC()
<< "]: tpTrans->getStatus() != NEW for wrapper";
return Unexpected(EXPORT_FAILURE);
}
// Push onto emittedTxn. The wrapper ttEXPORT flows through the
// normal emitted txn path (emitted dir → TxQ → open ledger →
// retriable Export transactor).
hookCtx.result.emittedTxn.push(tpTrans);
++hookCtx.export_count;
// Return the inner tx hash — this is what the hook author cares
// about (the cross-chain transaction they built).
return innerTx->getTransactionID();
}
Expected<uint64_t, HookReturnCode>
HookAPI::xport_cancel(uint32_t ticketSeq) const
{
auto& app = hookCtx.applyCtx.app;
auto j = app.journal("View");
TER const ter = ExportLedgerOps::cancelShadowTicket(
hookCtx.applyCtx.view(), hookCtx.result.account, ticketSeq, j);
if (!isTesSuccess(ter))
return Unexpected(DOESNT_EXIST);
return ticketSeq;
}
uint32_t
HookAPI::etxn_generation() const
{

View File

@@ -7,7 +7,6 @@
#include <xrpld/app/misc/TxQ.h>
#include <xrpld/app/tx/detail/Import.h>
#include <xrpld/app/tx/detail/NFTokenUtils.h>
#include <xrpld/ledger/View.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/protocol/ErrorCodes.h>
@@ -585,9 +584,7 @@ getTransactionalStakeHolders(STTx const& tx, ReadView const& rv)
case ttFEE:
case ttUNL_MODIFY:
case ttEMIT_FAILURE:
case ttUNL_REPORT:
case ttEXPORT:
case ttCONSENSUS_ENTROPY: {
case ttUNL_REPORT: {
break;
}
default: {
@@ -1660,7 +1657,6 @@ hook::finalizeHookResult(
// directory) if we are allowed to
std::vector<std::pair<uint256 /* txnid */, uint256 /* emit nonce */>>
emission_txnid;
std::vector<uint256 /* txnid */> exported_txnid;
if (doEmit)
{
@@ -1695,8 +1691,7 @@ hook::finalizeHookResult(
ptr->add(s);
SerialIter sit(s.slice());
sleEmitted->set(
std::make_unique<ripple::STObject>(sit, sfEmittedTxn));
sleEmitted->emplace_back(ripple::STObject(sit, sfEmittedTxn));
auto page = applyCtx.view().dirInsert(
keylet::emittedDir(), emittedId, [&](SLE::ref sle) {
(*sle)[sfFlags] = lsfEmittedDir;
@@ -1717,12 +1712,6 @@ hook::finalizeHookResult(
}
}
}
// Exported txns now flow through the emitted txn path above
// (xport() pushes a ttEXPORT wrapper onto emittedTxn).
// The export backlog cap is enforced after hook finalization by
// ApplyContext::checkExportEmissionLimit(), so strong and weak hook
// emissions use the same fee-only reset path.
}
bool const fixV2 = applyCtx.view().rules().enabled(fixXahauV2);
@@ -1749,10 +1738,6 @@ hook::finalizeHookResult(
meta.setFieldU16(
sfHookEmitCount,
emission_txnid.size()); // this will never wrap, hard limit
if (applyCtx.view().rules().enabled(featureExport))
{
meta.setFieldU16(sfHookExportCount, exported_txnid.size());
}
meta.setFieldU16(sfHookExecutionIndex, exec_index);
meta.setFieldU16(sfHookStateChangeCount, hookResult.changedStateCount);
meta.setFieldH256(sfHookHash, hookResult.hookHash);
@@ -2745,26 +2730,23 @@ DEFINE_HOOK_FUNCTION(
return serialize_keylet(kl, memory, write_ptr, write_len);
}
// These keylet types are not yet implemented. Their
// corresponding amendments are not yet supported on the
// network. Each case needs a full implementation (see
// above cases for reference) before its amendment can be
// enabled.
// featureXChainBridge
case keylet_code::BRIDGE:
case keylet_code::XCHAIN_OWNED_CLAIM_ID:
case keylet_code::XCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID: {
if (!applyCtx.view().rules().enabled(featureXChainBridge))
return INVALID_ARGUMENT;
}
case keylet_code::XCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID:
// featureMPTokensV1
case keylet_code::MPTOKEN_ISSUANCE:
case keylet_code::MPTOKEN: {
if (!applyCtx.view().rules().enabled(featureMPTokensV1))
return INVALID_ARGUMENT;
}
case keylet_code::CREDENTIAL: {
if (!applyCtx.view().rules().enabled(featureCredentials))
return INVALID_ARGUMENT;
}
case keylet_code::PERMISSIONED_DOMAIN: {
if (!applyCtx.view().rules().enabled(
featurePermissionedDomains))
return INVALID_ARGUMENT;
}
case keylet_code::MPTOKEN:
// featureCredentials
case keylet_code::CREDENTIAL:
// featurePermissionedDomains
case keylet_code::PERMISSIONED_DOMAIN:
return INVALID_ARGUMENT;
}
}
catch (std::exception& e)
@@ -3044,31 +3026,6 @@ DEFINE_HOOK_FUNCTION(int64_t, etxn_reserve, uint32_t count)
HOOK_TEARDOWN();
}
DEFINE_HOOK_FUNCTION(int64_t, xport_reserve, uint32_t count)
{
HOOK_SETUP(); // populates memory_ctx, memory, memory_length, applyCtx,
// hookCtx on current stack
auto const result = api.xport_reserve(count);
if (!result)
return result.error();
return result.value();
HOOK_TEARDOWN();
}
DEFINE_HOOK_FUNCTION(int64_t, xport_cancel, uint32_t ticket_seq)
{
HOOK_SETUP();
auto const result = api.xport_cancel(ticket_seq);
if (!result)
return result.error();
return result.value();
HOOK_TEARDOWN();
}
// Compute the burden of an emitted transaction based on a number of factors
DEFINE_HOOK_FUNCTION(int64_t, etxn_burden)
{
@@ -4132,177 +4089,6 @@ DEFINE_HOOK_FUNCTION(
HOOK_TEARDOWN();
}
//@@start xport-impl
DEFINE_HOOK_FUNCTION(
int64_t,
xport,
uint32_t write_ptr,
uint32_t write_len,
uint32_t read_ptr,
uint32_t read_len)
{
HOOK_SETUP();
if (NOT_IN_BOUNDS(read_ptr, read_len, memory_length))
return OUT_OF_BOUNDS;
if (NOT_IN_BOUNDS(write_ptr, write_len, memory_length))
return OUT_OF_BOUNDS;
if (write_len < 32)
return TOO_SMALL;
// Delegate to decoupled HookAPI for xport logic
ripple::Slice txBlob{
reinterpret_cast<const void*>(memory + read_ptr), read_len};
auto const res = api.xport(txBlob);
if (!res)
return res.error();
auto const& innerTxHash = *res;
if (innerTxHash.size() > write_len)
return TOO_SMALL;
if (NOT_IN_BOUNDS(write_ptr, innerTxHash.size(), memory_length))
return OUT_OF_BOUNDS;
WRITE_WASM_MEMORY_AND_RETURN(
write_ptr,
innerTxHash.size(),
innerTxHash.data(),
innerTxHash.size(),
memory,
memory_length);
HOOK_TEARDOWN();
}
//@@end xport-impl
// byteCount must be a multiple of 32
inline std::vector<uint8_t>
fairRng(ApplyContext& applyCtx, hook::HookResult& hr, uint32_t byteCount)
{
if (byteCount > 512)
byteCount = 512;
// force the byte count to be a multiple of 32
byteCount &= ~0b11111;
if (byteCount == 0)
return {};
auto& view = applyCtx.view();
auto const sleEntropy = view.peek(ripple::keylet::consensusEntropy());
auto const seq = view.info().seq;
auto const entropySeq =
sleEntropy ? sleEntropy->getFieldU32(sfLedgerSequence) : 0u;
// Allow entropy from current ledger (during close) or previous ledger
// (open ledger / speculative execution). On the real network hooks
// always execute during buildLCL where the entropy pseudo-tx has
// already updated the SLE to the current seq.
// TODO: open-ledger entropy uses previous ledger's entropy, so
// dice/random results will differ between speculative and final
// execution. This needs further thought re: UX implications.
if (!sleEntropy || entropySeq > seq || (seq - entropySeq) > 1 ||
sleEntropy->getFieldU16(sfEntropyCount) < 5)
return {};
// we'll generate bytes in lots of 32
uint256 rndData = sha512Half(
view.info().seq,
applyCtx.tx.getTransactionID(),
hr.otxnAccount,
hr.hookHash,
hr.account,
hr.hookChainPosition,
hr.executeAgainAsWeak ? std::string("weak") : std::string("strong"),
sleEntropy->getFieldH256(sfDigest),
hr.rngCallCounter++);
std::vector<uint8_t> bytesOut;
bytesOut.resize(byteCount);
uint8_t* ptr = bytesOut.data();
while (1)
{
std::memcpy(ptr, rndData.data(), 32);
ptr += 32;
if (ptr - bytesOut.data() >= byteCount)
break;
rndData = sha512Half(rndData);
}
return bytesOut;
}
DEFINE_HOOK_FUNCTION(int64_t, dice, uint32_t sides)
{
HOOK_SETUP();
if (sides == 0)
return INVALID_ARGUMENT;
auto vec = fairRng(applyCtx, hookCtx.result, 32);
if (vec.empty())
return TOO_LITTLE_ENTROPY;
if (vec.size() != 32)
return INTERNAL_ERROR;
uint32_t value;
std::memcpy(&value, vec.data(), sizeof(uint32_t));
return value % sides;
HOOK_TEARDOWN();
}
DEFINE_HOOK_FUNCTION(int64_t, random, uint32_t write_ptr, uint32_t write_len)
{
HOOK_SETUP();
if (write_len == 0)
return TOO_SMALL;
if (write_len > 512)
return TOO_BIG;
uint32_t required = write_len;
if ((required & ~0b11111) == required)
{
// already a multiple of 32 bytes
}
else
{
// round up
required &= ~0b11111;
required += 32;
}
if (NOT_IN_BOUNDS(write_ptr, write_len, memory_length))
return OUT_OF_BOUNDS;
auto vec = fairRng(applyCtx, hookCtx.result, required);
if (vec.empty())
return TOO_LITTLE_ENTROPY;
WRITE_WASM_MEMORY_AND_RETURN(
write_ptr, write_len, vec.data(), vec.size(), memory, memory_length);
HOOK_TEARDOWN();
}
/*
DEFINE_HOOK_FUNCTION(

View File

@@ -26,7 +26,6 @@
#include <xrpld/nodestore/Database.h>
#include <xrpl/basics/Log.h>
#include <xrpl/protocol/HashPrefix.h>
#include <xrpl/protocol/STTx.h>
#include <xrpl/protocol/digest.h>
namespace ripple {
@@ -65,15 +64,6 @@ ConsensusTransSetSF::gotNode(
stx->getTransactionID() == nodeHash.as_uint256(),
"ripple::ConsensusTransSetSF::gotNode : transaction hash "
"match");
//@@start rng-pseudo-tx-submission-filtering
// Don't submit pseudo-transactions (consensus entropy, fees,
// amendments, etc.) — they exist as SHAMap entries for
// content-addressed identification but are not real user txns.
if (isPseudoTx(*stx))
return;
//@@end rng-pseudo-tx-submission-filtering
auto const pap = &app_;
app_.getJobQueue().addJob(jtTRANSACTION, "TXS->TXN", [pap, stx]() {
pap->getOPs().submitTransaction(stx);

View File

@@ -23,15 +23,12 @@
#include <xrpld/overlay/Peer.h>
#include <xrpld/shamap/SHAMap.h>
#include <xrpl/beast/clock/abstract_clock.h>
#include <cstdint>
#include <memory>
namespace ripple {
class Application;
enum class InboundSetKind : std::uint8_t { transaction, sidecar };
/** Manages the acquisition and lifetime of transaction sets.
*/
@@ -52,15 +49,11 @@ public:
* @param setHash The transaction set ID (digest of the SHAMap root node).
* @param acquire Whether to fetch the transaction set from the network if
* it is missing.
* @param kind The kind of SHAMap payload to acquire if the set is missing.
* @return The transaction set with ID setHash, or nullptr if it is
* missing.
*/
virtual std::shared_ptr<SHAMap>
getSet(
uint256 const& setHash,
bool acquire,
InboundSetKind kind = InboundSetKind::transaction) = 0;
getSet(uint256 const& setHash, bool acquire) = 0;
/** Add a transaction set from a LedgerData message.
*

View File

@@ -1,52 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpld/app/ledger/SidecarSetSF.h>
namespace ripple {
SidecarSetSF::SidecarSetSF(NodeCache& nodeCache) : m_nodeCache(nodeCache)
{
}
void
SidecarSetSF::gotNode(
bool fromFilter,
SHAMapHash const& nodeHash,
std::uint32_t,
Blob&& nodeData,
SHAMapNodeType) const
{
if (fromFilter)
return;
m_nodeCache.insert(nodeHash, nodeData);
}
std::optional<Blob>
SidecarSetSF::getNode(SHAMapHash const& nodeHash) const
{
Blob nodeData;
if (m_nodeCache.retrieve(nodeHash, nodeData))
return nodeData;
return std::nullopt;
}
} // namespace ripple

View File

@@ -1,57 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_APP_LEDGER_SIDECARSETSF_H_INCLUDED
#define RIPPLE_APP_LEDGER_SIDECARSETSF_H_INCLUDED
#include <xrpld/shamap/SHAMapSyncFilter.h>
#include <xrpl/basics/TaggedCache.h>
#include <optional>
namespace ripple {
// Sync filter for sidecar SHAMaps. Sidecar leaves are STObject(sfGeneric)
// payloads, not STTx transactions, so acquisition must not submit them.
// Validation stays with the consensus extension merge step, where the expected
// sidecar kind and active validator view are known.
class SidecarSetSF : public SHAMapSyncFilter
{
public:
using NodeCache = TaggedCache<SHAMapHash, Blob>;
explicit SidecarSetSF(NodeCache& nodeCache);
void
gotNode(
bool fromFilter,
SHAMapHash const& nodeHash,
std::uint32_t ledgerSeq,
Blob&& nodeData,
SHAMapNodeType type) const override;
std::optional<Blob>
getNode(SHAMapHash const& nodeHash) const override;
private:
NodeCache& m_nodeCache;
};
} // namespace ripple
#endif

View File

@@ -25,7 +25,6 @@
#include <xrpld/app/misc/CanonicalTXSet.h>
#include <xrpld/app/tx/apply.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/TxFormats.h>
namespace ripple {
@@ -107,47 +106,6 @@ applyTransactions(
bool certainRetry = true;
std::size_t count = 0;
//@@start rng-entropy-first-application
// CRITICAL: Apply consensus entropy pseudo-tx FIRST before any other
// transactions. This ensures hooks can read entropy during this ledger.
for (auto it = txns.begin(); it != txns.end(); /* manual */)
{
if (it->second->getTxnType() != ttCONSENSUS_ENTROPY)
{
++it;
continue;
}
auto const txid = it->first.getTXID();
JLOG(j.debug()) << "Applying entropy tx FIRST: " << txid;
try
{
auto const result =
applyTransaction(app, view, *it->second, true, tapNONE, j);
if (result == ApplyTransactionResult::Success)
{
++count;
JLOG(j.debug()) << "Entropy tx applied successfully";
}
else
{
failed.insert(txid);
JLOG(j.warn()) << "Entropy tx failed to apply";
}
}
catch (std::exception const& ex)
{
JLOG(j.warn()) << "Entropy tx throws: " << ex.what();
failed.insert(txid);
}
it = txns.erase(it);
break; // Only one entropy tx per ledger
}
//@@end rng-entropy-first-application
// Attempt to apply all of the retriable transactions
for (int pass = 0; pass < LEDGER_TOTAL_PASSES; ++pass)
{

View File

@@ -93,10 +93,7 @@ public:
}
std::shared_ptr<SHAMap>
getSet(
uint256 const& hash,
bool acquire,
InboundSetKind kind = InboundSetKind::transaction) override
getSet(uint256 const& hash, bool acquire) override
{
TransactionAcquire::pointer ta;
@@ -120,7 +117,7 @@ public:
return std::shared_ptr<SHAMap>();
ta = std::make_shared<TransactionAcquire>(
app_, hash, m_peerSetBuilder->build(), kind);
app_, hash, m_peerSetBuilder->build());
auto& obj = m_map[hash];
obj.mAcquire = ta;

View File

@@ -27,14 +27,12 @@ LedgerReplay::LedgerReplay(
std::shared_ptr<Ledger const> replay)
: parent_{std::move(parent)}, replay_{std::move(replay)}
{
//@@start ledger-replay-ordered-txns
for (auto const& item : replay_->txMap())
{
auto txPair = replay_->txRead(item.key()); // non-const so can be moved
auto const txIndex = (*txPair.second)[sfTransactionIndex];
orderedTxns_.emplace(txIndex, std::move(txPair.first));
}
//@@end ledger-replay-ordered-txns
}
LedgerReplay::LedgerReplay(

Some files were not shown because too many files have changed in this diff Show More