mirror of
https://github.com/Xahau/xahaud.git
synced 2026-04-29 15:37:46 +00:00
Compare commits
238 Commits
backport-l
...
feature-ex
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b12cee5d47 | ||
|
|
a3b1e45f4d | ||
|
|
3938ba7af4 | ||
|
|
96b1104646 | ||
|
|
92bdd2ed9f | ||
|
|
d87cfdc604 | ||
|
|
a956abb2d1 | ||
|
|
aa36a80ab7 | ||
|
|
e729aa11eb | ||
|
|
c58da3da58 | ||
|
|
0c2c59d258 | ||
|
|
15662eb1b1 | ||
|
|
492fe90643 | ||
|
|
ea413873b2 | ||
|
|
625419eab7 | ||
|
|
2218bdd7f3 | ||
|
|
f13233b00a | ||
|
|
a61f334ca2 | ||
|
|
53a119ce30 | ||
|
|
63d1197345 | ||
|
|
aafd5b940b | ||
|
|
efc497cf23 | ||
|
|
f4e78c9a24 | ||
|
|
7b5865c69c | ||
|
|
9f1ad521e1 | ||
|
|
26bbef8efd | ||
|
|
6e71f84867 | ||
|
|
ab9b48f67a | ||
|
|
cd00ed72d8 | ||
|
|
05a3e04f2d | ||
|
|
66f7294120 | ||
|
|
7f6ac75617 | ||
|
|
4150f0383c | ||
|
|
25123b370a | ||
|
|
04077c1a55 | ||
|
|
d94079d762 | ||
|
|
92ec07a1be | ||
|
|
664db62588 | ||
|
|
03a436d918 | ||
|
|
7474048295 | ||
|
|
1ee660529e | ||
|
|
311dfa1c23 | ||
|
|
f27cd2c567 | ||
|
|
f34fdc297c | ||
|
|
65fa63883d | ||
|
|
d8c683fb4c | ||
|
|
fd53af304b | ||
|
|
2a3f0ec923 | ||
|
|
00f1f7ba30 | ||
|
|
49f05e4e47 | ||
|
|
1f51b9c594 | ||
|
|
88a548a8ef | ||
|
|
db302a0f78 | ||
|
|
383d9ec2e7 | ||
|
|
52671bfc99 | ||
|
|
8307fca3b9 | ||
|
|
6526621c16 | ||
|
|
2a9b1c9c22 | ||
|
|
54ca21b604 | ||
|
|
462db6004c | ||
|
|
cfca708aae | ||
|
|
5f70e5259c | ||
|
|
8697c5d821 | ||
|
|
9436e5868e | ||
|
|
c6fa973cf6 | ||
|
|
939e03714c | ||
|
|
969f98f57e | ||
|
|
435deb0e78 | ||
|
|
b80352e512 | ||
|
|
57c46c61fc | ||
|
|
37ff13df50 | ||
|
|
1b363b7eac | ||
|
|
9562b457cf | ||
|
|
724633ceb5 | ||
|
|
152d82e798 | ||
|
|
0bb31ce7ce | ||
|
|
4cb3de0497 | ||
|
|
c6b315412d | ||
|
|
72395bec75 | ||
|
|
8ed4d86f0f | ||
|
|
419fd16b9a | ||
|
|
a8097cd9a6 | ||
|
|
02a0552325 | ||
|
|
3698193b0a | ||
|
|
de43ca2385 | ||
|
|
8c747a1916 | ||
|
|
cea110f29a | ||
|
|
3ca056a94b | ||
|
|
705d8400db | ||
|
|
655b751698 | ||
|
|
f324081277 | ||
|
|
24a284180a | ||
|
|
6f003cc983 | ||
|
|
3a58020388 | ||
|
|
829441b52e | ||
|
|
3a055663cc | ||
|
|
985a194bdc | ||
|
|
869f366d8a | ||
|
|
03936aa928 | ||
|
|
6d180307ad | ||
|
|
f2ca499c97 | ||
|
|
bd68364f25 | ||
|
|
42a6407815 | ||
|
|
a387c853ab | ||
|
|
9311e567d3 | ||
|
|
c26582bdf9 | ||
|
|
417b999c7f | ||
|
|
0205be4500 | ||
|
|
89274b5387 | ||
|
|
b65d9faf12 | ||
|
|
aa1a7e5320 | ||
|
|
6f0f17aad9 | ||
|
|
407bfa1467 | ||
|
|
f0dfcf6b81 | ||
|
|
503d2ebf98 | ||
|
|
e52bc51384 | ||
|
|
91860db578 | ||
|
|
0b317a8e7a | ||
|
|
dbd230b695 | ||
|
|
30cefcba85 | ||
|
|
94edb5759d | ||
|
|
ce57b6a3a0 | ||
|
|
fca5cad470 | ||
|
|
bb77c2090b | ||
|
|
90a94294e4 | ||
|
|
c2209b4472 | ||
|
|
8fcb2ed336 | ||
|
|
fd1567d1ba | ||
|
|
d32f34d3bf | ||
|
|
c491c5c82f | ||
|
|
74817765ae | ||
|
|
fc23fa8535 | ||
|
|
34c0f17b6b | ||
|
|
765ad6a278 | ||
|
|
f623ca89b9 | ||
|
|
e4865f09f9 | ||
|
|
4c182e4738 | ||
|
|
d0c869c8a6 | ||
|
|
cac5efcd3c | ||
|
|
514e60b71c | ||
|
|
2a34e32e05 | ||
|
|
b969024a25 | ||
|
|
f30b9a4c3a | ||
|
|
0e019fec4e | ||
|
|
7e0c72fd22 | ||
|
|
07d741cdd7 | ||
|
|
b99c38c09d | ||
|
|
64e50209ff | ||
|
|
b1ce2103ad | ||
|
|
50c4cf1df3 | ||
|
|
6fc14f398d | ||
|
|
592a8600c7 | ||
|
|
e71768700a | ||
|
|
e598e405bd | ||
|
|
8af3ce2f5b | ||
|
|
b67cb78b97 | ||
|
|
0b1b82282e | ||
|
|
d4c5a7e8ab | ||
|
|
82837864fa | ||
|
|
e1caee6459 | ||
|
|
3206b4a4e1 | ||
|
|
0c2e09050e | ||
|
|
83922d5c20 | ||
|
|
6bae42ff01 | ||
|
|
35e86d926e | ||
|
|
9c4ee9315d | ||
|
|
0f17cf02aa | ||
|
|
7753dc3cbe | ||
|
|
cc7f3c59ae | ||
|
|
e8c1b25ab4 | ||
|
|
b9dd854595 | ||
|
|
3bead8dcb6 | ||
|
|
908a78a1d9 | ||
|
|
a9e3dc41d4 | ||
|
|
02990eb4ee | ||
|
|
ce76632322 | ||
|
|
9eac54d690 | ||
|
|
24e4ac16ad | ||
|
|
94ce15d233 | ||
|
|
8f331a538e | ||
|
|
7425ab0a39 | ||
|
|
c5292bfe0d | ||
|
|
79b2f9f410 | ||
|
|
e8358a82b1 | ||
|
|
d850e740e1 | ||
|
|
61a166bcb0 | ||
|
|
41a41ec625 | ||
|
|
bc98c589b7 | ||
|
|
4f009e4698 | ||
|
|
b6811a6f59 | ||
|
|
ae88fd3d24 | ||
|
|
db3ed0c2eb | ||
|
|
960808b172 | ||
|
|
a9dffd38ff | ||
|
|
382e6fa673 | ||
|
|
2905b0509c | ||
|
|
4911c1bf52 | ||
|
|
1744d21410 | ||
|
|
34ff53f65d | ||
|
|
893f8d5a10 | ||
|
|
3e5389d652 | ||
|
|
c44dea3acf | ||
|
|
a6dd54fa48 | ||
|
|
28bd0a22d3 | ||
|
|
960fffcf82 | ||
|
|
e7867c07a1 | ||
|
|
a828e8a44d | ||
|
|
bb33e7cf64 | ||
|
|
7e8e0654cd | ||
|
|
38af0626e0 | ||
|
|
8500e86f57 | ||
|
|
1fc4fd9bfd | ||
|
|
e4875e5398 | ||
|
|
5b1b142be0 | ||
|
|
5ba832204a | ||
|
|
1257b3a65c | ||
|
|
6013ed2cb6 | ||
|
|
034010716e | ||
|
|
b28793b0fa | ||
|
|
4bce392c31 | ||
|
|
244a28b981 | ||
|
|
f2838351c9 | ||
|
|
dae082d6a5 | ||
|
|
619a4a68f7 | ||
|
|
4a6db8bb05 | ||
|
|
c86479bc58 | ||
|
|
dc6a2dc6ff | ||
|
|
c01b9a657b | ||
|
|
652b181b5d | ||
|
|
8329d78f32 | ||
|
|
bf4579c1d1 | ||
|
|
73e099eb23 | ||
|
|
2e311b4259 | ||
|
|
7c8e940091 | ||
|
|
9b90c50789 | ||
|
|
a18e2cb2c6 | ||
|
|
be5f425122 | ||
|
|
fc6f4762da |
4
.github/workflows/levelization.yml
vendored
4
.github/workflows/levelization.yml
vendored
@@ -10,7 +10,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Check levelization
|
||||
run: Builds/levelization/levelization.sh
|
||||
run: python Builds/levelization/levelization.py
|
||||
- name: Check for differences
|
||||
id: assert
|
||||
run: |
|
||||
@@ -40,7 +40,7 @@ jobs:
|
||||
To fix it, you can do one of two things:
|
||||
1. Download and apply the patch generated as an artifact of this
|
||||
job to your repo, commit, and push.
|
||||
2. Run './Builds/levelization/levelization.sh' in your repo,
|
||||
2. Run 'python Builds/levelization/levelization.py' in your repo,
|
||||
commit, and push.
|
||||
|
||||
See Builds/levelization/README.md for more info.
|
||||
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -53,6 +53,9 @@ Builds/levelization/results/paths.txt
|
||||
Builds/levelization/results/includes/
|
||||
Builds/levelization/results/includedby/
|
||||
|
||||
# Python
|
||||
__pycache__
|
||||
|
||||
# Ignore tmp directory.
|
||||
tmp
|
||||
|
||||
@@ -124,5 +127,8 @@ bld.rippled/
|
||||
generated
|
||||
.vscode
|
||||
|
||||
# AI docs (local working documents)
|
||||
.ai-docs/
|
||||
|
||||
# Suggested in-tree build directory
|
||||
/.build/
|
||||
|
||||
4
.testnet/.gitignore
vendored
Normal file
4
.testnet/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
output/
|
||||
__pycache__/
|
||||
scenarios/odd-cases/
|
||||
scenarios/suite-experiments.yml
|
||||
27
.testnet/scenarios/entropy/consensus_entropy_crash.py
Normal file
27
.testnet/scenarios/entropy/consensus_entropy_crash.py
Normal file
@@ -0,0 +1,27 @@
|
||||
"""Scenario: ConsensusEntropy amendment crashes non-supporting node.
|
||||
|
||||
Votes ConsensusEntropy accept on all nodes except n4, then waits for n4
|
||||
to crash as the amendment activates without its support.
|
||||
|
||||
x-testnet run --scenario-script consensus_entropy_crash.py
|
||||
"""
|
||||
|
||||
|
||||
async def scenario(ctx, log):
|
||||
await ctx.wait_for_ledger_close()
|
||||
ctx.feature("ConsensusEntropy", vetoed=False, exclude_nodes=[4])
|
||||
|
||||
log("Waiting for ConsensusEntropy to be voted for...")
|
||||
await ctx.wait_for_feature(
|
||||
"ConsensusEntropy",
|
||||
check=lambda s: not s.get("vetoed"),
|
||||
exclude_nodes=[4],
|
||||
timeout=60,
|
||||
)
|
||||
|
||||
log("Waiting for n4 to crash...")
|
||||
op = await ctx.wait_for_nodes_down(nodes=[4], timeout=600)
|
||||
|
||||
ctx.assert_log("unsupported amendments activated", since=op.started, nodes=[4])
|
||||
ctx.assert_exit_status(0, nodes=[4])
|
||||
log("PASS: n4 shut down due to unsupported amendment")
|
||||
52
.testnet/scenarios/entropy/entropy_with_transactions.py
Normal file
52
.testnet/scenarios/entropy/entropy_with_transactions.py
Normal file
@@ -0,0 +1,52 @@
|
||||
""":descr: entropy stays valid under transaction load"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from helpers import require_entropy, get_entropy_tx, assert_valid_entropy
|
||||
|
||||
variants = [
|
||||
{"label": "light", "min_txns": 5, "max_txns": 10},
|
||||
{"label": "heavy", "min_txns": 50, "max_txns": 60},
|
||||
{"label": "super_heavy", "min_txns": 90, "max_txns": 120},
|
||||
]
|
||||
|
||||
|
||||
async def scenario(ctx, log, *, min_txns=5, max_txns=10, **_):
|
||||
await require_entropy(ctx, log)
|
||||
|
||||
gen = ctx.txn_generator(min_txns=min_txns, max_txns=max_txns)
|
||||
await gen.start()
|
||||
await gen.wait_until_ready()
|
||||
log(f"Transaction generator ready ({min_txns}-{max_txns} txns/ledger)")
|
||||
|
||||
# Wait for pipeline warmup + a few txn-bearing ledgers.
|
||||
await ctx.wait_for_ledgers(3, node_id=0, timeout=60)
|
||||
|
||||
start_seq = ctx.validated_ledger_index(0)
|
||||
await ctx.wait_for_ledgers(10, node_id=0, timeout=120)
|
||||
end_seq = ctx.validated_ledger_index(0)
|
||||
log(f"Inspecting ledgers {start_seq + 1} → {end_seq}")
|
||||
|
||||
digests = set()
|
||||
total_user_txns = 0
|
||||
|
||||
for seq in range(start_seq + 1, end_seq + 1):
|
||||
ce, user_txns = get_entropy_tx(ctx, seq)
|
||||
digest, count = assert_valid_entropy(ce, seq, seen_digests=digests)
|
||||
total_user_txns += len(user_txns)
|
||||
log(
|
||||
f" Ledger {seq}: EntropyCount={count} "
|
||||
f"user_txns={len(user_txns)} Digest={digest[:16]}..."
|
||||
)
|
||||
|
||||
await gen.stop()
|
||||
|
||||
log(
|
||||
f"Verified {end_seq - start_seq} ledgers: {total_user_txns} user txns, "
|
||||
f"all entropy valid and unique"
|
||||
)
|
||||
|
||||
if total_user_txns == 0:
|
||||
raise AssertionError("No user transactions were included in any ledger")
|
||||
|
||||
log("PASS")
|
||||
117
.testnet/scenarios/entropy/quorum_degradation_smoke.py
Normal file
117
.testnet/scenarios/entropy/quorum_degradation_smoke.py
Normal file
@@ -0,0 +1,117 @@
|
||||
""":descr: 4/5 liveness, 3/5 zero-entropy fallback, recovery"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from helpers import require_entropy, get_entropy_tx, entropy_fields
|
||||
|
||||
|
||||
async def scenario(ctx, log):
|
||||
await require_entropy(ctx, log)
|
||||
|
||||
# Baseline: wait 1 ledger to confirm network is healthy.
|
||||
await ctx.wait_for_ledgers(1, node_id=0, timeout=30)
|
||||
|
||||
# --- 4/5 liveness ---
|
||||
ctx.stop_node(4)
|
||||
await ctx.wait_for_nodes_down(nodes=[4], timeout=30)
|
||||
await ctx.wait_for_ledgers(1, node_id=0, timeout=30)
|
||||
log("4/5: liveness OK")
|
||||
|
||||
# Snapshot validated seq before dropping to 3/5.
|
||||
val_before = ctx.validated_ledger_index(0)
|
||||
|
||||
# --- 3/5 degraded window ---
|
||||
ctx.stop_node(3)
|
||||
await ctx.wait_for_nodes_down(nodes=[3], timeout=30)
|
||||
|
||||
# 10s ≈ 3 rounds at 3s cadence.
|
||||
await ctx.sleep(10)
|
||||
|
||||
val_after = ctx.validated_ledger_index(0)
|
||||
log(f"3/5: validated ledger {val_before} → {val_after}")
|
||||
|
||||
# Accepted/built ledgers may still later appear as validated once the full
|
||||
# network rejoins. For ConsensusEntropy the key invariant is that every
|
||||
# ledger created during this sub-quorum window carries ZERO entropy.
|
||||
degraded_zero = 0
|
||||
degraded_end = val_after or val_before
|
||||
if val_before and degraded_end and degraded_end > val_before:
|
||||
for seq in range(val_before + 1, degraded_end + 1):
|
||||
ce, _ = get_entropy_tx(ctx, seq)
|
||||
digest, entropy_count, is_zero = entropy_fields(ce)
|
||||
|
||||
if not is_zero:
|
||||
raise AssertionError(
|
||||
f"Ledger {seq}: expected ZERO entropy during 3/5 window, "
|
||||
f"got Digest={digest[:16]}... EntropyCount={entropy_count}"
|
||||
)
|
||||
|
||||
degraded_zero += 1
|
||||
log(f" Degraded ledger {seq}: EntropyCount={entropy_count} ZERO")
|
||||
|
||||
log(f"3/5 entropy summary: {degraded_zero} zero")
|
||||
|
||||
# Log checks tied to actual transition mechanics:
|
||||
# - seq=1 proposals are emitted once commit-set phase is entered
|
||||
# - ConvergingCommit transition is the gateway out of seq=0-only behavior
|
||||
# - establish gate blocked indicates tx-consensus/pause prevented accept
|
||||
ctx.log_level("LedgerConsensus", "trace")
|
||||
op = await ctx.sleep(6, name="stall_window")
|
||||
|
||||
ctx.assert_not_log(
|
||||
r"RNG: transitioned to ConvergingCommit", within=op.window, nodes=[0, 1, 2]
|
||||
)
|
||||
ctx.assert_not_log(r"RNG: propose seq=1", within=op.window, nodes=[0, 1, 2])
|
||||
|
||||
gate_blocked = ctx.search_logs(
|
||||
r"STALLDIAG: establish gate blocked reason=(pause|no-tx-consensus)",
|
||||
within=op.window,
|
||||
nodes=[0, 1, 2],
|
||||
)
|
||||
log(f"3/5: establish gate-blocked logs in 6s: {gate_blocked.count}")
|
||||
|
||||
skips = ctx.search_logs(r"RNG: bootstrap skip", within=op.window, nodes=[0, 1, 2])
|
||||
log(f"3/5: RNG bootstrap skips in 6s: {skips.count}")
|
||||
|
||||
# --- Recovery: restart nodes, verify ledger advancement ---
|
||||
ctx.start_node(3)
|
||||
ctx.start_node(4)
|
||||
await ctx.wait_for_ledgers(1, node_id=0, timeout=120)
|
||||
|
||||
val_recovered = ctx.validated_ledger_index(0)
|
||||
pre_recovery = max(v for v in [val_before, val_after] if v is not None)
|
||||
log(f"Recovered: validated seq {pre_recovery} → {val_recovered}")
|
||||
|
||||
if not val_recovered or val_recovered <= pre_recovery:
|
||||
raise AssertionError(
|
||||
f"Validated ledger did not advance after recovery "
|
||||
f"({pre_recovery} → {val_recovered})"
|
||||
)
|
||||
|
||||
# Inspect post-recovery ledgers separately from the degraded window above.
|
||||
# Once the network is back at quorum, non-zero entropy is valid again but
|
||||
# must still be quorum-met.
|
||||
zero_count = 0
|
||||
nonzero_count = 0
|
||||
for seq in range(pre_recovery + 1, val_recovered + 1):
|
||||
ce, _ = get_entropy_tx(ctx, seq)
|
||||
digest, entropy_count, is_zero = entropy_fields(ce)
|
||||
|
||||
if is_zero:
|
||||
zero_count += 1
|
||||
else:
|
||||
nonzero_count += 1
|
||||
if entropy_count < 4:
|
||||
raise AssertionError(
|
||||
f"Ledger {seq}: non-zero entropy with sub-quorum "
|
||||
f"EntropyCount={entropy_count} (need >= 4)"
|
||||
)
|
||||
|
||||
log(
|
||||
f" Ledger {seq}: EntropyCount={entropy_count} "
|
||||
f"{'ZERO' if is_zero else 'REAL'}"
|
||||
)
|
||||
|
||||
log(f"Entropy summary: {zero_count} zero, {nonzero_count} non-zero")
|
||||
|
||||
log("PASS")
|
||||
46
.testnet/scenarios/entropy/quorum_recovery_smoke.py
Normal file
46
.testnet/scenarios/entropy/quorum_recovery_smoke.py
Normal file
@@ -0,0 +1,46 @@
|
||||
""":descr: drop 2 nodes (3/5 stall), restart both, verify recovery"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
async def scenario(ctx, log):
|
||||
await ctx.wait_for_ledger_close(timeout=120)
|
||||
|
||||
feature = ctx.feature_check("ConsensusEntropy", node_id=0)
|
||||
if not feature or not feature.get("enabled", False):
|
||||
raise AssertionError(f"ConsensusEntropy not enabled: {feature}")
|
||||
|
||||
await ctx.wait_for_ledgers(1, node_id=0, timeout=60)
|
||||
log("Baseline OK")
|
||||
|
||||
# Drop 2 nodes → validation stall.
|
||||
ctx.stop_node(3)
|
||||
ctx.stop_node(4)
|
||||
await ctx.wait_for_nodes_down(nodes=[3, 4], timeout=30)
|
||||
|
||||
info = ctx.rpc.server_info(node_id=0)
|
||||
val_before = info.get("info", {}).get("validated_ledger", {}).get("seq", 0)
|
||||
log(f"Stalled at validated seq {val_before}")
|
||||
|
||||
# Let it sit for a few rounds in degraded state.
|
||||
await ctx.sleep(6)
|
||||
|
||||
# Bring both nodes back.
|
||||
ctx.start_node(3)
|
||||
ctx.start_node(4)
|
||||
log("Restarted n3 and n4, waiting for recovery...")
|
||||
|
||||
# Recovery: wait for ANY validated ledger advance on n0.
|
||||
await ctx.wait_for_ledger_close(node_id=0, timeout=60)
|
||||
|
||||
info = ctx.rpc.server_info(node_id=0)
|
||||
val_after = info.get("info", {}).get("validated_ledger", {}).get("seq", 0)
|
||||
log(f"Recovered: validated seq {val_before} → {val_after}")
|
||||
|
||||
if val_after <= val_before:
|
||||
raise AssertionError(
|
||||
f"Validated ledger did not advance after recovery "
|
||||
f"({val_before} → {val_after})"
|
||||
)
|
||||
|
||||
log("PASS")
|
||||
27
.testnet/scenarios/entropy/steady_state_entropy.py
Normal file
27
.testnet/scenarios/entropy/steady_state_entropy.py
Normal file
@@ -0,0 +1,27 @@
|
||||
""":descr: all 5 nodes healthy, every ledger has valid unique quorum-met entropy"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from helpers import require_entropy, get_entropy_tx, assert_valid_entropy
|
||||
|
||||
|
||||
async def scenario(ctx, log):
|
||||
await require_entropy(ctx, log)
|
||||
|
||||
# Wait for RNG pipeline to warm up past bootstrap skip.
|
||||
await ctx.wait_for_ledgers(3, node_id=0, timeout=60)
|
||||
log("Pipeline warmed up")
|
||||
|
||||
start_seq = ctx.validated_ledger_index(0)
|
||||
await ctx.wait_for_ledgers(10, node_id=0, timeout=120)
|
||||
end_seq = ctx.validated_ledger_index(0)
|
||||
log(f"Inspecting ledgers {start_seq + 1} → {end_seq}")
|
||||
|
||||
digests = set()
|
||||
for seq in range(start_seq + 1, end_seq + 1):
|
||||
ce, _ = get_entropy_tx(ctx, seq)
|
||||
digest, count = assert_valid_entropy(ce, seq, seen_digests=digests)
|
||||
log(f" Ledger {seq}: EntropyCount={count} Digest={digest[:16]}...")
|
||||
|
||||
log(f"Verified {end_seq - start_seq} ledgers: all quorum entropy, all unique")
|
||||
log("PASS")
|
||||
86
.testnet/scenarios/export-suite.yml
Normal file
86
.testnet/scenarios/export-suite.yml
Normal file
@@ -0,0 +1,86 @@
|
||||
defaults:
|
||||
network:
|
||||
node_count: 5
|
||||
launcher: tmux
|
||||
slave_delay: 0.2
|
||||
features:
|
||||
- ConsensusEntropy
|
||||
- Export
|
||||
track_features:
|
||||
- ConsensusEntropy
|
||||
- Export
|
||||
log_levels:
|
||||
TxQ: info
|
||||
Protocol: debug
|
||||
Peer: debug
|
||||
LedgerConsensus: debug
|
||||
NetworkOPs: info
|
||||
env:
|
||||
XAHAU_RESOURCE_PER_PORT: "1"
|
||||
XAHAU_RNG_POLL_MS: "333"
|
||||
|
||||
tests:
|
||||
# --- CE + Export (80% quorum, SHAMap convergence) ---
|
||||
- name: steady_state_export_ce
|
||||
script: .testnet/scenarios/export/steady_state_export.py
|
||||
|
||||
- name: retriable_export_ce
|
||||
script: .testnet/scenarios/export/retriable_export.py
|
||||
|
||||
- name: export_degradation_ce
|
||||
script: .testnet/scenarios/export/export_degradation.py
|
||||
network:
|
||||
node_env:
|
||||
3:
|
||||
XAHAUD_NO_EXPORT_SIG: "1"
|
||||
4:
|
||||
XAHAUD_NO_EXPORT_SIG: "1"
|
||||
|
||||
# CE + Export: 1 node suppressed, 4/5 = 80% quorum, should succeed
|
||||
- name: export_ce_one_node_down
|
||||
script: .testnet/scenarios/export/export_quorum.py
|
||||
params:
|
||||
expect_success: true
|
||||
network:
|
||||
node_env:
|
||||
4:
|
||||
XAHAUD_NO_EXPORT_SIG: "1"
|
||||
|
||||
# --- Export only, no CE (80% active-view quorum) ---
|
||||
- name: export_only_all_up
|
||||
script: .testnet/scenarios/export/export_quorum.py
|
||||
params:
|
||||
expect_success: true
|
||||
network:
|
||||
features:
|
||||
- Export
|
||||
track_features:
|
||||
- Export
|
||||
|
||||
- name: export_only_one_node_down
|
||||
script: .testnet/scenarios/export/export_quorum.py
|
||||
params:
|
||||
expect_success: true
|
||||
network:
|
||||
features:
|
||||
- Export
|
||||
track_features:
|
||||
- Export
|
||||
node_env:
|
||||
4:
|
||||
XAHAUD_NO_EXPORT_SIG: "1"
|
||||
|
||||
- name: export_only_two_nodes_down
|
||||
script: .testnet/scenarios/export/export_quorum.py
|
||||
params:
|
||||
expect_success: false
|
||||
network:
|
||||
features:
|
||||
- Export
|
||||
track_features:
|
||||
- Export
|
||||
node_env:
|
||||
3:
|
||||
XAHAUD_NO_EXPORT_SIG: "1"
|
||||
4:
|
||||
XAHAUD_NO_EXPORT_SIG: "1"
|
||||
102
.testnet/scenarios/export/export_degradation.py
Normal file
102
.testnet/scenarios/export/export_degradation.py
Normal file
@@ -0,0 +1,102 @@
|
||||
""":descr: Submit ttEXPORT with 2 nodes suppressing export sigs, verify it
|
||||
retries via terRETRY_EXPORT until LLS expiry (not enough sigs for quorum).
|
||||
|
||||
Nodes 3 and 4 have XAHAUD_NO_EXPORT_SIG=1, so only 3/5 nodes provide
|
||||
export signatures. With 80% quorum = ceil(5*0.8) = 4 required, the
|
||||
export cannot reach quorum and should expire via tecEXPORT_EXPIRED.
|
||||
|
||||
Flow:
|
||||
1. Fund alice and bob
|
||||
2. alice submits ttEXPORT with tight LLS
|
||||
3. Export retries (only 3/5 sigs available, need 4)
|
||||
4. Verify export expires with tecEXPORT_EXPIRED
|
||||
5. Verify subsequent payment still works (sequence not permanently blocked)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from export_helpers import require_export, assert_shadow_ticket
|
||||
|
||||
|
||||
async def scenario(ctx, log):
|
||||
await require_export(ctx, log)
|
||||
|
||||
# --- Setup ---
|
||||
await ctx.fund_accounts({"alice": 10000, "bob": 1000})
|
||||
log("Accounts funded")
|
||||
|
||||
alice = ctx.account("alice")
|
||||
bob = ctx.account("bob")
|
||||
current_seq = ctx.validated_ledger_index(0)
|
||||
|
||||
log(f"Current ledger: {current_seq}")
|
||||
log("Nodes 3,4 have XAHAUD_NO_EXPORT_SIG=1 (3/5 sigs, need 4)")
|
||||
|
||||
# --- Submit ttEXPORT (should retry then expire -- only 3/5 sigs) ---
|
||||
result = await ctx.submit_and_wait(
|
||||
{
|
||||
"TransactionType": "Export",
|
||||
"LastLedgerSequence": current_seq + 8,
|
||||
"Fee": "1000000",
|
||||
"ExportedTxn": {
|
||||
"TransactionType": "Payment",
|
||||
"Account": alice.address,
|
||||
"Destination": bob.address,
|
||||
"Amount": "1000000",
|
||||
"Fee": "10",
|
||||
"Sequence": 0,
|
||||
"TicketSequence": 1,
|
||||
"FirstLedgerSequence": current_seq + 1,
|
||||
"LastLedgerSequence": current_seq + 6,
|
||||
"Flags": 2147483648,
|
||||
"SigningPubKey": "",
|
||||
},
|
||||
},
|
||||
alice.wallet,
|
||||
timeout=60,
|
||||
)
|
||||
|
||||
final_seq = ctx.validated_ledger_index(0)
|
||||
engine_result = result.get("engine_result", "")
|
||||
log(f"Export completed at ledger {final_seq}, result: {engine_result}")
|
||||
|
||||
# With only 3/5 sigs and 80% quorum (4 required), export MUST fail
|
||||
if engine_result == "tesSUCCESS":
|
||||
raise AssertionError(
|
||||
"Export should NOT have succeeded with only 3/5 sigs "
|
||||
"(need 4 for 80% quorum) -- check XAHAUD_NO_EXPORT_SIG config"
|
||||
)
|
||||
|
||||
# Should be tecEXPORT_EXPIRED (LLS reached without quorum)
|
||||
if engine_result != "tecEXPORT_EXPIRED":
|
||||
log(f"WARNING: expected tecEXPORT_EXPIRED, got {engine_result}")
|
||||
|
||||
log(f"Export failed as expected ({engine_result})")
|
||||
|
||||
# No shadow ticket should exist (export never reached quorum)
|
||||
assert_shadow_ticket(ctx, alice.address, log, expect_exists=False)
|
||||
|
||||
# --- Verify subsequent payment works regardless ---
|
||||
log("Submitting payment from alice to bob...")
|
||||
pay_result = await ctx.submit_and_wait(
|
||||
{
|
||||
"TransactionType": "Payment",
|
||||
"Destination": bob.address,
|
||||
"Amount": "1000000",
|
||||
"Fee": "12",
|
||||
},
|
||||
alice.wallet,
|
||||
timeout=30,
|
||||
)
|
||||
|
||||
pay_engine = pay_result.get("engine_result", "")
|
||||
log(f"Payment result: {pay_engine}")
|
||||
|
||||
if pay_engine != "tesSUCCESS":
|
||||
raise AssertionError(
|
||||
f"Payment failed after expired export: {pay_engine} "
|
||||
f"-- sequence may be blocked"
|
||||
)
|
||||
|
||||
log("Payment succeeded -- account not permanently blocked")
|
||||
log("PASS")
|
||||
144
.testnet/scenarios/export/export_helpers.py
Normal file
144
.testnet/scenarios/export/export_helpers.py
Normal file
@@ -0,0 +1,144 @@
|
||||
"""Shared helpers for Export scenario tests."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
async def require_export(ctx, log):
|
||||
"""Wait for first ledger and assert Export amendment is enabled."""
|
||||
await ctx.wait_for_ledger_close(timeout=120)
|
||||
feature = ctx.feature_check("Export", node_id=0)
|
||||
if not feature or not feature.get("enabled", False):
|
||||
raise AssertionError(f"Export not enabled: {feature}")
|
||||
log("Export amendment enabled")
|
||||
|
||||
|
||||
def find_export_txns(ctx, seq):
|
||||
"""Find Export transactions in a ledger.
|
||||
|
||||
Returns list of Export transaction dicts.
|
||||
"""
|
||||
result = ctx.ledger(seq, transactions=True)
|
||||
if not result:
|
||||
return []
|
||||
|
||||
txns = result.get("ledger", {}).get("transactions", [])
|
||||
return [tx for tx in txns if tx.get("TransactionType") == "Export"]
|
||||
|
||||
|
||||
def dst_param(address):
|
||||
"""Encode an address as a HookParameter entry for the DST param."""
|
||||
from xrpl.core.addresscodec import decode_classic_address
|
||||
|
||||
dst_hex = decode_classic_address(address).hex().upper()
|
||||
return {
|
||||
"HookParameter": {
|
||||
"HookParameterName": "445354", # "DST"
|
||||
"HookParameterValue": dst_hex,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def assert_hook_accepted(meta, log, *, expected_emits=1):
|
||||
"""Assert hook executed with ACCEPT and the expected emit count.
|
||||
|
||||
Checks sfHookExecutions in transaction metadata.
|
||||
Returns the hook execution entry for further inspection.
|
||||
"""
|
||||
hook_execs = meta.get("HookExecutions", [])
|
||||
if not hook_execs:
|
||||
raise AssertionError("No HookExecutions in metadata")
|
||||
|
||||
exec_entry = hook_execs[0].get("HookExecution", {})
|
||||
hook_result = exec_entry.get("HookResult", -1)
|
||||
emit_count = exec_entry.get("HookEmitCount", -1)
|
||||
return_code = exec_entry.get("HookReturnCode", "")
|
||||
|
||||
log(f" HookResult={hook_result} EmitCount={emit_count} ReturnCode={return_code}")
|
||||
|
||||
# HookResult 3 = ExitType::ACCEPT
|
||||
if hook_result != 3:
|
||||
raise AssertionError(
|
||||
f"Hook did not ACCEPT: HookResult={hook_result} "
|
||||
f"ReturnCode={return_code}"
|
||||
)
|
||||
|
||||
if emit_count != expected_emits:
|
||||
raise AssertionError(
|
||||
f"Expected {expected_emits} emits, got {emit_count}"
|
||||
)
|
||||
|
||||
# ReturnCode 0 = success; non-zero = ASSERT line number in hook
|
||||
if return_code and str(return_code) != "0":
|
||||
raise AssertionError(
|
||||
f"Hook returned error code {return_code} "
|
||||
f"(likely ASSERT failure at that line)"
|
||||
)
|
||||
|
||||
return exec_entry
|
||||
|
||||
|
||||
def assert_export_result(meta, log, *, require_signers=True):
|
||||
"""Assert ExportResult is present and well-formed in metadata.
|
||||
|
||||
Returns the ExportResult dict.
|
||||
"""
|
||||
export_result = meta.get("ExportResult", {})
|
||||
if not export_result:
|
||||
raise AssertionError("ExportResult not found in metadata")
|
||||
|
||||
# Must have LedgerSequence and TransactionHash
|
||||
if "LedgerSequence" not in export_result:
|
||||
raise AssertionError("ExportResult missing LedgerSequence")
|
||||
if "TransactionHash" not in export_result:
|
||||
raise AssertionError("ExportResult missing TransactionHash")
|
||||
|
||||
# Must have the inner ExportedTxn object
|
||||
inner = export_result.get("ExportedTxn", {})
|
||||
if not inner:
|
||||
raise AssertionError("ExportResult missing ExportedTxn (multisigned blob)")
|
||||
|
||||
log(f" ExportResult: seq={export_result['LedgerSequence']} "
|
||||
f"hash={export_result['TransactionHash'][:16]}...")
|
||||
|
||||
# Inner tx should have Account, Destination, TransactionType
|
||||
if "Account" not in inner:
|
||||
raise AssertionError("ExportedTxn missing Account")
|
||||
if "TransactionType" not in inner:
|
||||
raise AssertionError("ExportedTxn missing TransactionType")
|
||||
|
||||
# Should have empty SigningPubKey (multisigned)
|
||||
if inner.get("SigningPubKey", "NOT_EMPTY") != "":
|
||||
raise AssertionError(
|
||||
f"ExportedTxn SigningPubKey should be empty, "
|
||||
f"got '{inner.get('SigningPubKey')}'"
|
||||
)
|
||||
|
||||
if require_signers:
|
||||
signers = inner.get("Signers", [])
|
||||
if not signers:
|
||||
raise AssertionError("ExportedTxn has no Signers (multisig not applied)")
|
||||
log(f" Signers: {len(signers)} validator(s)")
|
||||
|
||||
return export_result
|
||||
|
||||
|
||||
def assert_shadow_ticket(ctx, account_address, log, *, expect_exists=True):
|
||||
"""Assert shadow ticket exists (or doesn't) for the account."""
|
||||
obj_result = ctx.rpc.request(
|
||||
0, "account_objects", {"account": account_address}
|
||||
)
|
||||
all_objects = (obj_result or {}).get("account_objects", [])
|
||||
shadow_tickets = [
|
||||
obj for obj in all_objects
|
||||
if obj.get("LedgerEntryType") == "ShadowTicket"
|
||||
]
|
||||
log(f" Shadow tickets: {len(shadow_tickets)}")
|
||||
|
||||
if expect_exists and not shadow_tickets:
|
||||
raise AssertionError("Expected shadow ticket but none found")
|
||||
if not expect_exists and shadow_tickets:
|
||||
raise AssertionError(
|
||||
f"Expected no shadow tickets but found {len(shadow_tickets)}"
|
||||
)
|
||||
|
||||
return shadow_tickets
|
||||
112
.testnet/scenarios/export/export_quorum.py
Normal file
112
.testnet/scenarios/export/export_quorum.py
Normal file
@@ -0,0 +1,112 @@
|
||||
""":descr: Test Export quorum behavior. When enough active validators sign,
|
||||
the export should succeed whether or not CE is enabled. When fewer than the
|
||||
active-view quorum sign, the export should expire.
|
||||
|
||||
Parameterized via `expect_success` kwarg from suite.yml.
|
||||
|
||||
Flow:
|
||||
1. Fund alice and bob
|
||||
2. alice submits ttEXPORT
|
||||
3. Verify result matches expectation (tesSUCCESS or tecEXPORT_EXPIRED)
|
||||
4. Verify ExportResult + shadow ticket on success, absence on failure
|
||||
5. Verify subsequent payment works regardless
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from export_helpers import (
|
||||
require_export,
|
||||
assert_export_result,
|
||||
assert_shadow_ticket,
|
||||
)
|
||||
|
||||
|
||||
async def scenario(ctx, log, expect_success=True):
|
||||
await require_export(ctx, log)
|
||||
|
||||
# --- Setup ---
|
||||
await ctx.fund_accounts({"alice": 10000, "bob": 1000})
|
||||
log("Accounts funded")
|
||||
|
||||
alice = ctx.account("alice")
|
||||
bob = ctx.account("bob")
|
||||
current_seq = ctx.validated_ledger_index(0)
|
||||
|
||||
log(f"Current ledger: {current_seq}")
|
||||
outcome = "success" if expect_success else "failure (below quorum)"
|
||||
log(f"Expecting export {outcome}")
|
||||
|
||||
# --- Submit ttEXPORT ---
|
||||
result = await ctx.submit_and_wait(
|
||||
{
|
||||
"TransactionType": "Export",
|
||||
"LastLedgerSequence": current_seq + 10,
|
||||
"Fee": "1000000",
|
||||
"ExportedTxn": {
|
||||
"TransactionType": "Payment",
|
||||
"Account": alice.address,
|
||||
"Destination": bob.address,
|
||||
"Amount": "1000000",
|
||||
"Fee": "10",
|
||||
"Sequence": 0,
|
||||
"TicketSequence": 1,
|
||||
"FirstLedgerSequence": current_seq + 1,
|
||||
"LastLedgerSequence": current_seq + 8,
|
||||
"Flags": 2147483648,
|
||||
"SigningPubKey": "",
|
||||
},
|
||||
},
|
||||
alice.wallet,
|
||||
timeout=60,
|
||||
)
|
||||
|
||||
final_seq = ctx.validated_ledger_index(0)
|
||||
engine_result = result.get("engine_result", "")
|
||||
meta = result.get("meta", {})
|
||||
|
||||
log(f"Export at ledger {final_seq}, result: {engine_result}")
|
||||
|
||||
if expect_success:
|
||||
if engine_result != "tesSUCCESS":
|
||||
raise AssertionError(
|
||||
f"Expected tesSUCCESS, got {engine_result}"
|
||||
)
|
||||
|
||||
# Assert ExportResult is well-formed with signers
|
||||
assert_export_result(meta, log, require_signers=True)
|
||||
|
||||
# Assert shadow ticket was created
|
||||
assert_shadow_ticket(ctx, alice.address, log, expect_exists=True)
|
||||
|
||||
log("Export succeeded as expected (active-view quorum reached)")
|
||||
else:
|
||||
if engine_result == "tesSUCCESS":
|
||||
raise AssertionError(
|
||||
"Export should NOT have succeeded below active-view quorum"
|
||||
)
|
||||
log(f"Export failed as expected ({engine_result})")
|
||||
|
||||
# No shadow ticket should exist
|
||||
assert_shadow_ticket(ctx, alice.address, log, expect_exists=False)
|
||||
|
||||
# --- Verify subsequent payment works ---
|
||||
log("Submitting payment from alice to bob...")
|
||||
pay_result = await ctx.submit_and_wait(
|
||||
{
|
||||
"TransactionType": "Payment",
|
||||
"Destination": bob.address,
|
||||
"Amount": "1000000",
|
||||
"Fee": "12",
|
||||
},
|
||||
alice.wallet,
|
||||
timeout=30,
|
||||
)
|
||||
|
||||
pay_engine = pay_result.get("engine_result", "")
|
||||
log(f"Payment result: {pay_engine}")
|
||||
|
||||
if pay_engine != "tesSUCCESS":
|
||||
raise AssertionError(f"Payment failed: {pay_engine}")
|
||||
|
||||
log("Payment succeeded -- account not blocked")
|
||||
log("PASS")
|
||||
94
.testnet/scenarios/export/retriable_export.py
Normal file
94
.testnet/scenarios/export/retriable_export.py
Normal file
@@ -0,0 +1,94 @@
|
||||
""":descr: Submit ttEXPORT directly (no hook), verify it succeeds with
|
||||
ExportResult in metadata. Then submit a payment from the same account
|
||||
to verify sequence handling doesn't block subsequent transactions.
|
||||
|
||||
Flow:
|
||||
1. Fund alice and bob
|
||||
2. alice submits ttEXPORT with inner payment -> tesSUCCESS (provisional)
|
||||
3. Validators attach sigs via proposals -> quorum -> ExportResult in metadata
|
||||
4. alice submits a Payment to bob -> should succeed (sequence not blocked)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from export_helpers import require_export, assert_export_result, assert_shadow_ticket
|
||||
|
||||
|
||||
async def scenario(ctx, log):
|
||||
await require_export(ctx, log)
|
||||
|
||||
# --- Setup ---
|
||||
await ctx.fund_accounts({"alice": 10000, "bob": 1000})
|
||||
log("Accounts funded")
|
||||
|
||||
alice = ctx.account("alice")
|
||||
bob = ctx.account("bob")
|
||||
current_seq = ctx.validated_ledger_index(0)
|
||||
|
||||
log(f"Current ledger: {current_seq}")
|
||||
|
||||
# --- 1. Submit ttEXPORT ---
|
||||
result = await ctx.submit_and_wait(
|
||||
{
|
||||
"TransactionType": "Export",
|
||||
"LastLedgerSequence": current_seq + 15,
|
||||
"Fee": "1000000",
|
||||
"ExportedTxn": {
|
||||
"TransactionType": "Payment",
|
||||
"Account": alice.address,
|
||||
"Destination": bob.address,
|
||||
"Amount": "1000000",
|
||||
"Fee": "10",
|
||||
"Sequence": 0,
|
||||
"TicketSequence": 1,
|
||||
"FirstLedgerSequence": current_seq + 1,
|
||||
"LastLedgerSequence": current_seq + 10,
|
||||
"Flags": 2147483648,
|
||||
"SigningPubKey": "",
|
||||
},
|
||||
},
|
||||
alice.wallet,
|
||||
timeout=60,
|
||||
)
|
||||
|
||||
export_seq = ctx.validated_ledger_index(0)
|
||||
engine_result = result.get("engine_result", "")
|
||||
log(f"Export completed at ledger {export_seq}, result: {engine_result}")
|
||||
|
||||
if engine_result != "tesSUCCESS":
|
||||
raise AssertionError(
|
||||
f"Expected tesSUCCESS for export, got {engine_result}"
|
||||
)
|
||||
|
||||
# Assert ExportResult is well-formed with signers
|
||||
meta = result.get("meta", {})
|
||||
assert_export_result(meta, log, require_signers=True)
|
||||
|
||||
# Assert shadow ticket was created
|
||||
assert_shadow_ticket(ctx, alice.address, log, expect_exists=True)
|
||||
|
||||
# --- 2. Submit Payment from same account ---
|
||||
log("Submitting payment from alice to bob...")
|
||||
pay_result = await ctx.submit_and_wait(
|
||||
{
|
||||
"TransactionType": "Payment",
|
||||
"Destination": bob.address,
|
||||
"Amount": "1000000",
|
||||
"Fee": "12",
|
||||
},
|
||||
alice.wallet,
|
||||
timeout=30,
|
||||
)
|
||||
|
||||
pay_engine = pay_result.get("engine_result", "")
|
||||
log(f"Payment result: {pay_engine}")
|
||||
|
||||
if pay_engine != "tesSUCCESS":
|
||||
raise AssertionError(f"Payment failed: {pay_engine}")
|
||||
|
||||
log(
|
||||
f"Both transactions succeeded: "
|
||||
f"Export at ledger {export_seq}, Payment at ledger {ctx.validated_ledger_index(0)}"
|
||||
)
|
||||
log("Sequence handling OK - export didn't block subsequent txns")
|
||||
log("PASS")
|
||||
211
.testnet/scenarios/export/steady_state_export.py
Normal file
211
.testnet/scenarios/export/steady_state_export.py
Normal file
@@ -0,0 +1,211 @@
|
||||
""":descr: install xport hook, trigger export, verify emitted ttEXPORT lifecycle
|
||||
|
||||
1. Fund alice (hook holder), bob (trigger), carol (export destination)
|
||||
2. Install xport hook on alice
|
||||
3. bob pays alice with DST=carol → hook calls xport() → emits ttEXPORT
|
||||
4. Emitted ttEXPORT enters open ledger, validators attach sigs via proposals
|
||||
5. Verify Export transaction appears in a subsequent ledger
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from export_helpers import (
|
||||
require_export,
|
||||
find_export_txns,
|
||||
dst_param,
|
||||
assert_hook_accepted,
|
||||
assert_export_result,
|
||||
assert_shadow_ticket,
|
||||
)
|
||||
|
||||
# C source for the xport hook — verbatim from src/test/app/Export_test_hooks.h
|
||||
# On Payment to the hook account, exports a 1 XAH payment to the DST param.
|
||||
XPORT_HOOK_C = r"""
|
||||
#include <stdint.h>
|
||||
extern int32_t _g(uint32_t id, uint32_t maxiter);
|
||||
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t xport(uint32_t write_ptr, uint32_t write_len, uint32_t read_ptr, uint32_t read_len);
|
||||
extern int64_t xport_reserve(uint32_t count);
|
||||
extern int64_t hook_account(uint32_t write_ptr, uint32_t write_len);
|
||||
extern int64_t otxn_param(uint32_t write_ptr, uint32_t write_len, uint32_t name_ptr, uint32_t name_len);
|
||||
extern int64_t otxn_type(void);
|
||||
extern int64_t ledger_seq(void);
|
||||
|
||||
#define SBUF(x) (uint32_t)(x), sizeof(x)
|
||||
#define ASSERT(x) if (!(x)) rollback((uint32_t)#x, sizeof(#x), __LINE__)
|
||||
|
||||
#define ttPAYMENT 0
|
||||
#define tfCANONICAL 0x80000000UL
|
||||
#define amAMOUNT 1
|
||||
#define amFEE 8
|
||||
#define atACCOUNT 1
|
||||
#define atDESTINATION 3
|
||||
|
||||
#define ENCODE_TT(buf_out, tt) \
|
||||
buf_out[0] = 0x12U; buf_out[1] = (tt >> 8) & 0xFFU; buf_out[2] = tt & 0xFFU; buf_out += 3;
|
||||
|
||||
#define ENCODE_FLAGS(buf_out, flags) \
|
||||
buf_out[0] = 0x22U; buf_out[1] = (flags >> 24) & 0xFFU; buf_out[2] = (flags >> 16) & 0xFFU; \
|
||||
buf_out[3] = (flags >> 8) & 0xFFU; buf_out[4] = flags & 0xFFU; buf_out += 5;
|
||||
|
||||
#define ENCODE_SEQUENCE(buf_out, seq) \
|
||||
buf_out[0] = 0x24U; buf_out[1] = (seq >> 24) & 0xFFU; buf_out[2] = (seq >> 16) & 0xFFU; \
|
||||
buf_out[3] = (seq >> 8) & 0xFFU; buf_out[4] = seq & 0xFFU; buf_out += 5;
|
||||
|
||||
#define ENCODE_FLS(buf_out, fls) \
|
||||
buf_out[0] = 0x20U; buf_out[1] = 0x1AU; buf_out[2] = (fls >> 24) & 0xFFU; \
|
||||
buf_out[3] = (fls >> 16) & 0xFFU; buf_out[4] = (fls >> 8) & 0xFFU; \
|
||||
buf_out[5] = fls & 0xFFU; buf_out += 6;
|
||||
|
||||
#define ENCODE_LLS(buf_out, lls) \
|
||||
buf_out[0] = 0x20U; buf_out[1] = 0x1BU; buf_out[2] = (lls >> 24) & 0xFFU; \
|
||||
buf_out[3] = (lls >> 16) & 0xFFU; buf_out[4] = (lls >> 8) & 0xFFU; \
|
||||
buf_out[5] = lls & 0xFFU; buf_out += 6;
|
||||
|
||||
#define ENCODE_DROPS(buf_out, drops, amt_type) \
|
||||
buf_out[0] = 0x60U + amt_type; buf_out[1] = 0x40U + ((drops >> 56) & 0x3FU); \
|
||||
buf_out[2] = (drops >> 48) & 0xFFU; buf_out[3] = (drops >> 40) & 0xFFU; \
|
||||
buf_out[4] = (drops >> 32) & 0xFFU; buf_out[5] = (drops >> 24) & 0xFFU; \
|
||||
buf_out[6] = (drops >> 16) & 0xFFU; buf_out[7] = (drops >> 8) & 0xFFU; \
|
||||
buf_out[8] = drops & 0xFFU; buf_out += 9;
|
||||
|
||||
#define ENCODE_SIGNING_PUBKEY_EMPTY(buf_out) \
|
||||
buf_out[0] = 0x73U; buf_out[1] = 0x00U; buf_out += 2;
|
||||
|
||||
#define ENCODE_ACCOUNT(buf_out, acc, acc_type) \
|
||||
buf_out[0] = 0x80U + acc_type; buf_out[1] = 0x14U; \
|
||||
for (int i = 0; i < 20; ++i) buf_out[2+i] = acc[i]; buf_out += 22;
|
||||
|
||||
#define PREPARE_PAYMENT_SIMPLE_SIZE 270U
|
||||
|
||||
int64_t hook(uint32_t reserved) {
|
||||
_g(1, 1);
|
||||
|
||||
if (otxn_type() != ttPAYMENT)
|
||||
return accept(0, 0, 0);
|
||||
|
||||
ASSERT(xport_reserve(1) == 1);
|
||||
|
||||
uint8_t dst[20];
|
||||
int64_t dst_len = otxn_param(SBUF(dst), "DST", 3);
|
||||
ASSERT(dst_len == 20);
|
||||
|
||||
uint8_t acc[20];
|
||||
ASSERT(hook_account(SBUF(acc)) == 20);
|
||||
|
||||
uint32_t cls = (uint32_t)ledger_seq();
|
||||
|
||||
uint8_t tx[PREPARE_PAYMENT_SIMPLE_SIZE];
|
||||
uint8_t* buf = tx;
|
||||
|
||||
ENCODE_TT(buf, ttPAYMENT);
|
||||
ENCODE_FLAGS(buf, tfCANONICAL);
|
||||
ENCODE_SEQUENCE(buf, 0);
|
||||
ENCODE_FLS(buf, cls + 1);
|
||||
ENCODE_LLS(buf, cls + 5);
|
||||
// sfTicketSequence = UINT32 field 41 = 0x20 0x29
|
||||
buf[0] = 0x20U; buf[1] = 0x29U;
|
||||
buf[2] = 0; buf[3] = 0; buf[4] = 0; buf[5] = 1;
|
||||
buf += 6;
|
||||
|
||||
uint64_t drops = 1000000;
|
||||
ENCODE_DROPS(buf, drops, amAMOUNT);
|
||||
ENCODE_DROPS(buf, 10, amFEE);
|
||||
|
||||
ENCODE_SIGNING_PUBKEY_EMPTY(buf);
|
||||
ENCODE_ACCOUNT(buf, acc, atACCOUNT);
|
||||
ENCODE_ACCOUNT(buf, dst, atDESTINATION);
|
||||
|
||||
uint8_t hash[32];
|
||||
int64_t xport_result = xport(SBUF(hash), (uint32_t)tx, buf - tx);
|
||||
ASSERT(xport_result == 32);
|
||||
|
||||
return accept(0, 0, 0);
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
async def scenario(ctx, log):
|
||||
# Wait for network to start and amendments to activate
|
||||
await require_export(ctx, log)
|
||||
|
||||
# --- Setup ---
|
||||
await ctx.fund_accounts({"alice": 10000, "bob": 10000, "carol": 1000})
|
||||
log("Accounts funded")
|
||||
|
||||
alice = ctx.account("alice")
|
||||
carol = ctx.account("carol")
|
||||
|
||||
# Compile and install xport hook on alice
|
||||
wasm = ctx.compile_hook(XPORT_HOOK_C, label="xport")
|
||||
await ctx.submit_and_wait(
|
||||
{
|
||||
"TransactionType": "SetHook",
|
||||
"Hooks": [
|
||||
{
|
||||
"Hook": {
|
||||
"CreateCode": wasm.hex().upper(),
|
||||
"HookOn": "0" * 64,
|
||||
"HookNamespace": "0" * 64,
|
||||
"HookApiVersion": 0,
|
||||
"Flags": 1, # hsfOVERRIDE
|
||||
}
|
||||
}
|
||||
],
|
||||
"Fee": "100000000",
|
||||
},
|
||||
alice.wallet,
|
||||
)
|
||||
log(
|
||||
f"Hook installed on alice ({alice.address[:12]}...) "
|
||||
f"ledger {ctx.validated_ledger_index(0)}"
|
||||
)
|
||||
|
||||
# --- Trigger ---
|
||||
# bob pays alice → hook calls xport() → emits ttEXPORT
|
||||
trigger_result = await ctx.submit_and_wait(
|
||||
{
|
||||
"TransactionType": "Payment",
|
||||
"Destination": alice.address,
|
||||
"Amount": "100000000",
|
||||
"Fee": "1000000",
|
||||
"HookParameters": [dst_param(carol.address)],
|
||||
},
|
||||
ctx.account("bob").wallet,
|
||||
)
|
||||
trigger_seq = ctx.validated_ledger_index(0)
|
||||
log(f"Export triggered at ledger {trigger_seq}")
|
||||
|
||||
# Assert hook fired with ACCEPT and emitted 1 tx
|
||||
trigger_meta = trigger_result.get("meta", {})
|
||||
assert_hook_accepted(trigger_meta, log, expected_emits=1)
|
||||
|
||||
# --- Verify: check each ledger close for the Export transaction ---
|
||||
max_ledgers = 10
|
||||
for i in range(max_ledgers):
|
||||
await ctx.wait_for_ledgers(1, node_id=0, timeout=30)
|
||||
seq = ctx.validated_ledger_index(0)
|
||||
exports = find_export_txns(ctx, seq)
|
||||
if exports:
|
||||
export_tx = exports[0]
|
||||
meta = export_tx.get("meta", export_tx.get("metaData", {}))
|
||||
result = meta.get("TransactionResult", "")
|
||||
log(f"Ledger {seq}: Export txn found, result={result}")
|
||||
|
||||
if result != "tesSUCCESS":
|
||||
raise AssertionError(f"Export did not succeed: {result}")
|
||||
|
||||
# Assert ExportResult is well-formed with signers and inner tx
|
||||
assert_export_result(meta, log, require_signers=True)
|
||||
|
||||
# Assert shadow ticket was created
|
||||
assert_shadow_ticket(ctx, alice.address, log, expect_exists=True)
|
||||
|
||||
log("PASS")
|
||||
return
|
||||
log(f"Ledger {seq}: no Export txn yet")
|
||||
|
||||
raise AssertionError(
|
||||
f"No Export transaction found after {max_ledgers} ledger closes"
|
||||
)
|
||||
60
.testnet/scenarios/helpers.py
Normal file
60
.testnet/scenarios/helpers.py
Normal file
@@ -0,0 +1,60 @@
|
||||
"""Shared helpers for ConsensusEntropy scenario tests."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
ZERO_DIGEST = "0" * 64
|
||||
|
||||
|
||||
async def require_entropy(ctx, log):
|
||||
"""Wait for first ledger and assert ConsensusEntropy is enabled."""
|
||||
await ctx.wait_for_ledger_close(timeout=120)
|
||||
feature = ctx.feature_check("ConsensusEntropy", node_id=0)
|
||||
if not feature or not feature.get("enabled", False):
|
||||
raise AssertionError(f"ConsensusEntropy not enabled: {feature}")
|
||||
log("ConsensusEntropy enabled")
|
||||
|
||||
|
||||
def get_entropy_tx(ctx, seq):
|
||||
"""Fetch ledger and return (ce_tx, user_txns) or raise."""
|
||||
result = ctx.ledger(seq, transactions=True)
|
||||
if not result:
|
||||
raise AssertionError(f"Ledger {seq}: fetch failed")
|
||||
|
||||
txns = result.get("ledger", {}).get("transactions", [])
|
||||
ce = [tx for tx in txns if tx.get("TransactionType") == "ConsensusEntropy"]
|
||||
user = [tx for tx in txns if tx.get("TransactionType") != "ConsensusEntropy"]
|
||||
|
||||
if len(ce) != 1:
|
||||
raise AssertionError(
|
||||
f"Ledger {seq}: expected 1 ConsensusEntropy txn, got {len(ce)}"
|
||||
)
|
||||
|
||||
return ce[0], user
|
||||
|
||||
|
||||
def entropy_fields(ce_tx):
|
||||
"""Return (digest, entropy_count, is_zero) from a ConsensusEntropy tx."""
|
||||
digest = ce_tx.get("Digest", "")
|
||||
entropy_count = ce_tx.get("EntropyCount", -1)
|
||||
is_zero = digest == ZERO_DIGEST and entropy_count == 0
|
||||
return digest, entropy_count, is_zero
|
||||
|
||||
|
||||
def assert_valid_entropy(ce_tx, seq, seen_digests=None):
|
||||
"""Assert non-zero quorum-met entropy. Optionally check uniqueness."""
|
||||
digest, entropy_count, is_zero = entropy_fields(ce_tx)
|
||||
|
||||
if is_zero or not digest:
|
||||
raise AssertionError(f"Ledger {seq}: zero/empty Digest")
|
||||
|
||||
if entropy_count < 4:
|
||||
raise AssertionError(
|
||||
f"Ledger {seq}: EntropyCount={entropy_count} < 4 (sub-quorum)"
|
||||
)
|
||||
|
||||
if seen_digests is not None:
|
||||
if digest in seen_digests:
|
||||
raise AssertionError(f"Ledger {seq}: duplicate Digest {digest[:16]}...")
|
||||
seen_digests.add(digest)
|
||||
|
||||
return digest, entropy_count
|
||||
42
.testnet/scenarios/suite.yml
Normal file
42
.testnet/scenarios/suite.yml
Normal file
@@ -0,0 +1,42 @@
|
||||
defaults:
|
||||
network:
|
||||
node_count: 5
|
||||
launcher: tmux
|
||||
slave_delay: 0.2
|
||||
features:
|
||||
- ConsensusEntropy
|
||||
track_features:
|
||||
- ConsensusEntropy
|
||||
log_levels:
|
||||
TxQ: info
|
||||
Protocol: debug
|
||||
Peer: debug
|
||||
LedgerConsensus: debug
|
||||
NetworkOPs: info
|
||||
env:
|
||||
XAHAU_RESOURCE_PER_PORT: "1"
|
||||
XAHAU_RNG_POLL_MS: "333"
|
||||
|
||||
tests:
|
||||
- name: steady_state_entropy
|
||||
script: .testnet/scenarios/entropy/steady_state_entropy.py
|
||||
|
||||
- name: steady_state_entropy_fast_start
|
||||
script: .testnet/scenarios/entropy/steady_state_entropy.py
|
||||
network:
|
||||
env:
|
||||
XAHAUD_BOOTSTRAP_FAST_START: "1"
|
||||
|
||||
- name: entropy_with_transactions
|
||||
script: .testnet/scenarios/entropy/entropy_with_transactions.py
|
||||
|
||||
- name: quorum_recovery_smoke
|
||||
script: .testnet/scenarios/entropy/quorum_recovery_smoke.py
|
||||
|
||||
- name: quorum_degradation_smoke
|
||||
script: .testnet/scenarios/entropy/quorum_degradation_smoke.py
|
||||
network:
|
||||
log_levels:
|
||||
LedgerConsensus: trace
|
||||
|
||||
# Export scenarios: see export-suite.yml
|
||||
@@ -50,7 +50,7 @@ that `test` code should *never* be included in `ripple` code.)
|
||||
|
||||
## Validation
|
||||
|
||||
The [levelization.sh](levelization.sh) script takes no parameters,
|
||||
The [levelization.py](levelization.py) script takes no parameters,
|
||||
reads no environment variables, and can be run from any directory,
|
||||
as long as it is in the expected location in the rippled repo.
|
||||
It can be run at any time from within a checked out repo, and will
|
||||
@@ -84,7 +84,7 @@ It generates many files of [results](results):
|
||||
Github Actions workflow to test that levelization loops haven't
|
||||
changed. Unfortunately, if changes are detected, it can't tell if
|
||||
they are improvements or not, so if you have resolved any issues or
|
||||
done anything else to improve levelization, run `levelization.sh`,
|
||||
done anything else to improve levelization, run `levelization.py`,
|
||||
and commit the updated results.
|
||||
|
||||
The `loops.txt` and `ordering.txt` files relate the modules
|
||||
@@ -108,7 +108,7 @@ The committed files hide the detailed values intentionally, to
|
||||
prevent false alarms and merging issues, and because it's easy to
|
||||
get those details locally.
|
||||
|
||||
1. Run `levelization.sh`
|
||||
1. Run `levelization.py`
|
||||
2. Grep the modules in `paths.txt`.
|
||||
* For example, if a cycle is found `A ~= B`, simply `grep -w
|
||||
A Builds/levelization/results/paths.txt | grep -w B`
|
||||
|
||||
283
Builds/levelization/levelization.py
Executable file
283
Builds/levelization/levelization.py
Executable file
@@ -0,0 +1,283 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Usage: levelization.py
|
||||
This script takes no parameters, and can be called from any directory in the file system.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
from pathlib import Path
|
||||
|
||||
# Compile regex patterns once at module level
|
||||
INCLUDE_PATTERN = re.compile(r"^\s*#include.*/.*\.h")
|
||||
INCLUDE_PATH_PATTERN = re.compile(r'[<"]([^>"]+)[>"]')
|
||||
|
||||
|
||||
def dictionary_sort_key(s):
|
||||
"""
|
||||
Create a sort key that mimics 'sort -d' (dictionary order).
|
||||
Dictionary order only considers blanks and alphanumeric characters.
|
||||
"""
|
||||
return "".join(c for c in s if c.isalnum() or c.isspace())
|
||||
|
||||
|
||||
def get_level(file_path):
|
||||
"""
|
||||
Extract the level from a file path (second and third directory components).
|
||||
Equivalent to bash: cut -d/ -f 2,3
|
||||
|
||||
Examples:
|
||||
src/ripple/app/main.cpp -> ripple.app
|
||||
src/test/app/Import_test.cpp -> test.app
|
||||
"""
|
||||
parts = file_path.split("/")
|
||||
|
||||
if len(parts) >= 3:
|
||||
level = f"{parts[1]}/{parts[2]}"
|
||||
elif len(parts) >= 2:
|
||||
level = f"{parts[1]}/toplevel"
|
||||
else:
|
||||
level = file_path
|
||||
|
||||
# If the "level" indicates a file, cut off the filename
|
||||
if "." in level.split("/")[-1]:
|
||||
# Use the "toplevel" label as a workaround for `sort`
|
||||
# inconsistencies between different utility versions
|
||||
level = level.rsplit("/", 1)[0] + "/toplevel"
|
||||
|
||||
return level.replace("/", ".")
|
||||
|
||||
|
||||
def extract_include_level(include_line):
|
||||
"""
|
||||
Extract the include path from an #include directive.
|
||||
Gets the first two directory components from the include path.
|
||||
Equivalent to bash: cut -d/ -f 1,2
|
||||
|
||||
Examples:
|
||||
#include <ripple/basics/base_uint.h> -> ripple.basics
|
||||
#include "ripple/app/main/Application.h" -> ripple.app
|
||||
"""
|
||||
match = INCLUDE_PATH_PATTERN.search(include_line)
|
||||
if not match:
|
||||
return None
|
||||
|
||||
include_path = match.group(1)
|
||||
parts = include_path.split("/")
|
||||
|
||||
if len(parts) >= 2:
|
||||
include_level = f"{parts[0]}/{parts[1]}"
|
||||
else:
|
||||
include_level = include_path
|
||||
|
||||
# If the "includelevel" indicates a file, cut off the filename
|
||||
if "." in include_level.split("/")[-1]:
|
||||
include_level = include_level.rsplit("/", 1)[0] + "/toplevel"
|
||||
|
||||
return include_level.replace("/", ".")
|
||||
|
||||
|
||||
def find_repository_directories(start_path, depth_limit=10):
|
||||
"""
|
||||
Find the repository root by looking for src or include folders.
|
||||
Walks up the directory tree from the start path.
|
||||
"""
|
||||
current = start_path.resolve()
|
||||
|
||||
for _ in range(depth_limit):
|
||||
src_path = current / "src"
|
||||
include_path = current / "include"
|
||||
has_src = src_path.exists()
|
||||
has_include = include_path.exists()
|
||||
|
||||
if has_src or has_include:
|
||||
dirs = []
|
||||
if has_src:
|
||||
dirs.append(src_path)
|
||||
if has_include:
|
||||
dirs.append(include_path)
|
||||
return current, dirs
|
||||
|
||||
parent = current.parent
|
||||
if parent == current:
|
||||
break
|
||||
current = parent
|
||||
|
||||
raise RuntimeError(
|
||||
"Could not find repository root. "
|
||||
"Expected to find a directory containing 'src' and/or 'include' folders."
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
script_dir = Path(__file__).parent.resolve()
|
||||
os.chdir(script_dir)
|
||||
|
||||
# Clean up and create results directory.
|
||||
results_dir = script_dir / "results"
|
||||
if results_dir.exists():
|
||||
import shutil
|
||||
|
||||
shutil.rmtree(results_dir)
|
||||
results_dir.mkdir()
|
||||
|
||||
# Find the repository root.
|
||||
try:
|
||||
repo_root, scan_dirs = find_repository_directories(script_dir)
|
||||
print(f"Found repository root: {repo_root}")
|
||||
for scan_dir in scan_dirs:
|
||||
print(f" Scanning: {scan_dir.relative_to(repo_root)}")
|
||||
except RuntimeError as e:
|
||||
print(f"Error: {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# Find all #include directives.
|
||||
print("\nScanning for raw includes...")
|
||||
raw_includes = []
|
||||
rawincludes_file = results_dir / "rawincludes.txt"
|
||||
|
||||
with open(rawincludes_file, "w", buffering=8192) as raw_f:
|
||||
for dir_path in scan_dirs:
|
||||
for file_path in dir_path.rglob("*"):
|
||||
if not file_path.is_file():
|
||||
continue
|
||||
try:
|
||||
rel_path_str = str(file_path.relative_to(repo_root))
|
||||
with open(
|
||||
file_path, "r", encoding="utf-8", errors="ignore", buffering=8192
|
||||
) as f:
|
||||
for line in f:
|
||||
if "#include" not in line or "boost" in line:
|
||||
continue
|
||||
if INCLUDE_PATTERN.match(line):
|
||||
line_stripped = line.strip()
|
||||
entry = f"{rel_path_str}:{line_stripped}\n"
|
||||
print(entry, end="")
|
||||
raw_f.write(entry)
|
||||
raw_includes.append((rel_path_str, line_stripped))
|
||||
except Exception as e:
|
||||
print(f"Error reading {file_path}: {e}", file=sys.stderr)
|
||||
|
||||
# Build levelization paths and count directly.
|
||||
print("Build levelization paths")
|
||||
path_counts = defaultdict(int)
|
||||
|
||||
for file_path, include_line in raw_includes:
|
||||
include_level = extract_include_level(include_line)
|
||||
if not include_level:
|
||||
continue
|
||||
level = get_level(file_path)
|
||||
if level != include_level:
|
||||
path_counts[(level, include_level)] += 1
|
||||
|
||||
# Sort and deduplicate paths.
|
||||
print("Sort and deduplicate paths")
|
||||
sorted_items = sorted(
|
||||
path_counts.items(),
|
||||
key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
|
||||
)
|
||||
|
||||
paths_file = results_dir / "paths.txt"
|
||||
with open(paths_file, "w") as f:
|
||||
for (level, include_level), count in sorted_items:
|
||||
line = f"{count:7} {level} {include_level}\n"
|
||||
print(line.rstrip())
|
||||
f.write(line)
|
||||
|
||||
# Split into flat-file database.
|
||||
print("Split into flat-file database")
|
||||
includes_dir = results_dir / "includes"
|
||||
includedby_dir = results_dir / "includedby"
|
||||
includes_dir.mkdir()
|
||||
includedby_dir.mkdir()
|
||||
|
||||
includes_data = defaultdict(list)
|
||||
includedby_data = defaultdict(list)
|
||||
|
||||
for (level, include_level), count in sorted_items:
|
||||
includes_data[level].append((include_level, count))
|
||||
includedby_data[include_level].append((level, count))
|
||||
|
||||
for level in sorted(includes_data.keys(), key=dictionary_sort_key):
|
||||
with open(includes_dir / level, "w") as f:
|
||||
for include_level, count in includes_data[level]:
|
||||
line = f"{include_level} {count}\n"
|
||||
print(line.rstrip())
|
||||
f.write(line)
|
||||
|
||||
for include_level in sorted(includedby_data.keys(), key=dictionary_sort_key):
|
||||
with open(includedby_dir / include_level, "w") as f:
|
||||
for level, count in includedby_data[include_level]:
|
||||
line = f"{level} {count}\n"
|
||||
print(line.rstrip())
|
||||
f.write(line)
|
||||
|
||||
# Search for loops.
|
||||
print("Search for loops")
|
||||
loops_file = results_dir / "loops.txt"
|
||||
ordering_file = results_dir / "ordering.txt"
|
||||
|
||||
# Pre-load all include files into memory for fast lookup.
|
||||
includes_cache = {}
|
||||
includes_lookup = {}
|
||||
|
||||
for include_file in sorted(includes_dir.iterdir(), key=lambda p: p.name):
|
||||
if not include_file.is_file():
|
||||
continue
|
||||
includes_cache[include_file.name] = []
|
||||
includes_lookup[include_file.name] = {}
|
||||
with open(include_file, "r") as f:
|
||||
for line in f:
|
||||
parts = line.strip().split()
|
||||
if len(parts) >= 2:
|
||||
name, count = parts[0], int(parts[1])
|
||||
includes_cache[include_file.name].append((name, count))
|
||||
includes_lookup[include_file.name][name] = count
|
||||
|
||||
loops_found = set()
|
||||
|
||||
with open(loops_file, "w", buffering=8192) as loops_f, open(
|
||||
ordering_file, "w", buffering=8192
|
||||
) as ordering_f:
|
||||
for source in sorted(includes_cache.keys()):
|
||||
for include, include_freq in includes_cache[source]:
|
||||
if include not in includes_lookup:
|
||||
continue
|
||||
|
||||
source_freq = includes_lookup[include].get(source)
|
||||
|
||||
if source_freq is not None:
|
||||
loop_key = tuple(sorted([source, include]))
|
||||
if loop_key in loops_found:
|
||||
continue
|
||||
loops_found.add(loop_key)
|
||||
|
||||
loops_f.write(f"Loop: {source} {include}\n")
|
||||
|
||||
diff = include_freq - source_freq
|
||||
if diff > 3:
|
||||
loops_f.write(f" {source} > {include}\n\n")
|
||||
elif diff < -3:
|
||||
loops_f.write(f" {include} > {source}\n\n")
|
||||
elif source_freq == include_freq:
|
||||
loops_f.write(f" {include} == {source}\n\n")
|
||||
else:
|
||||
loops_f.write(f" {include} ~= {source}\n\n")
|
||||
else:
|
||||
ordering_f.write(f"{source} > {include}\n")
|
||||
|
||||
# Print results.
|
||||
print("\nOrdering:")
|
||||
with open(ordering_file, "r") as f:
|
||||
print(f.read(), end="")
|
||||
|
||||
print("\nLoops:")
|
||||
with open(loops_file, "r") as f:
|
||||
print(f.read(), end="")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,130 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Usage: levelization.sh
|
||||
# This script takes no parameters, reads no environment variables,
|
||||
# and can be run from any directory, as long as it is in the expected
|
||||
# location in the repo.
|
||||
|
||||
pushd $( dirname $0 )
|
||||
|
||||
if [ -v PS1 ]
|
||||
then
|
||||
# if the shell is interactive, clean up any flotsam before analyzing
|
||||
git clean -ix
|
||||
fi
|
||||
|
||||
# Ensure all sorting is ASCII-order consistently across platforms.
|
||||
export LANG=C
|
||||
|
||||
rm -rfv results
|
||||
mkdir results
|
||||
includes="$( pwd )/results/rawincludes.txt"
|
||||
pushd ../..
|
||||
echo Raw includes:
|
||||
grep -r '^[ ]*#include.*/.*\.h' include src | \
|
||||
grep -v boost | tee ${includes}
|
||||
popd
|
||||
pushd results
|
||||
|
||||
oldifs=${IFS}
|
||||
IFS=:
|
||||
mkdir includes
|
||||
mkdir includedby
|
||||
echo Build levelization paths
|
||||
exec 3< ${includes} # open rawincludes.txt for input
|
||||
while read -r -u 3 file include
|
||||
do
|
||||
level=$( echo ${file} | cut -d/ -f 2,3 )
|
||||
# If the "level" indicates a file, cut off the filename
|
||||
if [[ "${level##*.}" != "${level}" ]]
|
||||
then
|
||||
# Use the "toplevel" label as a workaround for `sort`
|
||||
# inconsistencies between different utility versions
|
||||
level="$( dirname ${level} )/toplevel"
|
||||
fi
|
||||
level=$( echo ${level} | tr '/' '.' )
|
||||
|
||||
includelevel=$( echo ${include} | sed 's/.*["<]//; s/[">].*//' | \
|
||||
cut -d/ -f 1,2 )
|
||||
if [[ "${includelevel##*.}" != "${includelevel}" ]]
|
||||
then
|
||||
# Use the "toplevel" label as a workaround for `sort`
|
||||
# inconsistencies between different utility versions
|
||||
includelevel="$( dirname ${includelevel} )/toplevel"
|
||||
fi
|
||||
includelevel=$( echo ${includelevel} | tr '/' '.' )
|
||||
|
||||
if [[ "$level" != "$includelevel" ]]
|
||||
then
|
||||
echo $level $includelevel | tee -a paths.txt
|
||||
fi
|
||||
done
|
||||
echo Sort and dedup paths
|
||||
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
|
||||
mv sortedpaths.txt paths.txt
|
||||
exec 3>&- #close fd 3
|
||||
IFS=${oldifs}
|
||||
unset oldifs
|
||||
|
||||
echo Split into flat-file database
|
||||
exec 4<paths.txt # open paths.txt for input
|
||||
while read -r -u 4 count level include
|
||||
do
|
||||
echo ${include} ${count} | tee -a includes/${level}
|
||||
echo ${level} ${count} | tee -a includedby/${include}
|
||||
done
|
||||
exec 4>&- #close fd 4
|
||||
|
||||
loops="$( pwd )/loops.txt"
|
||||
ordering="$( pwd )/ordering.txt"
|
||||
pushd includes
|
||||
echo Search for loops
|
||||
# Redirect stdout to a file
|
||||
exec 4>&1
|
||||
exec 1>"${loops}"
|
||||
for source in *
|
||||
do
|
||||
if [[ -f "$source" ]]
|
||||
then
|
||||
exec 5<"${source}" # open for input
|
||||
while read -r -u 5 include includefreq
|
||||
do
|
||||
if [[ -f $include ]]
|
||||
then
|
||||
if grep -q -w $source $include
|
||||
then
|
||||
if grep -q -w "Loop: $include $source" "${loops}"
|
||||
then
|
||||
continue
|
||||
fi
|
||||
sourcefreq=$( grep -w $source $include | cut -d\ -f2 )
|
||||
echo "Loop: $source $include"
|
||||
# If the counts are close, indicate that the two modules are
|
||||
# on the same level, though they shouldn't be
|
||||
if [[ $(( $includefreq - $sourcefreq )) -gt 3 ]]
|
||||
then
|
||||
echo -e " $source > $include\n"
|
||||
elif [[ $(( $sourcefreq - $includefreq )) -gt 3 ]]
|
||||
then
|
||||
echo -e " $include > $source\n"
|
||||
elif [[ $sourcefreq -eq $includefreq ]]
|
||||
then
|
||||
echo -e " $include == $source\n"
|
||||
else
|
||||
echo -e " $include ~= $source\n"
|
||||
fi
|
||||
else
|
||||
echo "$source > $include" >> "${ordering}"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
exec 5>&- #close fd 5
|
||||
fi
|
||||
done
|
||||
exec 1>&4 #close fd 1
|
||||
exec 4>&- #close fd 4
|
||||
cat "${ordering}"
|
||||
cat "${loops}"
|
||||
popd
|
||||
popd
|
||||
popd
|
||||
@@ -26,7 +26,7 @@ Loop: xrpld.app xrpld.nodestore
|
||||
xrpld.app > xrpld.nodestore
|
||||
|
||||
Loop: xrpld.app xrpld.overlay
|
||||
xrpld.overlay ~= xrpld.app
|
||||
xrpld.overlay == xrpld.app
|
||||
|
||||
Loop: xrpld.app xrpld.peerfinder
|
||||
xrpld.app > xrpld.peerfinder
|
||||
@@ -47,7 +47,7 @@ Loop: xrpld.net xrpld.rpc
|
||||
xrpld.rpc > xrpld.net
|
||||
|
||||
Loop: xrpld.overlay xrpld.rpc
|
||||
xrpld.rpc ~= xrpld.overlay
|
||||
xrpld.rpc > xrpld.overlay
|
||||
|
||||
Loop: xrpld.perflog xrpld.rpc
|
||||
xrpld.rpc ~= xrpld.perflog
|
||||
|
||||
@@ -43,6 +43,7 @@ test.consensus > xrpld.app
|
||||
test.consensus > xrpld.consensus
|
||||
test.consensus > xrpld.core
|
||||
test.consensus > xrpld.ledger
|
||||
test.consensus > xrpl.json
|
||||
test.consensus > xrpl.protocol
|
||||
test.core > test.jtx
|
||||
test.core > test.toplevel
|
||||
|
||||
@@ -12,7 +12,7 @@ The server software that powers Xahau is called `xahaud` and is available in thi
|
||||
|
||||
### Build from Source
|
||||
|
||||
* [Read the build instructions in our documentation](https://xahau.network/infrastructure/building-xahau)
|
||||
* [Read the build instructions in our documentation](https://xahau.network/docs/infrastructure/build-xahaud/)
|
||||
* If you encounter any issues, please [open an issue](https://github.com/xahau/xahaud/issues)
|
||||
|
||||
## Highlights of Xahau
|
||||
|
||||
@@ -68,6 +68,17 @@ target_link_libraries(xrpl.imports.main
|
||||
$<$<BOOL:${voidstar}>:antithesis-sdk-cpp>
|
||||
)
|
||||
|
||||
# date-tz for enhanced logging (always linked, code is #ifdef guarded)
|
||||
if(TARGET date::date-tz)
|
||||
target_link_libraries(xrpl.imports.main INTERFACE date::date-tz)
|
||||
endif()
|
||||
|
||||
# BEAST_ENHANCED_LOGGING: enable for Debug builds OR when explicitly requested
|
||||
# Uses generator expression so it works with multi-config generators (Xcode, VS, Ninja Multi-Config)
|
||||
target_compile_definitions(xrpl.imports.main INTERFACE
|
||||
$<$<OR:$<CONFIG:Debug>,$<BOOL:${BEAST_ENHANCED_LOGGING}>>:BEAST_ENHANCED_LOGGING=1>
|
||||
)
|
||||
|
||||
include(add_module)
|
||||
include(target_link_modules)
|
||||
|
||||
|
||||
@@ -22,6 +22,9 @@ target_compile_definitions (opts
|
||||
$<$<BOOL:${beast_no_unit_test_inline}>:BEAST_NO_UNIT_TEST_INLINE=1>
|
||||
$<$<BOOL:${beast_disable_autolink}>:BEAST_DONT_AUTOLINK_TO_WIN32_LIBRARIES=1>
|
||||
$<$<BOOL:${single_io_service_thread}>:RIPPLE_SINGLE_IO_SERVICE_THREAD=1>
|
||||
# Enhanced logging is enabled for Debug builds, or explicitly via
|
||||
# -DBEAST_ENHANCED_LOGGING=ON for other build types.
|
||||
$<$<OR:$<CONFIG:Debug>,$<BOOL:${BEAST_ENHANCED_LOGGING}>>:BEAST_ENHANCED_LOGGING=1>
|
||||
$<$<BOOL:${voidstar}>:ENABLE_VOIDSTAR>)
|
||||
target_compile_options (opts
|
||||
INTERFACE
|
||||
|
||||
@@ -47,5 +47,8 @@
|
||||
#define MEM_OVERLAP -43
|
||||
#define TOO_MANY_STATE_MODIFICATIONS -44
|
||||
#define TOO_MANY_NAMESPACES -45
|
||||
#define EXPORT_FAILURE -46
|
||||
#define TOO_MANY_EXPORTED_TXN -47
|
||||
#define TOO_LITTLE_ENTROPY -48
|
||||
#define HOOK_ERROR_CODES
|
||||
#endif //HOOK_ERROR_CODES
|
||||
|
||||
@@ -336,5 +336,24 @@ prepare(
|
||||
uint32_t read_ptr,
|
||||
uint32_t read_len);
|
||||
|
||||
extern int64_t
|
||||
xport_reserve(uint32_t count);
|
||||
|
||||
extern int64_t
|
||||
xport(
|
||||
uint32_t write_ptr,
|
||||
uint32_t write_len,
|
||||
uint32_t read_ptr,
|
||||
uint32_t read_len);
|
||||
|
||||
extern int64_t
|
||||
xport_cancel(uint32_t ticket_seq);
|
||||
|
||||
extern int64_t
|
||||
dice(uint32_t sides);
|
||||
|
||||
extern int64_t
|
||||
random(uint32_t write_ptr, uint32_t write_len);
|
||||
|
||||
#define HOOK_EXTERN
|
||||
#endif // HOOK_EXTERN
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
#define sfUNLModifyDisabling ((16U << 16U) + 17U)
|
||||
#define sfHookResult ((16U << 16U) + 18U)
|
||||
#define sfWasLockingChainSend ((16U << 16U) + 19U)
|
||||
#define sfSidecarType ((16U << 16U) + 20U)
|
||||
#define sfLedgerEntryType ((1U << 16U) + 1U)
|
||||
#define sfTransactionType ((1U << 16U) + 2U)
|
||||
#define sfSignerWeight ((1U << 16U) + 3U)
|
||||
@@ -22,6 +23,8 @@
|
||||
#define sfHookApiVersion ((1U << 16U) + 20U)
|
||||
#define sfHookStateScale ((1U << 16U) + 21U)
|
||||
#define sfLedgerFixType ((1U << 16U) + 22U)
|
||||
#define sfHookExportCount ((1U << 16U) + 98U)
|
||||
#define sfEntropyCount ((1U << 16U) + 99U)
|
||||
#define sfNetworkID ((2U << 16U) + 1U)
|
||||
#define sfFlags ((2U << 16U) + 2U)
|
||||
#define sfSourceTag ((2U << 16U) + 3U)
|
||||
@@ -80,6 +83,7 @@
|
||||
#define sfRewardTime ((2U << 16U) + 98U)
|
||||
#define sfRewardLgrFirst ((2U << 16U) + 99U)
|
||||
#define sfRewardLgrLast ((2U << 16U) + 100U)
|
||||
#define sfCancelTicketSequence ((2U << 16U) + 101U)
|
||||
#define sfIndexNext ((3U << 16U) + 1U)
|
||||
#define sfIndexPrevious ((3U << 16U) + 2U)
|
||||
#define sfBookNode ((3U << 16U) + 3U)
|
||||
@@ -159,6 +163,7 @@
|
||||
#define sfEmittedTxnID ((5U << 16U) + 97U)
|
||||
#define sfGovernanceMarks ((5U << 16U) + 98U)
|
||||
#define sfGovernanceFlags ((5U << 16U) + 99U)
|
||||
#define sfEntropyDigest ((5U << 16U) + 100U)
|
||||
#define sfNumber ((9U << 16U) + 1U)
|
||||
#define sfAmount ((6U << 16U) + 1U)
|
||||
#define sfBalance ((6U << 16U) + 2U)
|
||||
@@ -286,6 +291,7 @@
|
||||
#define sfXChainCreateAccountAttestationCollectionElement ((14U << 16U) + 31U)
|
||||
#define sfPriceData ((14U << 16U) + 32U)
|
||||
#define sfCredential ((14U << 16U) + 33U)
|
||||
#define sfExportedTxn ((14U << 16U) + 90U)
|
||||
#define sfAmountEntry ((14U << 16U) + 91U)
|
||||
#define sfMintURIToken ((14U << 16U) + 92U)
|
||||
#define sfHookEmission ((14U << 16U) + 93U)
|
||||
@@ -293,6 +299,7 @@
|
||||
#define sfActiveValidator ((14U << 16U) + 95U)
|
||||
#define sfGenesisMint ((14U << 16U) + 96U)
|
||||
#define sfRemark ((14U << 16U) + 97U)
|
||||
#define sfExportResult ((14U << 16U) + 98U)
|
||||
#define sfSigners ((15U << 16U) + 3U)
|
||||
#define sfSignerEntries ((15U << 16U) + 4U)
|
||||
#define sfTemplate ((15U << 16U) + 5U)
|
||||
|
||||
@@ -61,6 +61,7 @@
|
||||
#define ttNFTOKEN_MODIFY 70
|
||||
#define ttPERMISSIONED_DOMAIN_SET 71
|
||||
#define ttPERMISSIONED_DOMAIN_DELETE 72
|
||||
#define ttEXPORT 91
|
||||
#define ttCRON 92
|
||||
#define ttCRON_SET 93
|
||||
#define ttREMARKS_SET 94
|
||||
@@ -74,3 +75,4 @@
|
||||
#define ttUNL_MODIFY 102
|
||||
#define ttEMIT_FAILURE 103
|
||||
#define ttUNL_REPORT 104
|
||||
#define ttCONSENSUS_ENTROPY 105
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
#define uint256 std::string
|
||||
#define featureHooksUpdate1 "1"
|
||||
#define featureHooksUpdate2 "1"
|
||||
#define featureExport "1"
|
||||
#define featureConsensusEntropy "1"
|
||||
#define fix20250131 "1"
|
||||
namespace hook_api {
|
||||
struct Rules
|
||||
@@ -383,7 +385,10 @@ enum hook_return_code : int64_t {
|
||||
MEM_OVERLAP = -43, // one or more specified buffers are the same memory
|
||||
TOO_MANY_STATE_MODIFICATIONS = -44, // more than 5000 modified state
|
||||
// entires in the combined hook chains
|
||||
TOO_MANY_NAMESPACES = -45
|
||||
TOO_MANY_NAMESPACES = -45,
|
||||
EXPORT_FAILURE = -46,
|
||||
TOO_MANY_EXPORTED_TXN = -47,
|
||||
TOO_LITTLE_ENTROPY = -48,
|
||||
};
|
||||
|
||||
enum ExitType : uint8_t {
|
||||
@@ -397,6 +402,7 @@ const uint16_t max_state_modifications = 256;
|
||||
const uint8_t max_slots = 255;
|
||||
const uint8_t max_nonce = 255;
|
||||
const uint8_t max_emit = 255;
|
||||
const uint8_t max_export = 2;
|
||||
const uint8_t max_params = 16;
|
||||
const double fee_base_multiplier = 1.1f;
|
||||
|
||||
@@ -437,10 +443,6 @@ getImportWhitelist(Rules const& rules)
|
||||
return whitelist;
|
||||
}
|
||||
|
||||
#undef HOOK_API_DEFINITION
|
||||
#undef I32
|
||||
#undef I64
|
||||
|
||||
enum GuardRulesVersion : uint64_t {
|
||||
GuardRuleFix20250131 = 0x00000001,
|
||||
};
|
||||
|
||||
@@ -372,3 +372,28 @@ HOOK_API_DEFINITION(
|
||||
HOOK_API_DEFINITION(
|
||||
int64_t, prepare, (uint32_t, uint32_t, uint32_t, uint32_t),
|
||||
featureHooksUpdate2)
|
||||
|
||||
// int64_t xport_reserve(uint32_t count);
|
||||
HOOK_API_DEFINITION(
|
||||
int64_t, xport_reserve, (uint32_t),
|
||||
featureExport)
|
||||
|
||||
// int64_t xport(uint32_t write_ptr, uint32_t write_len, uint32_t read_ptr, uint32_t read_len);
|
||||
HOOK_API_DEFINITION(
|
||||
int64_t, xport, (uint32_t, uint32_t, uint32_t, uint32_t),
|
||||
featureExport)
|
||||
|
||||
// int64_t xport_cancel(uint32_t ticket_seq);
|
||||
HOOK_API_DEFINITION(
|
||||
int64_t, xport_cancel, (uint32_t),
|
||||
featureExport)
|
||||
|
||||
// int64_t dice(uint32_t sides);
|
||||
HOOK_API_DEFINITION(
|
||||
int64_t, dice, (uint32_t),
|
||||
featureConsensusEntropy)
|
||||
|
||||
// int64_t random(uint32_t write_ptr, uint32_t write_len);
|
||||
HOOK_API_DEFINITION(
|
||||
int64_t, random, (uint32_t, uint32_t),
|
||||
featureConsensusEntropy)
|
||||
|
||||
2
include/xrpl/proto/.clang-format
Normal file
2
include/xrpl/proto/.clang-format
Normal file
@@ -0,0 +1,2 @@
|
||||
---
|
||||
DisableFormat: true
|
||||
@@ -166,6 +166,14 @@ message TMProposeSet
|
||||
|
||||
// Number of hops traveled
|
||||
optional uint32 hops = 12 [deprecated=true];
|
||||
|
||||
// Export signatures for pending exports seen in the proposal set. The
|
||||
// proposal's ExtendedPosition includes a digest of this repeated field, so
|
||||
// these side-channel blobs are covered by the proposal signature.
|
||||
// Each entry is: txnHash (32 bytes) + validator pubkey (33 bytes)
|
||||
// + multisign signature (variable length). Validators attach these
|
||||
// so export quorum can be reached within the same consensus round.
|
||||
repeated bytes exportSignatures = 13;
|
||||
}
|
||||
|
||||
enum TxSetStatus
|
||||
@@ -384,4 +392,3 @@ message TMHaveTransactions
|
||||
{
|
||||
repeated bytes hashes = 1;
|
||||
}
|
||||
|
||||
|
||||
33
include/xrpl/protocol/ExportLimits.h
Normal file
33
include/xrpl/protocol/ExportLimits.h
Normal file
@@ -0,0 +1,33 @@
|
||||
#ifndef RIPPLE_PROTOCOL_EXPORT_LIMITS_H_INCLUDED
|
||||
#define RIPPLE_PROTOCOL_EXPORT_LIMITS_H_INCLUDED
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
// Export system caps.
|
||||
//
|
||||
// These limits bound the DoS surface of the export signature system:
|
||||
// - Each pending export requires every validator to sign it every round
|
||||
// (sign-once, attach once via TMProposeSet)
|
||||
// - Inbound signature processing involves crypto verification per sig
|
||||
// - The open-ledger cap (maxPendingExports) is the root constraint;
|
||||
// signing throughput and inbound processing are transitively bounded by it
|
||||
struct ExportLimits
|
||||
{
|
||||
// Maximum exports a single hook execution may produce
|
||||
// (also enforced by hook_api::max_export in Enum.h)
|
||||
static constexpr std::uint8_t maxExportsPerHook = 2;
|
||||
|
||||
// Maximum pending export transactions in an open/apply ledger.
|
||||
// Hook-emitted export backlog drains into the open ledger at this cap.
|
||||
// This transitively caps:
|
||||
// - signatures per TMProposeSet message (1 per pending export)
|
||||
// - inbound proposal signature processing (clamped to this)
|
||||
// - validator signing work per round
|
||||
static constexpr std::uint8_t maxPendingExports = 8;
|
||||
};
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
#endif
|
||||
@@ -80,7 +80,7 @@ namespace detail {
|
||||
// Feature.cpp. Because it's only used to reserve storage, and determine how
|
||||
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
|
||||
// the actual number of amendments. A LogicError on startup will verify this.
|
||||
static constexpr std::size_t numFeatures = 113;
|
||||
static constexpr std::size_t numFeatures = 115;
|
||||
|
||||
/** Amendments that this server supports and the default voting behavior.
|
||||
Whether they are enabled depends on the Rules defined in the validated
|
||||
|
||||
@@ -96,6 +96,9 @@ enum class HashPrefix : std::uint32_t {
|
||||
|
||||
/** Credentials signature */
|
||||
credential = detail::make_hash_prefix('C', 'R', 'D'),
|
||||
|
||||
/** consensus extension sidecar object */
|
||||
sidecar = detail::make_hash_prefix('S', 'C', 'R'),
|
||||
};
|
||||
|
||||
template <class Hasher>
|
||||
|
||||
@@ -62,6 +62,9 @@ emittedDir() noexcept;
|
||||
Keylet
|
||||
emittedTxn(uint256 const& id) noexcept;
|
||||
|
||||
Keylet
|
||||
shadowTicket(AccountID const& account, std::uint32_t ticketSeq) noexcept;
|
||||
|
||||
Keylet
|
||||
hookDefinition(uint256 const& hash) noexcept;
|
||||
|
||||
@@ -118,6 +121,10 @@ negativeUNL() noexcept;
|
||||
Keylet const&
|
||||
UNLReport() noexcept;
|
||||
|
||||
/** The (fixed) index of the object containing consensus-derived entropy. */
|
||||
Keylet const&
|
||||
consensusEntropy() noexcept;
|
||||
|
||||
/** The beginning of an order book */
|
||||
struct book_t
|
||||
{
|
||||
|
||||
21
include/xrpl/protocol/SidecarType.h
Normal file
21
include/xrpl/protocol/SidecarType.h
Normal file
@@ -0,0 +1,21 @@
|
||||
#ifndef RIPPLE_PROTOCOL_SIDECAR_TYPE_H_INCLUDED
|
||||
#define RIPPLE_PROTOCOL_SIDECAR_TYPE_H_INCLUDED
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
/// Discriminator for sidecar set entries (SHAMap leaves used for
|
||||
/// consensus extension data: RNG commit/reveal, export signatures).
|
||||
///
|
||||
/// Stored in sfSidecarType (UINT8) on each STObject entry.
|
||||
/// Makes sidecar sets self-describing — no content-sniffing needed.
|
||||
enum SidecarType : std::uint8_t {
|
||||
sidecarRngCommit = 1,
|
||||
sidecarRngReveal = 2,
|
||||
sidecarExportSig = 3,
|
||||
};
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
#endif
|
||||
@@ -68,6 +68,7 @@ enum TELcodes : TERUnderlyingType {
|
||||
telNON_LOCAL_EMITTED_TXN,
|
||||
telIMPORT_VL_KEY_NOT_RECOGNISED,
|
||||
telCAN_NOT_QUEUE_IMPORT,
|
||||
telSHADOW_TICKET_REQUIRED,
|
||||
telENV_RPC_FAILED,
|
||||
};
|
||||
|
||||
@@ -233,8 +234,10 @@ enum TERcodes : TERUnderlyingType {
|
||||
terQUEUED, // Transaction is being held in TxQ until fee drops
|
||||
terPRE_TICKET, // Ticket is not yet in ledger but might be on its way
|
||||
terNO_AMM, // AMM doesn't exist for the asset pair
|
||||
terNO_HOOK // Transaction requires a non-existent hook definition
|
||||
terNO_HOOK, // Transaction requires a non-existent hook definition
|
||||
// (referenced by sfHookHash)
|
||||
terRETRY_EXPORT // Export does not yet have enough validator signatures.
|
||||
// Retained in retriable set for next ledger.
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
@@ -362,6 +365,7 @@ enum TECcodes : TERUnderlyingType {
|
||||
tecARRAY_TOO_LARGE = 197,
|
||||
tecLOCKED = 198,
|
||||
tecBAD_CREDENTIALS = 199,
|
||||
tecEXPORT_EXPIRED = 200,
|
||||
tecLAST_POSSIBLE_ENTRY = 255,
|
||||
};
|
||||
|
||||
|
||||
@@ -274,6 +274,13 @@ enum BridgeModifyFlags : uint32_t {
|
||||
tfClearAccountCreateAmount = 0x00010000,
|
||||
};
|
||||
constexpr std::uint32_t tfBridgeModifyMask = ~(tfUniversal | tfClearAccountCreateAmount);
|
||||
|
||||
// ConsensusEntropy flags (used on ttCONSENSUS_ENTROPY SHAMap entries):
|
||||
enum ConsensusEntropyFlags : uint32_t {
|
||||
tfEntropyCommit = 0x00000001, // entry is a commitment in commitSet
|
||||
tfEntropyReveal = 0x00000002, // entry is a reveal in entropySet
|
||||
};
|
||||
// flag=0 (no tfEntropyCommit/tfEntropyReveal) = final injected pseudo-tx
|
||||
// clang-format on
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
@@ -140,6 +140,12 @@ public:
|
||||
mHookEmissions = hookEmissions;
|
||||
}
|
||||
|
||||
void
|
||||
setExportResult(STObject const& exportResult)
|
||||
{
|
||||
mExportResult = exportResult;
|
||||
}
|
||||
|
||||
bool
|
||||
hasHookExecutions() const
|
||||
{
|
||||
@@ -152,6 +158,12 @@ public:
|
||||
return static_cast<bool>(mHookEmissions);
|
||||
}
|
||||
|
||||
bool
|
||||
hasExportResult() const
|
||||
{
|
||||
return static_cast<bool>(mExportResult);
|
||||
}
|
||||
|
||||
STAmount
|
||||
getDeliveredAmount() const
|
||||
{
|
||||
@@ -176,6 +188,7 @@ private:
|
||||
std::optional<STAmount> mDelivered;
|
||||
std::optional<STArray> mHookExecutions;
|
||||
std::optional<STArray> mHookEmissions;
|
||||
std::optional<STObject> mExportResult;
|
||||
|
||||
STArray mNodes;
|
||||
};
|
||||
|
||||
@@ -56,6 +56,8 @@ XRPL_FEATURE(AMM, Supported::yes, VoteBehavior::DefaultNo
|
||||
XRPL_FIX (ReducedOffersV1, Supported::yes, VoteBehavior::DefaultYes)
|
||||
XRPL_FEATURE(HooksUpdate2, Supported::yes, VoteBehavior::DefaultNo);
|
||||
XRPL_FEATURE(HookOnV2, Supported::yes, VoteBehavior::DefaultNo);
|
||||
XRPL_FEATURE(Export, Supported::yes, VoteBehavior::DefaultNo);
|
||||
XRPL_FEATURE(ConsensusEntropy, Supported::yes, VoteBehavior::DefaultNo);
|
||||
XRPL_FIX (HookAPI20251128, Supported::yes, VoteBehavior::DefaultYes);
|
||||
XRPL_FIX (CronStacking, Supported::yes, VoteBehavior::DefaultYes);
|
||||
XRPL_FEATURE(ExtendedHookState, Supported::yes, VoteBehavior::DefaultNo);
|
||||
|
||||
@@ -223,6 +223,20 @@ LEDGER_ENTRY(ltURI_TOKEN, 0x0055, URIToken, uri_token, ({
|
||||
{sfPreviousTxnLgrSeq, soeREQUIRED},
|
||||
}))
|
||||
|
||||
/** The ledger object which stores consensus-derived entropy.
|
||||
|
||||
\note This is a singleton: only one such object exists in the ledger.
|
||||
|
||||
\sa keylet::consensusEntropy
|
||||
*/
|
||||
LEDGER_ENTRY_DUPLICATE(ltCONSENSUS_ENTROPY, 0x0058, ConsensusEntropy, consensus_entropy, ({
|
||||
{sfDigest, soeREQUIRED},
|
||||
{sfEntropyCount, soeREQUIRED},
|
||||
{sfLedgerSequence, soeREQUIRED},
|
||||
{sfPreviousTxnID, soeREQUIRED},
|
||||
{sfPreviousTxnLgrSeq, soeREQUIRED},
|
||||
}))
|
||||
|
||||
/** A ledger object which describes an account.
|
||||
|
||||
\sa keylet::account
|
||||
@@ -590,6 +604,22 @@ LEDGER_ENTRY(ltDID, 0x008D, DID, did, ({
|
||||
{sfPreviousTxnLgrSeq, soeREQUIRED},
|
||||
}))
|
||||
|
||||
//@@start shadow-ticket-ledger-entry
|
||||
/** A shadow ticket for export replay protection.
|
||||
|
||||
Created when a transaction is exported. Consumed when
|
||||
proof-of-execution is imported back. Account-owned (pays reserve).
|
||||
|
||||
\sa keylet::shadowTicket
|
||||
*/
|
||||
LEDGER_ENTRY(ltSHADOW_TICKET, 0x5374, ShadowTicket, shadow_ticket, ({
|
||||
{sfAccount, soeREQUIRED},
|
||||
{sfTicketSequence, soeREQUIRED},
|
||||
{sfTransactionHash, soeREQUIRED},
|
||||
{sfLedgerSequence, soeREQUIRED},
|
||||
{sfOwnerNode, soeREQUIRED},
|
||||
}))
|
||||
//@@end shadow-ticket-ledger-entry
|
||||
|
||||
#undef EXPAND
|
||||
#undef LEDGER_ENTRY_DUPLICATE
|
||||
|
||||
|
||||
@@ -42,6 +42,7 @@ TYPED_SFIELD(sfTickSize, UINT8, 16)
|
||||
TYPED_SFIELD(sfUNLModifyDisabling, UINT8, 17)
|
||||
TYPED_SFIELD(sfHookResult, UINT8, 18)
|
||||
TYPED_SFIELD(sfWasLockingChainSend, UINT8, 19)
|
||||
TYPED_SFIELD(sfSidecarType, UINT8, 20)
|
||||
|
||||
// 16-bit integers (common)
|
||||
TYPED_SFIELD(sfLedgerEntryType, UINT16, 1, SField::sMD_Never)
|
||||
@@ -59,6 +60,8 @@ TYPED_SFIELD(sfHookExecutionIndex, UINT16, 19)
|
||||
TYPED_SFIELD(sfHookApiVersion, UINT16, 20)
|
||||
TYPED_SFIELD(sfHookStateScale, UINT16, 21)
|
||||
TYPED_SFIELD(sfLedgerFixType, UINT16, 22)
|
||||
TYPED_SFIELD(sfHookExportCount, UINT16, 98)
|
||||
TYPED_SFIELD(sfEntropyCount, UINT16, 99)
|
||||
|
||||
// 32-bit integers (common)
|
||||
TYPED_SFIELD(sfNetworkID, UINT32, 1)
|
||||
@@ -123,6 +126,7 @@ TYPED_SFIELD(sfImportSequence, UINT32, 97)
|
||||
TYPED_SFIELD(sfRewardTime, UINT32, 98)
|
||||
TYPED_SFIELD(sfRewardLgrFirst, UINT32, 99)
|
||||
TYPED_SFIELD(sfRewardLgrLast, UINT32, 100)
|
||||
TYPED_SFIELD(sfCancelTicketSequence, UINT32, 101)
|
||||
|
||||
// 64-bit integers (common)
|
||||
TYPED_SFIELD(sfIndexNext, UINT64, 1)
|
||||
@@ -217,6 +221,7 @@ TYPED_SFIELD(sfHookCanEmit, UINT256, 96)
|
||||
TYPED_SFIELD(sfEmittedTxnID, UINT256, 97)
|
||||
TYPED_SFIELD(sfGovernanceMarks, UINT256, 98)
|
||||
TYPED_SFIELD(sfGovernanceFlags, UINT256, 99)
|
||||
TYPED_SFIELD(sfEntropyDigest, UINT256, 100)
|
||||
|
||||
// number (common)
|
||||
TYPED_SFIELD(sfNumber, NUMBER, 1)
|
||||
@@ -379,6 +384,7 @@ UNTYPED_SFIELD(sfXChainClaimAttestationCollectionElement, OBJECT, 30)
|
||||
UNTYPED_SFIELD(sfXChainCreateAccountAttestationCollectionElement, OBJECT, 31)
|
||||
UNTYPED_SFIELD(sfPriceData, OBJECT, 32)
|
||||
UNTYPED_SFIELD(sfCredential, OBJECT, 33)
|
||||
UNTYPED_SFIELD(sfExportedTxn, OBJECT, 90)
|
||||
UNTYPED_SFIELD(sfAmountEntry, OBJECT, 91)
|
||||
UNTYPED_SFIELD(sfMintURIToken, OBJECT, 92)
|
||||
UNTYPED_SFIELD(sfHookEmission, OBJECT, 93)
|
||||
@@ -386,6 +392,7 @@ UNTYPED_SFIELD(sfImportVLKey, OBJECT, 94)
|
||||
UNTYPED_SFIELD(sfActiveValidator, OBJECT, 95)
|
||||
UNTYPED_SFIELD(sfGenesisMint, OBJECT, 96)
|
||||
UNTYPED_SFIELD(sfRemark, OBJECT, 97)
|
||||
UNTYPED_SFIELD(sfExportResult, OBJECT, 98)
|
||||
|
||||
// array of objects (common)
|
||||
// ARRAY/1 is reserved for end of array
|
||||
|
||||
@@ -500,6 +500,17 @@ TRANSACTION(ttPERMISSIONED_DOMAIN_DELETE, 72, PermissionedDomainDelete, ({
|
||||
{sfDomainID, soeREQUIRED},
|
||||
}))
|
||||
|
||||
//@@start export-transaction-types
|
||||
/* User-submittable export: creates a cross-chain transaction for
|
||||
validator signing. Retries via terRETRY_EXPORT until quorum.
|
||||
Also supports shadow ticket cancellation via sfCancelTicketSequence.
|
||||
At least one of sfExportedTxn or sfCancelTicketSequence must be present. */
|
||||
TRANSACTION(ttEXPORT, 91, Export, ({
|
||||
{sfExportedTxn, soeOPTIONAL},
|
||||
{sfCancelTicketSequence, soeOPTIONAL},
|
||||
}))
|
||||
//@@end export-transaction-types
|
||||
|
||||
/* A pseudo-txn alarm signal for invoking a hook, emitted by validators after alarm set conditions are met */
|
||||
TRANSACTION(ttCRON, 92, Cron, ({
|
||||
{sfOwner, soeREQUIRED},
|
||||
@@ -605,3 +616,10 @@ TRANSACTION(ttUNL_REPORT, 104, UNLReport, ({
|
||||
{sfActiveValidator, soeOPTIONAL},
|
||||
{sfImportVLKey, soeOPTIONAL},
|
||||
}))
|
||||
|
||||
TRANSACTION(ttCONSENSUS_ENTROPY, 105, ConsensusEntropy, ({
|
||||
{sfLedgerSequence, soeREQUIRED},
|
||||
{sfDigest, soeREQUIRED},
|
||||
{sfEntropyCount, soeREQUIRED},
|
||||
{sfBlob, soeOPTIONAL},
|
||||
}))
|
||||
|
||||
@@ -109,14 +109,22 @@ public:
|
||||
Consumer
|
||||
newInboundEndpoint(beast::IP::Endpoint const& address)
|
||||
{
|
||||
//@@start rng-local-testnet-resource-bucket
|
||||
// Inbound connections from the same IP normally share one
|
||||
// resource bucket (port stripped) for DoS protection. For
|
||||
// loopback addresses, preserve the port so local testnet nodes
|
||||
// each get their own bucket instead of all sharing one.
|
||||
auto const key = is_loopback(address) ? address : address.at_port(0);
|
||||
//@@end rng-local-testnet-resource-bucket
|
||||
|
||||
Entry* entry(nullptr);
|
||||
|
||||
{
|
||||
std::lock_guard _(lock_);
|
||||
auto [resultIt, resultInserted] = table_.emplace(
|
||||
std::piecewise_construct,
|
||||
std::make_tuple(kindInbound, address.at_port(0)), // Key
|
||||
std::make_tuple(m_clock.now())); // Entry
|
||||
std::make_tuple(kindInbound, key),
|
||||
std::make_tuple(m_clock.now()));
|
||||
|
||||
entry = &resultIt->second;
|
||||
entry->key = &resultIt->first;
|
||||
|
||||
@@ -31,6 +31,7 @@
|
||||
#include <cassert>
|
||||
#include <cstring>
|
||||
#include <ctime>
|
||||
#include <exception>
|
||||
#include <fstream>
|
||||
#include <functional>
|
||||
#include <iostream>
|
||||
@@ -351,9 +352,18 @@ Logs::format(
|
||||
|
||||
if (useLocalTime)
|
||||
{
|
||||
auto now = std::chrono::system_clock::now();
|
||||
auto local = date::make_zoned(date::current_zone(), now);
|
||||
output = date::format(fmt, local);
|
||||
try
|
||||
{
|
||||
auto now = std::chrono::system_clock::now();
|
||||
auto local = date::make_zoned(date::current_zone(), now);
|
||||
output = date::format(fmt, local);
|
||||
}
|
||||
catch (std::exception const&)
|
||||
{
|
||||
// Enhanced logging should not make startup fatal if tzdb lookup is
|
||||
// unavailable or misconfigured. Fall back to UTC formatting.
|
||||
output = date::format(fmt, std::chrono::system_clock::now());
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
@@ -72,6 +72,7 @@ enum class LedgerNameSpace : std::uint16_t {
|
||||
HOOK_DEFINITION = 'D',
|
||||
EMITTED_TXN = 'E',
|
||||
EMITTED_DIR = 'F',
|
||||
SHADOW_TICKET = 0x5374, // St
|
||||
NFTOKEN_OFFER = 'q',
|
||||
NFTOKEN_BUY_OFFERS = 'h',
|
||||
NFTOKEN_SELL_OFFERS = 'i',
|
||||
@@ -79,6 +80,7 @@ enum class LedgerNameSpace : std::uint16_t {
|
||||
IMPORT_VLSEQ = 'I',
|
||||
UNL_REPORT = 'R',
|
||||
CRON = 'L',
|
||||
CONSENSUS_ENTROPY = 'X',
|
||||
AMM = 'A',
|
||||
BRIDGE = 'H',
|
||||
XCHAIN_CLAIM_ID = 'Q',
|
||||
@@ -186,6 +188,15 @@ emittedTxn(uint256 const& id) noexcept
|
||||
return {ltEMITTED_TXN, indexHash(LedgerNameSpace::EMITTED_TXN, id)};
|
||||
}
|
||||
|
||||
Keylet
|
||||
shadowTicket(AccountID const& account, std::uint32_t ticketSeq) noexcept
|
||||
{
|
||||
return {
|
||||
ltSHADOW_TICKET,
|
||||
indexHash(
|
||||
LedgerNameSpace::SHADOW_TICKET, account, std::uint32_t(ticketSeq))};
|
||||
}
|
||||
|
||||
Keylet
|
||||
hook(AccountID const& id) noexcept
|
||||
{
|
||||
@@ -544,6 +555,14 @@ cron(uint32_t timestamp, std::optional<AccountID> const& id)
|
||||
return {ltCRON, uint256::fromVoid(h)};
|
||||
}
|
||||
|
||||
Keylet const&
|
||||
consensusEntropy() noexcept
|
||||
{
|
||||
static Keylet const ret{
|
||||
ltCONSENSUS_ENTROPY, indexHash(LedgerNameSpace::CONSENSUS_ENTROPY)};
|
||||
return ret;
|
||||
}
|
||||
|
||||
Keylet
|
||||
amm(Asset const& issue1, Asset const& issue2) noexcept
|
||||
{
|
||||
|
||||
@@ -78,6 +78,7 @@ InnerObjectFormats::InnerObjectFormats()
|
||||
{sfHookExecutionIndex, soeREQUIRED},
|
||||
{sfHookStateChangeCount, soeREQUIRED},
|
||||
{sfHookEmitCount, soeREQUIRED},
|
||||
{sfHookExportCount, soeOPTIONAL},
|
||||
{sfFlags, soeOPTIONAL}});
|
||||
|
||||
add(sfHookEmission.jsonName,
|
||||
|
||||
@@ -684,7 +684,8 @@ isPseudoTx(STObject const& tx)
|
||||
|
||||
auto tt = safe_cast<TxType>(*t);
|
||||
return tt == ttAMENDMENT || tt == ttFEE || tt == ttUNL_MODIFY ||
|
||||
tt == ttEMIT_FAILURE || tt == ttUNL_REPORT || tt == ttCRON;
|
||||
tt == ttEMIT_FAILURE || tt == ttUNL_REPORT || tt == ttCRON ||
|
||||
tt == ttCONSENSUS_ENTROPY;
|
||||
}
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
@@ -124,6 +124,7 @@ transResults()
|
||||
MAKE_ERROR(tecARRAY_TOO_LARGE, "Array is too large."),
|
||||
MAKE_ERROR(tecLOCKED, "Fund is locked."),
|
||||
MAKE_ERROR(tecBAD_CREDENTIALS, "Bad credentials."),
|
||||
MAKE_ERROR(tecEXPORT_EXPIRED, "Export expired without reaching signature quorum."),
|
||||
|
||||
MAKE_ERROR(tefALREADY, "The exact transaction was already in this ledger."),
|
||||
MAKE_ERROR(tefBAD_ADD_AUTH, "Not authorized to add account."),
|
||||
@@ -171,6 +172,7 @@ transResults()
|
||||
MAKE_ERROR(telNON_LOCAL_EMITTED_TXN, "Emitted transaction cannot be applied because it was not generated locally."),
|
||||
MAKE_ERROR(telIMPORT_VL_KEY_NOT_RECOGNISED, "Import vl key was not recognized."),
|
||||
MAKE_ERROR(telCAN_NOT_QUEUE_IMPORT, "Import transaction was not able to be directly applied and cannot be queued."),
|
||||
MAKE_ERROR(telSHADOW_TICKET_REQUIRED, "The imported transaction uses a TicketSequence but no shadow ticket exists."),
|
||||
MAKE_ERROR(telENV_RPC_FAILED, "Unit test RPC failure."),
|
||||
|
||||
MAKE_ERROR(temMALFORMED, "Malformed transaction."),
|
||||
@@ -238,6 +240,7 @@ transResults()
|
||||
MAKE_ERROR(terPRE_TICKET, "Ticket is not yet in ledger."),
|
||||
MAKE_ERROR(terNO_HOOK, "No hook with that hash exists on the ledger."),
|
||||
MAKE_ERROR(terNO_AMM, "AMM doesn't exist for the asset pair."),
|
||||
MAKE_ERROR(terRETRY_EXPORT, "Export awaiting validator signatures."),
|
||||
|
||||
MAKE_ERROR(tesSUCCESS, "The transaction was applied. Only final in a validated ledger."),
|
||||
MAKE_ERROR(tesPARTIAL, "The transaction was applied but should be submitted again until returning tesSUCCESS."),
|
||||
|
||||
@@ -49,6 +49,11 @@ TxMeta::TxMeta(
|
||||
|
||||
if (obj.isFieldPresent(sfHookEmissions))
|
||||
setHookEmissions(obj.getFieldArray(sfHookEmissions));
|
||||
|
||||
if (obj.isFieldPresent(sfExportResult))
|
||||
setExportResult(const_cast<STObject&>(obj)
|
||||
.getField(sfExportResult)
|
||||
.downcast<STObject>());
|
||||
}
|
||||
|
||||
TxMeta::TxMeta(uint256 const& txid, std::uint32_t ledger, STObject const& obj)
|
||||
@@ -75,6 +80,11 @@ TxMeta::TxMeta(uint256 const& txid, std::uint32_t ledger, STObject const& obj)
|
||||
|
||||
if (obj.isFieldPresent(sfHookEmissions))
|
||||
setHookEmissions(obj.getFieldArray(sfHookEmissions));
|
||||
|
||||
if (obj.isFieldPresent(sfExportResult))
|
||||
setExportResult(const_cast<STObject&>(obj)
|
||||
.getField(sfExportResult)
|
||||
.downcast<STObject>());
|
||||
}
|
||||
|
||||
TxMeta::TxMeta(uint256 const& txid, std::uint32_t ledger, Blob const& vec)
|
||||
@@ -245,6 +255,14 @@ TxMeta::getAsObject() const
|
||||
if (hasHookEmissions())
|
||||
metaData.setFieldArray(sfHookEmissions, getHookEmissions());
|
||||
|
||||
if (hasExportResult())
|
||||
{
|
||||
Serializer s;
|
||||
mExportResult->add(s);
|
||||
SerialIter sit(s.slice());
|
||||
metaData.emplace_back(STObject(sit, sfExportResult));
|
||||
}
|
||||
|
||||
return metaData;
|
||||
}
|
||||
|
||||
|
||||
437
src/test/app/ConsensusEntropy_test.cpp
Normal file
437
src/test/app/ConsensusEntropy_test.cpp
Normal file
@@ -0,0 +1,437 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2026 XRPL Labs
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <test/app/ConsensusEntropy_test_hooks.h>
|
||||
#include <test/jtx.h>
|
||||
#include <test/jtx/hook.h>
|
||||
#include <xrpl/beast/unit_test.h>
|
||||
#include <xrpl/hook/Enum.h>
|
||||
#include <xrpl/protocol/Feature.h>
|
||||
#include <xrpl/protocol/Indexes.h>
|
||||
#include <xrpl/protocol/SField.h>
|
||||
#include <xrpl/protocol/TxFlags.h>
|
||||
#include <xrpl/protocol/jss.h>
|
||||
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
|
||||
using TestHook = std::vector<uint8_t> const&;
|
||||
|
||||
#define BEAST_REQUIRE(x) \
|
||||
{ \
|
||||
BEAST_EXPECT(!!(x)); \
|
||||
if (!(x)) \
|
||||
return; \
|
||||
}
|
||||
|
||||
#define HSFEE fee(100'000'000)
|
||||
#define M(m) memo(m, "", "")
|
||||
|
||||
class ConsensusEntropy_test : public beast::unit_test::suite
|
||||
{
|
||||
static void
|
||||
overrideFlag(Json::Value& jv)
|
||||
{
|
||||
jv[jss::Flags] = hsfOVERRIDE;
|
||||
}
|
||||
|
||||
void
|
||||
testSLECreated()
|
||||
{
|
||||
testcase("SLE created on ledger close");
|
||||
using namespace jtx;
|
||||
|
||||
Env env{
|
||||
*this,
|
||||
envconfig(),
|
||||
supported_amendments() | featureConsensusEntropy,
|
||||
nullptr};
|
||||
|
||||
BEAST_EXPECT(!env.le(keylet::consensusEntropy()));
|
||||
|
||||
env.close();
|
||||
|
||||
auto const sle = env.le(keylet::consensusEntropy());
|
||||
BEAST_REQUIRE(sle);
|
||||
|
||||
auto const digest = sle->getFieldH256(sfDigest);
|
||||
BEAST_EXPECT(digest != uint256{});
|
||||
|
||||
auto const count = sle->getFieldU16(sfEntropyCount);
|
||||
BEAST_EXPECT(count >= 5);
|
||||
|
||||
auto const sleSeq = sle->getFieldU32(sfLedgerSequence);
|
||||
BEAST_EXPECT(sleSeq == env.closed()->seq());
|
||||
}
|
||||
|
||||
void
|
||||
testSLEUpdatedOnSubsequentClose()
|
||||
{
|
||||
testcase("SLE updated on subsequent ledger close");
|
||||
using namespace jtx;
|
||||
|
||||
Env env{
|
||||
*this,
|
||||
envconfig(),
|
||||
supported_amendments() | featureConsensusEntropy,
|
||||
nullptr};
|
||||
|
||||
env.close();
|
||||
auto const sle1 = env.le(keylet::consensusEntropy());
|
||||
BEAST_REQUIRE(sle1);
|
||||
|
||||
auto const digest1 = sle1->getFieldH256(sfDigest);
|
||||
auto const seq1 = sle1->getFieldU32(sfLedgerSequence);
|
||||
|
||||
env.close();
|
||||
|
||||
auto const sle2 = env.le(keylet::consensusEntropy());
|
||||
BEAST_REQUIRE(sle2);
|
||||
|
||||
auto const digest2 = sle2->getFieldH256(sfDigest);
|
||||
auto const seq2 = sle2->getFieldU32(sfLedgerSequence);
|
||||
|
||||
BEAST_EXPECT(digest2 != digest1);
|
||||
BEAST_EXPECT(seq2 == seq1 + 1);
|
||||
}
|
||||
|
||||
void
|
||||
testNoSLEWithoutAmendment()
|
||||
{
|
||||
testcase("No SLE without amendment");
|
||||
using namespace jtx;
|
||||
|
||||
Env env{*this};
|
||||
|
||||
env.close();
|
||||
env.close();
|
||||
|
||||
BEAST_EXPECT(!env.le(keylet::consensusEntropy()));
|
||||
}
|
||||
|
||||
void
|
||||
testDice()
|
||||
{
|
||||
testcase("Hook dice() API");
|
||||
using namespace jtx;
|
||||
|
||||
Env env{
|
||||
*this,
|
||||
envconfig(),
|
||||
supported_amendments() | featureConsensusEntropy,
|
||||
nullptr};
|
||||
|
||||
auto const alice = Account{"alice"};
|
||||
env.fund(XRP(10000), alice);
|
||||
env.close();
|
||||
|
||||
// Entropy SLE must exist before hook can use dice()
|
||||
BEAST_REQUIRE(env.le(keylet::consensusEntropy()));
|
||||
|
||||
// Set the hook
|
||||
TestHook hook = consensusentropy_test_wasm[R"[test.hook](
|
||||
#include <stdint.h>
|
||||
extern int32_t _g(uint32_t, uint32_t);
|
||||
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t dice(uint32_t sides);
|
||||
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
|
||||
|
||||
int64_t hook(uint32_t r)
|
||||
{
|
||||
_g(1,1);
|
||||
|
||||
// dice(6) should return 0..5
|
||||
int64_t result = dice(6);
|
||||
|
||||
// negative means error
|
||||
if (result < 0)
|
||||
rollback(0, 0, result);
|
||||
|
||||
if (result >= 6)
|
||||
rollback(0, 0, -1);
|
||||
|
||||
// return the dice result as the accept code
|
||||
return accept(0, 0, result);
|
||||
}
|
||||
)[test.hook]"];
|
||||
|
||||
env(ripple::test::jtx::hook(alice, {{hso(hook, overrideFlag)}}, 0),
|
||||
M("set dice hook"),
|
||||
HSFEE);
|
||||
env.close();
|
||||
|
||||
// Invoke the hook
|
||||
Json::Value invoke;
|
||||
invoke[jss::TransactionType] = "Invoke";
|
||||
invoke[jss::Account] = alice.human();
|
||||
env(invoke, M("test dice"), fee(XRP(1)));
|
||||
|
||||
auto meta = env.meta();
|
||||
BEAST_REQUIRE(meta);
|
||||
BEAST_REQUIRE(meta->isFieldPresent(sfHookExecutions));
|
||||
|
||||
auto const hookExecutions = meta->getFieldArray(sfHookExecutions);
|
||||
BEAST_REQUIRE(hookExecutions.size() == 1);
|
||||
|
||||
auto const returnCode = hookExecutions[0].getFieldU64(sfHookReturnCode);
|
||||
std::cerr << " dice(6) returnCode = " << returnCode << " (hex 0x"
|
||||
<< std::hex << returnCode << std::dec << ")\n";
|
||||
// dice(6) returns 0..5
|
||||
BEAST_EXPECT(returnCode <= 5);
|
||||
|
||||
// Result should be 3 (accept)
|
||||
BEAST_EXPECT(hookExecutions[0].getFieldU8(sfHookResult) == 3);
|
||||
}
|
||||
|
||||
void
|
||||
testRandom()
|
||||
{
|
||||
testcase("Hook random() API");
|
||||
using namespace jtx;
|
||||
|
||||
Env env{
|
||||
*this,
|
||||
envconfig(),
|
||||
supported_amendments() | featureConsensusEntropy,
|
||||
nullptr};
|
||||
|
||||
auto const alice = Account{"alice"};
|
||||
env.fund(XRP(10000), alice);
|
||||
env.close();
|
||||
|
||||
BEAST_REQUIRE(env.le(keylet::consensusEntropy()));
|
||||
|
||||
// Hook calls random() to fill a 32-byte buffer, then checks
|
||||
// the buffer is not all zeroes.
|
||||
TestHook hook = consensusentropy_test_wasm[R"[test.hook](
|
||||
#include <stdint.h>
|
||||
extern int32_t _g(uint32_t, uint32_t);
|
||||
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t random(uint32_t write_ptr, uint32_t write_len);
|
||||
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
|
||||
|
||||
int64_t hook(uint32_t r)
|
||||
{
|
||||
_g(1,1);
|
||||
|
||||
uint8_t buf[32];
|
||||
for (int i = 0; GUARD(32), i < 32; ++i)
|
||||
buf[i] = 0;
|
||||
|
||||
int64_t result = random((uint32_t)buf, 32);
|
||||
|
||||
// Should return 32 (bytes written)
|
||||
if (result != 32)
|
||||
rollback(0, 0, result);
|
||||
|
||||
// Verify buffer is not all zeroes
|
||||
int nonzero = 0;
|
||||
for (int i = 0; GUARD(32), i < 32; ++i)
|
||||
if (buf[i] != 0) nonzero = 1;
|
||||
|
||||
if (!nonzero)
|
||||
rollback(0, 0, -2);
|
||||
|
||||
return accept(0, 0, 0);
|
||||
}
|
||||
)[test.hook]"];
|
||||
|
||||
env(ripple::test::jtx::hook(alice, {{hso(hook, overrideFlag)}}, 0),
|
||||
M("set random hook"),
|
||||
HSFEE);
|
||||
env.close();
|
||||
|
||||
Json::Value invoke;
|
||||
invoke[jss::TransactionType] = "Invoke";
|
||||
invoke[jss::Account] = alice.human();
|
||||
env(invoke, M("test random"), fee(XRP(1)));
|
||||
|
||||
auto meta = env.meta();
|
||||
BEAST_REQUIRE(meta);
|
||||
BEAST_REQUIRE(meta->isFieldPresent(sfHookExecutions));
|
||||
|
||||
auto const hookExecutions = meta->getFieldArray(sfHookExecutions);
|
||||
BEAST_REQUIRE(hookExecutions.size() == 1);
|
||||
|
||||
// Return code 0 = all checks passed in the hook
|
||||
BEAST_EXPECT(hookExecutions[0].getFieldU64(sfHookReturnCode) == 0);
|
||||
BEAST_EXPECT(hookExecutions[0].getFieldU8(sfHookResult) == 3);
|
||||
}
|
||||
|
||||
void
|
||||
testDiceConsecutiveCallsDiffer()
|
||||
{
|
||||
testcase("Hook dice() consecutive calls return different values");
|
||||
using namespace jtx;
|
||||
|
||||
Env env{
|
||||
*this,
|
||||
envconfig(),
|
||||
supported_amendments() | featureConsensusEntropy,
|
||||
nullptr};
|
||||
|
||||
auto const alice = Account{"alice"};
|
||||
env.fund(XRP(10000), alice);
|
||||
env.close();
|
||||
|
||||
BEAST_REQUIRE(env.le(keylet::consensusEntropy()));
|
||||
|
||||
// dice(1000000) twice — large range makes collision near-impossible
|
||||
// encode r1 in low 20 bits, r2 in high bits
|
||||
TestHook hook = consensusentropy_test_wasm[R"[test.hook](
|
||||
#include <stdint.h>
|
||||
extern int32_t _g(uint32_t, uint32_t);
|
||||
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t dice(uint32_t sides);
|
||||
|
||||
int64_t hook(uint32_t r)
|
||||
{
|
||||
_g(1,1);
|
||||
int64_t r1 = dice(1000000);
|
||||
if (r1 < 0)
|
||||
rollback(0, 0, r1);
|
||||
|
||||
int64_t r2 = dice(1000000);
|
||||
if (r2 < 0)
|
||||
rollback(0, 0, r2);
|
||||
|
||||
// consecutive calls should differ (rngCallCounter)
|
||||
if (r1 == r2)
|
||||
rollback(0, 0, -1);
|
||||
|
||||
return accept(0, 0, r1 | (r2 << 20));
|
||||
}
|
||||
)[test.hook]"];
|
||||
|
||||
env(ripple::test::jtx::hook(alice, {{hso(hook, overrideFlag)}}, 0),
|
||||
M("set dice hook"),
|
||||
HSFEE);
|
||||
env.close();
|
||||
|
||||
Json::Value invoke;
|
||||
invoke[jss::TransactionType] = "Invoke";
|
||||
invoke[jss::Account] = alice.human();
|
||||
env(invoke, M("test dice consecutive"), fee(XRP(1)));
|
||||
|
||||
auto meta = env.meta();
|
||||
BEAST_REQUIRE(meta);
|
||||
BEAST_REQUIRE(meta->isFieldPresent(sfHookExecutions));
|
||||
|
||||
auto const hookExecutions = meta->getFieldArray(sfHookExecutions);
|
||||
BEAST_REQUIRE(hookExecutions.size() == 1);
|
||||
|
||||
auto const rc = hookExecutions[0].getFieldU64(sfHookReturnCode);
|
||||
auto const r1 = rc & 0xFFFFF;
|
||||
auto const r2 = (rc >> 20) & 0xFFFFF;
|
||||
|
||||
std::cerr << " two-call dice(1000000): returnCode=" << rc << " hex=0x"
|
||||
<< std::hex << rc << std::dec << " r1=" << r1 << " r2=" << r2
|
||||
<< "\n";
|
||||
|
||||
// hookResult 3 = accept (would be 1 if r1==r2 triggered rollback)
|
||||
BEAST_EXPECT(hookExecutions[0].getFieldU8(sfHookResult) == 3);
|
||||
BEAST_EXPECT(r1 < 1000000);
|
||||
BEAST_EXPECT(r2 < 1000000);
|
||||
BEAST_EXPECT(r1 != r2);
|
||||
}
|
||||
|
||||
void
|
||||
testDiceZeroSides()
|
||||
{
|
||||
testcase("Hook dice(0) returns INVALID_ARGUMENT");
|
||||
using namespace jtx;
|
||||
|
||||
Env env{
|
||||
*this,
|
||||
envconfig(),
|
||||
supported_amendments() | featureConsensusEntropy,
|
||||
nullptr};
|
||||
|
||||
auto const alice = Account{"alice"};
|
||||
env.fund(XRP(10000), alice);
|
||||
env.close();
|
||||
|
||||
BEAST_REQUIRE(env.le(keylet::consensusEntropy()));
|
||||
|
||||
// Hook calls dice(0) and returns whatever dice returns.
|
||||
// dice(0) should return INVALID_ARGUMENT (-7).
|
||||
TestHook hook = consensusentropy_test_wasm[R"[test.hook](
|
||||
#include <stdint.h>
|
||||
extern int32_t _g(uint32_t, uint32_t);
|
||||
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t dice(uint32_t sides);
|
||||
|
||||
int64_t hook(uint32_t r)
|
||||
{
|
||||
_g(1,1);
|
||||
int64_t result = dice(0);
|
||||
// dice(0) should return negative error code, pass it through
|
||||
return accept(0, 0, result);
|
||||
}
|
||||
)[test.hook]"];
|
||||
|
||||
env(ripple::test::jtx::hook(alice, {{hso(hook, overrideFlag)}}, 0),
|
||||
M("set dice0 hook"),
|
||||
HSFEE);
|
||||
env.close();
|
||||
|
||||
Json::Value invoke;
|
||||
invoke[jss::TransactionType] = "Invoke";
|
||||
invoke[jss::Account] = alice.human();
|
||||
env(invoke, M("test dice(0)"), fee(XRP(1)));
|
||||
|
||||
auto meta = env.meta();
|
||||
BEAST_REQUIRE(meta);
|
||||
BEAST_REQUIRE(meta->isFieldPresent(sfHookExecutions));
|
||||
|
||||
auto const hookExecutions = meta->getFieldArray(sfHookExecutions);
|
||||
BEAST_REQUIRE(hookExecutions.size() == 1);
|
||||
|
||||
// INVALID_ARGUMENT = -7, encoded as 0x8000000000000000 + abs(code)
|
||||
// (see applyHook.cpp unsigned_exit_code encoding)
|
||||
auto const rawCode = hookExecutions[0].getFieldU64(sfHookReturnCode);
|
||||
int64_t returnCode = (rawCode & 0x8000000000000000ULL)
|
||||
? -static_cast<int64_t>(rawCode & 0x7FFFFFFFFFFFFFFFULL)
|
||||
: static_cast<int64_t>(rawCode);
|
||||
std::cerr << " dice(0) returnCode = " << returnCode << " (raw 0x"
|
||||
<< std::hex << rawCode << std::dec << ")\n";
|
||||
BEAST_EXPECT(returnCode == -7);
|
||||
BEAST_EXPECT(hookExecutions[0].getFieldU8(sfHookResult) == 3);
|
||||
}
|
||||
|
||||
void
|
||||
run() override
|
||||
{
|
||||
testSLECreated();
|
||||
testSLEUpdatedOnSubsequentClose();
|
||||
testNoSLEWithoutAmendment();
|
||||
testDice();
|
||||
testDiceZeroSides();
|
||||
testRandom();
|
||||
testDiceConsecutiveCallsDiffer();
|
||||
}
|
||||
};
|
||||
|
||||
BEAST_DEFINE_TESTSUITE(ConsensusEntropy, app, ripple);
|
||||
|
||||
} // namespace test
|
||||
} // namespace ripple
|
||||
235
src/test/app/ConsensusEntropy_test_hooks.h
Normal file
235
src/test/app/ConsensusEntropy_test_hooks.h
Normal file
@@ -0,0 +1,235 @@
|
||||
|
||||
// This file is generated by build_test_hooks.py
|
||||
#ifndef CONSENSUSENTROPY_TEST_WASM_INCLUDED
|
||||
#define CONSENSUSENTROPY_TEST_WASM_INCLUDED
|
||||
#include <map>
|
||||
#include <stdint.h>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
std::map<std::string, std::vector<uint8_t>> consensusentropy_test_wasm = {
|
||||
/* ==== WASM: 0 ==== */
|
||||
{R"[test.hook](
|
||||
#include <stdint.h>
|
||||
extern int32_t _g(uint32_t, uint32_t);
|
||||
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t dice(uint32_t sides);
|
||||
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
|
||||
|
||||
int64_t hook(uint32_t r)
|
||||
{
|
||||
_g(1,1);
|
||||
|
||||
// dice(6) should return 0..5
|
||||
int64_t result = dice(6);
|
||||
|
||||
// negative means error
|
||||
if (result < 0)
|
||||
rollback(0, 0, result);
|
||||
|
||||
if (result >= 6)
|
||||
rollback(0, 0, -1);
|
||||
|
||||
// return the dice result as the accept code
|
||||
return accept(0, 0, result);
|
||||
}
|
||||
)[test.hook]",
|
||||
{
|
||||
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x13U,
|
||||
0x03U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x01U, 0x7FU,
|
||||
0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x02U,
|
||||
0x31U, 0x04U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U,
|
||||
0x00U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x04U, 0x64U, 0x69U, 0x63U, 0x65U,
|
||||
0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU,
|
||||
0x6CU, 0x62U, 0x61U, 0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU,
|
||||
0x76U, 0x06U, 0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U,
|
||||
0x03U, 0x02U, 0x01U, 0x01U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U, 0x06U,
|
||||
0x21U, 0x05U, 0x7FU, 0x01U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU,
|
||||
0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U,
|
||||
0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U,
|
||||
0x41U, 0x80U, 0x08U, 0x0BU, 0x07U, 0x08U, 0x01U, 0x04U, 0x68U, 0x6FU,
|
||||
0x6FU, 0x6BU, 0x00U, 0x04U, 0x0AU, 0xD0U, 0x80U, 0x00U, 0x01U, 0xCCU,
|
||||
0x80U, 0x00U, 0x01U, 0x02U, 0x7EU, 0x41U, 0x01U, 0x41U, 0x01U, 0x10U,
|
||||
0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x41U, 0x06U, 0x10U, 0x81U,
|
||||
0x80U, 0x80U, 0x80U, 0x00U, 0x22U, 0x01U, 0x21U, 0x02U, 0x02U, 0x40U,
|
||||
0x02U, 0x40U, 0x20U, 0x01U, 0x42U, 0x00U, 0x53U, 0x0DU, 0x00U, 0x42U,
|
||||
0x7FU, 0x21U, 0x02U, 0x20U, 0x01U, 0x42U, 0x06U, 0x53U, 0x0DU, 0x01U,
|
||||
0x0BU, 0x41U, 0x00U, 0x41U, 0x00U, 0x20U, 0x02U, 0x10U, 0x82U, 0x80U,
|
||||
0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x41U, 0x00U, 0x41U, 0x00U, 0x20U,
|
||||
0x01U, 0x10U, 0x83U, 0x80U, 0x80U, 0x80U, 0x00U, 0x0BU,
|
||||
}},
|
||||
|
||||
/* ==== WASM: 1 ==== */
|
||||
{R"[test.hook](
|
||||
#include <stdint.h>
|
||||
extern int32_t _g(uint32_t, uint32_t);
|
||||
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t random(uint32_t write_ptr, uint32_t write_len);
|
||||
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
|
||||
|
||||
int64_t hook(uint32_t r)
|
||||
{
|
||||
_g(1,1);
|
||||
|
||||
uint8_t buf[32];
|
||||
for (int i = 0; GUARD(32), i < 32; ++i)
|
||||
buf[i] = 0;
|
||||
|
||||
int64_t result = random((uint32_t)buf, 32);
|
||||
|
||||
// Should return 32 (bytes written)
|
||||
if (result != 32)
|
||||
rollback(0, 0, result);
|
||||
|
||||
// Verify buffer is not all zeroes
|
||||
int nonzero = 0;
|
||||
for (int i = 0; GUARD(32), i < 32; ++i)
|
||||
if (buf[i] != 0) nonzero = 1;
|
||||
|
||||
if (!nonzero)
|
||||
rollback(0, 0, -2);
|
||||
|
||||
return accept(0, 0, 0);
|
||||
}
|
||||
)[test.hook]",
|
||||
{
|
||||
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x19U,
|
||||
0x04U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x02U, 0x7FU,
|
||||
0x7FU, 0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU,
|
||||
0x60U, 0x01U, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x33U, 0x04U, 0x03U, 0x65U,
|
||||
0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U, 0x00U, 0x03U, 0x65U, 0x6EU,
|
||||
0x76U, 0x06U, 0x72U, 0x61U, 0x6EU, 0x64U, 0x6FU, 0x6DU, 0x00U, 0x01U,
|
||||
0x03U, 0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU, 0x6CU, 0x62U,
|
||||
0x61U, 0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x06U,
|
||||
0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U, 0x03U, 0x02U,
|
||||
0x01U, 0x03U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U, 0x06U, 0x21U, 0x05U,
|
||||
0x7FU, 0x01U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U,
|
||||
0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU,
|
||||
0x00U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U,
|
||||
0x08U, 0x0BU, 0x07U, 0x08U, 0x01U, 0x04U, 0x68U, 0x6FU, 0x6FU, 0x6BU,
|
||||
0x00U, 0x04U, 0x0AU, 0x86U, 0x82U, 0x00U, 0x01U, 0x82U, 0x82U, 0x00U,
|
||||
0x03U, 0x02U, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x7FU, 0x23U, 0x80U, 0x80U,
|
||||
0x80U, 0x80U, 0x00U, 0x41U, 0x20U, 0x6BU, 0x22U, 0x01U, 0x24U, 0x80U,
|
||||
0x80U, 0x80U, 0x80U, 0x00U, 0x41U, 0x01U, 0x41U, 0x01U, 0x10U, 0x80U,
|
||||
0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x41U, 0x8EU, 0x80U, 0x80U, 0x80U,
|
||||
0x78U, 0x41U, 0x21U, 0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU,
|
||||
0x41U, 0x00U, 0x21U, 0x02U, 0x03U, 0x40U, 0x41U, 0x8EU, 0x80U, 0x80U,
|
||||
0x80U, 0x78U, 0x41U, 0x21U, 0x10U, 0x00U, 0x1AU, 0x20U, 0x01U, 0x20U,
|
||||
0x02U, 0x6AU, 0x41U, 0x00U, 0x3AU, 0x00U, 0x00U, 0x41U, 0x8EU, 0x80U,
|
||||
0x80U, 0x80U, 0x78U, 0x41U, 0x21U, 0x1AU, 0x01U, 0x01U, 0x01U, 0x01U,
|
||||
0x01U, 0x1AU, 0x20U, 0x02U, 0x41U, 0x01U, 0x6AU, 0x22U, 0x02U, 0x41U,
|
||||
0x20U, 0x47U, 0x0DU, 0x00U, 0x0BU, 0x02U, 0x40U, 0x20U, 0x01U, 0x41U,
|
||||
0x20U, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U, 0x00U, 0x22U, 0x03U, 0x42U,
|
||||
0x20U, 0x51U, 0x0DU, 0x00U, 0x41U, 0x00U, 0x41U, 0x00U, 0x20U, 0x03U,
|
||||
0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x41U, 0x99U,
|
||||
0x80U, 0x80U, 0x80U, 0x78U, 0x41U, 0x21U, 0x10U, 0x80U, 0x80U, 0x80U,
|
||||
0x80U, 0x00U, 0x1AU, 0x41U, 0x00U, 0x21U, 0x02U, 0x41U, 0x00U, 0x21U,
|
||||
0x04U, 0x03U, 0x40U, 0x41U, 0x99U, 0x80U, 0x80U, 0x80U, 0x78U, 0x41U,
|
||||
0x21U, 0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x20U, 0x01U,
|
||||
0x20U, 0x02U, 0x6AU, 0x2DU, 0x00U, 0x00U, 0x21U, 0x05U, 0x41U, 0x01U,
|
||||
0x20U, 0x04U, 0x20U, 0x05U, 0x1BU, 0x21U, 0x04U, 0x20U, 0x02U, 0x41U,
|
||||
0x01U, 0x6AU, 0x22U, 0x02U, 0x41U, 0x20U, 0x47U, 0x0DU, 0x00U, 0x0BU,
|
||||
0x02U, 0x40U, 0x20U, 0x04U, 0x0DU, 0x00U, 0x41U, 0x00U, 0x41U, 0x00U,
|
||||
0x42U, 0x7EU, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU,
|
||||
0x41U, 0x00U, 0x41U, 0x00U, 0x42U, 0x00U, 0x10U, 0x83U, 0x80U, 0x80U,
|
||||
0x80U, 0x00U, 0x21U, 0x03U, 0x20U, 0x01U, 0x41U, 0x20U, 0x6AU, 0x24U,
|
||||
0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x20U, 0x03U, 0x0BU,
|
||||
}},
|
||||
|
||||
/* ==== WASM: 2 ==== */
|
||||
{R"[test.hook](
|
||||
#include <stdint.h>
|
||||
extern int32_t _g(uint32_t, uint32_t);
|
||||
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t dice(uint32_t sides);
|
||||
|
||||
int64_t hook(uint32_t r)
|
||||
{
|
||||
_g(1,1);
|
||||
int64_t r1 = dice(1000000);
|
||||
if (r1 < 0)
|
||||
rollback(0, 0, r1);
|
||||
|
||||
int64_t r2 = dice(1000000);
|
||||
if (r2 < 0)
|
||||
rollback(0, 0, r2);
|
||||
|
||||
// consecutive calls should differ (rngCallCounter)
|
||||
if (r1 == r2)
|
||||
rollback(0, 0, -1);
|
||||
|
||||
return accept(0, 0, r1 | (r2 << 20));
|
||||
}
|
||||
)[test.hook]",
|
||||
{
|
||||
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x13U,
|
||||
0x03U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x01U, 0x7FU,
|
||||
0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x02U,
|
||||
0x31U, 0x04U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U,
|
||||
0x00U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x04U, 0x64U, 0x69U, 0x63U, 0x65U,
|
||||
0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU,
|
||||
0x6CU, 0x62U, 0x61U, 0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU,
|
||||
0x76U, 0x06U, 0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U,
|
||||
0x03U, 0x02U, 0x01U, 0x01U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U, 0x06U,
|
||||
0x21U, 0x05U, 0x7FU, 0x01U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU,
|
||||
0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U,
|
||||
0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U,
|
||||
0x41U, 0x80U, 0x08U, 0x0BU, 0x07U, 0x08U, 0x01U, 0x04U, 0x68U, 0x6FU,
|
||||
0x6FU, 0x6BU, 0x00U, 0x04U, 0x0AU, 0xFEU, 0x80U, 0x00U, 0x01U, 0xFAU,
|
||||
0x80U, 0x00U, 0x01U, 0x02U, 0x7EU, 0x41U, 0x01U, 0x41U, 0x01U, 0x10U,
|
||||
0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x02U, 0x40U, 0x41U, 0xC0U,
|
||||
0x84U, 0x3DU, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U, 0x00U, 0x22U, 0x01U,
|
||||
0x42U, 0x7FU, 0x55U, 0x0DU, 0x00U, 0x41U, 0x00U, 0x41U, 0x00U, 0x20U,
|
||||
0x01U, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x02U,
|
||||
0x40U, 0x41U, 0xC0U, 0x84U, 0x3DU, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U,
|
||||
0x00U, 0x22U, 0x02U, 0x42U, 0x7FU, 0x55U, 0x0DU, 0x00U, 0x41U, 0x00U,
|
||||
0x41U, 0x00U, 0x20U, 0x02U, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U,
|
||||
0x1AU, 0x0BU, 0x02U, 0x40U, 0x20U, 0x01U, 0x20U, 0x02U, 0x52U, 0x0DU,
|
||||
0x00U, 0x41U, 0x00U, 0x41U, 0x00U, 0x42U, 0x7FU, 0x10U, 0x82U, 0x80U,
|
||||
0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x41U, 0x00U, 0x41U, 0x00U, 0x20U,
|
||||
0x02U, 0x42U, 0x14U, 0x86U, 0x20U, 0x01U, 0x84U, 0x10U, 0x83U, 0x80U,
|
||||
0x80U, 0x80U, 0x00U, 0x0BU,
|
||||
}},
|
||||
|
||||
/* ==== WASM: 3 ==== */
|
||||
{R"[test.hook](
|
||||
#include <stdint.h>
|
||||
extern int32_t _g(uint32_t, uint32_t);
|
||||
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t dice(uint32_t sides);
|
||||
|
||||
int64_t hook(uint32_t r)
|
||||
{
|
||||
_g(1,1);
|
||||
int64_t result = dice(0);
|
||||
// dice(0) should return negative error code, pass it through
|
||||
return accept(0, 0, result);
|
||||
}
|
||||
)[test.hook]",
|
||||
{
|
||||
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x13U,
|
||||
0x03U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x01U, 0x7FU,
|
||||
0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x02U,
|
||||
0x22U, 0x03U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U,
|
||||
0x00U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x04U, 0x64U, 0x69U, 0x63U, 0x65U,
|
||||
0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x06U, 0x61U, 0x63U, 0x63U,
|
||||
0x65U, 0x70U, 0x74U, 0x00U, 0x02U, 0x03U, 0x02U, 0x01U, 0x01U, 0x05U,
|
||||
0x03U, 0x01U, 0x00U, 0x02U, 0x06U, 0x21U, 0x05U, 0x7FU, 0x01U, 0x41U,
|
||||
0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU,
|
||||
0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U,
|
||||
0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x07U,
|
||||
0x08U, 0x01U, 0x04U, 0x68U, 0x6FU, 0x6FU, 0x6BU, 0x00U, 0x03U, 0x0AU,
|
||||
0xA3U, 0x80U, 0x00U, 0x01U, 0x9FU, 0x80U, 0x00U, 0x00U, 0x41U, 0x01U,
|
||||
0x41U, 0x01U, 0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x41U,
|
||||
0x00U, 0x41U, 0x00U, 0x41U, 0x00U, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U,
|
||||
0x00U, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x0BU,
|
||||
}},
|
||||
|
||||
};
|
||||
}
|
||||
} // namespace ripple
|
||||
#endif
|
||||
162
src/test/app/ExportSigCollector_test.cpp
Normal file
162
src/test/app/ExportSigCollector_test.cpp
Normal file
@@ -0,0 +1,162 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <xrpld/app/misc/ExportSigCollector.h>
|
||||
#include <xrpl/basics/StringUtilities.h>
|
||||
#include <xrpl/beast/unit_test.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
#include <cstring>
|
||||
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
|
||||
namespace {
|
||||
|
||||
uint256
|
||||
makeHash(char const* label)
|
||||
{
|
||||
return sha512Half(Slice(label, std::strlen(label)));
|
||||
}
|
||||
|
||||
PublicKey
|
||||
makePublicKey(char const* hex)
|
||||
{
|
||||
auto const raw = strUnHex(hex);
|
||||
return PublicKey{makeSlice(*raw)};
|
||||
}
|
||||
|
||||
Buffer
|
||||
makeSignature(std::uint8_t seed)
|
||||
{
|
||||
std::uint8_t bytes[] = {
|
||||
seed,
|
||||
static_cast<std::uint8_t>(seed + 1),
|
||||
static_cast<std::uint8_t>(seed + 2)};
|
||||
return Buffer(bytes, sizeof(bytes));
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
class ExportSigCollector_test : public beast::unit_test::suite
|
||||
{
|
||||
PublicKey const validator_ = makePublicKey(
|
||||
"0388935426E0D08083314842EDFBB2D517BD47699F9A4527318A8E10468C97C05"
|
||||
"2");
|
||||
|
||||
public:
|
||||
void
|
||||
testCleanupUsesFirstSeenSeq()
|
||||
{
|
||||
testcase("cleanup uses first seen sequence");
|
||||
|
||||
ExportSigCollector collector;
|
||||
auto const tx = makeHash("cleanup-verified");
|
||||
auto const sig = makeSignature(1);
|
||||
|
||||
collector.addVerifiedSignature(tx, validator_, sig, 10);
|
||||
BEAST_EXPECT(collector.signatureCount(tx) == 1);
|
||||
|
||||
collector.cleanupStale(266);
|
||||
BEAST_EXPECT(collector.signatureCount(tx) == 1);
|
||||
|
||||
collector.cleanupStale(267);
|
||||
BEAST_EXPECT(collector.signatureCount(tx) == 0);
|
||||
}
|
||||
|
||||
void
|
||||
testUpgradeSetsFirstSeenSeq()
|
||||
{
|
||||
testcase("upgrade sets first seen sequence");
|
||||
|
||||
ExportSigCollector collector;
|
||||
auto const tx = makeHash("cleanup-upgraded");
|
||||
auto const sig = makeSignature(5);
|
||||
|
||||
collector.addUnverifiedSignature(tx, validator_, sig);
|
||||
BEAST_EXPECT(collector.hasUnverifiedSignatures());
|
||||
|
||||
collector.upgradeSignature(tx, validator_, sig, 10);
|
||||
BEAST_EXPECT(!collector.hasUnverifiedSignatures());
|
||||
BEAST_EXPECT(collector.signatureCount(tx) == 1);
|
||||
|
||||
collector.cleanupStale(266);
|
||||
BEAST_EXPECT(collector.signatureCount(tx) == 1);
|
||||
|
||||
collector.cleanupStale(267);
|
||||
BEAST_EXPECT(collector.signatureCount(tx) == 0);
|
||||
}
|
||||
|
||||
void
|
||||
testRemoveInvalidUnverifiedSignature()
|
||||
{
|
||||
testcase("remove invalid unverified signature");
|
||||
|
||||
ExportSigCollector collector;
|
||||
auto const tx = makeHash("remove-invalid");
|
||||
auto const sig = makeSignature(9);
|
||||
auto const otherSig = makeSignature(10);
|
||||
|
||||
collector.addUnverifiedSignature(tx, validator_, sig, 10);
|
||||
BEAST_EXPECT(collector.hasUnverifiedSignatures());
|
||||
|
||||
BEAST_EXPECT(!collector.removeSignature(tx, validator_, otherSig));
|
||||
BEAST_EXPECT(collector.hasUnverifiedSignatures());
|
||||
|
||||
BEAST_EXPECT(collector.removeSignature(tx, validator_, sig));
|
||||
BEAST_EXPECT(!collector.hasUnverifiedSignatures());
|
||||
BEAST_EXPECT(collector.signatureCount(tx) == 0);
|
||||
}
|
||||
|
||||
void
|
||||
testClearAll()
|
||||
{
|
||||
testcase("clear all signatures and round state");
|
||||
|
||||
ExportSigCollector collector;
|
||||
auto const verifiedTx = makeHash("clear-all-verified");
|
||||
auto const unverifiedTx = makeHash("clear-all-unverified");
|
||||
auto const sig = makeSignature(12);
|
||||
|
||||
collector.addVerifiedSignature(verifiedTx, validator_, sig, 10);
|
||||
collector.addUnverifiedSignature(unverifiedTx, validator_, sig, 10);
|
||||
BEAST_EXPECT(collector.signatureCount(verifiedTx) == 1);
|
||||
BEAST_EXPECT(collector.hasUnverifiedSignatures());
|
||||
BEAST_EXPECT(collector.markSent(verifiedTx));
|
||||
BEAST_EXPECT(!collector.markSent(verifiedTx));
|
||||
|
||||
collector.clearAll();
|
||||
|
||||
BEAST_EXPECT(collector.signatureCount(verifiedTx) == 0);
|
||||
BEAST_EXPECT(!collector.hasUnverifiedSignatures());
|
||||
BEAST_EXPECT(collector.markSent(verifiedTx));
|
||||
}
|
||||
|
||||
void
|
||||
run() override
|
||||
{
|
||||
testCleanupUsesFirstSeenSeq();
|
||||
testUpgradeSetsFirstSeenSeq();
|
||||
testRemoveInvalidUnverifiedSignature();
|
||||
testClearAll();
|
||||
}
|
||||
};
|
||||
|
||||
BEAST_DEFINE_TESTSUITE(ExportSigCollector, app, ripple);
|
||||
|
||||
} // namespace test
|
||||
} // namespace ripple
|
||||
1078
src/test/app/Export_test.cpp
Normal file
1078
src/test/app/Export_test.cpp
Normal file
File diff suppressed because it is too large
Load Diff
483
src/test/app/Export_test_hooks.h
Normal file
483
src/test/app/Export_test_hooks.h
Normal file
@@ -0,0 +1,483 @@
|
||||
|
||||
// This file is generated by build_test_hooks.py
|
||||
#ifndef EXPORT_TEST_WASM_INCLUDED
|
||||
#define EXPORT_TEST_WASM_INCLUDED
|
||||
#include <map>
|
||||
#include <stdint.h>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
std::map<std::string, std::vector<uint8_t>> export_test_wasm = {
|
||||
/* ==== WASM: 0 ==== */
|
||||
{R"[test.hook](
|
||||
#include <stdint.h>
|
||||
extern int32_t _g(uint32_t id, uint32_t maxiter);
|
||||
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t xport(uint32_t write_ptr, uint32_t write_len, uint32_t read_ptr, uint32_t read_len);
|
||||
extern int64_t xport_reserve(uint32_t count);
|
||||
extern int64_t hook_account(uint32_t write_ptr, uint32_t write_len);
|
||||
extern int64_t otxn_param(uint32_t write_ptr, uint32_t write_len, uint32_t name_ptr, uint32_t name_len);
|
||||
extern int64_t otxn_type(void);
|
||||
extern int64_t ledger_seq(void);
|
||||
|
||||
#define SBUF(x) (uint32_t)(x), sizeof(x)
|
||||
#define ASSERT(x) if (!(x)) rollback((uint32_t)#x, sizeof(#x), __LINE__)
|
||||
|
||||
#define ttPAYMENT 0
|
||||
#define tfCANONICAL 0x80000000UL
|
||||
|
||||
#define amAMOUNT 1
|
||||
#define amFEE 8
|
||||
#define atACCOUNT 1
|
||||
#define atDESTINATION 3
|
||||
|
||||
#define ENCODE_TT(buf_out, tt) \
|
||||
buf_out[0] = 0x12U; \
|
||||
buf_out[1] = (tt >> 8) & 0xFFU; \
|
||||
buf_out[2] = tt & 0xFFU; \
|
||||
buf_out += 3;
|
||||
|
||||
#define ENCODE_FLAGS(buf_out, flags) \
|
||||
buf_out[0] = 0x22U; \
|
||||
buf_out[1] = (flags >> 24) & 0xFFU; \
|
||||
buf_out[2] = (flags >> 16) & 0xFFU; \
|
||||
buf_out[3] = (flags >> 8) & 0xFFU; \
|
||||
buf_out[4] = flags & 0xFFU; \
|
||||
buf_out += 5;
|
||||
|
||||
#define ENCODE_SEQUENCE(buf_out, seq) \
|
||||
buf_out[0] = 0x24U; \
|
||||
buf_out[1] = (seq >> 24) & 0xFFU; \
|
||||
buf_out[2] = (seq >> 16) & 0xFFU; \
|
||||
buf_out[3] = (seq >> 8) & 0xFFU; \
|
||||
buf_out[4] = seq & 0xFFU; \
|
||||
buf_out += 5;
|
||||
|
||||
#define ENCODE_FLS(buf_out, fls) \
|
||||
buf_out[0] = 0x20U; \
|
||||
buf_out[1] = 0x1AU; \
|
||||
buf_out[2] = (fls >> 24) & 0xFFU; \
|
||||
buf_out[3] = (fls >> 16) & 0xFFU; \
|
||||
buf_out[4] = (fls >> 8) & 0xFFU; \
|
||||
buf_out[5] = fls & 0xFFU; \
|
||||
buf_out += 6;
|
||||
|
||||
#define ENCODE_LLS(buf_out, lls) \
|
||||
buf_out[0] = 0x20U; \
|
||||
buf_out[1] = 0x1BU; \
|
||||
buf_out[2] = (lls >> 24) & 0xFFU; \
|
||||
buf_out[3] = (lls >> 16) & 0xFFU; \
|
||||
buf_out[4] = (lls >> 8) & 0xFFU; \
|
||||
buf_out[5] = lls & 0xFFU; \
|
||||
buf_out += 6;
|
||||
|
||||
#define ENCODE_DROPS(buf_out, drops, amt_type) \
|
||||
buf_out[0] = 0x60U + amt_type; \
|
||||
buf_out[1] = 0x40U + ((drops >> 56) & 0x3FU); \
|
||||
buf_out[2] = (drops >> 48) & 0xFFU; \
|
||||
buf_out[3] = (drops >> 40) & 0xFFU; \
|
||||
buf_out[4] = (drops >> 32) & 0xFFU; \
|
||||
buf_out[5] = (drops >> 24) & 0xFFU; \
|
||||
buf_out[6] = (drops >> 16) & 0xFFU; \
|
||||
buf_out[7] = (drops >> 8) & 0xFFU; \
|
||||
buf_out[8] = drops & 0xFFU; \
|
||||
buf_out += 9;
|
||||
|
||||
#define ENCODE_SIGNING_PUBKEY_EMPTY(buf_out) \
|
||||
buf_out[0] = 0x73U; \
|
||||
buf_out[1] = 0x00U; \
|
||||
buf_out += 2;
|
||||
|
||||
#define ENCODE_ACCOUNT(buf_out, acc, acc_type) \
|
||||
buf_out[0] = 0x80U + acc_type; \
|
||||
buf_out[1] = 0x14U; \
|
||||
for (int i = 0; i < 20; ++i) buf_out[2+i] = acc[i]; \
|
||||
buf_out += 22;
|
||||
|
||||
#define PREPARE_PAYMENT_SIMPLE_SIZE 270U
|
||||
|
||||
int64_t hook(uint32_t reserved) {
|
||||
_g(1, 1);
|
||||
|
||||
if (otxn_type() != ttPAYMENT)
|
||||
return accept(0, 0, 0);
|
||||
|
||||
ASSERT(xport_reserve(1) == 1);
|
||||
|
||||
uint8_t dst[20];
|
||||
int64_t dst_len = otxn_param(SBUF(dst), "DST", 3);
|
||||
ASSERT(dst_len == 20);
|
||||
|
||||
uint8_t acc[20];
|
||||
ASSERT(hook_account(SBUF(acc)) == 20);
|
||||
|
||||
uint32_t cls = (uint32_t)ledger_seq();
|
||||
|
||||
uint8_t tx[PREPARE_PAYMENT_SIMPLE_SIZE];
|
||||
uint8_t* buf = tx;
|
||||
|
||||
ENCODE_TT(buf, ttPAYMENT);
|
||||
ENCODE_FLAGS(buf, tfCANONICAL);
|
||||
ENCODE_SEQUENCE(buf, 0);
|
||||
ENCODE_FLS(buf, cls + 1);
|
||||
ENCODE_LLS(buf, cls + 5);
|
||||
// sfTicketSequence = UINT32 field 41 = 0x20 0x29
|
||||
buf[0] = 0x20U; buf[1] = 0x29U;
|
||||
buf[2] = 0; buf[3] = 0; buf[4] = 0; buf[5] = 1;
|
||||
buf += 6;
|
||||
|
||||
uint64_t drops = 1000000;
|
||||
ENCODE_DROPS(buf, drops, amAMOUNT);
|
||||
ENCODE_DROPS(buf, 10, amFEE);
|
||||
|
||||
ENCODE_SIGNING_PUBKEY_EMPTY(buf);
|
||||
ENCODE_ACCOUNT(buf, acc, atACCOUNT);
|
||||
ENCODE_ACCOUNT(buf, dst, atDESTINATION);
|
||||
|
||||
uint8_t hash[32];
|
||||
int64_t xport_result = xport(SBUF(hash), (uint32_t)tx, buf - tx);
|
||||
ASSERT(xport_result == 32);
|
||||
|
||||
return accept(0, 0, 0);
|
||||
}
|
||||
)[test.hook]",
|
||||
{
|
||||
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x25U,
|
||||
0x06U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x00U, 0x01U,
|
||||
0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x60U, 0x01U,
|
||||
0x7FU, 0x01U, 0x7EU, 0x60U, 0x04U, 0x7FU, 0x7FU, 0x7FU, 0x7FU, 0x01U,
|
||||
0x7EU, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x8BU, 0x01U,
|
||||
0x09U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U, 0x00U,
|
||||
0x03U, 0x65U, 0x6EU, 0x76U, 0x09U, 0x6FU, 0x74U, 0x78U, 0x6EU, 0x5FU,
|
||||
0x74U, 0x79U, 0x70U, 0x65U, 0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U,
|
||||
0x06U, 0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U, 0x03U,
|
||||
0x65U, 0x6EU, 0x76U, 0x0DU, 0x78U, 0x70U, 0x6FU, 0x72U, 0x74U, 0x5FU,
|
||||
0x72U, 0x65U, 0x73U, 0x65U, 0x72U, 0x76U, 0x65U, 0x00U, 0x03U, 0x03U,
|
||||
0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU, 0x6CU, 0x62U, 0x61U,
|
||||
0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x0AU, 0x6FU,
|
||||
0x74U, 0x78U, 0x6EU, 0x5FU, 0x70U, 0x61U, 0x72U, 0x61U, 0x6DU, 0x00U,
|
||||
0x04U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x0CU, 0x68U, 0x6FU, 0x6FU, 0x6BU,
|
||||
0x5FU, 0x61U, 0x63U, 0x63U, 0x6FU, 0x75U, 0x6EU, 0x74U, 0x00U, 0x05U,
|
||||
0x03U, 0x65U, 0x6EU, 0x76U, 0x0AU, 0x6CU, 0x65U, 0x64U, 0x67U, 0x65U,
|
||||
0x72U, 0x5FU, 0x73U, 0x65U, 0x71U, 0x00U, 0x01U, 0x03U, 0x65U, 0x6EU,
|
||||
0x76U, 0x05U, 0x78U, 0x70U, 0x6FU, 0x72U, 0x74U, 0x00U, 0x04U, 0x03U,
|
||||
0x02U, 0x01U, 0x03U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U, 0x06U, 0x21U,
|
||||
0x05U, 0x7FU, 0x01U, 0x41U, 0xE0U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U,
|
||||
0x41U, 0xD9U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU,
|
||||
0x7FU, 0x00U, 0x41U, 0xE0U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U,
|
||||
0x80U, 0x08U, 0x0BU, 0x07U, 0x08U, 0x01U, 0x04U, 0x68U, 0x6FU, 0x6FU,
|
||||
0x6BU, 0x00U, 0x09U, 0x0AU, 0xC5U, 0x84U, 0x00U, 0x01U, 0xC1U, 0x84U,
|
||||
0x00U, 0x03U, 0x01U, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x7FU, 0x23U, 0x80U,
|
||||
0x80U, 0x80U, 0x80U, 0x00U, 0x41U, 0xF0U, 0x02U, 0x6BU, 0x22U, 0x01U,
|
||||
0x24U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x41U, 0x01U, 0x41U, 0x01U,
|
||||
0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x02U, 0x40U, 0x02U,
|
||||
0x40U, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U, 0x00U, 0x50U, 0x0DU, 0x00U,
|
||||
0x41U, 0x00U, 0x41U, 0x00U, 0x42U, 0x00U, 0x10U, 0x82U, 0x80U, 0x80U,
|
||||
0x80U, 0x00U, 0x21U, 0x02U, 0x0CU, 0x01U, 0x0BU, 0x02U, 0x40U, 0x41U,
|
||||
0x01U, 0x10U, 0x83U, 0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x01U, 0x51U,
|
||||
0x0DU, 0x00U, 0x41U, 0x80U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x16U,
|
||||
0x42U, 0xDFU, 0x00U, 0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU,
|
||||
0x0BU, 0x02U, 0x40U, 0x20U, 0x01U, 0x41U, 0xD0U, 0x02U, 0x6AU, 0x41U,
|
||||
0x14U, 0x41U, 0x96U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x03U, 0x10U,
|
||||
0x85U, 0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x14U, 0x51U, 0x0DU, 0x00U,
|
||||
0x41U, 0x9AU, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x0EU, 0x42U, 0xE3U,
|
||||
0x00U, 0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x02U,
|
||||
0x40U, 0x20U, 0x01U, 0x41U, 0xB0U, 0x02U, 0x6AU, 0x41U, 0x14U, 0x10U,
|
||||
0x86U, 0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x14U, 0x51U, 0x0DU, 0x00U,
|
||||
0x41U, 0xA8U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x1EU, 0x42U, 0xE6U,
|
||||
0x00U, 0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x10U,
|
||||
0x87U, 0x80U, 0x80U, 0x80U, 0x00U, 0x21U, 0x02U, 0x20U, 0x01U, 0x41U,
|
||||
0xCEU, 0x00U, 0x6AU, 0x41U, 0x00U, 0x3BU, 0x01U, 0x00U, 0x20U, 0x01U,
|
||||
0x41U, 0xC0U, 0x00U, 0x3AU, 0x00U, 0x49U, 0x20U, 0x01U, 0x42U, 0x80U,
|
||||
0x80U, 0x80U, 0x80U, 0xF0U, 0xC1U, 0x90U, 0xA0U, 0xE8U, 0x00U, 0x37U,
|
||||
0x00U, 0x41U, 0x20U, 0x01U, 0x42U, 0xA0U, 0xD2U, 0x80U, 0x80U, 0x80U,
|
||||
0xA0U, 0xC0U, 0xB0U, 0xC0U, 0x00U, 0x37U, 0x00U, 0x39U, 0x20U, 0x01U,
|
||||
0x41U, 0xA0U, 0x36U, 0x3BU, 0x00U, 0x33U, 0x20U, 0x01U, 0x41U, 0xA0U,
|
||||
0x34U, 0x3BU, 0x00U, 0x2DU, 0x20U, 0x01U, 0x41U, 0x00U, 0x36U, 0x00U,
|
||||
0x29U, 0x20U, 0x01U, 0x41U, 0x24U, 0x3AU, 0x00U, 0x28U, 0x20U, 0x01U,
|
||||
0x42U, 0x92U, 0x80U, 0x80U, 0x90U, 0x82U, 0x10U, 0x37U, 0x03U, 0x20U,
|
||||
0x20U, 0x01U, 0x41U, 0x00U, 0x36U, 0x01U, 0x4AU, 0x20U, 0x01U, 0x20U,
|
||||
0x02U, 0xA7U, 0x22U, 0x03U, 0x41U, 0x05U, 0x6AU, 0x22U, 0x04U, 0x3AU,
|
||||
0x00U, 0x38U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x08U, 0x76U, 0x3AU,
|
||||
0x00U, 0x37U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x10U, 0x76U, 0x3AU,
|
||||
0x00U, 0x36U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x18U, 0x76U, 0x3AU,
|
||||
0x00U, 0x35U, 0x20U, 0x01U, 0x20U, 0x03U, 0x41U, 0x01U, 0x6AU, 0x22U,
|
||||
0x04U, 0x3AU, 0x00U, 0x32U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x08U,
|
||||
0x76U, 0x3AU, 0x00U, 0x31U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x10U,
|
||||
0x76U, 0x3AU, 0x00U, 0x30U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x18U,
|
||||
0x76U, 0x3AU, 0x00U, 0x2FU, 0x20U, 0x01U, 0x41U, 0xDDU, 0x00U, 0x6AU,
|
||||
0x20U, 0x01U, 0x29U, 0x03U, 0xB8U, 0x02U, 0x37U, 0x00U, 0x00U, 0x20U,
|
||||
0x01U, 0x41U, 0xE5U, 0x00U, 0x6AU, 0x20U, 0x01U, 0x41U, 0xB0U, 0x02U,
|
||||
0x6AU, 0x41U, 0x10U, 0x6AU, 0x28U, 0x02U, 0x00U, 0x36U, 0x00U, 0x00U,
|
||||
0x20U, 0x01U, 0x41U, 0xF3U, 0x00U, 0x6AU, 0x20U, 0x01U, 0x29U, 0x03U,
|
||||
0xD8U, 0x02U, 0x37U, 0x00U, 0x00U, 0x20U, 0x01U, 0x41U, 0xFBU, 0x00U,
|
||||
0x6AU, 0x20U, 0x01U, 0x41U, 0xD0U, 0x02U, 0x6AU, 0x41U, 0x10U, 0x6AU,
|
||||
0x28U, 0x02U, 0x00U, 0x36U, 0x00U, 0x00U, 0x20U, 0x01U, 0x41U, 0x14U,
|
||||
0x3AU, 0x00U, 0x54U, 0x20U, 0x01U, 0x41U, 0x8AU, 0xE6U, 0x81U, 0x88U,
|
||||
0x78U, 0x36U, 0x02U, 0x50U, 0x20U, 0x01U, 0x41U, 0x83U, 0x29U, 0x3BU,
|
||||
0x00U, 0x69U, 0x20U, 0x01U, 0x20U, 0x01U, 0x29U, 0x03U, 0xB0U, 0x02U,
|
||||
0x37U, 0x00U, 0x55U, 0x20U, 0x01U, 0x20U, 0x01U, 0x29U, 0x03U, 0xD0U,
|
||||
0x02U, 0x37U, 0x00U, 0x6BU, 0x02U, 0x40U, 0x20U, 0x01U, 0x41U, 0x20U,
|
||||
0x20U, 0x01U, 0x41U, 0x20U, 0x6AU, 0x41U, 0xDFU, 0x00U, 0x10U, 0x88U,
|
||||
0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x20U, 0x51U, 0x0DU, 0x00U, 0x41U,
|
||||
0xC6U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x13U, 0x42U, 0x81U, 0x01U,
|
||||
0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x41U, 0x00U,
|
||||
0x41U, 0x00U, 0x42U, 0x00U, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U,
|
||||
0x21U, 0x02U, 0x0BU, 0x20U, 0x01U, 0x41U, 0xF0U, 0x02U, 0x6AU, 0x24U,
|
||||
0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x20U, 0x02U, 0x0BU, 0x0BU, 0x60U,
|
||||
0x01U, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x59U, 0x78U, 0x70U, 0x6FU,
|
||||
0x72U, 0x74U, 0x5FU, 0x72U, 0x65U, 0x73U, 0x65U, 0x72U, 0x76U, 0x65U,
|
||||
0x28U, 0x31U, 0x29U, 0x20U, 0x3DU, 0x3DU, 0x20U, 0x31U, 0x00U, 0x44U,
|
||||
0x53U, 0x54U, 0x00U, 0x64U, 0x73U, 0x74U, 0x5FU, 0x6CU, 0x65U, 0x6EU,
|
||||
0x20U, 0x3DU, 0x3DU, 0x20U, 0x32U, 0x30U, 0x00U, 0x68U, 0x6FU, 0x6FU,
|
||||
0x6BU, 0x5FU, 0x61U, 0x63U, 0x63U, 0x6FU, 0x75U, 0x6EU, 0x74U, 0x28U,
|
||||
0x53U, 0x42U, 0x55U, 0x46U, 0x28U, 0x61U, 0x63U, 0x63U, 0x29U, 0x29U,
|
||||
0x20U, 0x3DU, 0x3DU, 0x20U, 0x32U, 0x30U, 0x00U, 0x78U, 0x70U, 0x6FU,
|
||||
0x72U, 0x74U, 0x5FU, 0x72U, 0x65U, 0x73U, 0x75U, 0x6CU, 0x74U, 0x20U,
|
||||
0x3DU, 0x3DU, 0x20U, 0x33U, 0x32U, 0x00U,
|
||||
}},
|
||||
|
||||
/* ==== WASM: 1 ==== */
|
||||
{R"[test.hook](
|
||||
#include <stdint.h>
|
||||
extern int32_t _g(uint32_t id, uint32_t maxiter);
|
||||
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t xport(uint32_t write_ptr, uint32_t write_len, uint32_t read_ptr, uint32_t read_len);
|
||||
extern int64_t xport_reserve(uint32_t count);
|
||||
extern int64_t hook_account(uint32_t write_ptr, uint32_t write_len);
|
||||
extern int64_t otxn_param(uint32_t write_ptr, uint32_t write_len, uint32_t name_ptr, uint32_t name_len);
|
||||
extern int64_t otxn_type(void);
|
||||
extern int64_t ledger_seq(void);
|
||||
|
||||
#define SBUF(x) (uint32_t)(x), sizeof(x)
|
||||
#define ASSERT(x) if (!(x)) rollback((uint32_t)#x, sizeof(#x), __LINE__)
|
||||
|
||||
#define ttPAYMENT 0
|
||||
#define tfCANONICAL 0x80000000UL
|
||||
|
||||
#define amAMOUNT 1
|
||||
#define amFEE 8
|
||||
#define atACCOUNT 1
|
||||
#define atDESTINATION 3
|
||||
|
||||
#define ENCODE_TT(buf_out, tt) \
|
||||
buf_out[0] = 0x12U; \
|
||||
buf_out[1] = (tt >> 8) & 0xFFU; \
|
||||
buf_out[2] = tt & 0xFFU; \
|
||||
buf_out += 3;
|
||||
|
||||
#define ENCODE_FLAGS(buf_out, flags) \
|
||||
buf_out[0] = 0x22U; \
|
||||
buf_out[1] = (flags >> 24) & 0xFFU; \
|
||||
buf_out[2] = (flags >> 16) & 0xFFU; \
|
||||
buf_out[3] = (flags >> 8) & 0xFFU; \
|
||||
buf_out[4] = flags & 0xFFU; \
|
||||
buf_out += 5;
|
||||
|
||||
#define ENCODE_SEQUENCE(buf_out, seq) \
|
||||
buf_out[0] = 0x24U; \
|
||||
buf_out[1] = (seq >> 24) & 0xFFU; \
|
||||
buf_out[2] = (seq >> 16) & 0xFFU; \
|
||||
buf_out[3] = (seq >> 8) & 0xFFU; \
|
||||
buf_out[4] = seq & 0xFFU; \
|
||||
buf_out += 5;
|
||||
|
||||
// sfNetworkID = UINT32 field 1 = 0x21
|
||||
#define ENCODE_NETWORK_ID(buf_out, id) \
|
||||
buf_out[0] = 0x21U; \
|
||||
buf_out[1] = (id >> 24) & 0xFFU; \
|
||||
buf_out[2] = (id >> 16) & 0xFFU; \
|
||||
buf_out[3] = (id >> 8) & 0xFFU; \
|
||||
buf_out[4] = id & 0xFFU; \
|
||||
buf_out += 5;
|
||||
|
||||
#define ENCODE_FLS(buf_out, fls) \
|
||||
buf_out[0] = 0x20U; \
|
||||
buf_out[1] = 0x1AU; \
|
||||
buf_out[2] = (fls >> 24) & 0xFFU; \
|
||||
buf_out[3] = (fls >> 16) & 0xFFU; \
|
||||
buf_out[4] = (fls >> 8) & 0xFFU; \
|
||||
buf_out[5] = fls & 0xFFU; \
|
||||
buf_out += 6;
|
||||
|
||||
#define ENCODE_LLS(buf_out, lls) \
|
||||
buf_out[0] = 0x20U; \
|
||||
buf_out[1] = 0x1BU; \
|
||||
buf_out[2] = (lls >> 24) & 0xFFU; \
|
||||
buf_out[3] = (lls >> 16) & 0xFFU; \
|
||||
buf_out[4] = (lls >> 8) & 0xFFU; \
|
||||
buf_out[5] = lls & 0xFFU; \
|
||||
buf_out += 6;
|
||||
|
||||
#define ENCODE_DROPS(buf_out, drops, amt_type) \
|
||||
buf_out[0] = 0x60U + amt_type; \
|
||||
buf_out[1] = 0x40U + ((drops >> 56) & 0x3FU); \
|
||||
buf_out[2] = (drops >> 48) & 0xFFU; \
|
||||
buf_out[3] = (drops >> 40) & 0xFFU; \
|
||||
buf_out[4] = (drops >> 32) & 0xFFU; \
|
||||
buf_out[5] = (drops >> 24) & 0xFFU; \
|
||||
buf_out[6] = (drops >> 16) & 0xFFU; \
|
||||
buf_out[7] = (drops >> 8) & 0xFFU; \
|
||||
buf_out[8] = drops & 0xFFU; \
|
||||
buf_out += 9;
|
||||
|
||||
#define ENCODE_SIGNING_PUBKEY_EMPTY(buf_out) \
|
||||
buf_out[0] = 0x73U; \
|
||||
buf_out[1] = 0x00U; \
|
||||
buf_out += 2;
|
||||
|
||||
#define ENCODE_ACCOUNT(buf_out, acc, acc_type) \
|
||||
buf_out[0] = 0x80U + acc_type; \
|
||||
buf_out[1] = 0x14U; \
|
||||
for (int i = 0; i < 20; ++i) buf_out[2+i] = acc[i]; \
|
||||
buf_out += 22;
|
||||
|
||||
#define PREPARE_PAYMENT_SIMPLE_SIZE 270U
|
||||
|
||||
int64_t hook(uint32_t reserved) {
|
||||
_g(1, 1);
|
||||
|
||||
if (otxn_type() != ttPAYMENT)
|
||||
return accept(0, 0, 0);
|
||||
|
||||
ASSERT(xport_reserve(1) == 1);
|
||||
|
||||
uint8_t dst[20];
|
||||
int64_t dst_len = otxn_param(SBUF(dst), "DST", 3);
|
||||
ASSERT(dst_len == 20);
|
||||
|
||||
uint8_t acc[20];
|
||||
ASSERT(hook_account(SBUF(acc)) == 20);
|
||||
|
||||
uint32_t cls = (uint32_t)ledger_seq();
|
||||
|
||||
uint8_t tx[PREPARE_PAYMENT_SIMPLE_SIZE];
|
||||
uint8_t* buf = tx;
|
||||
|
||||
ENCODE_TT(buf, ttPAYMENT);
|
||||
ENCODE_NETWORK_ID(buf, 21337); // must precede Sequence (canonical order)
|
||||
ENCODE_FLAGS(buf, tfCANONICAL);
|
||||
ENCODE_SEQUENCE(buf, 0);
|
||||
ENCODE_FLS(buf, cls + 1);
|
||||
ENCODE_LLS(buf, cls + 5);
|
||||
|
||||
uint64_t drops = 1000000;
|
||||
ENCODE_DROPS(buf, drops, amAMOUNT);
|
||||
ENCODE_DROPS(buf, 10, amFEE);
|
||||
|
||||
ENCODE_SIGNING_PUBKEY_EMPTY(buf);
|
||||
ENCODE_ACCOUNT(buf, acc, atACCOUNT);
|
||||
ENCODE_ACCOUNT(buf, dst, atDESTINATION);
|
||||
|
||||
uint8_t hash[32];
|
||||
int64_t xport_result = xport(SBUF(hash), (uint32_t)tx, buf - tx);
|
||||
// xport should return EXPORT_FAILURE (-46), ASSERT will rollback
|
||||
ASSERT(xport_result == 32);
|
||||
|
||||
return accept(0, 0, 0);
|
||||
}
|
||||
)[test.hook]",
|
||||
{
|
||||
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x25U,
|
||||
0x06U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x00U, 0x01U,
|
||||
0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x60U, 0x01U,
|
||||
0x7FU, 0x01U, 0x7EU, 0x60U, 0x04U, 0x7FU, 0x7FU, 0x7FU, 0x7FU, 0x01U,
|
||||
0x7EU, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x8BU, 0x01U,
|
||||
0x09U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U, 0x00U,
|
||||
0x03U, 0x65U, 0x6EU, 0x76U, 0x09U, 0x6FU, 0x74U, 0x78U, 0x6EU, 0x5FU,
|
||||
0x74U, 0x79U, 0x70U, 0x65U, 0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U,
|
||||
0x06U, 0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U, 0x03U,
|
||||
0x65U, 0x6EU, 0x76U, 0x0DU, 0x78U, 0x70U, 0x6FU, 0x72U, 0x74U, 0x5FU,
|
||||
0x72U, 0x65U, 0x73U, 0x65U, 0x72U, 0x76U, 0x65U, 0x00U, 0x03U, 0x03U,
|
||||
0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU, 0x6CU, 0x62U, 0x61U,
|
||||
0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x0AU, 0x6FU,
|
||||
0x74U, 0x78U, 0x6EU, 0x5FU, 0x70U, 0x61U, 0x72U, 0x61U, 0x6DU, 0x00U,
|
||||
0x04U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x0CU, 0x68U, 0x6FU, 0x6FU, 0x6BU,
|
||||
0x5FU, 0x61U, 0x63U, 0x63U, 0x6FU, 0x75U, 0x6EU, 0x74U, 0x00U, 0x05U,
|
||||
0x03U, 0x65U, 0x6EU, 0x76U, 0x0AU, 0x6CU, 0x65U, 0x64U, 0x67U, 0x65U,
|
||||
0x72U, 0x5FU, 0x73U, 0x65U, 0x71U, 0x00U, 0x01U, 0x03U, 0x65U, 0x6EU,
|
||||
0x76U, 0x05U, 0x78U, 0x70U, 0x6FU, 0x72U, 0x74U, 0x00U, 0x04U, 0x03U,
|
||||
0x02U, 0x01U, 0x03U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U, 0x06U, 0x21U,
|
||||
0x05U, 0x7FU, 0x01U, 0x41U, 0xE0U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U,
|
||||
0x41U, 0xD9U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU,
|
||||
0x7FU, 0x00U, 0x41U, 0xE0U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U,
|
||||
0x80U, 0x08U, 0x0BU, 0x07U, 0x08U, 0x01U, 0x04U, 0x68U, 0x6FU, 0x6FU,
|
||||
0x6BU, 0x00U, 0x09U, 0x0AU, 0xCDU, 0x84U, 0x00U, 0x01U, 0xC9U, 0x84U,
|
||||
0x00U, 0x03U, 0x01U, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x7FU, 0x23U, 0x80U,
|
||||
0x80U, 0x80U, 0x80U, 0x00U, 0x41U, 0xF0U, 0x02U, 0x6BU, 0x22U, 0x01U,
|
||||
0x24U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x41U, 0x01U, 0x41U, 0x01U,
|
||||
0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x02U, 0x40U, 0x02U,
|
||||
0x40U, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U, 0x00U, 0x50U, 0x0DU, 0x00U,
|
||||
0x41U, 0x00U, 0x41U, 0x00U, 0x42U, 0x00U, 0x10U, 0x82U, 0x80U, 0x80U,
|
||||
0x80U, 0x00U, 0x21U, 0x02U, 0x0CU, 0x01U, 0x0BU, 0x02U, 0x40U, 0x41U,
|
||||
0x01U, 0x10U, 0x83U, 0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x01U, 0x51U,
|
||||
0x0DU, 0x00U, 0x41U, 0x80U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x16U,
|
||||
0x42U, 0xE8U, 0x00U, 0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU,
|
||||
0x0BU, 0x02U, 0x40U, 0x20U, 0x01U, 0x41U, 0xD0U, 0x02U, 0x6AU, 0x41U,
|
||||
0x14U, 0x41U, 0x96U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x03U, 0x10U,
|
||||
0x85U, 0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x14U, 0x51U, 0x0DU, 0x00U,
|
||||
0x41U, 0x9AU, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x0EU, 0x42U, 0xECU,
|
||||
0x00U, 0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x02U,
|
||||
0x40U, 0x20U, 0x01U, 0x41U, 0xB0U, 0x02U, 0x6AU, 0x41U, 0x14U, 0x10U,
|
||||
0x86U, 0x80U, 0x80U, 0x80U, 0x00U, 0x42U, 0x14U, 0x51U, 0x0DU, 0x00U,
|
||||
0x41U, 0xA8U, 0x88U, 0x80U, 0x80U, 0x00U, 0x41U, 0x1EU, 0x42U, 0xEFU,
|
||||
0x00U, 0x10U, 0x84U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x10U,
|
||||
0x87U, 0x80U, 0x80U, 0x80U, 0x00U, 0x21U, 0x02U, 0x20U, 0x01U, 0x41U,
|
||||
0xC0U, 0x00U, 0x3AU, 0x00U, 0x48U, 0x20U, 0x01U, 0x42U, 0x80U, 0x80U,
|
||||
0x80U, 0x80U, 0xF0U, 0xC1U, 0x90U, 0xA0U, 0xE8U, 0x00U, 0x37U, 0x03U,
|
||||
0x40U, 0x20U, 0x01U, 0x41U, 0xE1U, 0x80U, 0x01U, 0x3BU, 0x01U, 0x3EU,
|
||||
0x20U, 0x01U, 0x41U, 0xA0U, 0x36U, 0x3BU, 0x01U, 0x38U, 0x20U, 0x01U,
|
||||
0x41U, 0xA0U, 0x34U, 0x3BU, 0x01U, 0x32U, 0x20U, 0x01U, 0x41U, 0x00U,
|
||||
0x36U, 0x01U, 0x2EU, 0x20U, 0x01U, 0x41U, 0x80U, 0xC8U, 0x00U, 0x3BU,
|
||||
0x01U, 0x2CU, 0x20U, 0x01U, 0x41U, 0xA2U, 0x80U, 0x02U, 0x36U, 0x02U,
|
||||
0x28U, 0x20U, 0x01U, 0x42U, 0x92U, 0x80U, 0x80U, 0x88U, 0x82U, 0x80U,
|
||||
0xC0U, 0xA9U, 0xD9U, 0x00U, 0x37U, 0x03U, 0x20U, 0x20U, 0x01U, 0x20U,
|
||||
0x02U, 0xA7U, 0x22U, 0x03U, 0x41U, 0x05U, 0x6AU, 0x22U, 0x04U, 0x3AU,
|
||||
0x00U, 0x3DU, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x08U, 0x76U, 0x3AU,
|
||||
0x00U, 0x3CU, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x10U, 0x76U, 0x3AU,
|
||||
0x00U, 0x3BU, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x18U, 0x76U, 0x3AU,
|
||||
0x00U, 0x3AU, 0x20U, 0x01U, 0x20U, 0x03U, 0x41U, 0x01U, 0x6AU, 0x22U,
|
||||
0x04U, 0x3AU, 0x00U, 0x37U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x08U,
|
||||
0x76U, 0x3AU, 0x00U, 0x36U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x10U,
|
||||
0x76U, 0x3AU, 0x00U, 0x35U, 0x20U, 0x01U, 0x20U, 0x04U, 0x41U, 0x18U,
|
||||
0x76U, 0x3AU, 0x00U, 0x34U, 0x20U, 0x01U, 0x41U, 0xCDU, 0x00U, 0x6AU,
|
||||
0x41U, 0x00U, 0x3BU, 0x00U, 0x00U, 0x20U, 0x01U, 0x41U, 0xDCU, 0x00U,
|
||||
0x6AU, 0x20U, 0x01U, 0x29U, 0x03U, 0xB8U, 0x02U, 0x37U, 0x02U, 0x00U,
|
||||
0x20U, 0x01U, 0x41U, 0xE4U, 0x00U, 0x6AU, 0x20U, 0x01U, 0x41U, 0xB0U,
|
||||
0x02U, 0x6AU, 0x41U, 0x10U, 0x6AU, 0x28U, 0x02U, 0x00U, 0x36U, 0x02U,
|
||||
0x00U, 0x20U, 0x01U, 0x41U, 0xF2U, 0x00U, 0x6AU, 0x20U, 0x01U, 0x29U,
|
||||
0x03U, 0xD8U, 0x02U, 0x37U, 0x01U, 0x00U, 0x20U, 0x01U, 0x41U, 0xFAU,
|
||||
0x00U, 0x6AU, 0x20U, 0x01U, 0x41U, 0xD0U, 0x02U, 0x6AU, 0x41U, 0x10U,
|
||||
0x6AU, 0x28U, 0x02U, 0x00U, 0x36U, 0x01U, 0x00U, 0x20U, 0x01U, 0x41U,
|
||||
0x00U, 0x36U, 0x00U, 0x49U, 0x20U, 0x01U, 0x41U, 0x8AU, 0xE6U, 0x81U,
|
||||
0x88U, 0x78U, 0x36U, 0x00U, 0x4FU, 0x20U, 0x01U, 0x41U, 0x14U, 0x3AU,
|
||||
0x00U, 0x53U, 0x20U, 0x01U, 0x41U, 0x83U, 0x29U, 0x3BU, 0x01U, 0x68U,
|
||||
0x20U, 0x01U, 0x20U, 0x01U, 0x29U, 0x03U, 0xB0U, 0x02U, 0x37U, 0x02U,
|
||||
0x54U, 0x20U, 0x01U, 0x20U, 0x01U, 0x29U, 0x03U, 0xD0U, 0x02U, 0x37U,
|
||||
0x01U, 0x6AU, 0x02U, 0x40U, 0x20U, 0x01U, 0x41U, 0x20U, 0x20U, 0x01U,
|
||||
0x41U, 0x20U, 0x6AU, 0x41U, 0xDEU, 0x00U, 0x10U, 0x88U, 0x80U, 0x80U,
|
||||
0x80U, 0x00U, 0x42U, 0x20U, 0x51U, 0x0DU, 0x00U, 0x41U, 0xC6U, 0x88U,
|
||||
0x80U, 0x80U, 0x00U, 0x41U, 0x13U, 0x42U, 0x88U, 0x01U, 0x10U, 0x84U,
|
||||
0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x41U, 0x00U, 0x41U, 0x00U,
|
||||
0x42U, 0x00U, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x21U, 0x02U,
|
||||
0x0BU, 0x20U, 0x01U, 0x41U, 0xF0U, 0x02U, 0x6AU, 0x24U, 0x80U, 0x80U,
|
||||
0x80U, 0x80U, 0x00U, 0x20U, 0x02U, 0x0BU, 0x0BU, 0x60U, 0x01U, 0x00U,
|
||||
0x41U, 0x80U, 0x08U, 0x0BU, 0x59U, 0x78U, 0x70U, 0x6FU, 0x72U, 0x74U,
|
||||
0x5FU, 0x72U, 0x65U, 0x73U, 0x65U, 0x72U, 0x76U, 0x65U, 0x28U, 0x31U,
|
||||
0x29U, 0x20U, 0x3DU, 0x3DU, 0x20U, 0x31U, 0x00U, 0x44U, 0x53U, 0x54U,
|
||||
0x00U, 0x64U, 0x73U, 0x74U, 0x5FU, 0x6CU, 0x65U, 0x6EU, 0x20U, 0x3DU,
|
||||
0x3DU, 0x20U, 0x32U, 0x30U, 0x00U, 0x68U, 0x6FU, 0x6FU, 0x6BU, 0x5FU,
|
||||
0x61U, 0x63U, 0x63U, 0x6FU, 0x75U, 0x6EU, 0x74U, 0x28U, 0x53U, 0x42U,
|
||||
0x55U, 0x46U, 0x28U, 0x61U, 0x63U, 0x63U, 0x29U, 0x29U, 0x20U, 0x3DU,
|
||||
0x3DU, 0x20U, 0x32U, 0x30U, 0x00U, 0x78U, 0x70U, 0x6FU, 0x72U, 0x74U,
|
||||
0x5FU, 0x72U, 0x65U, 0x73U, 0x75U, 0x6CU, 0x74U, 0x20U, 0x3DU, 0x3DU,
|
||||
0x20U, 0x33U, 0x32U, 0x00U,
|
||||
}},
|
||||
|
||||
};
|
||||
}
|
||||
} // namespace ripple
|
||||
#endif
|
||||
301
src/test/app/XPOP_test.cpp
Normal file
301
src/test/app/XPOP_test.cpp
Normal file
@@ -0,0 +1,301 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2026 XRPL Labs
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <test/jtx.h>
|
||||
#include <test/jtx/import.h>
|
||||
#include <test/jtx/xpop.h>
|
||||
#include <xrpld/app/ledger/LedgerMaster.h>
|
||||
#include <xrpld/app/proof/LedgerProof.h>
|
||||
#include <xrpld/app/proof/ProofBuilder.h>
|
||||
#include <xrpld/app/proof/XPOPv1.h>
|
||||
#include <xrpl/protocol/Import.h>
|
||||
#include <xrpl/protocol/jss.h>
|
||||
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
|
||||
struct XPOP_test : public beast::unit_test::suite
|
||||
{
|
||||
void
|
||||
testBuildLedgerProof()
|
||||
{
|
||||
testcase("Build LedgerProof from a payment");
|
||||
|
||||
using namespace jtx;
|
||||
|
||||
Env env{*this};
|
||||
|
||||
Account const alice{"alice"};
|
||||
Account const bob{"bob"};
|
||||
|
||||
env.fund(XRP(10000), alice, bob);
|
||||
env.close();
|
||||
|
||||
// Submit a payment and close the ledger.
|
||||
env(pay(alice, bob, XRP(100)));
|
||||
env.close();
|
||||
|
||||
// Get the tx hash from the last closed ledger.
|
||||
auto const lcl = env.app().getLedgerMaster().getClosedLedger();
|
||||
BEAST_EXPECT(lcl);
|
||||
|
||||
// Find a payment tx in the ledger.
|
||||
uint256 paymentHash;
|
||||
bool found = false;
|
||||
lcl->txMap().visitLeaves(
|
||||
[&](boost::intrusive_ptr<SHAMapItem const> const& item) {
|
||||
if (!found)
|
||||
{
|
||||
paymentHash = item->key();
|
||||
found = true;
|
||||
}
|
||||
});
|
||||
BEAST_EXPECT(found);
|
||||
|
||||
// Build the proof.
|
||||
auto const lp = proof::buildLedgerProof(*lcl, paymentHash);
|
||||
BEAST_EXPECT(lp.has_value());
|
||||
|
||||
if (lp)
|
||||
{
|
||||
// Verify header fields are populated.
|
||||
BEAST_EXPECT(lp->ledgerIndex > 0);
|
||||
BEAST_EXPECT(lp->totalCoins > 0);
|
||||
BEAST_EXPECT(lp->parentHash != uint256{});
|
||||
BEAST_EXPECT(lp->txRoot != uint256{});
|
||||
BEAST_EXPECT(lp->accountRoot != uint256{});
|
||||
|
||||
// Verify tx blob is non-empty.
|
||||
BEAST_EXPECT(!lp->txBlob.empty());
|
||||
BEAST_EXPECT(!lp->metaBlob.empty());
|
||||
|
||||
// Verify merkle proof exists and is valid.
|
||||
BEAST_EXPECT(lp->txProof.has_value());
|
||||
if (lp->txProof)
|
||||
{
|
||||
auto const computedRoot = lp->txProof->computeRoot();
|
||||
BEAST_EXPECT(computedRoot.has_value());
|
||||
if (computedRoot)
|
||||
BEAST_EXPECT(*computedRoot == lp->txRoot);
|
||||
}
|
||||
|
||||
// Verify ledger hash reconstruction.
|
||||
auto const computedHash = lp->computeLedgerHash();
|
||||
BEAST_EXPECT(computedHash == lcl->info().hash);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
testBuildXPOPv1()
|
||||
{
|
||||
testcase("Build XPOP v1 JSON from a payment");
|
||||
|
||||
using namespace jtx;
|
||||
|
||||
Env env{*this};
|
||||
|
||||
Account const alice{"alice"};
|
||||
Account const bob{"bob"};
|
||||
|
||||
env.fund(XRP(10000), alice, bob);
|
||||
env.close();
|
||||
|
||||
env(pay(alice, bob, XRP(100)));
|
||||
env.close();
|
||||
|
||||
auto const lcl = env.app().getLedgerMaster().getClosedLedger();
|
||||
BEAST_EXPECT(lcl);
|
||||
|
||||
// Find a tx.
|
||||
uint256 txHash;
|
||||
lcl->txMap().visitLeaves(
|
||||
[&](boost::intrusive_ptr<SHAMapItem const> const& item) {
|
||||
txHash = item->key();
|
||||
});
|
||||
|
||||
// Build XPOP using the test helper.
|
||||
auto const xpop = xpop::buildTestXPOP(env, txHash, 3);
|
||||
BEAST_EXPECT(!xpop.isNull());
|
||||
|
||||
// Verify structure.
|
||||
BEAST_EXPECT(xpop.isMember(jss::ledger));
|
||||
BEAST_EXPECT(xpop.isMember(jss::transaction));
|
||||
BEAST_EXPECT(xpop.isMember(jss::validation));
|
||||
|
||||
// Ledger section.
|
||||
auto const& lgr = xpop[jss::ledger];
|
||||
BEAST_EXPECT(lgr.isMember(jss::index));
|
||||
BEAST_EXPECT(lgr.isMember(jss::coins));
|
||||
BEAST_EXPECT(lgr.isMember(jss::phash));
|
||||
BEAST_EXPECT(lgr.isMember(jss::txroot));
|
||||
BEAST_EXPECT(lgr.isMember(jss::acroot));
|
||||
BEAST_EXPECT(lgr.isMember(jss::close));
|
||||
BEAST_EXPECT(lgr.isMember(jss::pclose));
|
||||
BEAST_EXPECT(lgr.isMember(jss::cres));
|
||||
BEAST_EXPECT(lgr.isMember(jss::flags));
|
||||
|
||||
// Transaction section.
|
||||
auto const& txn = xpop[jss::transaction];
|
||||
BEAST_EXPECT(txn.isMember(jss::blob));
|
||||
BEAST_EXPECT(txn.isMember(jss::meta));
|
||||
BEAST_EXPECT(txn.isMember(jss::proof));
|
||||
BEAST_EXPECT(txn[jss::blob].asString().size() > 0);
|
||||
BEAST_EXPECT(txn[jss::meta].asString().size() > 0);
|
||||
|
||||
// Validation section.
|
||||
auto const& val = xpop[jss::validation];
|
||||
BEAST_EXPECT(val.isMember(jss::data));
|
||||
BEAST_EXPECT(val.isMember(jss::unl));
|
||||
BEAST_EXPECT(val[jss::data].size() == 3); // 3 validators
|
||||
|
||||
auto const& unl = val[jss::unl];
|
||||
BEAST_EXPECT(unl.isMember(jss::public_key));
|
||||
BEAST_EXPECT(unl.isMember(jss::manifest));
|
||||
BEAST_EXPECT(unl.isMember(jss::blob));
|
||||
BEAST_EXPECT(unl.isMember(jss::signature));
|
||||
BEAST_EXPECT(unl.isMember(jss::version));
|
||||
}
|
||||
|
||||
void
|
||||
testMerkleProofVerification()
|
||||
{
|
||||
testcase("Merkle proof verifies against tx root");
|
||||
|
||||
using namespace jtx;
|
||||
|
||||
Env env{*this};
|
||||
|
||||
Account const alice{"alice"};
|
||||
Account const bob{"bob"};
|
||||
Account const carol{"carol"};
|
||||
|
||||
env.fund(XRP(10000), alice, bob, carol);
|
||||
env.close();
|
||||
|
||||
// Multiple transactions to create a deeper trie.
|
||||
env(pay(alice, bob, XRP(10)));
|
||||
env(pay(bob, carol, XRP(5)));
|
||||
env(pay(carol, alice, XRP(1)));
|
||||
env.close();
|
||||
|
||||
auto const lcl = env.app().getLedgerMaster().getClosedLedger();
|
||||
BEAST_EXPECT(lcl);
|
||||
|
||||
// Verify proof for each transaction in the ledger.
|
||||
int proofCount = 0;
|
||||
lcl->txMap().visitLeaves(
|
||||
[&](boost::intrusive_ptr<SHAMapItem const> const& item) {
|
||||
auto const lp = proof::buildLedgerProof(*lcl, item->key());
|
||||
BEAST_EXPECT(lp.has_value());
|
||||
|
||||
if (lp && lp->txProof)
|
||||
{
|
||||
// Proof must verify against the ledger's tx root.
|
||||
BEAST_EXPECT(lp->txProof->verify(lp->txRoot));
|
||||
|
||||
// JSON v1 serialization must round-trip.
|
||||
auto const json = lp->txProof->toJsonV1();
|
||||
BEAST_EXPECT(!json.isNull());
|
||||
BEAST_EXPECT(json.isArray());
|
||||
|
||||
++proofCount;
|
||||
}
|
||||
});
|
||||
|
||||
// We should have proven at least 3 transactions.
|
||||
BEAST_EXPECT(proofCount >= 3);
|
||||
}
|
||||
|
||||
void
|
||||
testImportWithGeneratedXPOP()
|
||||
{
|
||||
testcase("Import accepts dynamically generated XPOP");
|
||||
|
||||
using namespace jtx;
|
||||
|
||||
// Create XPOP context (VL publisher + validators).
|
||||
auto const xpopCtx = xpop::TestXPOPContext::create(3);
|
||||
|
||||
// --- Source "network": generate a payment and build XPOP ---
|
||||
Env srcEnv{*this};
|
||||
Account const alice{"alice"};
|
||||
Account const bob{"bob"};
|
||||
|
||||
srcEnv.fund(XRP(10000), alice, bob);
|
||||
srcEnv.close();
|
||||
|
||||
// Import requires: no sfNetworkID + sfOperationLimit = dest NETWORK_ID.
|
||||
Json::Value payTx;
|
||||
payTx[jss::TransactionType] = jss::Payment;
|
||||
payTx[jss::Account] = alice.human();
|
||||
payTx[jss::Destination] = bob.human();
|
||||
payTx[jss::Amount] = "100000000";
|
||||
payTx[sfOperationLimit.jsonName] = 21337;
|
||||
srcEnv(payTx, fee(XRP(1)));
|
||||
srcEnv.close();
|
||||
|
||||
// Find the tx hash and build the XPOP.
|
||||
auto const srcLcl = srcEnv.app().getLedgerMaster().getClosedLedger();
|
||||
BEAST_EXPECT(srcLcl);
|
||||
|
||||
uint256 paymentHash;
|
||||
srcLcl->txMap().visitLeaves(
|
||||
[&](boost::intrusive_ptr<SHAMapItem const> const& item) {
|
||||
paymentHash = item->key();
|
||||
});
|
||||
|
||||
auto const xpopJson = xpopCtx.buildXPOP(*srcLcl, paymentHash);
|
||||
BEAST_EXPECT(!xpopJson.isNull());
|
||||
|
||||
// --- Destination "network": import the XPOP ---
|
||||
Env dstEnv{*this, xpopCtx.makeEnvConfig(21337)};
|
||||
|
||||
// Burn some XRP so B2M can credit.
|
||||
auto const master = Account("masterpassphrase");
|
||||
dstEnv(noop(master), fee(10'000'000'000), ter(tesSUCCESS));
|
||||
dstEnv.close();
|
||||
|
||||
Account const importAlice{"alice"};
|
||||
dstEnv.fund(XRP(1000), importAlice);
|
||||
dstEnv.close();
|
||||
|
||||
auto const feeDrops = dstEnv.current()->fees().base;
|
||||
|
||||
// Submit the import — should succeed (B2M path).
|
||||
dstEnv(
|
||||
import::import(importAlice, xpopJson),
|
||||
fee(feeDrops * 10),
|
||||
ter(tesSUCCESS));
|
||||
dstEnv.close();
|
||||
}
|
||||
|
||||
void
|
||||
run() override
|
||||
{
|
||||
testBuildLedgerProof();
|
||||
testBuildXPOPv1();
|
||||
testMerkleProofVerification();
|
||||
testImportWithGeneratedXPOP();
|
||||
}
|
||||
};
|
||||
|
||||
BEAST_DEFINE_TESTSUITE(XPOP, app, ripple);
|
||||
|
||||
} // namespace test
|
||||
} // namespace ripple
|
||||
732
src/test/consensus/ConsensusExtensions_test.cpp
Normal file
732
src/test/consensus/ConsensusExtensions_test.cpp
Normal file
@@ -0,0 +1,732 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <test/jtx.h>
|
||||
#include <xrpld/app/consensus/ConsensusExtensions.h>
|
||||
#include <xrpld/app/ledger/Ledger.h>
|
||||
#include <xrpld/app/misc/ValidatorKeys.h>
|
||||
#include <xrpld/consensus/ConsensusExtensionsTick.h>
|
||||
#include <xrpld/consensus/ConsensusProposal.h>
|
||||
#include <xrpl/basics/StringUtilities.h>
|
||||
#include <xrpl/beast/unit_test.h>
|
||||
#include <xrpl/protocol/Feature.h>
|
||||
#include <xrpl/protocol/Indexes.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
#include <cstring>
|
||||
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
|
||||
namespace {
|
||||
|
||||
uint256
|
||||
makeHash(char const* label)
|
||||
{
|
||||
return sha512Half(Slice(label, std::strlen(label)));
|
||||
}
|
||||
|
||||
NodeID
|
||||
makeNode(std::uint8_t id)
|
||||
{
|
||||
NodeID node;
|
||||
node.zero();
|
||||
node.data()[NodeID::size() - 1] = id;
|
||||
return node;
|
||||
}
|
||||
|
||||
std::string
|
||||
makeExportSigBlob(uint256 const& txHash, PublicKey const& publicKey)
|
||||
{
|
||||
std::string blob;
|
||||
blob.append(reinterpret_cast<char const*>(txHash.data()), uint256::size());
|
||||
blob.append(
|
||||
reinterpret_cast<char const*>(publicKey.data()), publicKey.size());
|
||||
blob.push_back('\x30');
|
||||
return blob;
|
||||
}
|
||||
|
||||
struct FakeTxSet
|
||||
{
|
||||
using ID = uint256;
|
||||
|
||||
uint256 hash;
|
||||
|
||||
uint256
|
||||
id() const
|
||||
{
|
||||
return hash;
|
||||
}
|
||||
};
|
||||
|
||||
class FakePeerPosition
|
||||
{
|
||||
public:
|
||||
using Proposal = ConsensusProposal<NodeID, uint256, ExtendedPosition>;
|
||||
|
||||
FakePeerPosition(NodeID const& nodeId, ExtendedPosition const& position)
|
||||
: proposal_(
|
||||
uint256{},
|
||||
Proposal::seqJoin,
|
||||
position,
|
||||
NetClock::time_point{},
|
||||
NetClock::time_point{},
|
||||
nodeId)
|
||||
{
|
||||
}
|
||||
|
||||
Proposal const&
|
||||
proposal() const
|
||||
{
|
||||
return proposal_;
|
||||
}
|
||||
|
||||
private:
|
||||
Proposal proposal_;
|
||||
};
|
||||
|
||||
struct FakeExtensions
|
||||
{
|
||||
enum class SidecarKind : uint8_t { commit, reveal, exportSig };
|
||||
|
||||
beast::Journal j_{beast::Journal::getNullSink()};
|
||||
EstablishState estState_{EstablishState::ConvergingTx};
|
||||
std::chrono::steady_clock::time_point revealPhaseStart_{};
|
||||
std::chrono::steady_clock::time_point commitHashConflictStart_{};
|
||||
bool explicitFinalProposalSent_{false};
|
||||
bool entropySetPublished_{false};
|
||||
std::chrono::steady_clock::time_point entropyPublishStart_{};
|
||||
bool exportSigGateStarted_{false};
|
||||
std::chrono::steady_clock::time_point exportSigGateStart_{};
|
||||
bool exportSigConvergenceFailed_{false};
|
||||
bool rngOn{false};
|
||||
bool localExportSigs{true};
|
||||
bool consensusExportTxns{false};
|
||||
bool exportOn{true};
|
||||
bool entropyFailed{false};
|
||||
std::size_t exportQuorum{4};
|
||||
uint256 exportHash{makeHash("local-export-sig-set")};
|
||||
uint256 entropyHash{makeHash("local-entropy-set")};
|
||||
std::vector<uint256> fetchedExportSets;
|
||||
std::vector<uint256> fetchedEntropySets;
|
||||
int exportBuilds = 0;
|
||||
int entropyBuilds = 0;
|
||||
|
||||
bool
|
||||
rngEnabled() const
|
||||
{
|
||||
return rngOn;
|
||||
}
|
||||
|
||||
bool
|
||||
exportEnabled() const
|
||||
{
|
||||
return exportOn;
|
||||
}
|
||||
|
||||
std::size_t
|
||||
quorumThreshold() const
|
||||
{
|
||||
return exportQuorum;
|
||||
}
|
||||
|
||||
std::size_t
|
||||
exportSigQuorumThreshold() const
|
||||
{
|
||||
return exportQuorum;
|
||||
}
|
||||
|
||||
std::size_t
|
||||
pendingCommitCount() const
|
||||
{
|
||||
return rngOn ? exportQuorum : 0;
|
||||
}
|
||||
|
||||
std::size_t
|
||||
pendingRevealCount() const
|
||||
{
|
||||
return rngOn ? exportQuorum : 0;
|
||||
}
|
||||
|
||||
std::size_t
|
||||
expectedProposerCount() const
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool
|
||||
hasQuorumOfCommits() const
|
||||
{
|
||||
return rngOn;
|
||||
}
|
||||
|
||||
bool
|
||||
hasMinimumReveals() const
|
||||
{
|
||||
return rngOn;
|
||||
}
|
||||
|
||||
bool
|
||||
hasAnyReveals() const
|
||||
{
|
||||
return rngOn;
|
||||
}
|
||||
|
||||
uint256
|
||||
buildCommitSet(LedgerIndex)
|
||||
{
|
||||
return makeHash("commit-set");
|
||||
}
|
||||
|
||||
uint256
|
||||
buildEntropySet(LedgerIndex)
|
||||
{
|
||||
++entropyBuilds;
|
||||
return entropyHash;
|
||||
}
|
||||
|
||||
uint256
|
||||
getEntropySecret() const
|
||||
{
|
||||
return makeHash("entropy-secret");
|
||||
}
|
||||
|
||||
void
|
||||
selfSeedReveal()
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
setEntropyFailed()
|
||||
{
|
||||
entropyFailed = true;
|
||||
}
|
||||
|
||||
void
|
||||
fetchRngSetIfNeeded(std::optional<uint256> const& hash, SidecarKind kind)
|
||||
{
|
||||
if (kind == SidecarKind::reveal && hash)
|
||||
fetchedEntropySets.push_back(*hash);
|
||||
else if (kind == SidecarKind::exportSig && hash)
|
||||
fetchedExportSets.push_back(*hash);
|
||||
}
|
||||
|
||||
bool
|
||||
shouldSendExplicitFinalProposal() const
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
std::optional<FakeTxSet>
|
||||
buildExplicitFinalProposalTxSet(FakeTxSet const&, LedgerIndex)
|
||||
{
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
bool
|
||||
hasPendingExportSigs() const
|
||||
{
|
||||
return localExportSigs;
|
||||
}
|
||||
|
||||
bool
|
||||
hasConsensusExportTxns() const
|
||||
{
|
||||
return consensusExportTxns;
|
||||
}
|
||||
|
||||
uint256
|
||||
buildExportSigSet(LedgerIndex)
|
||||
{
|
||||
++exportBuilds;
|
||||
return exportHash;
|
||||
}
|
||||
|
||||
void
|
||||
setExportSigConvergenceFailed()
|
||||
{
|
||||
exportSigConvergenceFailed_ = true;
|
||||
}
|
||||
};
|
||||
|
||||
struct ExportTickHarness
|
||||
{
|
||||
ExtendedPosition position{makeHash("tx-set")};
|
||||
FakeTxSet txns{position.txSetHash};
|
||||
hash_map<NodeID, FakePeerPosition> peers;
|
||||
ConsensusParms parms;
|
||||
NetClock::time_point netNow{NetClock::duration{123}};
|
||||
std::chrono::steady_clock::time_point start{};
|
||||
std::size_t prevProposers = 4;
|
||||
int updates = 0;
|
||||
int proposes = 0;
|
||||
|
||||
void
|
||||
addPeer(
|
||||
std::uint8_t id,
|
||||
std::optional<uint256> exportSigSetHash,
|
||||
uint256 txSetHash = makeHash("tx-set"))
|
||||
{
|
||||
ExtendedPosition peerPosition{txSetHash};
|
||||
peerPosition.exportSigSetHash = exportSigSetHash;
|
||||
peers.emplace(
|
||||
makeNode(id), FakePeerPosition{makeNode(id), peerPosition});
|
||||
}
|
||||
|
||||
void
|
||||
addEntropyPeer(
|
||||
std::uint8_t id,
|
||||
std::optional<uint256> entropySetHash,
|
||||
uint256 txSetHash = makeHash("tx-set"))
|
||||
{
|
||||
ExtendedPosition peerPosition{txSetHash};
|
||||
peerPosition.entropySetHash = entropySetHash;
|
||||
peers.emplace(
|
||||
makeNode(id), FakePeerPosition{makeNode(id), peerPosition});
|
||||
}
|
||||
|
||||
ExtensionTickResult
|
||||
tick(FakeExtensions& ext, std::chrono::milliseconds elapsed = {})
|
||||
{
|
||||
ConsensusTick<ExtendedPosition, FakePeerPosition, FakeTxSet> ctx{
|
||||
.buildSeq = 2,
|
||||
.now = netNow,
|
||||
.nowSteady = start + elapsed,
|
||||
.roundTime = elapsed,
|
||||
.mode = ConsensusMode::proposing,
|
||||
.prevProposers = prevProposers,
|
||||
.peerPositions = peers,
|
||||
.parms = parms,
|
||||
.haveCloseTimeConsensus = true,
|
||||
.convergePercent = 100,
|
||||
.j = beast::Journal{beast::Journal::getNullSink()},
|
||||
.getPosition = [&]() -> ExtendedPosition const& {
|
||||
return position;
|
||||
},
|
||||
.updatePosition =
|
||||
[&](ExtendedPosition const& newPosition) {
|
||||
position = newPosition;
|
||||
++updates;
|
||||
},
|
||||
.propose = [&]() { ++proposes; },
|
||||
.haveConsensus = []() { return true; },
|
||||
.cacheAndShareTxSet = [](FakeTxSet const&) {},
|
||||
.getTxns = [&]() -> FakeTxSet const& { return txns; }};
|
||||
|
||||
return extensionsTick(ext, ctx);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
class ConsensusExtensions_test : public beast::unit_test::suite
|
||||
{
|
||||
std::vector<PublicKey>
|
||||
makeValidatorKeys() const
|
||||
{
|
||||
std::vector<std::string> const rawKeys = {
|
||||
"0388935426E0D08083314842EDFBB2D517BD47699F9A4527318A8E10468C97C05"
|
||||
"2",
|
||||
"02691AC5AE1C4C333AE5DF8A93BDC495F0EEBFC6DB0DA7EB6EF808F3AFC006E3F"
|
||||
"E"};
|
||||
|
||||
std::vector<PublicKey> keys;
|
||||
keys.reserve(rawKeys.size());
|
||||
for (auto const& rawKey : rawKeys)
|
||||
{
|
||||
auto const pkHex = strUnHex(rawKey);
|
||||
keys.emplace_back(makeSlice(*pkHex));
|
||||
}
|
||||
return keys;
|
||||
}
|
||||
|
||||
void
|
||||
testActiveValidatorViewAppliesNegativeUNL()
|
||||
{
|
||||
testcase("Active validator view applies NegativeUNL");
|
||||
|
||||
using namespace jtx;
|
||||
Env env{
|
||||
*this,
|
||||
envconfig(),
|
||||
supported_amendments() | featureNegativeUNL,
|
||||
nullptr};
|
||||
|
||||
auto const vlKeys = makeValidatorKeys();
|
||||
auto const genesis = std::make_shared<Ledger>(
|
||||
create_genesis,
|
||||
env.app().config(),
|
||||
std::vector<uint256>{},
|
||||
env.app().getNodeFamily());
|
||||
auto l = std::make_shared<Ledger>(
|
||||
*genesis, env.app().timeKeeper().closeTime());
|
||||
BEAST_EXPECT(l->rules().enabled(featureNegativeUNL));
|
||||
|
||||
auto report = std::make_shared<SLE>(keylet::UNLReport());
|
||||
std::vector<STObject> activeValidators;
|
||||
for (auto const& pk : vlKeys)
|
||||
{
|
||||
activeValidators.push_back(
|
||||
STObject::makeInnerObject(sfActiveValidator));
|
||||
activeValidators.back().setFieldVL(sfPublicKey, pk);
|
||||
}
|
||||
report->setFieldArray(
|
||||
sfActiveValidators, STArray(activeValidators, sfActiveValidators));
|
||||
|
||||
auto negUnl = std::make_shared<SLE>(keylet::negativeUNL());
|
||||
std::vector<STObject> disabledValidators;
|
||||
disabledValidators.push_back(
|
||||
STObject::makeInnerObject(sfDisabledValidator));
|
||||
disabledValidators.back().setFieldVL(sfPublicKey, vlKeys[0]);
|
||||
disabledValidators.back().setFieldU32(sfFirstLedgerSequence, l->seq());
|
||||
negUnl->setFieldArray(
|
||||
sfDisabledValidators,
|
||||
STArray(disabledValidators, sfDisabledValidators));
|
||||
OpenView accum(&*l);
|
||||
accum.rawInsert(report);
|
||||
accum.rawInsert(negUnl);
|
||||
accum.apply(*l);
|
||||
|
||||
ConsensusExtensions ce{env.app(), env.journal};
|
||||
auto const view = ce.makeActiveValidatorView(l);
|
||||
|
||||
BEAST_EXPECT(view->fromUNLReport);
|
||||
BEAST_EXPECT(view->size() == 1);
|
||||
BEAST_EXPECT(!view->containsMaster(vlKeys[0]));
|
||||
BEAST_EXPECT(!view->containsNode(calcNodeID(vlKeys[0])));
|
||||
BEAST_EXPECT(view->containsMaster(vlKeys[1]));
|
||||
BEAST_EXPECT(view->containsNode(calcNodeID(vlKeys[1])));
|
||||
}
|
||||
|
||||
void
|
||||
testExportSigGateRequiresQuorumAlignment()
|
||||
{
|
||||
testcase("Export sig gate requires quorum alignment");
|
||||
|
||||
FakeExtensions ext;
|
||||
ExportTickHarness harness;
|
||||
auto const localHash = ext.exportHash;
|
||||
|
||||
harness.addPeer(1, localHash);
|
||||
harness.addPeer(2, localHash);
|
||||
|
||||
auto result = harness.tick(ext);
|
||||
BEAST_EXPECT(!result.readyForAccept);
|
||||
BEAST_EXPECT(harness.position.exportSigSetHash == localHash);
|
||||
BEAST_EXPECT(ext.exportSigGateStarted_);
|
||||
|
||||
result = harness.tick(ext, std::chrono::milliseconds{100});
|
||||
BEAST_EXPECT(!result.readyForAccept);
|
||||
BEAST_EXPECT(!ext.exportSigConvergenceFailed_);
|
||||
|
||||
result = harness.tick(
|
||||
ext,
|
||||
harness.parms.rngREVEAL_TIMEOUT * 2 + std::chrono::milliseconds{1});
|
||||
BEAST_EXPECT(result.readyForAccept);
|
||||
BEAST_EXPECT(ext.exportSigConvergenceFailed_);
|
||||
}
|
||||
|
||||
void
|
||||
testRngEntropyGateRequiresFullObservation()
|
||||
{
|
||||
testcase("RNG entropy gate requires full sidecar observation");
|
||||
|
||||
FakeExtensions ext;
|
||||
ext.rngOn = true;
|
||||
ext.exportOn = false;
|
||||
ext.estState_ = EstablishState::ConvergingReveal;
|
||||
|
||||
ExportTickHarness harness;
|
||||
auto const localHash = ext.entropyHash;
|
||||
|
||||
harness.addEntropyPeer(1, localHash);
|
||||
harness.addEntropyPeer(2, localHash);
|
||||
harness.addEntropyPeer(3, localHash);
|
||||
harness.addEntropyPeer(4, std::nullopt);
|
||||
|
||||
auto result = harness.tick(ext);
|
||||
BEAST_EXPECT(!result.readyForAccept);
|
||||
BEAST_EXPECT(harness.position.entropySetHash == localHash);
|
||||
BEAST_EXPECT(ext.entropySetPublished_);
|
||||
|
||||
// Quorum alignment is not safe if a tx-converged peer has not
|
||||
// advertised any entropySetHash. Otherwise local observation order
|
||||
// can split non-zero entropy from deterministic zero fallback.
|
||||
result = harness.tick(ext, std::chrono::milliseconds{100});
|
||||
BEAST_EXPECT(!result.readyForAccept);
|
||||
BEAST_EXPECT(!ext.entropyFailed);
|
||||
BEAST_EXPECT(harness.position.entropySetHash == localHash);
|
||||
|
||||
result = harness.tick(
|
||||
ext,
|
||||
harness.parms.rngREVEAL_TIMEOUT * 2 + std::chrono::milliseconds{1});
|
||||
BEAST_EXPECT(result.readyForAccept);
|
||||
BEAST_EXPECT(ext.entropyFailed);
|
||||
BEAST_EXPECT(!harness.position.entropySetHash);
|
||||
}
|
||||
|
||||
void
|
||||
testRngFastPathWaitsAfterEntropyPublish()
|
||||
{
|
||||
testcase("RNG fast path waits after entropy publish");
|
||||
|
||||
FakeExtensions ext;
|
||||
ext.rngOn = true;
|
||||
ext.exportOn = false;
|
||||
ext.estState_ = EstablishState::ConvergingCommit;
|
||||
|
||||
ExportTickHarness harness;
|
||||
auto const localHash = ext.entropyHash;
|
||||
|
||||
harness.addEntropyPeer(1, localHash);
|
||||
harness.addEntropyPeer(2, localHash);
|
||||
harness.addEntropyPeer(3, localHash);
|
||||
harness.addEntropyPeer(4, localHash);
|
||||
|
||||
auto result = harness.tick(ext);
|
||||
BEAST_EXPECT(!result.readyForAccept);
|
||||
BEAST_EXPECT(ext.estState_ == EstablishState::ConvergingReveal);
|
||||
BEAST_EXPECT(ext.entropySetPublished_);
|
||||
BEAST_EXPECT(harness.position.entropySetHash == localHash);
|
||||
|
||||
result = harness.tick(ext, std::chrono::milliseconds{100});
|
||||
BEAST_EXPECT(result.readyForAccept);
|
||||
BEAST_EXPECT(!ext.entropyFailed);
|
||||
BEAST_EXPECT(harness.position.entropySetHash == localHash);
|
||||
}
|
||||
|
||||
void
|
||||
testExportSigGateAllowsAlignedQuorumDespiteMinorityConflict()
|
||||
{
|
||||
testcase("Export sig gate ignores minority conflict after quorum");
|
||||
|
||||
FakeExtensions ext;
|
||||
ExportTickHarness harness;
|
||||
auto const localHash = ext.exportHash;
|
||||
auto const conflictHash = makeHash("conflicting-export-sig-set");
|
||||
|
||||
harness.addPeer(1, localHash);
|
||||
harness.addPeer(2, localHash);
|
||||
harness.addPeer(3, localHash);
|
||||
harness.addPeer(4, conflictHash);
|
||||
|
||||
auto result = harness.tick(ext);
|
||||
BEAST_EXPECT(!result.readyForAccept);
|
||||
|
||||
result = harness.tick(ext, std::chrono::milliseconds{100});
|
||||
BEAST_EXPECT(result.readyForAccept);
|
||||
BEAST_EXPECT(!ext.exportSigConvergenceFailed_);
|
||||
BEAST_EXPECT(ext.fetchedExportSets.size() == 1);
|
||||
BEAST_EXPECT(ext.fetchedExportSets.front() == conflictHash);
|
||||
}
|
||||
|
||||
void
|
||||
testExportSigGateRequiresFullObservation()
|
||||
{
|
||||
testcase("Export sig gate requires full sidecar observation");
|
||||
|
||||
FakeExtensions ext;
|
||||
ExportTickHarness harness;
|
||||
auto const localHash = ext.exportHash;
|
||||
|
||||
harness.addPeer(1, localHash);
|
||||
harness.addPeer(2, localHash);
|
||||
harness.addPeer(3, localHash);
|
||||
harness.addPeer(4, std::nullopt);
|
||||
|
||||
auto result = harness.tick(ext);
|
||||
BEAST_EXPECT(!result.readyForAccept);
|
||||
BEAST_EXPECT(harness.position.exportSigSetHash == localHash);
|
||||
BEAST_EXPECT(ext.exportSigGateStarted_);
|
||||
|
||||
// Local quorum alignment is not enough if a tx-converged peer has
|
||||
// not advertised any exportSigSetHash yet.
|
||||
result = harness.tick(ext, std::chrono::milliseconds{100});
|
||||
BEAST_EXPECT(!result.readyForAccept);
|
||||
BEAST_EXPECT(!ext.exportSigConvergenceFailed_);
|
||||
|
||||
result = harness.tick(
|
||||
ext,
|
||||
harness.parms.rngREVEAL_TIMEOUT * 2 + std::chrono::milliseconds{1});
|
||||
BEAST_EXPECT(result.readyForAccept);
|
||||
BEAST_EXPECT(ext.exportSigConvergenceFailed_);
|
||||
}
|
||||
|
||||
void
|
||||
testExportSigGateFetchesAdvertisedPeerSets()
|
||||
{
|
||||
testcase("Export sig gate fetches advertised peer sets");
|
||||
|
||||
FakeExtensions ext;
|
||||
ext.localExportSigs = false;
|
||||
ExportTickHarness harness;
|
||||
auto const peerHash = makeHash("peer-export-sig-set");
|
||||
|
||||
harness.addPeer(1, peerHash);
|
||||
|
||||
auto result = harness.tick(ext);
|
||||
BEAST_EXPECT(!result.readyForAccept);
|
||||
BEAST_EXPECT(ext.exportSigGateStarted_);
|
||||
BEAST_EXPECT(!harness.position.exportSigSetHash);
|
||||
BEAST_EXPECT(ext.fetchedExportSets.size() == 1);
|
||||
BEAST_EXPECT(ext.fetchedExportSets.front() == peerHash);
|
||||
|
||||
result = harness.tick(
|
||||
ext,
|
||||
harness.parms.rngREVEAL_TIMEOUT * 2 + std::chrono::milliseconds{1});
|
||||
BEAST_EXPECT(result.readyForAccept);
|
||||
BEAST_EXPECT(ext.exportSigConvergenceFailed_);
|
||||
}
|
||||
|
||||
void
|
||||
testExportSigGateBoundsCandidateObservationWindow()
|
||||
{
|
||||
testcase("Export sig gate bounds candidate observation window");
|
||||
|
||||
FakeExtensions ext;
|
||||
ext.localExportSigs = false;
|
||||
ext.consensusExportTxns = true;
|
||||
ExportTickHarness harness;
|
||||
|
||||
auto result = harness.tick(ext);
|
||||
BEAST_EXPECT(!result.readyForAccept);
|
||||
BEAST_EXPECT(ext.exportSigGateStarted_);
|
||||
BEAST_EXPECT(!harness.position.exportSigSetHash);
|
||||
BEAST_EXPECT(ext.fetchedExportSets.empty());
|
||||
BEAST_EXPECT(!ext.exportSigConvergenceFailed_);
|
||||
|
||||
result = harness.tick(ext, std::chrono::milliseconds{100});
|
||||
BEAST_EXPECT(!result.readyForAccept);
|
||||
BEAST_EXPECT(!ext.exportSigConvergenceFailed_);
|
||||
|
||||
result = harness.tick(
|
||||
ext,
|
||||
harness.parms.rngREVEAL_TIMEOUT * 2 + std::chrono::milliseconds{1});
|
||||
BEAST_EXPECT(result.readyForAccept);
|
||||
BEAST_EXPECT(ext.exportSigConvergenceFailed_);
|
||||
}
|
||||
|
||||
void
|
||||
testExportSigGateSkipsWhenExportDisabled()
|
||||
{
|
||||
testcase("Export sig gate skips when Export disabled");
|
||||
|
||||
FakeExtensions ext;
|
||||
ext.exportOn = false;
|
||||
ExportTickHarness harness;
|
||||
|
||||
harness.addPeer(1, ext.exportHash);
|
||||
|
||||
auto result = harness.tick(ext);
|
||||
BEAST_EXPECT(result.readyForAccept);
|
||||
BEAST_EXPECT(!ext.exportSigGateStarted_);
|
||||
BEAST_EXPECT(!harness.position.exportSigSetHash);
|
||||
BEAST_EXPECT(ext.exportBuilds == 0);
|
||||
BEAST_EXPECT(ext.fetchedExportSets.empty());
|
||||
}
|
||||
|
||||
void
|
||||
testExportDisabledRoundClearsCollector()
|
||||
{
|
||||
testcase("Export disabled round clears collector");
|
||||
|
||||
using namespace jtx;
|
||||
Env env{*this, envconfig(), supported_amendments(), nullptr};
|
||||
ConsensusExtensions ce{env.app(), env.journal};
|
||||
auto const tx = makeHash("export-disabled-clears-collector");
|
||||
auto const pk = makeValidatorKeys().front();
|
||||
std::uint8_t const sigBytes[] = {1, 2, 3};
|
||||
Buffer const sig{sigBytes, sizeof(sigBytes)};
|
||||
|
||||
ce.setExportEnabledThisRound(true);
|
||||
ce.exportSigCollector().addVerifiedSignature(tx, pk, sig, 10);
|
||||
ce.clearRngState();
|
||||
BEAST_EXPECT(ce.exportSigCollector().signatureCount(tx) == 1);
|
||||
|
||||
ce.setExportEnabledThisRound(false);
|
||||
ce.clearRngState();
|
||||
BEAST_EXPECT(ce.exportSigCollector().signatureCount(tx) == 0);
|
||||
}
|
||||
|
||||
void
|
||||
testReplayedProposalHarvestsExportSigs()
|
||||
{
|
||||
testcase("Replayed proposal harvests export signatures");
|
||||
|
||||
using namespace jtx;
|
||||
Env env{
|
||||
*this, envconfig(validator, ""), supported_amendments(), nullptr};
|
||||
auto const& valKeys = env.app().getValidatorKeys();
|
||||
BEAST_EXPECT(valKeys.keys);
|
||||
if (!valKeys.keys)
|
||||
return;
|
||||
|
||||
ConsensusExtensions ce{env.app(), env.journal};
|
||||
ce.setExportEnabledThisRound(true);
|
||||
ce.cacheUNLReport();
|
||||
|
||||
auto const activeView = ce.activeValidatorView();
|
||||
BEAST_EXPECT(activeView->sourceLedgerHash);
|
||||
if (!activeView->sourceLedgerHash)
|
||||
return;
|
||||
|
||||
auto const senderPK = valKeys.keys->publicKey;
|
||||
BEAST_EXPECT(ce.isActiveValidator(senderPK, *activeView));
|
||||
if (!ce.isActiveValidator(senderPK, *activeView))
|
||||
return;
|
||||
|
||||
auto const tx = makeHash("replayed-export-sig-tx");
|
||||
auto const blob = makeExportSigBlob(tx, senderPK);
|
||||
ExtendedPosition position{makeHash("replayed-position")};
|
||||
position.exportSignaturesHash =
|
||||
proposalExportSignaturesHash(std::vector<std::string>{blob});
|
||||
|
||||
ce.onTrustedPeerProposal(
|
||||
calcNodeID(senderPK),
|
||||
senderPK,
|
||||
position,
|
||||
0,
|
||||
NetClock::time_point{},
|
||||
*activeView->sourceLedgerHash,
|
||||
Slice{},
|
||||
std::vector<std::string>{blob});
|
||||
|
||||
BEAST_EXPECT(ce.exportSigCollector().hasUnverifiedSignatures());
|
||||
}
|
||||
|
||||
public:
|
||||
void
|
||||
run() override
|
||||
{
|
||||
testActiveValidatorViewAppliesNegativeUNL();
|
||||
testExportSigGateRequiresQuorumAlignment();
|
||||
testRngEntropyGateRequiresFullObservation();
|
||||
testRngFastPathWaitsAfterEntropyPublish();
|
||||
testExportSigGateAllowsAlignedQuorumDespiteMinorityConflict();
|
||||
testExportSigGateRequiresFullObservation();
|
||||
testExportSigGateFetchesAdvertisedPeerSets();
|
||||
testExportSigGateBoundsCandidateObservationWindow();
|
||||
testExportSigGateSkipsWhenExportDisabled();
|
||||
testExportDisabledRoundClearsCollector();
|
||||
testReplayedProposalHarvestsExportSigs();
|
||||
}
|
||||
};
|
||||
|
||||
BEAST_DEFINE_TESTSUITE(ConsensusExtensions, consensus, ripple);
|
||||
|
||||
} // namespace test
|
||||
} // namespace ripple
|
||||
1215
src/test/consensus/ConsensusRng_test.cpp
Normal file
1215
src/test/consensus/ConsensusRng_test.cpp
Normal file
File diff suppressed because it is too large
Load Diff
@@ -22,6 +22,8 @@
|
||||
#include <xrpld/consensus/ConsensusProposal.h>
|
||||
#include <xrpl/beast/clock/manual_clock.h>
|
||||
#include <xrpl/beast/unit_test.h>
|
||||
#include <xrpl/json/to_string.h>
|
||||
#include <optional>
|
||||
#include <utility>
|
||||
|
||||
namespace ripple {
|
||||
@@ -40,6 +42,7 @@ public:
|
||||
testShouldCloseLedger()
|
||||
{
|
||||
using namespace std::chrono_literals;
|
||||
testcase("should close ledger");
|
||||
|
||||
// Use default parameters
|
||||
ConsensusParms const p{};
|
||||
@@ -78,46 +81,102 @@ public:
|
||||
testCheckConsensus()
|
||||
{
|
||||
using namespace std::chrono_literals;
|
||||
testcase("check consensus");
|
||||
|
||||
// Use default parameterss
|
||||
ConsensusParms const p{};
|
||||
|
||||
///////////////
|
||||
// Disputes still in doubt
|
||||
//
|
||||
// Not enough time has elapsed
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::No ==
|
||||
checkConsensus(10, 2, 2, 0, 3s, 2s, p, true, journal_));
|
||||
checkConsensus(10, 2, 2, 0, 3s, 2s, false, p, true, journal_));
|
||||
|
||||
// If not enough peers have propsed, ensure
|
||||
// more time for proposals
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::No ==
|
||||
checkConsensus(10, 2, 2, 0, 3s, 4s, p, true, journal_));
|
||||
checkConsensus(10, 2, 2, 0, 3s, 4s, false, p, true, journal_));
|
||||
|
||||
// Enough time has elapsed and we all agree
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::Yes ==
|
||||
checkConsensus(10, 2, 2, 0, 3s, 10s, p, true, journal_));
|
||||
checkConsensus(10, 2, 2, 0, 3s, 10s, false, p, true, journal_));
|
||||
|
||||
// Enough time has elapsed and we don't yet agree
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::No ==
|
||||
checkConsensus(10, 2, 1, 0, 3s, 10s, p, true, journal_));
|
||||
checkConsensus(10, 2, 1, 0, 3s, 10s, false, p, true, journal_));
|
||||
|
||||
// Our peers have moved on
|
||||
// Enough time has elapsed and we all agree
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::MovedOn ==
|
||||
checkConsensus(10, 2, 1, 8, 3s, 10s, p, true, journal_));
|
||||
checkConsensus(10, 2, 1, 8, 3s, 10s, false, p, true, journal_));
|
||||
|
||||
// If no peers, don't agree until time has passed.
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::No ==
|
||||
checkConsensus(0, 0, 0, 0, 3s, 10s, p, true, journal_));
|
||||
checkConsensus(0, 0, 0, 0, 3s, 10s, false, p, true, journal_));
|
||||
|
||||
// Agree if no peers and enough time has passed.
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::Yes ==
|
||||
checkConsensus(0, 0, 0, 0, 3s, 16s, p, true, journal_));
|
||||
checkConsensus(0, 0, 0, 0, 3s, 16s, false, p, true, journal_));
|
||||
|
||||
// Expire if too much time has passed without agreement
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::Expired ==
|
||||
checkConsensus(10, 8, 1, 0, 1s, 19s, false, p, true, journal_));
|
||||
|
||||
///////////////
|
||||
// Stalled
|
||||
//
|
||||
// Not enough time has elapsed
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::No ==
|
||||
checkConsensus(10, 2, 2, 0, 3s, 2s, true, p, true, journal_));
|
||||
|
||||
// If not enough peers have propsed, ensure
|
||||
// more time for proposals
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::No ==
|
||||
checkConsensus(10, 2, 2, 0, 3s, 4s, true, p, true, journal_));
|
||||
|
||||
// Enough time has elapsed and we all agree
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::Yes ==
|
||||
checkConsensus(10, 2, 2, 0, 3s, 10s, true, p, true, journal_));
|
||||
|
||||
// Enough time has elapsed and we don't yet agree, but there's nothing
|
||||
// left to dispute
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::Yes ==
|
||||
checkConsensus(10, 2, 1, 0, 3s, 10s, true, p, true, journal_));
|
||||
|
||||
// Our peers have moved on
|
||||
// Enough time has elapsed and we all agree, nothing left to dispute
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::Yes ==
|
||||
checkConsensus(10, 2, 1, 8, 3s, 10s, true, p, true, journal_));
|
||||
|
||||
// If no peers, don't agree until time has passed.
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::No ==
|
||||
checkConsensus(0, 0, 0, 0, 3s, 10s, true, p, true, journal_));
|
||||
|
||||
// Agree if no peers and enough time has passed.
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::Yes ==
|
||||
checkConsensus(0, 0, 0, 0, 3s, 16s, true, p, true, journal_));
|
||||
|
||||
// We are done if there's nothing left to dispute, no matter how much
|
||||
// time has passed
|
||||
BEAST_EXPECT(
|
||||
ConsensusState::Yes ==
|
||||
checkConsensus(10, 8, 1, 0, 1s, 19s, true, p, true, journal_));
|
||||
}
|
||||
|
||||
void
|
||||
@@ -125,6 +184,7 @@ public:
|
||||
{
|
||||
using namespace std::chrono_literals;
|
||||
using namespace csf;
|
||||
testcase("standalone");
|
||||
|
||||
Sim s;
|
||||
PeerGroup peers = s.createGroup(1);
|
||||
@@ -149,7 +209,9 @@ public:
|
||||
{
|
||||
using namespace csf;
|
||||
using namespace std::chrono;
|
||||
testcase("peers agree");
|
||||
|
||||
//@@start peers-agree
|
||||
ConsensusParms const parms{};
|
||||
Sim sim;
|
||||
PeerGroup peers = sim.createGroup(5);
|
||||
@@ -179,6 +241,7 @@ public:
|
||||
BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
|
||||
}
|
||||
}
|
||||
//@@end peers-agree
|
||||
}
|
||||
|
||||
void
|
||||
@@ -186,11 +249,13 @@ public:
|
||||
{
|
||||
using namespace csf;
|
||||
using namespace std::chrono;
|
||||
testcase("slow peers");
|
||||
|
||||
// Several tests of a complete trust graph with a subset of peers
|
||||
// that have significantly longer network delays to the rest of the
|
||||
// network
|
||||
|
||||
//@@start slow-peer-scenario
|
||||
// Test when a slow peer doesn't delay a consensus quorum (4/5 agree)
|
||||
{
|
||||
ConsensusParms const parms{};
|
||||
@@ -229,16 +294,18 @@ public:
|
||||
BEAST_EXPECT(
|
||||
peer->prevRoundTime == network[0]->prevRoundTime);
|
||||
|
||||
// Slow peer's transaction (Tx{0}) didn't make it in time
|
||||
BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
|
||||
for (std::uint32_t i = 2; i < network.size(); ++i)
|
||||
BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
|
||||
|
||||
// Tx 0 didn't make it
|
||||
// Tx 0 is still in the open transaction set for next round
|
||||
BEAST_EXPECT(
|
||||
peer->openTxs.find(Tx{0}) != peer->openTxs.end());
|
||||
}
|
||||
}
|
||||
}
|
||||
//@@end slow-peer-scenario
|
||||
|
||||
// Test when the slow peers delay a consensus quorum (4/6 agree)
|
||||
{
|
||||
@@ -351,6 +418,7 @@ public:
|
||||
{
|
||||
using namespace csf;
|
||||
using namespace std::chrono;
|
||||
testcase("close time disagree");
|
||||
|
||||
// This is a very specialized test to get ledgers to disagree on
|
||||
// the close time. It unfortunately assumes knowledge about current
|
||||
@@ -417,6 +485,8 @@ public:
|
||||
{
|
||||
using namespace csf;
|
||||
using namespace std::chrono;
|
||||
testcase("wrong LCL");
|
||||
|
||||
// Specialized test to exercise a temporary fork in which some peers
|
||||
// are working on an incorrect prior ledger.
|
||||
|
||||
@@ -426,6 +496,7 @@ public:
|
||||
// the wrong LCL at different phases of consensus
|
||||
for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
|
||||
{
|
||||
//@@start wrong-lcl-scenario
|
||||
// Consider 10 peers:
|
||||
// 0 1 2 3 4 5 6 7 8 9
|
||||
// minority majorityA majorityB
|
||||
@@ -446,6 +517,7 @@ public:
|
||||
|
||||
// This topology can potentially fork with the above trust relations
|
||||
// but that is intended for this test.
|
||||
//@@end wrong-lcl-scenario
|
||||
|
||||
Sim sim;
|
||||
|
||||
@@ -589,6 +661,7 @@ public:
|
||||
{
|
||||
using namespace csf;
|
||||
using namespace std::chrono;
|
||||
testcase("consensus close time rounding");
|
||||
|
||||
// This is a specialized test engineered to yield ledgers with different
|
||||
// close times even though the peers believe they had close time
|
||||
@@ -604,9 +677,6 @@ public:
|
||||
PeerGroup fast = sim.createGroup(4);
|
||||
PeerGroup network = fast + slow;
|
||||
|
||||
for (Peer* peer : network)
|
||||
peer->consensusParms = parms;
|
||||
|
||||
// Connected trust graph
|
||||
network.trust(network);
|
||||
|
||||
@@ -692,6 +762,7 @@ public:
|
||||
{
|
||||
using namespace csf;
|
||||
using namespace std::chrono;
|
||||
testcase("fork");
|
||||
|
||||
std::uint32_t numPeers = 10;
|
||||
// Vary overlap between two UNLs
|
||||
@@ -729,6 +800,7 @@ public:
|
||||
}
|
||||
sim.run(1);
|
||||
|
||||
//@@start fork-threshold
|
||||
// Fork should not happen for 40% or greater overlap
|
||||
// Since the overlapped nodes have a UNL that is the union of the
|
||||
// two cliques, the maximum sized UNL list is the number of peers
|
||||
@@ -740,6 +812,7 @@ public:
|
||||
// One for cliqueA, one for cliqueB and one for nodes in both
|
||||
BEAST_EXPECT(sim.branches() <= 3);
|
||||
}
|
||||
//@@end fork-threshold
|
||||
}
|
||||
}
|
||||
|
||||
@@ -748,6 +821,7 @@ public:
|
||||
{
|
||||
using namespace csf;
|
||||
using namespace std::chrono;
|
||||
testcase("hub network");
|
||||
|
||||
// Simulate a set of 5 validators that aren't directly connected but
|
||||
// rely on a single hub node for communication
|
||||
@@ -835,6 +909,7 @@ public:
|
||||
{
|
||||
using namespace csf;
|
||||
using namespace std::chrono;
|
||||
testcase("preferred by branch");
|
||||
|
||||
// Simulate network splits that are prevented from forking when using
|
||||
// preferred ledger by trie. This is a contrived example that involves
|
||||
@@ -967,6 +1042,7 @@ public:
|
||||
{
|
||||
using namespace csf;
|
||||
using namespace std::chrono;
|
||||
testcase("pause for laggards");
|
||||
|
||||
// Test that validators that jump ahead of the network slow
|
||||
// down.
|
||||
@@ -1052,6 +1128,410 @@ public:
|
||||
BEAST_EXPECT(sim.synchronized());
|
||||
}
|
||||
|
||||
// RNG consensus tests in ConsensusRng_test.cpp
|
||||
|
||||
// MERGE NOTE (sync-2.5.0): upstream testDisputes() is already present
|
||||
// below with j/clog stalled() params from 86ef16dbeb. If upstream
|
||||
// auto-merges a duplicate, delete it — keep only this version.
|
||||
void
|
||||
testDisputes()
|
||||
{
|
||||
testcase("disputes");
|
||||
|
||||
using namespace csf;
|
||||
|
||||
// Test dispute objects directly
|
||||
using Dispute = DisputedTx<Tx, PeerID>;
|
||||
|
||||
Tx const txTrue{99};
|
||||
Tx const txFalse{98};
|
||||
Tx const txFollowingTrue{97};
|
||||
Tx const txFollowingFalse{96};
|
||||
int const numPeers = 100;
|
||||
ConsensusParms p;
|
||||
std::size_t peersUnchanged = 0;
|
||||
|
||||
auto logs = std::make_unique<Logs>(beast::severities::kError);
|
||||
auto j = logs->journal("Test");
|
||||
auto clog = std::make_unique<std::stringstream>();
|
||||
|
||||
// Three cases:
|
||||
// 1 proposing, initial vote yes
|
||||
// 2 proposing, initial vote no
|
||||
// 3 not proposing, initial vote doesn't matter after the first update,
|
||||
// use yes
|
||||
{
|
||||
Dispute proposingTrue{txTrue.id(), true, numPeers, journal_};
|
||||
Dispute proposingFalse{txFalse.id(), false, numPeers, journal_};
|
||||
Dispute followingTrue{
|
||||
txFollowingTrue.id(), true, numPeers, journal_};
|
||||
Dispute followingFalse{
|
||||
txFollowingFalse.id(), false, numPeers, journal_};
|
||||
BEAST_EXPECT(proposingTrue.ID() == 99);
|
||||
BEAST_EXPECT(proposingFalse.ID() == 98);
|
||||
BEAST_EXPECT(followingTrue.ID() == 97);
|
||||
BEAST_EXPECT(followingFalse.ID() == 96);
|
||||
|
||||
// Create an even split in the peer votes
|
||||
for (int i = 0; i < numPeers; ++i)
|
||||
{
|
||||
BEAST_EXPECT(proposingTrue.setVote(PeerID(i), i < 50));
|
||||
BEAST_EXPECT(proposingFalse.setVote(PeerID(i), i < 50));
|
||||
BEAST_EXPECT(followingTrue.setVote(PeerID(i), i < 50));
|
||||
BEAST_EXPECT(followingFalse.setVote(PeerID(i), i < 50));
|
||||
}
|
||||
// Switch the middle vote to match mine
|
||||
BEAST_EXPECT(proposingTrue.setVote(PeerID(50), true));
|
||||
BEAST_EXPECT(proposingFalse.setVote(PeerID(49), false));
|
||||
BEAST_EXPECT(followingTrue.setVote(PeerID(50), true));
|
||||
BEAST_EXPECT(followingFalse.setVote(PeerID(49), false));
|
||||
|
||||
// no changes yet
|
||||
BEAST_EXPECT(proposingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(proposingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(followingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(followingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(
|
||||
!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(
|
||||
!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(
|
||||
!followingTrue.stalled(p, false, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(
|
||||
!followingFalse.stalled(p, false, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(clog->str() == "");
|
||||
|
||||
// I'm in the majority, my vote should not change
|
||||
BEAST_EXPECT(!proposingTrue.updateVote(5, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(5, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(5, false, p));
|
||||
BEAST_EXPECT(!followingFalse.updateVote(5, false, p));
|
||||
|
||||
BEAST_EXPECT(!proposingTrue.updateVote(10, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(10, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(10, false, p));
|
||||
BEAST_EXPECT(!followingFalse.updateVote(10, false, p));
|
||||
|
||||
peersUnchanged = 2;
|
||||
BEAST_EXPECT(
|
||||
!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(
|
||||
!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(
|
||||
!followingTrue.stalled(p, false, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(
|
||||
!followingFalse.stalled(p, false, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(clog->str() == "");
|
||||
|
||||
// Right now, the vote is 51%. The requirement is about to jump to
|
||||
// 65%
|
||||
BEAST_EXPECT(proposingTrue.updateVote(55, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(55, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(55, false, p));
|
||||
BEAST_EXPECT(!followingFalse.updateVote(55, false, p));
|
||||
|
||||
BEAST_EXPECT(proposingTrue.getOurVote() == false);
|
||||
BEAST_EXPECT(proposingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(followingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(followingFalse.getOurVote() == false);
|
||||
// 16 validators change their vote to match my original vote
|
||||
for (int i = 0; i < 16; ++i)
|
||||
{
|
||||
auto pTrue = PeerID(numPeers - i - 1);
|
||||
auto pFalse = PeerID(i);
|
||||
BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
|
||||
BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
|
||||
BEAST_EXPECT(followingTrue.setVote(pTrue, true));
|
||||
BEAST_EXPECT(followingFalse.setVote(pFalse, false));
|
||||
}
|
||||
// The vote should now be 66%, threshold is 65%
|
||||
BEAST_EXPECT(proposingTrue.updateVote(60, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(60, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(60, false, p));
|
||||
BEAST_EXPECT(!followingFalse.updateVote(60, false, p));
|
||||
|
||||
BEAST_EXPECT(proposingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(proposingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(followingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(followingFalse.getOurVote() == false);
|
||||
|
||||
// Threshold jumps to 70%
|
||||
BEAST_EXPECT(proposingTrue.updateVote(86, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(86, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(86, false, p));
|
||||
BEAST_EXPECT(!followingFalse.updateVote(86, false, p));
|
||||
|
||||
BEAST_EXPECT(proposingTrue.getOurVote() == false);
|
||||
BEAST_EXPECT(proposingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(followingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(followingFalse.getOurVote() == false);
|
||||
|
||||
// 5 more validators change their vote to match my original vote
|
||||
for (int i = 16; i < 21; ++i)
|
||||
{
|
||||
auto pTrue = PeerID(numPeers - i - 1);
|
||||
auto pFalse = PeerID(i);
|
||||
BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
|
||||
BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
|
||||
BEAST_EXPECT(followingTrue.setVote(pTrue, true));
|
||||
BEAST_EXPECT(followingFalse.setVote(pFalse, false));
|
||||
}
|
||||
|
||||
// The vote should now be 71%, threshold is 70%
|
||||
BEAST_EXPECT(proposingTrue.updateVote(90, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(90, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(90, false, p));
|
||||
BEAST_EXPECT(!followingFalse.updateVote(90, false, p));
|
||||
|
||||
BEAST_EXPECT(proposingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(proposingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(followingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(followingFalse.getOurVote() == false);
|
||||
|
||||
// The vote should now be 71%, threshold is 70%
|
||||
BEAST_EXPECT(!proposingTrue.updateVote(150, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(150, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(150, false, p));
|
||||
BEAST_EXPECT(!followingFalse.updateVote(150, false, p));
|
||||
|
||||
BEAST_EXPECT(proposingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(proposingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(followingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(followingFalse.getOurVote() == false);
|
||||
|
||||
// The vote should now be 71%, threshold is 70%
|
||||
BEAST_EXPECT(!proposingTrue.updateVote(190, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(190, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(190, false, p));
|
||||
BEAST_EXPECT(!followingFalse.updateVote(190, false, p));
|
||||
|
||||
BEAST_EXPECT(proposingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(proposingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(followingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(followingFalse.getOurVote() == false);
|
||||
|
||||
peersUnchanged = 3;
|
||||
BEAST_EXPECT(
|
||||
!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(
|
||||
!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(
|
||||
!followingTrue.stalled(p, false, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(
|
||||
!followingFalse.stalled(p, false, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(clog->str() == "");
|
||||
|
||||
// Threshold jumps to 95%
|
||||
BEAST_EXPECT(proposingTrue.updateVote(220, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(220, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(220, false, p));
|
||||
BEAST_EXPECT(!followingFalse.updateVote(220, false, p));
|
||||
|
||||
BEAST_EXPECT(proposingTrue.getOurVote() == false);
|
||||
BEAST_EXPECT(proposingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(followingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(followingFalse.getOurVote() == false);
|
||||
|
||||
// 25 more validators change their vote to match my original vote
|
||||
for (int i = 21; i < 46; ++i)
|
||||
{
|
||||
auto pTrue = PeerID(numPeers - i - 1);
|
||||
auto pFalse = PeerID(i);
|
||||
BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
|
||||
BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
|
||||
BEAST_EXPECT(followingTrue.setVote(pTrue, true));
|
||||
BEAST_EXPECT(followingFalse.setVote(pFalse, false));
|
||||
}
|
||||
|
||||
// The vote should now be 96%, threshold is 95%
|
||||
BEAST_EXPECT(proposingTrue.updateVote(250, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(250, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(250, false, p));
|
||||
BEAST_EXPECT(!followingFalse.updateVote(250, false, p));
|
||||
|
||||
BEAST_EXPECT(proposingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(proposingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(followingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(followingFalse.getOurVote() == false);
|
||||
|
||||
for (peersUnchanged = 0; peersUnchanged < 6; ++peersUnchanged)
|
||||
{
|
||||
BEAST_EXPECT(
|
||||
!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(
|
||||
!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(
|
||||
!followingTrue.stalled(p, false, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(
|
||||
!followingFalse.stalled(p, false, peersUnchanged, j, clog));
|
||||
BEAST_EXPECT(clog->str() == "");
|
||||
}
|
||||
|
||||
auto expectStalled = [this, &clog](
|
||||
int txid,
|
||||
bool ourVote,
|
||||
int ourTime,
|
||||
int peerTime,
|
||||
int support,
|
||||
std::uint32_t line) {
|
||||
using namespace std::string_literals;
|
||||
|
||||
auto const s = clog->str();
|
||||
expect(s.find("stalled"), s, __FILE__, line);
|
||||
expect(
|
||||
s.starts_with("Transaction "s + std::to_string(txid)),
|
||||
s,
|
||||
__FILE__,
|
||||
line);
|
||||
expect(
|
||||
s.find("voting "s + (ourVote ? "YES" : "NO")) != s.npos,
|
||||
s,
|
||||
__FILE__,
|
||||
line);
|
||||
expect(
|
||||
s.find("for "s + std::to_string(ourTime) + " rounds."s) !=
|
||||
s.npos,
|
||||
s,
|
||||
__FILE__,
|
||||
line);
|
||||
expect(
|
||||
s.find(
|
||||
"votes in "s + std::to_string(peerTime) + " rounds.") !=
|
||||
s.npos,
|
||||
s,
|
||||
__FILE__,
|
||||
line);
|
||||
expect(
|
||||
s.ends_with(
|
||||
"has "s + std::to_string(support) + "% support. "s),
|
||||
s,
|
||||
__FILE__,
|
||||
line);
|
||||
clog = std::make_unique<std::stringstream>();
|
||||
};
|
||||
|
||||
for (int i = 0; i < 1; ++i)
|
||||
{
|
||||
BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
|
||||
BEAST_EXPECT(
|
||||
!followingFalse.updateVote(250 + 10 * i, false, p));
|
||||
|
||||
BEAST_EXPECT(proposingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(proposingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(followingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(followingFalse.getOurVote() == false);
|
||||
|
||||
// true vote has changed recently, so not stalled
|
||||
BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog));
|
||||
BEAST_EXPECT(clog->str() == "");
|
||||
// remaining votes have been unchanged in so long that we only
|
||||
// need to hit the second round at 95% to be stalled, regardless
|
||||
// of peers
|
||||
BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
|
||||
expectStalled(98, false, 11, 0, 2, __LINE__);
|
||||
BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
|
||||
expectStalled(97, true, 11, 0, 97, __LINE__);
|
||||
BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
|
||||
expectStalled(96, false, 11, 0, 3, __LINE__);
|
||||
|
||||
// true vote has changed recently, so not stalled
|
||||
BEAST_EXPECT(
|
||||
!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
|
||||
BEAST_EXPECTS(clog->str() == "", clog->str());
|
||||
// remaining votes have been unchanged in so long that we only
|
||||
// need to hit the second round at 95% to be stalled, regardless
|
||||
// of peers
|
||||
BEAST_EXPECT(
|
||||
proposingFalse.stalled(p, true, peersUnchanged, j, clog));
|
||||
expectStalled(98, false, 11, 6, 2, __LINE__);
|
||||
BEAST_EXPECT(
|
||||
followingTrue.stalled(p, false, peersUnchanged, j, clog));
|
||||
expectStalled(97, true, 11, 6, 97, __LINE__);
|
||||
BEAST_EXPECT(
|
||||
followingFalse.stalled(p, false, peersUnchanged, j, clog));
|
||||
expectStalled(96, false, 11, 6, 3, __LINE__);
|
||||
}
|
||||
for (int i = 1; i < 3; ++i)
|
||||
{
|
||||
BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
|
||||
BEAST_EXPECT(
|
||||
!followingFalse.updateVote(250 + 10 * i, false, p));
|
||||
|
||||
BEAST_EXPECT(proposingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(proposingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(followingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(followingFalse.getOurVote() == false);
|
||||
|
||||
// true vote changed 2 rounds ago, and peers are changing, so
|
||||
// not stalled
|
||||
BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog));
|
||||
BEAST_EXPECTS(clog->str() == "", clog->str());
|
||||
// still stalled
|
||||
BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
|
||||
expectStalled(98, false, 11 + i, 0, 2, __LINE__);
|
||||
BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
|
||||
expectStalled(97, true, 11 + i, 0, 97, __LINE__);
|
||||
BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
|
||||
expectStalled(96, false, 11 + i, 0, 3, __LINE__);
|
||||
|
||||
// true vote changed 2 rounds ago, and peers are NOT changing,
|
||||
// so stalled
|
||||
BEAST_EXPECT(
|
||||
proposingTrue.stalled(p, true, peersUnchanged, j, clog));
|
||||
expectStalled(99, true, 1 + i, 6, 97, __LINE__);
|
||||
// still stalled
|
||||
BEAST_EXPECT(
|
||||
proposingFalse.stalled(p, true, peersUnchanged, j, clog));
|
||||
expectStalled(98, false, 11 + i, 6, 2, __LINE__);
|
||||
BEAST_EXPECT(
|
||||
followingTrue.stalled(p, false, peersUnchanged, j, clog));
|
||||
expectStalled(97, true, 11 + i, 6, 97, __LINE__);
|
||||
BEAST_EXPECT(
|
||||
followingFalse.stalled(p, false, peersUnchanged, j, clog));
|
||||
expectStalled(96, false, 11 + i, 6, 3, __LINE__);
|
||||
}
|
||||
for (int i = 3; i < 5; ++i)
|
||||
{
|
||||
BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
|
||||
BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
|
||||
BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
|
||||
BEAST_EXPECT(
|
||||
!followingFalse.updateVote(250 + 10 * i, false, p));
|
||||
|
||||
BEAST_EXPECT(proposingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(proposingFalse.getOurVote() == false);
|
||||
BEAST_EXPECT(followingTrue.getOurVote() == true);
|
||||
BEAST_EXPECT(followingFalse.getOurVote() == false);
|
||||
|
||||
BEAST_EXPECT(proposingTrue.stalled(p, true, 0, j, clog));
|
||||
expectStalled(99, true, 1 + i, 0, 97, __LINE__);
|
||||
BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
|
||||
expectStalled(98, false, 11 + i, 0, 2, __LINE__);
|
||||
BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
|
||||
expectStalled(97, true, 11 + i, 0, 97, __LINE__);
|
||||
BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
|
||||
expectStalled(96, false, 11 + i, 0, 3, __LINE__);
|
||||
|
||||
BEAST_EXPECT(
|
||||
proposingTrue.stalled(p, true, peersUnchanged, j, clog));
|
||||
expectStalled(99, true, 1 + i, 6, 97, __LINE__);
|
||||
BEAST_EXPECT(
|
||||
proposingFalse.stalled(p, true, peersUnchanged, j, clog));
|
||||
expectStalled(98, false, 11 + i, 6, 2, __LINE__);
|
||||
BEAST_EXPECT(
|
||||
followingTrue.stalled(p, false, peersUnchanged, j, clog));
|
||||
expectStalled(97, true, 11 + i, 6, 97, __LINE__);
|
||||
BEAST_EXPECT(
|
||||
followingFalse.stalled(p, false, peersUnchanged, j, clog));
|
||||
expectStalled(96, false, 11 + i, 6, 3, __LINE__);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
run() override
|
||||
{
|
||||
@@ -1068,6 +1548,8 @@ public:
|
||||
testHubNetwork();
|
||||
testPreferredByBranch();
|
||||
testPauseForLaggards();
|
||||
// RNG consensus tests moved to ConsensusRng_test.cpp
|
||||
testDisputes();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
478
src/test/consensus/ExtendedPosition_test.cpp
Normal file
478
src/test/consensus/ExtendedPosition_test.cpp
Normal file
@@ -0,0 +1,478 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2026 XRPL Labs
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <xrpld/app/consensus/RCLCxPeerPos.h>
|
||||
#include <xrpld/consensus/ConsensusProposal.h>
|
||||
#include <xrpl/beast/unit_test.h>
|
||||
#include <xrpl/protocol/SecretKey.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
#include <cstring>
|
||||
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
|
||||
class ExtendedPosition_test : public beast::unit_test::suite
|
||||
{
|
||||
// Generate deterministic test hashes
|
||||
static uint256
|
||||
makeHash(char const* label)
|
||||
{
|
||||
return sha512Half(Slice(label, std::strlen(label)));
|
||||
}
|
||||
|
||||
void
|
||||
testSerializationRoundTrip()
|
||||
{
|
||||
testcase("Serialization round-trip");
|
||||
|
||||
// Empty position (legacy compat)
|
||||
{
|
||||
auto const txSet = makeHash("txset-a");
|
||||
ExtendedPosition pos{txSet};
|
||||
|
||||
Serializer s;
|
||||
pos.add(s);
|
||||
|
||||
// Should be exactly 32 bytes (no flags byte)
|
||||
BEAST_EXPECT(s.getDataLength() == 32);
|
||||
|
||||
SerialIter sit(s.slice());
|
||||
auto deserialized =
|
||||
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
|
||||
|
||||
BEAST_EXPECT(deserialized.has_value());
|
||||
if (!deserialized)
|
||||
return;
|
||||
BEAST_EXPECT(deserialized->txSetHash == txSet);
|
||||
BEAST_EXPECT(!deserialized->myCommitment);
|
||||
BEAST_EXPECT(!deserialized->myReveal);
|
||||
BEAST_EXPECT(!deserialized->commitSetHash);
|
||||
BEAST_EXPECT(!deserialized->entropySetHash);
|
||||
BEAST_EXPECT(!deserialized->exportSigSetHash);
|
||||
BEAST_EXPECT(!deserialized->exportSignaturesHash);
|
||||
}
|
||||
|
||||
// Position with commitment
|
||||
{
|
||||
auto const txSet = makeHash("txset-b");
|
||||
auto const commit = makeHash("commit-b");
|
||||
|
||||
ExtendedPosition pos{txSet};
|
||||
pos.myCommitment = commit;
|
||||
|
||||
Serializer s;
|
||||
pos.add(s);
|
||||
|
||||
// 32 (txSet) + 1 (flags) + 32 (commitment) = 65
|
||||
BEAST_EXPECT(s.getDataLength() == 65);
|
||||
|
||||
SerialIter sit(s.slice());
|
||||
auto deserialized =
|
||||
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
|
||||
|
||||
BEAST_EXPECT(deserialized.has_value());
|
||||
if (!deserialized)
|
||||
return;
|
||||
BEAST_EXPECT(deserialized->txSetHash == txSet);
|
||||
BEAST_EXPECT(deserialized->myCommitment == commit);
|
||||
BEAST_EXPECT(!deserialized->myReveal);
|
||||
}
|
||||
|
||||
// Position with all fields
|
||||
{
|
||||
auto const txSet = makeHash("txset-c");
|
||||
auto const commitSet = makeHash("commitset-c");
|
||||
auto const entropySet = makeHash("entropyset-c");
|
||||
auto const exportSigSet = makeHash("exportsigset-c");
|
||||
auto const exportSigs = makeHash("exportsigs-c");
|
||||
auto const commit = makeHash("commit-c");
|
||||
auto const reveal = makeHash("reveal-c");
|
||||
|
||||
ExtendedPosition pos{txSet};
|
||||
pos.commitSetHash = commitSet;
|
||||
pos.entropySetHash = entropySet;
|
||||
pos.exportSigSetHash = exportSigSet;
|
||||
pos.exportSignaturesHash = exportSigs;
|
||||
pos.myCommitment = commit;
|
||||
pos.myReveal = reveal;
|
||||
|
||||
Serializer s;
|
||||
pos.add(s);
|
||||
|
||||
// 32 + 1 + 6*32 = 225
|
||||
BEAST_EXPECT(s.getDataLength() == 225);
|
||||
|
||||
SerialIter sit(s.slice());
|
||||
auto deserialized =
|
||||
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
|
||||
|
||||
BEAST_EXPECT(deserialized.has_value());
|
||||
if (!deserialized)
|
||||
return;
|
||||
BEAST_EXPECT(deserialized->txSetHash == txSet);
|
||||
BEAST_EXPECT(deserialized->commitSetHash == commitSet);
|
||||
BEAST_EXPECT(deserialized->entropySetHash == entropySet);
|
||||
BEAST_EXPECT(deserialized->exportSigSetHash == exportSigSet);
|
||||
BEAST_EXPECT(deserialized->exportSignaturesHash == exportSigs);
|
||||
BEAST_EXPECT(deserialized->myCommitment == commit);
|
||||
BEAST_EXPECT(deserialized->myReveal == reveal);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
testSigningConsistency()
|
||||
{
|
||||
testcase("Signing hash consistency");
|
||||
|
||||
// The signing hash from ConsensusProposal::signingHash() must match
|
||||
// what a receiver would compute via the same function after
|
||||
// deserializing the ExtendedPosition from the wire.
|
||||
|
||||
auto const [pk, sk] = randomKeyPair(KeyType::secp256k1);
|
||||
auto const nodeId = calcNodeID(pk);
|
||||
auto const prevLedger = makeHash("prevledger");
|
||||
auto const closeTime =
|
||||
NetClock::time_point{NetClock::duration{1234567}};
|
||||
|
||||
// Test with commitment (the case that was failing)
|
||||
{
|
||||
auto const txSet = makeHash("txset-sign");
|
||||
auto const commit = makeHash("commitment-sign");
|
||||
|
||||
ExtendedPosition pos{txSet};
|
||||
pos.myCommitment = commit;
|
||||
|
||||
using Proposal =
|
||||
ConsensusProposal<NodeID, uint256, ExtendedPosition>;
|
||||
|
||||
Proposal prop{
|
||||
prevLedger,
|
||||
Proposal::seqJoin,
|
||||
pos,
|
||||
closeTime,
|
||||
NetClock::time_point{},
|
||||
nodeId};
|
||||
|
||||
// Sign it (same as propose() does)
|
||||
auto const signingHash = prop.signingHash();
|
||||
auto sig = signDigest(pk, sk, signingHash);
|
||||
|
||||
// Serialize position to wire format
|
||||
Serializer positionData;
|
||||
pos.add(positionData);
|
||||
auto const posSlice = positionData.slice();
|
||||
|
||||
// Deserialize (same as PeerImp::onMessage does)
|
||||
SerialIter sit(posSlice);
|
||||
auto const maybeReceivedPos =
|
||||
ExtendedPosition::fromSerialIter(sit, posSlice.size());
|
||||
|
||||
BEAST_EXPECT(maybeReceivedPos.has_value());
|
||||
if (!maybeReceivedPos)
|
||||
return;
|
||||
|
||||
// Reconstruct proposal on receiver side
|
||||
Proposal receivedProp{
|
||||
prevLedger,
|
||||
Proposal::seqJoin,
|
||||
*maybeReceivedPos,
|
||||
closeTime,
|
||||
NetClock::time_point{},
|
||||
nodeId};
|
||||
|
||||
// The signing hash must match
|
||||
BEAST_EXPECT(receivedProp.signingHash() == signingHash);
|
||||
|
||||
// Verify signature (same as checkSign does)
|
||||
BEAST_EXPECT(
|
||||
verifyDigest(pk, receivedProp.signingHash(), sig, false));
|
||||
}
|
||||
|
||||
// Test without commitment (legacy case)
|
||||
{
|
||||
auto const txSet = makeHash("txset-legacy");
|
||||
ExtendedPosition pos{txSet};
|
||||
|
||||
using Proposal =
|
||||
ConsensusProposal<NodeID, uint256, ExtendedPosition>;
|
||||
|
||||
Proposal prop{
|
||||
prevLedger,
|
||||
Proposal::seqJoin,
|
||||
pos,
|
||||
closeTime,
|
||||
NetClock::time_point{},
|
||||
nodeId};
|
||||
|
||||
auto const signingHash = prop.signingHash();
|
||||
auto sig = signDigest(pk, sk, signingHash);
|
||||
|
||||
Serializer positionData;
|
||||
pos.add(positionData);
|
||||
|
||||
SerialIter sit(positionData.slice());
|
||||
auto const maybeReceivedPos = ExtendedPosition::fromSerialIter(
|
||||
sit, positionData.getDataLength());
|
||||
|
||||
BEAST_EXPECT(maybeReceivedPos.has_value());
|
||||
if (!maybeReceivedPos)
|
||||
return;
|
||||
|
||||
Proposal receivedProp{
|
||||
prevLedger,
|
||||
Proposal::seqJoin,
|
||||
*maybeReceivedPos,
|
||||
closeTime,
|
||||
NetClock::time_point{},
|
||||
nodeId};
|
||||
|
||||
BEAST_EXPECT(receivedProp.signingHash() == signingHash);
|
||||
BEAST_EXPECT(
|
||||
verifyDigest(pk, receivedProp.signingHash(), sig, false));
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
testSuppressionConsistency()
|
||||
{
|
||||
testcase("Suppression hash consistency");
|
||||
|
||||
// proposalUniqueId must produce the same result on sender and
|
||||
// receiver when given the same ExtendedPosition data.
|
||||
|
||||
auto const [pk, sk] = randomKeyPair(KeyType::secp256k1);
|
||||
auto const prevLedger = makeHash("prevledger-supp");
|
||||
auto const closeTime =
|
||||
NetClock::time_point{NetClock::duration{1234567}};
|
||||
std::uint32_t const proposeSeq = 0;
|
||||
|
||||
auto const txSet = makeHash("txset-supp");
|
||||
auto const commit = makeHash("commitment-supp");
|
||||
|
||||
ExtendedPosition pos{txSet};
|
||||
pos.myCommitment = commit;
|
||||
|
||||
// Sign (to get a real signature for suppression)
|
||||
using Proposal = ConsensusProposal<NodeID, uint256, ExtendedPosition>;
|
||||
Proposal prop{
|
||||
prevLedger,
|
||||
proposeSeq,
|
||||
pos,
|
||||
closeTime,
|
||||
NetClock::time_point{},
|
||||
calcNodeID(pk)};
|
||||
|
||||
auto sig = signDigest(pk, sk, prop.signingHash());
|
||||
|
||||
// Sender computes suppression
|
||||
auto const senderSuppression =
|
||||
proposalUniqueId(pos, prevLedger, proposeSeq, closeTime, pk, sig);
|
||||
|
||||
// Simulate wire: serialize and deserialize
|
||||
Serializer positionData;
|
||||
pos.add(positionData);
|
||||
SerialIter sit(positionData.slice());
|
||||
auto const maybeReceivedPos =
|
||||
ExtendedPosition::fromSerialIter(sit, positionData.getDataLength());
|
||||
|
||||
BEAST_EXPECT(maybeReceivedPos.has_value());
|
||||
if (!maybeReceivedPos)
|
||||
return;
|
||||
|
||||
// Receiver computes suppression
|
||||
auto const receiverSuppression = proposalUniqueId(
|
||||
*maybeReceivedPos, prevLedger, proposeSeq, closeTime, pk, sig);
|
||||
|
||||
BEAST_EXPECT(senderSuppression == receiverSuppression);
|
||||
}
|
||||
|
||||
void
|
||||
testMalformedPayload()
|
||||
{
|
||||
testcase("Malformed payload rejected");
|
||||
|
||||
// Too short (< 32 bytes)
|
||||
{
|
||||
Serializer s;
|
||||
s.add32(0xDEADBEEF); // only 4 bytes
|
||||
SerialIter sit(s.slice());
|
||||
auto result =
|
||||
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
|
||||
BEAST_EXPECT(!result.has_value());
|
||||
}
|
||||
|
||||
// Empty payload
|
||||
{
|
||||
Serializer s;
|
||||
SerialIter sit(s.slice());
|
||||
auto result = ExtendedPosition::fromSerialIter(sit, 0);
|
||||
BEAST_EXPECT(!result.has_value());
|
||||
}
|
||||
|
||||
// Flags claim fields that aren't present (truncated)
|
||||
{
|
||||
auto const txSet = makeHash("txset-malformed");
|
||||
Serializer s;
|
||||
s.addBitString(txSet);
|
||||
// flags = 0x0F (all 4 fields), but no field data follows
|
||||
s.add8(0x0F);
|
||||
SerialIter sit(s.slice());
|
||||
auto result =
|
||||
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
|
||||
BEAST_EXPECT(!result.has_value());
|
||||
}
|
||||
|
||||
// Flags claim 2 fields but only 1 field's worth of data
|
||||
{
|
||||
auto const txSet = makeHash("txset-malformed2");
|
||||
auto const commit = makeHash("commit-malformed2");
|
||||
Serializer s;
|
||||
s.addBitString(txSet);
|
||||
// flags = 0x03 (commitSetHash + entropySetHash), but only
|
||||
// provide commitSetHash data
|
||||
s.add8(0x03);
|
||||
s.addBitString(commit);
|
||||
SerialIter sit(s.slice());
|
||||
auto result =
|
||||
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
|
||||
BEAST_EXPECT(!result.has_value());
|
||||
}
|
||||
|
||||
// Unknown flag bits above known extension fields (wire malleability)
|
||||
{
|
||||
auto const txSet = makeHash("txset-unkflags");
|
||||
Serializer s;
|
||||
s.addBitString(txSet);
|
||||
s.add8(0x41); // bit 6 is unknown, bit 0 = commitSetHash
|
||||
s.addBitString(makeHash("commitset-unkflags"));
|
||||
SerialIter sit(s.slice());
|
||||
auto result =
|
||||
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
|
||||
BEAST_EXPECT(!result.has_value());
|
||||
}
|
||||
|
||||
// Trailing extra bytes after valid fields
|
||||
{
|
||||
auto const txSet = makeHash("txset-trailing");
|
||||
auto const commitSet = makeHash("commitset-trailing");
|
||||
Serializer s;
|
||||
s.addBitString(txSet);
|
||||
s.add8(0x01); // commitSetHash only
|
||||
s.addBitString(commitSet);
|
||||
s.add32(0xDEADBEEF); // 4 extra trailing bytes
|
||||
SerialIter sit(s.slice());
|
||||
auto result =
|
||||
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
|
||||
BEAST_EXPECT(!result.has_value());
|
||||
}
|
||||
|
||||
// Valid flags with exactly the right amount of data (should succeed)
|
||||
{
|
||||
auto const txSet = makeHash("txset-ok");
|
||||
auto const commitSet = makeHash("commitset-ok");
|
||||
Serializer s;
|
||||
s.addBitString(txSet);
|
||||
s.add8(0x01); // commitSetHash only
|
||||
s.addBitString(commitSet);
|
||||
SerialIter sit(s.slice());
|
||||
auto result =
|
||||
ExtendedPosition::fromSerialIter(sit, s.getDataLength());
|
||||
BEAST_EXPECT(result.has_value());
|
||||
if (result)
|
||||
{
|
||||
BEAST_EXPECT(result->txSetHash == txSet);
|
||||
BEAST_EXPECT(result->commitSetHash == commitSet);
|
||||
BEAST_EXPECT(!result->entropySetHash);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
testEquality()
|
||||
{
|
||||
testcase("Equality is txSetHash only");
|
||||
|
||||
auto const txSet = makeHash("txset-eq");
|
||||
auto const txSet2 = makeHash("txset-eq-2");
|
||||
|
||||
ExtendedPosition a{txSet};
|
||||
a.myCommitment = makeHash("commit1-eq");
|
||||
|
||||
ExtendedPosition b{txSet};
|
||||
b.myCommitment = makeHash("commit2-eq");
|
||||
|
||||
// Same txSetHash, different leaves -> equal
|
||||
BEAST_EXPECT(a == b);
|
||||
|
||||
// Same txSetHash, different commitSetHash -> still equal
|
||||
// (sub-state quorum handles commitSetHash agreement)
|
||||
b.commitSetHash = makeHash("cs-eq");
|
||||
BEAST_EXPECT(a == b);
|
||||
|
||||
// Same txSetHash, different entropySetHash -> still equal
|
||||
b.entropySetHash = makeHash("es-eq");
|
||||
BEAST_EXPECT(a == b);
|
||||
|
||||
// Same txSetHash, different export signature digest -> still equal
|
||||
b.exportSignaturesHash = makeHash("export-sigs-eq");
|
||||
BEAST_EXPECT(a == b);
|
||||
|
||||
// Different txSetHash -> not equal
|
||||
ExtendedPosition c{txSet2};
|
||||
BEAST_EXPECT(a != c);
|
||||
}
|
||||
|
||||
void
|
||||
testExportSignatureDigest()
|
||||
{
|
||||
testcase("Export signature digest");
|
||||
|
||||
std::vector<std::string> blobs;
|
||||
blobs.emplace_back("txhash-pubkey-sig-a");
|
||||
blobs.emplace_back("txhash-pubkey-sig-b");
|
||||
|
||||
auto const digest = proposalExportSignaturesHash(blobs);
|
||||
BEAST_EXPECT(digest == proposalExportSignaturesHash(blobs));
|
||||
|
||||
auto reordered = blobs;
|
||||
std::swap(reordered[0], reordered[1]);
|
||||
BEAST_EXPECT(digest != proposalExportSignaturesHash(reordered));
|
||||
|
||||
auto mutated = blobs;
|
||||
mutated[1].push_back('x');
|
||||
BEAST_EXPECT(digest != proposalExportSignaturesHash(mutated));
|
||||
}
|
||||
|
||||
public:
|
||||
void
|
||||
run() override
|
||||
{
|
||||
testSerializationRoundTrip();
|
||||
testSigningConsistency();
|
||||
testSuppressionConsistency();
|
||||
testMalformedPayload();
|
||||
testEquality();
|
||||
testExportSignatureDigest();
|
||||
}
|
||||
};
|
||||
|
||||
BEAST_DEFINE_TESTSUITE(ExtendedPosition, consensus, ripple);
|
||||
|
||||
} // namespace test
|
||||
} // namespace ripple
|
||||
@@ -22,6 +22,7 @@
|
||||
#include <test/csf/Histogram.h>
|
||||
#include <test/csf/Peer.h>
|
||||
#include <test/csf/PeerGroup.h>
|
||||
#include <test/csf/PeerTick.h>
|
||||
#include <test/csf/Proposal.h>
|
||||
#include <test/csf/Scheduler.h>
|
||||
#include <test/csf/Sim.h>
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#define RIPPLE_TEST_CSF_PEER_H_INCLUDED
|
||||
|
||||
#include <test/csf/CollectorRef.h>
|
||||
#include <test/csf/Proposal.h>
|
||||
#include <test/csf/Scheduler.h>
|
||||
#include <test/csf/TrustGraph.h>
|
||||
#include <test/csf/Tx.h>
|
||||
@@ -28,11 +29,14 @@
|
||||
#include <test/csf/ledgers.h>
|
||||
#include <xrpld/consensus/Consensus.h>
|
||||
#include <xrpld/consensus/Validations.h>
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/beast/utility/WrappedSink.h>
|
||||
#include <xrpl/protocol/PublicKey.h>
|
||||
#include <boost/container/flat_map.hpp>
|
||||
#include <boost/container/flat_set.hpp>
|
||||
#include <algorithm>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
@@ -51,6 +55,41 @@ namespace bc = boost::container;
|
||||
by Collectors
|
||||
- Exposes most internal state for forcibly simulating arbitrary scenarios
|
||||
*/
|
||||
/// Content-addressed sidecar set store, simulating InboundTransactions.
|
||||
/// Shared across all peers in a simulation — peers publish sets by hash
|
||||
/// and fetch them by hash, just like the real SHAMap fetch pipeline.
|
||||
///
|
||||
/// Each entry is tagged with its type so fetchRngSetIfNeeded can merge
|
||||
/// into the correct local set without content-sniffing heuristics.
|
||||
struct SidecarStore
|
||||
{
|
||||
enum class Type { commit, reveal, exportSig };
|
||||
|
||||
using EntrySet = hash_map<PeerID, uint256>;
|
||||
|
||||
struct TaggedSet
|
||||
{
|
||||
Type type;
|
||||
EntrySet entries;
|
||||
};
|
||||
|
||||
void
|
||||
publish(uint256 const& hash, Type type, EntrySet const& entries)
|
||||
{
|
||||
sets_[hash] = {type, entries};
|
||||
}
|
||||
|
||||
TaggedSet const*
|
||||
fetch(uint256 const& hash) const
|
||||
{
|
||||
auto it = sets_.find(hash);
|
||||
return it != sets_.end() ? &it->second : nullptr;
|
||||
}
|
||||
|
||||
private:
|
||||
std::map<uint256, TaggedSet> sets_;
|
||||
};
|
||||
|
||||
struct Peer
|
||||
{
|
||||
/** Basic wrapper of a proposed position taken by a peer.
|
||||
@@ -61,6 +100,8 @@ struct Peer
|
||||
class Position
|
||||
{
|
||||
public:
|
||||
using Proposal = csf::Proposal;
|
||||
|
||||
Position(Proposal const& p) : proposal_(p)
|
||||
{
|
||||
}
|
||||
@@ -77,6 +118,18 @@ struct Peer
|
||||
return proposal_.getJson();
|
||||
}
|
||||
|
||||
PeerKey
|
||||
publicKey() const
|
||||
{
|
||||
return {proposal_.nodeID(), 0};
|
||||
}
|
||||
|
||||
std::uint64_t
|
||||
signature() const
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
std::string
|
||||
render() const
|
||||
{
|
||||
@@ -169,6 +222,7 @@ struct Peer
|
||||
using NodeKey_t = PeerKey;
|
||||
using TxSet_t = TxSet;
|
||||
using PeerPosition_t = Position;
|
||||
using Position_t = ProposalPosition;
|
||||
using Result = ConsensusResult<Peer>;
|
||||
using NodeKey = Validation::NodeKey;
|
||||
|
||||
@@ -188,6 +242,9 @@ struct Peer
|
||||
//! The oracle that manages unique ledgers
|
||||
LedgerOracle& oracle;
|
||||
|
||||
//! Shared sidecar store (simulates InboundTransactions)
|
||||
SidecarStore& sidecarStore;
|
||||
|
||||
//! Scheduler of events
|
||||
Scheduler& scheduler;
|
||||
|
||||
@@ -257,6 +314,686 @@ struct Peer
|
||||
// Simulation parameters
|
||||
ConsensusParms consensusParms;
|
||||
|
||||
/// RNG consensus extensions for CSF. Owns all RNG state and methods,
|
||||
/// same pattern as ConsensusExtensions for production.
|
||||
struct Extensions
|
||||
{
|
||||
Peer& peer;
|
||||
beast::Journal j_;
|
||||
|
||||
// Sub-state machine
|
||||
EstablishState estState_{EstablishState::ConvergingTx};
|
||||
std::chrono::steady_clock::time_point revealPhaseStart_{};
|
||||
std::chrono::steady_clock::time_point commitHashConflictStart_{};
|
||||
bool explicitFinalProposalSent_{false};
|
||||
bool entropySetPublished_{false};
|
||||
std::chrono::steady_clock::time_point entropyPublishStart_{};
|
||||
bool exportSigGateStarted_{false};
|
||||
std::chrono::steady_clock::time_point exportSigGateStart_{};
|
||||
bool exportSigConvergenceFailed_{false};
|
||||
|
||||
// RNG state
|
||||
bool enableRngConsensus_ = false;
|
||||
bool enableExportConsensus_ = false;
|
||||
hash_set<PeerID> unlNodes_;
|
||||
hash_set<PeerID> likelyParticipants_;
|
||||
hash_map<PeerID, uint256> pendingCommits_;
|
||||
hash_map<PeerID, uint256> pendingReveals_;
|
||||
hash_map<PeerID, uint256> pendingExportSigs_;
|
||||
hash_map<PeerID, PeerKey> nodeKeys_;
|
||||
uint256 myEntropySecret_;
|
||||
bool entropyFailed_ = false;
|
||||
|
||||
// Last round summary (for test assertions)
|
||||
uint256 lastEntropyDigest_;
|
||||
std::uint16_t lastEntropyCount_ = 0;
|
||||
bool lastEntropyWasFallback_ = true;
|
||||
bool lastExportSucceeded_ = false;
|
||||
bool lastExportRetried_ = false;
|
||||
std::size_t exportSigFetchMerges_ = 0;
|
||||
|
||||
// Optional test hook: force a specific commit-set hash
|
||||
std::optional<uint256> forcedCommitSetHash_;
|
||||
// Optional test hook: force a specific entropy-set hash
|
||||
std::optional<uint256> forcedEntropySetHash_;
|
||||
// Optional test hook: force a specific export sig-set hash
|
||||
std::optional<uint256> forcedExportSigSetHash_;
|
||||
|
||||
// Optional test hook: drop reveals from specific peers
|
||||
// (simulates asymmetric reveal delivery / packet loss)
|
||||
hash_set<PeerID> dropRevealFrom_;
|
||||
// Optional test hook: drop proposal-carried export signatures.
|
||||
hash_set<PeerID> dropExportSigFrom_;
|
||||
// Optional test hook: stay an active proposer but do not originate an
|
||||
// export signature, so tests can force sidecar-fetch-only convergence.
|
||||
bool suppressOwnExportSig_ = false;
|
||||
|
||||
explicit Extensions(Peer& p) : peer(p), j_(p.j)
|
||||
{
|
||||
}
|
||||
|
||||
// --- RNG methods ---
|
||||
|
||||
bool
|
||||
rngEnabled() const
|
||||
{
|
||||
return enableRngConsensus_;
|
||||
}
|
||||
|
||||
bool
|
||||
exportEnabled() const
|
||||
{
|
||||
return enableExportConsensus_;
|
||||
}
|
||||
|
||||
std::size_t
|
||||
quorumThreshold() const
|
||||
{
|
||||
if (!enableRngConsensus_)
|
||||
return (std::numeric_limits<std::size_t>::max)() / 4;
|
||||
auto const base = unlNodes_.size();
|
||||
return calculateQuorumThreshold(base == 0 ? 1 : base);
|
||||
}
|
||||
|
||||
std::size_t
|
||||
exportSigQuorumThreshold() const
|
||||
{
|
||||
if (!enableExportConsensus_)
|
||||
return (std::numeric_limits<std::size_t>::max)() / 4;
|
||||
auto const base =
|
||||
unlNodes_.empty() ? std::size_t{1} : unlNodes_.size();
|
||||
return calculateQuorumThreshold(base);
|
||||
}
|
||||
|
||||
std::size_t
|
||||
pendingCommitCount() const
|
||||
{
|
||||
return pendingCommits_.size();
|
||||
}
|
||||
|
||||
std::size_t
|
||||
pendingRevealCount() const
|
||||
{
|
||||
return pendingReveals_.size();
|
||||
}
|
||||
|
||||
std::size_t
|
||||
expectedProposerCount() const
|
||||
{
|
||||
return likelyParticipants_.size();
|
||||
}
|
||||
|
||||
bool
|
||||
hasQuorumOfCommits() const
|
||||
{
|
||||
if (!enableRngConsensus_)
|
||||
return false;
|
||||
return pendingCommits_.size() >= quorumThreshold();
|
||||
}
|
||||
|
||||
bool
|
||||
hasMinimumReveals() const
|
||||
{
|
||||
if (!enableRngConsensus_)
|
||||
return false;
|
||||
return pendingReveals_.size() >= pendingCommits_.size();
|
||||
}
|
||||
|
||||
bool
|
||||
hasAnyReveals() const
|
||||
{
|
||||
if (!enableRngConsensus_)
|
||||
return false;
|
||||
return !pendingReveals_.empty();
|
||||
}
|
||||
|
||||
bool
|
||||
shouldZeroEntropy() const
|
||||
{
|
||||
if (entropyFailed_ || pendingReveals_.empty())
|
||||
return true;
|
||||
// Match production: zero when reveals < quorum threshold.
|
||||
auto const threshold = unlNodes_.empty()
|
||||
? std::size_t{1}
|
||||
: calculateQuorumThreshold(unlNodes_.size());
|
||||
return pendingReveals_.size() < threshold;
|
||||
}
|
||||
|
||||
uint256
|
||||
buildCommitSet(Ledger::Seq seq)
|
||||
{
|
||||
if (forcedCommitSetHash_)
|
||||
return *forcedCommitSetHash_;
|
||||
auto const hash = hashRngSet(pendingCommits_, seq, "commit");
|
||||
peer.sidecarStore.publish(
|
||||
hash, SidecarStore::Type::commit, pendingCommits_);
|
||||
return hash;
|
||||
}
|
||||
|
||||
uint256
|
||||
buildEntropySet(Ledger::Seq seq)
|
||||
{
|
||||
if (forcedEntropySetHash_)
|
||||
return *forcedEntropySetHash_;
|
||||
auto const hash = hashRngSet(pendingReveals_, seq, "reveal");
|
||||
peer.sidecarStore.publish(
|
||||
hash, SidecarStore::Type::reveal, pendingReveals_);
|
||||
return hash;
|
||||
}
|
||||
|
||||
uint256
|
||||
buildExportSigSet(Ledger::Seq seq)
|
||||
{
|
||||
if (forcedExportSigSetHash_)
|
||||
return *forcedExportSigSetHash_;
|
||||
auto const hash = hashRngSet(pendingExportSigs_, seq, "export-sig");
|
||||
peer.sidecarStore.publish(
|
||||
hash, SidecarStore::Type::exportSig, pendingExportSigs_);
|
||||
return hash;
|
||||
}
|
||||
|
||||
void
|
||||
generateEntropySecret()
|
||||
{
|
||||
if (!enableRngConsensus_)
|
||||
return;
|
||||
auto const seq =
|
||||
static_cast<std::uint32_t>(peer.lastClosedLedger.seq()) + 1;
|
||||
myEntropySecret_ = sha512Half(
|
||||
std::string("csf-rng-secret"),
|
||||
static_cast<std::uint32_t>(peer.id),
|
||||
peer.key.second,
|
||||
seq,
|
||||
peer.completedLedgers);
|
||||
}
|
||||
|
||||
uint256
|
||||
getEntropySecret() const
|
||||
{
|
||||
return myEntropySecret_;
|
||||
}
|
||||
|
||||
void
|
||||
selfSeedReveal()
|
||||
{
|
||||
if (!enableRngConsensus_)
|
||||
return;
|
||||
// Self-seed our own reveal into pendingReveals_ so it
|
||||
// counts toward reveal quorum. The real code does this
|
||||
// in decorateMessage; the CSF does it here since it has
|
||||
// no equivalent serialization hook.
|
||||
if (myEntropySecret_ != uint256{})
|
||||
pendingReveals_[peer.id] = myEntropySecret_;
|
||||
}
|
||||
|
||||
void
|
||||
setEntropyFailed()
|
||||
{
|
||||
if (!enableRngConsensus_)
|
||||
return;
|
||||
entropyFailed_ = true;
|
||||
}
|
||||
|
||||
enum class SidecarKind : uint8_t { commit, reveal, exportSig };
|
||||
|
||||
void
|
||||
fetchRngSetIfNeeded(
|
||||
std::optional<uint256> const& hash,
|
||||
SidecarKind kind = SidecarKind::commit)
|
||||
{
|
||||
if (!hash)
|
||||
return;
|
||||
auto const* fetched = peer.sidecarStore.fetch(*hash);
|
||||
if (!fetched)
|
||||
return;
|
||||
// Union merge into the correct local set based on type.
|
||||
auto& target = [&]() -> hash_map<PeerID, uint256>& {
|
||||
switch (fetched->type)
|
||||
{
|
||||
case SidecarStore::Type::commit:
|
||||
return pendingCommits_;
|
||||
case SidecarStore::Type::reveal:
|
||||
return pendingReveals_;
|
||||
case SidecarStore::Type::exportSig:
|
||||
return pendingExportSigs_;
|
||||
}
|
||||
return pendingCommits_;
|
||||
}();
|
||||
for (auto const& [nodeId, digest] : fetched->entries)
|
||||
{
|
||||
auto const [_, inserted] = target.emplace(nodeId, digest);
|
||||
if (fetched->type == SidecarStore::Type::exportSig && inserted)
|
||||
++exportSigFetchMerges_;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
fetchSidecarsIfNeeded(ProposalPosition const& pos)
|
||||
{
|
||||
fetchRngSetIfNeeded(pos.commitSetHash, SidecarKind::commit);
|
||||
fetchRngSetIfNeeded(pos.entropySetHash, SidecarKind::reveal);
|
||||
fetchRngSetIfNeeded(pos.exportSigSetHash, SidecarKind::exportSig);
|
||||
}
|
||||
|
||||
void
|
||||
clearRngState()
|
||||
{
|
||||
pendingCommits_.clear();
|
||||
pendingReveals_.clear();
|
||||
pendingExportSigs_.clear();
|
||||
nodeKeys_.clear();
|
||||
likelyParticipants_.clear();
|
||||
myEntropySecret_.zero();
|
||||
entropyFailed_ = false;
|
||||
exportSigGateStarted_ = false;
|
||||
exportSigGateStart_ = {};
|
||||
exportSigConvergenceFailed_ = false;
|
||||
}
|
||||
|
||||
void
|
||||
cacheUNLReport()
|
||||
{
|
||||
unlNodes_.clear();
|
||||
for (auto const* p : peer.trustGraph.trustedPeers(&peer))
|
||||
{
|
||||
if (!peer.runAsValidator && p->id == peer.id)
|
||||
continue;
|
||||
unlNodes_.insert(p->id);
|
||||
}
|
||||
if (peer.runAsValidator)
|
||||
unlNodes_.insert(peer.id);
|
||||
}
|
||||
|
||||
void
|
||||
setExpectedProposers(hash_set<PeerID> proposers)
|
||||
{
|
||||
bool const includeSelf = peer.runAsValidator;
|
||||
if (!proposers.empty())
|
||||
{
|
||||
hash_set<PeerID> filtered;
|
||||
for (auto const& nid : proposers)
|
||||
{
|
||||
if (!includeSelf && nid == peer.id)
|
||||
continue;
|
||||
if (isUNLReportMember(nid))
|
||||
filtered.insert(nid);
|
||||
}
|
||||
if (includeSelf)
|
||||
filtered.insert(peer.id);
|
||||
likelyParticipants_ = std::move(filtered);
|
||||
return;
|
||||
}
|
||||
likelyParticipants_.clear();
|
||||
if (!unlNodes_.empty())
|
||||
likelyParticipants_ = unlNodes_;
|
||||
}
|
||||
|
||||
void
|
||||
harvestRngData(
|
||||
PeerID const& nodeId,
|
||||
PeerKey const& publicKey,
|
||||
ProposalPosition const& position,
|
||||
std::uint32_t,
|
||||
NetClock::time_point,
|
||||
Ledger::ID const& prevLedger,
|
||||
std::uint64_t)
|
||||
{
|
||||
if (!enableRngConsensus_ && !enableExportConsensus_)
|
||||
return;
|
||||
if (!isUNLReportMember(nodeId))
|
||||
return;
|
||||
|
||||
nodeKeys_.insert_or_assign(nodeId, publicKey);
|
||||
|
||||
if (enableRngConsensus_ && position.myCommitment)
|
||||
{
|
||||
auto [it, inserted] =
|
||||
pendingCommits_.emplace(nodeId, *position.myCommitment);
|
||||
if (!inserted && it->second != *position.myCommitment)
|
||||
{
|
||||
it->second = *position.myCommitment;
|
||||
pendingReveals_.erase(nodeId);
|
||||
}
|
||||
}
|
||||
|
||||
if (!enableRngConsensus_ || !position.myReveal)
|
||||
{
|
||||
if (enableExportConsensus_ && position.myExportSignature &&
|
||||
dropExportSigFrom_.count(nodeId) == 0)
|
||||
pendingExportSigs_[nodeId] = *position.myExportSignature;
|
||||
return;
|
||||
}
|
||||
|
||||
// Test hook: drop reveals from specific peers
|
||||
if (dropRevealFrom_.count(nodeId) == 0)
|
||||
{
|
||||
auto const commitIt = pendingCommits_.find(nodeId);
|
||||
if (commitIt != pendingCommits_.end())
|
||||
{
|
||||
auto const prevIt = peer.ledgers.find(prevLedger);
|
||||
if (prevIt != peer.ledgers.end())
|
||||
{
|
||||
auto const seq =
|
||||
static_cast<std::uint32_t>(prevIt->second.seq()) +
|
||||
1;
|
||||
auto const expected = sha512Half(
|
||||
*position.myReveal,
|
||||
static_cast<std::uint32_t>(publicKey.first),
|
||||
publicKey.second,
|
||||
seq);
|
||||
if (expected == commitIt->second)
|
||||
pendingReveals_[nodeId] = *position.myReveal;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (enableExportConsensus_ && position.myExportSignature &&
|
||||
dropExportSigFrom_.count(nodeId) == 0)
|
||||
pendingExportSigs_[nodeId] = *position.myExportSignature;
|
||||
}
|
||||
|
||||
bool
|
||||
isUNLReportMember(PeerID const& nodeId) const
|
||||
{
|
||||
return unlNodes_.count(nodeId) > 0;
|
||||
}
|
||||
|
||||
void
|
||||
finalizeRoundEntropy(std::uint32_t seq)
|
||||
{
|
||||
if (!enableRngConsensus_)
|
||||
{
|
||||
lastEntropyDigest_.zero();
|
||||
lastEntropyCount_ = 0;
|
||||
lastEntropyWasFallback_ = true;
|
||||
return;
|
||||
}
|
||||
|
||||
if (shouldZeroEntropy())
|
||||
{
|
||||
lastEntropyDigest_.zero();
|
||||
lastEntropyCount_ = 0;
|
||||
lastEntropyWasFallback_ = true;
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<std::pair<PeerKey, uint256>> ordered;
|
||||
ordered.reserve(pendingReveals_.size());
|
||||
for (auto const& [nodeId, reveal] : pendingReveals_)
|
||||
{
|
||||
auto const it = nodeKeys_.find(nodeId);
|
||||
if (it == nodeKeys_.end())
|
||||
continue;
|
||||
ordered.emplace_back(it->second, reveal);
|
||||
}
|
||||
|
||||
if (ordered.empty())
|
||||
{
|
||||
lastEntropyDigest_.zero();
|
||||
lastEntropyCount_ = 0;
|
||||
lastEntropyWasFallback_ = true;
|
||||
return;
|
||||
}
|
||||
|
||||
std::sort(
|
||||
ordered.begin(),
|
||||
ordered.end(),
|
||||
[](auto const& a, auto const& b) {
|
||||
if (a.first.first != b.first.first)
|
||||
return a.first.first < b.first.first;
|
||||
return a.first.second < b.first.second;
|
||||
});
|
||||
|
||||
uint256 digest = sha512Half(
|
||||
std::string("csf-rng-entropy"),
|
||||
static_cast<std::uint32_t>(seq));
|
||||
for (auto const& [keyId, reveal] : ordered)
|
||||
{
|
||||
digest = sha512Half(
|
||||
digest,
|
||||
static_cast<std::uint32_t>(keyId.first),
|
||||
keyId.second,
|
||||
reveal);
|
||||
}
|
||||
|
||||
lastEntropyDigest_ = digest;
|
||||
lastEntropyCount_ = static_cast<std::uint16_t>(ordered.size());
|
||||
lastEntropyWasFallback_ = false;
|
||||
}
|
||||
|
||||
void
|
||||
finalizeRoundExport()
|
||||
{
|
||||
if (!enableExportConsensus_)
|
||||
{
|
||||
lastExportSucceeded_ = false;
|
||||
lastExportRetried_ = false;
|
||||
return;
|
||||
}
|
||||
|
||||
auto const activeSigCount = std::count_if(
|
||||
pendingExportSigs_.begin(),
|
||||
pendingExportSigs_.end(),
|
||||
[&](auto const& entry) {
|
||||
return isUNLReportMember(entry.first);
|
||||
});
|
||||
lastExportSucceeded_ = !exportSigConvergenceFailed_ &&
|
||||
static_cast<std::size_t>(activeSigCount) >=
|
||||
exportSigQuorumThreshold();
|
||||
lastExportRetried_ = !lastExportSucceeded_;
|
||||
}
|
||||
|
||||
// --- Lifecycle hooks (matching design doc) ---
|
||||
|
||||
template <class Ledger_t>
|
||||
void
|
||||
onRoundStart(
|
||||
Ledger_t const& /* prevLedger */,
|
||||
hash_set<PeerID> lastProposers)
|
||||
{
|
||||
clearRngState();
|
||||
cacheUNLReport();
|
||||
setExpectedProposers(std::move(lastProposers));
|
||||
resetSubState();
|
||||
}
|
||||
|
||||
void
|
||||
onTrustedPeerProposal(
|
||||
PeerID const& nodeId,
|
||||
PeerKey const& publicKey,
|
||||
ProposalPosition const& position,
|
||||
std::uint32_t proposeSeq,
|
||||
NetClock::time_point closeTime,
|
||||
Ledger::ID const& prevLedger,
|
||||
std::uint64_t signature)
|
||||
{
|
||||
harvestRngData(
|
||||
nodeId,
|
||||
publicKey,
|
||||
position,
|
||||
proposeSeq,
|
||||
closeTime,
|
||||
prevLedger,
|
||||
signature);
|
||||
}
|
||||
|
||||
void
|
||||
onAcceptComplete()
|
||||
{
|
||||
}
|
||||
|
||||
template <class Ledger_t>
|
||||
void
|
||||
decoratePosition(
|
||||
ProposalPosition& pos,
|
||||
Ledger_t const& prevLedger,
|
||||
bool proposing)
|
||||
{
|
||||
decorateExportPosition(pos, prevLedger, proposing);
|
||||
|
||||
if (!enableRngConsensus_ || !proposing || !peer.runAsValidator)
|
||||
return;
|
||||
generateEntropySecret();
|
||||
auto const seq = static_cast<std::uint32_t>(prevLedger.seq()) + 1;
|
||||
auto const commitment = sha512Half(
|
||||
myEntropySecret_,
|
||||
static_cast<std::uint32_t>(peer.id),
|
||||
peer.key.second,
|
||||
seq);
|
||||
pos.myCommitment = commitment;
|
||||
pendingCommits_[peer.id] = commitment;
|
||||
nodeKeys_.insert_or_assign(peer.id, peer.key);
|
||||
}
|
||||
|
||||
template <class Ledger_t>
|
||||
void
|
||||
decorateExportPosition(
|
||||
ProposalPosition& pos,
|
||||
Ledger_t const& prevLedger,
|
||||
bool proposing)
|
||||
{
|
||||
if (!enableExportConsensus_ || !proposing || !peer.runAsValidator)
|
||||
return;
|
||||
|
||||
auto const seq = static_cast<std::uint32_t>(prevLedger.seq()) + 1;
|
||||
auto const sig = sha512Half(
|
||||
std::string("csf-export-sig"),
|
||||
static_cast<std::uint32_t>(peer.id),
|
||||
peer.key.second,
|
||||
seq);
|
||||
if (!suppressOwnExportSig_)
|
||||
{
|
||||
pos.myExportSignature = sig;
|
||||
pendingExportSigs_[peer.id] = sig;
|
||||
}
|
||||
nodeKeys_.insert_or_assign(peer.id, peer.key);
|
||||
}
|
||||
|
||||
void
|
||||
appendJson(Json::Value&) const
|
||||
{
|
||||
}
|
||||
|
||||
template <class Pos>
|
||||
void
|
||||
logPosition(
|
||||
Pos const&,
|
||||
beast::Journal,
|
||||
beast::severities::Severity = beast::severities::kTrace) const
|
||||
{
|
||||
}
|
||||
|
||||
// --- Stubs for features CSF doesn't model ---
|
||||
bool
|
||||
bootstrapFastStartEnabled() const
|
||||
{
|
||||
return false;
|
||||
}
|
||||
bool
|
||||
shouldSendExplicitFinalProposal() const
|
||||
{
|
||||
return false;
|
||||
}
|
||||
std::optional<TxSet>
|
||||
buildExplicitFinalProposalTxSet(TxSet const&, Ledger::Seq)
|
||||
{
|
||||
return std::nullopt;
|
||||
}
|
||||
bool
|
||||
hasPendingExportSigs() const
|
||||
{
|
||||
return enableExportConsensus_ && !pendingExportSigs_.empty();
|
||||
}
|
||||
bool
|
||||
hasConsensusExportTxns() const
|
||||
{
|
||||
return enableExportConsensus_;
|
||||
}
|
||||
void
|
||||
setExportSigConvergenceFailed()
|
||||
{
|
||||
if (enableExportConsensus_)
|
||||
exportSigConvergenceFailed_ = true;
|
||||
}
|
||||
|
||||
// --- Sub-state accessors ---
|
||||
bool
|
||||
extensionsBusy() const
|
||||
{
|
||||
return estState_ != EstablishState::ConvergingTx ||
|
||||
(exportEnabled() &&
|
||||
(exportSigGateStarted_ || hasPendingExportSigs()));
|
||||
}
|
||||
EstablishState
|
||||
estState() const
|
||||
{
|
||||
return estState_;
|
||||
}
|
||||
void
|
||||
resetSubState()
|
||||
{
|
||||
estState_ = EstablishState::ConvergingTx;
|
||||
revealPhaseStart_ = {};
|
||||
commitHashConflictStart_ = {};
|
||||
explicitFinalProposalSent_ = false;
|
||||
entropySetPublished_ = false;
|
||||
entropyPublishStart_ = {};
|
||||
exportSigGateStarted_ = false;
|
||||
exportSigGateStart_ = {};
|
||||
exportSigConvergenceFailed_ = false;
|
||||
}
|
||||
|
||||
/// Defined in test/csf/PeerTick.h (keeps xrpld/app dependency
|
||||
/// out of this header).
|
||||
template <class Ctx>
|
||||
ExtensionTickResult
|
||||
onTick(Ctx const& ctx);
|
||||
|
||||
private:
|
||||
uint256
|
||||
hashRngSet(
|
||||
hash_map<PeerID, uint256> const& entries,
|
||||
Ledger::Seq seq,
|
||||
std::string const& domain) const
|
||||
{
|
||||
std::vector<std::pair<std::uint32_t, uint256>> ordered;
|
||||
ordered.reserve(entries.size());
|
||||
for (auto const& [nodeId, digest] : entries)
|
||||
{
|
||||
if (!isUNLReportMember(nodeId))
|
||||
continue;
|
||||
ordered.emplace_back(
|
||||
static_cast<std::uint32_t>(nodeId), digest);
|
||||
}
|
||||
if (ordered.empty())
|
||||
return uint256{};
|
||||
std::sort(
|
||||
ordered.begin(),
|
||||
ordered.end(),
|
||||
[](auto const& a, auto const& b) { return a.first < b.first; });
|
||||
uint256 out = sha512Half(
|
||||
std::string("csf-rng-set"),
|
||||
domain,
|
||||
static_cast<std::uint32_t>(seq));
|
||||
for (auto const& [nodeId, digest] : ordered)
|
||||
out = sha512Half(out, nodeId, digest);
|
||||
return out;
|
||||
}
|
||||
};
|
||||
|
||||
Extensions extensions_{*this};
|
||||
|
||||
Extensions&
|
||||
ce()
|
||||
{
|
||||
return extensions_;
|
||||
}
|
||||
Extensions const&
|
||||
ce() const
|
||||
{
|
||||
return extensions_;
|
||||
}
|
||||
|
||||
//! The collectors to report events to
|
||||
CollectorRefs& collectors;
|
||||
|
||||
@@ -278,13 +1015,15 @@ struct Peer
|
||||
BasicNetwork<Peer*>& n,
|
||||
TrustGraph<Peer*>& tg,
|
||||
CollectorRefs& c,
|
||||
beast::Journal jIn)
|
||||
beast::Journal jIn,
|
||||
SidecarStore& sc)
|
||||
: sink(jIn, "Peer " + to_string(i) + ": ")
|
||||
, j(sink)
|
||||
, consensus(s.clock(), *this, j)
|
||||
, id{i}
|
||||
, key{id, 0}
|
||||
, oracle{o}
|
||||
, sidecarStore{sc}
|
||||
, scheduler{s}
|
||||
, net{n}
|
||||
, trustGraph(tg)
|
||||
@@ -510,15 +1249,15 @@ struct Peer
|
||||
{
|
||||
issue(CloseLedger{prevLedger, openTxs});
|
||||
|
||||
Position_t pos{TxSet::calcID(openTxs)};
|
||||
|
||||
ce().decoratePosition(
|
||||
pos, prevLedger, mode == ConsensusMode::proposing);
|
||||
|
||||
return Result(
|
||||
TxSet{openTxs},
|
||||
Proposal(
|
||||
prevLedger.id(),
|
||||
Proposal::seqJoin,
|
||||
TxSet::calcID(openTxs),
|
||||
closeTime,
|
||||
now(),
|
||||
id));
|
||||
prevLedger.id(), Proposal::seqJoin, pos, closeTime, now(), id));
|
||||
}
|
||||
|
||||
void
|
||||
@@ -553,6 +1292,10 @@ struct Peer
|
||||
schedule(delays.ledgerAccept, [=, this]() {
|
||||
const bool proposing = mode == ConsensusMode::proposing;
|
||||
const bool consensusFail = result.state == ConsensusState::MovedOn;
|
||||
auto const seq = static_cast<std::uint32_t>(prevLedger.seq()) + 1;
|
||||
|
||||
ce().finalizeRoundEntropy(seq);
|
||||
ce().finalizeRoundExport();
|
||||
|
||||
TxSet const acceptedTxs = injectTxs(prevLedger, result.txns);
|
||||
Ledger const newLedger = oracle.accept(
|
||||
|
||||
14
src/test/csf/PeerTick.h
Normal file
14
src/test/csf/PeerTick.h
Normal file
@@ -0,0 +1,14 @@
|
||||
#ifndef RIPPLE_TEST_CSF_PEERTICK_H_INCLUDED
|
||||
#define RIPPLE_TEST_CSF_PEERTICK_H_INCLUDED
|
||||
|
||||
#include <test/csf/Peer.h>
|
||||
#include <xrpld/consensus/ConsensusExtensionsTick.h>
|
||||
|
||||
template <class Ctx>
|
||||
ripple::ExtensionTickResult
|
||||
ripple::test::csf::Peer::Extensions::onTick(Ctx const& ctx)
|
||||
{
|
||||
return ripple::extensionsTick(*this, ctx);
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -23,17 +23,123 @@
|
||||
#include <test/csf/Validation.h>
|
||||
#include <test/csf/ledgers.h>
|
||||
#include <xrpld/consensus/ConsensusProposal.h>
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/beast/hash/hash_append.h>
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <ostream>
|
||||
#include <string>
|
||||
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
namespace csf {
|
||||
/** Proposal is a position taken in the consensus process and is represented
|
||||
directly from the generic types.
|
||||
/** Position sidecar for CSF that can model RNG commit/reveal fields.
|
||||
|
||||
Core tx-set convergence remains keyed on txSetHash only, matching
|
||||
production's ExtendedPosition behavior.
|
||||
*/
|
||||
using Proposal = ConsensusProposal<PeerID, Ledger::ID, TxSet::ID>;
|
||||
struct RngPosition
|
||||
{
|
||||
TxSet::ID txSetHash{};
|
||||
std::optional<uint256> commitSetHash;
|
||||
std::optional<uint256> entropySetHash;
|
||||
std::optional<uint256> exportSigSetHash;
|
||||
std::optional<uint256> myCommitment;
|
||||
std::optional<uint256> myReveal;
|
||||
std::optional<uint256> myExportSignature;
|
||||
|
||||
RngPosition() = default;
|
||||
explicit RngPosition(TxSet::ID txSet) : txSetHash(txSet)
|
||||
{
|
||||
}
|
||||
|
||||
operator TxSet::ID() const
|
||||
{
|
||||
return txSetHash;
|
||||
}
|
||||
|
||||
void
|
||||
updateTxSet(TxSet::ID txSet)
|
||||
{
|
||||
txSetHash = txSet;
|
||||
}
|
||||
|
||||
bool
|
||||
operator==(RngPosition const& other) const
|
||||
{
|
||||
return txSetHash == other.txSetHash;
|
||||
}
|
||||
|
||||
bool
|
||||
operator!=(RngPosition const& other) const
|
||||
{
|
||||
return !(*this == other);
|
||||
}
|
||||
|
||||
bool
|
||||
operator==(TxSet::ID txSet) const
|
||||
{
|
||||
return txSetHash == txSet;
|
||||
}
|
||||
|
||||
bool
|
||||
operator!=(TxSet::ID txSet) const
|
||||
{
|
||||
return txSetHash != txSet;
|
||||
}
|
||||
};
|
||||
|
||||
inline bool
|
||||
operator==(TxSet::ID txSet, RngPosition const& pos)
|
||||
{
|
||||
return pos == txSet;
|
||||
}
|
||||
|
||||
inline bool
|
||||
operator!=(TxSet::ID txSet, RngPosition const& pos)
|
||||
{
|
||||
return pos != txSet;
|
||||
}
|
||||
|
||||
inline std::string
|
||||
to_string(RngPosition const& pos)
|
||||
{
|
||||
return std::to_string(pos.txSetHash);
|
||||
}
|
||||
|
||||
inline std::ostream&
|
||||
operator<<(std::ostream& os, RngPosition const& pos)
|
||||
{
|
||||
return os << pos.txSetHash;
|
||||
}
|
||||
|
||||
template <class Hasher>
|
||||
void
|
||||
hash_append(Hasher& h, RngPosition const& pos)
|
||||
{
|
||||
using beast::hash_append;
|
||||
auto appendOpt = [&](std::optional<uint256> const& o) {
|
||||
hash_append(h, static_cast<std::uint8_t>(o.has_value() ? 1 : 0));
|
||||
if (o)
|
||||
hash_append(h, *o);
|
||||
};
|
||||
|
||||
hash_append(h, pos.txSetHash);
|
||||
appendOpt(pos.commitSetHash);
|
||||
appendOpt(pos.entropySetHash);
|
||||
appendOpt(pos.exportSigSetHash);
|
||||
appendOpt(pos.myCommitment);
|
||||
appendOpt(pos.myReveal);
|
||||
appendOpt(pos.myExportSignature);
|
||||
}
|
||||
|
||||
/** Proposal is a position taken in the consensus process.
|
||||
*/
|
||||
using Proposal = ConsensusProposal<PeerID, Ledger::ID, RngPosition>;
|
||||
using ProposalPosition = RngPosition;
|
||||
|
||||
} // namespace csf
|
||||
} // namespace test
|
||||
} // namespace ripple
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
@@ -25,6 +25,7 @@
|
||||
#include <test/csf/Digraph.h>
|
||||
#include <test/csf/Peer.h>
|
||||
#include <test/csf/PeerGroup.h>
|
||||
#include <test/csf/PeerTick.h>
|
||||
#include <test/csf/Scheduler.h>
|
||||
#include <test/csf/SimTime.h>
|
||||
#include <test/csf/TrustGraph.h>
|
||||
@@ -83,6 +84,7 @@ public:
|
||||
BasicNetwork<Peer*> net;
|
||||
TrustGraph<Peer*> trustGraph;
|
||||
CollectorRefs collectors;
|
||||
SidecarStore sidecarStore;
|
||||
|
||||
/** Create a simulation
|
||||
|
||||
@@ -119,7 +121,8 @@ public:
|
||||
net,
|
||||
trustGraph,
|
||||
collectors,
|
||||
j);
|
||||
j,
|
||||
sidecarStore);
|
||||
newPeers.emplace_back(&peers.back());
|
||||
}
|
||||
PeerGroup res{newPeers};
|
||||
|
||||
@@ -48,7 +48,7 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
ID
|
||||
ID const&
|
||||
id() const
|
||||
{
|
||||
return id_;
|
||||
|
||||
@@ -82,7 +82,12 @@ supported_amendments()
|
||||
Throw<std::runtime_error>(
|
||||
"Unknown feature: " + s + " in supportedAmendments.");
|
||||
}
|
||||
return FeatureBitset(feats);
|
||||
//@@start rng-test-environment-gating
|
||||
// TODO: ConsensusEntropy injects a pseudo-tx every ledger which
|
||||
// breaks existing test transaction count assumptions. Exclude from
|
||||
// default test set until dedicated tests are written.
|
||||
return FeatureBitset(feats) - featureConsensusEntropy;
|
||||
//@@end rng-test-environment-gating
|
||||
}();
|
||||
return ids;
|
||||
}
|
||||
|
||||
@@ -130,7 +130,8 @@ Env::close(
|
||||
// Go through the rpc interface unless we need to simulate
|
||||
// a specific consensus delay.
|
||||
if (consensusDelay)
|
||||
app().getOPs().acceptLedger(consensusDelay);
|
||||
app().getOPs().acceptLedger(
|
||||
consensusDelay, "Env::close(consensusDelay)");
|
||||
else
|
||||
{
|
||||
auto resp = rpc("ledger_accept");
|
||||
|
||||
@@ -65,29 +65,16 @@ hso_delete(void (*f)(Json::Value& jv))
|
||||
Json::Value
|
||||
hso(std::vector<uint8_t> const& wasmBytes, void (*f)(Json::Value& jv))
|
||||
{
|
||||
if (wasmBytes.size() == 0)
|
||||
throw std::runtime_error("empty hook wasm passed to hso()");
|
||||
|
||||
Json::Value jv;
|
||||
jv[jss::CreateCode] = strHex(wasmBytes);
|
||||
{
|
||||
jv[jss::HookOn] =
|
||||
"0000000000000000000000000000000000000000000000000000000000000000";
|
||||
jv[jss::HookNamespace] = to_string(uint256{beast::zero});
|
||||
jv[jss::HookApiVersion] = Json::Value{0};
|
||||
}
|
||||
|
||||
if (f)
|
||||
f(jv);
|
||||
|
||||
return jv;
|
||||
return hso(strHex(wasmBytes), f);
|
||||
}
|
||||
|
||||
Json::Value
|
||||
hso(std::string const& wasmHex, void (*f)(Json::Value& jv))
|
||||
{
|
||||
if (wasmHex.size() == 0)
|
||||
throw std::runtime_error("empty hook wasm passed to hso()");
|
||||
throw std::runtime_error(
|
||||
"empty hook wasm passed to hso(): run "
|
||||
"src/test/app/build_test_hooks.sh to generate the hook wasm");
|
||||
|
||||
Json::Value jv;
|
||||
jv[jss::CreateCode] = wasmHex;
|
||||
|
||||
212
src/test/jtx/xpop.h
Normal file
212
src/test/jtx/xpop.h
Normal file
@@ -0,0 +1,212 @@
|
||||
#ifndef RIPPLE_TEST_JTX_XPOP_H_INCLUDED
|
||||
#define RIPPLE_TEST_JTX_XPOP_H_INCLUDED
|
||||
|
||||
#include <test/jtx/Env.h>
|
||||
#include <xrpld/app/ledger/LedgerMaster.h>
|
||||
#include <xrpld/app/proof/LedgerProof.h>
|
||||
#include <xrpld/app/proof/XPOPv1.h>
|
||||
#include <xrpl/basics/StringUtilities.h>
|
||||
#include <xrpl/basics/base64.h>
|
||||
#include <xrpl/protocol/PublicKey.h>
|
||||
#include <xrpl/protocol/SecretKey.h>
|
||||
#include <xrpl/protocol/Sign.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
namespace jtx {
|
||||
namespace xpop {
|
||||
|
||||
/// Build a manifest string (binary, not base64).
|
||||
inline std::string
|
||||
makeManifestRaw(
|
||||
PublicKey const& masterPub,
|
||||
SecretKey const& masterSec,
|
||||
PublicKey const& signingPub,
|
||||
SecretKey const& signingSec,
|
||||
int seq = 1)
|
||||
{
|
||||
STObject st(sfGeneric);
|
||||
st[sfSequence] = seq;
|
||||
st[sfPublicKey] = masterPub;
|
||||
st[sfSigningPubKey] = signingPub;
|
||||
|
||||
sign(st, HashPrefix::manifest, *publicKeyType(signingPub), signingSec);
|
||||
sign(
|
||||
st,
|
||||
HashPrefix::manifest,
|
||||
*publicKeyType(masterPub),
|
||||
masterSec,
|
||||
sfMasterSignature);
|
||||
|
||||
Serializer s;
|
||||
st.add(s);
|
||||
return std::string(static_cast<char const*>(s.data()), s.size());
|
||||
}
|
||||
|
||||
/// A complete test validator with all keys and manifest.
|
||||
struct TestValidator
|
||||
{
|
||||
PublicKey masterPublic;
|
||||
SecretKey masterSecret;
|
||||
PublicKey signingPublic;
|
||||
SecretKey signingSecret;
|
||||
std::string manifestRaw;
|
||||
std::string manifestBase64;
|
||||
|
||||
static TestValidator
|
||||
create()
|
||||
{
|
||||
auto const ms = randomSecretKey();
|
||||
auto const mp = derivePublicKey(KeyType::ed25519, ms);
|
||||
auto const [sp, ss] = randomKeyPair(KeyType::secp256k1);
|
||||
auto raw = makeManifestRaw(mp, ms, sp, ss, 1);
|
||||
return {mp, ms, sp, ss, raw, base64_encode(raw)};
|
||||
}
|
||||
|
||||
proof::ValidatorKeys
|
||||
toValidatorKeys() const
|
||||
{
|
||||
return {
|
||||
masterPublic,
|
||||
masterSecret,
|
||||
signingPublic,
|
||||
signingSecret,
|
||||
manifestBase64};
|
||||
}
|
||||
};
|
||||
|
||||
/// A complete test VL publisher with keys and manifest.
|
||||
struct TestVLPublisher
|
||||
{
|
||||
PublicKey masterPublic;
|
||||
SecretKey masterSecret;
|
||||
PublicKey signingPublic;
|
||||
SecretKey signingSecret;
|
||||
std::string manifestBase64;
|
||||
|
||||
static TestVLPublisher
|
||||
create()
|
||||
{
|
||||
auto const ms = randomSecretKey();
|
||||
auto const mp = derivePublicKey(KeyType::ed25519, ms);
|
||||
auto const [sp, ss] = randomKeyPair(KeyType::secp256k1);
|
||||
return {
|
||||
mp, ms, sp, ss, base64_encode(makeManifestRaw(mp, ms, sp, ss, 1))};
|
||||
}
|
||||
|
||||
/// Build VL data for these validators.
|
||||
proof::VLData
|
||||
buildVLData(
|
||||
std::vector<TestValidator> const& validators,
|
||||
std::uint32_t sequence = 1,
|
||||
std::uint32_t expiration = 767784645) const
|
||||
{
|
||||
// Build the JSON blob
|
||||
std::string data = "{\"sequence\":" + std::to_string(sequence) +
|
||||
",\"expiration\":" + std::to_string(expiration) +
|
||||
",\"validators\":[";
|
||||
|
||||
for (std::size_t i = 0; i < validators.size(); ++i)
|
||||
{
|
||||
if (i > 0)
|
||||
data += ",";
|
||||
data += "{\"validation_public_key\":\"" +
|
||||
strHex(validators[i].masterPublic) + "\",\"manifest\":\"" +
|
||||
validators[i].manifestBase64 + "\"}";
|
||||
}
|
||||
data += "]}";
|
||||
|
||||
auto const blob = base64_encode(data);
|
||||
auto const sig =
|
||||
strHex(sign(signingPublic, signingSecret, makeSlice(data)));
|
||||
|
||||
return proof::VLData{
|
||||
masterPublic, masterSecret, manifestBase64, blob, sig, 1};
|
||||
}
|
||||
};
|
||||
|
||||
/// Everything needed to build and import XPOPs in tests.
|
||||
struct TestXPOPContext
|
||||
{
|
||||
std::vector<TestValidator> validators;
|
||||
TestVLPublisher publisher;
|
||||
proof::VLData vlData;
|
||||
|
||||
static TestXPOPContext
|
||||
create(int validatorCount = 5)
|
||||
{
|
||||
auto pub = TestVLPublisher::create();
|
||||
std::vector<TestValidator> vals;
|
||||
for (int i = 0; i < validatorCount; ++i)
|
||||
vals.push_back(TestValidator::create());
|
||||
auto vl = pub.buildVLData(vals);
|
||||
return {std::move(vals), std::move(pub), std::move(vl)};
|
||||
}
|
||||
|
||||
/// Get the VL master public key hex for IMPORT_VL_KEYS config.
|
||||
std::string
|
||||
vlKeyHex() const
|
||||
{
|
||||
return strHex(publisher.masterPublic);
|
||||
}
|
||||
|
||||
/// Build an Env config with NETWORK_ID and IMPORT_VL_KEYS set.
|
||||
std::unique_ptr<Config>
|
||||
makeEnvConfig(std::uint32_t networkID = 21337) const
|
||||
{
|
||||
auto cfg = envconfig(jtx::validator, "");
|
||||
cfg->NETWORK_ID = networkID;
|
||||
auto const keyHex = vlKeyHex();
|
||||
auto const pkHex = strUnHex(keyHex);
|
||||
if (pkHex)
|
||||
cfg->IMPORT_VL_KEYS.emplace(keyHex, makeSlice(*pkHex));
|
||||
return cfg;
|
||||
}
|
||||
|
||||
/// Build XPOP from a closed ledger for a specific tx.
|
||||
Json::Value
|
||||
buildXPOP(Ledger const& ledger, uint256 const& txHash) const
|
||||
{
|
||||
std::vector<proof::ValidatorKeys> valKeys;
|
||||
for (auto const& v : validators)
|
||||
valKeys.push_back(v.toValidatorKeys());
|
||||
return proof::buildXPOPv1(ledger, txHash, valKeys, vlData);
|
||||
}
|
||||
|
||||
/// Build XPOP from an Env's last closed ledger.
|
||||
Json::Value
|
||||
buildXPOP(Env& env, uint256 const& txHash) const
|
||||
{
|
||||
auto const lcl = env.app().getLedgerMaster().getClosedLedger();
|
||||
if (!lcl)
|
||||
return {};
|
||||
return buildXPOP(*lcl, txHash);
|
||||
}
|
||||
};
|
||||
|
||||
/// Build a complete XPOP v1 JSON from an Env's last closed ledger.
|
||||
/// Creates fresh validator keys and VL publisher for each call.
|
||||
inline Json::Value
|
||||
buildTestXPOP(Env& env, uint256 const& txHash, int validatorCount = 5)
|
||||
{
|
||||
auto ctx = TestXPOPContext::create(validatorCount);
|
||||
return ctx.buildXPOP(env, txHash);
|
||||
}
|
||||
|
||||
/// Get the hex-encoded XPOP blob suitable for sfBlob in ttIMPORT.
|
||||
inline std::string
|
||||
buildTestXPOPHex(Env& env, uint256 const& txHash, int validatorCount = 5)
|
||||
{
|
||||
auto const xpop = buildTestXPOP(env, txHash, validatorCount);
|
||||
if (xpop.isNull())
|
||||
return {};
|
||||
return proof::xpopToHex(xpop);
|
||||
}
|
||||
|
||||
} // namespace xpop
|
||||
} // namespace jtx
|
||||
} // namespace test
|
||||
} // namespace ripple
|
||||
|
||||
#endif
|
||||
530
src/test/rpc/RuntimeConfig_test.cpp
Normal file
530
src/test/rpc/RuntimeConfig_test.cpp
Normal file
@@ -0,0 +1,530 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2026 XRPL Labs
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <test/jtx.h>
|
||||
#include <xrpld/app/misc/RuntimeConfig.h>
|
||||
#include <xrpld/overlay/detail/TrafficCount.h>
|
||||
#include <xrpl/protocol/jss.h>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
class RuntimeConfig_test : public beast::unit_test::suite
|
||||
{
|
||||
// Helper to call runtime_config RPC with JSON params
|
||||
Json::Value
|
||||
runtimeConfig(test::jtx::Env& env, Json::Value const& params)
|
||||
{
|
||||
return env.rpc(
|
||||
"json", "runtime_config", to_string(params))[jss::result];
|
||||
}
|
||||
|
||||
// Helper to call runtime_config RPC with no params (GET)
|
||||
Json::Value
|
||||
runtimeConfig(test::jtx::Env& env)
|
||||
{
|
||||
return env.rpc("runtime_config")[jss::result];
|
||||
}
|
||||
|
||||
void
|
||||
testGetEmpty()
|
||||
{
|
||||
testcase("GET empty config");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
auto result = runtimeConfig(env);
|
||||
BEAST_EXPECT(result.isMember("configs"));
|
||||
BEAST_EXPECT(result["configs"].size() == 0);
|
||||
BEAST_EXPECT(!env.app().getRuntimeConfig().active());
|
||||
}
|
||||
|
||||
void
|
||||
testSetGlobal()
|
||||
{
|
||||
testcase("SET global config");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["send_delay_ms"] = 100;
|
||||
params["set"]["*"]["send_delay_jitter_ms"] = 20;
|
||||
params["set"]["*"]["send_drop_pct"] = 5.5;
|
||||
|
||||
auto result = runtimeConfig(env, params);
|
||||
BEAST_EXPECT(result.isMember("configs"));
|
||||
|
||||
auto const& configs = result["configs"];
|
||||
if (!BEAST_EXPECT(configs.isMember("*")))
|
||||
return;
|
||||
|
||||
auto const& global = configs["*"];
|
||||
BEAST_EXPECT(global["send_delay_ms"].asInt() == 100);
|
||||
BEAST_EXPECT(global["send_delay_jitter_ms"].asInt() == 20);
|
||||
BEAST_EXPECT(global["send_drop_pct"].asDouble() == 5.5);
|
||||
|
||||
// Verify active state via RuntimeConfig directly
|
||||
BEAST_EXPECT(env.app().getRuntimeConfig().active());
|
||||
|
||||
// Verify getConfig returns the global for any peer
|
||||
auto cfg = env.app().getRuntimeConfig().getConfig("10.0.0.1:51235");
|
||||
BEAST_EXPECT(cfg.has_value());
|
||||
BEAST_EXPECT(cfg->sendDelayMs == 100);
|
||||
BEAST_EXPECT(cfg->sendDelayJitterMs == 20);
|
||||
BEAST_EXPECT(cfg->sendDropPctX100 == 550);
|
||||
}
|
||||
|
||||
void
|
||||
testSetPerPeer()
|
||||
{
|
||||
testcase("SET per-peer config with merge");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// Set global first
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["send_delay_ms"] = 100;
|
||||
params["set"]["*"]["send_drop_pct"] = 10.0;
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
|
||||
// Set per-peer override (only delay, no drop)
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["10.0.0.2:51235"] = Json::objectValue;
|
||||
params["set"]["10.0.0.2:51235"]["send_delay_ms"] = 500;
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
|
||||
auto& rc = env.app().getRuntimeConfig();
|
||||
|
||||
// Per-peer should have merged values: delay from override, drop from *
|
||||
auto peerCfg = rc.getConfig("10.0.0.2:51235");
|
||||
if (!BEAST_EXPECT(peerCfg.has_value()))
|
||||
return;
|
||||
BEAST_EXPECT(peerCfg->sendDelayMs == 500); // overridden
|
||||
BEAST_EXPECT(peerCfg->sendDropPctX100 == 1000); // inherited from *
|
||||
|
||||
// Other peers still get the global
|
||||
auto otherCfg = rc.getConfig("10.0.0.3:51235");
|
||||
if (!BEAST_EXPECT(otherCfg.has_value()))
|
||||
return;
|
||||
BEAST_EXPECT(otherCfg->sendDelayMs == 100);
|
||||
BEAST_EXPECT(otherCfg->sendDropPctX100 == 1000);
|
||||
}
|
||||
|
||||
void
|
||||
testClear()
|
||||
{
|
||||
testcase("CLEAR specific target");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// Set global + per-peer
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["send_delay_ms"] = 50;
|
||||
params["set"]["10.0.0.2:51235"] = Json::objectValue;
|
||||
params["set"]["10.0.0.2:51235"]["send_delay_ms"] = 200;
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
|
||||
// Clear per-peer
|
||||
{
|
||||
Json::Value params;
|
||||
params["clear"] = Json::arrayValue;
|
||||
params["clear"].append("10.0.0.2:51235");
|
||||
auto result = runtimeConfig(env, params);
|
||||
// Should still have "*"
|
||||
BEAST_EXPECT(result["configs"].isMember("*"));
|
||||
BEAST_EXPECT(!result["configs"].isMember("10.0.0.2:51235"));
|
||||
}
|
||||
|
||||
// Per-peer now falls back to global
|
||||
auto cfg = env.app().getRuntimeConfig().getConfig("10.0.0.2:51235");
|
||||
BEAST_EXPECT(cfg.has_value());
|
||||
BEAST_EXPECT(cfg->sendDelayMs == 50);
|
||||
}
|
||||
|
||||
void
|
||||
testClearAll()
|
||||
{
|
||||
testcase("CLEAR_ALL");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// Set some configs
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["send_delay_ms"] = 100;
|
||||
params["set"]["10.0.0.2:51235"] = Json::objectValue;
|
||||
params["set"]["10.0.0.2:51235"]["send_drop_pct"] = 50.0;
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
BEAST_EXPECT(env.app().getRuntimeConfig().active());
|
||||
|
||||
// Clear all
|
||||
{
|
||||
Json::Value params;
|
||||
params["clear_all"] = true;
|
||||
auto result = runtimeConfig(env, params);
|
||||
BEAST_EXPECT(result["configs"].size() == 0);
|
||||
}
|
||||
BEAST_EXPECT(!env.app().getRuntimeConfig().active());
|
||||
BEAST_EXPECT(!env.app().getRuntimeConfig().getConfig("*").has_value());
|
||||
}
|
||||
|
||||
void
|
||||
testPerPeerWithoutGlobal()
|
||||
{
|
||||
testcase("Per-peer config without global");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// Set only per-peer, no global
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["10.0.0.2:51235"] = Json::objectValue;
|
||||
params["set"]["10.0.0.2:51235"]["send_delay_ms"] = 300;
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
|
||||
auto& rc = env.app().getRuntimeConfig();
|
||||
BEAST_EXPECT(rc.active());
|
||||
|
||||
// Targeted peer gets the config
|
||||
auto peerCfg = rc.getConfig("10.0.0.2:51235");
|
||||
BEAST_EXPECT(peerCfg.has_value());
|
||||
BEAST_EXPECT(peerCfg->sendDelayMs == 300);
|
||||
|
||||
// Other peers get nothing
|
||||
BEAST_EXPECT(!rc.getConfig("10.0.0.3:51235").has_value());
|
||||
}
|
||||
|
||||
void
|
||||
testMessageTypeFilter()
|
||||
{
|
||||
testcase("Message type filter");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// Set with message_types filter
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["send_delay_ms"] = 100;
|
||||
params["set"]["*"]["message_types"] = Json::arrayValue;
|
||||
params["set"]["*"]["message_types"].append("proposal");
|
||||
params["set"]["*"]["message_types"].append("validation");
|
||||
auto result = runtimeConfig(env, params);
|
||||
|
||||
// Verify response includes message_types
|
||||
auto const& global = result["configs"]["*"];
|
||||
BEAST_EXPECT(global.isMember("message_types"));
|
||||
BEAST_EXPECT(global["message_types"].size() == 2);
|
||||
}
|
||||
|
||||
auto& rc = env.app().getRuntimeConfig();
|
||||
auto cfg = rc.getConfig("10.0.0.1:51235");
|
||||
if (!BEAST_EXPECT(cfg.has_value()))
|
||||
return;
|
||||
|
||||
// Applies to proposal and validation categories
|
||||
BEAST_EXPECT(cfg->appliesTo(TrafficCount::category::proposal));
|
||||
BEAST_EXPECT(cfg->appliesTo(TrafficCount::category::validation));
|
||||
|
||||
// Does NOT apply to other categories
|
||||
BEAST_EXPECT(!cfg->appliesTo(TrafficCount::category::transaction));
|
||||
BEAST_EXPECT(!cfg->appliesTo(TrafficCount::category::base));
|
||||
}
|
||||
|
||||
void
|
||||
testMessageTypeFilterEmpty()
|
||||
{
|
||||
testcase("No message type filter means all");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// Set without message_types — applies to all
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["send_delay_ms"] = 100;
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
|
||||
auto cfg = env.app().getRuntimeConfig().getConfig("*");
|
||||
if (!BEAST_EXPECT(cfg.has_value()))
|
||||
return;
|
||||
|
||||
BEAST_EXPECT(!cfg->messageCategories.has_value());
|
||||
BEAST_EXPECT(cfg->appliesTo(TrafficCount::category::proposal));
|
||||
BEAST_EXPECT(cfg->appliesTo(TrafficCount::category::validation));
|
||||
BEAST_EXPECT(cfg->appliesTo(TrafficCount::category::transaction));
|
||||
BEAST_EXPECT(cfg->appliesTo(TrafficCount::category::base));
|
||||
}
|
||||
|
||||
void
|
||||
testInvalidMessageType()
|
||||
{
|
||||
testcase("Invalid message type returns error");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["send_delay_ms"] = 100;
|
||||
params["set"]["*"]["message_types"] = Json::arrayValue;
|
||||
params["set"]["*"]["message_types"].append("proposals"); // typo
|
||||
auto result = runtimeConfig(env, params);
|
||||
|
||||
BEAST_EXPECT(result.isMember("error"));
|
||||
BEAST_EXPECT(result["error"].asString() == "invalidParams");
|
||||
// Config should NOT have been applied
|
||||
BEAST_EXPECT(!env.app().getRuntimeConfig().active());
|
||||
}
|
||||
|
||||
void
|
||||
testDropPctClamping()
|
||||
{
|
||||
testcase("send_drop_pct clamped to 0-100");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// Over 100
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["send_drop_pct"] = 200.0;
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
auto cfg = env.app().getRuntimeConfig().getConfig("*");
|
||||
BEAST_EXPECT(cfg.has_value());
|
||||
BEAST_EXPECT(cfg->sendDropPctX100 == 10000); // clamped to 100%
|
||||
|
||||
// Negative
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["send_drop_pct"] = -50.0;
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
cfg = env.app().getRuntimeConfig().getConfig("*");
|
||||
BEAST_EXPECT(cfg.has_value());
|
||||
BEAST_EXPECT(cfg->sendDropPctX100 == 0); // clamped to 0%
|
||||
}
|
||||
|
||||
void
|
||||
testRngClaimDropPct()
|
||||
{
|
||||
testcase("rng_claim_drop_pct round-trips");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// Set rng_claim_drop_pct
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["rng_claim_drop_pct"] = 50.0;
|
||||
auto result = runtimeConfig(env, params);
|
||||
|
||||
auto const& global = result["configs"]["*"];
|
||||
BEAST_EXPECT(global["rng_claim_drop_pct"].asDouble() == 50.0);
|
||||
}
|
||||
|
||||
BEAST_EXPECT(env.app().getRuntimeConfig().active());
|
||||
|
||||
// Verify via getConfig
|
||||
auto cfg = env.app().getRuntimeConfig().getConfig("*");
|
||||
BEAST_EXPECT(cfg.has_value());
|
||||
BEAST_EXPECT(cfg->rngClaimDropPctX100 == 5000);
|
||||
|
||||
// Clear and verify removal
|
||||
{
|
||||
Json::Value params;
|
||||
params["clear_all"] = true;
|
||||
auto result = runtimeConfig(env, params);
|
||||
BEAST_EXPECT(result["configs"].size() == 0);
|
||||
}
|
||||
BEAST_EXPECT(!env.app().getRuntimeConfig().active());
|
||||
}
|
||||
|
||||
void
|
||||
testRngClaimDropPctClamping()
|
||||
{
|
||||
testcase("rng_claim_drop_pct clamped to 0-100");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// Over 100
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["rng_claim_drop_pct"] = 150.0;
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
auto cfg = env.app().getRuntimeConfig().getConfig("*");
|
||||
BEAST_EXPECT(cfg.has_value());
|
||||
BEAST_EXPECT(cfg->rngClaimDropPctX100 == 10000); // clamped to 100%
|
||||
|
||||
// Negative
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["rng_claim_drop_pct"] = -10.0;
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
cfg = env.app().getRuntimeConfig().getConfig("*");
|
||||
BEAST_EXPECT(cfg.has_value());
|
||||
BEAST_EXPECT(cfg->rngClaimDropPctX100 == 0); // clamped to 0%
|
||||
}
|
||||
|
||||
void
|
||||
testExplicitFinalProposalToggle()
|
||||
{
|
||||
testcase("explicit_final_proposal round-trips and merges");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// Global default for this node: skip explicit final proposal.
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["explicit_final_proposal"] = false;
|
||||
auto result = runtimeConfig(env, params);
|
||||
|
||||
auto const& global = result["configs"]["*"];
|
||||
BEAST_EXPECT(global["explicit_final_proposal"].asBool() == false);
|
||||
}
|
||||
|
||||
auto& rc = env.app().getRuntimeConfig();
|
||||
BEAST_EXPECT(rc.active());
|
||||
|
||||
// Global view is false.
|
||||
auto globalCfg = rc.getConfig("*");
|
||||
BEAST_EXPECT(globalCfg.has_value());
|
||||
BEAST_EXPECT(globalCfg->explicitFinalProposal.has_value());
|
||||
BEAST_EXPECT(*globalCfg->explicitFinalProposal == false);
|
||||
|
||||
// Per-peer override can re-enable.
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["10.0.0.2:51235"] = Json::objectValue;
|
||||
params["set"]["10.0.0.2:51235"]["explicit_final_proposal"] = true;
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
|
||||
auto peerCfg = rc.getConfig("10.0.0.2:51235");
|
||||
BEAST_EXPECT(peerCfg.has_value());
|
||||
BEAST_EXPECT(peerCfg->explicitFinalProposal.has_value());
|
||||
BEAST_EXPECT(*peerCfg->explicitFinalProposal == true);
|
||||
|
||||
auto otherCfg = rc.getConfig("10.0.0.3:51235");
|
||||
BEAST_EXPECT(otherCfg.has_value());
|
||||
BEAST_EXPECT(otherCfg->explicitFinalProposal.has_value());
|
||||
BEAST_EXPECT(*otherCfg->explicitFinalProposal == false);
|
||||
}
|
||||
|
||||
void
|
||||
testPerPeerClearInheritedFilter()
|
||||
{
|
||||
testcase("Per-peer can override global filter to all");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// Global: only proposals
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["*"] = Json::objectValue;
|
||||
params["set"]["*"]["send_delay_ms"] = 100;
|
||||
params["set"]["*"]["message_types"] = Json::arrayValue;
|
||||
params["set"]["*"]["message_types"].append("proposal");
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
|
||||
// Per-peer: message_types = [] (explicitly all)
|
||||
{
|
||||
Json::Value params;
|
||||
params["set"] = Json::objectValue;
|
||||
params["set"]["10.0.0.2:51235"] = Json::objectValue;
|
||||
params["set"]["10.0.0.2:51235"]["message_types"] = Json::arrayValue;
|
||||
runtimeConfig(env, params);
|
||||
}
|
||||
|
||||
auto& rc = env.app().getRuntimeConfig();
|
||||
|
||||
// Per-peer should apply to all categories (empty set override)
|
||||
auto peerCfg = rc.getConfig("10.0.0.2:51235");
|
||||
BEAST_EXPECT(peerCfg.has_value());
|
||||
BEAST_EXPECT(peerCfg->appliesTo(TrafficCount::category::proposal));
|
||||
BEAST_EXPECT(peerCfg->appliesTo(TrafficCount::category::validation));
|
||||
BEAST_EXPECT(peerCfg->appliesTo(TrafficCount::category::transaction));
|
||||
|
||||
// Other peers still only get proposal filter from global
|
||||
auto otherCfg = rc.getConfig("10.0.0.3:51235");
|
||||
BEAST_EXPECT(otherCfg.has_value());
|
||||
BEAST_EXPECT(otherCfg->appliesTo(TrafficCount::category::proposal));
|
||||
BEAST_EXPECT(!otherCfg->appliesTo(TrafficCount::category::validation));
|
||||
}
|
||||
|
||||
public:
|
||||
void
|
||||
run() override
|
||||
{
|
||||
testGetEmpty();
|
||||
testSetGlobal();
|
||||
testSetPerPeer();
|
||||
testClear();
|
||||
testClearAll();
|
||||
testPerPeerWithoutGlobal();
|
||||
testMessageTypeFilter();
|
||||
testMessageTypeFilterEmpty();
|
||||
testInvalidMessageType();
|
||||
testDropPctClamping();
|
||||
testRngClaimDropPct();
|
||||
testRngClaimDropPctClamping();
|
||||
testExplicitFinalProposalToggle();
|
||||
testPerPeerClearInheritedFilter();
|
||||
}
|
||||
};
|
||||
|
||||
BEAST_DEFINE_TESTSUITE(RuntimeConfig, rpc, ripple);
|
||||
|
||||
} // namespace ripple
|
||||
131
src/test/unit_test/SuiteLogsWithOverrides.h
Normal file
131
src/test/unit_test/SuiteLogsWithOverrides.h
Normal file
@@ -0,0 +1,131 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2026 XRPL Labs
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef TEST_UNIT_TEST_SUITE_LOGS_WITH_OVERRIDES_H
|
||||
#define TEST_UNIT_TEST_SUITE_LOGS_WITH_OVERRIDES_H
|
||||
|
||||
#include <test/unit_test/SuiteJournal.h>
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/beast/unit_test.h>
|
||||
#include <xrpl/beast/utility/Journal.h>
|
||||
#include <iostream>
|
||||
#include <mutex>
|
||||
#include <set>
|
||||
#include <string>
|
||||
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
|
||||
/** A Journal::Sink that writes directly to stderr.
|
||||
*
|
||||
* Unlike SuiteJournalSink (which writes to suite_.log and is only
|
||||
* visible when tests fail), this always produces visible output.
|
||||
*/
|
||||
class StderrJournalSink : public beast::Journal::Sink
|
||||
{
|
||||
std::string partition_;
|
||||
|
||||
public:
|
||||
StderrJournalSink(
|
||||
std::string const& partition,
|
||||
beast::severities::Severity threshold)
|
||||
: Sink(threshold, false), partition_(partition)
|
||||
{
|
||||
}
|
||||
|
||||
bool
|
||||
active(beast::severities::Severity level) const override
|
||||
{
|
||||
return level >= threshold();
|
||||
}
|
||||
|
||||
void
|
||||
write(beast::severities::Severity level, std::string const& text) override
|
||||
{
|
||||
if (level >= threshold())
|
||||
writeAlways(level, text);
|
||||
}
|
||||
|
||||
void
|
||||
writeAlways(beast::severities::Severity level, std::string const& text)
|
||||
override
|
||||
{
|
||||
static std::mutex mtx;
|
||||
std::lock_guard lock(mtx);
|
||||
std::cerr << partition_ << ":" << text << std::endl;
|
||||
}
|
||||
};
|
||||
|
||||
/** SuiteLogs with per-partition severity overrides written to stderr.
|
||||
*
|
||||
* Overridden partitions write to stderr (always visible).
|
||||
* All other partitions use SuiteJournalSink (suite_.log, only on failure).
|
||||
*
|
||||
* Usage:
|
||||
* #include <test/unit_test/SuiteLogsWithOverrides.h>
|
||||
*
|
||||
* using Sev = beast::severities::Severity;
|
||||
* Env env{*this, cfg, features,
|
||||
* std::make_unique<SuiteLogsWithOverrides>(
|
||||
* *this,
|
||||
* SuiteLogsWithOverrides::Overrides{
|
||||
* {"Export", Sev::kTrace},
|
||||
* {"TxQ", Sev::kInfo},
|
||||
* {"View", Sev::kDebug},
|
||||
* })};
|
||||
*/
|
||||
class SuiteLogsWithOverrides : public Logs
|
||||
{
|
||||
beast::unit_test::suite& suite_;
|
||||
std::set<std::string> overridden_;
|
||||
|
||||
public:
|
||||
using Overrides = std::initializer_list<
|
||||
std::pair<std::string, beast::severities::Severity>>;
|
||||
|
||||
SuiteLogsWithOverrides(
|
||||
beast::unit_test::suite& suite,
|
||||
Overrides overrides,
|
||||
beast::severities::Severity defaultThresh = beast::severities::kError)
|
||||
: Logs(defaultThresh), suite_(suite)
|
||||
{
|
||||
for (auto const& [name, sev] : overrides)
|
||||
{
|
||||
overridden_.insert(name);
|
||||
get(name).threshold(sev);
|
||||
}
|
||||
}
|
||||
|
||||
~SuiteLogsWithOverrides() override = default;
|
||||
|
||||
std::unique_ptr<beast::Journal::Sink>
|
||||
makeSink(
|
||||
std::string const& partition,
|
||||
beast::severities::Severity threshold) override
|
||||
{
|
||||
if (overridden_.count(partition))
|
||||
return std::make_unique<StderrJournalSink>(partition, threshold);
|
||||
return std::make_unique<SuiteJournalSink>(partition, threshold, suite_);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace test
|
||||
} // namespace ripple
|
||||
|
||||
#endif
|
||||
2268
src/xrpld/app/consensus/ConsensusExtensions.cpp
Normal file
2268
src/xrpld/app/consensus/ConsensusExtensions.cpp
Normal file
File diff suppressed because it is too large
Load Diff
458
src/xrpld/app/consensus/ConsensusExtensions.h
Normal file
458
src/xrpld/app/consensus/ConsensusExtensions.h
Normal file
@@ -0,0 +1,458 @@
|
||||
#ifndef RIPPLE_APP_CONSENSUS_CONSENSUSEXTENSIONS_H_INCLUDED
|
||||
#define RIPPLE_APP_CONSENSUS_CONSENSUSEXTENSIONS_H_INCLUDED
|
||||
|
||||
#include <xrpld/app/consensus/RCLCxLedger.h>
|
||||
#include <xrpld/app/consensus/RCLCxPeerPos.h>
|
||||
#include <xrpld/app/consensus/RCLCxTx.h>
|
||||
#include <xrpld/app/misc/ExportSigCollector.h>
|
||||
#include <xrpld/consensus/ConsensusParms.h>
|
||||
#include <xrpld/consensus/ConsensusTypes.h>
|
||||
#include <xrpld/overlay/Message.h>
|
||||
#include <xrpld/shamap/SHAMap.h>
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/beast/utility/Journal.h>
|
||||
#include <xrpl/protocol/PublicKey.h>
|
||||
#include <chrono>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
class Application;
|
||||
class CanonicalTXSet;
|
||||
class Ledger;
|
||||
|
||||
/// Concrete alias for the consensus tick context.
|
||||
using TickContext = ConsensusTick<ExtendedPosition, RCLCxPeerPos, RCLTxSet>;
|
||||
|
||||
/// Concrete Xahau-owned manager for consensus extensions (RNG + Export).
|
||||
///
|
||||
/// Owns all RNG/Export state that was previously scattered across
|
||||
/// RCLCxAdaptor and Consensus.h. Lifecycle hooks are grouped by
|
||||
/// caller/threading context.
|
||||
class ConsensusExtensions
|
||||
{
|
||||
Application& app_;
|
||||
ExportSigCollector exportSigCollector_;
|
||||
|
||||
public:
|
||||
beast::Journal j_; // public: accessed by extensionsTick template
|
||||
|
||||
// Type of sidecar set, known at fetch time from proposal context.
|
||||
enum class SidecarKind : uint8_t { commit, reveal, exportSig };
|
||||
|
||||
struct ActiveValidatorView
|
||||
{
|
||||
hash_set<PublicKey> masterKeys;
|
||||
hash_set<NodeID> nodeIds;
|
||||
std::optional<uint256> sourceLedgerHash;
|
||||
bool fromUNLReport = false;
|
||||
|
||||
// Export paths receive validator keys; RNG sidecars identify
|
||||
// validators by NodeID. Keep both indexes in lockstep.
|
||||
void
|
||||
insertMaster(PublicKey const& masterKey)
|
||||
{
|
||||
masterKeys.insert(masterKey);
|
||||
nodeIds.insert(calcNodeID(masterKey));
|
||||
}
|
||||
|
||||
void
|
||||
eraseMaster(PublicKey const& masterKey)
|
||||
{
|
||||
masterKeys.erase(masterKey);
|
||||
nodeIds.erase(calcNodeID(masterKey));
|
||||
}
|
||||
|
||||
std::size_t
|
||||
size() const
|
||||
{
|
||||
return masterKeys.size();
|
||||
}
|
||||
|
||||
bool
|
||||
containsMaster(PublicKey const& masterKey) const
|
||||
{
|
||||
return masterKeys.count(masterKey) > 0;
|
||||
}
|
||||
|
||||
bool
|
||||
containsNode(NodeID const& nodeId) const
|
||||
{
|
||||
return nodeIds.count(nodeId) > 0;
|
||||
}
|
||||
};
|
||||
using ActiveValidatorViewPtr = std::shared_ptr<ActiveValidatorView const>;
|
||||
|
||||
private:
|
||||
// --- RNG Pipelined Storage ---
|
||||
hash_map<NodeID, uint256> pendingCommits_;
|
||||
hash_map<NodeID, uint256> pendingReveals_;
|
||||
hash_map<NodeID, PublicKey> nodeIdToKey_;
|
||||
|
||||
// Ephemeral entropy secret (in-memory only, crash = non-revealer)
|
||||
uint256 myEntropySecret_;
|
||||
bool entropyFailed_ = false;
|
||||
bool rngEnabledThisRound_ = false;
|
||||
bool exportEnabledThisRound_ = false;
|
||||
|
||||
// Real SHAMaps for the current round (unbacked, ephemeral)
|
||||
std::shared_ptr<SHAMap> commitSetMap_;
|
||||
std::shared_ptr<SHAMap> entropySetMap_;
|
||||
std::shared_ptr<SHAMap> exportSigSetMap_;
|
||||
std::optional<LedgerIndex> rngRoundSeq_;
|
||||
std::shared_ptr<SHAMap const> consensusTxSetMap_;
|
||||
hash_map<uint256, std::shared_ptr<STTx const>> consensusExportTxns_;
|
||||
std::optional<uint256> consensusTxSetHash_;
|
||||
|
||||
// Track pending sidecar set fetches by hash → kind.
|
||||
// Kind is known at fetch time (call site context), so
|
||||
// onAcquiredSidecarSet can dispatch without content-sniffing.
|
||||
hash_map<uint256, SidecarKind> pendingRngFetches_;
|
||||
|
||||
// Parent-ledger validator view used by RNG and Export quorum logic.
|
||||
ActiveValidatorViewPtr activeValidatorView_ =
|
||||
std::make_shared<ActiveValidatorView const>();
|
||||
mutable std::mutex activeValidatorViewMutex_;
|
||||
|
||||
// Recent proposers intersected with the active UNL (liveness hint)
|
||||
hash_set<NodeID> likelyParticipants_;
|
||||
|
||||
// Current consensus mode (set by adaptor at round start)
|
||||
ConsensusMode mode_{ConsensusMode::observing};
|
||||
|
||||
public:
|
||||
// --- RNG Sub-state Machine (accessed by extensionsTick template) ---
|
||||
EstablishState estState_{EstablishState::ConvergingTx};
|
||||
std::chrono::steady_clock::time_point revealPhaseStart_{};
|
||||
std::chrono::steady_clock::time_point commitHashConflictStart_{};
|
||||
bool explicitFinalProposalSent_{false};
|
||||
bool entropySetPublished_{false};
|
||||
std::chrono::steady_clock::time_point entropyPublishStart_{};
|
||||
bool exportSigGateStarted_{false};
|
||||
std::chrono::steady_clock::time_point exportSigGateStart_{};
|
||||
bool exportSigConvergenceFailed_{false};
|
||||
/** Proof data from a proposal signature, for embedding in SHAMap
|
||||
entries. Contains everything needed to independently verify
|
||||
that a validator committed/revealed a specific value. */
|
||||
struct ProposalProof
|
||||
{
|
||||
std::uint32_t proposeSeq;
|
||||
std::uint32_t closeTime;
|
||||
uint256 prevLedger;
|
||||
Serializer positionData; // serialized ExtendedPosition
|
||||
Buffer signature;
|
||||
};
|
||||
|
||||
private:
|
||||
// Proposal proofs keyed by NodeID.
|
||||
// commitProofs_: only seq=0 proofs (deterministic across all nodes).
|
||||
// proposalProofs_: latest proof with reveal (for entropySet).
|
||||
hash_map<NodeID, ProposalProof> commitProofs_;
|
||||
hash_map<NodeID, ProposalProof> proposalProofs_;
|
||||
|
||||
public:
|
||||
ConsensusExtensions(Application& app, beast::Journal j);
|
||||
|
||||
ExportSigCollector&
|
||||
exportSigCollector()
|
||||
{
|
||||
return exportSigCollector_;
|
||||
}
|
||||
|
||||
ExportSigCollector const&
|
||||
exportSigCollector() const
|
||||
{
|
||||
return exportSigCollector_;
|
||||
}
|
||||
|
||||
/// Set the current consensus mode (called by adaptor).
|
||||
void
|
||||
setMode(ConsensusMode m)
|
||||
{
|
||||
mode_ = m;
|
||||
}
|
||||
|
||||
// --- RNG Helper Methods ---
|
||||
|
||||
std::size_t
|
||||
quorumThreshold() const;
|
||||
|
||||
std::size_t
|
||||
exportSigQuorumThreshold() const;
|
||||
|
||||
void
|
||||
setExpectedProposers(hash_set<NodeID> proposers);
|
||||
|
||||
std::size_t
|
||||
pendingCommitCount() const;
|
||||
|
||||
std::size_t
|
||||
pendingRevealCount() const;
|
||||
|
||||
std::size_t
|
||||
expectedProposerCount() const;
|
||||
|
||||
bool
|
||||
hasQuorumOfCommits() const;
|
||||
|
||||
bool
|
||||
hasMinimumReveals() const;
|
||||
|
||||
bool
|
||||
hasAnyReveals() const;
|
||||
|
||||
bool
|
||||
shouldZeroEntropy() const;
|
||||
|
||||
bool
|
||||
rngEnabled() const;
|
||||
|
||||
bool
|
||||
exportEnabled() const;
|
||||
|
||||
bool
|
||||
bootstrapFastStartEnabled() const;
|
||||
|
||||
bool
|
||||
shouldSendExplicitFinalProposal() const;
|
||||
|
||||
std::optional<RCLTxSet>
|
||||
buildExplicitFinalProposalTxSet(RCLTxSet const& txns, LedgerIndex seq);
|
||||
|
||||
uint256
|
||||
buildCommitSet(LedgerIndex seq);
|
||||
|
||||
uint256
|
||||
buildEntropySet(LedgerIndex seq);
|
||||
|
||||
uint256
|
||||
buildExportSigSet(LedgerIndex seq);
|
||||
|
||||
bool
|
||||
hasPendingExportSigs() const;
|
||||
|
||||
bool
|
||||
hasConsensusExportTxns() const;
|
||||
|
||||
void
|
||||
setExportSigConvergenceFailed();
|
||||
|
||||
bool
|
||||
exportSigConvergenceFailed() const;
|
||||
|
||||
bool
|
||||
isSidecarSet(uint256 const& hash) const;
|
||||
|
||||
ActiveValidatorViewPtr
|
||||
activeValidatorView() const;
|
||||
|
||||
ActiveValidatorViewPtr
|
||||
makeActiveValidatorView(
|
||||
std::shared_ptr<Ledger const> const& prevLedger) const;
|
||||
|
||||
bool
|
||||
isActiveValidator(PublicKey const& validationKey) const;
|
||||
|
||||
bool
|
||||
isActiveValidator(
|
||||
PublicKey const& validationKey,
|
||||
ActiveValidatorView const& view) const;
|
||||
|
||||
void
|
||||
onAcquiredSidecarSet(std::shared_ptr<SHAMap> const& map);
|
||||
|
||||
void
|
||||
fetchRngSetIfNeeded(
|
||||
std::optional<uint256> const& hash,
|
||||
SidecarKind kind = SidecarKind::commit);
|
||||
|
||||
/// Fetch any sidecar sets from a peer's position if needed.
|
||||
void
|
||||
fetchSidecarsIfNeeded(ExtendedPosition const& peerPos);
|
||||
|
||||
void
|
||||
cacheConsensusTxSet(RCLTxSet const& txns);
|
||||
|
||||
std::size_t
|
||||
verifyPendingExportSigs(RCLTxSet const& txns, LedgerIndex seq);
|
||||
|
||||
void
|
||||
cacheUNLReport(std::shared_ptr<Ledger const> const& prevLedger = {});
|
||||
|
||||
bool
|
||||
isUNLReportMember(NodeID const& nodeId) const;
|
||||
|
||||
void
|
||||
generateEntropySecret();
|
||||
|
||||
uint256
|
||||
getEntropySecret() const;
|
||||
|
||||
void
|
||||
setEntropyFailed();
|
||||
|
||||
/// Self-seed our own reveal into pendingReveals_.
|
||||
/// Called from extensionsTick at reveal transition.
|
||||
/// In production, decorateMessage also self-seeds (belt + suspenders).
|
||||
void
|
||||
selfSeedReveal();
|
||||
|
||||
void
|
||||
clearRngState();
|
||||
|
||||
void
|
||||
onPreBuild(CanonicalTXSet& retriableTxs, LedgerIndex seq);
|
||||
|
||||
void
|
||||
harvestRngData(
|
||||
NodeID const& nodeId,
|
||||
PublicKey const& publicKey,
|
||||
ExtendedPosition const& position,
|
||||
std::uint32_t proposeSeq,
|
||||
NetClock::time_point closeTime,
|
||||
uint256 const& prevLedger,
|
||||
Slice const& signature);
|
||||
|
||||
static Blob
|
||||
serializeProof(ProposalProof const& proof);
|
||||
|
||||
static std::optional<ProposalProof>
|
||||
deserializeProof(Blob const& proofBlob);
|
||||
|
||||
static bool
|
||||
verifyProof(
|
||||
Blob const& proofBlob,
|
||||
PublicKey const& publicKey,
|
||||
uint256 const& expectedDigest,
|
||||
bool isCommit);
|
||||
|
||||
/// Append extension diagnostics to consensus JSON.
|
||||
void
|
||||
appendJson(Json::Value& ret) const;
|
||||
|
||||
/// Log extension-specific position fields at trace level.
|
||||
void
|
||||
logPosition(
|
||||
ExtendedPosition const& pos,
|
||||
beast::Journal j,
|
||||
beast::severities::Severity level = beast::severities::kTrace) const;
|
||||
|
||||
// --- Consensus/adaptor lifecycle hooks ---
|
||||
|
||||
/** Reset per-round extension state.
|
||||
Called from startRoundInternal under RCLConsensus::mutex_. */
|
||||
void
|
||||
onRoundStart(RCLCxLedger const& prevLedger, hash_set<NodeID> lastProposers);
|
||||
|
||||
/** Extract extension data from the parsed proposal.
|
||||
Called from peerProposalInternal under RCLConsensus::mutex_. */
|
||||
void
|
||||
onTrustedPeerProposal(
|
||||
NodeID const& nodeId,
|
||||
PublicKey const& publicKey,
|
||||
ExtendedPosition const& position,
|
||||
std::uint32_t proposeSeq,
|
||||
NetClock::time_point closeTime,
|
||||
uint256 const& prevLedger,
|
||||
Slice const& signature,
|
||||
std::vector<std::string> const& exportSignatures = {});
|
||||
|
||||
/** Harvest proposal-carried export signatures after the proposal payload is
|
||||
known to be signed by `publicKey`. */
|
||||
std::size_t
|
||||
harvestExportSignatures(
|
||||
PublicKey const& publicKey,
|
||||
uint256 const& prevLedger,
|
||||
std::vector<std::string> const& exportSignatures,
|
||||
char const* source);
|
||||
|
||||
/** Signal that the accept/build path finished successfully.
|
||||
Called from doAccept (frozen state, no consensus mutex). */
|
||||
void
|
||||
onAcceptComplete();
|
||||
|
||||
/** Extract export signatures from the raw protobuf wire message.
|
||||
Called from PeerImp overlay ingress (outside consensus mutex).
|
||||
Only touches the independently synchronized ExportSigCollector. */
|
||||
void
|
||||
onTrustedPeerMessage(::protocol::TMProposeSet const& wireMsg);
|
||||
|
||||
/** Attach RNG commitment to the initial proposal position.
|
||||
Called from onClose BEFORE signing. Affects proposal identity.
|
||||
Generates entropy secret, caches UNL, seeds own commitment. */
|
||||
void
|
||||
decoratePosition(
|
||||
ExtendedPosition& pos,
|
||||
std::shared_ptr<Ledger const> const& prevLedger,
|
||||
bool proposing);
|
||||
|
||||
/** Attach export signatures before proposal signing.
|
||||
The caller hashes the resulting blobs into ExtendedPosition so the
|
||||
proposal signature authenticates the side-channel protobuf field. */
|
||||
void
|
||||
attachExportSignatures(
|
||||
protocol::TMProposeSet& prop,
|
||||
RCLCxPeerPos::Proposal const& proposal);
|
||||
|
||||
/** Record post-signature RNG state for the outgoing protobuf.
|
||||
Self-seeds own reveal and stores proposal proofs. */
|
||||
void
|
||||
decorateMessage(
|
||||
protocol::TMProposeSet& prop,
|
||||
RCLCxPeerPos::Proposal const& proposal,
|
||||
ExtendedPosition const& signedPosition,
|
||||
Buffer const& proposalSig);
|
||||
|
||||
ExtensionTickResult
|
||||
onTick(TickContext const& ctx);
|
||||
|
||||
// --- Accessors for adaptor forwarding ---
|
||||
|
||||
void
|
||||
setRngEnabledThisRound(bool v)
|
||||
{
|
||||
rngEnabledThisRound_ = v;
|
||||
}
|
||||
|
||||
void
|
||||
setExportEnabledThisRound(bool v)
|
||||
{
|
||||
exportEnabledThisRound_ = v;
|
||||
}
|
||||
|
||||
bool
|
||||
extensionsBusy() const
|
||||
{
|
||||
return estState_ != EstablishState::ConvergingTx ||
|
||||
(exportEnabled() &&
|
||||
(exportSigGateStarted_ || hasPendingExportSigs()));
|
||||
}
|
||||
|
||||
EstablishState
|
||||
estState() const
|
||||
{
|
||||
return estState_;
|
||||
}
|
||||
|
||||
void
|
||||
resetSubState()
|
||||
{
|
||||
estState_ = EstablishState::ConvergingTx;
|
||||
revealPhaseStart_ = {};
|
||||
commitHashConflictStart_ = {};
|
||||
explicitFinalProposalSent_ = false;
|
||||
entropySetPublished_ = false;
|
||||
entropyPublishStart_ = {};
|
||||
exportSigGateStarted_ = false;
|
||||
exportSigGateStart_ = {};
|
||||
exportSigConvergenceFailed_ = false;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
#endif
|
||||
250
src/xrpld/app/consensus/ConsensusExtensionsDesign.md
Normal file
250
src/xrpld/app/consensus/ConsensusExtensionsDesign.md
Normal file
@@ -0,0 +1,250 @@
|
||||
# Consensus Extension Design Principles
|
||||
|
||||
This note captures the principles behind the Xahau consensus extensions:
|
||||
ConsensusEntropy/RNG, proposal sidecars, and export signature convergence.
|
||||
Read this before changing `ConsensusExtensions`, `ConsensusExtensionsTick`,
|
||||
`ExtendedPosition`, sidecar SHAMap handling, or the related CSF tests.
|
||||
|
||||
The short version: extension data may coordinate extra same-ledger features,
|
||||
but it must not redefine ordinary transaction-set consensus. When extension
|
||||
state cannot be made safe in time, the extension degrades deterministically
|
||||
and the ledger still closes.
|
||||
|
||||
The priority order for consensus extensions is: safe, fast, works. Safety means
|
||||
extension timing must not create divergent closed-ledger effects when a bounded
|
||||
coordination step can avoid it. Fast means those coordination steps stay short
|
||||
and conditional, never becoming an open-ended wait for an extension feature to
|
||||
succeed. Works means missed or late extension material follows that feature's
|
||||
deterministic fallback, such as zero entropy for RNG or normal Export
|
||||
retry/expiry, rather than blocking core consensus.
|
||||
|
||||
## Core Invariants
|
||||
|
||||
1. Core consensus remains keyed by the transaction set.
|
||||
|
||||
`ExtendedPosition::operator==` intentionally compares only `txSetHash`.
|
||||
RNG, export sig, commit-set, and entropy-set hashes are proposal sidecars.
|
||||
They are coordinated during establish, but they do not define whether peers
|
||||
agree on the ordinary transaction set.
|
||||
|
||||
2. Extension waits are bounded.
|
||||
|
||||
RNG and export sidecar convergence may wait briefly inside establish, but
|
||||
they must not block ledger close indefinitely. If RNG cannot establish a
|
||||
safe non-zero entropy value, it injects the deterministic zero-entropy path.
|
||||
If export signatures cannot converge, export retries or expires according
|
||||
to transaction rules.
|
||||
|
||||
3. Safety is in validation; extension logic is deliberation.
|
||||
|
||||
ConsensusEntropy is materialized during accept/buildLCL as a deterministic
|
||||
pseudo-transaction. Nodes agree on the base transaction set first, then
|
||||
derive the entropy transaction from agreed sidecar inputs. Any local fault
|
||||
still has to survive normal validation/LCL agreement.
|
||||
|
||||
4. Converge signed inputs, not just derived outputs.
|
||||
|
||||
RNG commits, RNG reveals, and export signatures are the verifiable inputs.
|
||||
The design converges on those input sets using sidecar SHAMaps. The final
|
||||
entropy digest and export quorum result are derived from the converged
|
||||
inputs.
|
||||
|
||||
5. Sidecars are not transactions.
|
||||
|
||||
Commit, reveal, and export signature entries are `STObject(sfGeneric)`
|
||||
leaves in ephemeral `SHAMapType::SIDECAR` maps. They use `sfSidecarType`
|
||||
to distinguish payloads and `HashPrefix::sidecar` for item hashes. They
|
||||
are fetched through sidecar sync, not parsed or submitted as transactions.
|
||||
|
||||
6. Proposal-visible or validation-visible extension data must be signed.
|
||||
|
||||
Do not attach behavior-changing sidecar payloads as unsigned out-of-band
|
||||
proposal or validation wrapper data. If stripping or changing a field would
|
||||
alter RNG or export behavior, that field must be covered by the relevant
|
||||
signed payload and by the identity used for duplicate suppression/replay
|
||||
checks on that path.
|
||||
|
||||
Today ConsensusExtensions uses signed proposal sidecars, not validation
|
||||
sidecars. If a future design carries extension material through
|
||||
validations, the same rule applies: the behavior-changing data, or a digest
|
||||
of it, must be inside the signed validation payload and bound to the
|
||||
validating key and ledger. A protobuf field outside the signed validation is
|
||||
only transport metadata; it must not affect consensus-extension behavior.
|
||||
|
||||
## Validator Set And Quorum
|
||||
|
||||
The active validator view is the shared denominator for RNG and export:
|
||||
|
||||
- Prefer `UNLReport.sfActiveValidators` from the consensus parent ledger.
|
||||
- If no report is available, fall back to configured trusted validators so
|
||||
early ledgers and dev/test networks can make progress.
|
||||
- If `featureNegativeUNL` is enabled, subtract the parent ledger's Negative
|
||||
UNL from whichever source produced the view.
|
||||
- Use the same snapshot throughout the round.
|
||||
|
||||
`quorumThreshold()` is 80% of that active validator view. Recent or expected
|
||||
proposers are liveness hints only; they do not shrink the quorum denominator.
|
||||
|
||||
Be careful with `prevProposers`: in the generic consensus code it is peer-only.
|
||||
When checking whether the previous round had enough active participants, count
|
||||
our own proposer slot if this node is proposing.
|
||||
|
||||
## RNG Commit/Reveal Principles
|
||||
|
||||
RNG proceeds through establish sub-states:
|
||||
|
||||
1. `ConvergingTx`: ordinary transaction-set convergence while harvesting
|
||||
commitments.
|
||||
2. `ConvergingCommit`: after proofed commit quorum, publish the commit sidecar
|
||||
hash and reveal the same secret that produced the original commitment.
|
||||
3. `ConvergingReveal`: collect reveals, publish the entropy sidecar hash, and
|
||||
wait for sidecar agreement or deterministic fallback.
|
||||
|
||||
Commit quorum counts only proofed commits from active validators. A commit that
|
||||
cannot be emitted as a verifiable sidecar leaf does not count.
|
||||
|
||||
Reveal collection targets all known committers, because the commit sidecar set
|
||||
defines who is expected to reveal. The reveal wait is still bounded. A node
|
||||
that crashes, withholds, or partitions after committing must not stop the
|
||||
ledger forever.
|
||||
|
||||
Final entropy is computed from the agreed entropy sidecar SHAMap, not from a
|
||||
node's opportunistic local `pendingReveals_` map. This prevents different
|
||||
local reveal subsets at timeout boundaries from producing different entropy.
|
||||
|
||||
## Entropy Alignment Rules
|
||||
|
||||
Non-zero entropy requires quorum alignment on the entropy sidecar hash.
|
||||
|
||||
The alignment count is:
|
||||
|
||||
```
|
||||
our published entropySetHash + tx-converged peers with the same entropySetHash
|
||||
```
|
||||
|
||||
If that count reaches `quorumThreshold()`, the node may proceed with non-zero
|
||||
entropy even if a below-quorum minority advertises a conflicting or
|
||||
unacquirable entropy hash.
|
||||
|
||||
If no entropy hash reaches quorum alignment before the bounded deadline, the
|
||||
round must fall back to zero entropy. This is the safe degradation path, not a
|
||||
consensus failure.
|
||||
|
||||
Examples with five active validators and threshold four:
|
||||
|
||||
- Four honest validators align on one entropy hash and one validator advertises
|
||||
a bogus hash: proceed with non-zero entropy for the honest quorum.
|
||||
- Two validators advertise different bogus hashes and only three align on the
|
||||
honest hash: fall back to zero entropy.
|
||||
- No peer entropy hash is observed in time: fall back to zero entropy.
|
||||
|
||||
Zero entropy means unavailable entropy. The pseudo-transaction is still
|
||||
deterministic, with zero digest and zero entropy count, so hooks can detect
|
||||
the unavailable path.
|
||||
|
||||
## Sidecar Convergence Rules
|
||||
|
||||
Sidecar SHAMaps use union convergence:
|
||||
|
||||
- Every valid active-validator contribution belongs in the set.
|
||||
- Sets only grow during fetch/merge.
|
||||
- Fetch/merge is a safety net for missed proposals, not the normal transport.
|
||||
- Rebuild and republish the sidecar hash after merging missing leaves.
|
||||
|
||||
Do not use avalanche-style transaction inclusion logic for sidecar inputs.
|
||||
For RNG and export sidecars, the disagreement to resolve is usually timing or
|
||||
delivery, not whether a valid contribution should be included.
|
||||
|
||||
The entropy sidecar gate always gives peers at least one observation tick after
|
||||
publishing `entropySetHash`. Publishing and accepting in the same tick can hide
|
||||
conflicts and produce asymmetric zero/non-zero outcomes.
|
||||
|
||||
## Export Principles
|
||||
|
||||
`featureExport` and `featureConsensusEntropy` are independently amendment
|
||||
gated.
|
||||
|
||||
Export can run without ConsensusEntropy and still uses the active validator
|
||||
view's 80% quorum threshold. Verified export signature sidecars converge
|
||||
through `ExtendedPosition`, and the `exportSigSetHash` is signed by proposals
|
||||
whether or not RNG is enabled. Do not make Export liveness depend on unanimity:
|
||||
one active validator with a missing, delayed, or conflicting sidecar must not
|
||||
veto an otherwise quorum-aligned export round.
|
||||
|
||||
The extended proposal machinery is enabled when either feature needs signed
|
||||
sidecar fields. Do not make Export depend on RNG availability just because RNG
|
||||
was the first consumer of `ExtendedPosition`.
|
||||
|
||||
When `featureExport` is disabled, the export sidecar gate is disabled too. Stale
|
||||
collector entries must not keep a stopped amendment active.
|
||||
|
||||
Only verified export signatures count toward quorum or enter export sidecar
|
||||
SHAMaps. Proposal-ingress signatures are sender-bound to the trusted proposal
|
||||
validator and may be stored as unverified until the matching export transaction
|
||||
is available for cryptographic verification.
|
||||
|
||||
The consensus candidate transaction set is the authority for export signature
|
||||
verification. The open ledger may be used for early proposal ingestion, but
|
||||
once a candidate tx set exists, only signatures verified against the `ttEXPORT`
|
||||
in that candidate set may become quorum material or enter `exportSigSetHash`.
|
||||
|
||||
Export sidecar publication is local-material only. A node may publish only the
|
||||
verified export signatures it actually has locally, and only for `ttEXPORT`
|
||||
transactions in the consensus candidate set. A fetched export sidecar is not a
|
||||
separate apply input: on merge, each leaf must be active-view checked, verified
|
||||
against the candidate transaction, and promoted into `ExportSigCollector`.
|
||||
Closed-ledger apply snapshots that collector, so the sidecar convergence state
|
||||
and the signer set used by `ttEXPORT` stay on the same path.
|
||||
|
||||
If the consensus candidate contains a `ttEXPORT` but the node has no eligible
|
||||
local export signatures yet, the export sidecar gate opens only a bounded
|
||||
safety window for tx-converged peers to advertise `exportSigSetHash`. This is
|
||||
not a wait-for-Export-success mechanism; it is a short opportunity to avoid
|
||||
closing a minority ledger while sidecar convergence is already reachable. If no
|
||||
advertised sidecar appears by the deadline, the gate stops waiting and the
|
||||
export retries or expires through normal transaction rules.
|
||||
|
||||
Export success requires quorum alignment on `exportSigSetHash`, not merely a
|
||||
local collector quorum. If a quorum of tx-converged participants advertises the
|
||||
same export signature sidecar hash, that hash is aligned and below-quorum
|
||||
conflicts are ignored. If no export signature hash reaches quorum alignment by
|
||||
the bounded deadline, do not choose the largest non-quorum set; the export
|
||||
retries or expires according to normal transaction rules.
|
||||
|
||||
Closed-ledger apply must not promote unverified proposal-carried signatures into
|
||||
current-round quorum material. It may verify and retain them for a future retry,
|
||||
where they can be published in a sidecar set and converged before use.
|
||||
|
||||
Export sig convergence runs in parallel with RNG. An export-side convergence
|
||||
failure must not change RNG semantics; an RNG fallback must not make export
|
||||
unsafe. Each feature has its own gate and fallback.
|
||||
|
||||
Accept-time cleanup must preserve Export state through `buildLCL` whenever
|
||||
`featureExport` is enabled. RNG-disabled does not mean extensions-disabled:
|
||||
`ttEXPORT` still needs the round's export sidecar convergence state when it
|
||||
applies.
|
||||
|
||||
CSF consensus tests model the export sidecar gate directly. Testnet scenarios
|
||||
under `.testnet/scenarios/export/` cover live-node Export+CE behavior and
|
||||
Export-only quorum behavior.
|
||||
|
||||
## Review Checklist
|
||||
|
||||
When changing consensus extension code, check these questions:
|
||||
|
||||
- Does this preserve transaction-set equality as the core consensus identity?
|
||||
- Does every extension wait have a bounded fallback?
|
||||
- Does non-zero entropy require active-validator quorum alignment?
|
||||
- Can one bad validator deny entropy to an honest quorum? It must not.
|
||||
- Can a sub-quorum set produce non-zero entropy? It must not.
|
||||
- Are quorum calculations using the active validator view, not recent
|
||||
proposers as the denominator?
|
||||
- Are sidecar entries typed as sidecars, not pseudo-transactions?
|
||||
- Are proposal-visible or validation-visible sidecar fields covered by the
|
||||
relevant signature and duplicate/replay identity?
|
||||
- Are export signatures verified before they count?
|
||||
- Does export success require `exportSigSetHash` alignment, not just local
|
||||
collector quorum?
|
||||
- Can one bad validator deny Export to an honest quorum? It must not.
|
||||
- Can timeout select a largest-but-below-quorum export sidecar set? It must not.
|
||||
- Are CE and Export still independently gated and independently stoppable?
|
||||
@@ -17,6 +17,7 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <xrpld/app/consensus/ConsensusExtensions.h>
|
||||
#include <xrpld/app/consensus/RCLConsensus.h>
|
||||
#include <xrpld/app/consensus/RCLValidations.h>
|
||||
#include <xrpld/app/ledger/BuildLedger.h>
|
||||
@@ -27,27 +28,39 @@
|
||||
#include <xrpld/app/ledger/LocalTxs.h>
|
||||
#include <xrpld/app/ledger/OpenLedger.h>
|
||||
#include <xrpld/app/misc/AmendmentTable.h>
|
||||
#include <xrpld/app/misc/CanonicalTXSet.h>
|
||||
#include <xrpld/app/misc/HashRouter.h>
|
||||
#include <xrpld/app/misc/LoadFeeTrack.h>
|
||||
#include <xrpld/app/misc/NegativeUNLVote.h>
|
||||
#include <xrpld/app/misc/NetworkOPs.h>
|
||||
#include <xrpld/app/misc/RuntimeConfig.h>
|
||||
#include <xrpld/app/misc/Transaction.h>
|
||||
#include <xrpld/app/misc/TxQ.h>
|
||||
#include <xrpld/app/misc/ValidatorKeys.h>
|
||||
#include <xrpld/app/misc/ValidatorList.h>
|
||||
#include <xrpld/app/tx/apply.h>
|
||||
#include <xrpld/consensus/Consensus.h>
|
||||
#include <xrpld/consensus/LedgerTiming.h>
|
||||
#include <xrpld/overlay/Overlay.h>
|
||||
#include <xrpld/overlay/predicates.h>
|
||||
#include <xrpl/basics/random.h>
|
||||
#include <xrpl/beast/core/LexicalCast.h>
|
||||
#include <xrpl/beast/utility/instrumentation.h>
|
||||
#include <xrpl/crypto/csprng.h>
|
||||
#include <xrpl/protocol/AccountID.h>
|
||||
#include <xrpl/protocol/BuildInfo.h>
|
||||
#include <xrpl/protocol/Feature.h>
|
||||
#include <xrpl/protocol/Indexes.h>
|
||||
#include <xrpl/protocol/SecretKey.h>
|
||||
#include <xrpl/protocol/Sign.h>
|
||||
#include <xrpl/protocol/TxFlags.h>
|
||||
#include <xrpl/protocol/TxFormats.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <algorithm>
|
||||
#include <iomanip>
|
||||
#include <cstring>
|
||||
#include <mutex>
|
||||
#include <random>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
@@ -57,7 +70,7 @@ RCLConsensus::RCLConsensus(
|
||||
LedgerMaster& ledgerMaster,
|
||||
LocalTxs& localTxs,
|
||||
InboundTransactions& inboundTransactions,
|
||||
Consensus<Adaptor>::clock_type const& clock,
|
||||
clock_type const& clock,
|
||||
ValidatorKeys const& validatorKeys,
|
||||
beast::Journal journal)
|
||||
: adaptor_(
|
||||
@@ -68,11 +81,13 @@ RCLConsensus::RCLConsensus(
|
||||
inboundTransactions,
|
||||
validatorKeys,
|
||||
journal)
|
||||
, consensus_(clock, adaptor_, journal)
|
||||
, consensus_(std::make_unique<Consensus<Adaptor>>(clock, adaptor_, journal))
|
||||
, j_(journal)
|
||||
{
|
||||
}
|
||||
|
||||
RCLConsensus::~RCLConsensus() = default;
|
||||
|
||||
RCLConsensus::Adaptor::Adaptor(
|
||||
Application& app,
|
||||
std::unique_ptr<FeeVote>&& feeVote,
|
||||
@@ -122,6 +137,22 @@ RCLConsensus::Adaptor::Adaptor(
|
||||
}
|
||||
}
|
||||
|
||||
// --- ConsensusExtensions helpers ---
|
||||
|
||||
ConsensusExtensions&
|
||||
RCLConsensus::Adaptor::ce()
|
||||
{
|
||||
return app_.getConsensusExtensions();
|
||||
}
|
||||
|
||||
ConsensusExtensions const&
|
||||
RCLConsensus::Adaptor::ce() const
|
||||
{
|
||||
return app_.getConsensusExtensions();
|
||||
}
|
||||
|
||||
// --- End ConsensusExtensions helpers ---
|
||||
|
||||
std::optional<RCLCxLedger>
|
||||
RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash)
|
||||
{
|
||||
@@ -173,10 +204,14 @@ RCLConsensus::Adaptor::share(RCLCxPeerPos const& peerPos)
|
||||
prop.set_proposeseq(proposal.proposeSeq());
|
||||
prop.set_closetime(proposal.closeTime().time_since_epoch().count());
|
||||
|
||||
prop.set_currenttxhash(
|
||||
proposal.position().begin(), proposal.position().size());
|
||||
// Serialize full ExtendedPosition
|
||||
Serializer positionData;
|
||||
proposal.position().add(positionData);
|
||||
auto const posSlice = positionData.slice();
|
||||
prop.set_currenttxhash(posSlice.data(), posSlice.size());
|
||||
|
||||
prop.set_previousledger(
|
||||
proposal.prevLedger().begin(), proposal.position().size());
|
||||
proposal.prevLedger().begin(), proposal.prevLedger().size());
|
||||
|
||||
auto const pk = peerPos.publicKey().slice();
|
||||
prop.set_nodepubkey(pk.data(), pk.size());
|
||||
@@ -184,6 +219,9 @@ RCLConsensus::Adaptor::share(RCLCxPeerPos const& peerPos)
|
||||
auto const sig = peerPos.signature();
|
||||
prop.set_signature(sig.data(), sig.size());
|
||||
|
||||
for (auto const& exportSig : peerPos.exportSignatures())
|
||||
prop.add_exportsignatures(exportSig.data(), exportSig.size());
|
||||
|
||||
app_.overlay().relay(prop, peerPos.suppressionID(), peerPos.publicKey());
|
||||
}
|
||||
|
||||
@@ -217,39 +255,52 @@ RCLConsensus::Adaptor::propose(RCLCxPeerPos::Proposal const& proposal)
|
||||
|
||||
protocol::TMProposeSet prop;
|
||||
|
||||
prop.set_currenttxhash(
|
||||
proposal.position().begin(), proposal.position().size());
|
||||
auto wirePosition = proposal.position();
|
||||
|
||||
ce().attachExportSignatures(prop, proposal);
|
||||
if (prop.exportsignatures_size() > 0)
|
||||
wirePosition.exportSignaturesHash =
|
||||
proposalExportSignaturesHash(prop.exportsignatures());
|
||||
|
||||
// Serialize full ExtendedPosition (includes RNG leaves and export
|
||||
// signature digest)
|
||||
Serializer positionData;
|
||||
wirePosition.add(positionData);
|
||||
auto const posSlice = positionData.slice();
|
||||
prop.set_currenttxhash(posSlice.data(), posSlice.size());
|
||||
|
||||
prop.set_previousledger(
|
||||
proposal.prevLedger().begin(), proposal.prevLedger().size());
|
||||
prop.set_proposeseq(proposal.proposeSeq());
|
||||
prop.set_closetime(proposal.closeTime().time_since_epoch().count());
|
||||
prop.set_nodepubkey(
|
||||
validatorKeys_.keys->publicKey.data(),
|
||||
validatorKeys_.keys->publicKey.size());
|
||||
|
||||
if (!validatorKeys_.keys)
|
||||
{
|
||||
JLOG(j_.warn()) << "RCLConsensus::Adaptor::propose: ValidatorKeys "
|
||||
"not set: \n";
|
||||
return;
|
||||
}
|
||||
|
||||
auto const& keys = *validatorKeys_.keys;
|
||||
|
||||
prop.set_nodepubkey(keys.publicKey.data(), keys.publicKey.size());
|
||||
|
||||
auto sig =
|
||||
signDigest(keys.publicKey, keys.secretKey, proposal.signingHash());
|
||||
auto sig = signDigest(
|
||||
validatorKeys_.keys->publicKey,
|
||||
validatorKeys_.keys->secretKey,
|
||||
sha512Half(
|
||||
HashPrefix::proposal,
|
||||
std::uint32_t(proposal.proposeSeq()),
|
||||
proposal.closeTime().time_since_epoch().count(),
|
||||
proposal.prevLedger(),
|
||||
wirePosition));
|
||||
|
||||
prop.set_signature(sig.data(), sig.size());
|
||||
|
||||
auto const suppression = proposalUniqueId(
|
||||
proposal.position(),
|
||||
wirePosition,
|
||||
proposal.prevLedger(),
|
||||
proposal.proposeSeq(),
|
||||
proposal.closeTime(),
|
||||
keys.publicKey,
|
||||
validatorKeys_.keys->publicKey,
|
||||
sig);
|
||||
|
||||
app_.getHashRouter().addSuppression(suppression);
|
||||
|
||||
ce().decorateMessage(prop, proposal, wirePosition, sig);
|
||||
|
||||
app_.overlay().broadcast(prop);
|
||||
}
|
||||
|
||||
@@ -400,12 +451,16 @@ RCLConsensus::Adaptor::onClose(
|
||||
// Needed because of the move below.
|
||||
auto const setHash = initialSet->getHash().as_uint256();
|
||||
|
||||
ExtendedPosition pos{setHash};
|
||||
|
||||
ce().decoratePosition(pos, prevLedger, proposing);
|
||||
|
||||
return Result{
|
||||
std::move(initialSet),
|
||||
RCLCxPeerPos::Proposal{
|
||||
initialLedger->info().parentHash,
|
||||
RCLCxPeerPos::Proposal::seqJoin,
|
||||
setHash,
|
||||
std::move(pos),
|
||||
closeTime,
|
||||
app_.timeKeeper().closeTime(),
|
||||
validatorKeys_.nodeID}};
|
||||
@@ -443,11 +498,13 @@ RCLConsensus::Adaptor::onAccept(
|
||||
jtACCEPT,
|
||||
"acceptLedger",
|
||||
[=, this, cj = std::move(consensusJson)]() mutable {
|
||||
//@@start do-accept-freeze-contract
|
||||
// Note that no lock is held or acquired during this job.
|
||||
// This is because generic Consensus guarantees that once a ledger
|
||||
// is accepted, the consensus results and capture by reference state
|
||||
// will not change until startRound is called (which happens via
|
||||
// endConsensus).
|
||||
//@@end do-accept-freeze-contract
|
||||
RclConsensusLogger clog("onAccept", validating, j_);
|
||||
this->doAccept(
|
||||
result,
|
||||
@@ -529,6 +586,17 @@ RCLConsensus::Adaptor::doAccept(
|
||||
}
|
||||
}
|
||||
|
||||
//@@start auxiliary-pre-build-injection
|
||||
// Inject consensus entropy pseudo-transaction (if amendment enabled).
|
||||
// Export-only rounds still need extension state preserved through buildLCL
|
||||
// so ttEXPORT can observe exportSigSetHash convergence at apply time.
|
||||
//@@start accept-time-cleanup-disabled
|
||||
if (ce().rngEnabled())
|
||||
ce().onPreBuild(retriableTxs, prevLedger.seq() + 1);
|
||||
else if (!ce().exportEnabled())
|
||||
ce().clearRngState();
|
||||
//@@end accept-time-cleanup-disabled
|
||||
|
||||
auto built = buildLCL(
|
||||
prevLedger,
|
||||
retriableTxs,
|
||||
@@ -537,6 +605,7 @@ RCLConsensus::Adaptor::doAccept(
|
||||
closeResolution,
|
||||
result.roundTime.read(),
|
||||
failed);
|
||||
//@@end auxiliary-pre-build-injection
|
||||
|
||||
auto const newLCLHash = built.id();
|
||||
JLOG(j_.debug()) << "Built ledger #" << built.seq() << ": " << newLCLHash;
|
||||
@@ -729,6 +798,8 @@ RCLConsensus::Adaptor::doAccept(
|
||||
|
||||
app_.timeKeeper().adjustCloseTime(offset);
|
||||
}
|
||||
|
||||
ce().onAcceptComplete();
|
||||
}
|
||||
|
||||
void
|
||||
@@ -826,19 +897,10 @@ RCLConsensus::Adaptor::validate(
|
||||
validationTime = lastValidationTime_ + 1s;
|
||||
lastValidationTime_ = validationTime;
|
||||
|
||||
if (!validatorKeys_.keys)
|
||||
{
|
||||
JLOG(j_.warn()) << "RCLConsensus::Adaptor::validate: ValidatorKeys "
|
||||
"not set\n";
|
||||
return;
|
||||
}
|
||||
|
||||
auto const& keys = *validatorKeys_.keys;
|
||||
|
||||
auto v = std::make_shared<STValidation>(
|
||||
lastValidationTime_,
|
||||
keys.publicKey,
|
||||
keys.secretKey,
|
||||
validatorKeys_.keys->publicKey,
|
||||
validatorKeys_.keys->secretKey,
|
||||
validatorKeys_.nodeID,
|
||||
[&](STValidation& v) {
|
||||
v.setFieldH256(sfLedgerHash, ledger.id());
|
||||
@@ -900,7 +962,7 @@ RCLConsensus::Adaptor::validate(
|
||||
|
||||
handleNewValidation(app_, v, "local");
|
||||
|
||||
// Broadcast to all our peers:
|
||||
// Broadcast validation to all peers.
|
||||
protocol::TMValidation val;
|
||||
val.set_validation(serialized.data(), serialized.size());
|
||||
app_.overlay().broadcast(val);
|
||||
@@ -923,6 +985,26 @@ RCLConsensus::Adaptor::onModeChange(ConsensusMode before, ConsensusMode after)
|
||||
censorshipDetector_.reset();
|
||||
|
||||
mode_ = after;
|
||||
ce().setMode(after);
|
||||
}
|
||||
|
||||
ConsensusPhase
|
||||
RCLConsensus::phase() const
|
||||
{
|
||||
return consensus_->phase();
|
||||
}
|
||||
|
||||
bool
|
||||
RCLConsensus::extensionsBusy() const
|
||||
{
|
||||
return consensus_->extensionsBusy();
|
||||
}
|
||||
|
||||
RCLCxLedger::ID
|
||||
RCLConsensus::prevLedgerID() const
|
||||
{
|
||||
std::lock_guard _{mutex_};
|
||||
return consensus_->prevLedgerID();
|
||||
}
|
||||
|
||||
Json::Value
|
||||
@@ -931,7 +1013,7 @@ RCLConsensus::getJson(bool full) const
|
||||
Json::Value ret;
|
||||
{
|
||||
std::lock_guard _{mutex_};
|
||||
ret = consensus_.getJson(full);
|
||||
ret = consensus_->getJson(full);
|
||||
}
|
||||
ret["validating"] = adaptor_.validating();
|
||||
return ret;
|
||||
@@ -945,7 +1027,7 @@ RCLConsensus::timerEntry(
|
||||
try
|
||||
{
|
||||
std::lock_guard _{mutex_};
|
||||
consensus_.timerEntry(now, clog);
|
||||
consensus_->timerEntry(now, clog);
|
||||
}
|
||||
catch (SHAMapMissingNode const& mn)
|
||||
{
|
||||
@@ -964,7 +1046,7 @@ RCLConsensus::gotTxSet(NetClock::time_point const& now, RCLTxSet const& txSet)
|
||||
try
|
||||
{
|
||||
std::lock_guard _{mutex_};
|
||||
consensus_.gotTxSet(now, txSet);
|
||||
consensus_->gotTxSet(now, txSet);
|
||||
}
|
||||
catch (SHAMapMissingNode const& mn)
|
||||
{
|
||||
@@ -982,7 +1064,7 @@ RCLConsensus::simulate(
|
||||
std::optional<std::chrono::milliseconds> consensusDelay)
|
||||
{
|
||||
std::lock_guard _{mutex_};
|
||||
consensus_.simulate(now, consensusDelay);
|
||||
consensus_->simulate(now, consensusDelay);
|
||||
}
|
||||
|
||||
bool
|
||||
@@ -991,14 +1073,24 @@ RCLConsensus::peerProposal(
|
||||
RCLCxPeerPos const& newProposal)
|
||||
{
|
||||
std::lock_guard _{mutex_};
|
||||
return consensus_.peerProposal(now, newProposal);
|
||||
return consensus_->peerProposal(now, newProposal);
|
||||
}
|
||||
|
||||
//@@start pre-start-round
|
||||
bool
|
||||
RCLConsensus::Adaptor::preStartRound(
|
||||
RCLCxLedger const& prevLgr,
|
||||
hash_set<NodeID> const& nowTrusted)
|
||||
{
|
||||
ce().setRngEnabledThisRound(
|
||||
prevLgr.ledger_->rules().enabled(featureConsensusEntropy));
|
||||
ce().setExportEnabledThisRound(
|
||||
prevLgr.ledger_->rules().enabled(featureExport));
|
||||
|
||||
JLOG(j_.trace()) << "RNGGATE: preStartRound prevSeq=" << prevLgr.seq()
|
||||
<< " rulesEnabled=" << ce().rngEnabled()
|
||||
<< " exportEnabled=" << ce().exportEnabled();
|
||||
|
||||
// We have a key, we do not want out of sync validations after a restart
|
||||
// and are not amendment blocked.
|
||||
validating_ = validatorKeys_.keys &&
|
||||
@@ -1041,9 +1133,19 @@ RCLConsensus::Adaptor::preStartRound(
|
||||
!nowTrusted.empty())
|
||||
nUnlVote_.newValidators(prevLgr.seq() + 1, nowTrusted);
|
||||
|
||||
bool const proposing = validating_ && synced;
|
||||
|
||||
JLOG(j_.info()) << "STARTDIAG: preStartRound"
|
||||
<< " mode=" << app_.getOPs().strOperatingMode()
|
||||
<< " synced=" << (synced ? "yes" : "no")
|
||||
<< " validating=" << (validating_ ? "yes" : "no")
|
||||
<< " proposing=" << (proposing ? "yes" : "no")
|
||||
<< " seq=" << (prevLgr.seq() + 1);
|
||||
|
||||
// propose only if we're in sync with the network (and validating)
|
||||
return validating_ && synced;
|
||||
return proposing;
|
||||
}
|
||||
//@@end pre-start-round
|
||||
|
||||
bool
|
||||
RCLConsensus::Adaptor::haveValidated() const
|
||||
@@ -1081,7 +1183,11 @@ void
|
||||
RCLConsensus::Adaptor::updateOperatingMode(std::size_t const positions) const
|
||||
{
|
||||
if (!positions && app_.getOPs().isFull())
|
||||
{
|
||||
JLOG(j_.warn()) << "STARTDIAG: updateOperatingMode demoting"
|
||||
<< " FULL->CONNECTED positions=" << positions;
|
||||
app_.getOPs().setMode(OperatingMode::CONNECTED);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
@@ -1094,7 +1200,7 @@ RCLConsensus::startRound(
|
||||
std::unique_ptr<std::stringstream> const& clog)
|
||||
{
|
||||
std::lock_guard _{mutex_};
|
||||
consensus_.startRound(
|
||||
consensus_->startRound(
|
||||
now,
|
||||
prevLgrId,
|
||||
prevLgr,
|
||||
@@ -1124,11 +1230,16 @@ RclConsensusLogger::~RclConsensusLogger()
|
||||
return;
|
||||
auto const duration = std::chrono::duration_cast<std::chrono::milliseconds>(
|
||||
std::chrono::steady_clock::now() - start_);
|
||||
std::stringstream outSs;
|
||||
outSs << header_ << "duration " << (duration.count() / 1000) << '.'
|
||||
<< std::setw(3) << std::setfill('0') << (duration.count() % 1000)
|
||||
<< "s. " << ss_->str();
|
||||
j_.sink().writeAlways(beast::severities::kInfo, outSs.str());
|
||||
}
|
||||
ss_->seekg(0, std::ios::beg);
|
||||
|
||||
std::string line;
|
||||
while (std::getline(*ss_, line, '.'))
|
||||
{
|
||||
boost::algorithm::trim(line);
|
||||
if (!line.empty())
|
||||
JLOG(j_.debug()) << header_ << line << ".";
|
||||
}
|
||||
JLOG(j_.debug()) << header_ << "Total duration: " << duration.count()
|
||||
<< "ms.";
|
||||
}
|
||||
} // namespace ripple
|
||||
|
||||
@@ -20,22 +20,26 @@
|
||||
#ifndef RIPPLE_APP_CONSENSUS_RCLCONSENSUS_H_INCLUDED
|
||||
#define RIPPLE_APP_CONSENSUS_RCLCONSENSUS_H_INCLUDED
|
||||
|
||||
#include <xrpld/app/consensus/ConsensusExtensions.h>
|
||||
#include <xrpld/app/consensus/RCLCensorshipDetector.h>
|
||||
#include <xrpld/app/consensus/RCLCxLedger.h>
|
||||
#include <xrpld/app/consensus/RCLCxPeerPos.h>
|
||||
#include <xrpld/app/consensus/RCLCxTx.h>
|
||||
#include <xrpld/app/misc/FeeVote.h>
|
||||
#include <xrpld/app/misc/NegativeUNLVote.h>
|
||||
#include <xrpld/consensus/Consensus.h>
|
||||
#include <xrpld/consensus/ConsensusParms.h>
|
||||
#include <xrpld/consensus/ConsensusTypes.h>
|
||||
#include <xrpld/core/JobQueue.h>
|
||||
#include <xrpld/overlay/Message.h>
|
||||
#include <xrpld/shamap/SHAMap.h>
|
||||
#include <xrpl/basics/CountedObject.h>
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/beast/clock/abstract_clock.h>
|
||||
#include <xrpl/beast/utility/Journal.h>
|
||||
#include <xrpl/protocol/RippleLedgerHash.h>
|
||||
#include <xrpl/protocol/STValidation.h>
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <set>
|
||||
@@ -44,10 +48,13 @@
|
||||
|
||||
namespace ripple {
|
||||
|
||||
class CanonicalTXSet;
|
||||
class InboundTransactions;
|
||||
class LocalTxs;
|
||||
class LedgerMaster;
|
||||
class ValidatorKeys;
|
||||
template <class Adaptor>
|
||||
class Consensus;
|
||||
|
||||
/** Manages the generic consensus algorithm for use by the RCL.
|
||||
*/
|
||||
@@ -91,12 +98,16 @@ class RCLConsensus
|
||||
RCLCensorshipDetector<TxID, LedgerIndex> censorshipDetector_;
|
||||
NegativeUNLVote nUnlVote_;
|
||||
|
||||
// RNG/Export state has moved to ConsensusExtensions
|
||||
// (owned by Application, accessible via app_.getConsensusExtensions())
|
||||
|
||||
public:
|
||||
using Ledger_t = RCLCxLedger;
|
||||
using NodeID_t = NodeID;
|
||||
using NodeKey_t = PublicKey;
|
||||
using TxSet_t = RCLTxSet;
|
||||
using PeerPosition_t = RCLCxPeerPos;
|
||||
using Position_t = ExtendedPosition;
|
||||
|
||||
using Result = ConsensusResult<Adaptor>;
|
||||
|
||||
@@ -182,6 +193,14 @@ class RCLConsensus
|
||||
return parms_;
|
||||
}
|
||||
|
||||
// --- ConsensusExtensions access ---
|
||||
|
||||
ConsensusExtensions&
|
||||
ce();
|
||||
|
||||
ConsensusExtensions const&
|
||||
ce() const;
|
||||
|
||||
private:
|
||||
//---------------------------------------------------------------------
|
||||
// The following members implement the generic Consensus requirements
|
||||
@@ -420,6 +439,8 @@ class RCLConsensus
|
||||
};
|
||||
|
||||
public:
|
||||
using clock_type = beast::abstract_clock<std::chrono::steady_clock>;
|
||||
|
||||
//! Constructor
|
||||
RCLConsensus(
|
||||
Application& app,
|
||||
@@ -427,10 +448,12 @@ public:
|
||||
LedgerMaster& ledgerMaster,
|
||||
LocalTxs& localTxs,
|
||||
InboundTransactions& inboundTransactions,
|
||||
Consensus<Adaptor>::clock_type const& clock,
|
||||
clock_type const& clock,
|
||||
ValidatorKeys const& validatorKeys,
|
||||
beast::Journal journal);
|
||||
|
||||
~RCLConsensus();
|
||||
|
||||
RCLConsensus(RCLConsensus const&) = delete;
|
||||
|
||||
RCLConsensus&
|
||||
@@ -472,9 +495,26 @@ public:
|
||||
}
|
||||
|
||||
ConsensusPhase
|
||||
phase() const
|
||||
phase() const;
|
||||
|
||||
//! Whether extensions have pending sub-state work in establish
|
||||
bool
|
||||
extensionsBusy() const;
|
||||
|
||||
//! Check if hash is a known extension sidecar set (under mutex)
|
||||
bool
|
||||
isExtensionSet(uint256 const& hash) const
|
||||
{
|
||||
return consensus_.phase();
|
||||
std::lock_guard _{mutex_};
|
||||
return adaptor_.ce().isSidecarSet(hash);
|
||||
}
|
||||
|
||||
//! Route acquired extension sidecar set (under mutex)
|
||||
void
|
||||
gotExtensionSet(std::shared_ptr<SHAMap> const& map)
|
||||
{
|
||||
std::lock_guard _{mutex_};
|
||||
adaptor_.ce().onAcquiredSidecarSet(map);
|
||||
}
|
||||
|
||||
//! @see Consensus::getJson
|
||||
@@ -505,11 +545,7 @@ public:
|
||||
|
||||
// @see Consensus::prevLedgerID
|
||||
RCLCxLedger::ID
|
||||
prevLedgerID() const
|
||||
{
|
||||
std::lock_guard _{mutex_};
|
||||
return consensus_.prevLedgerID();
|
||||
}
|
||||
prevLedgerID() const;
|
||||
|
||||
//! @see Consensus::simulate
|
||||
void
|
||||
@@ -536,7 +572,7 @@ private:
|
||||
mutable std::recursive_mutex mutex_;
|
||||
|
||||
Adaptor adaptor_;
|
||||
Consensus<Adaptor> consensus_;
|
||||
std::unique_ptr<Consensus<Adaptor>> consensus_;
|
||||
beast::Journal const j_;
|
||||
};
|
||||
|
||||
|
||||
@@ -31,10 +31,12 @@ RCLCxPeerPos::RCLCxPeerPos(
|
||||
PublicKey const& publicKey,
|
||||
Slice const& signature,
|
||||
uint256 const& suppression,
|
||||
Proposal&& proposal)
|
||||
Proposal&& proposal,
|
||||
std::vector<std::string> exportSignatures)
|
||||
: publicKey_(publicKey)
|
||||
, suppression_(suppression)
|
||||
, proposal_(std::move(proposal))
|
||||
, exportSignatures_(std::move(exportSignatures))
|
||||
{
|
||||
// The maximum allowed size of a signature is 72 bytes; we verify
|
||||
// this elsewhere, but we want to be extra careful here:
|
||||
@@ -66,15 +68,17 @@ RCLCxPeerPos::getJson() const
|
||||
|
||||
uint256
|
||||
proposalUniqueId(
|
||||
uint256 const& proposeHash,
|
||||
ExtendedPosition const& position,
|
||||
uint256 const& previousLedger,
|
||||
std::uint32_t proposeSeq,
|
||||
NetClock::time_point closeTime,
|
||||
Slice const& publicKey,
|
||||
Slice const& signature)
|
||||
{
|
||||
// This is for suppression/dedup only, NOT for signing.
|
||||
// Must include all fields that distinguish proposals.
|
||||
Serializer s(512);
|
||||
s.addBitString(proposeHash);
|
||||
position.add(s);
|
||||
s.addBitString(previousLedger);
|
||||
s.add32(proposeSeq);
|
||||
s.add32(closeTime.time_since_epoch().count());
|
||||
|
||||
@@ -28,13 +28,294 @@
|
||||
#include <xrpl/protocol/HashPrefix.h>
|
||||
#include <xrpl/protocol/PublicKey.h>
|
||||
#include <xrpl/protocol/SecretKey.h>
|
||||
#include <xrpl/protocol/Serializer.h>
|
||||
#include <boost/container/static_vector.hpp>
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <ostream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
/** Extended position for consensus with RNG entropy support.
|
||||
|
||||
Carries the tx-set hash (the core convergence target), RNG set hashes
|
||||
(agreed via sub-state quorum, not via operator==), and per-validator
|
||||
leaves (unique to each proposer, piggybacked on proposals).
|
||||
|
||||
Critical design:
|
||||
- operator== compares txSetHash ONLY (sub-states handle the rest)
|
||||
- add() includes ALL fields for signing (prevents stripping attacks)
|
||||
*/
|
||||
struct ExtendedPosition
|
||||
{
|
||||
// === Core Convergence Target ===
|
||||
uint256 txSetHash;
|
||||
|
||||
// === Set Hashes (sub-state quorum, not in operator==) ===
|
||||
std::optional<uint256> commitSetHash;
|
||||
std::optional<uint256> entropySetHash;
|
||||
std::optional<uint256> exportSigSetHash;
|
||||
std::optional<uint256> exportSignaturesHash;
|
||||
|
||||
// === Per-Validator Leaves (unique per proposer) ===
|
||||
std::optional<uint256> myCommitment;
|
||||
std::optional<uint256> myReveal;
|
||||
|
||||
ExtendedPosition() = default;
|
||||
explicit ExtendedPosition(uint256 const& txSet) : txSetHash(txSet)
|
||||
{
|
||||
}
|
||||
|
||||
// Implicit conversion for legacy compatibility
|
||||
operator uint256() const
|
||||
{
|
||||
return txSetHash;
|
||||
}
|
||||
|
||||
// Helper to update TxSet while preserving sidecar data
|
||||
void
|
||||
updateTxSet(uint256 const& set)
|
||||
{
|
||||
txSetHash = set;
|
||||
}
|
||||
|
||||
// TODO: replace operator== with a named method (e.g. txSetMatches())
|
||||
// so call sites read as intent, not as "full equality". Overloading
|
||||
// operator== to ignore most fields is surprising and fragile.
|
||||
//
|
||||
// CRITICAL: Only compare txSetHash for consensus convergence.
|
||||
//
|
||||
// Why not commitSetHash / entropySetHash?
|
||||
// Nodes transition through sub-states (ConvergingTx → ConvergingCommit
|
||||
// → ConvergingReveal) at slightly different times. If we included
|
||||
// commitSetHash here, a node that transitions first would set it,
|
||||
// making its position "different" from peers who haven't transitioned
|
||||
// yet — deadlocking haveConsensus() for everyone.
|
||||
//
|
||||
// Instead, the sub-state machine in phaseEstablish handles agreement
|
||||
// on those fields via quorum checks (hasQuorumOfCommits, etc.).
|
||||
//
|
||||
// Implications to consider:
|
||||
// - Two nodes with the same txSetHash but different commitSetHash
|
||||
// will appear to "agree" from the convergence engine's perspective.
|
||||
// This is intentional: tx consensus must not be blocked by RNG.
|
||||
// - A malicious node could propose a different commitSetHash without
|
||||
// affecting tx convergence. This is safe because commitSetHash
|
||||
// disagreement is caught by the sub-state quorum checks, and the
|
||||
// entropy result is verified deterministically from collected reveals.
|
||||
// - Leaves (myCommitment, myReveal) are also excluded — they are
|
||||
// per-validator data unique to each proposer.
|
||||
//@@start rng-extended-position-equality
|
||||
bool
|
||||
operator==(ExtendedPosition const& other) const
|
||||
{
|
||||
return txSetHash == other.txSetHash;
|
||||
}
|
||||
|
||||
bool
|
||||
operator!=(ExtendedPosition const& other) const
|
||||
{
|
||||
return !(*this == other);
|
||||
}
|
||||
|
||||
// Comparison with uint256 (compares txSetHash only)
|
||||
bool
|
||||
operator==(uint256 const& hash) const
|
||||
{
|
||||
return txSetHash == hash;
|
||||
}
|
||||
|
||||
bool
|
||||
operator!=(uint256 const& hash) const
|
||||
{
|
||||
return txSetHash != hash;
|
||||
}
|
||||
|
||||
friend bool
|
||||
operator==(uint256 const& hash, ExtendedPosition const& pos)
|
||||
{
|
||||
return pos.txSetHash == hash;
|
||||
}
|
||||
|
||||
friend bool
|
||||
operator!=(uint256 const& hash, ExtendedPosition const& pos)
|
||||
{
|
||||
return pos.txSetHash != hash;
|
||||
}
|
||||
//@@end rng-extended-position-equality
|
||||
|
||||
// CRITICAL: Include ALL fields for signing (prevents stripping attacks)
|
||||
//
|
||||
// Compatibility note:
|
||||
// - New code accepts both legacy 32-byte tx-set hashes and the extended
|
||||
// payload with RNG sidecars.
|
||||
// - Older binaries that only understand a raw uint256 proposal position
|
||||
// will reject extended payloads as malformed.
|
||||
// - Therefore ConsensusEntropy requires an all-upgraded validator set
|
||||
// before activation; this format is backward-compatible, not
|
||||
// forward-compatible.
|
||||
//@@start rng-extended-position-serialize
|
||||
void
|
||||
add(Serializer& s) const
|
||||
{
|
||||
s.addBitString(txSetHash);
|
||||
|
||||
// Wire compatibility: if no extensions, emit exactly 32 bytes
|
||||
// so legacy nodes that expect a plain uint256 work unchanged.
|
||||
if (!commitSetHash && !entropySetHash && !exportSigSetHash &&
|
||||
!exportSignaturesHash && !myCommitment && !myReveal)
|
||||
return;
|
||||
|
||||
std::uint8_t flags = 0;
|
||||
if (commitSetHash)
|
||||
flags |= 0x01;
|
||||
if (entropySetHash)
|
||||
flags |= 0x02;
|
||||
if (myCommitment)
|
||||
flags |= 0x04;
|
||||
if (myReveal)
|
||||
flags |= 0x08;
|
||||
if (exportSigSetHash)
|
||||
flags |= 0x10;
|
||||
if (exportSignaturesHash)
|
||||
flags |= 0x20;
|
||||
s.add8(flags);
|
||||
|
||||
if (commitSetHash)
|
||||
s.addBitString(*commitSetHash);
|
||||
if (entropySetHash)
|
||||
s.addBitString(*entropySetHash);
|
||||
if (myCommitment)
|
||||
s.addBitString(*myCommitment);
|
||||
if (myReveal)
|
||||
s.addBitString(*myReveal);
|
||||
if (exportSigSetHash)
|
||||
s.addBitString(*exportSigSetHash);
|
||||
if (exportSignaturesHash)
|
||||
s.addBitString(*exportSignaturesHash);
|
||||
}
|
||||
//@@end rng-extended-position-serialize
|
||||
|
||||
Json::Value
|
||||
getJson() const
|
||||
{
|
||||
Json::Value ret = Json::objectValue;
|
||||
ret["tx_set"] = to_string(txSetHash);
|
||||
if (commitSetHash)
|
||||
ret["commit_set"] = to_string(*commitSetHash);
|
||||
if (entropySetHash)
|
||||
ret["entropy_set"] = to_string(*entropySetHash);
|
||||
if (exportSigSetHash)
|
||||
ret["export_sig_set"] = to_string(*exportSigSetHash);
|
||||
if (exportSignaturesHash)
|
||||
ret["export_signatures"] = to_string(*exportSignaturesHash);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/** Deserialize from wire format.
|
||||
Handles both legacy 32-byte hash and new extended format.
|
||||
Returns nullopt if the payload is malformed (truncated for the
|
||||
flags advertised).
|
||||
*/
|
||||
//@@start rng-extended-position-deserialize
|
||||
static std::optional<ExtendedPosition>
|
||||
fromSerialIter(SerialIter& sit, std::size_t totalSize)
|
||||
{
|
||||
if (totalSize < 32)
|
||||
return std::nullopt;
|
||||
|
||||
ExtendedPosition pos;
|
||||
pos.txSetHash = sit.get256();
|
||||
|
||||
// Legacy format: exactly 32 bytes
|
||||
if (totalSize == 32)
|
||||
return pos;
|
||||
|
||||
// Extended format: flags byte + optional uint256 fields
|
||||
if (sit.empty())
|
||||
return pos;
|
||||
|
||||
std::uint8_t flags = sit.get8();
|
||||
|
||||
// Reject unknown flag bits (reduces wire malleability)
|
||||
if (flags & 0xC0)
|
||||
return std::nullopt;
|
||||
|
||||
// Validate exact byte count for the flagged fields.
|
||||
// Each flag bit indicates a 32-byte uint256.
|
||||
int fieldCount = 0;
|
||||
for (int i = 0; i < 6; ++i)
|
||||
if (flags & (1 << i))
|
||||
++fieldCount;
|
||||
|
||||
if (sit.getBytesLeft() != static_cast<std::size_t>(fieldCount * 32))
|
||||
return std::nullopt;
|
||||
|
||||
if (flags & 0x01)
|
||||
pos.commitSetHash = sit.get256();
|
||||
if (flags & 0x02)
|
||||
pos.entropySetHash = sit.get256();
|
||||
if (flags & 0x04)
|
||||
pos.myCommitment = sit.get256();
|
||||
if (flags & 0x08)
|
||||
pos.myReveal = sit.get256();
|
||||
if (flags & 0x10)
|
||||
pos.exportSigSetHash = sit.get256();
|
||||
if (flags & 0x20)
|
||||
pos.exportSignaturesHash = sit.get256();
|
||||
|
||||
return pos;
|
||||
}
|
||||
//@@end rng-extended-position-deserialize
|
||||
};
|
||||
|
||||
// For logging/debugging - returns txSetHash as string
|
||||
inline std::string
|
||||
to_string(ExtendedPosition const& pos)
|
||||
{
|
||||
return to_string(pos.txSetHash);
|
||||
}
|
||||
|
||||
// Stream output for logging
|
||||
inline std::ostream&
|
||||
operator<<(std::ostream& os, ExtendedPosition const& pos)
|
||||
{
|
||||
return os << pos.txSetHash;
|
||||
}
|
||||
|
||||
/** Hash the raw export-signature blobs carried alongside a proposal.
|
||||
|
||||
The resulting digest is embedded in ExtendedPosition and therefore covered
|
||||
by the normal proposal signature. The raw protobuf field remains outside
|
||||
consensus equality, but stripping or mutating it invalidates the signed
|
||||
digest before duplicate suppression.
|
||||
*/
|
||||
template <class ExportSignatures>
|
||||
uint256
|
||||
proposalExportSignaturesHash(ExportSignatures const& exportSignatures)
|
||||
{
|
||||
Serializer s(512);
|
||||
s.add32(static_cast<std::uint32_t>(exportSignatures.size()));
|
||||
for (auto const& blob : exportSignatures)
|
||||
s.addVL(Slice(blob.data(), blob.size()));
|
||||
return s.getSHA512Half();
|
||||
}
|
||||
|
||||
// For hash_append (used in sha512Half and similar)
|
||||
template <class Hasher>
|
||||
void
|
||||
hash_append(Hasher& h, ExtendedPosition const& pos)
|
||||
{
|
||||
using beast::hash_append;
|
||||
// Serialize full position including all fields
|
||||
Serializer s;
|
||||
pos.add(s);
|
||||
hash_append(h, s.slice());
|
||||
}
|
||||
|
||||
/** A peer's signed, proposed position for use in RCLConsensus.
|
||||
|
||||
Carries a ConsensusProposal signed by a peer. Provides value semantics
|
||||
@@ -43,8 +324,9 @@ namespace ripple {
|
||||
class RCLCxPeerPos
|
||||
{
|
||||
public:
|
||||
//< The type of the proposed position
|
||||
using Proposal = ConsensusProposal<NodeID, uint256, uint256>;
|
||||
//< The type of the proposed position (uses ExtendedPosition for RNG
|
||||
// support)
|
||||
using Proposal = ConsensusProposal<NodeID, uint256, ExtendedPosition>;
|
||||
|
||||
/** Constructor
|
||||
|
||||
@@ -60,7 +342,8 @@ public:
|
||||
PublicKey const& publicKey,
|
||||
Slice const& signature,
|
||||
uint256 const& suppress,
|
||||
Proposal&& proposal);
|
||||
Proposal&& proposal,
|
||||
std::vector<std::string> exportSignatures = {});
|
||||
|
||||
//! Verify the signing hash of the proposal
|
||||
bool
|
||||
@@ -93,6 +376,12 @@ public:
|
||||
return proposal_;
|
||||
}
|
||||
|
||||
std::vector<std::string> const&
|
||||
exportSignatures() const
|
||||
{
|
||||
return exportSignatures_;
|
||||
}
|
||||
|
||||
//! JSON representation of proposal
|
||||
Json::Value
|
||||
getJson() const;
|
||||
@@ -107,6 +396,7 @@ private:
|
||||
PublicKey publicKey_;
|
||||
uint256 suppression_;
|
||||
Proposal proposal_;
|
||||
std::vector<std::string> exportSignatures_;
|
||||
boost::container::static_vector<std::uint8_t, 72> signature_;
|
||||
|
||||
template <class Hasher>
|
||||
@@ -118,7 +408,10 @@ private:
|
||||
hash_append(h, std::uint32_t(proposal().proposeSeq()));
|
||||
hash_append(h, proposal().closeTime());
|
||||
hash_append(h, proposal().prevLedger());
|
||||
hash_append(h, proposal().position());
|
||||
// Serialize full ExtendedPosition for hashing
|
||||
Serializer s;
|
||||
proposal().position().add(s);
|
||||
hash_append(h, s.slice());
|
||||
}
|
||||
};
|
||||
|
||||
@@ -131,7 +424,7 @@ private:
|
||||
order to validate the signature. If the last closed ledger is left out, then
|
||||
it is considered as all zeroes for the purposes of signing.
|
||||
|
||||
@param proposeHash The hash of the proposed position
|
||||
@param position The extended position (includes entropy fields)
|
||||
@param previousLedger The hash of the ledger the proposal is based upon
|
||||
@param proposeSeq Sequence number of the proposal
|
||||
@param closeTime Close time of the proposal
|
||||
@@ -140,7 +433,7 @@ private:
|
||||
*/
|
||||
uint256
|
||||
proposalUniqueId(
|
||||
uint256 const& proposeHash,
|
||||
ExtendedPosition const& position,
|
||||
uint256 const& previousLedger,
|
||||
std::uint32_t proposeSeq,
|
||||
NetClock::time_point closeTime,
|
||||
|
||||
@@ -139,7 +139,8 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash)
|
||||
|
||||
if (!ledger)
|
||||
{
|
||||
JLOG(j_.debug())
|
||||
// MERGE NOTE (upstream 86ef16dbeb): promoted from debug to warn.
|
||||
JLOG(j_.warn())
|
||||
<< "Need validated ledger for preferred ledger analysis " << hash;
|
||||
|
||||
Application* pApp = &app_;
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# RCL Consensus
|
||||
# RCL Consensus
|
||||
|
||||
This directory holds the types and classes needed
|
||||
to connect the generic consensus algorithm to the
|
||||
@@ -7,7 +7,11 @@ rippled-specific instance of consensus.
|
||||
* `RCLCxTx` adapts a `SHAMapItem` transaction.
|
||||
* `RCLCxTxSet` adapts a `SHAMap` to represent a set of transactions.
|
||||
* `RCLCxLedger` adapts a `Ledger`.
|
||||
* `RCLConsensus` is implements the requirements of the generic
|
||||
* `RCLConsensus` implements the requirements of the generic
|
||||
`Consensus` class by connecting to the rest of the `rippled`
|
||||
application.
|
||||
application.
|
||||
|
||||
Xahau-specific proposal sidecars, ConsensusEntropy/RNG, and export signature
|
||||
convergence follow the invariants in
|
||||
[`ConsensusExtensionsDesign.md`](ConsensusExtensionsDesign.md). Read that note
|
||||
before changing extension quorum, sidecar sync, or fallback behavior.
|
||||
|
||||
@@ -82,6 +82,16 @@ public:
|
||||
Expected<uint256, HookReturnCode>
|
||||
etxn_nonce() const;
|
||||
|
||||
/// xport APIs
|
||||
Expected<uint64_t, HookReturnCode>
|
||||
xport_reserve(uint64_t count) const;
|
||||
|
||||
Expected<uint256, HookReturnCode>
|
||||
xport(Slice const& txBlob) const;
|
||||
|
||||
Expected<uint64_t, HookReturnCode>
|
||||
xport_cancel(uint32_t ticketSeq) const;
|
||||
|
||||
/// float APIs
|
||||
Expected<uint64_t, HookReturnCode>
|
||||
float_set(int32_t exponent, int64_t mantissa) const;
|
||||
|
||||
@@ -145,7 +145,7 @@ struct HookResult
|
||||
ripple::uint256 const hookNamespace;
|
||||
|
||||
std::queue<std::shared_ptr<ripple::Transaction>>
|
||||
emittedTxn{}; // etx stored here until accept/rollback
|
||||
emittedTxn{}; // etx stored here until accept/rollback (includes xport)
|
||||
HookStateMap& stateMap;
|
||||
uint16_t changedStateCount = 0;
|
||||
std::map<
|
||||
@@ -174,6 +174,8 @@ struct HookResult
|
||||
false; // hook_again allows strong pre-apply to nominate
|
||||
// additional weak post-apply execution
|
||||
std::shared_ptr<STObject const> provisionalMeta;
|
||||
uint64_t rngCallCounter{
|
||||
0}; // used to ensure conseq. rng calls don't return same data
|
||||
};
|
||||
|
||||
class HookExecutor;
|
||||
@@ -202,6 +204,8 @@ struct HookContext
|
||||
uint16_t ledger_nonce_counter{0};
|
||||
int64_t expected_etxn_count{-1}; // make this a 64bit int so the uint32
|
||||
// from the hookapi cant overflow it
|
||||
int64_t expected_export_count{-1};
|
||||
int64_t export_count{0}; // how many xport() calls succeeded
|
||||
std::map<ripple::uint256, bool> nonce_used{};
|
||||
uint32_t generation =
|
||||
0; // used for caching, only generated when txn_generation is called
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
#include <xrpld/app/hook/HookAPI.h>
|
||||
#include <xrpld/app/ledger/OpenLedger.h>
|
||||
#include <xrpld/app/ledger/TransactionMaster.h>
|
||||
#include <xrpld/app/tx/detail/ExportLedgerOps.h>
|
||||
#include <xrpld/app/tx/detail/Import.h>
|
||||
#include <xrpl/protocol/STParsedJSON.h>
|
||||
#include <cfenv>
|
||||
@@ -1223,6 +1224,193 @@ HookAPI::etxn_reserve(uint64_t count) const
|
||||
return count;
|
||||
}
|
||||
|
||||
Expected<uint64_t, HookReturnCode>
|
||||
HookAPI::xport_reserve(uint64_t count) const
|
||||
{
|
||||
if (hookCtx.expected_export_count > -1)
|
||||
return Unexpected(ALREADY_SET);
|
||||
|
||||
if (count < 1)
|
||||
return Unexpected(TOO_SMALL);
|
||||
|
||||
if (count > hook_api::max_export)
|
||||
return Unexpected(TOO_BIG);
|
||||
|
||||
hookCtx.expected_export_count = count;
|
||||
|
||||
// Also reserve emit slots so the wrapper ttEXPORT can flow
|
||||
// through the normal emitted txn path.
|
||||
if (hookCtx.expected_etxn_count < 0)
|
||||
hookCtx.expected_etxn_count = 0;
|
||||
hookCtx.expected_etxn_count += count;
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
Expected<uint256, HookReturnCode>
|
||||
HookAPI::xport(Slice const& txBlob) const
|
||||
{
|
||||
auto& applyCtx = hookCtx.applyCtx;
|
||||
auto& app = applyCtx.app;
|
||||
auto j = app.journal("View");
|
||||
auto& view = applyCtx.view();
|
||||
|
||||
if (hookCtx.expected_export_count < 0)
|
||||
return Unexpected(PREREQUISITE_NOT_MET);
|
||||
|
||||
if (hookCtx.export_count >= hookCtx.expected_export_count)
|
||||
return Unexpected(TOO_MANY_EXPORTED_TXN);
|
||||
|
||||
// Parse and validate the inner (cross-chain) transaction.
|
||||
std::shared_ptr<STTx const> innerTx;
|
||||
try
|
||||
{
|
||||
SerialIter sit(txBlob);
|
||||
innerTx = std::make_shared<STTx const>(sit);
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
JLOG(j.trace()) << "HookExport[" << HC_ACC() << "]: Failed "
|
||||
<< e.what();
|
||||
return Unexpected(EXPORT_FAILURE);
|
||||
}
|
||||
|
||||
if (auto ter = ExportLedgerOps::validateExportAccount(
|
||||
*innerTx, hookCtx.result.account, j);
|
||||
!isTesSuccess(ter))
|
||||
return Unexpected(EXPORT_FAILURE);
|
||||
|
||||
if (auto ter = ExportLedgerOps::validateNetworkID(
|
||||
*innerTx, app.config().NETWORK_ID, j);
|
||||
!isTesSuccess(ter))
|
||||
return Unexpected(EXPORT_FAILURE);
|
||||
|
||||
if (auto ter = ExportLedgerOps::validateTicketSequence(*innerTx, j);
|
||||
!isTesSuccess(ter))
|
||||
return Unexpected(EXPORT_FAILURE);
|
||||
|
||||
// Construct a ttEXPORT wrapping the inner tx, with EmitDetails,
|
||||
// and push onto the emitted txn queue. This flows through the
|
||||
// normal emitted txn path (emitted dir → TxQ injection → open
|
||||
// ledger → retriable Export transactor).
|
||||
uint32_t const ledgerSeq = view.info().seq;
|
||||
|
||||
// Generate a nonce for the emitted ttEXPORT wrapper.
|
||||
auto nonce = etxn_nonce();
|
||||
if (!nonce.has_value())
|
||||
return Unexpected(INTERNAL_ERROR);
|
||||
|
||||
// Serialize inner tx as sfExportedTxn object.
|
||||
Serializer innerSer;
|
||||
innerTx->add(innerSer);
|
||||
|
||||
// Build the ttEXPORT wrapper as an STObject first so we can
|
||||
// compute the fee, set it, then construct the STTx from the
|
||||
// final serialised bytes. This avoids mutating the STTx after
|
||||
// construction (which would leave a stale cached txid — see
|
||||
// the tefNONDIR_EMIT check in Transactor::preclaim).
|
||||
//
|
||||
// The fee field is a fixed 9 bytes regardless of value, so
|
||||
// patching it on the STObject doesn't change the serialised size.
|
||||
STObject exportObj(sfGeneric);
|
||||
{
|
||||
exportObj.setFieldU16(sfTransactionType, ttEXPORT);
|
||||
exportObj[sfAccount] = hookCtx.result.account;
|
||||
exportObj[sfSequence] = 0u;
|
||||
exportObj.setFieldVL(sfSigningPubKey, Blob{});
|
||||
exportObj[sfFirstLedgerSequence] = ledgerSeq + 1;
|
||||
exportObj[sfLastLedgerSequence] = ledgerSeq + 5;
|
||||
exportObj[sfFee] = STAmount{0};
|
||||
|
||||
// sfExportedTxn inner object
|
||||
SerialIter sit(innerSer.slice());
|
||||
exportObj.set(std::make_unique<STObject>(sit, sfExportedTxn));
|
||||
|
||||
// sfEmitDetails
|
||||
STObject emitDetails(sfEmitDetails);
|
||||
emitDetails.setFieldU32(
|
||||
sfEmitGeneration, static_cast<uint32_t>(etxn_generation()));
|
||||
{
|
||||
auto const burdenResult = etxn_burden();
|
||||
emitDetails.setFieldU64(
|
||||
sfEmitBurden,
|
||||
burdenResult ? static_cast<uint64_t>(*burdenResult) : 1ULL);
|
||||
}
|
||||
emitDetails.setFieldH256(
|
||||
sfEmitParentTxnID, applyCtx.tx.getTransactionID());
|
||||
emitDetails.setFieldH256(sfEmitNonce, *nonce);
|
||||
emitDetails.setFieldH256(sfEmitHookHash, hookCtx.result.hookHash);
|
||||
if (hookCtx.result.hasCallback)
|
||||
emitDetails.setAccountID(sfEmitCallback, hookCtx.result.account);
|
||||
exportObj.set(std::move(emitDetails));
|
||||
|
||||
// Compute fee from serialised size and patch it in.
|
||||
Serializer feeSer;
|
||||
exportObj.add(feeSer);
|
||||
auto feeResult = etxn_fee_base(feeSer.slice());
|
||||
if (!feeResult)
|
||||
{
|
||||
JLOG(j.trace()) << "HookExport[" << HC_ACC()
|
||||
<< "]: Fee calculation failed for ttEXPORT wrapper";
|
||||
return Unexpected(EXPORT_FAILURE);
|
||||
}
|
||||
exportObj[sfFee] = STAmount{static_cast<uint64_t>(*feeResult)};
|
||||
}
|
||||
|
||||
// Construct the STTx from the finalised STObject bytes.
|
||||
Serializer exportSer;
|
||||
exportObj.add(exportSer);
|
||||
STTx exportStx(SerialIter{exportSer.slice()});
|
||||
|
||||
// Preflight the wrapper.
|
||||
auto preflightResult = ripple::preflight(
|
||||
app, view.rules(), exportStx, ripple::ApplyFlags::tapPREFLIGHT_EMIT, j);
|
||||
|
||||
if (!isTesSuccess(preflightResult.ter))
|
||||
{
|
||||
JLOG(j.trace()) << "HookExport[" << HC_ACC()
|
||||
<< "]: ttEXPORT wrapper preflight failure: "
|
||||
<< transHuman(preflightResult.ter);
|
||||
return Unexpected(EXPORT_FAILURE);
|
||||
}
|
||||
|
||||
// Wrap in Transaction and push to emittedTxn queue.
|
||||
auto stpExport = std::make_shared<STTx const>(std::move(exportStx));
|
||||
std::string reason;
|
||||
auto tpTrans = std::make_shared<Transaction>(stpExport, reason, app);
|
||||
if (tpTrans->getStatus() != NEW)
|
||||
{
|
||||
JLOG(j.trace()) << "HookExport[" << HC_ACC()
|
||||
<< "]: tpTrans->getStatus() != NEW for wrapper";
|
||||
return Unexpected(EXPORT_FAILURE);
|
||||
}
|
||||
|
||||
// Push onto emittedTxn. The wrapper ttEXPORT flows through the
|
||||
// normal emitted txn path (emitted dir → TxQ → open ledger →
|
||||
// retriable Export transactor).
|
||||
hookCtx.result.emittedTxn.push(tpTrans);
|
||||
++hookCtx.export_count;
|
||||
|
||||
// Return the inner tx hash — this is what the hook author cares
|
||||
// about (the cross-chain transaction they built).
|
||||
return innerTx->getTransactionID();
|
||||
}
|
||||
|
||||
Expected<uint64_t, HookReturnCode>
|
||||
HookAPI::xport_cancel(uint32_t ticketSeq) const
|
||||
{
|
||||
auto& app = hookCtx.applyCtx.app;
|
||||
auto j = app.journal("View");
|
||||
|
||||
TER const ter = ExportLedgerOps::cancelShadowTicket(
|
||||
hookCtx.applyCtx.view(), hookCtx.result.account, ticketSeq, j);
|
||||
|
||||
if (!isTesSuccess(ter))
|
||||
return Unexpected(DOESNT_EXIST);
|
||||
|
||||
return ticketSeq;
|
||||
}
|
||||
|
||||
uint32_t
|
||||
HookAPI::etxn_generation() const
|
||||
{
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
#include <xrpld/app/misc/TxQ.h>
|
||||
#include <xrpld/app/tx/detail/Import.h>
|
||||
#include <xrpld/app/tx/detail/NFTokenUtils.h>
|
||||
#include <xrpld/ledger/View.h>
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/basics/Slice.h>
|
||||
#include <xrpl/protocol/ErrorCodes.h>
|
||||
@@ -584,7 +585,9 @@ getTransactionalStakeHolders(STTx const& tx, ReadView const& rv)
|
||||
case ttFEE:
|
||||
case ttUNL_MODIFY:
|
||||
case ttEMIT_FAILURE:
|
||||
case ttUNL_REPORT: {
|
||||
case ttUNL_REPORT:
|
||||
case ttEXPORT:
|
||||
case ttCONSENSUS_ENTROPY: {
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
@@ -1657,6 +1660,7 @@ hook::finalizeHookResult(
|
||||
// directory) if we are allowed to
|
||||
std::vector<std::pair<uint256 /* txnid */, uint256 /* emit nonce */>>
|
||||
emission_txnid;
|
||||
std::vector<uint256 /* txnid */> exported_txnid;
|
||||
|
||||
if (doEmit)
|
||||
{
|
||||
@@ -1691,7 +1695,8 @@ hook::finalizeHookResult(
|
||||
ptr->add(s);
|
||||
SerialIter sit(s.slice());
|
||||
|
||||
sleEmitted->emplace_back(ripple::STObject(sit, sfEmittedTxn));
|
||||
sleEmitted->set(
|
||||
std::make_unique<ripple::STObject>(sit, sfEmittedTxn));
|
||||
auto page = applyCtx.view().dirInsert(
|
||||
keylet::emittedDir(), emittedId, [&](SLE::ref sle) {
|
||||
(*sle)[sfFlags] = lsfEmittedDir;
|
||||
@@ -1712,6 +1717,12 @@ hook::finalizeHookResult(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Exported txns now flow through the emitted txn path above
|
||||
// (xport() pushes a ttEXPORT wrapper onto emittedTxn).
|
||||
// The export backlog cap is enforced after hook finalization by
|
||||
// ApplyContext::checkExportEmissionLimit(), so strong and weak hook
|
||||
// emissions use the same fee-only reset path.
|
||||
}
|
||||
|
||||
bool const fixV2 = applyCtx.view().rules().enabled(fixXahauV2);
|
||||
@@ -1738,6 +1749,10 @@ hook::finalizeHookResult(
|
||||
meta.setFieldU16(
|
||||
sfHookEmitCount,
|
||||
emission_txnid.size()); // this will never wrap, hard limit
|
||||
if (applyCtx.view().rules().enabled(featureExport))
|
||||
{
|
||||
meta.setFieldU16(sfHookExportCount, exported_txnid.size());
|
||||
}
|
||||
meta.setFieldU16(sfHookExecutionIndex, exec_index);
|
||||
meta.setFieldU16(sfHookStateChangeCount, hookResult.changedStateCount);
|
||||
meta.setFieldH256(sfHookHash, hookResult.hookHash);
|
||||
@@ -3029,6 +3044,31 @@ DEFINE_HOOK_FUNCTION(int64_t, etxn_reserve, uint32_t count)
|
||||
HOOK_TEARDOWN();
|
||||
}
|
||||
|
||||
DEFINE_HOOK_FUNCTION(int64_t, xport_reserve, uint32_t count)
|
||||
{
|
||||
HOOK_SETUP(); // populates memory_ctx, memory, memory_length, applyCtx,
|
||||
// hookCtx on current stack
|
||||
|
||||
auto const result = api.xport_reserve(count);
|
||||
if (!result)
|
||||
return result.error();
|
||||
return result.value();
|
||||
|
||||
HOOK_TEARDOWN();
|
||||
}
|
||||
|
||||
DEFINE_HOOK_FUNCTION(int64_t, xport_cancel, uint32_t ticket_seq)
|
||||
{
|
||||
HOOK_SETUP();
|
||||
|
||||
auto const result = api.xport_cancel(ticket_seq);
|
||||
if (!result)
|
||||
return result.error();
|
||||
return result.value();
|
||||
|
||||
HOOK_TEARDOWN();
|
||||
}
|
||||
|
||||
// Compute the burden of an emitted transaction based on a number of factors
|
||||
DEFINE_HOOK_FUNCTION(int64_t, etxn_burden)
|
||||
{
|
||||
@@ -4092,6 +4132,177 @@ DEFINE_HOOK_FUNCTION(
|
||||
|
||||
HOOK_TEARDOWN();
|
||||
}
|
||||
|
||||
//@@start xport-impl
|
||||
DEFINE_HOOK_FUNCTION(
|
||||
int64_t,
|
||||
xport,
|
||||
uint32_t write_ptr,
|
||||
uint32_t write_len,
|
||||
uint32_t read_ptr,
|
||||
uint32_t read_len)
|
||||
{
|
||||
HOOK_SETUP();
|
||||
|
||||
if (NOT_IN_BOUNDS(read_ptr, read_len, memory_length))
|
||||
return OUT_OF_BOUNDS;
|
||||
|
||||
if (NOT_IN_BOUNDS(write_ptr, write_len, memory_length))
|
||||
return OUT_OF_BOUNDS;
|
||||
|
||||
if (write_len < 32)
|
||||
return TOO_SMALL;
|
||||
|
||||
// Delegate to decoupled HookAPI for xport logic
|
||||
ripple::Slice txBlob{
|
||||
reinterpret_cast<const void*>(memory + read_ptr), read_len};
|
||||
|
||||
auto const res = api.xport(txBlob);
|
||||
|
||||
if (!res)
|
||||
return res.error();
|
||||
|
||||
auto const& innerTxHash = *res;
|
||||
|
||||
if (innerTxHash.size() > write_len)
|
||||
return TOO_SMALL;
|
||||
|
||||
if (NOT_IN_BOUNDS(write_ptr, innerTxHash.size(), memory_length))
|
||||
return OUT_OF_BOUNDS;
|
||||
|
||||
WRITE_WASM_MEMORY_AND_RETURN(
|
||||
write_ptr,
|
||||
innerTxHash.size(),
|
||||
innerTxHash.data(),
|
||||
innerTxHash.size(),
|
||||
memory,
|
||||
memory_length);
|
||||
HOOK_TEARDOWN();
|
||||
}
|
||||
//@@end xport-impl
|
||||
|
||||
// byteCount must be a multiple of 32
|
||||
inline std::vector<uint8_t>
|
||||
fairRng(ApplyContext& applyCtx, hook::HookResult& hr, uint32_t byteCount)
|
||||
{
|
||||
if (byteCount > 512)
|
||||
byteCount = 512;
|
||||
|
||||
// force the byte count to be a multiple of 32
|
||||
byteCount &= ~0b11111;
|
||||
|
||||
if (byteCount == 0)
|
||||
return {};
|
||||
|
||||
auto& view = applyCtx.view();
|
||||
|
||||
auto const sleEntropy = view.peek(ripple::keylet::consensusEntropy());
|
||||
auto const seq = view.info().seq;
|
||||
|
||||
auto const entropySeq =
|
||||
sleEntropy ? sleEntropy->getFieldU32(sfLedgerSequence) : 0u;
|
||||
|
||||
// Allow entropy from current ledger (during close) or previous ledger
|
||||
// (open ledger / speculative execution). On the real network hooks
|
||||
// always execute during buildLCL where the entropy pseudo-tx has
|
||||
// already updated the SLE to the current seq.
|
||||
// TODO: open-ledger entropy uses previous ledger's entropy, so
|
||||
// dice/random results will differ between speculative and final
|
||||
// execution. This needs further thought re: UX implications.
|
||||
if (!sleEntropy || entropySeq > seq || (seq - entropySeq) > 1 ||
|
||||
sleEntropy->getFieldU16(sfEntropyCount) < 5)
|
||||
return {};
|
||||
|
||||
// we'll generate bytes in lots of 32
|
||||
|
||||
uint256 rndData = sha512Half(
|
||||
view.info().seq,
|
||||
applyCtx.tx.getTransactionID(),
|
||||
hr.otxnAccount,
|
||||
hr.hookHash,
|
||||
hr.account,
|
||||
hr.hookChainPosition,
|
||||
hr.executeAgainAsWeak ? std::string("weak") : std::string("strong"),
|
||||
sleEntropy->getFieldH256(sfDigest),
|
||||
hr.rngCallCounter++);
|
||||
|
||||
std::vector<uint8_t> bytesOut;
|
||||
bytesOut.resize(byteCount);
|
||||
|
||||
uint8_t* ptr = bytesOut.data();
|
||||
while (1)
|
||||
{
|
||||
std::memcpy(ptr, rndData.data(), 32);
|
||||
ptr += 32;
|
||||
|
||||
if (ptr - bytesOut.data() >= byteCount)
|
||||
break;
|
||||
|
||||
rndData = sha512Half(rndData);
|
||||
}
|
||||
|
||||
return bytesOut;
|
||||
}
|
||||
|
||||
DEFINE_HOOK_FUNCTION(int64_t, dice, uint32_t sides)
|
||||
{
|
||||
HOOK_SETUP();
|
||||
|
||||
if (sides == 0)
|
||||
return INVALID_ARGUMENT;
|
||||
|
||||
auto vec = fairRng(applyCtx, hookCtx.result, 32);
|
||||
|
||||
if (vec.empty())
|
||||
return TOO_LITTLE_ENTROPY;
|
||||
|
||||
if (vec.size() != 32)
|
||||
return INTERNAL_ERROR;
|
||||
|
||||
uint32_t value;
|
||||
std::memcpy(&value, vec.data(), sizeof(uint32_t));
|
||||
|
||||
return value % sides;
|
||||
|
||||
HOOK_TEARDOWN();
|
||||
}
|
||||
|
||||
DEFINE_HOOK_FUNCTION(int64_t, random, uint32_t write_ptr, uint32_t write_len)
|
||||
{
|
||||
HOOK_SETUP();
|
||||
|
||||
if (write_len == 0)
|
||||
return TOO_SMALL;
|
||||
|
||||
if (write_len > 512)
|
||||
return TOO_BIG;
|
||||
|
||||
uint32_t required = write_len;
|
||||
|
||||
if ((required & ~0b11111) == required)
|
||||
{
|
||||
// already a multiple of 32 bytes
|
||||
}
|
||||
else
|
||||
{
|
||||
// round up
|
||||
required &= ~0b11111;
|
||||
required += 32;
|
||||
}
|
||||
|
||||
if (NOT_IN_BOUNDS(write_ptr, write_len, memory_length))
|
||||
return OUT_OF_BOUNDS;
|
||||
|
||||
auto vec = fairRng(applyCtx, hookCtx.result, required);
|
||||
|
||||
if (vec.empty())
|
||||
return TOO_LITTLE_ENTROPY;
|
||||
|
||||
WRITE_WASM_MEMORY_AND_RETURN(
|
||||
write_ptr, write_len, vec.data(), vec.size(), memory, memory_length);
|
||||
|
||||
HOOK_TEARDOWN();
|
||||
}
|
||||
/*
|
||||
|
||||
DEFINE_HOOK_FUNCTION(
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#include <xrpld/nodestore/Database.h>
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/protocol/HashPrefix.h>
|
||||
#include <xrpl/protocol/STTx.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
|
||||
namespace ripple {
|
||||
@@ -64,6 +65,15 @@ ConsensusTransSetSF::gotNode(
|
||||
stx->getTransactionID() == nodeHash.as_uint256(),
|
||||
"ripple::ConsensusTransSetSF::gotNode : transaction hash "
|
||||
"match");
|
||||
|
||||
//@@start rng-pseudo-tx-submission-filtering
|
||||
// Don't submit pseudo-transactions (consensus entropy, fees,
|
||||
// amendments, etc.) — they exist as SHAMap entries for
|
||||
// content-addressed identification but are not real user txns.
|
||||
if (isPseudoTx(*stx))
|
||||
return;
|
||||
//@@end rng-pseudo-tx-submission-filtering
|
||||
|
||||
auto const pap = &app_;
|
||||
app_.getJobQueue().addJob(jtTRANSACTION, "TXS->TXN", [pap, stx]() {
|
||||
pap->getOPs().submitTransaction(stx);
|
||||
|
||||
@@ -23,12 +23,15 @@
|
||||
#include <xrpld/overlay/Peer.h>
|
||||
#include <xrpld/shamap/SHAMap.h>
|
||||
#include <xrpl/beast/clock/abstract_clock.h>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
class Application;
|
||||
|
||||
enum class InboundSetKind : std::uint8_t { transaction, sidecar };
|
||||
|
||||
/** Manages the acquisition and lifetime of transaction sets.
|
||||
*/
|
||||
|
||||
@@ -49,11 +52,15 @@ public:
|
||||
* @param setHash The transaction set ID (digest of the SHAMap root node).
|
||||
* @param acquire Whether to fetch the transaction set from the network if
|
||||
* it is missing.
|
||||
* @param kind The kind of SHAMap payload to acquire if the set is missing.
|
||||
* @return The transaction set with ID setHash, or nullptr if it is
|
||||
* missing.
|
||||
*/
|
||||
virtual std::shared_ptr<SHAMap>
|
||||
getSet(uint256 const& setHash, bool acquire) = 0;
|
||||
getSet(
|
||||
uint256 const& setHash,
|
||||
bool acquire,
|
||||
InboundSetKind kind = InboundSetKind::transaction) = 0;
|
||||
|
||||
/** Add a transaction set from a LedgerData message.
|
||||
*
|
||||
|
||||
52
src/xrpld/app/ledger/SidecarSetSF.cpp
Normal file
52
src/xrpld/app/ledger/SidecarSetSF.cpp
Normal file
@@ -0,0 +1,52 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2012, 2013 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <xrpld/app/ledger/SidecarSetSF.h>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
SidecarSetSF::SidecarSetSF(NodeCache& nodeCache) : m_nodeCache(nodeCache)
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
SidecarSetSF::gotNode(
|
||||
bool fromFilter,
|
||||
SHAMapHash const& nodeHash,
|
||||
std::uint32_t,
|
||||
Blob&& nodeData,
|
||||
SHAMapNodeType) const
|
||||
{
|
||||
if (fromFilter)
|
||||
return;
|
||||
|
||||
m_nodeCache.insert(nodeHash, nodeData);
|
||||
}
|
||||
|
||||
std::optional<Blob>
|
||||
SidecarSetSF::getNode(SHAMapHash const& nodeHash) const
|
||||
{
|
||||
Blob nodeData;
|
||||
if (m_nodeCache.retrieve(nodeHash, nodeData))
|
||||
return nodeData;
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
} // namespace ripple
|
||||
57
src/xrpld/app/ledger/SidecarSetSF.h
Normal file
57
src/xrpld/app/ledger/SidecarSetSF.h
Normal file
@@ -0,0 +1,57 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2012, 2013 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_APP_LEDGER_SIDECARSETSF_H_INCLUDED
|
||||
#define RIPPLE_APP_LEDGER_SIDECARSETSF_H_INCLUDED
|
||||
|
||||
#include <xrpld/shamap/SHAMapSyncFilter.h>
|
||||
#include <xrpl/basics/TaggedCache.h>
|
||||
#include <optional>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
// Sync filter for sidecar SHAMaps. Sidecar leaves are STObject(sfGeneric)
|
||||
// payloads, not STTx transactions, so acquisition must not submit them.
|
||||
// Validation stays with the consensus extension merge step, where the expected
|
||||
// sidecar kind and active validator view are known.
|
||||
class SidecarSetSF : public SHAMapSyncFilter
|
||||
{
|
||||
public:
|
||||
using NodeCache = TaggedCache<SHAMapHash, Blob>;
|
||||
|
||||
explicit SidecarSetSF(NodeCache& nodeCache);
|
||||
|
||||
void
|
||||
gotNode(
|
||||
bool fromFilter,
|
||||
SHAMapHash const& nodeHash,
|
||||
std::uint32_t ledgerSeq,
|
||||
Blob&& nodeData,
|
||||
SHAMapNodeType type) const override;
|
||||
|
||||
std::optional<Blob>
|
||||
getNode(SHAMapHash const& nodeHash) const override;
|
||||
|
||||
private:
|
||||
NodeCache& m_nodeCache;
|
||||
};
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
#endif
|
||||
@@ -25,6 +25,7 @@
|
||||
#include <xrpld/app/misc/CanonicalTXSet.h>
|
||||
#include <xrpld/app/tx/apply.h>
|
||||
#include <xrpl/protocol/Feature.h>
|
||||
#include <xrpl/protocol/TxFormats.h>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
@@ -106,6 +107,47 @@ applyTransactions(
|
||||
bool certainRetry = true;
|
||||
std::size_t count = 0;
|
||||
|
||||
//@@start rng-entropy-first-application
|
||||
// CRITICAL: Apply consensus entropy pseudo-tx FIRST before any other
|
||||
// transactions. This ensures hooks can read entropy during this ledger.
|
||||
for (auto it = txns.begin(); it != txns.end(); /* manual */)
|
||||
{
|
||||
if (it->second->getTxnType() != ttCONSENSUS_ENTROPY)
|
||||
{
|
||||
++it;
|
||||
continue;
|
||||
}
|
||||
|
||||
auto const txid = it->first.getTXID();
|
||||
JLOG(j.debug()) << "Applying entropy tx FIRST: " << txid;
|
||||
|
||||
try
|
||||
{
|
||||
auto const result =
|
||||
applyTransaction(app, view, *it->second, true, tapNONE, j);
|
||||
|
||||
if (result == ApplyTransactionResult::Success)
|
||||
{
|
||||
++count;
|
||||
JLOG(j.debug()) << "Entropy tx applied successfully";
|
||||
}
|
||||
else
|
||||
{
|
||||
failed.insert(txid);
|
||||
JLOG(j.warn()) << "Entropy tx failed to apply";
|
||||
}
|
||||
}
|
||||
catch (std::exception const& ex)
|
||||
{
|
||||
JLOG(j.warn()) << "Entropy tx throws: " << ex.what();
|
||||
failed.insert(txid);
|
||||
}
|
||||
|
||||
it = txns.erase(it);
|
||||
break; // Only one entropy tx per ledger
|
||||
}
|
||||
//@@end rng-entropy-first-application
|
||||
|
||||
// Attempt to apply all of the retriable transactions
|
||||
for (int pass = 0; pass < LEDGER_TOTAL_PASSES; ++pass)
|
||||
{
|
||||
|
||||
@@ -93,7 +93,10 @@ public:
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMap>
|
||||
getSet(uint256 const& hash, bool acquire) override
|
||||
getSet(
|
||||
uint256 const& hash,
|
||||
bool acquire,
|
||||
InboundSetKind kind = InboundSetKind::transaction) override
|
||||
{
|
||||
TransactionAcquire::pointer ta;
|
||||
|
||||
@@ -117,7 +120,7 @@ public:
|
||||
return std::shared_ptr<SHAMap>();
|
||||
|
||||
ta = std::make_shared<TransactionAcquire>(
|
||||
app_, hash, m_peerSetBuilder->build());
|
||||
app_, hash, m_peerSetBuilder->build(), kind);
|
||||
|
||||
auto& obj = m_map[hash];
|
||||
obj.mAcquire = ta;
|
||||
|
||||
@@ -27,12 +27,14 @@ LedgerReplay::LedgerReplay(
|
||||
std::shared_ptr<Ledger const> replay)
|
||||
: parent_{std::move(parent)}, replay_{std::move(replay)}
|
||||
{
|
||||
//@@start ledger-replay-ordered-txns
|
||||
for (auto const& item : replay_->txMap())
|
||||
{
|
||||
auto txPair = replay_->txRead(item.key()); // non-const so can be moved
|
||||
auto const txIndex = (*txPair.second)[sfTransactionIndex];
|
||||
orderedTxns_.emplace(txIndex, std::move(txPair.first));
|
||||
}
|
||||
//@@end ledger-replay-ordered-txns
|
||||
}
|
||||
|
||||
LedgerReplay::LedgerReplay(
|
||||
|
||||
@@ -120,7 +120,9 @@ OpenLedger::accept(
|
||||
f(*next, j_);
|
||||
// Apply local tx
|
||||
for (auto const& item : locals)
|
||||
{
|
||||
app.getTxQ().apply(app, *next, item.second, flags, j_);
|
||||
}
|
||||
|
||||
// If we didn't relay this transaction recently, relay it to all peers
|
||||
for (auto const& txpair : next->txs)
|
||||
|
||||
@@ -20,11 +20,13 @@
|
||||
#include <xrpld/app/ledger/ConsensusTransSetSF.h>
|
||||
#include <xrpld/app/ledger/InboundLedgers.h>
|
||||
#include <xrpld/app/ledger/InboundTransactions.h>
|
||||
#include <xrpld/app/ledger/SidecarSetSF.h>
|
||||
#include <xrpld/app/ledger/detail/TransactionAcquire.h>
|
||||
#include <xrpld/app/main/Application.h>
|
||||
#include <xrpld/app/misc/NetworkOPs.h>
|
||||
#include <xrpld/overlay/Overlay.h>
|
||||
#include <xrpld/overlay/detail/ProtocolMessage.h>
|
||||
#include <xrpld/shamap/SHAMapSyncFilter.h>
|
||||
|
||||
#include <memory>
|
||||
|
||||
@@ -40,10 +42,26 @@ enum {
|
||||
MAX_TIMEOUTS = 20,
|
||||
};
|
||||
|
||||
namespace {
|
||||
|
||||
std::unique_ptr<SHAMapSyncFilter>
|
||||
makeSyncFilter(InboundSetKind kind, Application& app)
|
||||
{
|
||||
// Sidecars deliberately reuse candidate tx-set acquisition; the filter only
|
||||
// changes leaf handling so sidecar STObjects are cached, not submitted.
|
||||
if (kind == InboundSetKind::sidecar)
|
||||
return std::make_unique<SidecarSetSF>(app.getTempNodeCache());
|
||||
|
||||
return std::make_unique<ConsensusTransSetSF>(app, app.getTempNodeCache());
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
TransactionAcquire::TransactionAcquire(
|
||||
Application& app,
|
||||
uint256 const& hash,
|
||||
std::unique_ptr<PeerSet> peerSet)
|
||||
std::unique_ptr<PeerSet> peerSet,
|
||||
InboundSetKind kind)
|
||||
: TimeoutCounter(
|
||||
app,
|
||||
hash,
|
||||
@@ -52,9 +70,15 @@ TransactionAcquire::TransactionAcquire(
|
||||
app.journal("TransactionAcquire"))
|
||||
, mHaveRoot(false)
|
||||
, mPeerSet(std::move(peerSet))
|
||||
, mSetKind(kind)
|
||||
{
|
||||
// Keep sidecar fetch on the same content-addressed SHAMap path as tx sets:
|
||||
// normal reply limits, peer scoring, charging, and timeout behavior apply.
|
||||
mMap = std::make_shared<SHAMap>(
|
||||
SHAMapType::TRANSACTION, hash, app_.getNodeFamily());
|
||||
kind == InboundSetKind::sidecar ? SHAMapType::SIDECAR
|
||||
: SHAMapType::TRANSACTION,
|
||||
hash,
|
||||
app_.getNodeFamily());
|
||||
mMap->setUnbacked();
|
||||
}
|
||||
|
||||
@@ -69,7 +93,10 @@ TransactionAcquire::done()
|
||||
}
|
||||
else
|
||||
{
|
||||
JLOG(journal_.debug()) << "Acquired TX set " << hash_;
|
||||
JLOG(journal_.debug())
|
||||
<< "Acquired "
|
||||
<< (mSetKind == InboundSetKind::sidecar ? "sidecar" : "TX")
|
||||
<< " set " << hash_;
|
||||
mMap->setImmutable();
|
||||
|
||||
uint256 const& hash(hash_);
|
||||
@@ -145,8 +172,8 @@ TransactionAcquire::trigger(std::shared_ptr<Peer> const& peer)
|
||||
}
|
||||
else
|
||||
{
|
||||
ConsensusTransSetSF sf(app_, app_.getTempNodeCache());
|
||||
auto nodes = mMap->getMissingNodes(256, &sf);
|
||||
auto sf = makeSyncFilter(mSetKind, app_);
|
||||
auto nodes = mMap->getMissingNodes(256, sf.get());
|
||||
|
||||
if (nodes.empty())
|
||||
{
|
||||
@@ -198,7 +225,7 @@ TransactionAcquire::takeNodes(
|
||||
if (data.empty())
|
||||
return SHAMapAddNode::invalid();
|
||||
|
||||
ConsensusTransSetSF sf(app_, app_.getTempNodeCache());
|
||||
auto sf = makeSyncFilter(mSetKind, app_);
|
||||
|
||||
for (auto const& d : data)
|
||||
{
|
||||
@@ -216,7 +243,7 @@ TransactionAcquire::takeNodes(
|
||||
else
|
||||
mHaveRoot = true;
|
||||
}
|
||||
else if (!mMap->addKnownNode(d.first, d.second, &sf).isGood())
|
||||
else if (!mMap->addKnownNode(d.first, d.second, sf.get()).isGood())
|
||||
{
|
||||
JLOG(journal_.warn()) << "TX acquire got bad non-root node";
|
||||
return SHAMapAddNode::invalid();
|
||||
|
||||
@@ -23,9 +23,12 @@
|
||||
#include <xrpld/app/main/Application.h>
|
||||
#include <xrpld/overlay/PeerSet.h>
|
||||
#include <xrpld/shamap/SHAMap.h>
|
||||
#include <cstdint>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
enum class InboundSetKind : std::uint8_t;
|
||||
|
||||
// VFALCO TODO rename to PeerTxRequest
|
||||
// A transaction set we are trying to acquire
|
||||
class TransactionAcquire final
|
||||
@@ -39,7 +42,8 @@ public:
|
||||
TransactionAcquire(
|
||||
Application& app,
|
||||
uint256 const& hash,
|
||||
std::unique_ptr<PeerSet> peerSet);
|
||||
std::unique_ptr<PeerSet> peerSet,
|
||||
InboundSetKind kind);
|
||||
~TransactionAcquire() = default;
|
||||
|
||||
SHAMapAddNode
|
||||
@@ -57,6 +61,7 @@ private:
|
||||
std::shared_ptr<SHAMap> mMap;
|
||||
bool mHaveRoot;
|
||||
std::unique_ptr<PeerSet> mPeerSet;
|
||||
InboundSetKind mSetKind;
|
||||
|
||||
void
|
||||
onTimer(bool progress, ScopedLockType& peerSetLock) override;
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <xrpld/app/consensus/ConsensusExtensions.h>
|
||||
#include <xrpld/app/consensus/RCLValidations.h>
|
||||
#include <xrpld/app/ledger/InboundLedgers.h>
|
||||
#include <xrpld/app/ledger/InboundTransactions.h>
|
||||
@@ -41,6 +42,7 @@
|
||||
#include <xrpld/app/misc/HashRouter.h>
|
||||
#include <xrpld/app/misc/LoadFeeTrack.h>
|
||||
#include <xrpld/app/misc/NetworkOPs.h>
|
||||
#include <xrpld/app/misc/RuntimeConfig.h>
|
||||
#include <xrpld/app/misc/SHAMapStore.h>
|
||||
#include <xrpld/app/misc/TxQ.h>
|
||||
#include <xrpld/app/misc/ValidatorKeys.h>
|
||||
@@ -214,9 +216,11 @@ public:
|
||||
std::unique_ptr<AmendmentTable> m_amendmentTable;
|
||||
std::unique_ptr<LoadFeeTrack> mFeeTrack;
|
||||
std::unique_ptr<HashRouter> hashRouter_;
|
||||
RuntimeConfig runtimeConfig_;
|
||||
RCLValidations mValidations;
|
||||
std::unique_ptr<LoadManager> m_loadManager;
|
||||
std::unique_ptr<TxQ> txQ_;
|
||||
std::unique_ptr<ConsensusExtensions> consensusExtensions_;
|
||||
ClosureCounter<void, boost::system::error_code const&> waitHandlerCounter_;
|
||||
boost::asio::steady_timer sweepTimer_;
|
||||
boost::asio::steady_timer entropyTimer_;
|
||||
@@ -461,6 +465,10 @@ public:
|
||||
, txQ_(
|
||||
std::make_unique<TxQ>(setup_TxQ(*config_), logs_->journal("TxQ")))
|
||||
|
||||
, consensusExtensions_(std::make_unique<ConsensusExtensions>(
|
||||
*this,
|
||||
logs_->journal("ConsensusExtensions")))
|
||||
|
||||
, sweepTimer_(get_io_service())
|
||||
|
||||
, entropyTimer_(get_io_service())
|
||||
@@ -583,6 +591,22 @@ public:
|
||||
return validatorKeys_.keys->publicKey;
|
||||
}
|
||||
|
||||
SecretKey const&
|
||||
getValidationSecretKey() const override
|
||||
{
|
||||
if (!validatorKeys_.keys)
|
||||
LogicError(
|
||||
"Accessing validation secret key without validator keys");
|
||||
|
||||
return validatorKeys_.keys->secretKey;
|
||||
}
|
||||
|
||||
ValidatorKeys const&
|
||||
getValidatorKeys() const override
|
||||
{
|
||||
return validatorKeys_;
|
||||
}
|
||||
|
||||
NetworkOPs&
|
||||
getOPs() override
|
||||
{
|
||||
@@ -732,6 +756,12 @@ public:
|
||||
return *hashRouter_;
|
||||
}
|
||||
|
||||
// Mutable reference to the application's runtime configuration;
// returned non-const so callers can adjust settings while running.
RuntimeConfig&
getRuntimeConfig() override
{
    return runtimeConfig_;
}
|
||||
|
||||
RCLValidations&
|
||||
getValidations() override
|
||||
{
|
||||
@@ -815,6 +845,15 @@ public:
|
||||
return *txQ_;
|
||||
}
|
||||
|
||||
ConsensusExtensions&
getConsensusExtensions() override
{
    // consensusExtensions_ is created unconditionally in the
    // constructor's initializer list, so this assert should only fire
    // on a construction-order bug.
    XRPL_ASSERT(
        consensusExtensions_,
        "ripple::ApplicationImp::getConsensusExtensions : non-null");
    return *consensusExtensions_;
}
|
||||
|
||||
RelationalDatabase&
|
||||
getRelationalDatabase() override
|
||||
{
|
||||
|
||||
@@ -62,7 +62,9 @@ using SLE = STLedgerEntry;
|
||||
using CachedSLEs = TaggedCache<uint256, SLE const>;
|
||||
|
||||
class CollectorManager;
|
||||
class ConsensusExtensions;
|
||||
class Family;
|
||||
class RuntimeConfig;
|
||||
class HashRouter;
|
||||
class Logs;
|
||||
class LoadFeeTrack;
|
||||
@@ -177,6 +179,8 @@ public:
|
||||
getAmendmentTable() = 0;
|
||||
virtual HashRouter&
|
||||
getHashRouter() = 0;
|
||||
virtual RuntimeConfig&
|
||||
getRuntimeConfig() = 0;
|
||||
virtual LoadFeeTrack&
|
||||
getFeeTrack() = 0;
|
||||
virtual LoadManager&
|
||||
@@ -215,6 +219,8 @@ public:
|
||||
getLedgerCleaner() = 0;
|
||||
virtual LedgerReplayer&
|
||||
getLedgerReplayer() = 0;
|
||||
virtual ConsensusExtensions&
|
||||
getConsensusExtensions() = 0;
|
||||
virtual NetworkOPs&
|
||||
getOPs() = 0;
|
||||
virtual OrderBookDB&
|
||||
@@ -232,6 +238,11 @@ public:
|
||||
virtual std::optional<PublicKey const>
|
||||
getValidationPublicKey() const = 0;
|
||||
|
||||
virtual SecretKey const&
|
||||
getValidationSecretKey() const = 0;
|
||||
|
||||
virtual ValidatorKeys const&
|
||||
getValidatorKeys() const = 0;
|
||||
virtual Resource::Manager&
|
||||
getResourceManager() = 0;
|
||||
virtual PathRequests&
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
#ifndef RIPPLE_APP_MAIN_DATAGRAMMONITOR_H_INCLUDED
|
||||
#define RIPPLE_APP_MAIN_DATAGRAMMONITOR_H_INCLUDED
|
||||
|
||||
#include <xrpld/app/consensus/RCLConsensus.h>
|
||||
#include <xrpld/app/ledger/AcceptedLedger.h>
|
||||
#include <xrpld/app/ledger/InboundLedgers.h>
|
||||
#include <xrpld/app/ledger/LedgerMaster.h>
|
||||
|
||||
407
src/xrpld/app/misc/ExportSigCollector.h
Normal file
407
src/xrpld/app/misc/ExportSigCollector.h
Normal file
@@ -0,0 +1,407 @@
|
||||
#ifndef RIPPLE_APP_MISC_EXPORTSIGCOLLECTOR_H_INCLUDED
|
||||
#define RIPPLE_APP_MISC_EXPORTSIGCOLLECTOR_H_INCLUDED
|
||||
|
||||
#include <xrpl/basics/Buffer.h>
|
||||
#include <xrpl/basics/contract.h>
|
||||
#include <xrpl/protocol/PublicKey.h>
|
||||
#include <algorithm>
|
||||
#include <map>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <set>
|
||||
#include <unordered_map>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
/// Export signature collector for the retriable export approach.
|
||||
///
|
||||
/// Stores multisign signatures from validators for pending ttEXPORT
|
||||
/// transactions. Signatures arrive via two paths:
|
||||
///
|
||||
/// 1. Proposal ingestion (onTrustedPeerMessage) — post-checkSign,
|
||||
/// sender-bound, and (when possible) multisign-verified.
|
||||
/// 2. SHAMap merge (onAcquiredSidecarSet) — trusted + verified.
|
||||
///
|
||||
/// Signatures are either **verified** (cryptographically checked against
|
||||
/// buildMultiSigningData) or **unverified** (stored on proposal-level
|
||||
/// trust alone, e.g. when the ttEXPORT tx isn't in the open ledger yet
|
||||
/// due to relay ordering).
|
||||
///
|
||||
/// Only verified signatures count toward quorum, appear in SHAMap
|
||||
/// convergence, and are assembled into the final export blob.
|
||||
/// Unverified sigs are a local cache that can be upgraded to verified
|
||||
/// via `upgradeSignature()` when the tx becomes available (e.g. in
|
||||
/// Export::doApply which always has the tx).
|
||||
///
|
||||
//@@start export-sig-collector-mutex
|
||||
/// Thread-safe.
|
||||
class ExportSigCollector
|
||||
{
|
||||
mutable std::mutex mutex_;
|
||||
//@@end export-sig-collector-mutex
|
||||
|
||||
struct SigEntry
|
||||
{
|
||||
/// All validators that have contributed (verified or unverified).
|
||||
std::set<PublicKey> validators;
|
||||
/// Actual multisign signature bytes keyed by validator pubkey.
|
||||
/// Empty buffers mean pubkey-only (standalone mode).
|
||||
std::map<PublicKey, Buffer> signatures;
|
||||
/// Validators whose sigs have been cryptographically verified.
|
||||
/// Only these count toward quorum and appear in SHAMap/snapshot.
|
||||
std::set<PublicKey> verified;
|
||||
std::uint32_t firstSeenSeq{0};
|
||||
};
|
||||
|
||||
std::unordered_map<uint256, SigEntry> sigs_;
|
||||
std::set<uint256> sentThisRound_;
|
||||
|
||||
static constexpr std::uint32_t maxStaleLedgers = 256;
|
||||
|
||||
void
|
||||
touchSeq(SigEntry& entry, std::uint32_t seq)
|
||||
{
|
||||
if (entry.firstSeenSeq == 0 && seq > 0)
|
||||
entry.firstSeenSeq = seq;
|
||||
}
|
||||
|
||||
public:
|
||||
/// Store a signature that has been cryptographically verified
|
||||
/// against buildMultiSigningData + verify().
|
||||
void
|
||||
addVerifiedSignature(
|
||||
uint256 const& txnHash,
|
||||
PublicKey const& validator,
|
||||
Buffer const& signature,
|
||||
std::uint32_t currentSeq = 0)
|
||||
{
|
||||
XRPL_ASSERT(
|
||||
signature.size() > 0,
|
||||
"ripple::ExportSigCollector::addVerifiedSignature : "
|
||||
"non-empty signature");
|
||||
std::lock_guard lock(mutex_);
|
||||
auto& entry = sigs_[txnHash];
|
||||
entry.validators.insert(validator);
|
||||
entry.signatures[validator] = signature;
|
||||
entry.verified.insert(validator);
|
||||
touchSeq(entry, currentSeq);
|
||||
}
|
||||
|
||||
/// Store a signature from a trusted source (checkSign + sender
|
||||
/// binding passed) but without multisign content verification.
|
||||
/// Used when the ttEXPORT tx isn't in the open ledger yet due
|
||||
/// to relay ordering. Will be upgraded to verified via
|
||||
/// upgradeSignature() when the tx becomes available.
|
||||
///
|
||||
/// Does NOT count toward quorum or appear in SHAMap/snapshot.
|
||||
void
|
||||
addUnverifiedSignature(
|
||||
uint256 const& txnHash,
|
||||
PublicKey const& validator,
|
||||
Buffer const& signature,
|
||||
std::uint32_t currentSeq = 0)
|
||||
{
|
||||
XRPL_ASSERT(
|
||||
signature.size() > 0,
|
||||
"ripple::ExportSigCollector::addUnverifiedSignature : "
|
||||
"non-empty signature");
|
||||
std::lock_guard lock(mutex_);
|
||||
auto& entry = sigs_[txnHash];
|
||||
entry.validators.insert(validator);
|
||||
// Don't overwrite a verified sig with an unverified one.
|
||||
if (entry.verified.find(validator) == entry.verified.end())
|
||||
entry.signatures[validator] = signature;
|
||||
touchSeq(entry, currentSeq);
|
||||
}
|
||||
|
||||
/// Upgrade a previously unverified sig to verified.
|
||||
/// Called from Export::doApply after verifying against the inner tx.
|
||||
/// The caller passes the exact buffer it verified; we only promote
|
||||
/// if the stored buffer still matches (guards against concurrent
|
||||
/// overwrites between unverifiedSignatures() and this call).
|
||||
void
|
||||
upgradeSignature(
|
||||
uint256 const& txnHash,
|
||||
PublicKey const& validator,
|
||||
Buffer const& verifiedBuf,
|
||||
std::uint32_t currentSeq = 0)
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
auto it = sigs_.find(txnHash);
|
||||
if (it == sigs_.end())
|
||||
return;
|
||||
auto sit = it->second.signatures.find(validator);
|
||||
if (sit == it->second.signatures.end() || sit->second.size() == 0)
|
||||
return;
|
||||
// Only promote if the stored buffer is the same one we verified.
|
||||
if (!(sit->second == verifiedBuf))
|
||||
return;
|
||||
it->second.verified.insert(validator);
|
||||
touchSeq(it->second, currentSeq);
|
||||
}
|
||||
|
||||
/// Remove a signature if the stored buffer still matches the caller's
|
||||
/// verified-invalid buffer. This keeps stale unverified data from being
|
||||
/// retried forever while avoiding races with a newer replacement.
|
||||
bool
|
||||
removeSignature(
|
||||
uint256 const& txnHash,
|
||||
PublicKey const& validator,
|
||||
Buffer const& expectedBuf)
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
auto it = sigs_.find(txnHash);
|
||||
if (it == sigs_.end())
|
||||
return false;
|
||||
|
||||
auto& entry = it->second;
|
||||
auto sit = entry.signatures.find(validator);
|
||||
if (sit == entry.signatures.end() || !(sit->second == expectedBuf))
|
||||
return false;
|
||||
|
||||
entry.signatures.erase(sit);
|
||||
entry.validators.erase(validator);
|
||||
entry.verified.erase(validator);
|
||||
|
||||
if (entry.signatures.empty() && entry.validators.empty() &&
|
||||
entry.verified.empty())
|
||||
sigs_.erase(it);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/// Store a pubkey-only entry (no real signature). Used in
|
||||
/// standalone mode where quorum counting is sufficient.
|
||||
/// Treated as verified (standalone has no consensus to verify against).
|
||||
void
|
||||
addStandaloneSignature(
|
||||
uint256 const& txnHash,
|
||||
PublicKey const& validator,
|
||||
std::uint32_t currentSeq = 0)
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
auto& entry = sigs_[txnHash];
|
||||
entry.validators.insert(validator);
|
||||
if (entry.signatures.find(validator) == entry.signatures.end())
|
||||
entry.signatures[validator] = Buffer{};
|
||||
entry.verified.insert(validator);
|
||||
touchSeq(entry, currentSeq);
|
||||
}
|
||||
|
||||
/// Check if a cryptographically verified signature exists.
|
||||
/// Used to skip redundant verify() calls when the same sig
|
||||
/// arrives via multiple paths (proposal + SHAMap merge).
|
||||
bool
|
||||
hasVerifiedSignature(uint256 const& txnHash, PublicKey const& validator)
|
||||
const
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
auto it = sigs_.find(txnHash);
|
||||
if (it == sigs_.end())
|
||||
return false;
|
||||
return it->second.verified.count(validator) > 0;
|
||||
}
|
||||
|
||||
/// Return unverified validators for a given txHash.
|
||||
/// Used by Export::doApply to find sigs that need verification.
|
||||
std::map<PublicKey, Buffer>
|
||||
unverifiedSignatures(uint256 const& txnHash) const
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
std::map<PublicKey, Buffer> result;
|
||||
auto it = sigs_.find(txnHash);
|
||||
if (it == sigs_.end())
|
||||
return result;
|
||||
for (auto const& [pk, buf] : it->second.signatures)
|
||||
{
|
||||
if (buf.size() > 0 && it->second.verified.count(pk) == 0)
|
||||
result[pk] = buf;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
bool
|
||||
hasUnverifiedSignatures() const
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
for (auto const& [_, entry] : sigs_)
|
||||
{
|
||||
for (auto const& [pk, buf] : entry.signatures)
|
||||
{
|
||||
if (buf.size() > 0 && entry.verified.count(pk) == 0)
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/// Count of VERIFIED signatures only.
|
||||
template <class IncludeValidator>
|
||||
std::size_t
|
||||
signatureCount(uint256 const& txnHash, IncludeValidator includeValidator)
|
||||
const
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
auto it = sigs_.find(txnHash);
|
||||
if (it == sigs_.end())
|
||||
return 0;
|
||||
|
||||
return std::count_if(
|
||||
it->second.verified.begin(),
|
||||
it->second.verified.end(),
|
||||
includeValidator);
|
||||
}
|
||||
|
||||
/// Count of VERIFIED signatures only.
|
||||
std::size_t
|
||||
signatureCount(uint256 const& txnHash) const
|
||||
{
|
||||
return signatureCount(txnHash, [](PublicKey const&) { return true; });
|
||||
}
|
||||
|
||||
void
|
||||
clear(uint256 const& txnHash)
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
sigs_.erase(txnHash);
|
||||
}
|
||||
|
||||
void
|
||||
clearAll()
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
sigs_.clear();
|
||||
sentThisRound_.clear();
|
||||
}
|
||||
|
||||
/// Get a snapshot of VERIFIED sigs (pubkeys only) for building
|
||||
/// the SHAMap. Only verified sigs appear in convergence.
|
||||
std::unordered_map<uint256, std::set<PublicKey>>
|
||||
snapshot() const
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
std::unordered_map<uint256, std::set<PublicKey>> result;
|
||||
for (auto const& [hash, entry] : sigs_)
|
||||
{
|
||||
if (!entry.verified.empty())
|
||||
result[hash] = entry.verified;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/// Get a snapshot of VERIFIED signatures including sig buffers.
|
||||
template <class IncludeValidator>
|
||||
std::unordered_map<uint256, std::map<PublicKey, Buffer>>
|
||||
snapshotWithSigs(IncludeValidator includeValidator) const
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
std::unordered_map<uint256, std::map<PublicKey, Buffer>> result;
|
||||
for (auto const& [hash, entry] : sigs_)
|
||||
{
|
||||
std::map<PublicKey, Buffer> verifiedSigs;
|
||||
for (auto const& pk : entry.verified)
|
||||
{
|
||||
if (!includeValidator(pk))
|
||||
continue;
|
||||
|
||||
auto sit = entry.signatures.find(pk);
|
||||
if (sit != entry.signatures.end())
|
||||
verifiedSigs[pk] = sit->second;
|
||||
}
|
||||
if (!verifiedSigs.empty())
|
||||
result[hash] = std::move(verifiedSigs);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/// Get a snapshot of VERIFIED signatures including sig buffers.
|
||||
std::unordered_map<uint256, std::map<PublicKey, Buffer>>
|
||||
snapshotWithSigs() const
|
||||
{
|
||||
return snapshotWithSigs([](PublicKey const&) { return true; });
|
||||
}
|
||||
|
||||
/// Atomic quorum check + snapshot for a single txHash.
|
||||
/// Returns VERIFIED signatures if quorum is met, nullopt otherwise.
|
||||
template <class IncludeValidator>
|
||||
std::optional<std::map<PublicKey, Buffer>>
|
||||
checkQuorumAndSnapshot(
|
||||
uint256 const& txnHash,
|
||||
std::size_t threshold,
|
||||
IncludeValidator includeValidator) const
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
auto it = sigs_.find(txnHash);
|
||||
if (it == sigs_.end())
|
||||
return std::nullopt;
|
||||
|
||||
std::map<PublicKey, Buffer> result;
|
||||
for (auto const& pk : it->second.verified)
|
||||
{
|
||||
if (!includeValidator(pk))
|
||||
continue;
|
||||
|
||||
auto sit = it->second.signatures.find(pk);
|
||||
XRPL_ASSERT(
|
||||
sit != it->second.signatures.end(),
|
||||
"ripple::ExportSigCollector::checkQuorumAndSnapshot : "
|
||||
"verified key must exist in signatures map");
|
||||
XRPL_ASSERT(
|
||||
sit->second.size() > 0,
|
||||
"ripple::ExportSigCollector::checkQuorumAndSnapshot : "
|
||||
"verified signature must be non-empty");
|
||||
if (sit != it->second.signatures.end())
|
||||
result[pk] = sit->second;
|
||||
}
|
||||
|
||||
if (result.size() < threshold)
|
||||
return std::nullopt;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/// Atomic quorum check + snapshot for a single txHash.
|
||||
/// Returns VERIFIED signatures if quorum is met, nullopt otherwise.
|
||||
std::optional<std::map<PublicKey, Buffer>>
|
||||
checkQuorumAndSnapshot(uint256 const& txnHash, std::size_t threshold) const
|
||||
{
|
||||
return checkQuorumAndSnapshot(
|
||||
txnHash, threshold, [](PublicKey const&) { return true; });
|
||||
}
|
||||
|
||||
/// Remove entries older than maxStaleLedgers.
|
||||
void
|
||||
cleanupStale(std::uint32_t currentSeq)
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
for (auto it = sigs_.begin(); it != sigs_.end();)
|
||||
{
|
||||
if (it->second.firstSeenSeq > 0 &&
|
||||
currentSeq > it->second.firstSeenSeq + maxStaleLedgers)
|
||||
it = sigs_.erase(it);
|
||||
else
|
||||
++it;
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if we haven't sent our sig for this tx yet this round.
|
||||
/// Marks it as sent on first call.
|
||||
bool
|
||||
markSent(uint256 const& txnHash)
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
return sentThisRound_.insert(txnHash).second;
|
||||
}
|
||||
|
||||
/// Clear per-round state. Call at the start of each consensus round.
|
||||
void
|
||||
clearRound()
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
sentThisRound_.clear();
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
#endif
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user