Compare commits

..

64 Commits

Author SHA1 Message Date
RichardAH
c2e3fd4666 Merge branch 'dev' into hook-api-unittest 2026-02-15 20:26:38 +10:00
tequ
8989b763ff clang-format 2026-01-28 14:18:02 +09:00
tequ
a81476a175 Avoid creating HookAPI instances on every hook API call
Add api() method to HookContext that lazily initializes a single
HookAPI instance, eliminating repeated instantiation overhead.
Simplify call sites to use hookCtx.api().xxx() directly.
2026-01-28 14:09:01 +09:00
RichardAH
6638b08ab0 Merge branch 'dev' into hook-api-unittest 2026-01-28 13:17:22 +10:00
tequ
96aa775f31 Merge branch 'dev' into hook-api-unittest 2026-01-07 17:56:54 +09:00
tequ
048c4cfd7b Merge branch 'dev' into hook-api-unittest 2025-12-24 12:23:09 +09:00
tequ
c5cf0cf32c Merge branch 'dev' into hook-api-unittest 2025-12-17 10:03:08 +09:00
tequ
c2f948173d refactor emit 2025-12-03 15:15:19 +09:00
tequ
df357438d7 add state tests 2025-12-03 13:43:02 +09:00
tequ
59e5650521 add sto_erase tests 2025-12-02 22:10:32 +09:00
tequ
a875fd36b3 fix build error 2025-12-02 14:33:53 +09:00
tequ
5dfbadc100 Merge remote-tracking branch 'upstream/dev' into hook-api-unittest 2025-12-02 13:23:01 +09:00
tequ
e28c1db0ef fix header 2025-11-29 00:40:58 +09:00
tequ
e9011d0433 refactor sto_emplace test 2025-11-28 20:03:24 +09:00
tequ
d0eb251c1e add sto_validate test 2025-11-28 16:47:06 +09:00
tequ
88b993257b add sto_subfield test 2025-11-28 16:37:35 +09:00
tequ
6806fa063e add sto_subarray test 2025-11-28 16:24:55 +09:00
tequ
0502f12ee3 add hook_skip test 2025-11-28 15:48:19 +09:00
tequ
181090278f add hook_hash test 2025-11-28 15:35:35 +09:00
tequ
255a516a29 Merge branch 'dev' into hook-api-unittest 2025-11-28 14:44:34 +09:00
tequ
df5b6c9528 add sto_emplace tests 2025-11-25 18:13:07 +09:00
tequ
15a4497adf add xpop_slot test 2025-11-25 14:14:58 +09:00
tequ
71d52585ce add meta_slot test 2025-11-25 13:17:27 +09:00
tequ
6b0271c2a8 add ledger_keylet test 2025-11-25 12:47:25 +09:00
tequ
4f9794461e add tests for fixEtxnFeeBase 2025-11-25 12:05:49 +09:00
tequ
abac045e20 Merge remote-tracking branch 'upstream/dev' into hook-api-unittest 2025-11-25 11:54:30 +09:00
tequ
641197d4dd add util tests 2025-11-24 15:06:39 +09:00
tequ
3d4ca8417a add slot tests 2025-11-24 15:06:30 +09:00
tequ
bb96e63e2e refactor pattern tests 2025-11-24 01:22:19 +09:00
tequ
be9cf1c8f6 add tests 2025-11-24 00:35:44 +09:00
tequ
8e5ea9a44b remove un-needed header 2025-11-23 11:26:46 +09:00
tequ
f1b64e1f28 add etxn_fee_base test 2025-11-20 20:34:42 +09:00
tequ
a0d46f0f85 move createApplyContext to private 2025-11-20 20:10:40 +09:00
tequ
1a1b1bae83 add emit tests 2025-11-20 19:27:19 +09:00
tequ
0e9946e186 add HookAPI_test.cpp 2025-11-20 16:20:20 +09:00
tequ
10bddc173a fix 2025-11-20 15:14:38 +09:00
tequ
a2e3ac4f66 fix 2025-11-12 16:44:20 +09:00
tequ
6704f2b8b6 Merge branch 'dev' into hook-api-unittest 2025-11-12 16:19:34 +09:00
tequ
11d0c3486c fix comment 2025-10-24 17:11:07 +09:00
tequ
bf7ea28b3d Merge remote-tracking branch 'upstream/dev' into hook-api-unittest 2025-10-24 16:54:31 +09:00
tequ
010951e8d7 fix gcc build error 2025-10-06 17:02:01 +09:00
tequ
19032e5a2b use uint256 instead ripple::base_uint<256> 2025-10-06 14:28:06 +09:00
tequ
c83bfe1987 add slot_type API 2025-10-06 13:49:07 +09:00
tequ
bc7a28f6af add sto_float API 2025-10-06 13:01:46 +09:00
tequ
3db217058b add emit tests 2025-10-06 11:30:08 +09:00
tequ
bf9765753f refactor 2025-09-29 16:48:19 +09:00
tequ
cf59180660 add util_raddr, util_accid 2025-09-29 13:55:23 +09:00
tequ
4ff8d688e2 add sto APIs 2025-09-29 13:38:05 +09:00
tequ
e3c4644151 float_sto_set 2025-09-29 12:50:03 +09:00
tequ
a62bac3fcd add meta_slot, xpop_slot 2025-09-29 12:25:41 +09:00
tequ
27e4e4b510 add slot APIs 2025-09-29 12:18:01 +09:00
tequ
962fdbceb6 sort HookAPI.cpp 2025-09-29 11:31:00 +09:00
tequ
2466289a1e add util_verify, util_sha512h 2025-09-29 11:17:16 +09:00
tequ
7b79e7d390 state_foreign, state_foreign_set 2025-09-23 18:35:48 +09:00
tequ
4d33603f39 ledger_last_time 2025-09-23 16:17:15 +09:00
tequ
3f65b57997 ledger Hook APIs 2025-09-23 14:53:50 +09:00
tequ
22c71a9801 fix to const params 2025-09-23 14:33:03 +09:00
tequ
0290b73a9e hook Hook APIs 2025-09-23 14:28:28 +09:00
tequ
cf9eef03e5 Merge remote-tracking branch 'upstream/dev' into hook-api-unittest 2025-09-23 13:14:30 +09:00
tequ
dfd93e9ab2 etxn APIs 2025-09-06 17:08:29 +09:00
tequ
ed68a53f6c otxn Hook APIs 2025-09-06 03:59:19 +09:00
tequ
6e49f7d1b1 add float APIs 2025-09-06 02:42:34 +09:00
tequ
4a36ca527e refactor 2025-09-05 21:12:17 +09:00
tequ
c732609f46 Hook API Unit Testing 2025-09-05 19:55:46 +09:00
47 changed files with 38 additions and 3290 deletions

View File

@@ -1,24 +0,0 @@
name: Guard Checker Build
on:
push:
pull_request:
jobs:
guard-checker-build:
strategy:
fail-fast: false
matrix:
include:
- run-on: ubuntu-latest
- run-on: macos-latest
runs-on: ${{ matrix.run-on }}
name: Guard Checker Build - ${{ matrix.run-on }}
steps:
- name: Checkout repository
uses: actions/checkout@v6
- name: Build Guard Checker
run: |
cd src/ripple/app/hook
make guard_checker

View File

@@ -737,7 +737,6 @@ if (tests)
src/test/app/BaseFee_test.cpp
src/test/app/Check_test.cpp
src/test/app/ClaimReward_test.cpp
src/test/app/ConsensusEntropy_test.cpp
src/test/app/Cron_test.cpp
src/test/app/Clawback_test.cpp
src/test/app/CrossingLimits_test.cpp
@@ -851,7 +850,6 @@ if (tests)
#]===============================]
src/test/consensus/ByzantineFailureSim_test.cpp
src/test/consensus/Consensus_test.cpp
src/test/consensus/ExtendedPosition_test.cpp
src/test/consensus/DistributedValidatorsSim_test.cpp
src/test/consensus/LedgerTiming_test.cpp
src/test/consensus/LedgerTrie_test.cpp

View File

@@ -60,7 +60,7 @@ git-subtree. See those directories' README files for more details.
- [Xrpl Documentation](https://xrpl.org)
- [Xahau Documentation](https://xahau.network/)
- [Hooks Technical Documentation](https://xrpl-hooks.readme.io/)
- **Explorers**: Explore the Xahau Network using various explorers:
- **Explorers**: Explore the Xahau ledger using various explorers:
- [xahauexplorer.com](https://xahauexplorer.com)
- [xahscan.com](https://xahscan.com)
- [xahau.xrpl.org](https://xahau.xrpl.org)

View File

@@ -62,11 +62,11 @@ For these complaints or reports, please [contact our support team](mailto:bugs@x
### The following type of security problems are excluded
1. **In scope**. Only bugs in software under the scope of the program qualify. Currently, that means `xahaud` and `xahau-lib`.
2. **Relevant**. A security issue, posing a danger to user funds, privacy or the operation of the Xahau Network.
2. **Relevant**. A security issue, posing a danger to user funds, privacy or the operation of the Xahau Ledger.
3. **Original and previously unknown**. Bugs that are already known and discussed in public do not qualify. Previously reported bugs, even if publicly unknown, are not eligible.
4. **Specific**. We welcome general security advice or recommendations, but we cannot pay bounties for that.
5. **Fixable**. There has to be something we can do to permanently fix the problem. Note that bugs in other peoples software may still qualify in some cases. For example, if you find a bug in a library that we use which can compromise the security of software that is in scope and we can get it fixed, you may qualify for a bounty.
6. **Unused**. If you use the exploit to attack the Xahau Network, you do not qualify for a bounty. If you report a vulnerability used in an ongoing or past attack and there is specific, concrete evidence that suggests you are the attacker we reserve the right not to pay a bounty.
6. **Unused**. If you use the exploit to attack the Xahau Ledger, you do not qualify for a bounty. If you report a vulnerability used in an ongoing or past attack and there is specific, concrete evidence that suggests you are the attacker we reserve the right not to pay a bounty.
Please note: Reports that are lacking any proof (such as screenshots or other data), detailed information or details on how to reproduce any unexpected result will be investigated but will not be eligible for any reward.

View File

@@ -47,6 +47,5 @@
#define MEM_OVERLAP -43
#define TOO_MANY_STATE_MODIFICATIONS -44
#define TOO_MANY_NAMESPACES -45
#define TOO_LITTLE_ENTROPY -46
#define HOOK_ERROR_CODES
#endif //HOOK_ERROR_CODES

View File

@@ -329,11 +329,5 @@ meta_slot(uint32_t slot_no);
extern int64_t
xpop_slot(uint32_t slot_no_tx, uint32_t slot_no_meta);
extern int64_t
dice(uint32_t sides);
extern int64_t
random(uint32_t write_ptr, uint32_t write_len);
#define HOOK_EXTERN
#endif // HOOK_EXTERN

View File

@@ -16,7 +16,6 @@
#define sfHookExecutionIndex ((1U << 16U) + 19U)
#define sfHookApiVersion ((1U << 16U) + 20U)
#define sfHookStateScale ((1U << 16U) + 21U)
#define sfEntropyCount ((1U << 16U) + 99U)
#define sfNetworkID ((2U << 16U) + 1U)
#define sfFlags ((2U << 16U) + 2U)
#define sfSourceTag ((2U << 16U) + 3U)

View File

@@ -47,4 +47,3 @@
#define ttUNL_MODIFY 102
#define ttEMIT_FAILURE 103
#define ttUNL_REPORT 104
#define ttCONSENSUS_ENTROPY 105

View File

@@ -27,7 +27,6 @@
#include <ripple/app/ledger/LocalTxs.h>
#include <ripple/app/ledger/OpenLedger.h>
#include <ripple/app/misc/AmendmentTable.h>
#include <ripple/app/misc/CanonicalTXSet.h>
#include <ripple/app/misc/HashRouter.h>
#include <ripple/app/misc/LoadFeeTrack.h>
#include <ripple/app/misc/NegativeUNLVote.h>
@@ -39,19 +38,14 @@
#include <ripple/basics/random.h>
#include <ripple/beast/core/LexicalCast.h>
#include <ripple/consensus/LedgerTiming.h>
#include <ripple/crypto/csprng.h>
#include <ripple/nodestore/DatabaseShard.h>
#include <ripple/overlay/Overlay.h>
#include <ripple/overlay/predicates.h>
#include <ripple/protocol/BuildInfo.h>
#include <ripple/protocol/Feature.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/TxFlags.h>
#include <ripple/protocol/TxFormats.h>
#include <ripple/protocol/digest.h>
#include <algorithm>
#include <cstring>
#include <mutex>
namespace ripple {
@@ -171,14 +165,10 @@ RCLConsensus::Adaptor::share(RCLCxPeerPos const& peerPos)
prop.set_proposeseq(proposal.proposeSeq());
prop.set_closetime(proposal.closeTime().time_since_epoch().count());
// Serialize full ExtendedPosition (includes RNG leaves)
Serializer positionData;
proposal.position().add(positionData);
auto const posSlice = positionData.slice();
prop.set_currenttxhash(posSlice.data(), posSlice.size());
prop.set_currenttxhash(
proposal.position().begin(), proposal.position().size());
prop.set_previousledger(
proposal.prevLedger().begin(), proposal.prevLedger().size());
proposal.prevLedger().begin(), proposal.position().size());
auto const pk = peerPos.publicKey().slice();
prop.set_nodepubkey(pk.data(), pk.size());
@@ -219,28 +209,8 @@ RCLConsensus::Adaptor::propose(RCLCxPeerPos::Proposal const& proposal)
protocol::TMProposeSet prop;
// Serialize full ExtendedPosition (includes RNG leaves)
Serializer positionData;
proposal.position().add(positionData);
auto const posSlice = positionData.slice();
prop.set_currenttxhash(posSlice.data(), posSlice.size());
JLOG(j_.info()) << "RNG: propose seq=" << proposal.proposeSeq()
<< " wireBytes=" << posSlice.size() << " commit="
<< (proposal.position().myCommitment ? "yes" : "no")
<< " reveal="
<< (proposal.position().myReveal ? "yes" : "no");
// Self-seed our own reveal so we count toward reveal quorum
// (harvestRngData only sees peer proposals, not our own).
if (proposal.position().myReveal)
{
auto const ownNodeId = validatorKeys_.nodeID;
pendingReveals_[ownNodeId] = *proposal.position().myReveal;
nodeIdToKey_[ownNodeId] = validatorKeys_.publicKey;
JLOG(j_.debug()) << "RNG: self-seeded reveal for " << ownNodeId;
}
prop.set_currenttxhash(
proposal.position().begin(), proposal.position().size());
prop.set_previousledger(
proposal.prevLedger().begin(), proposal.prevLedger().size());
prop.set_proposeseq(proposal.proposeSeq());
@@ -255,31 +225,6 @@ RCLConsensus::Adaptor::propose(RCLCxPeerPos::Proposal const& proposal)
prop.set_signature(sig.data(), sig.size());
// Store our own proposal proof for embedding in SHAMap entries.
// commitProofs_ gets seq=0 only (deterministic commitSet).
// proposalProofs_ gets the latest with a reveal (for entropySet).
if (proposal.position().myCommitment || proposal.position().myReveal)
{
auto makeProof = [&]() {
ProposalProof proof;
proof.proposeSeq = proposal.proposeSeq();
proof.closeTime = static_cast<std::uint32_t>(
proposal.closeTime().time_since_epoch().count());
proof.prevLedger = proposal.prevLedger();
Serializer s;
proposal.position().add(s);
proof.positionData = std::move(s);
proof.signature = Buffer(sig.data(), sig.size());
return proof;
};
if (proposal.position().myCommitment && proposal.proposeSeq() == 0)
commitProofs_.emplace(validatorKeys_.nodeID, makeProof());
if (proposal.position().myReveal)
proposalProofs_[validatorKeys_.nodeID] = makeProof();
}
auto const suppression = proposalUniqueId(
proposal.position(),
proposal.prevLedger(),
@@ -440,51 +385,12 @@ RCLConsensus::Adaptor::onClose(
// Needed because of the move below.
auto const setHash = initialSet->getHash().as_uint256();
ExtendedPosition pos{setHash};
// Bootstrap commit-reveal: generate entropy and include commitment
// in our very first proposal so peers can collect it during consensus.
//
// This is gated on `proposing` — a node that just restarted enters
// as proposing=false (observing) and must watch at least one full
// round before consensus promotes it to proposing. During those
// observation rounds it cannot contribute to the RNG pipeline at
// all: no commitment, no reveal, no SHAMap entries. The surviving
// proposers will close those rounds with fewer commits (possibly
// falling back to ZERO entropy) until the rejoiner starts proposing.
if (proposing && prevLedger->rules().enabled(featureConsensusEntropy))
{
cacheUNLReport();
generateEntropySecret();
pos.myCommitment = sha512Half(
myEntropySecret_,
validatorKeys_.publicKey,
prevLedger->info().seq + 1);
// Seed our own commitment into pendingCommits_ so we count
// toward quorum (harvestRngData only sees peer proposals).
auto const ownNodeId = validatorKeys_.nodeID;
pendingCommits_[ownNodeId] = *pos.myCommitment;
nodeIdToKey_[ownNodeId] = validatorKeys_.publicKey;
JLOG(j_.info()) << "RNG: onClose bootstrap seq="
<< (prevLedger->info().seq + 1)
<< " commitment=" << *pos.myCommitment;
}
else
{
JLOG(j_.debug()) << "RNG: onClose skipped (proposing=" << proposing
<< " amendment="
<< prevLedger->rules().enabled(featureConsensusEntropy)
<< ")";
}
return Result{
std::move(initialSet),
RCLCxPeerPos::Proposal{
initialLedger->info().parentHash,
RCLCxPeerPos::Proposal::seqJoin,
std::move(pos),
setHash,
closeTime,
app_.timeKeeper().closeTime(),
validatorKeys_.nodeID}};
@@ -606,13 +512,6 @@ RCLConsensus::Adaptor::doAccept(
}
}
// Inject consensus entropy pseudo-transaction (if amendment enabled)
// This must happen before buildLCL so the entropy tx is in the ledger
if (prevLedger.ledger_->rules().enabled(featureConsensusEntropy))
injectEntropyPseudoTx(retriableTxs, prevLedger.seq() + 1);
else
clearRngState();
auto built = buildLCL(
prevLedger,
retriableTxs,
@@ -1148,815 +1047,6 @@ RCLConsensus::Adaptor::updateOperatingMode(std::size_t const positions) const
app_.getOPs().setMode(OperatingMode::CONNECTED);
}
//------------------------------------------------------------------------------
// RNG Helper Methods
std::size_t
RCLConsensus::Adaptor::quorumThreshold() const
{
// Prefer expected proposers (recent proposers ∩ UNL) — this
// adapts to actual network conditions rather than relying on
// the potentially stale UNL Report. Falls back to full
// UNL Report for cold boot (first round).
//
// Round 1: threshold based on full UNL (conservative)
// Round 2+: threshold based on who actually proposed last round
auto const base = expectedProposers_.empty() ? unlReportNodeIds_.size()
: expectedProposers_.size();
if (base == 0)
return 1; // safety: need at least one commit
return (base * 80 + 99) / 100;
}
void
RCLConsensus::Adaptor::setExpectedProposers(hash_set<NodeID> proposers)
{
if (!proposers.empty())
{
// Intersect with active UNL — only expect commits from
// validators we trust. Non-UNL proposers are ignored.
hash_set<NodeID> filtered;
for (auto const& id : proposers)
{
if (unlReportNodeIds_.count(id))
filtered.insert(id);
}
filtered.insert(validatorKeys_.nodeID);
expectedProposers_ = std::move(filtered);
JLOG(j_.debug()) << "RNG: expectedProposers from recent proposers: "
<< expectedProposers_.size() << " (filtered from "
<< proposers.size() << ")";
return;
}
// First round (no previous proposers): fall back to UNL Report.
// cacheUNLReport() was called just before this, so it's populated.
if (!unlReportNodeIds_.empty())
{
expectedProposers_ = unlReportNodeIds_;
JLOG(j_.debug()) << "RNG: expectedProposers from UNL Report: "
<< expectedProposers_.size();
return;
}
// No data at all (shouldn't happen — cacheUNLReport falls back to
// trusted keys). Leave empty → hasQuorumOfCommits uses 80% fallback.
JLOG(j_.warn()) << "RNG: no expectedProposers available";
}
std::size_t
RCLConsensus::Adaptor::pendingCommitCount() const
{
return pendingCommits_.size();
}
bool
RCLConsensus::Adaptor::hasQuorumOfCommits() const
{
if (!expectedProposers_.empty())
{
// Wait for commits from all expected proposers.
// rngPIPELINE_TIMEOUT is the safety valve for dead nodes.
for (auto const& id : expectedProposers_)
{
if (pendingCommits_.find(id) == pendingCommits_.end())
{
JLOG(j_.debug())
<< "RNG: hasQuorumOfCommits? " << pendingCommits_.size()
<< "/" << expectedProposers_.size() << " -> no";
return false;
}
}
JLOG(j_.debug()) << "RNG: hasQuorumOfCommits? "
<< pendingCommits_.size() << "/"
<< expectedProposers_.size()
<< " -> YES (all expected)";
return true;
}
// Fallback: 80% of active UNL (cold boot, no expected set)
auto threshold = quorumThreshold();
bool result = pendingCommits_.size() >= threshold;
JLOG(j_.debug()) << "RNG: hasQuorumOfCommits? " << pendingCommits_.size()
<< "/" << threshold << " -> " << (result ? "YES" : "no")
<< " (80% fallback)";
return result;
}
bool
RCLConsensus::Adaptor::hasMinimumReveals() const
{
// Wait for reveals from ALL committers, not just 80%. The commit
// set is deterministic (SHAMap agreed), so we know exactly which
// validators should reveal. Waiting for all of them ensures every
// node builds the same entropy set. rngPIPELINE_TIMEOUT in
// Consensus.h is the safety valve for nodes that crash/partition
// between commit and reveal.
auto const expected = pendingCommits_.size();
bool result = pendingReveals_.size() >= expected;
JLOG(j_.debug()) << "RNG: hasMinimumReveals? " << pendingReveals_.size()
<< "/" << expected << " -> " << (result ? "YES" : "no");
return result;
}
bool
RCLConsensus::Adaptor::hasAnyReveals() const
{
return !pendingReveals_.empty();
}
uint256
RCLConsensus::Adaptor::buildCommitSet(LedgerIndex seq)
{
//@@start rng-build-commit-set
auto map =
std::make_shared<SHAMap>(SHAMapType::TRANSACTION, app_.getNodeFamily());
map->setUnbacked();
// NOTE: avoid structured bindings in for-loops containing lambdas —
// clang-14 (CI) rejects capturing them (P2036R3 not implemented).
for (auto const& entry : pendingCommits_)
{
auto const& nid = entry.first;
auto const& commit = entry.second;
if (!isUNLReportMember(nid))
continue;
auto kit = nodeIdToKey_.find(nid);
if (kit == nodeIdToKey_.end())
continue;
// Encode the NodeID into sfAccount so handleAcquiredRngSet can
// recover it without recomputing (master vs signing key issue).
AccountID acctId;
std::memcpy(acctId.data(), nid.data(), acctId.size());
STTx tx(ttCONSENSUS_ENTROPY, [&](auto& obj) {
obj.setFieldU32(sfFlags, tfEntropyCommit);
obj.setFieldU32(sfLedgerSequence, seq);
obj.setAccountID(sfAccount, acctId);
obj.setFieldU32(sfSequence, 0);
obj.setFieldAmount(sfFee, STAmount{});
obj.setFieldH256(sfDigest, commit);
obj.setFieldVL(sfSigningPubKey, kit->second.slice());
auto proofIt = commitProofs_.find(nid);
if (proofIt != commitProofs_.end())
obj.setFieldVL(sfBlob, serializeProof(proofIt->second));
});
Serializer s(2048);
tx.add(s);
map->addItem(
SHAMapNodeType::tnTRANSACTION_NM,
make_shamapitem(tx.getTransactionID(), s.slice()));
}
map = map->snapShot(false);
commitSetMap_ = map;
auto const hash = map->getHash().as_uint256();
inboundTransactions_.giveSet(hash, map, false);
JLOG(j_.debug()) << "RNG: built commitSet SHAMap hash=" << hash
<< " entries=" << pendingCommits_.size();
return hash;
//@@end rng-build-commit-set
}
uint256
RCLConsensus::Adaptor::buildEntropySet(LedgerIndex seq)
{
//@@start rng-build-entropy-set
auto map =
std::make_shared<SHAMap>(SHAMapType::TRANSACTION, app_.getNodeFamily());
map->setUnbacked();
// NOTE: avoid structured bindings — clang-14 can't capture them (P2036R3).
for (auto const& entry : pendingReveals_)
{
auto const& nid = entry.first;
auto const& reveal = entry.second;
if (!isUNLReportMember(nid))
continue;
auto kit = nodeIdToKey_.find(nid);
if (kit == nodeIdToKey_.end())
continue;
AccountID acctId;
std::memcpy(acctId.data(), nid.data(), acctId.size());
STTx tx(ttCONSENSUS_ENTROPY, [&](auto& obj) {
obj.setFieldU32(sfFlags, tfEntropyReveal);
obj.setFieldU32(sfLedgerSequence, seq);
obj.setAccountID(sfAccount, acctId);
obj.setFieldU32(sfSequence, 0);
obj.setFieldAmount(sfFee, STAmount{});
obj.setFieldH256(sfDigest, reveal);
obj.setFieldVL(sfSigningPubKey, kit->second.slice());
auto proofIt = proposalProofs_.find(nid);
if (proofIt != proposalProofs_.end())
obj.setFieldVL(sfBlob, serializeProof(proofIt->second));
});
Serializer s(2048);
tx.add(s);
map->addItem(
SHAMapNodeType::tnTRANSACTION_NM,
make_shamapitem(tx.getTransactionID(), s.slice()));
}
map = map->snapShot(false);
entropySetMap_ = map;
auto const hash = map->getHash().as_uint256();
inboundTransactions_.giveSet(hash, map, false);
JLOG(j_.debug()) << "RNG: built entropySet SHAMap hash=" << hash
<< " entries=" << pendingReveals_.size();
return hash;
//@@end rng-build-entropy-set
}
void
RCLConsensus::Adaptor::generateEntropySecret()
{
// Generate cryptographically secure random entropy
crypto_prng()(myEntropySecret_.data(), myEntropySecret_.size());
entropyFailed_ = false;
}
uint256
RCLConsensus::Adaptor::getEntropySecret() const
{
return myEntropySecret_;
}
void
RCLConsensus::Adaptor::setEntropyFailed()
{
entropyFailed_ = true;
}
PublicKey const&
RCLConsensus::Adaptor::validatorKey() const
{
return validatorKeys_.publicKey;
}
void
RCLConsensus::Adaptor::clearRngState()
{
pendingCommits_.clear();
pendingReveals_.clear();
nodeIdToKey_.clear();
myEntropySecret_ = uint256{};
entropyFailed_ = false;
commitSetMap_.reset();
entropySetMap_.reset();
pendingRngFetches_.clear();
unlReportNodeIds_.clear();
expectedProposers_.clear();
commitProofs_.clear();
proposalProofs_.clear();
}
void
RCLConsensus::Adaptor::cacheUNLReport()
{
unlReportNodeIds_.clear();
// Try UNL Report from the validated ledger
if (auto const prevLedger = ledgerMaster_.getValidatedLedger())
{
if (auto const sle = prevLedger->read(keylet::UNLReport()))
{
if (sle->isFieldPresent(sfActiveValidators))
{
for (auto const& obj : sle->getFieldArray(sfActiveValidators))
{
auto const pk = obj.getFieldVL(sfPublicKey);
if (publicKeyType(makeSlice(pk)))
{
unlReportNodeIds_.insert(
calcNodeID(PublicKey(makeSlice(pk))));
}
}
}
}
}
// Fallback to normal UNL if no report or empty
if (unlReportNodeIds_.empty())
{
for (auto const& masterKey : app_.validators().getTrustedMasterKeys())
{
unlReportNodeIds_.insert(calcNodeID(masterKey));
}
}
// Always include ourselves
unlReportNodeIds_.insert(validatorKeys_.nodeID);
JLOG(j_.debug()) << "RNG: cacheUNLReport size=" << unlReportNodeIds_.size();
}
bool
RCLConsensus::Adaptor::isUNLReportMember(NodeID const& nodeId) const
{
return unlReportNodeIds_.count(nodeId) > 0;
}
bool
RCLConsensus::Adaptor::isRngSet(uint256 const& hash) const
{
if (commitSetMap_ && commitSetMap_->getHash().as_uint256() == hash)
return true;
if (entropySetMap_ && entropySetMap_->getHash().as_uint256() == hash)
return true;
return pendingRngFetches_.count(hash) > 0;
}
void
RCLConsensus::Adaptor::handleAcquiredRngSet(std::shared_ptr<SHAMap> const& map)
{
auto const hash = map->getHash().as_uint256();
pendingRngFetches_.erase(hash);
JLOG(j_.debug()) << "RNG: handleAcquiredRngSet hash=" << hash;
// Determine if this is a commitSet or entropySet by inspecting entries
bool isCommitSet = false;
bool isEntropySet = false;
map->visitLeaves([&](boost::intrusive_ptr<SHAMapItem const> const& item) {
try
{
// Skip prefix (4 bytes) when deserializing
SerialIter sit(item->slice());
auto stx = std::make_shared<STTx const>(std::ref(sit));
auto flags = stx->getFieldU32(sfFlags);
if (flags & tfEntropyCommit)
isCommitSet = true;
else if (flags & tfEntropyReveal)
isEntropySet = true;
}
catch (std::exception const&)
{
// Skip malformed entries
}
});
if (!isCommitSet && !isEntropySet)
{
JLOG(j_.warn()) << "RNG: acquired set " << hash
<< " has no recognizable RNG entries";
return;
}
// Union-merge: diff against our local set and add any entries we're
// missing. Unlike normal txSets which use avalanche voting to resolve
// disagreements, RNG sets use pure union — every valid UNL entry
// belongs in the set. Differences arise only from propagation timing,
// not from conflicting opinions about inclusion.
auto& localMap = isCommitSet ? commitSetMap_ : entropySetMap_;
auto& pendingData = isCommitSet ? pendingCommits_ : pendingReveals_;
std::size_t merged = 0;
if (localMap)
{
SHAMap::Delta delta;
localMap->compare(*map, delta, 65536);
for (auto const& [key, pair] : delta)
{
// pair.first = our entry, pair.second = their entry
// If we don't have it (pair.first is null), merge it
if (!pair.first && pair.second)
{
try
{
SerialIter sit(pair.second->slice());
auto stx = std::make_shared<STTx const>(std::ref(sit));
auto pk = stx->getFieldVL(sfSigningPubKey);
PublicKey pubKey(makeSlice(pk));
auto digest = stx->getFieldH256(sfDigest);
// Recover NodeID from sfAccount (encoded by
// buildCommitSet/buildEntropySet) to avoid
// master-vs-signing key mismatch.
auto const acctId = stx->getAccountID(sfAccount);
NodeID nodeId;
std::memcpy(nodeId.data(), acctId.data(), nodeId.size());
if (!isUNLReportMember(nodeId))
{
JLOG(j_.debug()) << "RNG: rejecting non-UNL entry from "
<< nodeId << " in acquired set";
continue;
}
// Verify proposal proof if present
if (stx->isFieldPresent(sfBlob))
{
auto proofBlob = stx->getFieldVL(sfBlob);
if (!verifyProof(
proofBlob, pubKey, digest, isCommitSet))
{
JLOG(j_.warn())
<< "RNG: invalid proof from " << nodeId
<< " in acquired set (diff)";
continue;
}
}
pendingData[nodeId] = digest;
nodeIdToKey_[nodeId] = pubKey;
++merged;
JLOG(j_.trace())
<< "RNG: merged " << (isCommitSet ? "commit" : "reveal")
<< " from " << nodeId;
}
catch (std::exception const& ex)
{
JLOG(j_.warn())
<< "RNG: failed to parse entry from acquired set: "
<< ex.what();
}
}
}
}
else
{
// We don't have a local set yet — extract all entries
map->visitLeaves(
[&](boost::intrusive_ptr<SHAMapItem const> const& item) {
try
{
SerialIter sit(item->slice());
auto stx = std::make_shared<STTx const>(std::ref(sit));
auto pk = stx->getFieldVL(sfSigningPubKey);
PublicKey pubKey(makeSlice(pk));
auto digest = stx->getFieldH256(sfDigest);
auto const acctId = stx->getAccountID(sfAccount);
NodeID nodeId;
std::memcpy(nodeId.data(), acctId.data(), nodeId.size());
if (!isUNLReportMember(nodeId))
{
JLOG(j_.debug()) << "RNG: rejecting non-UNL entry from "
<< nodeId << " in acquired set";
return;
}
// Verify proposal proof if present
if (stx->isFieldPresent(sfBlob))
{
auto proofBlob = stx->getFieldVL(sfBlob);
if (!verifyProof(
proofBlob, pubKey, digest, isCommitSet))
{
JLOG(j_.warn())
<< "RNG: invalid proof from " << nodeId
<< " in acquired set (visit)";
return;
}
}
pendingData[nodeId] = digest;
nodeIdToKey_[nodeId] = pubKey;
++merged;
}
catch (std::exception const&)
{
// Skip malformed entries
}
});
}
JLOG(j_.info()) << "RNG: merged " << merged << " entries from "
<< (isCommitSet ? "commitSet" : "entropySet")
<< " hash=" << hash;
}
void
RCLConsensus::Adaptor::fetchRngSetIfNeeded(std::optional<uint256> const& hash)
{
if (!hash || *hash == uint256{})
return;
// Check if we already have this set
if (commitSetMap_ && commitSetMap_->getHash().as_uint256() == *hash)
return;
if (entropySetMap_ && entropySetMap_->getHash().as_uint256() == *hash)
return;
// Check if already fetching
if (pendingRngFetches_.count(*hash))
return;
// Check if InboundTransactions already has it
if (auto existing = inboundTransactions_.getSet(*hash, false))
{
handleAcquiredRngSet(existing);
return;
}
// Trigger network fetch
JLOG(j_.debug()) << "RNG: triggering fetch for set " << *hash;
pendingRngFetches_.insert(*hash);
inboundTransactions_.getSet(*hash, true);
}
void
RCLConsensus::Adaptor::injectEntropyPseudoTx(
CanonicalTXSet& retriableTxs,
LedgerIndex seq)
{
JLOG(j_.info()) << "RNG: injectEntropy seq=" << seq
<< " commits=" << pendingCommits_.size()
<< " reveals=" << pendingReveals_.size()
<< " failed=" << entropyFailed_;
uint256 finalEntropy;
bool hasEntropy = false;
//@@start rng-inject-entropy-selection
// Calculate entropy from collected reveals
if (app_.config().standalone())
{
// Standalone mode: generate synthetic deterministic entropy
// so that Hook APIs (dice/random) work for testing.
finalEntropy = sha512Half(std::string("standalone-entropy"), seq);
hasEntropy = true;
JLOG(j_.info()) << "RNG: Standalone synthetic entropy " << finalEntropy
<< " for ledger " << seq;
}
else if (entropyFailed_ || pendingReveals_.empty())
{
// Liveness fallback: inject zero entropy.
// Hooks MUST check for zero to know entropy is unavailable.
finalEntropy.zero();
hasEntropy = true;
JLOG(j_.warn()) << "RNG: Injecting ZERO entropy (fallback) for ledger "
<< seq;
}
else
{
// Sort reveals deterministically by Validator Public Key
std::vector<std::pair<PublicKey, uint256>> sorted;
sorted.reserve(pendingReveals_.size());
for (auto const& [nodeId, reveal] : pendingReveals_)
{
auto it = nodeIdToKey_.find(nodeId);
if (it != nodeIdToKey_.end())
sorted.emplace_back(it->second, reveal);
}
if (!sorted.empty())
{
std::sort(
sorted.begin(), sorted.end(), [](auto const& a, auto const& b) {
return a.first.slice() < b.first.slice();
});
// Mix all reveals into final entropy
Serializer s;
for (auto const& [key, reveal] : sorted)
{
s.addVL(key.slice());
s.addBitString(reveal);
}
finalEntropy = sha512Half(s.slice());
hasEntropy = true;
JLOG(j_.info()) << "RNG: Injecting entropy " << finalEntropy
<< " from " << sorted.size() << " reveals"
<< " for ledger " << seq;
}
}
//@@end rng-inject-entropy-selection
//@@start rng-inject-pseudotx
// Synthesize and inject the pseudo-transaction
if (hasEntropy)
{
// Account Zero convention for pseudo-transactions (same as ttFEE, etc)
auto const entropyCount = static_cast<std::uint16_t>(
app_.config().standalone()
? 20 // synthetic: high enough for Hook APIs (need >= 5)
: (entropyFailed_ || pendingReveals_.empty()
? 0
: pendingReveals_.size()));
STTx tx(ttCONSENSUS_ENTROPY, [&](auto& obj) {
obj.setFieldU32(sfLedgerSequence, seq);
obj.setAccountID(sfAccount, AccountID{});
obj.setFieldU32(sfSequence, 0);
obj.setFieldAmount(sfFee, STAmount{});
obj.setFieldH256(sfDigest, finalEntropy);
obj.setFieldU16(sfEntropyCount, entropyCount);
});
retriableTxs.insert(std::make_shared<STTx>(std::move(tx)));
}
//@@end rng-inject-pseudotx
// Reset RNG state for next round
clearRngState();
}
/** Harvest RNG commit/reveal data piggybacked on a peer proposal.

    Only data from validators in the active UNL is accepted. Commitments
    are stored as-is (with a warning if a validator changes its value);
    reveals are accepted only when Hash(reveal | pubKey | seq) matches the
    commitment previously stored for that validator, which blocks grinding
    attacks where the commit phase is skipped. When the position carries a
    commitment or reveal, a ProposalProof is also captured for embedding
    in the commitSet/entropySet SHAMap entries.

    @param nodeId     proposing validator's node ID
    @param publicKey  proposing validator's public key
    @param position   the proposal's ExtendedPosition (carries the leaves)
    @param proposeSeq sequence number of the proposal
    @param closeTime  close time of the proposal
    @param prevLedger hash of the ledger the proposal builds upon
    @param signature  the proposal's signature (stored inside the proof)
*/
void
RCLConsensus::Adaptor::harvestRngData(
    NodeID const& nodeId,
    PublicKey const& publicKey,
    ExtendedPosition const& position,
    std::uint32_t proposeSeq,
    NetClock::time_point closeTime,
    uint256 const& prevLedger,
    Slice const& signature)
{
    JLOG(j_.debug()) << "RNG: harvestRngData from " << nodeId
                     << " commit=" << (position.myCommitment ? "yes" : "no")
                     << " reveal=" << (position.myReveal ? "yes" : "no");
    //@@start rng-harvest-trust-and-reveal-verification
    // Reject data from validators not in the active UNL
    if (!isUNLReportMember(nodeId))
    {
        JLOG(j_.debug()) << "RNG: rejecting data from non-UNL validator "
                         << nodeId;
        return;
    }
    // Store nodeId -> publicKey mapping for deterministic ordering
    nodeIdToKey_[nodeId] = publicKey;
    // Harvest commitment if present
    if (position.myCommitment)
    {
        auto [it, inserted] =
            pendingCommits_.emplace(nodeId, *position.myCommitment);
        // A changed commitment is suspicious but the latest value wins;
        // the reveal check below still has to match whatever is stored.
        if (!inserted && it->second != *position.myCommitment)
        {
            JLOG(j_.warn())
                << "Validator " << nodeId << " changed commitment from "
                << it->second << " to " << *position.myCommitment;
            it->second = *position.myCommitment;
        }
        else if (inserted)
        {
            JLOG(j_.trace()) << "Harvested commitment from " << nodeId << ": "
                             << *position.myCommitment;
        }
    }
    // Harvest reveal if present — verify it matches the stored commitment
    if (position.myReveal)
    {
        auto commitIt = pendingCommits_.find(nodeId);
        if (commitIt == pendingCommits_.end())
        {
            // No commitment on record — cannot verify. Ignore to prevent
            // grinding attacks where a validator skips the commit phase.
            JLOG(j_.warn()) << "RNG: rejecting reveal from " << nodeId
                            << " (no commitment on record)";
            return;
        }
        // Verify Hash(reveal | pubKey | seq) == commitment
        // seq is derived from the previous ledger, so the commitment is
        // bound to the specific ledger it was made for.
        auto const prevLgr = ledgerMaster_.getLedgerByHash(prevLedger);
        if (!prevLgr)
        {
            JLOG(j_.warn()) << "RNG: cannot verify reveal from " << nodeId
                            << " (prevLedger not available)";
            return;
        }
        auto const seq = prevLgr->info().seq + 1;
        auto const calculated = sha512Half(*position.myReveal, publicKey, seq);
        if (calculated != commitIt->second)
        {
            JLOG(j_.warn()) << "RNG: fraudulent reveal from " << nodeId
                            << " (does not match commitment)";
            return;
        }
        auto [it, inserted] =
            pendingReveals_.emplace(nodeId, *position.myReveal);
        if (!inserted && it->second != *position.myReveal)
        {
            JLOG(j_.warn()) << "Validator " << nodeId << " changed reveal from "
                            << it->second << " to " << *position.myReveal;
            it->second = *position.myReveal;
        }
        else if (inserted)
        {
            JLOG(j_.trace()) << "Harvested reveal from " << nodeId << ": "
                             << *position.myReveal;
        }
    }
    //@@end rng-harvest-trust-and-reveal-verification
    // Store proposal proofs for embedding in SHAMap entries.
    // commitProofs_: only seq=0 (commitments always ride on seq=0,
    // so all nodes store the same proof → deterministic commitSet).
    // proposalProofs_: latest proof carrying a reveal (for entropySet).
    if (position.myCommitment || position.myReveal)
    {
        auto makeProof = [&]() {
            ProposalProof proof;
            proof.proposeSeq = proposeSeq;
            proof.closeTime = static_cast<std::uint32_t>(
                closeTime.time_since_epoch().count());
            proof.prevLedger = prevLedger;
            Serializer s;
            position.add(s);
            proof.positionData = std::move(s);
            proof.signature = Buffer(signature.data(), signature.size());
            return proof;
        };
        if (position.myCommitment && proposeSeq == 0)
            commitProofs_.emplace(nodeId, makeProof());
        if (position.myReveal)
            proposalProofs_[nodeId] = makeProof();
    }
}
/** Serialize a ProposalProof into a Blob (for storage in an sfBlob field).

    Wire layout: proposeSeq (u32) | closeTime (u32) | prevLedger (u256) |
    VL(positionData) | VL(signature). Must stay in sync with the field
    order read back by verifyProof.

    @param proof the proof to serialize
    @return the serialized bytes
*/
Blob
RCLConsensus::Adaptor::serializeProof(ProposalProof const& proof)
{
    Serializer out;
    out.add32(proof.proposeSeq);
    out.add32(proof.closeTime);
    out.addBitString(proof.prevLedger);
    out.addVL(proof.positionData.slice());
    Slice const sig{proof.signature.data(), proof.signature.size()};
    out.addVL(sig);
    return out.getData();
}
/** Verify a serialized ProposalProof against a validator key and digest.

    Decodes the blob written by serializeProof, checks that the embedded
    ExtendedPosition carries the expected commitment/reveal leaf, then
    recomputes the proposal signing hash and verifies the signature.
    Any malformed input (deserialization throw) yields false.

    @param proofBlob      the serialized proof
    @param publicKey      the proposing validator's public key
    @param expectedDigest the commitment or reveal value being proven
    @param isCommit       true to check myCommitment, false for myReveal
    @return true iff the proof is well-formed and the signature verifies
*/
bool
RCLConsensus::Adaptor::verifyProof(
    Blob const& proofBlob,
    PublicKey const& publicKey,
    uint256 const& expectedDigest,
    bool isCommit)
{
    try
    {
        SerialIter sit(makeSlice(proofBlob));
        auto const proposeSeq = sit.get32();
        auto const closeTime = sit.get32();
        auto const prevLedger = sit.get256();
        auto const positionData = sit.getVL();
        auto const signature = sit.getVL();

        // Reconstruct the ExtendedPosition embedded in the proof.
        SerialIter posIter(makeSlice(positionData));
        auto maybePos =
            ExtendedPosition::fromSerialIter(posIter, positionData.size());
        if (!maybePos)
            return false;
        auto const position = std::move(*maybePos);

        // The flagged leaf (commitment or reveal) must match the digest.
        auto const& leaf = isCommit ? position.myCommitment : position.myReveal;
        if (!leaf || *leaf != expectedDigest)
            return false;

        // Recompute the signing hash (must match
        // ConsensusProposal::signingHash) and check the signature.
        auto const signingHash = sha512Half(
            HashPrefix::proposal, proposeSeq, closeTime, prevLedger, position);
        return verifyDigest(publicKey, signingHash, makeSlice(signature));
    }
    catch (std::exception const&)
    {
        return false;
    }
}
void
RCLConsensus::startRound(
NetClock::time_point const& now,

View File

@@ -40,7 +40,6 @@
#include <set>
namespace ripple {
class CanonicalTXSet;
class InboundTransactions;
class LocalTxs;
class LedgerMaster;
@@ -88,54 +87,12 @@ class RCLConsensus
RCLCensorshipDetector<TxID, LedgerIndex> censorshipDetector_;
NegativeUNLVote nUnlVote_;
// --- RNG Pipelined Storage ---
hash_map<NodeID, uint256> pendingCommits_;
hash_map<NodeID, uint256> pendingReveals_;
hash_map<NodeID, PublicKey> nodeIdToKey_;
// Ephemeral entropy secret (in-memory only, crash = non-revealer)
uint256 myEntropySecret_;
bool entropyFailed_ = false;
// Real SHAMaps for the current round (unbacked, ephemeral)
std::shared_ptr<SHAMap> commitSetMap_;
std::shared_ptr<SHAMap> entropySetMap_;
// Track pending RNG set hashes we've triggered fetches for
hash_set<uint256> pendingRngFetches_;
// Cached set of NodeIDs from UNL Report (or fallback UNL)
hash_set<NodeID> unlReportNodeIds_;
// Expected proposers for commit quorum — derived from last round's
// actual proposers (best signal), falling back to UNL Report.
hash_set<NodeID> expectedProposers_;
/** Proof data from a proposal signature, for embedding in SHAMap
entries. Contains everything needed to independently verify
that a validator committed/revealed a specific value. */
struct ProposalProof
{
    std::uint32_t proposeSeq;  // proposal sequence the proof was taken from
    std::uint32_t closeTime;   // proposal close time, as a raw
                               // NetClock::time_point count truncated to u32
    uint256 prevLedger;        // hash of the ledger the proposal built upon
    Serializer positionData;   // serialized ExtendedPosition
    Buffer signature;          // the proposal's signature over its signing
                               // hash (verified by verifyProof)
};
// Proposal proofs keyed by NodeID.
// commitProofs_: only seq=0 proofs (deterministic across all nodes).
// proposalProofs_: latest proof with reveal (for entropySet).
hash_map<NodeID, ProposalProof> commitProofs_;
hash_map<NodeID, ProposalProof> proposalProofs_;
public:
using Ledger_t = RCLCxLedger;
using NodeID_t = NodeID;
using NodeKey_t = PublicKey;
using TxSet_t = RCLTxSet;
using PeerPosition_t = RCLCxPeerPos;
using Position_t = ExtendedPosition;
using Result = ConsensusResult<Adaptor>;
@@ -221,131 +178,6 @@ class RCLConsensus
return parms_;
}
// --- RNG Helper Methods ---
/** Get the quorum threshold (80% of trusted validators) */
std::size_t
quorumThreshold() const;
/** Set expected proposers for this round's commit quorum.
Cascade: recent proposers > UNL Report > (empty = 80% fallback).
*/
void
setExpectedProposers(hash_set<NodeID> proposers);
/** Number of pending commits (for timeout fallback check) */
std::size_t
pendingCommitCount() const;
/** Check if we have quorum of commits */
bool
hasQuorumOfCommits() const;
/** Check if we have minimum reveals for consensus */
bool
hasMinimumReveals() const;
/** Check if we have any reveals at all */
bool
hasAnyReveals() const;
/** Build real SHAMap from collected commits, register for fetch.
@param seq The ledger sequence being built
@return The SHAMap root hash (commitSetHash)
*/
uint256
buildCommitSet(LedgerIndex seq);
/** Build real SHAMap from collected reveals, register for fetch.
@param seq The ledger sequence being built
@return The SHAMap root hash (entropySetHash)
*/
uint256
buildEntropySet(LedgerIndex seq);
/** Check if a hash is a known RNG set (commitSet or entropySet) */
bool
isRngSet(uint256 const& hash) const;
/** Handle an acquired RNG set — diff, merge missing entries */
void
handleAcquiredRngSet(std::shared_ptr<SHAMap> const& map);
/** Trigger fetch for a peer's unknown RNG set hash */
void
fetchRngSetIfNeeded(std::optional<uint256> const& hash);
/** Cache the active UNL NodeIDs for this round.
Reads from UNL Report (in-ledger), falls back to normal UNL.
*/
void
cacheUNLReport();
/** Check if a NodeID is in the active UNL for this round */
bool
isUNLReportMember(NodeID const& nodeId) const;
/** Generate new entropy secret for this round */
void
generateEntropySecret();
/** Get the current entropy secret */
uint256
getEntropySecret() const;
/** Mark entropy as failed for this round */
void
setEntropyFailed();
/** Get our validator public key */
PublicKey const&
validatorKey() const;
/** Clear RNG state for new round */
void
clearRngState();
/** Inject consensus entropy pseudo-transaction into the tx set.
Creates a ttCONSENSUS_ENTROPY pseudo-transaction from collected
reveals and injects it into the transaction set. This must be
called before buildLCL so the entropy is written to the ledger.
@param retriableTxs The canonical transaction set to inject into
@param seq The ledger sequence being built
*/
void
injectEntropyPseudoTx(CanonicalTXSet& retriableTxs, LedgerIndex seq);
/** Harvest RNG data from a peer proposal.
Extracts commits and reveals from the proposal's ExtendedPosition
and stores them in pending collections for later processing.
Also captures a ProposalProof for embedding in SHAMap entries.
*/
void
harvestRngData(
NodeID const& nodeId,
PublicKey const& publicKey,
ExtendedPosition const& position,
std::uint32_t proposeSeq,
NetClock::time_point closeTime,
uint256 const& prevLedger,
Slice const& signature);
/** Serialize a ProposalProof into a blob for sfBlob */
static Blob
serializeProof(ProposalProof const& proof);
/** Verify a proof blob against the entry's public key and digest.
@return true if the proof is valid */
static bool
verifyProof(
Blob const& proofBlob,
PublicKey const& publicKey,
uint256 const& expectedDigest,
bool isCommit);
private:
//---------------------------------------------------------------------
// The following members implement the generic Consensus requirements
@@ -639,29 +471,6 @@ public:
return consensus_.phase();
}
//! @see Consensus::inRngSubState
bool
inRngSubState() const
{
return consensus_.inRngSubState();
}
//! Check if a hash is a known RNG set (commitSet or entropySet)
bool
isRngSet(uint256 const& hash) const
{
std::lock_guard _{mutex_};
return adaptor_.isRngSet(hash);
}
//! Handle an acquired RNG set from InboundTransactions
void
gotRngSet(std::shared_ptr<SHAMap> const& map)
{
std::lock_guard _{mutex_};
adaptor_.handleAcquiredRngSet(map);
}
//! @see Consensus::getJson
Json::Value
getJson(bool full) const;

View File

@@ -64,17 +64,15 @@ RCLCxPeerPos::getJson() const
uint256
proposalUniqueId(
ExtendedPosition const& position,
uint256 const& proposeHash,
uint256 const& previousLedger,
std::uint32_t proposeSeq,
NetClock::time_point closeTime,
Slice const& publicKey,
Slice const& signature)
{
// This is for suppression/dedup only, NOT for signing.
// Must include all fields that distinguish proposals.
Serializer s(512);
position.add(s);
s.addBitString(proposeHash);
s.addBitString(previousLedger);
s.add32(proposeSeq);
s.add32(closeTime.time_since_epoch().count());

View File

@@ -28,247 +28,13 @@
#include <ripple/protocol/HashPrefix.h>
#include <ripple/protocol/PublicKey.h>
#include <ripple/protocol/SecretKey.h>
#include <ripple/protocol/Serializer.h>
#include <boost/container/static_vector.hpp>
#include <chrono>
#include <cstdint>
#include <optional>
#include <ostream>
#include <string>
namespace ripple {
/** Extended position for consensus with RNG entropy support.
Carries the tx-set hash (the core convergence target), RNG set hashes
(agreed via sub-state quorum, not via operator==), and per-validator
leaves (unique to each proposer, piggybacked on proposals).
Critical design:
- operator== compares txSetHash ONLY (sub-states handle the rest)
- add() includes ALL fields for signing (prevents stripping attacks)
*/
struct ExtendedPosition
{
    // === Core Convergence Target ===
    // The proposed transaction-set hash — the ONLY field that drives
    // consensus convergence (see operator== below).
    uint256 txSetHash;
    // === RNG Set Hashes (sub-state quorum, not in operator==) ===
    std::optional<uint256> commitSetHash;
    std::optional<uint256> entropySetHash;
    // === Per-Validator Leaves (unique per proposer) ===
    std::optional<uint256> myCommitment;
    std::optional<uint256> myReveal;
    ExtendedPosition() = default;
    explicit ExtendedPosition(uint256 const& txSet) : txSetHash(txSet)
    {
    }
    // Implicit conversion for legacy compatibility
    // NOTE(review): a non-explicit conversion operator can fire silently
    // at call sites; consider a named accessor in a follow-up.
    operator uint256() const
    {
        return txSetHash;
    }
    // Helper to update TxSet while preserving sidecar data
    void
    updateTxSet(uint256 const& set)
    {
        txSetHash = set;
    }
    // TODO: replace operator== with a named method (e.g. txSetMatches())
    // so call sites read as intent, not as "full equality". Overloading
    // operator== to ignore most fields is surprising and fragile.
    //
    // CRITICAL: Only compare txSetHash for consensus convergence.
    //
    // Why not commitSetHash / entropySetHash?
    // Nodes transition through sub-states (ConvergingTx → ConvergingCommit
    // → ConvergingReveal) at slightly different times. If we included
    // commitSetHash here, a node that transitions first would set it,
    // making its position "different" from peers who haven't transitioned
    // yet — deadlocking haveConsensus() for everyone.
    //
    // Instead, the sub-state machine in phaseEstablish handles agreement
    // on those fields via quorum checks (hasQuorumOfCommits, etc.).
    //
    // Implications to consider:
    // - Two nodes with the same txSetHash but different commitSetHash
    //   will appear to "agree" from the convergence engine's perspective.
    //   This is intentional: tx consensus must not be blocked by RNG.
    // - A malicious node could propose a different commitSetHash without
    //   affecting tx convergence. This is safe because commitSetHash
    //   disagreement is caught by the sub-state quorum checks, and the
    //   entropy result is verified deterministically from collected reveals.
    // - Leaves (myCommitment, myReveal) are also excluded — they are
    //   per-validator data unique to each proposer.
    //@@start rng-extended-position-equality
    bool
    operator==(ExtendedPosition const& other) const
    {
        return txSetHash == other.txSetHash;
    }
    bool
    operator!=(ExtendedPosition const& other) const
    {
        return !(*this == other);
    }
    // Comparison with uint256 (compares txSetHash only)
    bool
    operator==(uint256 const& hash) const
    {
        return txSetHash == hash;
    }
    bool
    operator!=(uint256 const& hash) const
    {
        return txSetHash != hash;
    }
    friend bool
    operator==(uint256 const& hash, ExtendedPosition const& pos)
    {
        return pos.txSetHash == hash;
    }
    friend bool
    operator!=(uint256 const& hash, ExtendedPosition const& pos)
    {
        return pos.txSetHash != hash;
    }
    //@@end rng-extended-position-equality
    // CRITICAL: Include ALL fields for signing (prevents stripping attacks)
    //@@start rng-extended-position-serialize
    /** Serialize for signing / wire transmission.
        Emits txSetHash, then — only when at least one extension field is
        set — a flags byte followed by the present uint256 fields in flag
        order. The inverse of fromSerialIter below.
    */
    void
    add(Serializer& s) const
    {
        s.addBitString(txSetHash);
        // Wire compatibility: if no extensions, emit exactly 32 bytes
        // so legacy nodes that expect a plain uint256 work unchanged.
        if (!commitSetHash && !entropySetHash && !myCommitment && !myReveal)
            return;
        // One bit per optional field; bit layout must match the
        // deserializer and the 0xF0 rejection mask below.
        std::uint8_t flags = 0;
        if (commitSetHash)
            flags |= 0x01;
        if (entropySetHash)
            flags |= 0x02;
        if (myCommitment)
            flags |= 0x04;
        if (myReveal)
            flags |= 0x08;
        s.add8(flags);
        if (commitSetHash)
            s.addBitString(*commitSetHash);
        if (entropySetHash)
            s.addBitString(*entropySetHash);
        if (myCommitment)
            s.addBitString(*myCommitment);
        if (myReveal)
            s.addBitString(*myReveal);
    }
    //@@end rng-extended-position-serialize
    /** JSON view for RPC/logging — set hashes only, not the leaves. */
    Json::Value
    getJson() const
    {
        Json::Value ret = Json::objectValue;
        ret["tx_set"] = to_string(txSetHash);
        if (commitSetHash)
            ret["commit_set"] = to_string(*commitSetHash);
        if (entropySetHash)
            ret["entropy_set"] = to_string(*entropySetHash);
        return ret;
    }
    /** Deserialize from wire format.
        Handles both legacy 32-byte hash and new extended format.
        Returns nullopt if the payload is malformed (truncated for the
        flags advertised).
    */
    //@@start rng-extended-position-deserialize
    static std::optional<ExtendedPosition>
    fromSerialIter(SerialIter& sit, std::size_t totalSize)
    {
        if (totalSize < 32)
            return std::nullopt;
        ExtendedPosition pos;
        pos.txSetHash = sit.get256();
        // Legacy format: exactly 32 bytes
        if (totalSize == 32)
            return pos;
        // Extended format: flags byte + optional uint256 fields
        if (sit.empty())
            return pos;
        std::uint8_t flags = sit.get8();
        // Reject unknown flag bits (reduces wire malleability)
        if (flags & 0xF0)
            return std::nullopt;
        // Validate exact byte count for the flagged fields.
        // Each flag bit indicates a 32-byte uint256.
        int fieldCount = 0;
        for (int i = 0; i < 4; ++i)
            if (flags & (1 << i))
                ++fieldCount;
        if (sit.getBytesLeft() != static_cast<std::size_t>(fieldCount * 32))
            return std::nullopt;
        // Read fields in the same order add() wrote them.
        if (flags & 0x01)
            pos.commitSetHash = sit.get256();
        if (flags & 0x02)
            pos.entropySetHash = sit.get256();
        if (flags & 0x04)
            pos.myCommitment = sit.get256();
        if (flags & 0x08)
            pos.myReveal = sit.get256();
        return pos;
    }
    //@@end rng-extended-position-deserialize
};
// For logging/debugging - returns txSetHash as string
// Human-readable form for logging/debugging: shows only the tx-set hash.
inline std::string
to_string(ExtendedPosition const& pos)
{
    auto const& hash = pos.txSetHash;
    return to_string(hash);
}
// Stream output for logging
// Stream insertion for logging — delegates to the txSetHash.
inline std::ostream&
operator<<(std::ostream& os, ExtendedPosition const& pos)
{
    os << pos.txSetHash;
    return os;
}
// For hash_append (used in sha512Half and similar)
template <class Hasher>
void
hash_append(Hasher& h, ExtendedPosition const& pos)
{
using beast::hash_append;
// Serialize full position including all fields
Serializer s;
pos.add(s);
hash_append(h, s.slice());
}
/** A peer's signed, proposed position for use in RCLConsensus.
Carries a ConsensusProposal signed by a peer. Provides value semantics
@@ -277,9 +43,8 @@ hash_append(Hasher& h, ExtendedPosition const& pos)
class RCLCxPeerPos
{
public:
//< The type of the proposed position (uses ExtendedPosition for RNG
// support)
using Proposal = ConsensusProposal<NodeID, uint256, ExtendedPosition>;
//< The type of the proposed position
using Proposal = ConsensusProposal<NodeID, uint256, uint256>;
/** Constructor
@@ -347,10 +112,7 @@ private:
hash_append(h, std::uint32_t(proposal().proposeSeq()));
hash_append(h, proposal().closeTime());
hash_append(h, proposal().prevLedger());
// Serialize full ExtendedPosition for hashing
Serializer s;
proposal().position().add(s);
hash_append(h, s.slice());
hash_append(h, proposal().position());
}
};
@@ -363,7 +125,7 @@ private:
order to validate the signature. If the last closed ledger is left out, then
it is considered as all zeroes for the purposes of signing.
@param position The extended position (includes entropy fields)
@param proposeHash The hash of the proposed position
@param previousLedger The hash of the ledger the proposal is based upon
@param proposeSeq Sequence number of the proposal
@param closeTime Close time of the proposal
@@ -372,7 +134,7 @@ private:
*/
uint256
proposalUniqueId(
ExtendedPosition const& position,
uint256 const& proposeHash,
uint256 const& previousLedger,
std::uint32_t proposeSeq,
NetClock::time_point closeTime,

View File

@@ -15,7 +15,6 @@
#define uint256 std::string
#define featureHooksUpdate1 "1"
#define fix20250131 "1"
#define featureConsensusEntropy "1"
namespace hook_api {
struct Rules
{
@@ -373,8 +372,7 @@ enum hook_return_code : int64_t {
MEM_OVERLAP = -43, // one or more specified buffers are the same memory
TOO_MANY_STATE_MODIFICATIONS = -44, // more than 5000 modified state
// entries in the combined hook chains
TOO_MANY_NAMESPACES = -45,
TOO_LITTLE_ENTROPY = -46,
TOO_MANY_NAMESPACES = -45
};
enum ExitType : uint8_t {

View File

@@ -168,8 +168,6 @@ struct HookResult
false; // hook_again allows strong pre-apply to nominate
// additional weak post-apply execution
std::shared_ptr<STObject const> provisionalMeta;
uint64_t rngCallCounter{
0}; // used to ensure consecutive rng calls don't return the same data
};
class HookExecutor;

View File

@@ -367,13 +367,3 @@ HOOK_API_DEFINITION(
HOOK_API_DEFINITION(
int64_t, xpop_slot, (uint32_t, uint32_t),
featureHooksUpdate1)
// int64_t dice(uint32_t sides);
HOOK_API_DEFINITION(
int64_t, dice, (uint32_t),
featureConsensusEntropy)
// int64_t random(uint32_t write_ptr, uint32_t write_len);
HOOK_API_DEFINITION(
int64_t, random, (uint32_t, uint32_t),
featureConsensusEntropy)

View File

@@ -3866,130 +3866,6 @@ DEFINE_HOOK_FUNCTION(
HOOK_TEARDOWN();
}
// byteCount must be a multiple of 32
/** Deterministic, consensus-seeded RNG for hook APIs (dice / random).

    Produces up to 512 bytes (rounded DOWN to a multiple of 32) derived
    from the ConsensusEntropy ledger entry, mixed with per-transaction,
    per-hook context so distinct hooks/calls get distinct streams.
    Returns an empty vector when no usable entropy is available.
*/
// byteCount must be a multiple of 32
inline std::vector<uint8_t>
fairRng(ApplyContext& applyCtx, hook::HookResult& hr, uint32_t byteCount)
{
    // Clamp the request, then truncate to a whole number of 32-byte chunks.
    if (byteCount > 512)
        byteCount = 512;
    byteCount &= ~0b11111;
    if (byteCount == 0)
        return {};

    auto& view = applyCtx.view();
    auto const sleEntropy = view.peek(ripple::keylet::consensusEntropy());
    auto const seq = view.info().seq;
    auto const entropySeq =
        sleEntropy ? sleEntropy->getFieldU32(sfLedgerSequence) : 0u;
    // Allow entropy from current ledger (during close) or previous ledger
    // (open ledger / speculative execution), and require at least 5
    // contributing reveals. On the real network hooks always execute
    // during buildLCL where the entropy pseudo-tx has already updated the
    // SLE to the current seq.
    // TODO: open-ledger entropy uses previous ledger's entropy, so
    // dice/random results will differ between speculative and final
    // execution. This needs further thought re: UX implications.
    if (!sleEntropy || entropySeq > seq || (seq - entropySeq) > 1 ||
        sleEntropy->getFieldU16(sfEntropyCount) < 5)
        return {};

    // Seed: ledger seq + txn id + hook identity + chain position + strength,
    // plus rngCallCounter so consecutive calls don't repeat.
    uint256 chunk = sha512Half(
        seq,
        applyCtx.tx.getTransactionID(),
        hr.otxnAccount,
        hr.hookHash,
        hr.account,
        hr.hookChainPosition,
        hr.executeAgainAsWeak ? std::string("weak") : std::string("strong"),
        sleEntropy->getFieldH256(sfDigest),
        hr.rngCallCounter++);

    // Emit 32 bytes per chunk, re-hashing to extend the stream.
    std::vector<uint8_t> bytesOut(byteCount);
    for (uint32_t offset = 0; offset < byteCount; offset += 32)
    {
        std::memcpy(bytesOut.data() + offset, chunk.data(), 32);
        if (offset + 32 < byteCount)
            chunk = sha512Half(chunk);
    }
    return bytesOut;
}
// dice: roll an n-sided die using consensus-derived entropy.
// Returns a value in [0, sides); INVALID_ARGUMENT for sides == 0;
// TOO_LITTLE_ENTROPY when fairRng has no usable entropy for this ledger.
DEFINE_HOOK_FUNCTION(int64_t, dice, uint32_t sides)
{
    HOOK_SETUP();
    if (sides == 0)
        return INVALID_ARGUMENT;
    // One 32-byte chunk of deterministic entropy; empty means the
    // ConsensusEntropy SLE is missing, stale, or under-contributed.
    auto vec = fairRng(applyCtx, hookCtx.result, 32);
    if (vec.empty())
        return TOO_LITTLE_ENTROPY;
    if (vec.size() != 32)
        return INTERNAL_ERROR;
    // Interpret the first four bytes as a host-endian uint32.
    // NOTE(review): `value % sides` carries modulo bias for sides that do
    // not divide 2^32, and the byte order is host-dependent — both are
    // consensus-relevant only if nodes differ; confirm all validators
    // share endianness before relying on exact fairness.
    uint32_t value;
    std::memcpy(&value, vec.data(), sizeof(uint32_t));
    return value % sides;
    HOOK_TEARDOWN();
}
// random: fill hook memory [write_ptr, write_ptr + write_len) with
// consensus-derived entropy. write_len must be in [1, 512].
// Returns TOO_LITTLE_ENTROPY when fairRng has no usable entropy.
DEFINE_HOOK_FUNCTION(int64_t, random, uint32_t write_ptr, uint32_t write_len)
{
    HOOK_SETUP();
    if (write_len == 0)
        return TOO_SMALL;
    if (write_len > 512)
        return TOO_BIG;
    // fairRng produces whole 32-byte chunks, so round the request UP to
    // the next multiple of 32; only write_len bytes are copied out below.
    uint32_t required = write_len;
    if ((required & ~0b11111) == required)
    {
        // already a multiple of 32 bytes
    }
    else
    {
        // round up
        required &= ~0b11111;
        required += 32;
    }
    if (NOT_IN_BOUNDS(write_ptr, write_len, memory_length))
        return OUT_OF_BOUNDS;
    auto vec = fairRng(applyCtx, hookCtx.result, required);
    if (vec.empty())
        return TOO_LITTLE_ENTROPY;
    // Copies write_len bytes (vec may be up to 31 bytes larger).
    WRITE_WASM_MEMORY_AND_RETURN(
        write_ptr, write_len, vec.data(), vec.size(), memory, memory_length);
    HOOK_TEARDOWN();
}
/*
DEFINE_HOOK_FUNCTION(

View File

@@ -26,7 +26,6 @@
#include <ripple/core/JobQueue.h>
#include <ripple/nodestore/Database.h>
#include <ripple/protocol/HashPrefix.h>
#include <ripple/protocol/STTx.h>
#include <ripple/protocol/digest.h>
namespace ripple {
@@ -62,15 +61,6 @@ ConsensusTransSetSF::gotNode(
SerialIter sit(s.slice());
auto stx = std::make_shared<STTx const>(std::ref(sit));
assert(stx->getTransactionID() == nodeHash.as_uint256());
//@@start rng-pseudo-tx-submission-filtering
// Don't submit pseudo-transactions (consensus entropy, fees,
// amendments, etc.) — they exist as SHAMap entries for
// content-addressed identification but are not real user txns.
if (isPseudoTx(*stx))
return;
//@@end rng-pseudo-tx-submission-filtering
auto const pap = &app_;
app_.getJobQueue().addJob(jtTRANSACTION, "TXS->TXN", [pap, stx]() {
pap->getOPs().submitTransaction(stx);

View File

@@ -25,7 +25,6 @@
#include <ripple/app/misc/CanonicalTXSet.h>
#include <ripple/app/tx/apply.h>
#include <ripple/protocol/Feature.h>
#include <ripple/protocol/TxFormats.h>
namespace ripple {
@@ -104,47 +103,6 @@ applyTransactions(
bool certainRetry = true;
std::size_t count = 0;
//@@start rng-entropy-first-application
// CRITICAL: Apply consensus entropy pseudo-tx FIRST before any other
// transactions. This ensures hooks can read entropy during this ledger.
for (auto it = txns.begin(); it != txns.end(); /* manual */)
{
if (it->second->getTxnType() != ttCONSENSUS_ENTROPY)
{
++it;
continue;
}
auto const txid = it->first.getTXID();
JLOG(j.debug()) << "Applying entropy tx FIRST: " << txid;
try
{
auto const result =
applyTransaction(app, view, *it->second, true, tapNONE, j);
if (result == ApplyResult::Success)
{
++count;
JLOG(j.debug()) << "Entropy tx applied successfully";
}
else
{
failed.insert(txid);
JLOG(j.warn()) << "Entropy tx failed to apply";
}
}
catch (std::exception const& ex)
{
JLOG(j.warn()) << "Entropy tx throws: " << ex.what();
failed.insert(txid);
}
it = txns.erase(it);
break; // Only one entropy tx per ledger
}
//@@end rng-entropy-first-application
// Attempt to apply all of the retriable transactions
for (int pass = 0; pass < LEDGER_TOTAL_PASSES; ++pass)
{

View File

@@ -540,8 +540,7 @@ private:
std::function<void()> onExpire,
std::function<void()> onError);
void
setHeartbeatTimer(
std::chrono::milliseconds interval = std::chrono::milliseconds{0});
setHeartbeatTimer();
void
setClusterTimer();
void
@@ -888,14 +887,11 @@ NetworkOPsImp::setTimer(
}
void
NetworkOPsImp::setHeartbeatTimer(std::chrono::milliseconds interval)
NetworkOPsImp::setHeartbeatTimer()
{
if (interval == std::chrono::milliseconds{0})
interval = mConsensus.parms().ledgerGRANULARITY;
setTimer(
heartbeatTimer_,
interval,
mConsensus.parms().ledgerGRANULARITY,
[this]() {
m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
processHeartbeatTimer();
@@ -988,22 +984,7 @@ NetworkOPsImp::processHeartbeatTimer()
mLastConsensusPhase = currPhase;
}
//@@start rng-fast-polling
// Use faster polling during RNG sub-state transitions
// to reduce latency of commit-reveal rounds.
// Tunable via XAHAU_RNG_POLL_MS env var (default 250ms).
if (mConsensus.inRngSubState())
{
static auto const rngPollMs = []() -> std::chrono::milliseconds {
if (auto const* env = std::getenv("XAHAU_RNG_POLL_MS"))
return std::chrono::milliseconds{std::atoi(env)};
return std::chrono::milliseconds{250};
}();
setHeartbeatTimer(rngPollMs);
}
else
setHeartbeatTimer();
//@@end rng-fast-polling
setHeartbeatTimer();
}
void
@@ -1892,17 +1873,7 @@ NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
// We acquired it because consensus asked us to
if (fromAcquire)
{
auto const hash = map->getHash().as_uint256();
if (mConsensus.isRngSet(hash))
{
// RNG set (commitSet or entropySet) — route to adaptor
// for diff/merge, not into txSet consensus machinery.
mConsensus.gotRngSet(map);
return;
}
mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
}
}
void

View File

@@ -97,21 +97,6 @@ Change::preflight(PreflightContext const& ctx)
}
}
if (ctx.tx.getTxnType() == ttCONSENSUS_ENTROPY)
{
if (!ctx.rules.enabled(featureConsensusEntropy))
{
JLOG(ctx.j.warn()) << "Change: ConsensusEntropy is not enabled.";
return temDISABLED;
}
if (!ctx.tx.isFieldPresent(sfDigest))
{
JLOG(ctx.j.warn()) << "Change: ConsensusEntropy must have sfDigest";
return temMALFORMED;
}
}
return tesSUCCESS;
}
@@ -170,7 +155,6 @@ Change::preclaim(PreclaimContext const& ctx)
case ttAMENDMENT:
case ttUNL_MODIFY:
case ttEMIT_FAILURE:
case ttCONSENSUS_ENTROPY:
return tesSUCCESS;
case ttUNL_REPORT: {
if (!ctx.tx.isFieldPresent(sfImportVLKey) ||
@@ -226,45 +210,12 @@ Change::doApply()
return applyEmitFailure();
case ttUNL_REPORT:
return applyUNLReport();
case ttCONSENSUS_ENTROPY:
return applyConsensusEntropy();
default:
assert(0);
return tefFAILURE;
}
}
/** Apply a ttCONSENSUS_ENTROPY pseudo-transaction.

    Writes the agreed entropy digest into the ConsensusEntropy ledger
    entry (creating it on first use), together with the contributing
    reveal count (sfEntropyCount) and the sequence of the ledger being
    built, so hooks can check freshness.

    @return tesSUCCESS always (preflight/preclaim already validated).
*/
TER
Change::applyConsensusEntropy()
{
    auto const entropy = ctx_.tx.getFieldH256(sfDigest);
    //@@start rng-consensus-entropy-sle-write
    auto sle = view().peek(keylet::consensusEntropy());
    bool const created = !sle;
    if (created)
        sle = std::make_shared<SLE>(keylet::consensusEntropy());
    sle->setFieldH256(sfDigest, entropy);
    sle->setFieldU16(sfEntropyCount, ctx_.tx.getFieldU16(sfEntropyCount));
    sle->setFieldU32(sfLedgerSequence, view().info().seq);
    // Note: sfPreviousTxnID and sfPreviousTxnLgrSeq are set automatically
    // by ApplyStateTable::threadItem() because isThreadedType() returns true
    // for ledger entries that have sfPreviousTxnID in their format.
    if (created)
        view().insert(sle);
    else
        view().update(sle);
    //@@end rng-consensus-entropy-sle-write
    JLOG(j_.info()) << "ConsensusEntropy: updated entropy to " << entropy
                    << " at ledger " << view().info().seq;
    return tesSUCCESS;
}
TER
Change::applyUNLReport()
{

View File

@@ -76,9 +76,6 @@ private:
TER
applyUNLReport();
TER
applyConsensusEntropy();
};
} // namespace ripple

View File

@@ -494,7 +494,6 @@ LedgerEntryTypesMatch::visitEntry(
case ltCRON:
case ltIMPORT_VLSEQ:
case ltUNL_REPORT:
case ltCONSENSUS_ENTROPY:
break;
default:
invalidTypeAdded_ = true;

View File

@@ -152,7 +152,6 @@ invoke_preflight(PreflightContext const& ctx)
case ttUNL_MODIFY:
case ttUNL_REPORT:
case ttEMIT_FAILURE:
case ttCONSENSUS_ENTROPY:
return invoke_preflight_helper<Change>(ctx);
case ttHOOK_SET:
return invoke_preflight_helper<SetHook>(ctx);
@@ -284,7 +283,6 @@ invoke_preclaim(PreclaimContext const& ctx)
case ttUNL_MODIFY:
case ttUNL_REPORT:
case ttEMIT_FAILURE:
case ttCONSENSUS_ENTROPY:
return invoke_preclaim<Change>(ctx);
case ttNFTOKEN_MINT:
return invoke_preclaim<NFTokenMint>(ctx);
@@ -376,7 +374,6 @@ invoke_calculateBaseFee(ReadView const& view, STTx const& tx)
case ttUNL_MODIFY:
case ttUNL_REPORT:
case ttEMIT_FAILURE:
case ttCONSENSUS_ENTROPY:
return Change::calculateBaseFee(view, tx);
case ttNFTOKEN_MINT:
return NFTokenMint::calculateBaseFee(view, tx);
@@ -547,8 +544,7 @@ invoke_apply(ApplyContext& ctx)
case ttFEE:
case ttUNL_MODIFY:
case ttUNL_REPORT:
case ttEMIT_FAILURE:
case ttCONSENSUS_ENTROPY: {
case ttEMIT_FAILURE: {
Change p(ctx);
return p();
}

View File

@@ -29,7 +29,6 @@
#include <ripple/consensus/DisputedTx.h>
#include <ripple/consensus/LedgerTiming.h>
#include <ripple/json/json_writer.h>
#include <ripple/protocol/digest.h>
#include <boost/logic/tribool.hpp>
#include <deque>
#include <optional>
@@ -37,33 +36,6 @@
namespace ripple {
/** Sub-states for pipelined consensus with RNG entropy support.
The establish phase is divided into sub-states to support commit-reveal
for consensus-derived randomness while maintaining low latency through
pipelining.
@note Data collection (commits, reveals) happens continuously via proposal
leaves. Sub-states are checkpoints, not serial waits.
@note Convergence model: commitSet and entropySet use UNION convergence,
not avalanche voting. This is sufficient because:
- Each validator contributes exactly one deterministic entry
- Entries are piggybacked on proposals (already reliably propagated)
- There is no disagreement about inclusion — every valid entry belongs
- The only source of difference between nodes is timing
- Union is monotonic (sets only grow) and bounded (one per UNL member)
- SHAMap fetch/diff/merge handles late arrivals as a safety net
Avalanche is needed when nodes disagree about what to include/exclude
(e.g. disputed user transactions). For RNG sets, all honest nodes
want the same thing — include everything — so union suffices.
*/
enum class EstablishState {
ConvergingTx, ///< Normal txset convergence + harvesting commits
ConvergingCommit, ///< Confirming commitSet agreement (near-instant)
ConvergingReveal ///< Collecting reveals + confirming entropySet
};
/** Determines whether the current ledger should close at this time.
This function should be called when a ledger is open and there is no close
@@ -317,11 +289,10 @@ class Consensus
using NodeID_t = typename Adaptor::NodeID_t;
using Tx_t = typename TxSet_t::Tx;
using PeerPosition_t = typename Adaptor::PeerPosition_t;
// Use Adaptor::Position_t for RNG support (ExtendedPosition)
using Proposal_t = ConsensusProposal<
NodeID_t,
typename Ledger_t::ID,
typename Adaptor::Position_t>;
typename TxSet_t::ID>;
using Result = ConsensusResult<Adaptor>;
@@ -402,18 +373,6 @@ public:
void
timerEntry(NetClock::time_point const& now);
/** Whether the RNG sub-state machine wants faster polling.
Returns true during ConvergingCommit and ConvergingReveal so the
heartbeat timer can use a shorter interval (e.g. 250ms) instead
of the normal ledgerGRANULARITY (1s).
*/
bool
inRngSubState() const
{
return estState_ != EstablishState::ConvergingTx;
}
/** Process a transaction set acquired from the network
@param now The network adjusted time
@@ -583,8 +542,6 @@ private:
Adaptor& adaptor_;
ConsensusPhase phase_{ConsensusPhase::accepted};
EstablishState estState_{EstablishState::ConvergingTx};
std::chrono::steady_clock::time_point revealPhaseStart_{};
MonitoredMode mode_{ConsensusMode::observing};
bool firstRound_ = true;
bool haveCloseTimeConsensus_ = false;
@@ -719,40 +676,12 @@ Consensus<Adaptor>::startRoundInternal(
convergePercent_ = 0;
haveCloseTimeConsensus_ = false;
openTime_.reset(clock_.now());
// Capture last round's proposer IDs before clearing — this is the
// best signal for who will propose this round.
hash_set<NodeID_t> lastProposers;
if constexpr (requires(Adaptor & a) {
a.setExpectedProposers(hash_set<NodeID_t>{});
})
{
for (auto const& [id, pos] : currPeerPositions_)
lastProposers.insert(id);
}
currPeerPositions_.clear();
acquired_.clear();
rawCloseTimes_.peers.clear();
rawCloseTimes_.self = {};
deadNodes_.clear();
// Reset RNG state for new round if adaptor supports it
if constexpr (requires(Adaptor & a) { a.clearRngState(); })
{
adaptor_.clearRngState();
// Populate UNL cache for all nodes (including observers).
// onClose only caches for proposing validators, so observers
// would otherwise have an empty set and reject all RNG data.
adaptor_.cacheUNLReport();
// Set expected proposers: recent proposers > UNL Report > 80% fallback
adaptor_.setExpectedProposers(std::move(lastProposers));
}
// Reset establish sub-state for new round
estState_ = EstablishState::ConvergingTx;
revealPhaseStart_ = {};
closeResolution_ = getNextLedgerTimeResolution(
previousLedger_.closeTimeResolution(),
previousLedger_.closeAgree(),
@@ -850,58 +779,6 @@ Consensus<Adaptor>::peerProposalInternal(
currPeerPositions_.emplace(peerID, newPeerPos);
}
// Harvest RNG data from proposal if adaptor supports it
if constexpr (requires(Adaptor & a, PeerPosition_t const& pp) {
a.harvestRngData(
pp.proposal().nodeID(),
pp.publicKey(),
pp.proposal().position(),
pp.proposal().proposeSeq(),
pp.proposal().closeTime(),
pp.proposal().prevLedger(),
pp.signature());
})
{
JLOG(j_.debug()) << "RNG: peerProposal from " << peerID << " commit="
<< (newPeerProp.position().myCommitment ? "yes" : "no")
<< " reveal="
<< (newPeerProp.position().myReveal ? "yes" : "no");
adaptor_.harvestRngData(
peerID,
newPeerPos.publicKey(),
newPeerProp.position(),
newPeerProp.proposeSeq(),
newPeerProp.closeTime(),
newPeerProp.prevLedger(),
newPeerPos.signature());
// Trigger fetch for unknown RNG set hashes, but only once we've
// built our own local set for diffing. During ConvergingTx all
// data arrives via proposal leaves — fetching a peer's commitSet
// before we have our own just generates unnecessary traffic.
//
// IMPORTANT: SHAMap fetch/diff/merge is a safety net for the
// rare case where active proposers have slightly different
// commit/reveal sets due to dropped proposals. It does NOT
// help late-joining nodes: a node that restarts mid-round
// enters as proposing=false and cannot generate commitments
// (onClose gates on proposing). It must observe for at least
// one full round before consensus promotes it to proposing.
// The primary data transport is proposals themselves — the
// SHAMap sync is belt-and-suspenders, not the critical path.
if constexpr (requires(Adaptor & a) {
a.fetchRngSetIfNeeded(std::optional<uint256>{});
})
{
if (estState_ != EstablishState::ConvergingTx)
adaptor_.fetchRngSetIfNeeded(
newPeerProp.position().commitSetHash);
if (estState_ == EstablishState::ConvergingReveal)
adaptor_.fetchRngSetIfNeeded(
newPeerProp.position().entropySetHash);
}
}
if (newPeerProp.isInitial())
{
// Record the close time estimate
@@ -1409,190 +1286,6 @@ Consensus<Adaptor>::phaseEstablish()
return;
}
//@@start rng-phase-establish-substates
// --- RNG Sub-state Checkpoints (if adaptor supports RNG) ---
// These sub-states use union convergence (not avalanche).
// Commits and reveals arrive piggybacked on proposals, so by the time
// we reach these checkpoints most data is already collected. The
// SHAMap fetch/diff/merge in handleAcquiredRngSet is a safety net
// for stragglers, not a voting mechanism.
//
// Why 80% for commits but 100% for reveals?
//
// COMMITS: quorum is based on the active UNL, but we don't know
// which UNL members are actually online until they propose — and
// commitments ride on those same proposals. Chicken-and-egg: we
// learn who's active by receiving their commits. 80% of the UNL
// says "we've heard from enough validators, let's go." The
// impossible-quorum early-exit handles the case where too few
// participants exist to ever reach 80%.
//
// REVEALS: the commit set is now locked and we know *exactly* who
// committed. Every committer broadcasts their reveal immediately.
// So we wait for ALL of them, with rngREVEAL_TIMEOUT (measured
// from ConvergingReveal entry) as the safety valve for nodes that
// crash between commit and reveal.
if constexpr (requires(Adaptor & a) {
a.hasQuorumOfCommits();
a.buildCommitSet(typename Ledger_t::Seq{});
a.generateEntropySecret();
})
{
auto const buildSeq = previousLedger_.seq() + typename Ledger_t::Seq{1};
JLOG(j_.debug()) << "RNG: phaseEstablish estState="
<< static_cast<int>(estState_);
if (estState_ == EstablishState::ConvergingTx)
{
if (adaptor_.hasQuorumOfCommits()) // all expected proposers (80%
// fallback)
{
auto commitSetHash = adaptor_.buildCommitSet(buildSeq);
// Keep the same entropy secret from onClose() — do NOT
// regenerate. The commitment in the commitSet was built
// from that original secret; regenerating would make the
// later reveal fail verification.
auto newPos = result_->position.position();
newPos.commitSetHash = commitSetHash;
result_->position.changePosition(
newPos, asCloseTime(result_->position.closeTime()), now_);
if (mode_.get() == ConsensusMode::proposing)
adaptor_.propose(result_->position);
estState_ = EstablishState::ConvergingCommit;
JLOG(j_.debug()) << "RNG: transitioned to ConvergingCommit"
<< " commitSet=" << commitSetHash;
return; // Wait for next tick
}
// Don't let the round close while waiting for commit quorum.
// Without this gate, execution falls through to the normal
// consensus close logic and nodes inject partial/zero entropy
// while others are still collecting — causing ledger mismatches.
//
// However, if we've already converged on the txSet (which we
// have — haveConsensus() passed above) and there aren't enough
// participants to ever reach quorum, skip immediately. With
// 3 nodes and quorum=3, losing one node means 2/3 commits
// forever — waiting 3s per round just delays recovery.
//
// NOTE: Late-joining nodes (e.g. restarting after a crash)
// cannot help here. They enter the round as proposing=false
// and onClose() skips commitment generation for non-proposers.
// It takes at least one full round of observing before
// consensus promotes them to proposing.
{
// participants = peers + ourselves
auto const participants = currPeerPositions_.size() + 1;
auto const threshold = adaptor_.quorumThreshold();
bool const impossible = participants < threshold;
if (impossible)
{
JLOG(j_.debug())
<< "RNG: skipping commit wait (participants="
<< participants << " < threshold=" << threshold << ")";
// Fall through to close with zero entropy
}
else
{
bool timeout =
result_->roundTime.read() > parms.rngPIPELINE_TIMEOUT;
if (!timeout)
return; // Wait for more commits
// Timeout waiting for all expected proposers.
// If we still have quorum (80% of UNL), proceed
// with what we have — the SHAMap merge handles
// any fuzziness for this transition round.
auto const commits = adaptor_.pendingCommitCount();
auto const quorum = adaptor_.quorumThreshold();
if (commits >= quorum)
{
JLOG(j_.info())
<< "RNG: commit timeout but have quorum ("
<< commits << "/" << quorum
<< "), proceeding with partial set";
// Jump to the same path as hasQuorumOfCommits
auto commitSetHash = adaptor_.buildCommitSet(buildSeq);
auto newPos = result_->position.position();
newPos.commitSetHash = commitSetHash;
result_->position.changePosition(
newPos,
asCloseTime(result_->position.closeTime()),
now_);
if (mode_.get() == ConsensusMode::proposing)
adaptor_.propose(result_->position);
estState_ = EstablishState::ConvergingCommit;
JLOG(j_.debug())
<< "RNG: transitioned to ConvergingCommit"
<< " commitSet=" << commitSetHash
<< " (timeout fallback)";
return;
}
// Truly below quorum: fall through to zero entropy
}
}
}
else if (estState_ == EstablishState::ConvergingCommit)
{
// haveConsensus() implies agreement on commitSetHash
auto newPos = result_->position.position();
newPos.myReveal = adaptor_.getEntropySecret();
result_->position.changePosition(
newPos, asCloseTime(result_->position.closeTime()), now_);
if (mode_.get() == ConsensusMode::proposing)
adaptor_.propose(result_->position);
estState_ = EstablishState::ConvergingReveal;
revealPhaseStart_ = std::chrono::steady_clock::now();
JLOG(j_.debug()) << "RNG: transitioned to ConvergingReveal"
<< " reveal=" << adaptor_.getEntropySecret();
return; // Wait for next tick
}
else if (estState_ == EstablishState::ConvergingReveal)
{
// Wait for ALL committers to reveal (not just 80%).
// Timeout measured from ConvergingReveal entry, not round start.
auto const elapsed =
std::chrono::steady_clock::now() - revealPhaseStart_;
bool timeout = elapsed > parms.rngREVEAL_TIMEOUT;
bool ready = false;
if ((haveConsensus() && adaptor_.hasMinimumReveals()) || timeout)
{
if (timeout && !adaptor_.hasAnyReveals())
{
adaptor_.setEntropyFailed();
JLOG(j_.warn()) << "RNG: entropy failed (no reveals)";
}
else
{
auto entropySetHash = adaptor_.buildEntropySet(buildSeq);
auto newPos = result_->position.position();
newPos.entropySetHash = entropySetHash;
result_->position.changePosition(
newPos,
asCloseTime(result_->position.closeTime()),
now_);
JLOG(j_.debug()) << "RNG: built entropySet";
}
ready = true;
}
if (!ready)
return;
}
}
//@@end rng-phase-establish-substates
JLOG(j_.info()) << "Converge cutoff (" << currPeerPositions_.size()
<< " participants)";
adaptor_.updateOperatingMode(currPeerPositions_.size());
@@ -1617,7 +1310,6 @@ Consensus<Adaptor>::closeLedger()
assert(!result_);
phase_ = ConsensusPhase::establish;
estState_ = EstablishState::ConvergingTx;
JLOG(j_.debug()) << "transitioned to ConsensusPhase::establish";
rawCloseTimes_.self = now_;
@@ -1823,21 +1515,7 @@ Consensus<Adaptor>::updateOurPositions()
<< consensusCloseTime.time_since_epoch().count()
<< ", tx " << newID;
// Preserve sidecar data (RNG fields), only update txSetHash
// Use type traits to conditionally handle ExtendedPosition vs simple ID
if constexpr (requires(typename Adaptor::Position_t p) {
p.updateTxSet(newID);
})
{
auto currentPos = result_->position.position();
currentPos.updateTxSet(newID);
result_->position.changePosition(
currentPos, consensusCloseTime, now_);
}
else
{
result_->position.changePosition(newID, consensusCloseTime, now_);
}
result_->position.changePosition(newID, consensusCloseTime, now_);
// Share our new transaction set and update disputes
// if we haven't already received it

View File

@@ -88,32 +88,6 @@ struct ConsensusParms
*/
std::chrono::milliseconds ledgerMAX_CONSENSUS = std::chrono::seconds{10};
/** Maximum time to wait for RNG commit/reveal quorum before giving up.
*
* This is intentionally shorter than ledgerMAX_CONSENSUS because
* waiting longer won't help: a node that missed the start of the
* round (e.g. restarting after a crash) enters as proposing=false
* and cannot generate commitments until consensus promotes it to
* proposing — which takes at least one full round of observing.
* Waiting the full 10s just delays the inevitable ZERO-entropy
* fallback and slows recovery for the restarting node (it can't
* catch up until the survivors close a ledger).
*
* 3s is long enough for commits to propagate on any reasonable
* network, but short enough that a missing-node scenario recovers
* quickly via the ZERO-entropy fallback path.
*/
std::chrono::milliseconds rngPIPELINE_TIMEOUT = std::chrono::seconds{3};
/** Reveal-phase timeout — maximum time to wait for reveals after
* entering ConvergingReveal. Measured from the moment we broadcast
* our own reveal, NOT from round start. This is the defense against
* a validator that commits but never reveals (crash or malice).
* 1.5s is generous for propagation on any network.
*/
std::chrono::milliseconds rngREVEAL_TIMEOUT =
std::chrono::milliseconds{1500};
//! Minimum number of seconds to wait to ensure others have computed the LCL
std::chrono::milliseconds ledgerMIN_CLOSE = std::chrono::seconds{2};

View File

@@ -205,20 +205,16 @@ struct ConsensusResult
using NodeID_t = typename Traits::NodeID_t;
using Tx_t = typename TxSet_t::Tx;
// Use Traits::Position_t for RNG support (defaults to TxSet_t::ID)
using Proposal_t = ConsensusProposal<
NodeID_t,
typename Ledger_t::ID,
typename Traits::Position_t>;
typename TxSet_t::ID>;
using Dispute_t = DisputedTx<Tx_t, NodeID_t>;
ConsensusResult(TxSet_t&& s, Proposal_t&& p)
: txns{std::move(s)}, position{std::move(p)}
{
// Use implicit conversion to uint256 for ExtendedPosition
assert(
txns.id() ==
static_cast<typename TxSet_t::ID>(position.position()));
assert(txns.id() == position.position());
}
//! The set of transactions consensus agrees go in the ledger

View File

@@ -1935,9 +1935,7 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
return;
}
// Position data must be at least 32 bytes (txSetHash), previous ledger
// exactly 32
if (set.currenttxhash().size() < 32 ||
if (!stringIsUint256Sized(set.currenttxhash()) ||
!stringIsUint256Sized(set.previousledger()))
{
JLOG(p_journal_.warn()) << "Proposal: malformed";
@@ -1957,34 +1955,13 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
if (!isTrusted && app_.config().RELAY_UNTRUSTED_PROPOSALS == -1)
return;
// Deserialize ExtendedPosition (handles both legacy 32-byte and extended
// formats). Reject malformed payloads early.
auto const positionSlice = makeSlice(set.currenttxhash());
SerialIter sit(positionSlice);
auto const maybePosition =
ExtendedPosition::fromSerialIter(sit, positionSlice.size());
if (!maybePosition)
{
JLOG(p_journal_.warn())
<< "Malformed proposal payload (" << positionSlice.size()
<< " bytes) from " << toBase58(TokenType::NodePublic, publicKey);
fee_ = Resource::feeInvalidRequest;
return;
}
ExtendedPosition const& position = *maybePosition;
JLOG(p_journal_.debug())
<< "RNG: recv proposal size=" << positionSlice.size()
<< " commit=" << (position.myCommitment ? "yes" : "no")
<< " reveal=" << (position.myReveal ? "yes" : "no") << " from "
<< toBase58(TokenType::NodePublic, publicKey);
uint256 const proposeHash{set.currenttxhash()};
uint256 const prevLedger{set.previousledger()};
NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};
uint256 const suppression = proposalUniqueId(
position,
proposeHash,
prevLedger,
set.proposeseq(),
closeTime,
@@ -2031,7 +2008,7 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
RCLCxPeerPos::Proposal{
prevLedger,
set.proposeseq(),
position,
proposeHash,
closeTime,
app_.timeKeeper().closeTime(),
calcNodeID(app_.validatorManifests().getMasterKey(publicKey))});

View File

@@ -74,7 +74,7 @@ namespace detail {
// Feature.cpp. Because it's only used to reserve storage, and determine how
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
// the actual number of amendments. A LogicError on startup will verify this.
static constexpr std::size_t numFeatures = 91;
static constexpr std::size_t numFeatures = 90;
/** Amendments that this server supports and the default voting behavior.
Whether they are enabled depends on the Rules defined in the validated
@@ -378,7 +378,6 @@ extern uint256 const fixInvalidTxFlags;
extern uint256 const featureExtendedHookState;
extern uint256 const fixCronStacking;
extern uint256 const fixHookAPI20251128;
extern uint256 const featureConsensusEntropy;
} // namespace ripple
#endif

View File

@@ -115,10 +115,6 @@ negativeUNL() noexcept;
Keylet const&
UNLReport() noexcept;
/** The (fixed) index of the object containing consensus-derived entropy. */
Keylet const&
consensusEntropy() noexcept;
/** The beginning of an order book */
struct book_t
{

View File

@@ -183,15 +183,7 @@ enum LedgerEntryType : std::uint16_t
*
* \sa keylet::UNLReport
*/
ltUNL_REPORT = 0x0052,
/** The ledger object which stores consensus-derived entropy.
\note This is a singleton: only one such object exists in the ledger.
\sa keylet::consensusEntropy
*/
ltCONSENSUS_ENTROPY = 0x0058,
ltUNL_REPORT = 0x0052,
//---------------------------------------------------------------------------
/** A special type, matching any ledger entry type.

View File

@@ -355,7 +355,6 @@ extern SF_UINT16 const sfHookEmitCount;
extern SF_UINT16 const sfHookExecutionIndex;
extern SF_UINT16 const sfHookApiVersion;
extern SF_UINT16 const sfHookStateScale;
extern SF_UINT16 const sfEntropyCount;
// 32-bit integers (common)
extern SF_UINT32 const sfNetworkID;

View File

@@ -206,13 +206,6 @@ enum CronSetFlags : uint32_t {
};
constexpr std::uint32_t const tfCronSetMask = ~(tfUniversal | tfCronUnset);
// ConsensusEntropy flags (used on ttCONSENSUS_ENTROPY SHAMap entries):
enum ConsensusEntropyFlags : uint32_t {
tfEntropyCommit = 0x00000001, // entry is a commitment in commitSet
tfEntropyReveal = 0x00000002, // entry is a reveal in entropySet
};
// flag=0 (no tfEntropyCommit/tfEntropyReveal) = final injected pseudo-tx
// clang-format on
} // namespace ripple

View File

@@ -197,13 +197,6 @@ enum TxType : std::uint16_t
ttUNL_MODIFY = 102,
ttEMIT_FAILURE = 103,
ttUNL_REPORT = 104,
/** This system-generated transaction type is used to record consensus-derived entropy.
The entropy is computed from a commit-reveal scheme during consensus and
written to the ledger for use by hooks and other deterministic applications.
*/
ttCONSENSUS_ENTROPY = 105,
};
// clang-format on

View File

@@ -484,7 +484,6 @@ REGISTER_FIX (fixInvalidTxFlags, Supported::yes, VoteBehavior::De
REGISTER_FEATURE(ExtendedHookState, Supported::yes, VoteBehavior::DefaultNo);
REGISTER_FIX (fixCronStacking, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FIX (fixHookAPI20251128, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FEATURE(ConsensusEntropy, Supported::yes, VoteBehavior::DefaultNo);
// The following amendments are obsolete, but must remain supported
// because they could potentially get enabled.

View File

@@ -73,7 +73,6 @@ enum class LedgerNameSpace : std::uint16_t {
IMPORT_VLSEQ = 'I',
UNL_REPORT = 'R',
CRON = 'L',
CONSENSUS_ENTROPY = 'X',
// No longer used or supported. Left here to reserve the space
// to avoid accidental reuse.
@@ -497,14 +496,6 @@ cron(uint32_t timestamp, std::optional<AccountID> const& id)
return {ltCRON, uint256::fromVoid(h)};
}
Keylet const&
consensusEntropy() noexcept
{
static Keylet const ret{
ltCONSENSUS_ENTROPY, indexHash(LedgerNameSpace::CONSENSUS_ENTROPY)};
return ret;
}
} // namespace keylet
} // namespace ripple

View File

@@ -381,17 +381,6 @@ LedgerFormats::LedgerFormats()
},
commonFields);
add(jss::ConsensusEntropy,
ltCONSENSUS_ENTROPY,
{
{sfDigest, soeREQUIRED}, // The consensus-derived entropy
{sfEntropyCount, soeREQUIRED}, // Number of validators that contributed
{sfLedgerSequence, soeREQUIRED}, // Ledger this entropy is for
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
},
commonFields);
// clang-format on
}

View File

@@ -103,7 +103,6 @@ CONSTRUCT_TYPED_SFIELD(sfHookEmitCount, "HookEmitCount", UINT16,
CONSTRUCT_TYPED_SFIELD(sfHookExecutionIndex, "HookExecutionIndex", UINT16, 19);
CONSTRUCT_TYPED_SFIELD(sfHookApiVersion, "HookApiVersion", UINT16, 20);
CONSTRUCT_TYPED_SFIELD(sfHookStateScale, "HookStateScale", UINT16, 21);
CONSTRUCT_TYPED_SFIELD(sfEntropyCount, "EntropyCount", UINT16, 99);
// 32-bit integers (common)
CONSTRUCT_TYPED_SFIELD(sfNetworkID, "NetworkID", UINT32, 1);

View File

@@ -615,8 +615,7 @@ isPseudoTx(STObject const& tx)
auto tt = safe_cast<TxType>(*t);
return tt == ttAMENDMENT || tt == ttFEE || tt == ttUNL_MODIFY ||
tt == ttEMIT_FAILURE || tt == ttUNL_REPORT || tt == ttCRON ||
tt == ttCONSENSUS_ENTROPY;
tt == ttEMIT_FAILURE || tt == ttUNL_REPORT || tt == ttCRON;
}
} // namespace ripple

View File

@@ -490,16 +490,6 @@ TxFormats::TxFormats()
{sfStartTime, soeOPTIONAL},
},
commonFields);
add(jss::ConsensusEntropy,
ttCONSENSUS_ENTROPY,
{
{sfLedgerSequence, soeREQUIRED},
{sfDigest, soeREQUIRED},
{sfEntropyCount, soeREQUIRED},
{sfBlob, soeOPTIONAL}, // Proposal proof for SHAMap entries
},
commonFields);
}
TxFormats const&

View File

@@ -254,8 +254,7 @@ JSS(count); // in: AccountTx*, ValidatorList
JSS(counters); // in/out: retrieve counters
JSS(coins);
JSS(children);
JSS(ConsensusEntropy); // transaction and ledger type.
JSS(ctid); // in/out: Tx RPC
JSS(ctid); // in/out: Tx RPC
JSS(cres);
JSS(cron);
JSS(currency_a); // out: BookChanges

View File

@@ -109,22 +109,14 @@ public:
Consumer
newInboundEndpoint(beast::IP::Endpoint const& address)
{
//@@start rng-local-testnet-resource-bucket
// Inbound connections from the same IP normally share one
// resource bucket (port stripped) for DoS protection. For
// loopback addresses, preserve the port so local testnet nodes
// each get their own bucket instead of all sharing one.
auto const key = is_loopback(address) ? address : address.at_port(0);
//@@end rng-local-testnet-resource-bucket
Entry* entry(nullptr);
{
std::lock_guard _(lock_);
auto [resultIt, resultInserted] = table_.emplace(
std::piecewise_construct,
std::make_tuple(kindInbound, key),
std::make_tuple(m_clock.now()));
std::make_tuple(kindInbound, address.at_port(0)), // Key
std::make_tuple(m_clock.now())); // Entry
entry = &resultIt->second;
entry->key = &resultIt->first;

View File

@@ -1,445 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2024 XRPL Labs
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/hook/Enum.h>
#include <ripple/beast/unit_test.h>
#include <ripple/protocol/Feature.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/TxFlags.h>
#include <ripple/protocol/jss.h>
#include <test/app/ConsensusEntropy_test_hooks.h>
#include <test/jtx.h>
#include <test/jtx/hook.h>
namespace ripple {
namespace test {
using TestHook = std::vector<uint8_t> const&;
#define BEAST_REQUIRE(x) \
{ \
BEAST_EXPECT(!!(x)); \
if (!(x)) \
return; \
}
#define HSFEE fee(100'000'000)
#define M(m) memo(m, "", "")
class ConsensusEntropy_test : public beast::unit_test::suite
{
static void
overrideFlag(Json::Value& jv)
{
jv[jss::Flags] = hsfOVERRIDE;
}
void
testSLECreated()
{
testcase("SLE created on ledger close");
using namespace jtx;
Env env{
*this,
envconfig(),
supported_amendments() | featureConsensusEntropy,
nullptr};
BEAST_EXPECT(!env.le(keylet::consensusEntropy()));
env.close();
auto const sle = env.le(keylet::consensusEntropy());
BEAST_REQUIRE(sle);
auto const digest = sle->getFieldH256(sfDigest);
BEAST_EXPECT(digest != uint256{});
auto const count = sle->getFieldU16(sfEntropyCount);
BEAST_EXPECT(count >= 5);
auto const sleSeq = sle->getFieldU32(sfLedgerSequence);
BEAST_EXPECT(sleSeq == env.closed()->seq());
}
void
testSLEUpdatedOnSubsequentClose()
{
testcase("SLE updated on subsequent ledger close");
using namespace jtx;
Env env{
*this,
envconfig(),
supported_amendments() | featureConsensusEntropy,
nullptr};
env.close();
auto const sle1 = env.le(keylet::consensusEntropy());
BEAST_REQUIRE(sle1);
auto const digest1 = sle1->getFieldH256(sfDigest);
auto const seq1 = sle1->getFieldU32(sfLedgerSequence);
env.close();
auto const sle2 = env.le(keylet::consensusEntropy());
BEAST_REQUIRE(sle2);
auto const digest2 = sle2->getFieldH256(sfDigest);
auto const seq2 = sle2->getFieldU32(sfLedgerSequence);
BEAST_EXPECT(digest2 != digest1);
BEAST_EXPECT(seq2 == seq1 + 1);
}
void
testNoSLEWithoutAmendment()
{
testcase("No SLE without amendment");
using namespace jtx;
Env env{*this};
env.close();
env.close();
BEAST_EXPECT(!env.le(keylet::consensusEntropy()));
}
void
testDice()
{
testcase("Hook dice() API");
using namespace jtx;
Env env{
*this,
envconfig(),
supported_amendments() | featureConsensusEntropy,
nullptr};
auto const alice = Account{"alice"};
env.fund(XRP(10000), alice);
env.close();
// Entropy SLE must exist before hook can use dice()
BEAST_REQUIRE(env.le(keylet::consensusEntropy()));
// Set the hook
TestHook hook = consensusentropy_test_wasm[R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t dice(uint32_t sides);
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
int64_t cbak(uint32_t r) { return 0; }
int64_t hook(uint32_t r)
{
_g(1,1);
// dice(6) should return 0..5
int64_t result = dice(6);
// negative means error
if (result < 0)
rollback(0, 0, result);
if (result >= 6)
rollback(0, 0, -1);
// return the dice result as the accept code
return accept(0, 0, result);
}
)[test.hook]"];
env(ripple::test::jtx::hook(alice, {{hso(hook, overrideFlag)}}, 0),
M("set dice hook"),
HSFEE);
env.close();
// Invoke the hook
Json::Value invoke;
invoke[jss::TransactionType] = "Invoke";
invoke[jss::Account] = alice.human();
env(invoke, M("test dice"), fee(XRP(1)));
auto meta = env.meta();
BEAST_REQUIRE(meta);
BEAST_REQUIRE(meta->isFieldPresent(sfHookExecutions));
auto const hookExecutions = meta->getFieldArray(sfHookExecutions);
BEAST_REQUIRE(hookExecutions.size() == 1);
auto const returnCode = hookExecutions[0].getFieldU64(sfHookReturnCode);
std::cerr << " dice(6) returnCode = " << returnCode << " (hex 0x"
<< std::hex << returnCode << std::dec << ")\n";
// dice(6) returns 0..5
BEAST_EXPECT(returnCode <= 5);
// Result should be 3 (accept)
BEAST_EXPECT(hookExecutions[0].getFieldU8(sfHookResult) == 3);
}
void
testRandom()
{
testcase("Hook random() API");
using namespace jtx;
Env env{
*this,
envconfig(),
supported_amendments() | featureConsensusEntropy,
nullptr};
auto const alice = Account{"alice"};
env.fund(XRP(10000), alice);
env.close();
BEAST_REQUIRE(env.le(keylet::consensusEntropy()));
// Hook calls random() to fill a 32-byte buffer, then checks
// the buffer is not all zeroes.
TestHook hook = consensusentropy_test_wasm[R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t random(uint32_t write_ptr, uint32_t write_len);
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
int64_t cbak(uint32_t r) { return 0; }
int64_t hook(uint32_t r)
{
_g(1,1);
uint8_t buf[32];
for (int i = 0; GUARD(32), i < 32; ++i)
buf[i] = 0;
int64_t result = random((uint32_t)buf, 32);
// Should return 32 (bytes written)
if (result != 32)
rollback(0, 0, result);
// Verify buffer is not all zeroes
int nonzero = 0;
for (int i = 0; GUARD(32), i < 32; ++i)
if (buf[i] != 0) nonzero = 1;
if (!nonzero)
rollback(0, 0, -2);
return accept(0, 0, 0);
}
)[test.hook]"];
env(ripple::test::jtx::hook(alice, {{hso(hook, overrideFlag)}}, 0),
M("set random hook"),
HSFEE);
env.close();
Json::Value invoke;
invoke[jss::TransactionType] = "Invoke";
invoke[jss::Account] = alice.human();
env(invoke, M("test random"), fee(XRP(1)));
auto meta = env.meta();
BEAST_REQUIRE(meta);
BEAST_REQUIRE(meta->isFieldPresent(sfHookExecutions));
auto const hookExecutions = meta->getFieldArray(sfHookExecutions);
BEAST_REQUIRE(hookExecutions.size() == 1);
// Return code 0 = all checks passed in the hook
BEAST_EXPECT(hookExecutions[0].getFieldU64(sfHookReturnCode) == 0);
BEAST_EXPECT(hookExecutions[0].getFieldU8(sfHookResult) == 3);
}
void
testDiceConsecutiveCallsDiffer()
{
testcase("Hook dice() consecutive calls return different values");
using namespace jtx;
Env env{
*this,
envconfig(),
supported_amendments() | featureConsensusEntropy,
nullptr};
auto const alice = Account{"alice"};
env.fund(XRP(10000), alice);
env.close();
BEAST_REQUIRE(env.le(keylet::consensusEntropy()));
// dice(1000000) twice — large range makes collision near-impossible
// encode r1 in low 20 bits, r2 in high bits
TestHook hook = consensusentropy_test_wasm[R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t dice(uint32_t sides);
int64_t cbak(uint32_t r) { return 0; }
int64_t hook(uint32_t r)
{
_g(1,1);
int64_t r1 = dice(1000000);
if (r1 < 0)
rollback(0, 0, r1);
int64_t r2 = dice(1000000);
if (r2 < 0)
rollback(0, 0, r2);
// consecutive calls should differ (rngCallCounter)
if (r1 == r2)
rollback(0, 0, -1);
return accept(0, 0, r1 | (r2 << 20));
}
)[test.hook]"];
env(ripple::test::jtx::hook(alice, {{hso(hook, overrideFlag)}}, 0),
M("set dice hook"),
HSFEE);
env.close();
Json::Value invoke;
invoke[jss::TransactionType] = "Invoke";
invoke[jss::Account] = alice.human();
env(invoke, M("test dice consecutive"), fee(XRP(1)));
auto meta = env.meta();
BEAST_REQUIRE(meta);
BEAST_REQUIRE(meta->isFieldPresent(sfHookExecutions));
auto const hookExecutions = meta->getFieldArray(sfHookExecutions);
BEAST_REQUIRE(hookExecutions.size() == 1);
auto const rc = hookExecutions[0].getFieldU64(sfHookReturnCode);
auto const r1 = rc & 0xFFFFF;
auto const r2 = (rc >> 20) & 0xFFFFF;
std::cerr << " two-call dice(1000000): returnCode=" << rc << " hex=0x"
<< std::hex << rc << std::dec << " r1=" << r1 << " r2=" << r2
<< "\n";
// hookResult 3 = accept (would be 1 if r1==r2 triggered rollback)
BEAST_EXPECT(hookExecutions[0].getFieldU8(sfHookResult) == 3);
BEAST_EXPECT(r1 < 1000000);
BEAST_EXPECT(r2 < 1000000);
BEAST_EXPECT(r1 != r2);
}
void
testDiceZeroSides()
{
testcase("Hook dice(0) returns INVALID_ARGUMENT");
using namespace jtx;
Env env{
*this,
envconfig(),
supported_amendments() | featureConsensusEntropy,
nullptr};
auto const alice = Account{"alice"};
env.fund(XRP(10000), alice);
env.close();
BEAST_REQUIRE(env.le(keylet::consensusEntropy()));
// Hook calls dice(0) and returns whatever dice returns.
// dice(0) should return INVALID_ARGUMENT (-7).
TestHook hook = consensusentropy_test_wasm[R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t dice(uint32_t sides);
int64_t cbak(uint32_t r) { return 0; }
int64_t hook(uint32_t r)
{
_g(1,1);
int64_t result = dice(0);
// dice(0) should return negative error code, pass it through
return accept(0, 0, result);
}
)[test.hook]"];
env(ripple::test::jtx::hook(alice, {{hso(hook, overrideFlag)}}, 0),
M("set dice0 hook"),
HSFEE);
env.close();
Json::Value invoke;
invoke[jss::TransactionType] = "Invoke";
invoke[jss::Account] = alice.human();
env(invoke, M("test dice(0)"), fee(XRP(1)));
auto meta = env.meta();
BEAST_REQUIRE(meta);
BEAST_REQUIRE(meta->isFieldPresent(sfHookExecutions));
auto const hookExecutions = meta->getFieldArray(sfHookExecutions);
BEAST_REQUIRE(hookExecutions.size() == 1);
// INVALID_ARGUMENT = -7, encoded as 0x8000000000000000 + abs(code)
// (see applyHook.cpp unsigned_exit_code encoding)
auto const rawCode = hookExecutions[0].getFieldU64(sfHookReturnCode);
int64_t returnCode = (rawCode & 0x8000000000000000ULL)
? -static_cast<int64_t>(rawCode & 0x7FFFFFFFFFFFFFFFULL)
: static_cast<int64_t>(rawCode);
std::cerr << " dice(0) returnCode = " << returnCode << " (raw 0x"
<< std::hex << rawCode << std::dec << ")\n";
BEAST_EXPECT(returnCode == -7);
BEAST_EXPECT(hookExecutions[0].getFieldU8(sfHookResult) == 3);
}
void
run() override
{
    // Suite entry point: invokes every ConsensusEntropy test.
    testSLECreated();
    testSLEUpdatedOnSubsequentClose();
    testNoSLEWithoutAmendment();
    testDice();
    testDiceZeroSides();
    testRandom();
    testDiceConsecutiveCallsDiffer();
}
};
BEAST_DEFINE_TESTSUITE(ConsensusEntropy, app, ripple);
} // namespace test
} // namespace ripple

View File

@@ -1,250 +0,0 @@
// This file is generated by build_test_hooks.py
#ifndef CONSENSUSENTROPY_TEST_WASM_INCLUDED
#define CONSENSUSENTROPY_TEST_WASM_INCLUDED
#include <map>
#include <stdint.h>
#include <string>
#include <vector>
namespace ripple {
namespace test {
std::map<std::string, std::vector<uint8_t>> consensusentropy_test_wasm = {
/* ==== WASM: 0 ==== */
{R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t dice(uint32_t sides);
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
int64_t cbak(uint32_t r) { return 0; }
int64_t hook(uint32_t r)
{
_g(1,1);
// dice(6) should return 0..5
int64_t result = dice(6);
// negative means error
if (result < 0)
rollback(0, 0, result);
if (result >= 6)
rollback(0, 0, -1);
// return the dice result as the accept code
return accept(0, 0, result);
}
)[test.hook]",
{
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x13U,
0x03U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x01U, 0x7FU,
0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x02U,
0x31U, 0x04U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U,
0x00U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x04U, 0x64U, 0x69U, 0x63U, 0x65U,
0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU,
0x6CU, 0x62U, 0x61U, 0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU,
0x76U, 0x06U, 0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U,
0x03U, 0x03U, 0x02U, 0x01U, 0x01U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U,
0x06U, 0x21U, 0x05U, 0x7FU, 0x01U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU,
0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U,
0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU,
0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x07U, 0x0FU, 0x02U, 0x04U, 0x63U,
0x62U, 0x61U, 0x6BU, 0x00U, 0x04U, 0x04U, 0x68U, 0x6FU, 0x6FU, 0x6BU,
0x00U, 0x05U, 0x0AU, 0xD7U, 0x80U, 0x00U, 0x02U, 0x84U, 0x80U, 0x00U,
0x00U, 0x42U, 0x00U, 0x0BU, 0xCCU, 0x80U, 0x00U, 0x01U, 0x02U, 0x7EU,
0x41U, 0x01U, 0x41U, 0x01U, 0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U,
0x1AU, 0x41U, 0x06U, 0x10U, 0x81U, 0x80U, 0x80U, 0x80U, 0x00U, 0x22U,
0x01U, 0x21U, 0x02U, 0x02U, 0x40U, 0x02U, 0x40U, 0x20U, 0x01U, 0x42U,
0x00U, 0x53U, 0x0DU, 0x00U, 0x42U, 0x7FU, 0x21U, 0x02U, 0x20U, 0x01U,
0x42U, 0x06U, 0x53U, 0x0DU, 0x01U, 0x0BU, 0x41U, 0x00U, 0x41U, 0x00U,
0x20U, 0x02U, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU,
0x41U, 0x00U, 0x41U, 0x00U, 0x20U, 0x01U, 0x10U, 0x83U, 0x80U, 0x80U,
0x80U, 0x00U, 0x0BU,
}},
/* ==== WASM: 1 ==== */
{R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t random(uint32_t write_ptr, uint32_t write_len);
#define GUARD(maxiter) _g((1ULL << 31U) + __LINE__, (maxiter)+1)
int64_t cbak(uint32_t r) { return 0; }
int64_t hook(uint32_t r)
{
_g(1,1);
uint8_t buf[32];
for (int i = 0; GUARD(32), i < 32; ++i)
buf[i] = 0;
int64_t result = random((uint32_t)buf, 32);
// Should return 32 (bytes written)
if (result != 32)
rollback(0, 0, result);
// Verify buffer is not all zeroes
int nonzero = 0;
for (int i = 0; GUARD(32), i < 32; ++i)
if (buf[i] != 0) nonzero = 1;
if (!nonzero)
rollback(0, 0, -2);
return accept(0, 0, 0);
}
)[test.hook]",
{
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x19U,
0x04U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x02U, 0x7FU,
0x7FU, 0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU,
0x60U, 0x01U, 0x7FU, 0x01U, 0x7EU, 0x02U, 0x33U, 0x04U, 0x03U, 0x65U,
0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U, 0x00U, 0x03U, 0x65U, 0x6EU,
0x76U, 0x06U, 0x72U, 0x61U, 0x6EU, 0x64U, 0x6FU, 0x6DU, 0x00U, 0x01U,
0x03U, 0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU, 0x6CU, 0x62U,
0x61U, 0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x06U,
0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U, 0x03U, 0x03U,
0x02U, 0x03U, 0x03U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U, 0x06U, 0x21U,
0x05U, 0x7FU, 0x01U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U,
0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU,
0x7FU, 0x00U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U,
0x80U, 0x08U, 0x0BU, 0x07U, 0x0FU, 0x02U, 0x04U, 0x63U, 0x62U, 0x61U,
0x6BU, 0x00U, 0x04U, 0x04U, 0x68U, 0x6FU, 0x6FU, 0x6BU, 0x00U, 0x05U,
0x0AU, 0x8DU, 0x82U, 0x00U, 0x02U, 0x84U, 0x80U, 0x00U, 0x00U, 0x42U,
0x00U, 0x0BU, 0x82U, 0x82U, 0x00U, 0x03U, 0x02U, 0x7FU, 0x01U, 0x7EU,
0x02U, 0x7FU, 0x23U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x41U, 0x20U,
0x6BU, 0x22U, 0x01U, 0x24U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x41U,
0x01U, 0x41U, 0x01U, 0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU,
0x41U, 0x90U, 0x80U, 0x80U, 0x80U, 0x78U, 0x41U, 0x21U, 0x10U, 0x80U,
0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x41U, 0x00U, 0x21U, 0x02U, 0x03U,
0x40U, 0x41U, 0x90U, 0x80U, 0x80U, 0x80U, 0x78U, 0x41U, 0x21U, 0x10U,
0x00U, 0x1AU, 0x20U, 0x01U, 0x20U, 0x02U, 0x6AU, 0x41U, 0x00U, 0x3AU,
0x00U, 0x00U, 0x41U, 0x90U, 0x80U, 0x80U, 0x80U, 0x78U, 0x41U, 0x21U,
0x1AU, 0x01U, 0x01U, 0x01U, 0x01U, 0x01U, 0x1AU, 0x20U, 0x02U, 0x41U,
0x01U, 0x6AU, 0x22U, 0x02U, 0x41U, 0x20U, 0x47U, 0x0DU, 0x00U, 0x0BU,
0x02U, 0x40U, 0x20U, 0x01U, 0x41U, 0x20U, 0x10U, 0x81U, 0x80U, 0x80U,
0x80U, 0x00U, 0x22U, 0x03U, 0x42U, 0x20U, 0x51U, 0x0DU, 0x00U, 0x41U,
0x00U, 0x41U, 0x00U, 0x20U, 0x03U, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U,
0x00U, 0x1AU, 0x0BU, 0x41U, 0x9BU, 0x80U, 0x80U, 0x80U, 0x78U, 0x41U,
0x21U, 0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x41U, 0x00U,
0x21U, 0x02U, 0x41U, 0x00U, 0x21U, 0x04U, 0x03U, 0x40U, 0x41U, 0x9BU,
0x80U, 0x80U, 0x80U, 0x78U, 0x41U, 0x21U, 0x10U, 0x80U, 0x80U, 0x80U,
0x80U, 0x00U, 0x1AU, 0x20U, 0x01U, 0x20U, 0x02U, 0x6AU, 0x2DU, 0x00U,
0x00U, 0x21U, 0x05U, 0x41U, 0x01U, 0x20U, 0x04U, 0x20U, 0x05U, 0x1BU,
0x21U, 0x04U, 0x20U, 0x02U, 0x41U, 0x01U, 0x6AU, 0x22U, 0x02U, 0x41U,
0x20U, 0x47U, 0x0DU, 0x00U, 0x0BU, 0x02U, 0x40U, 0x20U, 0x04U, 0x0DU,
0x00U, 0x41U, 0x00U, 0x41U, 0x00U, 0x42U, 0x7EU, 0x10U, 0x82U, 0x80U,
0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x41U, 0x00U, 0x41U, 0x00U, 0x42U,
0x00U, 0x10U, 0x83U, 0x80U, 0x80U, 0x80U, 0x00U, 0x21U, 0x03U, 0x20U,
0x01U, 0x41U, 0x20U, 0x6AU, 0x24U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U,
0x20U, 0x03U, 0x0BU,
}},
/* ==== WASM: 2 ==== */
{R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t dice(uint32_t sides);
int64_t cbak(uint32_t r) { return 0; }
int64_t hook(uint32_t r)
{
_g(1,1);
int64_t r1 = dice(1000000);
if (r1 < 0)
rollback(0, 0, r1);
int64_t r2 = dice(1000000);
if (r2 < 0)
rollback(0, 0, r2);
// consecutive calls should differ (rngCallCounter)
if (r1 == r2)
rollback(0, 0, -1);
return accept(0, 0, r1 | (r2 << 20));
}
)[test.hook]",
{
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x13U,
0x03U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x01U, 0x7FU,
0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x02U,
0x31U, 0x04U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U,
0x00U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x04U, 0x64U, 0x69U, 0x63U, 0x65U,
0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x08U, 0x72U, 0x6FU, 0x6CU,
0x6CU, 0x62U, 0x61U, 0x63U, 0x6BU, 0x00U, 0x02U, 0x03U, 0x65U, 0x6EU,
0x76U, 0x06U, 0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, 0x00U, 0x02U,
0x03U, 0x03U, 0x02U, 0x01U, 0x01U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U,
0x06U, 0x21U, 0x05U, 0x7FU, 0x01U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU,
0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U,
0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU,
0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x07U, 0x0FU, 0x02U, 0x04U, 0x63U,
0x62U, 0x61U, 0x6BU, 0x00U, 0x04U, 0x04U, 0x68U, 0x6FU, 0x6FU, 0x6BU,
0x00U, 0x05U, 0x0AU, 0x85U, 0x81U, 0x00U, 0x02U, 0x84U, 0x80U, 0x00U,
0x00U, 0x42U, 0x00U, 0x0BU, 0xFAU, 0x80U, 0x00U, 0x01U, 0x02U, 0x7EU,
0x41U, 0x01U, 0x41U, 0x01U, 0x10U, 0x80U, 0x80U, 0x80U, 0x80U, 0x00U,
0x1AU, 0x02U, 0x40U, 0x41U, 0xC0U, 0x84U, 0x3DU, 0x10U, 0x81U, 0x80U,
0x80U, 0x80U, 0x00U, 0x22U, 0x01U, 0x42U, 0x7FU, 0x55U, 0x0DU, 0x00U,
0x41U, 0x00U, 0x41U, 0x00U, 0x20U, 0x01U, 0x10U, 0x82U, 0x80U, 0x80U,
0x80U, 0x00U, 0x1AU, 0x0BU, 0x02U, 0x40U, 0x41U, 0xC0U, 0x84U, 0x3DU,
0x10U, 0x81U, 0x80U, 0x80U, 0x80U, 0x00U, 0x22U, 0x02U, 0x42U, 0x7FU,
0x55U, 0x0DU, 0x00U, 0x41U, 0x00U, 0x41U, 0x00U, 0x20U, 0x02U, 0x10U,
0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU, 0x02U, 0x40U, 0x20U,
0x01U, 0x20U, 0x02U, 0x52U, 0x0DU, 0x00U, 0x41U, 0x00U, 0x41U, 0x00U,
0x42U, 0x7FU, 0x10U, 0x82U, 0x80U, 0x80U, 0x80U, 0x00U, 0x1AU, 0x0BU,
0x41U, 0x00U, 0x41U, 0x00U, 0x20U, 0x02U, 0x42U, 0x14U, 0x86U, 0x20U,
0x01U, 0x84U, 0x10U, 0x83U, 0x80U, 0x80U, 0x80U, 0x00U, 0x0BU,
}},
/* ==== WASM: 3 ==== */
{R"[test.hook](
#include <stdint.h>
extern int32_t _g(uint32_t, uint32_t);
extern int64_t accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
extern int64_t dice(uint32_t sides);
int64_t cbak(uint32_t r) { return 0; }
int64_t hook(uint32_t r)
{
_g(1,1);
int64_t result = dice(0);
// dice(0) should return negative error code, pass it through
return accept(0, 0, result);
}
)[test.hook]",
{
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x13U,
0x03U, 0x60U, 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x01U, 0x7FU,
0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x02U,
0x22U, 0x03U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U,
0x00U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x04U, 0x64U, 0x69U, 0x63U, 0x65U,
0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x06U, 0x61U, 0x63U, 0x63U,
0x65U, 0x70U, 0x74U, 0x00U, 0x02U, 0x03U, 0x03U, 0x02U, 0x01U, 0x01U,
0x05U, 0x03U, 0x01U, 0x00U, 0x02U, 0x06U, 0x21U, 0x05U, 0x7FU, 0x01U,
0x41U, 0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U,
0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U,
0x80U, 0x88U, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU,
0x07U, 0x0FU, 0x02U, 0x04U, 0x63U, 0x62U, 0x61U, 0x6BU, 0x00U, 0x03U,
0x04U, 0x68U, 0x6FU, 0x6FU, 0x6BU, 0x00U, 0x04U, 0x0AU, 0xAAU, 0x80U,
0x00U, 0x02U, 0x84U, 0x80U, 0x00U, 0x00U, 0x42U, 0x00U, 0x0BU, 0x9FU,
0x80U, 0x00U, 0x00U, 0x41U, 0x01U, 0x41U, 0x01U, 0x10U, 0x80U, 0x80U,
0x80U, 0x80U, 0x00U, 0x1AU, 0x41U, 0x00U, 0x41U, 0x00U, 0x41U, 0x00U,
0x10U, 0x81U, 0x80U, 0x80U, 0x80U, 0x00U, 0x10U, 0x82U, 0x80U, 0x80U,
0x80U, 0x00U, 0x0BU,
}},
};
}
} // namespace ripple
#endif

View File

@@ -145,7 +145,6 @@ public:
using namespace csf;
using namespace std::chrono;
//@@start peers-agree
ConsensusParms const parms{};
Sim sim;
PeerGroup peers = sim.createGroup(5);
@@ -175,7 +174,6 @@ public:
BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
}
}
//@@end peers-agree
}
void
@@ -188,7 +186,6 @@ public:
// that have significantly longer network delays to the rest of the
// network
//@@start slow-peer-scenario
// Test when a slow peer doesn't delay a consensus quorum (4/5 agree)
{
ConsensusParms const parms{};
@@ -227,18 +224,16 @@ public:
BEAST_EXPECT(
peer->prevRoundTime == network[0]->prevRoundTime);
// Slow peer's transaction (Tx{0}) didn't make it in time
BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
for (std::uint32_t i = 2; i < network.size(); ++i)
BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
// Tx 0 is still in the open transaction set for next round
// Tx 0 didn't make it
BEAST_EXPECT(
peer->openTxs.find(Tx{0}) != peer->openTxs.end());
}
}
}
//@@end slow-peer-scenario
// Test when the slow peers delay a consensus quorum (4/6 agree)
{
@@ -426,7 +421,6 @@ public:
// the wrong LCL at different phases of consensus
for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
{
//@@start wrong-lcl-scenario
// Consider 10 peers:
// 0 1 2 3 4 5 6 7 8 9
// minority majorityA majorityB
@@ -447,7 +441,6 @@ public:
// This topology can potentially fork with the above trust relations
// but that is intended for this test.
//@@end wrong-lcl-scenario
Sim sim;
@@ -731,7 +724,6 @@ public:
}
sim.run(1);
//@@start fork-threshold
// Fork should not happen for 40% or greater overlap
// Since the overlapped nodes have a UNL that is the union of the
// two cliques, the maximum sized UNL list is the number of peers
@@ -743,7 +735,6 @@ public:
// One for cliqueA, one for cliqueB and one for nodes in both
BEAST_EXPECT(sim.branches() <= 3);
}
//@@end fork-threshold
}
}

View File

@@ -1,444 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2024 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/consensus/RCLCxPeerPos.h>
#include <ripple/beast/unit_test.h>
#include <ripple/consensus/ConsensusProposal.h>
#include <ripple/protocol/SecretKey.h>
#include <ripple/protocol/digest.h>
#include <cstring>
namespace ripple {
namespace test {
class ExtendedPosition_test : public beast::unit_test::suite
{
    // Covers ExtendedPosition's wire format: round-trip serialization,
    // signing-hash and suppression-hash consistency across a
    // serialize/deserialize cycle, rejection of malformed payloads, and
    // equality semantics (txSetHash only).

    // Generate deterministic test hashes
    static uint256
    makeHash(char const* label)
    {
        return sha512Half(Slice(label, std::strlen(label)));
    }

    void
    testSerializationRoundTrip()
    {
        testcase("Serialization round-trip");
        // Empty position (legacy compat)
        {
            auto const txSet = makeHash("txset-a");
            ExtendedPosition pos{txSet};
            Serializer s;
            pos.add(s);
            // Should be exactly 32 bytes (no flags byte)
            BEAST_EXPECT(s.getDataLength() == 32);
            SerialIter sit(s.slice());
            auto deserialized =
                ExtendedPosition::fromSerialIter(sit, s.getDataLength());
            BEAST_EXPECT(deserialized.has_value());
            if (!deserialized)
                return;
            BEAST_EXPECT(deserialized->txSetHash == txSet);
            BEAST_EXPECT(!deserialized->myCommitment);
            BEAST_EXPECT(!deserialized->myReveal);
            BEAST_EXPECT(!deserialized->commitSetHash);
            BEAST_EXPECT(!deserialized->entropySetHash);
        }
        // Position with commitment
        {
            auto const txSet = makeHash("txset-b");
            auto const commit = makeHash("commit-b");
            ExtendedPosition pos{txSet};
            pos.myCommitment = commit;
            Serializer s;
            pos.add(s);
            // 32 (txSet) + 1 (flags) + 32 (commitment) = 65
            BEAST_EXPECT(s.getDataLength() == 65);
            SerialIter sit(s.slice());
            auto deserialized =
                ExtendedPosition::fromSerialIter(sit, s.getDataLength());
            BEAST_EXPECT(deserialized.has_value());
            if (!deserialized)
                return;
            BEAST_EXPECT(deserialized->txSetHash == txSet);
            BEAST_EXPECT(deserialized->myCommitment == commit);
            BEAST_EXPECT(!deserialized->myReveal);
        }
        // Position with all fields
        {
            auto const txSet = makeHash("txset-c");
            auto const commitSet = makeHash("commitset-c");
            auto const entropySet = makeHash("entropyset-c");
            auto const commit = makeHash("commit-c");
            auto const reveal = makeHash("reveal-c");
            ExtendedPosition pos{txSet};
            pos.commitSetHash = commitSet;
            pos.entropySetHash = entropySet;
            pos.myCommitment = commit;
            pos.myReveal = reveal;
            Serializer s;
            pos.add(s);
            // 32 + 1 + 32 + 32 + 32 + 32 = 161
            BEAST_EXPECT(s.getDataLength() == 161);
            SerialIter sit(s.slice());
            auto deserialized =
                ExtendedPosition::fromSerialIter(sit, s.getDataLength());
            BEAST_EXPECT(deserialized.has_value());
            if (!deserialized)
                return;
            BEAST_EXPECT(deserialized->txSetHash == txSet);
            BEAST_EXPECT(deserialized->commitSetHash == commitSet);
            BEAST_EXPECT(deserialized->entropySetHash == entropySet);
            BEAST_EXPECT(deserialized->myCommitment == commit);
            BEAST_EXPECT(deserialized->myReveal == reveal);
        }
    }

    void
    testSigningConsistency()
    {
        testcase("Signing hash consistency");
        // The signing hash from ConsensusProposal::signingHash() must match
        // what a receiver would compute via the same function after
        // deserializing the ExtendedPosition from the wire.
        auto const [pk, sk] = randomKeyPair(KeyType::secp256k1);
        auto const nodeId = calcNodeID(pk);
        auto const prevLedger = makeHash("prevledger");
        // Arbitrary fixed close time shared by both sides of the test.
        auto const closeTime =
            NetClock::time_point{NetClock::duration{1234567}};
        // Test with commitment (the case that was failing)
        {
            auto const txSet = makeHash("txset-sign");
            auto const commit = makeHash("commitment-sign");
            ExtendedPosition pos{txSet};
            pos.myCommitment = commit;
            using Proposal =
                ConsensusProposal<NodeID, uint256, ExtendedPosition>;
            Proposal prop{
                prevLedger,
                Proposal::seqJoin,
                pos,
                closeTime,
                NetClock::time_point{},
                nodeId};
            // Sign it (same as propose() does)
            auto const signingHash = prop.signingHash();
            auto sig = signDigest(pk, sk, signingHash);
            // Serialize position to wire format
            Serializer positionData;
            pos.add(positionData);
            auto const posSlice = positionData.slice();
            // Deserialize (same as PeerImp::onMessage does)
            SerialIter sit(posSlice);
            auto const maybeReceivedPos =
                ExtendedPosition::fromSerialIter(sit, posSlice.size());
            BEAST_EXPECT(maybeReceivedPos.has_value());
            if (!maybeReceivedPos)
                return;
            // Reconstruct proposal on receiver side
            Proposal receivedProp{
                prevLedger,
                Proposal::seqJoin,
                *maybeReceivedPos,
                closeTime,
                NetClock::time_point{},
                nodeId};
            // The signing hash must match
            BEAST_EXPECT(receivedProp.signingHash() == signingHash);
            // Verify signature (same as checkSign does)
            BEAST_EXPECT(
                verifyDigest(pk, receivedProp.signingHash(), sig, false));
        }
        // Test without commitment (legacy case)
        {
            auto const txSet = makeHash("txset-legacy");
            ExtendedPosition pos{txSet};
            using Proposal =
                ConsensusProposal<NodeID, uint256, ExtendedPosition>;
            Proposal prop{
                prevLedger,
                Proposal::seqJoin,
                pos,
                closeTime,
                NetClock::time_point{},
                nodeId};
            auto const signingHash = prop.signingHash();
            auto sig = signDigest(pk, sk, signingHash);
            Serializer positionData;
            pos.add(positionData);
            SerialIter sit(positionData.slice());
            auto const maybeReceivedPos = ExtendedPosition::fromSerialIter(
                sit, positionData.getDataLength());
            BEAST_EXPECT(maybeReceivedPos.has_value());
            if (!maybeReceivedPos)
                return;
            Proposal receivedProp{
                prevLedger,
                Proposal::seqJoin,
                *maybeReceivedPos,
                closeTime,
                NetClock::time_point{},
                nodeId};
            BEAST_EXPECT(receivedProp.signingHash() == signingHash);
            BEAST_EXPECT(
                verifyDigest(pk, receivedProp.signingHash(), sig, false));
        }
    }

    void
    testSuppressionConsistency()
    {
        testcase("Suppression hash consistency");
        // proposalUniqueId must produce the same result on sender and
        // receiver when given the same ExtendedPosition data.
        auto const [pk, sk] = randomKeyPair(KeyType::secp256k1);
        auto const prevLedger = makeHash("prevledger-supp");
        // Arbitrary fixed close time used for both suppression computations.
        auto const closeTime =
            NetClock::time_point{NetClock::duration{1234567}};
        std::uint32_t const proposeSeq = 0;
        auto const txSet = makeHash("txset-supp");
        auto const commit = makeHash("commitment-supp");
        ExtendedPosition pos{txSet};
        pos.myCommitment = commit;
        // Sign (to get a real signature for suppression)
        using Proposal = ConsensusProposal<NodeID, uint256, ExtendedPosition>;
        Proposal prop{
            prevLedger,
            proposeSeq,
            pos,
            closeTime,
            NetClock::time_point{},
            calcNodeID(pk)};
        auto sig = signDigest(pk, sk, prop.signingHash());
        // Sender computes suppression
        auto const senderSuppression =
            proposalUniqueId(pos, prevLedger, proposeSeq, closeTime, pk, sig);
        // Simulate wire: serialize and deserialize
        Serializer positionData;
        pos.add(positionData);
        SerialIter sit(positionData.slice());
        auto const maybeReceivedPos =
            ExtendedPosition::fromSerialIter(sit, positionData.getDataLength());
        BEAST_EXPECT(maybeReceivedPos.has_value());
        if (!maybeReceivedPos)
            return;
        // Receiver computes suppression
        auto const receiverSuppression = proposalUniqueId(
            *maybeReceivedPos, prevLedger, proposeSeq, closeTime, pk, sig);
        BEAST_EXPECT(senderSuppression == receiverSuppression);
    }

    void
    testMalformedPayload()
    {
        testcase("Malformed payload rejected");
        // Each sub-case constructs a deliberately bad wire payload and
        // expects fromSerialIter to return an empty optional; the final
        // sub-case is the well-formed control that must succeed.
        // Too short (< 32 bytes)
        {
            Serializer s;
            s.add32(0xDEADBEEF); // only 4 bytes
            SerialIter sit(s.slice());
            auto result =
                ExtendedPosition::fromSerialIter(sit, s.getDataLength());
            BEAST_EXPECT(!result.has_value());
        }
        // Empty payload
        {
            Serializer s;
            SerialIter sit(s.slice());
            auto result = ExtendedPosition::fromSerialIter(sit, 0);
            BEAST_EXPECT(!result.has_value());
        }
        // Flags claim fields that aren't present (truncated)
        {
            auto const txSet = makeHash("txset-malformed");
            Serializer s;
            s.addBitString(txSet);
            // flags = 0x0F (all 4 fields), but no field data follows
            s.add8(0x0F);
            SerialIter sit(s.slice());
            auto result =
                ExtendedPosition::fromSerialIter(sit, s.getDataLength());
            BEAST_EXPECT(!result.has_value());
        }
        // Flags claim 2 fields but only 1 field's worth of data
        {
            auto const txSet = makeHash("txset-malformed2");
            auto const commit = makeHash("commit-malformed2");
            Serializer s;
            s.addBitString(txSet);
            // flags = 0x03 (commitSetHash + entropySetHash), but only
            // provide commitSetHash data
            s.add8(0x03);
            s.addBitString(commit);
            SerialIter sit(s.slice());
            auto result =
                ExtendedPosition::fromSerialIter(sit, s.getDataLength());
            BEAST_EXPECT(!result.has_value());
        }
        // Unknown flag bits in upper nibble (wire malleability)
        {
            auto const txSet = makeHash("txset-unkflags");
            Serializer s;
            s.addBitString(txSet);
            s.add8(0x11); // bit 4 is unknown, bit 0 = commitSetHash
            s.addBitString(makeHash("commitset-unkflags"));
            SerialIter sit(s.slice());
            auto result =
                ExtendedPosition::fromSerialIter(sit, s.getDataLength());
            BEAST_EXPECT(!result.has_value());
        }
        // Trailing extra bytes after valid fields
        {
            auto const txSet = makeHash("txset-trailing");
            auto const commitSet = makeHash("commitset-trailing");
            Serializer s;
            s.addBitString(txSet);
            s.add8(0x01); // commitSetHash only
            s.addBitString(commitSet);
            s.add32(0xDEADBEEF); // 4 extra trailing bytes
            SerialIter sit(s.slice());
            auto result =
                ExtendedPosition::fromSerialIter(sit, s.getDataLength());
            BEAST_EXPECT(!result.has_value());
        }
        // Valid flags with exactly the right amount of data (should succeed)
        {
            auto const txSet = makeHash("txset-ok");
            auto const commitSet = makeHash("commitset-ok");
            Serializer s;
            s.addBitString(txSet);
            s.add8(0x01); // commitSetHash only
            s.addBitString(commitSet);
            SerialIter sit(s.slice());
            auto result =
                ExtendedPosition::fromSerialIter(sit, s.getDataLength());
            BEAST_EXPECT(result.has_value());
            if (result)
            {
                BEAST_EXPECT(result->txSetHash == txSet);
                BEAST_EXPECT(result->commitSetHash == commitSet);
                BEAST_EXPECT(!result->entropySetHash);
            }
        }
    }

    void
    testEquality()
    {
        testcase("Equality is txSetHash only");
        auto const txSet = makeHash("txset-eq");
        auto const txSet2 = makeHash("txset-eq-2");
        ExtendedPosition a{txSet};
        a.myCommitment = makeHash("commit1-eq");
        ExtendedPosition b{txSet};
        b.myCommitment = makeHash("commit2-eq");
        // Same txSetHash, different leaves -> equal
        BEAST_EXPECT(a == b);
        // Same txSetHash, different commitSetHash -> still equal
        // (sub-state quorum handles commitSetHash agreement)
        b.commitSetHash = makeHash("cs-eq");
        BEAST_EXPECT(a == b);
        // Same txSetHash, different entropySetHash -> still equal
        b.entropySetHash = makeHash("es-eq");
        BEAST_EXPECT(a == b);
        // Different txSetHash -> not equal
        ExtendedPosition c{txSet2};
        BEAST_EXPECT(a != c);
    }

public:
    void
    run() override
    {
        // Suite entry point: invokes every ExtendedPosition test.
        testSerializationRoundTrip();
        testSigningConsistency();
        testSuppressionConsistency();
        testMalformedPayload();
        testEquality();
    }
};
BEAST_DEFINE_TESTSUITE(ExtendedPosition, consensus, ripple);
} // namespace test
} // namespace ripple

View File

@@ -159,8 +159,6 @@ struct Peer
using NodeKey_t = PeerKey;
using TxSet_t = TxSet;
using PeerPosition_t = Position;
using Position_t =
typename TxSet_t::ID; // Use TxSet::ID for test framework
using Result = ConsensusResult<Peer>;
using NodeKey = Validation::NodeKey;

View File

@@ -82,12 +82,7 @@ supported_amendments()
Throw<std::runtime_error>(
"Unknown feature: " + s + " in supportedAmendments.");
}
//@@start rng-test-environment-gating
// TODO: ConsensusEntropy injects a pseudo-tx every ledger which
// breaks existing test transaction count assumptions. Exclude from
// default test set until dedicated tests are written.
return FeatureBitset(feats) - featureConsensusEntropy;
//@@end rng-test-environment-gating
return FeatureBitset(feats);
}();
return ids;
}