feat: add ttHASH_MIGRATION
@@ -1,184 +0,0 @@
# Pseudo Transactions in Xahau/Ripple

## Overview

Pseudo transactions are special system-level transactions that are automatically applied by the network at specific ledger sequences. Unlike regular transactions that are submitted by users and go through the transaction queue, pseudo transactions are generated and applied deterministically by the protocol itself.
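The protocol identifies pseudo transactions purely by their transaction type. Condensed from the helper that this commit later extends (see the `isPseudoTx` hunk near the end of the diff):

```cpp
// Condensed sketch of the existing type check, before this commit adds
// ttHASH_MIGRATION to the list.
bool
isPseudoTx(STObject const& tx)
{
    auto const t = tx[~sfTransactionType];
    if (!t)
        return false;

    auto const tt = safe_cast<TxType>(*t);
    return tt == ttAMENDMENT || tt == ttFEE || tt == ttUNL_MODIFY ||
        tt == ttEMIT_FAILURE || tt == ttUNL_REPORT;
}
```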
## Current Pseudo Transaction Types

Based on analysis of the codebase (see `PseudoTx_test.cpp`):
### 1. **Fee Pseudo Transaction (ttFEE)**

- Applied to set or update the base fee and reserve amounts
- Contains fields (a construction sketch follows this list):
  - `sfLedgerSequence`: The ledger where this applies
  - Fee fields (depending on `featureXRPFees`):
    - Modern: `sfBaseFeeDrops`, `sfReserveBaseDrops`, `sfReserveIncrementDrops`
    - Legacy: `sfBaseFee`, `sfReserveBase`, `sfReserveIncrement`, `sfReferenceFeeUnits`
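For reference, this is roughly how the fee-voting code assembles such a transaction when injecting it at a flag ledger (legacy field set shown; the exact code lives in FeeVote and may differ slightly, and `seq`, `baseFee`, `reserveBase`, `reserveIncrement`, `feeUnits` stand for the voted values):

```cpp
// Sketch of a legacy-format SetFee pseudo transaction.
STTx feeTx(ttFEE, [&](STObject& obj) {
    obj.setAccountID(sfAccount, AccountID());  // null account
    obj.setFieldU32(sfLedgerSequence, seq);
    obj.setFieldU64(sfBaseFee, baseFee);
    obj.setFieldU32(sfReserveBase, reserveBase);
    obj.setFieldU32(sfReserveIncrement, reserveIncrement);
    obj.setFieldU32(sfReferenceFeeUnits, feeUnits);
});
```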
### 2. **Amendment Pseudo Transaction (ttAMENDMENT)**

- Applied to activate or deactivate protocol amendments
- Contains fields (a construction sketch follows this list):
  - `sfAccount`: Set to `AccountID()` (the null account)
  - `sfAmendment`: The amendment hash
  - `sfLedgerSequence`: The activation ledger
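Roughly how the amendment machinery constructs one once an amendment's activation ledger arrives (a sketch only; the real code also sets flags while a majority is still being tracked, and `amendmentHash` / `seq` are assumed inputs):

```cpp
// Sketch of an EnableAmendment pseudo transaction for a given amendment hash.
STTx amendTx(ttAMENDMENT, [&](STObject& obj) {
    obj.setAccountID(sfAccount, AccountID());  // null account
    obj.setFieldH256(sfAmendment, amendmentHash);
    obj.setFieldU32(sfLedgerSequence, seq);
});
```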
## How Pseudo Transactions Work

1. **Generation**: Created deterministically at specific ledger sequences
2. **Application**: Applied automatically during ledger closing
3. **Validation**: Cannot be submitted by users (blocked by `passesLocalChecks`; see the sketch after this list)
4. **Consensus**: All validators generate identical pseudo transactions
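The submission-side block in point 3 is a small check in `apply.cpp`; condensed (helper checks abbreviated), it behaves like this. The rejection string matches the one asserted in `PseudoTx_test.cpp` later in this commit:

```cpp
// Condensed from apply.cpp: a transaction whose type is a pseudo type is
// rejected before it can be submitted by a user.
bool
passesLocalChecks(STObject const& st, std::string& reason)
{
    if (!isMemoOkay(st, reason))
        return false;

    if (!isAccountFieldOkay(st))
    {
        reason = "An account field is invalid.";
        return false;
    }

    if (isPseudoTx(st))
    {
        reason = "Cannot submit pseudo transactions.";
        return false;
    }
    return true;
}
```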
## Using Pseudo Transactions for Hash Migration

### The Hash Migration Pseudo Transaction Proposal

Create a new pseudo transaction type `ttHASH_MIGRATION` that would carry fields along these lines:

```
// Proposed pseudo transaction structure (field sketch; sfHashAlgorithm,
// sfPreviousAlgorithm and sfMigrationFlags would be new SFields)
ttHASH_MIGRATION {
    sfAccount: AccountID(),                   // null account
    sfLedgerSequence: HASH_MIGRATION_LEDGER,  // e.g., 20,000,000
    sfHashAlgorithm: "BLAKE3",
    sfPreviousAlgorithm: "SHA512_HALF",
    sfMigrationFlags: {
        REHASH_STATE_MAP: true,
        INVALIDATE_CACHES: true,
        CHECKPOINT_REQUIRED: true
    }
}
```
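The proposed `sfHashAlgorithm`, `sfPreviousAlgorithm` and `sfMigrationFlags` fields do not exist yet; the transaction format this commit actually registers (see the `TxFormats` diff below) only requires `sfLedgerSequence`, so a minimal instance can be built the same way the test does:

```cpp
// Minimal ttHASH_MIGRATION construction matching the format registered in this
// commit; HASH_MIGRATION_LEDGER stands in for the chosen trigger ledger.
STTx migrationTx(ttHASH_MIGRATION, [&](STObject& obj) {
    obj.setAccountID(sfAccount, AccountID());  // null account
    obj.setFieldU32(sfLedgerSequence, HASH_MIGRATION_LEDGER);
});
```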
### Migration Strategy Using Pseudo Transactions

#### Phase 1: Pre-Migration (Before Ledger 20,000,000)
- All nodes use SHA-512 Half
- The code already contains a Blake3 implementation
- Hash context classifiers are in place (as we've just implemented)

#### Phase 2: Migration Ledger (Ledger 20,000,000)
```
Ledger 19,999,999 closes with SHA-512 Half
            ↓
ttHASH_MIGRATION pseudo transaction triggers
            ↓
All state is rehashed with Blake3
            ↓
Ledger 20,000,000 opens with Blake3
```

#### Phase 3: Post-Migration (After Ledger 20,000,000)
- All new hashes use Blake3
- Historical data from before the migration remains verifiable with SHA-512 Half
### Implementation Details

#### 1. Add Hash Migration Transaction Type
```cpp
// In TxFormats.h
enum TxType : std::uint16_t {
    // ... existing types ...
    ttHASH_MIGRATION = 105,  // New pseudo transaction type (pseudo transactions use the 100+ range)
};
```
#### 2. Migration Pseudo Transaction Handler
```cpp
// Proposal sketch: rehashStateMap, invalidateHashCaches, setHashAlgorithm and
// createMigrationCheckpoint are proposed hooks, not existing view methods.
class HashMigration {
public:
    static TER
    apply(ApplyView& view, STTx const& tx, beast::Journal j) {
        // 1. Verify this is the correct migration ledger
        if (view.seq() != HASH_MIGRATION_LEDGER)
            return tefBAD_LEDGER;

        // 2. Trigger state map rehashing
        view.rawView().rehashStateMap(HashAlgorithm::BLAKE3);

        // 3. Invalidate all cached nodes
        view.rawView().invalidateHashCaches();

        // 4. Set the global hash algorithm flag
        view.rawView().setHashAlgorithm(HashAlgorithm::BLAKE3);

        // 5. Create a migration checkpoint
        createMigrationCheckpoint(view);

        return tesSUCCESS;
    }
};
```
#### 3. Hash Function Selection Based on Ledger
```cpp
// In digest.h/cpp
HashAlgorithm selectHashAlgorithm(hash_options const& opts) {
    if (!opts.ledger_index.has_value())
        return HashAlgorithm::SHA512_HALF;  // Default for non-ledger

    if (opts.ledger_index.value() >= HASH_MIGRATION_LEDGER) {
        // Post-migration ledgers use Blake3
        return HashAlgorithm::BLAKE3;
    } else {
        // Pre-migration ledgers use SHA-512 Half
        return HashAlgorithm::SHA512_HALF;
    }
}
```
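A call site would then route through this selector. In the sketch below, `blake3Half` is an assumed wrapper over the BLAKE3 hasher and does not exist in the codebase yet, while `sha512Half` does:

```cpp
// Hypothetical ledger-aware digest helper built on selectHashAlgorithm.
uint256
ledgerAwareDigest(Slice data, hash_options const& opts)
{
    if (selectHashAlgorithm(opts) == HashAlgorithm::BLAKE3)
        return blake3Half(data);  // assumed BLAKE3 wrapper, not implemented yet

    return sha512Half(data);      // existing SHA-512 Half helper
}
```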
### Advantages of Pseudo Transaction Approach

1. **Deterministic**: All nodes execute the same migration at the same ledger
2. **Atomic**: The entire state transitions in one ledger close
3. **Auditable**: The migration appears in the ledger history
4. **Reversible**: Could theoretically migrate back if needed
5. **Clean**: No ambiguity about which algorithm to use
### Challenges and Solutions

#### Challenge 1: Performance Impact
**Problem**: Rehashing the entire state map could take significant time.
**Solution**: Pre-compute Blake3 hashes in the background before the migration ledger (see the sketch below).
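One possible shape of that background pass, assuming a side cache keyed by each node's current SHA-512 Half hash. `Blake3Cache`, `computeBlake3` and `serializePrefixed` are illustrative names, not existing APIs:

```cpp
// Hypothetical pre-warm pass: walk the state SHAMap ahead of the migration
// ledger and record each node's BLAKE3 digest, so the migration itself only
// swaps hashes instead of recomputing them under time pressure.
void
prewarmBlake3(SHAMap const& stateMap, Blake3Cache& cache)
{
    stateMap.visitNodes([&](SHAMapTreeNode& node) {
        // Serialize the node in its canonical (prefixed) form and hash it with
        // BLAKE3; the exact serialization call depends on the SHAMap API.
        cache.put(node.getHash(), computeBlake3(serializePrefixed(node)));
        return true;  // keep walking
    });
}
```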
#### Challenge 2: Network Synchronization
**Problem**: Nodes must stay in sync during the migration.
**Solution**: Require supermajority agreement before the migration proceeds.

#### Challenge 3: Historical Verification
**Problem**: Pre-migration data must remain verifiable with the old algorithm.
**Solution**: Use the ledger sequence from `hash_options` to select the correct algorithm (see `selectHashAlgorithm` above).
### Alternative: Amendment-Based Migration

Instead of a dedicated pseudo transaction, use the existing amendment mechanism:

```cpp
// Activate hash migration via amendment.
// firstLedgerWithFeature is a proposed helper, not an existing API; see the
// note below for one way to detect the transition ledger without it.
if (view.rules().enabled(featureBLAKE3Migration)) {
    if (view.seq() == firstLedgerWithFeature(featureBLAKE3Migration)) {
        // This is the transition ledger
        applyHashMigration(view);
    }
}
```
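Since `firstLedgerWithFeature` does not exist today, one way to detect the transition ledger is to compare the parent ledger's rules with the current ones (sketch only; where the parent rules come from depends on the ledger-close plumbing):

```cpp
// Hypothetical transition check: the migration runs exactly once, on the first
// ledger where the amendment is enabled but was not enabled in the parent.
bool
isHashMigrationLedger(ReadView const& view, Rules const& parentRules)
{
    return view.rules().enabled(featureBLAKE3Migration) &&
        !parentRules.enabled(featureBLAKE3Migration);
}
```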
## Testing Strategy

1. **Unit Tests**: Verify pseudo transaction generation and application
2. **Integration Tests**: Test full migration on test networks
3. **Performance Tests**: Measure migration time for various state sizes
4. **Consensus Tests**: Ensure all nodes reach the same post-migration state
## Rollout Plan

1. **Phase 1**: Deploy code with Blake3 support (dormant)
2. **Phase 2**: Activate on test networks
3. **Phase 3**: Set the migration ledger far in the future on mainnet
4. **Phase 4**: Monitor and prepare for migration
5. **Phase 5**: Migration occurs automatically at the designated ledger
## Conclusion

Using pseudo transactions for hash migration provides a clean, deterministic, and auditable way to transition the entire network from SHA-512 Half to Blake3. The migration appears as a historical event in the ledger, maintaining the blockchain's integrity and auditability while modernizing its cryptographic foundation.
@@ -41,7 +41,9 @@ Change::preflight(PreflightContext const& ctx)
{
    auto const ret = preflight0(ctx);
    if (!isTesSuccess(ret))
    {
        return ret;
    }

    auto account = ctx.tx.getAccountID(sfAccount);
    if (account != beast::zero)
@@ -110,7 +112,6 @@ Change::preflight(PreflightContext const& ctx)
            return telIMPORT_VL_KEY_NOT_RECOGNISED;
        }
    }

    return tesSUCCESS;
}

@@ -170,6 +171,7 @@ Change::preclaim(PreclaimContext const& ctx)
        case ttUNL_MODIFY:
        case ttUNL_REPORT:
        case ttEMIT_FAILURE:
        case ttHASH_MIGRATION:
            return tesSUCCESS;
        default:
            return temUNKNOWN;
@@ -191,6 +193,8 @@ Change::doApply()
            return applyEmitFailure();
        case ttUNL_REPORT:
            return applyUNLReport();
        case ttHASH_MIGRATION:
            return applyHashMigration();
        default:
            assert(0);
            return tefFAILURE;
@@ -1088,6 +1092,31 @@ Change::applyEmitFailure()
    return tesSUCCESS;
}

TER
Change::applyHashMigration()
{
    // This pseudo transaction triggers the hash algorithm migration
    // from SHA-512 Half to BLAKE3.

    JLOG(j_.warn()) << "Hash migration pseudo transaction triggered at ledger "
                    << view().seq();

    // TODO: Implement the actual state tree rehashing logic here
    // This is where we would:
    // 1. Iterate through all state tree nodes
    // 2. Rehash each node with BLAKE3
    // 3. Update the tree structure
    // 4. Set a flag indicating migration is complete

    // For now, this is a placeholder implementation
    // In a real implementation, we would:
    // - Call a function to rehash the entire state map
    // - Set a migration complete flag in the ledger
    // - Update the global hash algorithm setting

    return tesSUCCESS;
}

TER
Change::applyUNLModify()
{

@@ -76,6 +76,9 @@ private:

    TER
    applyUNLReport();

    TER
    applyHashMigration();
};

} // namespace ripple

@@ -150,6 +150,7 @@ invoke_preflight(PreflightContext const& ctx)
        case ttUNL_MODIFY:
        case ttUNL_REPORT:
        case ttEMIT_FAILURE:
        case ttHASH_MIGRATION:
            return invoke_preflight_helper<Change>(ctx);
        case ttHOOK_SET:
            return invoke_preflight_helper<SetHook>(ctx);
@@ -277,6 +278,7 @@ invoke_preclaim(PreclaimContext const& ctx)
        case ttUNL_MODIFY:
        case ttUNL_REPORT:
        case ttEMIT_FAILURE:
        case ttHASH_MIGRATION:
            return invoke_preclaim<Change>(ctx);
        case ttNFTOKEN_MINT:
            return invoke_preclaim<NFTokenMint>(ctx);
@@ -364,6 +366,7 @@ invoke_calculateBaseFee(ReadView const& view, STTx const& tx)
        case ttUNL_MODIFY:
        case ttUNL_REPORT:
        case ttEMIT_FAILURE:
        case ttHASH_MIGRATION:
            return Change::calculateBaseFee(view, tx);
        case ttNFTOKEN_MINT:
            return NFTokenMint::calculateBaseFee(view, tx);
@@ -530,7 +533,8 @@ invoke_apply(ApplyContext& ctx)
        case ttFEE:
        case ttUNL_MODIFY:
        case ttUNL_REPORT:
        case ttEMIT_FAILURE: {
        case ttEMIT_FAILURE:
        case ttHASH_MIGRATION: {
            Change p(ctx);
            return p();
        }

@@ -74,7 +74,7 @@ namespace detail {
// Feature.cpp. Because it's only used to reserve storage, and determine how
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
// the actual number of amendments. A LogicError on startup will verify this.
static constexpr std::size_t numFeatures = 85;
static constexpr std::size_t numFeatures = 86;

/** Amendments that this server supports and the default voting behavior.
    Whether they are enabled depends on the Rules defined in the validated
@@ -373,6 +373,7 @@ extern uint256 const fixProvisionalDoubleThreading;
extern uint256 const featureClawback;
extern uint256 const featureDeepFreeze;
extern uint256 const featureIOUIssuerWeakTSH;
extern uint256 const featureBLAKE3Migration;

} // namespace ripple

@@ -191,6 +191,12 @@ enum TxType : std::uint16_t
    ttUNL_MODIFY = 102,
    ttEMIT_FAILURE = 103,
    ttUNL_REPORT = 104,

    /** This system-generated transaction type migrates the hash algorithm from SHA-512 Half to BLAKE3.

        This pseudo transaction is executed at a predetermined ledger to rehash the entire state tree.
    */
    ttHASH_MIGRATION = 105,
};
// clang-format on

@@ -480,6 +480,7 @@ REGISTER_FEATURE(Clawback, Supported::yes, VoteBehavior::De
REGISTER_FIX (fixProvisionalDoubleThreading, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FEATURE(DeepFreeze, Supported::yes, VoteBehavior::DefaultNo);
REGISTER_FEATURE(IOUIssuerWeakTSH, Supported::yes, VoteBehavior::DefaultNo);
REGISTER_FEATURE(BLAKE3Migration, Supported::yes, VoteBehavior::DefaultNo);

// The following amendments are obsolete, but must remain supported
// because they could potentially get enabled.

@@ -615,7 +615,7 @@ isPseudoTx(STObject const& tx)

    auto tt = safe_cast<TxType>(*t);
    return tt == ttAMENDMENT || tt == ttFEE || tt == ttUNL_MODIFY ||
        tt == ttEMIT_FAILURE || tt == ttUNL_REPORT;
        tt == ttEMIT_FAILURE || tt == ttUNL_REPORT || tt == ttHASH_MIGRATION;
}

} // namespace ripple

@@ -216,6 +216,13 @@ TxFormats::TxFormats()
        },
        commonFields);

    add(jss::HashMigration,
        ttHASH_MIGRATION,
        {
            {sfLedgerSequence, soeREQUIRED},
        },
        commonFields);

    add(jss::TicketCreate,
        ttTICKET_CREATE,
        {

@@ -126,6 +126,7 @@ JSS(SetFee); // transaction type.
JSS(SetRemarks); // transaction type
JSS(UNLModify); // transaction type.
JSS(UNLReport); // transaction type.
JSS(HashMigration); // transaction type.
JSS(SettleDelay); // in: TransactionSign
JSS(SendMax); // in: TransactionSign
JSS(Sequence); // in/out: TransactionSign; field.

@@ -15,6 +15,7 @@
*/
//==============================================================================

#include <ripple/app/misc/HashRouter.h>
#include <ripple/app/tx/apply.h>
#include <ripple/protocol/Feature.h>
#include <ripple/protocol/STAccount.h>
@@ -109,6 +110,51 @@ struct PseudoTx_test : public beast::unit_test::suite
        }
    }

    void
    testHashMigration()
    {
        using namespace jtx;

        // Enable the BLAKE3Migration feature
        FeatureBitset features = supported_amendments() | featureBLAKE3Migration;

        Env env(*this, features);

        // Create the hash migration pseudo transaction
        STTx migrationTx(ttHASH_MIGRATION, [&](auto& obj) {
            obj.setAccountID(sfAccount, AccountID());
            obj.setFieldU32(sfLedgerSequence, env.closed()->seq() + 1);
        });

        // Verify it's recognized as a pseudo transaction
        BEAST_EXPECT(isPseudoTx(migrationTx));

        // Verify it cannot be submitted by users
        std::string reason;
        BEAST_EXPECT(!passesLocalChecks(migrationTx, reason));
        BEAST_EXPECT(reason == "Cannot submit pseudo transactions.");

        // Insert the pseudo transaction into the open ledger
        env.app().openLedger().modify([&](OpenView& view, beast::Journal j) {
            // This simulates what happens during consensus when
            // pseudo transactions are injected
            uint256 txID = migrationTx.getTransactionID();
            auto s = std::make_shared<ripple::Serializer>();
            migrationTx.add(*s);
            env.app().getHashRouter().setFlags(txID, SF_PRIVATE2);
            view.rawTxInsert(txID, std::move(s), nullptr);

            return true;
        });

        // Close the ledger to process the pseudo transaction
        env.close();

        JLOG(env.journal.info())
            << "Hash migration pseudo transaction test completed successfully";
    }

    void
    run() override
    {
@@ -119,6 +165,7 @@ struct PseudoTx_test : public beast::unit_test::suite
        testPrevented(all - featureXRPFees);
        testPrevented(all);
        testAllowed();
        testHashMigration();
    }
};
