Compare commits

..

3 Commits

Author SHA1 Message Date
tequ
ac93b24c91 fixXahauGenesisOwnerCount 2026-01-13 15:42:23 +09:00
Niq Dudfield
a8d7b2619e fix: restore [ips_fixed] to use addFixedPeer instead of addFallbackStrings (#641) 2026-01-05 13:46:02 +10:00
Niq Dudfield
775fb3a8b2 fix: increment manifest sequence for client code cache invalidation (#631) 2025-12-24 11:16:00 +10:00
30 changed files with 112 additions and 728 deletions

View File

@@ -350,10 +350,7 @@ enum hook_return_code : int64_t {
MEM_OVERLAP = -43, // one or more specified buffers are the same memory
TOO_MANY_STATE_MODIFICATIONS = -44, // more than 5000 modified state
// entries in the combined hook chains
TOO_MANY_NAMESPACES = -45,
EXPORT_FAILURE = -46,
TOO_MANY_EXPORTED_TXN = -47,
TOO_MANY_NAMESPACES = -45
};
enum ExitType : uint8_t {
@@ -367,7 +364,6 @@ const uint16_t max_state_modifications = 256;
const uint8_t max_slots = 255;
const uint8_t max_nonce = 255;
const uint8_t max_emit = 255;
const uint8_t max_export = 4;
const uint8_t max_params = 16;
const double fee_base_multiplier = 1.1f;
@@ -473,13 +469,6 @@ static const APIWhitelist import_whitelist_1{
// clang-format on
};
static const APIWhitelist import_whitelist_2{
// clang-format off
HOOK_API_DEFINITION(I64, xport, (I32, I32)),
HOOK_API_DEFINITION(I64, xport_reserve, (I32)),
// clang-format on
};
#undef HOOK_API_DEFINITION
#undef I32
#undef I64

View File

@@ -1034,12 +1034,6 @@ validateGuards(
{
// PASS, this is a version 1 api
}
else if (rulesVersion & 0x04U &&
hook_api::import_whitelist_2.find(import_name) !=
hook_api::import_whitelist_2.end())
{
// PASS, this is an export api
}
else
{
GUARDLOG(hook::log::IMPORT_ILLEGAL)

View File

@@ -406,17 +406,6 @@ DECLARE_HOOK_FUNCTION(
uint32_t slot_no_tx,
uint32_t slot_no_meta);
DECLARE_HOOK_FUNCTION(
int64_t,
xport,
uint32_t write_ptr,
uint32_t write_len,
uint32_t read_ptr,
uint32_t read_len);
DECLARE_HOOK_FUNCTION(
int64_t,
xport_reserve,
uint32_t count);
/*
DECLARE_HOOK_FUNCTION(int64_t, str_find, uint32_t hread_ptr,
uint32_t hread_len, uint32_t nread_ptr, uint32_t nread_len, uint32_t mode,
@@ -496,8 +485,6 @@ struct HookResult
std::queue<std::shared_ptr<ripple::Transaction>>
emittedTxn{}; // etx stored here until accept/rollback
std::queue<std::shared_ptr<ripple::Transaction>>
exportedTxn{};
HookStateMap& stateMap;
uint16_t changedStateCount = 0;
std::map<
@@ -554,7 +541,6 @@ struct HookContext
uint16_t ledger_nonce_counter{0};
int64_t expected_etxn_count{-1}; // make this a 64bit int so the uint32
// from the hookapi cant overflow it
int64_t expected_export_count{-1};
std::map<ripple::uint256, bool> nonce_used{};
uint32_t generation =
0; // used for caching, only generated when txn_generation is called
@@ -891,9 +877,6 @@ public:
ADD_HOOK_FUNCTION(meta_slot, ctx);
ADD_HOOK_FUNCTION(xpop_slot, ctx);
ADD_HOOK_FUNCTION(xport, ctx);
ADD_HOOK_FUNCTION(xport_reserve, ctx);
/*
ADD_HOOK_FUNCTION(str_find, ctx);
ADD_HOOK_FUNCTION(str_replace, ctx);

View File

@@ -79,7 +79,7 @@ main(int argc, char** argv)
close(fd);
auto result = validateGuards(hook, std::cout, "", 7);
auto result = validateGuards(hook, std::cout, "", 3);
if (!result)
{

View File

@@ -1971,8 +1971,6 @@ hook::finalizeHookResult(
// directory) if we are allowed to
std::vector<std::pair<uint256 /* txnid */, uint256 /* emit nonce */>>
emission_txnid;
std::vector<uint256 /* txnid */>
exported_txnid;
if (doEmit)
{
@@ -2028,58 +2026,6 @@ hook::finalizeHookResult(
}
}
}
DBG_PRINTF("exported txn count: %d\n", hookResult.exportedTxn.size());
for (; hookResult.exportedTxn.size() > 0; hookResult.exportedTxn.pop())
{
auto& tpTrans = hookResult.exportedTxn.front();
auto& id = tpTrans->getID();
JLOG(j.trace()) << "HookExport[" << HR_ACC() << "]: " << id;
// exported txns must be marked bad by the hash router to ensure under
// no circumstances they will enter consensus on *this* chain.
applyCtx.app.getHashRouter().setFlags(id, SF_BAD);
std::shared_ptr<const ripple::STTx> ptr =
tpTrans->getSTransaction();
auto exportedId = keylet::exportedTxn(id);
auto sleExported = applyCtx.view().peek(exportedId);
if (!sleExported)
{
exported_txnid.emplace_back(id);
sleExported = std::make_shared<SLE>(exportedId);
// RH TODO: add a new constructor to STObject to avoid this
// serder thing
ripple::Serializer s;
ptr->add(s);
SerialIter sit(s.slice());
sleExported->emplace_back(ripple::STObject(sit, sfExportedTxn));
auto page = applyCtx.view().dirInsert(
keylet::exportedDir(), exportedId, [&](SLE::ref sle) {
(*sle)[sfFlags] = lsfEmittedDir;
});
if (page)
{
(*sleExported)[sfOwnerNode] = *page;
applyCtx.view().insert(sleExported);
}
else
{
JLOG(j.warn())
<< "HookError[" << HR_ACC() << "]: "
<< "Export Directory full when trying to insert "
<< id;
return tecDIR_FULL;
}
}
}
}
bool const fixV2 = applyCtx.view().rules().enabled(fixXahauV2);
@@ -2106,12 +2052,6 @@ hook::finalizeHookResult(
meta.setFieldU16(
sfHookEmitCount,
emission_txnid.size()); // this will never wrap, hard limit
if (applyCtx.view().rules().enabled(featureExport))
{
meta.setFieldU16(
sfHookExportCount,
exported_txnid.size());
}
meta.setFieldU16(sfHookExecutionIndex, exec_index);
meta.setFieldU16(sfHookStateChangeCount, hookResult.changedStateCount);
meta.setFieldH256(sfHookHash, hookResult.hookHash);
@@ -3948,27 +3888,6 @@ DEFINE_HOOK_FUNCTION(int64_t, etxn_reserve, uint32_t count)
HOOK_TEARDOWN();
}
DEFINE_HOOK_FUNCTION(int64_t, xport_reserve, uint32_t count)
{
HOOK_SETUP(); // populates memory_ctx, memory, memory_length, applyCtx,
// hookCtx on current stack
if (hookCtx.expected_export_count > -1)
return ALREADY_SET;
if (count < 1)
return TOO_SMALL;
if (count > hook_api::max_export)
return TOO_BIG;
hookCtx.expected_export_count = count;
return count;
HOOK_TEARDOWN();
}
// Compute the burden of an emitted transaction based on a number of factors
DEFINE_HOOK_FUNCNARG(int64_t, etxn_burden)
{
@@ -6237,92 +6156,6 @@ DEFINE_HOOK_FUNCTION(
HOOK_TEARDOWN();
}
DEFINE_HOOK_FUNCTION(
int64_t,
xport,
uint32_t write_ptr,
uint32_t write_len,
uint32_t read_ptr,
uint32_t read_len)
{
HOOK_SETUP();
if (NOT_IN_BOUNDS(read_ptr, read_len, memory_length))
return OUT_OF_BOUNDS;
if (NOT_IN_BOUNDS(write_ptr, write_len, memory_length))
return OUT_OF_BOUNDS;
if (write_len < 32)
return TOO_SMALL;
auto& app = hookCtx.applyCtx.app;
if (hookCtx.expected_export_count < 0)
return PREREQUISITE_NOT_MET;
if (hookCtx.result.exportedTxn.size() >= hookCtx.expected_export_count)
return TOO_MANY_EXPORTED_TXN;
ripple::Blob blob{memory + read_ptr, memory + read_ptr + read_len};
std::shared_ptr<STTx const> stpTrans;
try
{
stpTrans = std::make_shared<STTx const>(
SerialIter{memory + read_ptr, read_len});
}
catch (std::exception& e)
{
JLOG(j.trace()) << "HookExport[" << HC_ACC() << "]: Failed " << e.what()
<< "\n";
return EXPORT_FAILURE;
}
if (!stpTrans->isFieldPresent(sfAccount) ||
stpTrans->getAccountID(sfAccount) != hookCtx.result.account)
{
JLOG(j.trace()) << "HookExport[" << HC_ACC()
<< "]: Attempted to export a txn that's not for this Hook's Account ID.";
return EXPORT_FAILURE;
}
std::string reason;
auto tpTrans = std::make_shared<Transaction>(stpTrans, reason, app);
// RHTODO: is this needed or wise? VVV
if (tpTrans->getStatus() != NEW)
{
JLOG(j.trace()) << "HookExport[" << HC_ACC()
<< "]: tpTrans->getStatus() != NEW";
return EXPORT_FAILURE;
}
auto const& txID = tpTrans->getID();
if (txID.size() > write_len)
return TOO_SMALL;
if (NOT_IN_BOUNDS(write_ptr, txID.size(), memory_length))
return OUT_OF_BOUNDS;
auto const write_txid = [&]() -> int64_t {
WRITE_WASM_MEMORY_AND_RETURN(
write_ptr,
txID.size(),
txID.data(),
txID.size(),
memory,
memory_length);
};
int64_t result = write_txid();
if (result == 32)
hookCtx.result.exportedTxn.push(tpTrans);
return result;
HOOK_TEARDOWN();
}
/*
DEFINE_HOOK_FUNCTION(

View File

@@ -599,13 +599,6 @@ public:
return validatorKeys_.publicKey;
}
ValidatorKeys const&
getValidatorKeys() const override
{
return validatorKeys_;
}
NetworkOPs&
getOPs() override
{

View File

@@ -240,8 +240,7 @@ public:
virtual PublicKey const&
getValidationPublicKey() const = 0;
virtual ValidatorKeys const&
getValidatorKeys() const = 0;
virtual Resource::Manager&
getResourceManager() = 0;
virtual PathRequests&

View File

@@ -471,6 +471,10 @@ ManifestCache::applyManifest(Manifest m)
auto masterKey = m.masterKey;
map_.emplace(std::move(masterKey), std::move(m));
// Increment sequence to invalidate cached manifest messages
seq_++;
return ManifestDisposition::accepted;
}

View File

@@ -27,8 +27,6 @@
#include <ripple/protocol/Feature.h>
#include <ripple/protocol/jss.h>
#include <ripple/protocol/st.h>
#include <ripple/app/misc/ValidatorKeys.h>
#include <ripple/protocol/Sign.h>
#include <algorithm>
#include <limits>
#include <numeric>
@@ -1541,247 +1539,6 @@ TxQ::accept(Application& app, OpenView& view)
}
}
// Inject exported transactions/signatures, if any
if (view.rules().enabled(featureExport))
{
do
{
// if we're not a validator we do nothing here
if (app.getValidationPublicKey().empty())
break;
auto const& keys = app.getValidatorKeys();
if (keys.configInvalid())
break;
// and if we're not on the UNLReport we also do nothing
auto const unlRep = view.read(keylet::UNLReport());
if (!unlRep || !unlRep->isFieldPresent(sfActiveValidators))
{
// nothing to do without a unlreport object
break;
}
bool found = false;
auto const& avs = unlRep->getFieldArray(sfActiveValidators);
for (auto const& av : avs)
{
if (PublicKey(av[sfPublicKey]) == keys.masterPublicKey)
{
found = true;
break;
}
}
if (!found)
break;
// execution to here means we're a validator and on the UNLReport
AccountID signingAcc = calcAccountID(keys.publicKey);
Keylet const exportedDirKeylet{keylet::exportedDir()};
if (dirIsEmpty(view, exportedDirKeylet))
break;
std::shared_ptr<SLE const> sleDirNode{};
unsigned int uDirEntry{0};
uint256 dirEntry{beast::zero};
if (!cdirFirst(
view,
exportedDirKeylet.key,
sleDirNode,
uDirEntry,
dirEntry))
break;
do
{
Keylet const itemKeylet{ltCHILD, dirEntry};
auto sleItem = view.read(itemKeylet);
if (!sleItem)
{
// Directory node has an invalid index. Bail out.
JLOG(j_.warn())
<< "ExportedTxn processing: directory node in ledger "
<< view.seq()
<< " has index to object that is missing: "
<< to_string(dirEntry);
// RH TODO: if this ever happens the entry should be
// gracefully removed (somehow)
continue;
}
LedgerEntryType const nodeType{
safe_cast<LedgerEntryType>((*sleItem)[sfLedgerEntryType])};
if (nodeType != ltEXPORTED_TXN)
{
JLOG(j_.warn())
<< "ExportedTxn processing: emitted directory contained "
"non ltEMITTED_TXN type";
// RH TODO: if this ever happens the entry should be
// gracefully removed (somehow)
continue;
}
JLOG(j_.info()) << "Processing exported txn: " << *sleItem;
auto const& exported =
const_cast<ripple::STLedgerEntry&>(*sleItem)
.getField(sfExportedTxn)
.downcast<STObject>();
auto const& txnHash = sleItem->getFieldH256(sfTransactionHash);
auto exportedLgrSeq = exported.getFieldU32(sfLedgerSequence);
auto const seq = view.seq();
if (exportedLgrSeq == seq)
{
// this shouldn't happen, but do nothing
continue;
}
if (exportedLgrSeq < seq - 1)
{
// all old entries need to be turned into Export transactions so they can be removed
// from the directory
// in the previous ledger all the ExportSign transactions were executed, and one-by-one
// added the validators' signatures to the ltEXPORTED_TXN's sfSigners array.
// now we need to collect these together and place them inside the ExportedTxn blob
// and publish the blob in the Export transaction type.
STArray signers = sleItem->getFieldArray(sfSigners);
auto s = std::make_shared<ripple::Serializer>();
exported.add(*s);
SerialIter sitTrans(s->slice());
try
{
auto stpTrans =
std::make_shared<STTx>(std::ref(sitTrans));
if (!stpTrans->isFieldPresent(sfAccount) ||
stpTrans->getAccountID(sfAccount) == beast::zero)
{
JLOG(j_.warn()) << "Hook: Export failure: "
<< "sfAccount missing or zero.";
// RH TODO: if this ever happens the entry should be
// gracefully removed (somehow)
continue;
}
// RH TODO: should we force remove signingpubkey here?
stpTrans->setFieldArray(sfSigners, signers);
Blob const& blob = stpTrans->getSerializer().peekData();
STTx exportTx(ttEXPORT, [&](auto& obj) {
obj.setFieldVL(sfExportedTxn, blob);
obj.setFieldU32(sfLedgerSequence, seq);
obj.setFieldH256(sfTransactionHash, txnHash);
obj.setFieldArray(sfSigners, signers);
});
// submit to the ledger
{
uint256 txID = exportTx.getTransactionID();
auto s = std::make_shared<ripple::Serializer>();
exportTx.add(*s);
app.getHashRouter().setFlags(txID, SF_PRIVATE2);
app.getHashRouter().setFlags(txID, SF_EMITTED);
view.rawTxInsert(txID, std::move(s), nullptr);
ledgerChanged = true;
}
}
catch (std::exception& e)
{
JLOG(j_.warn())
<< "ExportedTxn Processing: Failure: " << e.what()
<< "\n";
}
continue;
}
// this ledger is the one after the exported txn was added to the directory
// so generate the export sign txns
auto s = std::make_shared<ripple::Serializer>();
exported.add(*s);
SerialIter sitTrans(s->slice());
try
{
auto const& stpTrans =
std::make_shared<STTx const>(std::ref(sitTrans));
if (!stpTrans->isFieldPresent(sfAccount) ||
stpTrans->getAccountID(sfAccount) == beast::zero)
{
JLOG(j_.warn()) << "Hook: Export failure: "
<< "sfAccount missing or zero.";
// RH TODO: if this ever happens the entry should be
// gracefully removed (somehow)
continue;
}
auto seq = view.info().seq;
auto txnHash = stpTrans->getTransactionID();
Serializer s =
buildMultiSigningData(*stpTrans, signingAcc);
auto multisig = ripple::sign(keys.publicKey, keys.secretKey, s.slice());
STTx exportSignTx(ttEXPORT_SIGN, [&](auto& obj) {
obj.set(([&]() {
auto inner = std::make_unique<STObject>(sfSigner);
inner->setFieldVL(sfSigningPubKey, keys.publicKey);
inner->setAccountID(sfAccount, signingAcc);
inner->setFieldVL(sfTxnSignature, multisig);
return inner;
})());
obj.setFieldU32(sfLedgerSequence, seq);
obj.setFieldH256(sfTransactionHash, txnHash);
});
// submit to the ledger
{
uint256 txID = exportSignTx.getTransactionID();
auto s = std::make_shared<ripple::Serializer>();
exportSignTx.add(*s);
app.getHashRouter().setFlags(txID, SF_PRIVATE2);
app.getHashRouter().setFlags(txID, SF_EMITTED);
view.rawTxInsert(txID, std::move(s), nullptr);
ledgerChanged = true;
}
}
catch (std::exception& e)
{
JLOG(j_.warn())
<< "ExportedTxn Processing: Failure: " << e.what()
<< "\n";
}
} while (cdirNext(
view, exportedDirKeylet.key, sleDirNode, uDirEntry, dirEntry));
} while (0);
}
// Inject emitted transactions if any
if (view.rules().enabled(featureHooks))
do

View File

@@ -24,6 +24,7 @@
#include <ripple/app/misc/AmendmentTable.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/app/tx/impl/Change.h>
#include <ripple/app/tx/impl/SetHook.h>
#include <ripple/app/tx/impl/SetSignerList.h>
#include <ripple/app/tx/impl/XahauGenesis.h>
#include <ripple/basics/Log.h>
@@ -96,13 +97,6 @@ Change::preflight(PreflightContext const& ctx)
}
}
if ((ctx.tx.getTxnType() == ttEXPORT_SIGN || ctx.tx.getTxnType() == ttEXPORT) &&
!ctx.rules.enabled(featureExport))
{
JLOG(ctx.j.warn()) << "Change: Export not enabled";
return temDISABLED;
}
return tesSUCCESS;
}
@@ -161,8 +155,6 @@ Change::preclaim(PreclaimContext const& ctx)
case ttAMENDMENT:
case ttUNL_MODIFY:
case ttEMIT_FAILURE:
case ttEXPORT:
case ttEXPORT_SIGN:
return tesSUCCESS;
case ttUNL_REPORT: {
if (!ctx.tx.isFieldPresent(sfImportVLKey) ||
@@ -218,11 +210,6 @@ Change::doApply()
return applyEmitFailure();
case ttUNL_REPORT:
return applyUNLReport();
case ttEXPORT:
return applyExport();
case ttEXPORT_SIGN:
return applyExportSign();
default:
assert(0);
return tefFAILURE;
@@ -597,10 +584,6 @@ Change::activateXahauGenesis()
SetSignerList::removeFromLedger(ctx_.app, sb, accid, j_);
// Step 4: install genesis hooks
sle->setFieldU32(
sfOwnerCount, sle->getFieldU32(sfOwnerCount) + genesis_hooks.size());
sb.update(sle);
if (sb.exists(keylet::hook(accid)))
{
JLOG(j_.warn()) << "featureXahauGenesis genesis account already has "
@@ -611,6 +594,7 @@ Change::activateXahauGenesis()
{
ripple::STArray hooks{sfHooks, static_cast<int>(genesis_hooks.size())};
int hookCount = 0;
uint32_t hookReserve = 0;
for (auto const& [hookOn, wasmBytes, params] : genesis_hooks)
{
@@ -620,8 +604,7 @@ Change::activateXahauGenesis()
loggerStream,
"rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh",
(ctx_.view().rules().enabled(featureHooksUpdate1) ? 1 : 0) +
(ctx_.view().rules().enabled(fix20250131) ? 2 : 0) +
(ctx_.view().rules().enabled(featureExport) ? 4 : 0));
(ctx_.view().rules().enabled(fix20250131) ? 2 : 0));
if (!result)
{
@@ -717,8 +700,14 @@ Change::activateXahauGenesis()
}
hooks.push_back(hookObj);
hookReserve += SetHook::computeHookReserve(hookObj);
}
sle->setFieldU32(
sfOwnerCount, sle->getFieldU32(sfOwnerCount) + hookReserve);
sb.update(sle);
auto sle = std::make_shared<SLE>(keylet::hook(accid));
sle->setFieldArray(sfHooks, hooks);
sle->setAccountID(sfAccount, accid);
@@ -759,6 +748,8 @@ Change::activateXahauGenesis()
ripple::STArray hooks{sfHooks, 1};
STObject hookObj{sfHook};
hookObj.setFieldH256(sfHookHash, governHash);
uint32_t hookReserve = 0;
// parameters
{
std::vector<STObject> vec;
@@ -774,6 +765,7 @@ Change::activateXahauGenesis()
sfHookParameters, STArray(vec, sfHookParameters));
}
hookReserve += SetHook::computeHookReserve(hookObj);
hooks.push_back(hookObj);
auto sle = std::make_shared<SLE>(hookKL);
@@ -800,7 +792,8 @@ Change::activateXahauGenesis()
sle->setAccountID(sfRegularKey, noAccount());
sle->setFieldU32(sfFlags, lsfDisableMaster);
sle->setFieldU32(sfOwnerCount, sle->getFieldU32(sfOwnerCount) + 1);
sle->setFieldU32(
sfOwnerCount, sle->getFieldU32(sfOwnerCount) + hookReserve);
sb.update(sle);
}
}
@@ -1087,80 +1080,6 @@ Change::applyEmitFailure()
return tesSUCCESS;
}
TER
Change::applyExport()
{
uint256 txnID(ctx_.tx.getFieldH256(sfTransactionHash));
do
{
JLOG(j_.info()) << "HookExport[" << txnID
<< "]: ttExport exporting transaction";
auto key = keylet::exportedTxn(txnID);
auto const& sle = view().peek(key);
if (!sle)
{
// most likely explanation is that this was somehow a double-up, so just ignore
JLOG(j_.warn())
<< "HookError[" << txnID << "]: ttExport could not find exported txn in ledger";
break;
}
if (!view().dirRemove(
keylet::exportedDir(),
sle->getFieldU64(sfOwnerNode),
key,
false))
{
JLOG(j_.fatal()) << "HookError[" << txnID
<< "]: ttExport (Change) tefBAD_LEDGER";
return tefBAD_LEDGER;
}
view().erase(sle);
} while (0);
return tesSUCCESS;
}
TER
Change::applyExportSign()
{
uint256 txnID(ctx_.tx.getFieldH256(sfTransactionHash));
do
{
JLOG(j_.info()) << "HookExport[" << txnID
<< "]: ttExportSign adding signature to transaction";
auto key = keylet::exportedTxn(txnID);
auto const& sle = view().peek(key);
if (!sle)
{
// most likely explanation is that this was somehow a double-up, so just ignore
JLOG(j_.warn())
<< "HookError[" << txnID << "]: ttExportSign could not find exported txn in ledger";
break;
}
// grab the signer object off the txn
STObject signerObj = const_cast<ripple::STTx&>(ctx_.tx)
.getField(sfSigner)
.downcast<STObject>();
// append it to the signers field in the ledger object
STArray signers = sle->getFieldArray(sfSigners);
signers.push_back(signerObj);
sle->setFieldArray(sfSigners, signers);
// done
view().update(sle);
} while (0);
return tesSUCCESS;
}
TER
Change::applyUNLModify()
{

View File

@@ -74,12 +74,6 @@ private:
TER
applyEmitFailure();
TER
applyExport();
TER
applyExportSign();
TER
applyUNLReport();
};

View File

@@ -37,12 +37,9 @@
#include <charconv>
#include <iostream>
#include <vector>
#include <ripple/app/hook/applyHook.h>
namespace ripple {
static const uint256 shadowTicketNamespace = uint256::fromVoid("RESERVED NAMESPACE SHADOW TICKET");
TxConsequences
Import::makeTxConsequences(PreflightContext const& ctx)
{
@@ -200,7 +197,7 @@ Import::preflight(PreflightContext const& ctx)
if (!stpTrans || !meta)
return temMALFORMED;
if (stpTrans->isFieldPresent(sfTicketSequence) && !ctx.rules.enabled(featureExport))
if (stpTrans->isFieldPresent(sfTicketSequence))
{
JLOG(ctx.j.warn()) << "Import: cannot use TicketSequence XPOP.";
return temMALFORMED;
@@ -891,26 +888,6 @@ Import::preclaim(PreclaimContext const& ctx)
return tefINTERNAL;
}
bool const hasTicket = stpTrans->isFieldPresent(sfTicketSequence);
if (hasTicket)
{
if (!ctx.view.rules().enabled(featureExport))
return tefINTERNAL;
auto const acc = stpTrans->getAccountID(sfAccount);
uint256 const seq = uint256(stpTrans->getFieldU32(sfTicketSequence));
// check if there is a shadow ticket, and if not we won't allow
// the txn to pass into consensus
if (!ctx.view.exists(keylet::hookState(acc, seq, shadowTicketNamespace)))
{
JLOG(ctx.j.warn()) << "Import: attempted to import a txn without shadow ticket.";
return telSHADOW_TICKET_REQUIRED; // tel code to avoid consensus/forward without SF_BAD
}
}
auto const& sle = ctx.view.read(keylet::account(ctx.tx[sfAccount]));
auto const tt = stpTrans->getTxnType();
@@ -951,17 +928,13 @@ Import::preclaim(PreclaimContext const& ctx)
} while (0);
}
if (!hasTicket)
if (sle && sle->isFieldPresent(sfImportSequence))
{
uint32_t sleImportSequence = sle->getFieldU32(sfImportSequence);
if (sle && sle->isFieldPresent(sfImportSequence))
{
uint32_t sleImportSequence = sle->getFieldU32(sfImportSequence);
// replay attempt
if (sleImportSequence >= stpTrans->getFieldU32(sfSequence))
return tefPAST_IMPORT_SEQ;
}
// replay attempt
if (sleImportSequence >= stpTrans->getFieldU32(sfSequence))
return tefPAST_IMPORT_SEQ;
}
// when importing for the first time the fee must be zero
@@ -1269,11 +1242,7 @@ Import::doApply()
auto const id = ctx_.tx[sfAccount];
auto sle = view().peek(keylet::account(id));
std::optional<uint256> ticket;
if (stpTrans->isFieldPresent(sfTicketSequence))
ticket = uint256(stpTrans->getFieldU32(sfTicketSequence));
if (sle && !ticket.has_value() && sle->getFieldU32(sfImportSequence) >= importSequence)
if (sle && sle->getFieldU32(sfImportSequence) >= importSequence)
{
// make double sure import seq hasn't passed
JLOG(ctx_.journal.warn()) << "Import: ImportSequence passed";
@@ -1366,24 +1335,8 @@ Import::doApply()
}
}
if (!ticket.has_value())
sle->setFieldU32(sfImportSequence, importSequence);
sle->setFieldU32(sfImportSequence, importSequence);
sle->setFieldAmount(sfBalance, finalBal);
if (ticket.has_value())
{
auto sleTicket = view().peek(keylet::hookState(id, *ticket, shadowTicketNamespace));
if (!sleTicket)
return tefINTERNAL;
TER result = hook::setHookState(ctx_, id, shadowTicketNamespace, *ticket, {});
if (result != tesSUCCESS)
return result;
// RHUPTO: ticketseq billing?
}
if (create)
{

View File

@@ -491,8 +491,7 @@ SetHook::validateHookSetEntry(SetHookCtx& ctx, STObject const& hookSetObj)
logger,
hsacc,
(ctx.rules.enabled(featureHooksUpdate1) ? 1 : 0) +
(ctx.rules.enabled(fix20250131) ? 2 : 0) +
(ctx.rules.enabled(featureExport) ? 4 : 0));
(ctx.rules.enabled(fix20250131) ? 2 : 0));
if (ctx.j.trace())
{
@@ -1174,6 +1173,23 @@ updateHookParameters(
return tesSUCCESS;
}
uint32_t
SetHook::computeHookReserve(STObject const& hookObj)
{
if (!hookObj.isFieldPresent(sfHookHash))
return 0;
int reserve{1};
if (hookObj.isFieldPresent(sfHookParameters))
reserve += hookObj.getFieldArray(sfHookParameters).size();
if (hookObj.isFieldPresent(sfHookGrants))
reserve += hookObj.getFieldArray(sfHookGrants).size();
return reserve;
};
struct KeyletComparator
{
bool
@@ -1838,28 +1854,14 @@ SetHook::setHook()
int oldHookReserve = 0;
int newHookReserve = 0;
auto const computeHookReserve = [](STObject const& hookObj) -> int {
if (!hookObj.isFieldPresent(sfHookHash))
return 0;
int reserve{1};
if (hookObj.isFieldPresent(sfHookParameters))
reserve += hookObj.getFieldArray(sfHookParameters).size();
if (hookObj.isFieldPresent(sfHookGrants))
reserve += hookObj.getFieldArray(sfHookGrants).size();
return reserve;
};
for (int i = 0; i < hook::maxHookChainLength(); ++i)
{
if (oldHooks && i < oldHookCount)
oldHookReserve += computeHookReserve(((*oldHooks).get())[i]);
oldHookReserve +=
SetHook::computeHookReserve(((*oldHooks).get())[i]);
if (i < newHooks.size())
newHookReserve += computeHookReserve(newHooks[i]);
newHookReserve += SetHook::computeHookReserve(newHooks[i]);
}
reserveDelta = newHookReserve - oldHookReserve;

View File

@@ -91,6 +91,9 @@ public:
static HookSetValidation
validateHookSetEntry(SetHookCtx& ctx, STObject const& hookSetObj);
static uint32_t
computeHookReserve(STObject const& hookObj);
private:
TER
setHook();

View File

@@ -374,8 +374,6 @@ invoke_calculateBaseFee(ReadView const& view, STTx const& tx)
case ttUNL_MODIFY:
case ttUNL_REPORT:
case ttEMIT_FAILURE:
case ttEXPORT_SIGN:
case ttEXPORT:
return Change::calculateBaseFee(view, tx);
case ttNFTOKEN_MINT:
return NFTokenMint::calculateBaseFee(view, tx);
@@ -546,8 +544,6 @@ invoke_apply(ApplyContext& ctx)
case ttFEE:
case ttUNL_MODIFY:
case ttUNL_REPORT:
case ttEXPORT:
case ttEXPORT_SIGN:
case ttEMIT_FAILURE: {
Change p(ctx);
return p();

View File

@@ -484,44 +484,61 @@ OverlayImpl::start()
m_peerFinder->setConfig(config);
m_peerFinder->start();
auto addIps = [&](std::vector<std::string> bootstrapIps) -> void {
auto addIps = [this](std::vector<std::string> ips, bool fixed) {
beast::Journal const& j = app_.journal("Overlay");
for (auto& ip : bootstrapIps)
for (auto& ip : ips)
{
std::size_t pos = ip.find('#');
if (pos != std::string::npos)
ip.erase(pos);
JLOG(j.trace()) << "Found boostrap IP: " << ip;
JLOG(j.trace())
<< "Found " << (fixed ? "fixed" : "bootstrap") << " IP: " << ip;
}
m_resolver.resolve(
bootstrapIps,
[&](std::string const& name,
ips,
[this, fixed](
std::string const& name,
std::vector<beast::IP::Endpoint> const& addresses) {
std::vector<std::string> ips;
ips.reserve(addresses.size());
beast::Journal const& j = app_.journal("Overlay");
std::string const base("config: ");
std::vector<beast::IP::Endpoint> eps;
eps.reserve(addresses.size());
for (auto const& addr : addresses)
{
std::string addrStr = addr.port() == 0
? to_string(addr.at_port(DEFAULT_PEER_PORT))
: to_string(addr);
JLOG(j.trace()) << "Parsed boostrap IP: " << addrStr;
ips.push_back(addrStr);
auto ep = addr.port() == 0 ? addr.at_port(DEFAULT_PEER_PORT)
: addr;
JLOG(j.trace())
<< "Parsed " << (fixed ? "fixed" : "bootstrap")
<< " IP: " << ep;
eps.push_back(ep);
}
std::string const base("config: ");
if (!ips.empty())
m_peerFinder->addFallbackStrings(base + name, ips);
if (eps.empty())
return;
if (fixed)
{
m_peerFinder->addFixedPeer(base + name, eps);
}
else
{
std::vector<std::string> strs;
strs.reserve(eps.size());
for (auto const& ep : eps)
strs.push_back(to_string(ep));
m_peerFinder->addFallbackStrings(base + name, strs);
}
});
};
if (!app_.config().IPS.empty())
addIps(app_.config().IPS);
addIps(app_.config().IPS, false);
if (!app_.config().IPS_FIXED.empty())
addIps(app_.config().IPS_FIXED);
addIps(app_.config().IPS_FIXED, true);
auto const timer = std::make_shared<Timer>(*this);
std::lock_guard lock(mutex_);

View File

@@ -74,7 +74,7 @@ namespace detail {
// Feature.cpp. Because it's only used to reserve storage, and determine how
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
// the actual number of amendments. A LogicError on startup will verify this.
static constexpr std::size_t numFeatures = 91;
static constexpr std::size_t numFeatures = 90;
/** Amendments that this server supports and the default voting behavior.
Whether they are enabled depends on the Rules defined in the validated
@@ -378,7 +378,6 @@ extern uint256 const fixInvalidTxFlags;
extern uint256 const featureExtendedHookState;
extern uint256 const fixCronStacking;
extern uint256 const fixHookAPI20251128;
extern uint256 const featureExport;
} // namespace ripple
#endif

View File

@@ -56,15 +56,9 @@ namespace keylet {
Keylet const&
emittedDir() noexcept;
Keylet const&
exportedDir() noexcept;
Keylet
emittedTxn(uint256 const& id) noexcept;
Keylet
exportedTxn(uint256 const& id) noexcept;
Keylet
hookDefinition(uint256 const& hash) noexcept;

View File

@@ -260,8 +260,6 @@ enum LedgerEntryType : std::uint16_t
\sa keylet::emitted
*/
ltEMITTED_TXN = 'E',
ltEXPORTED_TXN = 0x4578, // Ex (exported transaction)
};
// clang-format off
@@ -320,8 +318,7 @@ enum LedgerSpecificFlags {
// ltDIR_NODE
lsfNFTokenBuyOffers = 0x00000001,
lsfNFTokenSellOffers = 0x00000002,
lsfEmittedDir = 0x00000004,
lsfExportedDir = 0x00000008,
lsfEmittedDir = 0x00000004,
// ltNFTOKEN_OFFER
lsfSellNFToken = 0x00000001,

View File

@@ -355,7 +355,6 @@ extern SF_UINT16 const sfHookEmitCount;
extern SF_UINT16 const sfHookExecutionIndex;
extern SF_UINT16 const sfHookApiVersion;
extern SF_UINT16 const sfHookStateScale;
extern SF_UINT16 const sfHookExportCount;
// 32-bit integers (common)
extern SF_UINT32 const sfNetworkID;
@@ -596,7 +595,6 @@ extern SField const sfSigner;
extern SField const sfMajority;
extern SField const sfDisabledValidator;
extern SField const sfEmittedTxn;
extern SField const sfExportedTxn;
extern SField const sfHookExecution;
extern SField const sfHookDefinition;
extern SField const sfHookParameter;

View File

@@ -67,7 +67,6 @@ enum TELcodes : TERUnderlyingType {
telNON_LOCAL_EMITTED_TXN,
telIMPORT_VL_KEY_NOT_RECOGNISED,
telCAN_NOT_QUEUE_IMPORT,
telSHADOW_TICKET_REQUIRED,
};
//------------------------------------------------------------------------------

View File

@@ -149,12 +149,6 @@ enum TxType : std::uint16_t
ttURITOKEN_CREATE_SELL_OFFER = 48,
ttURITOKEN_CANCEL_SELL_OFFER = 49,
/* A pseudo-txn containing an exported transaction plus signatures from the validators */
ttEXPORT = 90,
/* A pseudo-txn containing a validator's signature for an export transaction */
ttEXPORT_SIGN = 91,
/* A pseudo-txn alarm signal for invoking a hook, emitted by validators after alarm set conditions are met */
ttCRON = 92,

View File

@@ -484,7 +484,6 @@ REGISTER_FIX (fixInvalidTxFlags, Supported::yes, VoteBehavior::De
REGISTER_FEATURE(ExtendedHookState, Supported::yes, VoteBehavior::DefaultNo);
REGISTER_FIX (fixCronStacking, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FIX (fixHookAPI20251128, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FEATURE(Export, Supported::yes, VoteBehavior::DefaultNo);
// The following amendments are obsolete, but must remain supported
// because they could potentially get enabled.

View File

@@ -66,8 +66,6 @@ enum class LedgerNameSpace : std::uint16_t {
HOOK_DEFINITION = 'D',
EMITTED_TXN = 'E',
EMITTED_DIR = 'F',
EXPORTED_TXN = 0x4578, // Ex
EXPORTED_DIR = 0x4564, // Ed
NFTOKEN_OFFER = 'q',
NFTOKEN_BUY_OFFERS = 'h',
NFTOKEN_SELL_OFFERS = 'i',
@@ -149,14 +147,6 @@ emittedDir() noexcept
return ret;
}
Keylet const&
exportedDir() noexcept
{
static Keylet const ret{
ltDIR_NODE, indexHash(LedgerNameSpace::EXPORTED_DIR)};
return ret;
}
Keylet
hookStateDir(AccountID const& id, uint256 const& ns) noexcept
{
@@ -169,12 +159,6 @@ emittedTxn(uint256 const& id) noexcept
return {ltEMITTED_TXN, indexHash(LedgerNameSpace::EMITTED_TXN, id)};
}
Keylet
exportedTxn(uint256 const& id) noexcept
{
return {ltEXPORTED_TXN, indexHash(LedgerNameSpace::EXPORTED_TXN, id)};
}
Keylet
hook(AccountID const& id) noexcept
{

View File

@@ -380,15 +380,6 @@ LedgerFormats::LedgerFormats()
{sfPreviousTxnLgrSeq, soeREQUIRED}
},
commonFields);
add(jss::ExportedTxn,
ltEXPORTED_TXN,
{
{sfExportedTxn, soeOPTIONAL},
{sfOwnerNode, soeREQUIRED},
{sfLedgerSequence, soeREQUIRED},
},
commonFields);
// clang-format on
}

View File

@@ -103,7 +103,6 @@ CONSTRUCT_TYPED_SFIELD(sfHookEmitCount, "HookEmitCount", UINT16,
CONSTRUCT_TYPED_SFIELD(sfHookExecutionIndex, "HookExecutionIndex", UINT16, 19);
CONSTRUCT_TYPED_SFIELD(sfHookApiVersion, "HookApiVersion", UINT16, 20);
CONSTRUCT_TYPED_SFIELD(sfHookStateScale, "HookStateScale", UINT16, 21);
CONSTRUCT_TYPED_SFIELD(sfHookExportCount, "HookExportCount", UINT16, 22);
// 32-bit integers (common)
CONSTRUCT_TYPED_SFIELD(sfNetworkID, "NetworkID", UINT32, 1);
@@ -362,7 +361,6 @@ CONSTRUCT_UNTYPED_SFIELD(sfImportVLKey, "ImportVLKey", OBJECT,
CONSTRUCT_UNTYPED_SFIELD(sfHookEmission, "HookEmission", OBJECT, 93);
CONSTRUCT_UNTYPED_SFIELD(sfMintURIToken, "MintURIToken", OBJECT, 92);
CONSTRUCT_UNTYPED_SFIELD(sfAmountEntry, "AmountEntry", OBJECT, 91);
CONSTRUCT_UNTYPED_SFIELD(sfExportedTxn, "ExportedTxn", OBJECT, 90);
// array of objects
// ARRAY/1 is reserved for end of array

View File

@@ -141,7 +141,6 @@ transResults()
MAKE_ERROR(telNON_LOCAL_EMITTED_TXN, "Emitted transaction cannot be applied because it was not generated locally."),
MAKE_ERROR(telIMPORT_VL_KEY_NOT_RECOGNISED, "Import vl key was not recognized."),
MAKE_ERROR(telCAN_NOT_QUEUE_IMPORT, "Import transaction was not able to be directly applied and cannot be queued."),
MAKE_ERROR(telSHADOW_TICKET_REQUIRED, "The imported transaction uses a TicketSequence but no shadow ticket exists."),
MAKE_ERROR(temMALFORMED, "Malformed transaction."),
MAKE_ERROR(temBAD_AMOUNT, "Can only send positive amounts."),
MAKE_ERROR(temBAD_CURRENCY, "Malformed: Bad currency."),

View File

@@ -490,26 +490,6 @@ TxFormats::TxFormats()
{sfStartTime, soeOPTIONAL},
},
commonFields);
add(jss::ExportSign,
ttEXPORT_SIGN,
{
{sfSigner, soeREQUIRED},
{sfLedgerSequence, soeREQUIRED},
{sfTransactionHash, soeREQUIRED},
},
commonFields);
add(jss::Export,
ttEXPORT,
{
{sfTransactionHash, soeREQUIRED},
{sfExportedTxn, soeREQUIRED},
{sfSigners, soeREQUIRED},
{sfLedgerSequence, soeREQUIRED},
},
commonFields);
}
TxFormats const&

View File

@@ -140,9 +140,6 @@ JSS(HookState); // ledger type.
JSS(HookStateData); // field.
JSS(HookStateKey); // field.
JSS(EmittedTxn); // ledger type.
JSS(ExportedTxn);
JSS(Export);
JSS(ExportSign);
JSS(SignerList); // ledger type.
JSS(SignerListSet); // transaction type.
JSS(SigningPubKey); // field.

View File

@@ -139,7 +139,9 @@ struct XahauGenesis_test : public beast::unit_test::suite
false, // means the calling test already burned some of the genesis
bool skipTests = false,
bool const testFlag = false,
bool const badNetID = false)
bool const badNetID = false,
uint32_t const expectedOwnerCount =
10 /** testFlag ? 10 : 14 (default) */)
{
using namespace jtx;
@@ -247,7 +249,10 @@ struct XahauGenesis_test : public beast::unit_test::suite
BEAST_EXPECT(
genesisAccRoot->getFieldAmount(sfBalance) ==
XahauGenesis::GenesisAmount);
BEAST_EXPECT(genesisAccRoot->getFieldU32(sfOwnerCount) == 2);
BEAST_EXPECT(
genesisAccRoot->getFieldU32(sfOwnerCount) == !testFlag
? expectedOwnerCount
: 14);
// ensure the definitions are correctly set
{
@@ -583,7 +588,14 @@ struct XahauGenesis_test : public beast::unit_test::suite
toBase58(t), membersStr);
}
activate(__LINE__, env, true, false, true);
activate(
__LINE__,
env,
true,
false,
true,
{},
3 /* IRR,IRD,IMC */ + members.size() + tables.size());
env.close();
env.close();
@@ -2235,6 +2247,8 @@ struct XahauGenesis_test : public beast::unit_test::suite
BEAST_EXPECT(!!hookLE);
uint256 const ns = beast::zero;
uint8_t mc = 0;
uint8_t paramsCount = 0;
if (hookLE)
{
auto const hooksArray = hookLE->getFieldArray(sfHooks);
@@ -2242,6 +2256,9 @@ struct XahauGenesis_test : public beast::unit_test::suite
hooksArray.size() == 1 &&
hooksArray[0].getFieldH256(sfHookHash) == governHookHash);
paramsCount =
hooksArray[0].getFieldArray(sfHookParameters).size();
for (Account const* m : members)
{
auto const mVec = vecFromAcc(*m);
@@ -2308,7 +2325,9 @@ struct XahauGenesis_test : public beast::unit_test::suite
BEAST_EXPECT(!!root);
if (root)
{
BEAST_EXPECT(root->getFieldU32(sfOwnerCount) == mc * 2 + 2);
BEAST_EXPECT(
root->getFieldU32(sfOwnerCount) ==
mc * 2 + 2 + paramsCount);
BEAST_EXPECT(root->getFieldU32(sfFlags) & lsfDisableMaster);
BEAST_EXPECT(root->getAccountID(sfRegularKey) == noAccount());
}