replay network code
@@ -129,6 +129,12 @@ class RCLConsensus
         return mode_;
     }
 
+    void
+    setProposing()
+    {
+        mode_ = ConsensusMode::proposing;
+    }
+
     /** Called before kicking off a new consensus round.
 
         @param prevLedger Ledger that will be prior ledger for next round
@@ -465,6 +471,12 @@ public:
        return adaptor_.mode();
    }
 
+    void
+    setProposing()
+    {
+        adaptor_.setProposing();
+    }
+
    ConsensusPhase
    phase() const
    {
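The two hunks above expose a setProposing() passthrough so code outside the consensus machinery can push the adaptor into ConsensusMode::proposing directly. A minimal usage sketch, assuming the RCLConsensus member is named mConsensus as in the NetworkOPs hunks below:

// Sketch only: force the node straight into proposing mode (replay network)
// instead of waiting for it to observe a full round first.
mConsensus.setProposing();  // forwards to adaptor_.setProposing()
assert(mConsensus.mode() == ConsensusMode::proposing);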
@@ -944,7 +944,13 @@ NetworkOPsImp::processHeartbeatTimer()
         // do we have sufficient peers? If not, we are disconnected.
         if (numPeers < minPeerCount_)
         {
-            if (mMode != OperatingMode::DISCONNECTED)
+            if (app_.config().NETWORK_ID == 65534)
+            {
+                // replay network is always considered to be connected
+                // ensuring that it actually is is up to the tester
+                setMode(OperatingMode::FULL);
+            }
+            else if (mMode != OperatingMode::DISCONNECTED)
             {
                 setMode(OperatingMode::DISCONNECTED);
                 JLOG(m_journal.warn())
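Throughout the commit the replay network is identified by the hard-coded NetworkID 65534, one below the existing wildcard network 65535. A hypothetical helper, not part of the commit (which writes the comparisons inline at each call site), naming the constants the later hunks test against:

#include <cstdint>

// Hypothetical constants/helpers mirroring the inline checks in this commit.
constexpr std::uint32_t kReplayNetworkID = 65534;    // "replay network"
constexpr std::uint32_t kWildcardNetworkID = 65535;  // existing wildcard network

inline bool
isReplayNetwork(std::uint32_t networkID)
{
    return networkID == kReplayNetworkID;
}

inline bool
skipsSignatureChecks(std::uint32_t networkID)
{
    // Matches the >= 65534 comparisons added to Transactor::checkSign() and
    // STTx::checkSingleSign() below: replay and wildcard networks both get
    // a free pass on signatures.
    return networkID >= kReplayNetworkID;
}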
@@ -1797,6 +1803,13 @@ NetworkOPsImp::beginConsensus(uint256 const& networkClosed)
 {
     assert(networkClosed.isNonZero());
 
+    if (app_.config().NETWORK_ID == 65534)
+    {
+        // replay network automatically goes to proposing
+        setMode(OperatingMode::FULL);
+        mConsensus.setProposing();
+    }
+
     auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
 
     JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
@@ -769,6 +769,47 @@ TxQ::apply(
     if (!isTesSuccess(pfresult.ter))
         return {pfresult.ter, false};
 
+    bool const isReplayNetwork = (app.config().NETWORK_ID == 65534);
+
+    if (isReplayNetwork)
+    {
+        // in the replay network everything is always queued no matter what
+
+        std::lock_guard lock(mutex_);
+        auto const metricsSnapshot = feeMetrics_.getSnapshot();
+        auto const feeLevelPaid =
+            getRequiredFeeLevel(view, flags, metricsSnapshot, lock);
+
+        auto const account = (*tx)[sfAccount];
+        AccountMap::iterator accountIter = byAccount_.find(account);
+        bool const accountIsInQueue = accountIter != byAccount_.end();
+
+        if (!accountIsInQueue)
+        {
+            // Create a new TxQAccount object and add the byAccount lookup.
+            bool created;
+            std::tie(accountIter, created) =
+                byAccount_.emplace(account, TxQAccount(tx));
+            (void)created;
+            assert(created);
+        }
+
+        flags &= ~tapRETRY;
+
+        auto& candidate = accountIter->second.add(
+            {tx, transactionID, feeLevelPaid, flags, pfresult});
+
+        // Then index it into the byFee lookup.
+        byFee_.insert(candidate);
+        JLOG(j_.debug()) << "Added transaction " << candidate.txID
+                         << " with result " << transToken(pfresult.ter)
+                         << " from " << (accountIsInQueue ? "existing" : "new")
+                         << " account " << candidate.account << " to queue."
+                         << " Flags: " << flags;
+
+        return {terQUEUED, false};
+    }
+
     // If the account is not currently in the ledger, don't queue its tx.
     auto const account = (*tx)[sfAccount];
     Keylet const accountKey{keylet::account(account)};
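The branch above disables fee escalation entirely on the replay network: any transaction that has already passed preflight is added to the queue and reported as queued. A caller-side sketch, assuming NETWORK_ID is 65534 and the usual TxQ::apply() arguments are in scope:

// Sketch: for a preflight-passing transaction on the replay network,
// TxQ::apply() neither applies it to the open ledger nor rejects it for an
// insufficient fee level; it always returns terQUEUED with applied == false.
auto const [ter, applied] = app.getTxQ().apply(app, view, tx, flags, j);
assert(!applied);
assert(ter == terQUEUED);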
@@ -1158,11 +1199,11 @@ TxQ::apply(
             (potentialTotalSpend == XRPAmount{0} &&
              multiTxn->applyView.fees().base == 0));
         sleBump->setFieldAmount(sfBalance, balance - potentialTotalSpend);
-        // The transaction's sequence/ticket will be valid when the other
-        // transactions in the queue have been processed. If the tx has a
-        // sequence, set the account to match it. If it has a ticket, use
-        // the next queueable sequence, which is the closest approximation
-        // to the most successful case.
+        // The transaction's sequence/ticket will be valid when the
+        // other transactions in the queue have been processed. If the
+        // tx has a sequence, set the account to match it. If it has a
+        // ticket, use the next queueable sequence, which is the closest
+        // approximation to the most successful case.
         sleBump->at(sfSequence) = txSeqProx.isSeq()
             ? txSeqProx.value()
             : nextQueuableSeqImpl(sleAccount, lock).value();
@@ -458,11 +458,24 @@ Change::activateXahauGenesis()
     bool const isTest =
         (ctx_.tx.getFlags() & tfTestSuite) && ctx_.app.config().standalone();
 
-    // RH NOTE: we'll only configure xahau governance structure on networks that
-    // begin with 2133... so production xahau: 21337 and its testnet 21338
-    // with 21330-21336 and 21339 also valid and reserved for dev nets etc.
-    // all other Network IDs will be conventionally configured.
-    if ((ctx_.app.config().NETWORK_ID / 10) != 2133 && !isTest)
+    // RH NOTE: we'll only configure xahau governance structure on certain
+    // network ids
+
+    const auto nid = ctx_.app.config().NETWORK_ID;
+
+    if (nid >= 65520)
+    {
+        // networks 65520 - 65535 are also configured as xahau gov
+    }
+    else if (isTest)
+    {
+        // test is configured like this too
+    }
+    else if (nid / 10 == 2133)
+    {
+        // networks 2133X are the valid xahau prod dev and testnets
+    }
+    else
         return;
 
     auto [ng_entries, l1_entries, l2_entries, gov_params] =
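Restated as a single predicate (hypothetical; the commit expresses it as the if/else-if chain above with comment-only bodies), the Xahau governance structure is now configured for the reserved 65520-65535 range, for the unit-test configuration, and for the 2133X family of networks; every other NetworkID returns early:

#include <cstdint>

// Hypothetical restatement of the branch logic in activateXahauGenesis().
inline bool
configuresXahauGovernance(std::uint32_t nid, bool isTest)
{
    return nid >= 65520 || isTest || (nid / 10 == 2133);
}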
@@ -167,6 +167,9 @@ Import::preflight(PreflightContext const& ctx)
     if (!xpop)
         return temMALFORMED;
 
+    if (ctx.app.config().NETWORK_ID == 65534 /* replay network */)
+        return tesSUCCESS;
+
     // we will check if we recognise the vl key in preclaim because it may be
     // from on-ledger object
     std::optional<PublicKey> masterVLKey;
@@ -270,7 +273,9 @@ Import::preflight(PreflightContext const& ctx)
         return temMALFORMED;
     }
 
-    if (stpTrans->getFieldU32(sfOperationLimit) != ctx.app.config().NETWORK_ID)
+    const auto nid = ctx.app.config().NETWORK_ID;
+    if (stpTrans->getFieldU32(sfOperationLimit) != nid &&
+        nid != 65534 /* replay network */)
     {
         JLOG(ctx.j.warn()) << "Import: Wrong network ID for OperationLimit in "
                               "inner txn. outer txid: "
@@ -1307,8 +1312,8 @@ Import::doApply()
             view().rules().enabled(featureXahauGenesis)
                 ? view().info().parentCloseTime.time_since_epoch().count()
                 : view().rules().enabled(featureDeletableAccounts)
                     ? view().seq()
                     : 1};
 
         sle = std::make_shared<SLE>(keylet::account(id));
         sle->setAccountID(sfAccount, id);
@@ -120,8 +120,11 @@ preflight1(PreflightContext const& ctx)
     auto const fee = ctx.tx.getFieldAmount(sfFee);
     if (!fee.native() || fee.negative() || !isLegalAmount(fee.xrp()))
     {
-        JLOG(ctx.j.debug()) << "preflight1: invalid fee";
-        return temBAD_FEE;
+        if (ctx.app.config().NETWORK_ID != 65534 /* replay network */)
+        {
+            JLOG(ctx.j.debug()) << "preflight1: invalid fee";
+            return temBAD_FEE;
+        }
     }
 
     // if a hook emitted this transaction we bypass signature checks
@@ -437,6 +440,10 @@ Transactor::minimumFee(
 TER
 Transactor::checkFee(PreclaimContext const& ctx, XRPAmount baseFee)
 {
+    // on the replay network fees are unimportant
+    if (ctx.app.config().NETWORK_ID == 65534 /* replay network */)
+        return tesSUCCESS;
+
     if (!ctx.tx[sfFee].native())
         return temBAD_FEE;
 
@@ -478,6 +485,7 @@ Transactor::checkFee(PreclaimContext const& ctx, XRPAmount baseFee)
                 "a fee and an existing account.";
         }
     }
+    std::cout << "transactor 485 NO_ACCOUNT\n";
     return terNO_ACCOUNT;
 }
 
@@ -549,6 +557,7 @@ Transactor::checkSeqProxy(
         JLOG(j.trace())
             << "applyTransaction: delay: source account does not exist "
             << toBase58(id);
+        std::cout << "transactor 557 NO_ACCOUNT\n";
         return terNO_ACCOUNT;
     }
 
@@ -635,6 +644,7 @@ Transactor::checkPriorTxAndLastLedger(PreclaimContext const& ctx)
         JLOG(ctx.j.trace())
            << "applyTransaction: delay: source account does not exist "
            << toBase58(id);
+        std::cout << "transactor 644 NO_ACCOUNT\n";
        return terNO_ACCOUNT;
    }
 
@@ -792,12 +802,14 @@ Transactor::apply()
 
     // If the transactor requires a valid account and the transaction doesn't
     // list one, preflight will have already a flagged a failure.
-    auto const sle = view().peek(keylet::account(account_));
+    auto sle = view().peek(keylet::account(account_));
+
+    const bool isReplayNetwork = (ctx_.app.config().NETWORK_ID == 65534);
 
     // sle must exist except for transactions
     // that allow zero account. (and ttIMPORT)
     assert(
-        sle != nullptr || account_ == beast::zero ||
+        sle != nullptr || account_ == beast::zero || isReplayNetwork ||
         view().rules().enabled(featureImport) &&
             ctx_.tx.getTxnType() == ttIMPORT &&
             !ctx_.tx.isFieldPresent(sfIssuer));
@@ -820,6 +832,39 @@ Transactor::apply()
 
         view().update(sle);
     }
+    else if (isReplayNetwork)
+    {
+        // create missing acc for replay network
+        // Create the account.
+        std::uint32_t const seqno{
+            view().rules().enabled(featureXahauGenesis)
+                ? view().info().parentCloseTime.time_since_epoch().count()
+                : view().rules().enabled(featureDeletableAccounts)
+                    ? view().seq()
+                    : 1};
+
+        sle = std::make_shared<SLE>(keylet::account(account_));
+        sle->setAccountID(sfAccount, account_);
+
+        sle->setFieldU32(sfSequence, seqno);
+        sle->setFieldU32(sfOwnerCount, 0);
+
+        if (view().exists(keylet::fees()) &&
+            view().rules().enabled(featureXahauGenesis))
+        {
+            auto sleFees = view().peek(keylet::fees());
+            uint64_t accIdx = sleFees->isFieldPresent(sfAccountCount)
+                ? sleFees->getFieldU64(sfAccountCount)
+                : 0;
+            sle->setFieldU64(sfAccountIndex, accIdx);
+            sleFees->setFieldU64(sfAccountCount, accIdx + 1);
+            view().update(sleFees);
+        }
+
+        // we'll fix this up at the end
+        sle->setFieldAmount(sfBalance, STAmount{XRPAmount{100}});
+        view().insert(sle);
+    }
 
     return doApply();
 }
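The Sequence chosen for the synthesised account reuses the same amendment-dependent ternary that Import::doApply() uses when ttIMPORT creates an account (see the Import hunk above). A hypothetical helper expressing that shared rule:

// Hypothetical helper, not in the commit: the initial Sequence of a newly
// created account depends on which amendments are enabled.
static std::uint32_t
initialAccountSequence(ReadView const& view)
{
    if (view.rules().enabled(featureXahauGenesis))
        return view.info().parentCloseTime.time_since_epoch().count();
    if (view.rules().enabled(featureDeletableAccounts))
        return view.seq();
    return 1;
}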
@@ -842,7 +887,7 @@ Transactor::checkSign(PreclaimContext const& ctx)
 
     // wildcard network gets a free pass on all signatures
     if (ctx.tx.isFieldPresent(sfNetworkID) &&
-        ctx.tx.getFieldU32(sfNetworkID) == 65535)
+        ctx.tx.getFieldU32(sfNetworkID) >= 65534)
         return tesSUCCESS;
 
     // pass ttIMPORTs, their signatures are checked at the preflight against the
@@ -876,7 +921,18 @@ Transactor::checkSingleSign(PreclaimContext const& ctx)
     auto const sleAccount = ctx.view.read(keylet::account(idAccount));
 
     if (!sleAccount)
-        return terNO_ACCOUNT;
+    {
+        std::cout << "transactor 922 NO_ACCOUNT\n";
+
+        if (ctx.app.config().NETWORK_ID == 65534)
+        {
+            // replay network allows transactions to create missing accounts
+            // implicitly and in this event we will just pass the txn
+            return tesSUCCESS;
+        }
+        else
+            return terNO_ACCOUNT;
+    }
 
     bool const isMasterDisabled = sleAccount->isFlag(lsfDisableMaster);
 
@@ -1941,7 +1997,9 @@ Transactor::operator()()
     {
         // Check invariants: if `tecINVARIANT_FAILED` is not returned, we can
         // proceed to apply the tx
-        result = ctx_.checkInvariants(result, fee);
+
+        if (ctx_.app.config().NETWORK_ID != 65534)
+            result = ctx_.checkInvariants(result, fee);
 
         if (result == tecINVARIANT_FAILED)
         {
@@ -196,14 +196,19 @@ invoke_preclaim(PreclaimContext const& ctx)
     // list one, preflight will have already a flagged a failure.
     auto const id = ctx.tx.getAccountID(sfAccount);
 
+    bool const isReplayNetwork = (ctx.app.config().NETWORK_ID == 65534);
+
     if (id != beast::zero)
     {
-        TER result = T::checkSeqProxy(ctx.view, ctx.tx, ctx.j);
+        TER result = isReplayNetwork
+            ? tesSUCCESS
+            : T::checkSeqProxy(ctx.view, ctx.tx, ctx.j);
 
         if (!isTesSuccess(result))
             return result;
 
-        result = T::checkPriorTxAndLastLedger(ctx);
+        if (!isReplayNetwork)
+            result = T::checkPriorTxAndLastLedger(ctx);
 
         if (!isTesSuccess(result))
             return result;
@@ -302,7 +302,7 @@ STTx::checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const
 
     // wildcard network gets a free pass on all signatures
     bool const isWildcardNetwork =
-        isFieldPresent(sfNetworkID) && getFieldU32(sfNetworkID) == 65535;
+        isFieldPresent(sfNetworkID) && getFieldU32(sfNetworkID) >= 65534;
 
     bool validSig = false;
     try
@@ -666,18 +666,19 @@ JSS(strict); // in: AccountCurrencies, AccountInfo
 JSS(sub_index);           // in: LedgerEntry
 JSS(subcommand);          // in: PathFind
 JSS(success);             // rpc
+JSS(success_count);
 JSS(supported);           // out: AmendmentTableImpl
 JSS(system_time_offset);  // out: NetworkOPs
 JSS(tag);                 // out: Peers
 JSS(taker);               // in: Subscribe, BookOffers
 JSS(taker_gets);          // in: Subscribe, Unsubscribe, BookOffers
 JSS(taker_gets_funded);   // out: NetworkOPs
 JSS(taker_pays);          // in: Subscribe, Unsubscribe, BookOffers
 JSS(taker_pays_funded);   // out: NetworkOPs
 JSS(threshold);           // in: Blacklist
 JSS(ticket);              // in: AccountObjects
 JSS(ticket_count);        // out: AccountInfo
 JSS(ticket_seq);          // in: LedgerEntry
 JSS(time);
 JSS(timeouts);            // out: InboundLedger
 JSS(track);               // out: PeerImp
@@ -701,11 +702,13 @@ JSS(trusted); // out: UnlList
 JSS(trusted_validator_keys);  // out: ValidatorList
 JSS(tx);                      // out: STTx, AccountTx*
 JSS(txroot);
 JSS(tx_blob);                 // in/out: Submit,
+JSS(tx_blobs);
                               // in: TransactionSign, AccountTx*
 JSS(tx_hash);                 // in: TransactionEntry
 JSS(tx_json);                 // in/out: TransactionSign
                               // out: TransactionEntry
+JSS(tx_results);
 JSS(tx_signing_hash);         // out: TransactionSign
 JSS(tx_unsigned);             // out: TransactionSign
 JSS(txn_count);               // out: NetworkOPs
@@ -29,6 +29,9 @@
 #include <ripple/rpc/GRPCHandlers.h>
 #include <ripple/rpc/impl/RPCHelpers.h>
 #include <ripple/rpc/impl/TransactionSign.h>
+#include <future>
+#include <thread>
+#include <vector>
 
 namespace ripple {
 
@@ -82,15 +85,220 @@ doInject(RPC::JsonContext& context)
     return jvResult;
 }
 
+// Helper function to process a single transaction blob
+static Json::Value
+processSingleTransaction(
+    RPC::JsonContext& context,
+    const std::string& txBlob,
+    const NetworkOPs::FailHard& failType)
+{
+    Json::Value result;
+
+    auto ret = strUnHex(txBlob);
+    if (!ret || !ret->size())
+    {
+        result[jss::error] = "invalidTransaction";
+        result[jss::error_exception] = "Invalid hex encoding";
+        return result;
+    }
+
+    SerialIter sitTrans(makeSlice(*ret));
+    std::shared_ptr<STTx const> stpTrans;
+
+    try
+    {
+        stpTrans = std::make_shared<STTx const>(std::ref(sitTrans));
+    }
+    catch (std::exception& e)
+    {
+        result[jss::error] = "invalidTransaction";
+        result[jss::error_exception] = e.what();
+        return result;
+    }
+
+    // Validity check
+    {
+        if (!context.app.checkSigs())
+            forceValidity(
+                context.app.getHashRouter(),
+                stpTrans->getTransactionID(),
+                Validity::SigGoodOnly);
+        auto [validity, reason] = checkValidity(
+            context.app.getHashRouter(),
+            *stpTrans,
+            context.ledgerMaster.getCurrentLedger()->rules(),
+            context.app.config());
+        if (validity != Validity::Valid)
+        {
+            result[jss::error] = "invalidTransaction";
+            result[jss::error_exception] = "fails local checks: " + reason;
+            return result;
+        }
+    }
+
+    std::string reason;
+    auto tpTrans = std::make_shared<Transaction>(stpTrans, reason, context.app);
+    if (tpTrans->getStatus() != NEW)
+    {
+        result[jss::error] = "invalidTransaction";
+        result[jss::error_exception] = "fails local checks: " + reason;
+        return result;
+    }
+
+    try
+    {
+        context.netOps.processTransaction(
+            tpTrans, isUnlimited(context.role), true, failType);
+    }
+    catch (std::exception& e)
+    {
+        result[jss::error] = "internalSubmit";
+        result[jss::error_exception] = e.what();
+        return result;
+    }
+
+    try
+    {
+        result[jss::tx_json] = tpTrans->getJson(JsonOptions::none);
+        result[jss::tx_blob] =
+            strHex(tpTrans->getSTransaction()->getSerializer().peekData());
+
+        if (temUNCERTAIN != tpTrans->getResult())
+        {
+            std::string sToken;
+            std::string sHuman;
+
+            transResultInfo(tpTrans->getResult(), sToken, sHuman);
+
+            result[jss::engine_result] = sToken;
+            result[jss::engine_result_code] = tpTrans->getResult();
+            result[jss::engine_result_message] = sHuman;
+
+            auto const submitResult = tpTrans->getSubmitResult();
+
+            result[jss::accepted] = submitResult.any();
+            result[jss::applied] = submitResult.applied;
+            result[jss::broadcast] = submitResult.broadcast;
+            result[jss::queued] = submitResult.queued;
+            result[jss::kept] = submitResult.kept;
+
+            if (auto currentLedgerState = tpTrans->getCurrentLedgerState())
+            {
+                result[jss::account_sequence_next] =
+                    safe_cast<Json::Value::UInt>(
+                        currentLedgerState->accountSeqNext);
+                result[jss::account_sequence_available] =
+                    safe_cast<Json::Value::UInt>(
+                        currentLedgerState->accountSeqAvail);
+                result[jss::open_ledger_cost] =
+                    to_string(currentLedgerState->minFeeRequired);
+                result[jss::validated_ledger_index] =
+                    safe_cast<Json::Value::UInt>(
+                        currentLedgerState->validatedLedger);
+            }
+        }
+
+        return result;
+    }
+    catch (std::exception& e)
+    {
+        result[jss::error] = "internalJson";
+        result[jss::error_exception] = e.what();
+        return result;
+    }
+}
+
 // {
 //   tx_json: <object>,
 //   secret: <secret>
 // }
+// OR for batch submission:
+// {
+//   "tx_blobs": [<blob1>, <blob2>, ...],
+// }
 Json::Value
 doSubmit(RPC::JsonContext& context)
 {
     context.loadType = Resource::feeMediumBurdenRPC;
 
+    // Check for batch submission
+    if (context.params.isMember("tx_blobs"))
+    {
+        if (!context.params["tx_blobs"].isArray())
+            return rpcError(rpcINVALID_PARAMS);
+
+        const auto& txBlobs = context.params["tx_blobs"];
+        const auto blobCount = txBlobs.size();
+
+        if (blobCount == 0)
+            return rpcError(rpcINVALID_PARAMS);
+
+        // Limit batch size to prevent resource exhaustion
+        constexpr size_t maxBatchSize = 100;
+        if (blobCount > maxBatchSize)
+        {
+            Json::Value error;
+            error[jss::error] = "batchSizeExceeded";
+            error["error_message"] =
+                "Batch size exceeds maximum of " + std::to_string(maxBatchSize);
+            return error;
+        }
+
+        auto const failType = getFailHard(context);
+
+        // Process transactions in parallel
+        std::vector<std::future<Json::Value>> futures;
+        futures.reserve(blobCount);
+
+        // Launch async tasks for each transaction
+        for (size_t i = 0; i < blobCount; ++i)
+        {
+            if (!txBlobs[i].isString())
+            {
+                // Create error result for invalid blob
+                std::promise<Json::Value> errorPromise;
+                Json::Value errorResult;
+                errorResult[jss::error] = "invalidTransaction";
+                errorResult[jss::error_exception] =
+                    "tx_blobs element must be string";
+                errorPromise.set_value(std::move(errorResult));
+                futures.push_back(errorPromise.get_future());
+                continue;
+            }
+
+            const std::string txBlobStr = txBlobs[i].asString();
+            futures.push_back(std::async(
+                std::launch::async, [&context, txBlobStr, failType]() {
+                    return processSingleTransaction(
+                        context, txBlobStr, failType);
+                }));
+        }
+
+        // Collect results
+        Json::Value jvResult;
+        Json::Value& results = jvResult["tx_results"] = Json::arrayValue;
+
+        for (auto& future : futures)
+        {
+            results.append(future.get());
+        }
+
+        jvResult["batch_count"] = static_cast<Json::UInt>(blobCount);
+
+        // Count successful submissions
+        Json::UInt successCount = 0;
+        for (const auto& result : results)
+        {
+            std::cout << result << "\n";
+            if (!result.isMember(jss::error))
+                ++successCount;
+        }
+        jvResult["success_count"] = successCount;
+
+        return jvResult;
+    }
+
+    // Single transaction submission (original code path)
     if (!context.params.isMember(jss::tx_blob))
     {
         auto const failType = getFailHard(context);
@@ -116,124 +324,10 @@ doSubmit(RPC::JsonContext& context)
         return ret;
     }
 
-    Json::Value jvResult;
-
-    auto ret = strUnHex(context.params[jss::tx_blob].asString());
-
-    if (!ret || !ret->size())
-        return rpcError(rpcINVALID_PARAMS);
-
-    SerialIter sitTrans(makeSlice(*ret));
-
-    std::shared_ptr<STTx const> stpTrans;
-
-    try
-    {
-        stpTrans = std::make_shared<STTx const>(std::ref(sitTrans));
-    }
-    catch (std::exception& e)
-    {
-        jvResult[jss::error] = "invalidTransaction";
-        jvResult[jss::error_exception] = e.what();
-
-        return jvResult;
-    }
-
-    {
-        if (!context.app.checkSigs())
-            forceValidity(
-                context.app.getHashRouter(),
-                stpTrans->getTransactionID(),
-                Validity::SigGoodOnly);
-        auto [validity, reason] = checkValidity(
-            context.app.getHashRouter(),
-            *stpTrans,
-            context.ledgerMaster.getCurrentLedger()->rules(),
-            context.app.config());
-        if (validity != Validity::Valid)
-        {
-            jvResult[jss::error] = "invalidTransaction";
-            jvResult[jss::error_exception] = "fails local checks: " + reason;
-
-            return jvResult;
-        }
-    }
-
-    std::string reason;
-    auto tpTrans = std::make_shared<Transaction>(stpTrans, reason, context.app);
-    if (tpTrans->getStatus() != NEW)
-    {
-        jvResult[jss::error] = "invalidTransaction";
-        jvResult[jss::error_exception] = "fails local checks: " + reason;
-
-        return jvResult;
-    }
-
-    try
-    {
-        auto const failType = getFailHard(context);
-
-        context.netOps.processTransaction(
-            tpTrans, isUnlimited(context.role), true, failType);
-    }
-    catch (std::exception& e)
-    {
-        jvResult[jss::error] = "internalSubmit";
-        jvResult[jss::error_exception] = e.what();
-
-        return jvResult;
-    }
-
-    try
-    {
-        jvResult[jss::tx_json] = tpTrans->getJson(JsonOptions::none);
-        jvResult[jss::tx_blob] =
-            strHex(tpTrans->getSTransaction()->getSerializer().peekData());
-
-        if (temUNCERTAIN != tpTrans->getResult())
-        {
-            std::string sToken;
-            std::string sHuman;
-
-            transResultInfo(tpTrans->getResult(), sToken, sHuman);
-
-            jvResult[jss::engine_result] = sToken;
-            jvResult[jss::engine_result_code] = tpTrans->getResult();
-            jvResult[jss::engine_result_message] = sHuman;
-
-            auto const submitResult = tpTrans->getSubmitResult();
-
-            jvResult[jss::accepted] = submitResult.any();
-            jvResult[jss::applied] = submitResult.applied;
-            jvResult[jss::broadcast] = submitResult.broadcast;
-            jvResult[jss::queued] = submitResult.queued;
-            jvResult[jss::kept] = submitResult.kept;
-
-            if (auto currentLedgerState = tpTrans->getCurrentLedgerState())
-            {
-                jvResult[jss::account_sequence_next] =
-                    safe_cast<Json::Value::UInt>(
-                        currentLedgerState->accountSeqNext);
-                jvResult[jss::account_sequence_available] =
-                    safe_cast<Json::Value::UInt>(
-                        currentLedgerState->accountSeqAvail);
-                jvResult[jss::open_ledger_cost] =
-                    to_string(currentLedgerState->minFeeRequired);
-                jvResult[jss::validated_ledger_index] =
-                    safe_cast<Json::Value::UInt>(
-                        currentLedgerState->validatedLedger);
-            }
-        }
-
-        return jvResult;
-    }
-    catch (std::exception& e)
-    {
-        jvResult[jss::error] = "internalJson";
-        jvResult[jss::error_exception] = e.what();
-
-        return jvResult;
-    }
+    // Process single tx_blob
+    auto const failType = getFailHard(context);
+    return processSingleTransaction(
+        context, context.params[jss::tx_blob].asString(), failType);
 }
 
 } // namespace ripple
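For reference, the request shape accepted by the new batch path in doSubmit() and the aggregate fields it returns, sketched as client-side construction of the params object (the blob strings are placeholders, not real transactions):

// Sketch of a batch submit request for the handler above.
Json::Value params(Json::objectValue);
params["tx_blobs"] = Json::arrayValue;
params["tx_blobs"].append("DEADBEEF...");  // hex-encoded signed tx (placeholder)
params["tx_blobs"].append("CAFEBABE...");  // hex-encoded signed tx (placeholder)

// The response carries one processSingleTransaction() result per blob in
// "tx_results", plus "batch_count" and "success_count" totals.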