From 027511a2a782409f9732daf7cecb68a10389461c Mon Sep 17 00:00:00 2001 From: Richard Holland Date: Thu, 12 Jun 2025 12:53:15 +1000 Subject: [PATCH] replay network code --- src/ripple/app/consensus/RCLConsensus.h | 12 + src/ripple/app/misc/NetworkOPs.cpp | 15 +- src/ripple/app/misc/impl/TxQ.cpp | 51 +++- src/ripple/app/tx/impl/Change.cpp | 23 +- src/ripple/app/tx/impl/Import.cpp | 11 +- src/ripple/app/tx/impl/Transactor.cpp | 72 +++++- src/ripple/app/tx/impl/applySteps.cpp | 9 +- src/ripple/protocol/impl/STTx.cpp | 2 +- src/ripple/protocol/jss.h | 37 +-- src/ripple/rpc/handlers/Submit.cpp | 330 +++++++++++++++--------- 10 files changed, 403 insertions(+), 159 deletions(-) diff --git a/src/ripple/app/consensus/RCLConsensus.h b/src/ripple/app/consensus/RCLConsensus.h index f8c01e93c..c1c54e781 100644 --- a/src/ripple/app/consensus/RCLConsensus.h +++ b/src/ripple/app/consensus/RCLConsensus.h @@ -129,6 +129,12 @@ class RCLConsensus return mode_; } + void + setProposing() + { + mode_ = ConsensusMode::proposing; + } + /** Called before kicking off a new consensus round. @param prevLedger Ledger that will be prior ledger for next round @@ -465,6 +471,12 @@ public: return adaptor_.mode(); } + void + setProposing() + { + adaptor_.setProposing(); + } + ConsensusPhase phase() const { diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 0e5b8ef5f..0cab943d9 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -944,7 +944,13 @@ NetworkOPsImp::processHeartbeatTimer() // do we have sufficient peers? If not, we are disconnected. if (numPeers < minPeerCount_) { - if (mMode != OperatingMode::DISCONNECTED) + if (app_.config().NETWORK_ID == 65534) + { + // replay network is always considered to be connected + // ensuring that it actually is is up to the tester + setMode(OperatingMode::FULL); + } + else if (mMode != OperatingMode::DISCONNECTED) { setMode(OperatingMode::DISCONNECTED); JLOG(m_journal.warn()) @@ -1797,6 +1803,13 @@ NetworkOPsImp::beginConsensus(uint256 const& networkClosed) { assert(networkClosed.isNonZero()); + if (app_.config().NETWORK_ID == 65534) + { + // replay network automatically goes to proposing + setMode(OperatingMode::FULL); + mConsensus.setProposing(); + } + auto closingInfo = m_ledgerMaster.getCurrentLedger()->info(); JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq diff --git a/src/ripple/app/misc/impl/TxQ.cpp b/src/ripple/app/misc/impl/TxQ.cpp index f321edf49..8fcb0a204 100644 --- a/src/ripple/app/misc/impl/TxQ.cpp +++ b/src/ripple/app/misc/impl/TxQ.cpp @@ -769,6 +769,47 @@ TxQ::apply( if (!isTesSuccess(pfresult.ter)) return {pfresult.ter, false}; + bool const isReplayNetwork = (app.config().NETWORK_ID == 65534); + + if (isReplayNetwork) + { + // in the replay network everything is always queued no matter what + + std::lock_guard lock(mutex_); + auto const metricsSnapshot = feeMetrics_.getSnapshot(); + auto const feeLevelPaid = + getRequiredFeeLevel(view, flags, metricsSnapshot, lock); + + auto const account = (*tx)[sfAccount]; + AccountMap::iterator accountIter = byAccount_.find(account); + bool const accountIsInQueue = accountIter != byAccount_.end(); + + if (!accountIsInQueue) + { + // Create a new TxQAccount object and add the byAccount lookup. 
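+            // (std::map::emplace returns a pair<iterator, bool>; the bool is
+            // consumed only by the assert below, and the (void) cast keeps
+            // release builds, where the assert compiles away, free of
+            // unused-variable warnings.)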
+ bool created; + std::tie(accountIter, created) = + byAccount_.emplace(account, TxQAccount(tx)); + (void)created; + assert(created); + } + + flags &= ~tapRETRY; + + auto& candidate = accountIter->second.add( + {tx, transactionID, feeLevelPaid, flags, pfresult}); + + // Then index it into the byFee lookup. + byFee_.insert(candidate); + JLOG(j_.debug()) << "Added transaction " << candidate.txID + << " with result " << transToken(pfresult.ter) + << " from " << (accountIsInQueue ? "existing" : "new") + << " account " << candidate.account << " to queue." + << " Flags: " << flags; + + return {terQUEUED, false}; + } + // If the account is not currently in the ledger, don't queue its tx. auto const account = (*tx)[sfAccount]; Keylet const accountKey{keylet::account(account)}; @@ -1158,11 +1199,11 @@ TxQ::apply( (potentialTotalSpend == XRPAmount{0} && multiTxn->applyView.fees().base == 0)); sleBump->setFieldAmount(sfBalance, balance - potentialTotalSpend); - // The transaction's sequence/ticket will be valid when the other - // transactions in the queue have been processed. If the tx has a - // sequence, set the account to match it. If it has a ticket, use - // the next queueable sequence, which is the closest approximation - // to the most successful case. + // The transaction's sequence/ticket will be valid when the + // other transactions in the queue have been processed. If the + // tx has a sequence, set the account to match it. If it has a + // ticket, use the next queueable sequence, which is the closest + // approximation to the most successful case. sleBump->at(sfSequence) = txSeqProx.isSeq() ? txSeqProx.value() : nextQueuableSeqImpl(sleAccount, lock).value(); diff --git a/src/ripple/app/tx/impl/Change.cpp b/src/ripple/app/tx/impl/Change.cpp index 61134ca25..62a931443 100644 --- a/src/ripple/app/tx/impl/Change.cpp +++ b/src/ripple/app/tx/impl/Change.cpp @@ -458,11 +458,24 @@ Change::activateXahauGenesis() bool const isTest = (ctx_.tx.getFlags() & tfTestSuite) && ctx_.app.config().standalone(); - // RH NOTE: we'll only configure xahau governance structure on networks that - // begin with 2133... so production xahau: 21337 and its testnet 21338 - // with 21330-21336 and 21339 also valid and reserved for dev nets etc. - // all other Network IDs will be conventionally configured. 
- if ((ctx_.app.config().NETWORK_ID / 10) != 2133 && !isTest) + // RH NOTE: we'll only configure xahau governance structure on certain + // network ids + + const auto nid = ctx_.app.config().NETWORK_ID; + + if (nid >= 65520) + { + // networks 65520 - 65535 are are also configured as xahau gov + } + else if (isTest) + { + // test is configured like this too + } + else if (nid / 10 == 2133) + { + // networks 2133X are the valid xahau prod dev and testnets + } + else return; auto [ng_entries, l1_entries, l2_entries, gov_params] = diff --git a/src/ripple/app/tx/impl/Import.cpp b/src/ripple/app/tx/impl/Import.cpp index 335cbe581..ed5a9294b 100644 --- a/src/ripple/app/tx/impl/Import.cpp +++ b/src/ripple/app/tx/impl/Import.cpp @@ -167,6 +167,9 @@ Import::preflight(PreflightContext const& ctx) if (!xpop) return temMALFORMED; + if (ctx.app.config().NETWORK_ID == 65534 /* replay network */) + return tesSUCCESS; + // we will check if we recognise the vl key in preclaim because it may be // from on-ledger object std::optional masterVLKey; @@ -270,7 +273,9 @@ Import::preflight(PreflightContext const& ctx) return temMALFORMED; } - if (stpTrans->getFieldU32(sfOperationLimit) != ctx.app.config().NETWORK_ID) + const auto nid = ctx.app.config().NETWORK_ID; + if (stpTrans->getFieldU32(sfOperationLimit) != nid && + nid != 65534 /* replay network */) { JLOG(ctx.j.warn()) << "Import: Wrong network ID for OperationLimit in " "inner txn. outer txid: " @@ -1307,8 +1312,8 @@ Import::doApply() view().rules().enabled(featureXahauGenesis) ? view().info().parentCloseTime.time_since_epoch().count() : view().rules().enabled(featureDeletableAccounts) - ? view().seq() - : 1}; + ? view().seq() + : 1}; sle = std::make_shared(keylet::account(id)); sle->setAccountID(sfAccount, id); diff --git a/src/ripple/app/tx/impl/Transactor.cpp b/src/ripple/app/tx/impl/Transactor.cpp index 305e39c5e..33ac6c6fe 100644 --- a/src/ripple/app/tx/impl/Transactor.cpp +++ b/src/ripple/app/tx/impl/Transactor.cpp @@ -120,8 +120,11 @@ preflight1(PreflightContext const& ctx) auto const fee = ctx.tx.getFieldAmount(sfFee); if (!fee.native() || fee.negative() || !isLegalAmount(fee.xrp())) { - JLOG(ctx.j.debug()) << "preflight1: invalid fee"; - return temBAD_FEE; + if (ctx.app.config().NETWORK_ID != 65534 /* replay network */) + { + JLOG(ctx.j.debug()) << "preflight1: invalid fee"; + return temBAD_FEE; + } } // if a hook emitted this transaction we bypass signature checks @@ -437,6 +440,10 @@ Transactor::minimumFee( TER Transactor::checkFee(PreclaimContext const& ctx, XRPAmount baseFee) { + // on the replay network fees are unimportant + if (ctx.app.config().NETWORK_ID == 65534 /* replay network */) + return tesSUCCESS; + if (!ctx.tx[sfFee].native()) return temBAD_FEE; @@ -478,6 +485,7 @@ Transactor::checkFee(PreclaimContext const& ctx, XRPAmount baseFee) "a fee and an existing account."; } } + std::cout << "transactor 485 NO_ACCOUNT\n"; return terNO_ACCOUNT; } @@ -549,6 +557,7 @@ Transactor::checkSeqProxy( JLOG(j.trace()) << "applyTransaction: delay: source account does not exist " << toBase58(id); + std::cout << "transactor 557 NO_ACCOUNT\n"; return terNO_ACCOUNT; } @@ -635,6 +644,7 @@ Transactor::checkPriorTxAndLastLedger(PreclaimContext const& ctx) JLOG(ctx.j.trace()) << "applyTransaction: delay: source account does not exist " << toBase58(id); + std::cout << "transactor 644 NO_ACCOUNT\n"; return terNO_ACCOUNT; } @@ -792,12 +802,14 @@ Transactor::apply() // If the transactor requires a valid account and the transaction doesn't // list one, preflight 
will have already a flagged a failure. - auto const sle = view().peek(keylet::account(account_)); + auto sle = view().peek(keylet::account(account_)); + + const bool isReplayNetwork = (ctx_.app.config().NETWORK_ID == 65534); // sle must exist except for transactions // that allow zero account. (and ttIMPORT) assert( - sle != nullptr || account_ == beast::zero || + sle != nullptr || account_ == beast::zero || isReplayNetwork || view().rules().enabled(featureImport) && ctx_.tx.getTxnType() == ttIMPORT && !ctx_.tx.isFieldPresent(sfIssuer)); @@ -820,6 +832,39 @@ Transactor::apply() view().update(sle); } + else if (isReplayNetwork) + { + // create missing acc for replay network + // Create the account. + std::uint32_t const seqno{ + view().rules().enabled(featureXahauGenesis) + ? view().info().parentCloseTime.time_since_epoch().count() + : view().rules().enabled(featureDeletableAccounts) + ? view().seq() + : 1}; + + sle = std::make_shared(keylet::account(account_)); + sle->setAccountID(sfAccount, account_); + + sle->setFieldU32(sfSequence, seqno); + sle->setFieldU32(sfOwnerCount, 0); + + if (view().exists(keylet::fees()) && + view().rules().enabled(featureXahauGenesis)) + { + auto sleFees = view().peek(keylet::fees()); + uint64_t accIdx = sleFees->isFieldPresent(sfAccountCount) + ? sleFees->getFieldU64(sfAccountCount) + : 0; + sle->setFieldU64(sfAccountIndex, accIdx); + sleFees->setFieldU64(sfAccountCount, accIdx + 1); + view().update(sleFees); + } + + // we'll fix this up at the end + sle->setFieldAmount(sfBalance, STAmount{XRPAmount{100}}); + view().insert(sle); + } return doApply(); } @@ -842,7 +887,7 @@ Transactor::checkSign(PreclaimContext const& ctx) // wildcard network gets a free pass on all signatures if (ctx.tx.isFieldPresent(sfNetworkID) && - ctx.tx.getFieldU32(sfNetworkID) == 65535) + ctx.tx.getFieldU32(sfNetworkID) >= 65534) return tesSUCCESS; // pass ttIMPORTs, their signatures are checked at the preflight against the @@ -876,7 +921,18 @@ Transactor::checkSingleSign(PreclaimContext const& ctx) auto const sleAccount = ctx.view.read(keylet::account(idAccount)); if (!sleAccount) - return terNO_ACCOUNT; + { + std::cout << "transactor 922 NO_ACCOUNT\n"; + + if (ctx.app.config().NETWORK_ID == 65534) + { + // replay network allows transactions to create missing accounts + // implicitly and in this event we will just pass the txn + return tesSUCCESS; + } + else + return terNO_ACCOUNT; + } bool const isMasterDisabled = sleAccount->isFlag(lsfDisableMaster); @@ -1941,7 +1997,9 @@ Transactor::operator()() { // Check invariants: if `tecINVARIANT_FAILED` is not returned, we can // proceed to apply the tx - result = ctx_.checkInvariants(result, fee); + + if (ctx_.app.config().NETWORK_ID != 65534) + result = ctx_.checkInvariants(result, fee); if (result == tecINVARIANT_FAILED) { diff --git a/src/ripple/app/tx/impl/applySteps.cpp b/src/ripple/app/tx/impl/applySteps.cpp index 2ae333fdd..eb6d8fbe6 100644 --- a/src/ripple/app/tx/impl/applySteps.cpp +++ b/src/ripple/app/tx/impl/applySteps.cpp @@ -196,14 +196,19 @@ invoke_preclaim(PreclaimContext const& ctx) // list one, preflight will have already a flagged a failure. auto const id = ctx.tx.getAccountID(sfAccount); + bool const isReplayNetwork = (ctx.app.config().NETWORK_ID == 65534); + if (id != beast::zero) { - TER result = T::checkSeqProxy(ctx.view, ctx.tx, ctx.j); + TER result = isReplayNetwork + ? 
tesSUCCESS + : T::checkSeqProxy(ctx.view, ctx.tx, ctx.j); if (!isTesSuccess(result)) return result; - result = T::checkPriorTxAndLastLedger(ctx); + if (!isReplayNetwork) + result = T::checkPriorTxAndLastLedger(ctx); if (!isTesSuccess(result)) return result; diff --git a/src/ripple/protocol/impl/STTx.cpp b/src/ripple/protocol/impl/STTx.cpp index cfa350381..74d41263e 100644 --- a/src/ripple/protocol/impl/STTx.cpp +++ b/src/ripple/protocol/impl/STTx.cpp @@ -302,7 +302,7 @@ STTx::checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const // wildcard network gets a free pass on all signatures bool const isWildcardNetwork = - isFieldPresent(sfNetworkID) && getFieldU32(sfNetworkID) == 65535; + isFieldPresent(sfNetworkID) && getFieldU32(sfNetworkID) >= 65534; bool validSig = false; try diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 0faa053f6..04fc52630 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -666,18 +666,19 @@ JSS(strict); // in: AccountCurrencies, AccountInfo JSS(sub_index); // in: LedgerEntry JSS(subcommand); // in: PathFind JSS(success); // rpc -JSS(supported); // out: AmendmentTableImpl -JSS(system_time_offset); // out: NetworkOPs -JSS(tag); // out: Peers -JSS(taker); // in: Subscribe, BookOffers -JSS(taker_gets); // in: Subscribe, Unsubscribe, BookOffers -JSS(taker_gets_funded); // out: NetworkOPs -JSS(taker_pays); // in: Subscribe, Unsubscribe, BookOffers -JSS(taker_pays_funded); // out: NetworkOPs -JSS(threshold); // in: Blacklist -JSS(ticket); // in: AccountObjects -JSS(ticket_count); // out: AccountInfo -JSS(ticket_seq); // in: LedgerEntry +JSS(success_count); +JSS(supported); // out: AmendmentTableImpl +JSS(system_time_offset); // out: NetworkOPs +JSS(tag); // out: Peers +JSS(taker); // in: Subscribe, BookOffers +JSS(taker_gets); // in: Subscribe, Unsubscribe, BookOffers +JSS(taker_gets_funded); // out: NetworkOPs +JSS(taker_pays); // in: Subscribe, Unsubscribe, BookOffers +JSS(taker_pays_funded); // out: NetworkOPs +JSS(threshold); // in: Blacklist +JSS(ticket); // in: AccountObjects +JSS(ticket_count); // out: AccountInfo +JSS(ticket_seq); // in: LedgerEntry JSS(time); JSS(timeouts); // out: InboundLedger JSS(track); // out: PeerImp @@ -701,11 +702,13 @@ JSS(trusted); // out: UnlList JSS(trusted_validator_keys); // out: ValidatorList JSS(tx); // out: STTx, AccountTx* JSS(txroot); -JSS(tx_blob); // in/out: Submit, - // in: TransactionSign, AccountTx* -JSS(tx_hash); // in: TransactionEntry -JSS(tx_json); // in/out: TransactionSign - // out: TransactionEntry +JSS(tx_blob); // in/out: Submit, +JSS(tx_blobs); +// in: TransactionSign, AccountTx* +JSS(tx_hash); // in: TransactionEntry +JSS(tx_json); // in/out: TransactionSign + // out: TransactionEntry +JSS(tx_results); JSS(tx_signing_hash); // out: TransactionSign JSS(tx_unsigned); // out: TransactionSign JSS(txn_count); // out: NetworkOPs diff --git a/src/ripple/rpc/handlers/Submit.cpp b/src/ripple/rpc/handlers/Submit.cpp index c9e3d5ff3..105df1926 100644 --- a/src/ripple/rpc/handlers/Submit.cpp +++ b/src/ripple/rpc/handlers/Submit.cpp @@ -29,6 +29,9 @@ #include #include #include +#include +#include +#include namespace ripple { @@ -82,15 +85,220 @@ doInject(RPC::JsonContext& context) return jvResult; } +// Helper function to process a single transaction blob +static Json::Value +processSingleTransaction( + RPC::JsonContext& context, + const std::string& txBlob, + const NetworkOPs::FailHard& failType) +{ + Json::Value result; + + auto ret = strUnHex(txBlob); + if (!ret || 
!ret->size()) + { + result[jss::error] = "invalidTransaction"; + result[jss::error_exception] = "Invalid hex encoding"; + return result; + } + + SerialIter sitTrans(makeSlice(*ret)); + std::shared_ptr stpTrans; + + try + { + stpTrans = std::make_shared(std::ref(sitTrans)); + } + catch (std::exception& e) + { + result[jss::error] = "invalidTransaction"; + result[jss::error_exception] = e.what(); + return result; + } + + // Validity check + { + if (!context.app.checkSigs()) + forceValidity( + context.app.getHashRouter(), + stpTrans->getTransactionID(), + Validity::SigGoodOnly); + auto [validity, reason] = checkValidity( + context.app.getHashRouter(), + *stpTrans, + context.ledgerMaster.getCurrentLedger()->rules(), + context.app.config()); + if (validity != Validity::Valid) + { + result[jss::error] = "invalidTransaction"; + result[jss::error_exception] = "fails local checks: " + reason; + return result; + } + } + + std::string reason; + auto tpTrans = std::make_shared(stpTrans, reason, context.app); + if (tpTrans->getStatus() != NEW) + { + result[jss::error] = "invalidTransaction"; + result[jss::error_exception] = "fails local checks: " + reason; + return result; + } + + try + { + context.netOps.processTransaction( + tpTrans, isUnlimited(context.role), true, failType); + } + catch (std::exception& e) + { + result[jss::error] = "internalSubmit"; + result[jss::error_exception] = e.what(); + return result; + } + + try + { + result[jss::tx_json] = tpTrans->getJson(JsonOptions::none); + result[jss::tx_blob] = + strHex(tpTrans->getSTransaction()->getSerializer().peekData()); + + if (temUNCERTAIN != tpTrans->getResult()) + { + std::string sToken; + std::string sHuman; + + transResultInfo(tpTrans->getResult(), sToken, sHuman); + + result[jss::engine_result] = sToken; + result[jss::engine_result_code] = tpTrans->getResult(); + result[jss::engine_result_message] = sHuman; + + auto const submitResult = tpTrans->getSubmitResult(); + + result[jss::accepted] = submitResult.any(); + result[jss::applied] = submitResult.applied; + result[jss::broadcast] = submitResult.broadcast; + result[jss::queued] = submitResult.queued; + result[jss::kept] = submitResult.kept; + + if (auto currentLedgerState = tpTrans->getCurrentLedgerState()) + { + result[jss::account_sequence_next] = + safe_cast( + currentLedgerState->accountSeqNext); + result[jss::account_sequence_available] = + safe_cast( + currentLedgerState->accountSeqAvail); + result[jss::open_ledger_cost] = + to_string(currentLedgerState->minFeeRequired); + result[jss::validated_ledger_index] = + safe_cast( + currentLedgerState->validatedLedger); + } + } + + return result; + } + catch (std::exception& e) + { + result[jss::error] = "internalJson"; + result[jss::error_exception] = e.what(); + return result; + } +} + // { // tx_json: , // secret: // } +// OR for batch submission: +// { +// "tx_blobs": [, , ...], +// } Json::Value doSubmit(RPC::JsonContext& context) { context.loadType = Resource::feeMediumBurdenRPC; + // Check for batch submission + if (context.params.isMember("tx_blobs")) + { + if (!context.params["tx_blobs"].isArray()) + return rpcError(rpcINVALID_PARAMS); + + const auto& txBlobs = context.params["tx_blobs"]; + const auto blobCount = txBlobs.size(); + + if (blobCount == 0) + return rpcError(rpcINVALID_PARAMS); + + // Limit batch size to prevent resource exhaustion + constexpr size_t maxBatchSize = 100; + if (blobCount > maxBatchSize) + { + Json::Value error; + error[jss::error] = "batchSizeExceeded"; + error["error_message"] = + "Batch size exceeds 
maximum of " + std::to_string(maxBatchSize); + return error; + } + + auto const failType = getFailHard(context); + + // Process transactions in parallel + std::vector> futures; + futures.reserve(blobCount); + + // Launch async tasks for each transaction + for (size_t i = 0; i < blobCount; ++i) + { + if (!txBlobs[i].isString()) + { + // Create error result for invalid blob + std::promise errorPromise; + Json::Value errorResult; + errorResult[jss::error] = "invalidTransaction"; + errorResult[jss::error_exception] = + "tx_blobs element must be string"; + errorPromise.set_value(std::move(errorResult)); + futures.push_back(errorPromise.get_future()); + continue; + } + + const std::string txBlobStr = txBlobs[i].asString(); + futures.push_back(std::async( + std::launch::async, [&context, txBlobStr, failType]() { + return processSingleTransaction( + context, txBlobStr, failType); + })); + } + + // Collect results + Json::Value jvResult; + Json::Value& results = jvResult["tx_results"] = Json::arrayValue; + + for (auto& future : futures) + { + results.append(future.get()); + } + + jvResult["batch_count"] = static_cast(blobCount); + + // Count successful submissions + Json::UInt successCount = 0; + for (const auto& result : results) + { + std::cout << result << "\n"; + if (!result.isMember(jss::error)) + ++successCount; + } + jvResult["success_count"] = successCount; + + return jvResult; + } + + // Single transaction submission (original code path) if (!context.params.isMember(jss::tx_blob)) { auto const failType = getFailHard(context); @@ -116,124 +324,10 @@ doSubmit(RPC::JsonContext& context) return ret; } - Json::Value jvResult; - - auto ret = strUnHex(context.params[jss::tx_blob].asString()); - - if (!ret || !ret->size()) - return rpcError(rpcINVALID_PARAMS); - - SerialIter sitTrans(makeSlice(*ret)); - - std::shared_ptr stpTrans; - - try - { - stpTrans = std::make_shared(std::ref(sitTrans)); - } - catch (std::exception& e) - { - jvResult[jss::error] = "invalidTransaction"; - jvResult[jss::error_exception] = e.what(); - - return jvResult; - } - - { - if (!context.app.checkSigs()) - forceValidity( - context.app.getHashRouter(), - stpTrans->getTransactionID(), - Validity::SigGoodOnly); - auto [validity, reason] = checkValidity( - context.app.getHashRouter(), - *stpTrans, - context.ledgerMaster.getCurrentLedger()->rules(), - context.app.config()); - if (validity != Validity::Valid) - { - jvResult[jss::error] = "invalidTransaction"; - jvResult[jss::error_exception] = "fails local checks: " + reason; - - return jvResult; - } - } - - std::string reason; - auto tpTrans = std::make_shared(stpTrans, reason, context.app); - if (tpTrans->getStatus() != NEW) - { - jvResult[jss::error] = "invalidTransaction"; - jvResult[jss::error_exception] = "fails local checks: " + reason; - - return jvResult; - } - - try - { - auto const failType = getFailHard(context); - - context.netOps.processTransaction( - tpTrans, isUnlimited(context.role), true, failType); - } - catch (std::exception& e) - { - jvResult[jss::error] = "internalSubmit"; - jvResult[jss::error_exception] = e.what(); - - return jvResult; - } - - try - { - jvResult[jss::tx_json] = tpTrans->getJson(JsonOptions::none); - jvResult[jss::tx_blob] = - strHex(tpTrans->getSTransaction()->getSerializer().peekData()); - - if (temUNCERTAIN != tpTrans->getResult()) - { - std::string sToken; - std::string sHuman; - - transResultInfo(tpTrans->getResult(), sToken, sHuman); - - jvResult[jss::engine_result] = sToken; - jvResult[jss::engine_result_code] = 
tpTrans->getResult(); - jvResult[jss::engine_result_message] = sHuman; - - auto const submitResult = tpTrans->getSubmitResult(); - - jvResult[jss::accepted] = submitResult.any(); - jvResult[jss::applied] = submitResult.applied; - jvResult[jss::broadcast] = submitResult.broadcast; - jvResult[jss::queued] = submitResult.queued; - jvResult[jss::kept] = submitResult.kept; - - if (auto currentLedgerState = tpTrans->getCurrentLedgerState()) - { - jvResult[jss::account_sequence_next] = - safe_cast( - currentLedgerState->accountSeqNext); - jvResult[jss::account_sequence_available] = - safe_cast( - currentLedgerState->accountSeqAvail); - jvResult[jss::open_ledger_cost] = - to_string(currentLedgerState->minFeeRequired); - jvResult[jss::validated_ledger_index] = - safe_cast( - currentLedgerState->validatedLedger); - } - } - - return jvResult; - } - catch (std::exception& e) - { - jvResult[jss::error] = "internalJson"; - jvResult[jss::error_exception] = e.what(); - - return jvResult; - } + // Process single tx_blob + auto const failType = getFailHard(context); + return processSingleTransaction( + context, context.params[jss::tx_blob].asString(), failType); } } // namespace ripple
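
Usage note (illustrative only, not part of the diff): the batch form of the
submit handler added above takes a "tx_blobs" array of hex-encoded signed
transaction blobs (at most 100 per request, enforced by maxBatchSize) and
returns one entry per blob in "tx_results", plus "batch_count" and
"success_count" (the number of entries without an "error" field). The sketch
below assumes a standalone jsoncpp build rather than rippled's bundled Json
library, and uses placeholder blobs; it only demonstrates the request and
response shapes, it is not a working client.

#include <json/json.h>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    // Hex-encoded, signed transaction blobs to submit as one batch.
    // These are placeholders, not real transactions.
    std::vector<std::string> blobs = {
        "1200002280000000...",
        "1200002280000000...",
    };

    // Build {"method": "submit", "params": [{"tx_blobs": [...]}]}.
    Json::Value params(Json::objectValue);
    params["tx_blobs"] = Json::Value(Json::arrayValue);
    for (auto const& b : blobs)
        params["tx_blobs"].append(b);

    Json::Value request;
    request["method"] = "submit";
    request["params"].append(params);

    std::cout << request << std::endl;

    // Expected result shape (per the handler above):
    //   "tx_results":    one object per blob, in submission order; either the
    //                    usual single-submit fields (tx_json, tx_blob,
    //                    engine_result, ...) or an "error" object
    //   "batch_count":   number of blobs submitted
    //   "success_count": entries in tx_results without an "error" field
    return 0;
}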