Mirror of https://github.com/Xahau/xahaud.git (synced 2026-01-18 05:35:16 +00:00)

Compare commits: reduced-im...parasig (11 commits)

Commits (SHA1):
c87f0817d1
cd1b18f47f
88b7cb81af
d801fdbe5d
0eb46d0d49
13178193d6
428ee457dc
2cf2ac6e12
cb29902a37
8c91d861c0
77bd6b9d20

@@ -729,6 +729,7 @@ if (tests)
src/test/app/LedgerLoad_test.cpp
src/test/app/LedgerMaster_test.cpp
src/test/app/LedgerReplay_test.cpp
src/test/app/LedgerStress_test.cpp
src/test/app/LoadFeeTrack_test.cpp
src/test/app/Manifest_test.cpp
src/test/app/MultiSign_test.cpp

@@ -67,5 +67,5 @@ git-subtree. See those directories' README files for more details.
- [explorer.xahau.network](https://explorer.xahau.network)
- **Testnet & Faucet**: Test applications and obtain test XAH at [xahau-test.net](https://xahau-test.net) and use the testnet explorer at [explorer.xahau.network](https://explorer.xahau.network).
- **Supporting Wallets**: A list of wallets that support XAH and Xahau-based assets.
- [Xaman](https://xaman.app)
- [Xumm](https://xumm.app)
- [Crossmark](https://crossmark.io)

@@ -322,6 +322,9 @@ public:
void
transactionBatch();

void
forceTransactionBatch();

/**
* Attempt to apply transactions and post-process based on the results.
*

@@ -1136,6 +1139,7 @@ NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
void
NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
{
// Launch async task and return immediately
if (isNeedNetworkLedger())
{
// Nothing we can do if we've never been in sync

@@ -1147,9 +1151,9 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
// Enforce Network bar for emitted txn
if (view->rules().enabled(featureHooks) && hook::isEmittedTxn(*iTrans))
{
// RH NOTE: Warning removed here due to ConsesusSet using this function
// which continually triggers this bar. Doesn't seem dangerous, just
// annoying.
// RH NOTE: Warning removed here due to ConsesusSet using this
// function which continually triggers this bar. Doesn't seem
// dangerous, just annoying.

// JLOG(m_journal.warn())
// << "Submitted transaction invalid: EmitDetails present.";

@@ -1164,9 +1168,9 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)

if ((flags & SF_BAD) != 0)
{
// RH NOTE: Warning removed here due to ConsesusSet using this function
// which continually triggers this bar. Doesn't seem dangerous, just
// annoying.
// RH NOTE: Warning removed here due to ConsesusSet using this
// function which continually triggers this bar. Doesn't seem
// dangerous, just annoying.

// JLOG(m_journal.warn()) << "Submitted transaction cached bad";
return;

@@ -1364,6 +1368,17 @@ NetworkOPsImp::doTransactionSync(
} while (transaction->getApplying());
}

void
NetworkOPsImp::forceTransactionBatch()
{
std::unique_lock<std::mutex> lock(mMutex);
mDispatchState = DispatchState::scheduled;
while (mTransactions.size())
{
apply(lock);
}
}

void
NetworkOPsImp::transactionBatch()
{

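Note: a minimal caller-side sketch of the hook added above, assuming only what the hunk shows (NetworkOPs now exposes forceTransactionBatch(), reachable through Application::getOPs()). Unlike transactionBatch(), the forced variant synchronously drains every queued transaction before returning.

#include <ripple/app/main/Application.h>
#include <ripple/app/misc/NetworkOPs.h>

// Hypothetical helper: flush everything queued by submitTransaction() /
// processTransaction() so a caller (e.g. a test) can close a ledger that is
// guaranteed to already contain the injected transactions.
void
flushPendingTransactions(ripple::Application& app)
{
    app.getOPs().forceTransactionBatch();
}
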
@@ -1398,7 +1413,6 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
std::unique_lock ledgerLock{
m_ledgerMaster.peekMutex(), std::defer_lock};
std::lock(masterLock, ledgerLock);

app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
for (TransactionStatus& e : transactions)
{

@@ -137,7 +137,10 @@ public:
std::shared_ptr<Transaction>& transaction,
bool bUnlimited,
bool bLocal,
FailHard failType) = 0;
FailHard failType = FailHard::no) = 0;

virtual void
forceTransactionBatch() = 0;

//--------------------------------------------------------------------------
//

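Note: with failType now defaulted to FailHard::no, callers may omit the last argument. A one-line sketch (ops and tx are assumed: an existing NetworkOPs& and a prepared std::shared_ptr<Transaction>):

// Equivalent to ops.processTransaction(tx, true, false, NetworkOPs::FailHard::no);
ops.processTransaction(tx, /*bUnlimited=*/true, /*bLocal=*/false);
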
@@ -24,6 +24,7 @@
#include <ripple/app/misc/TxQ.h>
#include <ripple/app/tx/apply.h>
#include <ripple/basics/mulDiv.h>
#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/Feature.h>
#include <ripple/protocol/jss.h>
#include <ripple/protocol/st.h>

@@ -1897,7 +1898,15 @@ TxQ::tryDirectApply(
// transaction straight into the ledger.
FeeLevel64 const feeLevelPaid = getFeeLevelPaid(view, *tx);

if (feeLevelPaid >= requiredFeeLevel)
static auto const genesisAccountId = calcAccountID(
generateKeyPair(KeyType::secp256k1, generateSeed("masterpassphrase"))
.first);

// RH NOTE: exempting the genesis account from fee escalation is useful for
// stress testing it also shouldn't require an amendment because it will be
// fought out in consensus.
if (feeLevelPaid >= requiredFeeLevel ||
(*tx)[sfAccount] == genesisAccountId)
{
// Attempt to apply the transaction directly.
auto const transactionID = tx->getTransactionID();

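Note: the fee-escalation exemption above keys off the account derived from the well-known "masterpassphrase" seed. A standalone sketch of that derivation, using the same protocol helpers the hunk calls (generateSeed, generateKeyPair, calcAccountID); on XRPL-derived networks this yields the familiar rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh genesis account.

#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/KeyType.h>
#include <ripple/protocol/SecretKey.h>
#include <ripple/protocol/Seed.h>

// Recompute the genesis AccountID from the well-known master passphrase.
ripple::AccountID
genesisAccount()
{
    using namespace ripple;
    auto const seed = generateSeed("masterpassphrase");
    auto const keyPair = generateKeyPair(KeyType::secp256k1, seed);  // {PublicKey, SecretKey}
    return calcAccountID(keyPair.first);
}
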
@@ -889,45 +889,6 @@ Import::preclaim(PreclaimContext const& ctx)
}

auto const& sle = ctx.view.read(keylet::account(ctx.tx[sfAccount]));

auto const tt = stpTrans->getTxnType();
if ((tt == ttSIGNER_LIST_SET || tt == ttREGULAR_KEY_SET) &&
ctx.view.rules().enabled(fixReduceImport) && sle)
{
// blackhole check
do
{
// if master key is not set then it is not blackholed
if (!(sle->getFlags() & lsfDisableMaster))
break;

// if a regular key is set then it must be acc 0, 1, or 2 otherwise
// not blackholed
if (sle->isFieldPresent(sfRegularKey))
{
AccountID rk = sle->getAccountID(sfRegularKey);
static const AccountID ACCOUNT_ZERO(0);
static const AccountID ACCOUNT_ONE(1);
static const AccountID ACCOUNT_TWO(2);

if (rk != ACCOUNT_ZERO && rk != ACCOUNT_ONE &&
rk != ACCOUNT_TWO)
break;
}

// if a signer list is set then it's not blackholed
auto const signerListKeylet = keylet::signers(ctx.tx[sfAccount]);
if (ctx.view.exists(signerListKeylet))
break;

// execution to here means it's blackholed
JLOG(ctx.j.warn())
<< "Import: during preclaim target account is blackholed "
<< ctx.tx[sfAccount] << ", bailing.";
return tefIMPORT_BLACKHOLED;
} while (0);
}

if (sle && sle->isFieldPresent(sfImportSequence))
{
uint32_t sleImportSequence = sle->getFieldU32(sfImportSequence);

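Note: the check removed here treats an account as blackholed when the master key is disabled, any regular key points at one of the well-known unusable accounts (ACCOUNT_ZERO/ONE/TWO), and no signer list exists. A condensed sketch of that predicate as a free function (the helper name and shape are mine; the accessors are the ones used above):

#include <ripple/ledger/ReadView.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/LedgerFormats.h>
#include <ripple/protocol/SField.h>

// Hypothetical helper mirroring the removed Import::preclaim logic.
static bool
isBlackholed(ripple::ReadView const& view, ripple::AccountID const& id)
{
    using namespace ripple;
    auto const sle = view.read(keylet::account(id));
    if (!sle || !(sle->getFlags() & lsfDisableMaster))
        return false;  // no account entry, or master key still usable

    if (sle->isFieldPresent(sfRegularKey))
    {
        static AccountID const zero(0), one(1), two(2);
        auto const rk = sle->getAccountID(sfRegularKey);
        if (rk != zero && rk != one && rk != two)
            return false;  // a usable regular key exists
    }

    // A signer list would still let the account sign.
    return !view.exists(keylet::signers(id));
}
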
@@ -38,6 +38,7 @@
#include <memory>
#include <optional>
#include <unordered_set>
#include <iterator>

namespace ripple {

@@ -321,6 +322,12 @@ public:

// The range of transactions
txs_type txs;

std::size_t
txCount() const
{
return std::distance(txs.begin(), txs.end());
}
};

//------------------------------------------------------------------------------

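Note: the new txCount() is consumed by the stress test added later in this diff; a usage sketch (assuming a jtx::Env, and that this hunk is ReadView, which the txs member suggests):

// after env.close():
auto const closed = env.closed();                // std::shared_ptr<ReadView const>
std::size_t const applied = closed->txCount();   // transactions in that closed ledger
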
@@ -710,7 +710,10 @@ Shard::finalize(bool writeSQLite, std::optional<uint256> const& referenceHash)
if (writeSQLite && !storeSQLite(ledger))
return fail("failed storing to SQLite databases");

assert(ledger->info().seq == ledgerSeq && ledger->read(keylet::fees()));
assert(
ledger->info().seq == ledgerSeq &&
(ledger->info().seq < XRP_LEDGER_EARLIEST_FEES ||
ledger->read(keylet::fees())));

hash = ledger->info().parentHash;
next = std::move(ledger);

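Note: the relaxed assertion only requires a fee-settings entry for ledgers at or after XRP_LEDGER_EARLIEST_FEES. Expressed as a standalone predicate (a sketch reusing the same names; the function itself is mine):

#include <ripple/app/ledger/Ledger.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/SystemParameters.h>

// True when a finalized ledger satisfies the relaxed shard invariant.
static bool
feesInvariantHolds(ripple::Ledger const& ledger, std::uint32_t ledgerSeq)
{
    using namespace ripple;
    return ledger.info().seq == ledgerSeq &&
        (ledger.info().seq < XRP_LEDGER_EARLIEST_FEES ||
         ledger.read(keylet::fees()) != nullptr);
}
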
@@ -74,7 +74,7 @@ namespace detail {
// Feature.cpp. Because it's only used to reserve storage, and determine how
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
// the actual number of amendments. A LogicError on startup will verify this.
static constexpr std::size_t numFeatures = 75;
static constexpr std::size_t numFeatures = 74;

/** Amendments that this server supports and the default voting behavior.
Whether they are enabled depends on the Rules defined in the validated

@@ -362,7 +362,6 @@ extern uint256 const fix240819;
extern uint256 const fixPageCap;
extern uint256 const fix240911;
extern uint256 const fixFloatDivide;
extern uint256 const fixReduceImport;

} // namespace ripple

@@ -184,7 +184,6 @@ enum TEFcodes : TERUnderlyingType {
tefPAST_IMPORT_SEQ,
tefPAST_IMPORT_VL_SEQ,
tefNONDIR_EMIT,
tefIMPORT_BLACKHOLED,
};

//------------------------------------------------------------------------------

@@ -468,7 +468,6 @@ REGISTER_FIX (fix240819, Supported::yes, VoteBehavior::De
REGISTER_FIX (fixPageCap, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FIX (fix240911, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FIX (fixFloatDivide, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FIX (fixReduceImport, Supported::yes, VoteBehavior::DefaultYes);

// The following amendments are obsolete, but must remain supported
// because they could potentially get enabled.

@@ -116,7 +116,6 @@ transResults()
MAKE_ERROR(tefNO_TICKET, "Ticket is not in ledger."),
MAKE_ERROR(tefNFTOKEN_IS_NOT_TRANSFERABLE, "The specified NFToken is not transferable."),
MAKE_ERROR(tefNONDIR_EMIT, "An emitted txn was injected into the ledger without a corresponding directory entry."),
MAKE_ERROR(tefIMPORT_BLACKHOLED, "Cannot import keying because target account is blackholed."),

MAKE_ERROR(telLOCAL_ERROR, "Local failure."),
MAKE_ERROR(telBAD_DOMAIN, "Domain too long."),

@@ -79,7 +79,7 @@ class Import_test : public beast::unit_test::suite
importVLSequence(jtx::Env const& env, PublicKey const& pk)
{
auto const sle = env.le(keylet::import_vlseq(pk));
if (sle && sle->isFieldPresent(sfImportSequence))
if (sle->isFieldPresent(sfImportSequence))
return (*sle)[sfImportSequence];
return 0;
}

@@ -2672,134 +2672,6 @@ class Import_test : public beast::unit_test::suite
env(import::import(alice, tmpXpop), ter(temMALFORMED));
}

// tefIMPORT_BLACKHOLED - SetRegularKey (w/seed) AccountZero
{
test::jtx::Env env{
*this, network::makeNetworkVLConfig(21337, keys)};
auto const feeDrops = env.current()->fees().base;

auto const alice = Account("alice");
env.fund(XRP(1000), alice);
env.close();

// Set Regular Key
Json::Value jv;
jv[jss::Account] = alice.human();
const AccountID ACCOUNT_ZERO(0);
jv["RegularKey"] = to_string(ACCOUNT_ZERO);
jv[jss::TransactionType] = jss::SetRegularKey;
env(jv, alice);

// Disable Master Key
env(fset(alice, asfDisableMaster), sig(alice));
env.close();

// Import with Master Key
Json::Value tmpXpop =
import::loadXpop(ImportTCSetRegularKey::w_seed);
env(import::import(alice, tmpXpop),
ter(tefIMPORT_BLACKHOLED),
fee(feeDrops * 10),
sig(alice));
env.close();
}

// tefIMPORT_BLACKHOLED - SetRegularKey (w/seed) AccountOne
{
test::jtx::Env env{
*this, network::makeNetworkVLConfig(21337, keys)};
auto const feeDrops = env.current()->fees().base;

auto const alice = Account("alice");
env.fund(XRP(1000), alice);
env.close();

// Set Regular Key
Json::Value jv;
jv[jss::Account] = alice.human();
const AccountID ACCOUNT_ONE(1);
jv["RegularKey"] = to_string(ACCOUNT_ONE);
jv[jss::TransactionType] = jss::SetRegularKey;
env(jv, alice);

// Disable Master Key
env(fset(alice, asfDisableMaster), sig(alice));
env.close();

// Import with Master Key
Json::Value tmpXpop =
import::loadXpop(ImportTCSetRegularKey::w_seed);
env(import::import(alice, tmpXpop),
ter(tefIMPORT_BLACKHOLED),
fee(feeDrops * 10),
sig(alice));
env.close();
}

// tefIMPORT_BLACKHOLED - SetRegularKey (w/seed) AccountTwo
{
test::jtx::Env env{
*this, network::makeNetworkVLConfig(21337, keys)};
auto const feeDrops = env.current()->fees().base;

auto const alice = Account("alice");
env.fund(XRP(1000), alice);
env.close();

// Set Regular Key
Json::Value jv;
jv[jss::Account] = alice.human();
const AccountID ACCOUNT_TWO(2);
jv["RegularKey"] = to_string(ACCOUNT_TWO);
jv[jss::TransactionType] = jss::SetRegularKey;
env(jv, alice);

// Disable Master Key
env(fset(alice, asfDisableMaster), sig(alice));
env.close();

// Import with Master Key
Json::Value tmpXpop =
import::loadXpop(ImportTCSetRegularKey::w_seed);
env(import::import(alice, tmpXpop),
ter(tefIMPORT_BLACKHOLED),
fee(feeDrops * 10),
sig(alice));
env.close();
}

// tefIMPORT_BLACKHOLED - SignersListSet (w/seed)
{
test::jtx::Env env{
*this, network::makeNetworkVLConfig(21337, keys)};
auto const feeDrops = env.current()->fees().base;

auto const alice = Account("alice");
env.fund(XRP(1000), alice);
env.close();

// Set Regular Key
Json::Value jv;
jv[jss::Account] = alice.human();
const AccountID ACCOUNT_ZERO(0);
jv["RegularKey"] = to_string(ACCOUNT_ZERO);
jv[jss::TransactionType] = jss::SetRegularKey;
env(jv, alice);

// Disable Master Key
env(fset(alice, asfDisableMaster), sig(alice));
env.close();

// Import with Master Key
Json::Value tmpXpop =
import::loadXpop(ImportTCSignersListSet::w_seed);
env(import::import(alice, tmpXpop),
ter(tefIMPORT_BLACKHOLED),
fee(feeDrops * 10),
sig(alice));
env.close();
}

// tefPAST_IMPORT_SEQ
{
test::jtx::Env env{

@@ -4708,22 +4580,14 @@ class Import_test : public beast::unit_test::suite
// confirm signers set
auto const [signers, signersSle] =
signersKeyAndSle(*env.current(), alice);
auto const signerEntries =
signersSle->getFieldArray(sfSignerEntries);
BEAST_EXPECT(signerEntries.size() == 2);
BEAST_EXPECT(signerEntries[0u].getFieldU16(sfSignerWeight) == 1);
BEAST_EXPECT(
signersSle && signersSle->isFieldPresent(sfSignerEntries));
if (signersSle && signersSle->isFieldPresent(sfSignerEntries))
{
auto const signerEntries =
signersSle->getFieldArray(sfSignerEntries);
BEAST_EXPECT(signerEntries.size() == 2);
BEAST_EXPECT(
signerEntries[0u].getFieldU16(sfSignerWeight) == 1);
BEAST_EXPECT(
signerEntries[0u].getAccountID(sfAccount) == carol.id());
BEAST_EXPECT(
signerEntries[1u].getFieldU16(sfSignerWeight) == 1);
BEAST_EXPECT(
signerEntries[1u].getAccountID(sfAccount) == bob.id());
}
signerEntries[0u].getAccountID(sfAccount) == carol.id());
BEAST_EXPECT(signerEntries[1u].getFieldU16(sfSignerWeight) == 1);
BEAST_EXPECT(signerEntries[1u].getAccountID(sfAccount) == bob.id());

// confirm multisign tx
env.close();

@@ -6122,69 +5986,6 @@ class Import_test : public beast::unit_test::suite
}
}

void
testBlackhole(FeatureBitset features)
{
testcase("blackhole");

using namespace test::jtx;
using namespace std::literals;

auto blackholeAccount = [&](Env& env, Account const& acct) {
// Set Regular Key
Json::Value jv;
jv[jss::Account] = acct.human();
const AccountID ACCOUNT_ZERO(0);
jv["RegularKey"] = to_string(ACCOUNT_ZERO);
jv[jss::TransactionType] = jss::SetRegularKey;
env(jv, acct);

// Disable Master Key
env(fset(acct, asfDisableMaster), sig(acct));
env.close();
};

auto burnHeader = [&](Env& env) {
// confirm total coins header
auto const initCoins = env.current()->info().drops;
BEAST_EXPECT(initCoins == 100'000'000'000'000'000);

// burn 10'000 xrp
auto const master = Account("masterpassphrase");
env(noop(master), fee(100'000'000'000'000), ter(tesSUCCESS));
env.close();

// confirm total coins header
auto const burnCoins = env.current()->info().drops;
BEAST_EXPECT(burnCoins == initCoins - 100'000'000'000'000);
};

// AccountSet (w/seed)
{
test::jtx::Env env{
*this, network::makeNetworkVLConfig(21337, keys)};
auto const feeDrops = env.current()->fees().base;

// Burn Header
burnHeader(env);

auto const alice = Account("alice");
env.fund(XRP(1000), alice);
env.close();

// Blackhole Account
blackholeAccount(env, alice);

// Import with Master Key
Json::Value tmpXpop = import::loadXpop(ImportTCAccountSet::w_seed);
env(import::import(alice, tmpXpop),
ter(tesSUCCESS),
fee(feeDrops * 10),
sig(alice));
env.close();
}
}

public:
void
run() override

@@ -6225,7 +6026,6 @@ public:
testMaxSupply(features);
testMinMax(features);
testHalving(features - featureOwnerPaysFee);
testBlackhole(features);
}
};

src/test/app/LedgerStress_test.cpp (new file, 320 lines)
@@ -0,0 +1,320 @@
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/misc/TxQ.h>
#include <ripple/basics/chrono.h>
#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/Feature.h>
#include <ripple/protocol/jss.h>
#include <algorithm>
#include <chrono>
#include <map>
#include <mutex>
#include <test/jtx.h>
#include <test/jtx/Env.h>
#include <thread>
#include <vector>

namespace ripple {
namespace test {

using namespace jtx;
class LedgerStress_test : public beast::unit_test::suite
{
private:
static constexpr std::size_t TXN_PER_LEDGER = 50000;
static constexpr std::size_t MAX_TXN_PER_ACCOUNT = 5; // Increased from 1
static constexpr std::chrono::seconds MAX_CLOSE_TIME{15};
static constexpr std::size_t REQUIRED_ACCOUNTS =
(TXN_PER_LEDGER + MAX_TXN_PER_ACCOUNT - 1) / MAX_TXN_PER_ACCOUNT;

// Get number of hardware threads and use half
const std::size_t NUM_THREADS =
std::max(std::thread::hardware_concurrency() / 2, 1u);

struct LedgerMetrics
{
std::chrono::milliseconds submitTime{0};
std::chrono::milliseconds closeTime{0};
std::size_t txCount{0};
std::size_t successfulTxCount{
0}; // Added to track successful transactions
std::size_t failedTxCount{0}; // Added to track failed transactions
XRPAmount baseFee{0};

void
log(beast::Journal const& journal) const
{
std::cout << "Metrics - Submit time: " << submitTime.count()
<< "ms, "
<< "Close time: " << closeTime.count() << "ms, "
<< "Transaction count: " << txCount << ", "
<< "Successful: " << successfulTxCount << ", "
<< "Failed: " << failedTxCount << ", "
<< "Base fee: " << baseFee;
}
};

// Thread-safe console output
std::mutex consoleMutex;
std::atomic<std::size_t> totalSuccessfulTxns{0};

template <typename T>
void
threadSafeLog(T const& message)
{
std::lock_guard<std::mutex> lock(consoleMutex);
std::cout << message << std::endl;
}

XRPAmount
getEscalatedFee(jtx::Env& env) const
{
auto const metrics = env.app().getTxQ().getMetrics(*env.current());
auto const baseFee = env.current()->fees().base;

auto const feeLevel =
mulDiv(metrics.medFeeLevel, baseFee, metrics.referenceFeeLevel)
.second;

auto const escalatedFee = XRPAmount{feeLevel};
return XRPAmount{escalatedFee.drops() + (escalatedFee.drops())};
}

std::vector<jtx::Account>
createAccounts(jtx::Env& env, std::size_t count)
{
std::vector<jtx::Account> accounts;
accounts.reserve(count);

for (std::size_t i = 0; i < count; ++i)
{
std::string name = "account" + std::to_string(i);
auto account = jtx::Account(name);
accounts.push_back(account);
env.fund(false, XRP(100000), account);

if (i % 2500 == 0 && i != 0)
threadSafeLog("Accounts created: " + std::to_string(i));
}
env.close();
return accounts;
}

// Structure to hold work assignment for each thread
struct ThreadWork
{
std::size_t startAccountIdx;
std::size_t endAccountIdx;
std::size_t numTxnsToSubmit;
std::size_t successfulTxns{0};
std::size_t failedTxns{0};
};

void
submitBatchThread(
jtx::Env& env,
std::vector<jtx::Account> const& accounts,
ThreadWork& work) // Changed to non-const reference to update metrics
{
auto const escalatedFee = getEscalatedFee(env);
std::size_t txnsSubmitted = 0;

// Track sequence numbers for all accounts in this thread's range
std::map<AccountID, std::uint32_t> seqNumbers;
for (std::size_t i = work.startAccountIdx; i < work.endAccountIdx; ++i)
{
seqNumbers[accounts[i].id()] = env.seq(accounts[i]);
}

// Pre-calculate recipient indices for better distribution
std::vector<std::size_t> recipientIndices;
recipientIndices.reserve(accounts.size() - 1);
for (std::size_t i = 0; i < accounts.size(); ++i)
{
if (i < work.startAccountIdx || i >= work.endAccountIdx)
{
recipientIndices.push_back(i);
}
}

std::size_t recipientIdx = 0;

for (std::size_t i = work.startAccountIdx;
i < work.endAccountIdx && txnsSubmitted < work.numTxnsToSubmit;
++i)
{
auto const& sender = accounts[i];

// Calculate how many txns to submit from this account
std::size_t txnsRemaining = work.numTxnsToSubmit - txnsSubmitted;
std::size_t txnsForAccount =
std::min(MAX_TXN_PER_ACCOUNT, txnsRemaining);

// Submit transactions
for (std::size_t tx = 0; tx < txnsForAccount; ++tx)
{
// Select next recipient using round-robin
auto const& recipient =
accounts[recipientIndices[recipientIdx]];
recipientIdx = (recipientIdx + 1) % recipientIndices.size();

try
{
env.inject(
pay(sender, recipient, XRP(1)),
fee(escalatedFee),
seq(seqNumbers[sender.id()]));

++work.successfulTxns;
seqNumbers[sender.id()]++;
}
catch (std::exception const& e)
{
++work.failedTxns;
threadSafeLog(
"Exception submitting transaction: " +
std::string(e.what()));
}

++txnsSubmitted;
}
}
}

void
runStressTest(std::size_t numLedgers)
{
testcase(
"Multithreaded stress test: " + std::to_string(TXN_PER_LEDGER) +
" txns/ledger for " + std::to_string(numLedgers) +
" ledgers using " + std::to_string(NUM_THREADS) + " threads");

Env env{*this, envconfig(many_workers)};
env.app().config().MAX_TRANSACTIONS = TXN_PER_LEDGER;

auto const journal = env.app().journal("LedgerStressTest");

// Get actual hardware thread count
std::size_t hardwareThreads =
static_cast<std::size_t>(std::thread::hardware_concurrency());
if (hardwareThreads == 0)
hardwareThreads = 4; // Fallback

const std::size_t THREAD_COUNT = std::min(NUM_THREADS, hardwareThreads);

threadSafeLog(
"Using " + std::to_string(THREAD_COUNT) + " hardware threads");
threadSafeLog(
"Creating " + std::to_string(REQUIRED_ACCOUNTS) + " accounts");

auto accounts = createAccounts(env, REQUIRED_ACCOUNTS);

std::vector<LedgerMetrics> metrics;
metrics.reserve(numLedgers);

for (std::size_t ledger = 0; ledger < numLedgers; ++ledger)
{
threadSafeLog("Starting ledger " + std::to_string(ledger));
LedgerMetrics ledgerMetrics;

auto submitStart = std::chrono::steady_clock::now();
ledgerMetrics.baseFee = env.current()->fees().base;

// Calculate even distribution of work
std::vector<ThreadWork> threadAssignments;
threadAssignments.reserve(THREAD_COUNT);

std::size_t baseWorkload = TXN_PER_LEDGER / THREAD_COUNT;
std::size_t remainder = TXN_PER_LEDGER % THREAD_COUNT;
std::size_t accountsPerThread = accounts.size() / THREAD_COUNT;
std::size_t totalAccountsAssigned = 0;

for (std::size_t t = 0; t < THREAD_COUNT; ++t)
{
ThreadWork work;
work.startAccountIdx = totalAccountsAssigned;
work.endAccountIdx = (t == THREAD_COUNT - 1)
? accounts.size()
: work.startAccountIdx + accountsPerThread;
work.numTxnsToSubmit = baseWorkload + (t < remainder ? 1 : 0);

totalAccountsAssigned = work.endAccountIdx;
threadAssignments.push_back(work);
}

// Launch threads with work assignments
std::vector<std::thread> threads;
threads.reserve(THREAD_COUNT);

for (std::size_t t = 0; t < THREAD_COUNT; ++t)
{
threads.emplace_back(
[&env, &accounts, &work = threadAssignments[t], this]() {
submitBatchThread(env, accounts, work);
});
}

// Wait for all threads
for (auto& thread : threads)
{
if (thread.joinable())
thread.join();
}

// Aggregate metrics from all threads
ledgerMetrics.successfulTxCount = 0;
ledgerMetrics.failedTxCount = 0;
for (auto const& work : threadAssignments)
{
ledgerMetrics.successfulTxCount += work.successfulTxns;
ledgerMetrics.failedTxCount += work.failedTxns;
}

ledgerMetrics.submitTime =
std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - submitStart);

auto closeStart = std::chrono::steady_clock::now();
env.close();
ledgerMetrics.closeTime =
std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - closeStart);

auto const closed = env.closed();
ledgerMetrics.txCount = closed->txCount();

ledgerMetrics.log(journal);
metrics.push_back(ledgerMetrics);

auto const totalTime =
ledgerMetrics.submitTime + ledgerMetrics.closeTime;

// Updated expectations
BEAST_EXPECT(
ledgerMetrics.txCount >=
ledgerMetrics.successfulTxCount * 0.8); // Allow 20% variance
BEAST_EXPECT(
ledgerMetrics.closeTime <=
std::chrono::duration_cast<std::chrono::milliseconds>(
MAX_CLOSE_TIME));

threadSafeLog(
"\nCompleted ledger " + std::to_string(ledger) + " in " +
std::to_string(totalTime.count()) + "ms" + " with " +
std::to_string(ledgerMetrics.successfulTxCount) +
" successful transactions using " +
std::to_string(THREAD_COUNT) + " threads");
}
}

public:
void
run() override
{
runStressTest(5);
}
};

BEAST_DEFINE_TESTSUITE(LedgerStress, app, ripple);

} // namespace test
} // namespace ripple

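Note: a quick sanity check of the sizing constants in the new suite: 50,000 transactions per ledger at 5 transactions per account requires 10,000 funded accounts, which is what the REQUIRED_ACCOUNTS ceiling division works out to.

// Ceiling division used by REQUIRED_ACCOUNTS above.
static_assert((50'000 + 5 - 1) / 5 == 10'000, "one ledger needs 10,000 senders");
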
@@ -148,6 +148,12 @@ public:
operator=(Env const&) = delete;
Env(Env const&) = delete;

Application*
getApp()
{
return bundle_.app;
}

/**
* @brief Create Env using suite, Config pointer, and explicit features.
*

@@ -508,6 +514,9 @@ public:
virtual void
submit(JTx const& jt);

virtual void
inject_jtx(JTx const& jt);

/** Use the submit RPC command with a provided JTx object.
This calls postconditions.
*/

@@ -529,6 +538,13 @@
submit(jt(std::forward<JsonValue>(jv), fN...));
}

template <class JsonValue, class... FN>
void
inject(JsonValue&& jv, FN const&... fN)
{
inject_jtx(jt(std::forward<JsonValue>(jv), fN...));
}

template <class JsonValue, class... FN>
void
operator()(JsonValue&& jv, FN const&... fN)

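Note: usage mirrors env(...) but bypasses the RPC submit path. The stress test above drives it like this (sketch; alice and bob are assumed funded jtx accounts):

// Hand a signed payment straight to NetworkOPs instead of the "submit" RPC;
// postconditions are still checked.
env.inject(
    pay(alice, bob, XRP(1)),
    fee(env.current()->fees().base),
    seq(env.seq(alice)));
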
@@ -591,7 +607,6 @@ public:
void
disableFeature(uint256 const feature);

private:
void
fund(bool setDefaultRipple, STAmount const& amount, Account const& account);

@@ -86,6 +86,9 @@ std::unique_ptr<Config> no_admin(std::unique_ptr<Config>);
std::unique_ptr<Config>
no_admin_networkid(std::unique_ptr<Config> cfg);

std::unique_ptr<Config>
many_workers(std::unique_ptr<Config> cfg);

std::unique_ptr<Config> secure_gateway(std::unique_ptr<Config>);

std::unique_ptr<Config> admin_localnet(std::unique_ptr<Config>);

@@ -49,6 +49,8 @@
#include <test/jtx/sig.h>
#include <test/jtx/trust.h>
#include <test/jtx/utility.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/app/misc/Transaction.h>

namespace ripple {
namespace test {

@@ -124,13 +126,17 @@ Env::close(
{
// Round up to next distinguishable value
using namespace std::chrono_literals;

auto& netOPs = app().getOPs();
netOPs.forceTransactionBatch();

bool res = true;
closeTime += closed()->info().closeTimeResolution - 1s;
timeKeeper().set(closeTime);
// Go through the rpc interface unless we need to simulate
// a specific consensus delay.
if (consensusDelay)
app().getOPs().acceptLedger(consensusDelay);
netOPs.acceptLedger(consensusDelay);
else
{
auto resp = rpc("ledger_accept");

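Note: together with inject(), the flush added to close() gives tests a deterministic pattern (sketch; alice and bob are assumed funded jtx accounts):

env.inject(pay(alice, bob, XRP(10)));  // queued via NetworkOPs, not the submit RPC
env.close();  // forceTransactionBatch() drains the queue before the ledger is accepted
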
@@ -284,6 +290,33 @@ Env::parseResult(Json::Value const& jr)
return std::make_pair(ter, isTesSuccess(ter) || isTecClaim(ter));
}

void
Env::inject_jtx(JTx const& jt)
{
Application& app = *(getApp());
auto& netOPs = app.getOPs();
if (jt.stx)
{
std::string reason;
// make a copy
//STTx* newData = new STTx(*jt.stx);
//auto stx = std::shared_ptr<STTx const>(newData);
auto id = jt.stx->getTransactionID();

auto tx = std::make_shared<Transaction>(jt.stx, reason, app);

/*
static int counter = 0;
counter++;
if (counter % 2500 == 0)
std::cout << "inject_jtx [" << counter++ << "] id=" << id << "\n";
*/
netOPs.processTransaction(tx, true, false);
}
return postconditions(jt, ter_, true);

}

void
Env::submit(JTx const& jt)
{

@@ -76,6 +76,14 @@ setupConfigForUnitTests(Config& cfg)

namespace jtx {

std::unique_ptr<Config>
many_workers(std::unique_ptr<Config> cfg)
{
cfg->WORKERS = 128;
return cfg;
}


std::unique_ptr<Config>
no_admin(std::unique_ptr<Config> cfg)
{

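Note: usage sketch for the new config funclet; this is exactly how the stress test constructs its Env:

// 128 job-queue workers instead of the unit-test default.
Env env{*this, envconfig(many_workers)};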