Compare commits

..

3 Commits

Author SHA1 Message Date
RichardAH
af1920b8c3 Merge branch 'dev' into nd-allow-multi-threaded-writes-to-suite-log-2025-08-15 2025-09-08 13:08:38 +10:00
Niq Dudfield
3c4c9c87c5 Fix rwdb memory leak with online_delete and remove flatmap (#570)
Co-authored-by: Denis Angell <dangell@transia.co>
2025-08-26 14:00:58 +10:00
Nicholas Dudfield
b11397e4df fix(tests): prevent buffer corruption from concurrent log writes
std::endl triggers flush() which calls sync() on the shared log buffer.
Multiple threads racing in sync() cause str()/str("") operations to
corrupt buffer state, leading to crashes and double frees.

Added mutex to serialize access to suite.log, preventing concurrent
sync() calls on the same buffer.
2025-08-15 07:57:08 +07:00
24 changed files with 1234 additions and 2194 deletions

View File

@@ -548,7 +548,6 @@ target_sources (rippled PRIVATE
src/ripple/nodestore/backend/CassandraFactory.cpp
src/ripple/nodestore/backend/RWDBFactory.cpp
src/ripple/nodestore/backend/MemoryFactory.cpp
src/ripple/nodestore/backend/FlatmapFactory.cpp
src/ripple/nodestore/backend/NuDBFactory.cpp
src/ripple/nodestore/backend/NullFactory.cpp
src/ripple/nodestore/backend/RocksDBFactory.cpp
@@ -995,6 +994,11 @@ if (tests)
subdir: resource
#]===============================]
src/test/resource/Logic_test.cpp
#[===============================[
test sources:
subdir: rdb
#]===============================]
src/test/rdb/RelationalDatabase_test.cpp
#[===============================[
test sources:
subdir: rpc

View File

@@ -186,6 +186,10 @@ test.protocol > ripple.crypto
test.protocol > ripple.json
test.protocol > ripple.protocol
test.protocol > test.toplevel
test.rdb > ripple.app
test.rdb > ripple.core
test.rdb > test.jtx
test.rdb > test.toplevel
test.resource > ripple.basics
test.resource > ripple.beast
test.resource > ripple.resource

View File

@@ -1063,14 +1063,16 @@
# RWDB is recommended for Validator and Peer nodes that are not required to
# store history.
#
# RWDB maintains its high speed regardless of the amount of history
# stored. Online delete should NOT be used; instead, RWDB will use the
# ledger_history config value to determine how many ledgers to keep in memory.
#
# Required keys for NuDB, RWDB and RocksDB:
# Required keys for NuDB and RocksDB:
#
# path Location to store the database
#
# Required keys for RWDB:
#
# online_delete Required. RWDB stores data in memory and will
# grow unbounded without online_delete. See the
# online_delete section below.
#
# Required keys for Cassandra:
#
# contact_points IP of a node in the Cassandra cluster
@@ -1110,7 +1112,17 @@
# if sufficient IOPS capacity is available.
# Default 0.
#
# Optional keys for NuDB or RocksDB:
# online_delete for RWDB, NuDB and RocksDB:
#
# online_delete Minimum value of 256. Enable automatic purging
# of older ledger information. Maintain at least this
# number of ledger records online. Must be greater
# than or equal to ledger_history.
#
# REQUIRED for RWDB to prevent out-of-memory errors.
# Optional for NuDB and RocksDB.
#
# Optional keys for NuDB and RocksDB:
#
# earliest_seq The default is 32570 to match the XRP ledger
# network's earliest allowed sequence. Alternate
@@ -1120,12 +1132,7 @@
# it must be defined with the same value in both
# sections.
#
# online_delete Minimum value of 256. Enable automatic purging
# of older ledger information. Maintain at least this
# number of ledger records online. Must be greater
# than or equal to ledger_history. If using RWDB
# this value is ignored.
#
# These keys modify the behavior of online_delete, and thus are only
# relevant if online_delete is defined and non-zero:
#

View File

@@ -1,851 +0,0 @@
#ifndef RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED
#define RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED
#include <ripple/app/ledger/AcceptedLedger.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/TransactionMaster.h>
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
#include <algorithm>
#include <map>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <vector>
#include <boost/unordered/concurrent_flat_map.hpp>
namespace ripple {
struct base_uint_hasher
{
using result_type = std::size_t;
result_type
operator()(base_uint<256> const& value) const
{
return hardened_hash<>{}(value);
}
result_type
operator()(AccountID const& value) const
{
return hardened_hash<>{}(value);
}
};
// In-memory implementation of SQLiteDatabase backed by
// boost::unordered::concurrent_flat_map.  Nothing is persisted to disk;
// the maps themselves are internally synchronized, so methods use
// visit/visit_all/erase_if rather than an external mutex.
class FlatmapDatabase : public SQLiteDatabase
{
private:
    // One validated ledger: its header plus txID -> (Transaction, TxMeta).
    struct LedgerData
    {
        LedgerInfo info;
        boost::unordered::
            concurrent_flat_map<uint256, AccountTx, base_uint_hasher>
                transactions;
    };
    // Per-account index: (ledgerSeq, txSeqInLedger) -> transaction pair.
    struct AccountTxData
    {
        boost::unordered::
            concurrent_flat_map<std::pair<uint32_t, uint32_t>, AccountTx>
                transactions;
    };
    Application& app_;
    // Primary store, keyed by ledger sequence.
    boost::unordered::concurrent_flat_map<LedgerIndex, LedgerData> ledgers_;
    // Secondary index: ledger hash -> ledger sequence.
    boost::unordered::
        concurrent_flat_map<uint256, LedgerIndex, base_uint_hasher>
            ledgerHashToSeq_;
    // Global index: transaction ID -> (Transaction, TxMeta).
    boost::unordered::concurrent_flat_map<uint256, AccountTx, base_uint_hasher>
        transactionMap_;
    // Per-account transaction history.
    boost::unordered::
        concurrent_flat_map<AccountID, AccountTxData, base_uint_hasher>
            accountTxMap_;

public:
    // config and jobQueue are accepted for interface parity with the other
    // backends but are not used here.
    FlatmapDatabase(Application& app, Config const& config, JobQueue& jobQueue)
        : app_(app)
    {
    }
    // Smallest stored ledger sequence, or nullopt when empty (linear scan).
    std::optional<LedgerIndex>
    getMinLedgerSeq() override
    {
        std::optional<LedgerIndex> minSeq;
        ledgers_.visit_all([&minSeq](auto const& pair) {
            if (!minSeq || pair.first < *minSeq)
            {
                minSeq = pair.first;
            }
        });
        return minSeq;
    }
    // Smallest ledger sequence that owns any stored transaction.
    std::optional<LedgerIndex>
    getTransactionsMinLedgerSeq() override
    {
        std::optional<LedgerIndex> minSeq;
        transactionMap_.visit_all([&minSeq](auto const& pair) {
            LedgerIndex seq = pair.second.second->getLgrSeq();
            if (!minSeq || seq < *minSeq)
            {
                minSeq = seq;
            }
        });
        return minSeq;
    }
    // Smallest ledger sequence present in the per-account index.
    std::optional<LedgerIndex>
    getAccountTransactionsMinLedgerSeq() override
    {
        std::optional<LedgerIndex> minSeq;
        accountTxMap_.visit_all([&minSeq](auto const& pair) {
            pair.second.transactions.visit_all([&minSeq](auto const& tx) {
                if (!minSeq || tx.first.first < *minSeq)
                {
                    minSeq = tx.first.first;
                }
            });
        });
        return minSeq;
    }
    // Largest stored ledger sequence, or nullopt when empty.
    std::optional<LedgerIndex>
    getMaxLedgerSeq() override
    {
        std::optional<LedgerIndex> maxSeq;
        ledgers_.visit_all([&maxSeq](auto const& pair) {
            if (!maxSeq || pair.first > *maxSeq)
            {
                maxSeq = pair.first;
            }
        });
        return maxSeq;
    }
    // Drop all transactions belonging to one ledger (the ledger header
    // itself is kept), including global and per-account index entries.
    void
    deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) override
    {
        ledgers_.visit(ledgerSeq, [this](auto& item) {
            item.second.transactions.visit_all([this](auto const& txPair) {
                transactionMap_.erase(txPair.first);
            });
            item.second.transactions.clear();
        });
        accountTxMap_.visit_all([ledgerSeq](auto& item) {
            item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
                return tx.first.first == ledgerSeq;
            });
        });
    }
    // Purge every ledger strictly below ledgerSeq, together with its
    // transactions, hash index entry, and per-account index entries.
    void
    deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) override
    {
        ledgers_.erase_if([this, ledgerSeq](auto const& item) {
            if (item.first < ledgerSeq)
            {
                item.second.transactions.visit_all([this](auto const& txPair) {
                    transactionMap_.erase(txPair.first);
                });
                ledgerHashToSeq_.erase(item.second.info.hash);
                return true;
            }
            return false;
        });
        accountTxMap_.visit_all([ledgerSeq](auto& item) {
            item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
                return tx.first.first < ledgerSeq;
            });
        });
    }
    // Purge only the transactions of ledgers below ledgerSeq; the ledger
    // headers themselves remain stored.
    void
    deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override
    {
        ledgers_.visit_all([this, ledgerSeq](auto& item) {
            if (item.first < ledgerSeq)
            {
                item.second.transactions.visit_all([this](auto const& txPair) {
                    transactionMap_.erase(txPair.first);
                });
                item.second.transactions.clear();
            }
        });
        accountTxMap_.visit_all([ledgerSeq](auto& item) {
            item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
                return tx.first.first < ledgerSeq;
            });
        });
    }
    // Purge only the per-account index entries below ledgerSeq.
    void
    deleteAccountTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override
    {
        accountTxMap_.visit_all([ledgerSeq](auto& item) {
            item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
                return tx.first.first < ledgerSeq;
            });
        });
    }
    std::size_t
    getTransactionCount() override
    {
        return transactionMap_.size();
    }
    // Total number of (account, tx) index entries across all accounts.
    std::size_t
    getAccountTransactionCount() override
    {
        std::size_t count = 0;
        accountTxMap_.visit_all([&count](auto const& item) {
            count += item.second.transactions.size();
        });
        return count;
    }
    // Row count plus min/max ledger sequence in a single scan.
    CountMinMax
    getLedgerCountMinMax() override
    {
        CountMinMax result{0, 0, 0};
        ledgers_.visit_all([&result](auto const& item) {
            result.numberOfRows++;
            if (result.minLedgerSequence == 0 ||
                item.first < result.minLedgerSequence)
            {
                result.minLedgerSequence = item.first;
            }
            if (item.first > result.maxLedgerSequence)
            {
                result.maxLedgerSequence = item.first;
            }
        });
        return result;
    }
    // Store a validated ledger and all of its transactions.  When
    // `current` is set, also prunes old ledgers beyond LEDGER_HISTORY in
    // batches of up to 128 per call to amortize deletion cost.
    bool
    saveValidatedLedger(
        std::shared_ptr<Ledger const> const& ledger,
        bool current) override
    {
        try
        {
            LedgerData ledgerData;
            ledgerData.info = ledger->info();
            auto aLedger = std::make_shared<AcceptedLedger>(ledger, app_);
            for (auto const& acceptedLedgerTx : *aLedger)
            {
                auto const& txn = acceptedLedgerTx->getTxn();
                auto const& meta = acceptedLedgerTx->getMeta();
                auto const& id = txn->getTransactionID();
                std::string reason;
                auto accTx = std::make_pair(
                    std::make_shared<ripple::Transaction>(txn, reason, app_),
                    std::make_shared<ripple::TxMeta>(meta));
                ledgerData.transactions.emplace(id, accTx);
                transactionMap_.emplace(id, accTx);
                for (auto const& account : meta.getAffectedAccounts())
                {
                    // NOTE(review): visit() only runs for keys already in
                    // accountTxMap_, and no code path here ever inserts a
                    // new account key — first-seen accounts appear never to
                    // be indexed.  Confirm whether try_emplace was intended.
                    accountTxMap_.visit(account, [&](auto& data) {
                        data.second.transactions.emplace(
                            std::make_pair(
                                ledger->info().seq,
                                acceptedLedgerTx->getTxnSeq()),
                            accTx);
                    });
                }
            }
            ledgers_.emplace(ledger->info().seq, std::move(ledgerData));
            ledgerHashToSeq_.emplace(ledger->info().hash, ledger->info().seq);
            if (current)
            {
                // Keep at most LEDGER_HISTORY ledgers; everything below
                // cutoffSeq is eligible for pruning.
                auto const cutoffSeq =
                    ledger->info().seq > app_.config().LEDGER_HISTORY
                    ? ledger->info().seq - app_.config().LEDGER_HISTORY
                    : 0;
                if (cutoffSeq > 0)
                {
                    // Cap work per call; remaining old ledgers are pruned
                    // on subsequent saves.
                    const std::size_t BATCH_SIZE = 128;
                    std::size_t deleted = 0;
                    ledgers_.erase_if([&](auto const& item) {
                        if (deleted >= BATCH_SIZE)
                            return false;
                        if (item.first < cutoffSeq)
                        {
                            item.second.transactions.visit_all(
                                [this](auto const& txPair) {
                                    transactionMap_.erase(txPair.first);
                                });
                            ledgerHashToSeq_.erase(item.second.info.hash);
                            deleted++;
                            return true;
                        }
                        return false;
                    });
                    if (deleted > 0)
                    {
                        accountTxMap_.visit_all([cutoffSeq](auto& item) {
                            item.second.transactions.erase_if(
                                [cutoffSeq](auto const& tx) {
                                    return tx.first.first < cutoffSeq;
                                });
                        });
                    }
                    app_.getLedgerMaster().clearPriorLedgers(cutoffSeq);
                }
            }
            return true;
        }
        catch (std::exception const&)
        {
            // NOTE(review): best-effort rollback removes this ledger's
            // transactions only; a partially inserted ledgers_/
            // ledgerHashToSeq_ entry may remain — confirm acceptable.
            deleteTransactionByLedgerSeq(ledger->info().seq);
            return false;
        }
    }
    // Ledger header by sequence, or nullopt if absent.
    std::optional<LedgerInfo>
    getLedgerInfoByIndex(LedgerIndex ledgerSeq) override
    {
        std::optional<LedgerInfo> result;
        ledgers_.visit(ledgerSeq, [&result](auto const& item) {
            result = item.second.info;
        });
        return result;
    }
    // Header of the highest-sequence stored ledger.
    std::optional<LedgerInfo>
    getNewestLedgerInfo() override
    {
        std::optional<LedgerInfo> result;
        ledgers_.visit_all([&result](auto const& item) {
            if (!result || item.second.info.seq > result->seq)
            {
                result = item.second.info;
            }
        });
        return result;
    }
    // Oldest ledger at or above ledgerFirstIndex.
    std::optional<LedgerInfo>
    getLimitedOldestLedgerInfo(LedgerIndex ledgerFirstIndex) override
    {
        std::optional<LedgerInfo> result;
        ledgers_.visit_all([&](auto const& item) {
            if (item.first >= ledgerFirstIndex &&
                (!result || item.first < result->seq))
            {
                result = item.second.info;
            }
        });
        return result;
    }
    // Newest ledger at or above ledgerFirstIndex.
    std::optional<LedgerInfo>
    getLimitedNewestLedgerInfo(LedgerIndex ledgerFirstIndex) override
    {
        std::optional<LedgerInfo> result;
        ledgers_.visit_all([&](auto const& item) {
            if (item.first >= ledgerFirstIndex &&
                (!result || item.first > result->seq))
            {
                result = item.second.info;
            }
        });
        return result;
    }
    // Header lookup by ledger hash (hash -> seq -> header, two visits).
    std::optional<LedgerInfo>
    getLedgerInfoByHash(uint256 const& ledgerHash) override
    {
        std::optional<LedgerInfo> result;
        ledgerHashToSeq_.visit(ledgerHash, [this, &result](auto const& item) {
            ledgers_.visit(item.second, [&result](auto const& item) {
                result = item.second.info;
            });
        });
        return result;
    }
    // Ledger hash by sequence; zero-initialized uint256 when absent.
    uint256
    getHashByIndex(LedgerIndex ledgerIndex) override
    {
        uint256 result;
        ledgers_.visit(ledgerIndex, [&result](auto const& item) {
            result = item.second.info.hash;
        });
        return result;
    }
    // Hash and parent hash for one ledger, or nullopt if absent.
    std::optional<LedgerHashPair>
    getHashesByIndex(LedgerIndex ledgerIndex) override
    {
        std::optional<LedgerHashPair> result;
        ledgers_.visit(ledgerIndex, [&result](auto const& item) {
            result = LedgerHashPair{
                item.second.info.hash, item.second.info.parentHash};
        });
        return result;
    }
    // Hashes for every stored ledger in [minSeq, maxSeq], keyed by sequence.
    std::map<LedgerIndex, LedgerHashPair>
    getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) override
    {
        std::map<LedgerIndex, LedgerHashPair> result;
        ledgers_.visit_all([&](auto const& item) {
            if (item.first >= minSeq && item.first <= maxSeq)
            {
                result[item.first] = LedgerHashPair{
                    item.second.info.hash, item.second.info.parentHash};
            }
        });
        return result;
    }
    // Look up one transaction by ID.  Unknown IDs yield TxSearched::unknown;
    // a known tx whose ledger falls outside `range` yields TxSearched::all.
    // `ec` is accepted for interface parity and never set here.
    std::variant<AccountTx, TxSearched>
    getTransaction(
        uint256 const& id,
        std::optional<ClosedInterval<std::uint32_t>> const& range,
        error_code_i& ec) override
    {
        std::variant<AccountTx, TxSearched> result = TxSearched::unknown;
        transactionMap_.visit(id, [&](auto const& item) {
            auto const& tx = item.second;
            if (!range ||
                (range->lower() <= tx.second->getLgrSeq() &&
                 tx.second->getLgrSeq() <= range->upper()))
            {
                result = tx;
            }
            else
            {
                result = TxSearched::all;
            }
        });
        return result;
    }
    bool
    ledgerDbHasSpace(Config const& config) override
    {
        return true;  // In-memory database always has space
    }
    bool
    transactionDbHasSpace(Config const& config) override
    {
        return true;  // In-memory database always has space
    }
    // Rough structural size estimate: element counts times static sizeof;
    // does not include serialized transaction payloads or map overhead.
    std::uint32_t
    getKBUsedAll() override
    {
        std::uint32_t size = sizeof(*this);
        size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
        size +=
            ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
        size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
        accountTxMap_.visit_all([&size](auto const& item) {
            size += sizeof(AccountID) + sizeof(AccountTxData);
            size += item.second.transactions.size() * sizeof(AccountTx);
        });
        return size / 1024;  // Convert to KB
    }
    // Estimate for the ledger-related maps only (same caveats as above).
    std::uint32_t
    getKBUsedLedger() override
    {
        std::uint32_t size =
            ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
        size +=
            ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
        return size / 1024;
    }
    // Estimate for the transaction-related maps only (same caveats).
    std::uint32_t
    getKBUsedTransaction() override
    {
        std::uint32_t size =
            transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
        accountTxMap_.visit_all([&size](auto const& item) {
            size += sizeof(AccountID) + sizeof(AccountTxData);
            size += item.second.transactions.size() * sizeof(AccountTx);
        });
        return size / 1024;
    }
    void
    closeLedgerDB() override
    {
        // No-op for in-memory database
    }
    void
    closeTransactionDB() override
    {
        // No-op for in-memory database
    }
    ~FlatmapDatabase()
    {
        // Concurrent maps need visit_all: clear the nested per-entry maps
        // before clearing their owners.
        accountTxMap_.visit_all(
            [](auto& pair) { pair.second.transactions.clear(); });
        accountTxMap_.clear();
        transactionMap_.clear();
        ledgers_.visit_all(
            [](auto& pair) { pair.second.transactions.clear(); });
        ledgers_.clear();
        ledgerHashToSeq_.clear();
    }
    // Up to 20 transactions at or above startIndex, newest ledger first.
    std::vector<std::shared_ptr<Transaction>>
    getTxHistory(LedgerIndex startIndex) override
    {
        std::vector<std::shared_ptr<Transaction>> result;
        transactionMap_.visit_all([&](auto const& item) {
            if (item.second.second->getLgrSeq() >= startIndex)
            {
                result.push_back(item.second.first);
            }
        });
        std::sort(
            result.begin(), result.end(), [](auto const& a, auto const& b) {
                return a->getLedger() > b->getLedger();
            });
        if (result.size() > 20)
        {
            result.resize(20);
        }
        return result;
    }
    // Helper function to handle limits: truncate `container` to `limit`
    // entries unless the caller requested unlimited results.
    template <typename Container>
    void
    applyLimit(Container& container, std::size_t limit, bool bUnlimited)
    {
        if (!bUnlimited && limit > 0 && container.size() > limit)
        {
            container.resize(limit);
        }
    }
    // Account transactions in [minLedger, maxLedger], oldest first.
    AccountTxs
    getOldestAccountTxs(AccountTxOptions const& options) override
    {
        AccountTxs result;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    result.push_back(tx.second);
                }
            });
        });
        std::sort(
            result.begin(), result.end(), [](auto const& a, auto const& b) {
                return a.second->getLgrSeq() < b.second->getLgrSeq();
            });
        applyLimit(result, options.limit, options.bUnlimited);
        return result;
    }
    // Account transactions in [minLedger, maxLedger], newest first.
    AccountTxs
    getNewestAccountTxs(AccountTxOptions const& options) override
    {
        AccountTxs result;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    result.push_back(tx.second);
                }
            });
        });
        std::sort(
            result.begin(), result.end(), [](auto const& a, auto const& b) {
                return a.second->getLgrSeq() > b.second->getLgrSeq();
            });
        applyLimit(result, options.limit, options.bUnlimited);
        return result;
    }
    // As getOldestAccountTxs, but returns serialized (blob) tx + meta.
    MetaTxsList
    getOldestAccountTxsB(AccountTxOptions const& options) override
    {
        MetaTxsList result;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    result.emplace_back(
                        tx.second.first->getSTransaction()
                            ->getSerializer()
                            .peekData(),
                        tx.second.second->getAsObject()
                            .getSerializer()
                            .peekData(),
                        tx.first.first);
                }
            });
        });
        std::sort(
            result.begin(), result.end(), [](auto const& a, auto const& b) {
                return std::get<2>(a) < std::get<2>(b);
            });
        applyLimit(result, options.limit, options.bUnlimited);
        return result;
    }
    // As getNewestAccountTxs, but returns serialized (blob) tx + meta.
    MetaTxsList
    getNewestAccountTxsB(AccountTxOptions const& options) override
    {
        MetaTxsList result;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    result.emplace_back(
                        tx.second.first->getSTransaction()
                            ->getSerializer()
                            .peekData(),
                        tx.second.second->getAsObject()
                            .getSerializer()
                            .peekData(),
                        tx.first.first);
                }
            });
        });
        std::sort(
            result.begin(), result.end(), [](auto const& a, auto const& b) {
                return std::get<2>(a) > std::get<2>(b);
            });
        applyLimit(result, options.limit, options.bUnlimited);
        return result;
    }
    // Paged account history, ascending.  Snapshots matching entries into a
    // vector, sorts, resumes one past `options.marker` if given, emits up
    // to `limit` rows, and sets a resume marker when more remain.
    std::pair<AccountTxs, std::optional<AccountTxMarker>>
    oldestAccountTxPage(AccountTxPageOptions const& options) override
    {
        AccountTxs result;
        std::optional<AccountTxMarker> marker;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            std::vector<std::pair<std::pair<uint32_t, uint32_t>, AccountTx>>
                txs;
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    txs.emplace_back(tx);
                }
            });
            std::sort(txs.begin(), txs.end(), [](auto const& a, auto const& b) {
                return a.first < b.first;
            });
            auto it = txs.begin();
            if (options.marker)
            {
                // Resume one entry past the marker position.
                it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
                    return tx.first.first == options.marker->ledgerSeq &&
                        tx.first.second == options.marker->txnSeq;
                });
                if (it != txs.end())
                    ++it;
            }
            for (; it != txs.end() &&
                 (options.limit == 0 || result.size() < options.limit);
                 ++it)
            {
                result.push_back(it->second);
            }
            if (it != txs.end())
            {
                marker = AccountTxMarker{it->first.first, it->first.second};
            }
        });
        return {result, marker};
    }
    // Paged account history, descending (same scheme, reversed sort).
    std::pair<AccountTxs, std::optional<AccountTxMarker>>
    newestAccountTxPage(AccountTxPageOptions const& options) override
    {
        AccountTxs result;
        std::optional<AccountTxMarker> marker;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            std::vector<std::pair<std::pair<uint32_t, uint32_t>, AccountTx>>
                txs;
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    txs.emplace_back(tx);
                }
            });
            std::sort(txs.begin(), txs.end(), [](auto const& a, auto const& b) {
                return a.first > b.first;
            });
            auto it = txs.begin();
            if (options.marker)
            {
                it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
                    return tx.first.first == options.marker->ledgerSeq &&
                        tx.first.second == options.marker->txnSeq;
                });
                if (it != txs.end())
                    ++it;
            }
            for (; it != txs.end() &&
                 (options.limit == 0 || result.size() < options.limit);
                 ++it)
            {
                result.push_back(it->second);
            }
            if (it != txs.end())
            {
                marker = AccountTxMarker{it->first.first, it->first.second};
            }
        });
        return {result, marker};
    }
    // Paged account history, ascending, serialized (blob) form.
    std::pair<MetaTxsList, std::optional<AccountTxMarker>>
    oldestAccountTxPageB(AccountTxPageOptions const& options) override
    {
        MetaTxsList result;
        std::optional<AccountTxMarker> marker;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            std::vector<std::tuple<uint32_t, uint32_t, AccountTx>> txs;
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    txs.emplace_back(
                        tx.first.first, tx.first.second, tx.second);
                }
            });
            std::sort(txs.begin(), txs.end());
            auto it = txs.begin();
            if (options.marker)
            {
                it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
                    return std::get<0>(tx) == options.marker->ledgerSeq &&
                        std::get<1>(tx) == options.marker->txnSeq;
                });
                if (it != txs.end())
                    ++it;
            }
            for (; it != txs.end() &&
                 (options.limit == 0 || result.size() < options.limit);
                 ++it)
            {
                const auto& [_, __, tx] = *it;
                result.emplace_back(
                    tx.first->getSTransaction()->getSerializer().peekData(),
                    tx.second->getAsObject().getSerializer().peekData(),
                    std::get<0>(*it));
            }
            if (it != txs.end())
            {
                marker = AccountTxMarker{std::get<0>(*it), std::get<1>(*it)};
            }
        });
        return {result, marker};
    }
    // Paged account history, descending, serialized (blob) form.
    std::pair<MetaTxsList, std::optional<AccountTxMarker>>
    newestAccountTxPageB(AccountTxPageOptions const& options) override
    {
        MetaTxsList result;
        std::optional<AccountTxMarker> marker;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            std::vector<std::tuple<uint32_t, uint32_t, AccountTx>> txs;
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    txs.emplace_back(
                        tx.first.first, tx.first.second, tx.second);
                }
            });
            std::sort(txs.begin(), txs.end(), std::greater<>());
            auto it = txs.begin();
            if (options.marker)
            {
                it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
                    return std::get<0>(tx) == options.marker->ledgerSeq &&
                        std::get<1>(tx) == options.marker->txnSeq;
                });
                if (it != txs.end())
                    ++it;
            }
            for (; it != txs.end() &&
                 (options.limit == 0 || result.size() < options.limit);
                 ++it)
            {
                const auto& [_, __, tx] = *it;
                result.emplace_back(
                    tx.first->getSTransaction()->getSerializer().peekData(),
                    tx.second->getAsObject().getSerializer().peekData(),
                    std::get<0>(*it));
            }
            if (it != txs.end())
            {
                marker = AccountTxMarker{std::get<0>(*it), std::get<1>(*it)};
            }
        });
        return {result, marker};
    }
};
// Factory: constructs the flat-map-backed SQLiteDatabase implementation.
std::unique_ptr<SQLiteDatabase>
getFlatmapDatabase(Application& app, Config const& config, JobQueue& jobQueue)
{
    auto db = std::make_unique<FlatmapDatabase>(app, config, jobQueue);
    return db;
}
} // namespace ripple
#endif // RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED

View File

@@ -28,9 +28,8 @@ private:
struct AccountTxData
{
AccountTxs transactions;
std::map<uint32_t, std::map<uint32_t, size_t>>
ledgerTxMap; // ledgerSeq -> txSeq -> index in transactions
std::map<uint32_t, std::vector<AccountTx>>
ledgerTxMap; // ledgerSeq -> vector of transactions
};
Application& app_;
@@ -65,9 +64,12 @@ public:
return {};
std::shared_lock<std::shared_mutex> lock(mutex_);
if (transactionMap_.empty())
return std::nullopt;
return transactionMap_.begin()->second.second->getLgrSeq();
for (const auto& [ledgerSeq, ledgerData] : ledgers_)
{
if (!ledgerData.transactions.empty())
return ledgerSeq;
}
return std::nullopt;
}
std::optional<LedgerIndex>
@@ -163,14 +165,6 @@ public:
{
txIt = accountData.ledgerTxMap.erase(txIt);
}
accountData.transactions.erase(
std::remove_if(
accountData.transactions.begin(),
accountData.transactions.end(),
[ledgerSeq](const AccountTx& tx) {
return tx.second->getLgrSeq() < ledgerSeq;
}),
accountData.transactions.end());
}
}
std::size_t
@@ -193,7 +187,10 @@ public:
std::size_t count = 0;
for (const auto& [_, accountData] : accountTxMap_)
{
count += accountData.transactions.size();
for (const auto& [_, txVector] : accountData.ledgerTxMap)
{
count += txVector.size();
}
}
return count;
}
@@ -293,10 +290,7 @@ public:
accountTxMap_[account] = AccountTxData();
auto& accountData = accountTxMap_[account];
accountData.transactions.push_back(accTx);
accountData
.ledgerTxMap[seq][acceptedLedgerTx->getTxnSeq()] =
accountData.transactions.size() - 1;
accountData.ledgerTxMap[seq].push_back(accTx);
}
app_.getMasterTransaction().inLedger(
@@ -451,59 +445,108 @@ public:
return true; // In-memory database always has space
}
// Red-black tree node overhead per map entry
static constexpr size_t MAP_NODE_OVERHEAD = 40;
private:
std::uint64_t
getBytesUsedLedger_unlocked() const
{
std::uint64_t size = 0;
// Count structural overhead of ledger storage including map node
// overhead Note: sizeof(LedgerData) includes the map container for
// transactions, but not the actual transaction data
size += ledgers_.size() *
(sizeof(LedgerIndex) + sizeof(LedgerData) + MAP_NODE_OVERHEAD);
// Add the transaction map nodes inside each ledger (ledger's view of
// its transactions)
for (const auto& [_, ledgerData] : ledgers_)
{
size += ledgerData.transactions.size() *
(sizeof(uint256) + sizeof(AccountTx) + MAP_NODE_OVERHEAD);
}
// Count the ledger hash to sequence lookup map
size += ledgerHashToSeq_.size() *
(sizeof(uint256) + sizeof(LedgerIndex) + MAP_NODE_OVERHEAD);
return size;
}
std::uint64_t
getBytesUsedTransaction_unlocked() const
{
if (!useTxTables_)
return 0;
std::uint64_t size = 0;
// Count structural overhead of transaction map
// sizeof(AccountTx) is just the size of two shared_ptrs (~32 bytes)
size += transactionMap_.size() *
(sizeof(uint256) + sizeof(AccountTx) + MAP_NODE_OVERHEAD);
// Add actual transaction and metadata data sizes
for (const auto& [_, accountTx] : transactionMap_)
{
if (accountTx.first)
size += accountTx.first->getSTransaction()
->getSerializer()
.peekData()
.size();
if (accountTx.second)
size += accountTx.second->getAsObject()
.getSerializer()
.peekData()
.size();
}
// Count structural overhead of account transaction index
// The actual transaction data is already counted above from
// transactionMap_
for (const auto& [accountId, accountData] : accountTxMap_)
{
size +=
sizeof(accountId) + sizeof(AccountTxData) + MAP_NODE_OVERHEAD;
for (const auto& [ledgerSeq, txVector] : accountData.ledgerTxMap)
{
// Use capacity() to account for actual allocated memory
size += sizeof(ledgerSeq) + MAP_NODE_OVERHEAD;
size += txVector.capacity() * sizeof(AccountTx);
}
}
return size;
}
public:
std::uint32_t
getKBUsedAll() override
{
std::shared_lock<std::shared_mutex> lock(mutex_);
std::uint32_t size = sizeof(*this);
size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
size +=
ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
for (const auto& [_, accountData] : accountTxMap_)
{
size += sizeof(AccountID) + sizeof(AccountTxData);
size += accountData.transactions.size() * sizeof(AccountTx);
for (const auto& [_, innerMap] : accountData.ledgerTxMap)
{
size += sizeof(uint32_t) +
innerMap.size() * (sizeof(uint32_t) + sizeof(size_t));
}
}
return size / 1024;
// Total = base object + ledger infrastructure + transaction data
std::uint64_t size = sizeof(*this) + getBytesUsedLedger_unlocked() +
getBytesUsedTransaction_unlocked();
return static_cast<std::uint32_t>(size / 1024);
}
std::uint32_t
getKBUsedLedger() override
{
std::shared_lock<std::shared_mutex> lock(mutex_);
std::uint32_t size = 0;
size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
size +=
ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
return size / 1024;
return static_cast<std::uint32_t>(getBytesUsedLedger_unlocked() / 1024);
}
std::uint32_t
getKBUsedTransaction() override
{
if (!useTxTables_)
return 0;
std::shared_lock<std::shared_mutex> lock(mutex_);
std::uint32_t size = 0;
size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
for (const auto& [_, accountData] : accountTxMap_)
{
size += sizeof(AccountID) + sizeof(AccountTxData);
size += accountData.transactions.size() * sizeof(AccountTx);
for (const auto& [_, innerMap] : accountData.ledgerTxMap)
{
size += sizeof(uint32_t) +
innerMap.size() * (sizeof(uint32_t) + sizeof(size_t));
}
}
return size / 1024;
return static_cast<std::uint32_t>(
getBytesUsedTransaction_unlocked() / 1024);
}
void
@@ -605,14 +648,13 @@ public:
(options.bUnlimited || result.size() < options.limit);
++txIt)
{
for (const auto& [txSeq, txIndex] : txIt->second)
for (const auto& accountTx : txIt->second)
{
if (skipped < options.offset)
{
++skipped;
continue;
}
AccountTx const accountTx = accountData.transactions[txIndex];
std::uint32_t const inLedger = rangeCheckedCast<std::uint32_t>(
accountTx.second->getLgrSeq());
accountTx.first->setStatus(COMMITTED);
@@ -657,8 +699,7 @@ public:
++skipped;
continue;
}
AccountTx const accountTx =
accountData.transactions[innerRIt->second];
AccountTx const accountTx = *innerRIt;
std::uint32_t const inLedger = rangeCheckedCast<std::uint32_t>(
accountTx.second->getLgrSeq());
accountTx.first->setLedger(inLedger);
@@ -692,14 +733,14 @@ public:
(options.bUnlimited || result.size() < options.limit);
++txIt)
{
for (const auto& [txSeq, txIndex] : txIt->second)
for (const auto& accountTx : txIt->second)
{
if (skipped < options.offset)
{
++skipped;
continue;
}
const auto& [txn, txMeta] = accountData.transactions[txIndex];
const auto& [txn, txMeta] = accountTx;
result.emplace_back(
txn->getSTransaction()->getSerializer().peekData(),
txMeta->getAsObject().getSerializer().peekData(),
@@ -743,8 +784,7 @@ public:
++skipped;
continue;
}
const auto& [txn, txMeta] =
accountData.transactions[innerRIt->second];
const auto& [txn, txMeta] = *innerRIt;
result.emplace_back(
txn->getSTransaction()->getSerializer().peekData(),
txMeta->getAsObject().getSerializer().peekData(),
@@ -816,11 +856,9 @@ public:
for (; txIt != txEnd; ++txIt)
{
std::uint32_t const ledgerSeq = txIt->first;
for (auto seqIt = txIt->second.begin();
seqIt != txIt->second.end();
++seqIt)
std::uint32_t txnSeq = 0;
for (const auto& accountTx : txIt->second)
{
const auto& [txnSeq, index] = *seqIt;
if (lookingForMarker)
{
if (findLedger == ledgerSeq && findSeq == txnSeq)
@@ -828,7 +866,10 @@ public:
lookingForMarker = false;
}
else
{
++txnSeq;
continue;
}
}
else if (numberOfResults == 0)
{
@@ -837,12 +878,10 @@ public:
return {newmarker, total};
}
Blob rawTxn = accountData.transactions[index]
.first->getSTransaction()
Blob rawTxn = accountTx.first->getSTransaction()
->getSerializer()
.peekData();
Blob rawMeta = accountData.transactions[index]
.second->getAsObject()
Blob rawMeta = accountTx.second->getAsObject()
.getSerializer()
.peekData();
@@ -856,6 +895,7 @@ public:
std::move(rawMeta));
--numberOfResults;
++total;
++txnSeq;
}
}
}
@@ -871,11 +911,11 @@ public:
for (; rtxIt != rtxEnd; ++rtxIt)
{
std::uint32_t const ledgerSeq = rtxIt->first;
std::uint32_t txnSeq = rtxIt->second.size() - 1;
for (auto innerRIt = rtxIt->second.rbegin();
innerRIt != rtxIt->second.rend();
++innerRIt)
{
const auto& [txnSeq, index] = *innerRIt;
if (lookingForMarker)
{
if (findLedger == ledgerSeq && findSeq == txnSeq)
@@ -883,7 +923,10 @@ public:
lookingForMarker = false;
}
else
{
--txnSeq;
continue;
}
}
else if (numberOfResults == 0)
{
@@ -892,12 +935,11 @@ public:
return {newmarker, total};
}
Blob rawTxn = accountData.transactions[index]
.first->getSTransaction()
const auto& accountTx = *innerRIt;
Blob rawTxn = accountTx.first->getSTransaction()
->getSerializer()
.peekData();
Blob rawMeta = accountData.transactions[index]
.second->getAsObject()
Blob rawMeta = accountTx.second->getAsObject()
.getSerializer()
.peekData();
@@ -911,6 +953,7 @@ public:
std::move(rawMeta));
--numberOfResults;
++total;
--txnSeq;
}
}
}

View File

@@ -19,7 +19,6 @@
#include <ripple/app/main/Application.h>
#include <ripple/app/rdb/RelationalDatabase.h>
#include <ripple/app/rdb/backend/FlatmapDatabase.h>
#include <ripple/app/rdb/backend/RWDBDatabase.h>
#include <ripple/core/ConfigSections.h>
#include <ripple/nodestore/DatabaseShard.h>
@@ -41,7 +40,6 @@ RelationalDatabase::init(
bool use_sqlite = false;
bool use_postgres = false;
bool use_rwdb = false;
bool use_flatmap = false;
if (config.reporting())
{
@@ -60,10 +58,6 @@ RelationalDatabase::init(
{
use_rwdb = true;
}
else if (boost::iequals(get(rdb_section, "backend"), "flatmap"))
{
use_flatmap = true;
}
else
{
Throw<std::runtime_error>(
@@ -89,10 +83,6 @@ RelationalDatabase::init(
{
return getRWDBDatabase(app, config, jobQueue);
}
else if (use_flatmap)
{
return getFlatmapDatabase(app, config, jobQueue);
}
return std::unique_ptr<RelationalDatabase>();
}

View File

@@ -82,7 +82,6 @@ preflight0(PreflightContext const& ctx)
{
JLOG(ctx.j.warn())
<< "applyTransaction: transaction id may not be zero";
std::cout << "temINVALID " << __LINE__ << "\n";
return temINVALID;
}
@@ -131,10 +130,7 @@ preflight1(PreflightContext const& ctx)
{
if (ctx.tx.getSeqProxy().isTicket() &&
ctx.tx.isFieldPresent(sfAccountTxnID))
{
std::cout << "temINVALID " << __LINE__ << "\n";
return temINVALID;
}
return tesSUCCESS;
}
@@ -167,10 +163,7 @@ preflight1(PreflightContext const& ctx)
// We return temINVALID for such transactions.
if (ctx.tx.getSeqProxy().isTicket() &&
ctx.tx.isFieldPresent(sfAccountTxnID))
{
std::cout << "temINVALID " << __LINE__ << "\n";
return temINVALID;
}
return tesSUCCESS;
}
@@ -188,7 +181,6 @@ preflight2(PreflightContext const& ctx)
if (sigValid.first == Validity::SigBad)
{
JLOG(ctx.j.debug()) << "preflight2: bad signature. " << sigValid.second;
std::cout << "temINVALID " << __LINE__ << "\n";
return temINVALID;
}
return tesSUCCESS;
@@ -297,40 +289,8 @@ Transactor::calculateBaseFee(ReadView const& view, STTx const& tx)
// Each signer adds one more baseFee to the minimum required fee
// for the transaction.
std::size_t signerCount = 0;
if (tx.isFieldPresent(sfSigners))
{
// Define recursive lambda to count all leaf signers
std::function<std::size_t(STArray const&)> countSigners;
countSigners = [&](STArray const& signers) -> std::size_t {
std::size_t count = 0;
for (auto const& signer : signers)
{
if (signer.isFieldPresent(sfSigners))
{
// This is a nested signer - recursively count its signers
count += countSigners(signer.getFieldArray(sfSigners));
}
else
{
// This is a leaf signer (one who actually signs)
// Count it only if it has signing fields (not just a
// placeholder)
if (signer.isFieldPresent(sfSigningPubKey) &&
signer.isFieldPresent(sfTxnSignature))
{
count += 1;
}
}
}
return count;
};
signerCount = countSigners(tx.getFieldArray(sfSigners));
}
std::size_t const signerCount =
tx.isFieldPresent(sfSigners) ? tx.getFieldArray(sfSigners).size() : 0;
XRPAmount hookExecutionFee{0};
uint64_t burden{1};
@@ -963,246 +923,157 @@ NotTEC
Transactor::checkMultiSign(PreclaimContext const& ctx)
{
auto const id = ctx.tx.getAccountID(sfAccount);
// Set max depth based on feature flag
bool const allowNested = ctx.view.rules().enabled(featureNestedMultiSign);
int const maxDepth = allowNested ? 4 : 1;
std::string lineno = "(unknown)";
if (ctx.tx.isFieldPresent(sfMemos))
// Get mTxnAccountID's SignerList and Quorum.
std::shared_ptr<STLedgerEntry const> sleAccountSigners =
ctx.view.read(keylet::signers(id));
// If the signer list doesn't exist the account is not multi-signing.
if (!sleAccountSigners)
{
auto const& memos = ctx.tx.getFieldArray(sfMemos);
for (auto const& memo : memos)
{
auto memoObj = dynamic_cast<STObject const*>(&memo);
auto hex = memoObj->getFieldVL(sfMemoData);
lineno = strHex(hex);
break;
}
JLOG(ctx.j.trace())
<< "applyTransaction: Invalid: Not a multi-signing account.";
return tefNOT_MULTI_SIGNING;
}
// Define recursive lambda for checking signers at any depth
std::function<NotTEC(AccountID const&, STArray const&, int)>
validateSigners;
// We have plans to support multiple SignerLists in the future. The
// presence and defaulted value of the SignerListID field will enable that.
assert(sleAccountSigners->isFieldPresent(sfSignerListID));
assert(sleAccountSigners->getFieldU32(sfSignerListID) == 0);
validateSigners =
[&](AccountID const& acc, STArray const& signers, int depth) -> NotTEC {
// Check depth limit
if (depth > maxDepth)
auto accountSigners =
SignerEntries::deserialize(*sleAccountSigners, ctx.j, "ledger");
if (!accountSigners)
return accountSigners.error();
// Get the array of transaction signers.
STArray const& txSigners(ctx.tx.getFieldArray(sfSigners));
// Walk the accountSigners performing a variety of checks and see if
// the quorum is met.
// Both the multiSigners and accountSigners are sorted by account. So
// matching multi-signers to account signers should be a simple
// linear walk. *All* signers must be valid or the transaction fails.
std::uint32_t weightSum = 0;
auto iter = accountSigners->begin();
for (auto const& txSigner : txSigners)
{
AccountID const txSignerAcctID = txSigner.getAccountID(sfAccount);
// Attempt to match the SignerEntry with a Signer;
while (iter->account < txSignerAcctID)
{
if (allowNested)
if (++iter == accountSigners->end())
{
JLOG(ctx.j.trace())
<< "applyTransaction: Multi-signing depth limit exceeded.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
JLOG(ctx.j.warn())
<< "applyTransaction: Nested multisigning disabled.";
std::cout << "!!! temMALFORMED " << __FILE__ << " " << __LINE__
<< "\n";
return temMALFORMED;
}
// Get the SignerList for the account we're validating signers for
std::shared_ptr<STLedgerEntry const> sleAllowedSigners =
ctx.view.read(keylet::signers(acc));
// If the signer list doesn't exist, this account is not set up for
// multi-signing
if (!sleAllowedSigners)
{
JLOG(ctx.j.trace()) << "applyTransaction: Invalid: Account " << acc
<< " not set up for multi-signing.";
return tefNOT_MULTI_SIGNING;
}
uint32_t quorum = sleAllowedSigners->getFieldU32(sfSignerQuorum);
uint32_t sum{0};
auto allowedSigners =
SignerEntries::deserialize(*sleAllowedSigners, ctx.j, "ledger");
if (!allowedSigners)
return allowedSigners.error();
std::set<AccountID> allowedSignerSet;
for (auto const& as : *allowedSigners)
allowedSignerSet.emplace(as.account);
// Walk the signers array, validating each signer
auto iter = allowedSigners->begin();
for (auto const& signerEntry : signers)
{
AccountID const signer = signerEntry.getAccountID(sfAccount);
bool const isNested = signerEntry.isFieldPresent(sfSigners);
// Find this signer in the authorized SignerEntries list
while (iter->account < signer)
{
std::cout << "iter acc: " << to_string(iter->account) << " < "
<< to_string(signer) << "\n";
if (++iter == allowedSigners->end())
{
JLOG(ctx.j.trace())
<< "applyTransaction: Invalid SigningAccount.Account.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__
<< " in signer set? "
<< (allowedSignerSet.find(signer) ==
allowedSignerSet.end()
? "n"
: "y")
<< "\n";
return tefBAD_SIGNATURE;
}
}
if (iter->account != signer)
{
// The SigningAccount is not in the SignerEntries.
JLOG(ctx.j.trace())
<< "applyTransaction: Invalid SigningAccount.Account.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
}
if (iter->account != txSignerAcctID)
{
// The SigningAccount is not in the SignerEntries.
JLOG(ctx.j.trace())
<< "applyTransaction: Invalid SigningAccount.Account.";
return tefBAD_SIGNATURE;
}
// We found the SigningAccount in the list of valid signers. Now we
// need to compute the accountID that is associated with the signer's
// public key.
auto const spk = txSigner.getFieldVL(sfSigningPubKey);
if (!publicKeyType(makeSlice(spk)))
{
JLOG(ctx.j.trace())
<< "checkMultiSign: signing public key type is unknown";
return tefBAD_SIGNATURE;
}
AccountID const signingAcctIDFromPubKey =
calcAccountID(PublicKey(makeSlice(spk)));
// Verify that the signingAcctID and the signingAcctIDFromPubKey
// belong together. Here is are the rules:
//
// 1. "Phantom account": an account that is not in the ledger
// A. If signingAcctID == signingAcctIDFromPubKey and the
// signingAcctID is not in the ledger then we have a phantom
// account.
// B. Phantom accounts are always allowed as multi-signers.
//
// 2. "Master Key"
// A. signingAcctID == signingAcctIDFromPubKey, and signingAcctID
// is in the ledger.
// B. If the signingAcctID in the ledger does not have the
// asfDisableMaster flag set, then the signature is allowed.
//
// 3. "Regular Key"
// A. signingAcctID != signingAcctIDFromPubKey, and signingAcctID
// is in the ledger.
// B. If signingAcctIDFromPubKey == signingAcctID.RegularKey (from
// ledger) then the signature is allowed.
//
// No other signatures are allowed. (January 2015)
// In any of these cases we need to know whether the account is in
// the ledger. Determine that now.
auto sleTxSignerRoot = ctx.view.read(keylet::account(txSignerAcctID));
if (signingAcctIDFromPubKey == txSignerAcctID)
{
// Either Phantom or Master. Phantoms automatically pass.
if (sleTxSignerRoot)
{
// Master Key. Account may not have asfDisableMaster set.
std::uint32_t const signerAccountFlags =
sleTxSignerRoot->getFieldU32(sfFlags);
if (signerAccountFlags & lsfDisableMaster)
{
JLOG(ctx.j.trace())
<< "applyTransaction: Signer:Account lsfDisableMaster.";
return tefMASTER_DISABLED;
}
}
}
else
{
// May be a Regular Key. Let's find out.
// Public key must hash to the account's regular key.
if (!sleTxSignerRoot)
{
JLOG(ctx.j.trace()) << "applyTransaction: Non-phantom signer "
"lacks account root.";
return tefBAD_SIGNATURE;
}
// Check if this signer has nested signers (delegation)
if (signerEntry.isFieldPresent(sfSigners))
if (!sleTxSignerRoot->isFieldPresent(sfRegularKey))
{
// This is a nested multi-signer that delegates to sub-signers
if (signerEntry.isFieldPresent(sfSigningPubKey) ||
signerEntry.isFieldPresent(sfTxnSignature))
{
JLOG(ctx.j.trace())
<< "applyTransaction: Signer cannot have both nested "
"signers and signature fields.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
// Recursively validate the nested signers against
// signer's signer list
STArray const& nestedSigners =
signerEntry.getFieldArray(sfSigners);
NotTEC result =
validateSigners(signer, nestedSigners, depth + 1);
if (!isTesSuccess(result))
return result;
// If we get here, the nested signers met their quorum
// So we add THIS signer's weight (from current level's signer
// list)
sum += iter->weight;
JLOG(ctx.j.trace())
<< "applyTransaction: Account lacks RegularKey.";
return tefBAD_SIGNATURE;
}
else
if (signingAcctIDFromPubKey !=
sleTxSignerRoot->getAccountID(sfRegularKey))
{
// This is a leaf signer - validate signature as before
if (!signerEntry.isFieldPresent(sfSigningPubKey) ||
!signerEntry.isFieldPresent(sfTxnSignature))
{
JLOG(ctx.j.trace())
<< "applyApplication: Leaf signer must have "
"SigningPubKey and TxnSignature.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
auto const spk = signerEntry.getFieldVL(sfSigningPubKey);
if (!publicKeyType(makeSlice(spk)))
{
JLOG(ctx.j.trace())
<< "checkMultiSign: signing public key type is unknown";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
AccountID const signingAcctIDFromPubKey =
calcAccountID(PublicKey(makeSlice(spk)));
auto sleTxSignerRoot = ctx.view.read(keylet::account(signer));
if (signingAcctIDFromPubKey == signer)
{
if (sleTxSignerRoot)
{
std::uint32_t const signerAccountFlags =
sleTxSignerRoot->getFieldU32(sfFlags);
if (signerAccountFlags & lsfDisableMaster)
{
JLOG(ctx.j.trace())
<< "applyTransaction: Signer:Account "
"lsfDisableMaster.";
return tefMASTER_DISABLED;
}
}
}
else
{
if (!sleTxSignerRoot)
{
JLOG(ctx.j.trace())
<< "applyTransaction: Non-phantom signer "
"lacks account root.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
if (!sleTxSignerRoot->isFieldPresent(sfRegularKey))
{
JLOG(ctx.j.trace())
<< "applyTransaction: Account lacks RegularKey.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
if (signingAcctIDFromPubKey !=
sleTxSignerRoot->getAccountID(sfRegularKey))
{
JLOG(ctx.j.trace()) << "applyTransaction: Account "
"doesn't match RegularKey.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
}
// Valid leaf signer - add their weight
sum += iter->weight;
JLOG(ctx.j.trace())
<< "applyTransaction: Account doesn't match RegularKey.";
return tefBAD_SIGNATURE;
}
char spacing[] = " ";
spacing[depth] = '\0';
std::cout << spacing << "sig check: "
<< "line: " << lineno << ", a=" << to_string(acc)
<< ", s=" << to_string(signer) << ", w=" << iter->weight
<< ", l=" << (isNested ? "f" : "t") << ", d=" << depth
<< ", " << sum << "/" << quorum << "\n";
}
// Check if this level's accumulated weight meets its required quorum
if (sum < quorum)
{
JLOG(ctx.j.trace())
<< "applyTransaction: Signers failed to meet quorum at depth "
<< depth;
return tefBAD_QUORUM;
}
return tesSUCCESS;
};
STArray const& entries(ctx.tx.getFieldArray(sfSigners));
NotTEC result = validateSigners(id, entries, 1);
if (!isTesSuccess(result))
{
std::cout << "Error: " << transToken(result) << "\n";
return result;
// The signer is legitimate. Add their weight toward the quorum.
weightSum += iter->weight;
}
// The quorum check is already done inside validateSigners for the top level
// so if we get here, we've met the quorum
// Cannot perform transaction if quorum is not met.
if (weightSum < sleAccountSigners->getFieldU32(sfSignerQuorum))
{
JLOG(ctx.j.trace())
<< "applyTransaction: Signers failed to meet quorum.";
return tefBAD_QUORUM;
}
// Met the quorum. Continue.
return tesSUCCESS;
}

View File

@@ -361,9 +361,7 @@ public:
boost::beast::iequals(
get(section(SECTION_RELATIONAL_DB), "backend"), "rwdb")) ||
(!section("node_db").empty() &&
(boost::beast::iequals(get(section("node_db"), "type"), "rwdb") ||
boost::beast::iequals(
get(section("node_db"), "type"), "flatmap")));
boost::beast::iequals(get(section("node_db"), "type"), "rwdb"));
// RHNOTE: memory type is not selected for here because it breaks
// tests
return isMem;

View File

@@ -45,7 +45,6 @@
namespace ripple {
namespace detail {
[[nodiscard]] std::uint64_t
getMemorySize()
{
@@ -54,7 +53,6 @@ getMemorySize()
return 0;
}
} // namespace detail
} // namespace ripple
#endif
@@ -64,7 +62,6 @@ getMemorySize()
namespace ripple {
namespace detail {
[[nodiscard]] std::uint64_t
getMemorySize()
{
@@ -73,7 +70,6 @@ getMemorySize()
return 0;
}
} // namespace detail
} // namespace ripple
@@ -85,7 +81,6 @@ getMemorySize()
namespace ripple {
namespace detail {
[[nodiscard]] std::uint64_t
getMemorySize()
{
@@ -98,13 +93,11 @@ getMemorySize()
return 0;
}
} // namespace detail
} // namespace ripple
#endif
namespace ripple {
// clang-format off
// The configurable node sizes are "tiny", "small", "medium", "large", "huge"
inline constexpr std::array<std::pair<SizedItem, std::array<int, 5>>, 13>
@@ -1007,6 +1000,23 @@ Config::loadFromString(std::string const& fileContents)
"the maximum number of allowed peers (peers_max)");
}
}
if (!RUN_STANDALONE)
{
auto db_section = section(ConfigSection::nodeDatabase());
if (auto type = get(db_section, "type", ""); type == "rwdb")
{
if (auto delete_interval = get(db_section, "online_delete", 0);
delete_interval == 0)
{
Throw<std::runtime_error>(
"RWDB (in-memory backend) requires online_delete to "
"prevent OOM "
"Exception: standalone mode (used by tests) doesn't need "
"online_delete");
}
}
}
}
boost::filesystem::path
@@ -1071,5 +1081,4 @@ setup_FeeVote(Section const& section)
}
return setup;
}
} // namespace ripple

View File

@@ -1,235 +0,0 @@
#include <ripple/basics/contract.h>
#include <ripple/nodestore/Factory.h>
#include <ripple/nodestore/Manager.h>
#include <ripple/nodestore/impl/DecodedBlob.h>
#include <ripple/nodestore/impl/EncodedBlob.h>
#include <ripple/nodestore/impl/codec.h>
#include <boost/beast/core/string.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/unordered/concurrent_flat_map.hpp>
#include <memory>
#include <mutex>
namespace ripple {
namespace NodeStore {
class FlatmapBackend : public Backend
{
private:
std::string name_;
beast::Journal journal_;
bool isOpen_{false};
struct base_uint_hasher
{
using result_type = std::size_t;
result_type
operator()(base_uint<256> const& value) const
{
return hardened_hash<>{}(value);
}
};
using DataStore = boost::unordered::concurrent_flat_map<
uint256,
std::vector<std::uint8_t>, // Store compressed blob data
base_uint_hasher>;
DataStore table_;
public:
FlatmapBackend(
size_t keyBytes,
Section const& keyValues,
beast::Journal journal)
: name_(get(keyValues, "path")), journal_(journal)
{
boost::ignore_unused(journal_);
if (name_.empty())
name_ = "node_db";
}
~FlatmapBackend() override
{
close();
}
std::string
getName() override
{
return name_;
}
void
open(bool createIfMissing) override
{
if (isOpen_)
Throw<std::runtime_error>("already open");
isOpen_ = true;
}
bool
isOpen() override
{
return isOpen_;
}
void
close() override
{
table_.clear();
isOpen_ = false;
}
Status
fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
{
if (!isOpen_)
return notFound;
uint256 const hash(uint256::fromVoid(key));
bool found = table_.visit(hash, [&](const auto& key_value_pair) {
nudb::detail::buffer bf;
auto const result = nodeobject_decompress(
key_value_pair.second.data(), key_value_pair.second.size(), bf);
DecodedBlob decoded(hash.data(), result.first, result.second);
if (!decoded.wasOk())
{
*pObject = nullptr;
return;
}
*pObject = decoded.createObject();
});
return found ? (*pObject ? ok : dataCorrupt) : notFound;
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
Status status = fetch(h->begin(), &nObj);
if (status != ok)
results.push_back({});
else
results.push_back(nObj);
}
return {results, ok};
}
void
store(std::shared_ptr<NodeObject> const& object) override
{
if (!isOpen_)
return;
if (!object)
return;
EncodedBlob encoded(object);
nudb::detail::buffer bf;
auto const result =
nodeobject_compress(encoded.getData(), encoded.getSize(), bf);
std::vector<std::uint8_t> compressed(
static_cast<const std::uint8_t*>(result.first),
static_cast<const std::uint8_t*>(result.first) + result.second);
table_.insert_or_assign(object->getHash(), std::move(compressed));
}
void
storeBatch(Batch const& batch) override
{
for (auto const& e : batch)
store(e);
}
void
sync() override
{
}
void
for_each(std::function<void(std::shared_ptr<NodeObject>)> f) override
{
if (!isOpen_)
return;
table_.visit_all([&f](const auto& entry) {
nudb::detail::buffer bf;
auto const result = nodeobject_decompress(
entry.second.data(), entry.second.size(), bf);
DecodedBlob decoded(
entry.first.data(), result.first, result.second);
if (decoded.wasOk())
f(decoded.createObject());
});
}
int
getWriteLoad() override
{
return 0;
}
void
setDeletePath() override
{
close();
}
int
fdRequired() const override
{
return 0;
}
private:
size_t
size() const
{
return table_.size();
}
};
class FlatmapFactory : public Factory
{
public:
FlatmapFactory()
{
Manager::instance().insert(*this);
}
~FlatmapFactory() override
{
Manager::instance().erase(*this);
}
std::string
getName() const override
{
return "Flatmap";
}
std::unique_ptr<Backend>
createInstance(
size_t keyBytes,
Section const& keyValues,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal) override
{
return std::make_unique<FlatmapBackend>(keyBytes, keyValues, journal);
}
};
static FlatmapFactory flatmapFactory;
} // namespace NodeStore
} // namespace ripple

View File

@@ -74,7 +74,7 @@ namespace detail {
// Feature.cpp. Because it's only used to reserve storage, and determine how
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
// the actual number of amendments. A LogicError on startup will verify this.
static constexpr std::size_t numFeatures = 86;
static constexpr std::size_t numFeatures = 85;
/** Amendments that this server supports and the default voting behavior.
Whether they are enabled depends on the Rules defined in the validated
@@ -373,7 +373,6 @@ extern uint256 const fixProvisionalDoubleThreading;
extern uint256 const featureClawback;
extern uint256 const featureDeepFreeze;
extern uint256 const featureIOUIssuerWeakTSH;
extern uint256 const featureNestedMultiSign;
} // namespace ripple

View File

@@ -479,7 +479,6 @@ REGISTER_FEATURE(Clawback, Supported::yes, VoteBehavior::De
REGISTER_FIX (fixProvisionalDoubleThreading, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FEATURE(DeepFreeze, Supported::yes, VoteBehavior::DefaultNo);
REGISTER_FEATURE(IOUIssuerWeakTSH, Supported::yes, VoteBehavior::DefaultNo);
REGISTER_FEATURE(NestedMultiSign, Supported::yes, VoteBehavior::DefaultNo);
// The following amendments are obsolete, but must remain supported
// because they could potentially get enabled.

View File

@@ -44,9 +44,8 @@ InnerObjectFormats::InnerObjectFormats()
sfSigner.getCode(),
{
{sfAccount, soeREQUIRED},
{sfSigningPubKey, soeOPTIONAL},
{sfTxnSignature, soeOPTIONAL},
{sfSigners, soeOPTIONAL},
{sfSigningPubKey, soeREQUIRED},
{sfTxnSignature, soeREQUIRED},
});
add(sfMajority.jsonName.c_str(),

View File

@@ -369,146 +369,64 @@ STTx::checkMultiSign(
bool const fullyCanonical = (getFlags() & tfFullyCanonicalSig) ||
(requireCanonicalSig == RequireFullyCanonicalSig::yes);
// Signers must be in sorted order by AccountID.
AccountID lastAccountID(beast::zero);
bool const isWildcardNetwork =
isFieldPresent(sfNetworkID) && getFieldU32(sfNetworkID) == 65535;
// Set max depth based on feature flag
int const maxDepth = rules.enabled(featureNestedMultiSign) ? 4 : 1;
for (auto const& signer : signers)
{
auto const accountID = signer.getAccountID(sfAccount);
// Define recursive lambda for checking signatures at any depth
std::function<Expected<void, std::string>(
STArray const&, AccountID const&, int)>
checkSignersArray;
// The account owner may not multisign for themselves.
if (accountID == txnAccountID)
return Unexpected("Invalid multisigner.");
checkSignersArray = [&](STArray const& signersArray,
AccountID const& parentAccountID,
int depth) -> Expected<void, std::string> {
// Check depth limit
if (depth > maxDepth)
// No duplicate signers allowed.
if (lastAccountID == accountID)
return Unexpected("Duplicate Signers not allowed.");
// Accounts must be in order by account ID. No duplicates allowed.
if (lastAccountID > accountID)
return Unexpected("Unsorted Signers array.");
// The next signature must be greater than this one.
lastAccountID = accountID;
// Verify the signature.
bool validSig = false;
try
{
std::cout << "Multi-signing depth limit exceeded.\n";
return Unexpected("Multi-signing depth limit exceeded.");
}
Serializer s = dataStart;
finishMultiSigningData(accountID, s);
// There are well known bounds that the number of signers must be
// within.
if (signersArray.size() < minMultiSigners ||
signersArray.size() > maxMultiSigners(&rules))
{
std::cout << "Invalid Signers array size.\n";
return Unexpected("Invalid Signers array size.");
}
auto spk = signer.getFieldVL(sfSigningPubKey);
// Signers must be in sorted order by AccountID.
AccountID lastAccountID(beast::zero);
for (auto const& signer : signersArray)
{
auto const accountID = signer.getAccountID(sfAccount);
// The account owner may not multisign for themselves.
if (accountID == txnAccountID)
if (publicKeyType(makeSlice(spk)))
{
std::cout << "Invalid multisigner.\n";
return Unexpected("Invalid multisigner.");
}
Blob const signature = signer.getFieldVL(sfTxnSignature);
// No duplicate signers allowed.
if (lastAccountID == accountID)
{
std::cout << "Duplicate Signers not allowed.\n";
return Unexpected("Duplicate Signers not allowed.");
}
// Accounts must be in order by account ID. No duplicates allowed.
if (lastAccountID > accountID)
{
std::cout << "Unsorted Signers array.\n";
return Unexpected("Unsorted Signers array.");
}
// The next signature must be greater than this one.
lastAccountID = accountID;
// Check if this signer has nested signers
if (signer.isFieldPresent(sfSigners))
{
// This is a nested multi-signer
// Ensure it doesn't also have signature fields
if (signer.isFieldPresent(sfSigningPubKey) ||
signer.isFieldPresent(sfTxnSignature))
{
std::cout << "Signer cannot have both nested signers and "
"signature "
"fields.\n";
return Unexpected(
"Signer cannot have both nested signers and signature "
"fields.");
}
// Recursively check nested signers
STArray const& nestedSigners = signer.getFieldArray(sfSigners);
auto result =
checkSignersArray(nestedSigners, accountID, depth + 1);
if (!result)
return result;
}
else
{
// This is a leaf node - must have signature
if (!signer.isFieldPresent(sfSigningPubKey) ||
!signer.isFieldPresent(sfTxnSignature))
{
std::cout << "Leaf signer must have SigningPubKey and "
"TxnSignature.\n";
return Unexpected(
"Leaf signer must have SigningPubKey and "
"TxnSignature.");
}
// Verify the signature
bool validSig = false;
try
{
Serializer s = dataStart;
finishMultiSigningData(accountID, s);
auto spk = signer.getFieldVL(sfSigningPubKey);
if (publicKeyType(makeSlice(spk)))
{
Blob const signature =
signer.getFieldVL(sfTxnSignature);
// wildcard network gets a free pass
validSig = isWildcardNetwork ||
verify(PublicKey(makeSlice(spk)),
s.slice(),
makeSlice(signature),
fullyCanonical);
}
}
catch (std::exception const&)
{
// We assume any problem lies with the signature.
validSig = false;
}
if (!validSig)
{
std::cout << std::string("Invalid signature on account ") +
toBase58(accountID) + ".\n";
return Unexpected(
std::string("Invalid signature on account ") +
toBase58(accountID) + ".");
}
// wildcard network gets a free pass
validSig = isWildcardNetwork ||
verify(PublicKey(makeSlice(spk)),
s.slice(),
makeSlice(signature),
fullyCanonical);
}
}
return {};
};
// Start the recursive check at depth 1
return checkSignersArray(signers, txnAccountID, 1);
catch (std::exception const&)
{
// We assume any problem lies with the signature.
validSig = false;
}
if (!validSig)
return Unexpected(
std::string("Invalid signature on account ") +
toBase58(accountID) + ".");
}
// All signatures verified.
return {};
}
//------------------------------------------------------------------------------

View File

@@ -1183,32 +1183,12 @@ transactionSubmitMultiSigned(
// The Signers array may only contain Signer objects.
if (std::find_if_not(
signers.begin(), signers.end(), [](STObject const& obj) {
if (obj.getCount() != 4 || !obj.isFieldPresent(sfAccount))
return false;
// leaf signer
if (obj.isFieldPresent(sfSigningPubKey) &&
obj.isFieldPresent(sfTxnSignature) &&
!obj.isFieldPresent(sfSigners))
return true;
// nested signer
if (!obj.isFieldPresent(sfSigningPubKey) &&
!obj.isFieldPresent(sfTxnSignature) &&
obj.isFieldPresent(sfSigners))
return true;
/*
std::cout << "Error caused by:\n" <<
obj.getJson(JsonOptions::none) << "\n"
<< "obj.isFieldPresent(sfAccount) = " <<
(obj.isFieldPresent(sfAccount) ? "t" : "f") << "\n"
<< "obj.isFieldPresent(sfSigningPubKey) = " <<
(obj.isFieldPresent(sfSigningPubKey) ? "t" : "f") << "\n"
<< "obj.isFieldPresent(sfTxnSignature) = " <<
(obj.isFieldPresent(sfTxnSignature) ? "t" : "f") << "\n"
<< "obj.getCount() = " << obj.getCount() << "\n\n";
*/
return false;
return (
// A Signer object always contains these fields and no
// others.
obj.isFieldPresent(sfAccount) &&
obj.isFieldPresent(sfSigningPubKey) &&
obj.isFieldPresent(sfTxnSignature) && obj.getCount() == 3);
}) != signers.end())
{
return RPC::make_param_error(

View File

@@ -1659,419 +1659,6 @@ public:
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
}
void
test_nestedMultiSign(FeatureBitset features)
{
testcase("Nested MultiSign");
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
#define LINE_TO_HEX_STRING \
[]() -> std::string { \
const char* line = TOSTRING(__LINE__); \
int len = 0; \
while (line[len]) \
len++; \
std::string result; \
if (len % 2 == 1) \
{ \
result += (char)(0x00 * 16 + (line[0] - '0')); \
line++; \
} \
for (int i = 0; line[i]; i += 2) \
{ \
result += (char)((line[i] - '0') * 16 + (line[i + 1] - '0')); \
} \
return result; \
}()
#define M(m) memo(m, "", "")
#define L() memo(LINE_TO_HEX_STRING, "", "")
using namespace jtx;
Env env{*this, envconfig(), features};
Account const alice{"alice", KeyType::secp256k1};
Account const becky{"becky", KeyType::ed25519};
Account const cheri{"cheri", KeyType::secp256k1};
Account const daria{"daria", KeyType::ed25519};
Account const edgar{"edgar", KeyType::secp256k1};
Account const fiona{"fiona", KeyType::ed25519};
Account const grace{"grace", KeyType::secp256k1};
Account const henry{"henry", KeyType::ed25519};
Account const f1{"f1", KeyType::ed25519};
Account const f2{"f2", KeyType::ed25519};
Account const f3{"f3", KeyType::ed25519};
env.fund(
XRP(1000),
alice,
becky,
cheri,
daria,
edgar,
fiona,
grace,
henry,
f1,
f2,
f3,
phase,
jinni,
acc10,
acc11,
acc12);
env.close();
std::cout << "alice: " << to_string(alice) << "\n";
std::cout << "becky: " << to_string(becky) << "\n";
std::cout << "cheri: " << to_string(cheri) << "\n";
std::cout << "daria: " << to_string(daria) << "\n";
std::cout << "edgar: " << to_string(edgar) << "\n";
std::cout << "fiona: " << to_string(fiona) << "\n";
std::cout << "grace: " << to_string(grace) << "\n";
std::cout << "henry: " << to_string(henry) << "\n";
std::cout << "f1: " << to_string(f1) << "\n";
std::cout << "f2: " << to_string(f2) << "\n";
std::cout << "f3: " << to_string(f3) << "\n";
std::cout << "phase: " << to_string(phase) << "\n";
std::cout << "jinni: " << to_string(jinni) << "\n";
std::cout << "acc10: " << to_string(acc10) << "\n";
std::cout << "acc11: " << to_string(acc11) << "\n";
std::cout << "acc12: " << to_string(acc12) << "\n";
auto const baseFee = env.current()->fees().base;
if (!features[featureNestedMultiSign])
{
// When feature is disabled, nested signing should fail
env(signers(f1, 1, {{f2, 1}}));
env(signers(f2, 1, {{f3, 1}}));
env.close();
std::uint32_t f1Seq = env.seq(f1);
env(noop(f1),
msig({msigner(f2, msigner(f3))}),
L(),
fee(3 * baseFee),
ter(temINVALID));
env.close();
BEAST_EXPECT(env.seq(f1) == f1Seq);
return;
}
// Test Case 1: Basic 2-level nested signing with quorum
{
// Set up signer lists with quorum requirements
env(signers(becky, 2, {{bogie, 1}, {demon, 1}, {ghost, 1}}));
env(signers(cheri, 3, {{haunt, 2}, {jinni, 2}}));
env.close();
// Alice requires quorum of 3 with weighted signers
env(signers(alice, 3, {{becky, 2}, {cheri, 2}, {daria, 1}}));
env.close();
// Test 1a: becky alone (weight 2) doesn't meet alice's quorum
std::uint32_t aliceSeq = env.seq(alice);
env(noop(alice),
msig({msigner(becky, msigner(bogie), msigner(demon))}),
L(),
fee(4 * baseFee),
ter(tefBAD_QUORUM));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq);
// Test 1b: becky (2) + daria (1) meets quorum of 3
aliceSeq = env.seq(alice);
env(noop(alice),
msig(
{msigner(becky, msigner(bogie), msigner(demon)),
msigner(daria)}),
L(),
fee(5 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
// Test 1c: cheri's nested signers must meet her quorum
aliceSeq = env.seq(alice);
env(noop(alice),
msig(
{msigner(
becky,
msigner(bogie),
msigner(demon)), // becky has a satisfied quorum
msigner(cheri, msigner(haunt))}), // but cheri does not
// (needs jinni too)
L(),
fee(5 * baseFee),
ter(tefBAD_QUORUM));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq);
// Test 1d: cheri with both signers meets her quorum
aliceSeq = env.seq(alice);
env(noop(alice),
msig(
{msigner(cheri, msigner(haunt), msigner(jinni)),
msigner(daria)}),
L(),
fee(5 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
}
// Test Case 2: 3-level maximum depth with quorum at each level
{
// Level 2: phase needs direct signatures (no deeper nesting)
env(signers(phase, 2, {{acc10, 1}, {acc11, 1}, {acc12, 1}}));
// Level 1: jinni needs weighted signatures
env(signers(jinni, 3, {{phase, 2}, {shade, 2}, {spook, 1}}));
// Level 0: edgar needs 2 from weighted signers
env(signers(edgar, 2, {{jinni, 1}, {bogie, 1}, {demon, 1}}));
// Alice now requires edgar with weight 3
env(signers(alice, 3, {{edgar, 3}, {fiona, 2}}));
env.close();
// Test 2a: 3-level signing with phase signing directly (not through
// nested signers)
std::uint32_t aliceSeq = env.seq(alice);
env(noop(alice),
msig({
msigner(
edgar,
msigner(
jinni,
msigner(phase), // phase signs directly at level 3
msigner(shade)) // jinni quorum: 2+2 = 4 >= 3 ✓
) // edgar quorum: 1+0 = 1 < 2 ✗
}),
L(),
fee(4 * baseFee),
ter(tefBAD_QUORUM));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq);
// Test 2b: Edgar needs to meet his quorum too
aliceSeq = env.seq(alice);
env(noop(alice),
msig({
msigner(
edgar,
msigner(
jinni,
msigner(phase), // phase signs directly
msigner(shade)),
msigner(bogie)) // edgar quorum: 1+1 = 2 ✓
}),
L(),
fee(5 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
// Test 2c: Use phase's signers (making it effectively 3-level from
// alice)
aliceSeq = env.seq(alice);
env(noop(alice),
msig({msigner(
edgar,
msigner(
jinni,
msigner(phase, msigner(acc10), msigner(acc11)),
msigner(spook)),
msigner(bogie))}),
L(),
fee(6 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
}
// Test Case 3: Mixed levels - some direct, some nested at different
// depths (max 3)
{
// Set up mixed-level signing for alice
// grace has direct signers
env(signers(grace, 2, {{bogie, 1}, {demon, 1}}));
// henry has 2-level signers (henry -> becky -> bogie/demon)
env(signers(henry, 1, {{becky, 1}, {cheri, 1}}));
// edgar can be signed for by bogie
env(signers(edgar, 1, {{bogie, 1}}));
// Alice has mix of direct and nested signers at different weights
env(signers(
alice,
5,
{
{daria, 1}, // direct signer
{edgar, 2}, // has 2-level signers
{fiona, 1}, // direct signer
{grace, 2}, // has direct signers
{henry, 2} // has 2-level signers
}));
env.close();
// Test 3a: Mix of all levels meeting quorum exactly
std::uint32_t aliceSeq = env.seq(alice);
env(noop(alice),
msig({
msigner(daria), // weight 1, direct
msigner(edgar, msigner(bogie)), // weight 2, 2-level
msigner(grace, msigner(bogie), msigner(demon)) // weight 2,
// 2-level
}),
L(),
fee(6 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
// Test 3b: 3-level signing through henry
aliceSeq = env.seq(alice);
env(noop(alice),
msig(
{msigner(fiona), // weight 1, direct
msigner(
grace, msigner(bogie)), // weight 2, 2-level (partial)
msigner(
henry, // weight 2, 3-level
msigner(becky, msigner(bogie), msigner(demon)))}),
L(),
fee(6 * baseFee),
ter(tefBAD_QUORUM)); // grace didn't meet quorum
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq);
// Test 3c: Correct version with all quorums met
aliceSeq = env.seq(alice);
env(noop(alice),
msig({
msigner(
henry, // weight 2
msigner(becky, msigner(bogie), msigner(demon))),
msigner(fiona), // weight 1
msigner(edgar, msigner(bogie), msigner(demon)) // weight 2
}),
L(),
fee(8 * baseFee)); // Total weight: 1+2+2 = 5 ✓
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
}
// Test Case 4: Complex scenario with maximum signers at mixed depths
// (max 3)
{
// Create a signing tree that uses close to maximum signers
// and tests weight accumulation across all levels
// Set up for alice: needs 15 out of possible 20 weight
env(signers(
alice,
15,
{
{becky, 3}, // will use 2-level
{cheri, 3}, // will use 2-level
{daria, 3}, // will use direct
{edgar, 3}, // will use 2-level
{fiona, 3}, // will use direct
{grace, 3}, // will use direct
{henry, 2} // will use 2-level
}));
env.close();
// Complex multi-level transaction just meeting quorum
std::uint32_t aliceSeq = env.seq(alice);
env(noop(alice),
msig({
msigner(
becky, // weight 3, 2-level
msigner(demon),
msigner(ghost)),
msigner(
cheri, // weight 3, 2-level
msigner(haunt),
msigner(jinni)),
msigner(daria), // weight 3, direct
msigner(
edgar, // weight 3, 2-level
msigner(bogie),
msigner(demon)),
msigner(grace) // weight 3, direct
}),
L(),
fee(10 * baseFee)); // Total weight: 3+3+3+3+3 = 15 ✓
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
// Test 4b: Test with henry using 3-level depth (maximum)
// First set up henry's chain properly
env(signers(henry, 1, {{jinni, 1}}));
env(signers(jinni, 2, {{acc10, 1}, {acc11, 1}}));
env.close();
aliceSeq = env.seq(alice);
env(noop(alice),
msig(
{msigner(
becky, // weight 3
msigner(demon)), // becky quorum not met!
msigner(
cheri, // weight 3
msigner(haunt),
msigner(jinni)),
msigner(daria), // weight 3
msigner(
henry, // weight 2, 3-level depth
msigner(jinni, msigner(acc10), msigner(acc11))),
msigner(
edgar, // weight 3
msigner(demon),
msigner(bogie))}),
L(),
fee(10 * baseFee),
ter(tefBAD_QUORUM)); // becky's quorum not met
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq);
}
// Test Case 5: Edge case - single signer with maximum nesting (depth 3)
{
// Alice needs just one signer, but that signer uses depth up to 3
env(signers(alice, 1, {{becky, 1}}));
env.close();
std::uint32_t aliceSeq = env.seq(alice);
env(noop(alice),
msig({msigner(becky, msigner(demon), msigner(ghost))}),
L(),
fee(4 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
// Now with 3-level depth (maximum allowed)
// Structure: alice -> becky -> cheri -> jinni (jinni signs
// directly)
env(signers(becky, 1, {{cheri, 1}}));
env(signers(cheri, 1, {{jinni, 1}}));
// Note: We do NOT add signers to jinni to keep max depth at 3
env.close();
aliceSeq = env.seq(alice);
env(noop(alice),
msig({msigner(
becky,
msigner(
cheri,
msigner(jinni)))}), // jinni signs directly (depth 3)
L(),
fee(4 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
}
}
void
testAll(FeatureBitset features)
{
@@ -2093,7 +1680,6 @@ public:
test_signForHash(features);
test_signersWithTickets(features);
test_signersWithTags(features);
test_nestedMultiSign(features);
}
void
@@ -2106,11 +1692,8 @@ public:
// featureMultiSignReserve. Limits on the number of signers
// changes based on featureExpandedSignerList. Test both with and
// without.
testAll(
all - featureMultiSignReserve - featureExpandedSignerList -
featureNestedMultiSign);
testAll(all - featureExpandedSignerList - featureNestedMultiSign);
testAll(all - featureNestedMultiSign);
testAll(all - featureMultiSignReserve - featureExpandedSignerList);
testAll(all - featureExpandedSignerList);
testAll(all);
test_amendmentTransition();
}

View File

@@ -216,6 +216,10 @@ public:
}
BEAST_EXPECT(store.getLastRotated() == lastRotated);
SQLiteDatabase* const db =
dynamic_cast<SQLiteDatabase*>(&env.app().getRelationalDatabase());
BEAST_EXPECT(*db->getTransactionsMinLedgerSeq() == 3);
for (auto i = 3; i < deleteInterval + lastRotated; ++i)
{
ledgers.emplace(

View File

@@ -1206,6 +1206,97 @@ r.ripple.com:51235
}
}
void
testRWDBOnlineDelete()
{
    testcase("RWDB online_delete validation");

    // Attempts Config::loadFromString() with the given [node_db] stanza
    // and standalone flag.  Returns true if loading threw a
    // std::runtime_error (with the exception text stored in errWhat);
    // returns false (and clears errWhat) on success.  Exceptions of any
    // other type propagate, matching the original per-case handlers.
    auto const tryLoad = [](std::string const& toLoad,
                            bool standalone,
                            std::string& errWhat) -> bool {
        errWhat.clear();
        Config c;
        c.setupControl(true, true, standalone);
        try
        {
            c.loadFromString(toLoad);
            return false;
        }
        catch (std::runtime_error const& e)
        {
            errWhat = e.what();
            return true;
        }
    };

    std::string const rwdbNoDelete =
        "[node_db]\n"
        "type=rwdb\n"
        "path=main\n";
    std::string errWhat;

    // Test 1: RWDB without online_delete in standalone mode (should
    // succeed)
    if (tryLoad(rwdbNoDelete, true, errWhat))
        fail("Should not throw in standalone mode");
    else
        pass();

    // Test 2: RWDB without online_delete NOT in standalone mode (should
    // throw, and the message should explain the online_delete
    // requirement)
    if (tryLoad(rwdbNoDelete, false, errWhat))
    {
        BEAST_EXPECT(
            errWhat.find(
                "RWDB (in-memory backend) requires online_delete") !=
            std::string::npos);
        pass();
    }
    else
        fail("Expected exception for RWDB without online_delete");

    // Test 3: RWDB with online_delete NOT in standalone mode (should
    // succeed)
    if (tryLoad(
            "[node_db]\n"
            "type=rwdb\n"
            "path=main\n"
            "online_delete=256\n",
            false,
            errWhat))
        fail("Should not throw when online_delete is configured");
    else
        pass();

    // Test 4: Non-RWDB backend without online_delete NOT in standalone
    // mode (should succeed)
    if (tryLoad(
            "[node_db]\n"
            "type=NuDB\n"
            "path=main\n",
            false,
            errWhat))
        fail("Should not throw for non-RWDB backends");
    else
        pass();
}
void
testOverlay()
{
@@ -1295,6 +1386,7 @@ r.ripple.com:51235
testComments();
testGetters();
testAmendment();
testRWDBOnlineDelete();
testOverlay();
testNetworkID();
}

View File

@@ -310,7 +310,6 @@ Env::submit(JTx const& jt)
{
// Parsing failed or the JTx is
// otherwise missing the stx field.
std::cout << "!!! temMALFORMED " << __FILE__ << " " << __LINE__ << "\n";
ter_ = temMALFORMED;
didApply = false;
}

View File

@@ -66,45 +66,15 @@ signers(Account const& account, none_t)
//------------------------------------------------------------------------------
// Helper function to recursively sort nested signers
void
sortSignersRecursive(std::vector<msig::SignerPtr>& signers)
msig::msig(std::vector<msig::Reg> signers_) : signers(std::move(signers_))
{
// Sort current level by account ID
// Signatures must be applied in sorted order.
std::sort(
signers.begin(),
signers.end(),
[](msig::SignerPtr const& lhs, msig::SignerPtr const& rhs) {
return lhs->id() < rhs->id();
[](msig::Reg const& lhs, msig::Reg const& rhs) {
return lhs.acct.id() < rhs.acct.id();
});
// Recursively sort nested signers for each signer at this level
for (auto& signer : signers)
{
if (signer->isNested() && !signer->nested.empty())
{
sortSignersRecursive(signer->nested);
}
}
}
msig::msig(std::vector<msig::SignerPtr> signers_) : signers(std::move(signers_))
{
// Recursively sort all signers at all nesting levels
// This ensures account IDs are in strictly ascending order at each level
sortSignersRecursive(signers);
}
msig::msig(std::vector<msig::Reg> signers_)
{
// Convert Reg vector to SignerPtr vector for backward compatibility
signers.reserve(signers_.size());
for (auto const& s : signers_)
signers.push_back(s.toSigner());
// Recursively sort all signers at all nesting levels
// This ensures account IDs are in strictly ascending order at each level
sortSignersRecursive(signers);
}
void
@@ -123,47 +93,19 @@ msig::operator()(Env& env, JTx& jt) const
env.test.log << pretty(jtx.jv) << std::endl;
Rethrow();
}
// Recursive function to build signer JSON
std::function<Json::Value(SignerPtr const&)> buildSignerJson;
buildSignerJson = [&](SignerPtr const& signer) -> Json::Value {
Json::Value jo;
jo[jss::Account] = signer->acct.human();
if (signer->isNested())
{
// For nested signers, we use the already-sorted nested vector
// (sorted during construction via sortSignersRecursive)
// This ensures account IDs are in strictly ascending order
auto& subJs = jo[sfSigners.getJsonName()];
for (std::size_t i = 0; i < signer->nested.size(); ++i)
{
auto& subJo = subJs[i][sfSigner.getJsonName()];
subJo = buildSignerJson(signer->nested[i]);
}
}
else
{
// This is a leaf signer - add signature
jo[jss::SigningPubKey] = strHex(signer->sig.pk().slice());
Serializer ss{buildMultiSigningData(*st, signer->acct.id())};
auto const sig = ripple::sign(
*publicKeyType(signer->sig.pk().slice()),
signer->sig.sk(),
ss.slice());
jo[sfTxnSignature.getJsonName()] =
strHex(Slice{sig.data(), sig.size()});
}
return jo;
};
auto& js = jtx[sfSigners.getJsonName()];
for (std::size_t i = 0; i < mySigners.size(); ++i)
{
auto const& e = mySigners[i];
auto& jo = js[i][sfSigner.getJsonName()];
jo = buildSignerJson(mySigners[i]);
jo[jss::Account] = e.acct.human();
jo[jss::SigningPubKey] = strHex(e.sig.pk().slice());
Serializer ss{buildMultiSigningData(*st, e.acct.id())};
auto const sig = ripple::sign(
*publicKeyType(e.sig.pk().slice()), e.sig.sk(), ss.slice());
jo[sfTxnSignature.getJsonName()] =
strHex(Slice{sig.data(), sig.size()});
}
};
}

View File

@@ -21,7 +21,6 @@
#define RIPPLE_TEST_JTX_MULTISIGN_H_INCLUDED
#include <cstdint>
#include <memory>
#include <optional>
#include <test/jtx/Account.h>
#include <test/jtx/amount.h>
@@ -66,48 +65,6 @@ signers(Account const& account, none_t);
class msig
{
public:
// Recursive signer structure
struct Signer
{
Account acct;
Account sig; // For leaf signers (same as acct for master key)
std::vector<std::shared_ptr<Signer>> nested; // For nested signers
// Leaf signer constructor (regular signing)
Signer(Account const& masterSig) : acct(masterSig), sig(masterSig)
{
}
// Leaf signer constructor (with different signing key)
Signer(Account const& acct_, Account const& regularSig)
: acct(acct_), sig(regularSig)
{
}
// Nested signer constructor
Signer(
Account const& acct_,
std::vector<std::shared_ptr<Signer>> nested_)
: acct(acct_), nested(std::move(nested_))
{
}
bool
isNested() const
{
return !nested.empty();
}
AccountID
id() const
{
return acct.id();
}
};
using SignerPtr = std::shared_ptr<Signer>;
// For backward compatibility
struct Reg
{
Account acct;
@@ -116,13 +73,16 @@ public:
Reg(Account const& masterSig) : acct(masterSig), sig(masterSig)
{
}
Reg(Account const& acct_, Account const& regularSig)
: acct(acct_), sig(regularSig)
{
}
Reg(char const* masterSig) : acct(masterSig), sig(masterSig)
{
}
Reg(char const* acct_, char const* regularSig)
: acct(acct_), sig(regularSig)
{
@@ -133,25 +93,13 @@ public:
{
return acct < rhs.acct;
}
// Convert to Signer
SignerPtr
toSigner() const
{
return std::make_shared<Signer>(acct, sig);
}
};
std::vector<SignerPtr> signers;
std::vector<Reg> signers;
public:
// Direct constructor with SignerPtr vector
msig(std::vector<SignerPtr> signers_);
// Backward compatibility constructor
msig(std::vector<Reg> signers_);
// Variadic constructor for backward compatibility
template <class AccountType, class... Accounts>
explicit msig(AccountType&& a0, Accounts&&... aN)
: msig{std::vector<Reg>{
@@ -164,30 +112,6 @@ public:
operator()(Env&, JTx& jt) const;
};
// Helper functions to create signers - renamed to avoid conflict with sig()
// transaction modifier
inline msig::SignerPtr
msigner(Account const& acct)
{
return std::make_shared<msig::Signer>(acct);
}
inline msig::SignerPtr
msigner(Account const& acct, Account const& signingKey)
{
return std::make_shared<msig::Signer>(acct, signingKey);
}
// Create nested signer with initializer list
template <typename... Args>
inline msig::SignerPtr
msigner(Account const& acct, Args&&... args)
{
std::vector<msig::SignerPtr> nested;
(nested.push_back(std::forward<Args>(args)), ...);
return std::make_shared<msig::Signer>(acct, std::move(nested));
}
//------------------------------------------------------------------------------
/** The number of signer lists matches. */

View File

@@ -0,0 +1,756 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/rdb/RelationalDatabase.h>
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
#include <ripple/core/ConfigSections.h>
#include <boost/filesystem.hpp>
#include <chrono>
#include <test/jtx.h>
#include <test/jtx/envconfig.h>
namespace ripple {
namespace test {
class RelationalDatabase_test : public beast::unit_test::suite
{
private:
// Helper to get SQLiteDatabase* (works for both SQLite and RWDB since RWDB
// inherits from SQLiteDatabase)
static SQLiteDatabase*
getInterface(Application& app)
{
    // Both the sqlite and rwdb backends implement SQLiteDatabase, so
    // the downcast succeeds for either; returns nullptr otherwise.
    auto& rdb = app.getRelationalDatabase();
    return dynamic_cast<SQLiteDatabase*>(&rdb);
}
static SQLiteDatabase*
getInterface(RelationalDatabase& db)
{
    // Overload for callers that already hold the database reference;
    // returns nullptr if the backend is not SQLiteDatabase-derived.
    auto* const iface = dynamic_cast<SQLiteDatabase*>(&db);
    return iface;
}
static std::unique_ptr<Config>
makeConfig(std::string const& backend)
{
    // Start from the standard unit-test config and select the requested
    // relational-database backend.  No database_path is set: in
    // standalone mode the sqlite backend uses in-memory databases.
    auto cfg = test::jtx::envconfig();
    cfg->overwrite(SECTION_RELATIONAL_DB, "backend", backend);
    return cfg;
}
public:
RelationalDatabase_test() = default;
void
testBasicInitialization(
    std::string const& backend,
    std::unique_ptr<Config> config)
{
    testcase("Basic initialization and empty database - " + backend);
    using namespace test::jtx;

    Env env(*this, std::move(config));
    auto& rdb = env.app().getRelationalDatabase();

    // A freshly-started Env holds exactly one stored ledger, and the
    // expectations below pin its sequence number at 2.
    BEAST_EXPECT(rdb.getMinLedgerSeq() == 2);
    BEAST_EXPECT(rdb.getMaxLedgerSeq() == 2);
    BEAST_EXPECT(rdb.getNewestLedgerInfo()->seq == 2);

    auto* const iface = getInterface(rdb);
    BEAST_EXPECT(iface != nullptr);
    if (iface == nullptr)
        return;

    // No transactions have been stored yet, so the minimum-sequence
    // trackers are unset.
    BEAST_EXPECT(!iface->getTransactionsMinLedgerSeq().has_value());
    BEAST_EXPECT(!iface->getAccountTransactionsMinLedgerSeq().has_value());

    auto const counts = iface->getLedgerCountMinMax();
    BEAST_EXPECT(counts.numberOfRows == 1);
    BEAST_EXPECT(counts.minLedgerSequence == 2);
    BEAST_EXPECT(counts.maxLedgerSequence == 2);
}
// Verifies that getMinLedgerSeq()/getMaxLedgerSeq() track the stored
// ledger range as ledgers are closed, and that getLedgerCountMinMax()
// agrees.  The exact sequence numbers below depend on the order of
// env.close() calls, so statements must not be reordered.
void
testLedgerSequenceOperations(
    std::string const& backend,
    std::unique_ptr<Config> config)
{
    testcase("Ledger sequence operations - " + backend);
    using namespace test::jtx;
    // Keep plenty of ledger history so nothing is pruned mid-test.
    config->LEDGER_HISTORY = 1000;
    Env env(*this, std::move(config));
    auto& db = env.app().getRelationalDatabase();
    // Create initial ledger
    Account alice("alice");
    env.fund(XRP(10000), alice);
    env.close();
    // Test basic sequence operations: first stored ledger is seq 2,
    // and the close above produced seq 3.
    auto minSeq = db.getMinLedgerSeq();
    auto maxSeq = db.getMaxLedgerSeq();
    BEAST_EXPECT(minSeq.has_value());
    BEAST_EXPECT(maxSeq.has_value());
    BEAST_EXPECT(*minSeq == 2);
    BEAST_EXPECT(*maxSeq == 3);
    // Create more ledgers (two closes -> seq 4 and 5)
    env(pay(alice, Account("bob"), XRP(1000)));
    env.close();
    env(pay(alice, Account("carol"), XRP(500)));
    env.close();
    // Verify sequence updates: min stays at 2, max advances to 5
    minSeq = db.getMinLedgerSeq();
    maxSeq = db.getMaxLedgerSeq();
    BEAST_EXPECT(*minSeq == 2);
    BEAST_EXPECT(*maxSeq == 5);
    auto* sqliteDb = getInterface(db);
    if (sqliteDb)
    {
        // Row count must match the stored range [2, 5].
        auto ledgerCount = sqliteDb->getLedgerCountMinMax();
        BEAST_EXPECT(ledgerCount.numberOfRows == 4);
        BEAST_EXPECT(ledgerCount.minLedgerSequence == 2);
        BEAST_EXPECT(ledgerCount.maxLedgerSequence == 5);
    }
}
// Exercises the LedgerInfo lookup API: newest, by index, by hash, and
// the "limited" oldest/newest variants, plus negative lookups with an
// out-of-range index and an all-zero hash.
void
testLedgerInfoOperations(
    std::string const& backend,
    std::unique_ptr<Config> config)
{
    testcase("Ledger info retrieval operations - " + backend);
    using namespace test::jtx;
    // Keep plenty of ledger history so nothing is pruned mid-test.
    config->LEDGER_HISTORY = 1000;
    Env env(*this, std::move(config));
    auto* db = getInterface(env.app());
    Account alice("alice");
    env.fund(XRP(10000), alice);
    env.close();
    // Test getNewestLedgerInfo: the close above produced seq 3.
    auto newestLedger = db->getNewestLedgerInfo();
    BEAST_EXPECT(newestLedger.has_value());
    BEAST_EXPECT(newestLedger->seq == 3);
    // Test getLedgerInfoByIndex: must agree with the newest ledger.
    auto ledgerByIndex = db->getLedgerInfoByIndex(3);
    BEAST_EXPECT(ledgerByIndex.has_value());
    BEAST_EXPECT(ledgerByIndex->seq == 3);
    BEAST_EXPECT(ledgerByIndex->hash == newestLedger->hash);
    // Test getLedgerInfoByHash: round-trip via the hash just obtained.
    auto ledgerByHash = db->getLedgerInfoByHash(newestLedger->hash);
    BEAST_EXPECT(ledgerByHash.has_value());
    BEAST_EXPECT(ledgerByHash->seq == 3);
    BEAST_EXPECT(ledgerByHash->hash == newestLedger->hash);
    // Test getLimitedOldestLedgerInfo (lower bound seq 2)
    auto oldestLedger = db->getLimitedOldestLedgerInfo(2);
    BEAST_EXPECT(oldestLedger.has_value());
    BEAST_EXPECT(oldestLedger->seq == 2);
    // Test getLimitedNewestLedgerInfo (lower bound seq 2)
    auto limitedNewest = db->getLimitedNewestLedgerInfo(2);
    BEAST_EXPECT(limitedNewest.has_value());
    BEAST_EXPECT(limitedNewest->seq == 3);
    // Test invalid queries: unknown index and a default-constructed
    // (all-zero) hash must both come back empty.
    auto invalidLedger = db->getLedgerInfoByIndex(999);
    BEAST_EXPECT(!invalidLedger.has_value());
    uint256 invalidHash;
    auto invalidHashLedger = db->getLedgerInfoByHash(invalidHash);
    BEAST_EXPECT(!invalidHashLedger.has_value());
}
// Exercises ledger-hash retrieval: single-index hash lookup, the
// ledger/parent hash pair, range queries, and the failure modes
// (unknown index returns a zero hash / empty optional; inverted range
// returns an empty map).
void
testHashOperations(
    std::string const& backend,
    std::unique_ptr<Config> config)
{
    testcase("Hash retrieval operations - " + backend);
    using namespace test::jtx;
    // Keep plenty of ledger history so nothing is pruned mid-test.
    config->LEDGER_HISTORY = 1000;
    Env env(*this, std::move(config));
    auto& db = env.app().getRelationalDatabase();
    Account alice("alice");
    env.fund(XRP(10000), alice);
    env.close();
    env(pay(alice, Account("bob"), XRP(1000)));
    env.close();
    // Test getHashByIndex: ledgers 3 and 4 exist and have distinct,
    // non-zero hashes.
    auto hash1 = db.getHashByIndex(3);
    auto hash2 = db.getHashByIndex(4);
    BEAST_EXPECT(hash1 != uint256());
    BEAST_EXPECT(hash2 != uint256());
    BEAST_EXPECT(hash1 != hash2);
    // Test getHashesByIndex (single): ledger 4's parent is ledger 3.
    auto hashPair = db.getHashesByIndex(4);
    BEAST_EXPECT(hashPair.has_value());
    BEAST_EXPECT(hashPair->ledgerHash == hash2);
    BEAST_EXPECT(hashPair->parentHash == hash1);
    // Test getHashesByIndex (range): keyed by sequence, chained by
    // parent hash.
    auto hashRange = db.getHashesByIndex(3, 4);
    BEAST_EXPECT(hashRange.size() == 2);
    BEAST_EXPECT(hashRange[3].ledgerHash == hash1);
    BEAST_EXPECT(hashRange[4].ledgerHash == hash2);
    BEAST_EXPECT(hashRange[4].parentHash == hash1);
    // Test invalid hash queries
    auto invalidHash = db.getHashByIndex(999);
    BEAST_EXPECT(invalidHash == uint256());
    auto invalidHashPair = db.getHashesByIndex(999);
    BEAST_EXPECT(!invalidHashPair.has_value());
    auto emptyRange = db.getHashesByIndex(10, 5); // max < min
    BEAST_EXPECT(emptyRange.empty());
}
// Exercises transaction storage/retrieval: global and per-account
// transaction counts, lookup of a nonexistent transaction (with and
// without a ledger range), and getTxHistory.  The exact counts below
// are pinned by the funding and payment sequence, so statements must
// not be reordered.
void
testTransactionOperations(
    std::string const& backend,
    std::unique_ptr<Config> config)
{
    testcase("Transaction storage and retrieval - " + backend);
    using namespace test::jtx;
    // Keep plenty of ledger history so nothing is pruned mid-test.
    config->LEDGER_HISTORY = 1000;
    Env env(*this, std::move(config));
    auto& db = env.app().getRelationalDatabase();
    Account alice("alice");
    Account bob("bob");
    env.fund(XRP(10000), alice, bob);
    env.close();
    auto* sqliteDb = getInterface(db);
    BEAST_EXPECT(sqliteDb != nullptr);
    if (!sqliteDb)
        return;
    // Test initial transaction counts after funding: per the expected
    // values, funding two accounts stores 4 transactions and 6
    // account-transaction rows.
    auto initialTxCount = sqliteDb->getTransactionCount();
    auto initialAcctTxCount = sqliteDb->getAccountTransactionCount();
    BEAST_EXPECT(initialTxCount == 4);
    BEAST_EXPECT(initialAcctTxCount == 6);
    // Create transactions
    env(pay(alice, bob, XRP(1000)));
    env.close();
    env(pay(bob, alice, XRP(500)));
    env.close();
    // Test transaction counts after creation: each payment adds one
    // transaction and two account-transaction rows (one per party).
    auto txCount = sqliteDb->getTransactionCount();
    auto acctTxCount = sqliteDb->getAccountTransactionCount();
    BEAST_EXPECT(txCount == 6);
    BEAST_EXPECT(acctTxCount == 10);
    // Test transaction retrieval: a zero (default) txn ID is unknown
    // and must yield a TxSearched result rather than a transaction.
    uint256 invalidTxId;
    error_code_i ec;
    auto invalidTxResult =
        sqliteDb->getTransaction(invalidTxId, std::nullopt, ec);
    BEAST_EXPECT(std::holds_alternative<TxSearched>(invalidTxResult));
    // Test transaction history
    auto txHistory = db.getTxHistory(0);
    BEAST_EXPECT(!txHistory.empty());
    BEAST_EXPECT(txHistory.size() == 6);
    // Test with valid transaction range: searching the full stored
    // range for an unknown ID reports how much of the range was
    // covered (all or some).
    auto minSeq = sqliteDb->getTransactionsMinLedgerSeq();
    auto maxSeq = db.getMaxLedgerSeq();
    if (minSeq && maxSeq)
    {
        ClosedInterval<std::uint32_t> range(*minSeq, *maxSeq);
        auto rangeResult = sqliteDb->getTransaction(invalidTxId, range, ec);
        auto searched = std::get<TxSearched>(rangeResult);
        BEAST_EXPECT(
            searched == TxSearched::all || searched == TxSearched::some);
    }
}
// Exercises the per-account transaction queries (oldest/newest, binary
// and non-binary variants) plus limit/offset handling and an unknown
// account.  The expected result sizes assume alice appears in 5 stored
// transactions: her funding plus the three payments below.
void
testAccountTransactionOperations(
    std::string const& backend,
    std::unique_ptr<Config> config)
{
    testcase("Account transaction operations - " + backend);
    using namespace test::jtx;
    // Keep plenty of ledger history so nothing is pruned mid-test.
    config->LEDGER_HISTORY = 1000;
    Env env(*this, std::move(config));
    auto& db = env.app().getRelationalDatabase();
    Account alice("alice");
    Account bob("bob");
    Account carol("carol");
    env.fund(XRP(10000), alice, bob, carol);
    env.close();
    auto* sqliteDb = getInterface(db);
    BEAST_EXPECT(sqliteDb != nullptr);
    if (!sqliteDb)
        return;
    // Create multiple transactions involving alice
    env(pay(alice, bob, XRP(1000)));
    env.close();
    env(pay(bob, alice, XRP(500)));
    env.close();
    env(pay(alice, carol, XRP(250)));
    env.close();
    auto minSeq = db.getMinLedgerSeq();
    auto maxSeq = db.getMaxLedgerSeq();
    if (!minSeq || !maxSeq)
        return;
    // Test getOldestAccountTxs over the full stored range
    // (account, minLedger, maxLedger, offset, limit, bUnlimited)
    RelationalDatabase::AccountTxOptions options{
        alice.id(), *minSeq, *maxSeq, 0, 10, false};
    auto oldestTxs = sqliteDb->getOldestAccountTxs(options);
    BEAST_EXPECT(oldestTxs.size() == 5);
    // Test getNewestAccountTxs: same rows, opposite order
    auto newestTxs = sqliteDb->getNewestAccountTxs(options);
    BEAST_EXPECT(newestTxs.size() == 5);
    // Test binary format versions: must return the same row counts
    auto oldestTxsB = sqliteDb->getOldestAccountTxsB(options);
    BEAST_EXPECT(oldestTxsB.size() == 5);
    auto newestTxsB = sqliteDb->getNewestAccountTxsB(options);
    BEAST_EXPECT(newestTxsB.size() == 5);
    // Test with limit
    options.limit = 1;
    auto limitedTxs = sqliteDb->getOldestAccountTxs(options);
    BEAST_EXPECT(limitedTxs.size() == 1);
    // Test with offset: skipping 1 of 5 leaves 4
    options.limit = 10;
    options.offset = 1;
    auto offsetTxs = sqliteDb->getOldestAccountTxs(options);
    BEAST_EXPECT(offsetTxs.size() == 4);
    // Test with invalid account: never funded, so no rows exist
    {
        Account invalidAccount("invalid");
        RelationalDatabase::AccountTxOptions invalidOptions{
            invalidAccount.id(), *minSeq, *maxSeq, 0, 10, false};
        auto invalidAccountTxs =
            sqliteDb->getOldestAccountTxs(invalidOptions);
        BEAST_EXPECT(invalidAccountTxs.empty());
    }
}
// Exercises the paged per-account transaction API: oldest/newest pages
// (binary and non-binary) with a page size of 2, and continuation via
// the returned marker.
void
testAccountTransactionPaging(
    std::string const& backend,
    std::unique_ptr<Config> config)
{
    testcase("Account transaction paging operations - " + backend);
    using namespace test::jtx;
    // Keep plenty of ledger history so nothing is pruned mid-test.
    config->LEDGER_HISTORY = 1000;
    Env env(*this, std::move(config));
    auto& db = env.app().getRelationalDatabase();
    Account alice("alice");
    Account bob("bob");
    env.fund(XRP(10000), alice, bob);
    env.close();
    auto* sqliteDb = getInterface(db);
    BEAST_EXPECT(sqliteDb != nullptr);
    if (!sqliteDb)
        return;
    // Create multiple transactions for paging (5 payments, one per
    // ledger, so there are more rows than one page can hold)
    for (int i = 0; i < 5; ++i)
    {
        env(pay(alice, bob, XRP(100 + i)));
        env.close();
    }
    auto minSeq = db.getMinLedgerSeq();
    auto maxSeq = db.getMaxLedgerSeq();
    if (!minSeq || !maxSeq)
        return;
    // (account, minLedger, maxLedger, marker, limit, bAdmin)
    RelationalDatabase::AccountTxPageOptions pageOptions{
        alice.id(), *minSeq, *maxSeq, std::nullopt, 2, false};
    // Test oldestAccountTxPage: full page plus a marker for the rest
    auto [oldestPage, oldestMarker] =
        sqliteDb->oldestAccountTxPage(pageOptions);
    BEAST_EXPECT(oldestPage.size() == 2);
    BEAST_EXPECT(oldestMarker.has_value() == true);
    // Test newestAccountTxPage
    auto [newestPage, newestMarker] =
        sqliteDb->newestAccountTxPage(pageOptions);
    BEAST_EXPECT(newestPage.size() == 2);
    BEAST_EXPECT(newestMarker.has_value() == true);
    // Test binary versions: same page sizes
    auto [oldestPageB, oldestMarkerB] =
        sqliteDb->oldestAccountTxPageB(pageOptions);
    BEAST_EXPECT(oldestPageB.size() == 2);
    auto [newestPageB, newestMarkerB] =
        sqliteDb->newestAccountTxPageB(pageOptions);
    BEAST_EXPECT(newestPageB.size() == 2);
    // Test with marker continuation: the next page is also full
    if (oldestMarker.has_value())
    {
        pageOptions.marker = oldestMarker;
        auto [continuedPage, continuedMarker] =
            sqliteDb->oldestAccountTxPage(pageOptions);
        BEAST_EXPECT(continuedPage.size() == 2);
    }
}
// Exercises the deletion API: single-ledger and before-sequence
// deletion of transactions, account-transaction rows, and ledgers.
// Each expected count below follows from the one before it, so the
// deletion order matters.
void
testDeletionOperations(
    std::string const& backend,
    std::unique_ptr<Config> config)
{
    testcase("Deletion operations - " + backend);
    using namespace test::jtx;
    // Keep plenty of ledger history so nothing is pruned mid-test.
    config->LEDGER_HISTORY = 1000;
    Env env(*this, std::move(config));
    auto& db = env.app().getRelationalDatabase();
    Account alice("alice");
    Account bob("bob");
    env.fund(XRP(10000), alice, bob);
    env.close();
    auto* sqliteDb = getInterface(db);
    BEAST_EXPECT(sqliteDb != nullptr);
    if (!sqliteDb)
        return;
    // Create multiple ledgers and transactions (one payment per close)
    for (int i = 0; i < 3; ++i)
    {
        env(pay(alice, bob, XRP(100 + i)));
        env.close();
    }
    // Baseline: 4 funding txns + 3 payments = 7; 6 + 2*3 = 12
    // account-transaction rows; ledgers 2..6 stored.
    auto initialTxCount = sqliteDb->getTransactionCount();
    BEAST_EXPECT(initialTxCount == 7);
    auto initialAcctTxCount = sqliteDb->getAccountTransactionCount();
    BEAST_EXPECT(initialAcctTxCount == 12);
    auto initialLedgerCount = sqliteDb->getLedgerCountMinMax();
    BEAST_EXPECT(initialLedgerCount.numberOfRows == 5);
    auto maxSeq = db.getMaxLedgerSeq();
    if (!maxSeq || *maxSeq <= 2)
        return;
    // Test deleteTransactionByLedgerSeq: removes the one txn in the
    // newest ledger
    sqliteDb->deleteTransactionByLedgerSeq(*maxSeq);
    auto txCountAfterDelete = sqliteDb->getTransactionCount();
    BEAST_EXPECT(txCountAfterDelete == 6);
    // Test deleteTransactionsBeforeLedgerSeq: drops everything older
    // than maxSeq - 1, leaving a single transaction
    sqliteDb->deleteTransactionsBeforeLedgerSeq(*maxSeq - 1);
    auto txCountAfterBulkDelete = sqliteDb->getTransactionCount();
    BEAST_EXPECT(txCountAfterBulkDelete == 1);
    // Test deleteAccountTransactionsBeforeLedgerSeq
    sqliteDb->deleteAccountTransactionsBeforeLedgerSeq(*maxSeq - 1);
    auto acctTxCountAfterDelete = sqliteDb->getAccountTransactionCount();
    BEAST_EXPECT(acctTxCountAfterDelete == 4);
    // Test deleteBeforeLedgerSeq: drops the oldest ledger row
    auto minSeq = db.getMinLedgerSeq();
    if (minSeq)
    {
        sqliteDb->deleteBeforeLedgerSeq(*minSeq + 1);
        auto ledgerCountAfterDelete = sqliteDb->getLedgerCountMinMax();
        BEAST_EXPECT(ledgerCountAfterDelete.numberOfRows == 4);
    }
}
// Exercises the size/space reporting API.  The two backends report
// different things — RWDB reports actual data memory (rounded down to
// whole KB), while SQLite reports engine/cache memory — so the
// expectations are branched on `backend`.  Also checks that closing
// the databases does not throw.
void
testDatabaseSpaceOperations(
    std::string const& backend,
    std::unique_ptr<Config> config)
{
    testcase("Database space and size operations - " + backend);
    using namespace test::jtx;
    Env env(*this, std::move(config));
    auto& db = env.app().getRelationalDatabase();
    auto* sqliteDb = getInterface(db);
    BEAST_EXPECT(sqliteDb != nullptr);
    if (!sqliteDb)
        return;
    // Test size queries on a freshly-started database
    auto allKB = sqliteDb->getKBUsedAll();
    auto ledgerKB = sqliteDb->getKBUsedLedger();
    auto txKB = sqliteDb->getKBUsedTransaction();
    if (backend == "rwdb")
    {
        // RWDB reports actual data memory (rounded down to KB)
        // Initially should be < 1KB, so rounds down to 0
        // Note: These are 0 due to rounding, not because there's literally
        // no data
        BEAST_EXPECT(allKB == 0);     // < 1024 bytes rounds to 0 KB
        BEAST_EXPECT(ledgerKB == 0);  // < 1024 bytes rounds to 0 KB
        BEAST_EXPECT(txKB == 0);      // < 1024 bytes rounds to 0 KB
    }
    else
    {
        // SQLite reports cache/engine memory which has overhead even when
        // empty Just verify the functions return reasonable values
        BEAST_EXPECT(allKB >= 0);
        BEAST_EXPECT(ledgerKB >= 0);
        BEAST_EXPECT(txKB >= 0);
    }
    // Create some data and verify size increases
    Account alice("alice");
    env.fund(XRP(10000), alice);
    env.close();
    auto newAllKB = sqliteDb->getKBUsedAll();
    auto newLedgerKB = sqliteDb->getKBUsedLedger();
    auto newTxKB = sqliteDb->getKBUsedTransaction();
    if (backend == "rwdb")
    {
        // RWDB reports actual data memory
        // After adding data, should see some increase
        BEAST_EXPECT(newAllKB >= 1);  // Should have at least 1KB total
        BEAST_EXPECT(
            newTxKB >= 0);  // Transactions added (might still be < 1KB)
        BEAST_EXPECT(
            newLedgerKB >= 0);  // Ledger data (might still be < 1KB)
        // Key relationships
        BEAST_EXPECT(newAllKB >= newLedgerKB + newTxKB);  // Total >= parts
        BEAST_EXPECT(newAllKB >= allKB);  // Should increase or stay same
        BEAST_EXPECT(newTxKB >= txKB);    // Should increase or stay same
    }
    else
    {
        // SQLite: Memory usage should not decrease after adding data
        // Values might increase due to cache growth
        BEAST_EXPECT(newAllKB >= allKB);
        BEAST_EXPECT(newLedgerKB >= ledgerKB);
        BEAST_EXPECT(newTxKB >= txKB);
        // SQLite's getKBUsedAll is global memory, should be >= parts
        BEAST_EXPECT(newAllKB >= newLedgerKB);
        BEAST_EXPECT(newAllKB >= newTxKB);
    }
    // Test space availability
    // Both SQLite and RWDB use in-memory databases in standalone mode,
    // so file-based space checks don't apply to either backend.
    // Skip these checks for both.
    // if (backend == "rwdb")
    // {
    //     BEAST_EXPECT(db.ledgerDbHasSpace(env.app().config()));
    //     BEAST_EXPECT(db.transactionDbHasSpace(env.app().config()));
    // }
    // Test database closure operations (should not throw)
    try
    {
        sqliteDb->closeLedgerDB();
        sqliteDb->closeTransactionDB();
    }
    catch (std::exception const& e)
    {
        BEAST_EXPECT(false);  // Should not throw
    }
}
// Verifies the minimum-transaction-ledger trackers: unset on an empty
// database, set to the ledger of the first stored transaction, and
// unchanged by later transactions.
void
testTransactionMinLedgerSeq(
    std::string const& backend,
    std::unique_ptr<Config> config)
{
    testcase("Transaction minimum ledger sequence tracking - " + backend);
    using namespace test::jtx;
    // Keep plenty of ledger history so nothing is pruned mid-test.
    config->LEDGER_HISTORY = 1000;
    Env env(*this, std::move(config));
    auto& db = env.app().getRelationalDatabase();
    auto* sqliteDb = getInterface(db);
    BEAST_EXPECT(sqliteDb != nullptr);
    if (!sqliteDb)
        return;
    // Initially should have no transactions
    BEAST_EXPECT(!sqliteDb->getTransactionsMinLedgerSeq().has_value());
    BEAST_EXPECT(
        !sqliteDb->getAccountTransactionsMinLedgerSeq().has_value());
    Account alice("alice");
    Account bob("bob");
    env.fund(XRP(10000), alice, bob);
    env.close();
    // Create first transaction
    env(pay(alice, bob, XRP(1000)));
    env.close();
    // The funding close produced ledger 3, which holds the earliest
    // stored transactions.
    auto txMinSeq = sqliteDb->getTransactionsMinLedgerSeq();
    auto acctTxMinSeq = sqliteDb->getAccountTransactionsMinLedgerSeq();
    BEAST_EXPECT(txMinSeq.has_value());
    BEAST_EXPECT(acctTxMinSeq.has_value());
    BEAST_EXPECT(*txMinSeq == 3);
    BEAST_EXPECT(*acctTxMinSeq == 3);
    // Create more transactions
    env(pay(bob, alice, XRP(500)));
    env.close();
    env(pay(alice, bob, XRP(250)));
    env.close();
    // Min sequences should remain the same (first transaction ledger)
    auto newTxMinSeq = sqliteDb->getTransactionsMinLedgerSeq();
    auto newAcctTxMinSeq = sqliteDb->getAccountTransactionsMinLedgerSeq();
    BEAST_EXPECT(newTxMinSeq == txMinSeq);
    BEAST_EXPECT(newAcctTxMinSeq == acctTxMinSeq);
}
std::vector<std::string> static getBackends(std::string const& unittest_arg)
{
    // The backends this suite knows how to exercise.
    static std::set<std::string> const validBackends = {"sqlite", "rwdb"};

    // No argument selects every known backend (set order => sorted).
    if (unittest_arg.empty())
        return {validBackends.begin(), validBackends.end()};

    // Parse the comma-separated selection; a std::set both removes
    // duplicates and yields a sorted result. Unknown names are dropped.
    std::set<std::string> chosen;
    std::istringstream tokens(unittest_arg);
    for (std::string name; std::getline(tokens, name, ',');)
    {
        if (!name.empty() && validBackends.count(name) != 0)
            chosen.insert(name);
    }
    return {chosen.begin(), chosen.end()};
}
void
run() override
{
auto backends = getBackends(arg());
if (backends.empty())
{
fail("no valid backend specified: '" + arg() + "'");
}
for (auto const& backend : backends)
{
testBasicInitialization(backend, makeConfig(backend));
testLedgerSequenceOperations(backend, makeConfig(backend));
testLedgerInfoOperations(backend, makeConfig(backend));
testHashOperations(backend, makeConfig(backend));
testTransactionOperations(backend, makeConfig(backend));
testAccountTransactionOperations(backend, makeConfig(backend));
testAccountTransactionPaging(backend, makeConfig(backend));
testDeletionOperations(backend, makeConfig(backend));
testDatabaseSpaceOperations(backend, makeConfig(backend));
testTransactionMinLedgerSeq(backend, makeConfig(backend));
}
}
};
BEAST_DEFINE_TESTSUITE(RelationalDatabase, rdb, ripple);
} // namespace test
} // namespace ripple

View File

@@ -19,9 +19,9 @@
#ifndef TEST_UNIT_TEST_SUITE_JOURNAL_H
#define TEST_UNIT_TEST_SUITE_JOURNAL_H
#include <ripple/beast/unit_test.h>
#include <ripple/beast/utility/Journal.h>
#include <mutex>
namespace ripple {
namespace test {
@@ -82,7 +82,13 @@ SuiteJournalSink::write(
// Only write the string if the level at least equals the threshold.
if (level >= threshold())
{
// std::endl flushes → sync() → str()/str("") race in shared buffer →
// crashes
static std::mutex log_mutex;
std::lock_guard lock(log_mutex);
suite_.log << s << partition_ << text << std::endl;
}
}
class SuiteJournal

View File

@@ -332,7 +332,6 @@ multi_runner_child::run_multi(Pred pred)
{
if (!pred(*t))
continue;
try
{
failed = run(*t) || failed;