Compare commits

..

9 Commits

Author SHA1 Message Date
Nicholas Dudfield
4089b7c4ba fix: remove hallucinated code 2025-08-18 16:52:44 +07:00
Nicholas Dudfield
61b364ec82 chore: clang format 2025-08-18 16:28:41 +07:00
Nicholas Dudfield
7fae6c3eb7 replace index-based account data with map of vectors
- Change AccountTxData to use map<ledgerSeq, vector<AccountTx>>
- Remove vector index storage to prevent invalidation during cleanup
- Update account transaction query functions accordingly
- Fix type conversions for transaction sequence markers
2025-08-18 16:22:52 +07:00
Nicholas Dudfield
70dd2a0f0e wip 2025-08-18 16:11:06 +07:00
Nicholas Dudfield
d15063bca4 remove flatmap database implementation 2025-08-18 15:11:54 +07:00
Nicholas Dudfield
998ae5535b add automatic ledger history cleanup to rwdb 2025-08-18 14:16:16 +07:00
Nicholas Dudfield
4c41d32276 use ordered ledgers_ map for faster first txn lookup 2025-08-18 12:01:39 +07:00
RichardAH
a2137d5436 Merge branch 'dev' into fix-online-delete 2025-08-14 14:02:57 +10:00
Denis Angell
a84d72a7f7 fix online delete 2025-07-10 13:07:53 +02:00
23 changed files with 303 additions and 2159 deletions

View File

@@ -59,6 +59,7 @@ runs:
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-
- name: Configure project
shell: bash

View File

@@ -54,6 +54,7 @@ runs:
restore-keys: |
${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-
${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
${{ runner.os }}-conan-v${{ inputs.cache_version }}-
- name: Export custom recipes
shell: bash

View File

@@ -22,14 +22,13 @@ jobs:
configuration: [Debug]
include:
- compiler: gcc
cc: gcc-13
cxx: g++-13
compiler_id: gcc-13
compiler_version: 13
cc: gcc-11
cxx: g++-11
compiler_id: gcc-11
env:
build_dir: .build
# Bump this number to invalidate all caches globally.
CACHE_VERSION: 2
CACHE_VERSION: 1
MAIN_BRANCH_NAME: dev
steps:
- name: Checkout
@@ -59,8 +58,12 @@ jobs:
conan profile update env.CXX=/usr/bin/${{ matrix.cxx }} default
conan profile update conf.tools.build:compiler_executables='{"c": "/usr/bin/${{ matrix.cc }}", "cpp": "/usr/bin/${{ matrix.cxx }}"}' default
# Set compiler version from matrix
conan profile update settings.compiler.version=${{ matrix.compiler_version }} default
# Set correct compiler version based on matrix.compiler
if [ "${{ matrix.compiler }}" = "gcc" ]; then
conan profile update settings.compiler.version=11 default
elif [ "${{ matrix.compiler }}" = "clang" ]; then
conan profile update settings.compiler.version=14 default
fi
# Display profile for verification
conan profile show default
@@ -117,4 +120,4 @@ jobs:
else
echo "Error: rippled executable not found in ${{ env.build_dir }}"
exit 1
fi
fi

View File

@@ -548,7 +548,6 @@ target_sources (rippled PRIVATE
src/ripple/nodestore/backend/CassandraFactory.cpp
src/ripple/nodestore/backend/RWDBFactory.cpp
src/ripple/nodestore/backend/MemoryFactory.cpp
src/ripple/nodestore/backend/FlatmapFactory.cpp
src/ripple/nodestore/backend/NuDBFactory.cpp
src/ripple/nodestore/backend/NullFactory.cpp
src/ripple/nodestore/backend/RocksDBFactory.cpp

View File

@@ -1,4 +1,3 @@
#include <cstdint>
#include <map>
#include <set>
#include <string>

View File

@@ -1,851 +0,0 @@
#ifndef RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED
#define RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED
#include <ripple/app/ledger/AcceptedLedger.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/TransactionMaster.h>
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
#include <algorithm>
#include <map>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <vector>
#include <boost/unordered/concurrent_flat_map.hpp>
namespace ripple {
struct base_uint_hasher
{
using result_type = std::size_t;
result_type
operator()(base_uint<256> const& value) const
{
return hardened_hash<>{}(value);
}
result_type
operator()(AccountID const& value) const
{
return hardened_hash<>{}(value);
}
};
// In-memory implementation of the SQLiteDatabase interface backed by
// Boost concurrent_flat_map containers. Nothing is persisted to disk;
// all ledger headers, transactions, and per-account transaction indexes
// live in process memory and are lost on shutdown.
//
// NOTE(review): consistency across the four maps below is maintained
// cooperatively by each member function; there is no single lock, so
// readers may observe one map updated before another — confirm callers
// tolerate that.
class FlatmapDatabase : public SQLiteDatabase
{
private:
    // Per-ledger record: the ledger header plus txID -> (Transaction, TxMeta).
    struct LedgerData
    {
        LedgerInfo info;
        boost::unordered::
            concurrent_flat_map<uint256, AccountTx, base_uint_hasher>
                transactions;
    };
    // Per-account index keyed by (ledgerSeq, txSeq) pairs.
    struct AccountTxData
    {
        boost::unordered::
            concurrent_flat_map<std::pair<uint32_t, uint32_t>, AccountTx>
                transactions;
    };
    Application& app_;
    // ledger sequence -> ledger data (header + transactions).
    boost::unordered::concurrent_flat_map<LedgerIndex, LedgerData> ledgers_;
    // ledger hash -> ledger sequence (secondary index into ledgers_).
    boost::unordered::
        concurrent_flat_map<uint256, LedgerIndex, base_uint_hasher>
            ledgerHashToSeq_;
    // transaction ID -> (Transaction, TxMeta), across all ledgers.
    boost::unordered::concurrent_flat_map<uint256, AccountTx, base_uint_hasher>
        transactionMap_;
    // account -> per-account transaction index.
    boost::unordered::
        concurrent_flat_map<AccountID, AccountTxData, base_uint_hasher>
            accountTxMap_;

public:
    // config and jobQueue are accepted for interface parity but unused.
    FlatmapDatabase(Application& app, Config const& config, JobQueue& jobQueue)
        : app_(app)
    {
    }

    // Smallest ledger sequence held, or nullopt if empty.
    // NOTE(review): full O(n) scan — the map is unordered.
    std::optional<LedgerIndex>
    getMinLedgerSeq() override
    {
        std::optional<LedgerIndex> minSeq;
        ledgers_.visit_all([&minSeq](auto const& pair) {
            if (!minSeq || pair.first < *minSeq)
            {
                minSeq = pair.first;
            }
        });
        return minSeq;
    }

    // Smallest ledger sequence that owns any stored transaction.
    // O(n) over every stored transaction.
    std::optional<LedgerIndex>
    getTransactionsMinLedgerSeq() override
    {
        std::optional<LedgerIndex> minSeq;
        transactionMap_.visit_all([&minSeq](auto const& pair) {
            LedgerIndex seq = pair.second.second->getLgrSeq();
            if (!minSeq || seq < *minSeq)
            {
                minSeq = seq;
            }
        });
        return minSeq;
    }

    // Smallest ledger sequence appearing in any account's index.
    // Scans every (ledgerSeq, txSeq) key of every account.
    std::optional<LedgerIndex>
    getAccountTransactionsMinLedgerSeq() override
    {
        std::optional<LedgerIndex> minSeq;
        accountTxMap_.visit_all([&minSeq](auto const& pair) {
            pair.second.transactions.visit_all([&minSeq](auto const& tx) {
                if (!minSeq || tx.first.first < *minSeq)
                {
                    minSeq = tx.first.first;
                }
            });
        });
        return minSeq;
    }

    // Largest ledger sequence held, or nullopt if empty. O(n) scan.
    std::optional<LedgerIndex>
    getMaxLedgerSeq() override
    {
        std::optional<LedgerIndex> maxSeq;
        ledgers_.visit_all([&maxSeq](auto const& pair) {
            if (!maxSeq || pair.first > *maxSeq)
            {
                maxSeq = pair.first;
            }
        });
        return maxSeq;
    }

    // Drop all transactions belonging to one ledger from the global
    // transaction map, the ledger's own map, and every account index.
    // The ledger header itself is kept.
    void
    deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) override
    {
        ledgers_.visit(ledgerSeq, [this](auto& item) {
            item.second.transactions.visit_all([this](auto const& txPair) {
                transactionMap_.erase(txPair.first);
            });
            item.second.transactions.clear();
        });
        // Remove the ledger's entries from each account's index.
        accountTxMap_.visit_all([ledgerSeq](auto& item) {
            item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
                return tx.first.first == ledgerSeq;
            });
        });
    }

    // Delete every ledger (header, transactions, hash index, account
    // index entries) with sequence strictly below ledgerSeq.
    void
    deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) override
    {
        ledgers_.erase_if([this, ledgerSeq](auto const& item) {
            if (item.first < ledgerSeq)
            {
                item.second.transactions.visit_all([this](auto const& txPair) {
                    transactionMap_.erase(txPair.first);
                });
                ledgerHashToSeq_.erase(item.second.info.hash);
                return true;
            }
            return false;
        });
        accountTxMap_.visit_all([ledgerSeq](auto& item) {
            item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
                return tx.first.first < ledgerSeq;
            });
        });
    }

    // Delete transactions of all ledgers below ledgerSeq but keep the
    // ledger headers themselves (contrast with deleteBeforeLedgerSeq).
    void
    deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override
    {
        ledgers_.visit_all([this, ledgerSeq](auto& item) {
            if (item.first < ledgerSeq)
            {
                item.second.transactions.visit_all([this](auto const& txPair) {
                    transactionMap_.erase(txPair.first);
                });
                item.second.transactions.clear();
            }
        });
        accountTxMap_.visit_all([ledgerSeq](auto& item) {
            item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
                return tx.first.first < ledgerSeq;
            });
        });
    }

    // Trim only the per-account indexes below ledgerSeq; global
    // transaction and ledger maps are untouched.
    void
    deleteAccountTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override
    {
        accountTxMap_.visit_all([ledgerSeq](auto& item) {
            item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
                return tx.first.first < ledgerSeq;
            });
        });
    }

    // Total transactions across all ledgers.
    std::size_t
    getTransactionCount() override
    {
        return transactionMap_.size();
    }

    // Total (account, transaction) index entries; a transaction touching
    // k accounts counts k times.
    std::size_t
    getAccountTransactionCount() override
    {
        std::size_t count = 0;
        accountTxMap_.visit_all([&count](auto const& item) {
            count += item.second.transactions.size();
        });
        return count;
    }

    // Row count plus min/max ledger sequence in a single pass.
    // NOTE(review): minLedgerSequence uses 0 as the "unset" sentinel, so a
    // genuine ledger sequence of 0 would be indistinguishable — confirm
    // sequence 0 cannot occur.
    CountMinMax
    getLedgerCountMinMax() override
    {
        CountMinMax result{0, 0, 0};
        ledgers_.visit_all([&result](auto const& item) {
            result.numberOfRows++;
            if (result.minLedgerSequence == 0 ||
                item.first < result.minLedgerSequence)
            {
                result.minLedgerSequence = item.first;
            }
            if (item.first > result.maxLedgerSequence)
            {
                result.maxLedgerSequence = item.first;
            }
        });
        return result;
    }

    // Store a validated ledger: header, its transactions, the hash->seq
    // index, and per-account entries. When `current` is true, also prune
    // history older than LEDGER_HISTORY (bounded to BATCH_SIZE ledgers
    // per call to amortize cleanup cost).
    // Returns false (after best-effort rollback of this ledger's
    // transactions) if anything throws.
    bool
    saveValidatedLedger(
        std::shared_ptr<Ledger const> const& ledger,
        bool current) override
    {
        try
        {
            LedgerData ledgerData;
            ledgerData.info = ledger->info();
            auto aLedger = std::make_shared<AcceptedLedger>(ledger, app_);
            for (auto const& acceptedLedgerTx : *aLedger)
            {
                auto const& txn = acceptedLedgerTx->getTxn();
                auto const& meta = acceptedLedgerTx->getMeta();
                auto const& id = txn->getTransactionID();
                std::string reason;
                auto accTx = std::make_pair(
                    std::make_shared<ripple::Transaction>(txn, reason, app_),
                    std::make_shared<ripple::TxMeta>(meta));
                ledgerData.transactions.emplace(id, accTx);
                transactionMap_.emplace(id, accTx);
                for (auto const& account : meta.getAffectedAccounts())
                {
                    // NOTE(review): visit() only runs for keys already
                    // present — an account with no existing AccountTxData
                    // entry is silently skipped here, so its index is
                    // never created. Confirm this is intended (the RWDB
                    // port inserts a fresh entry instead).
                    accountTxMap_.visit(account, [&](auto& data) {
                        data.second.transactions.emplace(
                            std::make_pair(
                                ledger->info().seq,
                                acceptedLedgerTx->getTxnSeq()),
                            accTx);
                    });
                }
            }
            ledgers_.emplace(ledger->info().seq, std::move(ledgerData));
            ledgerHashToSeq_.emplace(ledger->info().hash, ledger->info().seq);
            if (current)
            {
                // Everything below cutoffSeq is out of the configured
                // retention window.
                auto const cutoffSeq =
                    ledger->info().seq > app_.config().LEDGER_HISTORY
                    ? ledger->info().seq - app_.config().LEDGER_HISTORY
                    : 0;
                if (cutoffSeq > 0)
                {
                    // Cap deletions per call so a single save never
                    // stalls on a huge backlog.
                    const std::size_t BATCH_SIZE = 128;
                    std::size_t deleted = 0;
                    ledgers_.erase_if([&](auto const& item) {
                        if (deleted >= BATCH_SIZE)
                            return false;
                        if (item.first < cutoffSeq)
                        {
                            item.second.transactions.visit_all(
                                [this](auto const& txPair) {
                                    transactionMap_.erase(txPair.first);
                                });
                            ledgerHashToSeq_.erase(item.second.info.hash);
                            deleted++;
                            return true;
                        }
                        return false;
                    });
                    if (deleted > 0)
                    {
                        // Trim account indexes only when something was
                        // actually removed above.
                        accountTxMap_.visit_all([cutoffSeq](auto& item) {
                            item.second.transactions.erase_if(
                                [cutoffSeq](auto const& tx) {
                                    return tx.first.first < cutoffSeq;
                                });
                        });
                    }
                    app_.getLedgerMaster().clearPriorLedgers(cutoffSeq);
                }
            }
            return true;
        }
        catch (std::exception const&)
        {
            // Best-effort rollback: remove whatever transactions of this
            // ledger made it into the maps before the exception.
            deleteTransactionByLedgerSeq(ledger->info().seq);
            return false;
        }
    }

    // Ledger header by sequence, or nullopt if absent.
    std::optional<LedgerInfo>
    getLedgerInfoByIndex(LedgerIndex ledgerSeq) override
    {
        std::optional<LedgerInfo> result;
        ledgers_.visit(ledgerSeq, [&result](auto const& item) {
            result = item.second.info;
        });
        return result;
    }

    // Header of the highest-sequence ledger held. O(n) scan.
    std::optional<LedgerInfo>
    getNewestLedgerInfo() override
    {
        std::optional<LedgerInfo> result;
        ledgers_.visit_all([&result](auto const& item) {
            if (!result || item.second.info.seq > result->seq)
            {
                result = item.second.info;
            }
        });
        return result;
    }

    // Oldest ledger at or above ledgerFirstIndex, or nullopt.
    std::optional<LedgerInfo>
    getLimitedOldestLedgerInfo(LedgerIndex ledgerFirstIndex) override
    {
        std::optional<LedgerInfo> result;
        ledgers_.visit_all([&](auto const& item) {
            if (item.first >= ledgerFirstIndex &&
                (!result || item.first < result->seq))
            {
                result = item.second.info;
            }
        });
        return result;
    }

    // Newest ledger at or above ledgerFirstIndex, or nullopt.
    std::optional<LedgerInfo>
    getLimitedNewestLedgerInfo(LedgerIndex ledgerFirstIndex) override
    {
        std::optional<LedgerInfo> result;
        ledgers_.visit_all([&](auto const& item) {
            if (item.first >= ledgerFirstIndex &&
                (!result || item.first > result->seq))
            {
                result = item.second.info;
            }
        });
        return result;
    }

    // Header lookup by ledger hash via the hash->sequence index.
    std::optional<LedgerInfo>
    getLedgerInfoByHash(uint256 const& ledgerHash) override
    {
        std::optional<LedgerInfo> result;
        ledgerHashToSeq_.visit(ledgerHash, [this, &result](auto const& item) {
            ledgers_.visit(item.second, [&result](auto const& item) {
                result = item.second.info;
            });
        });
        return result;
    }

    // Hash of the ledger at ledgerIndex.
    // NOTE(review): returns a default-constructed uint256 when the ledger
    // is absent — callers must treat the zero hash as "not found".
    uint256
    getHashByIndex(LedgerIndex ledgerIndex) override
    {
        uint256 result;
        ledgers_.visit(ledgerIndex, [&result](auto const& item) {
            result = item.second.info.hash;
        });
        return result;
    }

    // Hash and parent hash of one ledger, or nullopt if absent.
    std::optional<LedgerHashPair>
    getHashesByIndex(LedgerIndex ledgerIndex) override
    {
        std::optional<LedgerHashPair> result;
        ledgers_.visit(ledgerIndex, [&result](auto const& item) {
            result = LedgerHashPair{
                item.second.info.hash, item.second.info.parentHash};
        });
        return result;
    }

    // Hash pairs for every held ledger in [minSeq, maxSeq], ordered by
    // sequence (std::map). Missing ledgers are simply absent.
    std::map<LedgerIndex, LedgerHashPair>
    getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) override
    {
        std::map<LedgerIndex, LedgerHashPair> result;
        ledgers_.visit_all([&](auto const& item) {
            if (item.first >= minSeq && item.first <= maxSeq)
            {
                result[item.first] = LedgerHashPair{
                    item.second.info.hash, item.second.info.parentHash};
            }
        });
        return result;
    }

    // Look up a transaction by ID. If a ledger range is given and the
    // stored transaction falls outside it, reports TxSearched::all;
    // unknown IDs report TxSearched::unknown. `ec` is never set here.
    std::variant<AccountTx, TxSearched>
    getTransaction(
        uint256 const& id,
        std::optional<ClosedInterval<std::uint32_t>> const& range,
        error_code_i& ec) override
    {
        std::variant<AccountTx, TxSearched> result = TxSearched::unknown;
        transactionMap_.visit(id, [&](auto const& item) {
            auto const& tx = item.second;
            if (!range ||
                (range->lower() <= tx.second->getLgrSeq() &&
                 tx.second->getLgrSeq() <= range->upper()))
            {
                result = tx;
            }
            else
            {
                result = TxSearched::all;
            }
        });
        return result;
    }

    bool
    ledgerDbHasSpace(Config const& config) override
    {
        return true;  // In-memory database always has space
    }

    bool
    transactionDbHasSpace(Config const& config) override
    {
        return true;  // In-memory database always has space
    }

    // Rough memory footprint in KB. Uses sizeof-based estimates only;
    // does not account for heap allocations inside the stored objects.
    std::uint32_t
    getKBUsedAll() override
    {
        std::uint32_t size = sizeof(*this);
        size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
        size +=
            ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
        size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
        accountTxMap_.visit_all([&size](auto const& item) {
            size += sizeof(AccountID) + sizeof(AccountTxData);
            size += item.second.transactions.size() * sizeof(AccountTx);
        });
        return size / 1024;  // Convert to KB
    }

    // Estimated KB used by ledger headers and the hash index.
    std::uint32_t
    getKBUsedLedger() override
    {
        std::uint32_t size =
            ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
        size +=
            ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
        return size / 1024;
    }

    // Estimated KB used by transaction storage and account indexes.
    std::uint32_t
    getKBUsedTransaction() override
    {
        std::uint32_t size =
            transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
        accountTxMap_.visit_all([&size](auto const& item) {
            size += sizeof(AccountID) + sizeof(AccountTxData);
            size += item.second.transactions.size() * sizeof(AccountTx);
        });
        return size / 1024;
    }

    void
    closeLedgerDB() override
    {
        // No-op for in-memory database
    }

    void
    closeTransactionDB() override
    {
        // No-op for in-memory database
    }

    // Explicitly drain nested concurrent maps before the outer maps are
    // destroyed.
    ~FlatmapDatabase()
    {
        // Concurrent maps need visit_all
        accountTxMap_.visit_all(
            [](auto& pair) { pair.second.transactions.clear(); });
        accountTxMap_.clear();
        transactionMap_.clear();
        ledgers_.visit_all(
            [](auto& pair) { pair.second.transactions.clear(); });
        ledgers_.clear();
        ledgerHashToSeq_.clear();
    }

    // Up to 20 transactions from ledgers >= startIndex, newest ledger
    // first. Collects all matches, sorts, then truncates.
    std::vector<std::shared_ptr<Transaction>>
    getTxHistory(LedgerIndex startIndex) override
    {
        std::vector<std::shared_ptr<Transaction>> result;
        transactionMap_.visit_all([&](auto const& item) {
            if (item.second.second->getLgrSeq() >= startIndex)
            {
                result.push_back(item.second.first);
            }
        });
        std::sort(
            result.begin(), result.end(), [](auto const& a, auto const& b) {
                return a->getLedger() > b->getLedger();
            });
        if (result.size() > 20)
        {
            result.resize(20);
        }
        return result;
    }

    // Helper function to handle limits: truncate `container` to `limit`
    // unless unlimited was requested or limit is 0 (no limit).
    template <typename Container>
    void
    applyLimit(Container& container, std::size_t limit, bool bUnlimited)
    {
        if (!bUnlimited && limit > 0 && container.size() > limit)
        {
            container.resize(limit);
        }
    }

    // Account transactions within [minLedger, maxLedger], oldest ledger
    // first, truncated per options.
    // NOTE(review): ties within a ledger are not ordered by txSeq here,
    // since the sort key is only the ledger sequence.
    AccountTxs
    getOldestAccountTxs(AccountTxOptions const& options) override
    {
        AccountTxs result;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    result.push_back(tx.second);
                }
            });
        });
        std::sort(
            result.begin(), result.end(), [](auto const& a, auto const& b) {
                return a.second->getLgrSeq() < b.second->getLgrSeq();
            });
        applyLimit(result, options.limit, options.bUnlimited);
        return result;
    }

    // Same as getOldestAccountTxs but newest ledger first.
    AccountTxs
    getNewestAccountTxs(AccountTxOptions const& options) override
    {
        AccountTxs result;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    result.push_back(tx.second);
                }
            });
        });
        std::sort(
            result.begin(), result.end(), [](auto const& a, auto const& b) {
                return a.second->getLgrSeq() > b.second->getLgrSeq();
            });
        applyLimit(result, options.limit, options.bUnlimited);
        return result;
    }

    // Binary (serialized) form of getOldestAccountTxs: emits
    // (rawTxn, rawMeta, ledgerSeq) tuples, oldest first.
    MetaTxsList
    getOldestAccountTxsB(AccountTxOptions const& options) override
    {
        MetaTxsList result;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    result.emplace_back(
                        tx.second.first->getSTransaction()
                            ->getSerializer()
                            .peekData(),
                        tx.second.second->getAsObject()
                            .getSerializer()
                            .peekData(),
                        tx.first.first);
                }
            });
        });
        std::sort(
            result.begin(), result.end(), [](auto const& a, auto const& b) {
                return std::get<2>(a) < std::get<2>(b);
            });
        applyLimit(result, options.limit, options.bUnlimited);
        return result;
    }

    // Binary form of getNewestAccountTxs: newest ledger first.
    MetaTxsList
    getNewestAccountTxsB(AccountTxOptions const& options) override
    {
        MetaTxsList result;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    result.emplace_back(
                        tx.second.first->getSTransaction()
                            ->getSerializer()
                            .peekData(),
                        tx.second.second->getAsObject()
                            .getSerializer()
                            .peekData(),
                        tx.first.first);
                }
            });
        });
        std::sort(
            result.begin(), result.end(), [](auto const& a, auto const& b) {
                return std::get<2>(a) > std::get<2>(b);
            });
        applyLimit(result, options.limit, options.bUnlimited);
        return result;
    }

    // Paged account transactions, ascending (ledgerSeq, txSeq). Resumes
    // after options.marker when given; emits a new marker when more
    // results remain past the limit.
    // NOTE(review): if the marker's exact entry was deleted, find_if
    // misses and iteration restarts from the beginning — confirm markers
    // are only used against live data.
    std::pair<AccountTxs, std::optional<AccountTxMarker>>
    oldestAccountTxPage(AccountTxPageOptions const& options) override
    {
        AccountTxs result;
        std::optional<AccountTxMarker> marker;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            // Snapshot matching entries so they can be sorted; the
            // concurrent map itself is unordered.
            std::vector<std::pair<std::pair<uint32_t, uint32_t>, AccountTx>>
                txs;
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    txs.emplace_back(tx);
                }
            });
            std::sort(txs.begin(), txs.end(), [](auto const& a, auto const& b) {
                return a.first < b.first;
            });
            auto it = txs.begin();
            if (options.marker)
            {
                it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
                    return tx.first.first == options.marker->ledgerSeq &&
                        tx.first.second == options.marker->txnSeq;
                });
                if (it != txs.end())
                    ++it;
            }
            for (; it != txs.end() &&
                 (options.limit == 0 || result.size() < options.limit);
                 ++it)
            {
                result.push_back(it->second);
            }
            if (it != txs.end())
            {
                marker = AccountTxMarker{it->first.first, it->first.second};
            }
        });
        return {result, marker};
    }

    // Paged account transactions, descending (ledgerSeq, txSeq).
    std::pair<AccountTxs, std::optional<AccountTxMarker>>
    newestAccountTxPage(AccountTxPageOptions const& options) override
    {
        AccountTxs result;
        std::optional<AccountTxMarker> marker;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            std::vector<std::pair<std::pair<uint32_t, uint32_t>, AccountTx>>
                txs;
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    txs.emplace_back(tx);
                }
            });
            std::sort(txs.begin(), txs.end(), [](auto const& a, auto const& b) {
                return a.first > b.first;
            });
            auto it = txs.begin();
            if (options.marker)
            {
                it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
                    return tx.first.first == options.marker->ledgerSeq &&
                        tx.first.second == options.marker->txnSeq;
                });
                if (it != txs.end())
                    ++it;
            }
            for (; it != txs.end() &&
                 (options.limit == 0 || result.size() < options.limit);
                 ++it)
            {
                result.push_back(it->second);
            }
            if (it != txs.end())
            {
                marker = AccountTxMarker{it->first.first, it->first.second};
            }
        });
        return {result, marker};
    }

    // Binary paged variant, ascending; same marker semantics as
    // oldestAccountTxPage.
    std::pair<MetaTxsList, std::optional<AccountTxMarker>>
    oldestAccountTxPageB(AccountTxPageOptions const& options) override
    {
        MetaTxsList result;
        std::optional<AccountTxMarker> marker;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            std::vector<std::tuple<uint32_t, uint32_t, AccountTx>> txs;
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    txs.emplace_back(
                        tx.first.first, tx.first.second, tx.second);
                }
            });
            std::sort(txs.begin(), txs.end());
            auto it = txs.begin();
            if (options.marker)
            {
                it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
                    return std::get<0>(tx) == options.marker->ledgerSeq &&
                        std::get<1>(tx) == options.marker->txnSeq;
                });
                if (it != txs.end())
                    ++it;
            }
            for (; it != txs.end() &&
                 (options.limit == 0 || result.size() < options.limit);
                 ++it)
            {
                const auto& [_, __, tx] = *it;
                result.emplace_back(
                    tx.first->getSTransaction()->getSerializer().peekData(),
                    tx.second->getAsObject().getSerializer().peekData(),
                    std::get<0>(*it));
            }
            if (it != txs.end())
            {
                marker = AccountTxMarker{std::get<0>(*it), std::get<1>(*it)};
            }
        });
        return {result, marker};
    }

    // Binary paged variant, descending.
    std::pair<MetaTxsList, std::optional<AccountTxMarker>>
    newestAccountTxPageB(AccountTxPageOptions const& options) override
    {
        MetaTxsList result;
        std::optional<AccountTxMarker> marker;
        accountTxMap_.visit(options.account, [&](auto const& item) {
            std::vector<std::tuple<uint32_t, uint32_t, AccountTx>> txs;
            item.second.transactions.visit_all([&](auto const& tx) {
                if (tx.first.first >= options.minLedger &&
                    tx.first.first <= options.maxLedger)
                {
                    txs.emplace_back(
                        tx.first.first, tx.first.second, tx.second);
                }
            });
            std::sort(txs.begin(), txs.end(), std::greater<>());
            auto it = txs.begin();
            if (options.marker)
            {
                it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
                    return std::get<0>(tx) == options.marker->ledgerSeq &&
                        std::get<1>(tx) == options.marker->txnSeq;
                });
                if (it != txs.end())
                    ++it;
            }
            for (; it != txs.end() &&
                 (options.limit == 0 || result.size() < options.limit);
                 ++it)
            {
                const auto& [_, __, tx] = *it;
                result.emplace_back(
                    tx.first->getSTransaction()->getSerializer().peekData(),
                    tx.second->getAsObject().getSerializer().peekData(),
                    std::get<0>(*it));
            }
            if (it != txs.end())
            {
                marker = AccountTxMarker{std::get<0>(*it), std::get<1>(*it)};
            }
        });
        return {result, marker};
    }
};
// Factory: build the in-memory Flatmap-backed SQLiteDatabase and hand
// ownership to the caller.
std::unique_ptr<SQLiteDatabase>
getFlatmapDatabase(Application& app, Config const& config, JobQueue& jobQueue)
{
    std::unique_ptr<SQLiteDatabase> db =
        std::make_unique<FlatmapDatabase>(app, config, jobQueue);
    return db;
}
} // namespace ripple
#endif // RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED

View File

@@ -28,9 +28,8 @@ private:
struct AccountTxData
{
AccountTxs transactions;
std::map<uint32_t, std::map<uint32_t, size_t>>
ledgerTxMap; // ledgerSeq -> txSeq -> index in transactions
std::map<uint32_t, AccountTxs>
ledgerTxMap; // ledgerSeq -> vector of AccountTx
};
Application& app_;
@@ -65,9 +64,12 @@ public:
return {};
std::shared_lock<std::shared_mutex> lock(mutex_);
if (transactionMap_.empty())
return std::nullopt;
return transactionMap_.begin()->second.second->getLgrSeq();
for (const auto& [ledgerSeq, ledgerData] : ledgers_)
{
if (!ledgerData.transactions.empty())
return ledgerSeq;
}
return std::nullopt;
}
std::optional<LedgerIndex>
@@ -163,14 +165,6 @@ public:
{
txIt = accountData.ledgerTxMap.erase(txIt);
}
accountData.transactions.erase(
std::remove_if(
accountData.transactions.begin(),
accountData.transactions.end(),
[ledgerSeq](const AccountTx& tx) {
return tx.second->getLgrSeq() < ledgerSeq;
}),
accountData.transactions.end());
}
}
std::size_t
@@ -193,7 +187,10 @@ public:
std::size_t count = 0;
for (const auto& [_, accountData] : accountTxMap_)
{
count += accountData.transactions.size();
for (const auto& [_, txVector] : accountData.ledgerTxMap)
{
count += txVector.size();
}
}
return count;
}
@@ -293,10 +290,7 @@ public:
accountTxMap_[account] = AccountTxData();
auto& accountData = accountTxMap_[account];
accountData.transactions.push_back(accTx);
accountData
.ledgerTxMap[seq][acceptedLedgerTx->getTxnSeq()] =
accountData.transactions.size() - 1;
accountData.ledgerTxMap[seq].push_back(accTx);
}
app_.getMasterTransaction().inLedger(
@@ -310,6 +304,46 @@ public:
// Overwrite Current Ledger
ledgers_[seq] = std::move(ledgerData);
ledgerHashToSeq_[ledger->info().hash] = seq;
// Automatic cleanup based on LEDGER_HISTORY (ported from
// FlatmapDatabase)
if (current)
{
auto const cutoffSeq =
ledger->info().seq > app_.config().LEDGER_HISTORY
? ledger->info().seq - app_.config().LEDGER_HISTORY
: 0;
if (cutoffSeq > 0)
{
// Delete old ledgers before cutoff
auto it = ledgers_.begin();
while (it != ledgers_.end() && it->first < cutoffSeq)
{
// Clean up transactions from this ledger
for (const auto& [txHash, _] : it->second.transactions)
{
transactionMap_.erase(txHash);
}
ledgerHashToSeq_.erase(it->second.info.hash);
it = ledgers_.erase(it);
}
// Clean up account transactions before cutoff
for (auto& [_, accountData] : accountTxMap_)
{
auto txIt = accountData.ledgerTxMap.begin();
while (txIt != accountData.ledgerTxMap.end() &&
txIt->first < cutoffSeq)
{
txIt = accountData.ledgerTxMap.erase(txIt);
}
}
app_.getLedgerMaster().clearPriorLedgers(cutoffSeq);
}
}
return true;
}
@@ -463,11 +497,9 @@ public:
for (const auto& [_, accountData] : accountTxMap_)
{
size += sizeof(AccountID) + sizeof(AccountTxData);
size += accountData.transactions.size() * sizeof(AccountTx);
for (const auto& [_, innerMap] : accountData.ledgerTxMap)
for (const auto& [_, txVector] : accountData.ledgerTxMap)
{
size += sizeof(uint32_t) +
innerMap.size() * (sizeof(uint32_t) + sizeof(size_t));
size += sizeof(uint32_t) + txVector.size() * sizeof(AccountTx);
}
}
return size / 1024;
@@ -496,11 +528,9 @@ public:
for (const auto& [_, accountData] : accountTxMap_)
{
size += sizeof(AccountID) + sizeof(AccountTxData);
size += accountData.transactions.size() * sizeof(AccountTx);
for (const auto& [_, innerMap] : accountData.ledgerTxMap)
for (const auto& [_, txVector] : accountData.ledgerTxMap)
{
size += sizeof(uint32_t) +
innerMap.size() * (sizeof(uint32_t) + sizeof(size_t));
size += sizeof(uint32_t) + txVector.size() * sizeof(AccountTx);
}
}
return size / 1024;
@@ -605,14 +635,13 @@ public:
(options.bUnlimited || result.size() < options.limit);
++txIt)
{
for (const auto& [txSeq, txIndex] : txIt->second)
for (const auto& accountTx : txIt->second)
{
if (skipped < options.offset)
{
++skipped;
continue;
}
AccountTx const accountTx = accountData.transactions[txIndex];
std::uint32_t const inLedger = rangeCheckedCast<std::uint32_t>(
accountTx.second->getLgrSeq());
accountTx.first->setStatus(COMMITTED);
@@ -657,8 +686,7 @@ public:
++skipped;
continue;
}
AccountTx const accountTx =
accountData.transactions[innerRIt->second];
const AccountTx& accountTx = *innerRIt;
std::uint32_t const inLedger = rangeCheckedCast<std::uint32_t>(
accountTx.second->getLgrSeq());
accountTx.first->setLedger(inLedger);
@@ -692,14 +720,14 @@ public:
(options.bUnlimited || result.size() < options.limit);
++txIt)
{
for (const auto& [txSeq, txIndex] : txIt->second)
for (const auto& accountTx : txIt->second)
{
if (skipped < options.offset)
{
++skipped;
continue;
}
const auto& [txn, txMeta] = accountData.transactions[txIndex];
const auto& [txn, txMeta] = accountTx;
result.emplace_back(
txn->getSTransaction()->getSerializer().peekData(),
txMeta->getAsObject().getSerializer().peekData(),
@@ -743,8 +771,7 @@ public:
++skipped;
continue;
}
const auto& [txn, txMeta] =
accountData.transactions[innerRIt->second];
const auto& [txn, txMeta] = *innerRIt;
result.emplace_back(
txn->getSTransaction()->getSerializer().peekData(),
txMeta->getAsObject().getSerializer().peekData(),
@@ -816,11 +843,9 @@ public:
for (; txIt != txEnd; ++txIt)
{
std::uint32_t const ledgerSeq = txIt->first;
for (auto seqIt = txIt->second.begin();
seqIt != txIt->second.end();
++seqIt)
for (size_t txnSeq = 0; txnSeq < txIt->second.size(); ++txnSeq)
{
const auto& [txnSeq, index] = *seqIt;
const auto& accountTx = txIt->second[txnSeq];
if (lookingForMarker)
{
if (findLedger == ledgerSeq && findSeq == txnSeq)
@@ -833,16 +858,15 @@ public:
else if (numberOfResults == 0)
{
newmarker = {
rangeCheckedCast<std::uint32_t>(ledgerSeq), txnSeq};
rangeCheckedCast<std::uint32_t>(ledgerSeq),
static_cast<std::uint32_t>(txnSeq)};
return {newmarker, total};
}
Blob rawTxn = accountData.transactions[index]
.first->getSTransaction()
Blob rawTxn = accountTx.first->getSTransaction()
->getSerializer()
.peekData();
Blob rawMeta = accountData.transactions[index]
.second->getAsObject()
Blob rawMeta = accountTx.second->getAsObject()
.getSerializer()
.peekData();
@@ -871,11 +895,10 @@ public:
for (; rtxIt != rtxEnd; ++rtxIt)
{
std::uint32_t const ledgerSeq = rtxIt->first;
for (auto innerRIt = rtxIt->second.rbegin();
innerRIt != rtxIt->second.rend();
++innerRIt)
for (int txnSeq = rtxIt->second.size() - 1; txnSeq >= 0;
--txnSeq)
{
const auto& [txnSeq, index] = *innerRIt;
const auto& accountTx = rtxIt->second[txnSeq];
if (lookingForMarker)
{
if (findLedger == ledgerSeq && findSeq == txnSeq)
@@ -888,16 +911,15 @@ public:
else if (numberOfResults == 0)
{
newmarker = {
rangeCheckedCast<std::uint32_t>(ledgerSeq), txnSeq};
rangeCheckedCast<std::uint32_t>(ledgerSeq),
static_cast<std::uint32_t>(txnSeq)};
return {newmarker, total};
}
Blob rawTxn = accountData.transactions[index]
.first->getSTransaction()
Blob rawTxn = accountTx.first->getSTransaction()
->getSerializer()
.peekData();
Blob rawMeta = accountData.transactions[index]
.second->getAsObject()
Blob rawMeta = accountTx.second->getAsObject()
.getSerializer()
.peekData();

View File

@@ -19,7 +19,6 @@
#include <ripple/app/main/Application.h>
#include <ripple/app/rdb/RelationalDatabase.h>
#include <ripple/app/rdb/backend/FlatmapDatabase.h>
#include <ripple/app/rdb/backend/RWDBDatabase.h>
#include <ripple/core/ConfigSections.h>
#include <ripple/nodestore/DatabaseShard.h>
@@ -41,7 +40,6 @@ RelationalDatabase::init(
bool use_sqlite = false;
bool use_postgres = false;
bool use_rwdb = false;
bool use_flatmap = false;
if (config.reporting())
{
@@ -60,10 +58,6 @@ RelationalDatabase::init(
{
use_rwdb = true;
}
else if (boost::iequals(get(rdb_section, "backend"), "flatmap"))
{
use_flatmap = true;
}
else
{
Throw<std::runtime_error>(
@@ -89,10 +83,6 @@ RelationalDatabase::init(
{
return getRWDBDatabase(app, config, jobQueue);
}
else if (use_flatmap)
{
return getFlatmapDatabase(app, config, jobQueue);
}
return std::unique_ptr<RelationalDatabase>();
}

View File

@@ -82,7 +82,6 @@ preflight0(PreflightContext const& ctx)
{
JLOG(ctx.j.warn())
<< "applyTransaction: transaction id may not be zero";
std::cout << "temINVALID " << __LINE__ << "\n";
return temINVALID;
}
@@ -131,10 +130,7 @@ preflight1(PreflightContext const& ctx)
{
if (ctx.tx.getSeqProxy().isTicket() &&
ctx.tx.isFieldPresent(sfAccountTxnID))
{
std::cout << "temINVALID " << __LINE__ << "\n";
return temINVALID;
}
return tesSUCCESS;
}
@@ -167,10 +163,7 @@ preflight1(PreflightContext const& ctx)
// We return temINVALID for such transactions.
if (ctx.tx.getSeqProxy().isTicket() &&
ctx.tx.isFieldPresent(sfAccountTxnID))
{
std::cout << "temINVALID " << __LINE__ << "\n";
return temINVALID;
}
return tesSUCCESS;
}
@@ -188,7 +181,6 @@ preflight2(PreflightContext const& ctx)
if (sigValid.first == Validity::SigBad)
{
JLOG(ctx.j.debug()) << "preflight2: bad signature. " << sigValid.second;
std::cout << "temINVALID " << __LINE__ << "\n";
return temINVALID;
}
return tesSUCCESS;
@@ -297,40 +289,8 @@ Transactor::calculateBaseFee(ReadView const& view, STTx const& tx)
// Each signer adds one more baseFee to the minimum required fee
// for the transaction.
std::size_t signerCount = 0;
if (tx.isFieldPresent(sfSigners))
{
// Define recursive lambda to count all leaf signers
std::function<std::size_t(STArray const&)> countSigners;
countSigners = [&](STArray const& signers) -> std::size_t {
std::size_t count = 0;
for (auto const& signer : signers)
{
if (signer.isFieldPresent(sfSigners))
{
// This is a nested signer - recursively count its signers
count += countSigners(signer.getFieldArray(sfSigners));
}
else
{
// This is a leaf signer (one who actually signs)
// Count it only if it has signing fields (not just a
// placeholder)
if (signer.isFieldPresent(sfSigningPubKey) &&
signer.isFieldPresent(sfTxnSignature))
{
count += 1;
}
}
}
return count;
};
signerCount = countSigners(tx.getFieldArray(sfSigners));
}
std::size_t const signerCount =
tx.isFieldPresent(sfSigners) ? tx.getFieldArray(sfSigners).size() : 0;
XRPAmount hookExecutionFee{0};
uint64_t burden{1};
@@ -963,246 +923,157 @@ NotTEC
Transactor::checkMultiSign(PreclaimContext const& ctx)
{
auto const id = ctx.tx.getAccountID(sfAccount);
// Set max depth based on feature flag
bool const allowNested = ctx.view.rules().enabled(featureNestedMultiSign);
int const maxDepth = allowNested ? 4 : 1;
std::string lineno = "(unknown)";
if (ctx.tx.isFieldPresent(sfMemos))
// Get mTxnAccountID's SignerList and Quorum.
std::shared_ptr<STLedgerEntry const> sleAccountSigners =
ctx.view.read(keylet::signers(id));
// If the signer list doesn't exist the account is not multi-signing.
if (!sleAccountSigners)
{
auto const& memos = ctx.tx.getFieldArray(sfMemos);
for (auto const& memo : memos)
{
auto memoObj = dynamic_cast<STObject const*>(&memo);
auto hex = memoObj->getFieldVL(sfMemoData);
lineno = strHex(hex);
break;
}
JLOG(ctx.j.trace())
<< "applyTransaction: Invalid: Not a multi-signing account.";
return tefNOT_MULTI_SIGNING;
}
// Define recursive lambda for checking signers at any depth
std::function<NotTEC(AccountID const&, STArray const&, int)>
validateSigners;
// We have plans to support multiple SignerLists in the future. The
// presence and defaulted value of the SignerListID field will enable that.
assert(sleAccountSigners->isFieldPresent(sfSignerListID));
assert(sleAccountSigners->getFieldU32(sfSignerListID) == 0);
validateSigners =
[&](AccountID const& acc, STArray const& signers, int depth) -> NotTEC {
// Check depth limit
if (depth > maxDepth)
auto accountSigners =
SignerEntries::deserialize(*sleAccountSigners, ctx.j, "ledger");
if (!accountSigners)
return accountSigners.error();
// Get the array of transaction signers.
STArray const& txSigners(ctx.tx.getFieldArray(sfSigners));
// Walk the accountSigners performing a variety of checks and see if
// the quorum is met.
// Both the multiSigners and accountSigners are sorted by account. So
// matching multi-signers to account signers should be a simple
// linear walk. *All* signers must be valid or the transaction fails.
std::uint32_t weightSum = 0;
auto iter = accountSigners->begin();
for (auto const& txSigner : txSigners)
{
AccountID const txSignerAcctID = txSigner.getAccountID(sfAccount);
// Attempt to match the SignerEntry with a Signer;
while (iter->account < txSignerAcctID)
{
if (allowNested)
if (++iter == accountSigners->end())
{
JLOG(ctx.j.trace())
<< "applyTransaction: Multi-signing depth limit exceeded.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
JLOG(ctx.j.warn())
<< "applyTransaction: Nested multisigning disabled.";
std::cout << "!!! temMALFORMED " << __FILE__ << " " << __LINE__
<< "\n";
return temMALFORMED;
}
// Get the SignerList for the account we're validating signers for
std::shared_ptr<STLedgerEntry const> sleAllowedSigners =
ctx.view.read(keylet::signers(acc));
// If the signer list doesn't exist, this account is not set up for
// multi-signing
if (!sleAllowedSigners)
{
JLOG(ctx.j.trace()) << "applyTransaction: Invalid: Account " << acc
<< " not set up for multi-signing.";
return tefNOT_MULTI_SIGNING;
}
uint32_t quorum = sleAllowedSigners->getFieldU32(sfSignerQuorum);
uint32_t sum{0};
auto allowedSigners =
SignerEntries::deserialize(*sleAllowedSigners, ctx.j, "ledger");
if (!allowedSigners)
return allowedSigners.error();
std::set<AccountID> allowedSignerSet;
for (auto const& as : *allowedSigners)
allowedSignerSet.emplace(as.account);
// Walk the signers array, validating each signer
auto iter = allowedSigners->begin();
for (auto const& signerEntry : signers)
{
AccountID const signer = signerEntry.getAccountID(sfAccount);
bool const isNested = signerEntry.isFieldPresent(sfSigners);
// Find this signer in the authorized SignerEntries list
while (iter->account < signer)
{
std::cout << "iter acc: " << to_string(iter->account) << " < "
<< to_string(signer) << "\n";
if (++iter == allowedSigners->end())
{
JLOG(ctx.j.trace())
<< "applyTransaction: Invalid SigningAccount.Account.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__
<< " in signer set? "
<< (allowedSignerSet.find(signer) ==
allowedSignerSet.end()
? "n"
: "y")
<< "\n";
return tefBAD_SIGNATURE;
}
}
if (iter->account != signer)
{
// The SigningAccount is not in the SignerEntries.
JLOG(ctx.j.trace())
<< "applyTransaction: Invalid SigningAccount.Account.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
}
if (iter->account != txSignerAcctID)
{
// The SigningAccount is not in the SignerEntries.
JLOG(ctx.j.trace())
<< "applyTransaction: Invalid SigningAccount.Account.";
return tefBAD_SIGNATURE;
}
// We found the SigningAccount in the list of valid signers. Now we
// need to compute the accountID that is associated with the signer's
// public key.
auto const spk = txSigner.getFieldVL(sfSigningPubKey);
if (!publicKeyType(makeSlice(spk)))
{
JLOG(ctx.j.trace())
<< "checkMultiSign: signing public key type is unknown";
return tefBAD_SIGNATURE;
}
AccountID const signingAcctIDFromPubKey =
calcAccountID(PublicKey(makeSlice(spk)));
// Verify that the signingAcctID and the signingAcctIDFromPubKey
// belong together. Here is are the rules:
//
// 1. "Phantom account": an account that is not in the ledger
// A. If signingAcctID == signingAcctIDFromPubKey and the
// signingAcctID is not in the ledger then we have a phantom
// account.
// B. Phantom accounts are always allowed as multi-signers.
//
// 2. "Master Key"
// A. signingAcctID == signingAcctIDFromPubKey, and signingAcctID
// is in the ledger.
// B. If the signingAcctID in the ledger does not have the
// asfDisableMaster flag set, then the signature is allowed.
//
// 3. "Regular Key"
// A. signingAcctID != signingAcctIDFromPubKey, and signingAcctID
// is in the ledger.
// B. If signingAcctIDFromPubKey == signingAcctID.RegularKey (from
// ledger) then the signature is allowed.
//
// No other signatures are allowed. (January 2015)
// In any of these cases we need to know whether the account is in
// the ledger. Determine that now.
auto sleTxSignerRoot = ctx.view.read(keylet::account(txSignerAcctID));
if (signingAcctIDFromPubKey == txSignerAcctID)
{
// Either Phantom or Master. Phantoms automatically pass.
if (sleTxSignerRoot)
{
// Master Key. Account may not have asfDisableMaster set.
std::uint32_t const signerAccountFlags =
sleTxSignerRoot->getFieldU32(sfFlags);
if (signerAccountFlags & lsfDisableMaster)
{
JLOG(ctx.j.trace())
<< "applyTransaction: Signer:Account lsfDisableMaster.";
return tefMASTER_DISABLED;
}
}
}
else
{
// May be a Regular Key. Let's find out.
// Public key must hash to the account's regular key.
if (!sleTxSignerRoot)
{
JLOG(ctx.j.trace()) << "applyTransaction: Non-phantom signer "
"lacks account root.";
return tefBAD_SIGNATURE;
}
// Check if this signer has nested signers (delegation)
if (signerEntry.isFieldPresent(sfSigners))
if (!sleTxSignerRoot->isFieldPresent(sfRegularKey))
{
// This is a nested multi-signer that delegates to sub-signers
if (signerEntry.isFieldPresent(sfSigningPubKey) ||
signerEntry.isFieldPresent(sfTxnSignature))
{
JLOG(ctx.j.trace())
<< "applyTransaction: Signer cannot have both nested "
"signers and signature fields.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
// Recursively validate the nested signers against
// signer's signer list
STArray const& nestedSigners =
signerEntry.getFieldArray(sfSigners);
NotTEC result =
validateSigners(signer, nestedSigners, depth + 1);
if (!isTesSuccess(result))
return result;
// If we get here, the nested signers met their quorum
// So we add THIS signer's weight (from current level's signer
// list)
sum += iter->weight;
JLOG(ctx.j.trace())
<< "applyTransaction: Account lacks RegularKey.";
return tefBAD_SIGNATURE;
}
else
if (signingAcctIDFromPubKey !=
sleTxSignerRoot->getAccountID(sfRegularKey))
{
// This is a leaf signer - validate signature as before
if (!signerEntry.isFieldPresent(sfSigningPubKey) ||
!signerEntry.isFieldPresent(sfTxnSignature))
{
JLOG(ctx.j.trace())
<< "applyApplication: Leaf signer must have "
"SigningPubKey and TxnSignature.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
auto const spk = signerEntry.getFieldVL(sfSigningPubKey);
if (!publicKeyType(makeSlice(spk)))
{
JLOG(ctx.j.trace())
<< "checkMultiSign: signing public key type is unknown";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
AccountID const signingAcctIDFromPubKey =
calcAccountID(PublicKey(makeSlice(spk)));
auto sleTxSignerRoot = ctx.view.read(keylet::account(signer));
if (signingAcctIDFromPubKey == signer)
{
if (sleTxSignerRoot)
{
std::uint32_t const signerAccountFlags =
sleTxSignerRoot->getFieldU32(sfFlags);
if (signerAccountFlags & lsfDisableMaster)
{
JLOG(ctx.j.trace())
<< "applyTransaction: Signer:Account "
"lsfDisableMaster.";
return tefMASTER_DISABLED;
}
}
}
else
{
if (!sleTxSignerRoot)
{
JLOG(ctx.j.trace())
<< "applyTransaction: Non-phantom signer "
"lacks account root.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
if (!sleTxSignerRoot->isFieldPresent(sfRegularKey))
{
JLOG(ctx.j.trace())
<< "applyTransaction: Account lacks RegularKey.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
if (signingAcctIDFromPubKey !=
sleTxSignerRoot->getAccountID(sfRegularKey))
{
JLOG(ctx.j.trace()) << "applyTransaction: Account "
"doesn't match RegularKey.";
std::cout << "tefBAD_SIGNATURE: " << __LINE__ << "\n";
return tefBAD_SIGNATURE;
}
}
// Valid leaf signer - add their weight
sum += iter->weight;
JLOG(ctx.j.trace())
<< "applyTransaction: Account doesn't match RegularKey.";
return tefBAD_SIGNATURE;
}
char spacing[] = " ";
spacing[depth] = '\0';
std::cout << spacing << "sig check: "
<< "line: " << lineno << ", a=" << to_string(acc)
<< ", s=" << to_string(signer) << ", w=" << iter->weight
<< ", l=" << (isNested ? "f" : "t") << ", d=" << depth
<< ", " << sum << "/" << quorum << "\n";
}
// Check if this level's accumulated weight meets its required quorum
if (sum < quorum)
{
JLOG(ctx.j.trace())
<< "applyTransaction: Signers failed to meet quorum at depth "
<< depth;
return tefBAD_QUORUM;
}
return tesSUCCESS;
};
STArray const& entries(ctx.tx.getFieldArray(sfSigners));
NotTEC result = validateSigners(id, entries, 1);
if (!isTesSuccess(result))
{
std::cout << "Error: " << transToken(result) << "\n";
return result;
// The signer is legitimate. Add their weight toward the quorum.
weightSum += iter->weight;
}
// The quorum check is already done inside validateSigners for the top level
// so if we get here, we've met the quorum
// Cannot perform transaction if quorum is not met.
if (weightSum < sleAccountSigners->getFieldU32(sfSignerQuorum))
{
JLOG(ctx.j.trace())
<< "applyTransaction: Signers failed to meet quorum.";
return tefBAD_QUORUM;
}
// Met the quorum. Continue.
return tesSUCCESS;
}

View File

@@ -361,9 +361,7 @@ public:
boost::beast::iequals(
get(section(SECTION_RELATIONAL_DB), "backend"), "rwdb")) ||
(!section("node_db").empty() &&
(boost::beast::iequals(get(section("node_db"), "type"), "rwdb") ||
boost::beast::iequals(
get(section("node_db"), "type"), "flatmap")));
boost::beast::iequals(get(section("node_db"), "type"), "rwdb"));
// RHNOTE: memory type is not selected for here because it breaks
// tests
return isMem;

View File

@@ -1,235 +0,0 @@
#include <ripple/basics/contract.h>
#include <ripple/nodestore/Factory.h>
#include <ripple/nodestore/Manager.h>
#include <ripple/nodestore/impl/DecodedBlob.h>
#include <ripple/nodestore/impl/EncodedBlob.h>
#include <ripple/nodestore/impl/codec.h>
#include <boost/beast/core/string.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/unordered/concurrent_flat_map.hpp>
#include <memory>
#include <mutex>
namespace ripple {
namespace NodeStore {
class FlatmapBackend : public Backend
{
private:
std::string name_;
beast::Journal journal_;
bool isOpen_{false};
struct base_uint_hasher
{
using result_type = std::size_t;
result_type
operator()(base_uint<256> const& value) const
{
return hardened_hash<>{}(value);
}
};
using DataStore = boost::unordered::concurrent_flat_map<
uint256,
std::vector<std::uint8_t>, // Store compressed blob data
base_uint_hasher>;
DataStore table_;
public:
FlatmapBackend(
size_t keyBytes,
Section const& keyValues,
beast::Journal journal)
: name_(get(keyValues, "path")), journal_(journal)
{
boost::ignore_unused(journal_);
if (name_.empty())
name_ = "node_db";
}
~FlatmapBackend() override
{
close();
}
std::string
getName() override
{
return name_;
}
void
open(bool createIfMissing) override
{
if (isOpen_)
Throw<std::runtime_error>("already open");
isOpen_ = true;
}
bool
isOpen() override
{
return isOpen_;
}
void
close() override
{
table_.clear();
isOpen_ = false;
}
Status
fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
{
if (!isOpen_)
return notFound;
uint256 const hash(uint256::fromVoid(key));
bool found = table_.visit(hash, [&](const auto& key_value_pair) {
nudb::detail::buffer bf;
auto const result = nodeobject_decompress(
key_value_pair.second.data(), key_value_pair.second.size(), bf);
DecodedBlob decoded(hash.data(), result.first, result.second);
if (!decoded.wasOk())
{
*pObject = nullptr;
return;
}
*pObject = decoded.createObject();
});
return found ? (*pObject ? ok : dataCorrupt) : notFound;
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
Status status = fetch(h->begin(), &nObj);
if (status != ok)
results.push_back({});
else
results.push_back(nObj);
}
return {results, ok};
}
void
store(std::shared_ptr<NodeObject> const& object) override
{
if (!isOpen_)
return;
if (!object)
return;
EncodedBlob encoded(object);
nudb::detail::buffer bf;
auto const result =
nodeobject_compress(encoded.getData(), encoded.getSize(), bf);
std::vector<std::uint8_t> compressed(
static_cast<const std::uint8_t*>(result.first),
static_cast<const std::uint8_t*>(result.first) + result.second);
table_.insert_or_assign(object->getHash(), std::move(compressed));
}
void
storeBatch(Batch const& batch) override
{
for (auto const& e : batch)
store(e);
}
void
sync() override
{
}
void
for_each(std::function<void(std::shared_ptr<NodeObject>)> f) override
{
if (!isOpen_)
return;
table_.visit_all([&f](const auto& entry) {
nudb::detail::buffer bf;
auto const result = nodeobject_decompress(
entry.second.data(), entry.second.size(), bf);
DecodedBlob decoded(
entry.first.data(), result.first, result.second);
if (decoded.wasOk())
f(decoded.createObject());
});
}
int
getWriteLoad() override
{
return 0;
}
void
setDeletePath() override
{
close();
}
int
fdRequired() const override
{
return 0;
}
private:
size_t
size() const
{
return table_.size();
}
};
class FlatmapFactory : public Factory
{
public:
FlatmapFactory()
{
Manager::instance().insert(*this);
}
~FlatmapFactory() override
{
Manager::instance().erase(*this);
}
std::string
getName() const override
{
return "Flatmap";
}
std::unique_ptr<Backend>
createInstance(
size_t keyBytes,
Section const& keyValues,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal) override
{
return std::make_unique<FlatmapBackend>(keyBytes, keyValues, journal);
}
};
static FlatmapFactory flatmapFactory;
} // namespace NodeStore
} // namespace ripple

View File

@@ -74,7 +74,7 @@ namespace detail {
// Feature.cpp. Because it's only used to reserve storage, and determine how
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
// the actual number of amendments. A LogicError on startup will verify this.
static constexpr std::size_t numFeatures = 86;
static constexpr std::size_t numFeatures = 85;
/** Amendments that this server supports and the default voting behavior.
Whether they are enabled depends on the Rules defined in the validated
@@ -373,7 +373,6 @@ extern uint256 const fixProvisionalDoubleThreading;
extern uint256 const featureClawback;
extern uint256 const featureDeepFreeze;
extern uint256 const featureIOUIssuerWeakTSH;
extern uint256 const featureNestedMultiSign;
} // namespace ripple

View File

@@ -479,7 +479,6 @@ REGISTER_FEATURE(Clawback, Supported::yes, VoteBehavior::De
REGISTER_FIX (fixProvisionalDoubleThreading, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FEATURE(DeepFreeze, Supported::yes, VoteBehavior::DefaultNo);
REGISTER_FEATURE(IOUIssuerWeakTSH, Supported::yes, VoteBehavior::DefaultNo);
REGISTER_FEATURE(NestedMultiSign, Supported::yes, VoteBehavior::DefaultNo);
// The following amendments are obsolete, but must remain supported
// because they could potentially get enabled.

View File

@@ -44,9 +44,8 @@ InnerObjectFormats::InnerObjectFormats()
sfSigner.getCode(),
{
{sfAccount, soeREQUIRED},
{sfSigningPubKey, soeOPTIONAL},
{sfTxnSignature, soeOPTIONAL},
{sfSigners, soeOPTIONAL},
{sfSigningPubKey, soeREQUIRED},
{sfTxnSignature, soeREQUIRED},
});
add(sfMajority.jsonName.c_str(),

View File

@@ -369,146 +369,64 @@ STTx::checkMultiSign(
bool const fullyCanonical = (getFlags() & tfFullyCanonicalSig) ||
(requireCanonicalSig == RequireFullyCanonicalSig::yes);
// Signers must be in sorted order by AccountID.
AccountID lastAccountID(beast::zero);
bool const isWildcardNetwork =
isFieldPresent(sfNetworkID) && getFieldU32(sfNetworkID) == 65535;
// Set max depth based on feature flag
int const maxDepth = rules.enabled(featureNestedMultiSign) ? 4 : 1;
for (auto const& signer : signers)
{
auto const accountID = signer.getAccountID(sfAccount);
// Define recursive lambda for checking signatures at any depth
std::function<Expected<void, std::string>(
STArray const&, AccountID const&, int)>
checkSignersArray;
// The account owner may not multisign for themselves.
if (accountID == txnAccountID)
return Unexpected("Invalid multisigner.");
checkSignersArray = [&](STArray const& signersArray,
AccountID const& parentAccountID,
int depth) -> Expected<void, std::string> {
// Check depth limit
if (depth > maxDepth)
// No duplicate signers allowed.
if (lastAccountID == accountID)
return Unexpected("Duplicate Signers not allowed.");
// Accounts must be in order by account ID. No duplicates allowed.
if (lastAccountID > accountID)
return Unexpected("Unsorted Signers array.");
// The next signature must be greater than this one.
lastAccountID = accountID;
// Verify the signature.
bool validSig = false;
try
{
std::cout << "Multi-signing depth limit exceeded.\n";
return Unexpected("Multi-signing depth limit exceeded.");
}
Serializer s = dataStart;
finishMultiSigningData(accountID, s);
// There are well known bounds that the number of signers must be
// within.
if (signersArray.size() < minMultiSigners ||
signersArray.size() > maxMultiSigners(&rules))
{
std::cout << "Invalid Signers array size.\n";
return Unexpected("Invalid Signers array size.");
}
auto spk = signer.getFieldVL(sfSigningPubKey);
// Signers must be in sorted order by AccountID.
AccountID lastAccountID(beast::zero);
for (auto const& signer : signersArray)
{
auto const accountID = signer.getAccountID(sfAccount);
// The account owner may not multisign for themselves.
if (accountID == txnAccountID)
if (publicKeyType(makeSlice(spk)))
{
std::cout << "Invalid multisigner.\n";
return Unexpected("Invalid multisigner.");
}
Blob const signature = signer.getFieldVL(sfTxnSignature);
// No duplicate signers allowed.
if (lastAccountID == accountID)
{
std::cout << "Duplicate Signers not allowed.\n";
return Unexpected("Duplicate Signers not allowed.");
}
// Accounts must be in order by account ID. No duplicates allowed.
if (lastAccountID > accountID)
{
std::cout << "Unsorted Signers array.\n";
return Unexpected("Unsorted Signers array.");
}
// The next signature must be greater than this one.
lastAccountID = accountID;
// Check if this signer has nested signers
if (signer.isFieldPresent(sfSigners))
{
// This is a nested multi-signer
// Ensure it doesn't also have signature fields
if (signer.isFieldPresent(sfSigningPubKey) ||
signer.isFieldPresent(sfTxnSignature))
{
std::cout << "Signer cannot have both nested signers and "
"signature "
"fields.\n";
return Unexpected(
"Signer cannot have both nested signers and signature "
"fields.");
}
// Recursively check nested signers
STArray const& nestedSigners = signer.getFieldArray(sfSigners);
auto result =
checkSignersArray(nestedSigners, accountID, depth + 1);
if (!result)
return result;
}
else
{
// This is a leaf node - must have signature
if (!signer.isFieldPresent(sfSigningPubKey) ||
!signer.isFieldPresent(sfTxnSignature))
{
std::cout << "Leaf signer must have SigningPubKey and "
"TxnSignature.\n";
return Unexpected(
"Leaf signer must have SigningPubKey and "
"TxnSignature.");
}
// Verify the signature
bool validSig = false;
try
{
Serializer s = dataStart;
finishMultiSigningData(accountID, s);
auto spk = signer.getFieldVL(sfSigningPubKey);
if (publicKeyType(makeSlice(spk)))
{
Blob const signature =
signer.getFieldVL(sfTxnSignature);
// wildcard network gets a free pass
validSig = isWildcardNetwork ||
verify(PublicKey(makeSlice(spk)),
s.slice(),
makeSlice(signature),
fullyCanonical);
}
}
catch (std::exception const&)
{
// We assume any problem lies with the signature.
validSig = false;
}
if (!validSig)
{
std::cout << std::string("Invalid signature on account ") +
toBase58(accountID) + ".\n";
return Unexpected(
std::string("Invalid signature on account ") +
toBase58(accountID) + ".");
}
// wildcard network gets a free pass
validSig = isWildcardNetwork ||
verify(PublicKey(makeSlice(spk)),
s.slice(),
makeSlice(signature),
fullyCanonical);
}
}
return {};
};
// Start the recursive check at depth 1
return checkSignersArray(signers, txnAccountID, 1);
catch (std::exception const&)
{
// We assume any problem lies with the signature.
validSig = false;
}
if (!validSig)
return Unexpected(
std::string("Invalid signature on account ") +
toBase58(accountID) + ".");
}
// All signatures verified.
return {};
}
//------------------------------------------------------------------------------

View File

@@ -1183,32 +1183,12 @@ transactionSubmitMultiSigned(
// The Signers array may only contain Signer objects.
if (std::find_if_not(
signers.begin(), signers.end(), [](STObject const& obj) {
if (obj.getCount() != 4 || !obj.isFieldPresent(sfAccount))
return false;
// leaf signer
if (obj.isFieldPresent(sfSigningPubKey) &&
obj.isFieldPresent(sfTxnSignature) &&
!obj.isFieldPresent(sfSigners))
return true;
// nested signer
if (!obj.isFieldPresent(sfSigningPubKey) &&
!obj.isFieldPresent(sfTxnSignature) &&
obj.isFieldPresent(sfSigners))
return true;
/*
std::cout << "Error caused by:\n" <<
obj.getJson(JsonOptions::none) << "\n"
<< "obj.isFieldPresent(sfAccount) = " <<
(obj.isFieldPresent(sfAccount) ? "t" : "f") << "\n"
<< "obj.isFieldPresent(sfSigningPubKey) = " <<
(obj.isFieldPresent(sfSigningPubKey) ? "t" : "f") << "\n"
<< "obj.isFieldPresent(sfTxnSignature) = " <<
(obj.isFieldPresent(sfTxnSignature) ? "t" : "f") << "\n"
<< "obj.getCount() = " << obj.getCount() << "\n\n";
*/
return false;
return (
// A Signer object always contains these fields and no
// others.
obj.isFieldPresent(sfAccount) &&
obj.isFieldPresent(sfSigningPubKey) &&
obj.isFieldPresent(sfTxnSignature) && obj.getCount() == 3);
}) != signers.end())
{
return RPC::make_param_error(

View File

@@ -1659,419 +1659,6 @@ public:
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
}
void
test_nestedMultiSign(FeatureBitset features)
{
testcase("Nested MultiSign");
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
#define LINE_TO_HEX_STRING \
[]() -> std::string { \
const char* line = TOSTRING(__LINE__); \
int len = 0; \
while (line[len]) \
len++; \
std::string result; \
if (len % 2 == 1) \
{ \
result += (char)(0x00 * 16 + (line[0] - '0')); \
line++; \
} \
for (int i = 0; line[i]; i += 2) \
{ \
result += (char)((line[i] - '0') * 16 + (line[i + 1] - '0')); \
} \
return result; \
}()
#define M(m) memo(m, "", "")
#define L() memo(LINE_TO_HEX_STRING, "", "")
using namespace jtx;
Env env{*this, envconfig(), features};
Account const alice{"alice", KeyType::secp256k1};
Account const becky{"becky", KeyType::ed25519};
Account const cheri{"cheri", KeyType::secp256k1};
Account const daria{"daria", KeyType::ed25519};
Account const edgar{"edgar", KeyType::secp256k1};
Account const fiona{"fiona", KeyType::ed25519};
Account const grace{"grace", KeyType::secp256k1};
Account const henry{"henry", KeyType::ed25519};
Account const f1{"f1", KeyType::ed25519};
Account const f2{"f2", KeyType::ed25519};
Account const f3{"f3", KeyType::ed25519};
env.fund(
XRP(1000),
alice,
becky,
cheri,
daria,
edgar,
fiona,
grace,
henry,
f1,
f2,
f3,
phase,
jinni,
acc10,
acc11,
acc12);
env.close();
std::cout << "alice: " << to_string(alice) << "\n";
std::cout << "becky: " << to_string(becky) << "\n";
std::cout << "cheri: " << to_string(cheri) << "\n";
std::cout << "daria: " << to_string(daria) << "\n";
std::cout << "edgar: " << to_string(edgar) << "\n";
std::cout << "fiona: " << to_string(fiona) << "\n";
std::cout << "grace: " << to_string(grace) << "\n";
std::cout << "henry: " << to_string(henry) << "\n";
std::cout << "f1: " << to_string(f1) << "\n";
std::cout << "f2: " << to_string(f2) << "\n";
std::cout << "f3: " << to_string(f3) << "\n";
std::cout << "phase: " << to_string(phase) << "\n";
std::cout << "jinni: " << to_string(jinni) << "\n";
std::cout << "acc10: " << to_string(acc10) << "\n";
std::cout << "acc11: " << to_string(acc11) << "\n";
std::cout << "acc12: " << to_string(acc12) << "\n";
auto const baseFee = env.current()->fees().base;
if (!features[featureNestedMultiSign])
{
// When feature is disabled, nested signing should fail
env(signers(f1, 1, {{f2, 1}}));
env(signers(f2, 1, {{f3, 1}}));
env.close();
std::uint32_t f1Seq = env.seq(f1);
env(noop(f1),
msig({msigner(f2, msigner(f3))}),
L(),
fee(3 * baseFee),
ter(temINVALID));
env.close();
BEAST_EXPECT(env.seq(f1) == f1Seq);
return;
}
// Test Case 1: Basic 2-level nested signing with quorum
{
// Set up signer lists with quorum requirements
env(signers(becky, 2, {{bogie, 1}, {demon, 1}, {ghost, 1}}));
env(signers(cheri, 3, {{haunt, 2}, {jinni, 2}}));
env.close();
// Alice requires quorum of 3 with weighted signers
env(signers(alice, 3, {{becky, 2}, {cheri, 2}, {daria, 1}}));
env.close();
// Test 1a: becky alone (weight 2) doesn't meet alice's quorum
std::uint32_t aliceSeq = env.seq(alice);
env(noop(alice),
msig({msigner(becky, msigner(bogie), msigner(demon))}),
L(),
fee(4 * baseFee),
ter(tefBAD_QUORUM));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq);
// Test 1b: becky (2) + daria (1) meets quorum of 3
aliceSeq = env.seq(alice);
env(noop(alice),
msig(
{msigner(becky, msigner(bogie), msigner(demon)),
msigner(daria)}),
L(),
fee(5 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
// Test 1c: cheri's nested signers must meet her quorum
aliceSeq = env.seq(alice);
env(noop(alice),
msig(
{msigner(
becky,
msigner(bogie),
msigner(demon)), // becky has a satisfied quorum
msigner(cheri, msigner(haunt))}), // but cheri does not
// (needs jinni too)
L(),
fee(5 * baseFee),
ter(tefBAD_QUORUM));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq);
// Test 1d: cheri with both signers meets her quorum
aliceSeq = env.seq(alice);
env(noop(alice),
msig(
{msigner(cheri, msigner(haunt), msigner(jinni)),
msigner(daria)}),
L(),
fee(5 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
}
// Test Case 2: 3-level maximum depth with quorum at each level
{
// Level 2: phase needs direct signatures (no deeper nesting)
env(signers(phase, 2, {{acc10, 1}, {acc11, 1}, {acc12, 1}}));
// Level 1: jinni needs weighted signatures
env(signers(jinni, 3, {{phase, 2}, {shade, 2}, {spook, 1}}));
// Level 0: edgar needs 2 from weighted signers
env(signers(edgar, 2, {{jinni, 1}, {bogie, 1}, {demon, 1}}));
// Alice now requires edgar with weight 3
env(signers(alice, 3, {{edgar, 3}, {fiona, 2}}));
env.close();
// Test 2a: 3-level signing with phase signing directly (not through
// nested signers)
std::uint32_t aliceSeq = env.seq(alice);
env(noop(alice),
msig({
msigner(
edgar,
msigner(
jinni,
msigner(phase), // phase signs directly at level 3
msigner(shade)) // jinni quorum: 2+2 = 4 >= 3 ✓
) // edgar quorum: 1+0 = 1 < 2 ✗
}),
L(),
fee(4 * baseFee),
ter(tefBAD_QUORUM));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq);
// Test 2b: Edgar needs to meet his quorum too
aliceSeq = env.seq(alice);
env(noop(alice),
msig({
msigner(
edgar,
msigner(
jinni,
msigner(phase), // phase signs directly
msigner(shade)),
msigner(bogie)) // edgar quorum: 1+1 = 2 ✓
}),
L(),
fee(5 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
// Test 2c: Use phase's signers (making it effectively 3-level from
// alice)
aliceSeq = env.seq(alice);
env(noop(alice),
msig({msigner(
edgar,
msigner(
jinni,
msigner(phase, msigner(acc10), msigner(acc11)),
msigner(spook)),
msigner(bogie))}),
L(),
fee(6 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
}
// Test Case 3: Mixed levels - some direct, some nested at different
// depths (max 3)
{
// Set up mixed-level signing for alice
// grace has direct signers
env(signers(grace, 2, {{bogie, 1}, {demon, 1}}));
// henry has 2-level signers (henry -> becky -> bogie/demon)
env(signers(henry, 1, {{becky, 1}, {cheri, 1}}));
// edgar can be signed for by bogie
env(signers(edgar, 1, {{bogie, 1}}));
// Alice has mix of direct and nested signers at different weights
env(signers(
alice,
5,
{
{daria, 1}, // direct signer
{edgar, 2}, // has 2-level signers
{fiona, 1}, // direct signer
{grace, 2}, // has direct signers
{henry, 2} // has 2-level signers
}));
env.close();
// Test 3a: Mix of all levels meeting quorum exactly
std::uint32_t aliceSeq = env.seq(alice);
env(noop(alice),
msig({
msigner(daria), // weight 1, direct
msigner(edgar, msigner(bogie)), // weight 2, 2-level
msigner(grace, msigner(bogie), msigner(demon)) // weight 2,
// 2-level
}),
L(),
fee(6 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
// Test 3b: 3-level signing through henry
aliceSeq = env.seq(alice);
env(noop(alice),
msig(
{msigner(fiona), // weight 1, direct
msigner(
grace, msigner(bogie)), // weight 2, 2-level (partial)
msigner(
henry, // weight 2, 3-level
msigner(becky, msigner(bogie), msigner(demon)))}),
L(),
fee(6 * baseFee),
ter(tefBAD_QUORUM)); // grace didn't meet quorum
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq);
// Test 3c: Correct version with all quorums met
aliceSeq = env.seq(alice);
env(noop(alice),
msig({
msigner(
henry, // weight 2
msigner(becky, msigner(bogie), msigner(demon))),
msigner(fiona), // weight 1
msigner(edgar, msigner(bogie), msigner(demon)) // weight 2
}),
L(),
fee(8 * baseFee)); // Total weight: 1+2+2 = 5 ✓
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
}
// Test Case 4: Complex scenario with maximum signers at mixed depths
// (max 3)
{
// Create a signing tree that uses close to maximum signers
// and tests weight accumulation across all levels
// Set up for alice: needs 15 out of possible 20 weight
env(signers(
alice,
15,
{
{becky, 3}, // will use 2-level
{cheri, 3}, // will use 2-level
{daria, 3}, // will use direct
{edgar, 3}, // will use 2-level
{fiona, 3}, // will use direct
{grace, 3}, // will use direct
{henry, 2} // will use 2-level
}));
env.close();
// Complex multi-level transaction just meeting quorum
std::uint32_t aliceSeq = env.seq(alice);
env(noop(alice),
msig({
msigner(
becky, // weight 3, 2-level
msigner(demon),
msigner(ghost)),
msigner(
cheri, // weight 3, 2-level
msigner(haunt),
msigner(jinni)),
msigner(daria), // weight 3, direct
msigner(
edgar, // weight 3, 2-level
msigner(bogie),
msigner(demon)),
msigner(grace) // weight 3, direct
}),
L(),
fee(10 * baseFee)); // Total weight: 3+3+3+3+3 = 15 ✓
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
// Test 4b: Test with henry using 3-level depth (maximum)
// First set up henry's chain properly
env(signers(henry, 1, {{jinni, 1}}));
env(signers(jinni, 2, {{acc10, 1}, {acc11, 1}}));
env.close();
aliceSeq = env.seq(alice);
env(noop(alice),
msig(
{msigner(
becky, // weight 3
msigner(demon)), // becky quorum not met!
msigner(
cheri, // weight 3
msigner(haunt),
msigner(jinni)),
msigner(daria), // weight 3
msigner(
henry, // weight 2, 3-level depth
msigner(jinni, msigner(acc10), msigner(acc11))),
msigner(
edgar, // weight 3
msigner(demon),
msigner(bogie))}),
L(),
fee(10 * baseFee),
ter(tefBAD_QUORUM)); // becky's quorum not met
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq);
}
// Test Case 5: Edge case - single signer with maximum nesting (depth 3)
{
// Alice needs just one signer, but that signer uses depth up to 3
env(signers(alice, 1, {{becky, 1}}));
env.close();
std::uint32_t aliceSeq = env.seq(alice);
env(noop(alice),
msig({msigner(becky, msigner(demon), msigner(ghost))}),
L(),
fee(4 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
// Now with 3-level depth (maximum allowed)
// Structure: alice -> becky -> cheri -> jinni (jinni signs
// directly)
env(signers(becky, 1, {{cheri, 1}}));
env(signers(cheri, 1, {{jinni, 1}}));
// Note: We do NOT add signers to jinni to keep max depth at 3
env.close();
aliceSeq = env.seq(alice);
env(noop(alice),
msig({msigner(
becky,
msigner(
cheri,
msigner(jinni)))}), // jinni signs directly (depth 3)
L(),
fee(4 * baseFee));
env.close();
BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
}
}
void
testAll(FeatureBitset features)
{
@@ -2093,7 +1680,6 @@ public:
test_signForHash(features);
test_signersWithTickets(features);
test_signersWithTags(features);
test_nestedMultiSign(features);
}
void
@@ -2106,11 +1692,8 @@ public:
// featureMultiSignReserve. Limits on the number of signers
// changes based on featureExpandedSignerList. Test both with and
// without.
testAll(
all - featureMultiSignReserve - featureExpandedSignerList -
featureNestedMultiSign);
testAll(all - featureExpandedSignerList - featureNestedMultiSign);
testAll(all - featureNestedMultiSign);
testAll(all - featureMultiSignReserve - featureExpandedSignerList);
testAll(all - featureExpandedSignerList);
testAll(all);
test_amendmentTransition();
}

View File

@@ -216,6 +216,10 @@ public:
}
BEAST_EXPECT(store.getLastRotated() == lastRotated);
SQLiteDatabase* const db =
dynamic_cast<SQLiteDatabase*>(&env.app().getRelationalDatabase());
BEAST_EXPECT(*db->getTransactionsMinLedgerSeq() == 3);
for (auto i = 3; i < deleteInterval + lastRotated; ++i)
{
ledgers.emplace(

View File

@@ -321,7 +321,7 @@ public:
HSFEE,
ter(temMALFORMED));
env(ripple::test::jtx::hook(alice, std::vector<Json::Value>{}, 0),
env(ripple::test::jtx::hook(alice, {{}}, 0),
M("Must have a non-empty hooks field"),
HSFEE,
ter(temMALFORMED));

View File

@@ -310,7 +310,6 @@ Env::submit(JTx const& jt)
{
// Parsing failed or the JTx is
// otherwise missing the stx field.
std::cout << "!!! temMALFORMED " << __FILE__ << " " << __LINE__ << "\n";
ter_ = temMALFORMED;
didApply = false;
}

View File

@@ -66,45 +66,15 @@ signers(Account const& account, none_t)
//------------------------------------------------------------------------------
// Helper function to recursively sort nested signers
void
sortSignersRecursive(std::vector<msig::SignerPtr>& signers)
msig::msig(std::vector<msig::Reg> signers_) : signers(std::move(signers_))
{
// Sort current level by account ID
// Signatures must be applied in sorted order.
std::sort(
signers.begin(),
signers.end(),
[](msig::SignerPtr const& lhs, msig::SignerPtr const& rhs) {
return lhs->id() < rhs->id();
[](msig::Reg const& lhs, msig::Reg const& rhs) {
return lhs.acct.id() < rhs.acct.id();
});
// Recursively sort nested signers for each signer at this level
for (auto& signer : signers)
{
if (signer->isNested() && !signer->nested.empty())
{
sortSignersRecursive(signer->nested);
}
}
}
msig::msig(std::vector<msig::SignerPtr> signers_) : signers(std::move(signers_))
{
// Recursively sort all signers at all nesting levels
// This ensures account IDs are in strictly ascending order at each level
sortSignersRecursive(signers);
}
msig::msig(std::vector<msig::Reg> signers_)
{
// Convert Reg vector to SignerPtr vector for backward compatibility
signers.reserve(signers_.size());
for (auto const& s : signers_)
signers.push_back(s.toSigner());
// Recursively sort all signers at all nesting levels
// This ensures account IDs are in strictly ascending order at each level
sortSignersRecursive(signers);
}
void
@@ -123,47 +93,19 @@ msig::operator()(Env& env, JTx& jt) const
env.test.log << pretty(jtx.jv) << std::endl;
Rethrow();
}
// Recursive function to build signer JSON
std::function<Json::Value(SignerPtr const&)> buildSignerJson;
buildSignerJson = [&](SignerPtr const& signer) -> Json::Value {
Json::Value jo;
jo[jss::Account] = signer->acct.human();
if (signer->isNested())
{
// For nested signers, we use the already-sorted nested vector
// (sorted during construction via sortSignersRecursive)
// This ensures account IDs are in strictly ascending order
auto& subJs = jo[sfSigners.getJsonName()];
for (std::size_t i = 0; i < signer->nested.size(); ++i)
{
auto& subJo = subJs[i][sfSigner.getJsonName()];
subJo = buildSignerJson(signer->nested[i]);
}
}
else
{
// This is a leaf signer - add signature
jo[jss::SigningPubKey] = strHex(signer->sig.pk().slice());
Serializer ss{buildMultiSigningData(*st, signer->acct.id())};
auto const sig = ripple::sign(
*publicKeyType(signer->sig.pk().slice()),
signer->sig.sk(),
ss.slice());
jo[sfTxnSignature.getJsonName()] =
strHex(Slice{sig.data(), sig.size()});
}
return jo;
};
auto& js = jtx[sfSigners.getJsonName()];
for (std::size_t i = 0; i < mySigners.size(); ++i)
{
auto const& e = mySigners[i];
auto& jo = js[i][sfSigner.getJsonName()];
jo = buildSignerJson(mySigners[i]);
jo[jss::Account] = e.acct.human();
jo[jss::SigningPubKey] = strHex(e.sig.pk().slice());
Serializer ss{buildMultiSigningData(*st, e.acct.id())};
auto const sig = ripple::sign(
*publicKeyType(e.sig.pk().slice()), e.sig.sk(), ss.slice());
jo[sfTxnSignature.getJsonName()] =
strHex(Slice{sig.data(), sig.size()});
}
};
}

View File

@@ -21,7 +21,6 @@
#define RIPPLE_TEST_JTX_MULTISIGN_H_INCLUDED
#include <cstdint>
#include <memory>
#include <optional>
#include <test/jtx/Account.h>
#include <test/jtx/amount.h>
@@ -66,48 +65,6 @@ signers(Account const& account, none_t);
class msig
{
public:
// Recursive signer structure
struct Signer
{
Account acct;
Account sig; // For leaf signers (same as acct for master key)
std::vector<std::shared_ptr<Signer>> nested; // For nested signers
// Leaf signer constructor (regular signing)
Signer(Account const& masterSig) : acct(masterSig), sig(masterSig)
{
}
// Leaf signer constructor (with different signing key)
Signer(Account const& acct_, Account const& regularSig)
: acct(acct_), sig(regularSig)
{
}
// Nested signer constructor
Signer(
Account const& acct_,
std::vector<std::shared_ptr<Signer>> nested_)
: acct(acct_), nested(std::move(nested_))
{
}
bool
isNested() const
{
return !nested.empty();
}
AccountID
id() const
{
return acct.id();
}
};
using SignerPtr = std::shared_ptr<Signer>;
// For backward compatibility
struct Reg
{
Account acct;
@@ -116,13 +73,16 @@ public:
Reg(Account const& masterSig) : acct(masterSig), sig(masterSig)
{
}
Reg(Account const& acct_, Account const& regularSig)
: acct(acct_), sig(regularSig)
{
}
Reg(char const* masterSig) : acct(masterSig), sig(masterSig)
{
}
Reg(char const* acct_, char const* regularSig)
: acct(acct_), sig(regularSig)
{
@@ -133,25 +93,13 @@ public:
{
return acct < rhs.acct;
}
// Convert to Signer
SignerPtr
toSigner() const
{
return std::make_shared<Signer>(acct, sig);
}
};
std::vector<SignerPtr> signers;
std::vector<Reg> signers;
public:
// Direct constructor with SignerPtr vector
msig(std::vector<SignerPtr> signers_);
// Backward compatibility constructor
msig(std::vector<Reg> signers_);
// Variadic constructor for backward compatibility
template <class AccountType, class... Accounts>
explicit msig(AccountType&& a0, Accounts&&... aN)
: msig{std::vector<Reg>{
@@ -164,30 +112,6 @@ public:
operator()(Env&, JTx& jt) const;
};
// Helper functions to create signers - renamed to avoid conflict with sig()
// transaction modifier
inline msig::SignerPtr
msigner(Account const& acct)
{
return std::make_shared<msig::Signer>(acct);
}
inline msig::SignerPtr
msigner(Account const& acct, Account const& signingKey)
{
return std::make_shared<msig::Signer>(acct, signingKey);
}
// Create nested signer with initializer list
template <typename... Args>
inline msig::SignerPtr
msigner(Account const& acct, Args&&... args)
{
std::vector<msig::SignerPtr> nested;
(nested.push_back(std::forward<Args>(args)), ...);
return std::make_shared<msig::Signer>(acct, std::move(nested));
}
//------------------------------------------------------------------------------
/** The number of signer lists matches. */

View File

@@ -332,7 +332,6 @@ multi_runner_child::run_multi(Pred pred)
{
if (!pred(*t))
continue;
try
{
failed = run(*t) || failed;