Compare commits

8 Commits

Author | SHA1 | Message | Date
Nicholas Dudfield | 61b364ec82 | chore: clang format | 2025-08-18 16:28:41 +07:00
Nicholas Dudfield | 7fae6c3eb7 | replace index-based account data with map of vectors | 2025-08-18 16:22:52 +07:00
    - Change AccountTxData to use map<ledgerSeq, vector<AccountTx>>
    - Remove vector index storage to prevent invalidation during cleanup
    - Update account transaction query functions accordingly
    - Fix type conversions for transaction sequence markers
Nicholas Dudfield | 70dd2a0f0e | wip | 2025-08-18 16:11:06 +07:00
Nicholas Dudfield | d15063bca4 | remove flatmap database implementation | 2025-08-18 15:11:54 +07:00
Nicholas Dudfield | 998ae5535b | add automatic ledger history cleanup to rwdb | 2025-08-18 14:16:16 +07:00
Nicholas Dudfield | 4c41d32276 | use ordered ledgers_ map for faster first txn lookup | 2025-08-18 12:01:39 +07:00
RichardAH | a2137d5436 | Merge branch 'dev' into fix-online-delete | 2025-08-14 14:02:57 +10:00
Denis Angell | a84d72a7f7 | fix online delete | 2025-07-10 13:07:53 +02:00
7 changed files with 80 additions and 1151 deletions
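
The data-structure change in commit 7fae6c3eb7 is the heart of the diff below. As a rough sketch (types simplified; the real AccountTx is a pair of shared_ptr<Transaction> and shared_ptr<TxMeta>, and the helper shown here is illustrative rather than code from the tree), the old and new per-account layouts compare like this:

// A minimal sketch of the AccountTxData change from commit 7fae6c3eb7.
// Types are simplified stand-ins; only the shape of the containers matches
// the real diff shown further down.
#include <cstddef>
#include <cstdint>
#include <map>
#include <memory>
#include <utility>
#include <vector>

struct Transaction;  // opaque stand-ins for rippled's Transaction / TxMeta
struct TxMeta;
using AccountTx =
    std::pair<std::shared_ptr<Transaction>, std::shared_ptr<TxMeta>>;

// Old layout: one flat vector plus a nested map of *indices* into it.
// Erasing elements from `transactions` during cleanup shifts the remaining
// entries and silently invalidates every stored index.
struct AccountTxDataOld
{
    std::vector<AccountTx> transactions;
    std::map<std::uint32_t, std::map<std::uint32_t, std::size_t>>
        ledgerTxMap;  // ledgerSeq -> txSeq -> index into `transactions`
};

// New layout: each ledger owns its transactions directly, so pruning a
// ledger is a plain map erase and nothing else needs re-indexing.
struct AccountTxDataNew
{
    std::map<std::uint32_t, std::vector<AccountTx>>
        ledgerTxMap;  // ledgerSeq -> AccountTx entries in txSeq order
};

// Cleanup before a cutoff becomes a prefix erase on the ordered outer map.
inline void
pruneBefore(AccountTxDataNew& data, std::uint32_t cutoffSeq)
{
    data.ledgerTxMap.erase(
        data.ledgerTxMap.begin(), data.ledgerTxMap.lower_bound(cutoffSeq));
}

The same shape shows up in the RWDBDatabase.h hunks further down, where index lookups into accountData.transactions are replaced by direct use of the per-ledger vectors.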

View File

@@ -548,7 +548,6 @@ target_sources (rippled PRIVATE
src/ripple/nodestore/backend/CassandraFactory.cpp
src/ripple/nodestore/backend/RWDBFactory.cpp
src/ripple/nodestore/backend/MemoryFactory.cpp
src/ripple/nodestore/backend/FlatmapFactory.cpp
src/ripple/nodestore/backend/NuDBFactory.cpp
src/ripple/nodestore/backend/NullFactory.cpp
src/ripple/nodestore/backend/RocksDBFactory.cpp

View File

@@ -1,851 +0,0 @@
#ifndef RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED
#define RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED
#include <ripple/app/ledger/AcceptedLedger.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/TransactionMaster.h>
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
#include <algorithm>
#include <map>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <vector>
#include <boost/unordered/concurrent_flat_map.hpp>
namespace ripple {
struct base_uint_hasher
{
using result_type = std::size_t;
result_type
operator()(base_uint<256> const& value) const
{
return hardened_hash<>{}(value);
}
result_type
operator()(AccountID const& value) const
{
return hardened_hash<>{}(value);
}
};
class FlatmapDatabase : public SQLiteDatabase
{
private:
struct LedgerData
{
LedgerInfo info;
boost::unordered::
concurrent_flat_map<uint256, AccountTx, base_uint_hasher>
transactions;
};
struct AccountTxData
{
boost::unordered::
concurrent_flat_map<std::pair<uint32_t, uint32_t>, AccountTx>
transactions;
};
Application& app_;
boost::unordered::concurrent_flat_map<LedgerIndex, LedgerData> ledgers_;
boost::unordered::
concurrent_flat_map<uint256, LedgerIndex, base_uint_hasher>
ledgerHashToSeq_;
boost::unordered::concurrent_flat_map<uint256, AccountTx, base_uint_hasher>
transactionMap_;
boost::unordered::
concurrent_flat_map<AccountID, AccountTxData, base_uint_hasher>
accountTxMap_;
public:
FlatmapDatabase(Application& app, Config const& config, JobQueue& jobQueue)
: app_(app)
{
}
std::optional<LedgerIndex>
getMinLedgerSeq() override
{
std::optional<LedgerIndex> minSeq;
ledgers_.visit_all([&minSeq](auto const& pair) {
if (!minSeq || pair.first < *minSeq)
{
minSeq = pair.first;
}
});
return minSeq;
}
std::optional<LedgerIndex>
getTransactionsMinLedgerSeq() override
{
std::optional<LedgerIndex> minSeq;
transactionMap_.visit_all([&minSeq](auto const& pair) {
LedgerIndex seq = pair.second.second->getLgrSeq();
if (!minSeq || seq < *minSeq)
{
minSeq = seq;
}
});
return minSeq;
}
std::optional<LedgerIndex>
getAccountTransactionsMinLedgerSeq() override
{
std::optional<LedgerIndex> minSeq;
accountTxMap_.visit_all([&minSeq](auto const& pair) {
pair.second.transactions.visit_all([&minSeq](auto const& tx) {
if (!minSeq || tx.first.first < *minSeq)
{
minSeq = tx.first.first;
}
});
});
return minSeq;
}
std::optional<LedgerIndex>
getMaxLedgerSeq() override
{
std::optional<LedgerIndex> maxSeq;
ledgers_.visit_all([&maxSeq](auto const& pair) {
if (!maxSeq || pair.first > *maxSeq)
{
maxSeq = pair.first;
}
});
return maxSeq;
}
void
deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) override
{
ledgers_.visit(ledgerSeq, [this](auto& item) {
item.second.transactions.visit_all([this](auto const& txPair) {
transactionMap_.erase(txPair.first);
});
item.second.transactions.clear();
});
accountTxMap_.visit_all([ledgerSeq](auto& item) {
item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
return tx.first.first == ledgerSeq;
});
});
}
void
deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) override
{
ledgers_.erase_if([this, ledgerSeq](auto const& item) {
if (item.first < ledgerSeq)
{
item.second.transactions.visit_all([this](auto const& txPair) {
transactionMap_.erase(txPair.first);
});
ledgerHashToSeq_.erase(item.second.info.hash);
return true;
}
return false;
});
accountTxMap_.visit_all([ledgerSeq](auto& item) {
item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
return tx.first.first < ledgerSeq;
});
});
}
void
deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override
{
ledgers_.visit_all([this, ledgerSeq](auto& item) {
if (item.first < ledgerSeq)
{
item.second.transactions.visit_all([this](auto const& txPair) {
transactionMap_.erase(txPair.first);
});
item.second.transactions.clear();
}
});
accountTxMap_.visit_all([ledgerSeq](auto& item) {
item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
return tx.first.first < ledgerSeq;
});
});
}
void
deleteAccountTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override
{
accountTxMap_.visit_all([ledgerSeq](auto& item) {
item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
return tx.first.first < ledgerSeq;
});
});
}
std::size_t
getTransactionCount() override
{
return transactionMap_.size();
}
std::size_t
getAccountTransactionCount() override
{
std::size_t count = 0;
accountTxMap_.visit_all([&count](auto const& item) {
count += item.second.transactions.size();
});
return count;
}
CountMinMax
getLedgerCountMinMax() override
{
CountMinMax result{0, 0, 0};
ledgers_.visit_all([&result](auto const& item) {
result.numberOfRows++;
if (result.minLedgerSequence == 0 ||
item.first < result.minLedgerSequence)
{
result.minLedgerSequence = item.first;
}
if (item.first > result.maxLedgerSequence)
{
result.maxLedgerSequence = item.first;
}
});
return result;
}
bool
saveValidatedLedger(
std::shared_ptr<Ledger const> const& ledger,
bool current) override
{
try
{
LedgerData ledgerData;
ledgerData.info = ledger->info();
auto aLedger = std::make_shared<AcceptedLedger>(ledger, app_);
for (auto const& acceptedLedgerTx : *aLedger)
{
auto const& txn = acceptedLedgerTx->getTxn();
auto const& meta = acceptedLedgerTx->getMeta();
auto const& id = txn->getTransactionID();
std::string reason;
auto accTx = std::make_pair(
std::make_shared<ripple::Transaction>(txn, reason, app_),
std::make_shared<ripple::TxMeta>(meta));
ledgerData.transactions.emplace(id, accTx);
transactionMap_.emplace(id, accTx);
for (auto const& account : meta.getAffectedAccounts())
{
accountTxMap_.visit(account, [&](auto& data) {
data.second.transactions.emplace(
std::make_pair(
ledger->info().seq,
acceptedLedgerTx->getTxnSeq()),
accTx);
});
}
}
ledgers_.emplace(ledger->info().seq, std::move(ledgerData));
ledgerHashToSeq_.emplace(ledger->info().hash, ledger->info().seq);
if (current)
{
auto const cutoffSeq =
ledger->info().seq > app_.config().LEDGER_HISTORY
? ledger->info().seq - app_.config().LEDGER_HISTORY
: 0;
if (cutoffSeq > 0)
{
const std::size_t BATCH_SIZE = 128;
std::size_t deleted = 0;
ledgers_.erase_if([&](auto const& item) {
if (deleted >= BATCH_SIZE)
return false;
if (item.first < cutoffSeq)
{
item.second.transactions.visit_all(
[this](auto const& txPair) {
transactionMap_.erase(txPair.first);
});
ledgerHashToSeq_.erase(item.second.info.hash);
deleted++;
return true;
}
return false;
});
if (deleted > 0)
{
accountTxMap_.visit_all([cutoffSeq](auto& item) {
item.second.transactions.erase_if(
[cutoffSeq](auto const& tx) {
return tx.first.first < cutoffSeq;
});
});
}
app_.getLedgerMaster().clearPriorLedgers(cutoffSeq);
}
}
return true;
}
catch (std::exception const&)
{
deleteTransactionByLedgerSeq(ledger->info().seq);
return false;
}
}
std::optional<LedgerInfo>
getLedgerInfoByIndex(LedgerIndex ledgerSeq) override
{
std::optional<LedgerInfo> result;
ledgers_.visit(ledgerSeq, [&result](auto const& item) {
result = item.second.info;
});
return result;
}
std::optional<LedgerInfo>
getNewestLedgerInfo() override
{
std::optional<LedgerInfo> result;
ledgers_.visit_all([&result](auto const& item) {
if (!result || item.second.info.seq > result->seq)
{
result = item.second.info;
}
});
return result;
}
std::optional<LedgerInfo>
getLimitedOldestLedgerInfo(LedgerIndex ledgerFirstIndex) override
{
std::optional<LedgerInfo> result;
ledgers_.visit_all([&](auto const& item) {
if (item.first >= ledgerFirstIndex &&
(!result || item.first < result->seq))
{
result = item.second.info;
}
});
return result;
}
std::optional<LedgerInfo>
getLimitedNewestLedgerInfo(LedgerIndex ledgerFirstIndex) override
{
std::optional<LedgerInfo> result;
ledgers_.visit_all([&](auto const& item) {
if (item.first >= ledgerFirstIndex &&
(!result || item.first > result->seq))
{
result = item.second.info;
}
});
return result;
}
std::optional<LedgerInfo>
getLedgerInfoByHash(uint256 const& ledgerHash) override
{
std::optional<LedgerInfo> result;
ledgerHashToSeq_.visit(ledgerHash, [this, &result](auto const& item) {
ledgers_.visit(item.second, [&result](auto const& item) {
result = item.second.info;
});
});
return result;
}
uint256
getHashByIndex(LedgerIndex ledgerIndex) override
{
uint256 result;
ledgers_.visit(ledgerIndex, [&result](auto const& item) {
result = item.second.info.hash;
});
return result;
}
std::optional<LedgerHashPair>
getHashesByIndex(LedgerIndex ledgerIndex) override
{
std::optional<LedgerHashPair> result;
ledgers_.visit(ledgerIndex, [&result](auto const& item) {
result = LedgerHashPair{
item.second.info.hash, item.second.info.parentHash};
});
return result;
}
std::map<LedgerIndex, LedgerHashPair>
getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) override
{
std::map<LedgerIndex, LedgerHashPair> result;
ledgers_.visit_all([&](auto const& item) {
if (item.first >= minSeq && item.first <= maxSeq)
{
result[item.first] = LedgerHashPair{
item.second.info.hash, item.second.info.parentHash};
}
});
return result;
}
std::variant<AccountTx, TxSearched>
getTransaction(
uint256 const& id,
std::optional<ClosedInterval<std::uint32_t>> const& range,
error_code_i& ec) override
{
std::variant<AccountTx, TxSearched> result = TxSearched::unknown;
transactionMap_.visit(id, [&](auto const& item) {
auto const& tx = item.second;
if (!range ||
(range->lower() <= tx.second->getLgrSeq() &&
tx.second->getLgrSeq() <= range->upper()))
{
result = tx;
}
else
{
result = TxSearched::all;
}
});
return result;
}
bool
ledgerDbHasSpace(Config const& config) override
{
return true; // In-memory database always has space
}
bool
transactionDbHasSpace(Config const& config) override
{
return true; // In-memory database always has space
}
std::uint32_t
getKBUsedAll() override
{
std::uint32_t size = sizeof(*this);
size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
size +=
ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
accountTxMap_.visit_all([&size](auto const& item) {
size += sizeof(AccountID) + sizeof(AccountTxData);
size += item.second.transactions.size() * sizeof(AccountTx);
});
return size / 1024; // Convert to KB
}
std::uint32_t
getKBUsedLedger() override
{
std::uint32_t size =
ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
size +=
ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
return size / 1024;
}
std::uint32_t
getKBUsedTransaction() override
{
std::uint32_t size =
transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
accountTxMap_.visit_all([&size](auto const& item) {
size += sizeof(AccountID) + sizeof(AccountTxData);
size += item.second.transactions.size() * sizeof(AccountTx);
});
return size / 1024;
}
void
closeLedgerDB() override
{
// No-op for in-memory database
}
void
closeTransactionDB() override
{
// No-op for in-memory database
}
~FlatmapDatabase()
{
// Concurrent maps need visit_all
accountTxMap_.visit_all(
[](auto& pair) { pair.second.transactions.clear(); });
accountTxMap_.clear();
transactionMap_.clear();
ledgers_.visit_all(
[](auto& pair) { pair.second.transactions.clear(); });
ledgers_.clear();
ledgerHashToSeq_.clear();
}
std::vector<std::shared_ptr<Transaction>>
getTxHistory(LedgerIndex startIndex) override
{
std::vector<std::shared_ptr<Transaction>> result;
transactionMap_.visit_all([&](auto const& item) {
if (item.second.second->getLgrSeq() >= startIndex)
{
result.push_back(item.second.first);
}
});
std::sort(
result.begin(), result.end(), [](auto const& a, auto const& b) {
return a->getLedger() > b->getLedger();
});
if (result.size() > 20)
{
result.resize(20);
}
return result;
}
// Helper function to handle limits
template <typename Container>
void
applyLimit(Container& container, std::size_t limit, bool bUnlimited)
{
if (!bUnlimited && limit > 0 && container.size() > limit)
{
container.resize(limit);
}
}
AccountTxs
getOldestAccountTxs(AccountTxOptions const& options) override
{
AccountTxs result;
accountTxMap_.visit(options.account, [&](auto const& item) {
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
result.push_back(tx.second);
}
});
});
std::sort(
result.begin(), result.end(), [](auto const& a, auto const& b) {
return a.second->getLgrSeq() < b.second->getLgrSeq();
});
applyLimit(result, options.limit, options.bUnlimited);
return result;
}
AccountTxs
getNewestAccountTxs(AccountTxOptions const& options) override
{
AccountTxs result;
accountTxMap_.visit(options.account, [&](auto const& item) {
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
result.push_back(tx.second);
}
});
});
std::sort(
result.begin(), result.end(), [](auto const& a, auto const& b) {
return a.second->getLgrSeq() > b.second->getLgrSeq();
});
applyLimit(result, options.limit, options.bUnlimited);
return result;
}
MetaTxsList
getOldestAccountTxsB(AccountTxOptions const& options) override
{
MetaTxsList result;
accountTxMap_.visit(options.account, [&](auto const& item) {
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
result.emplace_back(
tx.second.first->getSTransaction()
->getSerializer()
.peekData(),
tx.second.second->getAsObject()
.getSerializer()
.peekData(),
tx.first.first);
}
});
});
std::sort(
result.begin(), result.end(), [](auto const& a, auto const& b) {
return std::get<2>(a) < std::get<2>(b);
});
applyLimit(result, options.limit, options.bUnlimited);
return result;
}
MetaTxsList
getNewestAccountTxsB(AccountTxOptions const& options) override
{
MetaTxsList result;
accountTxMap_.visit(options.account, [&](auto const& item) {
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
result.emplace_back(
tx.second.first->getSTransaction()
->getSerializer()
.peekData(),
tx.second.second->getAsObject()
.getSerializer()
.peekData(),
tx.first.first);
}
});
});
std::sort(
result.begin(), result.end(), [](auto const& a, auto const& b) {
return std::get<2>(a) > std::get<2>(b);
});
applyLimit(result, options.limit, options.bUnlimited);
return result;
}
std::pair<AccountTxs, std::optional<AccountTxMarker>>
oldestAccountTxPage(AccountTxPageOptions const& options) override
{
AccountTxs result;
std::optional<AccountTxMarker> marker;
accountTxMap_.visit(options.account, [&](auto const& item) {
std::vector<std::pair<std::pair<uint32_t, uint32_t>, AccountTx>>
txs;
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
txs.emplace_back(tx);
}
});
std::sort(txs.begin(), txs.end(), [](auto const& a, auto const& b) {
return a.first < b.first;
});
auto it = txs.begin();
if (options.marker)
{
it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
return tx.first.first == options.marker->ledgerSeq &&
tx.first.second == options.marker->txnSeq;
});
if (it != txs.end())
++it;
}
for (; it != txs.end() &&
(options.limit == 0 || result.size() < options.limit);
++it)
{
result.push_back(it->second);
}
if (it != txs.end())
{
marker = AccountTxMarker{it->first.first, it->first.second};
}
});
return {result, marker};
}
std::pair<AccountTxs, std::optional<AccountTxMarker>>
newestAccountTxPage(AccountTxPageOptions const& options) override
{
AccountTxs result;
std::optional<AccountTxMarker> marker;
accountTxMap_.visit(options.account, [&](auto const& item) {
std::vector<std::pair<std::pair<uint32_t, uint32_t>, AccountTx>>
txs;
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
txs.emplace_back(tx);
}
});
std::sort(txs.begin(), txs.end(), [](auto const& a, auto const& b) {
return a.first > b.first;
});
auto it = txs.begin();
if (options.marker)
{
it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
return tx.first.first == options.marker->ledgerSeq &&
tx.first.second == options.marker->txnSeq;
});
if (it != txs.end())
++it;
}
for (; it != txs.end() &&
(options.limit == 0 || result.size() < options.limit);
++it)
{
result.push_back(it->second);
}
if (it != txs.end())
{
marker = AccountTxMarker{it->first.first, it->first.second};
}
});
return {result, marker};
}
std::pair<MetaTxsList, std::optional<AccountTxMarker>>
oldestAccountTxPageB(AccountTxPageOptions const& options) override
{
MetaTxsList result;
std::optional<AccountTxMarker> marker;
accountTxMap_.visit(options.account, [&](auto const& item) {
std::vector<std::tuple<uint32_t, uint32_t, AccountTx>> txs;
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
txs.emplace_back(
tx.first.first, tx.first.second, tx.second);
}
});
std::sort(txs.begin(), txs.end());
auto it = txs.begin();
if (options.marker)
{
it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
return std::get<0>(tx) == options.marker->ledgerSeq &&
std::get<1>(tx) == options.marker->txnSeq;
});
if (it != txs.end())
++it;
}
for (; it != txs.end() &&
(options.limit == 0 || result.size() < options.limit);
++it)
{
const auto& [_, __, tx] = *it;
result.emplace_back(
tx.first->getSTransaction()->getSerializer().peekData(),
tx.second->getAsObject().getSerializer().peekData(),
std::get<0>(*it));
}
if (it != txs.end())
{
marker = AccountTxMarker{std::get<0>(*it), std::get<1>(*it)};
}
});
return {result, marker};
}
std::pair<MetaTxsList, std::optional<AccountTxMarker>>
newestAccountTxPageB(AccountTxPageOptions const& options) override
{
MetaTxsList result;
std::optional<AccountTxMarker> marker;
accountTxMap_.visit(options.account, [&](auto const& item) {
std::vector<std::tuple<uint32_t, uint32_t, AccountTx>> txs;
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
txs.emplace_back(
tx.first.first, tx.first.second, tx.second);
}
});
std::sort(txs.begin(), txs.end(), std::greater<>());
auto it = txs.begin();
if (options.marker)
{
it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
return std::get<0>(tx) == options.marker->ledgerSeq &&
std::get<1>(tx) == options.marker->txnSeq;
});
if (it != txs.end())
++it;
}
for (; it != txs.end() &&
(options.limit == 0 || result.size() < options.limit);
++it)
{
const auto& [_, __, tx] = *it;
result.emplace_back(
tx.first->getSTransaction()->getSerializer().peekData(),
tx.second->getAsObject().getSerializer().peekData(),
std::get<0>(*it));
}
if (it != txs.end())
{
marker = AccountTxMarker{std::get<0>(*it), std::get<1>(*it)};
}
});
return {result, marker};
}
};
// Factory function
std::unique_ptr<SQLiteDatabase>
getFlatmapDatabase(Application& app, Config const& config, JobQueue& jobQueue)
{
return std::make_unique<FlatmapDatabase>(app, config, jobQueue);
}
} // namespace ripple
#endif // RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED

View File

@@ -28,9 +28,8 @@ private:
struct AccountTxData
{
AccountTxs transactions;
std::map<uint32_t, std::map<uint32_t, size_t>>
ledgerTxMap; // ledgerSeq -> txSeq -> index in transactions
std::map<uint32_t, AccountTxs>
ledgerTxMap; // ledgerSeq -> vector of AccountTx
};
Application& app_;
@@ -65,9 +64,12 @@ public:
return {};
std::shared_lock<std::shared_mutex> lock(mutex_);
if (transactionMap_.empty())
return std::nullopt;
return transactionMap_.begin()->second.second->getLgrSeq();
for (const auto& [ledgerSeq, ledgerData] : ledgers_)
{
if (!ledgerData.transactions.empty())
return ledgerSeq;
}
return std::nullopt;
}
std::optional<LedgerIndex>
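
Commit 4c41d32276 replaces the old lookup through transactionMap_.begin() (an iteration order unrelated to ledger sequence) with a walk over the sequence-ordered ledgers_ map, as the hunk above shows. A minimal, self-contained illustration of why the ordered map yields the first transaction ledger directly (names and the LedgerData layout are simplified stand-ins):

#include <cstdint>
#include <map>
#include <optional>
#include <vector>

using LedgerIndex = std::uint32_t;
struct LedgerData
{
    std::vector<int> transactions;  // stand-in for the real transaction map
};

// std::map iterates keys in ascending order, so the first ledger holding any
// transactions gives the minimum transaction ledger sequence.
std::optional<LedgerIndex>
minTxLedgerSeq(std::map<LedgerIndex, LedgerData> const& ledgers)
{
    for (auto const& [seq, data] : ledgers)
        if (!data.transactions.empty())
            return seq;
    return std::nullopt;
}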
@@ -163,14 +165,6 @@ public:
{
txIt = accountData.ledgerTxMap.erase(txIt);
}
accountData.transactions.erase(
std::remove_if(
accountData.transactions.begin(),
accountData.transactions.end(),
[ledgerSeq](const AccountTx& tx) {
return tx.second->getLgrSeq() < ledgerSeq;
}),
accountData.transactions.end());
}
}
std::size_t
@@ -193,7 +187,10 @@ public:
std::size_t count = 0;
for (const auto& [_, accountData] : accountTxMap_)
{
count += accountData.transactions.size();
for (const auto& [_, txVector] : accountData.ledgerTxMap)
{
count += txVector.size();
}
}
return count;
}
@@ -293,10 +290,7 @@ public:
accountTxMap_[account] = AccountTxData();
auto& accountData = accountTxMap_[account];
accountData.transactions.push_back(accTx);
accountData
.ledgerTxMap[seq][acceptedLedgerTx->getTxnSeq()] =
accountData.transactions.size() - 1;
accountData.ledgerTxMap[seq].push_back(accTx);
}
app_.getMasterTransaction().inLedger(
@@ -310,6 +304,46 @@ public:
// Overwrite Current Ledger
ledgers_[seq] = std::move(ledgerData);
ledgerHashToSeq_[ledger->info().hash] = seq;
// Automatic cleanup based on LEDGER_HISTORY (ported from
// FlatmapDatabase)
if (current)
{
auto const cutoffSeq =
ledger->info().seq > app_.config().LEDGER_HISTORY
? ledger->info().seq - app_.config().LEDGER_HISTORY
: 0;
if (cutoffSeq > 0)
{
// Delete old ledgers before cutoff
auto it = ledgers_.begin();
while (it != ledgers_.end() && it->first < cutoffSeq)
{
// Clean up transactions from this ledger
for (const auto& [txHash, _] : it->second.transactions)
{
transactionMap_.erase(txHash);
}
ledgerHashToSeq_.erase(it->second.info.hash);
it = ledgers_.erase(it);
}
// Clean up account transactions before cutoff
for (auto& [_, accountData] : accountTxMap_)
{
auto txIt = accountData.ledgerTxMap.begin();
while (txIt != accountData.ledgerTxMap.end() &&
txIt->first < cutoffSeq)
{
txIt = accountData.ledgerTxMap.erase(txIt);
}
}
app_.getLedgerMaster().clearPriorLedgers(cutoffSeq);
}
}
return true;
}
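
To make the cleanup threshold above concrete, here is a small sketch of the cutoff arithmetic (the LEDGER_HISTORY value and sequence numbers are invented for illustration; only the formula mirrors the code in this hunk):

#include <cassert>
#include <cstdint>

// Keep the newest `ledgerHistory` ledgers; everything strictly below the
// returned cutoff is eligible for deletion. 0 means nothing to trim yet.
std::uint32_t
cutoffFor(std::uint32_t seq, std::uint32_t ledgerHistory)
{
    return seq > ledgerHistory ? seq - ledgerHistory : 0;
}

int main()
{
    // With LEDGER_HISTORY = 256 and a freshly validated ledger 1000,
    // ledgers below 744 are pruned and 744..1000 are retained.
    assert(cutoffFor(1000, 256) == 744);

    // Until more than LEDGER_HISTORY ledgers exist, the cutoff stays at 0
    // and the cleanup branch is skipped.
    assert(cutoffFor(100, 256) == 0);
}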
@@ -463,11 +497,9 @@ public:
for (const auto& [_, accountData] : accountTxMap_)
{
size += sizeof(AccountID) + sizeof(AccountTxData);
size += accountData.transactions.size() * sizeof(AccountTx);
for (const auto& [_, innerMap] : accountData.ledgerTxMap)
for (const auto& [_, txVector] : accountData.ledgerTxMap)
{
size += sizeof(uint32_t) +
innerMap.size() * (sizeof(uint32_t) + sizeof(size_t));
size += sizeof(uint32_t) + txVector.size() * sizeof(AccountTx);
}
}
return size / 1024;
@@ -496,11 +528,9 @@ public:
for (const auto& [_, accountData] : accountTxMap_)
{
size += sizeof(AccountID) + sizeof(AccountTxData);
size += accountData.transactions.size() * sizeof(AccountTx);
for (const auto& [_, innerMap] : accountData.ledgerTxMap)
for (const auto& [_, txVector] : accountData.ledgerTxMap)
{
size += sizeof(uint32_t) +
innerMap.size() * (sizeof(uint32_t) + sizeof(size_t));
size += sizeof(uint32_t) + txVector.size() * sizeof(AccountTx);
}
}
return size / 1024;
@@ -605,14 +635,13 @@ public:
(options.bUnlimited || result.size() < options.limit);
++txIt)
{
for (const auto& [txSeq, txIndex] : txIt->second)
for (const auto& accountTx : txIt->second)
{
if (skipped < options.offset)
{
++skipped;
continue;
}
AccountTx const accountTx = accountData.transactions[txIndex];
std::uint32_t const inLedger = rangeCheckedCast<std::uint32_t>(
accountTx.second->getLgrSeq());
accountTx.first->setStatus(COMMITTED);
@@ -657,8 +686,7 @@ public:
++skipped;
continue;
}
AccountTx const accountTx =
accountData.transactions[innerRIt->second];
const AccountTx& accountTx = *innerRIt;
std::uint32_t const inLedger = rangeCheckedCast<std::uint32_t>(
accountTx.second->getLgrSeq());
accountTx.first->setLedger(inLedger);
@@ -692,14 +720,14 @@ public:
(options.bUnlimited || result.size() < options.limit);
++txIt)
{
for (const auto& [txSeq, txIndex] : txIt->second)
for (const auto& accountTx : txIt->second)
{
if (skipped < options.offset)
{
++skipped;
continue;
}
const auto& [txn, txMeta] = accountData.transactions[txIndex];
const auto& [txn, txMeta] = accountTx;
result.emplace_back(
txn->getSTransaction()->getSerializer().peekData(),
txMeta->getAsObject().getSerializer().peekData(),
@@ -743,8 +771,7 @@ public:
++skipped;
continue;
}
const auto& [txn, txMeta] =
accountData.transactions[innerRIt->second];
const auto& [txn, txMeta] = *innerRIt;
result.emplace_back(
txn->getSTransaction()->getSerializer().peekData(),
txMeta->getAsObject().getSerializer().peekData(),
@@ -816,11 +843,9 @@ public:
for (; txIt != txEnd; ++txIt)
{
std::uint32_t const ledgerSeq = txIt->first;
for (auto seqIt = txIt->second.begin();
seqIt != txIt->second.end();
++seqIt)
for (size_t txnSeq = 0; txnSeq < txIt->second.size(); ++txnSeq)
{
const auto& [txnSeq, index] = *seqIt;
const auto& accountTx = txIt->second[txnSeq];
if (lookingForMarker)
{
if (findLedger == ledgerSeq && findSeq == txnSeq)
@@ -833,16 +858,15 @@ public:
else if (numberOfResults == 0)
{
newmarker = {
rangeCheckedCast<std::uint32_t>(ledgerSeq), txnSeq};
rangeCheckedCast<std::uint32_t>(ledgerSeq),
static_cast<std::uint32_t>(txnSeq)};
return {newmarker, total};
}
Blob rawTxn = accountData.transactions[index]
.first->getSTransaction()
Blob rawTxn = accountTx.first->getSTransaction()
->getSerializer()
.peekData();
Blob rawMeta = accountData.transactions[index]
.second->getAsObject()
Blob rawMeta = accountTx.second->getAsObject()
.getSerializer()
.peekData();
@@ -871,11 +895,10 @@ public:
for (; rtxIt != rtxEnd; ++rtxIt)
{
std::uint32_t const ledgerSeq = rtxIt->first;
for (auto innerRIt = rtxIt->second.rbegin();
innerRIt != rtxIt->second.rend();
++innerRIt)
for (int txnSeq = rtxIt->second.size() - 1; txnSeq >= 0;
--txnSeq)
{
const auto& [txnSeq, index] = *innerRIt;
const auto& accountTx = rtxIt->second[txnSeq];
if (lookingForMarker)
{
if (findLedger == ledgerSeq && findSeq == txnSeq)
@@ -888,16 +911,15 @@ public:
else if (numberOfResults == 0)
{
newmarker = {
rangeCheckedCast<std::uint32_t>(ledgerSeq), txnSeq};
rangeCheckedCast<std::uint32_t>(ledgerSeq),
static_cast<std::uint32_t>(txnSeq)};
return {newmarker, total};
}
Blob rawTxn = accountData.transactions[index]
.first->getSTransaction()
Blob rawTxn = accountTx.first->getSTransaction()
->getSerializer()
.peekData();
Blob rawMeta = accountData.transactions[index]
.second->getAsObject()
Blob rawMeta = accountTx.second->getAsObject()
.getSerializer()
.peekData();

View File

@@ -19,7 +19,6 @@
#include <ripple/app/main/Application.h>
#include <ripple/app/rdb/RelationalDatabase.h>
#include <ripple/app/rdb/backend/FlatmapDatabase.h>
#include <ripple/app/rdb/backend/RWDBDatabase.h>
#include <ripple/core/ConfigSections.h>
#include <ripple/nodestore/DatabaseShard.h>
@@ -41,7 +40,6 @@ RelationalDatabase::init(
bool use_sqlite = false;
bool use_postgres = false;
bool use_rwdb = false;
bool use_flatmap = false;
if (config.reporting())
{
@@ -60,10 +58,6 @@ RelationalDatabase::init(
{
use_rwdb = true;
}
else if (boost::iequals(get(rdb_section, "backend"), "flatmap"))
{
use_flatmap = true;
}
else
{
Throw<std::runtime_error>(
@@ -89,10 +83,6 @@ RelationalDatabase::init(
{
return getRWDBDatabase(app, config, jobQueue);
}
else if (use_flatmap)
{
return getFlatmapDatabase(app, config, jobQueue);
}
return std::unique_ptr<RelationalDatabase>();
}
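
With the flatmap branch removed, the selection above is left with sqlite (the default), rwdb, and the reporting-mode postgres path; any other value falls through to the Throw<std::runtime_error> branch. Assuming the section read here is the usual rippled-style [relational_db] stanza (the section name is not visible in this hunk, so treat it as an assumption), opting into the in-memory relational backend would look roughly like:

[relational_db]
backend=rwdb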

View File

@@ -363,7 +363,7 @@ public:
(!section("node_db").empty() &&
(boost::beast::iequals(get(section("node_db"), "type"), "rwdb") ||
boost::beast::iequals(
get(section("node_db"), "type"), "flatmap")));
get(section("node_db"), "type"), "memory")));
// RHNOTE: memory type is not selected for here because it breaks
// tests
return isMem;
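
Note that this predicate looks at the NodeStore configuration rather than the relational backend: after the change, "rwdb" and "memory" are the two [node_db] type values treated as fully in-memory here, flatmap having been removed. A hypothetical stanza this check would now match:

[node_db]
type=rwdb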

View File

@@ -1,235 +0,0 @@
#include <ripple/basics/contract.h>
#include <ripple/nodestore/Factory.h>
#include <ripple/nodestore/Manager.h>
#include <ripple/nodestore/impl/DecodedBlob.h>
#include <ripple/nodestore/impl/EncodedBlob.h>
#include <ripple/nodestore/impl/codec.h>
#include <boost/beast/core/string.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/unordered/concurrent_flat_map.hpp>
#include <memory>
#include <mutex>
namespace ripple {
namespace NodeStore {
class FlatmapBackend : public Backend
{
private:
std::string name_;
beast::Journal journal_;
bool isOpen_{false};
struct base_uint_hasher
{
using result_type = std::size_t;
result_type
operator()(base_uint<256> const& value) const
{
return hardened_hash<>{}(value);
}
};
using DataStore = boost::unordered::concurrent_flat_map<
uint256,
std::vector<std::uint8_t>, // Store compressed blob data
base_uint_hasher>;
DataStore table_;
public:
FlatmapBackend(
size_t keyBytes,
Section const& keyValues,
beast::Journal journal)
: name_(get(keyValues, "path")), journal_(journal)
{
boost::ignore_unused(journal_);
if (name_.empty())
name_ = "node_db";
}
~FlatmapBackend() override
{
close();
}
std::string
getName() override
{
return name_;
}
void
open(bool createIfMissing) override
{
if (isOpen_)
Throw<std::runtime_error>("already open");
isOpen_ = true;
}
bool
isOpen() override
{
return isOpen_;
}
void
close() override
{
table_.clear();
isOpen_ = false;
}
Status
fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
{
if (!isOpen_)
return notFound;
uint256 const hash(uint256::fromVoid(key));
bool found = table_.visit(hash, [&](const auto& key_value_pair) {
nudb::detail::buffer bf;
auto const result = nodeobject_decompress(
key_value_pair.second.data(), key_value_pair.second.size(), bf);
DecodedBlob decoded(hash.data(), result.first, result.second);
if (!decoded.wasOk())
{
*pObject = nullptr;
return;
}
*pObject = decoded.createObject();
});
return found ? (*pObject ? ok : dataCorrupt) : notFound;
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
Status status = fetch(h->begin(), &nObj);
if (status != ok)
results.push_back({});
else
results.push_back(nObj);
}
return {results, ok};
}
void
store(std::shared_ptr<NodeObject> const& object) override
{
if (!isOpen_)
return;
if (!object)
return;
EncodedBlob encoded(object);
nudb::detail::buffer bf;
auto const result =
nodeobject_compress(encoded.getData(), encoded.getSize(), bf);
std::vector<std::uint8_t> compressed(
static_cast<const std::uint8_t*>(result.first),
static_cast<const std::uint8_t*>(result.first) + result.second);
table_.insert_or_assign(object->getHash(), std::move(compressed));
}
void
storeBatch(Batch const& batch) override
{
for (auto const& e : batch)
store(e);
}
void
sync() override
{
}
void
for_each(std::function<void(std::shared_ptr<NodeObject>)> f) override
{
if (!isOpen_)
return;
table_.visit_all([&f](const auto& entry) {
nudb::detail::buffer bf;
auto const result = nodeobject_decompress(
entry.second.data(), entry.second.size(), bf);
DecodedBlob decoded(
entry.first.data(), result.first, result.second);
if (decoded.wasOk())
f(decoded.createObject());
});
}
int
getWriteLoad() override
{
return 0;
}
void
setDeletePath() override
{
close();
}
int
fdRequired() const override
{
return 0;
}
private:
size_t
size() const
{
return table_.size();
}
};
class FlatmapFactory : public Factory
{
public:
FlatmapFactory()
{
Manager::instance().insert(*this);
}
~FlatmapFactory() override
{
Manager::instance().erase(*this);
}
std::string
getName() const override
{
return "Flatmap";
}
std::unique_ptr<Backend>
createInstance(
size_t keyBytes,
Section const& keyValues,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal) override
{
return std::make_unique<FlatmapBackend>(keyBytes, keyValues, journal);
}
};
static FlatmapFactory flatmapFactory;
} // namespace NodeStore
} // namespace ripple

View File

@@ -216,6 +216,10 @@ public:
}
BEAST_EXPECT(store.getLastRotated() == lastRotated);
SQLiteDatabase* const db =
dynamic_cast<SQLiteDatabase*>(&env.app().getRelationalDatabase());
BEAST_EXPECT(*db->getTransactionsMinLedgerSeq() == 3);
for (auto i = 3; i < deleteInterval + lastRotated; ++i)
{
ledgers.emplace(