From 3c4c9c87c5b12960a2b3f3ea9a9faa06cfbe0045 Mon Sep 17 00:00:00 2001 From: Niq Dudfield Date: Tue, 26 Aug 2025 11:00:58 +0700 Subject: [PATCH 01/12] Fix rwdb memory leak with online_delete and remove flatmap (#570) Co-authored-by: Denis Angell --- Builds/CMake/RippledCore.cmake | 6 +- Builds/levelization/results/ordering.txt | 4 + cfg/rippled-example.cfg | 31 +- src/ripple/app/rdb/backend/FlatmapDatabase.h | 851 ------------------ src/ripple/app/rdb/backend/RWDBDatabase.h | 197 ++-- .../app/rdb/impl/RelationalDatabase.cpp | 10 - src/ripple/core/Config.h | 4 +- src/ripple/core/impl/Config.cpp | 25 +- .../nodestore/backend/FlatmapFactory.cpp | 235 ----- src/test/app/SHAMapStore_test.cpp | 4 + src/test/core/Config_test.cpp | 92 ++ src/test/rdb/RelationalDatabase_test.cpp | 756 ++++++++++++++++ 12 files changed, 1018 insertions(+), 1197 deletions(-) delete mode 100644 src/ripple/app/rdb/backend/FlatmapDatabase.h delete mode 100644 src/ripple/nodestore/backend/FlatmapFactory.cpp create mode 100644 src/test/rdb/RelationalDatabase_test.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 6fc33cdee..6b876997b 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -548,7 +548,6 @@ target_sources (rippled PRIVATE src/ripple/nodestore/backend/CassandraFactory.cpp src/ripple/nodestore/backend/RWDBFactory.cpp src/ripple/nodestore/backend/MemoryFactory.cpp - src/ripple/nodestore/backend/FlatmapFactory.cpp src/ripple/nodestore/backend/NuDBFactory.cpp src/ripple/nodestore/backend/NullFactory.cpp src/ripple/nodestore/backend/RocksDBFactory.cpp @@ -995,6 +994,11 @@ if (tests) subdir: resource #]===============================] src/test/resource/Logic_test.cpp + #[===============================[ + test sources: + subdir: rdb + #]===============================] + src/test/rdb/RelationalDatabase_test.cpp #[===============================[ test sources: subdir: rpc diff --git a/Builds/levelization/results/ordering.txt b/Builds/levelization/results/ordering.txt index 12df1a86e..e409855a7 100644 --- a/Builds/levelization/results/ordering.txt +++ b/Builds/levelization/results/ordering.txt @@ -186,6 +186,10 @@ test.protocol > ripple.crypto test.protocol > ripple.json test.protocol > ripple.protocol test.protocol > test.toplevel +test.rdb > ripple.app +test.rdb > ripple.core +test.rdb > test.jtx +test.rdb > test.toplevel test.resource > ripple.basics test.resource > ripple.beast test.resource > ripple.resource diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index 41fe12158..80ec0a980 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -1063,14 +1063,16 @@ # RWDB is recommended for Validator and Peer nodes that are not required to # store history. # -# RWDB maintains its high speed regardless of the amount of history -# stored. Online delete should NOT be used instead RWDB will use the -# ledger_history config value to determine how many ledgers to keep in memory. -# -# Required keys for NuDB, RWDB and RocksDB: +# Required keys for NuDB and RocksDB: # # path Location to store the database # +# Required keys for RWDB: +# +# online_delete Required. RWDB stores data in memory and will +# grow unbounded without online_delete. See the +# online_delete section below. +# # Required keys for Cassandra: # # contact_points IP of a node in the Cassandra cluster @@ -1110,7 +1112,17 @@ # if sufficient IOPS capacity is available. # Default 0. 
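+#
+# Example RWDB stanza (illustrative only; values are placeholders and
+# should be tuned per deployment - online_delete must be at least 256
+# and at least ledger_history, per the online_delete section below):
+#
+#   [node_db]
+#   type=rwdb
+#   path=main
+#   online_delete=256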
# -# Optional keys for NuDB or RocksDB: +# online_delete for RWDB, NuDB and RocksDB: +# +# online_delete Minimum value of 256. Enable automatic purging +# of older ledger information. Maintain at least this +# number of ledger records online. Must be greater +# than or equal to ledger_history. +# +# REQUIRED for RWDB to prevent out-of-memory errors. +# Optional for NuDB and RocksDB. +# +# Optional keys for NuDB and RocksDB: # # earliest_seq The default is 32570 to match the XRP ledger # network's earliest allowed sequence. Alternate @@ -1120,12 +1132,7 @@ # it must be defined with the same value in both # sections. # -# online_delete Minimum value of 256. Enable automatic purging -# of older ledger information. Maintain at least this -# number of ledger records online. Must be greater -# than or equal to ledger_history. If using RWDB -# this value is ignored. -# + # These keys modify the behavior of online_delete, and thus are only # relevant if online_delete is defined and non-zero: # diff --git a/src/ripple/app/rdb/backend/FlatmapDatabase.h b/src/ripple/app/rdb/backend/FlatmapDatabase.h deleted file mode 100644 index 00927b1d4..000000000 --- a/src/ripple/app/rdb/backend/FlatmapDatabase.h +++ /dev/null @@ -1,851 +0,0 @@ -#ifndef RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED -#define RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -namespace ripple { - -struct base_uint_hasher -{ - using result_type = std::size_t; - - result_type - operator()(base_uint<256> const& value) const - { - return hardened_hash<>{}(value); - } - - result_type - operator()(AccountID const& value) const - { - return hardened_hash<>{}(value); - } -}; - -class FlatmapDatabase : public SQLiteDatabase -{ -private: - struct LedgerData - { - LedgerInfo info; - boost::unordered:: - concurrent_flat_map - transactions; - }; - - struct AccountTxData - { - boost::unordered:: - concurrent_flat_map, AccountTx> - transactions; - }; - - Application& app_; - - boost::unordered::concurrent_flat_map ledgers_; - boost::unordered:: - concurrent_flat_map - ledgerHashToSeq_; - boost::unordered::concurrent_flat_map - transactionMap_; - boost::unordered:: - concurrent_flat_map - accountTxMap_; - -public: - FlatmapDatabase(Application& app, Config const& config, JobQueue& jobQueue) - : app_(app) - { - } - - std::optional - getMinLedgerSeq() override - { - std::optional minSeq; - ledgers_.visit_all([&minSeq](auto const& pair) { - if (!minSeq || pair.first < *minSeq) - { - minSeq = pair.first; - } - }); - return minSeq; - } - - std::optional - getTransactionsMinLedgerSeq() override - { - std::optional minSeq; - transactionMap_.visit_all([&minSeq](auto const& pair) { - LedgerIndex seq = pair.second.second->getLgrSeq(); - if (!minSeq || seq < *minSeq) - { - minSeq = seq; - } - }); - return minSeq; - } - - std::optional - getAccountTransactionsMinLedgerSeq() override - { - std::optional minSeq; - accountTxMap_.visit_all([&minSeq](auto const& pair) { - pair.second.transactions.visit_all([&minSeq](auto const& tx) { - if (!minSeq || tx.first.first < *minSeq) - { - minSeq = tx.first.first; - } - }); - }); - return minSeq; - } - - std::optional - getMaxLedgerSeq() override - { - std::optional maxSeq; - ledgers_.visit_all([&maxSeq](auto const& pair) { - if (!maxSeq || pair.first > *maxSeq) - { - maxSeq = pair.first; - } - }); - return maxSeq; - } - void - deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) override - { - 
ledgers_.visit(ledgerSeq, [this](auto& item) { - item.second.transactions.visit_all([this](auto const& txPair) { - transactionMap_.erase(txPair.first); - }); - item.second.transactions.clear(); - }); - - accountTxMap_.visit_all([ledgerSeq](auto& item) { - item.second.transactions.erase_if([ledgerSeq](auto const& tx) { - return tx.first.first == ledgerSeq; - }); - }); - } - - void - deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) override - { - ledgers_.erase_if([this, ledgerSeq](auto const& item) { - if (item.first < ledgerSeq) - { - item.second.transactions.visit_all([this](auto const& txPair) { - transactionMap_.erase(txPair.first); - }); - ledgerHashToSeq_.erase(item.second.info.hash); - return true; - } - return false; - }); - - accountTxMap_.visit_all([ledgerSeq](auto& item) { - item.second.transactions.erase_if([ledgerSeq](auto const& tx) { - return tx.first.first < ledgerSeq; - }); - }); - } - - void - deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override - { - ledgers_.visit_all([this, ledgerSeq](auto& item) { - if (item.first < ledgerSeq) - { - item.second.transactions.visit_all([this](auto const& txPair) { - transactionMap_.erase(txPair.first); - }); - item.second.transactions.clear(); - } - }); - - accountTxMap_.visit_all([ledgerSeq](auto& item) { - item.second.transactions.erase_if([ledgerSeq](auto const& tx) { - return tx.first.first < ledgerSeq; - }); - }); - } - - void - deleteAccountTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override - { - accountTxMap_.visit_all([ledgerSeq](auto& item) { - item.second.transactions.erase_if([ledgerSeq](auto const& tx) { - return tx.first.first < ledgerSeq; - }); - }); - } - std::size_t - getTransactionCount() override - { - return transactionMap_.size(); - } - - std::size_t - getAccountTransactionCount() override - { - std::size_t count = 0; - accountTxMap_.visit_all([&count](auto const& item) { - count += item.second.transactions.size(); - }); - return count; - } - - CountMinMax - getLedgerCountMinMax() override - { - CountMinMax result{0, 0, 0}; - ledgers_.visit_all([&result](auto const& item) { - result.numberOfRows++; - if (result.minLedgerSequence == 0 || - item.first < result.minLedgerSequence) - { - result.minLedgerSequence = item.first; - } - if (item.first > result.maxLedgerSequence) - { - result.maxLedgerSequence = item.first; - } - }); - return result; - } - - bool - saveValidatedLedger( - std::shared_ptr const& ledger, - bool current) override - { - try - { - LedgerData ledgerData; - ledgerData.info = ledger->info(); - - auto aLedger = std::make_shared(ledger, app_); - for (auto const& acceptedLedgerTx : *aLedger) - { - auto const& txn = acceptedLedgerTx->getTxn(); - auto const& meta = acceptedLedgerTx->getMeta(); - auto const& id = txn->getTransactionID(); - - std::string reason; - auto accTx = std::make_pair( - std::make_shared(txn, reason, app_), - std::make_shared(meta)); - - ledgerData.transactions.emplace(id, accTx); - transactionMap_.emplace(id, accTx); - - for (auto const& account : meta.getAffectedAccounts()) - { - accountTxMap_.visit(account, [&](auto& data) { - data.second.transactions.emplace( - std::make_pair( - ledger->info().seq, - acceptedLedgerTx->getTxnSeq()), - accTx); - }); - } - } - - ledgers_.emplace(ledger->info().seq, std::move(ledgerData)); - ledgerHashToSeq_.emplace(ledger->info().hash, ledger->info().seq); - - if (current) - { - auto const cutoffSeq = - ledger->info().seq > app_.config().LEDGER_HISTORY - ? 
ledger->info().seq - app_.config().LEDGER_HISTORY - : 0; - - if (cutoffSeq > 0) - { - const std::size_t BATCH_SIZE = 128; - std::size_t deleted = 0; - - ledgers_.erase_if([&](auto const& item) { - if (deleted >= BATCH_SIZE) - return false; - - if (item.first < cutoffSeq) - { - item.second.transactions.visit_all( - [this](auto const& txPair) { - transactionMap_.erase(txPair.first); - }); - ledgerHashToSeq_.erase(item.second.info.hash); - deleted++; - return true; - } - return false; - }); - - if (deleted > 0) - { - accountTxMap_.visit_all([cutoffSeq](auto& item) { - item.second.transactions.erase_if( - [cutoffSeq](auto const& tx) { - return tx.first.first < cutoffSeq; - }); - }); - } - - app_.getLedgerMaster().clearPriorLedgers(cutoffSeq); - } - } - - return true; - } - catch (std::exception const&) - { - deleteTransactionByLedgerSeq(ledger->info().seq); - return false; - } - } - - std::optional - getLedgerInfoByIndex(LedgerIndex ledgerSeq) override - { - std::optional result; - ledgers_.visit(ledgerSeq, [&result](auto const& item) { - result = item.second.info; - }); - return result; - } - - std::optional - getNewestLedgerInfo() override - { - std::optional result; - ledgers_.visit_all([&result](auto const& item) { - if (!result || item.second.info.seq > result->seq) - { - result = item.second.info; - } - }); - return result; - } - - std::optional - getLimitedOldestLedgerInfo(LedgerIndex ledgerFirstIndex) override - { - std::optional result; - ledgers_.visit_all([&](auto const& item) { - if (item.first >= ledgerFirstIndex && - (!result || item.first < result->seq)) - { - result = item.second.info; - } - }); - return result; - } - - std::optional - getLimitedNewestLedgerInfo(LedgerIndex ledgerFirstIndex) override - { - std::optional result; - ledgers_.visit_all([&](auto const& item) { - if (item.first >= ledgerFirstIndex && - (!result || item.first > result->seq)) - { - result = item.second.info; - } - }); - return result; - } - - std::optional - getLedgerInfoByHash(uint256 const& ledgerHash) override - { - std::optional result; - ledgerHashToSeq_.visit(ledgerHash, [this, &result](auto const& item) { - ledgers_.visit(item.second, [&result](auto const& item) { - result = item.second.info; - }); - }); - return result; - } - uint256 - getHashByIndex(LedgerIndex ledgerIndex) override - { - uint256 result; - ledgers_.visit(ledgerIndex, [&result](auto const& item) { - result = item.second.info.hash; - }); - return result; - } - - std::optional - getHashesByIndex(LedgerIndex ledgerIndex) override - { - std::optional result; - ledgers_.visit(ledgerIndex, [&result](auto const& item) { - result = LedgerHashPair{ - item.second.info.hash, item.second.info.parentHash}; - }); - return result; - } - - std::map - getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) override - { - std::map result; - ledgers_.visit_all([&](auto const& item) { - if (item.first >= minSeq && item.first <= maxSeq) - { - result[item.first] = LedgerHashPair{ - item.second.info.hash, item.second.info.parentHash}; - } - }); - return result; - } - - std::variant - getTransaction( - uint256 const& id, - std::optional> const& range, - error_code_i& ec) override - { - std::variant result = TxSearched::unknown; - transactionMap_.visit(id, [&](auto const& item) { - auto const& tx = item.second; - if (!range || - (range->lower() <= tx.second->getLgrSeq() && - tx.second->getLgrSeq() <= range->upper())) - { - result = tx; - } - else - { - result = TxSearched::all; - } - }); - return result; - } - - bool - ledgerDbHasSpace(Config const& 
config) override - { - return true; // In-memory database always has space - } - - bool - transactionDbHasSpace(Config const& config) override - { - return true; // In-memory database always has space - } - - std::uint32_t - getKBUsedAll() override - { - std::uint32_t size = sizeof(*this); - size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData)); - size += - ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex)); - size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx)); - accountTxMap_.visit_all([&size](auto const& item) { - size += sizeof(AccountID) + sizeof(AccountTxData); - size += item.second.transactions.size() * sizeof(AccountTx); - }); - return size / 1024; // Convert to KB - } - - std::uint32_t - getKBUsedLedger() override - { - std::uint32_t size = - ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData)); - size += - ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex)); - return size / 1024; - } - - std::uint32_t - getKBUsedTransaction() override - { - std::uint32_t size = - transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx)); - accountTxMap_.visit_all([&size](auto const& item) { - size += sizeof(AccountID) + sizeof(AccountTxData); - size += item.second.transactions.size() * sizeof(AccountTx); - }); - return size / 1024; - } - - void - closeLedgerDB() override - { - // No-op for in-memory database - } - - void - closeTransactionDB() override - { - // No-op for in-memory database - } - - ~FlatmapDatabase() - { - // Concurrent maps need visit_all - accountTxMap_.visit_all( - [](auto& pair) { pair.second.transactions.clear(); }); - accountTxMap_.clear(); - - transactionMap_.clear(); - - ledgers_.visit_all( - [](auto& pair) { pair.second.transactions.clear(); }); - ledgers_.clear(); - - ledgerHashToSeq_.clear(); - } - - std::vector> - getTxHistory(LedgerIndex startIndex) override - { - std::vector> result; - transactionMap_.visit_all([&](auto const& item) { - if (item.second.second->getLgrSeq() >= startIndex) - { - result.push_back(item.second.first); - } - }); - std::sort( - result.begin(), result.end(), [](auto const& a, auto const& b) { - return a->getLedger() > b->getLedger(); - }); - if (result.size() > 20) - { - result.resize(20); - } - return result; - } - // Helper function to handle limits - template - void - applyLimit(Container& container, std::size_t limit, bool bUnlimited) - { - if (!bUnlimited && limit > 0 && container.size() > limit) - { - container.resize(limit); - } - } - - AccountTxs - getOldestAccountTxs(AccountTxOptions const& options) override - { - AccountTxs result; - accountTxMap_.visit(options.account, [&](auto const& item) { - item.second.transactions.visit_all([&](auto const& tx) { - if (tx.first.first >= options.minLedger && - tx.first.first <= options.maxLedger) - { - result.push_back(tx.second); - } - }); - }); - std::sort( - result.begin(), result.end(), [](auto const& a, auto const& b) { - return a.second->getLgrSeq() < b.second->getLgrSeq(); - }); - applyLimit(result, options.limit, options.bUnlimited); - return result; - } - - AccountTxs - getNewestAccountTxs(AccountTxOptions const& options) override - { - AccountTxs result; - accountTxMap_.visit(options.account, [&](auto const& item) { - item.second.transactions.visit_all([&](auto const& tx) { - if (tx.first.first >= options.minLedger && - tx.first.first <= options.maxLedger) - { - result.push_back(tx.second); - } - }); - }); - std::sort( - result.begin(), result.end(), [](auto const& a, auto const& b) { - return 
a.second->getLgrSeq() > b.second->getLgrSeq(); - }); - applyLimit(result, options.limit, options.bUnlimited); - return result; - } - - MetaTxsList - getOldestAccountTxsB(AccountTxOptions const& options) override - { - MetaTxsList result; - accountTxMap_.visit(options.account, [&](auto const& item) { - item.second.transactions.visit_all([&](auto const& tx) { - if (tx.first.first >= options.minLedger && - tx.first.first <= options.maxLedger) - { - result.emplace_back( - tx.second.first->getSTransaction() - ->getSerializer() - .peekData(), - tx.second.second->getAsObject() - .getSerializer() - .peekData(), - tx.first.first); - } - }); - }); - std::sort( - result.begin(), result.end(), [](auto const& a, auto const& b) { - return std::get<2>(a) < std::get<2>(b); - }); - applyLimit(result, options.limit, options.bUnlimited); - return result; - } - - MetaTxsList - getNewestAccountTxsB(AccountTxOptions const& options) override - { - MetaTxsList result; - accountTxMap_.visit(options.account, [&](auto const& item) { - item.second.transactions.visit_all([&](auto const& tx) { - if (tx.first.first >= options.minLedger && - tx.first.first <= options.maxLedger) - { - result.emplace_back( - tx.second.first->getSTransaction() - ->getSerializer() - .peekData(), - tx.second.second->getAsObject() - .getSerializer() - .peekData(), - tx.first.first); - } - }); - }); - std::sort( - result.begin(), result.end(), [](auto const& a, auto const& b) { - return std::get<2>(a) > std::get<2>(b); - }); - applyLimit(result, options.limit, options.bUnlimited); - return result; - } - std::pair> - oldestAccountTxPage(AccountTxPageOptions const& options) override - { - AccountTxs result; - std::optional marker; - - accountTxMap_.visit(options.account, [&](auto const& item) { - std::vector, AccountTx>> - txs; - item.second.transactions.visit_all([&](auto const& tx) { - if (tx.first.first >= options.minLedger && - tx.first.first <= options.maxLedger) - { - txs.emplace_back(tx); - } - }); - - std::sort(txs.begin(), txs.end(), [](auto const& a, auto const& b) { - return a.first < b.first; - }); - - auto it = txs.begin(); - if (options.marker) - { - it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) { - return tx.first.first == options.marker->ledgerSeq && - tx.first.second == options.marker->txnSeq; - }); - if (it != txs.end()) - ++it; - } - - for (; it != txs.end() && - (options.limit == 0 || result.size() < options.limit); - ++it) - { - result.push_back(it->second); - } - - if (it != txs.end()) - { - marker = AccountTxMarker{it->first.first, it->first.second}; - } - }); - - return {result, marker}; - } - - std::pair> - newestAccountTxPage(AccountTxPageOptions const& options) override - { - AccountTxs result; - std::optional marker; - - accountTxMap_.visit(options.account, [&](auto const& item) { - std::vector, AccountTx>> - txs; - item.second.transactions.visit_all([&](auto const& tx) { - if (tx.first.first >= options.minLedger && - tx.first.first <= options.maxLedger) - { - txs.emplace_back(tx); - } - }); - - std::sort(txs.begin(), txs.end(), [](auto const& a, auto const& b) { - return a.first > b.first; - }); - - auto it = txs.begin(); - if (options.marker) - { - it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) { - return tx.first.first == options.marker->ledgerSeq && - tx.first.second == options.marker->txnSeq; - }); - if (it != txs.end()) - ++it; - } - - for (; it != txs.end() && - (options.limit == 0 || result.size() < options.limit); - ++it) - { - result.push_back(it->second); - } - - if (it != 
txs.end()) - { - marker = AccountTxMarker{it->first.first, it->first.second}; - } - }); - - return {result, marker}; - } - - std::pair> - oldestAccountTxPageB(AccountTxPageOptions const& options) override - { - MetaTxsList result; - std::optional marker; - - accountTxMap_.visit(options.account, [&](auto const& item) { - std::vector> txs; - item.second.transactions.visit_all([&](auto const& tx) { - if (tx.first.first >= options.minLedger && - tx.first.first <= options.maxLedger) - { - txs.emplace_back( - tx.first.first, tx.first.second, tx.second); - } - }); - - std::sort(txs.begin(), txs.end()); - - auto it = txs.begin(); - if (options.marker) - { - it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) { - return std::get<0>(tx) == options.marker->ledgerSeq && - std::get<1>(tx) == options.marker->txnSeq; - }); - if (it != txs.end()) - ++it; - } - - for (; it != txs.end() && - (options.limit == 0 || result.size() < options.limit); - ++it) - { - const auto& [_, __, tx] = *it; - result.emplace_back( - tx.first->getSTransaction()->getSerializer().peekData(), - tx.second->getAsObject().getSerializer().peekData(), - std::get<0>(*it)); - } - - if (it != txs.end()) - { - marker = AccountTxMarker{std::get<0>(*it), std::get<1>(*it)}; - } - }); - - return {result, marker}; - } - - std::pair> - newestAccountTxPageB(AccountTxPageOptions const& options) override - { - MetaTxsList result; - std::optional marker; - - accountTxMap_.visit(options.account, [&](auto const& item) { - std::vector> txs; - item.second.transactions.visit_all([&](auto const& tx) { - if (tx.first.first >= options.minLedger && - tx.first.first <= options.maxLedger) - { - txs.emplace_back( - tx.first.first, tx.first.second, tx.second); - } - }); - - std::sort(txs.begin(), txs.end(), std::greater<>()); - - auto it = txs.begin(); - if (options.marker) - { - it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) { - return std::get<0>(tx) == options.marker->ledgerSeq && - std::get<1>(tx) == options.marker->txnSeq; - }); - if (it != txs.end()) - ++it; - } - - for (; it != txs.end() && - (options.limit == 0 || result.size() < options.limit); - ++it) - { - const auto& [_, __, tx] = *it; - result.emplace_back( - tx.first->getSTransaction()->getSerializer().peekData(), - tx.second->getAsObject().getSerializer().peekData(), - std::get<0>(*it)); - } - - if (it != txs.end()) - { - marker = AccountTxMarker{std::get<0>(*it), std::get<1>(*it)}; - } - }); - - return {result, marker}; - } -}; - -// Factory function -std::unique_ptr -getFlatmapDatabase(Application& app, Config const& config, JobQueue& jobQueue) -{ - return std::make_unique(app, config, jobQueue); -} - -} // namespace ripple -#endif // RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED diff --git a/src/ripple/app/rdb/backend/RWDBDatabase.h b/src/ripple/app/rdb/backend/RWDBDatabase.h index 3981691bc..f91e50cda 100644 --- a/src/ripple/app/rdb/backend/RWDBDatabase.h +++ b/src/ripple/app/rdb/backend/RWDBDatabase.h @@ -28,9 +28,8 @@ private: struct AccountTxData { - AccountTxs transactions; - std::map> - ledgerTxMap; // ledgerSeq -> txSeq -> index in transactions + std::map> + ledgerTxMap; // ledgerSeq -> vector of transactions }; Application& app_; @@ -65,9 +64,12 @@ public: return {}; std::shared_lock lock(mutex_); - if (transactionMap_.empty()) - return std::nullopt; - return transactionMap_.begin()->second.second->getLgrSeq(); + for (const auto& [ledgerSeq, ledgerData] : ledgers_) + { + if (!ledgerData.transactions.empty()) + return ledgerSeq; + } + return std::nullopt; } 
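+    // Note: the scan above relies on ledgers_ being an ordered map keyed
+    // by ledger sequence, so the first entry with a non-empty transaction
+    // set is the minimum transaction ledger sequence; no separate index of
+    // transactions has to be maintained just to answer this query.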
 std::optional<LedgerIndex>
@@ -163,14 +165,6 @@ public:
             {
                 txIt = accountData.ledgerTxMap.erase(txIt);
             }
-            accountData.transactions.erase(
-                std::remove_if(
-                    accountData.transactions.begin(),
-                    accountData.transactions.end(),
-                    [ledgerSeq](const AccountTx& tx) {
-                        return tx.second->getLgrSeq() < ledgerSeq;
-                    }),
-                accountData.transactions.end());
         }
     }
     std::size_t
@@ -193,7 +187,10 @@ public:
         std::size_t count = 0;
         for (const auto& [_, accountData] : accountTxMap_)
         {
-            count += accountData.transactions.size();
+            for (const auto& [_, txVector] : accountData.ledgerTxMap)
+            {
+                count += txVector.size();
+            }
         }
         return count;
     }
@@ -293,10 +290,7 @@ public:
                         accountTxMap_[account] = AccountTxData();
                     auto& accountData = accountTxMap_[account];
-                    accountData.transactions.push_back(accTx);
-                    accountData
-                        .ledgerTxMap[seq][acceptedLedgerTx->getTxnSeq()] =
-                        accountData.transactions.size() - 1;
+                    accountData.ledgerTxMap[seq].push_back(accTx);
                 }
 
                 app_.getMasterTransaction().inLedger(
@@ -451,59 +445,108 @@ public:
         return true;  // In-memory database always has space
     }
 
+    // Red-black tree node overhead per map entry
+    static constexpr size_t MAP_NODE_OVERHEAD = 40;
+
+private:
+    std::uint64_t
+    getBytesUsedLedger_unlocked() const
+    {
+        std::uint64_t size = 0;
+
+        // Count structural overhead of ledger storage, including map node
+        // overhead. Note: sizeof(LedgerData) includes the map container for
+        // transactions, but not the actual transaction data
+        size += ledgers_.size() *
+            (sizeof(LedgerIndex) + sizeof(LedgerData) + MAP_NODE_OVERHEAD);
+
+        // Add the transaction map nodes inside each ledger (ledger's view of
+        // its transactions)
+        for (const auto& [_, ledgerData] : ledgers_)
+        {
+            size += ledgerData.transactions.size() *
+                (sizeof(uint256) + sizeof(AccountTx) + MAP_NODE_OVERHEAD);
+        }
+
+        // Count the ledger hash to sequence lookup map
+        size += ledgerHashToSeq_.size() *
+            (sizeof(uint256) + sizeof(LedgerIndex) + MAP_NODE_OVERHEAD);
+
+        return size;
+    }
+
+    std::uint64_t
+    getBytesUsedTransaction_unlocked() const
+    {
+        if (!useTxTables_)
+            return 0;
+
+        std::uint64_t size = 0;
+
+        // Count structural overhead of transaction map
+        // sizeof(AccountTx) is just the size of two shared_ptrs (~32 bytes)
+        size += transactionMap_.size() *
+            (sizeof(uint256) + sizeof(AccountTx) + MAP_NODE_OVERHEAD);
+
+        // Add actual transaction and metadata data sizes
+        for (const auto& [_, accountTx] : transactionMap_)
+        {
+            if (accountTx.first)
+                size += accountTx.first->getSTransaction()
+                            ->getSerializer()
+                            .peekData()
+                            .size();
+            if (accountTx.second)
+                size += accountTx.second->getAsObject()
+                            .getSerializer()
+                            .peekData()
+                            .size();
+        }
+
+        // Count structural overhead of account transaction index
+        // The actual transaction data is already counted above from
+        // transactionMap_
+        for (const auto& [accountId, accountData] : accountTxMap_)
+        {
+            size +=
+                sizeof(accountId) + sizeof(AccountTxData) + MAP_NODE_OVERHEAD;
+            for (const auto& [ledgerSeq, txVector] : accountData.ledgerTxMap)
+            {
+                // Use capacity() to account for actual allocated memory
+                size += sizeof(ledgerSeq) + MAP_NODE_OVERHEAD;
+                size += txVector.capacity() * sizeof(AccountTx);
+            }
+        }
+
+        return size;
+    }
+
+public:
     std::uint32_t
     getKBUsedAll() override
     {
         std::shared_lock lock(mutex_);
-        std::uint32_t size = sizeof(*this);
-        size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
-        size +=
-            ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
-        size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
-        for (const 
auto& [_, accountData] : accountTxMap_) - { - size += sizeof(AccountID) + sizeof(AccountTxData); - size += accountData.transactions.size() * sizeof(AccountTx); - for (const auto& [_, innerMap] : accountData.ledgerTxMap) - { - size += sizeof(uint32_t) + - innerMap.size() * (sizeof(uint32_t) + sizeof(size_t)); - } - } - return size / 1024; + + // Total = base object + ledger infrastructure + transaction data + std::uint64_t size = sizeof(*this) + getBytesUsedLedger_unlocked() + + getBytesUsedTransaction_unlocked(); + + return static_cast(size / 1024); } std::uint32_t getKBUsedLedger() override { std::shared_lock lock(mutex_); - std::uint32_t size = 0; - size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData)); - size += - ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex)); - return size / 1024; + return static_cast(getBytesUsedLedger_unlocked() / 1024); } std::uint32_t getKBUsedTransaction() override { - if (!useTxTables_) - return 0; - std::shared_lock lock(mutex_); - std::uint32_t size = 0; - size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx)); - for (const auto& [_, accountData] : accountTxMap_) - { - size += sizeof(AccountID) + sizeof(AccountTxData); - size += accountData.transactions.size() * sizeof(AccountTx); - for (const auto& [_, innerMap] : accountData.ledgerTxMap) - { - size += sizeof(uint32_t) + - innerMap.size() * (sizeof(uint32_t) + sizeof(size_t)); - } - } - return size / 1024; + return static_cast( + getBytesUsedTransaction_unlocked() / 1024); } void @@ -605,14 +648,13 @@ public: (options.bUnlimited || result.size() < options.limit); ++txIt) { - for (const auto& [txSeq, txIndex] : txIt->second) + for (const auto& accountTx : txIt->second) { if (skipped < options.offset) { ++skipped; continue; } - AccountTx const accountTx = accountData.transactions[txIndex]; std::uint32_t const inLedger = rangeCheckedCast( accountTx.second->getLgrSeq()); accountTx.first->setStatus(COMMITTED); @@ -657,8 +699,7 @@ public: ++skipped; continue; } - AccountTx const accountTx = - accountData.transactions[innerRIt->second]; + AccountTx const accountTx = *innerRIt; std::uint32_t const inLedger = rangeCheckedCast( accountTx.second->getLgrSeq()); accountTx.first->setLedger(inLedger); @@ -692,14 +733,14 @@ public: (options.bUnlimited || result.size() < options.limit); ++txIt) { - for (const auto& [txSeq, txIndex] : txIt->second) + for (const auto& accountTx : txIt->second) { if (skipped < options.offset) { ++skipped; continue; } - const auto& [txn, txMeta] = accountData.transactions[txIndex]; + const auto& [txn, txMeta] = accountTx; result.emplace_back( txn->getSTransaction()->getSerializer().peekData(), txMeta->getAsObject().getSerializer().peekData(), @@ -743,8 +784,7 @@ public: ++skipped; continue; } - const auto& [txn, txMeta] = - accountData.transactions[innerRIt->second]; + const auto& [txn, txMeta] = *innerRIt; result.emplace_back( txn->getSTransaction()->getSerializer().peekData(), txMeta->getAsObject().getSerializer().peekData(), @@ -816,11 +856,9 @@ public: for (; txIt != txEnd; ++txIt) { std::uint32_t const ledgerSeq = txIt->first; - for (auto seqIt = txIt->second.begin(); - seqIt != txIt->second.end(); - ++seqIt) + std::uint32_t txnSeq = 0; + for (const auto& accountTx : txIt->second) { - const auto& [txnSeq, index] = *seqIt; if (lookingForMarker) { if (findLedger == ledgerSeq && findSeq == txnSeq) @@ -828,7 +866,10 @@ public: lookingForMarker = false; } else + { + ++txnSeq; continue; + } } else if (numberOfResults == 0) { @@ -837,12 +878,10 @@ 
public: return {newmarker, total}; } - Blob rawTxn = accountData.transactions[index] - .first->getSTransaction() + Blob rawTxn = accountTx.first->getSTransaction() ->getSerializer() .peekData(); - Blob rawMeta = accountData.transactions[index] - .second->getAsObject() + Blob rawMeta = accountTx.second->getAsObject() .getSerializer() .peekData(); @@ -856,6 +895,7 @@ public: std::move(rawMeta)); --numberOfResults; ++total; + ++txnSeq; } } } @@ -871,11 +911,11 @@ public: for (; rtxIt != rtxEnd; ++rtxIt) { std::uint32_t const ledgerSeq = rtxIt->first; + std::uint32_t txnSeq = rtxIt->second.size() - 1; for (auto innerRIt = rtxIt->second.rbegin(); innerRIt != rtxIt->second.rend(); ++innerRIt) { - const auto& [txnSeq, index] = *innerRIt; if (lookingForMarker) { if (findLedger == ledgerSeq && findSeq == txnSeq) @@ -883,7 +923,10 @@ public: lookingForMarker = false; } else + { + --txnSeq; continue; + } } else if (numberOfResults == 0) { @@ -892,12 +935,11 @@ public: return {newmarker, total}; } - Blob rawTxn = accountData.transactions[index] - .first->getSTransaction() + const auto& accountTx = *innerRIt; + Blob rawTxn = accountTx.first->getSTransaction() ->getSerializer() .peekData(); - Blob rawMeta = accountData.transactions[index] - .second->getAsObject() + Blob rawMeta = accountTx.second->getAsObject() .getSerializer() .peekData(); @@ -911,6 +953,7 @@ public: std::move(rawMeta)); --numberOfResults; ++total; + --txnSeq; } } } diff --git a/src/ripple/app/rdb/impl/RelationalDatabase.cpp b/src/ripple/app/rdb/impl/RelationalDatabase.cpp index 64161bd53..bf24d7dc7 100644 --- a/src/ripple/app/rdb/impl/RelationalDatabase.cpp +++ b/src/ripple/app/rdb/impl/RelationalDatabase.cpp @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -41,7 +40,6 @@ RelationalDatabase::init( bool use_sqlite = false; bool use_postgres = false; bool use_rwdb = false; - bool use_flatmap = false; if (config.reporting()) { @@ -60,10 +58,6 @@ RelationalDatabase::init( { use_rwdb = true; } - else if (boost::iequals(get(rdb_section, "backend"), "flatmap")) - { - use_flatmap = true; - } else { Throw( @@ -89,10 +83,6 @@ RelationalDatabase::init( { return getRWDBDatabase(app, config, jobQueue); } - else if (use_flatmap) - { - return getFlatmapDatabase(app, config, jobQueue); - } return std::unique_ptr(); } diff --git a/src/ripple/core/Config.h b/src/ripple/core/Config.h index 3e2c3c81a..0909f88ac 100644 --- a/src/ripple/core/Config.h +++ b/src/ripple/core/Config.h @@ -361,9 +361,7 @@ public: boost::beast::iequals( get(section(SECTION_RELATIONAL_DB), "backend"), "rwdb")) || (!section("node_db").empty() && - (boost::beast::iequals(get(section("node_db"), "type"), "rwdb") || - boost::beast::iequals( - get(section("node_db"), "type"), "flatmap"))); + boost::beast::iequals(get(section("node_db"), "type"), "rwdb")); // RHNOTE: memory type is not selected for here because it breaks // tests return isMem; diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index 7673d16ec..9fd23f33e 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -45,7 +45,6 @@ namespace ripple { namespace detail { - [[nodiscard]] std::uint64_t getMemorySize() { @@ -54,7 +53,6 @@ getMemorySize() return 0; } - } // namespace detail } // namespace ripple #endif @@ -64,7 +62,6 @@ getMemorySize() namespace ripple { namespace detail { - [[nodiscard]] std::uint64_t getMemorySize() { @@ -73,7 +70,6 @@ getMemorySize() return 0; } - } // namespace detail } // namespace ripple @@ -85,7 +81,6 @@ 
getMemorySize() namespace ripple { namespace detail { - [[nodiscard]] std::uint64_t getMemorySize() { @@ -98,13 +93,11 @@ getMemorySize() return 0; } - } // namespace detail } // namespace ripple #endif namespace ripple { - // clang-format off // The configurable node sizes are "tiny", "small", "medium", "large", "huge" inline constexpr std::array>, 13> @@ -1007,6 +1000,23 @@ Config::loadFromString(std::string const& fileContents) "the maximum number of allowed peers (peers_max)"); } } + + if (!RUN_STANDALONE) + { + auto db_section = section(ConfigSection::nodeDatabase()); + if (auto type = get(db_section, "type", ""); type == "rwdb") + { + if (auto delete_interval = get(db_section, "online_delete", 0); + delete_interval == 0) + { + Throw( + "RWDB (in-memory backend) requires online_delete to " + "prevent OOM " + "Exception: standalone mode (used by tests) doesn't need " + "online_delete"); + } + } + } } boost::filesystem::path @@ -1071,5 +1081,4 @@ setup_FeeVote(Section const& section) } return setup; } - } // namespace ripple diff --git a/src/ripple/nodestore/backend/FlatmapFactory.cpp b/src/ripple/nodestore/backend/FlatmapFactory.cpp deleted file mode 100644 index 4cec115ef..000000000 --- a/src/ripple/nodestore/backend/FlatmapFactory.cpp +++ /dev/null @@ -1,235 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { -namespace NodeStore { - -class FlatmapBackend : public Backend -{ -private: - std::string name_; - beast::Journal journal_; - bool isOpen_{false}; - - struct base_uint_hasher - { - using result_type = std::size_t; - - result_type - operator()(base_uint<256> const& value) const - { - return hardened_hash<>{}(value); - } - }; - - using DataStore = boost::unordered::concurrent_flat_map< - uint256, - std::vector, // Store compressed blob data - base_uint_hasher>; - - DataStore table_; - -public: - FlatmapBackend( - size_t keyBytes, - Section const& keyValues, - beast::Journal journal) - : name_(get(keyValues, "path")), journal_(journal) - { - boost::ignore_unused(journal_); - if (name_.empty()) - name_ = "node_db"; - } - - ~FlatmapBackend() override - { - close(); - } - - std::string - getName() override - { - return name_; - } - - void - open(bool createIfMissing) override - { - if (isOpen_) - Throw("already open"); - isOpen_ = true; - } - - bool - isOpen() override - { - return isOpen_; - } - - void - close() override - { - table_.clear(); - isOpen_ = false; - } - - Status - fetch(void const* key, std::shared_ptr* pObject) override - { - if (!isOpen_) - return notFound; - - uint256 const hash(uint256::fromVoid(key)); - - bool found = table_.visit(hash, [&](const auto& key_value_pair) { - nudb::detail::buffer bf; - auto const result = nodeobject_decompress( - key_value_pair.second.data(), key_value_pair.second.size(), bf); - DecodedBlob decoded(hash.data(), result.first, result.second); - if (!decoded.wasOk()) - { - *pObject = nullptr; - return; - } - *pObject = decoded.createObject(); - }); - return found ? (*pObject ? 
ok : dataCorrupt) : notFound; - } - - std::pair>, Status> - fetchBatch(std::vector const& hashes) override - { - std::vector> results; - results.reserve(hashes.size()); - for (auto const& h : hashes) - { - std::shared_ptr nObj; - Status status = fetch(h->begin(), &nObj); - if (status != ok) - results.push_back({}); - else - results.push_back(nObj); - } - return {results, ok}; - } - - void - store(std::shared_ptr const& object) override - { - if (!isOpen_) - return; - - if (!object) - return; - - EncodedBlob encoded(object); - nudb::detail::buffer bf; - auto const result = - nodeobject_compress(encoded.getData(), encoded.getSize(), bf); - - std::vector compressed( - static_cast(result.first), - static_cast(result.first) + result.second); - - table_.insert_or_assign(object->getHash(), std::move(compressed)); - } - - void - storeBatch(Batch const& batch) override - { - for (auto const& e : batch) - store(e); - } - - void - sync() override - { - } - - void - for_each(std::function)> f) override - { - if (!isOpen_) - return; - - table_.visit_all([&f](const auto& entry) { - nudb::detail::buffer bf; - auto const result = nodeobject_decompress( - entry.second.data(), entry.second.size(), bf); - DecodedBlob decoded( - entry.first.data(), result.first, result.second); - if (decoded.wasOk()) - f(decoded.createObject()); - }); - } - - int - getWriteLoad() override - { - return 0; - } - - void - setDeletePath() override - { - close(); - } - - int - fdRequired() const override - { - return 0; - } - -private: - size_t - size() const - { - return table_.size(); - } -}; - -class FlatmapFactory : public Factory -{ -public: - FlatmapFactory() - { - Manager::instance().insert(*this); - } - - ~FlatmapFactory() override - { - Manager::instance().erase(*this); - } - - std::string - getName() const override - { - return "Flatmap"; - } - - std::unique_ptr - createInstance( - size_t keyBytes, - Section const& keyValues, - std::size_t burstSize, - Scheduler& scheduler, - beast::Journal journal) override - { - return std::make_unique(keyBytes, keyValues, journal); - } -}; - -static FlatmapFactory flatmapFactory; - -} // namespace NodeStore -} // namespace ripple diff --git a/src/test/app/SHAMapStore_test.cpp b/src/test/app/SHAMapStore_test.cpp index 010c83a42..8a3ca0f89 100644 --- a/src/test/app/SHAMapStore_test.cpp +++ b/src/test/app/SHAMapStore_test.cpp @@ -216,6 +216,10 @@ public: } BEAST_EXPECT(store.getLastRotated() == lastRotated); + SQLiteDatabase* const db = + dynamic_cast(&env.app().getRelationalDatabase()); + BEAST_EXPECT(*db->getTransactionsMinLedgerSeq() == 3); + for (auto i = 3; i < deleteInterval + lastRotated; ++i) { ledgers.emplace( diff --git a/src/test/core/Config_test.cpp b/src/test/core/Config_test.cpp index 3d7991d74..dbf187fa6 100644 --- a/src/test/core/Config_test.cpp +++ b/src/test/core/Config_test.cpp @@ -1206,6 +1206,97 @@ r.ripple.com:51235 } } + void + testRWDBOnlineDelete() + { + testcase("RWDB online_delete validation"); + + // Test 1: RWDB without online_delete in standalone mode (should + // succeed) + { + Config c; + std::string toLoad = + "[node_db]\n" + "type=rwdb\n" + "path=main\n"; + c.setupControl(true, true, true); // standalone = true + try + { + c.loadFromString(toLoad); + pass(); // Should succeed + } + catch (std::runtime_error const& e) + { + fail("Should not throw in standalone mode"); + } + } + + // Test 2: RWDB without online_delete NOT in standalone mode (should + // throw) + { + Config c; + std::string toLoad = + "[node_db]\n" + "type=rwdb\n" + "path=main\n"; + 
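+            // The config omits online_delete and, per setupControl() below,
+            // standalone mode is disabled, so loadFromString() must throw.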
c.setupControl(true, true, false); // standalone = false + try + { + c.loadFromString(toLoad); + fail("Expected exception for RWDB without online_delete"); + } + catch (std::runtime_error const& e) + { + BEAST_EXPECT( + std::string(e.what()).find( + "RWDB (in-memory backend) requires online_delete") != + std::string::npos); + pass(); + } + } + + // Test 3: RWDB with online_delete NOT in standalone mode (should + // succeed) + { + Config c; + std::string toLoad = + "[node_db]\n" + "type=rwdb\n" + "path=main\n" + "online_delete=256\n"; + c.setupControl(true, true, false); // standalone = false + try + { + c.loadFromString(toLoad); + pass(); // Should succeed + } + catch (std::runtime_error const& e) + { + fail("Should not throw when online_delete is configured"); + } + } + + // Test 4: Non-RWDB without online_delete NOT in standalone mode (should + // succeed) + { + Config c; + std::string toLoad = + "[node_db]\n" + "type=NuDB\n" + "path=main\n"; + c.setupControl(true, true, false); // standalone = false + try + { + c.loadFromString(toLoad); + pass(); // Should succeed + } + catch (std::runtime_error const& e) + { + fail("Should not throw for non-RWDB backends"); + } + } + } + void testOverlay() { @@ -1295,6 +1386,7 @@ r.ripple.com:51235 testComments(); testGetters(); testAmendment(); + testRWDBOnlineDelete(); testOverlay(); testNetworkID(); } diff --git a/src/test/rdb/RelationalDatabase_test.cpp b/src/test/rdb/RelationalDatabase_test.cpp new file mode 100644 index 000000000..8f4ea3907 --- /dev/null +++ b/src/test/rdb/RelationalDatabase_test.cpp @@ -0,0 +1,756 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace test { + +class RelationalDatabase_test : public beast::unit_test::suite +{ +private: + // Helper to get SQLiteDatabase* (works for both SQLite and RWDB since RWDB + // inherits from SQLiteDatabase) + static SQLiteDatabase* + getInterface(Application& app) + { + return dynamic_cast(&app.getRelationalDatabase()); + } + + static SQLiteDatabase* + getInterface(RelationalDatabase& db) + { + return dynamic_cast(&db); + } + + static std::unique_ptr + makeConfig(std::string const& backend) + { + auto config = test::jtx::envconfig(); + // Sqlite backend doesn't need a database_path as it will just use + // in-memory databases when in standalone mode anyway. 
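+        // RWDB likewise needs no online_delete here: that requirement is
+        // only enforced outside standalone mode, and unit tests run in
+        // standalone mode.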
+ config->overwrite(SECTION_RELATIONAL_DB, "backend", backend); + return config; + } + +public: + RelationalDatabase_test() = default; + + void + testBasicInitialization( + std::string const& backend, + std::unique_ptr config) + { + testcase("Basic initialization and empty database - " + backend); + + using namespace test::jtx; + Env env(*this, std::move(config)); + auto& db = env.app().getRelationalDatabase(); + + // Test empty database state + BEAST_EXPECT(db.getMinLedgerSeq() == 2); + BEAST_EXPECT(db.getMaxLedgerSeq() == 2); + BEAST_EXPECT(db.getNewestLedgerInfo()->seq == 2); + + auto* sqliteDb = getInterface(db); + BEAST_EXPECT(sqliteDb != nullptr); + + if (sqliteDb) + { + BEAST_EXPECT(!sqliteDb->getTransactionsMinLedgerSeq().has_value()); + BEAST_EXPECT( + !sqliteDb->getAccountTransactionsMinLedgerSeq().has_value()); + + auto ledgerCount = sqliteDb->getLedgerCountMinMax(); + BEAST_EXPECT(ledgerCount.numberOfRows == 1); + BEAST_EXPECT(ledgerCount.minLedgerSequence == 2); + BEAST_EXPECT(ledgerCount.maxLedgerSequence == 2); + } + } + + void + testLedgerSequenceOperations( + std::string const& backend, + std::unique_ptr config) + { + testcase("Ledger sequence operations - " + backend); + + using namespace test::jtx; + config->LEDGER_HISTORY = 1000; + + Env env(*this, std::move(config)); + auto& db = env.app().getRelationalDatabase(); + + // Create initial ledger + Account alice("alice"); + env.fund(XRP(10000), alice); + env.close(); + + // Test basic sequence operations + auto minSeq = db.getMinLedgerSeq(); + auto maxSeq = db.getMaxLedgerSeq(); + + BEAST_EXPECT(minSeq.has_value()); + BEAST_EXPECT(maxSeq.has_value()); + BEAST_EXPECT(*minSeq == 2); + BEAST_EXPECT(*maxSeq == 3); + + // Create more ledgers + env(pay(alice, Account("bob"), XRP(1000))); + env.close(); + + env(pay(alice, Account("carol"), XRP(500))); + env.close(); + + // Verify sequence updates + minSeq = db.getMinLedgerSeq(); + maxSeq = db.getMaxLedgerSeq(); + + BEAST_EXPECT(*minSeq == 2); + BEAST_EXPECT(*maxSeq == 5); + + auto* sqliteDb = getInterface(db); + if (sqliteDb) + { + auto ledgerCount = sqliteDb->getLedgerCountMinMax(); + BEAST_EXPECT(ledgerCount.numberOfRows == 4); + BEAST_EXPECT(ledgerCount.minLedgerSequence == 2); + BEAST_EXPECT(ledgerCount.maxLedgerSequence == 5); + } + } + + void + testLedgerInfoOperations( + std::string const& backend, + std::unique_ptr config) + { + testcase("Ledger info retrieval operations - " + backend); + + using namespace test::jtx; + config->LEDGER_HISTORY = 1000; + + Env env(*this, std::move(config)); + auto* db = getInterface(env.app()); + + Account alice("alice"); + env.fund(XRP(10000), alice); + env.close(); + + // Test getNewestLedgerInfo + auto newestLedger = db->getNewestLedgerInfo(); + BEAST_EXPECT(newestLedger.has_value()); + BEAST_EXPECT(newestLedger->seq == 3); + + // Test getLedgerInfoByIndex + auto ledgerByIndex = db->getLedgerInfoByIndex(3); + BEAST_EXPECT(ledgerByIndex.has_value()); + BEAST_EXPECT(ledgerByIndex->seq == 3); + BEAST_EXPECT(ledgerByIndex->hash == newestLedger->hash); + + // Test getLedgerInfoByHash + auto ledgerByHash = db->getLedgerInfoByHash(newestLedger->hash); + BEAST_EXPECT(ledgerByHash.has_value()); + BEAST_EXPECT(ledgerByHash->seq == 3); + BEAST_EXPECT(ledgerByHash->hash == newestLedger->hash); + + // Test getLimitedOldestLedgerInfo + auto oldestLedger = db->getLimitedOldestLedgerInfo(2); + BEAST_EXPECT(oldestLedger.has_value()); + BEAST_EXPECT(oldestLedger->seq == 2); + + // Test getLimitedNewestLedgerInfo + auto limitedNewest = 
db->getLimitedNewestLedgerInfo(2); + BEAST_EXPECT(limitedNewest.has_value()); + BEAST_EXPECT(limitedNewest->seq == 3); + + // Test invalid queries + auto invalidLedger = db->getLedgerInfoByIndex(999); + BEAST_EXPECT(!invalidLedger.has_value()); + + uint256 invalidHash; + auto invalidHashLedger = db->getLedgerInfoByHash(invalidHash); + BEAST_EXPECT(!invalidHashLedger.has_value()); + } + + void + testHashOperations( + std::string const& backend, + std::unique_ptr config) + { + testcase("Hash retrieval operations - " + backend); + + using namespace test::jtx; + config->LEDGER_HISTORY = 1000; + + Env env(*this, std::move(config)); + auto& db = env.app().getRelationalDatabase(); + + Account alice("alice"); + env.fund(XRP(10000), alice); + env.close(); + + env(pay(alice, Account("bob"), XRP(1000))); + env.close(); + + // Test getHashByIndex + auto hash1 = db.getHashByIndex(3); + auto hash2 = db.getHashByIndex(4); + + BEAST_EXPECT(hash1 != uint256()); + BEAST_EXPECT(hash2 != uint256()); + BEAST_EXPECT(hash1 != hash2); + + // Test getHashesByIndex (single) + auto hashPair = db.getHashesByIndex(4); + BEAST_EXPECT(hashPair.has_value()); + BEAST_EXPECT(hashPair->ledgerHash == hash2); + BEAST_EXPECT(hashPair->parentHash == hash1); + + // Test getHashesByIndex (range) + auto hashRange = db.getHashesByIndex(3, 4); + BEAST_EXPECT(hashRange.size() == 2); + BEAST_EXPECT(hashRange[3].ledgerHash == hash1); + BEAST_EXPECT(hashRange[4].ledgerHash == hash2); + BEAST_EXPECT(hashRange[4].parentHash == hash1); + + // Test invalid hash queries + auto invalidHash = db.getHashByIndex(999); + BEAST_EXPECT(invalidHash == uint256()); + + auto invalidHashPair = db.getHashesByIndex(999); + BEAST_EXPECT(!invalidHashPair.has_value()); + + auto emptyRange = db.getHashesByIndex(10, 5); // max < min + BEAST_EXPECT(emptyRange.empty()); + } + + void + testTransactionOperations( + std::string const& backend, + std::unique_ptr config) + { + testcase("Transaction storage and retrieval - " + backend); + + using namespace test::jtx; + config->LEDGER_HISTORY = 1000; + + Env env(*this, std::move(config)); + auto& db = env.app().getRelationalDatabase(); + + Account alice("alice"); + Account bob("bob"); + + env.fund(XRP(10000), alice, bob); + env.close(); + + auto* sqliteDb = getInterface(db); + BEAST_EXPECT(sqliteDb != nullptr); + + if (!sqliteDb) + return; + + // Test initial transaction counts after funding + auto initialTxCount = sqliteDb->getTransactionCount(); + auto initialAcctTxCount = sqliteDb->getAccountTransactionCount(); + + BEAST_EXPECT(initialTxCount == 4); + BEAST_EXPECT(initialAcctTxCount == 6); + + // Create transactions + env(pay(alice, bob, XRP(1000))); + env.close(); + + env(pay(bob, alice, XRP(500))); + env.close(); + + // Test transaction counts after creation + auto txCount = sqliteDb->getTransactionCount(); + auto acctTxCount = sqliteDb->getAccountTransactionCount(); + + BEAST_EXPECT(txCount == 6); + BEAST_EXPECT(acctTxCount == 10); + + // Test transaction retrieval + uint256 invalidTxId; + error_code_i ec; + auto invalidTxResult = + sqliteDb->getTransaction(invalidTxId, std::nullopt, ec); + BEAST_EXPECT(std::holds_alternative(invalidTxResult)); + + // Test transaction history + auto txHistory = db.getTxHistory(0); + + BEAST_EXPECT(!txHistory.empty()); + BEAST_EXPECT(txHistory.size() == 6); + + // Test with valid transaction range + auto minSeq = sqliteDb->getTransactionsMinLedgerSeq(); + auto maxSeq = db.getMaxLedgerSeq(); + + if (minSeq && maxSeq) + { + ClosedInterval range(*minSeq, *maxSeq); + auto rangeResult 
= sqliteDb->getTransaction(invalidTxId, range, ec); + auto searched = std::get(rangeResult); + BEAST_EXPECT( + searched == TxSearched::all || searched == TxSearched::some); + } + } + + void + testAccountTransactionOperations( + std::string const& backend, + std::unique_ptr config) + { + testcase("Account transaction operations - " + backend); + + using namespace test::jtx; + config->LEDGER_HISTORY = 1000; + + Env env(*this, std::move(config)); + auto& db = env.app().getRelationalDatabase(); + + Account alice("alice"); + Account bob("bob"); + Account carol("carol"); + + env.fund(XRP(10000), alice, bob, carol); + env.close(); + + auto* sqliteDb = getInterface(db); + BEAST_EXPECT(sqliteDb != nullptr); + + if (!sqliteDb) + return; + + // Create multiple transactions involving alice + env(pay(alice, bob, XRP(1000))); + env.close(); + + env(pay(bob, alice, XRP(500))); + env.close(); + + env(pay(alice, carol, XRP(250))); + env.close(); + + auto minSeq = db.getMinLedgerSeq(); + auto maxSeq = db.getMaxLedgerSeq(); + + if (!minSeq || !maxSeq) + return; + + // Test getOldestAccountTxs + RelationalDatabase::AccountTxOptions options{ + alice.id(), *minSeq, *maxSeq, 0, 10, false}; + + auto oldestTxs = sqliteDb->getOldestAccountTxs(options); + BEAST_EXPECT(oldestTxs.size() == 5); + + // Test getNewestAccountTxs + auto newestTxs = sqliteDb->getNewestAccountTxs(options); + BEAST_EXPECT(newestTxs.size() == 5); + + // Test binary format versions + auto oldestTxsB = sqliteDb->getOldestAccountTxsB(options); + BEAST_EXPECT(oldestTxsB.size() == 5); + + auto newestTxsB = sqliteDb->getNewestAccountTxsB(options); + BEAST_EXPECT(newestTxsB.size() == 5); + + // Test with limit + options.limit = 1; + auto limitedTxs = sqliteDb->getOldestAccountTxs(options); + BEAST_EXPECT(limitedTxs.size() == 1); + + // Test with offset + options.limit = 10; + options.offset = 1; + auto offsetTxs = sqliteDb->getOldestAccountTxs(options); + BEAST_EXPECT(offsetTxs.size() == 4); + + // Test with invalid account + { + Account invalidAccount("invalid"); + RelationalDatabase::AccountTxOptions invalidOptions{ + invalidAccount.id(), *minSeq, *maxSeq, 0, 10, false}; + auto invalidAccountTxs = + sqliteDb->getOldestAccountTxs(invalidOptions); + BEAST_EXPECT(invalidAccountTxs.empty()); + } + } + + void + testAccountTransactionPaging( + std::string const& backend, + std::unique_ptr config) + { + testcase("Account transaction paging operations - " + backend); + + using namespace test::jtx; + config->LEDGER_HISTORY = 1000; + + Env env(*this, std::move(config)); + auto& db = env.app().getRelationalDatabase(); + + Account alice("alice"); + Account bob("bob"); + + env.fund(XRP(10000), alice, bob); + env.close(); + + auto* sqliteDb = getInterface(db); + BEAST_EXPECT(sqliteDb != nullptr); + if (!sqliteDb) + return; + + // Create multiple transactions for paging + for (int i = 0; i < 5; ++i) + { + env(pay(alice, bob, XRP(100 + i))); + env.close(); + } + + auto minSeq = db.getMinLedgerSeq(); + auto maxSeq = db.getMaxLedgerSeq(); + + if (!minSeq || !maxSeq) + return; + + RelationalDatabase::AccountTxPageOptions pageOptions{ + alice.id(), *minSeq, *maxSeq, std::nullopt, 2, false}; + + // Test oldestAccountTxPage + auto [oldestPage, oldestMarker] = + sqliteDb->oldestAccountTxPage(pageOptions); + + BEAST_EXPECT(oldestPage.size() == 2); + BEAST_EXPECT(oldestMarker.has_value() == true); + + // Test newestAccountTxPage + auto [newestPage, newestMarker] = + sqliteDb->newestAccountTxPage(pageOptions); + + BEAST_EXPECT(newestPage.size() == 2); + 
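+        // A non-empty marker indicates more matching transactions remain
+        // beyond this page; passing it back in resumes the scan where the
+        // previous page stopped (see the marker continuation test below).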
BEAST_EXPECT(newestMarker.has_value() == true); + + // Test binary versions + auto [oldestPageB, oldestMarkerB] = + sqliteDb->oldestAccountTxPageB(pageOptions); + BEAST_EXPECT(oldestPageB.size() == 2); + + auto [newestPageB, newestMarkerB] = + sqliteDb->newestAccountTxPageB(pageOptions); + BEAST_EXPECT(newestPageB.size() == 2); + + // Test with marker continuation + if (oldestMarker.has_value()) + { + pageOptions.marker = oldestMarker; + auto [continuedPage, continuedMarker] = + sqliteDb->oldestAccountTxPage(pageOptions); + BEAST_EXPECT(continuedPage.size() == 2); + } + } + + void + testDeletionOperations( + std::string const& backend, + std::unique_ptr config) + { + testcase("Deletion operations - " + backend); + + using namespace test::jtx; + config->LEDGER_HISTORY = 1000; + + Env env(*this, std::move(config)); + auto& db = env.app().getRelationalDatabase(); + + Account alice("alice"); + Account bob("bob"); + + env.fund(XRP(10000), alice, bob); + env.close(); + + auto* sqliteDb = getInterface(db); + BEAST_EXPECT(sqliteDb != nullptr); + if (!sqliteDb) + return; + + // Create multiple ledgers and transactions + for (int i = 0; i < 3; ++i) + { + env(pay(alice, bob, XRP(100 + i))); + env.close(); + } + + auto initialTxCount = sqliteDb->getTransactionCount(); + BEAST_EXPECT(initialTxCount == 7); + auto initialAcctTxCount = sqliteDb->getAccountTransactionCount(); + BEAST_EXPECT(initialAcctTxCount == 12); + auto initialLedgerCount = sqliteDb->getLedgerCountMinMax(); + BEAST_EXPECT(initialLedgerCount.numberOfRows == 5); + + auto maxSeq = db.getMaxLedgerSeq(); + if (!maxSeq || *maxSeq <= 2) + return; + + // Test deleteTransactionByLedgerSeq + sqliteDb->deleteTransactionByLedgerSeq(*maxSeq); + auto txCountAfterDelete = sqliteDb->getTransactionCount(); + BEAST_EXPECT(txCountAfterDelete == 6); + + // Test deleteTransactionsBeforeLedgerSeq + sqliteDb->deleteTransactionsBeforeLedgerSeq(*maxSeq - 1); + auto txCountAfterBulkDelete = sqliteDb->getTransactionCount(); + BEAST_EXPECT(txCountAfterBulkDelete == 1); + + // Test deleteAccountTransactionsBeforeLedgerSeq + sqliteDb->deleteAccountTransactionsBeforeLedgerSeq(*maxSeq - 1); + auto acctTxCountAfterDelete = sqliteDb->getAccountTransactionCount(); + BEAST_EXPECT(acctTxCountAfterDelete == 4); + + // Test deleteBeforeLedgerSeq + auto minSeq = db.getMinLedgerSeq(); + if (minSeq) + { + sqliteDb->deleteBeforeLedgerSeq(*minSeq + 1); + auto ledgerCountAfterDelete = sqliteDb->getLedgerCountMinMax(); + BEAST_EXPECT(ledgerCountAfterDelete.numberOfRows == 4); + } + } + + void + testDatabaseSpaceOperations( + std::string const& backend, + std::unique_ptr config) + { + testcase("Database space and size operations - " + backend); + + using namespace test::jtx; + Env env(*this, std::move(config)); + auto& db = env.app().getRelationalDatabase(); + + auto* sqliteDb = getInterface(db); + BEAST_EXPECT(sqliteDb != nullptr); + if (!sqliteDb) + return; + + // Test size queries + auto allKB = sqliteDb->getKBUsedAll(); + auto ledgerKB = sqliteDb->getKBUsedLedger(); + auto txKB = sqliteDb->getKBUsedTransaction(); + + if (backend == "rwdb") + { + // RWDB reports actual data memory (rounded down to KB) + // Initially should be < 1KB, so rounds down to 0 + // Note: These are 0 due to rounding, not because there's literally + // no data + BEAST_EXPECT(allKB == 0); // < 1024 bytes rounds to 0 KB + BEAST_EXPECT(ledgerKB == 0); // < 1024 bytes rounds to 0 KB + BEAST_EXPECT(txKB == 0); // < 1024 bytes rounds to 0 KB + } + else + { + // SQLite reports cache/engine memory which has 
overhead even when
+            // empty. Just verify the functions return reasonable values
+            BEAST_EXPECT(allKB >= 0);
+            BEAST_EXPECT(ledgerKB >= 0);
+            BEAST_EXPECT(txKB >= 0);
+        }
+
+        // Create some data and verify size increases
+        Account alice("alice");
+        env.fund(XRP(10000), alice);
+        env.close();
+
+        auto newAllKB = sqliteDb->getKBUsedAll();
+        auto newLedgerKB = sqliteDb->getKBUsedLedger();
+        auto newTxKB = sqliteDb->getKBUsedTransaction();
+
+        if (backend == "rwdb")
+        {
+            // RWDB reports actual data memory
+            // After adding data, should see some increase
+            BEAST_EXPECT(newAllKB >= 1);  // Should have at least 1KB total
+            BEAST_EXPECT(
+                newTxKB >= 0);  // Transactions added (might still be < 1KB)
+            BEAST_EXPECT(
+                newLedgerKB >= 0);  // Ledger data (might still be < 1KB)
+
+            // Key relationships
+            BEAST_EXPECT(newAllKB >= newLedgerKB + newTxKB);  // Total >= parts
+            BEAST_EXPECT(newAllKB >= allKB);  // Should increase or stay same
+            BEAST_EXPECT(newTxKB >= txKB);    // Should increase or stay same
+        }
+        else
+        {
+            // SQLite: Memory usage should not decrease after adding data
+            // Values might increase due to cache growth
+            BEAST_EXPECT(newAllKB >= allKB);
+            BEAST_EXPECT(newLedgerKB >= ledgerKB);
+            BEAST_EXPECT(newTxKB >= txKB);
+
+            // SQLite's getKBUsedAll is global memory, should be >= parts
+            BEAST_EXPECT(newAllKB >= newLedgerKB);
+            BEAST_EXPECT(newAllKB >= newTxKB);
+        }
+
+        // Test space availability
+        // Both SQLite and RWDB use in-memory databases in standalone mode,
+        // so file-based space checks don't apply to either backend.
+        // Skip these checks for both.
+
+        // if (backend == "rwdb")
+        // {
+        //     BEAST_EXPECT(db.ledgerDbHasSpace(env.app().config()));
+        //     BEAST_EXPECT(db.transactionDbHasSpace(env.app().config()));
+        // }
+
+        // Test database closure operations (should not throw)
+        try
+        {
+            sqliteDb->closeLedgerDB();
+            sqliteDb->closeTransactionDB();
+        }
+        catch (std::exception const& e)
+        {
+            BEAST_EXPECT(false);  // Should not throw
+        }
+    }
+
+    void
+    testTransactionMinLedgerSeq(
+        std::string const& backend,
+        std::unique_ptr<Config> config)
+    {
+        testcase("Transaction minimum ledger sequence tracking - " + backend);
+
+        using namespace test::jtx;
+        config->LEDGER_HISTORY = 1000;
+
+        Env env(*this, std::move(config));
+        auto& db = env.app().getRelationalDatabase();
+
+        auto* sqliteDb = getInterface(db);
+        BEAST_EXPECT(sqliteDb != nullptr);
+        if (!sqliteDb)
+            return;
+
+        // Initially should have no transactions
+        BEAST_EXPECT(!sqliteDb->getTransactionsMinLedgerSeq().has_value());
+        BEAST_EXPECT(
+            !sqliteDb->getAccountTransactionsMinLedgerSeq().has_value());
+
+        Account alice("alice");
+        Account bob("bob");
+
+        env.fund(XRP(10000), alice, bob);
+        env.close();
+
+        // Create first transaction
+        env(pay(alice, bob, XRP(1000)));
+        env.close();
+
+        auto txMinSeq = sqliteDb->getTransactionsMinLedgerSeq();
+        auto acctTxMinSeq = sqliteDb->getAccountTransactionsMinLedgerSeq();
+        BEAST_EXPECT(txMinSeq.has_value());
+        BEAST_EXPECT(acctTxMinSeq.has_value());
+        BEAST_EXPECT(*txMinSeq == 3);
+        BEAST_EXPECT(*acctTxMinSeq == 3);
+
+        // Create more transactions
+        env(pay(bob, alice, XRP(500)));
+        env.close();
+
+        env(pay(alice, bob, XRP(250)));
+        env.close();
+
+        // Min sequences should remain the same (first transaction ledger)
+        auto newTxMinSeq = sqliteDb->getTransactionsMinLedgerSeq();
+        auto newAcctTxMinSeq = sqliteDb->getAccountTransactionsMinLedgerSeq();
+        BEAST_EXPECT(newTxMinSeq == txMinSeq);
+        BEAST_EXPECT(newAcctTxMinSeq == acctTxMinSeq);
+    }
+
+    std::vector<std::string> static getBackends(std::string const&
unittest_arg)
+    {
+        // Valid backends
+        static const std::set<std::string> validBackends = {"sqlite", "rwdb"};
+
+        // Default to all valid backends if no arg specified
+        if (unittest_arg.empty())
+            return {validBackends.begin(), validBackends.end()};
+
+        std::set<std::string> backends;  // Use set to avoid duplicates
+        std::stringstream ss(unittest_arg);
+        std::string backend;
+
+        while (std::getline(ss, backend, ','))
+        {
+            if (!backend.empty())
+            {
+                // Validate backend
+                if (validBackends.contains(backend))
+                {
+                    backends.insert(backend);
+                }
+            }
+        }
+
+        // Return as vector (sorted due to set)
+        return {backends.begin(), backends.end()};
+    }
+
+    void
+    run() override
+    {
+        auto backends = getBackends(arg());
+
+        if (backends.empty())
+        {
+            fail("no valid backend specified: '" + arg() + "'");
+        }
+
+        for (auto const& backend : backends)
+        {
+            testBasicInitialization(backend, makeConfig(backend));
+            testLedgerSequenceOperations(backend, makeConfig(backend));
+            testLedgerInfoOperations(backend, makeConfig(backend));
+            testHashOperations(backend, makeConfig(backend));
+            testTransactionOperations(backend, makeConfig(backend));
+            testAccountTransactionOperations(backend, makeConfig(backend));
+            testAccountTransactionPaging(backend, makeConfig(backend));
+            testDeletionOperations(backend, makeConfig(backend));
+            testDatabaseSpaceOperations(backend, makeConfig(backend));
+            testTransactionMinLedgerSeq(backend, makeConfig(backend));
+        }
+    }
+};
+
+BEAST_DEFINE_TESTSUITE(RelationalDatabase, rdb, ripple);
+
+} // namespace test
+} // namespace ripple
\ No newline at end of file

From 46cf6785ab37bc7117a4dbbd03e3545260005289 Mon Sep 17 00:00:00 2001
From: Niq Dudfield
Date: Mon, 8 Sep 2025 10:57:49 +0700
Subject: [PATCH 02/12] fix(tests): prevent buffer corruption from concurrent
 log writes (#565)

std::endl triggers flush() which calls sync() on the shared log buffer.
Multiple threads racing in sync() cause str()/str("") operations to
corrupt buffer state, leading to crashes and double frees.

Added mutex to serialize access to suite.log, preventing concurrent
sync() calls on the same buffer.
---
 src/test/unit_test/SuiteJournal.h | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/test/unit_test/SuiteJournal.h b/src/test/unit_test/SuiteJournal.h
index 0e80e83cd..40d6bb94b 100644
--- a/src/test/unit_test/SuiteJournal.h
+++ b/src/test/unit_test/SuiteJournal.h
@@ -19,9 +19,9 @@
 #ifndef TEST_UNIT_TEST_SUITE_JOURNAL_H
 #define TEST_UNIT_TEST_SUITE_JOURNAL_H
 
-
 #include <ripple/beast/unit_test.h>
 #include <ripple/beast/utility/Journal.h>
+#include <mutex>
 
 namespace ripple {
 namespace test {
@@ -82,7 +82,13 @@ SuiteJournalSink::write(
     // Only write the string if the level at least equals the threshold.
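    // An illustrative sketch of the race described in the commit message
    // (assumed shape of the sink internals, not part of this patch). Two
    // test threads can reach std::endl at the same time; endl calls
    // flush(), flush() calls sync() on the one streambuf shared through
    // suite_.log, and sync() performs a read-then-reset, str() followed by
    // str(""), that is not atomic:
    //
    //     // thread A                      // thread B
    //     suite_.log << a << std::endl;    suite_.log << b << std::endl;
    //     //   s = buf.str();              //   s = buf.str();   (same data)
    //     //   buf.str("");                //   buf.str("");     (double reset)
    //
    // The static mutex added below makes the format-and-flush step atomic.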
if (level >= threshold()) + { + // std::endl flushes → sync() → str()/str("") race in shared buffer → + // crashes + static std::mutex log_mutex; + std::lock_guard lock(log_mutex); suite_.log << s << partition_ << text << std::endl; + } } class SuiteJournal From 8f7ebf0377107a0b32d20eb6341a57b663bebedb Mon Sep 17 00:00:00 2001 From: tequ Date: Mon, 8 Sep 2025 14:53:40 +0900 Subject: [PATCH 03/12] Optimize github action cache (#544) * optimize github action cache * fix * refactor: improve github actions cache optimization (#3) - move ccache configuration logic to dedicated action - rename conanfile-changed to should-save-conan-cache for clarity --------- Co-authored-by: Niq Dudfield --- .../actions/xahau-configure-ccache/action.yml | 36 +++++++++++++++++-- .github/actions/xahau-ga-build/action.yml | 31 ++++++++++++---- .../actions/xahau-ga-dependencies/action.yml | 22 +++++++++++- .github/workflows/xahau-ga-macos.yml | 5 ++- .github/workflows/xahau-ga-nix.yml | 3 ++ 5 files changed, 87 insertions(+), 10 deletions(-) diff --git a/.github/actions/xahau-configure-ccache/action.yml b/.github/actions/xahau-configure-ccache/action.yml index 2b0a3d98b..44414b98a 100644 --- a/.github/actions/xahau-configure-ccache/action.yml +++ b/.github/actions/xahau-configure-ccache/action.yml @@ -14,6 +14,18 @@ inputs: description: 'How to check compiler for changes' required: false default: 'content' + is_main_branch: + description: 'Whether the current branch is the main branch' + required: false + default: 'false' + main_cache_dir: + description: 'Path to the main branch cache directory' + required: false + default: '~/.ccache-main' + current_cache_dir: + description: 'Path to the current branch cache directory' + required: false + default: '~/.ccache-current' runs: using: 'composite' @@ -21,11 +33,31 @@ runs: - name: Configure ccache shell: bash run: | + # Create cache directories + mkdir -p ${{ inputs.main_cache_dir }} ${{ inputs.current_cache_dir }} + + # Set compiler check globally + ccache -o compiler_check=${{ inputs.compiler_check }} + + # Use a single config file location mkdir -p ~/.ccache - export CONF_PATH="${CCACHE_CONFIGPATH:-${CCACHE_DIR:-$HOME/.ccache}/ccache.conf}" - mkdir -p $(dirname "$CONF_PATH") + export CONF_PATH="$HOME/.ccache/ccache.conf" + + # Apply common settings echo "max_size = ${{ inputs.max_size }}" > "$CONF_PATH" echo "hash_dir = ${{ inputs.hash_dir }}" >> "$CONF_PATH" echo "compiler_check = ${{ inputs.compiler_check }}" >> "$CONF_PATH" + + if [ "${{ inputs.is_main_branch }}" == "true" ]; then + # Main branch: use main branch cache + ccache --set-config=cache_dir="${{ inputs.main_cache_dir }}" + echo "CCACHE_DIR=${{ inputs.main_cache_dir }}" >> $GITHUB_ENV + else + # Feature branch: use current branch cache with main as secondary + ccache --set-config=cache_dir="${{ inputs.current_cache_dir }}" + ccache --set-config=secondary_storage="file:${{ inputs.main_cache_dir }}" + echo "CCACHE_DIR=${{ inputs.current_cache_dir }}" >> $GITHUB_ENV + fi + ccache -p # Print config for verification ccache -z # Zero statistics before the build \ No newline at end of file diff --git a/.github/actions/xahau-ga-build/action.yml b/.github/actions/xahau-ga-build/action.yml index 417190472..3387f0391 100644 --- a/.github/actions/xahau-ga-build/action.yml +++ b/.github/actions/xahau-ga-build/action.yml @@ -48,12 +48,23 @@ runs: SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-') echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT - - name: Restore ccache directory + - name: Restore ccache 
directory for default branch if: inputs.ccache_enabled == 'true' id: ccache-restore uses: actions/cache/restore@v4 with: - path: ~/.ccache + path: ~/.ccache-main + key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }} + restore-keys: | + ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}- + ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}- + + - name: Restore ccache directory for current branch + if: inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch + id: ccache-restore-current-branch + uses: actions/cache/restore@v4 + with: + path: ~/.ccache-current key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ steps.safe-branch.outputs.name }} restore-keys: | ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }} @@ -75,6 +86,7 @@ runs: export CXX="${{ inputs.cxx }}" fi + # Configure ccache launcher args CCACHE_ARGS="" if [ "${{ inputs.ccache_enabled }}" = "true" ]; then @@ -99,9 +111,16 @@ runs: shell: bash run: ccache -s - - name: Save ccache directory - if: inputs.ccache_enabled == 'true' + - name: Save ccache directory for default branch + if: always() && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name == inputs.main_branch uses: actions/cache/save@v4 with: - path: ~/.ccache - key: ${{ steps.ccache-restore.outputs.cache-primary-key }} \ No newline at end of file + path: ~/.ccache-main + key: ${{ steps.ccache-restore.outputs.cache-primary-key }} + + - name: Save ccache directory for current branch + if: always() && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch + uses: actions/cache/save@v4 + with: + path: ~/.ccache-current + key: ${{ steps.ccache-restore-current-branch.outputs.cache-primary-key }} diff --git a/.github/actions/xahau-ga-dependencies/action.yml b/.github/actions/xahau-ga-dependencies/action.yml index b57a9e69e..d295e20eb 100644 --- a/.github/actions/xahau-ga-dependencies/action.yml +++ b/.github/actions/xahau-ga-dependencies/action.yml @@ -42,6 +42,26 @@ runs: SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-') echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT + - name: Check conanfile changes + if: inputs.cache_enabled == 'true' + id: check-conanfile-changes + shell: bash + run: | + # Check if we're on the main branch + if [ "${{ github.ref_name }}" == "${{ inputs.main_branch }}" ]; then + echo "should-save-conan-cache=true" >> $GITHUB_OUTPUT + else + # Fetch main branch for comparison + git fetch origin ${{ inputs.main_branch }} + + # Check if conanfile.txt or conanfile.py has changed compared to main branch + if git diff --quiet origin/${{ inputs.main_branch }}..HEAD -- '**/conanfile.txt' '**/conanfile.py'; then + echo "should-save-conan-cache=false" >> $GITHUB_OUTPUT + else + echo "should-save-conan-cache=true" >> $GITHUB_OUTPUT + fi + fi + - name: Restore Conan cache if: inputs.cache_enabled == 'true' id: cache-restore-conan @@ -76,7 +96,7 @@ runs: .. 
- name: Save Conan cache - if: inputs.cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true' + if: always() && inputs.cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true' && steps.check-conanfile-changes.outputs.should-save-conan-cache == 'true' uses: actions/cache/save@v4 with: path: | diff --git a/.github/workflows/xahau-ga-macos.yml b/.github/workflows/xahau-ga-macos.yml index 5c802c7e9..efb1a2001 100644 --- a/.github/workflows/xahau-ga-macos.yml +++ b/.github/workflows/xahau-ga-macos.yml @@ -5,6 +5,8 @@ on: branches: ["dev", "candidate", "release"] pull_request: branches: ["dev", "candidate", "release"] + schedule: + - cron: '0 0 * * *' concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -74,6 +76,7 @@ jobs: max_size: 2G hash_dir: true compiler_check: content + is_main_branch: ${{ github.ref_name == env.MAIN_BRANCH_NAME }} - name: Check environment run: | @@ -113,4 +116,4 @@ jobs: - name: Test run: | - ${{ env.build_dir }}/rippled --unittest --unittest-jobs $(nproc) \ No newline at end of file + ${{ env.build_dir }}/rippled --unittest --unittest-jobs $(nproc) diff --git a/.github/workflows/xahau-ga-nix.yml b/.github/workflows/xahau-ga-nix.yml index 486518033..eca5a660a 100644 --- a/.github/workflows/xahau-ga-nix.yml +++ b/.github/workflows/xahau-ga-nix.yml @@ -5,6 +5,8 @@ on: branches: ["dev", "candidate", "release"] pull_request: branches: ["dev", "candidate", "release"] + schedule: + - cron: '0 0 * * *' concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -48,6 +50,7 @@ jobs: max_size: 2G hash_dir: true compiler_check: content + is_main_branch: ${{ github.ref_name == env.MAIN_BRANCH_NAME }} - name: Configure Conan run: | From 92e3a927fce68c227456423c5e765f54a6461539 Mon Sep 17 00:00:00 2001 From: tequ Date: Thu, 9 Oct 2025 19:02:14 +0900 Subject: [PATCH 04/12] refactor KEYLET_LINE in utils_keylet (#502) Fixes the use of high and low in variable names, as these are determined by ripple::keylet::line processing. 
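An illustrative sketch of why the old names were misleading (an assumed
shape, not the actual rippled source): the trust-line keylet derivation
orders the two accounts canonically itself, so neither caller argument is
inherently the "high" or "low" account:

    // sketch: (a, b) and (b, a) yield the same trust-line keylet
    Keylet line(AccountID const& a, AccountID const& b, Currency const& c)
    {
        auto const& lo = std::min(a, b);  // canonical low account
        auto const& hi = std::max(a, b);  // canonical high account
        return Keylet{ltRIPPLE_STATE, indexHash(spaceRippleState, lo, hi, c)};
    }

Here Keylet, indexHash and spaceRippleState stand in for the real types and
helpers; only the canonical ordering is the point.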
Co-authored-by: RichardAH
---
 src/ripple/app/hook/impl/applyHook.cpp | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/ripple/app/hook/impl/applyHook.cpp b/src/ripple/app/hook/impl/applyHook.cpp
index 0616ccf5f..5b5f28cf2 100644
--- a/src/ripple/app/hook/impl/applyHook.cpp
+++ b/src/ripple/app/hook/impl/applyHook.cpp
@@ -3150,15 +3150,15 @@ DEFINE_HOOK_FUNCTION(
     if (a == 0 || b == 0 || c == 0 || d == 0 || e == 0 || f == 0)
         return INVALID_ARGUMENT;
 
-    uint32_t hi_ptr = a, hi_len = b, lo_ptr = c, lo_len = d,
+    uint32_t acc1_ptr = a, acc1_len = b, acc2_ptr = c, acc2_len = d,
              cu_ptr = e, cu_len = f;
 
-    if (NOT_IN_BOUNDS(hi_ptr, hi_len, memory_length) ||
-        NOT_IN_BOUNDS(lo_ptr, lo_len, memory_length) ||
+    if (NOT_IN_BOUNDS(acc1_ptr, acc1_len, memory_length) ||
+        NOT_IN_BOUNDS(acc2_ptr, acc2_len, memory_length) ||
         NOT_IN_BOUNDS(cu_ptr, cu_len, memory_length))
         return OUT_OF_BOUNDS;
 
-    if (hi_len != 20 || lo_len != 20)
+    if (acc1_len != 20 || acc2_len != 20)
         return INVALID_ARGUMENT;
 
     std::optional<Currency> cur =
@@ -3167,8 +3167,8 @@
     if (!cur)
         return INVALID_ARGUMENT;
 
     auto kl = ripple::keylet::line(
-        AccountID::fromVoid(memory + hi_ptr),
-        AccountID::fromVoid(memory + lo_ptr),
+        AccountID::fromVoid(memory + acc1_ptr),
+        AccountID::fromVoid(memory + acc2_ptr),
         *cur);
     return serialize_keylet(kl, memory, write_ptr, write_len);
 }

From fa1b93bfd879f93e0b8fcf369b86fb18d39390b4 Mon Sep 17 00:00:00 2001
From: Niq Dudfield
Date: Fri, 10 Oct 2025 10:57:46 +0700
Subject: [PATCH 05/12] build: migrate to conan 2 (#585)

Migrates the build system from Conan 1 to Conan 2
---
 .github/actions/xahau-ga-build/action.yml | 4 +
 .../actions/xahau-ga-dependencies/action.yml | 5 +-
 .github/workflows/xahau-ga-macos.yml | 52 +++++++--
 .github/workflows/xahau-ga-nix.yml | 36 ++++--
 BUILD.md | 72 ++++++++----
 build-core.sh | 27 ++++-
 conanfile.py | 103 ++++++++++--------
 external/wasmedge/conanfile.py | 11 +-
 release-builder.sh | 60 +++++++---
 9 files changed, 256 insertions(+), 114 deletions(-)

diff --git a/.github/actions/xahau-ga-build/action.yml b/.github/actions/xahau-ga-build/action.yml
index 3387f0391..66fe45d62 100644
--- a/.github/actions/xahau-ga-build/action.yml
+++ b/.github/actions/xahau-ga-build/action.yml
@@ -94,6 +94,10 @@ runs:
         fi
 
         # Run CMake configure
+        # Note: conanfile.py hardcodes 'build/generators' as the output path.
+        # If we're in a 'build' folder, Conan detects this and uses just 'generators/'
+        # If we're in '.build' (non-standard), Conan adds the full 'build/generators/'
+        # So we get: .build/build/generators/ with our non-standard folder name
        cmake ..
\ -G "${{ inputs.generator }}" \ $CCACHE_ARGS \ diff --git a/.github/actions/xahau-ga-dependencies/action.yml b/.github/actions/xahau-ga-dependencies/action.yml index d295e20eb..cb14e4e57 100644 --- a/.github/actions/xahau-ga-dependencies/action.yml +++ b/.github/actions/xahau-ga-dependencies/action.yml @@ -78,8 +78,9 @@ runs: - name: Export custom recipes shell: bash run: | - conan export external/snappy snappy/1.1.10@xahaud/stable - conan export external/soci soci/4.0.3@xahaud/stable + conan export external/snappy --version 1.1.10 --user xahaud --channel stable + conan export external/soci --version 4.0.3 --user xahaud --channel stable + conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable - name: Install dependencies shell: bash diff --git a/.github/workflows/xahau-ga-macos.yml b/.github/workflows/xahau-ga-macos.yml index efb1a2001..66c7e6877 100644 --- a/.github/workflows/xahau-ga-macos.yml +++ b/.github/workflows/xahau-ga-macos.yml @@ -32,9 +32,9 @@ jobs: - name: Install Conan run: | - brew install conan@1 - # Add Conan 1 to the PATH for this job - echo "$(brew --prefix conan@1)/bin" >> $GITHUB_PATH + brew install conan + # Verify Conan 2 is installed + conan --version - name: Install Coreutils run: | @@ -60,12 +60,20 @@ jobs: - name: Install CMake run: | - if which cmake > /dev/null 2>&1; then - echo "cmake executable exists" - cmake --version - else - brew install cmake - fi + # Install CMake 3.x to match local dev environments + # With Conan 2 and the policy args passed to CMake, newer versions + # can have issues with dependencies that require cmake_minimum_required < 3.5 + brew uninstall cmake --ignore-dependencies 2>/dev/null || true + + # Download and install CMake 3.31.7 directly + curl -L https://github.com/Kitware/CMake/releases/download/v3.31.7/cmake-3.31.7-macos-universal.tar.gz -o cmake.tar.gz + tar -xzf cmake.tar.gz + + # Move the entire CMake.app to /Applications + sudo mv cmake-3.31.7-macos-universal/CMake.app /Applications/ + + echo "/Applications/CMake.app/Contents/bin" >> $GITHUB_PATH + /Applications/CMake.app/Contents/bin/cmake --version - name: Install ccache run: brew install ccache @@ -92,8 +100,30 @@ jobs: - name: Configure Conan run: | - conan profile new default --detect || true # Ignore error if profile exists - conan profile update settings.compiler.cppstd=20 default + # Create the default profile directory if it doesn't exist + mkdir -p ~/.conan2/profiles + + # Detect compiler version + COMPILER_VERSION=$(clang --version | grep -oE 'version [0-9]+' | grep -oE '[0-9]+') + + # Create profile with our specific settings + cat > ~/.conan2/profiles/default <=2.0" - name: Configure ccache uses: ./.github/actions/xahau-configure-ccache @@ -54,18 +54,30 @@ jobs: - name: Configure Conan run: | - conan profile new default --detect || true # Ignore error if profile exists - conan profile update settings.compiler.cppstd=20 default - conan profile update settings.compiler=${{ matrix.compiler }} default - conan profile update settings.compiler.libcxx=libstdc++11 default - conan profile update env.CC=/usr/bin/${{ matrix.cc }} default - conan profile update env.CXX=/usr/bin/${{ matrix.cxx }} default - conan profile update conf.tools.build:compiler_executables='{"c": "/usr/bin/${{ matrix.cc }}", "cpp": "/usr/bin/${{ matrix.cxx }}"}' default + # Create the default profile directory if it doesn't exist + mkdir -p ~/.conan2/profiles + + # Create profile with our specific settings + cat > ~/.conan2/profiles/default <` header. 
@@ -65,13 +65,24 @@ can't build earlier Boost versions. 1. (Optional) If you've never used Conan, use autodetect to set up a default profile. ``` - conan profile new default --detect + conan profile detect --force ``` 2. Update the compiler settings. + For Conan 2, you can edit the profile directly at `~/.conan2/profiles/default`, + or use the Conan CLI. Ensure C++20 is set: + ``` - conan profile update settings.compiler.cppstd=20 default + conan profile show + ``` + + Look for `compiler.cppstd=20` in the output. If it's not set, edit the profile: + + ``` + # Edit ~/.conan2/profiles/default and ensure these settings exist: + [settings] + compiler.cppstd=20 ``` Linux developers will commonly have a default Conan [profile][] that compiles @@ -80,7 +91,9 @@ can't build earlier Boost versions. then you will need to choose the `libstdc++11` ABI. ``` - conan profile update settings.compiler.libcxx=libstdc++11 default + # In ~/.conan2/profiles/default, ensure: + [settings] + compiler.libcxx=libstdc++11 ``` On Windows, you should use the x64 native build tools. @@ -91,7 +104,9 @@ can't build earlier Boost versions. architecture. ``` - conan profile update settings.arch=x86_64 default + # In ~/.conan2/profiles/default, ensure: + [settings] + arch=x86_64 ``` 3. (Optional) If you have multiple compilers installed on your platform, @@ -100,16 +115,18 @@ can't build earlier Boost versions. in the generated CMake toolchain file. ``` - conan profile update 'conf.tools.build:compiler_executables={"c": "", "cpp": ""}' default + # In ~/.conan2/profiles/default, add under [conf] section: + [conf] + tools.build:compiler_executables={"c": "", "cpp": ""} ``` - It should choose the compiler for dependencies as well, - but not all of them have a Conan recipe that respects this setting (yet). - For the rest, you can set these environment variables: + For setting environment variables for dependencies: ``` - conan profile update env.CC= default - conan profile update env.CXX= default + # In ~/.conan2/profiles/default, add under [buildenv] section: + [buildenv] + CC= + CXX= ``` 4. Export our [Conan recipe for Snappy](./external/snappy). @@ -117,14 +134,20 @@ can't build earlier Boost versions. which allows you to statically link it with GCC, if you want. ``` - conan export external/snappy snappy/1.1.10@xahaud/stable + conan export external/snappy --version 1.1.10 --user xahaud --channel stable ``` 5. Export our [Conan recipe for SOCI](./external/soci). It patches their CMake to correctly import its dependencies. ``` - conan export external/soci soci/4.0.3@xahaud/stable + conan export external/soci --version 4.0.3 --user xahaud --channel stable + ``` + +6. Export our [Conan recipe for WasmEdge](./external/wasmedge). + + ``` + conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable ``` ### Build and Test @@ -259,23 +282,26 @@ and can be helpful for detecting `#include` omissions. If you have trouble building dependencies after changing Conan settings, try removing the Conan cache. +For Conan 2: ``` -rm -rf ~/.conan/data +rm -rf ~/.conan2/p +``` + +Or clear the entire Conan 2 cache: +``` +conan cache clean "*" ``` -### no std::result_of +### macOS compilation with Apple Clang 17+ -If your compiler version is recent enough to have removed `std::result_of` as -part of C++20, e.g. Apple Clang 15.0, then you might need to add a preprocessor -definition to your build. 
+If you're on macOS with Apple Clang 17 or newer, you need to add a compiler flag to work around a compilation error in gRPC dependencies. + +Edit `~/.conan2/profiles/default` and add under the `[conf]` section: ``` -conan profile update 'options.boost:extra_b2_flags="define=BOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'env.CFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'conf.tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default -conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default +[conf] +tools.build:cxxflags=["-Wno-missing-template-arg-list-after-template-kw"] ``` diff --git a/build-core.sh b/build-core.sh index 7b1eb78dc..ce78c7625 100755 --- a/build-core.sh +++ b/build-core.sh @@ -12,6 +12,13 @@ echo "-- GITHUB_REPOSITORY: $1" echo "-- GITHUB_SHA: $2" echo "-- GITHUB_RUN_NUMBER: $4" +# Use mounted filesystem for temp files to avoid container space limits +export TMPDIR=/io/tmp +export TEMP=/io/tmp +export TMP=/io/tmp +mkdir -p /io/tmp +echo "=== Using temp directory: /io/tmp ===" + umask 0000; cd /io/ && @@ -43,10 +50,17 @@ export LDFLAGS="-static-libstdc++" git config --global --add safe.directory /io && git checkout src/ripple/protocol/impl/BuildInfo.cpp && sed -i s/\"0.0.0\"/\"$(date +%Y).$(date +%-m).$(date +%-d)-$(git rev-parse --abbrev-ref HEAD)$(if [ -n "$4" ]; then echo "+$4"; fi)\"/g src/ripple/protocol/impl/BuildInfo.cpp && -conan export external/snappy snappy/1.1.10@xahaud/stable && -conan export external/soci soci/4.0.3@xahaud/stable && +conan export external/snappy --version 1.1.10 --user xahaud --channel stable && +conan export external/soci --version 4.0.3 --user xahaud --channel stable && +conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable && cd release-build && -conan install .. --output-folder . --build missing --settings build_type=$BUILD_TYPE && +# Install dependencies - tool_requires in conanfile.py handles glibc 2.28 compatibility +# for build tools (protoc, grpc plugins, b2) in HBB environment +# The tool_requires('b2/5.3.2') in conanfile.py should force b2 to build from source +# with the correct toolchain, avoiding the GLIBCXX_3.4.29 issue +echo "=== Installing dependencies ===" && +conan install .. --output-folder . --build missing --settings build_type=$BUILD_TYPE \ + -o with_wasmedge=False -o tool_requires_b2=True && cmake .. -G Ninja \ -DCMAKE_BUILD_TYPE=$BUILD_TYPE \ -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ @@ -56,10 +70,13 @@ cmake .. 
-G Ninja \ -Dxrpld=TRUE \ -Dtests=TRUE && ccache -z && -ninja -j $3 && +ninja -j $3 && echo "=== Re-running final link with verbose output ===" && rm -f rippled && ninja -v rippled && ccache -s && -strip -s rippled && +strip -s rippled && mv rippled xahaud && +echo "=== Full ldd output ===" && +ldd xahaud && +echo "=== Running libcheck ===" && libcheck xahaud && echo "Build host: `hostname`" > release.info && echo "Build date: `date`" >> release.info && diff --git a/conanfile.py b/conanfile.py index 0a5bc0b4a..4b98b84db 100644 --- a/conanfile.py +++ b/conanfile.py @@ -21,22 +21,20 @@ class Xrpl(ConanFile): 'static': [True, False], 'tests': [True, False], 'unity': [True, False], + 'with_wasmedge': [True, False], + 'tool_requires_b2': [True, False], } requires = [ - 'boost/1.86.0', 'date/3.0.1', 'libarchive/3.6.0', - 'lz4/1.9.3', + 'lz4/1.9.4', 'grpc/1.50.1', 'nudb/2.0.8', 'openssl/1.1.1u', - 'protobuf/3.21.9', - 'snappy/1.1.10@xahaud/stable', + 'protobuf/3.21.12', 'soci/4.0.3@xahaud/stable', - 'sqlite3/3.42.0', - 'zlib/1.2.13', - 'wasmedge/0.11.2', + 'zlib/1.3.1', ] default_options = { @@ -50,42 +48,44 @@ class Xrpl(ConanFile): 'static': True, 'tests': True, 'unity': False, + 'with_wasmedge': True, + 'tool_requires_b2': False, - 'cassandra-cpp-driver:shared': False, - 'date:header_only': True, - 'grpc:shared': False, - 'grpc:secure': True, - 'libarchive:shared': False, - 'libarchive:with_acl': False, - 'libarchive:with_bzip2': False, - 'libarchive:with_cng': False, - 'libarchive:with_expat': False, - 'libarchive:with_iconv': False, - 'libarchive:with_libxml2': False, - 'libarchive:with_lz4': True, - 'libarchive:with_lzma': False, - 'libarchive:with_lzo': False, - 'libarchive:with_nettle': False, - 'libarchive:with_openssl': False, - 'libarchive:with_pcreposix': False, - 'libarchive:with_xattr': False, - 'libarchive:with_zlib': False, - 'libpq:shared': False, - 'lz4:shared': False, - 'openssl:shared': False, - 'protobuf:shared': False, - 'protobuf:with_zlib': True, - 'rocksdb:enable_sse': False, - 'rocksdb:lite': False, - 'rocksdb:shared': False, - 'rocksdb:use_rtti': True, - 'rocksdb:with_jemalloc': False, - 'rocksdb:with_lz4': True, - 'rocksdb:with_snappy': True, - 'snappy:shared': False, - 'soci:shared': False, - 'soci:with_sqlite3': True, - 'soci:with_boost': True, + 'cassandra-cpp-driver/*:shared': False, + 'date/*:header_only': True, + 'grpc/*:shared': False, + 'grpc/*:secure': True, + 'libarchive/*:shared': False, + 'libarchive/*:with_acl': False, + 'libarchive/*:with_bzip2': False, + 'libarchive/*:with_cng': False, + 'libarchive/*:with_expat': False, + 'libarchive/*:with_iconv': False, + 'libarchive/*:with_libxml2': False, + 'libarchive/*:with_lz4': True, + 'libarchive/*:with_lzma': False, + 'libarchive/*:with_lzo': False, + 'libarchive/*:with_nettle': False, + 'libarchive/*:with_openssl': False, + 'libarchive/*:with_pcreposix': False, + 'libarchive/*:with_xattr': False, + 'libarchive/*:with_zlib': False, + 'libpq/*:shared': False, + 'lz4/*:shared': False, + 'openssl/*:shared': False, + 'protobuf/*:shared': False, + 'protobuf/*:with_zlib': True, + 'rocksdb/*:enable_sse': False, + 'rocksdb/*:lite': False, + 'rocksdb/*:shared': False, + 'rocksdb/*:use_rtti': True, + 'rocksdb/*:with_jemalloc': False, + 'rocksdb/*:with_lz4': True, + 'rocksdb/*:with_snappy': True, + 'snappy/*:shared': False, + 'soci/*:shared': False, + 'soci/*:with_sqlite3': True, + 'soci/*:with_boost': True, } def set_version(self): @@ -96,11 +96,28 @@ class Xrpl(ConanFile): match = next(m for m in matches if m) 
self.version = match.group(1) + def build_requirements(self): + # These provide build tools (protoc, grpc plugins) that run during build + self.tool_requires('protobuf/3.21.12') + self.tool_requires('grpc/1.50.1') + # Explicitly require b2 (e.g. for building from source for glibc compatibility) + if self.options.tool_requires_b2: + self.tool_requires('b2/5.3.2') + def configure(self): if self.settings.compiler == 'apple-clang': - self.options['boost'].visibility = 'global' + self.options['boost/*'].visibility = 'global' def requirements(self): + # Force sqlite3 version to avoid conflicts with soci + self.requires('sqlite3/3.42.0', override=True) + # Force our custom snappy build for all dependencies + self.requires('snappy/1.1.10@xahaud/stable', override=True) + # Force boost version for all dependencies to avoid conflicts + self.requires('boost/1.86.0', override=True) + + if self.options.with_wasmedge: + self.requires('wasmedge/0.11.2@xahaud/stable') if self.options.jemalloc: self.requires('jemalloc/5.2.1') if self.options.reporting: diff --git a/external/wasmedge/conanfile.py b/external/wasmedge/conanfile.py index 4b7c42050..cefda286d 100644 --- a/external/wasmedge/conanfile.py +++ b/external/wasmedge/conanfile.py @@ -38,8 +38,15 @@ class WasmedgeConan(ConanFile): raise ConanInvalidConfiguration("Binaries for this combination of version/os/arch/compiler are not available") def package_id(self): - del self.info.settings.compiler.version - self.info.settings.compiler = self._compiler_alias + # Make binary compatible across compiler versions (since we're downloading prebuilt) + self.info.settings.rm_safe("compiler.version") + # Group compilers by their binary compatibility + # Note: We must use self.info.settings here, not self.settings (forbidden in Conan 2) + compiler_name = str(self.info.settings.compiler) + if compiler_name in ["Visual Studio", "msvc"]: + self.info.settings.compiler = "Visual Studio" + else: + self.info.settings.compiler = "gcc" def build(self): # This is packaging binaries so the download needs to be in build diff --git a/release-builder.sh b/release-builder.sh index f2a64a673..672e68042 100755 --- a/release-builder.sh +++ b/release-builder.sh @@ -1,9 +1,11 @@ -#!/bin/bash +#!/bin/bash # We use set -e and bash with -u to bail on first non zero exit code of any # processes launched or upon any unbound variable. # We use set -x to print commands before running them to help with # debugging. 
+set -ex + echo "START BUILDING (HOST)" echo "Cleaning previously built binary" @@ -90,29 +92,37 @@ RUN /hbb_exe/activate-exec bash -c "dnf install -y epel-release && \ llvm14-static llvm14-devel && \ dnf clean all" -# Install Conan and CMake -RUN /hbb_exe/activate-exec pip3 install "conan==1.66.0" && \ +# Install Conan 2 and CMake +RUN /hbb_exe/activate-exec pip3 install "conan>=2.0,<3.0" && \ /hbb_exe/activate-exec wget -q https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-linux-x86_64.tar.gz -O cmake.tar.gz && \ mkdir cmake && \ tar -xzf cmake.tar.gz --strip-components=1 -C cmake && \ rm cmake.tar.gz -# Install Boost 1.86.0 -RUN /hbb_exe/activate-exec bash -c "cd /tmp && \ +# Dual Boost configuration in HBB environment: +# - Manual Boost in /usr/local (minimal: for WasmEdge which is pre-built in Docker) +# - Conan Boost (full: for the application and all dependencies via toolchain) +# +# Install minimal Boost 1.86.0 for WasmEdge only (filesystem and its dependencies) +# The main application will use Conan-provided Boost for all other components +# IMPORTANT: Understanding Boost linking options: +# - link=static: Creates static Boost libraries (.a files) instead of shared (.so files) +# - runtime-link=shared: Links Boost libraries against shared libc (glibc) +# WasmEdge only needs boost::filesystem and boost::system +RUN /hbb_exe/activate-exec bash -c "echo 'Boost cache bust: v5-minimal' && \ + rm -rf /usr/local/lib/libboost* /usr/local/include/boost && \ + cd /tmp && \ wget -q https://archives.boost.io/release/1.86.0/source/boost_1_86_0.tar.gz -O boost.tar.gz && \ mkdir boost && \ tar -xzf boost.tar.gz --strip-components=1 -C boost && \ cd boost && \ ./bootstrap.sh && \ - ./b2 link=static -j${BUILD_CORES} && \ - ./b2 install && \ + ./b2 install \ + link=static runtime-link=shared -j${BUILD_CORES} \ + --with-filesystem --with-system && \ cd /tmp && \ rm -rf boost boost.tar.gz" -ENV BOOST_ROOT=/usr/local/src/boost_1_86_0 -ENV Boost_LIBRARY_DIRS=/usr/local/lib -ENV BOOST_INCLUDEDIR=/usr/local/src/boost_1_86_0 - ENV CMAKE_EXE_LINKER_FLAGS="-static-libstdc++" ENV LLVM_DIR=/usr/lib64/llvm14/lib/cmake/llvm @@ -155,6 +165,10 @@ RUN cd /tmp && \ cd build && \ /hbb_exe/activate-exec bash -c "source /opt/rh/gcc-toolset-11/enable && \ ln -sf /opt/rh/gcc-toolset-11/root/usr/bin/ar /usr/bin/ar && \ + ln -sf /opt/rh/gcc-toolset-11/root/usr/bin/ranlib /usr/bin/ranlib && \ + echo '=== Binutils version check ===' && \ + ar --version | head -1 && \ + ranlib --version | head -1 && \ cmake .. 
\ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_PREFIX=/usr/local \ @@ -176,14 +190,28 @@ RUN cd /tmp && \ # Set environment variables ENV PATH=/usr/local/bin:$PATH -# Configure ccache and Conan +# Configure ccache and Conan 2 +# NOTE: Using echo commands instead of heredocs because heredocs in Docker RUN commands are finnicky RUN /hbb_exe/activate-exec bash -c "ccache -M 10G && \ ccache -o cache_dir=/cache/ccache && \ ccache -o compiler_check=content && \ - conan config set storage.path=/cache/conan && \ - (conan profile new default --detect || true) && \ - conan profile update settings.compiler.libcxx=libstdc++11 default && \ - conan profile update settings.compiler.cppstd=20 default" + mkdir -p ~/.conan2 /cache/conan2 /cache/conan2_download /cache/conan2_sources && \ + echo 'core.cache:storage_path=/cache/conan2' > ~/.conan2/global.conf && \ + echo 'core.download:download_cache=/cache/conan2_download' >> ~/.conan2/global.conf && \ + echo 'core.sources:download_cache=/cache/conan2_sources' >> ~/.conan2/global.conf && \ + conan profile detect --force && \ + echo '[settings]' > ~/.conan2/profiles/default && \ + echo 'arch=x86_64' >> ~/.conan2/profiles/default && \ + echo 'build_type=Release' >> ~/.conan2/profiles/default && \ + echo 'compiler=gcc' >> ~/.conan2/profiles/default && \ + echo 'compiler.cppstd=20' >> ~/.conan2/profiles/default && \ + echo 'compiler.libcxx=libstdc++11' >> ~/.conan2/profiles/default && \ + echo 'compiler.version=11' >> ~/.conan2/profiles/default && \ + echo 'os=Linux' >> ~/.conan2/profiles/default && \ + echo '' >> ~/.conan2/profiles/default && \ + echo '[conf]' >> ~/.conan2/profiles/default && \ + echo '# Force building from source for packages with binary compatibility issues' >> ~/.conan2/profiles/default && \ + echo '*:tools.system.package_manager:mode=build' >> ~/.conan2/profiles/default" DOCKERFILE_EOF ) From b3e6a902cb594738b8f654f5c3dcc46d8c1fbd85 Mon Sep 17 00:00:00 2001 From: "J. Scott Branson" <18340247+jscottbranson@users.noreply.github.com> Date: Thu, 9 Oct 2025 23:59:39 -0400 Subject: [PATCH 06/12] Update Sample Configuration Files in /cfg for Congruence with xahaud (#584) --- README.md | 6 +- cfg/validators-example.txt | 49 ++-- ...rippled-example.cfg => xahaud-example.cfg} | 207 +++++++++-------- ...led-reporting.cfg => xahaud-reporting.cfg} | 209 +++++++++--------- ...d-standalone.cfg => xahaud-standalone.cfg} | 9 +- 5 files changed, 238 insertions(+), 242 deletions(-) rename cfg/{rippled-example.cfg => xahaud-example.cfg} (90%) rename cfg/{rippled-reporting.cfg => xahaud-reporting.cfg} (90%) rename cfg/{rippled-standalone.cfg => xahaud-standalone.cfg} (97%) mode change 100755 => 100644 diff --git a/README.md b/README.md index c9335cfc1..0a7e17ddd 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ **Note:** Throughout this README, references to "we" or "our" pertain to the community and contributors involved in the Xahau network. It does not imply a legal entity or a specific collection of individuals. -[Xahau](https://xahau.network/) is a decentralized cryptographic ledger that builds upon the robust foundation of the XRP Ledger. It inherits the XRP Ledger's Byzantine Fault Tolerant consensus algorithm and enhances it with additional features and functionalities. Developers and users familiar with the XRP Ledger will find that most documentation and tutorials available on [xrpl.org](https://xrpl.org) are relevant and applicable to Xahau, including those related to running validators and managing validator keys. 
For Xahau specific documentation you can visit our [documentation](https://docs.xahau.network/) +[Xahau](https://xahau.network/) is a decentralized cryptographic ledger that builds upon the robust foundation of the XRP Ledger. It inherits the XRP Ledger's Byzantine Fault Tolerant consensus algorithm and enhances it with additional features and functionalities. Developers and users familiar with the XRP Ledger will find that most documentation and tutorials available on [xrpl.org](https://xrpl.org) are relevant and applicable to Xahau, including those related to running validators and managing validator keys. For Xahau specific documentation you can visit our [documentation](https://xahau.network/) ## XAH XAH is the public, counterparty-free asset native to Xahau and functions primarily as network gas. Transactions submitted to the Xahau network must supply an appropriate amount of XAH, to be burnt by the network as a fee, in order to be successfully included in a validated ledger. In addition, XAH also acts as a bridge currency within the Xahau DEX. XAH is traded on the open-market and is available for anyone to access. Xahau was created in 2023 with a supply of 600 million units of XAH. @@ -12,7 +12,7 @@ The server software that powers Xahau is called `xahaud` and is available in thi ### Build from Source -* [Read the build instructions in our documentation](https://docs.xahau.network/infrastructure/building-xahau) +* [Read the build instructions in our documentation](https://xahau.network/infrastructure/building-xahau) * If you encounter any issues, please [open an issue](https://github.com/xahau/xahaud/issues) ## Highlights of Xahau @@ -58,7 +58,7 @@ git-subtree. See those directories' README files for more details. - **Documentation**: Documentation for XRPL, Xahau and Hooks. - [Xrpl Documentation](https://xrpl.org) - - [Xahau Documentation](https://docs.xahau.network/) + - [Xahau Documentation](https://xahau.network/) - [Hooks Technical Documentation](https://xrpl-hooks.readme.io/) - **Explorers**: Explore the Xahau ledger using various explorers: - [xahauexplorer.com](https://xahauexplorer.com) diff --git a/cfg/validators-example.txt b/cfg/validators-example.txt index 6c2314ebd..11ab76c2a 100644 --- a/cfg/validators-example.txt +++ b/cfg/validators-example.txt @@ -1,7 +1,7 @@ # # Default validators.txt # -# This file is located in the same folder as your rippled.cfg file +# This file is located in the same folder as your xahaud.cfg file # and defines which validators your server trusts not to collude. # # This file is UTF-8 with DOS, UNIX, or Mac style line endings. @@ -17,18 +17,17 @@ # See validator_list_sites and validator_list_keys below. # # Examples: -# n9KorY8QtTdRx7TVDpwnG9NvyxsDwHUKUEeDLY3AkiGncVaSXZi5 -# n9MqiExBcoG19UXwoLjBJnhsxEhAZMuWwJDRdkyDz1EkEkwzQTNt +# n9L3GdotB8a3AqtsvS7NXt4BUTQSAYyJUr9xtFj2qXJjfbZsawKY +# n9M7G6eLwQtUjfCthWUmTN8L4oEZn1sNr46yvKrpsq58K1C6LAxz # # [validator_list_sites] # # List of URIs serving lists of recommended validators. # # Examples: -# https://vl.ripple.com -# https://vl.xrplf.org +# https://vl.xahau.org # http://127.0.0.1:8000 -# file:///etc/opt/ripple/vl.txt +# file:///etc/opt/xahaud/vl.txt # # [validator_list_keys] # @@ -39,50 +38,48 @@ # Validator list keys should be hex-encoded. 
# # Examples: -# ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734 -# ED307A760EE34F2D0CAA103377B1969117C38B8AA0AA1E2A24DAC1F32FC97087ED +# EDA46E9C39B1389894E690E58914DC1029602870370A0993E5B87C4A24EAF4A8E8 # # [import_vl_keys] # # This section is used to import the public keys of trusted validator list publishers. # The keys are used to authenticate and accept new lists of trusted validators. -# In this example, the key for the publisher "vl.xrplf.org" is imported. # Each key is represented as a hexadecimal string. # # Examples: -# ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734 +# ED45D1840EE724BE327ABE9146503D5848EFD5F38B6D5FEDE71E80ACCE5E6E738B +# ED42AEC58B701EEBB77356FFFEC26F83C1F0407263530F068C7C73D392C7E06FD1 +# ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734 -# The default validator list publishers that the rippled instance +# The default validator list publishers that the xahaud instance # trusts. # -# WARNING: Changing these values can cause your rippled instance to see a -# validated ledger that contradicts other rippled instances' +# WARNING: Changing these values can cause your xahaud instance to see a +# validated ledger that contradicts other xahaud instances' # validated ledgers (aka a ledger fork) if your validator list(s) # do not sufficiently overlap with the list(s) used by others. # See: https://arxiv.org/pdf/1802.07242.pdf [validator_list_sites] -https://vl.ripple.com -https://vl.xrplf.org +https://vl.xahau.org [validator_list_keys] -#vl.ripple.com -ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734 -# vl.xrplf.org -ED45D1840EE724BE327ABE9146503D5848EFD5F38B6D5FEDE71E80ACCE5E6E738B +# vl.xahau.org +EDA46E9C39B1389894E690E58914DC1029602870370A0993E5B87C4A24EAF4A8E8 [import_vl_keys] -# vl.xrplf.org +ED45D1840EE724BE327ABE9146503D5848EFD5F38B6D5FEDE71E80ACCE5E6E738B +ED42AEC58B701EEBB77356FFFEC26F83C1F0407263530F068C7C73D392C7E06FD1 ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734 -# To use the test network (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html), +# To use the test network (see https://xahau.network/docs/infrastructure/installing-xahaud), # use the following configuration instead: # -# [validator_list_sites] -# https://vl.altnet.rippletest.net -# -# [validator_list_keys] -# ED264807102805220DA0F312E71FC2C69E1552C9C5790F6C25E3729DEB573D5860 +# [validators] +# nHBoJCE3wPgkTcrNPMHyTJFQ2t77EyCAqcBRspFCpL6JhwCm94VZ +# nHUVv4g47bFMySAZFUKVaXUYEmfiUExSoY4FzwXULNwJRzju4XnQ +# nHBvr8avSFTz4TFxZvvi4rEJZZtyqE3J6KAAcVWVtifsE7edPM7q +# nHUH3Z8TRU57zetHbEPr1ynyrJhxQCwrJvNjr4j1SMjYADyW1WWe # # [import_vl_keys] # ED264807102805220DA0F312E71FC2C69E1552C9C5790F6C25E3729DEB573D5860 diff --git a/cfg/rippled-example.cfg b/cfg/xahaud-example.cfg similarity index 90% rename from cfg/rippled-example.cfg rename to cfg/xahaud-example.cfg index 80ec0a980..85520f93f 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/xahaud-example.cfg @@ -9,7 +9,7 @@ # # 2. Peer Protocol # -# 3. Ripple Protocol +# 3. XRPL Protocol # # 4. HTTPS Client # @@ -29,18 +29,17 @@ # # Purpose # -# This file documents and provides examples of all rippled server process -# configuration options. When the rippled server instance is launched, it +# This file documents and provides examples of all xahaud server process +# configuration options. 
When the xahaud server instance is launched, it
 # looks for a file with the following name:
 #
-# rippled.cfg
+# xahaud.cfg
 #
-# For more information on where the rippled server instance searches for the
-# file, visit:
+# To run xahaud with a custom configuration file, use the "--conf {file}" flag.
+# By default, xahaud will look in the local working directory or the home directory.
 #
-# https://xrpl.org/commandline-usage.html#generic-options
 #
-# This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX,
+# This file should be named xahaud.cfg. This file is UTF-8 with DOS, UNIX,
 # or Mac style end of lines. Blank lines and lines beginning with '#' are
 # ignored. Undefined sections are reserved. No escapes are currently defined.
 #
@@ -89,8 +88,8 @@
 #
 #
 #
-# rippled offers various server protocols to clients making inbound
-# connections. The listening ports rippled uses are "universal" ports
+# xahaud offers various server protocols to clients making inbound
+# connections. The listening ports xahaud uses are "universal" ports
 # which may be configured to handshake in one or more of the available
 # supported protocols. These universal ports simplify administration:
 # A single open port can be used for multiple protocols.
@@ -103,7 +102,7 @@
 #
 # A list of port names and key/value pairs. A port name must start with a
 # letter and contain only letters and numbers. The name is not case-sensitive.
-# For each name in this list, rippled will look for a configuration file
+# For each name in this list, xahaud will look for a configuration file
 # section with the same name and use it to create a listening port. The
 # name is informational only; the choice of name does not affect the function
 # of the listening port.
@@ -134,7 +133,7 @@
 # ip = 127.0.0.1
 # protocol = http
 #
-# When rippled is used as a command line client (for example, issuing a
+# When xahaud is used as a command line client (for example, issuing a
 # server stop command), the first port advertising the http or https
 # protocol will be used to make the connection.
 #
@@ -175,7 +174,7 @@
 # same time. It is possible have both Websockets and Secure Websockets
 # together in one port.
 #
-# NOTE If no ports support the peer protocol, rippled cannot
+# NOTE If no ports support the peer protocol, xahaud cannot
 # receive incoming peer connections or become a superpeer.
 #
 # limit = <number>
 
@@ -194,7 +193,7 @@
 # required. IP address restrictions, if any, will be checked in addition
 # to the credentials specified here.
 #
-# When acting in the client role, rippled will supply these credentials
+# When acting in the client role, xahaud will supply these credentials
 # using HTTP's Basic Authentication headers when making outbound HTTP/S
 # requests.
 #
@@ -237,7 +236,7 @@
 # WS, or WSS protocol interfaces. If administrative commands are
 # disabled for a port, these credentials have no effect.
 #
-# When acting in the client role, rippled will supply these credentials
+# When acting in the client role, xahaud will supply these credentials
 # in the submitted JSON for any administrative command requests when
 # invoking JSON-RPC commands on remote servers.
 #
@@ -258,7 +257,7 @@
 # resource controls will default to those for non-administrative users.
 #
 # The secure_gateway IP addresses are intended to represent
-# proxies. Since rippled trusts these hosts, they must be
+# proxies. Since xahaud trusts these hosts, they must be
 # responsible for properly authenticating the remote user.
#
 # If some IP addresses are included for both "admin" and
@@ -272,7 +271,7 @@
 # Use the specified files when configuring SSL on the port.
 #
 # NOTE If no files are specified and secure protocols are selected,
-# rippled will generate an internal self-signed certificate.
+# xahaud will generate an internal self-signed certificate.
 #
 # The files have these meanings:
 #
@@ -295,12 +294,12 @@
 # Control the ciphers which the server will support over SSL on the port,
 # specified using the OpenSSL "cipher list format".
 #
-# NOTE If unspecified, rippled will automatically configure a modern
+# NOTE If unspecified, xahaud will automatically configure a modern
 # cipher suite. This default suite should be widely supported.
 #
 # You should not modify this string unless you have a specific
 # reason and cryptographic expertise. Incorrect modification may
-# keep rippled from connecting to other instances of rippled or
+# keep xahaud from connecting to other instances of xahaud or
 # prevent RPC and WebSocket clients from connecting.
 #
 # send_queue_limit = [1..65535]
@@ -351,7 +350,7 @@
 #
 # Examples:
 # { "command" : "server_info" }
-# { "command" : "log_level", "partition" : "ripplecalc", "severity" : "trace" }
+# { "command" : "log_level", "partition" : "xahaudcalc", "severity" : "trace" }
 #
 #
 #
@@ -380,16 +379,15 @@
 #-----------------
 #
 # These settings control security and access attributes of the Peer to Peer
-# server section of the rippled process. Peer Protocol implements the
-# Ripple Payment protocol. It is over peer connections that transactions
-# and validations are passed from to machine to machine, to determine the
-# contents of validated ledgers.
+# server section of the xahaud process. It is over peer connections that
+# transactions and validations are passed from machine to machine, to
+# determine the contents of validated ledgers.
 #
 #
 #
 # [ips]
 #
-# List of hostnames or ips where the Ripple protocol is served. A default
+# List of hostnames or ips where the XRPL protocol is served. A default
 # starter list is included in the code and used if no other hostnames are
 # available.
 #
@@ -398,24 +396,23 @@
 # does not generally matter.
 #
 # The default list of entries is:
-# - r.ripple.com 51235
-# - zaphod.alloy.ee 51235
-# - sahyadri.isrdc.in 51235
+# - hubs.xahau.as16089.net 21337
+# - bacab.alloy.ee 21337
 #
 # Examples:
 #
 # [ips]
 # 192.168.0.1
-# 192.168.0.1 2459
-# r.ripple.com 51235
+# 192.168.0.1 21337
+# bacab.alloy.ee 21337
 #
 #
 # [ips_fixed]
 #
-# List of IP addresses or hostnames to which rippled should always attempt to
+# List of IP addresses or hostnames to which xahaud should always attempt to
 # maintain peer connections with. This is useful for manually forming private
 # networks, for example to configure a validation server that connects to the
-# Ripple network through a public-facing server, or for building a set
+# Xahau Network through a public-facing server, or for building a set
 # of cluster peers.
 #
 # One address or domain names per line is allowed. A port must be specified
@@ -465,7 +462,7 @@
 #
 # IP address or domain of NTP servers to use for time synchronization.
 #
-# These NTP servers are suitable for rippled servers located in the United
+# These NTP servers are suitable for xahaud servers located in the United
 # States:
 # time.windows.com
 # time.apple.com
@@ -566,7 +563,7 @@
 #
 # minimum_txn_in_ledger_standalone = <number>
 #
-# Like minimum_txn_in_ledger when rippled is running in standalone
+# Like minimum_txn_in_ledger when xahaud is running in standalone
 # 
Default: 1000. # # target_txn_in_ledger = @@ -703,7 +700,7 @@ # # [validator_token] # -# This is an alternative to [validation_seed] that allows rippled to perform +# This is an alternative to [validation_seed] that allows xahaud to perform # validation without having to store the validator keys on the network # connected server. The field should contain a single token in the form of a # base64-encoded blob. @@ -738,19 +735,18 @@ # # Specify the file by its name or path. # Unless an absolute path is specified, it will be considered relative to -# the folder in which the rippled.cfg file is located. +# the folder in which the xahaud.cfg file is located. # # Examples: -# /home/ripple/validators.txt -# C:/home/ripple/validators.txt +# /home/xahaud/validators.txt +# C:/home/xahaud/validators.txt # # Example content: # [validators] -# n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7 -# n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj -# n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C -# n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS -# n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA +# n9L3GdotB8a3AqtsvS7NXt4BUTQSAYyJUr9xtFj2qXJjfbZsawKY +# n9LQDHLWyFuAn5BXJuW2ow5J9uGqpmSjRYS2cFRpxf6uJbxwDzvM +# n9MCWyKVUkiatXVJTKUrAESB5kBFP8R3hm43jGHtg8WBnjv3iDfb +# n9KWXCLRhjpajuZtULTXsy6R5xbisA6ozGxM4zdEJFq6uHiFZDvW # # # @@ -833,7 +829,7 @@ # # 0: Disable the ledger replay feature [default] # 1: Enable the ledger replay feature. With this feature enabled, when -# acquiring a ledger from the network, a rippled node only downloads +# acquiring a ledger from the network, a xahaud node only downloads # the ledger header and the transactions instead of the whole ledger. # And the ledger is built by applying the transactions to the parent # ledger. @@ -844,10 +840,9 @@ # #---------------- # -# The rippled server instance uses HTTPS GET requests in a variety of +# The xahaud server instance uses HTTPS GET requests in a variety of # circumstances, including but not limited to contacting trusted domains to -# fetch information such as mapping an email address to a Ripple Payment -# Network address. +# fetch information such as mapping an email address to a user's r address. # # [ssl_verify] # @@ -884,15 +879,15 @@ # #------------ # -# rippled has an optional operating mode called Reporting Mode. In Reporting -# Mode, rippled does not connect to the peer to peer network. Instead, rippled -# will continuously extract data from one or more rippled servers that are +# xahaud has an optional operating mode called Reporting Mode. In Reporting +# Mode, xahaud does not connect to the peer to peer network. Instead, xahaud +# will continuously extract data from one or more xahaud servers that are # connected to the peer to peer network (referred to as an ETL source). # Reporting mode servers will forward RPC requests that require access to the # peer to peer network (submit, fee, etc) to an ETL source. # # [reporting] Settings for Reporting Mode. If and only if this section is -# present, rippled will start in reporting mode. This section +# present, xahaud will start in reporting mode. This section # contains a list of ETL source names, and key-value pairs. The # ETL source names each correspond to a configuration file # section; the names must match exactly. 
The key-value pairs are @@ -997,16 +992,16 @@ # #------------ # -# rippled creates 4 SQLite database to hold bookkeeping information +# xahaud creates 4 SQLite database to hold bookkeeping information # about transactions, local credentials, and various other things. # It also creates the NodeDB, which holds all the objects that -# make up the current and historical ledgers. In Reporting Mode, rippled +# make up the current and historical ledgers. In Reporting Mode, xahauad # uses a Postgres database instead of SQLite. # # The simplest way to work with Postgres is to install it locally. # When it is running, execute the initdb.sh script in the current # directory as: sudo -u postgres ./initdb.sh -# This will create the rippled user and an empty database of the same name. +# This will create the xahaud user and an empty database of the same name. # # The size of the NodeDB grows in proportion to the amount of new data and the # amount of historical data (a configurable setting) so the performance of the @@ -1014,7 +1009,7 @@ # the performance of the server. # # Partial pathnames will be considered relative to the location of -# the rippled.cfg file. +# the xahaud.cfg file. # # [node_db] Settings for the Node Database (required) # @@ -1025,18 +1020,18 @@ # # Example: # type=nudb -# path=db/nudb +# path=/opt/xahaud/db/nudb # # The "type" field must be present and controls the choice of backend: # # type = NuDB # # NuDB is a high-performance database written by Ripple Labs and optimized -# for rippled and solid-state drives. +# for and solid-state drives. # # NuDB maintains its high speed regardless of the amount of history # stored. Online delete may be selected, but is not required. NuDB is -# available on all platforms that rippled runs on. +# available on all platforms that xahaud runs on. # # type = RocksDB # @@ -1124,7 +1119,7 @@ # # Optional keys for NuDB and RocksDB: # -# earliest_seq The default is 32570 to match the XRP ledger +# earliest_seq The default is 32570 to match the XRP Ledger's # network's earliest allowed sequence. Alternate # networks may set this value. Minimum value of 1. # If a [shard_db] section is defined, and this @@ -1166,7 +1161,7 @@ # # recovery_wait_seconds # The online delete process checks periodically -# that rippled is still in sync with the network, +# that xahaud is still in sync with the network, # and that the validated ledger is less than # 'age_threshold_seconds' old. If not, then continue # sleeping for this number of seconds and @@ -1205,8 +1200,8 @@ # The server creates and maintains 4 to 5 bookkeeping SQLite databases in # the 'database_path' location. If you omit this configuration setting, # the server creates a directory called "db" located in the same place as -# your rippled.cfg file. -# Partial pathnames are relative to the location of the rippled executable. +# your xahaud.cfg file. +# Partial pathnames are relative to the location of the xahaud executable. # # [shard_db] Settings for the Shard Database (optional) # @@ -1282,7 +1277,7 @@ # The default is "wal", which uses a write-ahead # log to implement database transactions. # Alternately, "memory" saves disk I/O, but if -# rippled crashes during a transaction, the +# xahaud crashes during a transaction, the # database is likely to be corrupted. # See https://www.sqlite.org/pragma.html#pragma_journal_mode # for more details about the available options. 
@@ -1292,7 +1287,7 @@ # synchronous Valid values: off, normal, full, extra # The default is "normal", which works well with # the "wal" journal mode. Alternatively, "off" -# allows rippled to continue as soon as data is +# allows xahaud to continue as soon as data is # passed to the OS, which can significantly # increase speed, but risks data corruption if # the host computer crashes before writing that @@ -1306,7 +1301,7 @@ # The default is "file", which will use files # for temporary database tables and indices. # Alternatively, "memory" may save I/O, but -# rippled does not currently use many, if any, +# xahaud does not currently use many, if any, # of these temporary objects. # See https://www.sqlite.org/pragma.html#pragma_temp_store # for more details about the available options. @@ -1318,9 +1313,9 @@ # conninfo Info for connecting to Postgres. Format is # postgres://[username]:[password]@[ip]/[database]. # The database and user must already exist. If this -# section is missing and rippled is running in -# Reporting Mode, rippled will connect as the -# user running rippled to a database with the +# section is missing and xahaud is running in +# Reporting Mode, xahaud will connect as the +# user running xahaud to a database with the # same name. On Linux and Mac OS X, the connection # will take place using the server's UNIX domain # socket. On Windows, through the localhost IP @@ -1329,7 +1324,7 @@ # use_tx_tables Valid values: 1, 0 # The default is 1 (true). Determines whether to use # the SQLite transaction database. If set to 0, -# rippled will not write to the transaction database, +# xahaud will not write to the transaction database, # and will reject tx, account_tx and tx_history RPCs. # In Reporting Mode, this setting is ignored. # @@ -1357,7 +1352,7 @@ # # These settings are designed to help server administrators diagnose # problems, and obtain detailed information about the activities being -# performed by the rippled process. +# performed by the xahaud process. # # # @@ -1374,7 +1369,7 @@ # # Configuration parameters for the Beast. Insight stats collection module. # -# Insight is a module that collects information from the areas of rippled +# Insight is a module that collects information from the areas of xahaud # that have instrumentation. The configuration parameters control where the # collection metrics are sent. The parameters are expressed as key = value # pairs with no white space. The main parameter is the choice of server: @@ -1383,7 +1378,7 @@ # # Choice of server to send metrics to. Currently the only choice is # "statsd" which sends UDP packets to a StatsD daemon, which must be -# running while rippled is running. More information on StatsD is +# running while xahaud is running. More information on StatsD is # available here: # https://github.com/b/statsd_spec # @@ -1393,7 +1388,7 @@ # in the format, n.n.n.n:port. # # "prefix" A string prepended to each collected metric. This is used -# to distinguish between different running instances of rippled. +# to distinguish between different running instances of xahaud. # # If this section is missing, or the server type is unspecified or unknown, # statistics are not collected or reported. @@ -1420,7 +1415,7 @@ # # Example: # [perf] -# perf_log=/var/log/rippled/perf.log +# perf_log=/var/log/xahaud/perf.log # log_interval=2 # #------------------------------------------------------------------------------- @@ -1429,8 +1424,8 @@ # #---------- # -# The vote settings configure settings for the entire Ripple network. 
-# While a single instance of rippled cannot unilaterally enforce network-wide +# The vote settings configure settings for the entire Xahau Network. +# While a single instance of xahaud cannot unilaterally enforce network-wide # settings, these choices become part of the instance's vote during the # consensus process for each voting ledger. # @@ -1442,9 +1437,9 @@ # # The cost of the reference transaction fee, specified in drops. # The reference transaction is the simplest form of transaction. -# It represents an XRP payment between two parties. +# It represents an XAH payment between two parties. # -# If this parameter is unspecified, rippled will use an internal +# If this parameter is unspecified, xahaud will use an internal # default. Don't change this without understanding the consequences. # # Example: @@ -1453,26 +1448,26 @@ # account_reserve = # # The account reserve requirement is specified in drops. The portion of an -# account's XRP balance that is at or below the reserve may only be +# account's XAH balance that is at or below the reserve may only be # spent on transaction fees, and not transferred out of the account. # -# If this parameter is unspecified, rippled will use an internal +# If this parameter is unspecified, xahaud will use an internal # default. Don't change this without understanding the consequences. # # Example: -# account_reserve = 10000000 # 10 XRP +# account_reserve = 10000000 # 10 XAH # # owner_reserve = # -# The owner reserve is the amount of XRP reserved in the account for +# The owner reserve is the amount of XAH reserved in the account for # each ledger item owned by the account. Ledger items an account may # own include trust lines, open orders, and tickets. # -# If this parameter is unspecified, rippled will use an internal +# If this parameter is unspecified, xahaud will use an internal # default. Don't change this without understanding the consequences. # # Example: -# owner_reserve = 2000000 # 2 XRP +# owner_reserve = 2000000 # 2 XAH # #------------------------------------------------------------------------------- # @@ -1510,7 +1505,7 @@ # tool instead. # # This flag has no effect on the "sign" and "sign_for" command line options -# that rippled makes available. +# that xahaud makes available. # # The default value of this field is "false" # @@ -1589,7 +1584,7 @@ #-------------------- # # Administrators can use these values as a starting point for configuring -# their instance of rippled, but each value should be checked to make sure +# their instance of xahaud, but each value should be checked to make sure # it meets the business requirements for the organization. # # Server @@ -1599,7 +1594,7 @@ # "peer" # # Peer protocol open to everyone. This is required to accept -# incoming rippled connections. This does not affect automatic +# incoming xahaud connections. This does not affect automatic # or manual outgoing Peer protocol connections. # # "rpc" @@ -1627,8 +1622,8 @@ # NOTE # # To accept connections on well known ports such as 80 (HTTP) or -# 443 (HTTPS), most operating systems will require rippled to -# run with administrator privileges, or else rippled will not start. +# 443 (HTTPS), most operating systems will require xahaud to +# run with administrator privileges, or else xahaud will not start. 
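# Before the concrete stanzas below, an illustrative sketch combining the [voting] keys documented above into one section (the values are placeholders echoing the earlier examples, not recommendations; as noted above, change them only with an understanding of the consequences): # # [voting] # reference_fee = 10 # account_reserve = 10000000 # owner_reserve = 2000000 #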
[server] port_rpc_admin_local @@ -1639,20 +1634,20 @@ port_ws_admin_local #ssl_cert = /etc/ssl/certs/server.crt [port_rpc_admin_local] -port = 5005 +port = 5009 ip = 127.0.0.1 admin = 127.0.0.1 protocol = http [port_peer] -port = 51235 +port = 21337 ip = 0.0.0.0 # alternatively, to accept connections on IPv4 + IPv6, use: #ip = :: protocol = peer [port_ws_admin_local] -port = 6006 +port = 6009 ip = 127.0.0.1 admin = 127.0.0.1 protocol = ws @@ -1663,15 +1658,15 @@ ip = 127.0.0.1 secure_gateway = 127.0.0.1 #[port_ws_public] -#port = 6005 +#port = 6008 #ip = 127.0.0.1 #protocol = wss #------------------------------------------------------------------------------- -# This is primary persistent datastore for rippled. This includes transaction +# This is the primary persistent datastore for xahaud. This includes transaction # metadata, account states, and ledger headers. Helpful information can be -# found at https://xrpl.org/capacity-planning.html#node-db-type +# found at https://xahau.network/docs/infrastructure/system-requirements # type=NuDB is recommended for non-validators with fast SSDs. Validators or # slow / spinning disks should use RocksDB. Caution: Spinning disks are # not recommended. They do not perform well enough to consistently remain @@ -1684,16 +1679,16 @@ secure_gateway = 127.0.0.1 # deletion. [node_db] type=NuDB -path=/var/lib/rippled/db/nudb +path=/opt/xahaud/db/nudb online_delete=512 advisory_delete=0 # This is the persistent datastore for shards. It is important for the health -# of the ripple network that rippled operators shard as much as practical. +# of the Xahau Network that xahaud operators shard as much as practical. # NuDB requires SSD storage. Helpful information can be found at # https://xrpl.org/history-sharding.html #[shard_db] -#path=/var/lib/rippled/db/shards/nudb +#path=/opt/xahaud/db/shards/nudb #max_historical_shards=50 # # This optional section can be configured with a list @@ -1704,7 +1699,7 @@ #/path/2 [database_path] -/var/lib/rippled/db +/opt/xahaud/db # To use Postgres, uncomment this section and fill in the appropriate connection @@ -1719,7 +1714,7 @@ # This needs to be an absolute directory reference, not a relative one. # Modify this value as required. [debug_logfile] -/var/log/rippled/debug.log +/var/log/xahaud/debug.log [sntp_servers] time.windows.com @@ -1727,15 +1722,19 @@ time.apple.com time.nist.gov pool.ntp.org -# To use the XRP test network -# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html), +# To use the Xahau Test Network +# (see https://xahau.network/docs/infrastructure/installing-xahaud), # use the following [ips] section: # [ips] -# r.altnet.rippletest.net 51235 +# 79.110.60.121 21338 +# 79.110.60.122 21338 +# 79.110.60.124 21338 +# 79.110.60.125 21338 + # File containing trusted validator keys or validator list publishers. # Unless an absolute path is specified, it will be considered relative to the -# folder in which the rippled.cfg file is located. +# folder in which the xahaud.cfg file is located. [validators_file] validators.txt diff --git a/cfg/rippled-reporting.cfg b/cfg/xahaud-reporting.cfg similarity index 90% rename from cfg/rippled-reporting.cfg rename to cfg/xahaud-reporting.cfg index dbafdd497..03ac0f33e 100644 --- a/cfg/rippled-reporting.cfg +++ b/cfg/xahaud-reporting.cfg @@ -9,7 +9,7 @@ # # 2. Peer Protocol # -# 3. Ripple Protocol +# 3. XRPL Protocol # # 4. 
HTTPS Client # @@ -29,18 +29,16 @@ # # Purpose # -# This file documents and provides examples of all rippled server process -# configuration options. When the rippled server instance is launched, it +# This file documents and provides examples of all xahaud server process +# configuration options. When the xahaud server instance is launched, it # looks for a file with the following name: # -# rippled.cfg +# xahaud.cfg # -# For more information on where the rippled server instance searches for the -# file, visit: +# To run xahaud with a custom configuration file, use the "--conf {file}" flag. +# By default, xahaud will look in the current working directory or the home directory. # -# https://xrpl.org/commandline-usage.html#generic-options -# -# This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX, +# This file should be named xahaud.cfg. This file is UTF-8 with DOS, UNIX, # or Mac style end of lines. Blank lines and lines beginning with '#' are # ignored. Undefined sections are reserved. No escapes are currently defined. # @@ -89,8 +87,8 @@ # # # -# rippled offers various server protocols to clients making inbound -# connections. The listening ports rippled uses are "universal" ports +# xahaud offers various server protocols to clients making inbound +# connections. The listening ports xahaud uses are "universal" ports # which may be configured to handshake in one or more of the available # supported protocols. These universal ports simplify administration: # A single open port can be used for multiple protocols. @@ -103,7 +101,7 @@ # # A list of port names and key/value pairs. A port name must start with a # letter and contain only letters and numbers. The name is not case-sensitive. -# For each name in this list, rippled will look for a configuration file +# For each name in this list, xahaud will look for a configuration file # section with the same name and use it to create a listening port. The # name is informational only; the choice of name does not affect the function # of the listening port. @@ -134,7 +132,7 @@ # ip = 127.0.0.1 # protocol = http # -# When rippled is used as a command line client (for example, issuing a +# When xahaud is used as a command line client (for example, issuing a # server stop command), the first port advertising the http or https # protocol will be used to make the connection. # @@ -175,7 +173,7 @@ # same time. It is possible to have both Websockets and Secure Websockets # together in one port. # -# NOTE If no ports support the peer protocol, rippled cannot +# NOTE If no ports support the peer protocol, xahaud cannot # receive incoming peer connections or become a superpeer. # # limit = @@ -194,7 +192,7 @@ # required. IP address restrictions, if any, will be checked in addition # to the credentials specified here. # -# When acting in the client role, rippled will supply these credentials +# When acting in the client role, xahaud will supply these credentials # using HTTP's Basic Authentication headers when making outbound HTTP/S # requests. # @@ -227,7 +225,7 @@ # WS, or WSS protocol interfaces. If administrative commands are # disabled for a port, these credentials have no effect. # -# When acting in the client role, rippled will supply these credentials +# When acting in the client role, xahaud will supply these credentials # in the submitted JSON for any administrative command requests when # invoking JSON-RPC commands on remote servers. # @@ -247,11 +245,11 @@ # resource controls will default to those for non-administrative users. 
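# As a sketch of how the keys above combine (the port name, number, and addresses here are placeholders, not recommendations), a websocket port fronted by a trusted local proxy could be declared as: # # [port_ws_proxied] # port = 6010 # ip = 127.0.0.1 # protocol = ws # secure_gateway = 127.0.0.1 #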
# # The secure_gateway IP addresses are intended to represent -# proxies. Since rippled trusts these hosts, they must be +# proxies. Since xahaud trusts these hosts, they must be # responsible for properly authenticating the remote user. # # The same IP address cannot be used in both "admin" and "secure_gateway" -# lists for the same port. In this case, rippled will abort with an error +# lists for the same port. In this case, xahaud will abort with an error # message to the console shortly after startup. # # ssl_key = @@ -261,7 +259,7 @@ # Use the specified files when configuring SSL on the port. # # NOTE If no files are specified and secure protocols are selected, -# rippled will generate an internal self-signed certificate. +# xahaud will generate an internal self-signed certificate. # # The files have these meanings: # @@ -284,12 +282,12 @@ # Control the ciphers which the server will support over SSL on the port, # specified using the OpenSSL "cipher list format". # -# NOTE If unspecified, rippled will automatically configure a modern +# NOTE If unspecified, xahaud will automatically configure a modern # cipher suite. This default suite should be widely supported. # # You should not modify this string unless you have a specific # reason and cryptographic expertise. Incorrect modification may -# keep rippled from connecting to other instances of rippled or +# keep xahaud from connecting to other instances of xahaud or # prevent RPC and WebSocket clients from connecting. # # send_queue_limit = [1..65535] @@ -340,7 +338,7 @@ # # Examples: # { "command" : "server_info" } -# { "command" : "log_level", "partition" : "ripplecalc", "severity" : "trace" } +# { "command" : "log_level", "partition" : "xahaucalc", "severity" : "trace" } # # # @@ -369,8 +367,8 @@ #----------------- # # These settings control security and access attributes of the Peer to Peer -# server section of the rippled process. Peer Protocol implements the -# Ripple Payment protocol. It is over peer connections that transactions +# server section of the xahaud process. Peer Protocol implements the +# XRPL Payment protocol. It is over peer connections that transactions # and validations are passed from machine to machine, to determine the # contents of validated ledgers. # @@ -378,7 +376,7 @@ # # [ips] # -# List of hostnames or ips where the Ripple protocol is served. A default +# List of hostnames or ips where the XRPL protocol is served. A default # starter list is included in the code and used if no other hostnames are # available. # @@ -387,24 +385,23 @@ # does not generally matter. # # The default list of entries is: -# - r.ripple.com 51235 -# - zaphod.alloy.ee 51235 -# - sahyadri.isrdc.in 51235 +# - bacab.alloy.ee 21337 +# - hubs.xahau.as16089.net 21337 # # Examples: # # [ips] # 192.168.0.1 -# 192.168.0.1 2459 -# r.ripple.com 51235 +# 192.168.0.1 21337 +# bacab.alloy.ee 21337 # # # [ips_fixed] # -# List of IP addresses or hostnames to which rippled should always attempt to +# List of IP addresses or hostnames to which xahaud should always attempt to # maintain peer connections. This is useful for manually forming private # networks, for example to configure a validation server that connects to the -# Ripple network through a public-facing server, or for building a set +# Xahau Network through a public-facing server, or for building a set # of cluster peers. # # One address or domain name per line is allowed. 
A port must be specified @@ -454,7 +451,7 @@ # # IP address or domain of NTP servers to use for time synchronization. # -# These NTP servers are suitable for rippled servers located in the United +# These NTP servers are suitable for xahaud servers located in the United # States: # time.windows.com # time.apple.com @@ -555,7 +552,7 @@ # # minimum_txn_in_ledger_standalone = # -# Like minimum_txn_in_ledger when rippled is running in standalone +# Like minimum_txn_in_ledger when xahaud is running in standalone # mode. Default: 1000. # # target_txn_in_ledger = @@ -682,7 +679,7 @@ # # [validator_token] # -# This is an alternative to [validation_seed] that allows rippled to perform +# This is an alternative to [validation_seed] that allows xahaud to perform # validation without having to store the validator keys on the network # connected server. The field should contain a single token in the form of a # base64-encoded blob. @@ -717,22 +714,21 @@ # # Specify the file by its name or path. # Unless an absolute path is specified, it will be considered relative to -# the folder in which the rippled.cfg file is located. +# the folder in which the xahaud.cfg file is located. # # Examples: -# /home/ripple/validators.txt -# C:/home/ripple/validators.txt +# /home/xahaud/validators.txt +# C:/home/xahaud/validators.txt # # Example content: # [validators] -# n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7 -# n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj -# n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C -# n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS -# n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA -# -# -# +# n9L3GdotB8a3AqtsvS7NXt4BUTQSAYyJUr9xtFj2qXJjfbZsawKY +# n9LQDHLWyFuAn5BXJuW2ow5J9uGqpmSjRYS2cFRpxf6uJbxwDzvM +# n9MCWyKVUkiatXVJTKUrAESB5kBFP8R3hm43jGHtg8WBnjv3iDfb +# n9KWXCLRhjpajuZtULTXsy6R5xbisA6ozGxM4zdEJFq6uHiFZDvW + + + # [path_search] # When searching for paths, the default search aggressiveness. This can take # exponentially more resources as the size is increased. @@ -795,7 +791,7 @@ # # 0: Disable the ledger replay feature [default] # 1: Enable the ledger replay feature. With this feature enabled, when -# acquiring a ledger from the network, a rippled node only downloads +# acquiring a ledger from the network, a xahaud node only downloads # the ledger header and the transactions instead of the whole ledger. # And the ledger is built by applying the transactions to the parent # ledger. @@ -806,9 +802,9 @@ # #---------------- # -# The rippled server instance uses HTTPS GET requests in a variety of +# The xahaud server instance uses HTTPS GET requests in a variety of # circumstances, including but not limited to contacting trusted domains to -# fetch information such as mapping an email address to a Ripple Payment +# fetch information such as mapping an email address to an XRPL Payment # Network address. # # [ssl_verify] # @@ -846,15 +842,15 @@ # #------------ # -# rippled has an optional operating mode called Reporting Mode. In Reporting -# Mode, rippled does not connect to the peer to peer network. Instead, rippled -# will continuously extract data from one or more rippled servers that are +# xahaud has an optional operating mode called Reporting Mode. In Reporting +# Mode, xahaud does not connect to the peer to peer network. Instead, xahaud +# will continuously extract data from one or more xahaud servers that are # connected to the peer to peer network (referred to as an ETL source). 
# Reporting mode servers will forward RPC requests that require access to the # peer to peer network (submit, fee, etc) to an ETL source. # # [reporting] Settings for Reporting Mode. If and only if this section is -# present, rippled will start in reporting mode. This section +# present, xahaud will start in reporting mode. This section # contains a list of ETL source names, and key-value pairs. The # ETL source names each correspond to a configuration file # section; the names must match exactly. The key-value pairs are @@ -959,16 +955,16 @@ # #------------ # -# rippled creates 4 SQLite database to hold bookkeeping information +# xahaud creates 4 SQLite databases to hold bookkeeping information # about transactions, local credentials, and various other things. # It also creates the NodeDB, which holds all the objects that -# make up the current and historical ledgers. In Reporting Mode, rippled +# make up the current and historical ledgers. In Reporting Mode, xahaud # uses a Postgres database instead of SQLite. # # The simplest way to work with Postgres is to install it locally. # When it is running, execute the initdb.sh script in the current # directory as: sudo -u postgres ./initdb.sh -# This will create the rippled user and an empty database of the same name. +# This will create the xahaud user and an empty database of the same name. # # The size of the NodeDB grows in proportion to the amount of new data and the # amount of historical data (a configurable setting) so the performance of the @@ -976,7 +972,7 @@ # the performance of the server. # # Partial pathnames will be considered relative to the location of -# the rippled.cfg file. +# the xahaud.cfg file. # # [node_db] Settings for the Node Database (required) # @@ -994,11 +990,11 @@ # type = NuDB # # NuDB is a high-performance database written by Ripple Labs and optimized -# for rippled and solid-state drives. +# for solid-state drives. # # NuDB maintains its high speed regardless of the amount of history # stored. Online delete may be selected, but is not required. NuDB is -# available on all platforms that rippled runs on. +# available on all platforms that xahaud runs on. # # type = RocksDB # @@ -1103,14 +1099,14 @@ # # recovery_wait_seconds # The online delete process checks periodically -# that rippled is still in sync with the network, +# that xahaud is still in sync with the network, # and that the validated ledger is less than # 'age_threshold_seconds' old. By default, if it # is not, the online delete process aborts and # tries again later. If 'recovery_wait_seconds' -# is set and rippled is out of sync, but likely to +# is set and xahaud is out of sync, but likely to # recover quickly, then online delete will wait -# this number of seconds for rippled to get back +# this number of seconds for xahaud to get back # into sync before it aborts. # Set this value if the node is otherwise staying # in sync, or recovering quickly, but the online @@ -1146,8 +1142,8 @@ # The server creates and maintains 4 to 5 bookkeeping SQLite databases in # the 'database_path' location. If you omit this configuration setting, # the server creates a directory called "db" located in the same place as -# your rippled.cfg file. -# Partial pathnames are relative to the location of the rippled executable. +# your xahaud.cfg file. +# Partial pathnames are relative to the location of the xahaud executable. 
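# Taken together, an illustrative [node_db] stanza with online deletion and the recovery behavior described above might read as follows (every value here is an example only, not tuning advice): # # [node_db] # type=NuDB # path=/opt/xahaud-reporting/db/nudb # online_delete=2000 # advisory_delete=0 # recovery_wait_seconds=5 #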
# # [shard_db] Settings for the Shard Database (optional) # @@ -1223,7 +1219,7 @@ # The default is "wal", which uses a write-ahead # log to implement database transactions. # Alternately, "memory" saves disk I/O, but if -# rippled crashes during a transaction, the +# xahaud crashes during a transaction, the # database is likely to be corrupted. # See https://www.sqlite.org/pragma.html#pragma_journal_mode # for more details about the available options. @@ -1233,7 +1229,7 @@ # synchronous Valid values: off, normal, full, extra # The default is "normal", which works well with # the "wal" journal mode. Alternatively, "off" -# allows rippled to continue as soon as data is +# allows xahaud to continue as soon as data is # passed to the OS, which can significantly # increase speed, but risks data corruption if # the host computer crashes before writing that @@ -1247,7 +1243,7 @@ # The default is "file", which will use files # for temporary database tables and indices. # Alternatively, "memory" may save I/O, but -# rippled does not currently use many, if any, +# xahaud does not currently use many, if any, # of these temporary objects. # See https://www.sqlite.org/pragma.html#pragma_temp_store # for more details about the available options. @@ -1259,9 +1255,9 @@ # conninfo Info for connecting to Postgres. Format is # postgres://[username]:[password]@[ip]/[database]. # The database and user must already exist. If this -# section is missing and rippled is running in -# Reporting Mode, rippled will connect as the -# user running rippled to a database with the +# section is missing and xahaud is running in +# Reporting Mode, xahaud will connect as the +# user running xahaud to a database with the # same name. On Linux and Mac OS X, the connection # will take place using the server's UNIX domain # socket. On Windows, through the localhost IP @@ -1270,7 +1266,7 @@ # use_tx_tables Valid values: 1, 0 # The default is 1 (true). Determines whether to use # the SQLite transaction database. If set to 0, -# rippled will not write to the transaction database, +# xahaud will not write to the transaction database, # and will reject tx, account_tx and tx_history RPCs. # In Reporting Mode, this setting is ignored. # @@ -1298,7 +1294,7 @@ # # These settings are designed to help server administrators diagnose # problems, and obtain detailed information about the activities being -# performed by the rippled process. +# performed by the xahaud process. # # # @@ -1315,7 +1311,7 @@ # # Configuration parameters for the Beast. Insight stats collection module. # -# Insight is a module that collects information from the areas of rippled +# Insight is a module that collects information from the areas of xahaud # that have instrumentation. The configuration parameters control where the # collection metrics are sent. The parameters are expressed as key = value # pairs with no white space. The main parameter is the choice of server: @@ -1324,7 +1320,7 @@ # # Choice of server to send metrics to. Currently the only choice is # "statsd" which sends UDP packets to a StatsD daemon, which must be -# running while rippled is running. More information on StatsD is +# running while xahaud is running. More information on StatsD is # available here: # https://github.com/b/statsd_spec # @@ -1334,7 +1330,7 @@ # in the format, n.n.n.n:port. # # "prefix" A string prepended to each collected metric. This is used -# to distinguish between different running instances of rippled. +# to distinguish between different running instances of xahaud. 
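# A minimal sketch of these keys in use (the daemon address and prefix are placeholders; a StatsD daemon must already be listening at that address): # # [insight] # server=statsd # address=127.0.0.1:8125 # prefix=xahaud #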
# # If this section is missing, or the server type is unspecified or unknown, # statistics are not collected or reported. @@ -1361,7 +1357,7 @@ # # Example: # [perf] -# perf_log=/var/log/rippled/perf.log +# perf_log=/var/log/xahaud/perf.log # log_interval=2 # #------------------------------------------------------------------------------- @@ -1370,8 +1366,8 @@ # #---------- # -# The vote settings configure settings for the entire Ripple network. -# While a single instance of rippled cannot unilaterally enforce network-wide +# The vote settings configure settings for the entire Xahau Network. +# While a single instance of xahaud cannot unilaterally enforce network-wide # settings, these choices become part of the instance's vote during the # consensus process for each voting ledger. # @@ -1383,9 +1379,9 @@ # # The cost of the reference transaction fee, specified in drops. # The reference transaction is the simplest form of transaction. -# It represents an XRP payment between two parties. +# It represents an XAH payment between two parties. # -# If this parameter is unspecified, rippled will use an internal +# If this parameter is unspecified, xahaud will use an internal # default. Don't change this without understanding the consequences. # # Example: @@ -1394,26 +1390,26 @@ # account_reserve = # # The account reserve requirement is specified in drops. The portion of an -# account's XRP balance that is at or below the reserve may only be +# account's XAH balance that is at or below the reserve may only be # spent on transaction fees, and not transferred out of the account. # -# If this parameter is unspecified, rippled will use an internal +# If this parameter is unspecified, xahaud will use an internal # default. Don't change this without understanding the consequences. # # Example: -# account_reserve = 10000000 # 10 XRP +# account_reserve = 10000000 # 10 XAH # # owner_reserve = # -# The owner reserve is the amount of XRP reserved in the account for +# The owner reserve is the amount of XAH reserved in the account for # each ledger item owned by the account. Ledger items an account may # own include trust lines, open orders, and tickets. # -# If this parameter is unspecified, rippled will use an internal +# If this parameter is unspecified, xahaud will use an internal # default. Don't change this without understanding the consequences. # # Example: -# owner_reserve = 2000000 # 2 XRP +# owner_reserve = 2000000 # 2 XAH # #------------------------------------------------------------------------------- # @@ -1451,7 +1447,7 @@ # tool instead. # # This flag has no effect on the "sign" and "sign_for" command line options -# that rippled makes available. +# that xahaud makes available. # # The default value of this field is "false" # @@ -1530,7 +1526,7 @@ #-------------------- # # Administrators can use these values as a starting point for configuring -# their instance of rippled, but each value should be checked to make sure +# their instance of xahaud, but each value should be checked to make sure # it meets the business requirements for the organization. # # Server @@ -1540,7 +1536,7 @@ # "peer" # # Peer protocol open to everyone. This is required to accept -# incoming rippled connections. This does not affect automatic +# incoming xahaud connections. This does not affect automatic # or manual outgoing Peer protocol connections. 
# # "rpc" @@ -1568,8 +1564,8 @@ # NOTE # # To accept connections on well known ports such as 80 (HTTP) or -# 443 (HTTPS), most operating systems will require rippled to -# run with administrator privileges, or else rippled will not start. +# 443 (HTTPS), most operating systems will require xahaud to +# run with administrator privileges, or else xahaud will not start. [server] port_rpc_admin_local @@ -1587,7 +1583,7 @@ admin = 127.0.0.1 protocol = http [port_peer] -port = 51235 +port = 21337 ip = 0.0.0.0 # alternatively, to accept connections on IPv4 + IPv6, use: #ip = :: @@ -1611,9 +1607,9 @@ protocol = ws #------------------------------------------------------------------------------- -# This is primary persistent datastore for rippled. This includes transaction +# This is primary persistent datastore for xahaud. This includes transaction # metadata, account states, and ledger headers. Helpful information can be -# found at https://xrpl.org/capacity-planning.html#node-db-type +# found at https://xahau.network/docs/infrastructure/system-requirements # type=NuDB is recommended for non-validators with fast SSDs. Validators or # slow / spinning disks should use RocksDB. Caution: Spinning disks are # not recommended. They do not perform well enough to consistently remain @@ -1626,16 +1622,16 @@ protocol = ws # deletion. [node_db] type=NuDB -path=/var/lib/rippled-reporting/db/nudb +path=/opt/xahaud-reporting/db/nudb # online_delete=512 # advisory_delete=0 # This is the persistent datastore for shards. It is important for the health -# of the ripple network that rippled operators shard as much as practical. +# of the Xahau Network that xahaud operators shard as much as practical. # NuDB requires SSD storage. Helpful information can be found at # https://xrpl.org/history-sharding.html #[shard_db] -#path=/var/lib/rippled/db/shards/nudb +#path=/opt/xahaud-reporting/db/shards/nudb #max_historical_shards=50 # # This optional section can be configured with a list @@ -1646,7 +1642,7 @@ advisory_delete=0 #/path/2 [database_path] -/var/lib/rippled-reporting/db +/opt/xahaud-reporting/db # To use Postgres, uncomment this section and fill in the appropriate connection # info. Postgres can only be used in Reporting Mode. @@ -1660,7 +1656,7 @@ advisory_delete=0 # This needs to be an absolute directory reference, not a relative one. # Modify this value as required. [debug_logfile] -/var/log/rippled-reporting/debug.log +/var/log/xahaud-reporting/debug.log [sntp_servers] time.windows.com @@ -1668,17 +1664,20 @@ time.apple.com time.nist.gov pool.ntp.org -# To use the XRP test network -# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html), +# To use the Xahau Test Network +# (see https://xahau.network/docs/infrastructure/installing-xahaud), # use the following [ips] section: # [ips] -# r.altnet.rippletest.net 51235 +# 79.110.60.121 21338 +# 79.110.60.122 21338 +# 79.110.60.124 21338 +# 79.110.60.125 21338 # File containing trusted validator keys or validator list publishers. # Unless an absolute path is specified, it will be considered relative to the -# folder in which the rippled.cfg file is located. +# folder in which the xahaud.cfg file is located. [validators_file] -/opt/rippled-reporting/etc/validators.txt +/opt/xahaud-reporting/etc/validators.txt # Turn down default logging to save disk space in the long run. 
# Valid values here are trace, debug, info, warning, error, and fatal @@ -1699,5 +1698,5 @@ etl_source [etl_source] source_grpc_port=50051 -source_ws_port=6005 +source_ws_port=6008 source_ip=127.0.0.1 diff --git a/cfg/rippled-standalone.cfg b/cfg/xahaud-standalone.cfg old mode 100755 new mode 100644 similarity index 97% rename from cfg/rippled-standalone.cfg rename to cfg/xahaud-standalone.cfg index 3301299ca..3933b4ba8 --- a/cfg/rippled-standalone.cfg +++ b/cfg/xahaud-standalone.cfg @@ -1,4 +1,4 @@ -# standalone: ./rippled -a --ledgerfile config/genesis.json --conf config/rippled-standalone.cfg +# standalone: ./xahaud -a --ledgerfile config/genesis.json --conf config/xahaud-standalone.cfg [server] port_rpc_admin_local port_ws_public @@ -21,7 +21,7 @@ ip = 0.0.0.0 protocol = ws # [port_peer] -# port = 51235 +# port = 21337 # ip = 0.0.0.0 # protocol = peer @@ -69,7 +69,8 @@ time.nist.gov pool.ntp.org [ips] -r.ripple.com 51235 +bacab.alloy.ee 21337 +hubs.xahau.as16089.net 21337 [validators_file] validators-example.txt @@ -94,7 +95,7 @@ validators-example.txt 1000000 [network_id] -21338 +21337 [amendments] 740352F2412A9909880C23A559FCECEDA3BE2126FED62FC7660D628A06927F11 Flow From 39d1c439015883e26135e8d149c47a5142e36556 Mon Sep 17 00:00:00 2001 From: Niq Dudfield Date: Fri, 10 Oct 2025 16:53:35 +0700 Subject: [PATCH 07/12] build: upgrade openssl from 1.1.1u to 3.6.0 (#587) Updates OpenSSL dependency to the latest 3.x series available on Conan Center. --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 4b98b84db..e29a880d8 100644 --- a/conanfile.py +++ b/conanfile.py @@ -31,7 +31,7 @@ class Xrpl(ConanFile): 'lz4/1.9.4', 'grpc/1.50.1', 'nudb/2.0.8', - 'openssl/1.1.1u', + 'openssl/3.6.0', 'protobuf/3.21.12', 'soci/4.0.3@xahaud/stable', 'zlib/1.3.1', From 094f011006f6a7501cf721f90aedfb7464292d1c Mon Sep 17 00:00:00 2001 From: tequ Date: Sat, 11 Oct 2025 10:43:09 +0900 Subject: [PATCH 08/12] Fix `emit` Hook API testcase name (#580) --- src/test/app/SetHook_test.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/app/SetHook_test.cpp b/src/test/app/SetHook_test.cpp index c3b336d05..3fb16176b 100644 --- a/src/test/app/SetHook_test.cpp +++ b/src/test/app/SetHook_test.cpp @@ -2434,7 +2434,7 @@ public: void test_emit(FeatureBitset features) { - testcase("Test float_emit"); + testcase("Test emit"); using namespace jtx; Env env{*this, features}; From e580f7cfc0c02986132360f1669e82b6df2f9599 Mon Sep 17 00:00:00 2001 From: tequ Date: Sat, 11 Oct 2025 10:43:50 +0900 Subject: [PATCH 09/12] chore(vscode): enable format on save in settings.json (#578) --- .vscode/settings.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 1642d6324..313281c33 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -8,6 +8,6 @@ "editor.semanticHighlighting.enabled": true, "editor.tabSize": 4, "editor.defaultFormatter": "xaver.clang-format", - "editor.formatOnSave": false + "editor.formatOnSave": true } } From ad0531ad6c7f970d03423e688d27472babf55cf7 Mon Sep 17 00:00:00 2001 From: Niq Dudfield Date: Sat, 11 Oct 2025 08:47:13 +0700 Subject: [PATCH 10/12] chore: fix warnings (#509) Co-authored-by: Denis Angell Co-authored-by: RichardAH --- src/ripple/app/rdb/backend/RWDBDatabase.h | 3 ++- src/ripple/overlay/impl/Handshake.cpp | 12 +++++------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/ripple/app/rdb/backend/RWDBDatabase.h 
b/src/ripple/app/rdb/backend/RWDBDatabase.h index f91e50cda..308c4366f 100644 --- a/src/ripple/app/rdb/backend/RWDBDatabase.h +++ b/src/ripple/app/rdb/backend/RWDBDatabase.h @@ -221,7 +221,8 @@ public: if (!ledger->info().accountHash.isNonZero()) { - JLOG(j.fatal()) << "AH is zero: " << getJson({*ledger, {}}); + JLOG(j.fatal()) + << "AH is zero: " << getJson({*ledger, {}}).asString(); assert(false); } diff --git a/src/ripple/overlay/impl/Handshake.cpp b/src/ripple/overlay/impl/Handshake.cpp index 11b75e28d..73d40e6c5 100644 --- a/src/ripple/overlay/impl/Handshake.cpp +++ b/src/ripple/overlay/impl/Handshake.cpp @@ -239,19 +239,17 @@ verifyHandshake( throw std::runtime_error("Invalid server domain"); } - // Check the network. Omitting Network-ID (on either side ours, or theirs) - // means NID=0 + // Check network ID, treating absent/empty as default network 0 { - uint32_t peer_nid = 0; + std::uint32_t nid{0}; + if (auto const iter = headers.find("Network-ID"); iter != headers.end()) { - if (!beast::lexicalCastChecked( - peer_nid, std::string(iter->value()))) + if (!beast::lexicalCastChecked(nid, std::string(iter->value()))) throw std::runtime_error("Invalid peer network identifier"); } - uint32_t our_nid = networkID ? *networkID : 0; - if (peer_nid != our_nid) + if (networkID.value_or(0) != nid) throw std::runtime_error("Peer is on a different network"); } From 1f12b9ec5aed58bf9c722ae0b449dba1643fd9bc Mon Sep 17 00:00:00 2001 From: Niq Dudfield Date: Tue, 14 Oct 2025 07:44:03 +0700 Subject: [PATCH 11/12] feat(logs): add -DBEAST_ENHANCED_LOGGING with file:line numbers for JLOG macro (#552) --- .../actions/xahau-ga-dependencies/action.yml | 2 + Builds/CMake/RippledCore.cmake | 13 ++ CMakeLists.txt | 19 +++ conanfile.py | 4 +- src/ripple/app/misc/impl/Manifest.cpp | 57 ++++----- src/ripple/basics/Log.h | 11 +- src/ripple/basics/impl/Log.cpp | 51 +++++++- src/ripple/beast/utility/EnhancedLogging.h | 85 +++++++++++++ src/ripple/beast/utility/Journal.h | 61 ++++++++++ .../utility/src/beast_EnhancedLogging.cpp | 114 ++++++++++++++++++ .../beast/utility/src/beast_Journal.cpp | 48 +++++++- src/test/ledger/Invariants_test.cpp | 25 +++- 12 files changed, 448 insertions(+), 42 deletions(-) create mode 100644 src/ripple/beast/utility/EnhancedLogging.h create mode 100644 src/ripple/beast/utility/src/beast_EnhancedLogging.cpp diff --git a/.github/actions/xahau-ga-dependencies/action.yml b/.github/actions/xahau-ga-dependencies/action.yml index cb14e4e57..afa1067b6 100644 --- a/.github/actions/xahau-ga-dependencies/action.yml +++ b/.github/actions/xahau-ga-dependencies/action.yml @@ -84,6 +84,8 @@ runs: - name: Install dependencies shell: bash + env: + CONAN_REQUEST_TIMEOUT: 180 # Increase timeout to 3 minutes for slow mirrors run: | # Create build directory mkdir -p ${{ inputs.build_dir }} diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 6b876997b..58cd234e4 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -50,6 +50,12 @@ target_sources (xrpl_core PRIVATE src/ripple/beast/utility/src/beast_Journal.cpp src/ripple/beast/utility/src/beast_PropertyStream.cpp) +# Conditionally add enhanced logging source when BEAST_ENHANCED_LOGGING is enabled +if(DEFINED BEAST_ENHANCED_LOGGING AND BEAST_ENHANCED_LOGGING) + target_sources(xrpl_core PRIVATE + src/ripple/beast/utility/src/beast_EnhancedLogging.cpp) +endif() + #[===============================[ core sources #]===============================] @@ -155,6 +161,13 @@ target_link_libraries (xrpl_core 
ed25519::ed25519 date::date Ripple::opts) + +# Link date-tz library when enhanced logging is enabled +if(DEFINED BEAST_ENHANCED_LOGGING AND BEAST_ENHANCED_LOGGING) + if(TARGET date::date-tz) + target_link_libraries(xrpl_core PUBLIC date::date-tz) + endif() +endif() #[=================================[ main/core headers installation #]=================================] diff --git a/CMakeLists.txt b/CMakeLists.txt index a50560cb3..9a31d4931 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -33,6 +33,25 @@ if(Git_FOUND) endif() endif() #git +# make SOURCE_ROOT_PATH define available for logging +set(SOURCE_ROOT_PATH "${CMAKE_CURRENT_SOURCE_DIR}/src/") +add_definitions(-DSOURCE_ROOT_PATH="${SOURCE_ROOT_PATH}") + +# BEAST_ENHANCED_LOGGING option - adds file:line numbers and formatting to logs +# Default to ON for Debug builds, OFF for Release +if(CMAKE_BUILD_TYPE STREQUAL "Debug") + option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages" ON) +else() + option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages" OFF) +endif() + +if(BEAST_ENHANCED_LOGGING) + add_definitions(-DBEAST_ENHANCED_LOGGING=1) + message(STATUS "Log line numbers enabled") +else() + message(STATUS "Log line numbers disabled") +endif() + if(thread_safety_analysis) add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DRIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS) add_compile_options("-stdlib=libc++") diff --git a/conanfile.py b/conanfile.py index e29a880d8..477ee3825 100644 --- a/conanfile.py +++ b/conanfile.py @@ -26,7 +26,7 @@ class Xrpl(ConanFile): } requires = [ - 'date/3.0.1', + 'date/3.0.3', 'libarchive/3.6.0', 'lz4/1.9.4', 'grpc/1.50.1', @@ -52,7 +52,7 @@ class Xrpl(ConanFile): 'tool_requires_b2': False, 'cassandra-cpp-driver/*:shared': False, - 'date/*:header_only': True, + 'date/*:header_only': False, 'grpc/*:shared': False, 'grpc/*:secure': True, 'libarchive/*:shared': False, diff --git a/src/ripple/app/misc/impl/Manifest.cpp b/src/ripple/app/misc/impl/Manifest.cpp index 60b521330..44e19244a 100644 --- a/src/ripple/app/misc/impl/Manifest.cpp +++ b/src/ripple/app/misc/impl/Manifest.cpp @@ -156,34 +156,22 @@ deserializeManifest(Slice s, beast::Journal journal) } } -template -Stream& -logMftAct( - Stream& s, - std::string const& action, - PublicKey const& pk, - std::uint32_t seq) -{ - s << "Manifest: " << action - << ";Pk: " << toBase58(TokenType::NodePublic, pk) << ";Seq: " << seq - << ";"; - return s; -} +// Helper macros to format manifest log messages while preserving line numbers +#define LOG_MANIFEST_ACTION(stream, action, pk, seq) \ + do \ + { \ + JLOG(stream) << "Manifest: " << action \ + << ";Pk: " << toBase58(TokenType::NodePublic, pk) \ + << ";Seq: " << seq << ";"; \ + } while (0) -template -Stream& -logMftAct( - Stream& s, - std::string const& action, - PublicKey const& pk, - std::uint32_t seq, - std::uint32_t oldSeq) -{ - s << "Manifest: " << action - << ";Pk: " << toBase58(TokenType::NodePublic, pk) << ";Seq: " << seq - << ";OldSeq: " << oldSeq << ";"; - return s; -} +#define LOG_MANIFEST_ACTION_WITH_OLD(stream, action, pk, seq, oldSeq) \ + do \ + { \ + JLOG(stream) << "Manifest: " << action \ + << ";Pk: " << toBase58(TokenType::NodePublic, pk) \ + << ";Seq: " << seq << ";OldSeq: " << oldSeq << ";"; \ + } while (0) bool Manifest::verify() const @@ -381,7 +369,7 @@ ManifestCache::applyManifest(Manifest m) // several cases including when we receive manifests from a peer who // doesn't have the latest data. 
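// Illustrative note on the macro form used below: JLOG expands __FILE__ // and __LINE__ at the point of invocation, so a hypothetical call such as // LOG_MANIFEST_ACTION(stream, "Seen", m.masterKey, m.sequence); // is attributed to the caller's own file and line, which the removed // logMftAct function template could not provide.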
if (auto stream = j_.debug()) - logMftAct( + LOG_MANIFEST_ACTION_WITH_OLD( stream, "Stale", m.masterKey, @@ -393,7 +381,7 @@ ManifestCache::applyManifest(Manifest m) if (checkSignature && !m.verify()) { if (auto stream = j_.warn()) - logMftAct(stream, "Invalid", m.masterKey, m.sequence); + LOG_MANIFEST_ACTION(stream, "Invalid", m.masterKey, m.sequence); return ManifestDisposition::invalid; } @@ -407,7 +395,7 @@ ManifestCache::applyManifest(Manifest m) bool const revoked = m.revoked(); if (auto stream = j_.warn(); stream && revoked) - logMftAct(stream, "Revoked", m.masterKey, m.sequence); + LOG_MANIFEST_ACTION(stream, "Revoked", m.masterKey, m.sequence); // Sanity check: the master key of this manifest should not be used as // the ephemeral key of another manifest: @@ -476,7 +464,7 @@ ManifestCache::applyManifest(Manifest m) if (iter == map_.end()) { if (auto stream = j_.info()) - logMftAct(stream, "AcceptedNew", m.masterKey, m.sequence); + LOG_MANIFEST_ACTION(stream, "AcceptedNew", m.masterKey, m.sequence); if (!revoked) signingToMasterKeys_[m.signingKey] = m.masterKey; @@ -489,7 +477,7 @@ ManifestCache::applyManifest(Manifest m) // An ephemeral key was revoked and superseded by a new key. This is // expected, but should happen infrequently. if (auto stream = j_.info()) - logMftAct( + LOG_MANIFEST_ACTION_WITH_OLD( stream, "AcceptedUpdate", m.masterKey, @@ -584,4 +572,9 @@ ManifestCache::save( saveManifests(*db, dbTable, isTrusted, map_, j_); } + +// Clean up macros to avoid namespace pollution +#undef LOG_MANIFEST_ACTION +#undef LOG_MANIFEST_ACTION_WITH_OLD + } // namespace ripple diff --git a/src/ripple/basics/Log.h b/src/ripple/basics/Log.h index 6722ab813..182062ac7 100644 --- a/src/ripple/basics/Log.h +++ b/src/ripple/basics/Log.h @@ -249,13 +249,22 @@ private: // Wraps a Journal::Stream to skip evaluation of // expensive argument lists if the stream is not active. #ifndef JLOG +#ifdef BEAST_ENHANCED_LOGGING #define JLOG(x) \ - if (!x) \ + if (!(x)) \ + { \ + } \ + else \ + (x).withLocation(__FILE__, __LINE__) +#else +#define JLOG(x) \ + if (!(x)) \ { \ } \ else \ x #endif +#endif //------------------------------------------------------------------------------ // Debug logging: diff --git a/src/ripple/basics/impl/Log.cpp b/src/ripple/basics/impl/Log.cpp index c023bc164..f4f52cdc5 100644 --- a/src/ripple/basics/impl/Log.cpp +++ b/src/ripple/basics/impl/Log.cpp @@ -17,11 +17,19 @@ */ //============================================================================== +#include + #include #include #include +#ifdef BEAST_ENHANCED_LOGGING +#include +#include +#endif #include #include +#include +#include #include #include #include @@ -316,11 +324,46 @@ Logs::format( { output.reserve(message.size() + partition.size() + 100); - output = to_string(std::chrono::system_clock::now()); +#ifdef BEAST_ENHANCED_LOGGING + // Environment variables are used instead of config file because: + // 1. Logging starts before config parsing (needed to debug config issues) + // 2. This is a developer feature - devs can easily set env vars + // 3. Allows per-run overrides without editing config files + static const char* fmt = []() { + const char* env = std::getenv("LOG_DATE_FORMAT"); + return env ? 
env : "%Y-%b-%d %T %Z"; // Default format + }(); + + // Check if we should use local time + static const bool useLocalTime = []() { + const char* env = std::getenv("LOG_DATE_LOCAL"); + return env && std::strcmp(env, "1") == 0; + }(); + + if (useLocalTime) + { + auto now = std::chrono::system_clock::now(); + auto local = date::make_zoned(date::current_zone(), now); + output = date::format(fmt, local); + } + else + { + output = date::format(fmt, std::chrono::system_clock::now()); + } +#else + output = to_string(std::chrono::system_clock::now()); +#endif + + if (!output.empty()) // Allow setting date format to an empty string + output += " "; - output += " "; if (!partition.empty()) + { +#ifdef BEAST_ENHANCED_LOGGING + output += beast::detail::get_log_highlight_color(); +#endif output += partition + ":"; + } using namespace beast::severities; switch (severity) @@ -348,6 +391,10 @@ Logs::format( break; } +#ifdef BEAST_ENHANCED_LOGGING + output += "\033[0m"; +#endif + output += message; // Limit the maximum length of the output diff --git a/src/ripple/beast/utility/EnhancedLogging.h b/src/ripple/beast/utility/EnhancedLogging.h new file mode 100644 index 000000000..198e0b52f --- /dev/null +++ b/src/ripple/beast/utility/EnhancedLogging.h @@ -0,0 +1,85 @@ +//------------------------------------------------------------------------------ +/* + This file is part of Beast: https://github.com/vinniefalco/Beast + Copyright 2013, Vinnie Falco + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef BEAST_UTILITY_ENHANCEDLOGGING_H_INCLUDED +#define BEAST_UTILITY_ENHANCEDLOGGING_H_INCLUDED + +#include // for size_t +#include // for std::ostream + +namespace beast { +namespace detail { + +// Check if we should use colors - cached at startup +bool +should_log_use_colors(); + +// Get the log highlight color - can be overridden via +// LOG_HIGHLIGHT_COLOR +const char* +get_log_highlight_color(); + +// Strip source root path from __FILE__ at compile time +// IMPORTANT: This MUST stay in the header as constexpr for compile-time +// evaluation! +constexpr const char* +strip_source_root(const char* file) +{ +#ifdef SOURCE_ROOT_PATH + constexpr const char* sourceRoot = SOURCE_ROOT_PATH; + constexpr auto strlen_constexpr = [](const char* s) constexpr + { + const char* p = s; + while (*p) + ++p; + return p - s; + }; + constexpr auto strncmp_constexpr = + [](const char* a, const char* b, size_t n) constexpr + { + for (size_t i = 0; i < n; ++i) + { + if (a[i] != b[i]) + return a[i] - b[i]; + if (a[i] == '\0') + break; + } + return 0; + }; + constexpr size_t sourceRootLen = strlen_constexpr(sourceRoot); + return (strncmp_constexpr(file, sourceRoot, sourceRootLen) == 0) + ? 
file + sourceRootLen + : file; +#else + return file; +#endif +} + +// Check if location info should be shown - cached at startup +bool +should_show_location(); + +// Helper to write location string (no leading/trailing space) +void +log_write_location_string(std::ostream& os, const char* file, int line); + +} // namespace detail +} // namespace beast + +#endif diff --git a/src/ripple/beast/utility/Journal.h b/src/ripple/beast/utility/Journal.h index 333a743a6..f31eb5049 100644 --- a/src/ripple/beast/utility/Journal.h +++ b/src/ripple/beast/utility/Journal.h @@ -146,6 +146,10 @@ private: ScopedStream(Sink& sink, Severity level); +#ifdef BEAST_ENHANCED_LOGGING + ScopedStream(Sink& sink, Severity level, const char* file, int line); +#endif + template ScopedStream(Stream const& stream, T const& t); @@ -173,6 +177,10 @@ private: Sink& m_sink; Severity const m_level; std::ostringstream mutable m_ostream; +#ifdef BEAST_ENHANCED_LOGGING + const char* file_ = nullptr; + int line_ = 0; +#endif }; #ifndef __INTELLISENSE__ @@ -191,6 +199,33 @@ private: //-------------------------------------------------------------------------- public: /** Provide a light-weight way to check active() before string formatting */ + +#ifdef BEAST_ENHANCED_LOGGING + /** Stream with location information that prepends file:line to the first + * message */ + class StreamWithLocation + { + public: + StreamWithLocation(Stream const& stream, const char* file, int line) + : file_(file), line_(line), stream_(stream) + { + } + + /** Override to inject file:line before the first output */ + template + ScopedStream + operator<<(T const& t) const; + + ScopedStream + operator<<(std::ostream& manip(std::ostream&)) const; + + private: + const char* file_; + int line_; + const Stream& stream_; + }; +#endif + class Stream { public: @@ -255,6 +290,15 @@ public: operator<<(T const& t) const; /** @} */ +#ifdef BEAST_ENHANCED_LOGGING + /** Create a StreamWithLocation that prepends file:line info */ + StreamWithLocation + withLocation(const char* file, int line) const + { + return StreamWithLocation(*this, file, line); + } +#endif + private: Sink& m_sink; Severity m_level; @@ -354,6 +398,8 @@ static_assert(std::is_nothrow_destructible::value == true, ""); //------------------------------------------------------------------------------ +//------------------------------------------------------------------------------ + template Journal::ScopedStream::ScopedStream(Journal::Stream const& stream, T const& t) : ScopedStream(stream.sink(), stream.level()) @@ -378,6 +424,21 @@ Journal::Stream::operator<<(T const& t) const return ScopedStream(*this, t); } +#ifdef BEAST_ENHANCED_LOGGING +//------------------------------------------------------------------------------ + +template +Journal::ScopedStream +Journal::StreamWithLocation::operator<<(T const& t) const +{ + // Create a ScopedStream with location info + ScopedStream scoped(stream_.sink(), stream_.level(), file_, line_); + scoped.ostream() << t; + return scoped; +} + +#endif + namespace detail { template > diff --git a/src/ripple/beast/utility/src/beast_EnhancedLogging.cpp b/src/ripple/beast/utility/src/beast_EnhancedLogging.cpp new file mode 100644 index 000000000..c5f34bd88 --- /dev/null +++ b/src/ripple/beast/utility/src/beast_EnhancedLogging.cpp @@ -0,0 +1,114 @@ +//------------------------------------------------------------------------------ +/* + This file is part of Beast: https://github.com/vinniefalco/Beast + Copyright 2013, Vinnie Falco + + Permission to use, copy, modify, and/or 
distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include + +namespace beast { +namespace detail { + +// Check if we should use colors - cached at startup +bool +should_log_use_colors() +{ + static const bool use_colors = []() { + // Honor NO_COLOR environment variable (standard) + if (std::getenv("NO_COLOR")) + return false; + + // Honor FORCE_COLOR to override terminal detection + if (std::getenv("FORCE_COLOR")) + return true; + + // Check if stderr is a terminal + return isatty(STDERR_FILENO) != 0; + }(); + return use_colors; +} + +// Get the log highlight color - can be overridden via +// LOG_HIGHLIGHT_COLOR +const char* +get_log_highlight_color() +{ + static const char* escape = []() { + const char* env = std::getenv("LOG_HIGHLIGHT_COLOR"); + if (!env) + return "\033[36m"; // Default: cyan + + // Simple map of color names to escape sequences + if (std::strcmp(env, "red") == 0) + return "\033[31m"; + if (std::strcmp(env, "green") == 0) + return "\033[32m"; + if (std::strcmp(env, "yellow") == 0) + return "\033[33m"; + if (std::strcmp(env, "blue") == 0) + return "\033[34m"; + if (std::strcmp(env, "magenta") == 0) + return "\033[35m"; + if (std::strcmp(env, "cyan") == 0) + return "\033[36m"; + if (std::strcmp(env, "white") == 0) + return "\033[37m"; + if (std::strcmp(env, "gray") == 0 || std::strcmp(env, "grey") == 0) + return "\033[90m"; // Bright black (gray) + if (std::strcmp(env, "orange") == 0) + return "\033[93m"; // Bright yellow (appears orange-ish) + if (std::strcmp(env, "none") == 0) + return ""; + + // Default to cyan if unknown color name + return "\033[36m"; + }(); + return escape; +} + +// Check if location info should be shown - cached at startup +bool +should_show_location() +{ + static const bool show = []() { + const char* env = std::getenv("LOG_DISABLE"); + // Show location by default, hide if LOG_DISABLE=1 + return !env || std::strcmp(env, "1") != 0; + }(); + return show; +} + +// Helper to write location string (no leading/trailing space) +void +log_write_location_string(std::ostream& os, const char* file, int line) +{ + if (detail::should_log_use_colors()) + { + os << detail::get_log_highlight_color() << "[" + << detail::strip_source_root(file) << ":" << line << "]\033[0m"; + } + else + { + os << "[" << detail::strip_source_root(file) << ":" << line << "]"; + } +} + +} // namespace detail +} // namespace beast diff --git a/src/ripple/beast/utility/src/beast_Journal.cpp b/src/ripple/beast/utility/src/beast_Journal.cpp index 7c332bf6b..037da0b76 100644 --- a/src/ripple/beast/utility/src/beast_Journal.cpp +++ b/src/ripple/beast/utility/src/beast_Journal.cpp @@ -19,6 +19,11 @@ #include #include +#ifdef BEAST_ENHANCED_LOGGING +#include +#include +#include +#endif namespace beast { @@ -131,9 +136,36 @@ 
     m_ostream << manip;
 }
 
+#ifdef BEAST_ENHANCED_LOGGING
+Journal::ScopedStream::ScopedStream(
+    Sink& sink,
+    Severity level,
+    const char* file,
+    int line)
+    : m_sink(sink), m_level(level), file_(file), line_(line)
+{
+    // Apply the same stream modifiers as the other constructors
+    m_ostream << std::boolalpha << std::showbase;
+}
+#endif
+
 Journal::ScopedStream::~ScopedStream()
 {
-    std::string const& s(m_ostream.str());
+    std::string s(m_ostream.str());
+
+#ifdef BEAST_ENHANCED_LOGGING
+    // Append the [file:line] suffix if location reporting is enabled
+    if (file_ && detail::should_show_location() && !s.empty() && s != "\n")
+    {
+        std::ostringstream combined;
+        combined << s;
+        if (s.back() != ' ')
+            combined << " ";
+        detail::log_write_location_string(combined, file_, line_);
+        s = combined.str();
+    }
+#endif
+
     if (!s.empty())
     {
         if (s == "\n")
@@ -157,4 +189,18 @@ Journal::Stream::operator<<(std::ostream& manip(std::ostream&)) const
     return ScopedStream(*this, manip);
 }
 
+#ifdef BEAST_ENHANCED_LOGGING
+
+// Manipulator overload: route through the location-aware constructor
+Journal::ScopedStream
+Journal::StreamWithLocation::operator<<(
+    std::ostream& manip(std::ostream&)) const
+{
+    // Create a ScopedStream with location info
+    ScopedStream scoped(stream_.sink(), stream_.level(), file_, line_);
+    scoped.ostream() << manip;
+    return scoped;
+}
+#endif
+
 } // namespace beast
diff --git a/src/test/ledger/Invariants_test.cpp b/src/test/ledger/Invariants_test.cpp
index 2e5c2f402..8843d402b 100644
--- a/src/test/ledger/Invariants_test.cpp
+++ b/src/test/ledger/Invariants_test.cpp
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -90,10 +91,26 @@ class Invariants_test : public beast::unit_test::suite
     {
         terActual = ac.checkInvariants(terActual, fee);
         BEAST_EXPECT(terExpect == terActual);
-        BEAST_EXPECT(
-            sink.messages().str().starts_with("Invariant failed:") ||
-            sink.messages().str().starts_with(
-                "Transaction caused an exception"));
+        // Handle both with and without BEAST_ENHANCED_LOGGING
+        auto const msg = sink.messages().str();
+        bool hasExpectedPrefix = false;
+
+#ifdef BEAST_ENHANCED_LOGGING
+        // When BEAST_ENHANCED_LOGGING is enabled, messages may include ANSI
+        // color codes and end with a [file:line] suffix. Just search for the
+        // message content.
+        hasExpectedPrefix =
+            msg.find("Invariant failed:") != std::string::npos ||
+            msg.find("Transaction caused an exception") !=
+                std::string::npos;
+#else
+        // Without BEAST_ENHANCED_LOGGING, messages start directly with the
+        // text
+        hasExpectedPrefix = msg.starts_with("Invariant failed:") ||
+            msg.starts_with("Transaction caused an exception");
+#endif
+
+        BEAST_EXPECT(hasExpectedPrefix);
         for (auto const& m : expect_logs)
         {
             if (sink.messages().str().find(m) == std::string::npos)

From 15c7ad6f787677c6af7fbb93ce7db9aeabc61731 Mon Sep 17 00:00:00 2001
From: tequ
Date: Tue, 14 Oct 2025 14:35:48 +0900
Subject: [PATCH 12/12] Fix Invalid Tx flags (#514)

---
 src/ripple/app/tx/impl/SetHook.cpp       |  8 ++++++
 src/ripple/app/tx/impl/SetSignerList.cpp |  8 ++++++
 src/ripple/protocol/Feature.h            |  3 ++-
 src/ripple/protocol/impl/Feature.cpp     |  1 +
 src/test/app/Import_test.cpp             |  4 +--
 src/test/app/MultiSign_test.cpp          | 33 ++++++++++++++++++++++++
 src/test/app/SetHook_test.cpp            | 30 +++++++++++++++++++++
 src/test/rpc/AccountTx_test.cpp          | 10 +++----
 8 files changed, 88 insertions(+), 9 deletions(-)

diff --git a/src/ripple/app/tx/impl/SetHook.cpp b/src/ripple/app/tx/impl/SetHook.cpp
index d19a3337a..79e68e01d 100644
--- a/src/ripple/app/tx/impl/SetHook.cpp
+++ b/src/ripple/app/tx/impl/SetHook.cpp
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -665,6 +666,13 @@ SetHook::preflight(PreflightContext const& ctx)
     if (!isTesSuccess(ret))
         return ret;
 
+    if (ctx.rules.enabled(fixInvalidTxFlags) &&
+        (ctx.tx.getFlags() & tfUniversalMask))
+    {
+        JLOG(ctx.j.trace()) << "SetHook: Invalid flags set.";
+        return temINVALID_FLAG;
+    }
+
     if (!ctx.tx.isFieldPresent(sfHooks))
     {
         JLOG(ctx.j.trace())
diff --git a/src/ripple/app/tx/impl/SetSignerList.cpp b/src/ripple/app/tx/impl/SetSignerList.cpp
index 87f69c51d..71ed014c7 100644
--- a/src/ripple/app/tx/impl/SetSignerList.cpp
+++ b/src/ripple/app/tx/impl/SetSignerList.cpp
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -81,6 +82,13 @@ SetSignerList::preflight(PreflightContext const& ctx)
     if (auto const ret = preflight1(ctx); !isTesSuccess(ret))
         return ret;
 
+    if (ctx.rules.enabled(fixInvalidTxFlags) &&
+        (ctx.tx.getFlags() & tfUniversalMask))
+    {
+        JLOG(ctx.j.trace()) << "SetSignerList: invalid flags.";
+        return temINVALID_FLAG;
+    }
+
     auto const result = determineOperation(ctx.tx, ctx.flags, ctx.j);
 
     if (!isTesSuccess(std::get<0>(result)))
diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h
index 6982b866c..b52a78265 100644
--- a/src/ripple/protocol/Feature.h
+++ b/src/ripple/protocol/Feature.h
@@ -74,7 +74,7 @@ namespace detail {
 // Feature.cpp. Because it's only used to reserve storage, and determine how
 // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
 // the actual number of amendments. A LogicError on startup will verify this.
-static constexpr std::size_t numFeatures = 85;
+static constexpr std::size_t numFeatures = 86;
 
 /** Amendments that this server supports and the default voting behavior.
    Whether they are enabled depends on the Rules defined in the validated
@@ -373,6 +373,7 @@ extern uint256 const fixProvisionalDoubleThreading;
 extern uint256 const featureClawback;
 extern uint256 const featureDeepFreeze;
 extern uint256 const featureIOUIssuerWeakTSH;
+extern uint256 const fixInvalidTxFlags;
 
 } // namespace ripple
diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp
index 9775b7d81..789423efa 100644
--- a/src/ripple/protocol/impl/Feature.cpp
+++ b/src/ripple/protocol/impl/Feature.cpp
@@ -479,6 +479,7 @@ REGISTER_FEATURE(Clawback, Supported::yes, VoteBehavior::De
 REGISTER_FIX    (fixProvisionalDoubleThreading, Supported::yes, VoteBehavior::DefaultYes);
 REGISTER_FEATURE(DeepFreeze, Supported::yes, VoteBehavior::DefaultNo);
 REGISTER_FEATURE(IOUIssuerWeakTSH, Supported::yes, VoteBehavior::DefaultNo);
+REGISTER_FIX    (fixInvalidTxFlags, Supported::yes, VoteBehavior::DefaultYes);
 
 // The following amendments are obsolete, but must remain supported
 // because they could potentially get enabled.
diff --git a/src/test/app/Import_test.cpp b/src/test/app/Import_test.cpp
index 2bb56fbe9..057952836 100644
--- a/src/test/app/Import_test.cpp
+++ b/src/test/app/Import_test.cpp
@@ -5203,8 +5203,8 @@ class Import_test : public beast::unit_test::suite
             std::string ns_str =
                 "CAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECA"
                 "FE";
-            Json::Value jv = ripple::test::jtx::hook(
-                issuer, {{hso(createCodeHex)}}, hsfOVERRIDE | hsfCOLLECT);
+            Json::Value jv =
+                ripple::test::jtx::hook(issuer, {{hso(createCodeHex)}}, 0);
             jv[jss::Hooks][0U][jss::Hook][jss::HookNamespace] = ns_str;
             jv[jss::Hooks][0U][jss::Hook][jss::HookOn] =
                 "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFDFFFFFFFFFFFFFFFFFFBFFF"
diff --git a/src/test/app/MultiSign_test.cpp b/src/test/app/MultiSign_test.cpp
index 7668ef6be..1c4e537a3 100644
--- a/src/test/app/MultiSign_test.cpp
+++ b/src/test/app/MultiSign_test.cpp
@@ -1659,6 +1659,36 @@ public:
         BEAST_EXPECT(env.seq(alice) == aliceSeq + 1);
     }
 
+    void
+    test_signerListSetFlags(FeatureBitset features)
+    {
+        using namespace test::jtx;
+
+        for (bool const withFixInvalidTxFlags : {false, true})
+        {
+            Env env{
+                *this,
+                withFixInvalidTxFlags ? features
+                                      : features - fixInvalidTxFlags};
+            Account const alice{"alice"};
+
+            env.fund(XRP(1000), alice);
+            env.close();
+
+            testcase(
+                std::string("SignerListSet flag, fix ") +
+                (withFixInvalidTxFlags ? "enabled" : "disabled"));
+
+            ter const expected(
+                withFixInvalidTxFlags ? TER(temINVALID_FLAG)
+                                      : TER(tesSUCCESS));
+            env(signers(alice, 2, {{bogie, 1}, {ghost, 1}}),
+                expected,
+                txflags(tfPassive));
+            env.close();
+        }
+    }
+
     void
     testAll(FeatureBitset features)
     {
@@ -1695,6 +1725,9 @@ public:
         testAll(all - featureMultiSignReserve - featureExpandedSignerList);
         testAll(all - featureExpandedSignerList);
         testAll(all);
+
+        test_signerListSetFlags(all);
+
         test_amendmentTransition();
     }
 };
diff --git a/src/test/app/SetHook_test.cpp b/src/test/app/SetHook_test.cpp
index 3fb16176b..75caade5e 100644
--- a/src/test/app/SetHook_test.cpp
+++ b/src/test/app/SetHook_test.cpp
@@ -364,6 +364,35 @@ public:
         }
     }
 
+    void
+    testInvalidTxFlags(FeatureBitset features)
+    {
+        testcase("Checks invalid tx flags");
+        using namespace jtx;
+
+        for (bool const withFixInvalidTxFlags : {false, true})
+        {
+            Env env{
+                *this,
+                withFixInvalidTxFlags ? features
+                                      : features - fixInvalidTxFlags};
+
+            auto const alice = Account{"alice"};
+            env.fund(XRP(10000), alice);
+            env.close();
+
+            Json::Value jv =
+                ripple::test::jtx::hook(alice, {{{hso_delete()}}}, 0);
+            jv[jss::Flags] = tfUniversalMask;
+
+            env(jv,
+                M("Invalid SetHook flags"),
+                HSFEE,
+                withFixInvalidTxFlags ? ter(temINVALID_FLAG)
+                                      : ter(tesSUCCESS));
+            env.close();
+        }
+    }
+
     void
     testGrants(FeatureBitset features)
     {
@@ -12737,6 +12766,7 @@ public:
         testHooksOwnerDir(features);
         testHooksDisabled(features);
         testTxStructure(features);
+        testInvalidTxFlags(features);
         testInferHookSetOperation();
         testParams(features);
         testGrants(features);
diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp
index 85dd2978d..aaed4defe 100644
--- a/src/test/rpc/AccountTx_test.cpp
+++ b/src/test/rpc/AccountTx_test.cpp
@@ -523,8 +523,7 @@ class AccountTx_test : public beast::unit_test::suite
                 "0B";
             Json::Value jhv = hso(updateHookHex);
             jhv[jss::Flags] = hsfOVERRIDE;
-            Json::Value jv =
-                ripple::test::jtx::hook(account, {{jhv}}, hsfOVERRIDE);
+            Json::Value jv = ripple::test::jtx::hook(account, {{jhv}}, 0);
             return jv;
         };
         env(updateHook(alice), HSFEE, sig(alie));
@@ -553,8 +552,7 @@ class AccountTx_test : public beast::unit_test::suite
                 "000000";
             jhv[jss::HookNamespace] = to_string(uint256{beast::zero});
             jhv[jss::HookHash] = to_string(hookHash);
-            Json::Value jv =
-                ripple::test::jtx::hook(account, {{jhv}}, hsfOVERRIDE);
+            Json::Value jv = ripple::test::jtx::hook(account, {{jhv}}, 0);
             return jv;
         };
         uint256 const hid = hh(env, alice);
@@ -563,8 +561,8 @@
 
         // Delete Hook
         auto deleteHook = [](test::jtx::Account const& account) {
-            Json::Value jv = ripple::test::jtx::hook(
-                account, {{hso_delete()}}, hsfOVERRIDE);
+            Json::Value jv =
+                ripple::test::jtx::hook(account, {{hso_delete()}}, 0);
             return jv;
         };
         env(deleteHook(alice), HSFEE, sig(alie));
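// ---------------------------------------------------------------------------
// A minimal sketch, not part of the patch above: the fixInvalidTxFlags checks
// added to SetHook.cpp and SetSignerList.cpp share one shape, and a transactor
// adopting the same guard would gate it on the amendment in preflight so that
// pre-activation ledgers keep the historical, permissive behavior. "Example"
// is a hypothetical transactor name; every other identifier (preflight1,
// preflight2, tfUniversalMask, temINVALID_FLAG, fixInvalidTxFlags) is taken
// from the patch or from rippled's Transactor interface.
//
//     NotTEC
//     Example::preflight(PreflightContext const& ctx)
//     {
//         if (auto const ret = preflight1(ctx); !isTesSuccess(ret))
//             return ret;
//
//         // Reject unknown universal flags only once the fix is active, so
//         // replaying old ledgers reproduces their original results.
//         if (ctx.rules.enabled(fixInvalidTxFlags) &&
//             (ctx.tx.getFlags() & tfUniversalMask))
//         {
//             JLOG(ctx.j.trace()) << "Example: invalid flags.";
//             return temINVALID_FLAG;
//         }
//
//         return preflight2(ctx);
//     }
//
// Gating consensus-visible validation on an amendment is what lets the tests
// above run each case twice, with the fix enabled and disabled, and expect
// temINVALID_FLAG in one pass and tesSUCCESS in the other.
// ---------------------------------------------------------------------------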