Mirror of https://github.com/Xahau/xahaud.git (synced 2025-11-05 19:25:49 +00:00)

Compare commits: hook-api-u ... catalogue (54 commits)
| Author | SHA1 | Date |
|---|---|---|
| | f914365c3f | |
| | 7e8436cb50 | |
| | 0e19a4034d | |
| | fa640624f0 | |
| | bbdad4b2da | |
| | 58eb1b53f6 | |
| | 10b5507edd | |
| | 626c8c06e0 | |
| | b1086d6577 | |
| | f3cce20ea8 | |
| | 334f380262 | |
| | 065a94909f | |
| | 06a9ce4f33 | |
| | af3d6bb421 | |
| | 24a9539095 | |
| | f01364180b | |
| | 525dcc7e20 | |
| | c5d298d6e8 | |
| | 63077ac753 | |
| | e378ecb385 | |
| | 6f343f5d5e | |
| | a8f411d715 | |
| | 5caa1cd472 | |
| | c1d097f868 | |
| | 7c33592a26 | |
| | 008a0f78de | |
| | 78bd570016 | |
| | b2d30104d4 | |
| | 76ffbffcd6 | |
| | a6f2f9f27a | |
| | ac48b5a4dd | |
| | f5660c4d7d | |
| | d7970346ae | |
| | ea3ec6e532 | |
| | 589de50202 | |
| | 5aa15f0ddc | |
| | 55a94c400e | |
| | 90c2726cbd | |
| | 21d164b23b | |
| | 3a7e59c077 | |
| | 5c4b1a8ce8 | |
| | 6ebecd193b | |
| | 80cf986036 | |
| | 99ebfd52ac | |
| | a2a764fb16 | |
| | de455e52e8 | |
| | 84942ce125 | |
| | 1a8173f3df | |
| | 4c39910576 | |
| | c2434e74a1 | |
| | 4327f06152 | |
| | 250fedf45f | |
| | 16b90fa826 | |
| | 96be5cc0b0 | |
@@ -606,6 +606,7 @@ target_sources (rippled PRIVATE
    src/ripple/rpc/handlers/BlackList.cpp
    src/ripple/rpc/handlers/BookOffers.cpp
    src/ripple/rpc/handlers/CanDelete.cpp
    src/ripple/rpc/handlers/Catalogue.cpp
    src/ripple/rpc/handlers/Connect.cpp
    src/ripple/rpc/handlers/ConsensusInfo.cpp
    src/ripple/rpc/handlers/CrawlShards.cpp
@@ -661,6 +662,7 @@ target_sources (rippled PRIVATE
    src/ripple/rpc/handlers/ValidatorListSites.cpp
    src/ripple/rpc/handlers/Validators.cpp
    src/ripple/rpc/handlers/WalletPropose.cpp
    src/ripple/rpc/handlers/Catalogue.cpp
    src/ripple/rpc/impl/DeliveredAmount.cpp
    src/ripple/rpc/impl/Handler.cpp
    src/ripple/rpc/impl/LegacyPathFind.cpp
@@ -995,6 +997,7 @@ if (tests)
    src/test/rpc/AccountTx_test.cpp
    src/test/rpc/AmendmentBlocked_test.cpp
    src/test/rpc/Book_test.cpp
    src/test/rpc/Catalogue_test.cpp
    src/test/rpc/DepositAuthorized_test.cpp
    src/test/rpc/DeliveredAmount_test.cpp
    src/test/rpc/Feature_test.cpp
@@ -1,14 +1,16 @@
#[===================================================================[
   NIH dep: boost
#]===================================================================]

if((NOT DEFINED BOOST_ROOT) AND(DEFINED ENV{BOOST_ROOT}))
  set(BOOST_ROOT $ENV{BOOST_ROOT})
endif()
if((NOT DEFINED BOOST_LIBRARYDIR) AND(DEFINED ENV{BOOST_LIBRARYDIR}))
  set(BOOST_LIBRARYDIR $ENV{BOOST_LIBRARYDIR})
endif()
file(TO_CMAKE_PATH "${BOOST_ROOT}" BOOST_ROOT)
if(WIN32 OR CYGWIN)
  # Workaround for MSVC having two boost versions - x86 and x64 on same PC in stage folders
  if(DEFINED BOOST_ROOT)
    if((NOT DEFINED BOOST_LIBRARYDIR) AND (DEFINED BOOST_ROOT))
      if(IS_DIRECTORY ${BOOST_ROOT}/stage64/lib)
        set(BOOST_LIBRARYDIR ${BOOST_ROOT}/stage64/lib)
      elseif(IS_DIRECTORY ${BOOST_ROOT}/stage/lib)
@@ -55,6 +57,7 @@ find_package(Boost 1.86 REQUIRED
  program_options
  regex
  system
  iostreams
  thread)

add_library(ripple_boost INTERFACE)
@@ -74,6 +77,7 @@ target_link_libraries(ripple_boost
  Boost::coroutine
  Boost::date_time
  Boost::filesystem
  Boost::iostreams
  Boost::program_options
  Boost::regex
  Boost::system
@@ -248,6 +248,7 @@ include(FindPackageHandleStandardArgs)
# Save project's policies
cmake_policy(PUSH)
cmake_policy(SET CMP0057 NEW) # if IN_LIST
#cmake_policy(SET CMP0144 NEW)

#-------------------------------------------------------------------------------
# Before we go searching, check whether a boost cmake package is available, unless
@@ -969,7 +970,24 @@ function(_Boost_COMPONENT_DEPENDENCIES component _ret)
    set(_Boost_WAVE_DEPENDENCIES filesystem serialization thread chrono date_time atomic)
    set(_Boost_WSERIALIZATION_DEPENDENCIES serialization)
  endif()
  if(NOT Boost_VERSION_STRING VERSION_LESS 1.77.0)

    # Special handling for Boost 1.86.0 and higher
    if(NOT Boost_VERSION_STRING VERSION_LESS 1.86.0)
      # Explicitly set these for Boost 1.86
      set(_Boost_IOSTREAMS_DEPENDENCIES "") # No dependencies for iostreams in 1.86

      # Debug output to help diagnose the issue
      if(Boost_DEBUG)
        message(STATUS "Using special dependency settings for Boost 1.86.0+")
        message(STATUS "Component: ${component}, uppercomponent: ${uppercomponent}")
        message(STATUS "Boost_VERSION_STRING: ${Boost_VERSION_STRING}")
        message(STATUS "BOOST_ROOT: $ENV{BOOST_ROOT}")
        message(STATUS "BOOST_LIBRARYDIR: $ENV{BOOST_LIBRARYDIR}")
      endif()
    endif()

    # Only show warning for versions beyond what we've defined
    if(NOT Boost_VERSION_STRING VERSION_LESS 1.87.0)
      message(WARNING "New Boost version may have incorrect or missing dependencies and imported targets")
    endif()
  endif()
@@ -1879,6 +1897,18 @@ foreach(COMPONENT ${Boost_FIND_COMPONENTS})
      list(INSERT _boost_LIBRARY_SEARCH_DIRS_RELEASE 0 ${Boost_LIBRARY_DIR_DEBUG})
    endif()

    if(NOT Boost_VERSION_STRING VERSION_LESS 1.86.0)
      if(BOOST_LIBRARYDIR AND EXISTS "${BOOST_LIBRARYDIR}")
        # Clear existing search paths and use only BOOST_LIBRARYDIR
        set(_boost_LIBRARY_SEARCH_DIRS_RELEASE "${BOOST_LIBRARYDIR}" NO_DEFAULT_PATH)
        set(_boost_LIBRARY_SEARCH_DIRS_DEBUG "${BOOST_LIBRARYDIR}" NO_DEFAULT_PATH)

        if(Boost_DEBUG)
          message(STATUS "Boost 1.86: Setting library search dirs to BOOST_LIBRARYDIR: ${BOOST_LIBRARYDIR}")
        endif()
      endif()
    endif()

    # Avoid passing backslashes to _Boost_FIND_LIBRARY due to macro re-parsing.
    string(REPLACE "\\" "/" _boost_LIBRARY_SEARCH_DIRS_tmp "${_boost_LIBRARY_SEARCH_DIRS_RELEASE}")
@@ -74,7 +74,11 @@ else ()
  if (NOT _location)
    message (FATAL_ERROR "using pkg-config for grpc, can't find c-ares")
  endif ()
  add_library (c-ares::cares ${_static} IMPORTED GLOBAL)
  if(${_location} MATCHES "\\.a$")
    add_library(c-ares::cares STATIC IMPORTED GLOBAL)
  else()
    add_library(c-ares::cares SHARED IMPORTED GLOBAL)
  endif()
  set_target_properties (c-ares::cares PROPERTIES
    IMPORTED_LOCATION ${_location}
    INTERFACE_INCLUDE_DIRECTORIES "${${_prefix}_INCLUDE_DIRS}"
@@ -204,6 +208,7 @@ else ()
      CMAKE_ARGS
      -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
      -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
      -DCMAKE_CXX_STANDARD=17
      $<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:-DCMAKE_VERBOSE_MAKEFILE=ON>
      $<$<BOOL:${CMAKE_TOOLCHAIN_FILE}>:-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}>
      $<$<BOOL:${VCPKG_TARGET_TRIPLET}>:-DVCPKG_TARGET_TRIPLET=${VCPKG_TARGET_TRIPLET}>
@@ -1,14 +1,18 @@
cmake_minimum_required (VERSION 3.16)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)


if (POLICY CMP0074)
  cmake_policy(SET CMP0074 NEW)
endif ()

project (rippled)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
if(POLICY CMP0144)
  cmake_policy(SET CMP0144 NEW)
endif()

project (rippled)
set(Boost_NO_BOOST_CMAKE ON)

# make GIT_COMMIT_HASH define available to all sources
@@ -305,6 +305,20 @@ Ledger::Ledger(
    }
}

Ledger::Ledger(
    LedgerInfo& info,
    Config const& config,
    Family& family,
    SHAMap const& baseState)
    : mImmutable(false)
    , info_(info)
    , txMap_(SHAMapType::TRANSACTION, family)
    , stateMap_(baseState, true)
    , rules_{config.features}
    , j_(beast::Journal(beast::Journal::getNullSink()))
{
}

// Create a new ledger that follows this one
Ledger::Ledger(Ledger const& prevLedger, NetClock::time_point closeTime)
    : mImmutable(false)
@@ -385,6 +399,19 @@ Ledger::setImmutable(bool rehash)
    setup();
}

// raw setters for catalogue
void
Ledger::setCloseFlags(int closeFlags)
{
    info_.closeFlags = closeFlags;
}

void
Ledger::setDrops(uint64_t drops)
{
    info_.drops = drops;
}

void
Ledger::setAccepted(
    NetClock::time_point closeTime,
@@ -121,6 +121,13 @@ public:
        Family& family,
        beast::Journal j);

    // used when loading ledgers from catalogue files
    Ledger(
        LedgerInfo& info,
        Config const& config,
        Family& family,
        SHAMap const& baseState);

    /** Create a new ledger following a previous ledger

        The ledger will have the sequence number that
@@ -275,6 +282,12 @@ public:
    void
    setImmutable(bool rehash = true);

    void
    setCloseFlags(int closeFlags);

    void
    setDrops(uint64_t drops);

    bool
    isImmutable() const
    {
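Note: the new constructor and the raw setters above are the hooks a catalogue loader can use to rebuild a ledger from a file without replaying transactions. A minimal illustrative sketch follows; it is not the actual Catalogue.cpp code, and it assumes the caller already has the header fields, a `Config`/`Family`, and a state `SHAMap` reconstructed elsewhere (for example via `SHAMap::deserializeFromStream` added later in this diff).

```cpp
// Illustrative sketch only: rebuild a Ledger from catalogue data.
#include <ripple/app/ledger/Ledger.h>
#include <cstdint>
#include <memory>

std::shared_ptr<ripple::Ledger>
makeLedgerFromCatalogue(
    ripple::LedgerInfo& info,         // header read from the catalogue file
    std::uint64_t drops,              // total coins recorded in the file
    int closeFlags,                   // close flags recorded in the file
    ripple::Config const& config,
    ripple::Family& family,
    ripple::SHAMap const& baseState)  // deserialized account-state map
{
    // New constructor: snapshots baseState and adopts the supplied header.
    auto ledger =
        std::make_shared<ripple::Ledger>(info, config, family, baseState);

    // "raw setters for catalogue" bypass the normal consensus paths.
    ledger->setCloseFlags(closeFlags);
    ledger->setDrops(drops);
    ledger->setImmutable(/*rehash=*/false);
    return ledger;
}
```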
@@ -70,8 +70,6 @@ public:
    LedgerHash
    getLedgerHash(LedgerIndex ledgerIndex);

    /** Remove stale cache entries
     */
    void
    sweep()
    {
@@ -128,7 +128,7 @@ public:
    getEarliestFetch();

    bool
    storeLedger(std::shared_ptr<Ledger const> ledger);
    storeLedger(std::shared_ptr<Ledger const> ledger, bool pin = false);

    void
    setFullLedger(
@@ -152,9 +152,15 @@ public:
    std::string
    getCompleteLedgers();

    std::string
    getPinnedLedgers();

    RangeSet<std::uint32_t>
    getCompleteLedgersRangeSet();

    RangeSet<std::uint32_t>
    getPinnedLedgersRangeSet();

    /** Apply held transactions to the open ledger
        This is normally called as we close the ledger.
        The open ledger remains open to handle new transactions
@@ -200,7 +206,10 @@ public:
    getLedgerByHash(uint256 const& hash);

    void
    setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV);
    setLedgerRangePresent(
        std::uint32_t minV,
        std::uint32_t maxV,
        bool pin = false /* if true, do not let these ledgers be removed */);

    std::optional<NetClock::time_point>
    getCloseTimeBySeq(LedgerIndex ledgerIndex);
@@ -373,6 +382,7 @@ private:

    std::recursive_mutex mCompleteLock;
    RangeSet<std::uint32_t> mCompleteLedgers;
    RangeSet<std::uint32_t> mPinnedLedgers;  // Track pinned ledger ranges

    // Publish thread is running.
    bool mAdvanceThread{false};
@@ -533,11 +533,20 @@ LedgerMaster::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
}

bool
LedgerMaster::storeLedger(std::shared_ptr<Ledger const> ledger)
LedgerMaster::storeLedger(std::shared_ptr<Ledger const> ledger, bool pin)
{
    bool validated = ledger->info().validated;
    // Returns true if we already had the ledger
    return mLedgerHistory.insert(std::move(ledger), validated);
    if (!mLedgerHistory.insert(std::move(ledger), validated))
        return false;

    if (pin)
    {
        uint32_t seq = ledger->info().seq;
        mPinnedLedgers.insert(range(seq, seq));
        JLOG(m_journal.info()) << "Pinned ledger : " << seq;
    }
    return true;
}

/** Apply held transactions to the open ledger
@@ -595,6 +604,15 @@ void
LedgerMaster::clearLedger(std::uint32_t seq)
{
    std::lock_guard sl(mCompleteLock);

    // Don't clear pinned ledgers
    if (boost::icl::contains(mPinnedLedgers, seq))
    {
        JLOG(m_journal.trace())
            << "Ledger " << seq << " is pinned, not clearing";
        return;
    }

    mCompleteLedgers.erase(seq);
}

@@ -1714,6 +1732,13 @@ LedgerMaster::getCompleteLedgers()
    return to_string(mCompleteLedgers);
}

std::string
LedgerMaster::getPinnedLedgers()
{
    std::lock_guard sl(mCompleteLock);
    return to_string(mPinnedLedgers);
}

RangeSet<std::uint32_t>
LedgerMaster::getCompleteLedgersRangeSet()
{
@@ -1721,6 +1746,13 @@ LedgerMaster::getCompleteLedgersRangeSet()
    return mCompleteLedgers;
}

RangeSet<std::uint32_t>
LedgerMaster::getPinnedLedgersRangeSet()
{
    std::lock_guard sl(mCompleteLock);
    return mPinnedLedgers;
}

std::optional<NetClock::time_point>
LedgerMaster::getCloseTimeBySeq(LedgerIndex ledgerIndex)
{
@@ -1876,15 +1908,26 @@ LedgerMaster::getLedgerByHash(uint256 const& hash)
}

void
LedgerMaster::setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
LedgerMaster::setLedgerRangePresent(
    std::uint32_t minV,
    std::uint32_t maxV,
    bool pin)
{
    std::lock_guard sl(mCompleteLock);
    mCompleteLedgers.insert(range(minV, maxV));

    if (pin)
    {
        mPinnedLedgers.insert(range(minV, maxV));
        JLOG(m_journal.info())
            << "Pinned ledger range: " << minV << " - " << maxV;
    }
}

void
LedgerMaster::sweep()
{
    std::lock_guard sl(mCompleteLock);
    mLedgerHistory.sweep();
    fetch_packs_.sweep();
}
@@ -1899,8 +1942,24 @@ void
LedgerMaster::clearPriorLedgers(LedgerIndex seq)
{
    std::lock_guard sl(mCompleteLock);
    if (seq > 0)
        mCompleteLedgers.erase(range(0u, seq - 1));
    if (seq <= 0)
        return;

    // First, save a copy of the pinned ledgers
    auto pinnedCopy = mPinnedLedgers;

    // Clear everything before seq
    RangeSet<std::uint32_t> toClear;
    toClear.insert(range(0u, seq - 1));
    for (auto const& interval : toClear)
        mCompleteLedgers.erase(interval);

    // Re-add the pinned ledgers to ensure they're preserved
    for (auto const& interval : pinnedCopy)
        mCompleteLedgers.insert(interval);

    JLOG(m_journal.debug()) << "clearPriorLedgers: after restoration, pinned="
                            << to_string(mPinnedLedgers);
}

void
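Note: taken together, the `pin` flag threads through `storeLedger` and `setLedgerRangePresent`, and `clearLedger`/`clearPriorLedgers` then refuse to drop anything recorded in `mPinnedLedgers`. A hedged sketch of a call site follows; the function and its context are hypothetical, meant only to show how a catalogue import could pin what it loads.

```cpp
// Hypothetical call site: after importing a contiguous range from a catalogue,
// mark the ledgers present and pinned so online deletion will not remove them.
#include <ripple/app/ledger/LedgerMaster.h>
#include <memory>
#include <vector>

void
importRange(
    ripple::LedgerMaster& lm,
    std::vector<std::shared_ptr<ripple::Ledger const>> const& loaded)
{
    for (auto const& l : loaded)
        lm.storeLedger(l, /*pin=*/true);  // store and pin each sequence

    if (!loaded.empty())
        lm.setLedgerRangePresent(
            loaded.front()->info().seq,
            loaded.back()->info().seq,
            /*pin=*/true);  // pinned ranges survive clearPriorLedgers()
}
```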
@@ -2493,6 +2493,8 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
        toBase58(TokenType::NodePublic, app_.nodeIdentity().first);

    info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
    info[jss::complete_ledgers_pinned] =
        app_.getLedgerMaster().getPinnedLedgers();

    if (amendmentBlocked_)
        info[jss::amendment_blocked] = true;
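Note: for consumers of `server_info`, the new field sits alongside `complete_ledgers` and uses the same range-string form. A minimal sketch of reading it from an RPC result; only the field name comes from this diff, the rest is illustrative.

```cpp
#include <ripple/json/json_value.h>
#include <string>

// `info` is the info object returned by a server_info call, e.g.
// "complete_ledgers": "1000-2000", "complete_ledgers_pinned": "1500-1600".
std::string
pinnedRanges(Json::Value const& info)
{
    if (info.isMember("complete_ledgers_pinned"))
        return info["complete_ledgers_pinned"].asString();
    return {};
}
```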
@@ -209,6 +209,7 @@ SHAMapStoreImp::makeNodeStore(int readThreads)
        // Create NodeStore with two backends to allow online deletion of
        // data
        auto dbr = std::make_unique<NodeStore::DatabaseRotatingImp>(
            app_,
            scheduler_,
            readThreads,
            std::move(writableBackend),
@@ -601,11 +601,44 @@ SQLiteDatabaseImp::deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq)
|
||||
void
|
||||
SQLiteDatabaseImp::deleteBeforeLedgerSeq(LedgerIndex ledgerSeq)
|
||||
{
|
||||
// Get a reference to the pinned ledgers set for quick lookups
|
||||
RangeSet<std::uint32_t> pinnedLedgers;
|
||||
{
|
||||
auto& ledgerMaster = app_.getLedgerMaster();
|
||||
// Use public API to get the pinned ledgers
|
||||
pinnedLedgers = ledgerMaster.getPinnedLedgersRangeSet();
|
||||
}
|
||||
|
||||
if (existsLedger())
|
||||
{
|
||||
auto db = checkoutLedger();
|
||||
detail::deleteBeforeLedgerSeq(
|
||||
*db, detail::TableType::Ledgers, ledgerSeq);
|
||||
|
||||
// Check if any ledgers in the range to be deleted are pinned
|
||||
bool hasPinnedLedgers = false;
|
||||
for (LedgerIndex seq = 1; seq < ledgerSeq && !hasPinnedLedgers; ++seq)
|
||||
{
|
||||
if (boost::icl::contains(pinnedLedgers, seq))
|
||||
hasPinnedLedgers = true;
|
||||
}
|
||||
|
||||
if (!hasPinnedLedgers)
|
||||
{
|
||||
// No pinned ledgers in the range, proceed with normal delete
|
||||
detail::deleteBeforeLedgerSeq(
|
||||
*db, detail::TableType::Ledgers, ledgerSeq);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Delete ledgers individually, skipping pinned ones
|
||||
for (LedgerIndex seq = 1; seq < ledgerSeq; ++seq)
|
||||
{
|
||||
if (!boost::icl::contains(pinnedLedgers, seq))
|
||||
{
|
||||
detail::deleteByLedgerSeq(
|
||||
*db, detail::TableType::Ledgers, seq);
|
||||
}
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -614,8 +647,39 @@ SQLiteDatabaseImp::deleteBeforeLedgerSeq(LedgerIndex ledgerSeq)
|
||||
iterateLedgerBack(
|
||||
seqToShardIndex(ledgerSeq),
|
||||
[&](soci::session& session, std::uint32_t shardIndex) {
|
||||
detail::deleteBeforeLedgerSeq(
|
||||
session, detail::TableType::Ledgers, ledgerSeq);
|
||||
LedgerIndex firstSeq = firstLedgerSeq(shardIndex);
|
||||
LedgerIndex lastSeq =
|
||||
std::min(lastLedgerSeq(shardIndex), ledgerSeq - 1);
|
||||
|
||||
// Check if any ledgers in this shard's range are pinned
|
||||
bool hasPinnedLedgers = false;
|
||||
for (LedgerIndex seq = firstSeq;
|
||||
seq <= lastSeq && !hasPinnedLedgers;
|
||||
++seq)
|
||||
{
|
||||
if (boost::icl::contains(pinnedLedgers, seq))
|
||||
hasPinnedLedgers = true;
|
||||
}
|
||||
|
||||
if (!hasPinnedLedgers)
|
||||
{
|
||||
// No pinned ledgers in this shard range, proceed with
|
||||
// normal delete
|
||||
detail::deleteBeforeLedgerSeq(
|
||||
session, detail::TableType::Ledgers, ledgerSeq);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Delete ledgers individually, skipping pinned ones
|
||||
for (LedgerIndex seq = firstSeq; seq <= lastSeq; ++seq)
|
||||
{
|
||||
if (!boost::icl::contains(pinnedLedgers, seq))
|
||||
{
|
||||
detail::deleteByLedgerSeq(
|
||||
session, detail::TableType::Ledgers, seq);
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
});
|
||||
}
|
||||
@@ -627,11 +691,43 @@ SQLiteDatabaseImp::deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq)
|
||||
if (!useTxTables_)
|
||||
return;
|
||||
|
||||
// Get a reference to the pinned ledgers set for quick lookups
|
||||
RangeSet<std::uint32_t> pinnedLedgers;
|
||||
{
|
||||
auto& ledgerMaster = app_.getLedgerMaster();
|
||||
pinnedLedgers = ledgerMaster.getPinnedLedgersRangeSet();
|
||||
}
|
||||
|
||||
if (existsTransaction())
|
||||
{
|
||||
auto db = checkoutTransaction();
|
||||
detail::deleteBeforeLedgerSeq(
|
||||
*db, detail::TableType::Transactions, ledgerSeq);
|
||||
|
||||
// Check if any ledgers in the range to be deleted are pinned
|
||||
bool hasPinnedLedgers = false;
|
||||
for (LedgerIndex seq = 1; seq < ledgerSeq && !hasPinnedLedgers; ++seq)
|
||||
{
|
||||
if (boost::icl::contains(pinnedLedgers, seq))
|
||||
hasPinnedLedgers = true;
|
||||
}
|
||||
|
||||
if (!hasPinnedLedgers)
|
||||
{
|
||||
// No pinned ledgers in the range, proceed with normal delete
|
||||
detail::deleteBeforeLedgerSeq(
|
||||
*db, detail::TableType::Transactions, ledgerSeq);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Delete transaction data individually, skipping pinned ledgers
|
||||
for (LedgerIndex seq = 1; seq < ledgerSeq; ++seq)
|
||||
{
|
||||
if (!boost::icl::contains(pinnedLedgers, seq))
|
||||
{
|
||||
detail::deleteByLedgerSeq(
|
||||
*db, detail::TableType::Transactions, seq);
|
||||
}
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -640,8 +736,40 @@ SQLiteDatabaseImp::deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq)
|
||||
iterateTransactionBack(
|
||||
seqToShardIndex(ledgerSeq),
|
||||
[&](soci::session& session, std::uint32_t shardIndex) {
|
||||
detail::deleteBeforeLedgerSeq(
|
||||
session, detail::TableType::Transactions, ledgerSeq);
|
||||
LedgerIndex firstSeq = firstLedgerSeq(shardIndex);
|
||||
LedgerIndex lastSeq =
|
||||
std::min(lastLedgerSeq(shardIndex), ledgerSeq - 1);
|
||||
|
||||
// Check if any ledgers in this shard's range are pinned
|
||||
bool hasPinnedLedgers = false;
|
||||
for (LedgerIndex seq = firstSeq;
|
||||
seq <= lastSeq && !hasPinnedLedgers;
|
||||
++seq)
|
||||
{
|
||||
if (boost::icl::contains(pinnedLedgers, seq))
|
||||
hasPinnedLedgers = true;
|
||||
}
|
||||
|
||||
if (!hasPinnedLedgers)
|
||||
{
|
||||
// No pinned ledgers in this shard range, proceed with
|
||||
// normal delete
|
||||
detail::deleteBeforeLedgerSeq(
|
||||
session, detail::TableType::Transactions, ledgerSeq);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Delete transaction data individually, skipping pinned
|
||||
// ledgers
|
||||
for (LedgerIndex seq = firstSeq; seq <= lastSeq; ++seq)
|
||||
{
|
||||
if (!boost::icl::contains(pinnedLedgers, seq))
|
||||
{
|
||||
detail::deleteByLedgerSeq(
|
||||
session, detail::TableType::Transactions, seq);
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
});
|
||||
}
|
||||
@@ -654,11 +782,44 @@ SQLiteDatabaseImp::deleteAccountTransactionsBeforeLedgerSeq(
|
||||
if (!useTxTables_)
|
||||
return;
|
||||
|
||||
// Get a reference to the pinned ledgers set for quick lookups
|
||||
RangeSet<std::uint32_t> pinnedLedgers;
|
||||
{
|
||||
auto& ledgerMaster = app_.getLedgerMaster();
|
||||
pinnedLedgers = ledgerMaster.getPinnedLedgersRangeSet();
|
||||
}
|
||||
|
||||
if (existsTransaction())
|
||||
{
|
||||
auto db = checkoutTransaction();
|
||||
detail::deleteBeforeLedgerSeq(
|
||||
*db, detail::TableType::AccountTransactions, ledgerSeq);
|
||||
|
||||
// Check if any ledgers in the range to be deleted are pinned
|
||||
bool hasPinnedLedgers = false;
|
||||
for (LedgerIndex seq = 1; seq < ledgerSeq && !hasPinnedLedgers; ++seq)
|
||||
{
|
||||
if (boost::icl::contains(pinnedLedgers, seq))
|
||||
hasPinnedLedgers = true;
|
||||
}
|
||||
|
||||
if (!hasPinnedLedgers)
|
||||
{
|
||||
// No pinned ledgers in the range, proceed with normal delete
|
||||
detail::deleteBeforeLedgerSeq(
|
||||
*db, detail::TableType::AccountTransactions, ledgerSeq);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Delete account transaction data individually, skipping pinned
|
||||
// ledgers
|
||||
for (LedgerIndex seq = 1; seq < ledgerSeq; ++seq)
|
||||
{
|
||||
if (!boost::icl::contains(pinnedLedgers, seq))
|
||||
{
|
||||
detail::deleteByLedgerSeq(
|
||||
*db, detail::TableType::AccountTransactions, seq);
|
||||
}
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -667,8 +828,44 @@ SQLiteDatabaseImp::deleteAccountTransactionsBeforeLedgerSeq(
|
||||
iterateTransactionBack(
|
||||
seqToShardIndex(ledgerSeq),
|
||||
[&](soci::session& session, std::uint32_t shardIndex) {
|
||||
detail::deleteBeforeLedgerSeq(
|
||||
session, detail::TableType::AccountTransactions, ledgerSeq);
|
||||
LedgerIndex firstSeq = firstLedgerSeq(shardIndex);
|
||||
LedgerIndex lastSeq =
|
||||
std::min(lastLedgerSeq(shardIndex), ledgerSeq - 1);
|
||||
|
||||
// Check if any ledgers in this shard's range are pinned
|
||||
bool hasPinnedLedgers = false;
|
||||
for (LedgerIndex seq = firstSeq;
|
||||
seq <= lastSeq && !hasPinnedLedgers;
|
||||
++seq)
|
||||
{
|
||||
if (boost::icl::contains(pinnedLedgers, seq))
|
||||
hasPinnedLedgers = true;
|
||||
}
|
||||
|
||||
if (!hasPinnedLedgers)
|
||||
{
|
||||
// No pinned ledgers in this shard range, proceed with
|
||||
// normal delete
|
||||
detail::deleteBeforeLedgerSeq(
|
||||
session,
|
||||
detail::TableType::AccountTransactions,
|
||||
ledgerSeq);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Delete account transaction data individually, skipping
|
||||
// pinned ledgers
|
||||
for (LedgerIndex seq = firstSeq; seq <= lastSeq; ++seq)
|
||||
{
|
||||
if (!boost::icl::contains(pinnedLedgers, seq))
|
||||
{
|
||||
detail::deleteByLedgerSeq(
|
||||
session,
|
||||
detail::TableType::AccountTransactions,
|
||||
seq);
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
});
|
||||
}
|
||||
|
||||
@@ -129,6 +129,18 @@ public:
        return reinterpret_cast<const_pointer>(data_.data());
    }

    char const*
    cdata() const
    {
        return reinterpret_cast<char const*>(data_.data());
    }

    char*
    cdata()
    {
        return reinterpret_cast<char*>(data_.data());
    }

    iterator
    begin()
    {
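Note: the char-typed `cdata()` overloads mirror the existing `data()` accessors and are presumably there so a buffer can be handed straight to char-oriented stream APIs used by the catalogue code. A small sketch, assuming a Buffer-like class with `cdata()` and `size()`:

```cpp
#include <ostream>

// Write a buffer's bytes to a char-oriented stream without an explicit
// reinterpret_cast at every call site.
template <class BufferLike>
void
writeBuffer(BufferLike const& buf, std::ostream& out)
{
    out.write(buf.cdata(), static_cast<std::streamsize>(buf.size()));
}
```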
@@ -28,7 +28,7 @@ namespace ripple {
bool
isRpcError(Json::Value jvResult);
Json::Value
rpcError(int iError);
rpcError(int iError, std::string msg = "");

}  // namespace ripple

@@ -26,10 +26,14 @@ struct RPCErr;

// VFALCO NOTE Deprecated function
Json::Value
rpcError(int iError)
rpcError(int iError, std::string msg)
{
    Json::Value jvResult(Json::objectValue);
    RPC::inject_error(iError, jvResult);
    if (msg != "")
        RPC::inject_error(static_cast<error_code_i>(iError), msg, jvResult);
    else
        RPC::inject_error(iError, jvResult);

    return jvResult;
}
@@ -18,6 +18,8 @@
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/ledger/Ledger.h>
|
||||
#include <ripple/app/ledger/LedgerMaster.h>
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/nodestore/impl/DatabaseRotatingImp.h>
|
||||
#include <ripple/protocol/HashPrefix.h>
|
||||
|
||||
@@ -25,6 +27,7 @@ namespace ripple {
|
||||
namespace NodeStore {
|
||||
|
||||
DatabaseRotatingImp::DatabaseRotatingImp(
|
||||
Application& app,
|
||||
Scheduler& scheduler,
|
||||
int readThreads,
|
||||
std::shared_ptr<Backend> writableBackend,
|
||||
@@ -32,6 +35,7 @@ DatabaseRotatingImp::DatabaseRotatingImp(
|
||||
Section const& config,
|
||||
beast::Journal j)
|
||||
: DatabaseRotating(scheduler, readThreads, config, j)
|
||||
, app_(app)
|
||||
, writableBackend_(std::move(writableBackend))
|
||||
, archiveBackend_(std::move(archiveBackend))
|
||||
{
|
||||
@@ -48,8 +52,58 @@ DatabaseRotatingImp::rotateWithLock(
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
|
||||
// Create the new backend
|
||||
auto newBackend = f(writableBackend_->getName());
|
||||
|
||||
// Before rotating, ensure all pinned ledgers are in the writable backend
|
||||
JLOG(j_.info())
|
||||
<< "Ensuring pinned ledgers are preserved before backend rotation";
|
||||
|
||||
// Use a lambda to handle the preservation of pinned ledgers
|
||||
auto ensurePinnedLedgersInWritable = [this]() {
|
||||
// Get list of pinned ledgers
|
||||
auto pinnedLedgers = app_.getLedgerMaster().getPinnedLedgersRangeSet();
|
||||
|
||||
for (auto const& range : pinnedLedgers)
|
||||
{
|
||||
for (auto seq = range.lower(); seq <= range.upper(); ++seq)
|
||||
{
|
||||
uint256 hash = app_.getLedgerMaster().getHashBySeq(seq);
|
||||
if (hash.isZero())
|
||||
continue;
|
||||
|
||||
// Try to load the ledger
|
||||
auto ledger = app_.getLedgerMaster().getLedgerByHash(hash);
|
||||
if (ledger && ledger->isImmutable())
|
||||
{
|
||||
// If we have the ledger, store it in the writable backend
|
||||
JLOG(j_.debug()) << "Ensuring pinned ledger " << seq
|
||||
<< " is in writable backend";
|
||||
Database::storeLedger(*ledger, writableBackend_);
|
||||
}
|
||||
else
|
||||
{
|
||||
// If we don't have the ledger in memory, try to fetch its
|
||||
// objects directly
|
||||
JLOG(j_.debug()) << "Attempting to copy pinned ledger "
|
||||
<< seq << " header to writable backend";
|
||||
std::shared_ptr<NodeObject> headerObj;
|
||||
Status status =
|
||||
archiveBackend_->fetch(hash.data(), &headerObj);
|
||||
if (status == ok && headerObj)
|
||||
writableBackend_->store(headerObj);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Execute the lambda
|
||||
ensurePinnedLedgersInWritable();
|
||||
|
||||
// Now it's safe to mark the archive backend for deletion
|
||||
archiveBackend_->setDeletePath();
|
||||
|
||||
// Complete the rotation
|
||||
archiveBackend_ = std::move(writableBackend_);
|
||||
writableBackend_ = std::move(newBackend);
|
||||
}
|
||||
@@ -180,8 +234,8 @@ DatabaseRotatingImp::fetchNodeObject(
|
||||
}
|
||||
|
||||
// Update writable backend with data from the archive backend
|
||||
if (duplicate)
|
||||
writable->store(nodeObject);
|
||||
// if (duplicate)
|
||||
writable->store(nodeObject);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -33,7 +33,10 @@ public:
    DatabaseRotatingImp&
    operator=(DatabaseRotatingImp const&) = delete;

    Application& app_;

    DatabaseRotatingImp(
        Application& app,
        Scheduler& scheduler,
        int readThreads,
        std::shared_ptr<Backend> writableBackend,
@@ -143,8 +143,9 @@ enum error_code_i {

    rpcOBJECT_NOT_FOUND = 92,

    rpcLAST =
        rpcOBJECT_NOT_FOUND  // rpcLAST should always equal the last code.
    rpcLEDGER_MISSING = 93,

    rpcLAST = rpcLEDGER_MISSING  // rpcLAST should always equal the last code.
};

/** Codes returned in the `warnings` array of certain RPC commands.

@@ -109,7 +109,8 @@ constexpr static ErrorInfo unorderedErrorInfos[]{
    {rpcTOO_BUSY, "tooBusy", "The server is too busy to help you now.", 503},
    {rpcTXN_NOT_FOUND, "txnNotFound", "Transaction not found.", 404},
    {rpcNAMESPACE_NOT_FOUND, "namespaceNotFound", "Namespace not found.", 404},
    {rpcUNKNOWN_COMMAND, "unknownCmd", "Unknown method.", 405}};
    {rpcUNKNOWN_COMMAND, "unknownCmd", "Unknown method.", 405},
    {rpcLEDGER_MISSING, "ledgerMissing", "One or more ledgers in the specified range is missing", 406}};
    // clang-format on

// Sort and validate unorderedErrorInfos at compile time. Should be
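Note: with the new `rpcLEDGER_MISSING` code and the message-taking `rpcError` overload above, a handler can attach a per-request detail string to the generic error text. A hedged sketch only; the helper below and the message wording are illustrative, not the actual Catalogue.cpp code.

```cpp
#include <ripple/net/RPCErr.h>
#include <ripple/protocol/ErrorCodes.h>
#include <cstdint>
#include <string>

namespace ripple {

// Illustrative: reject a request whose ledger range is not fully available.
Json::Value
rejectMissingRange(std::uint32_t minSeq, std::uint32_t maxSeq)
{
    return rpcError(
        rpcLEDGER_MISSING,
        "ledger range " + std::to_string(minSeq) + "-" +
            std::to_string(maxSeq) + " is not fully available on this server");
}

}  // namespace ripple
```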
@@ -199,20 +199,21 @@ JSS(balances); // out: GatewayBalances
|
||||
JSS(base); // out: LogLevel
|
||||
JSS(base_fee); // out: NetworkOPs
|
||||
JSS(base_fee_no_hooks);
|
||||
JSS(base_fee_xrp); // out: NetworkOPs
|
||||
JSS(base_fee_native); // out: NetworkOPs
|
||||
JSS(bids); // out: Subscribe
|
||||
JSS(binary); // in: AccountTX, LedgerEntry,
|
||||
// AccountTxOld, Tx LedgerData
|
||||
JSS(blob); // out: ValidatorList
|
||||
JSS(blobs_v2); // out: ValidatorList
|
||||
// in: UNL
|
||||
JSS(books); // in: Subscribe, Unsubscribe
|
||||
JSS(both); // in: Subscribe, Unsubscribe
|
||||
JSS(both_sides); // in: Subscribe, Unsubscribe
|
||||
JSS(broadcast); // out: SubmitTransaction
|
||||
JSS(build_path); // in: TransactionSign
|
||||
JSS(build_version); // out: NetworkOPs
|
||||
JSS(base_fee_xrp); // out: NetworkOPs
|
||||
JSS(base_fee_native); // out: NetworkOPs
|
||||
JSS(bids); // out: Subscribe
|
||||
JSS(binary); // in: AccountTX, LedgerEntry,
|
||||
// AccountTxOld, Tx LedgerData
|
||||
JSS(blob); // out: ValidatorList
|
||||
JSS(blobs_v2); // out: ValidatorList
|
||||
// in: UNL
|
||||
JSS(books); // in: Subscribe, Unsubscribe
|
||||
JSS(both); // in: Subscribe, Unsubscribe
|
||||
JSS(both_sides); // in: Subscribe, Unsubscribe
|
||||
JSS(broadcast); // out: SubmitTransaction
|
||||
JSS(build_path); // in: TransactionSign
|
||||
JSS(build_version); // out: NetworkOPs
|
||||
JSS(bytes_written);
|
||||
JSS(cancel_after); // out: AccountChannels
|
||||
JSS(can_delete); // out: CanDelete
|
||||
JSS(changes); // out: BookChanges
|
||||
@@ -237,13 +238,15 @@ JSS(code); // out: errors
|
||||
JSS(command); // in: RPCHandler
|
||||
JSS(complete); // out: NetworkOPs, InboundLedger
|
||||
JSS(complete_ledgers); // out: NetworkOPs, PeerImp
|
||||
JSS(complete_shards); // out: OverlayImpl, PeerImp
|
||||
JSS(consensus); // out: NetworkOPs, LedgerConsensus
|
||||
JSS(converge_time); // out: NetworkOPs
|
||||
JSS(converge_time_s); // out: NetworkOPs
|
||||
JSS(cookie); // out: NetworkOPs
|
||||
JSS(count); // in: AccountTx*, ValidatorList
|
||||
JSS(counters); // in/out: retrieve counters
|
||||
JSS(complete_ledgers_pinned);
|
||||
JSS(complete_shards); // out: OverlayImpl, PeerImp
|
||||
JSS(compression_level);
|
||||
JSS(consensus); // out: NetworkOPs, LedgerConsensus
|
||||
JSS(converge_time); // out: NetworkOPs
|
||||
JSS(converge_time_s); // out: NetworkOPs
|
||||
JSS(cookie); // out: NetworkOPs
|
||||
JSS(count); // in: AccountTx*, ValidatorList
|
||||
JSS(counters); // in/out: retrieve counters
|
||||
JSS(coins);
|
||||
JSS(children);
|
||||
JSS(ctid); // in/out: Tx RPC
|
||||
@@ -257,7 +260,8 @@ JSS(currency); // in: paths/PathRequest, STAmount
|
||||
// AccountLines
|
||||
JSS(current); // out: OwnerInfo
|
||||
JSS(current_activities);
|
||||
JSS(current_ledger_size); // out: TxQ
|
||||
JSS(current_ledger_size); // out: TxQ
|
||||
JSS(current_ledger);
|
||||
JSS(current_queue_size); // out: TxQ
|
||||
JSS(data); // out: LedgerData
|
||||
JSS(date); // out: tx/Transaction, NetworkOPs
|
||||
@@ -289,18 +293,20 @@ JSS(drops); // out: TxQ
|
||||
JSS(duration_us); // out: NetworkOPs
|
||||
JSS(effective); // out: ValidatorList
|
||||
// in: UNL
|
||||
JSS(enabled); // out: AmendmentTable
|
||||
JSS(engine_result); // out: NetworkOPs, TransactionSign, Submit
|
||||
JSS(engine_result_code); // out: NetworkOPs, TransactionSign, Submit
|
||||
JSS(engine_result_message); // out: NetworkOPs, TransactionSign, Submit
|
||||
JSS(ephemeral_key); // out: ValidatorInfo
|
||||
// in/out: Manifest
|
||||
JSS(error); // out: error
|
||||
JSS(elapsed_seconds);
|
||||
JSS(enabled); // out: AmendmentTable
|
||||
JSS(engine_result); // out: NetworkOPs, TransactionSign, Submit
|
||||
JSS(engine_result_code); // out: NetworkOPs, TransactionSign, Submit
|
||||
JSS(engine_result_message); // out: NetworkOPs, TransactionSign, Submit
|
||||
JSS(ephemeral_key); // out: ValidatorInfo
|
||||
// in/out: Manifest
|
||||
JSS(error); // out: error
|
||||
JSS(errored);
|
||||
JSS(error_code); // out: error
|
||||
JSS(error_exception); // out: Submit
|
||||
JSS(error_message); // out: error
|
||||
JSS(escrow); // in: LedgerEntry
|
||||
JSS(error_code); // out: error
|
||||
JSS(error_exception); // out: Submit
|
||||
JSS(error_message); // out: error
|
||||
JSS(escrow); // in: LedgerEntry
|
||||
JSS(estimated_time_remaining);
|
||||
JSS(emitted_txn); // in: LedgerEntry
|
||||
JSS(expand); // in: handler/Ledger
|
||||
JSS(expected_date); // out: any (warnings)
|
||||
@@ -310,6 +316,7 @@ JSS(expiration); // out: AccountOffers, AccountChannels,
|
||||
// ValidatorList
|
||||
JSS(fail_hard); // in: Sign, Submit
|
||||
JSS(failed); // out: InboundLedger
|
||||
JSS(failed_ledgers); // out: catalogue
|
||||
JSS(feature); // in: Feature
|
||||
JSS(features); // out: Feature
|
||||
JSS(fee); // out: NetworkOPs, Peers
|
||||
@@ -324,9 +331,12 @@ JSS(first); // out: rpc/Version
|
||||
JSS(firstSequence); // out: NodeToShardStatus
|
||||
JSS(firstShardIndex); // out: NodeToShardStatus
|
||||
JSS(finished);
|
||||
JSS(fix_txns); // in: LedgerCleaner
|
||||
JSS(fix_txns); // in: LedgerCleaner
|
||||
JSS(file);
|
||||
JSS(file_size);
|
||||
JSS(flags); // out: AccountOffers,
|
||||
// NetworkOPs
|
||||
JSS(force); // in: catalogue
|
||||
JSS(forward); // in: AccountTx
|
||||
JSS(freeze); // out: AccountLines
|
||||
JSS(freeze_peer); // out: AccountLines
|
||||
@@ -336,6 +346,7 @@ JSS(full_reply); // out: PathFind
|
||||
JSS(fullbelow_size); // out: GetCounts
|
||||
JSS(good); // out: RPCVersion
|
||||
JSS(hash); // out: NetworkOPs, InboundLedger,
|
||||
JSS(hash_mismatches); // out: catalogue
|
||||
// LedgerToJson, STTx; field
|
||||
JSS(hashes); // in: AccountObjects
|
||||
JSS(have_header); // out: InboundLedger
|
||||
@@ -354,8 +365,10 @@ JSS(id); // websocket.
|
||||
JSS(ident); // in: AccountCurrencies, AccountInfo,
|
||||
// OwnerInfo
|
||||
JSS(ignore_default); // in: AccountLines
|
||||
JSS(import_vlseq); // in: LedgerEntry
|
||||
JSS(inLedger); // out: tx/Transaction
|
||||
JSS(ignore_hash);
|
||||
JSS(import_vlseq); // in: LedgerEntry
|
||||
JSS(imported); // out: catalogue
|
||||
JSS(inLedger); // out: tx/Transaction
|
||||
JSS(in_queue);
|
||||
JSS(inbound); // out: PeerImp
|
||||
JSS(index); // in: LedgerEntry, DownloadShard
|
||||
@@ -374,42 +387,48 @@ JSS(issuer); // in: RipplePathFind, Subscribe,
|
||||
// out: STPathSet, STAmount
|
||||
JSS(job);
|
||||
JSS(job_queue);
|
||||
JSS(job_type);
|
||||
JSS(job_status);
|
||||
JSS(jobs);
|
||||
JSS(jsonrpc); // json version
|
||||
JSS(jq_trans_overflow); // JobQueue transaction limit overflow.
|
||||
JSS(kept); // out: SubmitTransaction
|
||||
JSS(key); // out
|
||||
JSS(key_type); // in/out: WalletPropose, TransactionSign
|
||||
JSS(latency); // out: PeerImp
|
||||
JSS(last); // out: RPCVersion
|
||||
JSS(lastSequence); // out: NodeToShardStatus
|
||||
JSS(lastShardIndex); // out: NodeToShardStatus
|
||||
JSS(last_close); // out: NetworkOPs
|
||||
JSS(last_refresh_time); // out: ValidatorSite
|
||||
JSS(last_refresh_status); // out: ValidatorSite
|
||||
JSS(last_refresh_message); // out: ValidatorSite
|
||||
JSS(ledger); // in: NetworkOPs, LedgerCleaner,
|
||||
// RPCHelpers
|
||||
// out: NetworkOPs, PeerImp
|
||||
JSS(ledger_current_index); // out: NetworkOPs, RPCHelpers,
|
||||
// LedgerCurrent, LedgerAccept,
|
||||
// AccountLines
|
||||
JSS(ledger_data); // out: LedgerHeader
|
||||
JSS(ledger_hash); // in: RPCHelpers, LedgerRequest,
|
||||
// RipplePathFind, TransactionEntry,
|
||||
// handlers/Ledger
|
||||
// out: NetworkOPs, RPCHelpers,
|
||||
// LedgerClosed, LedgerData,
|
||||
// AccountLines
|
||||
JSS(ledger_hit_rate); // out: GetCounts
|
||||
JSS(ledger_index); // in/out: many
|
||||
JSS(ledger_index_max); // in, out: AccountTx*
|
||||
JSS(ledger_index_min); // in, out: AccountTx*
|
||||
JSS(ledger_max); // in, out: AccountTx*
|
||||
JSS(ledger_min); // in, out: AccountTx*
|
||||
JSS(ledger_time); // out: NetworkOPs
|
||||
JSS(LEDGER_ENTRY_TYPES); // out: RPC server_definitions
|
||||
JSS(levels); // LogLevels
|
||||
JSS(jsonrpc); // json version
|
||||
JSS(jq_trans_overflow); // JobQueue transaction limit overflow.
|
||||
JSS(kept); // out: SubmitTransaction
|
||||
JSS(key); // out
|
||||
JSS(key_type); // in/out: WalletPropose, TransactionSign
|
||||
JSS(latency); // out: PeerImp
|
||||
JSS(last); // out: RPCVersion
|
||||
JSS(lastSequence); // out: NodeToShardStatus
|
||||
JSS(lastShardIndex); // out: NodeToShardStatus
|
||||
JSS(last_close); // out: NetworkOPs
|
||||
JSS(last_refresh_time); // out: ValidatorSite
|
||||
JSS(last_refresh_status); // out: ValidatorSite
|
||||
JSS(last_refresh_message); // out: ValidatorSite
|
||||
JSS(ledger); // in: NetworkOPs, LedgerCleaner,
|
||||
// RPCHelpers
|
||||
// out: NetworkOPs, PeerImp
|
||||
JSS(ledger_count);
|
||||
JSS(ledgers_loaded);
|
||||
JSS(ledgers_written);
|
||||
JSS(ledger_current_index); // out: NetworkOPs, RPCHelpers,
|
||||
// LedgerCurrent, LedgerAccept,
|
||||
// AccountLines
|
||||
JSS(ledger_data); // out: LedgerHeader
|
||||
JSS(ledger_hash); // in: RPCHelpers, LedgerRequest,
|
||||
// RipplePathFind, TransactionEntry,
|
||||
// handlers/Ledger
|
||||
// out: NetworkOPs, RPCHelpers,
|
||||
// LedgerClosed, LedgerData,
|
||||
// AccountLines
|
||||
JSS(ledger_hit_rate); // out: GetCounts
|
||||
JSS(ledger_index); // in/out: many
|
||||
JSS(ledger_index_max); // in, out: AccountTx*
|
||||
JSS(ledger_index_min); // in, out: AccountTx*
|
||||
JSS(ledger_max); // in, out: AccountTx*
|
||||
JSS(ledger_min); // in, out: AccountTx*
|
||||
JSS(ledger_time); // out: NetworkOPs
|
||||
JSS(LEDGER_ENTRY_TYPES); // out: RPC server_definitions
|
||||
JSS(levels); // LogLevels
|
||||
JSS(level);
|
||||
JSS(limit); // in/out: AccountTx*, AccountOffers,
|
||||
// AccountLines, AccountObjects
|
||||
// in: LedgerData, BookOffers
|
||||
@@ -527,6 +546,8 @@ JSS(password); // in: Subscribe
|
||||
JSS(paths); // in: RipplePathFind
|
||||
JSS(paths_canonical); // out: RipplePathFind
|
||||
JSS(paths_computed); // out: PathRequest, RipplePathFind
|
||||
JSS(output_file); // in: CatalogueCreate
|
||||
JSS(input_file); // in: CatalogueLoad
|
||||
JSS(payment_channel); // in: LedgerEntry
|
||||
JSS(pclose);
|
||||
JSS(peer); // in: AccountLines
|
||||
@@ -536,6 +557,7 @@ JSS(peers); // out: InboundLedger, handlers/Peers, Overlay
|
||||
JSS(peer_disconnects); // Severed peer connection counter.
|
||||
JSS(peer_disconnects_resources); // Severed peer connections because of
|
||||
// excess resource consumption.
|
||||
JSS(percent_complete);
|
||||
JSS(phash);
|
||||
JSS(port); // in: Connect
|
||||
JSS(previous); // out: Reservations
|
||||
@@ -618,6 +640,7 @@ JSS(signing_keys); // out: ValidatorList
|
||||
JSS(signing_time); // out: NetworkOPs
|
||||
JSS(signer_list); // in: AccountObjects
|
||||
JSS(signer_lists); // in/out: AccountInfo
|
||||
JSS(skipped); // out: catalogue
|
||||
JSS(snapshot); // in: Subscribe
|
||||
JSS(source_account); // in: PathRequest, RipplePathFind
|
||||
JSS(source_amount); // in: PathRequest, RipplePathFind
|
||||
@@ -625,6 +648,7 @@ JSS(source_currencies); // in: PathRequest, RipplePathFind
|
||||
JSS(source_tag); // out: AccountChannels
|
||||
JSS(stand_alone); // out: NetworkOPs
|
||||
JSS(start); // in: TxHistory
|
||||
JSS(start_time);
|
||||
JSS(started);
|
||||
JSS(state); // out: Logic.h, ServerState, LedgerData
|
||||
JSS(state_accounting); // out: NetworkOPs
|
||||
|
||||
src/ripple/rpc/handlers/Catalogue.cpp (new file, 1141 lines)
File diff suppressed because it is too large
@@ -170,6 +170,12 @@ Json::Value
doValidatorListSites(RPC::JsonContext&);
Json::Value
doValidatorInfo(RPC::JsonContext&);
Json::Value
doCatalogueCreate(RPC::JsonContext&);
Json::Value
doCatalogueStatus(RPC::JsonContext&);
Json::Value
doCatalogueLoad(RPC::JsonContext&);
}  // namespace ripple

#endif
@@ -174,6 +174,9 @@ Handler const handlerArray[]{
    // Evented methods
    {"subscribe", byRef(&doSubscribe), Role::USER, NO_CONDITION},
    {"unsubscribe", byRef(&doUnsubscribe), Role::USER, NO_CONDITION},
    {"catalogue_create", byRef(&doCatalogueCreate), Role::ADMIN, NO_CONDITION},
    {"catalogue_status", byRef(&doCatalogueStatus), Role::ADMIN, NO_CONDITION},
    {"catalogue_load", byRef(&doCatalogueLoad), Role::ADMIN, NO_CONDITION},
};

class HandlerTable
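Note: the three catalogue methods are registered as admin-only. A sketch of request parameters using the catalogue-related jss entries added elsewhere in this change (`jss::output_file`, `jss::input_file`); the file paths and any further fields are assumptions, since the real schema lives in the suppressed Catalogue.cpp diff.

```cpp
#include <ripple/json/json_value.h>
#include <ripple/protocol/jss.h>

// Illustrative request parameters only; Catalogue.cpp defines the real schema.
Json::Value
catalogueCreateParams()
{
    Json::Value params(Json::objectValue);
    params[ripple::jss::output_file] = "/tmp/ledgers.catl";  // assumed path
    // A ledger range is presumably required as well; those field names are
    // not reproduced here.
    return params;
}

Json::Value
catalogueLoadParams()
{
    Json::Value params(Json::objectValue);
    params[ripple::jss::input_file] = "/tmp/ledgers.catl";  // assumed path
    return params;
}
```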
@@ -570,6 +570,19 @@ getLedger(T& ledger, uint256 const& ledgerHash, Context& context)
|
||||
return Status::OK;
|
||||
}
|
||||
|
||||
// Helper function to determine if the types are compatible for assignment
|
||||
template <typename T, typename U>
|
||||
struct is_assignable_shared_ptr : std::false_type
|
||||
{
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct is_assignable_shared_ptr<
|
||||
std::shared_ptr<T>&,
|
||||
std::shared_ptr<ReadView const>> : std::is_convertible<ReadView const*, T*>
|
||||
{
|
||||
};
|
||||
|
||||
template <class T>
|
||||
Status
|
||||
getLedger(T& ledger, uint32_t ledgerIndex, Context& context)
|
||||
@@ -579,10 +592,16 @@ getLedger(T& ledger, uint32_t ledgerIndex, Context& context)
|
||||
{
|
||||
if (context.app.config().reporting())
|
||||
return {rpcLGR_NOT_FOUND, "ledgerNotFound"};
|
||||
|
||||
auto cur = context.ledgerMaster.getCurrentLedger();
|
||||
if (cur->info().seq == ledgerIndex)
|
||||
{
|
||||
ledger = cur;
|
||||
if constexpr (is_assignable_shared_ptr<
|
||||
decltype(ledger),
|
||||
decltype(cur)>::value)
|
||||
{
|
||||
ledger = cur;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -601,6 +620,9 @@ getLedger(T& ledger, uint32_t ledgerIndex, Context& context)
|
||||
return Status::OK;
|
||||
}
|
||||
|
||||
#include <iostream>
|
||||
#include <typeinfo>
|
||||
|
||||
template <class T>
|
||||
Status
|
||||
getLedger(T& ledger, LedgerShortcut shortcut, Context& context)
|
||||
@@ -635,7 +657,15 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context)
|
||||
return {
|
||||
rpcLGR_NOT_FOUND,
|
||||
"Reporting does not track current ledger"};
|
||||
ledger = context.ledgerMaster.getCurrentLedger();
|
||||
auto cur = context.ledgerMaster.getCurrentLedger();
|
||||
|
||||
if constexpr (is_assignable_shared_ptr<
|
||||
decltype(ledger),
|
||||
decltype(cur)>::value)
|
||||
{
|
||||
ledger = cur;
|
||||
}
|
||||
|
||||
assert(ledger->open());
|
||||
}
|
||||
else if (shortcut == LedgerShortcut::CLOSED)
|
||||
@@ -685,6 +715,15 @@ getLedger<>(
|
||||
template Status
|
||||
getLedger<>(std::shared_ptr<ReadView const>&, uint256 const&, Context&);
|
||||
|
||||
template Status
|
||||
getLedger<>(std::shared_ptr<Ledger const>&, uint32_t, Context&);
|
||||
|
||||
template Status
|
||||
getLedger<>(std::shared_ptr<Ledger const>&, LedgerShortcut shortcut, Context&);
|
||||
|
||||
template Status
|
||||
getLedger<>(std::shared_ptr<Ledger const>&, uint256 const&, Context&);
|
||||
|
||||
bool
|
||||
isValidated(
|
||||
LedgerMaster& ledgerMaster,
|
||||
|
||||
@@ -33,6 +33,7 @@
#include <ripple/shamap/SHAMapMissingNode.h>
#include <ripple/shamap/SHAMapTreeNode.h>
#include <ripple/shamap/TreeNodeCache.h>
#include <boost/iostreams/filtering_stream.hpp>
#include <cassert>
#include <stack>
#include <vector>
@@ -365,6 +366,35 @@ public:
    void
    invariants() const;

public:
    /**
     * Serialize a SHAMap to a stream, optionally as a delta from another map
     * Only leaf nodes are serialized since inner nodes can be reconstructed.
     *
     * @param stream The output stream to write to
     * @param writtenNodes Set to track written node hashes to avoid duplicates
     * @param baseSHAMap Optional base map to compute delta against
     * @return Number of nodes written
     */
    template <typename StreamType>
    std::size_t
    serializeToStream(
        StreamType& stream,
        std::optional<std::reference_wrapper<const SHAMap>> baseSHAMap =
            std::nullopt) const;

    /**
     * Deserialize a SHAMap from a stream
     * Reconstructs the full tree from leaf nodes.
     *
     * @param stream The input stream to read from
     * @param baseSHAMap Optional base map to apply deltas to
     * @return True if deserialization succeeded
     */
    template <typename StreamType>
    bool
    deserializeFromStream(StreamType& stream);

private:
    using SharedPtrNodeStack =
        std::stack<std::pair<std::shared_ptr<SHAMapTreeNode>, SHAMapNodeID>>;
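Note: a round-trip sketch for the new stream APIs. The explicit instantiations further down in this diff are for boost::iostreams filtering streams, so the example uses those; the zlib compressor and file device are assumptions about how the catalogue code might wire them up, not something confirmed by this header.

```cpp
#include <ripple/shamap/SHAMap.h>
#include <boost/iostreams/device/file.hpp>
#include <boost/iostreams/filter/zlib.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <functional>

// Illustrative: write `current` as a delta against `base`, then re-apply it
// to `rebuilt` (which starts as a copy of `base` in Modifying state).
void
roundTrip(
    ripple::SHAMap const& base,
    ripple::SHAMap const& current,
    ripple::SHAMap& rebuilt)
{
    namespace io = boost::iostreams;

    {
        io::filtering_stream<io::output> out;
        out.push(io::zlib_compressor());    // assumption: compression layer
        out.push(io::file_sink("delta.bin"));
        current.serializeToStream(out, std::cref(base));  // leaf-level delta
    }

    {
        io::filtering_stream<io::input> in;
        in.push(io::zlib_decompressor());
        in.push(io::file_source("delta.bin"));
        rebuilt.deserializeFromStream(in);  // applies adds, updates, removals
    }
}
```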
@@ -43,11 +43,13 @@ static constexpr unsigned char const wireTypeInner = 2;
static constexpr unsigned char const wireTypeCompressedInner = 3;
static constexpr unsigned char const wireTypeTransactionWithMeta = 4;

enum class SHAMapNodeType {
enum SHAMapNodeType : uint8_t {
    tnINNER = 1,
    tnTRANSACTION_NM = 2,  // transaction, no metadata
    tnTRANSACTION_MD = 3,  // transaction, with metadata
    tnACCOUNT_STATE = 4
    tnACCOUNT_STATE = 4,
    tnREMOVE = 254,   // special type to mark deleted nodes in serialization
    tnTERMINAL = 255  // special type to mark the end of a serialization stream
};

class SHAMapTreeNode
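Note: the two new enumerators define the record framing used by `serializeToStream`/`deserializeFromStream` later in this diff: each leaf is a one-byte node type, a 32-byte key, a raw 4-byte size, then the data; a `tnREMOVE` record stops after the key, and a single `tnTERMINAL` byte ends the stream. The helper below is only a compact restatement of that framing, not code from the change.

```cpp
#include <cstdint>
#include <ostream>

// Minimal sketch of the per-leaf framing emitted by SHAMap::serializeToStream.
// The size field is written from memory, so it is in host byte order.
inline void
writeLeafRecord(
    std::ostream& out,
    std::uint8_t type,                 // a SHAMapNodeType value
    std::uint8_t const (&key)[32],
    std::uint32_t size,
    char const* data)
{
    out.write(reinterpret_cast<char const*>(&type), 1);
    out.write(reinterpret_cast<char const*>(key), 32);
    out.write(reinterpret_cast<char const*>(&size), 4);
    out.write(data, size);
}
```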
@@ -1242,4 +1242,399 @@ SHAMap::invariants() const
|
||||
node->invariants(true);
|
||||
}
|
||||
|
||||
template <typename StreamType>
|
||||
std::size_t
|
||||
SHAMap::serializeToStream(
|
||||
StreamType& stream,
|
||||
std::optional<std::reference_wrapper<const SHAMap>> baseSHAMap) const
|
||||
{
|
||||
// Static map to track bytes written to streams
|
||||
static std::mutex streamMapMutex;
|
||||
static std::unordered_map<
|
||||
void*,
|
||||
std::pair<uint64_t, std::chrono::steady_clock::time_point>>
|
||||
streamBytesWritten;
|
||||
|
||||
// Flush threshold: 256 MiB
|
||||
constexpr uint64_t flushThreshold = 256 * 1024 * 1024;
|
||||
|
||||
// Local byte counter for this stream
|
||||
uint64_t localBytesWritten = 0;
|
||||
|
||||
// Single lambda that uses compile-time check for flush method existence
|
||||
auto tryFlush = [](auto& s) {
|
||||
if constexpr (requires(decltype(s) str) { str.flush(); })
|
||||
{
|
||||
s.flush();
|
||||
}
|
||||
// No-op if flush doesn't exist - compiler will optimize this branch out
|
||||
};
|
||||
|
||||
// Get the current bytes written from the global map (with lock)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(streamMapMutex);
|
||||
auto it = streamBytesWritten.find(static_cast<void*>(&stream));
|
||||
if (it != streamBytesWritten.end())
|
||||
{
|
||||
localBytesWritten = it->second.first;
|
||||
}
|
||||
|
||||
// Random cleanup of old entries (while we have the lock)
|
||||
if (!streamBytesWritten.empty())
|
||||
{
|
||||
auto now = std::chrono::steady_clock::now();
|
||||
size_t randomIndex = std::rand() % streamBytesWritten.size();
|
||||
auto cleanupIt = std::next(streamBytesWritten.begin(), randomIndex);
|
||||
|
||||
// If entry is older than 5 minutes, remove it
|
||||
if (now - cleanupIt->second.second > std::chrono::minutes(5))
|
||||
{
|
||||
streamBytesWritten.erase(cleanupIt);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::unordered_set<SHAMapHash, beast::uhash<>> writtenNodes;
|
||||
|
||||
if (!root_)
|
||||
return 0;
|
||||
|
||||
std::size_t nodeCount = 0;
|
||||
|
||||
auto serializeLeaf = [&stream,
|
||||
&localBytesWritten,
|
||||
flushThreshold,
|
||||
&tryFlush](SHAMapLeafNode const& node) -> bool {
|
||||
// write the node type
|
||||
auto t = node.getType();
|
||||
stream.write(reinterpret_cast<char const*>(&t), 1);
|
||||
localBytesWritten += 1;
|
||||
|
||||
// write the key
|
||||
auto const key = node.peekItem()->key();
|
||||
stream.write(reinterpret_cast<char const*>(key.data()), 32);
|
||||
localBytesWritten += 32;
|
||||
|
||||
// write the data size
|
||||
auto data = node.peekItem()->slice();
|
||||
uint32_t size = data.size();
|
||||
stream.write(reinterpret_cast<char const*>(&size), 4);
|
||||
localBytesWritten += 4;
|
||||
|
||||
// write the data
|
||||
stream.write(reinterpret_cast<char const*>(data.data()), size);
|
||||
localBytesWritten += size;
|
||||
|
||||
// Check if we should flush without locking
|
||||
if (localBytesWritten >= flushThreshold)
|
||||
{
|
||||
tryFlush(stream);
|
||||
localBytesWritten = 0;
|
||||
}
|
||||
|
||||
return !stream.fail();
|
||||
};
|
||||
|
||||
auto serializeRemovedLeaf = [&stream,
|
||||
&localBytesWritten,
|
||||
flushThreshold,
|
||||
&tryFlush](uint256 const& key) -> bool {
|
||||
// to indicate a node is removed it is written with a removal type
|
||||
auto t = SHAMapNodeType::tnREMOVE;
|
||||
stream.write(reinterpret_cast<char const*>(&t), 1);
|
||||
localBytesWritten += 1;
|
||||
|
||||
// write the key
|
||||
stream.write(reinterpret_cast<char const*>(key.data()), 32);
|
||||
localBytesWritten += 32;
|
||||
|
||||
// Check if we should flush without locking
|
||||
if (localBytesWritten >= flushThreshold)
|
||||
{
|
||||
tryFlush(stream);
|
||||
localBytesWritten = 0;
|
||||
}
|
||||
|
||||
return !stream.fail();
|
||||
};
|
||||
|
||||
// If we're creating a delta, first compute the differences
|
||||
if (baseSHAMap && baseSHAMap->get().root_)
|
||||
{
|
||||
const SHAMap& baseMap = baseSHAMap->get();
|
||||
|
||||
// Only compute delta if the maps are different
|
||||
if (getHash() != baseMap.getHash())
|
||||
{
|
||||
Delta differences;
|
||||
|
||||
if (compare(baseMap, differences, std::numeric_limits<int>::max()))
|
||||
{
|
||||
// Process each difference
|
||||
for (auto const& [key, deltaItem] : differences)
|
||||
{
|
||||
auto const& newItem = deltaItem.first;
|
||||
auto const& oldItem = deltaItem.second;
|
||||
|
||||
if (!oldItem && newItem)
|
||||
{
|
||||
// Added item
|
||||
SHAMapLeafNode* leaf = findKey(key);
|
||||
if (leaf && serializeLeaf(*leaf))
|
||||
++nodeCount;
|
||||
}
|
||||
else if (oldItem && !newItem)
|
||||
{
|
||||
// Removed item
|
||||
if (serializeRemovedLeaf(key))
|
||||
++nodeCount;
|
||||
}
|
||||
else if (
|
||||
oldItem && newItem &&
|
||||
oldItem->slice() != newItem->slice())
|
||||
{
|
||||
// Modified item
|
||||
SHAMapLeafNode* leaf = findKey(key);
|
||||
if (leaf && serializeLeaf(*leaf))
|
||||
++nodeCount;
|
||||
}
|
||||
}
|
||||
|
||||
// write a terminal symbol to indicate the map stream has ended
|
||||
auto t = SHAMapNodeType::tnTERMINAL;
|
||||
stream.write(reinterpret_cast<char const*>(&t), 1);
|
||||
localBytesWritten += 1;
|
||||
|
||||
// Check if we should flush without locking
|
||||
if (localBytesWritten >= flushThreshold)
|
||||
{
|
||||
tryFlush(stream);
|
||||
localBytesWritten = 0;
|
||||
}
|
||||
|
||||
// Update the global counter at the end (with lock)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(streamMapMutex);
|
||||
auto& streamData =
|
||||
streamBytesWritten[static_cast<void*>(&stream)];
|
||||
streamData.first = localBytesWritten;
|
||||
streamData.second = std::chrono::steady_clock::now();
|
||||
}
|
||||
|
||||
return nodeCount;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// Maps are identical, nothing to write
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise walk the entire tree and serialize all leaf nodes
|
||||
std::function<void(SHAMapTreeNode const&, SHAMapNodeID const&)> walkTree =
|
||||
[&](SHAMapTreeNode const& node, SHAMapNodeID const& nodeID) {
|
||||
if (node.isLeaf())
|
||||
{
|
||||
auto const& leaf = static_cast<SHAMapLeafNode const&>(node);
|
||||
auto const& hash = leaf.getHash();
|
||||
|
||||
// Avoid duplicates
|
||||
if (writtenNodes.insert(hash).second)
|
||||
{
|
||||
if (serializeLeaf(leaf))
|
||||
++nodeCount;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// It's an inner node, process its children
|
||||
auto const& inner = static_cast<SHAMapInnerNode const&>(node);
|
||||
for (int i = 0; i < branchFactor; ++i)
|
||||
{
|
||||
if (!inner.isEmptyBranch(i))
|
||||
{
|
||||
auto const& childHash = inner.getChildHash(i);
|
||||
|
||||
// Skip already written nodes
|
||||
if (writtenNodes.find(childHash) != writtenNodes.end())
|
||||
continue;
|
||||
|
||||
auto childNode =
|
||||
descendThrow(const_cast<SHAMapInnerNode*>(&inner), i);
|
||||
if (childNode)
|
||||
{
|
||||
SHAMapNodeID childID = nodeID.getChildNodeID(i);
|
||||
walkTree(*childNode, childID);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Start walking from root
|
||||
walkTree(*root_, SHAMapNodeID());
|
||||
|
||||
// write a terminal symbol to indicate the map stream has ended
|
||||
auto t = SHAMapNodeType::tnTERMINAL;
|
||||
stream.write(reinterpret_cast<char const*>(&t), 1);
|
||||
localBytesWritten += 1;
|
||||
|
||||
// Check if we should flush one last time without locking
|
||||
if (localBytesWritten >= flushThreshold)
|
||||
{
|
||||
tryFlush(stream);
|
||||
localBytesWritten = 0;
|
||||
}
|
||||
|
||||
// Update the global counter at the end (with lock)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(streamMapMutex);
|
||||
auto& streamData = streamBytesWritten[static_cast<void*>(&stream)];
|
||||
streamData.first = localBytesWritten;
|
||||
streamData.second = std::chrono::steady_clock::now();
|
||||
}
|
||||
|
||||
return nodeCount;
|
||||
}
|
||||
|
||||
template <typename StreamType>
bool
SHAMap::deserializeFromStream(StreamType& stream)
{
    try
    {
        JLOG(journal_.info()) << "Deserialization: Starting to deserialize "
                                 "from stream";

        if (state_ != SHAMapState::Modifying && state_ != SHAMapState::Synching)
            return false;

        if (!root_)
            root_ = std::make_shared<SHAMapInnerNode>(cowid_);

        // Define a lambda to deserialize a leaf node
        auto deserializeLeaf =
            [this, &stream](SHAMapNodeType& nodeType /* out */) -> bool {
            stream.read(reinterpret_cast<char*>(&nodeType), 1);

            if (nodeType == SHAMapNodeType::tnTERMINAL)
            {
                // end of map
                return false;
            }

            uint256 key;
            uint32_t size{0};

            stream.read(reinterpret_cast<char*>(key.data()), 32);

            if (stream.fail())
            {
                JLOG(journal_.error())
                    << "Deserialization: stream stopped unexpectedly "
                    << "while trying to read key of next entry";
                return false;
            }

            if (nodeType == SHAMapNodeType::tnREMOVE)
            {
                // deletion
                if (!hasItem(key))
                {
                    JLOG(journal_.error())
                        << "Deserialization: removal of key " << to_string(key)
                        << " but key is already absent.";
                    return false;
                }
                delItem(key);
                return true;
            }

            stream.read(reinterpret_cast<char*>(&size), 4);

            if (stream.fail())
            {
                JLOG(journal_.error())
                    << "Deserialization: stream stopped unexpectedly"
                    << " while trying to read size of data for key "
                    << to_string(key);
                return false;
            }

            if (size > 1024 * 1024 * 1024)
            {
                JLOG(journal_.error())
                    << "Deserialization: size of " << to_string(key)
                    << " is suspiciously large (" << size
                    << " bytes), bailing.";
                return false;
            }

            std::vector<uint8_t> data;
            data.resize(size);

            stream.read(reinterpret_cast<char*>(data.data()), size);
            if (stream.fail())
            {
                JLOG(journal_.error())
                    << "Deserialization: Unexpected EOF while reading data for "
                    << to_string(key);
                return false;
            }

            auto item = make_shamapitem(key, makeSlice(data));
            if (hasItem(key))
                return updateGiveItem(nodeType, std::move(item));

            return addGiveItem(nodeType, std::move(item));
        };

        // Value-initialize so the comparison below is well-defined even if
        // the stream is already at EOF and deserializeLeaf never runs.
        SHAMapNodeType lastParsed{};
        while (!stream.eof() && deserializeLeaf(lastParsed))
            ;

        if (lastParsed != SHAMapNodeType::tnTERMINAL)
        {
            JLOG(journal_.error())
                << "Deserialization: Unexpected EOF, terminal node not found.";
            return false;
        }

        // Flush any dirty nodes and update hashes
        flushDirty(hotUNKNOWN);

        return true;
    }
    catch (std::exception const& e)
    {
        JLOG(journal_.error())
            << "Exception during deserialization: " << e.what();
        return false;
    }
}

// explicit instantiation of templates for rpc::Catalogue

using FilteringInputStream = boost::iostreams::filtering_stream<
    boost::iostreams::input,
    char,
    std::char_traits<char>,
    std::allocator<char>,
    boost::iostreams::public_>;

template bool
SHAMap::deserializeFromStream<FilteringInputStream>(FilteringInputStream&);

using FilteringOutputStream = boost::iostreams::filtering_stream<
    boost::iostreams::output,
    char,
    std::char_traits<char>,
    std::allocator<char>,
    boost::iostreams::public_>;

template std::size_t
SHAMap::serializeToStream<FilteringOutputStream>(
    FilteringOutputStream&,
    std::optional<std::reference_wrapper<const SHAMap>> baseSHAMap) const;

} // namespace ripple
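
The FilteringInputStream/FilteringOutputStream instantiations above are the stream types the catalogue RPC handlers are expected to drive. The snippet below is a minimal sketch, not the actual Catalogue.cpp implementation, of how a caller could wire a zlib-compressed file onto serializeToStream via boost::iostreams; the helper name, its parameters, and the assumption that serializeToStream is publicly callable are all illustrative.

    #include <ripple/shamap/SHAMap.h>
    #include <boost/iostreams/filter/zlib.hpp>
    #include <boost/iostreams/filtering_stream.hpp>
    #include <fstream>
    #include <optional>
    #include <string>

    namespace io = boost::iostreams;

    // Hypothetical helper: serialize one SHAMap to an (optionally
    // zlib-compressed) file using the FilteringOutputStream type
    // instantiated above.
    std::size_t
    writeStateMapSketch(
        ripple::SHAMap const& state,
        std::string const& path,
        int compressionLevel)
    {
        std::ofstream file(path, std::ios::binary);
        io::filtering_stream<io::output> out;
        if (compressionLevel > 0)
            out.push(io::zlib_compressor(io::zlib_params(compressionLevel)));
        out.push(file);

        // No base map supplied, so the full tree is serialized.
        return state.serializeToStream(out, std::nullopt);
    }
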
865 src/test/rpc/Catalogue_test.cpp Normal file
@@ -0,0 +1,865 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2012-2017 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/beast/unit_test.h>
#include <ripple/protocol/jss.h>
#include <boost/filesystem.hpp>
#include <chrono>
#include <fstream>
#include <test/jtx.h>
#include <thread>

namespace ripple {

#pragma pack(push, 1)  // pack the struct tightly
struct TestCATLHeader
{
    uint32_t magic = 0x4C544143UL;
    uint32_t min_ledger;
    uint32_t max_ledger;
    uint16_t version;
    uint16_t network_id;
    uint64_t filesize = 0;  // Total size of the file including header
    std::array<uint8_t, 64> hash = {};  // SHA-512 hash, initially set to zeros
};
#pragma pack(pop)

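// The tests below depend on this packed layout when they corrupt specific
// header fields via sizeof/offsetof. The helper below is an illustrative
// sketch only -- it is not used by the tests and is not the server-side
// validation done by the catalogue RPC handlers -- showing how the header
// could be read back and its magic value sanity-checked.
inline bool
readTestCatalogueHeader(std::string const& path, TestCATLHeader& header)
{
    std::ifstream in(path, std::ios::binary);
    if (!in.read(reinterpret_cast<char*>(&header), sizeof(header)))
        return false;
    // 0x4C544143 is "CATL" in little-endian byte order
    return header.magic == 0x4C544143UL;
}
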
class Catalogue_test : public beast::unit_test::suite
{
    // Helper to create test ledger data with complex state changes
    void
    prepareLedgerData(test::jtx::Env& env, int numLedgers)
    {
        using namespace test::jtx;
        Account alice{"alice"};
        Account bob{"bob"};
        Account charlie{"charlie"};

        env.fund(XRP(10000), alice, bob, charlie);
        env.close();

        // Set up trust lines and issue currency
        env(trust(bob, alice["USD"](1000)));
        env(trust(charlie, bob["EUR"](1000)));
        env.close();

        env(pay(alice, bob, alice["USD"](500)));
        env.close();

        // Create and remove an offer to test state deletion
        env(offer(bob, XRP(50), alice["USD"](1)));
        auto offerSeq =
            env.seq(bob) - 1;  // Get the sequence of the offer we just created
        env.close();

        // Cancel the offer
        env(offer_cancel(bob, offerSeq));
        env.close();

        // Create another offer with same account
        env(offer(bob, XRP(60), alice["USD"](2)));
        env.close();

        // Create a trust line and then remove it
        env(trust(charlie, bob["EUR"](1000)));
        env.close();
        env(trust(charlie, bob["EUR"](0)));
        env.close();

        // Recreate the same trust line
        env(trust(charlie, bob["EUR"](2000)));
        env.close();

        // Additional ledgers with various transactions
        for (int i = 0; i < numLedgers; ++i)
        {
            env(pay(alice, bob, XRP(100)));
            env(offer(bob, XRP(50), alice["USD"](1)));
            env.close();
        }
    }

    void
    testCatalogueCreateBadInput(FeatureBitset features)
    {
        testcase("catalogue_create: Invalid parameters");
        using namespace test::jtx;
        Env env{
            *this, envconfig(), features, nullptr, beast::severities::kInfo};

        // No parameters
        {
            auto const result =
                env.client().invoke("catalogue_create", {})[jss::result];
            BEAST_EXPECT(result[jss::error] == "invalidParams");
            BEAST_EXPECT(result[jss::status] == "error");
        }

        // Missing min_ledger
        {
            Json::Value params{Json::objectValue};
            params[jss::max_ledger] = 20;
            params[jss::output_file] = "/tmp/test.catl";
            auto const result =
                env.client().invoke("catalogue_create", params)[jss::result];
            BEAST_EXPECT(result[jss::error] == "invalidParams");
            BEAST_EXPECT(result[jss::status] == "error");
        }

        // Missing max_ledger
        {
            Json::Value params{Json::objectValue};
            params[jss::min_ledger] = 10;
            params[jss::output_file] = "/tmp/test.catl";
            auto const result =
                env.client().invoke("catalogue_create", params)[jss::result];
            BEAST_EXPECT(result[jss::error] == "invalidParams");
            BEAST_EXPECT(result[jss::status] == "error");
        }

        // Missing output_file
        {
            Json::Value params{Json::objectValue};
            params[jss::min_ledger] = 10;
            params[jss::max_ledger] = 20;
            auto const result =
                env.client().invoke("catalogue_create", params)[jss::result];
            BEAST_EXPECT(result[jss::error] == "invalidParams");
            BEAST_EXPECT(result[jss::status] == "error");
        }

        // Invalid output path (not absolute)
        {
            Json::Value params{Json::objectValue};
            params[jss::min_ledger] = 10;
            params[jss::max_ledger] = 20;
            params[jss::output_file] = "test.catl";
            auto const result =
                env.client().invoke("catalogue_create", params)[jss::result];
            BEAST_EXPECT(result[jss::error] == "invalidParams");
            BEAST_EXPECT(result[jss::status] == "error");
        }

        // min_ledger > max_ledger
        {
            Json::Value params{Json::objectValue};
            params[jss::min_ledger] = 20;
            params[jss::max_ledger] = 10;
            params[jss::output_file] = "/tmp/test.catl";
            auto const result =
                env.client().invoke("catalogue_create", params)[jss::result];
            BEAST_EXPECT(result[jss::error] == "invalidParams");
            BEAST_EXPECT(result[jss::status] == "error");
        }
    }

    void
    testCatalogueCreate(FeatureBitset features)
    {
        testcase("catalogue_create: Basic functionality");
        using namespace test::jtx;

        // Create environment and some test ledgers
        Env env{
            *this, envconfig(), features, nullptr, beast::severities::kInfo};
        prepareLedgerData(env, 5);

        boost::filesystem::path tempDir =
            boost::filesystem::temp_directory_path() /
            boost::filesystem::unique_path();
        boost::filesystem::create_directories(tempDir);

        auto cataloguePath = (tempDir / "test.catl").string();

        // Create catalogue
        Json::Value params{Json::objectValue};
        params[jss::min_ledger] = 3;
        params[jss::max_ledger] = 5;
        params[jss::output_file] = cataloguePath;

        auto const result =
            env.client().invoke("catalogue_create", params)[jss::result];

        BEAST_EXPECT(result[jss::status] == jss::success);
        BEAST_EXPECT(result[jss::min_ledger] == 3);
        BEAST_EXPECT(result[jss::max_ledger] == 5);
        BEAST_EXPECT(result[jss::output_file] == cataloguePath);
        BEAST_EXPECT(result[jss::file_size].asUInt() > 0);
        BEAST_EXPECT(result[jss::ledgers_written].asUInt() == 3);

        // Verify file exists and is not empty
        BEAST_EXPECT(boost::filesystem::exists(cataloguePath));
        BEAST_EXPECT(boost::filesystem::file_size(cataloguePath) > 0);

        boost::filesystem::remove_all(tempDir);
    }

    void
    testCatalogueLoadBadInput(FeatureBitset features)
    {
        testcase("catalogue_load: Invalid parameters");
        using namespace test::jtx;
        Env env{
            *this, envconfig(), features, nullptr, beast::severities::kInfo};

        // No parameters
        {
            auto const result =
                env.client().invoke("catalogue_load", {})[jss::result];
            BEAST_EXPECT(result[jss::error] == "invalidParams");
            BEAST_EXPECT(result[jss::status] == "error");
        }

        // Missing input_file
        {
            Json::Value params{Json::objectValue};
            auto const result =
                env.client().invoke("catalogue_load", params)[jss::result];
            BEAST_EXPECT(result[jss::error] == "invalidParams");
            BEAST_EXPECT(result[jss::status] == "error");
        }

        // Invalid input path (not absolute)
        {
            Json::Value params{Json::objectValue};
            params[jss::input_file] = "test.catl";
            auto const result =
                env.client().invoke("catalogue_load", params)[jss::result];
            BEAST_EXPECT(result[jss::error] == "invalidParams");
            BEAST_EXPECT(result[jss::status] == "error");
        }

        // Non-existent file
        {
            Json::Value params{Json::objectValue};
            params[jss::input_file] = "/tmp/nonexistent.catl";
            auto const result =
                env.client().invoke("catalogue_load", params)[jss::result];
            BEAST_EXPECT(result[jss::error] == "internal");
            BEAST_EXPECT(result[jss::status] == "error");
        }
    }

    void
    testCatalogueLoadAndVerify(FeatureBitset features)
    {
        testcase("catalogue_load: Load and verify");
        using namespace test::jtx;

        // Create environment and test data
        Env env{
            *this, envconfig(), features, nullptr, beast::severities::kInfo};
        prepareLedgerData(env, 5);

        // Store some key state information before catalogue creation
        auto const sourceLedger = env.closed();
        auto const bobKeylet = keylet::account(Account("bob").id());
        auto const charlieKeylet = keylet::account(Account("charlie").id());
        auto const eurTrustKeylet = keylet::line(
            Account("charlie").id(),
            Account("bob").id(),
            Currency(to_currency("EUR")));

        // Get original state entries
        auto const bobAcct = sourceLedger->read(bobKeylet);
        auto const charlieAcct = sourceLedger->read(charlieKeylet);
        auto const eurTrust = sourceLedger->read(eurTrustKeylet);

        BEAST_EXPECT(bobAcct != nullptr);
        BEAST_EXPECT(charlieAcct != nullptr);
        BEAST_EXPECT(eurTrust != nullptr);

        BEAST_EXPECT(
            eurTrust->getFieldAmount(sfLowLimit).mantissa() ==
            2000000000000000ULL);

        // Get initial complete_ledgers range
        auto const originalCompleteLedgers =
            env.app().getLedgerMaster().getCompleteLedgers();

        // Create temporary directory for test files
        boost::filesystem::path tempDir =
            boost::filesystem::temp_directory_path() /
            boost::filesystem::unique_path();
        boost::filesystem::create_directories(tempDir);

        auto cataloguePath = (tempDir / "test.catl").string();

        // First create a catalogue
        uint32_t minLedger = 3;
        uint32_t maxLedger = sourceLedger->info().seq;
        {
            Json::Value params{Json::objectValue};
            params[jss::min_ledger] = minLedger;
            params[jss::max_ledger] = maxLedger;
            params[jss::output_file] = cataloguePath;

            auto const result =
                env.client().invoke("catalogue_create", params)[jss::result];
            BEAST_EXPECT(result[jss::status] == jss::success);
        }

        // Create a new environment for loading with unique port
        Env loadEnv{
            *this,
            test::jtx::envconfig(test::jtx::port_increment, 3),
            features,
            nullptr,
            beast::severities::kInfo};

        // Now load the catalogue
        Json::Value params{Json::objectValue};
        params[jss::input_file] = cataloguePath;

        auto const result =
            loadEnv.client().invoke("catalogue_load", params)[jss::result];

        BEAST_EXPECT(result[jss::status] == jss::success);
        BEAST_EXPECT(result[jss::ledger_min] == minLedger);
        BEAST_EXPECT(result[jss::ledger_max] == maxLedger);
        BEAST_EXPECT(result[jss::ledger_count] == (maxLedger - minLedger + 1));

        // Verify complete_ledgers reflects loaded ledgers
        auto const newCompleteLedgers =
            loadEnv.app().getLedgerMaster().getCompleteLedgers();

        BEAST_EXPECT(newCompleteLedgers == originalCompleteLedgers);

        // Verify the loaded state matches the original
        auto const loadedLedger = loadEnv.closed();

        // After loading, compare all ledgers from 3 to 16 inclusive
        for (std::uint32_t seq = 3; seq <= 16; ++seq)
        {
            auto const sourceLedger =
                env.app().getLedgerMaster().getLedgerByHash(
                    env.app().getLedgerMaster().getHashBySeq(seq));

            auto const loadedLedger =
                loadEnv.app().getLedgerMaster().getLedgerByHash(
                    loadEnv.app().getLedgerMaster().getHashBySeq(seq));

            if (!sourceLedger || !loadedLedger)
            {
                BEAST_EXPECT(false);  // Test failure
                continue;
            }

            // Check basic ledger properties
            BEAST_EXPECT(sourceLedger->info().seq == loadedLedger->info().seq);
            BEAST_EXPECT(
                sourceLedger->info().hash == loadedLedger->info().hash);
            BEAST_EXPECT(
                sourceLedger->info().txHash == loadedLedger->info().txHash);
            BEAST_EXPECT(
                sourceLedger->info().accountHash ==
                loadedLedger->info().accountHash);
            BEAST_EXPECT(
                sourceLedger->info().parentHash ==
                loadedLedger->info().parentHash);
            BEAST_EXPECT(
                sourceLedger->info().drops == loadedLedger->info().drops);

            // Check time-related properties
            BEAST_EXPECT(
                sourceLedger->info().closeFlags ==
                loadedLedger->info().closeFlags);
            BEAST_EXPECT(
                sourceLedger->info().closeTimeResolution.count() ==
                loadedLedger->info().closeTimeResolution.count());
            BEAST_EXPECT(
                sourceLedger->info().closeTime.time_since_epoch().count() ==
                loadedLedger->info().closeTime.time_since_epoch().count());
            BEAST_EXPECT(
                sourceLedger->info()
                    .parentCloseTime.time_since_epoch()
                    .count() ==
                loadedLedger->info()
                    .parentCloseTime.time_since_epoch()
                    .count());

            // Check validation state
            BEAST_EXPECT(
                sourceLedger->info().validated ==
                loadedLedger->info().validated);
            BEAST_EXPECT(
                sourceLedger->info().accepted == loadedLedger->info().accepted);

            // Check SLE counts
            std::size_t sourceCount = 0;
            std::size_t loadedCount = 0;

            for (auto const& sle : sourceLedger->sles)
            {
                sourceCount++;
            }

            for (auto const& sle : loadedLedger->sles)
            {
                loadedCount++;
            }

            BEAST_EXPECT(sourceCount == loadedCount);

            // Check existence of imported keylets
            for (auto const& sle : sourceLedger->sles)
            {
                auto const key = sle->key();
                bool exists = loadedLedger->exists(keylet::unchecked(key));
                BEAST_EXPECT(exists);

                // If it exists, check the serialized form matches
                if (exists)
                {
                    auto loadedSle = loadedLedger->read(keylet::unchecked(key));
                    Serializer s1, s2;
                    sle->add(s1);
                    loadedSle->add(s2);
                    bool serializedEqual = (s1.peekData() == s2.peekData());
                    BEAST_EXPECT(serializedEqual);
                }
            }

            // Check for extra keys in loaded ledger that aren't in source
            for (auto const& sle : loadedLedger->sles)
            {
                auto const key = sle->key();
                BEAST_EXPECT(sourceLedger->exists(keylet::unchecked(key)));
            }
        }

        auto const loadedBobAcct = loadedLedger->read(bobKeylet);
        auto const loadedCharlieAcct = loadedLedger->read(charlieKeylet);
        auto const loadedEurTrust = loadedLedger->read(eurTrustKeylet);

        BEAST_EXPECT(!!loadedBobAcct);
        BEAST_EXPECT(!!loadedCharlieAcct);
        BEAST_EXPECT(!!loadedEurTrust);

        // Compare the serialized forms of the state objects
        bool const loaded =
            loadedBobAcct && loadedCharlieAcct && loadedEurTrust;

        Serializer s1, s2;
        if (loaded)
        {
            bobAcct->add(s1);
            loadedBobAcct->add(s2);
        }
        BEAST_EXPECT(loaded && s1.peekData() == s2.peekData());

        if (loaded)
        {
            s1.erase();
            s2.erase();
            charlieAcct->add(s1);
            loadedCharlieAcct->add(s2);
        }
        BEAST_EXPECT(loaded && s1.peekData() == s2.peekData());

        if (loaded)
        {
            s1.erase();
            s2.erase();
            eurTrust->add(s1);
            loadedEurTrust->add(s2);
        }

        BEAST_EXPECT(loaded && s1.peekData() == s2.peekData());

        // Verify trust line amount matches
        BEAST_EXPECT(
            loaded &&
            loadedEurTrust->getFieldAmount(sfLowLimit).mantissa() ==
                2000000000000000ULL);

        boost::filesystem::remove_all(tempDir);
    }

    void
    testNetworkMismatch(FeatureBitset features)
    {
        testcase("catalogue_load: Network ID mismatch");
        using namespace test::jtx;

        boost::filesystem::path tempDir =
            boost::filesystem::temp_directory_path() /
            boost::filesystem::unique_path();
        boost::filesystem::create_directories(tempDir);

        auto cataloguePath = (tempDir / "test.catl").string();

        // Create environment with different network IDs
        {
            Env env1{
                *this,
                envconfig([](std::unique_ptr<Config> cfg) {
                    cfg->NETWORK_ID = 123;
                    return cfg;
                }),
                features,
                nullptr,
                beast::severities::kInfo};
            prepareLedgerData(env1, 5);

            // Create catalogue with network ID 123
            {
                Json::Value params{Json::objectValue};
                params[jss::min_ledger] = 3;
                params[jss::max_ledger] = 5;
                params[jss::output_file] = cataloguePath;

                auto const result = env1.client().invoke(
                    "catalogue_create", params)[jss::result];
                BEAST_EXPECT(result[jss::status] == jss::success);
            }
        }

        {
            // Try to load catalogue in environment with different network ID
            Env env2{
                *this,
                envconfig([](std::unique_ptr<Config> cfg) {
                    cfg->NETWORK_ID = 456;
                    return cfg;
                }),
                features,
                nullptr,
                beast::severities::kInfo};

            {
                Json::Value params{Json::objectValue};
                params[jss::input_file] = cataloguePath;

                auto const result =
                    env2.client().invoke("catalogue_load", params)[jss::result];

                BEAST_EXPECT(result[jss::error] == "invalidParams");
                BEAST_EXPECT(result[jss::status] == "error");
            }
        }
        boost::filesystem::remove_all(tempDir);
    }

    void
    testCatalogueHashVerification(FeatureBitset features)
    {
        testcase("catalogue_load: Hash verification");
        using namespace test::jtx;

        // Create environment and test data
        Env env{
            *this, envconfig(), features, nullptr, beast::severities::kInfo};
        prepareLedgerData(env, 3);

        boost::filesystem::path tempDir =
            boost::filesystem::temp_directory_path() /
            boost::filesystem::unique_path();
        boost::filesystem::create_directories(tempDir);

        auto cataloguePath = (tempDir / "test.catl").string();

        // Create catalogue
        {
            Json::Value params{Json::objectValue};
            params[jss::min_ledger] = 3;
            params[jss::max_ledger] = 5;
            params[jss::output_file] = cataloguePath;

            auto const result =
                env.client().invoke("catalogue_create", params)[jss::result];
            BEAST_EXPECT(result[jss::status] == jss::success);
            BEAST_EXPECT(result.isMember(jss::hash));
            std::string originalHash = result[jss::hash].asString();
            BEAST_EXPECT(!originalHash.empty());
        }

        // Test 1: Successful hash verification (normal load)
        {
            Json::Value params{Json::objectValue};
            params[jss::input_file] = cataloguePath;

            auto const result =
                env.client().invoke("catalogue_load", params)[jss::result];
            BEAST_EXPECT(result[jss::status] == jss::success);
            BEAST_EXPECT(result.isMember(jss::hash));
        }

        // Test 2: Corrupt the file and test hash mismatch detection
        {
            // Modify a byte in the middle of the file to cause hash mismatch
            std::fstream file(
                cataloguePath, std::ios::in | std::ios::out | std::ios::binary);
            BEAST_EXPECT(file.good());

            // Skip header and modify a byte
            file.seekp(sizeof(TestCATLHeader) + 100, std::ios::beg);
            char byte = 0xFF;
            file.write(&byte, 1);
            file.close();

            // Try to load the corrupted file
            Json::Value params{Json::objectValue};
            params[jss::input_file] = cataloguePath;

            auto const result =
                env.client().invoke("catalogue_load", params)[jss::result];
            BEAST_EXPECT(result[jss::status] == "error");
            BEAST_EXPECT(result[jss::error] == "invalidParams");
            BEAST_EXPECT(
                result[jss::error_message].asString().find(
                    "hash verification failed") != std::string::npos);
        }

        // Test 3: Test ignore_hash parameter
        {
            Json::Value params{Json::objectValue};
            params[jss::input_file] = cataloguePath;
            params[jss::ignore_hash] = true;

            auto const result =
                env.client().invoke("catalogue_load", params)[jss::result];
            // This might still fail due to data corruption, but not because of
            // hash verification. The important part is that it didn't
            // immediately reject due to the hash check.
            if (result[jss::status] == "error")
            {
                BEAST_EXPECT(
                    result[jss::error_message].asString().find(
                        "hash verification failed") == std::string::npos);
            }
        }

        boost::filesystem::remove_all(tempDir);
    }

    void
    testCatalogueFileSize(FeatureBitset features)
    {
        testcase("catalogue_load: File size verification");
        using namespace test::jtx;

        // Create environment and test data
        Env env{
            *this, envconfig(), features, nullptr, beast::severities::kInfo};
        prepareLedgerData(env, 3);

        boost::filesystem::path tempDir =
            boost::filesystem::temp_directory_path() /
            boost::filesystem::unique_path();
        boost::filesystem::create_directories(tempDir);

        auto cataloguePath = (tempDir / "test.catl").string();

        // Create catalogue
        {
            Json::Value params{Json::objectValue};
            params[jss::min_ledger] = 3;
            params[jss::max_ledger] = 5;
            params[jss::output_file] = cataloguePath;

            auto const result =
                env.client().invoke("catalogue_create", params)[jss::result];
            BEAST_EXPECT(result[jss::status] == jss::success);
            BEAST_EXPECT(result.isMember(jss::file_size));
            uint64_t originalSize = result[jss::file_size].asUInt();
            BEAST_EXPECT(originalSize > 0);
        }

        // Test 1: Successful file size verification (normal load)
        {
            Json::Value params{Json::objectValue};
            params[jss::input_file] = cataloguePath;

            auto const result =
                env.client().invoke("catalogue_load", params)[jss::result];
            BEAST_EXPECT(result[jss::status] == jss::success);
            BEAST_EXPECT(result.isMember(jss::file_size));
        }

        // Test 2: Modify file size in header to cause mismatch
        {
            // Modify the filesize in the header to cause mismatch
            std::fstream file(
                cataloguePath, std::ios::in | std::ios::out | std::ios::binary);
            BEAST_EXPECT(file.good());

            file.seekp(offsetof(TestCATLHeader, filesize), std::ios::beg);
            uint64_t wrongSize = 12345;  // Some arbitrary wrong size
            file.write(
                reinterpret_cast<const char*>(&wrongSize), sizeof(wrongSize));
            file.close();

            // Try to load the modified file
            Json::Value params{Json::objectValue};
            params[jss::input_file] = cataloguePath;

            auto const result =
                env.client().invoke("catalogue_load", params)[jss::result];
            BEAST_EXPECT(result[jss::status] == "error");
            BEAST_EXPECT(result[jss::error] == "invalidParams");
            BEAST_EXPECT(
                result[jss::error_message].asString().find(
                    "file size mismatch") != std::string::npos);
        }

        boost::filesystem::remove_all(tempDir);
    }

    void
    testCatalogueCompression(FeatureBitset features)
    {
        testcase("catalogue: Compression levels");
        using namespace test::jtx;

        // Create environment and test data
        Env env{
            *this, envconfig(), features, nullptr, beast::severities::kInfo};
        prepareLedgerData(env, 5);

        boost::filesystem::path tempDir =
            boost::filesystem::temp_directory_path() /
            boost::filesystem::unique_path();
        boost::filesystem::create_directories(tempDir);

        std::vector<std::pair<std::string, Json::Value>> compressionTests = {
            {"no_compression", Json::Value(0)},       // Level 0 (none)
            {"min_compression", Json::Value(1)},      // Level 1 (minimal)
            {"default_compression", Json::Value(6)},  // Level 6 (default)
            {"max_compression", Json::Value(9)},      // Level 9 (maximum)
            {"boolean_true_compression",
             Json::Value(true)}  // Boolean true (should use default level 6)
        };

        uint64_t prevSize = 0;
        for (const auto& test : compressionTests)
        {
            std::string testName = test.first;
            Json::Value compressionLevel = test.second;

            auto cataloguePath = (tempDir / (testName + ".catl")).string();

            // Create catalogue with specific compression level
            Json::Value createParams{Json::objectValue};
            createParams[jss::min_ledger] = 3;
            createParams[jss::max_ledger] = 10;
            createParams[jss::output_file] = cataloguePath;
            createParams[jss::compression_level] = compressionLevel;

            auto createResult = env.client().invoke(
                "catalogue_create", createParams)[jss::result];

            BEAST_EXPECT(createResult[jss::status] == jss::success);

            uint64_t fileSize = createResult[jss::file_size].asUInt();
            BEAST_EXPECT(fileSize > 0);

            // Load the catalogue to verify it works
            Json::Value loadParams{Json::objectValue};
            loadParams[jss::input_file] = cataloguePath;

            auto loadResult =
                env.client().invoke("catalogue_load", loadParams)[jss::result];
            BEAST_EXPECT(loadResult[jss::status] == jss::success);

            // For levels > 0, verify size is smaller than uncompressed (or at
            // least not larger)
            if (prevSize > 0 && compressionLevel.asUInt() > 0)
            {
                BEAST_EXPECT(fileSize <= prevSize);
            }

            // Store size for comparison with next level
            if (compressionLevel.asUInt() == 0)
            {
                prevSize = fileSize;
            }

            // Verify compression level in response
            if (compressionLevel.isBool() && compressionLevel.asBool())
            {
                BEAST_EXPECT(
                    createResult[jss::compression_level].asUInt() == 6);
            }
            else
            {
                BEAST_EXPECT(
                    createResult[jss::compression_level].asUInt() ==
                    compressionLevel.asUInt());
            }
        }

        boost::filesystem::remove_all(tempDir);
    }

    void
    testCatalogueStatus(FeatureBitset features)
    {
        testcase("catalogue_status: Status reporting");
        using namespace test::jtx;

        // Create environment
        Env env{
            *this, envconfig(), features, nullptr, beast::severities::kInfo};

        boost::filesystem::path tempDir =
            boost::filesystem::temp_directory_path() /
            boost::filesystem::unique_path();
        boost::filesystem::create_directories(tempDir);

        auto cataloguePath = (tempDir / "test.catl").string();

        // Test 1: Check status when no job is running
        {
            auto result = env.client().invoke(
                "catalogue_status", Json::objectValue)[jss::result];
            std::cout << to_string(result) << "\n";
            BEAST_EXPECT(result[jss::job_status] == "no_job_running");
        }

        // TODO: add a parallel job test here... if anyone feels that's
        // actually needed

        boost::filesystem::remove_all(tempDir);
    }

public:
    void
    run() override
    {
        using namespace test::jtx;
        FeatureBitset const all{supported_amendments()};
        testCatalogueCreateBadInput(all);
        testCatalogueCreate(all);
        testCatalogueLoadBadInput(all);
        testCatalogueLoadAndVerify(all);
        testNetworkMismatch(all);
        testCatalogueHashVerification(all);
        testCatalogueFileSize(all);
        testCatalogueCompression(all);
        testCatalogueStatus(all);
    }
};

BEAST_DEFINE_TESTSUITE(Catalogue, rpc, ripple);

} // namespace ripple