Compare commits


1 Commit

Author:  Niq Dudfield
SHA1:    8b0be2d2f5
Message: docs(freeze): canceling escrows with deep frozen assets is allowed
Date:    2025-07-09 10:26:24 +07:00
13 changed files with 116 additions and 1341 deletions

View File

@@ -59,8 +59,8 @@ runs:
- name: Export custom recipes
shell: bash
run: |
conan export external/snappy snappy/1.1.10@xahaud/stable
conan export external/soci soci/4.0.3@xahaud/stable
conan export external/snappy snappy/1.1.9@
conan export external/soci soci/4.0.3@
- name: Install dependencies
shell: bash

View File

@@ -1083,10 +1083,6 @@ message(STATUS "Reporting mode build: rippled renamed ${BIN_NAME}")
target_compile_definitions(rippled PRIVATE RIPPLED_REPORTING)
endif()
# Always define IS_XAHAUD=1 for xahaud builds
target_compile_definitions(rippled PRIVATE IS_XAHAUD=1)
message(STATUS "Building with IS_XAHAUD=1")
# any files that don't play well with unity should be added here
if (tests)
set_source_files_properties(

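The removed block above is what injected IS_XAHAUD=1 into every xahaud compile. A minimal sketch of the dual-target gating that define enabled, with include paths taken from the catalogue diff later on this page (the surrounding context is assumed):

#if IS_XAHAUD  // defined to 1 by the removed CMake block
#include <ripple/app/ledger/Ledger.h>  // xahaud include layout
#else
#include <xrpld/app/ledger/Ledger.h>  // rippled include layout
#endif
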
View File

@@ -32,8 +32,8 @@ class Xrpl(ConanFile):
'nudb/2.0.8',
'openssl/1.1.1u',
'protobuf/3.21.9',
'snappy/1.1.10@xahaud/stable',
'soci/4.0.3@xahaud/stable',
'snappy/1.1.10',
'soci/4.0.3',
'sqlite3/3.42.0',
'zlib/1.2.13',
'wasmedge/0.11.2',

View File

@@ -154,7 +154,7 @@ class SociConan(ConanFile):
self.cpp_info.components["soci_core"].set_property("cmake_target_name", "SOCI::soci_core{}".format(target_suffix))
self.cpp_info.components["soci_core"].libs = ["{}soci_core{}".format(lib_prefix, lib_suffix)]
if self.options.with_boost:
self.cpp_info.components["soci_core"].requires.append("boost::headers")
self.cpp_info.components["soci_core"].requires.append("boost::boost")
# soci_empty
if self.options.empty:

View File

@@ -342,7 +342,8 @@ getTransactionalStakeHolders(STTx const& tx, ReadView const& rv)
case ttOFFER_CANCEL:
case ttTICKET_CREATE:
case ttHOOK_SET:
case ttOFFER_CREATE: {
case ttOFFER_CREATE: // this is handled separately
{
break;
}

View File

@@ -1277,8 +1277,7 @@ CreateOffer::doApply()
if (result.second)
{
sb.apply(ctx_.rawView());
if (!view().rules().enabled(featureIOUIssuerWeakTSH))
addWeakTSHFromBalanceChanges(sb);
addWeakTSHFromSandbox(sb);
}
else
sbCancel.apply(ctx_.rawView());

View File

@@ -425,8 +425,7 @@ Payment::doApply()
// on the TER. But always applying *should*
// be safe.
pv.apply(ctx_.rawView());
if (!view().rules().enabled(featureIOUIssuerWeakTSH))
addWeakTSHFromBalanceChanges(pv);
addWeakTSHFromSandbox(pv);
}
// TODO: is this right? If the amount is the correct amount, was

View File

@@ -1477,15 +1477,15 @@ Transactor::doHookCallback(
}
void
Transactor::addWeakTSHFromBalanceChanges(detail::ApplyViewBase const& pv)
Transactor::addWeakTSHFromSandbox(detail::ApplyViewBase const& pv)
{
// If Hooks are enabled then non-issuers who have their TL balance
// modified by the execution of the transaction have the opportunity to have
// their weak hooks executed.
// modified by the execution of the path have the opportunity to have their
// weak hooks executed.
if (ctx_.view().rules().enabled(featureHooks))
{
// anyone whose balance changed as a result of transaction processing is
// a weak TSH
// anyone whose balance changed as a result of this Pathing is a weak
// TSH
auto bc = pv.balanceChanges(view());
for (auto const& entry : bc)
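
The hunk is cut off here, but the comments describe the mechanism: every account whose trust-line balance changed during processing becomes a weak TSH. A self-contained analogue of that pattern, using hypothetical stand-in types rather than ripple's (the real loop body is not shown in this diff):

#include <map>
#include <set>
#include <string>
#include <tuple>

// Stand-ins for AccountID / Currency; balanceChanges() in the real code is
// assumed to yield a map keyed by (low account, high account, currency).
using AccountID = std::string;
using Currency = std::string;
using BalanceChanges =
    std::map<std::tuple<AccountID, AccountID, Currency>, long long>;

// Collect both sides of every changed trust line as a weak TSH,
// skipping the issuer and untouched lines.
std::set<AccountID>
weakTSHFromBalanceChanges(BalanceChanges const& bc, AccountID const& issuer)
{
    std::set<AccountID> weak;
    for (auto const& [line, delta] : bc)
    {
        if (delta == 0)
            continue;
        auto const& [low, high, currency] = line;
        if (low != issuer)
            weak.insert(low);
        if (high != issuer)
            weak.insert(high);
    }
    return weak;
}
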
@@ -1506,13 +1506,15 @@ Transactor::addWeakTSHFromBalanceChanges(detail::ApplyViewBase const& pv)
TER
Transactor::doTSH(
bool strong, // only strong iff true, only weak iff false
std::vector<std::pair<AccountID, bool>> tsh,
hook::HookStateMap& stateMap,
std::vector<hook::HookResult>& results,
std::shared_ptr<STObject const> const& provisionalMeta)
{
auto& view = ctx_.view();
std::vector<std::pair<AccountID, bool>> tsh =
hook::getTransactionalStakeHolders(ctx_.tx, view);
// add the extra TSH marked out by the specific transactor (if applicable)
if (!strong)
for (auto& weakTsh : additionalWeakTSH_)
@@ -1770,9 +1772,6 @@ Transactor::operator()()
// application to the ledger
std::map<AccountID, std::set<uint256>> aawMap;
std::vector<std::pair<AccountID, bool>> tsh =
hook::getTransactionalStakeHolders(ctx_.tx, ctx_.view());
// Pre-application (Strong TSH) Hooks are executed here
// These TSH have the right to rollback.
// Weak TSH and callback are executed post-application.
@@ -1801,7 +1800,7 @@ Transactor::operator()()
// (who have the right to rollback the txn), any weak TSH will be
// executed after doApply has been successful (callback as well)
result = doTSH(true, tsh, stateMap, hookResults, {});
result = doTSH(true, stateMap, hookResults, {});
}
// write state if all chains executed successfully
@@ -2055,23 +2054,7 @@ Transactor::operator()()
hook::HookStateMap stateMap;
std::vector<hook::HookResult> weakResults;
if (view().rules().enabled(featureIOUIssuerWeakTSH))
{
// Regardless of the transaction type, if the result changes the
// trust line balance, add high and low accounts to weakTSH.
ApplyViewImpl& avi = dynamic_cast<ApplyViewImpl&>(ctx_.view());
addWeakTSHFromBalanceChanges(avi);
}
if (!view().rules().enabled(featureIOUIssuerWeakTSH))
{
// before the amendment is enabled, we need to get TSHs after basic
// txn processing. If the object is deleted in a cancel txn, it may
// not be possible to obtain the appropriate TSH.
tsh = hook::getTransactionalStakeHolders(ctx_.tx, ctx_.view());
}
doTSH(false, tsh, stateMap, weakResults, proMeta);
doTSH(false, stateMap, weakResults, proMeta);
// execute any hooks that nominated for 'again as weak'
for (auto const& [accID, hookHashes] : aawMap)

View File

@@ -188,7 +188,6 @@ protected:
TER
doTSH(
bool strong, // only do strong TSH iff true, otherwise only weak
std::vector<std::pair<AccountID, bool>> tsh,
hook::HookStateMap& stateMap,
std::vector<hook::HookResult>& result,
std::shared_ptr<STObject const> const& provisionalMeta);
@@ -214,7 +213,7 @@ protected:
std::shared_ptr<STObject const> const& provisionalMeta);
void
addWeakTSHFromBalanceChanges(detail::ApplyViewBase const& pv);
addWeakTSHFromSandbox(detail::ApplyViewBase const& pv);
// hooks amendment fields, these are unpopulated and unused unless
// featureHooks is enabled

View File

@@ -74,7 +74,7 @@ namespace detail {
// Feature.cpp. Because it's only used to reserve storage, and determine how
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
// the actual number of amendments. A LogicError on startup will verify this.
static constexpr std::size_t numFeatures = 85;
static constexpr std::size_t numFeatures = 84;
/** Amendments that this server supports and the default voting behavior.
Whether they are enabled depends on the Rules defined in the validated
@@ -372,7 +372,6 @@ extern uint256 const fixRewardClaimFlags;
extern uint256 const fixProvisionalDoubleThreading;
extern uint256 const featureClawback;
extern uint256 const featureDeepFreeze;
extern uint256 const featureIOUIssuerWeakTSH;
} // namespace ripple

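The comment above promises that over-registering amendments fails fast at startup. A minimal self-contained sketch of that invariant check (an assumed shape; the real verification lives in Feature.cpp and raises ripple::LogicError):

#include <cstddef>
#include <stdexcept>

static constexpr std::size_t numFeatures = 84;  // value after this diff

// Each registration claims the next bit index in the FeatureBitset;
// exceeding the reserved storage is a fatal logic error.
inline std::size_t
registerFeatureBit(std::size_t& registered)
{
    if (registered >= numFeatures)
        throw std::logic_error("more amendments defined than numFeatures");
    return registered++;
}
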
View File

@@ -478,7 +478,6 @@ REGISTER_FIX (fixRewardClaimFlags, Supported::yes, VoteBehavior::De
REGISTER_FEATURE(Clawback, Supported::yes, VoteBehavior::DefaultNo);
REGISTER_FIX (fixProvisionalDoubleThreading, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FEATURE(DeepFreeze, Supported::yes, VoteBehavior::DefaultNo);
REGISTER_FEATURE(IOUIssuerWeakTSH, Supported::yes, VoteBehavior::DefaultNo);
// The following amendments are obsolete, but must remain supported
// because they could potentially get enabled.

View File

@@ -17,9 +17,6 @@
*/
//==============================================================================
// Define IS_XAHAUD to be false for rippled build
#if IS_XAHAUD
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/LedgerToJson.h>
@@ -38,30 +35,6 @@
#include <ripple/rpc/impl/RPCHelpers.h>
#include <ripple/rpc/impl/Tuning.h>
#include <ripple/shamap/SHAMapItem.h>
#else
// rippled includes
#include <xrpl/basics/Log.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/protocol/ErrorCodes.h>
#include <xrpl/protocol/LedgerFormats.h>
#include <xrpl/protocol/RPCErr.h>
#include <xrpl/protocol/digest.h>
#include <xrpl/protocol/jss.h>
#include <xrpld/app/ledger/Ledger.h>
#include <xrpld/app/ledger/LedgerMaster.h>
#include <xrpld/app/ledger/LedgerToJson.h>
#include <xrpld/app/main/Application.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/nodestore/NodeObject.h>
#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/Role.h>
#include <xrpld/rpc/detail/RPCHelpers.h>
#include <xrpld/rpc/detail/Tuning.h>
#include <xrpld/shamap/SHAMap.h>
#include <xrpld/shamap/SHAMapItem.h>
#include <xrpld/shamap/SHAMapTreeNode.h>
// Note: rippled doesn't have GRPCHandlers.h
#endif
#include <atomic>
#include <condition_variable>
@@ -88,34 +61,6 @@ using duration = NetClock::duration;
#define CATL 0x4C544143UL /*"CATL" in LE*/
// Helper macro for RPC errors
#if IS_XAHAUD
#define CATALOGUE_RPC_ERROR(code, msg) rpcError(code, msg)
#else
#define CATALOGUE_RPC_ERROR(code, msg) RPC::make_error(code, msg)
#endif
// Special serialization markers (not part of SHAMapNodeType)
static constexpr uint8_t CATALOGUE_NODE_REMOVE = 0xFE; // Marks a removed node
static constexpr uint8_t CATALOGUE_NODE_TERMINAL = 0xFF; // Marks end of stream
// Self-contained JSON field names for catalogue operations
namespace catalogue_jss {
static constexpr auto job_status = "job_status";
static constexpr auto current_ledger = "current_ledger";
static constexpr auto percent_complete = "percent_complete";
static constexpr auto elapsed_seconds = "elapsed_seconds";
static constexpr auto estimated_time_remaining = "estimated_time_remaining";
static constexpr auto start_time = "start_time";
static constexpr auto job_type = "job_type";
static constexpr auto file = "file";
static constexpr auto file_size_estimated_human = "file_size_estimated_human";
static constexpr auto input_file = "input_file";
static constexpr auto ignore_hash = "ignore_hash";
static constexpr auto ledger_count = "ledger_count";
static constexpr auto ledgers_loaded = "ledgers_loaded";
} // namespace catalogue_jss
// Replace the current version constant
static constexpr uint16_t CATALOGUE_VERSION = 1;
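
CATLHeader itself is outside this diff, but the surrounding code reads and writes its fields. A hedged reconstruction from those uses (field order, widths, and packing are assumptions, not the actual layout):

#include <array>
#include <cstdint>

// Assumed layout only. The 64-byte hash matches the std::array<uint8_t, 64>
// copied out of header.hash below; filesize is patched in after writing;
// the hash is computed over the file with the hash field zeroed.
struct CATLHeader
{
    uint32_t magic = 0x4C544143UL;  // "CATL" in LE, as defined above
    uint32_t min_ledger = 0;
    uint32_t max_ledger = 0;
    uint32_t network_id = 0;
    uint16_t version = 1;  // CATALOGUE_VERSION
    uint64_t filesize = 0;
    std::array<uint8_t, 64> hash{};
};
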
@@ -361,305 +306,6 @@ public:
}
};
// Replacement serialization functions that use only SHAMap's public API
static size_t
serializeSHAMapToStream(
SHAMap const& shaMap,
boost::iostreams::filtering_ostream& stream,
SHAMapNodeType nodeType,
std::optional<std::reference_wrapper<const SHAMap>> prevMap = std::nullopt)
{
// Local byte counter
uint64_t localBytesWritten = 0;
// Single lambda that uses compile-time check for flush method existence
auto tryFlush = [](auto& s) {
if constexpr (requires(decltype(s) str) { str.flush(); })
{
s.flush();
}
// No-op if flush doesn't exist - compiler will optimize this branch out
};
// Helper to check if we need to flush
constexpr uint64_t flushThreshold = 256 * 1024 * 1024;
auto checkFlush = [&localBytesWritten, &tryFlush, &stream]() {
if (localBytesWritten >= flushThreshold)
{
tryFlush(stream);
localBytesWritten = 0;
}
};
// Helper lambda to serialize a leaf node
auto serializeLeaf = [&stream, &localBytesWritten, &checkFlush](
SHAMapItem const& item,
SHAMapNodeType nodeType) -> bool {
// write the node type
stream.write(reinterpret_cast<char const*>(&nodeType), 1);
localBytesWritten += 1;
// write the key
auto const key = item.key();
stream.write(reinterpret_cast<char const*>(key.data()), 32);
localBytesWritten += 32;
// write the data size
auto data = item.slice();
uint32_t size = data.size();
stream.write(reinterpret_cast<char const*>(&size), 4);
localBytesWritten += 4;
// write the data
stream.write(reinterpret_cast<char const*>(data.data()), size);
localBytesWritten += size;
checkFlush();
return !stream.fail();
};
// Helper lambda to serialize a removed leaf
auto serializeRemovedLeaf =
[&stream, &localBytesWritten, &checkFlush](uint256 const& key) -> bool {
// to indicate a node is removed, it is written with a removal type
auto t = CATALOGUE_NODE_REMOVE;
stream.write(reinterpret_cast<char const*>(&t), 1);
localBytesWritten += 1;
// write the key
stream.write(reinterpret_cast<char const*>(key.data()), 32);
localBytesWritten += 32;
checkFlush();
return !stream.fail();
};
std::size_t nodeCount = 0;
// If we have a previous map, compute differences
// State maps are always different between ledgers due to at least the
// skiplist updates
if (prevMap)
{
SHAMap::Delta differences;
if (shaMap.compare(
prevMap->get(), differences, std::numeric_limits<int>::max()))
{
// Process each difference
for (auto const& [key, deltaItem] : differences)
{
auto const& newItem = deltaItem.first;
auto const& oldItem = deltaItem.second;
if (!oldItem && newItem)
{
// Added item
if (serializeLeaf(*newItem, nodeType))
++nodeCount;
}
else if (oldItem && !newItem)
{
// Removed item
if (serializeRemovedLeaf(key))
++nodeCount;
}
else if (
oldItem && newItem && oldItem->slice() != newItem->slice())
{
// Modified item
if (serializeLeaf(*newItem, nodeType))
++nodeCount;
}
}
}
}
else
{
// No previous map or maps are identical - serialize all items
for (auto const& item : shaMap)
{
if (serializeLeaf(item, nodeType))
++nodeCount;
}
}
// write a terminal symbol to indicate the map stream has ended
auto t = CATALOGUE_NODE_TERMINAL;
stream.write(reinterpret_cast<char const*>(&t), 1);
localBytesWritten += 1;
// Final flush if needed
if (localBytesWritten > 0)
{
tryFlush(stream);
}
return nodeCount;
}
// Replacement deserialization functions that use only SHAMap's public API
// Note: The original SHAMap::deserializeFromStream() checked that the map was
// in either Modifying or Synching state before allowing deserialization. We
// don't perform this check here because:
// 1. We don't have access to the private state_ member
// 2. In catalogue loading, we always work with freshly created maps that are
// modifiable
// 3. This function is only called from doCatalogueLoad with appropriate maps
// If called with an immutable map, it will fail at the first
// addGiveItem/delItem call.
static bool
deserializeSHAMapFromStream(
SHAMap& shaMap,
boost::iostreams::filtering_istream& stream,
SHAMapNodeType nodeType,
NodeObjectType flushType,
bool allowRemoval,
beast::Journal const& j)
{
try
{
// Define a lambda to deserialize a leaf node
auto deserializeLeaf =
[&shaMap, &stream, &j, nodeType, allowRemoval](
SHAMapNodeType& parsedType /* out */) -> bool {
stream.read(reinterpret_cast<char*>(&parsedType), 1);
if (static_cast<uint8_t>(parsedType) == CATALOGUE_NODE_TERMINAL)
{
// end of map
return false;
}
uint256 key;
uint32_t size{0};
stream.read(reinterpret_cast<char*>(key.data()), 32);
if (stream.fail())
{
JLOG(j.error())
<< "Deserialization: stream stopped unexpectedly "
<< "while trying to read key of next entry";
return false;
}
if (static_cast<uint8_t>(parsedType) == CATALOGUE_NODE_REMOVE)
{
// deletion
if (!allowRemoval)
{
JLOG(j.error()) << "Deserialization: unexpected removal in "
"this map type";
return false;
}
if (!shaMap.hasItem(key))
{
JLOG(j.error())
<< "Deserialization: removal of key " << to_string(key)
<< " but key is already absent.";
return false;
}
shaMap.delItem(key);
return true;
}
stream.read(reinterpret_cast<char*>(&size), 4);
if (stream.fail())
{
JLOG(j.error())
<< "Deserialization: stream stopped unexpectedly"
<< " while trying to read size of data for key "
<< to_string(key);
return false;
}
if (size > 1024 * 1024 * 1024)
{
JLOG(j.error()) << "Deserialization: size of " << to_string(key)
<< " is suspiciously large (" << size
<< " bytes), bailing.";
return false;
}
std::vector<uint8_t> data;
data.resize(size);
stream.read(reinterpret_cast<char*>(data.data()), size);
if (stream.fail())
{
JLOG(j.error())
<< "Deserialization: Unexpected EOF while reading data for "
<< to_string(key);
return false;
}
auto item = make_shamapitem(key, makeSlice(data));
if (shaMap.hasItem(key))
return shaMap.updateGiveItem(nodeType, std::move(item));
return shaMap.addGiveItem(nodeType, std::move(item));
};
SHAMapNodeType lastParsed;
while (!stream.eof() && deserializeLeaf(lastParsed))
;
if (static_cast<uint8_t>(lastParsed) != CATALOGUE_NODE_TERMINAL)
{
JLOG(j.error())
<< "Deserialization: Unexpected EOF, terminal node not found.";
return false;
}
// Flush any dirty nodes and update hashes
shaMap.flushDirty(flushType);
return true;
}
catch (std::exception const& e)
{
JLOG(j.error()) << "Exception during deserialization: " << e.what();
return false;
}
}
// Convenience wrappers for specific map types
static bool
deserializeStateMap(
SHAMap& stateMap,
boost::iostreams::filtering_istream& stream,
beast::Journal const& j)
{
return deserializeSHAMapFromStream(
stateMap,
stream,
SHAMapNodeType::tnACCOUNT_STATE,
hotACCOUNT_NODE,
true, // Allow removal for state maps
j);
}
static bool
deserializeTxMap(
SHAMap& txMap,
boost::iostreams::filtering_istream& stream,
beast::Journal const& j)
{
return deserializeSHAMapFromStream(
txMap,
stream,
SHAMapNodeType::tnTRANSACTION_MD,
hotTRANSACTION_NODE,
false, // No removal for tx maps
j);
}
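
The removed serializer above fixes a compact leaf wire format: a one-byte node type (0xFE marks a removal, 0xFF terminates the stream), a 32-byte key, then a uint32 length and the payload. A self-contained sketch of a reader for that format, reconstructed from the code above rather than taken from the project:

#include <array>
#include <cstdint>
#include <istream>
#include <optional>
#include <vector>

// Markers as defined earlier in this file
static constexpr uint8_t kNodeRemove = 0xFE;    // CATALOGUE_NODE_REMOVE
static constexpr uint8_t kNodeTerminal = 0xFF;  // CATALOGUE_NODE_TERMINAL

struct LeafRecord
{
    uint8_t type = 0;               // node type byte
    std::array<uint8_t, 32> key{};  // item key
    std::vector<uint8_t> data;      // payload (empty for removals)
};

// Read one record; nullopt on the terminal marker or stream failure.
static std::optional<LeafRecord>
readLeafRecord(std::istream& in)
{
    LeafRecord rec;
    in.read(reinterpret_cast<char*>(&rec.type), 1);
    if (in.fail() || rec.type == kNodeTerminal)
        return std::nullopt;
    in.read(reinterpret_cast<char*>(rec.key.data()), 32);
    if (in.fail())
        return std::nullopt;
    if (rec.type == kNodeRemove)
        return rec;  // removals carry no payload
    uint32_t size = 0;  // written in host byte order by the code above
    in.read(reinterpret_cast<char*>(&size), 4);
    if (in.fail())
        return std::nullopt;
    rec.data.resize(size);
    in.read(reinterpret_cast<char*>(rec.data.data()), size);
    if (in.fail())
        return std::nullopt;
    return rec;
}
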
// Helper function to generate status JSON
// IMPORTANT: Caller must hold at least a shared (read) lock on
// catalogueStatusMutex before calling this function
@@ -670,10 +316,10 @@ generateStatusJson(bool includeErrorInfo = false)
if (catalogueRunStatus.isRunning)
{
jvResult[catalogue_jss::job_status] = "job_in_progress";
jvResult[jss::job_status] = "job_in_progress";
jvResult[jss::min_ledger] = catalogueRunStatus.minLedger;
jvResult[jss::max_ledger] = catalogueRunStatus.maxLedger;
jvResult[catalogue_jss::current_ledger] = catalogueRunStatus.ledgerUpto;
jvResult[jss::current_ledger] = catalogueRunStatus.ledgerUpto;
// Calculate percentage complete - FIX: Handle ledgerUpto = 0 case
// properly
@@ -691,15 +337,14 @@ generateStatusJson(bool includeErrorInfo = false)
int percentage = (total_ledgers > 0)
? static_cast<int>((processed_ledgers * 100) / total_ledgers)
: 0;
jvResult[catalogue_jss::percent_complete] = percentage;
jvResult[jss::percent_complete] = percentage;
// Calculate elapsed time
auto now = std::chrono::system_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(
now - catalogueRunStatus.started)
.count();
jvResult[catalogue_jss::elapsed_seconds] =
static_cast<Json::UInt>(elapsed);
jvResult[jss::elapsed_seconds] = static_cast<Json::UInt>(elapsed);
// Calculate estimated time remaining
if (processed_ledgers > 0 && total_ledgers > processed_ledgers)
@@ -745,17 +390,16 @@ generateStatusJson(bool includeErrorInfo = false)
" second" +
(estimated_seconds_remaining > 1 ? "s" : "");
}
jvResult[catalogue_jss::estimated_time_remaining] =
time_remaining;
jvResult[jss::estimated_time_remaining] = time_remaining;
}
else
{
jvResult[catalogue_jss::estimated_time_remaining] = "unknown";
jvResult[jss::estimated_time_remaining] = "unknown";
}
}
else
{
jvResult[catalogue_jss::estimated_time_remaining] = "unknown";
jvResult[jss::estimated_time_remaining] = "unknown";
}
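
The status JSON above reduces to simple arithmetic: percent complete from ledgers processed, and a linear extrapolation for time remaining. A self-contained analogue of that math (not the project's code):

#include <chrono>
#include <utility>

// Percent complete plus a naive linear ETA, mirroring the logic above.
inline std::pair<int, std::chrono::seconds>
progressStats(unsigned processed, unsigned total, std::chrono::seconds elapsed)
{
    int percent =
        total > 0 ? static_cast<int>((processed * 100ull) / total) : 0;
    std::chrono::seconds remaining{0};
    if (processed > 0 && total > processed)
        remaining = elapsed * (total - processed) / processed;
    return {percent, remaining};
}
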
// Add start time as ISO 8601 string
@@ -765,16 +409,16 @@ generateStatusJson(bool includeErrorInfo = false)
char time_buffer[30];
std::strftime(
time_buffer, sizeof(time_buffer), "%Y-%m-%dT%H:%M:%SZ", tm_started);
jvResult[catalogue_jss::start_time] = time_buffer;
jvResult[jss::start_time] = time_buffer;
// Add job type
jvResult[catalogue_jss::job_type] =
jvResult[jss::job_type] =
(catalogueRunStatus.jobType == CatalogueJobType::CREATE)
? "catalogue_create"
: "catalogue_load";
// Add filename
jvResult[catalogue_jss::file] = catalogueRunStatus.filename;
jvResult[jss::file] = catalogueRunStatus.filename;
// Add compression level if applicable
if (catalogueRunStatus.compressionLevel > 0)
@@ -799,7 +443,7 @@ generateStatusJson(bool includeErrorInfo = false)
}
// Add estimated filesize ("unknown" if not available)
jvResult[catalogue_jss::file_size_estimated_human] =
jvResult[jss::file_size_estimated_human] =
catalogueRunStatus.fileSizeEstimated;
if (includeErrorInfo)
@@ -811,7 +455,7 @@ generateStatusJson(bool includeErrorInfo = false)
}
else
{
jvResult[catalogue_jss::job_status] = "no_job_running";
jvResult[jss::job_status] = "no_job_running";
}
return jvResult;
@@ -864,7 +508,7 @@ doCatalogueCreate(RPC::JsonContext& context)
if (!context.params.isMember(jss::min_ledger) ||
!context.params.isMember(jss::max_ledger))
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINVALID_PARAMS, "expected min_ledger and max_ledger");
std::string filepath;
@@ -874,7 +518,7 @@ doCatalogueCreate(RPC::JsonContext& context)
if (!context.params.isMember(jss::output_file) ||
(filepath = context.params[jss::output_file].asString()).empty() ||
filepath.front() != '/')
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINVALID_PARAMS,
"expected output_file: <absolute writeable filepath>");
@@ -904,18 +548,18 @@ doCatalogueCreate(RPC::JsonContext& context)
if (stat(filepath.c_str(), &st) == 0)
{ // file exists
if (st.st_size > 0)
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINVALID_PARAMS,
"output_file already exists and is non-empty");
}
else if (errno != ENOENT)
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL,
"cannot stat output_file: " + std::string(strerror(errno)));
std::ofstream testWrite(filepath.c_str(), std::ios::out);
if (testWrite.fail())
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL,
"output_file location is not writeable: " +
std::string(strerror(errno)));
@@ -924,7 +568,7 @@ doCatalogueCreate(RPC::JsonContext& context)
std::ofstream outfile(filepath.c_str(), std::ios::out | std::ios::binary);
if (outfile.fail())
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL,
"failed to open output_file: " + std::string(strerror(errno)));
@@ -932,8 +576,7 @@ doCatalogueCreate(RPC::JsonContext& context)
uint32_t max_ledger = context.params[jss::max_ledger].asUInt();
if (min_ledger > max_ledger)
return CATALOGUE_RPC_ERROR(
rpcINVALID_PARAMS, "min_ledger must be <= max_ledger");
return rpcError(rpcINVALID_PARAMS, "min_ledger must be <= max_ledger");
// Initialize status tracking
{
@@ -961,7 +604,7 @@ doCatalogueCreate(RPC::JsonContext& context)
outfile.write(reinterpret_cast<const char*>(&header), sizeof(CATLHeader));
if (outfile.fail())
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL,
"failed to write header: " + std::string(strerror(errno)));
@@ -1035,13 +678,10 @@ doCatalogueCreate(RPC::JsonContext& context)
return false;
}
size_t stateNodesWritten = serializeSHAMapToStream(
ledger->stateMap(),
*compStream,
SHAMapNodeType::tnACCOUNT_STATE,
prevStateMap);
size_t txNodesWritten = serializeSHAMapToStream(
ledger->txMap(), *compStream, SHAMapNodeType::tnTRANSACTION_MD);
size_t stateNodesWritten =
ledger->stateMap().serializeToStream(*compStream, prevStateMap);
size_t txNodesWritten =
ledger->txMap().serializeToStream(*compStream);
predictor.addLedger(info.seq, byteCounter.getBytesWritten());
@@ -1073,19 +713,13 @@ doCatalogueCreate(RPC::JsonContext& context)
UPDATE_CATALOGUE_STATUS(ledgerUpto, min_ledger);
// Load the first ledger
#if IS_XAHAUD
if (auto error = RPC::getLedger(currLedger, min_ledger, context))
return CATALOGUE_RPC_ERROR(error.toErrorCode(), error.message());
return rpcError(error.toErrorCode(), error.message());
if (!currLedger)
return CATALOGUE_RPC_ERROR(rpcLGR_NOT_FOUND, "Ledger not found");
#else
currLedger = context.ledgerMaster.getLedgerBySeq(min_ledger);
if (!currLedger)
return CATALOGUE_RPC_ERROR(rpcLGR_NOT_FOUND, "Ledger not found");
#endif
return rpcError(rpcLEDGER_MISSING);
if (!outputLedger(currLedger))
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL, "Error occurred while processing first ledger");
ledgers_written++;
@@ -1104,20 +738,14 @@ doCatalogueCreate(RPC::JsonContext& context)
// Load the next ledger
currLedger = nullptr; // Release any previous current ledger
#if IS_XAHAUD
if (auto error = RPC::getLedger(currLedger, ledger_seq, context))
return CATALOGUE_RPC_ERROR(error.toErrorCode(), error.message());
return rpcError(error.toErrorCode(), error.message());
if (!currLedger)
return CATALOGUE_RPC_ERROR(rpcLGR_NOT_FOUND, "Ledger not found");
#else
currLedger = context.ledgerMaster.getLedgerBySeq(ledger_seq);
if (!currLedger)
return CATALOGUE_RPC_ERROR(rpcLGR_NOT_FOUND, "Ledger not found");
#endif
return rpcError(rpcLEDGER_MISSING);
// Process with diff against previous ledger
if (!outputLedger(currLedger, prevLedger->stateMap()))
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL, "Error occurred while processing ledgers");
UPDATE_CATALOGUE_STATUS(
@@ -1145,7 +773,7 @@ doCatalogueCreate(RPC::JsonContext& context)
{
JLOG(context.j.warn())
<< "Could not get file size: " << std::strerror(errno);
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL, "failed to get file size for header update");
}
@@ -1159,7 +787,7 @@ doCatalogueCreate(RPC::JsonContext& context)
std::fstream updateFileSizeFile(
filepath.c_str(), std::ios::in | std::ios::out | std::ios::binary);
if (updateFileSizeFile.fail())
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL,
"cannot open file for updating filesize: " +
std::string(strerror(errno)));
@@ -1174,7 +802,7 @@ doCatalogueCreate(RPC::JsonContext& context)
std::ifstream hashFile(filepath.c_str(), std::ios::in | std::ios::binary);
if (hashFile.fail())
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL,
"cannot open file for hashing: " + std::string(strerror(errno)));
@@ -1187,8 +815,7 @@ doCatalogueCreate(RPC::JsonContext& context)
// Read and process the header portion
hashFile.read(buffer.data(), sizeof(CATLHeader));
if (hashFile.gcount() != sizeof(CATLHeader))
return CATALOGUE_RPC_ERROR(
rpcINTERNAL, "failed to read header for hashing");
return rpcError(rpcINTERNAL, "failed to read header for hashing");
// Zero out the hash portion in the buffer for hash calculation
std::fill(
@@ -1216,7 +843,7 @@ doCatalogueCreate(RPC::JsonContext& context)
std::fstream updateFile(
filepath.c_str(), std::ios::in | std::ios::out | std::ios::binary);
if (updateFile.fail())
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL,
"cannot open file for updating hash: " +
std::string(strerror(errno)));
@@ -1283,17 +910,17 @@ doCatalogueLoad(RPC::JsonContext& context)
}
} opCleanup;
if (!context.params.isMember(catalogue_jss::input_file))
return CATALOGUE_RPC_ERROR(rpcINVALID_PARAMS, "expected input_file");
if (!context.params.isMember(jss::input_file))
return rpcError(rpcINVALID_PARAMS, "expected input_file");
// Check for ignore_hash parameter
bool ignore_hash = false;
if (context.params.isMember(catalogue_jss::ignore_hash))
ignore_hash = context.params[catalogue_jss::ignore_hash].asBool();
if (context.params.isMember(jss::ignore_hash))
ignore_hash = context.params[jss::ignore_hash].asBool();
std::string filepath = context.params[catalogue_jss::input_file].asString();
std::string filepath = context.params[jss::input_file].asString();
if (filepath.empty() || filepath.front() != '/')
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINVALID_PARAMS,
"expected input_file: <absolute readable filepath>");
@@ -1302,7 +929,7 @@ doCatalogueLoad(RPC::JsonContext& context)
// Check file size before attempting to read
struct stat st;
if (stat(filepath.c_str(), &st) != 0)
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL,
"cannot stat input_file: " + std::string(strerror(errno)));
@@ -1310,7 +937,7 @@ doCatalogueLoad(RPC::JsonContext& context)
// Minimal size check: at least a header must be present
if (file_size < sizeof(CATLHeader))
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINVALID_PARAMS,
"input_file too small (only " + std::to_string(file_size) +
" bytes), must be at least " +
@@ -1321,7 +948,7 @@ doCatalogueLoad(RPC::JsonContext& context)
// Check if file exists and is readable
std::ifstream infile(filepath.c_str(), std::ios::in | std::ios::binary);
if (infile.fail())
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL,
"cannot open input_file: " + std::string(strerror(errno)));
@@ -1331,12 +958,10 @@ doCatalogueLoad(RPC::JsonContext& context)
CATLHeader header;
infile.read(reinterpret_cast<char*>(&header), sizeof(CATLHeader));
if (infile.fail())
return CATALOGUE_RPC_ERROR(
rpcINTERNAL, "failed to read catalogue header");
return rpcError(rpcINTERNAL, "failed to read catalogue header");
if (header.magic != CATL)
return CATALOGUE_RPC_ERROR(
rpcINVALID_PARAMS, "invalid catalogue file magic");
return rpcError(rpcINVALID_PARAMS, "invalid catalogue file magic");
// Save the hash from the header
std::array<uint8_t, 64> stored_hash = header.hash;
@@ -1368,12 +993,12 @@ doCatalogueLoad(RPC::JsonContext& context)
// Check version compatibility
if (version > 1) // Only checking base version number
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINVALID_PARAMS,
"unsupported catalogue version: " + std::to_string(version));
if (header.network_id != context.app.config().NETWORK_ID)
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINVALID_PARAMS,
"catalogue network ID mismatch: " +
std::to_string(header.network_id));
@@ -1385,7 +1010,7 @@ doCatalogueLoad(RPC::JsonContext& context)
<< "Catalogue file size mismatch. Header indicates "
<< header.filesize << " bytes, but actual file size is "
<< file_size << " bytes";
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINVALID_PARAMS,
"catalogue file size mismatch: expected " +
std::to_string(header.filesize) + " bytes, got " +
@@ -1405,7 +1030,7 @@ doCatalogueLoad(RPC::JsonContext& context)
std::ifstream hashFile(
filepath.c_str(), std::ios::in | std::ios::binary);
if (hashFile.fail())
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL,
"cannot reopen file for hash verification: " +
std::string(strerror(errno)));
@@ -1449,7 +1074,7 @@ doCatalogueLoad(RPC::JsonContext& context)
JLOG(context.j.error())
<< "Catalogue hash verification failed. Expected: " << hash_hex
<< ", Computed: " << computed_hex;
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINVALID_PARAMS, "catalogue hash verification failed");
}
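
Hash verification here reopens the file, zeroes the header's 64-byte hash field, and hashes the remainder before comparing with the stored digest. A self-contained sketch of that scheme (SHA-512 via OpenSSL's EVP API is an assumption inferred from the 64-byte field; the real offsets and hash function may differ):

#include <openssl/evp.h>

#include <algorithm>
#include <array>
#include <cstdint>
#include <fstream>
#include <iterator>
#include <string>
#include <vector>

// Hash the whole file with the header's hash field zeroed, then compare.
static bool
verifyCatalogueHash(
    std::string const& path,
    std::size_t hashOffset,  // byte offset of the hash field in the header
    std::array<uint8_t, 64> const& stored)
{
    std::ifstream in(path, std::ios::binary);
    std::vector<unsigned char> buf{
        std::istreambuf_iterator<char>(in), std::istreambuf_iterator<char>()};
    if (buf.size() < hashOffset + stored.size())
        return false;
    std::fill_n(buf.begin() + hashOffset, stored.size(), 0);

    std::array<uint8_t, 64> digest{};
    unsigned int len = 0;
    EVP_MD_CTX* ctx = EVP_MD_CTX_new();
    EVP_DigestInit_ex(ctx, EVP_sha512(), nullptr);
    EVP_DigestUpdate(ctx, buf.data(), buf.size());
    EVP_DigestFinal_ex(ctx, digest.data(), &len);
    EVP_MD_CTX_free(ctx);
    return len == digest.size() &&
        std::equal(digest.begin(), digest.end(), stored.begin());
}
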
@@ -1458,7 +1083,7 @@ doCatalogueLoad(RPC::JsonContext& context)
// Reopen file for reading
infile.open(filepath.c_str(), std::ios::in | std::ios::binary);
if (infile.fail())
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL,
"cannot reopen file after hash verification: " +
std::string(strerror(errno)));
@@ -1532,8 +1157,7 @@ doCatalogueLoad(RPC::JsonContext& context)
<< "Catalogue load expected but could not "
<< "read the next ledger header at seq=" << expected_seq << ". "
<< "Ledgers prior to this in the file (if any) were loaded.";
return CATALOGUE_RPC_ERROR(
rpcINTERNAL, "Unexpected end of catalogue file.");
return rpcError(rpcINTERNAL, "Unexpected end of catalogue file.");
}
info.closeTime = time_point{duration{closeTime}};
@@ -1547,7 +1171,7 @@ doCatalogueLoad(RPC::JsonContext& context)
{
JLOG(context.j.error())
<< "Expected ledger " << expected_seq << ", bailing";
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL,
"Unexpected ledger out of sequence in catalogue file");
}
@@ -1567,12 +1191,11 @@ doCatalogueLoad(RPC::JsonContext& context)
ledger->setLedgerInfo(info);
// Deserialize the complete state map from leaf nodes
if (!deserializeStateMap(
ledger->stateMap(), *decompStream, context.j))
if (!ledger->stateMap().deserializeFromStream(*decompStream))
{
JLOG(context.j.error())
<< "Failed to deserialize base ledger state";
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL, "Failed to load base ledger state");
}
}
@@ -1582,37 +1205,32 @@ doCatalogueLoad(RPC::JsonContext& context)
if (!prevLedger)
{
JLOG(context.j.error()) << "Missing previous ledger for delta";
return CATALOGUE_RPC_ERROR(
rpcINTERNAL, "Missing previous ledger");
return rpcError(rpcINTERNAL, "Missing previous ledger");
}
// For delta ledgers, we need to start with previous ledger's state
// Both xahaud and rippled use similar approaches here
// Create a new ledger that starts as a copy of the previous ledger
ledger = std::make_shared<Ledger>(*prevLedger, info.closeTime);
auto snapshot = prevLedger->stateMap().snapShot(true);
// Now update the ledger info to match what we loaded from the
// catalogue
ledger->setLedgerInfo(info);
ledger = std::make_shared<Ledger>(
info,
context.app.config(),
context.app.getNodeFamily(),
*snapshot);
// Apply delta (only leaf-node changes)
if (!deserializeStateMap(
ledger->stateMap(), *decompStream, context.j))
if (!ledger->stateMap().deserializeFromStream(*decompStream))
{
JLOG(context.j.error())
<< "Failed to apply delta to ledger " << info.seq;
return CATALOGUE_RPC_ERROR(
rpcINTERNAL, "Failed to apply ledger delta");
return rpcError(rpcINTERNAL, "Failed to apply ledger delta");
}
}
// pull in the tx map
if (!deserializeTxMap(ledger->txMap(), *decompStream, context.j))
if (!ledger->txMap().deserializeFromStream(*decompStream))
{
JLOG(context.j.error())
<< "Failed to apply delta to ledger " << info.seq;
return CATALOGUE_RPC_ERROR(
rpcINTERNAL, "Failed to apply ledger delta");
return rpcError(rpcINTERNAL, "Failed to apply ledger delta");
}
// Finalize the ledger
@@ -1625,23 +1243,7 @@ doCatalogueLoad(RPC::JsonContext& context)
info.closeFlags & sLCF_NoConsensusTime);
ledger->setValidated();
#if IS_XAHAUD
ledger->setCloseFlags(info.closeFlags);
#else
// rippled doesn't have a setCloseFlags method - close flags are set
// during setAccepted. Currently only sLCF_NoConsensusTime (0x01) exists,
// which setAccepted() handles properly. This check is future-proofing in
// case additional close flags are added later.
if (info.closeFlags & ~sLCF_NoConsensusTime)
{
throw std::runtime_error(
"Catalogue contains close flags that rippled cannot handle. "
"closeFlags=0x" +
std::to_string(info.closeFlags) +
" but rippled only supports sLCF_NoConsensusTime via "
"setAccepted()");
}
#endif
ledger->setImmutable(true);
// we can double check the computed hashes now, since setImmutable
@@ -1654,7 +1256,7 @@ doCatalogueLoad(RPC::JsonContext& context)
"match. "
<< "This ledger was not saved, and ledger loading from this "
"catalogue file ended here.";
return CATALOGUE_RPC_ERROR(
return rpcError(
rpcINTERNAL, "Catalogue file contains a corrupted ledger.");
}
@@ -1662,11 +1264,7 @@ doCatalogueLoad(RPC::JsonContext& context)
pendSaveValidated(context.app, ledger, false, false);
// Store in ledger master
#if IS_XAHAUD
context.app.getLedgerMaster().storeLedger(ledger, true);
#else
context.app.getLedgerMaster().storeLedger(ledger);
#endif
if (info.seq == header.max_ledger &&
context.app.getLedgerMaster().getClosedLedger()->info().seq <
@@ -1676,13 +1274,8 @@ doCatalogueLoad(RPC::JsonContext& context)
context.app.getLedgerMaster().switchLCL(ledger);
}
#if IS_XAHAUD
context.app.getLedgerMaster().setLedgerRangePresent(
header.min_ledger, info.seq, true);
#else
context.app.getLedgerMaster().setLedgerRangePresent(
header.min_ledger, info.seq);
#endif
// Store the ledger
prevLedger = ledger;
@@ -1699,16 +1292,15 @@ doCatalogueLoad(RPC::JsonContext& context)
Json::Value jvResult;
jvResult[jss::ledger_min] = header.min_ledger;
jvResult[jss::ledger_max] = header.max_ledger;
jvResult[catalogue_jss::ledger_count] =
jvResult[jss::ledger_count] =
static_cast<Json::UInt>(header.max_ledger - header.min_ledger + 1);
jvResult[catalogue_jss::ledgers_loaded] =
static_cast<Json::UInt>(ledgersLoaded);
jvResult[jss::ledgers_loaded] = static_cast<Json::UInt>(ledgersLoaded);
jvResult[jss::file_size_human] = formatBytesIEC(file_size);
jvResult[jss::file_size] = std::to_string(file_size);
jvResult[jss::status] = jss::success;
jvResult[jss::compression_level] = compressionLevel;
jvResult[jss::hash] = hash_hex;
jvResult[catalogue_jss::ignore_hash] = ignore_hash;
jvResult[jss::ignore_hash] = ignore_hash;
return jvResult;
}

File diff suppressed because it is too large.