Mirror of https://github.com/XRPLF/rippled.git
Compare commits: pratik/Fix...bthomee/no (10 Commits)
Commits:

- 061c033f52
- 832a7e7e4a
- b2371c4c02
- b94a7c4b44
- 9b9027112d
- 8e7889c66e
- d836c3788d
- 1cb7c0293f
- 52dabc1f79
- 2d78d41f7b
@@ -5,7 +5,7 @@ Loop: test.jtx test.unit_test
 test.unit_test == test.jtx

 Loop: xrpld.app xrpld.overlay
-xrpld.overlay ~= xrpld.app
+xrpld.overlay == xrpld.app

 Loop: xrpld.app xrpld.peerfinder
 xrpld.peerfinder == xrpld.app
@@ -228,9 +228,8 @@ jobs:
         working-directory: ${{ runner.os == 'Windows' && format('{0}/{1}', env.BUILD_DIR, inputs.build_type) || env.BUILD_DIR }}
         env:
           BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
-          PARALLELISM: ${{ env.SANITIZERS_ENABLED == 'true' && '1' || steps.nproc.outputs.nproc }}
         run: |
-          ./xrpld --unittest --unittest-jobs "${PARALLELISM}"
+          ./xrpld --unittest --unittest-jobs "${BUILD_NPROC}"

       - name: Debug failure (Linux)
         if: ${{ failure() && runner.os == 'Linux' && !inputs.build_only }}
@@ -17,10 +17,12 @@ find_dependency(Boost
   chrono
   container
   context
   coroutine
   date_time
   filesystem
   program_options
   regex
   system
   thread)
 #[=========================================================[
    OpenSSL
@@ -22,7 +22,7 @@ target_compile_definitions(
     BOOST_FILESYSTEM_NO_DEPRECATED
   >
   $<$<NOT:$<BOOL:${boost_show_deprecated}>>:
-    BOOST_COROUTINES2_NO_DEPRECATION_WARNING
+    BOOST_COROUTINES_NO_DEPRECATION_WARNING
     BOOST_BEAST_ALLOW_DEPRECATED
     BOOST_FILESYSTEM_DEPRECATED
   >
@@ -4,12 +4,13 @@ include(XrplSanitizers)
 find_package(Boost REQUIRED
   COMPONENTS chrono
              container
             context
+             coroutine
              date_time
              filesystem
              json
              program_options
              regex
              system
              thread)

 add_library(xrpl_boost INTERFACE)
@@ -20,7 +21,7 @@ target_link_libraries(
   INTERFACE Boost::headers
             Boost::chrono
             Boost::container
-            Boost::context
+            Boost::coroutine
             Boost::date_time
             Boost::filesystem
             Boost::json
@@ -31,23 +32,13 @@ target_link_libraries(
 if (Boost_COMPILER)
   target_link_libraries(xrpl_boost INTERFACE Boost::disable_autolinking)
 endif ()

-# GCC 14+ has a false positive -Wuninitialized warning in Boost.Coroutine2's
-# state.hpp when compiled with -O3. This is due to GCC's intentional behavior
-# change (Bug #98871, #119388) where warnings from inlined system header code
-# are no longer suppressed by -isystem. The warning occurs in operator|= in
-# boost/coroutine2/detail/state.hpp when inlined from push_control_block::destroy().
-# See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=119388
-if (is_gcc AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 14)
-  target_compile_options(xrpl_boost INTERFACE -Wno-uninitialized)
-endif ()
-
-# Boost.Context's ucontext backend has ASAN fiber-switching annotations
-# (start/finish_switch_fiber) that are compiled in when BOOST_USE_ASAN is defined.
-# This tells ASAN about coroutine stack switches, preventing false positive
-# stack-use-after-scope errors. BOOST_USE_UCONTEXT ensures the ucontext backend
-# is selected (fcontext does not support ASAN annotations).
-# These defines must match what Boost was compiled with (see conan/profiles/sanitizers).
-if (enable_asan)
-  target_compile_definitions(xrpl_boost INTERFACE BOOST_USE_ASAN BOOST_USE_UCONTEXT)
-endif ()
+if (SANITIZERS_ENABLED AND is_clang)
+  # TODO: gcc does not support -fsanitize-blacklist...can we do something else for gcc ?
+  if (NOT Boost_INCLUDE_DIRS AND TARGET Boost::headers)
+    get_target_property(Boost_INCLUDE_DIRS Boost::headers INTERFACE_INCLUDE_DIRECTORIES)
+  endif ()
+  message(STATUS "Adding [${Boost_INCLUDE_DIRS}] to sanitizer blacklist")
+  file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt "src:${Boost_INCLUDE_DIRS}/*")
+  target_compile_options(opts INTERFACE # ignore boost headers for sanitizing
+    -fsanitize-blacklist=${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt)
+endif ()
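The comment block removed above describes ASAN's fiber-switching protocol, which BOOST_USE_ASAN compiles into Boost.Context's ucontext backend. As a rough illustration of that protocol (a hypothetical sketch, not part of this diff; the function and stack parameters are placeholders), the runtime hooks look like this:

#include <cstddef>
#include <sanitizer/common_interface_defs.h>

// Tell ASAN that execution is moving to another stack, then finish the
// bookkeeping once control returns. Without these calls, ASAN reports
// false stack-use-after-scope errors across fiber/coroutine switches.
void annotated_switch(void const* fiber_stack_bottom, std::size_t fiber_stack_size)
{
    void* fake_stack = nullptr;
    __sanitizer_start_switch_fiber(&fake_stack, fiber_stack_bottom, fiber_stack_size);
    // ... perform the actual context switch here (e.g. swapcontext) ...
    __sanitizer_finish_switch_fiber(fake_stack, nullptr, nullptr);
}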
@@ -7,21 +7,16 @@ include(default)
 {% if compiler == "gcc" %}
 {% if "address" in sanitizers or "thread" in sanitizers or "undefinedbehavior" in sanitizers %}
 {% set sanitizer_list = [] %}
-{% set defines = [] %}
 {% set model_code = "" %}
 {% set extra_cxxflags = ["-fno-omit-frame-pointer", "-O1", "-Wno-stringop-overflow"] %}

 {% if "address" in sanitizers %}
 {% set _ = sanitizer_list.append("address") %}
 {% set model_code = "-mcmodel=large" %}
-{% set _ = defines.append("BOOST_USE_ASAN")%}
-{% set _ = defines.append("BOOST_USE_UCONTEXT")%}
 {% elif "thread" in sanitizers %}
 {% set _ = sanitizer_list.append("thread") %}
 {% set model_code = "-mcmodel=medium" %}
 {% set _ = extra_cxxflags.append("-Wno-tsan") %}
-{% set _ = defines.append("BOOST_USE_TSAN")%}
-{% set _ = defines.append("BOOST_USE_UCONTEXT")%}
 {% endif %}

 {% if "undefinedbehavior" in sanitizers %}
@@ -34,22 +29,16 @@ include(default)
 tools.build:cxxflags+=['{{sanitizer_flags}} {{" ".join(extra_cxxflags)}}']
 tools.build:sharedlinkflags+=['{{sanitizer_flags}}']
 tools.build:exelinkflags+=['{{sanitizer_flags}}']
-tools.build:defines+={{defines}}
 {% endif %}
 {% elif compiler == "apple-clang" or compiler == "clang" %}
 {% if "address" in sanitizers or "thread" in sanitizers or "undefinedbehavior" in sanitizers %}
 {% set sanitizer_list = [] %}
-{% set defines = [] %}
 {% set extra_cxxflags = ["-fno-omit-frame-pointer", "-O1"] %}

 {% if "address" in sanitizers %}
 {% set _ = sanitizer_list.append("address") %}
-{% set _ = defines.append("BOOST_USE_ASAN")%}
-{% set _ = defines.append("BOOST_USE_UCONTEXT")%}
 {% elif "thread" in sanitizers %}
 {% set _ = sanitizer_list.append("thread") %}
-{% set _ = defines.append("BOOST_USE_TSAN")%}
-{% set _ = defines.append("BOOST_USE_UCONTEXT")%}
 {% endif %}

 {% if "undefinedbehavior" in sanitizers %}
@@ -63,24 +52,8 @@ include(default)
 tools.build:cxxflags+=['{{sanitizer_flags}} {{" ".join(extra_cxxflags)}}']
 tools.build:sharedlinkflags+=['{{sanitizer_flags}}']
 tools.build:exelinkflags+=['{{sanitizer_flags}}']
-tools.build:defines+={{defines}}
 {% endif %}
 {% endif %}
 {% endif %}

-tools.info.package_id:confs+=["tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags", "tools.build:defines"]
-
-[options]
-{% if sanitizers %}
-{% if "address" in sanitizers %}
-# Build Boost.Context with ucontext backend (not fcontext) so that
-# ASAN fiber-switching annotations (__sanitizer_start/finish_switch_fiber)
-# are compiled into the library. fcontext (assembly) has no ASAN support.
-# define=BOOST_USE_ASAN=1 is critical: it must be defined when building
-# Boost.Context itself so the ucontext backend compiles in the ASAN annotations.
-boost/*:extra_b2_flags=context-impl=ucontext address-sanitizer=on define=BOOST_USE_ASAN=1
-boost/*:without_context=False
-# Boost stacktrace fails to build with some sanitizers
-boost/*:without_stacktrace=True
-{% endif %}
-{% endif %}
+tools.info.package_id:confs+=["tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags"]
conanfile.py (15 changes)
@@ -1,5 +1,4 @@
 import re
-import os

 from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout

@@ -58,9 +57,6 @@ class Xrpl(ConanFile):
         "tests": False,
         "unity": False,
         "xrpld": False,
-        "boost/*:without_context": False,
-        "boost/*:without_coroutine": True,
-        "boost/*:without_coroutine2": False,
         "date/*:header_only": True,
         "ed25519/*:shared": False,
         "grpc/*:shared": False,
@@ -129,14 +125,6 @@ class Xrpl(ConanFile):
         self.options["boost"].visibility = "global"
         if self.settings.compiler in ["clang", "gcc"]:
             self.options["boost"].without_cobalt = True
-            self.options["boost"].without_context = False
-            self.options["boost"].without_coroutine = True
-            self.options["boost"].without_coroutine2 = False
-        # Check if environment variable exists
-        if "SANITIZERS" in os.environ:
-            sanitizers = os.environ["SANITIZERS"]
-            if "address" in sanitizers.lower():
-                self.default_options["fPIC"] = False

     def requirements(self):
         # Conan 2 requires transitive headers to be specified
@@ -208,8 +196,7 @@ class Xrpl(ConanFile):
             "boost::headers",
             "boost::chrono",
             "boost::container",
             "boost::context",
-            "boost::coroutine2",
+            "boost::coroutine",
             "boost::date_time",
             "boost::filesystem",
             "boost::json",
@@ -98,7 +98,6 @@ words:
   - endmacro
   - exceptioned
   - Falco
-  - fcontext
   - finalizers
   - firewalled
   - fmtdur
@@ -1,5 +1,7 @@
 #pragma once

+#include <xrpl/basics/ByteUtilities.h>
+
 namespace xrpl {

 template <class F>
@@ -8,14 +10,16 @@ JobQueue::Coro::Coro(Coro_create_t, JobQueue& jq, JobType type, std::string cons
     , type_(type)
     , name_(name)
     , running_(false)
-    , coro_([this, fn = std::forward<F>(f)](boost::coroutines2::coroutine<void>::push_type& do_yield) {
-        yield_ = &do_yield;
-        yield();
-        fn(shared_from_this());
+    , coro_(
+        [this, fn = std::forward<F>(f)](boost::coroutines::asymmetric_coroutine<void>::push_type& do_yield) {
+            yield_ = &do_yield;
+            yield();
+            fn(shared_from_this());
 #ifndef NDEBUG
-        finished_ = true;
+            finished_ = true;
 #endif
-    })
+        },
+        boost::coroutines::attributes(megabytes(1)))
 {
 }
@@ -7,7 +7,7 @@
 #include <xrpl/core/detail/Workers.h>
 #include <xrpl/json/json_value.h>

-#include <boost/coroutine2/all.hpp>
+#include <boost/coroutine/all.hpp>

 #include <set>
@@ -48,8 +48,8 @@ public:
     std::mutex mutex_;
     std::mutex mutex_run_;
     std::condition_variable cv_;
-    boost::coroutines2::coroutine<void>::pull_type coro_;
-    boost::coroutines2::coroutine<void>::push_type* yield_;
+    boost::coroutines::asymmetric_coroutine<void>::pull_type coro_;
+    boost::coroutines::asymmetric_coroutine<void>::push_type* yield_;
 #ifndef NDEBUG
     bool finished_ = false;
 #endif
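For context on the API swap in these hunks: Boost.Coroutine's asymmetric_coroutine pairs a pull_type owner with a push_type yield handle, the same pattern the new Coro code follows (capture do_yield, suspend immediately, resume later). A minimal self-contained sketch, not from the diff (the 1 MiB literal stands in for megabytes(1)):

#include <boost/coroutine/all.hpp>  // may warn unless BOOST_COROUTINES_NO_DEPRECATION_WARNING is defined
#include <iostream>

int main()
{
    using coro_t = boost::coroutines::asymmetric_coroutine<void>;
    coro_t::push_type* yield = nullptr;

    // Constructing the pull_type runs the body until the first yield,
    // mirroring how Coro captures do_yield and suspends right away.
    coro_t::pull_type coro(
        [&yield](coro_t::push_type& do_yield) {
            yield = &do_yield;
            (*yield)();  // suspend until the owner resumes us
            std::cout << "resumed\n";
        },
        boost::coroutines::attributes(1024 * 1024));  // stack size, like megabytes(1)

    coro();  // resume past the initial suspension; prints "resumed"
}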
@@ -244,7 +244,15 @@ message TMGetObjectByHash {

 message TMLedgerNode {
     required bytes nodedata = 1;

+    // Used when fixLedgerNodeID is disabled.
     optional bytes nodeid = 2; // missing for ledger base data
+
+    // Used when fixLedgerNodeID is enabled. Neither value is set for ledger base data.
+    oneof reference {
+        bytes id = 3;     // Set for inner nodes.
+        uint32 depth = 4; // Set for leaf nodes.
+    }
 }

 enum TMLedgerInfoType {
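As a brief illustration of the new field's semantics (a hypothetical snippet using the protobuf-generated C++ accessors; the header name and surrounding code are assumptions, not part of this change), a oneof stores at most one member, so setting id clears depth and vice versa:

#include <cassert>
#include <string>

#include "ripple.pb.h"  // assumed name of the generated protobuf header

void setReference(protocol::TMLedgerNode& node, bool isInner, std::string const& rawId, unsigned depth)
{
    if (isInner)
        node.set_id(rawId);     // inner nodes carry their serialized node ID
    else
        node.set_depth(depth);  // leaf nodes carry only their depth

    // Exactly one member of the oneof is set after either call.
    assert(node.has_id() != node.has_depth());
}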
@@ -15,6 +15,7 @@

 // Add new amendments to the top of this list.
 // Keep it sorted in reverse chronological order.
+XRPL_FIX (LedgerNodeID, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX (PermissionedDomainInvariant, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX (ExpiredNFTokenOfferRemoval, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX (BatchInnerSigs, Supported::yes, VoteBehavior::DefaultNo)
@@ -280,9 +280,12 @@ public:
     serializeRoot(Serializer& s) const;

     SHAMapAddNode
-    addRootNode(SHAMapHash const& hash, Slice const& rootNode, SHAMapSyncFilter* filter);
+    addRootNode(SHAMapHash const& hash, intr_ptr::SharedPtr<SHAMapTreeNode> root_node, SHAMapSyncFilter const* filter);
     SHAMapAddNode
-    addKnownNode(SHAMapNodeID const& nodeID, Slice const& rawNode, SHAMapSyncFilter* filter);
+    addKnownNode(
+        SHAMapNodeID const& node_id,
+        intr_ptr::SharedPtr<SHAMapTreeNode> tree_node,
+        SHAMapSyncFilter const* filter);

     // status functions
     void
@@ -333,7 +336,7 @@ private:
     cacheLookup(SHAMapHash const& hash) const;

     void
-    canonicalize(SHAMapHash const& hash, intr_ptr::SharedPtr<SHAMapTreeNode>&) const;
+    canonicalize(SHAMapHash const& hash, intr_ptr::SharedPtr<SHAMapTreeNode>& node) const;

     // database operations
     intr_ptr::SharedPtr<SHAMapTreeNode>
@@ -341,11 +344,11 @@ private:
     intr_ptr::SharedPtr<SHAMapTreeNode>
     fetchNodeNT(SHAMapHash const& hash) const;
     intr_ptr::SharedPtr<SHAMapTreeNode>
-    fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const;
+    fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter const* filter) const;
     intr_ptr::SharedPtr<SHAMapTreeNode>
     fetchNode(SHAMapHash const& hash) const;
     intr_ptr::SharedPtr<SHAMapTreeNode>
-    checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const;
+    checkFilter(SHAMapHash const& hash, SHAMapSyncFilter const* filter) const;

     /** Update hashes up to the root */
     void
@@ -405,10 +408,11 @@ private:
     // If pending, callback is called as if it called fetchNodeNT
     using descendCallback = std::function<void(intr_ptr::SharedPtr<SHAMapTreeNode>, SHAMapHash const&)>;
     SHAMapTreeNode*
-    descendAsync(SHAMapInnerNode* parent, int branch, SHAMapSyncFilter* filter, bool& pending, descendCallback&&) const;
+    descendAsync(SHAMapInnerNode* parent, int branch, SHAMapSyncFilter const* filter, bool& pending, descendCallback&&)
+        const;

     std::pair<SHAMapTreeNode*, SHAMapNodeID>
-    descend(SHAMapInnerNode* parent, SHAMapNodeID const& parentID, int branch, SHAMapSyncFilter* filter) const;
+    descend(SHAMapInnerNode* parent, SHAMapNodeID const& parentID, int branch, SHAMapSyncFilter const* filter) const;

     // Non-storing
     // Does not hook the returned node to its parent
@@ -1,6 +1,15 @@
 # The idea is to empty this file gradually by fixing the underlying issues and removing suppressions.
+#
+# ASAN_OPTIONS="print_stacktrace=1:detect_container_overflow=0:suppressions=sanitizers/suppressions/asan.supp:halt_on_error=0"
+#
+# The detect_container_overflow=0 option disables false positives from:
+# - Boost intrusive containers (slist_iterator.hpp, hashtable.hpp, aged_unordered_container.h)
+# - Boost context/coroutine stack switching (Workers.cpp, thread.h)
+#
+# See: https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow

 # Boost
 interceptor_name:boost/asio

 # Leaks in Doctest tests: xrpl.test.*
 interceptor_name:src/libxrpl/net/HTTPClient.cpp
@@ -172,7 +172,7 @@ SHAMap::finishFetch(SHAMapHash const& hash, std::shared_ptr<NodeObject> const& o

 // See if a sync filter has a node
 intr_ptr::SharedPtr<SHAMapTreeNode>
-SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
+SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter const* filter) const
 {
     if (auto nodeData = filter->getNode(hash))
     {
@@ -198,7 +198,7 @@ SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
 // Get a node without throwing
 // Used on maps where missing nodes are expected
 intr_ptr::SharedPtr<SHAMapTreeNode>
-SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
+SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter const* filter) const
 {
     auto node = cacheLookup(hash);
     if (node)
@@ -307,7 +307,7 @@ SHAMap::descendNoStore(SHAMapInnerNode& parent, int branch) const
 }

 std::pair<SHAMapTreeNode*, SHAMapNodeID>
-SHAMap::descend(SHAMapInnerNode* parent, SHAMapNodeID const& parentID, int branch, SHAMapSyncFilter* filter) const
+SHAMap::descend(SHAMapInnerNode* parent, SHAMapNodeID const& parentID, int branch, SHAMapSyncFilter const* filter) const
 {
     XRPL_ASSERT(parent->isInner(), "xrpl::SHAMap::descend : valid parent input");
     XRPL_ASSERT((branch >= 0) && (branch < branchFactor), "xrpl::SHAMap::descend : valid branch input");
@@ -334,7 +334,7 @@ SHAMapTreeNode*
 SHAMap::descendAsync(
     SHAMapInnerNode* parent,
     int branch,
-    SHAMapSyncFilter* filter,
+    SHAMapSyncFilter const* filter,
     bool& pending,
     descendCallback&& callback) const
 {
@@ -113,7 +113,7 @@ selectBranch(SHAMapNodeID const& id, uint256 const& hash)
 SHAMapNodeID
 SHAMapNodeID::createID(int depth, uint256 const& key)
 {
-    XRPL_ASSERT((depth >= 0) && (depth < 65), "xrpl::SHAMapNodeID::createID : valid branch input");
+    XRPL_ASSERT(depth >= 0 && depth <= SHAMap::leafDepth, "xrpl::SHAMapNodeID::createID : valid branch input");
     return SHAMapNodeID(depth, key & depthMask(depth));
 }
@@ -467,7 +467,10 @@ SHAMap::serializeRoot(Serializer& s) const
 }

 SHAMapAddNode
-SHAMap::addRootNode(SHAMapHash const& hash, Slice const& rootNode, SHAMapSyncFilter* filter)
+SHAMap::addRootNode(
+    SHAMapHash const& hash,
+    intr_ptr::SharedPtr<SHAMapTreeNode> root_node,
+    SHAMapSyncFilter const* filter)
 {
     // we already have a root_ node
     if (root_->getHash().isNonZero())
@@ -478,14 +481,13 @@ SHAMap::addRootNode(SHAMapHash const& hash, Slice const& rootNode, SHAMapSyncFil
     }

     XRPL_ASSERT(cowid_ >= 1, "xrpl::SHAMap::addRootNode : valid cowid");
-    auto node = SHAMapTreeNode::makeFromWire(rootNode);
-    if (!node || node->getHash() != hash)
+    if (root_node->getHash() != hash)
         return SHAMapAddNode::invalid();

     if (backed_)
-        canonicalize(hash, node);
+        canonicalize(hash, root_node);

-    root_ = node;
+    root_ = root_node;

     if (root_->isLeaf())
         clearSynching();
@@ -501,9 +503,12 @@ SHAMap::addRootNode(SHAMapHash const& hash, Slice const& rootNode, SHAMapSyncFil
 }

 SHAMapAddNode
-SHAMap::addKnownNode(SHAMapNodeID const& node, Slice const& rawNode, SHAMapSyncFilter* filter)
+SHAMap::addKnownNode(
+    SHAMapNodeID const& node_id,
+    intr_ptr::SharedPtr<SHAMapTreeNode> tree_node,
+    SHAMapSyncFilter const* filter)
 {
-    XRPL_ASSERT(!node.isRoot(), "xrpl::SHAMap::addKnownNode : valid node input");
+    XRPL_ASSERT(!node_id.isRoot(), "xrpl::SHAMap::addKnownNode : valid node input");

     if (!isSynching())
     {
@@ -516,14 +521,14 @@ SHAMap::addKnownNode(SHAMapNodeID const& node, Slice const& rawNode, SHAMapSyncF
     auto currNode = root_.get();

     while (currNode->isInner() && !static_cast<SHAMapInnerNode*>(currNode)->isFullBelow(generation) &&
-           (currNodeID.getDepth() < node.getDepth()))
+           (currNodeID.getDepth() < node_id.getDepth()))
     {
-        int const branch = selectBranch(currNodeID, node.getNodeID());
+        int const branch = selectBranch(currNodeID, node_id.getNodeID());
         XRPL_ASSERT(branch >= 0, "xrpl::SHAMap::addKnownNode : valid branch");
         auto inner = static_cast<SHAMapInnerNode*>(currNode);
         if (inner->isEmptyBranch(branch))
         {
-            JLOG(journal_.warn()) << "Add known node for empty branch" << node;
+            JLOG(journal_.warn()) << "Add known node for empty branch" << node_id;
             return SHAMapAddNode::invalid();
         }
@@ -539,62 +544,41 @@ SHAMap::addKnownNode(SHAMapNodeID const& node, Slice const& rawNode, SHAMapSyncF
         if (currNode != nullptr)
             continue;

-        auto newNode = SHAMapTreeNode::makeFromWire(rawNode);
-
-        if (!newNode || childHash != newNode->getHash())
+        if (childHash != tree_node->getHash())
         {
             JLOG(journal_.warn()) << "Corrupt node received";
             return SHAMapAddNode::invalid();
         }

-        // In rare cases, a node can still be corrupt even after hash
-        // validation. For leaf nodes, we perform an additional check to
-        // ensure the node's position in the tree is consistent with its
-        // content to prevent inconsistencies that could
-        // propagate further down the line.
-        if (newNode->isLeaf())
-        {
-            auto const& actualKey = static_cast<SHAMapLeafNode const*>(newNode.get())->peekItem()->key();
-
-            // Validate that this leaf belongs at the target position
-            auto const expectedNodeID = SHAMapNodeID::createID(node.getDepth(), actualKey);
-            if (expectedNodeID.getNodeID() != node.getNodeID())
-            {
-                JLOG(journal_.debug()) << "Leaf node position mismatch: "
-                                       << "expected=" << expectedNodeID.getNodeID() << ", actual=" << node.getNodeID();
-                return SHAMapAddNode::invalid();
-            }
-        }
-
         // Inner nodes must be at a level strictly less than 64
         // but leaf nodes (while notionally at level 64) can be
         // at any depth up to and including 64:
-        if ((currNodeID.getDepth() > leafDepth) || (newNode->isInner() && currNodeID.getDepth() == leafDepth))
+        if ((currNodeID.getDepth() > leafDepth) || (tree_node->isInner() && currNodeID.getDepth() == leafDepth))
         {
             // Map is provably invalid
             state_ = SHAMapState::Invalid;
             return SHAMapAddNode::useful();
         }

-        if (currNodeID != node)
+        if (currNodeID != node_id)
         {
             // Either this node is broken or we didn't request it (yet)
-            JLOG(journal_.warn()) << "unable to hook node " << node;
+            JLOG(journal_.warn()) << "unable to hook node " << node_id;
             JLOG(journal_.info()) << " stuck at " << currNodeID;
-            JLOG(journal_.info()) << "got depth=" << node.getDepth() << ", walked to= " << currNodeID.getDepth();
+            JLOG(journal_.info()) << "got depth=" << node_id.getDepth() << ", walked to= " << currNodeID.getDepth();
             return SHAMapAddNode::useful();
         }

         if (backed_)
-            canonicalize(childHash, newNode);
+            canonicalize(childHash, tree_node);

-        newNode = prevNode->canonicalizeChild(branch, std::move(newNode));
+        tree_node = prevNode->canonicalizeChild(branch, std::move(tree_node));

         if (filter)
         {
             Serializer s;
-            newNode->serializeWithPrefix(s);
-            filter->gotNode(false, childHash, ledgerSeq_, std::move(s.modData()), newNode->getType());
+            tree_node->serializeWithPrefix(s);
+            filter->gotNode(false, childHash, ledgerSeq_, std::move(s.modData()), tree_node->getType());
         }

         return SHAMapAddNode::useful();
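The depth rule in the retained comment can be stated compactly. A small illustrative check, not part of SHAMap (leafDepth here is an assumed constant matching the 64 in the comment):

constexpr int leafDepth = 64;  // assumed value, per the comment above

// Inner nodes must sit strictly above the leaf level; leaves may sit at any
// depth up to and including leafDepth.
constexpr bool depthIsValid(int depth, bool isInner)
{
    if (depth < 0 || depth > leafDepth)
        return false;
    return !(isInner && depth == leafDepth);
}

static_assert(depthIsValid(63, true));   // deepest legal inner node
static_assert(!depthIsValid(64, true));  // inner node at leaf level: invalid
static_assert(depthIsValid(64, false));  // leaf at level 64: fine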
@@ -4357,17 +4357,21 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
         makeSellOffers(XRP(1));
         checkOffers("nft_sell_offers", 1, 0, __LINE__);

-        // There are 15 sell offers.
-        makeSellOffers(XRP(15));
-        checkOffers("nft_sell_offers", 15, 0, __LINE__);
+        // There are 250 sell offers.
+        makeSellOffers(XRP(250));
+        checkOffers("nft_sell_offers", 250, 0, __LINE__);

-        // There are 30 sell offers.
-        makeSellOffers(XRP(30));
-        checkOffers("nft_sell_offers", 30, 0, __LINE__);
+        // There are 251 sell offers.
+        makeSellOffers(XRP(251));
+        checkOffers("nft_sell_offers", 251, 1, __LINE__);

-        // There are 50 sell offers (reduced from 501 to speed up CI).
-        makeSellOffers(XRP(50));
-        checkOffers("nft_sell_offers", 50, 0, __LINE__);
+        // There are 500 sell offers.
+        makeSellOffers(XRP(500));
+        checkOffers("nft_sell_offers", 500, 1, __LINE__);
+
+        // There are 501 sell offers.
+        makeSellOffers(XRP(501));
+        checkOffers("nft_sell_offers", 501, 2, __LINE__);

         // There are no buy offers.
         checkOffers("nft_buy_offers", 0, 0, __LINE__);
@@ -4391,9 +4395,21 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
         makeBuyOffers(XRP(1));
         checkOffers("nft_buy_offers", 1, 0, __LINE__);

-        // There are 30 buy offers (reduced from 250 to speed up CI).
-        makeBuyOffers(XRP(30));
-        checkOffers("nft_buy_offers", 30, 0, __LINE__);
+        // There are 250 buy offers.
+        makeBuyOffers(XRP(250));
+        checkOffers("nft_buy_offers", 250, 0, __LINE__);
+
+        // There are 251 buy offers.
+        makeBuyOffers(XRP(251));
+        checkOffers("nft_buy_offers", 251, 1, __LINE__);
+
+        // There are 500 buy offers.
+        makeBuyOffers(XRP(500));
+        checkOffers("nft_buy_offers", 500, 1, __LINE__);
+
+        // There are 501 buy offers.
+        makeBuyOffers(XRP(501));
+        checkOffers("nft_buy_offers", 501, 2, __LINE__);
     }

     void
@@ -1010,7 +1010,7 @@ public:
         // Charlie - queue a transaction, with a higher fee
         // than default
        env(noop(charlie), fee(15), queued);
-        checkMetrics(*this, env, 6, initQueueMax, 4, 3, 257);
+        checkMetrics(*this, env, 6, initQueueMax, 4, 3, 256);

         BEAST_EXPECT(env.seq(alice) == aliceSeq);
         BEAST_EXPECT(env.seq(bob) == bobSeq);
@@ -109,7 +109,8 @@ public:

         unexpected(a.size() < 1, "NodeSize");

-        BEAST_EXPECT(destination.addRootNode(source.getHash(), makeSlice(a[0].second), nullptr).isGood());
+        auto node = SHAMapTreeNode::makeFromWire(makeSlice(a[0].second));
+        BEAST_EXPECT(destination.addRootNode(source.getHash(), std::move(node), nullptr).isGood());
     }

     do
@@ -145,7 +146,8 @@ public:
             // Don't use BEAST_EXPECT here b/c it will be called a
             // non-deterministic number of times and the number of tests run
             // should be deterministic
-            if (!destination.addKnownNode(b[i].first, makeSlice(b[i].second), nullptr).isUseful())
+            auto node = SHAMapTreeNode::makeFromWire(makeSlice(b[i].second));
+            if (!destination.addKnownNode(b[i].first, std::move(node), nullptr).isUseful())
                 fail("", __FILE__, __LINE__);
         }
     } while (true);
@@ -3,6 +3,7 @@
 #include <xrpld/app/ledger/InboundLedgers.h>
 #include <xrpld/app/ledger/LedgerMaster.h>
 #include <xrpld/app/ledger/TransactionStateSF.h>
+#include <xrpld/app/ledger/detail/LedgerNodeHelpers.h>
 #include <xrpld/app/main/Application.h>
 #include <xrpld/overlay/Overlay.h>
@@ -815,38 +816,43 @@ InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
             std::make_unique<AccountStateSF>(mLedger->stateMap().family().db(), app_.getLedgerMaster())};
     }();

-    try
-    {
-        auto const f = filter.get();
+    auto const f = filter.get();

-        for (auto const& node : packet.nodes())
+    for (auto const& ledger_node : packet.nodes())
+    {
+        if (!validateLedgerNode(app_, ledger_node))
         {
-            auto const nodeID = deserializeSHAMapNodeID(node.nodeid());
-
-            if (!nodeID)
-                throw std::runtime_error("data does not properly deserialize");
-
-            if (nodeID->isRoot())
-            {
-                san += map.addRootNode(rootHash, makeSlice(node.nodedata()), f);
-            }
-            else
-            {
-                san += map.addKnownNode(*nodeID, makeSlice(node.nodedata()), f);
-            }
-
-            if (!san.isGood())
-            {
-                JLOG(journal_.warn()) << "Received bad node data";
-                return;
-            }
+            JLOG(journal_.warn()) << "Got malformed ledger node";
+            san.incInvalid();
+            return;
         }
+
+        auto tree_node = getTreeNode(ledger_node.nodedata());
+        if (!tree_node)
+        {
+            JLOG(journal_.warn()) << "Got invalid node data";
+            san.incInvalid();
+            return;
+        }
+
+        auto const node_id = getSHAMapNodeID(app_, ledger_node, *tree_node);
+        if (!node_id)
+        {
+            JLOG(journal_.warn()) << "Got invalid node id";
+            san.incInvalid();
+            return;
+        }
+
+        if (node_id->isRoot())
+            san += map.addRootNode(rootHash, std::move(*tree_node), f);
+        else
+            san += map.addKnownNode(*node_id, std::move(*tree_node), f);
+
+        if (!san.isGood())
+        {
+            JLOG(journal_.warn()) << "Received bad node data";
+            return;
+        }
     }
-    catch (std::exception const& e)
-    {
-        JLOG(journal_.error()) << "Received bad node data: " << e.what();
-        san.incInvalid();
-        return;
-    }

     if (!map.isSynching())
@@ -885,7 +891,14 @@ InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san)
     }

     AccountStateSF filter(mLedger->stateMap().family().db(), app_.getLedgerMaster());
-    san += mLedger->stateMap().addRootNode(SHAMapHash{mLedger->header().accountHash}, data, &filter);
+    auto node = SHAMapTreeNode::makeFromWire(data);
+    if (!node)
+    {
+        JLOG(journal_.warn()) << "Got invalid node data";
+        san.incInvalid();
+        return false;
+    }
+    san += mLedger->stateMap().addRootNode(SHAMapHash{mLedger->header().accountHash}, node, &filter);
     return san.isGood();
 }

@@ -910,7 +923,14 @@ InboundLedger::takeTxRootNode(Slice const& data, SHAMapAddNode& san)
     }

     TransactionStateSF filter(mLedger->txMap().family().db(), app_.getLedgerMaster());
-    san += mLedger->txMap().addRootNode(SHAMapHash{mLedger->header().txHash}, data, &filter);
+    auto node = SHAMapTreeNode::makeFromWire(data);
+    if (!node)
+    {
+        JLOG(journal_.warn()) << "Got invalid node data";
+        san.incInvalid();
+        return false;
+    }
+    san += mLedger->txMap().addRootNode(SHAMapHash{mLedger->header().txHash}, node, &filter);
     return san.isGood();
 }
@@ -1044,13 +1064,13 @@ InboundLedger::processData(std::shared_ptr<Peer> peer, protocol::TMLedgerData& p

     ScopedLockType sl(mtx_);

-    // Verify node IDs and data are complete
-    for (auto const& node : packet.nodes())
+    // Verify nodes are complete
+    for (auto const& ledger_node : packet.nodes())
     {
-        if (!node.has_nodeid() || !node.has_nodedata())
+        if (!validateLedgerNode(app_, ledger_node))
         {
-            JLOG(journal_.warn()) << "Got bad node";
-            peer->charge(Resource::feeMalformedRequest, "ledger_data bad node");
+            JLOG(journal_.warn()) << "Got malformed ledger node";
+            peer->charge(Resource::feeMalformedRequest, "ledger_node");
             return -1;
         }
     }
@@ -1,5 +1,6 @@
 #include <xrpld/app/ledger/InboundLedgers.h>
 #include <xrpld/app/ledger/LedgerMaster.h>
+#include <xrpld/app/ledger/detail/LedgerNodeHelpers.h>
 #include <xrpld/app/main/Application.h>

 #include <xrpl/basics/DecayingSample.h>
@@ -213,29 +214,20 @@ public:
     gotStaleData(std::shared_ptr<protocol::TMLedgerData> packet_ptr) override
     {
         Serializer s;
-        try
+        for (auto const& ledger_node : packet_ptr->nodes())
         {
-            for (int i = 0; i < packet_ptr->nodes().size(); ++i)
-            {
-                auto const& node = packet_ptr->nodes(i);
-
-                if (!node.has_nodeid() || !node.has_nodedata())
-                    return;
+            if (!validateLedgerNode(app_, ledger_node))
+                return;

-                auto newNode = SHAMapTreeNode::makeFromWire(makeSlice(node.nodedata()));
+            auto const tree_node = getTreeNode(ledger_node.nodedata());
+            if (!tree_node)
+                return;
+            auto const tn = *tree_node;

-                if (!newNode)
-                    return;
-
-                s.erase();
-                newNode->serializeWithPrefix(s);
-
-                app_.getLedgerMaster().addFetchPack(
-                    newNode->getHash().as_uint256(), std::make_shared<Blob>(s.begin(), s.end()));
-            }
-        }
-        catch (std::exception const&)
-        {
+            s.erase();
+            tn->serializeWithPrefix(s);
+
+            app_.getLedgerMaster().addFetchPack(tn->getHash().as_uint256(), std::make_shared<Blob>(s.begin(), s.end()));
         }
     }
@@ -1,5 +1,6 @@
 #include <xrpld/app/ledger/InboundLedgers.h>
 #include <xrpld/app/ledger/InboundTransactions.h>
+#include <xrpld/app/ledger/detail/LedgerNodeHelpers.h>
 #include <xrpld/app/ledger/detail/TransactionAcquire.h>
 #include <xrpld/app/main/Application.h>
@@ -127,26 +128,35 @@ public:
             return;
         }

-        std::vector<std::pair<SHAMapNodeID, Slice>> data;
+        std::vector<std::pair<SHAMapNodeID, intr_ptr::SharedPtr<SHAMapTreeNode>>> data;
         data.reserve(packet.nodes().size());

-        for (auto const& node : packet.nodes())
+        for (auto const& ledger_node : packet.nodes())
         {
-            if (!node.has_nodeid() || !node.has_nodedata())
+            if (!validateLedgerNode(app_, ledger_node))
             {
-                peer->charge(Resource::feeMalformedRequest, "ledger_data");
+                JLOG(j_.warn()) << "Got malformed ledger node";
+                peer->charge(Resource::feeMalformedRequest, "ledger_node");
                 return;
             }

-            auto const id = deserializeSHAMapNodeID(node.nodeid());
-
-            if (!id)
+            auto const tree_node = getTreeNode(ledger_node.nodedata());
+            if (!tree_node)
             {
-                peer->charge(Resource::feeInvalidData, "ledger_data");
+                JLOG(j_.warn()) << "Got invalid node data";
+                peer->charge(Resource::feeInvalidData, "node_data");
                 return;
             }

-            data.emplace_back(std::make_pair(*id, makeSlice(node.nodedata())));
+            auto const node_id = getSHAMapNodeID(app_, ledger_node, *tree_node);
+            if (!node_id)
+            {
+                JLOG(j_.warn()) << "Got invalid node id";
+                peer->charge(Resource::feeInvalidData, "node_id");
+                return;
+            }
+
+            data.emplace_back(std::make_pair(*node_id, *tree_node));
         }

         if (!ta->takeNodes(data, peer).isUseful())
src/xrpld/app/ledger/detail/LedgerNodeHelpers.h (new file, 100 lines)
@@ -0,0 +1,100 @@
#pragma once

#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/AmendmentTable.h>

#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/shamap/SHAMapLeafNode.h>
#include <xrpl/shamap/SHAMapNodeID.h>
#include <xrpl/shamap/SHAMapTreeNode.h>

namespace xrpl {

inline bool
validateLedgerNode(Application& app, protocol::TMLedgerNode const& ledger_node)
{
    if (!ledger_node.has_nodedata())
        return false;

    // When the amendment is enabled, we expect the node ID to be present in the ledger node for
    // inner nodes, and the node depth to be present for leaf nodes. As we cannot confirm whether
    // the node is actually an inner or leaf node here, we will need to perform additional checks
    // separately.
    if (app.getAmendmentTable().isEnabled(fixLedgerNodeID))
        return ledger_node.has_id() || ledger_node.has_depth();

    // When the amendment is disabled, we expect the node ID to always be present.
    return ledger_node.has_nodeid();
}

inline std::optional<intr_ptr::SharedPtr<SHAMapTreeNode>>
getTreeNode(std::string const& data)
{
    auto const slice = makeSlice(data);
    try
    {
        return SHAMapTreeNode::makeFromWire(slice);
    }
    catch (std::exception const&)
    {
        // We can use expected instead once we support C++23.
        return std::nullopt;
    }
}

inline std::optional<SHAMapNodeID>
getSHAMapNodeID(
    Application& app,
    protocol::TMLedgerNode const& ledger_node,
    intr_ptr::SharedPtr<SHAMapTreeNode> const& tree_node)
{
    // When the amendment is enabled and a node depth is present, we can calculate the node ID.
    if (app.getAmendmentTable().isEnabled(fixLedgerNodeID))
    {
        if (tree_node->isInner())
        {
            XRPL_ASSERT(ledger_node.has_id(), "xrpl::getSHAMapNodeID : node ID is present");
            if (!ledger_node.has_id())
                return std::nullopt;

            return deserializeSHAMapNodeID(ledger_node.id());
        }

        if (tree_node->isLeaf())
        {
            XRPL_ASSERT(ledger_node.has_depth(), "xrpl::getSHAMapNodeID : node depth is present");
            if (!ledger_node.has_depth())
                return std::nullopt;

            auto const key = static_cast<SHAMapLeafNode const*>(tree_node.get())->peekItem()->key();
            return SHAMapNodeID::createID(ledger_node.depth(), key);
        }

        UNREACHABLE("xrpl::getSHAMapNodeID : tree node is neither inner nor leaf");
        return std::nullopt;
    }

    // When the amendment is disabled, we expect the node ID to always be present. For leaf nodes
    // we perform an extra check to ensure the node's position in the tree is consistent with its
    // content.
    XRPL_ASSERT(ledger_node.has_nodeid(), "xrpl::getSHAMapNodeID : node ID is present");
    if (!ledger_node.has_nodeid())
        return std::nullopt;

    auto const node_id = deserializeSHAMapNodeID(ledger_node.nodeid());
    if (!node_id)
        return std::nullopt;

    if (tree_node->isLeaf())
    {
        auto const key = static_cast<SHAMapLeafNode const*>(tree_node.get())->peekItem()->key();
        auto const expected_id = SHAMapNodeID::createID(static_cast<int>(node_id->getDepth()), key);
        if (node_id->getNodeID() != expected_id.getNodeID())
            return std::nullopt;
    }

    return node_id;
}

} // namespace xrpl
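The three helpers are designed to be called in sequence, as in the receive paths changed elsewhere in this diff. A hedged usage sketch (decodeLedgerNode is a hypothetical wrapper, not part of the new header):

inline std::optional<std::pair<SHAMapNodeID, intr_ptr::SharedPtr<SHAMapTreeNode>>>
decodeLedgerNode(Application& app, protocol::TMLedgerNode const& ledger_node)
{
    if (!validateLedgerNode(app, ledger_node))  // required fields present?
        return std::nullopt;

    auto tree_node = getTreeNode(ledger_node.nodedata());  // wire bytes -> tree node
    if (!tree_node)
        return std::nullopt;

    auto node_id = getSHAMapNodeID(app, ledger_node, *tree_node);  // amendment-aware ID
    if (!node_id)
        return std::nullopt;

    return std::make_pair(*node_id, std::move(*tree_node));
}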
@@ -144,7 +144,7 @@ TransactionAcquire::trigger(std::shared_ptr<Peer> const& peer)

 SHAMapAddNode
 TransactionAcquire::takeNodes(
-    std::vector<std::pair<SHAMapNodeID, Slice>> const& data,
+    std::vector<std::pair<SHAMapNodeID, intr_ptr::SharedPtr<SHAMapTreeNode>>> const& data,
     std::shared_ptr<Peer> const& peer)
 {
     ScopedLockType sl(mtx_);
@@ -170,18 +170,20 @@ TransactionAcquire::takeNodes(

     for (auto const& d : data)
     {
+        if (d.first.isRoot() && mHaveRoot)
+        {
+            JLOG(journal_.debug()) << "Got root TXS node, already have it";
+            continue;
+        }
+
         if (d.first.isRoot())
         {
-            if (mHaveRoot)
-                JLOG(journal_.debug()) << "Got root TXS node, already have it";
-            else if (!mMap->addRootNode(SHAMapHash{hash_}, d.second, nullptr).isGood())
+            if (!mMap->addRootNode(SHAMapHash{hash_}, std::move(d.second), nullptr).isGood())
                 JLOG(journal_.warn()) << "TX acquire got bad root node";
             else
                 mHaveRoot = true;
         }
-        else if (!mMap->addKnownNode(d.first, d.second, &sf).isGood())
+        else if (!mMap->addKnownNode(d.first, std::move(d.second), &sf).isGood())
         {
             JLOG(journal_.warn()) << "TX acquire got bad non-root node";
             return SHAMapAddNode::invalid();
@@ -19,7 +19,9 @@ public:
     ~TransactionAcquire() = default;

     SHAMapAddNode
-    takeNodes(std::vector<std::pair<SHAMapNodeID, Slice>> const& data, std::shared_ptr<Peer> const&);
+    takeNodes(
+        std::vector<std::pair<SHAMapNodeID, intr_ptr::SharedPtr<SHAMapTreeNode>>> const& data,
+        std::shared_ptr<Peer> const&);

     void
     init(int startPeers);
@@ -3,6 +3,7 @@
 #include <xrpld/app/ledger/InboundTransactions.h>
 #include <xrpld/app/ledger/LedgerMaster.h>
 #include <xrpld/app/ledger/TransactionMaster.h>
+#include <xrpld/app/misc/AmendmentTable.h>
 #include <xrpld/app/misc/LoadFeeTrack.h>
 #include <xrpld/app/misc/Transaction.h>
 #include <xrpld/app/misc/ValidatorList.h>
@@ -3151,9 +3152,25 @@ PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
         {
             if (ledgerData.nodes_size() >= Tuning::hardMaxReplyNodes)
                 break;

             protocol::TMLedgerNode* node{ledgerData.add_nodes()};
-            node->set_nodeid(d.first.getRawString());
             node->set_nodedata(d.second.data(), d.second.size());
+
+            // When the amendment is disabled, we always set the node ID. However, when the amendment is
+            // enabled, we only set it for inner nodes, while for leaf nodes we set the node depth instead.
+            if (!app_.getAmendmentTable().isEnabled(fixLedgerNodeID))
+            {
+                node->set_nodeid(d.first.getRawString());
+                continue;
+            }
+
+            // TODO: Can we determine whether the node is an inner or leaf node without calling the
+            // makeFromWire function?
+            auto const node_slice = makeSlice(node->nodedata());
+            if (auto const tree_node = SHAMapTreeNode::makeFromWire(node_slice); tree_node->isInner())
+                node->set_id(d.first.getRawString());
+            else if (tree_node->isLeaf())
+                node->set_depth(d.first.getDepth());
         }
     }
     else