Compare commits

..

77 Commits

Author SHA1 Message Date
Bart
5b5ad09c53 Merge branch 'develop' into bthomee/node_depth 2026-04-24 14:44:42 -04:00
Bart
2405a3353c Add [[nodiscard]] to getNodeFat 2026-04-24 09:52:39 -04:00
Bart
705622011b Merge branch 'develop' into bthomee/node_depth 2026-04-24 09:47:14 -04:00
Bart
ae5f5cb92b Use emplace_back instead of push_back 2026-04-24 09:45:03 -04:00
Bart
cde8f17b5d Merge branch 'develop' into bthomee/node_depth 2026-04-24 09:26:37 -04:00
Bart
c8fb69ee1a Address Copilot feedback 2026-04-23 19:02:21 -07:00
Bart
430f770f2a Apply clang-tidy diff 2026-04-23 18:52:19 -07:00
Bart
44590a7008 Review feedback 2026-04-23 18:15:01 -07:00
Bart
1934c316b2 Update levelization 2026-04-23 15:46:24 -07:00
Bart
fd2a8b5825 Apply clang-tidy diff 2026-04-23 15:43:59 -07:00
Bart
012144c16c Merge branch 'develop' into bthomee/node_depth 2026-04-23 15:30:19 -07:00
Bart
8e2d949680 Review feedback 2026-04-23 15:01:01 -07:00
Bart
b781018fee Merge branch 'develop' into bthomee/node_depth 2026-04-23 04:50:55 -07:00
Bart
d867c9b26d Address Copilot feedback 2026-04-21 15:29:50 -07:00
Bart
7749ed8488 Address Copilot feedback 2026-04-21 15:03:32 -07:00
Bart
4d01cac564 Update levelization 2026-04-21 15:00:07 -07:00
Bart
98e1ad2dec Add more clang-tidy fixes 2026-04-21 14:42:21 -07:00
Bart
342171bf20 Merge branch 'develop' into bthomee/node_depth 2026-04-21 14:06:15 -07:00
Bart
fe74f48e7a Add more clang-tidy fixes 2026-04-21 13:29:19 -07:00
Bart
d6e05cf513 Add more clang-tidy fixes 2026-04-21 11:28:40 -07:00
Bart
e31419aa06 Address Copilot feedback 2026-04-21 11:08:02 -07:00
Bart
05b4c67b96 Restore invalid clang-tidy fix 2026-04-21 10:58:12 -07:00
Bart
2524476124 Update levelization 2026-04-21 10:26:25 -07:00
Bart
8007788d77 Merge branch 'develop' into bthomee/node_depth 2026-04-21 10:25:58 -07:00
Bart
809395a59c Merge branch 'develop' into bthomee/node_depth 2026-04-20 11:21:40 -04:00
Bart
3ef64e019b Merge branch 'develop' into bthomee/node_depth 2026-04-02 16:56:09 -04:00
Bart
228ad1e98f Fix clang-tidy findings 2026-03-31 17:33:15 -04:00
Bart
c4b342a027 Merge branch 'develop' into bthomee/node_depth 2026-03-31 17:12:43 -04:00
Bart
556d80e724 Fix clang-tidy findings 2026-03-31 16:31:24 -04:00
Bart
044f1e67b9 Merge branch 'develop' into bthomee/node_depth 2026-03-31 14:00:34 -04:00
Bart
0ad72fae8f Merge branch 'develop' into bthomee/node_depth 2026-03-22 06:33:02 -04:00
Bart
cc383c5fb8 Clang-tidy fixes 2026-03-18 15:09:46 -04:00
Bart
88715f1e5a Merge branch 'develop' into bthomee/node_depth 2026-03-18 13:38:55 -04:00
Bart
e1477cef0c Fix merge conflict 2026-03-18 09:56:08 -04:00
Bart
942874d7b0 Merge branch 'develop' into bthomee/node_depth 2026-03-18 09:47:16 -04:00
Bart
79326fc6b5 Merge branch 'develop' into bthomee/node_depth 2026-03-16 19:48:17 -04:00
Bart
48535d5226 Merge branch 'develop' into bthomee/node_depth 2026-03-10 17:11:14 +01:00
Bart
d1a6558080 Merge branch 'develop' into bthomee/node_depth 2026-03-10 15:39:32 +01:00
Bart
84f86b354f Add defensive check 2026-03-06 13:28:39 +01:00
Bart
40a3985b02 Merge branch 'develop' into bthomee/node_depth 2026-03-06 09:27:59 +01:00
Bart
208bd35d45 Merge branch 'develop' into bthomee/node_depth 2026-03-05 08:36:13 +01:00
Bart
e90fbbf7b2 Copilot review feedback 2026-03-02 13:39:29 -05:00
Bart
277450e648 Merge branch 'develop' into bthomee/node_depth 2026-03-02 12:16:33 -05:00
Bart
e6993524ea Copilot review feedback 2026-03-02 12:10:12 -05:00
Bart
b117ecc6a2 Use std::string_view 2026-03-02 11:58:57 -05:00
Bart
6c3b00c342 Change takeNodes vector argument to r-value 2026-03-02 09:41:23 -05:00
Bart
8c296a935a Improve docstring 2026-03-01 17:44:48 -05:00
Bart
573ba82181 Copilot review feedback 2026-03-01 17:33:03 -05:00
Bart
1542ab7e27 Copilot review feedback 2026-03-01 17:04:04 -05:00
Bart
6374f4886d Support leaf nodes at depth 0, use std::move, simplify tests 2026-03-01 16:44:58 -05:00
Bart
ebf336f472 Copilot review feedback 2026-03-01 15:10:57 -05:00
Bart
ddc15ad612 Copilot review feedback 2026-03-01 14:48:27 -05:00
Bart
82db6ac498 Restore try-catch to protect against other exceptions 2026-03-01 13:33:39 -05:00
Bart
f749c41306 Add log message for consistency 2026-02-28 17:31:22 -05:00
Bart
f25e47a58d Improve comment 2026-02-28 17:27:15 -05:00
Bart
2396799bd8 Update levelization 2026-02-28 16:03:02 -05:00
Bart
4855b9f96a Improve function docstrings, fix tests 2026-02-28 15:46:49 -05:00
Bart
b2f65cb7eb Fix protocol version test 2026-02-28 14:40:55 -05:00
Bart
c523673885 Update levelization 2026-02-28 14:00:13 -05:00
Bart
caac4d63d3 Merge branch 'develop' into bthomee/node_depth 2026-02-28 13:56:37 -05:00
Bart
29b0076fa8 Use new protocol version instead of amendment, add tests 2026-02-28 13:54:00 -05:00
Bart
c9aa1094a7 Update docstrings 2026-02-27 12:55:14 -05:00
Bart
b86f69cb82 Merge branch 'develop' into bthomee/node_depth 2026-02-26 17:33:25 -05:00
Bart
5d0bf78512 Clang-format to 100 line length 2026-02-26 17:09:20 -05:00
Bart
554df631c6 Remove pragma once 2026-02-18 08:28:56 -05:00
Bart
5e704bfdfb Merge branch 'develop' into bthomee/node_depth 2026-02-18 08:02:52 -05:00
Bart
fe8cc02bfa Refine 2026-02-18 07:54:33 -05:00
Bart
061c033f52 Use oneof in proto message 2026-02-16 16:50:40 -05:00
Bart
832a7e7e4a Remove depth, do not include node ID for leaf nodes 2026-02-13 17:05:05 -05:00
Bart
b2371c4c02 Fixes 2026-02-13 15:47:08 -05:00
Bart
b94a7c4b44 Merge branch 'develop' into bthomee/node_depth 2026-02-13 11:46:56 -05:00
Bart
9b9027112d Use helper functions 2026-02-13 11:44:58 -05:00
Bart
8e7889c66e Refactor 2026-02-12 16:55:38 -05:00
Bart
d836c3788d Merge branch 'develop' into bthomee/node_depth 2026-02-12 15:33:13 -05:00
Bart
1cb7c0293f Check if amendment is enabled 2026-02-12 06:31:32 -05:00
Bart
52dabc1f79 Remove deprecated stanza on nodeid field 2026-02-11 16:28:35 -05:00
Bart
2d78d41f7b perf: Replace node ID by depth in TMLedgerNode 2026-02-11 15:55:16 -05:00
47 changed files with 973 additions and 675 deletions

View File

@@ -93,7 +93,6 @@ test.core > xrpl.basics
test.core > xrpl.core
test.core > xrpld.core
test.core > xrpl.json
test.core > xrpl.protocol
test.core > xrpl.rdb
test.core > xrpl.server
test.csf > xrpl.basics

View File

@@ -1,139 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2024 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_CANPROCESS_H_INCLUDED
#define RIPPLE_BASICS_CANPROCESS_H_INCLUDED
#include <functional>
#include <mutex>
#include <set>
/** RAII class to check if an Item is already being processed on another thread,
* as indicated by its presence in a Collection.
*
* If the Item is not in the Collection, it will be added under lock in the
* ctor, and removed under lock in the dtor. The object will be considered
* "usable" and evaluate to `true`.
*
* If the Item is in the Collection, no changes will be made to the collection,
* and the CanProcess object will be considered "unusable".
*
* It's up to the caller to decide what "usable" and "unusable" mean. (e.g.
* Process or skip a block of code, or set a flag.)
*
* The current use is to avoid lock contention that would be involved in
* processing something associated with the Item.
*
* Examples:
*
* void IncomingLedgers::acquireAsync(LedgerHash const& hash, ...)
* {
* if (CanProcess check{acquiresMutex_, pendingAcquires_, hash})
* {
* acquire(hash, ...);
* }
* }
*
* bool
* NetworkOPsImp::recvValidation(
* std::shared_ptr<STValidation> const& val,
* std::string const& source)
* {
* CanProcess check(
* validationsMutex_, pendingValidations_, val->getLedgerHash());
* BypassAccept bypassAccept =
* check ? BypassAccept::no : BypassAccept::yes;
* handleNewValidation(app_, val, source, bypassAccept, m_journal);
* }
*
*/
class CanProcess
{
public:
    /** Try to claim `item` for processing.
     *
     * Inserts `item` into `collection` under `mtx`. If the insert succeeds,
     * this object is "usable" (evaluates to true) and the item is removed
     * again, under lock, when this object is destroyed. If the item was
     * already present, nothing is changed and the object is "unusable".
     */
    template <class Mutex, class Collection, class Item>
    CanProcess(Mutex& mtx, Collection& collection, Item const& item)
        : cleanup_(insert(mtx, collection, item))
    {
    }

    ~CanProcess()
    {
        if (cleanup_)
            cleanup_();
    }

    CanProcess(CanProcess const&) = delete;
    CanProcess&
    operator=(CanProcess const&) = delete;

    /** True if the item was inserted and this object "owns" it. */
    explicit
    operator bool() const
    {
        return static_cast<bool>(cleanup_);
    }

private:
    template <bool useIterator, class Mutex, class Collection, class Item>
    std::function<void()>
    doInsert(Mutex& mtx, Collection& collection, Item const& item)
    {
        std::unique_lock<Mutex> lock(mtx);
        // TODO: Use structured binding once LLVM 16 is the minimum supported
        // version. See also: https://github.com/llvm/llvm-project/issues/48582
        // https://github.com/llvm/llvm-project/commit/127bf44385424891eb04cff8e52d3f157fc2cb7c
        auto const insertResult = collection.insert(item);
        auto const it = insertResult.first;
        if (!insertResult.second)
            return {};
        if constexpr (useIterator)
            return [&mtx, &collection, it]() {
                std::unique_lock<Mutex> lock(mtx);
                collection.erase(it);
            };
        else
            // Capture `item` BY VALUE: the cleanup lambda runs in the
            // destructor, after the constructor's `Item const&` parameter may
            // refer to a destroyed temporary. Capturing it by reference (as a
            // blanket `[&]` would) risks a dangling reference at erase time.
            // `mtx` and `collection` are captured by reference deliberately;
            // the caller guarantees they outlive this object.
            return [&mtx, &collection, item]() {
                std::unique_lock<Mutex> lock(mtx);
                collection.erase(item);
            };
    }

    // Generic insert() function doesn't use iterators because they may get
    // invalidated
    template <class Mutex, class Collection, class Item>
    std::function<void()>
    insert(Mutex& mtx, Collection& collection, Item const& item)
    {
        return doInsert<false>(mtx, collection, item);
    }

    // Specialize insert() for std::set, which does not invalidate iterators
    // for insert and erase
    template <class Mutex, class Item>
    std::function<void()>
    insert(Mutex& mtx, std::set<Item>& collection, Item const& item)
    {
        return doInsert<true>(mtx, collection, item);
    }

    // If set, then the item is "usable"
    std::function<void()> cleanup_;
};
#endif

View File

@@ -11,6 +11,7 @@
#include <limits>
#include <stdexcept>
#include <string>
#include <string_view>
#include <type_traits>
#include <vector>
@@ -231,4 +232,11 @@ makeSlice(std::basic_string<char, Traits, Alloc> const& s)
return Slice(s.data(), s.size());
}
template <class Traits>
Slice
makeSlice(std::basic_string_view<char, Traits> s)
{
return Slice(s.data(), s.size());
}
} // namespace xrpl

View File

@@ -197,7 +197,7 @@ public:
/** Add a suppression peer and get message's relay status.
* Return pair:
* element 1: true if the key is added.
* element 1: true if the peer is added.
* element 2: optional is seated to the relay time point or
* is unseated if has not relayed yet. */
std::pair<bool, std::optional<Stopwatch::time_point>>

View File

@@ -246,7 +246,15 @@ message TMGetObjectByHash {
message TMLedgerNode {
required bytes nodedata = 1;
optional bytes nodeid = 2; // missing for ledger base data
// Used when protocol version <2.3. Not set for ledger base data.
optional bytes nodeid = 2;
// Used when protocol version >=2.3. Neither value is set for ledger base data.
oneof reference {
bytes id = 3; // Set for inner nodes.
uint32 depth = 4; // Set for leaf nodes.
}
}
enum TMLedgerInfoType {

View File

@@ -35,8 +35,6 @@ struct LedgerHeader
// If validated is false, it means "not yet validated."
// Once validated is true, it will never be set false at a later time.
// NOTE: If you are accessing this directly, you are probably doing it
// wrong. Use LedgerMaster::isValidated().
// VFALCO TODO Make this not mutable
bool mutable validated = false;
bool accepted = false;

View File

@@ -185,7 +185,7 @@ public:
virtual bool
isFull() = 0;
virtual void
setMode(OperatingMode om, char const* reason) = 0;
setMode(OperatingMode om) = 0;
virtual bool
isBlocked() = 0;
virtual bool

View File

@@ -16,6 +16,7 @@
#include <set>
#include <stack>
#include <tuple>
#include <vector>
namespace xrpl {
@@ -73,6 +74,22 @@ enum class SHAMapState {
See https://en.wikipedia.org/wiki/Merkle_tree
*/
/** Holds a SHAMap node's identity, serialized data, and leaf status.
Used by getNodeFat to return node data for peer synchronization.
*/
struct SHAMapNodeData
{
    SHAMapNodeID nodeID;  // identity/position of the node (see struct docstring)
    Blob data;            // serialized node contents
    bool isLeaf;          // true for leaf nodes, false for inner nodes
    // `d` is taken by value and moved into place so callers can hand over
    // the serialized blob without an extra copy.
    SHAMapNodeData(SHAMapNodeID const& id, Blob d, bool leaf)
        : nodeID(id), data(std::move(d)), isLeaf(leaf)
    {
    }
};
class SHAMap
{
private:
@@ -85,7 +102,7 @@ private:
/** The sequence of the ledger that this map references, if any. */
std::uint32_t ledgerSeq_ = 0;
intr_ptr::SharedPtr<SHAMapTreeNode> root_;
SHAMapTreeNodePtr root_;
mutable SHAMapState state_;
SHAMapType const type_;
bool backed_ = true; // Map is backed by the database
@@ -250,10 +267,10 @@ public:
std::vector<std::pair<SHAMapNodeID, uint256>>
getMissingNodes(int maxNodes, SHAMapSyncFilter* filter);
bool
[[nodiscard]] bool
getNodeFat(
SHAMapNodeID const& wanted,
std::vector<std::pair<SHAMapNodeID, Blob>>& data,
std::vector<SHAMapNodeData>& data,
bool fatLeaves,
std::uint32_t depth) const;
@@ -280,10 +297,42 @@ public:
void
serializeRoot(Serializer& s) const;
/** Add a root node to the SHAMap during synchronization.
*
* This function is used when receiving the root node of a SHAMap from a peer during ledger
* synchronization. The node must already have been deserialized.
*
* @param hash The expected hash of the root node.
* @param rootNode A deserialized root node to add.
* @param filter Optional sync filter to track received nodes.
* @return Status indicating whether the node was useful, duplicate, or invalid.
*
* @note This function expects the rootNode to be a valid, deserialized SHAMapTreeNode. The
* caller is responsible for deserialization and basic validation before calling this
* function.
*/
SHAMapAddNode
addRootNode(SHAMapHash const& hash, Slice const& rootNode, SHAMapSyncFilter* filter);
addRootNode(SHAMapHash const& hash, SHAMapTreeNodePtr rootNode, SHAMapSyncFilter const* filter);
/** Add a known node at a specific position in the SHAMap during synchronization.
*
* This function is used when receiving nodes from peers during ledger synchronization. The node
* is inserted at the position specified by nodeID. The node must already have been
* deserialized.
*
* @param nodeID The position in the tree where this node belongs.
* @param treeNode A deserialized tree node to add.
* @param filter Optional sync filter to track received nodes.
* @return Status indicating whether the node was useful, duplicate, or invalid.
*
* @note This function expects that the caller has already validated that the nodeID is
* consistent with the node's content.
*/
SHAMapAddNode
addKnownNode(SHAMapNodeID const& nodeID, Slice const& rawNode, SHAMapSyncFilter* filter);
addKnownNode(
SHAMapNodeID const& nodeID,
SHAMapTreeNodePtr treeNode,
SHAMapSyncFilter const* filter);
// status functions
void
@@ -326,36 +375,32 @@ public:
invariants() const;
private:
using SharedPtrNodeStack =
std::stack<std::pair<intr_ptr::SharedPtr<SHAMapTreeNode>, SHAMapNodeID>>;
using SharedPtrNodeStack = std::stack<std::pair<SHAMapTreeNodePtr, SHAMapNodeID>>;
using DeltaRef =
std::pair<boost::intrusive_ptr<SHAMapItem const>, boost::intrusive_ptr<SHAMapItem const>>;
// tree node cache operations
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
cacheLookup(SHAMapHash const& hash) const;
void
canonicalize(SHAMapHash const& hash, intr_ptr::SharedPtr<SHAMapTreeNode>&) const;
canonicalize(SHAMapHash const& hash, SHAMapTreeNodePtr&) const;
// database operations
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
fetchNodeFromDB(SHAMapHash const& hash) const;
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
fetchNodeNT(SHAMapHash const& hash) const;
intr_ptr::SharedPtr<SHAMapTreeNode>
fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const;
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter const* filter) const;
SHAMapTreeNodePtr
fetchNode(SHAMapHash const& hash) const;
intr_ptr::SharedPtr<SHAMapTreeNode>
checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const;
SHAMapTreeNodePtr
checkFilter(SHAMapHash const& hash, SHAMapSyncFilter const* filter) const;
/** Update hashes up to the root */
void
dirtyUp(
SharedPtrNodeStack& stack,
uint256 const& target,
intr_ptr::SharedPtr<SHAMapTreeNode> terminal);
dirtyUp(SharedPtrNodeStack& stack, uint256 const& target, SHAMapTreeNodePtr terminal);
/** Walk towards the specified id, returning the node. Caller must check
if the return is nullptr, and if not, if the node->peekItem()->key() ==
@@ -377,25 +422,21 @@ private:
preFlushNode(intr_ptr::SharedPtr<Node> node) const;
/** write and canonicalize modified node */
intr_ptr::SharedPtr<SHAMapTreeNode>
writeNode(NodeObjectType t, intr_ptr::SharedPtr<SHAMapTreeNode> node) const;
SHAMapTreeNodePtr
writeNode(NodeObjectType t, SHAMapTreeNodePtr node) const;
// returns the first item at or below this node
SHAMapLeafNode*
firstBelow(intr_ptr::SharedPtr<SHAMapTreeNode>, SharedPtrNodeStack& stack, int branch = 0)
const;
firstBelow(SHAMapTreeNodePtr node, SharedPtrNodeStack& stack, int branch = 0) const;
// returns the last item at or below this node
SHAMapLeafNode*
lastBelow(
intr_ptr::SharedPtr<SHAMapTreeNode> node,
SharedPtrNodeStack& stack,
int branch = branchFactor) const;
lastBelow(SHAMapTreeNodePtr node, SharedPtrNodeStack& stack, int branch = branchFactor) const;
// helper function for firstBelow and lastBelow
SHAMapLeafNode*
belowHelper(
intr_ptr::SharedPtr<SHAMapTreeNode> node,
SHAMapTreeNodePtr node,
SharedPtrNodeStack& stack,
int branch,
std::tuple<int, std::function<bool(int)>, std::function<void(int&)>> const& loopParams)
@@ -407,20 +448,19 @@ private:
descend(SHAMapInnerNode*, int branch) const;
SHAMapTreeNode*
descendThrow(SHAMapInnerNode*, int branch) const;
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
descend(SHAMapInnerNode&, int branch) const;
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
descendThrow(SHAMapInnerNode&, int branch) const;
// Descend with filter
// If pending, callback is called as if it called fetchNodeNT
using descendCallback =
std::function<void(intr_ptr::SharedPtr<SHAMapTreeNode>, SHAMapHash const&)>;
using descendCallback = std::function<void(SHAMapTreeNodePtr, SHAMapHash const&)>;
SHAMapTreeNode*
descendAsync(
SHAMapInnerNode* parent,
int branch,
SHAMapSyncFilter* filter,
SHAMapSyncFilter const* filter,
bool& pending,
descendCallback&&) const;
@@ -429,11 +469,11 @@ private:
SHAMapInnerNode* parent,
SHAMapNodeID const& parentID,
int branch,
SHAMapSyncFilter* filter) const;
SHAMapSyncFilter const* filter) const;
// Non-storing
// Does not hook the returned node to its parent
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
descendNoStore(SHAMapInnerNode&, int branch) const;
/** If there is only one leaf below this node, get its contents */
@@ -495,10 +535,10 @@ private:
// nodes we may have acquired from deferred reads
using DeferredNode = std::tuple<
SHAMapInnerNode*, // parent node
SHAMapNodeID, // parent node ID
int, // branch
intr_ptr::SharedPtr<SHAMapTreeNode>>; // node
SHAMapInnerNode*, // parent node
SHAMapNodeID, // parent node ID
int, // branch
SHAMapTreeNodePtr>; // node
int deferred_;
std::mutex deferLock_;
@@ -524,7 +564,7 @@ private:
gmn_ProcessDeferredReads(MissingNodes&);
// fetch from DB helper function
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
finishFetch(SHAMapHash const& hash, std::shared_ptr<NodeObject> const& object) const;
};

View File

@@ -27,7 +27,7 @@ public:
{
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
clone(std::uint32_t cowid) const final
{
return intr_ptr::make_shared<SHAMapAccountStateLeafNode>(item_, cowid, hash_);

View File

@@ -87,7 +87,7 @@ public:
void
partialDestructor() override;
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
clone(std::uint32_t cowid) const override;
SHAMapNodeType
@@ -121,19 +121,19 @@ public:
getChildHash(int m) const;
void
setChild(int m, intr_ptr::SharedPtr<SHAMapTreeNode> child);
setChild(int m, SHAMapTreeNodePtr child);
void
shareChild(int m, intr_ptr::SharedPtr<SHAMapTreeNode> const& child);
shareChild(int m, SHAMapTreeNodePtr const& child);
SHAMapTreeNode*
getChildPointer(int branch);
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
getChild(int branch);
intr_ptr::SharedPtr<SHAMapTreeNode>
canonicalizeChild(int branch, intr_ptr::SharedPtr<SHAMapTreeNode> node);
SHAMapTreeNodePtr
canonicalizeChild(int branch, SHAMapTreeNodePtr node);
// sync functions
bool
@@ -161,10 +161,10 @@ public:
void
invariants(bool is_root = false) const override;
static intr_ptr::SharedPtr<SHAMapTreeNode>
static SHAMapTreeNodePtr
makeFullInner(Slice data, SHAMapHash const& hash, bool hashValid);
static intr_ptr::SharedPtr<SHAMapTreeNode>
static SHAMapTreeNodePtr
makeCompressedInner(Slice data);
};

View File

@@ -166,4 +166,6 @@ private:
makeTransactionWithMeta(Slice data, SHAMapHash const& hash, bool hashValid);
};
using SHAMapTreeNodePtr = intr_ptr::SharedPtr<SHAMapTreeNode>;
} // namespace xrpl

View File

@@ -26,7 +26,7 @@ public:
{
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
clone(std::uint32_t cowid) const final
{
return intr_ptr::make_shared<SHAMapTxLeafNode>(item_, cowid, hash_);

View File

@@ -27,7 +27,7 @@ public:
{
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
clone(std::uint32_t cowid) const override
{
return intr_ptr::make_shared<SHAMapTxPlusMetaLeafNode>(item_, cowid, hash_);

View File

@@ -11,5 +11,5 @@ using TreeNodeCache = TaggedCache<
SHAMapTreeNode,
/*IsKeyCache*/ false,
intr_ptr::SharedWeakUnionPtr<SHAMapTreeNode>,
intr_ptr::SharedPtr<SHAMapTreeNode>>;
SHAMapTreeNodePtr>;
} // namespace xrpl

View File

@@ -148,7 +148,7 @@ public:
/** Get the number of elements in each array and a pointer to the start
of each array.
*/
[[nodiscard]] std::tuple<std::uint8_t, SHAMapHash*, intr_ptr::SharedPtr<SHAMapTreeNode>*>
[[nodiscard]] std::tuple<std::uint8_t, SHAMapHash*, SHAMapTreeNodePtr*>
getHashesAndChildren() const;
/** Get the `hashes` array */
@@ -156,7 +156,7 @@ public:
getHashes() const;
/** Get the `children` array */
[[nodiscard]] intr_ptr::SharedPtr<SHAMapTreeNode>*
[[nodiscard]] SHAMapTreeNodePtr*
getChildren() const;
/** Call the `f` callback for all 16 (branchFactor) branches - even if

View File

@@ -25,8 +25,7 @@ static_assert(
// Terminology: A chunk is the memory being allocated from a block. A block
// contains multiple chunks. This is the terminology the boost documentation
// uses. Pools use "Simple Segregated Storage" as their storage format.
constexpr size_t elementSizeBytes =
(sizeof(SHAMapHash) + sizeof(intr_ptr::SharedPtr<SHAMapTreeNode>));
constexpr size_t elementSizeBytes = (sizeof(SHAMapHash) + sizeof(SHAMapTreeNodePtr));
constexpr size_t blockSizeBytes = kilobytes(512);
@@ -363,8 +362,7 @@ inline TaggedPointer::TaggedPointer(
// keep
new (&dstHashes[dstIndex]) SHAMapHash{srcHashes[srcIndex]};
new (&dstChildren[dstIndex])
intr_ptr::SharedPtr<SHAMapTreeNode>{std::move(srcChildren[srcIndex])};
new (&dstChildren[dstIndex]) SHAMapTreeNodePtr{std::move(srcChildren[srcIndex])};
++dstIndex;
++srcIndex;
}
@@ -375,7 +373,7 @@ inline TaggedPointer::TaggedPointer(
if (dstIsDense)
{
new (&dstHashes[dstIndex]) SHAMapHash{};
new (&dstChildren[dstIndex]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
new (&dstChildren[dstIndex]) SHAMapTreeNodePtr{};
++dstIndex;
}
}
@@ -383,7 +381,7 @@ inline TaggedPointer::TaggedPointer(
{
// add
new (&dstHashes[dstIndex]) SHAMapHash{};
new (&dstChildren[dstIndex]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
new (&dstChildren[dstIndex]) SHAMapTreeNodePtr{};
++dstIndex;
if (srcIsDense)
{
@@ -396,7 +394,7 @@ inline TaggedPointer::TaggedPointer(
if (dstIsDense)
{
new (&dstHashes[dstIndex]) SHAMapHash{};
new (&dstChildren[dstIndex]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
new (&dstChildren[dstIndex]) SHAMapTreeNodePtr{};
++dstIndex;
}
if (srcIsDense)
@@ -413,7 +411,7 @@ inline TaggedPointer::TaggedPointer(
for (int i = dstIndex; i < dstNumAllocated; ++i)
{
new (&dstHashes[i]) SHAMapHash{};
new (&dstChildren[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
new (&dstChildren[i]) SHAMapTreeNodePtr{};
}
*this = std::move(dst);
}
@@ -433,7 +431,7 @@ inline TaggedPointer::TaggedPointer(
// allocate hashes and children, but do not run constructors
TaggedPointer newHashesAndChildren{RawAllocateTag{}, toAllocate};
SHAMapHash *newHashes, *oldHashes;
intr_ptr::SharedPtr<SHAMapTreeNode>*newChildren, *oldChildren;
SHAMapTreeNodePtr *newChildren, *oldChildren;
std::uint8_t newNumAllocated;
// structured bindings can't be captured in c++ 17; use tie instead
std::tie(newNumAllocated, newHashes, newChildren) = newHashesAndChildren.getHashesAndChildren();
@@ -444,8 +442,7 @@ inline TaggedPointer::TaggedPointer(
// new arrays are dense, old arrays are sparse
iterNonEmptyChildIndexes(isBranch, [&](auto branchNum, auto indexNum) {
new (&newHashes[branchNum]) SHAMapHash{oldHashes[indexNum]};
new (&newChildren[branchNum])
intr_ptr::SharedPtr<SHAMapTreeNode>{std::move(oldChildren[indexNum])};
new (&newChildren[branchNum]) SHAMapTreeNodePtr{std::move(oldChildren[indexNum])};
});
// Run the constructors for the remaining elements
for (int i = 0; i < SHAMapInnerNode::branchFactor; ++i)
@@ -453,7 +450,7 @@ inline TaggedPointer::TaggedPointer(
if ((1 << i) & isBranch)
continue;
new (&newHashes[i]) SHAMapHash{};
new (&newChildren[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
new (&newChildren[i]) SHAMapTreeNodePtr{};
}
}
else
@@ -463,14 +460,14 @@ inline TaggedPointer::TaggedPointer(
iterNonEmptyChildIndexes(isBranch, [&](auto branchNum, auto indexNum) {
new (&newHashes[curCompressedIndex]) SHAMapHash{oldHashes[indexNum]};
new (&newChildren[curCompressedIndex])
intr_ptr::SharedPtr<SHAMapTreeNode>{std::move(oldChildren[indexNum])};
SHAMapTreeNodePtr{std::move(oldChildren[indexNum])};
++curCompressedIndex;
});
// Run the constructors for the remaining elements
for (int i = curCompressedIndex; i < newNumAllocated; ++i)
{
new (&newHashes[i]) SHAMapHash{};
new (&newChildren[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
new (&newChildren[i]) SHAMapTreeNodePtr{};
}
}
@@ -484,7 +481,7 @@ inline TaggedPointer::TaggedPointer(std::uint8_t numChildren)
for (std::size_t i = 0; i < numAllocated; ++i)
{
new (&hashes[i]) SHAMapHash{};
new (&children[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
new (&children[i]) SHAMapTreeNodePtr{};
}
}
@@ -522,14 +519,13 @@ TaggedPointer::isDense() const
return (tp_ & tagMask) == boundaries.size() - 1;
}
[[nodiscard]] inline std::tuple<std::uint8_t, SHAMapHash*, intr_ptr::SharedPtr<SHAMapTreeNode>*>
[[nodiscard]] inline std::tuple<std::uint8_t, SHAMapHash*, SHAMapTreeNodePtr*>
TaggedPointer::getHashesAndChildren() const
{
auto const [tag, ptr] = decode();
auto const hashes = reinterpret_cast<SHAMapHash*>(ptr);
std::uint8_t numAllocated = boundaries[tag];
auto const children =
reinterpret_cast<intr_ptr::SharedPtr<SHAMapTreeNode>*>(hashes + numAllocated);
auto const children = reinterpret_cast<SHAMapTreeNodePtr*>(hashes + numAllocated);
return {numAllocated, hashes, children};
};
@@ -539,7 +535,7 @@ TaggedPointer::getHashes() const
return reinterpret_cast<SHAMapHash*>(tp_ & ptrMask);
};
[[nodiscard]] inline intr_ptr::SharedPtr<SHAMapTreeNode>*
[[nodiscard]] inline SHAMapTreeNodePtr*
TaggedPointer::getChildren() const
{
auto [unused1, unused2, result] = getHashesAndChildren();

View File

@@ -97,10 +97,7 @@ SHAMap::snapShot(bool isMutable) const
}
void
SHAMap::dirtyUp(
SharedPtrNodeStack& stack,
uint256 const& target,
intr_ptr::SharedPtr<SHAMapTreeNode> child)
SHAMap::dirtyUp(SharedPtrNodeStack& stack, uint256 const& target, SHAMapTreeNodePtr child)
{
// walk the tree up from through the inner nodes to the root_
// update hashes and links
@@ -165,7 +162,7 @@ SHAMap::findKey(uint256 const& id) const
return leaf;
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const
{
XRPL_ASSERT(backed_, "xrpl::SHAMap::fetchNodeFromDB : is backed");
@@ -173,7 +170,7 @@ SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const
return finishFetch(hash, obj);
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMap::finishFetch(SHAMapHash const& hash, std::shared_ptr<NodeObject> const& object) const
{
XRPL_ASSERT(backed_, "xrpl::SHAMap::finishFetch : is backed");
@@ -208,8 +205,8 @@ SHAMap::finishFetch(SHAMapHash const& hash, std::shared_ptr<NodeObject> const& o
}
// See if a sync filter has a node
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
SHAMapTreeNodePtr
SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter const* filter) const
{
if (auto nodeData = filter->getNode(hash))
{
@@ -234,8 +231,8 @@ SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
// Get a node without throwing
// Used on maps where missing nodes are expected
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
SHAMapTreeNodePtr
SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter const* filter) const
{
auto node = cacheLookup(hash);
if (node)
@@ -257,7 +254,7 @@ SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
return node;
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMap::fetchNodeNT(SHAMapHash const& hash) const
{
auto node = cacheLookup(hash);
@@ -269,7 +266,7 @@ SHAMap::fetchNodeNT(SHAMapHash const& hash) const
}
// Throw if the node is missing
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMap::fetchNode(SHAMapHash const& hash) const
{
auto node = fetchNodeNT(hash);
@@ -291,10 +288,10 @@ SHAMap::descendThrow(SHAMapInnerNode* parent, int branch) const
return ret;
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMap::descendThrow(SHAMapInnerNode& parent, int branch) const
{
intr_ptr::SharedPtr<SHAMapTreeNode> ret = descend(parent, branch);
SHAMapTreeNodePtr ret = descend(parent, branch);
if (!ret && !parent.isEmptyBranch(branch))
Throw<SHAMapMissingNode>(type_, parent.getChildHash(branch));
@@ -309,7 +306,7 @@ SHAMap::descend(SHAMapInnerNode* parent, int branch) const
if ((ret != nullptr) || !backed_)
return ret;
intr_ptr::SharedPtr<SHAMapTreeNode> node = fetchNodeNT(parent->getChildHash(branch));
SHAMapTreeNodePtr node = fetchNodeNT(parent->getChildHash(branch));
if (!node)
return nullptr;
@@ -317,10 +314,10 @@ SHAMap::descend(SHAMapInnerNode* parent, int branch) const
return node.get();
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMap::descend(SHAMapInnerNode& parent, int branch) const
{
intr_ptr::SharedPtr<SHAMapTreeNode> node = parent.getChild(branch);
SHAMapTreeNodePtr node = parent.getChild(branch);
if (node || !backed_)
return node;
@@ -334,10 +331,10 @@ SHAMap::descend(SHAMapInnerNode& parent, int branch) const
// Gets the node that would be hooked to this branch,
// but doesn't hook it up.
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMap::descendNoStore(SHAMapInnerNode& parent, int branch) const
{
intr_ptr::SharedPtr<SHAMapTreeNode> ret = parent.getChild(branch);
SHAMapTreeNodePtr ret = parent.getChild(branch);
if (!ret && backed_)
ret = fetchNode(parent.getChildHash(branch));
return ret;
@@ -348,7 +345,7 @@ SHAMap::descend(
SHAMapInnerNode* parent,
SHAMapNodeID const& parentID,
int branch,
SHAMapSyncFilter* filter) const
SHAMapSyncFilter const* filter) const
{
XRPL_ASSERT(parent->isInner(), "xrpl::SHAMap::descend : valid parent input");
XRPL_ASSERT(
@@ -361,7 +358,7 @@ SHAMap::descend(
if (child == nullptr)
{
auto const& childHash = parent->getChildHash(branch);
intr_ptr::SharedPtr<SHAMapTreeNode> childNode = fetchNodeNT(childHash, filter);
SHAMapTreeNodePtr childNode = fetchNodeNT(childHash, filter);
if (childNode)
{
@@ -377,7 +374,7 @@ SHAMapTreeNode*
SHAMap::descendAsync(
SHAMapInnerNode* parent,
int branch,
SHAMapSyncFilter* filter,
SHAMapSyncFilter const* filter,
bool& pending,
descendCallback&& callback) const
{
@@ -434,7 +431,7 @@ SHAMap::unshareNode(intr_ptr::SharedPtr<Node> node, SHAMapNodeID const& nodeID)
SHAMapLeafNode*
SHAMap::belowHelper(
intr_ptr::SharedPtr<SHAMapTreeNode> node,
SHAMapTreeNodePtr node,
SharedPtrNodeStack& stack,
int branch,
std::tuple<int, std::function<bool(int)>, std::function<void(int&)>> const& loopParams) const
@@ -479,8 +476,7 @@ SHAMap::belowHelper(
return nullptr;
}
SHAMapLeafNode*
SHAMap::lastBelow(intr_ptr::SharedPtr<SHAMapTreeNode> node, SharedPtrNodeStack& stack, int branch)
const
SHAMap::lastBelow(SHAMapTreeNodePtr node, SharedPtrNodeStack& stack, int branch) const
{
auto init = branchFactor - 1;
auto cmp = [](int i) { return i >= 0; };
@@ -489,8 +485,7 @@ SHAMap::lastBelow(intr_ptr::SharedPtr<SHAMapTreeNode> node, SharedPtrNodeStack&
return belowHelper(node, stack, branch, {init, cmp, incr});
}
SHAMapLeafNode*
SHAMap::firstBelow(intr_ptr::SharedPtr<SHAMapTreeNode> node, SharedPtrNodeStack& stack, int branch)
const
SHAMap::firstBelow(SHAMapTreeNodePtr node, SharedPtrNodeStack& stack, int branch) const
{
auto init = 0;
auto cmp = [](int i) { return i <= branchFactor; };
@@ -699,10 +694,8 @@ SHAMap::delItem(uint256 const& id)
SHAMapNodeType const type = leaf->getType();
using TreeNodeType = intr_ptr::SharedPtr<SHAMapTreeNode>;
// What gets attached to the end of the chain (For now, nothing, since we deleted the leaf)
TreeNodeType prevNode;
SHAMapTreeNodePtr prevNode;
while (!stack.empty())
{
@@ -728,7 +721,7 @@ SHAMap::delItem(uint256 const& id)
// no children below this branch
//
// Note: This is unnecessary due to the std::move above but left here for safety
prevNode = TreeNodeType{};
prevNode = SHAMapTreeNodePtr{};
}
else if (bc == 1)
{
@@ -741,7 +734,7 @@ SHAMap::delItem(uint256 const& id)
{
if (!node->isEmptyBranch(i))
{
node->setChild(i, TreeNodeType{});
node->setChild(i, SHAMapTreeNodePtr{});
break;
}
}
@@ -937,8 +930,8 @@ SHAMap::fetchRoot(SHAMapHash const& hash, SHAMapSyncFilter* filter)
@note The node must have already been unshared by having the caller
first call SHAMapTreeNode::unshare().
*/
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMap::writeNode(NodeObjectType t, intr_ptr::SharedPtr<SHAMapTreeNode> node) const
SHAMapTreeNodePtr
SHAMap::writeNode(NodeObjectType t, SHAMapTreeNodePtr node) const
{
XRPL_ASSERT(node->cowid() == 0, "xrpl::SHAMap::writeNode : valid input node");
XRPL_ASSERT(backed_, "xrpl::SHAMap::writeNode : is backed");
@@ -1155,7 +1148,7 @@ SHAMap::dump(bool hash) const
JLOG(journal_.info()) << leafCount << " resident leaves";
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMap::cacheLookup(SHAMapHash const& hash) const
{
auto ret = f_.getTreeNodeCache()->fetch(hash.as_uint256());
@@ -1164,7 +1157,7 @@ SHAMap::cacheLookup(SHAMapHash const& hash) const
}
void
SHAMap::canonicalize(SHAMapHash const& hash, intr_ptr::SharedPtr<SHAMapTreeNode>& node) const
SHAMap::canonicalize(SHAMapHash const& hash, SHAMapTreeNodePtr& node) const
{
XRPL_ASSERT(backed_, "xrpl::SHAMap::canonicalize : is backed");
XRPL_ASSERT(node->cowid() == 0, "xrpl::SHAMap::canonicalize : valid node input");

View File

@@ -261,7 +261,7 @@ SHAMap::walkMap(std::vector<SHAMapMissingNode>& missingNodes, int maxMissing) co
{
if (!node->isEmptyBranch(i))
{
intr_ptr::SharedPtr<SHAMapTreeNode> const nextNode = descendNoStore(*node, i);
SHAMapTreeNodePtr const nextNode = descendNoStore(*node, i);
if (nextNode)
{
@@ -286,7 +286,7 @@ SHAMap::walkMapParallel(std::vector<SHAMapMissingNode>& missingNodes, int maxMis
return false;
using StackEntry = intr_ptr::SharedPtr<SHAMapInnerNode>;
std::array<intr_ptr::SharedPtr<SHAMapTreeNode>, 16> topChildren;
std::array<SHAMapTreeNodePtr, 16> topChildren;
{
auto const& innerRoot = intr_ptr::static_pointer_cast<SHAMapInnerNode>(root_);
for (int i = 0; i < 16; ++i)
@@ -331,8 +331,7 @@ SHAMap::walkMapParallel(std::vector<SHAMapMissingNode>& missingNodes, int maxMis
{
if (node->isEmptyBranch(i))
continue;
intr_ptr::SharedPtr<SHAMapTreeNode> const nextNode =
descendNoStore(*node, i);
SHAMapTreeNodePtr const nextNode = descendNoStore(*node, i);
if (nextNode)
{

View File

@@ -37,7 +37,7 @@ SHAMapInnerNode::~SHAMapInnerNode() = default;
void
SHAMapInnerNode::partialDestructor()
{
intr_ptr::SharedPtr<SHAMapTreeNode>* children = nullptr;
SHAMapTreeNodePtr* children = nullptr;
// structured bindings can't be captured in c++ 17; use tie instead
std::tie(std::ignore, std::ignore, children) = hashesAndChildren_.getHashesAndChildren();
iterNonEmptyChildIndexes([&](auto branchNum, auto indexNum) { children[indexNum].reset(); });
@@ -69,7 +69,7 @@ SHAMapInnerNode::getChildIndex(int i) const
return hashesAndChildren_.getChildIndex(isBranch_, i);
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMapInnerNode::clone(std::uint32_t cowid) const
{
auto const branchCount = getBranchCount();
@@ -79,7 +79,7 @@ SHAMapInnerNode::clone(std::uint32_t cowid) const
p->isBranch_ = isBranch_;
p->fullBelowGen_ = fullBelowGen_;
SHAMapHash *cloneHashes = nullptr, *thisHashes = nullptr;
intr_ptr::SharedPtr<SHAMapTreeNode>*cloneChildren = nullptr, *thisChildren = nullptr;
SHAMapTreeNodePtr *cloneChildren = nullptr, *thisChildren = nullptr;
// structured bindings can't be captured in c++ 17; use tie instead
std::tie(std::ignore, cloneHashes, cloneChildren) =
p->hashesAndChildren_.getHashesAndChildren();
@@ -118,7 +118,7 @@ SHAMapInnerNode::clone(std::uint32_t cowid) const
return p;
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMapInnerNode::makeFullInner(Slice data, SHAMapHash const& hash, bool hashValid)
{
// A full inner node is serialized as 16 256-bit hashes, back to back:
@@ -153,7 +153,7 @@ SHAMapInnerNode::makeFullInner(Slice data, SHAMapHash const& hash, bool hashVali
return ret;
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMapInnerNode::makeCompressedInner(Slice data)
{
// A compressed inner node is serialized as a series of 33 byte chunks,
@@ -207,7 +207,7 @@ void
SHAMapInnerNode::updateHashDeep()
{
SHAMapHash* hashes = nullptr;
intr_ptr::SharedPtr<SHAMapTreeNode>* children = nullptr;
SHAMapTreeNodePtr* children = nullptr;
// structured bindings can't be captured in c++ 17; use tie instead
std::tie(std::ignore, hashes, children) = hashesAndChildren_.getHashesAndChildren();
iterNonEmptyChildIndexes([&](auto branchNum, auto indexNum) {
@@ -265,7 +265,7 @@ SHAMapInnerNode::getString(SHAMapNodeID const& id) const
// We are modifying an inner node
void
SHAMapInnerNode::setChild(int m, intr_ptr::SharedPtr<SHAMapTreeNode> child)
SHAMapInnerNode::setChild(int m, SHAMapTreeNodePtr child)
{
XRPL_ASSERT(
(m >= 0) && (m < branchFactor), "xrpl::SHAMapInnerNode::setChild : valid branch input");
@@ -307,7 +307,7 @@ SHAMapInnerNode::setChild(int m, intr_ptr::SharedPtr<SHAMapTreeNode> child)
// finished modifying, now make shareable
void
SHAMapInnerNode::shareChild(int m, intr_ptr::SharedPtr<SHAMapTreeNode> const& child)
SHAMapInnerNode::shareChild(int m, SHAMapTreeNodePtr const& child)
{
XRPL_ASSERT(
(m >= 0) && (m < branchFactor), "xrpl::SHAMapInnerNode::shareChild : valid branch input");
@@ -337,7 +337,7 @@ SHAMapInnerNode::getChildPointer(int branch)
return hashesAndChildren_.getChildren()[index].get();
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMapInnerNode::getChild(int branch)
{
XRPL_ASSERT(
@@ -364,8 +364,8 @@ SHAMapInnerNode::getChildHash(int m) const
return zeroSHAMapHash;
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapInnerNode::canonicalizeChild(int branch, intr_ptr::SharedPtr<SHAMapTreeNode> node)
SHAMapTreeNodePtr
SHAMapInnerNode::canonicalizeChild(int branch, SHAMapTreeNodePtr node)
{
XRPL_ASSERT(
branch >= 0 && branch < branchFactor,

View File

@@ -129,7 +129,9 @@ selectBranch(SHAMapNodeID const& id, uint256 const& hash)
SHAMapNodeID
SHAMapNodeID::createID(int depth, uint256 const& key)
{
    // A node ID is only meaningful from the root (depth 0) down to and
    // including the leaf depth. The key is masked so that all bits below
    // `depth` are zeroed, making the returned ID canonical for that depth.
    //
    // NOTE: the merged diff had left both the old assert (depth < 65) and
    // this one in place; only the leafDepth-based form is kept.
    XRPL_ASSERT(
        depth >= 0 && depth <= SHAMap::leafDepth,
        "xrpl::SHAMapNodeID::createID : valid branch input");
    return SHAMapNodeID(depth, key & depthMask(depth));
}

View File

@@ -66,7 +66,7 @@ SHAMap::visitNodes(std::function<bool(SHAMapTreeNode&)> const& function) const
{
if (!node->isEmptyBranch(pos))
{
intr_ptr::SharedPtr<SHAMapTreeNode> const child = descendNoStore(*node, pos);
SHAMapTreeNodePtr const child = descendNoStore(*node, pos);
if (!function(*child))
return;
@@ -204,8 +204,7 @@ SHAMap::gmn_ProcessNodes(MissingNodes& mn, MissingNodes::StackEntry& se)
branch,
mn.filter_,
pending,
[node, nodeID, branch, &mn](
intr_ptr::SharedPtr<SHAMapTreeNode> found, SHAMapHash const&) {
[node, nodeID, branch, &mn](SHAMapTreeNodePtr found, SHAMapHash const&) {
// a read completed asynchronously
std::unique_lock<std::mutex> const lock{mn.deferLock_};
mn.finishedReads_.emplace_back(node, nodeID, branch, std::move(found));
@@ -268,8 +267,7 @@ SHAMap::gmn_ProcessDeferredReads(MissingNodes& mn)
int complete = 0;
while (complete != mn.deferred_)
{
std::tuple<SHAMapInnerNode*, SHAMapNodeID, int, intr_ptr::SharedPtr<SHAMapTreeNode>>
deferredNode;
std::tuple<SHAMapInnerNode*, SHAMapNodeID, int, SHAMapTreeNodePtr> deferredNode;
{
std::unique_lock<std::mutex> lock{mn.deferLock_};
@@ -417,7 +415,7 @@ SHAMap::getMissingNodes(int max, SHAMapSyncFilter* filter)
bool
SHAMap::getNodeFat(
SHAMapNodeID const& wanted,
std::vector<std::pair<SHAMapNodeID, Blob>>& data,
std::vector<SHAMapNodeData>& data,
bool fatLeaves,
std::uint32_t depth) const
{
@@ -463,7 +461,7 @@ SHAMap::getNodeFat(
// Add this node to the reply
s.erase();
node->serializeForWire(s);
data.emplace_back(nodeID, s.getData());
data.emplace_back(nodeID, s.getData(), node->isLeaf());
if (node->isInner())
{
@@ -493,7 +491,7 @@ SHAMap::getNodeFat(
// Just include this node
s.erase();
childNode->serializeForWire(s);
data.emplace_back(childID, s.getData());
data.emplace_back(childID, s.getData(), childNode->isLeaf());
}
}
}
@@ -511,8 +509,18 @@ SHAMap::serializeRoot(Serializer& s) const
}
SHAMapAddNode
SHAMap::addRootNode(SHAMapHash const& hash, Slice const& rootNode, SHAMapSyncFilter* filter)
SHAMap::addRootNode(
SHAMapHash const& hash,
SHAMapTreeNodePtr rootNode,
SHAMapSyncFilter const* filter)
{
XRPL_ASSERT(rootNode, "xrpl::SHAMap::addRootNode : non-null root node");
if (!rootNode)
{
JLOG(journal_.error()) << "Null node received";
return SHAMapAddNode::invalid();
}
// we already have a root_ node
if (root_->getHash().isNonZero())
{
@@ -522,14 +530,16 @@ SHAMap::addRootNode(SHAMapHash const& hash, Slice const& rootNode, SHAMapSyncFil
}
XRPL_ASSERT(cowid_ >= 1, "xrpl::SHAMap::addRootNode : valid cowid");
auto node = SHAMapTreeNode::makeFromWire(rootNode);
if (!node || node->getHash() != hash)
if (rootNode->getHash() != hash)
{
JLOG(journal_.warn()) << "Corrupt node received";
return SHAMapAddNode::invalid();
}
if (backed_)
canonicalize(hash, node);
canonicalize(hash, rootNode);
root_ = node;
root_ = std::move(rootNode);
if (root_->isLeaf())
clearSynching();
@@ -546,9 +556,23 @@ SHAMap::addRootNode(SHAMapHash const& hash, Slice const& rootNode, SHAMapSyncFil
}
SHAMapAddNode
SHAMap::addKnownNode(SHAMapNodeID const& node, Slice const& rawNode, SHAMapSyncFilter* filter)
SHAMap::addKnownNode(
SHAMapNodeID const& nodeID,
SHAMapTreeNodePtr treeNode,
SHAMapSyncFilter const* filter)
{
XRPL_ASSERT(!node.isRoot(), "xrpl::SHAMap::addKnownNode : valid node input");
XRPL_ASSERT(!nodeID.isRoot(), "xrpl::SHAMap::addKnownNode : valid node input");
if (nodeID.isRoot())
{
JLOG(journal_.error()) << "Root node received";
return SHAMapAddNode::invalid();
}
XRPL_ASSERT(treeNode, "xrpl::SHAMap::addKnownNode : non-null tree node");
if (!treeNode)
{
JLOG(journal_.error()) << "Null node received";
return SHAMapAddNode::invalid();
}
if (!isSynching())
{
@@ -562,14 +586,14 @@ SHAMap::addKnownNode(SHAMapNodeID const& node, Slice const& rawNode, SHAMapSyncF
while (currNode->isInner() &&
!safe_downcast<SHAMapInnerNode*>(currNode)->isFullBelow(generation) &&
(currNodeID.getDepth() < node.getDepth()))
(currNodeID.getDepth() < nodeID.getDepth()))
{
int const branch = selectBranch(currNodeID, node.getNodeID());
int const branch = selectBranch(currNodeID, nodeID.getNodeID());
XRPL_ASSERT(branch >= 0, "xrpl::SHAMap::addKnownNode : valid branch");
auto inner = safe_downcast<SHAMapInnerNode*>(currNode);
if (inner->isEmptyBranch(branch))
{
JLOG(journal_.warn()) << "Add known node for empty branch" << node;
JLOG(journal_.warn()) << "Add known node for empty branch" << nodeID;
return SHAMapAddNode::invalid();
}
@@ -585,67 +609,44 @@ SHAMap::addKnownNode(SHAMapNodeID const& node, Slice const& rawNode, SHAMapSyncF
if (currNode != nullptr)
continue;
auto newNode = SHAMapTreeNode::makeFromWire(rawNode);
if (!newNode || childHash != newNode->getHash())
if (childHash != treeNode->getHash())
{
JLOG(journal_.warn()) << "Corrupt node received";
return SHAMapAddNode::invalid();
}
// In rare cases, a node can still be corrupt even after hash
// validation. For leaf nodes, we perform an additional check to
// ensure the node's position in the tree is consistent with its
// content to prevent inconsistencies that could
// propagate further down the line.
if (newNode->isLeaf())
{
auto const& actualKey =
safe_downcast<SHAMapLeafNode const*>(newNode.get())->peekItem()->key();
// Validate that this leaf belongs at the target position
auto const expectedNodeID = SHAMapNodeID::createID(node.getDepth(), actualKey);
if (expectedNodeID.getNodeID() != node.getNodeID())
{
JLOG(journal_.debug())
<< "Leaf node position mismatch: "
<< "expected=" << expectedNodeID.getNodeID() << ", actual=" << node.getNodeID();
return SHAMapAddNode::invalid();
}
}
// Inner nodes must be at a level strictly less than 64
// but leaf nodes (while notionally at level 64) can be
// at any depth up to and including 64:
if ((currNodeID.getDepth() > leafDepth) ||
(newNode->isInner() && currNodeID.getDepth() == leafDepth))
(treeNode->isInner() && currNodeID.getDepth() == leafDepth))
{
// Map is provably invalid
state_ = SHAMapState::Invalid;
return SHAMapAddNode::useful();
}
if (currNodeID != node)
if (currNodeID != nodeID)
{
// Either this node is broken or we didn't request it (yet)
JLOG(journal_.warn()) << "unable to hook node " << node;
JLOG(journal_.warn()) << "unable to hook node " << nodeID;
JLOG(journal_.info()) << " stuck at " << currNodeID;
JLOG(journal_.info()) << "got depth=" << node.getDepth()
JLOG(journal_.info()) << "got depth=" << nodeID.getDepth()
<< ", walked to= " << currNodeID.getDepth();
return SHAMapAddNode::useful();
}
if (backed_)
canonicalize(childHash, newNode);
canonicalize(childHash, treeNode);
newNode = prevNode->canonicalizeChild(branch, std::move(newNode));
treeNode = prevNode->canonicalizeChild(branch, std::move(treeNode));
if (filter != nullptr)
{
Serializer s;
newNode->serializeWithPrefix(s);
treeNode->serializeWithPrefix(s);
filter->gotNode(
false, childHash, ledgerSeq_, std::move(s.modData()), newNode->getType());
false, childHash, ledgerSeq_, std::move(s.modData()), treeNode->getType());
}
return SHAMapAddNode::useful();

View File

@@ -25,7 +25,7 @@
namespace xrpl {
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMapTreeNode::makeTransaction(Slice data, SHAMapHash const& hash, bool hashValid)
{
auto item = make_shamapitem(sha512Half(HashPrefix::transactionID, data), data);
@@ -36,7 +36,7 @@ SHAMapTreeNode::makeTransaction(Slice data, SHAMapHash const& hash, bool hashVal
return intr_ptr::make_shared<SHAMapTxLeafNode>(std::move(item), 0);
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMapTreeNode::makeTransactionWithMeta(Slice data, SHAMapHash const& hash, bool hashValid)
{
Serializer s(data.data(), data.size());
@@ -60,7 +60,7 @@ SHAMapTreeNode::makeTransactionWithMeta(Slice data, SHAMapHash const& hash, bool
return intr_ptr::make_shared<SHAMapTxPlusMetaLeafNode>(std::move(item), 0);
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMapTreeNode::makeAccountState(Slice data, SHAMapHash const& hash, bool hashValid)
{
Serializer s(data.data(), data.size());
@@ -87,7 +87,7 @@ SHAMapTreeNode::makeAccountState(Slice data, SHAMapHash const& hash, bool hashVa
return intr_ptr::make_shared<SHAMapAccountStateLeafNode>(std::move(item), 0);
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMapTreeNode::makeFromWire(Slice rawNode)
{
if (rawNode.empty())
@@ -118,7 +118,7 @@ SHAMapTreeNode::makeFromWire(Slice rawNode)
Throw<std::runtime_error>("wire: Unknown type (" + std::to_string(type) + ")");
}
intr_ptr::SharedPtr<SHAMapTreeNode>
SHAMapTreeNodePtr
SHAMapTreeNode::makeFromPrefix(Slice rawNode, SHAMapHash const& hash)
{
if (rawNode.size() < 4)

View File

@@ -0,0 +1,350 @@
#include <xrpld/app/ledger/detail/LedgerNodeHelpers.h>
#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/beast/unit_test/suite.h>
#include <xrpl/protocol/Serializer.h>
#include <xrpl/shamap/SHAMap.h>
#include <xrpl/shamap/SHAMapAccountStateLeafNode.h>
#include <xrpl/shamap/SHAMapInnerNode.h>
#include <xrpl/shamap/SHAMapItem.h>
#include <xrpl/shamap/SHAMapTreeNode.h>
#include <boost/smart_ptr/intrusive_ptr.hpp>
#include <xrpl.pb.h>
#include <bit>
#include <cstdint>
#include <string>
namespace xrpl::tests {
// Unit tests for the free helper functions in LedgerNodeHelpers:
// - validateLedgerNode: structural validation of a TMLedgerNode message
//   (exactly one of the legacy `nodeid` or the new `id`/`depth` fields,
//   plus a mandatory `nodedata` payload and a bounded depth).
// - getTreeNode: deserialization of wire-format node data.
// - getSHAMapNodeID: reconstruction of a SHAMapNodeID from the message,
//   cross-checked against the deserialized node's kind (inner vs. leaf).
class LedgerNodeHelpers_test : public beast::unit_test::suite
{
    // Builds a small deterministic SHAMapItem whose key is the SHA-512-half
    // of three consecutive 32-bit words derived from `seed`. Different seeds
    // therefore yield items with different keys.
    static boost::intrusive_ptr<SHAMapItem>
    makeTestItem(std::uint32_t seed)
    {
        Serializer s;
        s.add32(seed);
        s.add32(seed + 1);
        s.add32(seed + 2);
        return make_shamapitem(s.getSHA512Half(), s.slice());
    }

    // Serializes `node` in wire format and returns the raw bytes as a
    // string, matching the `nodedata` field of a TMLedgerNode message.
    static std::string
    serializeNode(SHAMapTreeNodePtr const& node)
    {
        Serializer s;
        node->serializeForWire(s);
        auto const slice = s.slice();
        return std::string(std::bit_cast<char const*>(slice.data()), slice.size());
    }

    // Verifies which field combinations make a TMLedgerNode structurally
    // valid: `nodedata` is always required, and the legacy `nodeid` field is
    // mutually exclusive with the new `id` and `depth` fields.
    void
    testValidateLedgerNode()
    {
        // In the tests below the validity of the content of the node data and ID fields is not
        // checked - only that the fields have values when expected. The content of the fields is
        // verified in the other tests in this file.
        testcase("validateLedgerNode");

        // Invalid: missing all fields.
        {
            protocol::TMLedgerNode const node;
            BEAST_EXPECT(!validateLedgerNode(node));
        }
        // Invalid: missing `nodedata` field.
        {
            protocol::TMLedgerNode node;
            node.set_nodeid("test_nodeid");
            BEAST_EXPECT(!validateLedgerNode(node));
        }
        // Invalid: missing `nodedata` field.
        {
            protocol::TMLedgerNode node;
            node.set_id("test_nodeid");
            BEAST_EXPECT(!validateLedgerNode(node));
        }
        // Invalid: missing `nodedata` field.
        {
            protocol::TMLedgerNode node;
            node.set_depth(1);
            BEAST_EXPECT(!validateLedgerNode(node));
        }
        // Valid: legacy `nodeid` field.
        {
            protocol::TMLedgerNode node;
            node.set_nodedata("test_data");
            node.set_nodeid("test_nodeid");
            BEAST_EXPECT(validateLedgerNode(node));
        }
        // Invalid: has both legacy `nodeid` and new `id` fields.
        {
            protocol::TMLedgerNode node;
            node.set_nodedata("test_data");
            node.set_nodeid("test_nodeid");
            node.set_id("test_nodeid");
            BEAST_EXPECT(!validateLedgerNode(node));
        }
        // Invalid: has both legacy `nodeid` and new `depth` fields.
        {
            protocol::TMLedgerNode node;
            node.set_nodedata("test_data");
            node.set_nodeid("test_nodeid");
            node.set_depth(5);
            BEAST_EXPECT(!validateLedgerNode(node));
        }
        // Valid: new `id` field.
        {
            protocol::TMLedgerNode node;
            node.set_nodedata("test_data");
            node.set_id("test_id");
            BEAST_EXPECT(validateLedgerNode(node));
        }
        // Valid: new `depth` field.
        {
            protocol::TMLedgerNode node;
            node.set_nodedata("test_data");
            node.set_depth(5);
            BEAST_EXPECT(validateLedgerNode(node));
        }
        // Valid: `depth` at minimum depth.
        {
            protocol::TMLedgerNode node;
            node.set_nodedata("test_data");
            node.set_depth(0);
            BEAST_EXPECT(validateLedgerNode(node));
        }
        // Valid: `depth` at arbitrary depth between minimum and maximum.
        {
            protocol::TMLedgerNode node;
            node.set_nodedata("test_data");
            node.set_depth(10);
            BEAST_EXPECT(validateLedgerNode(node));
        }
        // Valid: `depth` at maximum depth.
        {
            protocol::TMLedgerNode node;
            node.set_nodedata("test_data");
            node.set_depth(SHAMap::leafDepth);
            BEAST_EXPECT(validateLedgerNode(node));
        }
        // Invalid: `depth` is greater than maximum depth.
        {
            protocol::TMLedgerNode node;
            node.set_nodedata("test_data");
            node.set_depth(SHAMap::leafDepth + 1);
            BEAST_EXPECT(!validateLedgerNode(node));
        }
    }

    // Verifies that getTreeNode deserializes well-formed wire data into the
    // correct node kind (inner vs. leaf) and rejects empty, garbage, and
    // truncated data without throwing.
    void
    testGetTreeNode()
    {
        testcase("getTreeNode");

        // Valid: inner node. It must have at least one child for `serializeNode` to work.
        {
            auto const innerNode = intr_ptr::make_shared<SHAMapInnerNode>(1);
            auto const childNode = intr_ptr::make_shared<SHAMapInnerNode>(1);
            innerNode->setChild(0, childNode);
            auto const innerData = serializeNode(innerNode);
            auto const result = getTreeNode(innerData);
            BEAST_EXPECT(result->isInner());
        }
        // Valid: leaf node.
        {
            auto const leafItem = makeTestItem(12345);
            auto const leafNode = intr_ptr::make_shared<SHAMapAccountStateLeafNode>(leafItem, 1);
            auto const leafData = serializeNode(leafNode);
            auto result = getTreeNode(leafData);
            BEAST_EXPECT(result->isLeaf());
        }
        // Invalid: empty data.
        {
            auto const result = getTreeNode("");
            BEAST_EXPECT(!result);
        }
        // Invalid: garbage data.
        {
            auto const result = getTreeNode("invalid");
            BEAST_EXPECT(!result);
        }
        // Invalid: truncated data.
        {
            auto const leafItem = makeTestItem(54321);
            auto const leafNode = intr_ptr::make_shared<SHAMapAccountStateLeafNode>(leafItem, 1);
            // Truncate the data to trigger an exception in SHAMapTreeNode::makeAccountState when
            // the data is used to deserialize the node.
            uint256 const tag;
            auto const leafData = serializeNode(leafNode).substr(0, tag.bytes - 1);
            auto const result = getTreeNode(leafData);
            BEAST_EXPECT(!result);
        }
    }

    // Verifies that getSHAMapNodeID accepts the correct ID field for each
    // node kind (legacy `nodeid` for both; new `id` only for inner nodes;
    // new `depth` only for leaf nodes, whose full ID is reconstructed from
    // the leaf's own key) and rejects inconsistent combinations.
    void
    testGetSHAMapNodeID()
    {
        testcase("getSHAMapNodeID");

        {
            // Tests using inner nodes at various depths.
            auto const innerNode = intr_ptr::make_shared<SHAMapInnerNode>(1);
            auto const childNode = intr_ptr::make_shared<SHAMapInnerNode>(1);
            innerNode->setChild(0, childNode);
            auto const innerData = serializeNode(innerNode);

            // Valid: legacy `nodeid` field at arbitrary depth.
            {
                auto const innerDepth = 3;
                auto const innerID = SHAMapNodeID::createID(innerDepth, uint256{});
                protocol::TMLedgerNode node;
                node.set_nodedata(innerData);
                node.set_nodeid(innerID.getRawString());
                auto const result = getSHAMapNodeID(node, innerNode);
                BEAST_EXPECT(result == innerID);
            }
            // Valid: new `id` field at minimum depth.
            {
                auto const innerDepth = 0;
                auto const innerID = SHAMapNodeID::createID(innerDepth, uint256{});
                protocol::TMLedgerNode node;
                node.set_nodedata(innerData);
                node.set_id(innerID.getRawString());
                auto const result = getSHAMapNodeID(node, innerNode);
                BEAST_EXPECT(result == innerID);
            }
            // Invalid: new `depth` field should not be used for inner nodes.
            {
                protocol::TMLedgerNode node;
                node.set_nodedata(innerData);
                node.set_depth(10);
                auto const result = getSHAMapNodeID(node, innerNode);
                BEAST_EXPECT(!result);
            }
        }
        {
            // Tests using leaf nodes at various depths.
            auto const leafItem = makeTestItem(12345);
            auto const leafNode = intr_ptr::make_shared<SHAMapAccountStateLeafNode>(leafItem, 1);
            auto const leafData = serializeNode(leafNode);
            auto const leafKey = leafItem->key();

            // Valid: legacy `nodeid` field at arbitrary depth.
            {
                auto const leafDepth = 5;
                auto const leafID = SHAMapNodeID::createID(leafDepth, leafKey);
                protocol::TMLedgerNode ledgerNode;
                ledgerNode.set_nodedata(leafData);
                ledgerNode.set_nodeid(leafID.getRawString());
                auto const result = getSHAMapNodeID(ledgerNode, leafNode);
                BEAST_EXPECT(result == leafID);
            }
            // Invalid: new `id` field should not be used for leaf nodes.
            {
                auto const leafDepth = 5;
                auto const leafID = SHAMapNodeID::createID(leafDepth, leafKey);
                protocol::TMLedgerNode ledgerNode;
                ledgerNode.set_nodedata(leafData);
                ledgerNode.set_id(leafID.getRawString());
                auto const result = getSHAMapNodeID(ledgerNode, leafNode);
                BEAST_EXPECT(!result);
            }
            // Valid: new `depth` field at minimum depth.
            {
                auto const leafDepth = 0;
                auto const leafID = SHAMapNodeID::createID(leafDepth, leafKey);
                protocol::TMLedgerNode node;
                node.set_nodedata(leafData);
                node.set_depth(leafDepth);
                auto const result = getSHAMapNodeID(node, leafNode);
                BEAST_EXPECT(result == leafID);
            }
            // Valid: new `depth` field at arbitrary depth between minimum and maximum.
            {
                auto const leafDepth = 10;
                auto const leafID = SHAMapNodeID::createID(leafDepth, leafKey);
                protocol::TMLedgerNode ledgerNode;
                ledgerNode.set_nodedata(leafData);
                ledgerNode.set_depth(leafDepth);
                auto const result = getSHAMapNodeID(ledgerNode, leafNode);
                BEAST_EXPECT(result == leafID);
            }
            // Valid: new `depth` field at maximum depth.
            // Note that we do not test a depth greater than the maximum depth, because the proto
            // message is assumed to have been validated by the time the getSHAMapNodeID function is
            // called.
            {
                auto const leafDepth = SHAMap::leafDepth;
                auto const leafID = SHAMapNodeID::createID(leafDepth, leafKey);
                protocol::TMLedgerNode node;
                node.set_nodedata(leafData);
                node.set_depth(leafDepth);
                auto const result = getSHAMapNodeID(node, leafNode);
                BEAST_EXPECT(result == leafID);
            }
            // Invalid: legacy `nodeid` field where the node ID is inconsistent with the key.
            {
                auto const otherItem = makeTestItem(54321);
                auto const otherNode =
                    intr_ptr::make_shared<SHAMapAccountStateLeafNode>(otherItem, 1);
                auto const otherData = serializeNode(otherNode);
                auto const otherKey = otherItem->key();
                auto const otherDepth = 1;
                auto const otherID = SHAMapNodeID::createID(otherDepth, otherKey);
                protocol::TMLedgerNode ledgerNode;
                ledgerNode.set_nodedata(otherData);
                ledgerNode.set_nodeid(otherID.getRawString());
                auto const result = getSHAMapNodeID(ledgerNode, leafNode);
                BEAST_EXPECT(!result);
            }
        }
    }

public:
    void
    run() override
    {
        testValidateLedgerNode();
        testGetTreeNode();
        testGetSHAMapNodeID();
    }
};
BEAST_DEFINE_TESTSUITE(LedgerNodeHelpers, app, xrpl);
} // namespace xrpl::tests

View File

@@ -130,12 +130,7 @@ public:
}
void
acquireAsync(
JobType type,
std::string const& name,
uint256 const& hash,
std::uint32_t seq,
InboundLedger::Reason reason) override
acquireAsync(uint256 const& hash, std::uint32_t seq, InboundLedger::Reason reason) override
{
}

View File

@@ -1,165 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012-2016 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpl/basics/CanProcess.h>
#include <xrpl/beast/unit_test.h>
#include <memory>
namespace ripple {
namespace test {
// Exercises CanProcess, an RAII guard that (as demonstrated below) inserts
// an item into a mutex-protected collection on construction and removes it
// on destruction. A guard that performed the insert converts to true
// ("good"); a guard constructed while the item is already present converts
// to false ("bad") and has no effect on the collection when destroyed.
struct CanProcess_test : beast::unit_test::suite
{
    // Drives the full insert/duplicate/erase lifecycle for one combination
    // of mutex, collection type, and item type. Invariants checked after
    // each step: trackers holds one "good" and one "bad" guard per item,
    // and the collection holds exactly the items with a live "good" guard.
    template <class Mutex, class Collection, class Item>
    void
    test(
        std::string const& name,
        Mutex& mtx,
        Collection& collection,
        std::vector<Item> const& items)
    {
        testcase(name);

        // Preconditions: some items to work with, and a clean collection.
        if (!BEAST_EXPECT(!items.empty()))
            return;
        if (!BEAST_EXPECT(collection.empty()))
            return;

        // CanProcess objects can't be copied or moved. To make that easier,
        // store shared_ptrs
        std::vector<std::shared_ptr<CanProcess>> trackers;

        // Fill up the vector with two CanProcess for each Item. The first
        // inserts the item into the collection and is "good". The second does
        // not and is "bad".
        for (int i = 0; i < items.size(); ++i)
        {
            {
                auto const& good =
                    trackers.emplace_back(std::make_shared<CanProcess>(mtx, collection, items[i]));
                BEAST_EXPECT(*good);
            }
            BEAST_EXPECT(trackers.size() == (2 * i) + 1);
            BEAST_EXPECT(collection.size() == i + 1);
            {
                auto const& bad =
                    trackers.emplace_back(std::make_shared<CanProcess>(mtx, collection, items[i]));
                BEAST_EXPECT(!*bad);
            }
            BEAST_EXPECT(trackers.size() == 2 * (i + 1));
            BEAST_EXPECT(collection.size() == i + 1);
        }
        BEAST_EXPECT(collection.size() == items.size());

        // Now remove the items from the vector<CanProcess> two at a time, and
        // try to get another CanProcess for that item.
        for (int i = 0; i < items.size(); ++i)
        {
            // Remove the "bad" one in the second position
            // This will have no effect on the collection
            {
                auto const iter = trackers.begin() + 1;
                BEAST_EXPECT(!**iter);
                trackers.erase(iter);
            }
            BEAST_EXPECT(trackers.size() == (2 * items.size()) - 1);
            BEAST_EXPECT(collection.size() == items.size());
            {
                // Append a new "bad" one
                auto const& bad =
                    trackers.emplace_back(std::make_shared<CanProcess>(mtx, collection, items[i]));
                BEAST_EXPECT(!*bad);
            }
            BEAST_EXPECT(trackers.size() == 2 * items.size());
            BEAST_EXPECT(collection.size() == items.size());

            // Remove the "good" one from the front
            {
                auto const iter = trackers.begin();
                BEAST_EXPECT(**iter);
                trackers.erase(iter);
            }
            // Destroying the "good" guard erased its item, so the new guard
            // appended below can insert it again and is itself "good".
            BEAST_EXPECT(trackers.size() == (2 * items.size()) - 1);
            BEAST_EXPECT(collection.size() == items.size() - 1);
            {
                // Append a new "good" one
                auto const& good =
                    trackers.emplace_back(std::make_shared<CanProcess>(mtx, collection, items[i]));
                BEAST_EXPECT(*good);
            }
            BEAST_EXPECT(trackers.size() == 2 * items.size());
            BEAST_EXPECT(collection.size() == items.size());
        }

        // Now remove them all two at a time
        // After the rotation above, trackers alternates bad/good from the
        // front; draining them in pairs empties both containers.
        for (int i = items.size() - 1; i >= 0; --i)
        {
            // Remove the "bad" one from the front
            {
                auto const iter = trackers.begin();
                BEAST_EXPECT(!**iter);
                trackers.erase(iter);
            }
            BEAST_EXPECT(trackers.size() == (2 * i) + 1);
            BEAST_EXPECT(collection.size() == i + 1);

            // Remove the "good" one now in front
            {
                auto const iter = trackers.begin();
                BEAST_EXPECT(**iter);
                trackers.erase(iter);
            }
            BEAST_EXPECT(trackers.size() == 2 * i);
            BEAST_EXPECT(collection.size() == i);
        }
        BEAST_EXPECT(trackers.empty());
        BEAST_EXPECT(collection.empty());
    }

    // Runs the lifecycle test across several collection/item combinations
    // to confirm CanProcess is agnostic to both.
    void
    run() override
    {
        {
            std::mutex m;
            std::set<int> collection;
            std::vector<int> const items{1, 2, 3, 4, 5};
            test("set of int", m, collection, items);
        }
        {
            std::mutex m;
            std::set<std::string> collection;
            std::vector<std::string> const items{"one", "two", "three", "four", "five"};
            test("set of string", m, collection, items);
        }
        {
            std::mutex m;
            std::unordered_set<char> collection;
            std::vector<char> const items{'1', '2', '3', '4', '5'};
            test("unorderd_set of char", m, collection, items);
        }
        {
            std::mutex m;
            std::unordered_set<std::uint64_t> collection;
            std::vector<std::uint64_t> const items{100u, 1000u, 150u, 4u, 0u};
            test("unordered_set of uint64_t", m, collection, items);
        }
    }
};
BEAST_DEFINE_TESTSUITE(CanProcess, ripple_basics, ripple);
} // namespace test
} // namespace ripple

View File

@@ -7,7 +7,6 @@
#include <xrpl/basics/BasicConfig.h>
#include <xrpl/beast/unit_test/suite.h>
#include <xrpl/beast/utility/temp_dir.h>
#include <xrpl/protocol/SystemParameters.h> // IWYU pragma: keep
#include <xrpl/server/Port.h>
#include <boost/filesystem/operations.hpp>

View File

@@ -63,8 +63,8 @@ public:
negotiateProtocolVersion("RTXP/1.2, XRPL/2.0, XRPL/2.1") == make_protocol(2, 1));
BEAST_EXPECT(negotiateProtocolVersion("XRPL/2.2") == make_protocol(2, 2));
BEAST_EXPECT(
negotiateProtocolVersion("RTXP/1.2, XRPL/2.2, XRPL/2.3, XRPL/999.999") ==
make_protocol(2, 2));
negotiateProtocolVersion("RTXP/1.2, XRPL/2.3, XRPL/2.4, XRPL/999.999") ==
make_protocol(2, 3));
BEAST_EXPECT(negotiateProtocolVersion("XRPL/999.999, WebSocket/1.0") == std::nullopt);
BEAST_EXPECT(negotiateProtocolVersion("") == std::nullopt);
}

View File

@@ -1,7 +1,6 @@
#include <test/shamap/common.h>
#include <test/unit_test/SuiteJournal.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/SHAMapHash.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/basics/base_uint.h>
@@ -115,14 +114,17 @@ public:
destination.setSynching();
{
std::vector<std::pair<SHAMapNodeID, Blob>> a;
std::vector<SHAMapNodeData> a;
BEAST_EXPECT(source.getNodeFat(SHAMapNodeID(), a, rand_bool(eng_), rand_int(eng_, 2)));
unexpected(a.empty(), "NodeSize");
BEAST_EXPECT(destination.addRootNode(source.getHash(), makeSlice(a[0].second), nullptr)
.isGood());
auto node = SHAMapTreeNode::makeFromWire(makeSlice(a[0].data));
if (!node)
fail("", __FILE__, __LINE__);
BEAST_EXPECT(
destination.addRootNode(source.getHash(), std::move(node), nullptr).isGood());
}
do
@@ -136,7 +138,7 @@ public:
break;
// get as many nodes as possible based on this information
std::vector<std::pair<SHAMapNodeID, Blob>> b;
std::vector<SHAMapNodeData> b;
for (auto& it : nodesMissing)
{
@@ -158,8 +160,10 @@ public:
// Don't use BEAST_EXPECT here b/c it will be called a
// non-deterministic number of times and the number of tests run
// should be deterministic
if (!destination.addKnownNode(b[i].first, makeSlice(b[i].second), nullptr)
.isUseful())
auto node = SHAMapTreeNode::makeFromWire(makeSlice(b[i].data));
if (!node)
fail("", __FILE__, __LINE__);
if (!destination.addKnownNode(b[i].nodeID, std::move(node), nullptr).isUseful())
fail("", __FILE__, __LINE__);
}
} while (true);

View File

@@ -159,8 +159,10 @@ RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash)
// Tell the ledger acquire system that we need the consensus ledger
acquiringLedger_ = hash;
app_.getInboundLedgers().acquireAsync(
jtADVANCE, "GetConsL1", hash, 0, InboundLedger::Reason::CONSENSUS);
app_.getJobQueue().addJob(jtADVANCE, "GetConsL1", [id = hash, &app = app_, this]() {
JLOG(j_.debug()) << "JOB advanceLedger getConsensusLedger1 started";
app.getInboundLedgers().acquireAsync(id, 0, InboundLedger::Reason::CONSENSUS);
});
}
return std::nullopt;
}
@@ -1050,7 +1052,7 @@ void
RCLConsensus::Adaptor::updateOperatingMode(std::size_t const positions) const
{
if ((positions == 0u) && app_.getOPs().isFull())
app_.getOPs().setMode(OperatingMode::CONNECTED, "updateOperatingMode: no positions");
app_.getOPs().setMode(OperatingMode::CONNECTED);
}
void

View File

@@ -128,8 +128,12 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash)
{
JLOG(j_.warn()) << "Need validated ledger for preferred ledger analysis " << hash;
app_.getInboundLedgers().acquireAsync(
jtADVANCE, "GetConsL2", hash, 0, InboundLedger::Reason::CONSENSUS);
Application* pApp = &app_;
app_.getJobQueue().addJob(jtADVANCE, "GetConsL2", [pApp, hash, this]() {
JLOG(j_.debug()) << "JOB advanceLedger getConsensusLedger2 started";
pApp->getInboundLedgers().acquireAsync(hash, 0, InboundLedger::Reason::CONSENSUS);
});
return std::nullopt;
}

View File

@@ -9,6 +9,7 @@
#include <mutex>
#include <set>
#include <string_view>
#include <utility>
namespace xrpl {
@@ -131,16 +132,16 @@ private:
processData(std::shared_ptr<Peer> peer, protocol::TMLedgerData& data);
bool
takeHeader(std::string const& data);
takeHeader(std::string_view data);
void
receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode&);
receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san);
bool
takeTxRootNode(Slice const& data, SHAMapAddNode&);
takeTxRootNode(std::string_view data, SHAMapAddNode& san);
bool
takeAsRootNode(Slice const& data, SHAMapAddNode&);
takeAsRootNode(std::string_view data, SHAMapAddNode& san);
std::vector<uint256>
neededTxHashes(int max, SHAMapSyncFilter* filter) const;

View File

@@ -26,12 +26,7 @@ public:
// Queue. TODO review whether all callers of acquire() can use this
// instead. Inbound ledger acquisition is asynchronous anyway.
virtual void
acquireAsync(
JobType type,
std::string const& name,
uint256 const& hash,
std::uint32_t seq,
InboundLedger::Reason reason) = 0;
acquireAsync(uint256 const& hash, std::uint32_t seq, InboundLedger::Reason reason) = 0;
virtual std::shared_ptr<InboundLedger>
find(LedgerHash const& hash) = 0;

View File

@@ -4,6 +4,7 @@
#include <xrpld/app/ledger/InboundLedgers.h>
#include <xrpld/app/ledger/LedgerMaster.h>
#include <xrpld/app/ledger/TransactionStateSF.h>
#include <xrpld/app/ledger/detail/LedgerNodeHelpers.h>
#include <xrpld/app/ledger/detail/TimeoutCounter.h>
#include <xrpld/app/main/Application.h>
#include <xrpld/overlay/Message.h>
@@ -42,8 +43,8 @@
#include <mutex>
#include <random>
#include <sstream>
#include <stdexcept>
#include <string>
#include <string_view>
#include <tuple>
#include <unordered_map>
#include <utility>
@@ -385,14 +386,7 @@ InboundLedger::onTimer(bool wasProgress, ScopedLockType&)
if (!wasProgress)
{
if (checkLocal())
{
// Done. Something else (probably consensus) built the ledger
// locally while waiting for data (or possibly before requesting)
XRPL_ASSERT(isDone(), "ripple::InboundLedger::onTimer : done");
JLOG(journal_.info()) << "Finished while waiting " << hash_;
return;
}
checkLocal();
mByHash = true;
@@ -800,7 +794,7 @@ InboundLedger::filterNodes(
*/
// data must not have hash prefix
bool
InboundLedger::takeHeader(std::string const& data)
InboundLedger::takeHeader(std::string_view data)
{
// Return value: true=normal, false=bad data
JLOG(journal_.trace()) << "got header acquiring ledger " << hash_;
@@ -888,20 +882,31 @@ InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
{
auto const f = filter.get();
for (auto const& node : packet.nodes())
for (auto const& ledgerNode : packet.nodes())
{
auto const nodeID = deserializeSHAMapNodeID(node.nodeid());
auto treeNode = getTreeNode(ledgerNode.nodedata());
if (!treeNode)
{
JLOG(journal_.warn()) << "Got invalid node data";
san.incInvalid();
return;
}
auto const nodeID = getSHAMapNodeID(ledgerNode, treeNode);
if (!nodeID)
throw std::runtime_error("data does not properly deserialize");
{
JLOG(journal_.warn()) << "Got invalid node id";
san.incInvalid();
return;
}
if (nodeID->isRoot())
{
san += map.addRootNode(rootHash, makeSlice(node.nodedata()), f);
san += map.addRootNode(rootHash, std::move(treeNode), f);
}
else
{
san += map.addKnownNode(*nodeID, makeSlice(node.nodedata()), f);
san += map.addKnownNode(*nodeID, std::move(treeNode), f);
}
if (!san.isGood())
@@ -941,7 +946,7 @@ InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
Call with a lock
*/
bool
InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san)
InboundLedger::takeAsRootNode(std::string_view data, SHAMapAddNode& san)
{
if (failed_ || mHaveState)
{
@@ -957,9 +962,17 @@ InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san)
// LCOV_EXCL_STOP
}
auto treeNode = getTreeNode(data);
if (!treeNode)
{
JLOG(journal_.warn()) << "Got invalid node data";
san.incInvalid();
return false;
}
AccountStateSF filter(mLedger->stateMap().family().db(), app_.getLedgerMaster());
san +=
mLedger->stateMap().addRootNode(SHAMapHash{mLedger->header().accountHash}, data, &filter);
san += mLedger->stateMap().addRootNode(
SHAMapHash{mLedger->header().accountHash}, std::move(treeNode), &filter);
return san.isGood();
}
@@ -967,7 +980,7 @@ InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san)
Call with a lock
*/
bool
InboundLedger::takeTxRootNode(Slice const& data, SHAMapAddNode& san)
InboundLedger::takeTxRootNode(std::string_view data, SHAMapAddNode& san)
{
if (failed_ || mHaveTransactions)
{
@@ -983,8 +996,17 @@ InboundLedger::takeTxRootNode(Slice const& data, SHAMapAddNode& san)
// LCOV_EXCL_STOP
}
auto treeNode = getTreeNode(data);
if (!treeNode)
{
JLOG(journal_.warn()) << "Got invalid node data";
san.incInvalid();
return false;
}
TransactionStateSF filter(mLedger->txMap().family().db(), app_.getLedgerMaster());
san += mLedger->txMap().addRootNode(SHAMapHash{mLedger->header().txHash}, data, &filter);
san += mLedger->txMap().addRootNode(
SHAMapHash{mLedger->header().txHash}, std::move(treeNode), &filter);
return san.isGood();
}
@@ -1081,13 +1103,13 @@ InboundLedger::processData(std::shared_ptr<Peer> peer, protocol::TMLedgerData& p
}
if (!mHaveState && (packet.nodes().size() > 1) &&
!takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san))
!takeAsRootNode(packet.nodes(1).nodedata(), san))
{
JLOG(journal_.warn()) << "Included AS root invalid";
}
if (!mHaveTransactions && (packet.nodes().size() > 2) &&
!takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san))
!takeTxRootNode(packet.nodes(2).nodedata(), san))
{
JLOG(journal_.warn()) << "Included TX root invalid";
}
@@ -1118,13 +1140,13 @@ InboundLedger::processData(std::shared_ptr<Peer> peer, protocol::TMLedgerData& p
ScopedLockType const sl(mtx_);
// Verify node IDs and data are complete
for (auto const& node : packet.nodes())
// Verify nodes are complete
for (auto const& ledgerNode : packet.nodes())
{
if (!node.has_nodeid() || !node.has_nodedata())
if (!validateLedgerNode(ledgerNode))
{
JLOG(journal_.warn()) << "Got bad node";
peer->charge(Resource::feeMalformedRequest, "ledger_data bad node");
JLOG(journal_.warn()) << "Got malformed ledger node";
peer->charge(Resource::feeMalformedRequest, "ledgerNode");
return -1;
}
}

View File

@@ -2,14 +2,13 @@
#include <xrpld/app/ledger/InboundLedger.h>
#include <xrpld/app/ledger/LedgerMaster.h>
#include <xrpld/app/ledger/detail/LedgerNodeHelpers.h>
#include <xrpld/app/main/Application.h>
#include <xrpld/overlay/PeerSet.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/CanProcess.h>
#include <xrpl/basics/DecayingSample.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/basics/UnorderedContainers.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/scope.h>
@@ -84,15 +83,12 @@ public:
(reason != InboundLedger::Reason::CONSENSUS))
return {};
std::stringstream ss;
bool isNew = true;
std::shared_ptr<InboundLedger> inbound;
{
ScopedLockType sl(mLock);
if (stopping_)
{
JLOG(j_.debug()) << "Abort(stopping): " << ss.str();
return {};
}
@@ -111,61 +107,47 @@ public:
++mCounter;
}
}
ss << " IsNew: " << (isNew ? "true" : "false");
if (inbound->isFailed())
{
JLOG(j_.debug()) << "Abort(failed): " << ss.str();
return {};
}
if (!isNew)
inbound->update(seq);
if (!inbound->isComplete())
{
JLOG(j_.debug()) << "InProgress: " << ss.str();
return {};
}
JLOG(j_.debug()) << "Complete: " << ss.str();
return inbound->getLedger();
};
using namespace std::chrono_literals;
return perf::measureDurationAndLog(doAcquire, "InboundLedgersImp::acquire", 500ms, j_);
std::shared_ptr<Ledger const> ledger =
perf::measureDurationAndLog(doAcquire, "InboundLedgersImp::acquire", 500ms, j_);
return ledger;
}
void
acquireAsync(
JobType type,
std::string const& name,
uint256 const& hash,
std::uint32_t seq,
InboundLedger::Reason reason) override
acquireAsync(uint256 const& hash, std::uint32_t seq, InboundLedger::Reason reason) override
{
if (auto check = std::make_shared<CanProcess const>(acquiresMutex_, pendingAcquires_, hash);
*check)
std::unique_lock lock(acquiresMutex_);
try
{
app_.getJobQueue().addJob(type, name, [check, name, hash, seq, reason, this]() {
JLOG(j_.debug()) << "JOB acquireAsync " << name << " started ";
try
{
acquire(hash, seq, reason);
}
catch (std::exception const& e)
{
JLOG(j_.warn()) << "Exception thrown for acquiring new "
"inbound ledger "
<< hash << ": " << e.what();
}
catch (...)
{
JLOG(j_.warn()) << "Unknown exception thrown for acquiring new "
"inbound ledger "
<< hash;
}
});
if (pendingAcquires_.contains(hash))
return;
pendingAcquires_.insert(hash);
scope_unlock const unlock(lock);
acquire(hash, seq, reason);
}
catch (std::exception const& e)
{
JLOG(j_.warn()) << "Exception thrown for acquiring new inbound ledger " << hash << ": "
<< e.what();
}
catch (...)
{
JLOG(j_.warn()) << "Unknown exception thrown for acquiring new inbound ledger " << hash;
}
pendingAcquires_.erase(hash);
}
std::shared_ptr<InboundLedger>
@@ -266,23 +248,20 @@ public:
Serializer s;
try
{
for (int i = 0; i < packet_ptr->nodes().size(); ++i)
for (auto const& ledgerNode : packet_ptr->nodes())
{
auto const& node = packet_ptr->nodes(i);
if (!node.has_nodeid() || !node.has_nodedata())
if (!validateLedgerNode(ledgerNode))
return;
auto newNode = SHAMapTreeNode::makeFromWire(makeSlice(node.nodedata()));
if (!newNode)
auto const treeNode = getTreeNode(ledgerNode.nodedata());
if (!treeNode)
return;
s.erase();
newNode->serializeWithPrefix(s);
treeNode->serializeWithPrefix(s);
app_.getLedgerMaster().addFetchPack(
newNode->getHash().as_uint256(), std::make_shared<Blob>(s.begin(), s.end()));
treeNode->getHash().as_uint256(), std::make_shared<Blob>(s.begin(), s.end()));
}
}
catch (std::exception const&) // NOLINT(bugprone-empty-catch)

View File

@@ -1,11 +1,11 @@
#include <xrpld/app/ledger/InboundTransactions.h>
#include <xrpld/app/ledger/detail/LedgerNodeHelpers.h>
#include <xrpld/app/ledger/detail/TransactionAcquire.h>
#include <xrpld/app/main/Application.h>
#include <xrpld/overlay/PeerSet.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/basics/UnorderedContainers.h>
#include <xrpl/beast/insight/Collector.h>
#include <xrpl/protocol/RippleLedgerHash.h>
@@ -14,6 +14,7 @@
#include <xrpl/shamap/SHAMap.h>
#include <xrpl/shamap/SHAMapMissingNode.h>
#include <xrpl/shamap/SHAMapNodeID.h>
#include <xrpl/shamap/SHAMapTreeNode.h>
#include <xrpl.pb.h>
@@ -144,29 +145,38 @@ public:
return;
}
std::vector<std::pair<SHAMapNodeID, Slice>> data;
std::vector<std::pair<SHAMapNodeID, SHAMapTreeNodePtr>> data;
data.reserve(packet.nodes().size());
for (auto const& node : packet.nodes())
for (auto const& ledgerNode : packet.nodes())
{
if (!node.has_nodeid() || !node.has_nodedata())
if (!validateLedgerNode(ledgerNode))
{
peer->charge(Resource::feeMalformedRequest, "ledger_data");
JLOG(j_.warn()) << "Got malformed ledger node";
peer->charge(Resource::feeMalformedRequest, "ledgerNode");
return;
}
auto const id = deserializeSHAMapNodeID(node.nodeid());
if (!id)
auto treeNode = getTreeNode(ledgerNode.nodedata());
if (!treeNode)
{
peer->charge(Resource::feeInvalidData, "ledger_data");
JLOG(j_.warn()) << "Got invalid node data";
peer->charge(Resource::feeInvalidData, "node_data");
return;
}
data.emplace_back(*id, makeSlice(node.nodedata()));
auto const nodeID = getSHAMapNodeID(ledgerNode, treeNode);
if (!nodeID)
{
JLOG(j_.warn()) << "Got invalid node id";
peer->charge(Resource::feeInvalidData, "node_id");
return;
}
data.emplace_back(*nodeID, std::move(treeNode));
}
if (!ta->takeNodes(data, peer).isUseful())
if (!ta->takeNodes(std::move(data), peer).isUseful())
peer->charge(Resource::feeUselessData, "ledger_data not useful");
}

View File

@@ -965,9 +965,8 @@ LedgerMaster::checkAccept(std::shared_ptr<Ledger const> const& ledger)
return;
}
JLOG(m_journal.info()) << "Advancing accepted ledger to " << ledger->header().seq << " ("
<< to_short_string(ledger->header().hash) << ") with >= " << minVal
<< " validations";
JLOG(m_journal.info()) << "Advancing accepted ledger to " << ledger->header().seq
<< " with >= " << minVal << " validations";
ledger->setValidated();
ledger->setFull();

View File

@@ -0,0 +1,91 @@
#include <xrpld/app/ledger/detail/LedgerNodeHelpers.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/basics/safe_cast.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/shamap/SHAMap.h>
#include <xrpl/shamap/SHAMapLeafNode.h>
#include <xrpl/shamap/SHAMapNodeID.h>
#include <xrpl/shamap/SHAMapTreeNode.h>
#include <xrpl.pb.h>
#include <exception>
#include <optional>
#include <string_view>
namespace xrpl {
bool
validateLedgerNode(protocol::TMLedgerNode const& ledgerNode)
{
if (!ledgerNode.has_nodedata())
return false;
if (ledgerNode.has_nodeid())
return !ledgerNode.has_id() && !ledgerNode.has_depth();
return ledgerNode.has_id() ||
(ledgerNode.has_depth() && ledgerNode.depth() <= SHAMap::leafDepth);
}
SHAMapTreeNodePtr
getTreeNode(std::string_view data)
{
auto const slice = makeSlice(data);
try
{
return SHAMapTreeNode::makeFromWire(slice);
}
catch (std::exception const&)
{
return {};
}
}
std::optional<SHAMapNodeID>
getSHAMapNodeID(protocol::TMLedgerNode const& ledgerNode, SHAMapTreeNodePtr const& treeNode)
{
if (ledgerNode.has_id() || ledgerNode.has_depth())
{
if (treeNode->isInner())
{
if (!ledgerNode.has_id())
return std::nullopt;
return deserializeSHAMapNodeID(ledgerNode.id());
}
if (treeNode->isLeaf())
{
if (!ledgerNode.has_depth())
return std::nullopt;
auto const key =
safe_downcast<SHAMapLeafNode const*>(treeNode.get())->peekItem()->key();
return SHAMapNodeID::createID(ledgerNode.depth(), key);
}
UNREACHABLE("xrpl::getSHAMapNodeID : tree node is neither inner nor leaf");
return std::nullopt;
}
if (!ledgerNode.has_nodeid())
return std::nullopt;
auto nodeID = deserializeSHAMapNodeID(ledgerNode.nodeid());
if (!nodeID.has_value())
return std::nullopt;
if (treeNode->isLeaf())
{
auto const key = safe_downcast<SHAMapLeafNode const*>(treeNode.get())->peekItem()->key();
auto const expected_id = SHAMapNodeID::createID(static_cast<int>(nodeID->getDepth()), key);
if (nodeID->getNodeID() != expected_id.getNodeID())
return std::nullopt;
}
return nodeID;
}
} // namespace xrpl

View File

@@ -0,0 +1,72 @@
#pragma once
#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/shamap/SHAMapNodeID.h>
#include <xrpl/shamap/SHAMapTreeNode.h>
#include <optional>
#include <string_view>
namespace protocol {
class TMLedgerNode;
} // namespace protocol
namespace xrpl {
/**
* @brief Validates a ledger node proto message.
*
* This function checks whether a ledger node has the expected fields (for non-ledger base data):
* - The node must have `nodedata`.
* - If the legacy `nodeid` field is present then the new `id` and `depth` fields must not be
* present.
* - If the new `id` or `depth` fields are present (it is a oneof field, so only one of the two can
* be set) then the legacy `nodeid` must not be present.
* - If the `depth` field is present then it must be between 0 and SHAMap::leafDepth (inclusive).
*
* @param ledgerNode The ledger node to validate.
* @return true if the ledger node has the expected fields, false otherwise.
*/
[[nodiscard]] bool
validateLedgerNode(protocol::TMLedgerNode const& ledgerNode);
/**
* @brief Deserializes a SHAMapTreeNode from wire format data.
*
* This function attempts to create a SHAMapTreeNode from the provided data string. If the data is
* malformed or deserialization fails, the function returns a nullptr instead of throwing an
* exception.
*
* @param data The serialized node data in wire format.
* @return The deserialized tree node if successful, or a nullptr if deserialization fails.
*/
[[nodiscard]] SHAMapTreeNodePtr
getTreeNode(std::string_view data);
/**
* @brief Extracts or reconstructs the SHAMapNodeID from a ledger node proto message.
*
* This function retrieves the SHAMapNodeID for a tree node, with behavior that depends on which
* field is set and the node type (inner vs. leaf).
*
* When the legacy `nodeid` field is set in the message:
* - For all nodes: Deserializes the node ID from the field.
* - For leaf nodes: Validates that the node ID is consistent with the leaf's key.
*
* When the new `id` or `depth` field is set in the message:
* - For inner nodes: Deserializes the node ID from the `id` field.
* - For leaf nodes: Reconstructs the node ID using both the depth from the `depth` field and the
* key from the leaf node's item.
* Note that root nodes may be inner nodes or leaf nodes.
*
* @param ledgerNode The validated protocol message containing the ledger node data.
* @param treeNode The deserialized tree node (inner or leaf node).
* @return An optional containing the node ID if extraction/reconstruction succeeds, or std::nullopt
* if the required fields are missing or validation fails.
* @note This function expects that the caller has already validated the ledger node by calling the
* `validateLedgerNode` function and obtained a valid tree node by calling `getTreeNode`.
*/
[[nodiscard]] std::optional<SHAMapNodeID>
getSHAMapNodeID(protocol::TMLedgerNode const& ledgerNode, SHAMapTreeNodePtr const& treeNode);
} // namespace xrpl

View File

@@ -25,8 +25,7 @@ TimeoutCounter::TimeoutCounter(
QueueJobParameter&& jobParameter,
beast::Journal journal)
: app_(app)
, sink_(journal, to_short_string(hash) + " ")
, journal_(sink_)
, journal_(journal)
, hash_(hash)
, timerInterval_(interval)
, queueJobParameter_(std::move(jobParameter))
@@ -42,7 +41,6 @@ TimeoutCounter::setTimer(ScopedLockType& sl)
{
if (isDone())
return;
JLOG(journal_.debug()) << "Setting timer for " << timerInterval_.count() << "ms";
timer_.expires_after(timerInterval_);
timer_.async_wait([wptr = pmDowncast()](boost::system::error_code const& ec) {
if (ec == boost::asio::error::operation_aborted)
@@ -50,10 +48,6 @@ TimeoutCounter::setTimer(ScopedLockType& sl)
if (auto ptr = wptr.lock())
{
JLOG(ptr->journal_.debug())
<< "timer: ec: " << ec
<< " (operation_aborted: " << boost::asio::error::operation_aborted << " - "
<< (ec == boost::asio::error::operation_aborted ? "aborted" : "other") << ")";
ScopedLockType sl(ptr->mtx_);
ptr->queueJob(sl);
}

View File

@@ -3,7 +3,6 @@
#include <xrpld/app/main/Application.h>
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/beast/utility/WrappedSink.h>
#include <xrpl/core/Job.h>
#include <boost/asio/basic_waitable_timer.hpp>
@@ -104,7 +103,6 @@ protected:
// Used in this class for access to boost::asio::io_context and
// xrpl::Overlay. Used in subtypes for the kitchen sink.
Application& app_;
beast::WrappedSink sink_;
beast::Journal journal_;
mutable std::recursive_mutex mtx_;

View File

@@ -7,13 +7,13 @@
#include <xrpld/overlay/PeerSet.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/core/Job.h>
#include <xrpl/server/NetworkOPs.h>
#include <xrpl/shamap/SHAMap.h>
#include <xrpl/shamap/SHAMapAddNode.h>
#include <xrpl/shamap/SHAMapMissingNode.h>
#include <xrpl/shamap/SHAMapTreeNode.h>
#include <xrpl.pb.h>
@@ -173,7 +173,7 @@ TransactionAcquire::trigger(std::shared_ptr<Peer> const& peer)
SHAMapAddNode
TransactionAcquire::takeNodes(
std::vector<std::pair<SHAMapNodeID, Slice>> const& data,
std::vector<std::pair<SHAMapNodeID, SHAMapTreeNodePtr>> data,
std::shared_ptr<Peer> const& peer)
{
ScopedLockType const sl(mtx_);

View File

@@ -21,8 +21,8 @@ public:
SHAMapAddNode
takeNodes(
std::vector<std::pair<SHAMapNodeID, Slice>> const& data,
std::shared_ptr<Peer> const&);
std::vector<std::pair<SHAMapNodeID, SHAMapTreeNodePtr>> data,
std::shared_ptr<Peer> const& peer);
void
init(int startPeers);

View File

@@ -35,7 +35,6 @@
#include <xrpld/rpc/MPTokenIssuanceID.h>
#include <xrpld/rpc/ServerHandler.h>
#include <xrpl/basics/CanProcess.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/ToString.h>
#include <xrpl/basics/UnorderedContainers.h>
@@ -486,7 +485,7 @@ public:
isFull() override;
void
setMode(OperatingMode om, char const* reason) override;
setMode(OperatingMode om) override;
bool
isBlocked() override;
@@ -924,7 +923,7 @@ NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
inline void
NetworkOPsImp::setStandAlone()
{
setMode(OperatingMode::FULL, "setStandAlone");
setMode(OperatingMode::FULL);
}
inline void
@@ -1067,7 +1066,7 @@ NetworkOPsImp::processHeartbeatTimer()
{
if (mMode != OperatingMode::DISCONNECTED)
{
setMode(OperatingMode::DISCONNECTED, "Heartbeat: insufficient peers");
setMode(OperatingMode::DISCONNECTED);
std::stringstream ss;
ss << "Node count (" << numPeers << ") has fallen "
<< "below required minimum (" << minPeerCount_ << ").";
@@ -1091,7 +1090,7 @@ NetworkOPsImp::processHeartbeatTimer()
if (mMode == OperatingMode::DISCONNECTED)
{
setMode(OperatingMode::CONNECTED, "Heartbeat: sufficient peers");
setMode(OperatingMode::CONNECTED);
JLOG(m_journal.info()) << "Node count (" << numPeers << ") is sufficient.";
CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers << " peers. ";
}
@@ -1102,11 +1101,11 @@ NetworkOPsImp::processHeartbeatTimer()
CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
if (mMode == OperatingMode::SYNCING)
{
setMode(OperatingMode::SYNCING, "Heartbeat: check syncing");
setMode(OperatingMode::SYNCING);
}
else if (mMode == OperatingMode::CONNECTED)
{
setMode(OperatingMode::CONNECTED, "Heartbeat: check connected");
setMode(OperatingMode::CONNECTED);
}
auto newMode = mMode.load();
if (origMode != newMode)
@@ -1810,7 +1809,7 @@ void
NetworkOPsImp::setAmendmentBlocked()
{
amendmentBlocked_ = true;
setMode(OperatingMode::CONNECTED, "setAmendmentBlocked");
setMode(OperatingMode::CONNECTED);
}
inline bool
@@ -1841,7 +1840,7 @@ void
NetworkOPsImp::setUNLBlocked()
{
unlBlocked_ = true;
setMode(OperatingMode::CONNECTED, "setUNLBlocked");
setMode(OperatingMode::CONNECTED);
}
inline void
@@ -1941,7 +1940,7 @@ NetworkOPsImp::checkLastClosedLedger(Overlay::PeerSequence const& peerList, uint
if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
{
setMode(OperatingMode::CONNECTED, "check LCL: not on consensus ledger");
setMode(OperatingMode::CONNECTED);
}
if (consensus)
@@ -2029,8 +2028,8 @@ NetworkOPsImp::beginConsensus(
// this shouldn't happen unless we jump ledgers
if (mMode == OperatingMode::FULL)
{
JLOG(m_journal.warn()) << "beginConsensus Don't have LCL, going to tracking";
setMode(OperatingMode::TRACKING, "beginConsensus: No LCL");
JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
setMode(OperatingMode::TRACKING);
CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
}
@@ -2158,7 +2157,7 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
// validations we have for LCL. If the ledger is good enough, go to
// TRACKING - TODO
if (!needNetworkLedger_)
setMode(OperatingMode::TRACKING, "endConsensus: check tracking");
setMode(OperatingMode::TRACKING);
}
if (((mMode == OperatingMode::CONNECTED) || (mMode == OperatingMode::TRACKING)) &&
@@ -2171,7 +2170,7 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
if (registry_.get().getTimeKeeper().now() <
(current->header().parentCloseTime + 2 * current->header().closeTimeResolution))
{
setMode(OperatingMode::FULL, "endConsensus: check full");
setMode(OperatingMode::FULL);
}
}
@@ -2183,7 +2182,7 @@ NetworkOPsImp::consensusViewChange()
{
if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
{
setMode(OperatingMode::CONNECTED, "consensusViewChange");
setMode(OperatingMode::CONNECTED);
}
}
@@ -2487,7 +2486,7 @@ NetworkOPsImp::pubPeerStatus(std::function<Json::Value(void)> const& func)
}
void
NetworkOPsImp::setMode(OperatingMode om, char const* reason)
NetworkOPsImp::setMode(OperatingMode om)
{
using namespace std::chrono_literals;
if (om == OperatingMode::CONNECTED)
@@ -2507,12 +2506,11 @@ NetworkOPsImp::setMode(OperatingMode om, char const* reason)
if (mMode == om)
return;
auto const sink = om < mMode ? m_journal.warn() : m_journal.info();
mMode = om;
accounting_.mode(om);
JLOG(sink) << "STATE->" << strOperatingMode() << " - " << reason;
JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
pubServer();
}
@@ -2521,24 +2519,36 @@ NetworkOPsImp::recvValidation(std::shared_ptr<STValidation> const& val, std::str
{
JLOG(m_journal.trace()) << "recvValidation " << val->getLedgerHash() << " from " << source;
std::unique_lock lock(validationsMutex_);
BypassAccept bypassAccept = BypassAccept::no;
try
{
CanProcess const check(validationsMutex_, pendingValidations_, val->getLedgerHash());
try
if (pendingValidations_.contains(val->getLedgerHash()))
{
BypassAccept bypassAccept = check ? BypassAccept::no : BypassAccept::yes;
handleNewValidation(registry_.app(), val, source, bypassAccept, m_journal);
bypassAccept = BypassAccept::yes;
}
catch (std::exception const& e)
else
{
JLOG(m_journal.warn()) << "Exception thrown for handling new validation "
<< val->getLedgerHash() << ": " << e.what();
}
catch (...)
{
JLOG(m_journal.warn())
<< "Unknown exception thrown for handling new validation " << val->getLedgerHash();
pendingValidations_.insert(val->getLedgerHash());
}
scope_unlock const unlock(lock);
handleNewValidation(registry_.get().getApp(), val, source, bypassAccept, m_journal);
}
catch (std::exception const& e)
{
JLOG(m_journal.warn()) << "Exception thrown for handling new validation "
<< val->getLedgerHash() << ": " << e.what();
}
catch (...)
{
JLOG(m_journal.warn()) << "Unknown exception thrown for handling new validation "
<< val->getLedgerHash();
}
if (bypassAccept == BypassAccept::no)
{
pendingValidations_.erase(val->getLedgerHash());
}
lock.unlock();
pubValidation(val);

View File

@@ -17,6 +17,7 @@ enum class ProtocolFeature {
ValidatorListPropagation,
ValidatorList2Propagation,
LedgerReplay,
LedgerNodeDepth,
};
/** Represents a peer connection in the overlay. */

View File

@@ -61,6 +61,7 @@
#include <xrpl/server/Handoff.h>
#include <xrpl/server/LoadFeeTrack.h>
#include <xrpl/server/NetworkOPs.h>
#include <xrpl/shamap/SHAMap.h>
#include <xrpl/shamap/SHAMapNodeID.h>
#include <xrpl/tx/apply.h>
@@ -565,6 +566,8 @@ PeerImp::supportsFeature(ProtocolFeature f) const
return protocol_ >= make_protocol(2, 1);
case ProtocolFeature::ValidatorList2Propagation:
return protocol_ >= make_protocol(2, 2);
case ProtocolFeature::LedgerNodeDepth:
return protocol_ >= make_protocol(2, 3);
case ProtocolFeature::LedgerReplay:
return ledgerReplayEnabled_;
}
@@ -1611,7 +1614,8 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
}
}
// Verify ledger node IDs
// Verify and parse ledger node IDs
std::vector<SHAMapNodeID> nodeIDs;
if (itype != protocol::liBASE)
{
if (m->nodeids_size() <= 0)
@@ -1620,13 +1624,16 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
return;
}
nodeIDs.reserve(m->nodeids_size());
for (auto const& nodeId : m->nodeids())
{
if (deserializeSHAMapNodeID(nodeId) == std::nullopt)
auto parsed = deserializeSHAMapNodeID(nodeId);
if (!parsed)
{
badData("Invalid SHAMap node ID");
return;
}
nodeIDs.push_back(std::move(*parsed));
}
}
@@ -1649,10 +1656,11 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
// Queue a job to process the request
std::weak_ptr<PeerImp> const weak = shared_from_this();
app_.getJobQueue().addJob(jtLEDGER_REQ, "RcvGetLedger", [weak, m]() {
if (auto peer = weak.lock())
peer->processLedgerRequest(m);
});
app_.getJobQueue().addJob(
jtLEDGER_REQ, "RcvGetLedger", [weak, m, nodeIDs = std::move(nodeIDs)]() mutable {
if (auto peer = weak.lock())
peer->processLedgerRequest(m, std::move(nodeIDs));
});
}
void
@@ -3361,7 +3369,9 @@ PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
}
void
PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
PeerImp::processLedgerRequest(
std::shared_ptr<protocol::TMGetLedger> const& m,
std::vector<SHAMapNodeID> nodeIDs)
{
// Do not resource charge a peer responding to a relay
if (!m->has_requestcookie())
@@ -3446,26 +3456,25 @@ PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
}
// Add requested node data to reply
if (m->nodeids_size() > 0)
if (!nodeIDs.empty())
{
std::uint32_t const defaultDepth = isHighLatency() ? 2 : 1;
auto const queryDepth{m->has_querydepth() ? m->querydepth() : defaultDepth};
std::vector<std::pair<SHAMapNodeID, Blob>> data;
std::vector<SHAMapNodeData> data;
auto const useLedgerNodeDepth = supportsFeature(ProtocolFeature::LedgerNodeDepth);
for (int i = 0;
i < m->nodeids_size() && ledgerData.nodes_size() < Tuning::softMaxReplyNodes;
++i)
for (auto const& nodeID : nodeIDs)
{
auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))};
if (ledgerData.nodes_size() >= Tuning::softMaxReplyNodes)
break;
data.clear();
data.reserve(Tuning::softMaxReplyNodes);
try
{
// NOLINTNEXTLINE(bugprone-unchecked-optional-access) nodeids checked in onGetLedger
if (map->getNodeFat(*shaMapNodeId, data, fatLeaves, queryDepth))
if (map->getNodeFat(nodeID, data, fatLeaves, queryDepth))
{
JLOG(p_journal_.trace())
<< "processLedgerRequest: getNodeFat got " << data.size() << " nodes";
@@ -3474,9 +3483,26 @@ PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
{
if (ledgerData.nodes_size() >= Tuning::hardMaxReplyNodes)
break;
protocol::TMLedgerNode* node{ledgerData.add_nodes()};
node->set_nodeid(d.first.getRawString());
node->set_nodedata(d.second.data(), d.second.size());
node->set_nodedata(d.data.data(), d.data.size());
// When the LedgerNodeDepth protocol feature is not supported by the peer,
// we always set the `nodeid` field. However, when it is supported then we
// set the `id` field for inner nodes and the `depth` field for leaf nodes.
if (!useLedgerNodeDepth)
{
node->set_nodeid(d.nodeID.getRawString());
}
else if (d.isLeaf)
{
node->set_depth(d.nodeID.getDepth());
}
else
{
node->set_id(d.nodeID.getRawString());
}
}
}
else
@@ -3515,7 +3541,7 @@ PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
info += ", no hash specified";
JLOG(p_journal_.warn())
<< "processLedgerRequest: getNodeFat with nodeId " << *shaMapNodeId
<< "processLedgerRequest: getNodeFat with nodeId " << nodeID
<< " and ledger info type " << info << " throws exception: " << e.what();
}
}

View File

@@ -14,6 +14,7 @@
#include <xrpl/protocol/STTx.h>
#include <xrpl/protocol/STValidation.h>
#include <xrpl/resource/Fees.h>
#include <xrpl/shamap/SHAMapNodeID.h>
#include <boost/circular_buffer.hpp>
#include <boost/endian/conversion.hpp>
@@ -792,7 +793,9 @@ private:
getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const;
void
processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m);
processLedgerRequest(
std::shared_ptr<protocol::TMGetLedger> const& m,
std::vector<SHAMapNodeID> nodeIDs);
};
//------------------------------------------------------------------------------

View File

@@ -28,6 +28,7 @@ namespace xrpl {
constexpr ProtocolVersion const supportedProtocolList[]{
{2, 1},
{2, 2},
{2, 3},
};
// This ugly construct ensures that supportedProtocolList is sorted in strictly