Intrusive SHAMap smart pointers for efficient memory use and lock-free synchronization (#5152)
The main goal of this optimisation is memory reduction in SHAMapTreeNodes by introducing intrusive pointers instead of standard std::shared_ptr and std::weak_ptr.
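
In rough outline, the reference counts move from a separately allocated control block into the pointee itself (an illustrative sketch; the SHAMapInnerNode usage is inferred from the commit title and is not shown verbatim in this excerpt):

    // A node opts in by inheriting the intrusive counts:
    class SHAMapInnerNode : public ripple::IntrusiveRefCounts
    {
        // ...
        void partialDestructor();  // runs when the strong count hits zero
    };

    // Used much like std::shared_ptr, but with no separate control block:
    auto node = ripple::intr_ptr::make_shared<SHAMapInnerNode>();
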
Commit fc204773d6 (parent 2bc5cb240f), committed via GitHub.
@@ -16,6 +16,18 @@ set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
  # GCC-specific fixes
  add_compile_options(-Wno-unknown-pragmas -Wno-subobject-linkage)
  # -Wno-subobject-linkage can be removed when we upgrade GCC version to at least 13.3
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
  # Clang-specific fixes
  add_compile_options(-Wno-unknown-warning-option) # Ignore unknown warning options
elseif(MSVC)
  # MSVC-specific fixes
  add_compile_options(/wd4068) # Ignore unknown pragmas
endif()

# make GIT_COMMIT_HASH define available to all sources
find_package(Git)
if(Git_FOUND)
include/xrpl/basics/IntrusivePointer.h (new file, 515 lines)
@@ -0,0 +1,515 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2023 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_BASICS_INTRUSIVEPOINTER_H_INCLUDED
#define RIPPLE_BASICS_INTRUSIVEPOINTER_H_INCLUDED

#include <concepts>
#include <cstdint>
#include <type_traits>
#include <utility>

namespace ripple {

//------------------------------------------------------------------------------

/** Tag to create an intrusive pointer from another intrusive pointer by using a
    static cast. This is useful to create an intrusive pointer to a derived
    class from an intrusive pointer to a base class.
*/
struct StaticCastTagSharedIntrusive
{
};

/** Tag to create an intrusive pointer from another intrusive pointer by using a
    dynamic cast. This is useful to create an intrusive pointer to a derived
    class from an intrusive pointer to a base class. If the cast fails an empty
    (null) intrusive pointer is created.
*/
struct DynamicCastTagSharedIntrusive
{
};

/** When creating or adopting a raw pointer, controls whether the strong count
    is incremented or not. Use this tag to increment the strong count.
*/
struct SharedIntrusiveAdoptIncrementStrongTag
{
};

/** When creating or adopting a raw pointer, controls whether the strong count
    is incremented or not. Use this tag to leave the strong count unchanged.
*/
struct SharedIntrusiveAdoptNoIncrementTag
{
};

//------------------------------------------------------------------------------
//

template <class T>
concept CAdoptTag = std::is_same_v<T, SharedIntrusiveAdoptIncrementStrongTag> ||
    std::is_same_v<T, SharedIntrusiveAdoptNoIncrementTag>;

//------------------------------------------------------------------------------

/** A shared intrusive pointer class that supports weak pointers.

    This is meant to be used for SHAMapInnerNodes, but may be useful for other
    cases. Since the reference counts are stored on the pointee, the pointee is
    not destroyed until both the strong _and_ weak pointer counts go to zero.
    When the strong pointer count goes to zero, the "partialDestructor" is
    called. This can be used to destroy as much of the object as possible while
    still retaining the reference counts. For example, for SHAMapInnerNodes the
    children may be reset in that function. Note that std::shared_ptr WILL
    run the destructor when the strong count reaches zero, but may not free the
    memory used by the object until the weak count reaches zero. In rippled, we
    typically allocate shared pointers with the `make_shared` function. When
    that is used, the memory is not reclaimed until the weak count reaches zero.
*/
template <class T>
class SharedIntrusive
{
public:
    SharedIntrusive() = default;

    template <CAdoptTag TAdoptTag>
    SharedIntrusive(T* p, TAdoptTag) noexcept;

    SharedIntrusive(SharedIntrusive const& rhs);

    template <class TT>
    // TODO: convertible_to isn't quite right. That includes a static castable.
    // Find the right concept.
    requires std::convertible_to<TT*, T*>
    SharedIntrusive(SharedIntrusive<TT> const& rhs);

    SharedIntrusive(SharedIntrusive&& rhs);

    template <class TT>
    requires std::convertible_to<TT*, T*>
    SharedIntrusive(SharedIntrusive<TT>&& rhs);

    SharedIntrusive&
    operator=(SharedIntrusive const& rhs);

    bool
    operator!=(std::nullptr_t) const;

    bool
    operator==(std::nullptr_t) const;

    template <class TT>
    requires std::convertible_to<TT*, T*>
    SharedIntrusive&
    operator=(SharedIntrusive<TT> const& rhs);

    SharedIntrusive&
    operator=(SharedIntrusive&& rhs);

    template <class TT>
    requires std::convertible_to<TT*, T*>
    SharedIntrusive&
    operator=(SharedIntrusive<TT>&& rhs);

    /** Adopt the raw pointer. The strong reference may or may not be
        incremented, depending on the TAdoptTag.
    */
    template <CAdoptTag TAdoptTag = SharedIntrusiveAdoptIncrementStrongTag>
    void
    adopt(T* p);

    ~SharedIntrusive();

    /** Create a new SharedIntrusive by statically casting the pointer
        controlled by the rhs param.
    */
    template <class TT>
    SharedIntrusive(
        StaticCastTagSharedIntrusive,
        SharedIntrusive<TT> const& rhs);

    /** Create a new SharedIntrusive by statically casting the pointer
        controlled by the rhs param.
    */
    template <class TT>
    SharedIntrusive(StaticCastTagSharedIntrusive, SharedIntrusive<TT>&& rhs);

    /** Create a new SharedIntrusive by dynamically casting the pointer
        controlled by the rhs param.
    */
    template <class TT>
    SharedIntrusive(
        DynamicCastTagSharedIntrusive,
        SharedIntrusive<TT> const& rhs);

    /** Create a new SharedIntrusive by dynamically casting the pointer
        controlled by the rhs param.
    */
    template <class TT>
    SharedIntrusive(DynamicCastTagSharedIntrusive, SharedIntrusive<TT>&& rhs);

    T&
    operator*() const noexcept;

    T*
    operator->() const noexcept;

    explicit
    operator bool() const noexcept;

    /** Set the pointer to null, decrement the strong count, and run the
        appropriate release action.
    */
    void
    reset();

    /** Get the raw pointer */
    T*
    get() const;

    /** Return the strong count */
    std::size_t
    use_count() const;

    template <class TT, class... Args>
    friend SharedIntrusive<TT>
    make_SharedIntrusive(Args&&... args);

    template <class TT>
    friend class SharedIntrusive;

    template <class TT>
    friend class SharedWeakUnion;

    template <class TT>
    friend class WeakIntrusive;

private:
    /** Return the raw pointer held by this object. */
    T*
    unsafeGetRawPtr() const;

    /** Exchange the current raw pointer held by this object with the given
        pointer. Decrement the strong count of the raw pointer previously held
        by this object and run the appropriate release action.
    */
    void
    unsafeReleaseAndStore(T* next);

    /** Set the raw pointer directly. This is wrapped in a function so the class
        can support both atomic and non-atomic pointers in a future patch.
    */
    void
    unsafeSetRawPtr(T* p);

    /** Exchange the raw pointer directly.
        This sets the raw pointer to the given value and returns the previous
        value. This is wrapped in a function so the class can support both
        atomic and non-atomic pointers in a future patch.
    */
    T*
    unsafeExchange(T* p);

    /** Pointer to the type with an intrusive count */
    T* ptr_{nullptr};
};
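
// Illustrative usage sketch (an editorial addition, not part of the original
// commit; `Node` is a hypothetical type inheriting from IntrusiveRefCounts,
// declared in IntrusiveRefCounts.h):
//
//     struct Node : IntrusiveRefCounts
//     {
//         void partialDestructor();  // release what you can at strong == 0
//     };
//
//     auto sp = make_SharedIntrusive<Node>();  // strong count == 1
//     SharedIntrusive<Node> sp2 = sp;          // strong count == 2
//     sp2.reset();                             // back to 1; no release action
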
//------------------------------------------------------------------------------

/** A weak intrusive pointer class for the SharedIntrusive pointer class.

    Note that this weak pointer class acts differently from normal weak pointer
    classes. When the strong pointer count goes to zero, the "partialDestructor"
    is called. See the comment on SharedIntrusive for a fuller explanation.
*/
template <class T>
class WeakIntrusive
{
public:
    WeakIntrusive() = default;

    WeakIntrusive(WeakIntrusive const& rhs);

    WeakIntrusive(WeakIntrusive&& rhs);

    WeakIntrusive(SharedIntrusive<T> const& rhs);

    // There is no move constructor from a strong intrusive ptr because
    // moving would be more expensive than copying in this case (the strong
    // ref would need to be decremented)
    WeakIntrusive(SharedIntrusive<T> const&& rhs) = delete;

    // Since there are no current use cases for copy assignment in
    // WeakIntrusive, we delete this operator to simplify the implementation. If
    // a need arises in the future, we can reintroduce it with proper
    // consideration.
    WeakIntrusive&
    operator=(WeakIntrusive const&) = delete;

    template <class TT>
    requires std::convertible_to<TT*, T*>
    WeakIntrusive&
    operator=(SharedIntrusive<TT> const& rhs);

    /** Adopt the raw pointer and increment the weak count. */
    void
    adopt(T* ptr);

    ~WeakIntrusive();

    /** Get a strong pointer from the weak pointer, if possible. This will
        only return a seated pointer if the strong count on the raw pointer
        is non-zero before locking.
    */
    SharedIntrusive<T>
    lock() const;

    /** Return true if the strong count is zero. */
    bool
    expired() const;

    /** Set the pointer to null and decrement the weak count.

        Note: This may run the destructor if the strong count is zero.
    */
    void
    reset();

private:
    T* ptr_ = nullptr;

    /** Decrement the weak count. This does _not_ set the raw pointer to
        null.

        Note: This may run the destructor if the strong count is zero.
    */
    void
    unsafeReleaseNoStore();
};
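
// Illustrative sketch (an editorial addition, continuing the hypothetical
// `Node` example above):
//
//     WeakIntrusive<Node> wp{sp};
//     if (auto locked = wp.lock())   // seated only if the strong count was > 0
//         use(*locked);
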
//------------------------------------------------------------------------------

/** A combination of a strong and a weak intrusive pointer stored in the
    space of a single pointer.

    This class is similar to a `std::variant<SharedIntrusive,WeakIntrusive>`
    with some optimizations. In particular, it uses a low-order bit to
    determine if the raw pointer represents a strong pointer or a weak
    pointer. It can also be quickly switched between its strong pointer and
    weak pointer representations. This class is useful for storing intrusive
    pointers in tagged caches.
*/
template <class T>
class SharedWeakUnion
{
    // Tagged pointer. Low bit determines if this is a strong or a weak
    // pointer. The low bit must be masked to zero when converting back to a
    // pointer. If the low bit is '1', this is a weak pointer.
    static_assert(
        alignof(T) >= 2,
        "Bad alignment: Combo pointer requires low bit to be zero");

public:
    SharedWeakUnion() = default;

    SharedWeakUnion(SharedWeakUnion const& rhs);

    template <class TT>
    requires std::convertible_to<TT*, T*>
    SharedWeakUnion(SharedIntrusive<TT> const& rhs);

    SharedWeakUnion(SharedWeakUnion&& rhs);

    template <class TT>
    requires std::convertible_to<TT*, T*>
    SharedWeakUnion(SharedIntrusive<TT>&& rhs);

    SharedWeakUnion&
    operator=(SharedWeakUnion const& rhs);

    template <class TT>
    requires std::convertible_to<TT*, T*>
    SharedWeakUnion&
    operator=(SharedIntrusive<TT> const& rhs);

    template <class TT>
    requires std::convertible_to<TT*, T*>
    SharedWeakUnion&
    operator=(SharedIntrusive<TT>&& rhs);

    ~SharedWeakUnion();

    /** Return a strong pointer if this is already a strong pointer (i.e.
        don't lock the weak pointer. Use the `lock` method if that's what's
        needed)
    */
    SharedIntrusive<T>
    getStrong() const;

    /** Return true if this is a strong pointer and the strong pointer is
        seated.
    */
    explicit
    operator bool() const noexcept;

    /** Set the pointer to null, decrement the appropriate ref count, and
        run the appropriate release action.
    */
    void
    reset();

    /** If this is a strong pointer, return the raw pointer. Otherwise
        return null.
    */
    T*
    get() const;

    /** If this is a strong pointer, return the strong count. Otherwise
        return 0.
    */
    std::size_t
    use_count() const;

    /** Return true if the strong count is zero (or the pointer is null). */
    bool
    expired() const;

    /** If this is a strong pointer, return the strong pointer. Otherwise
        attempt to lock the weak pointer.
    */
    SharedIntrusive<T>
    lock() const;

    /** Return true if this represents a strong pointer. */
    bool
    isStrong() const;

    /** Return true if this represents a weak pointer. */
    bool
    isWeak() const;

    /** If this is a weak pointer, attempt to convert it to a strong
        pointer.

        @return true if successfully converted to a strong pointer (or was
                already a strong pointer). Otherwise false.
    */
    bool
    convertToStrong();

    /** If this is a strong pointer, attempt to convert it to a weak
        pointer.

        @return false if the pointer is null. Otherwise return true.
    */
    bool
    convertToWeak();

private:
    // Tagged pointer. Low bit determines if this is a strong or a weak
    // pointer. The low bit must be masked to zero when converting back to a
    // pointer. If the low bit is '1', this is a weak pointer.
    std::uintptr_t tp_{0};
    static constexpr std::uintptr_t tagMask = 1;
    static constexpr std::uintptr_t ptrMask = ~tagMask;

private:
    /** Return the raw pointer held by this object. */
    T*
    unsafeGetRawPtr() const;

    enum class RefStrength { strong, weak };
    /** Set the raw pointer and tag bit directly. */
    void
    unsafeSetRawPtr(T* p, RefStrength rs);

    /** Set the raw pointer and tag bit to all zeros (strong null pointer). */
    void unsafeSetRawPtr(std::nullptr_t);

    /** Decrement the appropriate ref count, and run the appropriate release
        action. Note: this does _not_ set the raw pointer to null.
    */
    void
    unsafeReleaseNoStore();
};
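
// Illustrative sketch (an editorial addition, not part of the original
// commit): a cache slot can demote its reference without growing beyond a
// single pointer's footprint.
//
//     SharedWeakUnion<Node> slot = sp;  // strong; tag bit is 0
//     slot.convertToWeak();             // weak; may trigger partialDestroy
//     if (auto strong = slot.lock())    // re-acquire if still alive
//         use(*strong);
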
//------------------------------------------------------------------------------

/** Create a shared intrusive pointer.

    Note: unlike std::shared_ptr, where there is an advantage to allocating
    the pointer and control block together, there is no such benefit for
    intrusive pointers.
*/
template <class TT, class... Args>
SharedIntrusive<TT>
make_SharedIntrusive(Args&&... args)
{
    auto p = new TT(std::forward<Args>(args)...);

    static_assert(
        noexcept(SharedIntrusive<TT>(
            std::declval<TT*>(),
            std::declval<SharedIntrusiveAdoptNoIncrementTag>())),
        "SharedIntrusive constructor should not throw or this can leak "
        "memory");

    return SharedIntrusive<TT>(p, SharedIntrusiveAdoptNoIncrementTag{});
}

//------------------------------------------------------------------------------

namespace intr_ptr {
template <class T>
using SharedPtr = SharedIntrusive<T>;

template <class T>
using WeakPtr = WeakIntrusive<T>;

template <class T>
using SharedWeakUnionPtr = SharedWeakUnion<T>;

template <class T, class... A>
SharedPtr<T>
make_shared(A&&... args)
{
    return make_SharedIntrusive<T>(std::forward<A>(args)...);
}

template <class T, class TT>
SharedPtr<T>
static_pointer_cast(TT const& v)
{
    return SharedPtr<T>(StaticCastTagSharedIntrusive{}, v);
}

template <class T, class TT>
SharedPtr<T>
dynamic_pointer_cast(TT const& v)
{
    return SharedPtr<T>(DynamicCastTagSharedIntrusive{}, v);
}
} // namespace intr_ptr
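
// Illustrative sketch (an editorial addition; `Base`/`Derived` are
// hypothetical, with Derived inheriting from Base):
//
//     intr_ptr::SharedPtr<Base> b = intr_ptr::make_shared<Derived>();
//     auto d = intr_ptr::dynamic_pointer_cast<Derived>(b);  // null on failure
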
} // namespace ripple

#endif
include/xrpl/basics/IntrusivePointer.ipp (new file, 740 lines)
@@ -0,0 +1,740 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2023 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_BASICS_INTRUSIVEPOINTER_IPP_INCLUDED
#define RIPPLE_BASICS_INTRUSIVEPOINTER_IPP_INCLUDED

#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/basics/IntrusiveRefCounts.h>

#include <utility>

namespace ripple {

template <class T>
template <CAdoptTag TAdoptTag>
SharedIntrusive<T>::SharedIntrusive(T* p, TAdoptTag) noexcept : ptr_{p}
{
    if constexpr (std::is_same_v<
                      TAdoptTag,
                      SharedIntrusiveAdoptIncrementStrongTag>)
    {
        if (p)
            p->addStrongRef();
    }
}

template <class T>
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive const& rhs)
    : ptr_{[&] {
        auto p = rhs.unsafeGetRawPtr();
        if (p)
            p->addStrongRef();
        return p;
    }()}
{
}

template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive<TT> const& rhs)
    : ptr_{[&] {
        auto p = rhs.unsafeGetRawPtr();
        if (p)
            p->addStrongRef();
        return p;
    }()}
{
}

template <class T>
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive&& rhs)
    : ptr_{rhs.unsafeExchange(nullptr)}
{
}

template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive<TT>&& rhs)
    : ptr_{rhs.unsafeExchange(nullptr)}
{
}

template <class T>
SharedIntrusive<T>&
SharedIntrusive<T>::operator=(SharedIntrusive const& rhs)
{
    if (this == &rhs)
        return *this;
    auto p = rhs.unsafeGetRawPtr();
    if (p)
        p->addStrongRef();
    unsafeReleaseAndStore(p);
    return *this;
}

template <class T>
template <class TT>
// clang-format off
requires std::convertible_to<TT*, T*>
// clang-format on
SharedIntrusive<T>&
SharedIntrusive<T>::operator=(SharedIntrusive<TT> const& rhs)
{
    if constexpr (std::is_same_v<T, TT>)
    {
        // This case should never be hit. The operator above will run instead.
        // (The normal operator= is needed or it will be marked `deleted`)
        if (this == &rhs)
            return *this;
    }
    auto p = rhs.unsafeGetRawPtr();
    if (p)
        p->addStrongRef();
    unsafeReleaseAndStore(p);
    return *this;
}

template <class T>
SharedIntrusive<T>&
SharedIntrusive<T>::operator=(SharedIntrusive&& rhs)
{
    if (this == &rhs)
        return *this;

    unsafeReleaseAndStore(rhs.unsafeExchange(nullptr));
    return *this;
}

template <class T>
template <class TT>
// clang-format off
requires std::convertible_to<TT*, T*>
// clang-format on
SharedIntrusive<T>&
SharedIntrusive<T>::operator=(SharedIntrusive<TT>&& rhs)
{
    static_assert(
        !std::is_same_v<T, TT>,
        "This overload should not be instantiated for T == TT");

    unsafeReleaseAndStore(rhs.unsafeExchange(nullptr));
    return *this;
}

template <class T>
bool
SharedIntrusive<T>::operator!=(std::nullptr_t) const
{
    return this->get() != nullptr;
}

template <class T>
bool
SharedIntrusive<T>::operator==(std::nullptr_t) const
{
    return this->get() == nullptr;
}

template <class T>
template <CAdoptTag TAdoptTag>
void
SharedIntrusive<T>::adopt(T* p)
{
    if constexpr (std::is_same_v<
                      TAdoptTag,
                      SharedIntrusiveAdoptIncrementStrongTag>)
    {
        if (p)
            p->addStrongRef();
    }
    unsafeReleaseAndStore(p);
}

template <class T>
SharedIntrusive<T>::~SharedIntrusive()
{
    unsafeReleaseAndStore(nullptr);
}

template <class T>
template <class TT>
SharedIntrusive<T>::SharedIntrusive(
    StaticCastTagSharedIntrusive,
    SharedIntrusive<TT> const& rhs)
    : ptr_{[&] {
        auto p = static_cast<T*>(rhs.unsafeGetRawPtr());
        if (p)
            p->addStrongRef();
        return p;
    }()}
{
}

template <class T>
template <class TT>
SharedIntrusive<T>::SharedIntrusive(
    StaticCastTagSharedIntrusive,
    SharedIntrusive<TT>&& rhs)
    : ptr_{static_cast<T*>(rhs.unsafeExchange(nullptr))}
{
}

template <class T>
template <class TT>
SharedIntrusive<T>::SharedIntrusive(
    DynamicCastTagSharedIntrusive,
    SharedIntrusive<TT> const& rhs)
    : ptr_{[&] {
        auto p = dynamic_cast<T*>(rhs.unsafeGetRawPtr());
        if (p)
            p->addStrongRef();
        return p;
    }()}
{
}

template <class T>
template <class TT>
SharedIntrusive<T>::SharedIntrusive(
    DynamicCastTagSharedIntrusive,
    SharedIntrusive<TT>&& rhs)
{
    // This can be simplified without the `exchange`, but the `exchange` is kept
    // in anticipation of supporting atomic operations.
    auto toSet = rhs.unsafeExchange(nullptr);
    if (toSet)
    {
        ptr_ = dynamic_cast<T*>(toSet);
        if (!ptr_)
            // need to set the pointer back or it will leak
            rhs.unsafeExchange(toSet);
    }
}

template <class T>
T&
SharedIntrusive<T>::operator*() const noexcept
{
    return *unsafeGetRawPtr();
}

template <class T>
T*
SharedIntrusive<T>::operator->() const noexcept
{
    return unsafeGetRawPtr();
}

template <class T>
SharedIntrusive<T>::operator bool() const noexcept
{
    return bool(unsafeGetRawPtr());
}

template <class T>
void
SharedIntrusive<T>::reset()
{
    unsafeReleaseAndStore(nullptr);
}

template <class T>
T*
SharedIntrusive<T>::get() const
{
    return unsafeGetRawPtr();
}

template <class T>
std::size_t
SharedIntrusive<T>::use_count() const
{
    if (auto p = unsafeGetRawPtr())
        return p->use_count();
    return 0;
}

template <class T>
T*
SharedIntrusive<T>::unsafeGetRawPtr() const
{
    return ptr_;
}

template <class T>
void
SharedIntrusive<T>::unsafeSetRawPtr(T* p)
{
    ptr_ = p;
}

template <class T>
T*
SharedIntrusive<T>::unsafeExchange(T* p)
{
    return std::exchange(ptr_, p);
}

template <class T>
void
SharedIntrusive<T>::unsafeReleaseAndStore(T* next)
{
    auto prev = unsafeExchange(next);
    if (!prev)
        return;

    using enum ReleaseStrongRefAction;
    auto action = prev->releaseStrongRef();
    switch (action)
    {
        case noop:
            break;
        case destroy:
            delete prev;
            break;
        case partialDestroy:
            prev->partialDestructor();
            partialDestructorFinished(&prev);
            // prev is null and may no longer be used
            break;
    }
}
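
// Summary of the release actions above (an editorial note, not part of the
// original commit); the action reflects the counts *before* the decrement:
//
//     strong  weak   action
//     > 1     any    noop
//     1       0      destroy         (run the full destructor)
//     1       > 0    partialDestroy  (run partialDestructor, keep the counts)
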
//------------------------------------------------------------------------------

template <class T>
WeakIntrusive<T>::WeakIntrusive(WeakIntrusive const& rhs) : ptr_{rhs.ptr_}
{
    if (ptr_)
        ptr_->addWeakRef();
}

template <class T>
WeakIntrusive<T>::WeakIntrusive(WeakIntrusive&& rhs) : ptr_{rhs.ptr_}
{
    rhs.ptr_ = nullptr;
}

template <class T>
WeakIntrusive<T>::WeakIntrusive(SharedIntrusive<T> const& rhs)
    : ptr_{rhs.unsafeGetRawPtr()}
{
    if (ptr_)
        ptr_->addWeakRef();
}

template <class T>
template <class TT>
// clang-format off
requires std::convertible_to<TT*, T*>
// clang-format on
WeakIntrusive<T>&
WeakIntrusive<T>::operator=(SharedIntrusive<TT> const& rhs)
{
    unsafeReleaseNoStore();
    auto p = rhs.unsafeGetRawPtr();
    if (p)
        p->addWeakRef();
    // store the new pointer after releasing the old one (without this the
    // weak pointer would keep referring to the released object)
    ptr_ = p;
    return *this;
}

template <class T>
void
WeakIntrusive<T>::adopt(T* ptr)
{
    unsafeReleaseNoStore();
    if (ptr)
        ptr->addWeakRef();
    ptr_ = ptr;
}

template <class T>
WeakIntrusive<T>::~WeakIntrusive()
{
    unsafeReleaseNoStore();
}

template <class T>
SharedIntrusive<T>
WeakIntrusive<T>::lock() const
{
    if (ptr_ && ptr_->checkoutStrongRefFromWeak())
    {
        return SharedIntrusive<T>{ptr_, SharedIntrusiveAdoptNoIncrementTag{}};
    }
    return {};
}

template <class T>
bool
WeakIntrusive<T>::expired() const
{
    return (!ptr_ || ptr_->expired());
}

template <class T>
void
WeakIntrusive<T>::reset()
{
    unsafeReleaseNoStore();
    ptr_ = nullptr;
}

template <class T>
void
WeakIntrusive<T>::unsafeReleaseNoStore()
{
    if (!ptr_)
        return;

    using enum ReleaseWeakRefAction;
    auto action = ptr_->releaseWeakRef();
    switch (action)
    {
        case noop:
            break;
        case destroy:
            delete ptr_;
            break;
    }
}

//------------------------------------------------------------------------------

template <class T>
SharedWeakUnion<T>::SharedWeakUnion(SharedWeakUnion const& rhs) : tp_{rhs.tp_}
{
    auto p = rhs.unsafeGetRawPtr();
    if (!p)
        return;

    if (rhs.isStrong())
        p->addStrongRef();
    else
        p->addWeakRef();
}

template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakUnion<T>::SharedWeakUnion(SharedIntrusive<TT> const& rhs)
{
    auto p = rhs.unsafeGetRawPtr();
    if (p)
        p->addStrongRef();
    unsafeSetRawPtr(p, RefStrength::strong);
}

template <class T>
SharedWeakUnion<T>::SharedWeakUnion(SharedWeakUnion&& rhs) : tp_{rhs.tp_}
{
    rhs.unsafeSetRawPtr(nullptr);
}

template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakUnion<T>::SharedWeakUnion(SharedIntrusive<TT>&& rhs)
{
    auto p = rhs.unsafeGetRawPtr();
    if (p)
        unsafeSetRawPtr(p, RefStrength::strong);
    rhs.unsafeSetRawPtr(nullptr);
}

template <class T>
SharedWeakUnion<T>&
SharedWeakUnion<T>::operator=(SharedWeakUnion const& rhs)
{
    if (this == &rhs)
        return *this;
    unsafeReleaseNoStore();

    if (auto p = rhs.unsafeGetRawPtr())
    {
        if (rhs.isStrong())
        {
            p->addStrongRef();
            unsafeSetRawPtr(p, RefStrength::strong);
        }
        else
        {
            p->addWeakRef();
            unsafeSetRawPtr(p, RefStrength::weak);
        }
    }
    else
    {
        unsafeSetRawPtr(nullptr);
    }
    return *this;
}

template <class T>
template <class TT>
// clang-format off
requires std::convertible_to<TT*, T*>
// clang-format on
SharedWeakUnion<T>&
SharedWeakUnion<T>::operator=(SharedIntrusive<TT> const& rhs)
{
    unsafeReleaseNoStore();
    auto p = rhs.unsafeGetRawPtr();
    if (p)
        p->addStrongRef();
    unsafeSetRawPtr(p, RefStrength::strong);
    return *this;
}

template <class T>
template <class TT>
// clang-format off
requires std::convertible_to<TT*, T*>
// clang-format on
SharedWeakUnion<T>&
SharedWeakUnion<T>::operator=(SharedIntrusive<TT>&& rhs)
{
    unsafeReleaseNoStore();
    unsafeSetRawPtr(rhs.unsafeGetRawPtr(), RefStrength::strong);
    rhs.unsafeSetRawPtr(nullptr);
    return *this;
}

template <class T>
SharedWeakUnion<T>::~SharedWeakUnion()
{
    unsafeReleaseNoStore();
}

// Return a strong pointer if this is already a strong pointer (i.e. don't
// lock the weak pointer. Use the `lock` method if that's what's needed)
template <class T>
SharedIntrusive<T>
SharedWeakUnion<T>::getStrong() const
{
    SharedIntrusive<T> result;
    auto p = unsafeGetRawPtr();
    if (p && isStrong())
    {
        result.template adopt<SharedIntrusiveAdoptIncrementStrongTag>(p);
    }
    return result;
}

template <class T>
SharedWeakUnion<T>::operator bool() const noexcept
{
    return bool(get());
}

template <class T>
void
SharedWeakUnion<T>::reset()
{
    unsafeReleaseNoStore();
    unsafeSetRawPtr(nullptr);
}

template <class T>
T*
SharedWeakUnion<T>::get() const
{
    return isStrong() ? unsafeGetRawPtr() : nullptr;
}

template <class T>
std::size_t
SharedWeakUnion<T>::use_count() const
{
    if (auto p = get())
        return p->use_count();
    return 0;
}

template <class T>
bool
SharedWeakUnion<T>::expired() const
{
    auto p = unsafeGetRawPtr();
    return (!p || p->expired());
}

template <class T>
SharedIntrusive<T>
SharedWeakUnion<T>::lock() const
{
    SharedIntrusive<T> result;
    auto p = unsafeGetRawPtr();
    if (!p)
        return result;

    if (isStrong())
    {
        result.template adopt<SharedIntrusiveAdoptIncrementStrongTag>(p);
        return result;
    }

    if (p->checkoutStrongRefFromWeak())
    {
        result.template adopt<SharedIntrusiveAdoptNoIncrementTag>(p);
        return result;
    }
    return result;
}

template <class T>
bool
SharedWeakUnion<T>::isStrong() const
{
    return !(tp_ & tagMask);
}

template <class T>
bool
SharedWeakUnion<T>::isWeak() const
{
    return tp_ & tagMask;
}

template <class T>
bool
SharedWeakUnion<T>::convertToStrong()
{
    if (isStrong())
        return true;

    auto p = unsafeGetRawPtr();
    if (p && p->checkoutStrongRefFromWeak())
    {
        [[maybe_unused]] auto action = p->releaseWeakRef();
        XRPL_ASSERT(
            (action == ReleaseWeakRefAction::noop),
            "ripple::SharedWeakUnion::convertToStrong : "
            "action is noop");
        unsafeSetRawPtr(p, RefStrength::strong);
        return true;
    }
    return false;
}

template <class T>
bool
SharedWeakUnion<T>::convertToWeak()
{
    if (isWeak())
        return true;

    auto p = unsafeGetRawPtr();
    if (!p)
        return false;

    using enum ReleaseStrongRefAction;
    auto action = p->addWeakReleaseStrongRef();
    switch (action)
    {
        case noop:
            break;
        case destroy:
            // We just added a weak ref. How could we destroy?
            UNREACHABLE(
                "ripple::SharedWeakUnion::convertToWeak : destroying freshly "
                "added ref");
            delete p;
            unsafeSetRawPtr(nullptr);
            return true;  // Should never happen
        case partialDestroy:
            // This is a weird case. We just converted the last strong
            // pointer to a weak pointer.
            p->partialDestructor();
            partialDestructorFinished(&p);
            // p is null and may no longer be used
            break;
    }
    unsafeSetRawPtr(p, RefStrength::weak);
    return true;
}

template <class T>
T*
SharedWeakUnion<T>::unsafeGetRawPtr() const
{
    return reinterpret_cast<T*>(tp_ & ptrMask);
}

template <class T>
void
SharedWeakUnion<T>::unsafeSetRawPtr(T* p, RefStrength rs)
{
    tp_ = reinterpret_cast<std::uintptr_t>(p);
    if (tp_ && rs == RefStrength::weak)
        tp_ |= tagMask;
}

template <class T>
void
SharedWeakUnion<T>::unsafeSetRawPtr(std::nullptr_t)
{
    tp_ = 0;
}

template <class T>
void
SharedWeakUnion<T>::unsafeReleaseNoStore()
{
    auto p = unsafeGetRawPtr();
    if (!p)
        return;

    if (isStrong())
    {
        using enum ReleaseStrongRefAction;
        auto strongAction = p->releaseStrongRef();
        switch (strongAction)
        {
            case noop:
                break;
            case destroy:
                delete p;
                break;
            case partialDestroy:
                p->partialDestructor();
                partialDestructorFinished(&p);
                // p is null and may no longer be used
                break;
        }
    }
    else
    {
        using enum ReleaseWeakRefAction;
        auto weakAction = p->releaseWeakRef();
        switch (weakAction)
        {
            case noop:
                break;
            case destroy:
                delete p;
                break;
        }
    }
}

} // namespace ripple
#endif
include/xrpl/basics/IntrusiveRefCounts.h (new file, 502 lines)
@@ -0,0 +1,502 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2023 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED
#define RIPPLE_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED

#include <xrpl/beast/utility/instrumentation.h>

#include <atomic>
#include <cstdint>

namespace ripple {

/** Action to perform when releasing a strong pointer.

    noop: Do nothing. For example, a `noop` action will occur when a count is
    decremented to a non-zero value.

    partialDestroy: Run the `partialDestructor`. This action will happen when a
    strong count is decremented to zero and the weak count is non-zero.

    destroy: Run the destructor. This action will occur when either the strong
    count or weak count is decremented and the other count is also zero.
*/
enum class ReleaseStrongRefAction { noop, partialDestroy, destroy };

/** Action to perform when releasing a weak pointer.

    noop: Do nothing. For example, a `noop` action will occur when a count is
    decremented to a non-zero value.

    destroy: Run the destructor. This action will occur when either the strong
    count or weak count is decremented and the other count is also zero.
*/
enum class ReleaseWeakRefAction { noop, destroy };

/** Implement the strong count, weak count, and bit flags for an intrusive
    pointer.

    A class can satisfy the requirements of a ripple::IntrusivePointer by
    inheriting from this class.
*/
struct IntrusiveRefCounts
{
    virtual ~IntrusiveRefCounts() noexcept;

    // This must be `noexcept` or the make_SharedIntrusive function could leak
    // memory.
    void
    addStrongRef() const noexcept;

    void
    addWeakRef() const noexcept;

    ReleaseStrongRefAction
    releaseStrongRef() const;

    // Same as:
    // {
    //   addWeakRef();
    //   return releaseStrongRef();
    // }
    // done as one atomic operation
    ReleaseStrongRefAction
    addWeakReleaseStrongRef() const;

    ReleaseWeakRefAction
    releaseWeakRef() const;

    // Returns true if able to check out a strong ref, false otherwise.
    bool
    checkoutStrongRefFromWeak() const noexcept;

    bool
    expired() const noexcept;

    std::size_t
    use_count() const noexcept;

    // This function MUST be called after a partial destructor finishes running.
    // Calling this function may cause other threads to delete the object
    // pointed to by `o`, so `o` should never be used after calling this
    // function. The parameter will be set to a `nullptr` after calling this
    // function to emphasize that it should not be used.
    // Note: This is intentionally NOT called at the end of `partialDestructor`.
    // The reason for this is if new classes are written to support this smart
    // pointer class, they need to write their own `partialDestructor` function
    // and ensure `partialDestructorFinished` is called at the end. Putting this
    // call inside the smart pointer class itself is expected to be less error
    // prone.
    // Note: The "two-star" programming is intentional. It emphasizes that `o`
    // may be deleted and the unergonomic API is meant to signal the special
    // nature of this function call to callers.
    // Note: This is a template to support incompletely defined classes.
    template <class T>
    friend void
    partialDestructorFinished(T** o);

private:
    // TODO: We may need to use a uint64_t for both counts. This will reduce the
    // memory savings. We need to audit the code to make sure 16 bit counts are
    // enough for strong pointers and 14 bit counts are enough for weak
    // pointers. Use type aliases to make it easy to switch types.
    using CountType = std::uint16_t;
    static constexpr size_t StrongCountNumBits = sizeof(CountType) * 8;
    static constexpr size_t WeakCountNumBits = StrongCountNumBits - 2;
    using FieldType = std::uint32_t;
    static constexpr size_t FieldTypeBits = sizeof(FieldType) * 8;
    static constexpr FieldType one = 1;

    /** `refCounts` consists of four fields that are treated atomically:

        1. Strong count. This is a count of the number of shared pointers that
           hold a reference to this object. When the strong count goes to zero,
           if the weak count is zero, the destructor is run. If the weak count is
           non-zero when the strong count goes to zero then the partialDestructor
           is run.

        2. Weak count. This is a count of the number of weak pointers that hold
           a reference to this object. When the weak count goes to zero and the
           strong count is also zero, then the destructor is run.

        3. Partial destroy started bit. This bit is set if the
           `partialDestructor` function has been started (or is about to be
           started). This is used to prevent the destructor from running
           concurrently with the partial destructor. This can easily happen when
           the last strong pointer releases its reference in one thread and starts
           the partialDestructor, while in another thread the last weak pointer
           goes out of scope and starts the destructor while the partialDestructor
           is still running. Both a start and a finished bit are needed to handle
           a corner-case where the last strong pointer goes out of scope, then the
           last `weakPointer` goes out of scope, but this happens before the
           `partialDestructor` bit is set. It would be possible to use a single
           bit if it could also be set atomically when the strong count goes to
           zero and the weak count is non-zero, but that would add complexity (and
           likely slow down common cases as well).

        4. Partial destroy finished bit. This bit is set when the
           `partialDestructor` has finished running. See (3) above for more
           information.
    */
    mutable std::atomic<FieldType> refCounts{strongDelta};
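
    // Editorial note (not part of the original commit): with CountType =
    // uint16_t and FieldType = uint32_t, the packed layout defined below is
    //
    //     bit 31        partialDestroyStartedMask
    //     bit 30        partialDestroyFinishedMask
    //     bits 16..29   weak count   (WeakCountNumBits == 14)
    //     bits  0..15   strong count (StrongCountNumBits == 16)
    //
    // so, e.g., strong == 2 and weak == 1 packs to 0x00010002.
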
    /** Amount to change the strong count when adding or releasing a reference

        Note: The strong count is stored in the low `StrongCountNumBits` bits
        of refCounts
    */
    static constexpr FieldType strongDelta = 1;

    /** Amount to change the weak count when adding or releasing a reference

        Note: The weak count is stored in the high `WeakCountNumBits` bits of
        refCounts
    */
    static constexpr FieldType weakDelta = (one << StrongCountNumBits);

    /** Flag that is set when the partialDestroy function has started running
        (or is about to start running).

        See description of the `refCounts` field for a fuller description of
        this field.
    */
    static constexpr FieldType partialDestroyStartedMask =
        (one << (FieldTypeBits - 1));

    /** Flag that is set when the partialDestroy function has finished running

        See description of the `refCounts` field for a fuller description of
        this field.
    */
    static constexpr FieldType partialDestroyFinishedMask =
        (one << (FieldTypeBits - 2));

    /** Mask that will zero out all the `count` bits and leave the tag bits
        unchanged.
    */
    static constexpr FieldType tagMask =
        partialDestroyStartedMask | partialDestroyFinishedMask;

    /** Mask that will zero out the `tag` bits and leave the count bits
        unchanged.
    */
    static constexpr FieldType valueMask = ~tagMask;

    /** Mask that will zero out everything except the strong count. */
    static constexpr FieldType strongMask =
        ((one << StrongCountNumBits) - 1) & valueMask;

    /** Mask that will zero out everything except the weak count. */
    static constexpr FieldType weakMask =
        (((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask;

    /** Unpack the count and tag fields from the packed atomic integer form. */
    struct RefCountPair
    {
        CountType strong;
        CountType weak;
        /** The `partialDestroyStartedBit` is turned on when the partial
            destroy function is started. It is not a boolean; it is a uint32
            with all bits zero with the possible exception of the
            `partialDestroyStartedMask` bit. This is done so it can be directly
            masked into the `combinedValue`.
        */
        FieldType partialDestroyStartedBit{0};
        /** The `partialDestroyFinishedBit` is turned on when the partial
            destroy function has finished.
        */
        FieldType partialDestroyFinishedBit{0};
        RefCountPair(FieldType v) noexcept;
        RefCountPair(CountType s, CountType w) noexcept;

        /** Convert back to the packed integer form. */
        FieldType
        combinedValue() const noexcept;

        static constexpr CountType maxStrongValue =
            static_cast<CountType>((one << StrongCountNumBits) - 1);
        static constexpr CountType maxWeakValue =
            static_cast<CountType>((one << WeakCountNumBits) - 1);
        /** Put an extra margin to detect when running up against limits.
            This is only used in debug code, and is useful if we reduce the
            number of bits in the strong and weak counts (to 16 and 14 bits).
        */
        static constexpr CountType checkStrongMaxValue = maxStrongValue - 32;
        static constexpr CountType checkWeakMaxValue = maxWeakValue - 32;
    };
};
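
// Illustrative sketch (an editorial addition, not part of the original
// commit): a type opts in by inheriting from IntrusiveRefCounts and providing
// a partialDestructor that releases what it can while the counts (and the
// allocation) stay alive:
//
//     struct MyNode : public IntrusiveRefCounts
//     {
//         std::vector<SharedIntrusive<MyNode>> children;
//         void partialDestructor() { children.clear(); }
//     };
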
inline void
|
||||
IntrusiveRefCounts::addStrongRef() const noexcept
|
||||
{
|
||||
refCounts.fetch_add(strongDelta, std::memory_order_acq_rel);
|
||||
}
|
||||
|
||||
inline void
|
||||
IntrusiveRefCounts::addWeakRef() const noexcept
|
||||
{
|
||||
refCounts.fetch_add(weakDelta, std::memory_order_acq_rel);
|
||||
}
|
||||
|
||||
inline ReleaseStrongRefAction
|
||||
IntrusiveRefCounts::releaseStrongRef() const
|
||||
{
|
||||
// Subtract `strongDelta` from refCounts. If this releases the last strong
|
||||
// ref, set the `partialDestroyStarted` bit. It is important that the ref
|
||||
// count and the `partialDestroyStartedBit` are changed atomically (hence
|
||||
// the loop and `compare_exchange` op). If this didn't need to be done
|
||||
// atomically, the loop could be replaced with a `fetch_sub` and a
|
||||
// conditional `fetch_or`. This loop will almost always run once.
|
||||
|
||||
using enum ReleaseStrongRefAction;
|
||||
auto prevIntVal = refCounts.load(std::memory_order_acquire);
|
||||
while (1)
|
||||
{
|
||||
RefCountPair const prevVal{prevIntVal};
|
||||
XRPL_ASSERT(
|
||||
(prevVal.strong >= strongDelta),
|
||||
"ripple::IntrusiveRefCounts::releaseStrongRef : previous ref "
|
||||
"higher than new");
|
||||
auto nextIntVal = prevIntVal - strongDelta;
|
||||
ReleaseStrongRefAction action = noop;
|
||||
if (prevVal.strong == 1)
|
||||
{
|
||||
if (prevVal.weak == 0)
|
||||
{
|
||||
action = destroy;
|
||||
}
|
||||
else
|
||||
{
|
||||
nextIntVal |= partialDestroyStartedMask;
|
||||
action = partialDestroy;
|
||||
}
|
||||
}
|
||||
|
||||
if (refCounts.compare_exchange_weak(
|
||||
prevIntVal, nextIntVal, std::memory_order_release))
|
||||
{
|
||||
// Can't be in partial destroy because only decrementing the strong
|
||||
// count to zero can start a partial destroy, and that can't happen
|
||||
// twice.
|
||||
XRPL_ASSERT(
|
||||
(action == noop) || !(prevIntVal & partialDestroyStartedMask),
|
||||
"ripple::IntrusiveRefCounts::releaseStrongRef : not in partial "
|
||||
"destroy");
|
||||
return action;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline ReleaseStrongRefAction
|
||||
IntrusiveRefCounts::addWeakReleaseStrongRef() const
|
||||
{
|
||||
using enum ReleaseStrongRefAction;
|
||||
|
||||
static_assert(weakDelta > strongDelta);
|
||||
auto constexpr delta = weakDelta - strongDelta;
|
||||
auto prevIntVal = refCounts.load(std::memory_order_acquire);
|
||||
// This loop will almost always run once. The loop is needed to atomically
|
||||
// change the counts and flags (the count could be atomically changed, but
|
||||
// the flags depend on the current value of the counts).
|
||||
//
|
||||
// Note: If this becomes a perf bottleneck, the `partialDestoryStartedMask`
|
||||
// may be able to be set non-atomically. But it is easier to reason about
|
||||
// the code if the flag is set atomically.
|
||||
while (1)
|
||||
{
|
||||
RefCountPair const prevVal{prevIntVal};
|
||||
// Converted the last strong pointer to a weak pointer.
|
||||
//
|
||||
// Can't be in partial destroy because only decrementing the
|
||||
// strong count to zero can start a partial destroy, and that
|
||||
// can't happen twice.
|
||||
XRPL_ASSERT(
|
||||
(!prevVal.partialDestroyStartedBit),
|
||||
"ripple::IntrusiveRefCounts::addWeakReleaseStrongRef : not in "
|
||||
"partial destroy");
|
||||
|
||||
auto nextIntVal = prevIntVal + delta;
|
||||
ReleaseStrongRefAction action = noop;
|
||||
if (prevVal.strong == 1)
|
||||
{
|
||||
if (prevVal.weak == 0)
|
||||
{
|
||||
action = noop;
|
||||
}
|
||||
else
|
||||
{
|
||||
nextIntVal |= partialDestroyStartedMask;
|
||||
action = partialDestroy;
|
||||
}
|
||||
}
|
||||
if (refCounts.compare_exchange_weak(
|
||||
prevIntVal, nextIntVal, std::memory_order_release))
|
||||
{
|
||||
XRPL_ASSERT(
|
||||
(!(prevIntVal & partialDestroyStartedMask)),
|
||||
"ripple::IntrusiveRefCounts::addWeakReleaseStrongRef : not "
|
||||
"started partial destroy");
|
||||
return action;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline ReleaseWeakRefAction
|
||||
IntrusiveRefCounts::releaseWeakRef() const
|
||||
{
|
||||
auto prevIntVal = refCounts.fetch_sub(weakDelta, std::memory_order_acq_rel);
|
||||
RefCountPair prev = prevIntVal;
|
||||
if (prev.weak == 1 && prev.strong == 0)
|
||||
{
|
||||
if (!prev.partialDestroyStartedBit)
|
||||
{
|
||||
// This case should only be hit if the partialDestroyStartedBit is
|
||||
// set non-atomically (and even then very rarely). The code is kept
|
||||
// in case we need to set the flag non-atomically for perf reasons.
|
||||
refCounts.wait(prevIntVal, std::memory_order_acq_rel);
|
||||
prevIntVal = refCounts.load(std::memory_order_acquire);
|
||||
prev = RefCountPair{prevIntVal};
|
||||
}
|
||||
if (!prev.partialDestroyFinishedBit)
|
||||
{
|
||||
// partial destroy MUST finish before running a full destroy (when
|
||||
// using weak pointers)
|
||||
refCounts.wait(prevIntVal - weakDelta, std::memory_order_acq_rel);
|
||||
}
|
||||
return ReleaseWeakRefAction::destroy;
|
||||
}
|
||||
return ReleaseWeakRefAction::noop;
|
||||
}
|
||||
|
||||
inline bool
|
||||
IntrusiveRefCounts::checkoutStrongRefFromWeak() const noexcept
|
||||
{
|
||||
auto curValue = RefCountPair{1, 1}.combinedValue();
|
||||
auto desiredValue = RefCountPair{2, 1}.combinedValue();
|
||||
|
||||
while (!refCounts.compare_exchange_weak(
|
||||
curValue, desiredValue, std::memory_order_release))
|
||||
{
|
||||
RefCountPair const prev{curValue};
|
||||
if (!prev.strong)
|
||||
return false;
|
||||
|
||||
desiredValue = curValue + strongDelta;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
inline bool
IntrusiveRefCounts::expired() const noexcept
{
    RefCountPair const val = refCounts.load(std::memory_order_acquire);
    return val.strong == 0;
}

inline std::size_t
IntrusiveRefCounts::use_count() const noexcept
{
    RefCountPair const val = refCounts.load(std::memory_order_acquire);
    return val.strong;
}

inline IntrusiveRefCounts::~IntrusiveRefCounts() noexcept
{
#ifndef NDEBUG
    auto v = refCounts.load(std::memory_order_acquire);
    XRPL_ASSERT(
        (!(v & valueMask)),
        "ripple::IntrusiveRefCounts::~IntrusiveRefCounts : count must be zero");
    auto t = v & tagMask;
    XRPL_ASSERT(
        (!t || t == tagMask),
        "ripple::IntrusiveRefCounts::~IntrusiveRefCounts : valid tag");
#endif
}

//------------------------------------------------------------------------------

inline IntrusiveRefCounts::RefCountPair::RefCountPair(
    IntrusiveRefCounts::FieldType v) noexcept
    : strong{static_cast<CountType>(v & strongMask)}
    , weak{static_cast<CountType>((v & weakMask) >> StrongCountNumBits)}
    , partialDestroyStartedBit{v & partialDestroyStartedMask}
    , partialDestroyFinishedBit{v & partialDestroyFinishedMask}
{
    XRPL_ASSERT(
        (strong < checkStrongMaxValue && weak < checkWeakMaxValue),
        "ripple::IntrusiveRefCounts::RefCountPair(FieldType) : inputs inside "
        "range");
}

inline IntrusiveRefCounts::RefCountPair::RefCountPair(
    IntrusiveRefCounts::CountType s,
    IntrusiveRefCounts::CountType w) noexcept
    : strong{s}, weak{w}
{
    XRPL_ASSERT(
        (strong < checkStrongMaxValue && weak < checkWeakMaxValue),
        "ripple::IntrusiveRefCounts::RefCountPair(CountType, CountType) : "
        "inputs inside range");
}

inline IntrusiveRefCounts::FieldType
IntrusiveRefCounts::RefCountPair::combinedValue() const noexcept
{
    XRPL_ASSERT(
        (strong < checkStrongMaxValue && weak < checkWeakMaxValue),
        "ripple::IntrusiveRefCounts::RefCountPair::combinedValue : inputs "
        "inside range");
    return (static_cast<IntrusiveRefCounts::FieldType>(weak)
            << IntrusiveRefCounts::StrongCountNumBits) |
        static_cast<IntrusiveRefCounts::FieldType>(strong) |
        partialDestroyStartedBit | partialDestroyFinishedBit;
}

template <class T>
inline void
partialDestructorFinished(T** o)
{
    T& self = **o;
    IntrusiveRefCounts::RefCountPair p =
        self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask);
    XRPL_ASSERT(
        (!p.partialDestroyFinishedBit && p.partialDestroyStartedBit &&
         !p.strong),
        "ripple::partialDestructorFinished : not a weak ref");
    if (!p.weak)
    {
        // There was a weak count before the partial destructor ran (or we
        // would have run the full destructor) and now there isn't a weak
        // count. Some thread is waiting to run the destructor.
        self.refCounts.notify_one();
    }
    // Set the pointer to null to emphasize that the object shouldn't be used
    // after calling this function as it may be destroyed in another thread.
    *o = nullptr;
}
//------------------------------------------------------------------------------

} // namespace ripple
#endif
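The two-phase teardown is easiest to see from the perspective of a participating class. A minimal sketch, assuming a hypothetical payload type `Node` (the type and its member names are illustrative, not part of this commit); `partialDestructor` and `partialDestructorFinished` are the hooks this header defines:

#include <xrpl/basics/IntrusiveRefCounts.h>

#include <vector>

// Hypothetical payload type opting in to two-phase destruction.
struct Node : public ripple::IntrusiveRefCounts
{
    std::vector<int> bulkyData;

    // Runs when the strong count reaches zero while weak refs remain:
    // free the payload early, keep the object alive for the weak refs.
    void
    partialDestructor()
    {
        bulkyData.clear();
    }
};

// Mirrors what the intrusive pointer class does when releaseStrongRef()
// returns ReleaseStrongRefAction::partialDestroy:
inline void
runPartialDestroy(Node* n)
{
    n->partialDestructor();
    ripple::partialDestructorFinished(&n);
    // n is now nullptr; another thread may destroy the object at any time.
}

The same sequence appears in the unit test's `testBasics` below, which drives the raw ref-count API directly.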
135
include/xrpl/basics/SharedWeakCachePointer.h
Normal file
@@ -0,0 +1,135 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2023 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_H_INCLUDED
#define RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_H_INCLUDED

#include <concepts>
#include <memory>
#include <variant>

namespace ripple {

/** A combination of a std::shared_ptr and a std::weak_ptr.

    This class is a wrapper around a
    `std::variant<std::shared_ptr, std::weak_ptr>`. It is useful for storing
    pointers in tagged caches using less memory than storing both pointers
    directly.
*/
template <class T>
class SharedWeakCachePointer
{
public:
    SharedWeakCachePointer() = default;

    SharedWeakCachePointer(SharedWeakCachePointer const& rhs);

    template <class TT>
        requires std::convertible_to<TT*, T*>
    SharedWeakCachePointer(std::shared_ptr<TT> const& rhs);

    SharedWeakCachePointer(SharedWeakCachePointer&& rhs);

    template <class TT>
        requires std::convertible_to<TT*, T*>
    SharedWeakCachePointer(std::shared_ptr<TT>&& rhs);

    SharedWeakCachePointer&
    operator=(SharedWeakCachePointer const& rhs);

    template <class TT>
        requires std::convertible_to<TT*, T*>
    SharedWeakCachePointer&
    operator=(std::shared_ptr<TT> const& rhs);

    template <class TT>
        requires std::convertible_to<TT*, T*>
    SharedWeakCachePointer&
    operator=(std::shared_ptr<TT>&& rhs);

    ~SharedWeakCachePointer();

    /** Return a strong pointer if this is already a strong pointer (i.e. don't
        lock the weak pointer. Use the `lock` method if that's what's needed)
    */
    std::shared_ptr<T> const&
    getStrong() const;

    /** Return true if this is a strong pointer and the strong pointer is
        seated.
    */
    explicit
    operator bool() const noexcept;

    /** Set the pointer to null, decrement the appropriate ref count, and run
        the appropriate release action.
    */
    void
    reset();

    /** If this is a strong pointer, return the raw pointer. Otherwise return
        null.
    */
    T*
    get() const;

    /** If this is a strong pointer, return the strong count. Otherwise return
        0.
    */
    std::size_t
    use_count() const;

    /** Return true if the strong count is zero (the object has expired). */
    bool
    expired() const;

    /** If this is a strong pointer, return the strong pointer. Otherwise
        attempt to lock the weak pointer.
    */
    std::shared_ptr<T>
    lock() const;

    /** Return true if this represents a strong pointer. */
    bool
    isStrong() const;

    /** Return true if this represents a weak pointer. */
    bool
    isWeak() const;

    /** If this is a weak pointer, attempt to convert it to a strong pointer.

        @return true if successfully converted to a strong pointer (or was
        already a strong pointer). Otherwise false.
    */
    bool
    convertToStrong();

    /** If this is a strong pointer, attempt to convert it to a weak pointer.

        @return false if the pointer is null. Otherwise return true.
    */
    bool
    convertToWeak();

private:
    std::variant<std::shared_ptr<T>, std::weak_ptr<T>> combo_;
};
} // namespace ripple
#endif
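A minimal usage sketch of the class declared above (the `Blob` type is hypothetical; the member calls are the API as declared): a cache slot starts strong, is demoted to weak during a sweep, and can be promoted back while the object is still alive:

#include <xrpl/basics/SharedWeakCachePointer.ipp>

#include <cassert>
#include <memory>
#include <string>

struct Blob
{
    std::string data;
};

void
cacheSlotLifecycle()
{
    auto obj = std::make_shared<Blob>();
    obj->data = "payload";

    ripple::SharedWeakCachePointer<Blob> slot{obj};  // starts strong
    assert(slot.isStrong() && slot.use_count() == 2);

    slot.convertToWeak();  // a sweep demotes the entry; obj keeps it alive
    assert(slot.isWeak() && !slot.expired());

    if (auto locked = slot.lock())  // clients can still recover a strong ref
        assert(locked.get() == obj.get());

    slot.convertToStrong();  // promote back while the object is alive
    assert(slot.isStrong());

    obj.reset();
    slot.reset();  // slot held the last reference; Blob is destroyed here
}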
192
include/xrpl/basics/SharedWeakCachePointer.ipp
Normal file
@@ -0,0 +1,192 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2023 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_IPP_INCLUDED
#define RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_IPP_INCLUDED

#include <xrpl/basics/SharedWeakCachePointer.h>

namespace ripple {
template <class T>
SharedWeakCachePointer<T>::SharedWeakCachePointer(
    SharedWeakCachePointer const& rhs) = default;

template <class T>
template <class TT>
    requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>::SharedWeakCachePointer(
    std::shared_ptr<TT> const& rhs)
    : combo_{rhs}
{
}

template <class T>
SharedWeakCachePointer<T>::SharedWeakCachePointer(
    SharedWeakCachePointer&& rhs) = default;

template <class T>
template <class TT>
    requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>::SharedWeakCachePointer(std::shared_ptr<TT>&& rhs)
    : combo_{std::move(rhs)}
{
}

template <class T>
SharedWeakCachePointer<T>&
SharedWeakCachePointer<T>::operator=(SharedWeakCachePointer const& rhs) =
    default;

template <class T>
template <class TT>
    requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>&
SharedWeakCachePointer<T>::operator=(std::shared_ptr<TT> const& rhs)
{
    combo_ = rhs;
    return *this;
}

template <class T>
template <class TT>
    requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>&
SharedWeakCachePointer<T>::operator=(std::shared_ptr<TT>&& rhs)
{
    combo_ = std::move(rhs);
    return *this;
}

template <class T>
SharedWeakCachePointer<T>::~SharedWeakCachePointer() = default;

// Return a strong pointer if this is already a strong pointer (i.e. don't
// lock the weak pointer. Use the `lock` method if that's what's needed)
template <class T>
std::shared_ptr<T> const&
SharedWeakCachePointer<T>::getStrong() const
{
    static std::shared_ptr<T> const empty;
    if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
        return *p;
    return empty;
}

template <class T>
SharedWeakCachePointer<T>::operator bool() const noexcept
{
    return !!std::get_if<std::shared_ptr<T>>(&combo_);
}

template <class T>
void
SharedWeakCachePointer<T>::reset()
{
    combo_ = std::shared_ptr<T>{};
}

template <class T>
T*
SharedWeakCachePointer<T>::get() const
{
    if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
        return p->get();
    return nullptr;
}

template <class T>
std::size_t
SharedWeakCachePointer<T>::use_count() const
{
    if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
        return p->use_count();
    return 0;
}

template <class T>
bool
SharedWeakCachePointer<T>::expired() const
{
    if (auto p = std::get_if<std::weak_ptr<T>>(&combo_))
        return p->expired();
    return !std::get_if<std::shared_ptr<T>>(&combo_);
}

template <class T>
std::shared_ptr<T>
SharedWeakCachePointer<T>::lock() const
{
    if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
        return *p;

    if (auto p = std::get_if<std::weak_ptr<T>>(&combo_))
        return p->lock();

    return {};
}

template <class T>
bool
SharedWeakCachePointer<T>::isStrong() const
{
    if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
        return !!p->get();
    return false;
}

template <class T>
bool
SharedWeakCachePointer<T>::isWeak() const
{
    return !isStrong();
}

template <class T>
bool
SharedWeakCachePointer<T>::convertToStrong()
{
    if (isStrong())
        return true;

    if (auto p = std::get_if<std::weak_ptr<T>>(&combo_))
    {
        if (auto s = p->lock())
        {
            combo_ = std::move(s);
            return true;
        }
    }
    return false;
}

template <class T>
bool
SharedWeakCachePointer<T>::convertToWeak()
{
    if (isWeak())
        return true;

    if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
    {
        combo_ = std::weak_ptr<T>(*p);
        return true;
    }

    return false;
}
} // namespace ripple
#endif
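The space saving that motivates holding the two standard pointers in one variant can be checked directly: the variant stores a single pointer representation plus a discriminant, roughly half of what the previous `shared_ptr` + `weak_ptr` member pair in TaggedCache's entry used. A small illustration (exact sizes are implementation-defined; typical 64-bit values noted in comments):

#include <cstdio>
#include <memory>
#include <variant>

int
main()
{
    using Strong = std::shared_ptr<int>;
    using Weak = std::weak_ptr<int>;
    using Combo = std::variant<Strong, Weak>;

    // Typical 64-bit libstdc++/libc++ results: 16 bytes per smart pointer,
    // so the old pair costs 32 bytes while the variant costs 24 (16 + tag,
    // padded). Exact numbers vary by implementation.
    std::printf("pair   : %zu\n", sizeof(Strong) + sizeof(Weak));
    std::printf("variant: %zu\n", sizeof(Combo));
}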
@@ -20,7 +20,9 @@
#ifndef RIPPLE_BASICS_TAGGEDCACHE_H_INCLUDED
#define RIPPLE_BASICS_TAGGEDCACHE_H_INCLUDED

#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/SharedWeakCachePointer.ipp>
#include <xrpl/basics/UnorderedContainers.h>
#include <xrpl/basics/hardened_hash.h>
#include <xrpl/beast/clock/abstract_clock.h>
@@ -51,6 +53,8 @@ template <
    class Key,
    class T,
    bool IsKeyCache = false,
    class SharedWeakUnionPointerType = SharedWeakCachePointer<T>,
    class SharedPointerType = std::shared_ptr<T>,
    class Hash = hardened_hash<>,
    class KeyEqual = std::equal_to<Key>,
    class Mutex = std::recursive_mutex>
@@ -61,6 +65,8 @@ public:
    using key_type = Key;
    using mapped_type = T;
    using clock_type = beast::abstract_clock<std::chrono::steady_clock>;
    using shared_weak_combo_pointer_type = SharedWeakUnionPointerType;
    using shared_pointer_type = SharedPointerType;

public:
    TaggedCache(
@@ -70,231 +76,48 @@ public:
        clock_type& clock,
        beast::Journal journal,
        beast::insight::Collector::ptr const& collector =
            beast::insight::NullCollector::New())
        : m_journal(journal)
        , m_clock(clock)
        , m_stats(
              name,
              std::bind(&TaggedCache::collect_metrics, this),
              collector)
        , m_name(name)
        , m_target_size(size)
        , m_target_age(expiration)
        , m_cache_count(0)
        , m_hits(0)
        , m_misses(0)
    {
    }
            beast::insight::NullCollector::New());

public:
    /** Return the clock associated with the cache. */
    clock_type&
    clock()
    {
        return m_clock;
    }
    clock();

    /** Returns the number of items in the container. */
    std::size_t
    size() const
    {
        std::lock_guard lock(m_mutex);
        return m_cache.size();
    }

    void
    setTargetSize(int s)
    {
        std::lock_guard lock(m_mutex);
        m_target_size = s;

        if (s > 0)
        {
            for (auto& partition : m_cache.map())
            {
                partition.rehash(static_cast<std::size_t>(
                    (s + (s >> 2)) /
                        (partition.max_load_factor() * m_cache.partitions()) +
                    1));
            }
        }

        JLOG(m_journal.debug()) << m_name << " target size set to " << s;
    }

    clock_type::duration
    getTargetAge() const
    {
        std::lock_guard lock(m_mutex);
        return m_target_age;
    }

    void
    setTargetAge(clock_type::duration s)
    {
        std::lock_guard lock(m_mutex);
        m_target_age = s;
        JLOG(m_journal.debug())
            << m_name << " target age set to " << m_target_age.count();
    }
    size() const;

    int
    getCacheSize() const
    {
        std::lock_guard lock(m_mutex);
        return m_cache_count;
    }
    getCacheSize() const;

    int
    getTrackSize() const
    {
        std::lock_guard lock(m_mutex);
        return m_cache.size();
    }
    getTrackSize() const;

    float
    getHitRate()
    {
        std::lock_guard lock(m_mutex);
        auto const total = static_cast<float>(m_hits + m_misses);
        return m_hits * (100.0f / std::max(1.0f, total));
    }
    getHitRate();

    void
    clear()
    {
        std::lock_guard lock(m_mutex);
        m_cache.clear();
        m_cache_count = 0;
    }
    clear();

    void
    reset()
    {
        std::lock_guard lock(m_mutex);
        m_cache.clear();
        m_cache_count = 0;
        m_hits = 0;
        m_misses = 0;
    }
    reset();

    /** Refresh the last access time on a key if present.
        @return `true` If the key was found.
    */
    template <class KeyComparable>
    bool
    touch_if_exists(KeyComparable const& key)
    {
        std::lock_guard lock(m_mutex);
        auto const iter(m_cache.find(key));
        if (iter == m_cache.end())
        {
            ++m_stats.misses;
            return false;
        }
        iter->second.touch(m_clock.now());
        ++m_stats.hits;
        return true;
    }
    touch_if_exists(KeyComparable const& key);

    using SweptPointersVector = std::pair<
        std::vector<std::shared_ptr<mapped_type>>,
        std::vector<std::weak_ptr<mapped_type>>>;
    using SweptPointersVector = std::vector<SharedWeakUnionPointerType>;

    void
    sweep()
    {
        // Keep references to all the stuff we sweep
        // For performance, each worker thread should exit before the swept
        // data is destroyed but still within the main cache lock.
        std::vector<SweptPointersVector> allStuffToSweep(m_cache.partitions());

        clock_type::time_point const now(m_clock.now());
        clock_type::time_point when_expire;

        auto const start = std::chrono::steady_clock::now();
        {
            std::lock_guard lock(m_mutex);

            if (m_target_size == 0 ||
                (static_cast<int>(m_cache.size()) <= m_target_size))
            {
                when_expire = now - m_target_age;
            }
            else
            {
                when_expire =
                    now - m_target_age * m_target_size / m_cache.size();

                clock_type::duration const minimumAge(std::chrono::seconds(1));
                if (when_expire > (now - minimumAge))
                    when_expire = now - minimumAge;

                JLOG(m_journal.trace())
                    << m_name << " is growing fast " << m_cache.size() << " of "
                    << m_target_size << " aging at "
                    << (now - when_expire).count() << " of "
                    << m_target_age.count();
            }

            std::vector<std::thread> workers;
            workers.reserve(m_cache.partitions());
            std::atomic<int> allRemovals = 0;

            for (std::size_t p = 0; p < m_cache.partitions(); ++p)
            {
                workers.push_back(sweepHelper(
                    when_expire,
                    now,
                    m_cache.map()[p],
                    allStuffToSweep[p],
                    allRemovals,
                    lock));
            }
            for (std::thread& worker : workers)
                worker.join();

            m_cache_count -= allRemovals;
        }
        // At this point allStuffToSweep will go out of scope outside the lock
        // and decrement the reference count on each strong pointer.
        JLOG(m_journal.debug())
            << m_name << " TaggedCache sweep lock duration "
            << std::chrono::duration_cast<std::chrono::milliseconds>(
                   std::chrono::steady_clock::now() - start)
                   .count()
            << "ms";
    }
    sweep();
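The sweep removed above (and moved to TaggedCache.ipp) illustrates a general pattern worth naming: collect doomed entries under the lock, then let them be destroyed after the lock is released, so destructor work never runs inside the critical section. A minimal, hypothetical sketch of the same idea outside TaggedCache (the container and `expired` predicate are illustrative):

#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>

// Deferred destruction: expired entries are moved into 'doomed' while the
// lock is held and destroyed only after it is released.
template <class K, class V>
void
sweepExpired(
    std::mutex& m,
    std::unordered_map<K, std::shared_ptr<V>>& map,
    bool (*expired)(V const&))
{
    std::vector<std::shared_ptr<V>> doomed;
    {
        std::lock_guard lock(m);
        doomed.reserve(map.size());
        for (auto it = map.begin(); it != map.end();)
        {
            if (expired(*it->second))
            {
                doomed.push_back(std::move(it->second));
                it = map.erase(it);
            }
            else
                ++it;
        }
    }
    // 'doomed' is destroyed here, outside the lock.
}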
    bool
    del(const key_type& key, bool valid)
    {
        // Remove from cache, if !valid, remove from map too. Returns true if
        // removed from cache
        std::lock_guard lock(m_mutex);

        auto cit = m_cache.find(key);

        if (cit == m_cache.end())
            return false;

        Entry& entry = cit->second;

        bool ret = false;

        if (entry.isCached())
        {
            --m_cache_count;
            entry.ptr.reset();
            ret = true;
        }

        if (!valid || entry.isExpired())
            m_cache.erase(cit);

        return ret;
    }
    del(const key_type& key, bool valid);
public:
    /** Replace aliased objects with originals.

        Due to concurrency it is possible for two separate objects with
@@ -308,100 +131,23 @@ public:

        @return `true` If the key already existed.
    */
public:
    template <class R>
    bool
    canonicalize(
        const key_type& key,
        std::shared_ptr<T>& data,
        std::function<bool(std::shared_ptr<T> const&)>&& replace)
    {
        // Return canonical value, store if needed, refresh in cache
        // Return values: true=we had the data already
        std::lock_guard lock(m_mutex);

        auto cit = m_cache.find(key);

        if (cit == m_cache.end())
        {
            m_cache.emplace(
                std::piecewise_construct,
                std::forward_as_tuple(key),
                std::forward_as_tuple(m_clock.now(), data));
            ++m_cache_count;
            return false;
        }

        Entry& entry = cit->second;
        entry.touch(m_clock.now());

        if (entry.isCached())
        {
            if (replace(entry.ptr))
            {
                entry.ptr = data;
                entry.weak_ptr = data;
            }
            else
            {
                data = entry.ptr;
            }

            return true;
        }

        auto cachedData = entry.lock();

        if (cachedData)
        {
            if (replace(entry.ptr))
            {
                entry.ptr = data;
                entry.weak_ptr = data;
            }
            else
            {
                entry.ptr = cachedData;
                data = cachedData;
            }

            ++m_cache_count;
            return true;
        }

        entry.ptr = data;
        entry.weak_ptr = data;
        ++m_cache_count;

        return false;
    }
        SharedPointerType& data,
        R&& replaceCallback);

    bool
    canonicalize_replace_cache(
        const key_type& key,
        std::shared_ptr<T> const& data)
    {
        return canonicalize(
            key,
            const_cast<std::shared_ptr<T>&>(data),
            [](std::shared_ptr<T> const&) { return true; });
    }
        SharedPointerType const& data);

    bool
    canonicalize_replace_client(const key_type& key, std::shared_ptr<T>& data)
    {
        return canonicalize(
            key, data, [](std::shared_ptr<T> const&) { return false; });
    }
    canonicalize_replace_client(const key_type& key, SharedPointerType& data);
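The two wrappers differ only in the replace callback they pass: `canonicalize_replace_cache` unconditionally overwrites the cached entry with the caller's object, while `canonicalize_replace_client` keeps the cached entry and redirects the caller to it. A hedged client-side sketch (generic over the cache type; only the call names come from the declarations above):

// Two threads independently build an object for the same key; the cache
// arbitrates and both converge on one canonical instance.
template <class Cache, class Key, class Ptr>
void
converge(Cache& cache, Key const& key, Ptr& mine)
{
    // If the key was already present, 'mine' now points at the canonical
    // (first-stored) object and our freshly built copy is dropped.
    bool const existed = cache.canonicalize_replace_client(key, mine);
    (void)existed;  // often only informational
}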
    std::shared_ptr<T>
    fetch(const key_type& key)
    {
        std::lock_guard<mutex_type> l(m_mutex);
        auto ret = initialFetch(key, l);
        if (!ret)
            ++m_misses;
        return ret;
    }
    SharedPointerType
    fetch(const key_type& key);

    /** Insert the element into the container.
        If the key already exists, nothing happens.
@@ -410,26 +156,11 @@ public:
    template <class ReturnType = bool>
    auto
    insert(key_type const& key, T const& value)
        -> std::enable_if_t<!IsKeyCache, ReturnType>
    {
        auto p = std::make_shared<T>(std::cref(value));
        return canonicalize_replace_client(key, p);
    }
        -> std::enable_if_t<!IsKeyCache, ReturnType>;

    template <class ReturnType = bool>
    auto
    insert(key_type const& key) -> std::enable_if_t<IsKeyCache, ReturnType>
    {
        std::lock_guard lock(m_mutex);
        clock_type::time_point const now(m_clock.now());
        auto [it, inserted] = m_cache.emplace(
            std::piecewise_construct,
            std::forward_as_tuple(key),
            std::forward_as_tuple(now));
        if (!inserted)
            it->second.last_access = now;
        return inserted;
    }
    insert(key_type const& key) -> std::enable_if_t<IsKeyCache, ReturnType>;

    // VFALCO NOTE It looks like this returns a copy of the data in
    //             the output parameter 'data'. This could be expensive.
@@ -437,50 +168,18 @@ public:
    //             simply return an iterator.
    //
    bool
    retrieve(const key_type& key, T& data)
    {
        // retrieve the value of the stored data
        auto entry = fetch(key);

        if (!entry)
            return false;

        data = *entry;
        return true;
    }
    retrieve(const key_type& key, T& data);

    mutex_type&
    peekMutex()
    {
        return m_mutex;
    }
    peekMutex();

    std::vector<key_type>
    getKeys() const
    {
        std::vector<key_type> v;

        {
            std::lock_guard lock(m_mutex);
            v.reserve(m_cache.size());
            for (auto const& _ : m_cache)
                v.push_back(_.first);
        }

        return v;
    }
    getKeys() const;

    // CachedSLEs functions.
    /** Returns the fraction of cache hits. */
    double
    rate() const
    {
        std::lock_guard lock(m_mutex);
        auto const tot = m_hits + m_misses;
        if (tot == 0)
            return 0;
        return double(m_hits) / tot;
    }
    rate() const;

    /** Fetch an item from the cache.
        If the digest was not found, Handler
@@ -488,73 +187,16 @@ public:
            std::shared_ptr<SLE const>(void)
    */
    template <class Handler>
    std::shared_ptr<T>
    fetch(key_type const& digest, Handler const& h)
    {
        {
            std::lock_guard l(m_mutex);
            if (auto ret = initialFetch(digest, l))
                return ret;
        }

        auto sle = h();
        if (!sle)
            return {};

        std::lock_guard l(m_mutex);
        ++m_misses;
        auto const [it, inserted] =
            m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
        if (!inserted)
            it->second.touch(m_clock.now());
        return it->second.ptr;
    }
    SharedPointerType
    fetch(key_type const& digest, Handler const& h);
    // End CachedSLEs functions.

private:
    std::shared_ptr<T>
    initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
    {
        auto cit = m_cache.find(key);
        if (cit == m_cache.end())
            return {};

        Entry& entry = cit->second;
        if (entry.isCached())
        {
            ++m_hits;
            entry.touch(m_clock.now());
            return entry.ptr;
        }
        entry.ptr = entry.lock();
        if (entry.isCached())
        {
            // independent of cache size, so not counted as a hit
            ++m_cache_count;
            entry.touch(m_clock.now());
            return entry.ptr;
        }

        m_cache.erase(cit);
        return {};
    }
    SharedPointerType
    initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l);

    void
    collect_metrics()
    {
        m_stats.size.set(getCacheSize());

        {
            beast::insight::Gauge::value_type hit_rate(0);
            {
                std::lock_guard lock(m_mutex);
                auto const total(m_hits + m_misses);
                if (total != 0)
                    hit_rate = (m_hits * 100) / total;
            }
            m_stats.hit_rate.set(hit_rate);
        }
    }
    collect_metrics();

private:
    struct Stats
@@ -600,36 +242,37 @@ private:
    class ValueEntry
    {
    public:
        std::shared_ptr<mapped_type> ptr;
        std::weak_ptr<mapped_type> weak_ptr;
        shared_weak_combo_pointer_type ptr;
        clock_type::time_point last_access;

        ValueEntry(
            clock_type::time_point const& last_access_,
            std::shared_ptr<mapped_type> const& ptr_)
            : ptr(ptr_), weak_ptr(ptr_), last_access(last_access_)
            shared_pointer_type const& ptr_)
            : ptr(ptr_), last_access(last_access_)
        {
        }

        bool
        isWeak() const
        {
            return ptr == nullptr;
            if (!ptr)
                return true;
            return ptr.isWeak();
        }
        bool
        isCached() const
        {
            return ptr != nullptr;
            return ptr && ptr.isStrong();
        }
        bool
        isExpired() const
        {
            return weak_ptr.expired();
            return ptr.expired();
        }
        std::shared_ptr<mapped_type>
        SharedPointerType
        lock()
        {
            return weak_ptr.lock();
            return ptr.lock();
        }
        void
        touch(clock_type::time_point const& now)
@@ -658,72 +301,7 @@ private:
        typename KeyValueCacheType::map_type& partition,
        SweptPointersVector& stuffToSweep,
        std::atomic<int>& allRemovals,
        std::lock_guard<std::recursive_mutex> const&)
    {
        return std::thread([&, this]() {
            int cacheRemovals = 0;
            int mapRemovals = 0;

            // Keep references to all the stuff we sweep
            // so that we can destroy them outside the lock.
            stuffToSweep.first.reserve(partition.size());
            stuffToSweep.second.reserve(partition.size());
            {
                auto cit = partition.begin();
                while (cit != partition.end())
                {
                    if (cit->second.isWeak())
                    {
                        // weak
                        if (cit->second.isExpired())
                        {
                            stuffToSweep.second.push_back(
                                std::move(cit->second.weak_ptr));
                            ++mapRemovals;
                            cit = partition.erase(cit);
                        }
                        else
                        {
                            ++cit;
                        }
                    }
                    else if (cit->second.last_access <= when_expire)
                    {
                        // strong, expired
                        ++cacheRemovals;
                        if (cit->second.ptr.use_count() == 1)
                        {
                            stuffToSweep.first.push_back(
                                std::move(cit->second.ptr));
                            ++mapRemovals;
                            cit = partition.erase(cit);
                        }
                        else
                        {
                            // remains weakly cached
                            cit->second.ptr.reset();
                            ++cit;
                        }
                    }
                    else
                    {
                        // strong, not expired
                        ++cit;
                    }
                }
            }

            if (mapRemovals || cacheRemovals)
            {
                JLOG(m_journal.debug())
                    << "TaggedCache partition sweep " << m_name
                    << ": cache = " << partition.size() << "-" << cacheRemovals
                    << ", map-=" << mapRemovals;
            }

            allRemovals += cacheRemovals;
        });
    }
        std::lock_guard<std::recursive_mutex> const&);

    [[nodiscard]] std::thread
    sweepHelper(
@@ -732,45 +310,7 @@ private:
        typename KeyOnlyCacheType::map_type& partition,
        SweptPointersVector&,
        std::atomic<int>& allRemovals,
        std::lock_guard<std::recursive_mutex> const&)
    {
        return std::thread([&, this]() {
            int cacheRemovals = 0;
            int mapRemovals = 0;

            // Keep references to all the stuff we sweep
            // so that we can destroy them outside the lock.
            {
                auto cit = partition.begin();
                while (cit != partition.end())
                {
                    if (cit->second.last_access > now)
                    {
                        cit->second.last_access = now;
                        ++cit;
                    }
                    else if (cit->second.last_access <= when_expire)
                    {
                        cit = partition.erase(cit);
                    }
                    else
                    {
                        ++cit;
                    }
                }
            }

            if (mapRemovals || cacheRemovals)
            {
                JLOG(m_journal.debug())
                    << "TaggedCache partition sweep " << m_name
                    << ": cache = " << partition.size() << "-" << cacheRemovals
                    << ", map-=" << mapRemovals;
            }

            allRemovals += cacheRemovals;
        });
    };
        std::lock_guard<std::recursive_mutex> const&);

    beast::Journal m_journal;
    clock_type& m_clock;
@@ -782,10 +322,10 @@ private:
    std::string m_name;

    // Desired number of cache entries (0 = ignore)
    int m_target_size;
    const int m_target_size;

    // Desired maximum cache age
    clock_type::duration m_target_age;
    const clock_type::duration m_target_age;

    // Number of items cached
    int m_cache_count;
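Taken together, the interface above supports the usual two-phase caching flow: try a fetch (which may revive an entry from its weak pointer), and on a miss build the value and canonicalize it. A hedged usage sketch, generic over the cache instantiation so no constructor details are assumed; only the call names come from the declarations above:

// Illustrative only: Cache stands for a TaggedCache instantiation, Key for
// its key_type, and makeValue for whatever builds a value on a miss.
template <class Cache, class Key, class MakeValue>
auto
fetchOrBuild(Cache& cache, Key const& key, MakeValue makeValue)
{
    if (auto hit = cache.fetch(key))
        return hit;  // strong hit, or an entry revived from its weak pointer

    auto fresh = makeValue();
    // false => if another thread inserted first, 'fresh' is redirected to
    // the canonical object; otherwise 'fresh' itself becomes canonical.
    cache.canonicalize_replace_client(key, fresh);
    return fresh;
}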
1029
include/xrpl/basics/TaggedCache.ipp
Normal file
File diff suppressed because it is too large
842
src/test/basics/IntrusiveShared_test.cpp
Normal file
@@ -0,0 +1,842 @@
#include <test/unit_test/SuiteJournal.h>

#include <xrpl/basics/IntrusivePointer.ipp>
#include <xrpl/basics/IntrusiveRefCounts.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/beast/utility/Journal.h>

#include <algorithm>
#include <array>
#include <atomic>
#include <barrier>
#include <cassert>
#include <chrono>
#include <cstdint>
#include <functional>
#include <latch>
#include <optional>
#include <random>
#include <string>
#include <thread>
#include <variant>
#include <vector>

namespace ripple {
namespace tests {

namespace {
enum class TrackedState : std::uint8_t {
    uninitialized,
    alive,
    partiallyDeletedStarted,
    partiallyDeleted,
    deletedStarted,
    deleted
};

class TIBase : public IntrusiveRefCounts
{
public:
    static constexpr std::size_t maxStates = 128;
    static std::array<std::atomic<TrackedState>, maxStates> state;
    static std::atomic<int> nextId;
    static TrackedState
    getState(int id)
    {
        assert(id < state.size());
        return state[id].load(std::memory_order_acquire);
    }
    static void
    resetStates(bool resetCallback)
    {
        for (int i = 0; i < maxStates; ++i)
        {
            state[i].store(
                TrackedState::uninitialized, std::memory_order_release);
        }
        nextId.store(0, std::memory_order_release);
        if (resetCallback)
            TIBase::tracingCallback_ = [](TrackedState,
                                          std::optional<TrackedState>) {};
    }

    struct ResetStatesGuard
    {
        bool resetCallback_{false};

        ResetStatesGuard(bool resetCallback) : resetCallback_{resetCallback}
        {
            TIBase::resetStates(resetCallback_);
        }
        ~ResetStatesGuard()
        {
            TIBase::resetStates(resetCallback_);
        }
    };

    TIBase() : id_{checkoutID()}
    {
        assert(state.size() > id_);
        state[id_].store(TrackedState::alive, std::memory_order_relaxed);
    }
    ~TIBase()
    {
        using enum TrackedState;

        assert(state.size() > id_);
        tracingCallback_(
            state[id_].load(std::memory_order_relaxed), deletedStarted);

        assert(state.size() > id_);
        // Use relaxed memory order to try to avoid atomic operations from
        // adding additional memory synchronizations that may hide threading
        // errors in the underlying shared pointer class.
        state[id_].store(deletedStarted, std::memory_order_relaxed);

        tracingCallback_(deletedStarted, deleted);

        assert(state.size() > id_);
        state[id_].store(TrackedState::deleted, std::memory_order_relaxed);

        tracingCallback_(TrackedState::deleted, std::nullopt);
    }

    void
    partialDestructor()
    {
        using enum TrackedState;

        assert(state.size() > id_);
        tracingCallback_(
            state[id_].load(std::memory_order_relaxed),
            partiallyDeletedStarted);

        assert(state.size() > id_);
        state[id_].store(partiallyDeletedStarted, std::memory_order_relaxed);

        tracingCallback_(partiallyDeletedStarted, partiallyDeleted);

        assert(state.size() > id_);
        state[id_].store(partiallyDeleted, std::memory_order_relaxed);

        tracingCallback_(partiallyDeleted, std::nullopt);
    }

    static std::function<void(TrackedState, std::optional<TrackedState>)>
        tracingCallback_;

    int id_;

private:
    static int
    checkoutID()
    {
        return nextId.fetch_add(1, std::memory_order_acq_rel);
    }
};

std::array<std::atomic<TrackedState>, TIBase::maxStates> TIBase::state;
std::atomic<int> TIBase::nextId{0};

std::function<void(TrackedState, std::optional<TrackedState>)>
    TIBase::tracingCallback_ = [](TrackedState, std::optional<TrackedState>) {};

} // namespace

class IntrusiveShared_test : public beast::unit_test::suite
{
public:
    void
    testBasics()
    {
        testcase("Basics");

        {
            TIBase::ResetStatesGuard rsg{true};

            TIBase b;
            BEAST_EXPECT(b.use_count() == 1);
            b.addWeakRef();
            BEAST_EXPECT(b.use_count() == 1);
            auto s = b.releaseStrongRef();
            BEAST_EXPECT(s == ReleaseStrongRefAction::partialDestroy);
            BEAST_EXPECT(b.use_count() == 0);
            TIBase* pb = &b;
            partialDestructorFinished(&pb);
            BEAST_EXPECT(!pb);
            auto w = b.releaseWeakRef();
            BEAST_EXPECT(w == ReleaseWeakRefAction::destroy);
        }

        std::vector<SharedIntrusive<TIBase>> strong;
        std::vector<WeakIntrusive<TIBase>> weak;
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            auto b = make_SharedIntrusive<TIBase>();
            auto id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(b->use_count() == 1);
            for (int i = 0; i < 10; ++i)
            {
                strong.push_back(b);
            }
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            strong.resize(strong.size() - 1);
            BEAST_EXPECT(TIBase::getState(id) == alive);
            strong.clear();
            BEAST_EXPECT(TIBase::getState(id) == deleted);

            b = make_SharedIntrusive<TIBase>();
            id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(b->use_count() == 1);
            for (int i = 0; i < 10; ++i)
            {
                weak.push_back(b);
                BEAST_EXPECT(b->use_count() == 1);
            }
            BEAST_EXPECT(TIBase::getState(id) == alive);
            weak.resize(weak.size() - 1);
            BEAST_EXPECT(TIBase::getState(id) == alive);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            while (!weak.empty())
            {
                weak.resize(weak.size() - 1);
                if (weak.size())
                    BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            }
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            auto b = make_SharedIntrusive<TIBase>();
            auto id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            WeakIntrusive<TIBase> w{b};
            BEAST_EXPECT(TIBase::getState(id) == alive);
            auto s = w.lock();
            BEAST_EXPECT(s && s->use_count() == 2);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(s && s->use_count() == 1);
            s.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            BEAST_EXPECT(w.expired());
            s = w.lock();
            // Cannot convert a weak pointer to a strong pointer if object is
            // already partially deleted
            BEAST_EXPECT(!s);
            w.reset();
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            using swu = SharedWeakUnion<TIBase>;
            swu b = make_SharedIntrusive<TIBase>();
            BEAST_EXPECT(b.isStrong() && b.use_count() == 1);
            auto id = b.get()->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            swu w = b;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(w.isStrong() && b.use_count() == 2);
            w.convertToWeak();
            BEAST_EXPECT(w.isWeak() && b.use_count() == 1);
            swu s = w;
            BEAST_EXPECT(s.isWeak() && b.use_count() == 1);
            s.convertToStrong();
            BEAST_EXPECT(s.isStrong() && b.use_count() == 2);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(s.use_count() == 1);
            BEAST_EXPECT(!w.expired());
            s.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            BEAST_EXPECT(w.expired());
            w.convertToStrong();
            // Cannot convert a weak pointer to a strong pointer if object is
            // already partially deleted
            BEAST_EXPECT(w.isWeak());
            w.reset();
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            // Testing SharedWeakUnion assignment operator

            TIBase::ResetStatesGuard rsg{true};

            auto strong1 = make_SharedIntrusive<TIBase>();
            auto strong2 = make_SharedIntrusive<TIBase>();

            auto id1 = strong1->id_;
            auto id2 = strong2->id_;

            BEAST_EXPECT(id1 != id2);

            SharedWeakUnion<TIBase> union1 = strong1;
            SharedWeakUnion<TIBase> union2 = strong2;

            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(union2.isStrong());
            BEAST_EXPECT(union1.get() == strong1.get());
            BEAST_EXPECT(union2.get() == strong2.get());

            // 1) Normal assignment: explicitly calls SharedWeakUnion
            //    assignment
            union1 = union2;
            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(union2.isStrong());
            BEAST_EXPECT(union1.get() == union2.get());
            BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
            BEAST_EXPECT(TIBase::getState(id2) == TrackedState::alive);

            // 2) Test self-assignment
            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
            int initialRefCount = strong1->use_count();
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wself-assign-overloaded"
            union1 = union1;  // Self-assignment
#pragma clang diagnostic pop
            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
            BEAST_EXPECT(strong1->use_count() == initialRefCount);

            // 3) Test assignment from null union pointer
            union1 = SharedWeakUnion<TIBase>();
            BEAST_EXPECT(union1.get() == nullptr);

            // 4) Test assignment to expired union pointer
            strong2.reset();
            union2.reset();
            union1 = union2;
            BEAST_EXPECT(union1.get() == nullptr);
            BEAST_EXPECT(TIBase::getState(id2) == TrackedState::deleted);
        }
    }
    void
    testPartialDelete()
    {
        testcase("Partial Delete");

        // This test creates two threads. One with a strong pointer and one
        // with a weak pointer. The strong pointer is reset while the weak
        // pointer still holds a reference, triggering a partial delete.
        // While the partial delete function runs (a sleep is inserted) the
        // weak pointer is reset. The destructor should wait to run until
        // after the partial delete function has completed running.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        auto strong = make_SharedIntrusive<TIBase>();
        WeakIntrusive<TIBase> weak{strong};
        bool destructorRan = false;
        bool partialDeleteRan = false;
        std::latch partialDeleteStartedSyncPoint{2};
        strong->tracingCallback_ = [&](TrackedState cur,
                                       std::optional<TrackedState> next) {
            using enum TrackedState;
            if (next == deletedStarted)
            {
                // strong goes out of scope while weak is still in scope.
                // This checks that partialDelete has run to completion
                // before the destructor is called. A sleep is inserted
                // inside the partial delete to make sure the destructor is
                // given an opportunity to run during partial delete.
                BEAST_EXPECT(cur == partiallyDeleted);
            }
            if (next == partiallyDeletedStarted)
            {
                partialDeleteStartedSyncPoint.arrive_and_wait();
                using namespace std::chrono_literals;
                // Sleep and let the weak pointer go out of scope,
                // potentially triggering a destructor while partial delete
                // is running. The test is to make sure that doesn't happen.
                std::this_thread::sleep_for(800ms);
            }
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                partialDeleteRan = true;
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                destructorRan = true;
            }
        };
        std::thread t1{[&] {
            partialDeleteStartedSyncPoint.arrive_and_wait();
            weak.reset();  // Trigger a full delete as soon as the partial
                           // delete starts
        }};
        std::thread t2{[&] {
            strong.reset();  // Trigger a partial delete
        }};
        t1.join();
        t2.join();

        BEAST_EXPECT(destructorRan && partialDeleteRan);
    }

    void
    testDestructor()
    {
        testcase("Destructor");

        // This test creates two threads. One with a strong pointer and one
        // with a weak pointer. The weak pointer is reset while the strong
        // pointer still holds a reference. Then the strong pointer is
        // reset. Only the destructor should run. The partial destructor
        // should not be called. Since the weak reset runs to completion
        // before the strong pointer is reset, threading doesn't add much to
        // this test, but there is no harm in keeping it.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        auto strong = make_SharedIntrusive<TIBase>();
        WeakIntrusive<TIBase> weak{strong};
        bool destructorRan = false;
        bool partialDeleteRan = false;
        std::latch weakResetSyncPoint{2};
        strong->tracingCallback_ = [&](TrackedState cur,
                                       std::optional<TrackedState> next) {
            using enum TrackedState;
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                partialDeleteRan = true;
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                destructorRan = true;
            }
        };
        std::thread t1{[&] {
            weak.reset();
            weakResetSyncPoint.arrive_and_wait();
        }};
        std::thread t2{[&] {
            weakResetSyncPoint.arrive_and_wait();
            strong.reset();  // Trigger the full destructor (the weak ref is
                             // already gone, so no partial delete occurs)
        }};
        t1.join();
        t2.join();

        BEAST_EXPECT(destructorRan && !partialDeleteRan);
    }

    void
    testMultithreadedClearMixedVariant()
    {
        testcase("Multithreaded Clear Mixed Variant");

        // This test creates and destroys many strong and weak pointers in a
        // loop. There is a random mix of strong and weak pointers stored in
        // a vector (held as a variant). All threads clear all the pointers
        // and check that the invariants hold.

        using enum TrackedState;
        TIBase::ResetStatesGuard rsg{true};

        std::atomic<int> destructionState{0};
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void {
            destructionState.fetch_or(1, std::memory_order_acq_rel);
        };
        auto setPartialDeleteRan = [&]() -> void {
            destructionState.fetch_or(2, std::memory_order_acq_rel);
        };
        auto tracingCallback = [&](TrackedState cur,
                                   std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };
        auto createVecOfPointers = [&](auto const& toClone,
                                       std::default_random_engine& eng)
            -> std::vector<
                std::variant<SharedIntrusive<TIBase>, WeakIntrusive<TIBase>>> {
            std::vector<
                std::variant<SharedIntrusive<TIBase>, WeakIntrusive<TIBase>>>
                result;
            std::uniform_int_distribution<> toCreateDist(4, 64);
            std::uniform_int_distribution<> isStrongDist(0, 1);
            auto numToCreate = toCreateDist(eng);
            result.reserve(numToCreate);
            for (int i = 0; i < numToCreate; ++i)
            {
                if (isStrongDist(eng))
                {
                    result.push_back(SharedIntrusive<TIBase>(toClone));
                }
                else
                {
                    result.push_back(WeakIntrusive<TIBase>(toClone));
                }
            }
            return result;
        };
        constexpr int loopIters = 2 * 1024;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toClone;
        std::barrier loopStartSyncPoint{numThreads};
        std::barrier postCreateToCloneSyncPoint{numThreads};
        std::barrier postCreateVecOfPointersSyncPoint{numThreads};
        auto engines = [&]() -> std::vector<std::default_random_engine> {
            std::random_device rd;
            std::vector<std::default_random_engine> result;
            result.reserve(numThreads);
            for (int i = 0; i < numThreads; ++i)
                result.emplace_back(rd());
            return result;
        }();

        // cloneAndDestroy clones the strong pointer into a vector of mixed
        // strong and weak pointers and destroys them all at once.
        // threadId==0 is special.
        auto cloneAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // Thread 0 is the genesis thread. It creates the strong
                    // pointers to be cloned by the other threads. This
                    // thread will also check that the destructor ran and
                    // clear the temporary variables.

                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] =
                        getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toClone.clear();
                    toClone.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toClone.begin(), toClone.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToCloneSyncPoint.arrive_and_wait();

                auto v =
                    createVecOfPointers(toClone[threadId], engines[threadId]);
                toClone[threadId].reset();

                // ------ Sync Point ------
                postCreateVecOfPointersSyncPoint.arrive_and_wait();

                v.clear();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(cloneAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }
void
|
||||
testMultithreadedClearMixedUnion()
|
||||
{
|
||||
testcase("Multithreaded Clear Mixed Union");
|
||||
|
||||
// This test creates and destroys many SharedWeak pointers in a
|
||||
// loop. All the pointers start as strong and a loop randomly
|
||||
// convert them between strong and weak pointers. Both threads clear
|
||||
// all the pointers and check that the invariants hold.
|
||||
//
|
||||
// Note: This test also differs from the test above in that the pointers
|
||||
// randomly change from strong to weak and from weak to strong in a
|
||||
// loop. This can't be done in the variant test above because variant is
|
||||
// not thread safe while the SharedWeakUnion is thread safe.
|
||||
|
||||
using enum TrackedState;
|
||||
|
||||
TIBase::ResetStatesGuard rsg{true};
|
||||
|
||||
std::atomic<int> destructionState{0};
|
||||
// returns destructorRan and partialDestructorRan (in that order)
|
||||
auto getDestructorState = [&]() -> std::pair<bool, bool> {
|
||||
int s = destructionState.load(std::memory_order_relaxed);
|
||||
return {(s & 1) != 0, (s & 2) != 0};
|
||||
};
|
||||
auto setDestructorRan = [&]() -> void {
|
||||
destructionState.fetch_or(1, std::memory_order_acq_rel);
|
||||
};
|
||||
auto setPartialDeleteRan = [&]() -> void {
|
||||
destructionState.fetch_or(2, std::memory_order_acq_rel);
|
||||
};
|
||||
auto tracingCallback = [&](TrackedState cur,
|
||||
std::optional<TrackedState> next) {
|
||||
using enum TrackedState;
|
||||
auto [destructorRan, partialDeleteRan] = getDestructorState();
|
||||
if (next == partiallyDeleted)
|
||||
{
|
||||
BEAST_EXPECT(!partialDeleteRan && !destructorRan);
|
||||
setPartialDeleteRan();
|
||||
}
|
||||
if (next == deleted)
|
||||
{
|
||||
BEAST_EXPECT(!destructorRan);
|
||||
setDestructorRan();
|
||||
}
|
||||
};
|
||||
auto createVecOfPointers = [&](auto const& toClone,
|
||||
std::default_random_engine& eng)
|
||||
-> std::vector<SharedWeakUnion<TIBase>> {
|
||||
std::vector<SharedWeakUnion<TIBase>> result;
|
||||
std::uniform_int_distribution<> toCreateDist(4, 64);
|
||||
auto numToCreate = toCreateDist(eng);
|
||||
result.reserve(numToCreate);
|
||||
for (int i = 0; i < numToCreate; ++i)
|
||||
result.push_back(SharedIntrusive<TIBase>(toClone));
|
||||
return result;
|
||||
};
|
||||
constexpr int loopIters = 2 * 1024;
|
||||
constexpr int flipPointersLoopIters = 256;
|
||||
constexpr int numThreads = 16;
|
||||
std::vector<SharedIntrusive<TIBase>> toClone;
|
||||
std::barrier loopStartSyncPoint{numThreads};
|
||||
std::barrier postCreateToCloneSyncPoint{numThreads};
|
||||
std::barrier postCreateVecOfPointersSyncPoint{numThreads};
|
||||
std::barrier postFlipPointersLoopSyncPoint{numThreads};
|
||||
auto engines = [&]() -> std::vector<std::default_random_engine> {
|
||||
std::random_device rd;
|
||||
std::vector<std::default_random_engine> result;
|
||||
result.reserve(numThreads);
|
||||
for (int i = 0; i < numThreads; ++i)
|
||||
result.emplace_back(rd());
|
||||
return result;
|
||||
}();
|
||||
|
||||
// cloneAndDestroy clones the strong pointer into a vector of
|
||||
// mixed strong and weak pointers, runs a loop that randomly
|
||||
// changes strong pointers to weak pointers, and destroys them
// all at once.
auto cloneAndDestroy = [&](int threadId) {
for (int i = 0; i < loopIters; ++i)
{
// ------ Sync Point ------
loopStartSyncPoint.arrive_and_wait();

// only thread 0 should reset the state
std::optional<TIBase::ResetStatesGuard> rsg;
if (threadId == 0)
{
// threadId 0 is the genesis thread. It creates the
// strong pointer to be cloned by the other threads. This
// thread will also check that the destructor ran and
// clear the temporary variables.
rsg.emplace(false);
auto [destructorRan, partialDeleteRan] =
getDestructorState();
BEAST_EXPECT(!i || destructorRan);
destructionState.store(0, std::memory_order_release);

toClone.clear();
toClone.resize(numThreads);
auto strong = make_SharedIntrusive<TIBase>();
strong->tracingCallback_ = tracingCallback;
std::fill(toClone.begin(), toClone.end(), strong);
}

// ------ Sync Point ------
postCreateToCloneSyncPoint.arrive_and_wait();

auto v =
createVecOfPointers(toClone[threadId], engines[threadId]);
toClone[threadId].reset();

// ------ Sync Point ------
postCreateVecOfPointersSyncPoint.arrive_and_wait();

std::uniform_int_distribution<> isStrongDist(0, 1);
for (int f = 0; f < flipPointersLoopIters; ++f)
{
for (auto& p : v)
{
if (isStrongDist(engines[threadId]))
{
p.convertToStrong();
}
else
{
p.convertToWeak();
}
}
}

// ------ Sync Point ------
postFlipPointersLoopSyncPoint.arrive_and_wait();

v.clear();
}
};
std::vector<std::thread> threads;
for (int i = 0; i < numThreads; ++i)
{
threads.emplace_back(cloneAndDestroy, i);
}
for (int i = 0; i < numThreads; ++i)
{
threads[i].join();
}
}

void
testMultithreadedLockingWeak()
{
testcase("Multithreaded Locking Weak");

// This test creates a single shared atomic pointer that multiple threads
// create weak pointers from. The threads then lock the weak pointers.
// All threads then clear their pointers and check that the invariants
// hold.

using enum TrackedState;

TIBase::ResetStatesGuard rsg{true};

std::atomic<int> destructionState{0};
// returns destructorRan and partialDestructorRan (in that order)
auto getDestructorState = [&]() -> std::pair<bool, bool> {
int s = destructionState.load(std::memory_order_relaxed);
return {(s & 1) != 0, (s & 2) != 0};
};
auto setDestructorRan = [&]() -> void {
destructionState.fetch_or(1, std::memory_order_acq_rel);
};
auto setPartialDeleteRan = [&]() -> void {
destructionState.fetch_or(2, std::memory_order_acq_rel);
};
auto tracingCallback = [&](TrackedState cur,
std::optional<TrackedState> next) {
using enum TrackedState;
auto [destructorRan, partialDeleteRan] = getDestructorState();
if (next == partiallyDeleted)
{
BEAST_EXPECT(!partialDeleteRan && !destructorRan);
setPartialDeleteRan();
}
if (next == deleted)
{
BEAST_EXPECT(!destructorRan);
setDestructorRan();
}
};

constexpr int loopIters = 2 * 1024;
constexpr int lockWeakLoopIters = 256;
constexpr int numThreads = 16;
std::vector<SharedIntrusive<TIBase>> toLock;
std::barrier loopStartSyncPoint{numThreads};
std::barrier postCreateToLockSyncPoint{numThreads};
std::barrier postLockWeakLoopSyncPoint{numThreads};

// lockAndDestroy creates weak pointers from the strong pointer
// and runs a loop that locks the weak pointer. At the end of the loop
// all the pointers are destroyed all at once.
auto lockAndDestroy = [&](int threadId) {
for (int i = 0; i < loopIters; ++i)
{
// ------ Sync Point ------
loopStartSyncPoint.arrive_and_wait();

// only thread 0 should reset the state
std::optional<TIBase::ResetStatesGuard> rsg;
if (threadId == 0)
{
// threadId 0 is the genesis thread. It creates the
// strong pointer to be locked by the other threads. This
// thread will also check that the destructor ran and
// clear the temporary variables.
rsg.emplace(false);
auto [destructorRan, partialDeleteRan] =
getDestructorState();
BEAST_EXPECT(!i || destructorRan);
destructionState.store(0, std::memory_order_release);

toLock.clear();
toLock.resize(numThreads);
auto strong = make_SharedIntrusive<TIBase>();
strong->tracingCallback_ = tracingCallback;
std::fill(toLock.begin(), toLock.end(), strong);
}

// ------ Sync Point ------
postCreateToLockSyncPoint.arrive_and_wait();

// Multiple threads all create a weak pointer from the same
// strong pointer
WeakIntrusive weak{toLock[threadId]};
for (int wi = 0; wi < lockWeakLoopIters; ++wi)
{
BEAST_EXPECT(!weak.expired());
auto strong = weak.lock();
BEAST_EXPECT(strong);
}

// ------ Sync Point ------
postLockWeakLoopSyncPoint.arrive_and_wait();

toLock[threadId].reset();
}
};
std::vector<std::thread> threads;
for (int i = 0; i < numThreads; ++i)
{
threads.emplace_back(lockAndDestroy, i);
}
for (int i = 0; i < numThreads; ++i)
{
threads[i].join();
}
}

void
run() override
{
testBasics();
testPartialDelete();
testDestructor();
testMultithreadedClearMixedVariant();
testMultithreadedClearMixedUnion();
testMultithreadedLockingWeak();
}
};

BEAST_DEFINE_TESTSUITE(IntrusiveShared, ripple_basics, ripple);
} // namespace tests
} // namespace ripple
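
The multithreaded tests above pack two independent facts into one atomic integer, so thread 0 can reset both with a single store while the tracing callback sets them independently. A minimal self-contained sketch of that bit-packing convention, with names mirroring the tests (not part of the commit itself):

#include <atomic>
#include <utility>

// Bit 0 records that the destructor ran; bit 1 that the partial
// destructor ran.
std::atomic<int> destructionState{0};

void setDestructorRan() { destructionState.fetch_or(1, std::memory_order_acq_rel); }
void setPartialDeleteRan() { destructionState.fetch_or(2, std::memory_order_acq_rel); }

// Returns {destructorRan, partialDeleteRan}, as in the tests above.
std::pair<bool, bool> getDestructorState()
{
    int const s = destructionState.load(std::memory_order_relaxed);
    return {(s & 1) != 0, (s & 2) != 0};
}
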
@@ -20,9 +20,8 @@
#include <test/unit_test/SuiteJournal.h>

#include <xrpl/basics/TaggedCache.h>
#include <xrpl/basics/TaggedCache.ipp>
#include <xrpl/basics/chrono.h>
#include <xrpl/beast/clock/manual_clock.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/protocol/Protocol.h>

namespace ripple {

@@ -26,7 +26,6 @@
#include <xrpld/app/ledger/PendingSaves.h>
#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/AmendmentTable.h>
#include <xrpld/app/misc/HashRouter.h>
#include <xrpld/app/misc/LoadFeeTrack.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/misc/SHAMapStore.h>
@@ -35,15 +34,12 @@
#include <xrpld/app/misc/ValidatorList.h>
#include <xrpld/app/paths/PathRequests.h>
#include <xrpld/app/rdb/RelationalDatabase.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/core/DatabaseCon.h>
#include <xrpld/core/TimeKeeper.h>
#include <xrpld/overlay/Overlay.h>
#include <xrpld/overlay/Peer.h>

#include <xrpl/basics/Log.h>
#include <xrpl/basics/MathUtilities.h>
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/basics/UptimeClock.h>
#include <xrpl/basics/contract.h>
#include <xrpl/basics/safe_cast.h>
@@ -57,7 +53,6 @@
#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <limits>
#include <memory>
#include <vector>


@@ -21,6 +21,7 @@
#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/Transaction.h>

#include <xrpl/basics/TaggedCache.ipp>
#include <xrpl/basics/chrono.h>
#include <xrpl/protocol/STTx.h>


@@ -55,6 +55,8 @@ template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>

@@ -19,6 +19,8 @@

#include <xrpld/ledger/CachedView.h>

#include <xrpl/basics/TaggedCache.ipp>

namespace ripple {
namespace detail {


@@ -26,6 +26,7 @@

#include <xrpl/basics/BasicConfig.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/TaggedCache.ipp>
#include <xrpl/protocol/SystemParameters.h>

#include <condition_variable>

@@ -29,7 +29,10 @@
#include <xrpld/shamap/SHAMapLeafNode.h>
#include <xrpld/shamap/SHAMapMissingNode.h>
#include <xrpld/shamap/SHAMapTreeNode.h>
#include <xrpld/shamap/TreeNodeCache.h>

#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/basics/UnorderedContainers.h>
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/beast/utility/instrumentation.h>

@@ -103,7 +106,7 @@ private:
/** The sequence of the ledger that this map references, if any. */
std::uint32_t ledgerSeq_ = 0;

std::shared_ptr<SHAMapTreeNode> root_;
intr_ptr::SharedPtr<SHAMapTreeNode> root_;
mutable SHAMapState state_;
SHAMapType const type_;
bool backed_ = true; // Map is backed by the database
@@ -365,29 +368,30 @@ public:
invariants() const;

private:
using SharedPtrNodeStack =
std::stack<std::pair<std::shared_ptr<SHAMapTreeNode>, SHAMapNodeID>>;
using SharedPtrNodeStack = std::stack<
std::pair<intr_ptr::SharedPtr<SHAMapTreeNode>, SHAMapNodeID>>;
using DeltaRef = std::pair<
boost::intrusive_ptr<SHAMapItem const>,
boost::intrusive_ptr<SHAMapItem const>>;

// tree node cache operations
std::shared_ptr<SHAMapTreeNode>
intr_ptr::SharedPtr<SHAMapTreeNode>
cacheLookup(SHAMapHash const& hash) const;

void
canonicalize(SHAMapHash const& hash, std::shared_ptr<SHAMapTreeNode>&)
canonicalize(SHAMapHash const& hash, intr_ptr::SharedPtr<SHAMapTreeNode>&)
const;

// database operations
std::shared_ptr<SHAMapTreeNode>
intr_ptr::SharedPtr<SHAMapTreeNode>
fetchNodeFromDB(SHAMapHash const& hash) const;
std::shared_ptr<SHAMapTreeNode>
intr_ptr::SharedPtr<SHAMapTreeNode>
fetchNodeNT(SHAMapHash const& hash) const;
std::shared_ptr<SHAMapTreeNode>
intr_ptr::SharedPtr<SHAMapTreeNode>
fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const;
std::shared_ptr<SHAMapTreeNode>
intr_ptr::SharedPtr<SHAMapTreeNode>
fetchNode(SHAMapHash const& hash) const;
std::shared_ptr<SHAMapTreeNode>
intr_ptr::SharedPtr<SHAMapTreeNode>
checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const;

/** Update hashes up to the root */
@@ -395,7 +399,7 @@ private:
dirtyUp(
SharedPtrNodeStack& stack,
uint256 const& target,
std::shared_ptr<SHAMapTreeNode> terminal);
intr_ptr::SharedPtr<SHAMapTreeNode> terminal);

/** Walk towards the specified id, returning the node. Caller must check
if the return is nullptr, and if not, if the node->peekItem()->key() ==
@@ -409,36 +413,36 @@ private:

/** Unshare the node, allowing it to be modified */
template <class Node>
std::shared_ptr<Node>
unshareNode(std::shared_ptr<Node>, SHAMapNodeID const& nodeID);
intr_ptr::SharedPtr<Node>
unshareNode(intr_ptr::SharedPtr<Node>, SHAMapNodeID const& nodeID);

/** prepare a node to be modified before flushing */
template <class Node>
std::shared_ptr<Node>
preFlushNode(std::shared_ptr<Node> node) const;
intr_ptr::SharedPtr<Node>
preFlushNode(intr_ptr::SharedPtr<Node> node) const;

/** write and canonicalize modified node */
std::shared_ptr<SHAMapTreeNode>
writeNode(NodeObjectType t, std::shared_ptr<SHAMapTreeNode> node) const;
intr_ptr::SharedPtr<SHAMapTreeNode>
writeNode(NodeObjectType t, intr_ptr::SharedPtr<SHAMapTreeNode> node) const;

// returns the first item at or below this node
SHAMapLeafNode*
firstBelow(
std::shared_ptr<SHAMapTreeNode>,
intr_ptr::SharedPtr<SHAMapTreeNode>,
SharedPtrNodeStack& stack,
int branch = 0) const;

// returns the last item at or below this node
SHAMapLeafNode*
lastBelow(
std::shared_ptr<SHAMapTreeNode> node,
intr_ptr::SharedPtr<SHAMapTreeNode> node,
SharedPtrNodeStack& stack,
int branch = branchFactor) const;

// helper function for firstBelow and lastBelow
SHAMapLeafNode*
belowHelper(
std::shared_ptr<SHAMapTreeNode> node,
intr_ptr::SharedPtr<SHAMapTreeNode> node,
SharedPtrNodeStack& stack,
int branch,
std::tuple<
@@ -452,15 +456,15 @@ private:
descend(SHAMapInnerNode*, int branch) const;
SHAMapTreeNode*
descendThrow(SHAMapInnerNode*, int branch) const;
std::shared_ptr<SHAMapTreeNode>
descend(std::shared_ptr<SHAMapInnerNode> const&, int branch) const;
std::shared_ptr<SHAMapTreeNode>
descendThrow(std::shared_ptr<SHAMapInnerNode> const&, int branch) const;
intr_ptr::SharedPtr<SHAMapTreeNode>
descend(SHAMapInnerNode&, int branch) const;
intr_ptr::SharedPtr<SHAMapTreeNode>
descendThrow(SHAMapInnerNode&, int branch) const;

// Descend with filter
// If pending, callback is called as if it called fetchNodeNT
using descendCallback =
std::function<void(std::shared_ptr<SHAMapTreeNode>, SHAMapHash const&)>;
using descendCallback = std::function<
void(intr_ptr::SharedPtr<SHAMapTreeNode>, SHAMapHash const&)>;
SHAMapTreeNode*
descendAsync(
SHAMapInnerNode* parent,
@@ -478,8 +482,8 @@ private:

// Non-storing
// Does not hook the returned node to its parent
std::shared_ptr<SHAMapTreeNode>
descendNoStore(std::shared_ptr<SHAMapInnerNode> const&, int branch) const;
intr_ptr::SharedPtr<SHAMapTreeNode>
descendNoStore(SHAMapInnerNode&, int branch) const;

/** If there is only one leaf below this node, get its contents */
boost::intrusive_ptr<SHAMapItem const> const&
@@ -540,10 +544,10 @@ private:

// nodes we may have acquired from deferred reads
using DeferredNode = std::tuple<
SHAMapInnerNode*, // parent node
SHAMapNodeID, // parent node ID
int, // branch
std::shared_ptr<SHAMapTreeNode>>; // node
SHAMapInnerNode*, // parent node
SHAMapNodeID, // parent node ID
int, // branch
intr_ptr::SharedPtr<SHAMapTreeNode>>; // node

int deferred_;
std::mutex deferLock_;
@@ -577,7 +581,7 @@ private:
gmn_ProcessDeferredReads(MissingNodes&);

// fetch from DB helper function
std::shared_ptr<SHAMapTreeNode>
intr_ptr::SharedPtr<SHAMapTreeNode>
finishFetch(
SHAMapHash const& hash,
std::shared_ptr<NodeObject> const& object) const;

@@ -51,10 +51,10 @@ public:
{
}

std::shared_ptr<SHAMapTreeNode>
intr_ptr::SharedPtr<SHAMapTreeNode>
clone(std::uint32_t cowid) const final override
{
return std::make_shared<SHAMapAccountStateLeafNode>(
return intr_ptr::make_shared<SHAMapAccountStateLeafNode>(
item_, cowid, hash_);
}


@@ -21,9 +21,10 @@
#define RIPPLE_SHAMAP_SHAMAPINNERNODE_H_INCLUDED

#include <xrpld/shamap/SHAMapNodeID.h>
#include <xrpld/shamap/SHAMapTreeNode.h>
#include <xrpld/shamap/detail/TaggedPointer.h>

#include <xrpl/basics/IntrusivePointer.h>

#include <atomic>
#include <cstdint>
#include <optional>
@@ -41,7 +42,7 @@ public:
private:
/** Opaque type that contains the `hashes` array (array of type
`SHAMapHash`) and the `children` array (array of type
`std::shared_ptr<SHAMapInnerNode>`).
`intr_ptr::SharedPtr<SHAMapInnerNode>`).
*/
TaggedPointer hashesAndChildren_;

@@ -106,7 +107,11 @@ public:
operator=(SHAMapInnerNode const&) = delete;
~SHAMapInnerNode();

std::shared_ptr<SHAMapTreeNode>
// Needed to support intrusive weak pointers
void
partialDestructor() override;

intr_ptr::SharedPtr<SHAMapTreeNode>
clone(std::uint32_t cowid) const override;

SHAMapNodeType
@@ -140,19 +145,19 @@ public:
getChildHash(int m) const;

void
setChild(int m, std::shared_ptr<SHAMapTreeNode> child);
setChild(int m, intr_ptr::SharedPtr<SHAMapTreeNode> child);

void
shareChild(int m, std::shared_ptr<SHAMapTreeNode> const& child);
shareChild(int m, intr_ptr::SharedPtr<SHAMapTreeNode> const& child);

SHAMapTreeNode*
getChildPointer(int branch);

std::shared_ptr<SHAMapTreeNode>
intr_ptr::SharedPtr<SHAMapTreeNode>
getChild(int branch);

std::shared_ptr<SHAMapTreeNode>
canonicalizeChild(int branch, std::shared_ptr<SHAMapTreeNode> node);
intr_ptr::SharedPtr<SHAMapTreeNode>
canonicalizeChild(int branch, intr_ptr::SharedPtr<SHAMapTreeNode> node);

// sync functions
bool
@@ -180,10 +185,10 @@ public:
void
invariants(bool is_root = false) const override;

static std::shared_ptr<SHAMapTreeNode>
static intr_ptr::SharedPtr<SHAMapTreeNode>
makeFullInner(Slice data, SHAMapHash const& hash, bool hashValid);

static std::shared_ptr<SHAMapTreeNode>
static intr_ptr::SharedPtr<SHAMapTreeNode>
makeCompressedInner(Slice data);
};


@@ -21,7 +21,6 @@
#define RIPPLE_SHAMAP_SHAMAPLEAFNODE_H_INCLUDED

#include <xrpld/shamap/SHAMapItem.h>
#include <xrpld/shamap/SHAMapNodeID.h>
#include <xrpld/shamap/SHAMapTreeNode.h>

#include <cstdint>

@@ -23,6 +23,8 @@
#include <xrpld/shamap/SHAMapItem.h>
#include <xrpld/shamap/SHAMapNodeID.h>

#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/basics/IntrusiveRefCounts.h>
#include <xrpl/basics/SHAMapHash.h>
#include <xrpl/protocol/Serializer.h>

@@ -46,7 +48,7 @@ enum class SHAMapNodeType {
tnACCOUNT_STATE = 4
};

class SHAMapTreeNode
class SHAMapTreeNode : public IntrusiveRefCounts
{
protected:
SHAMapHash hash_;
@@ -85,15 +87,19 @@ protected:
public:
virtual ~SHAMapTreeNode() noexcept = default;

// Needed to support weak intrusive pointers
virtual void
partialDestructor() {};

/** \defgroup SHAMap Copy-on-Write Support

By nature, a node may appear in multiple SHAMap instances. Rather than
actually duplicating these nodes, SHAMap opts to be memory efficient
and uses copy-on-write semantics for nodes.
By nature, a node may appear in multiple SHAMap instances. Rather
than actually duplicating these nodes, SHAMap opts to be memory
efficient and uses copy-on-write semantics for nodes.

Only nodes that are not modified and don't need to be flushed back can
be shared. Once a node needs to be changed, it must first be copied and
the copy must be marked as not shareable.
Only nodes that are not modified and don't need to be flushed back
can be shared. Once a node needs to be changed, it must first be
copied and the copy must be marked as not shareable.

Note that just because a node may not be *owned* by a given SHAMap
instance does not mean that the node is NOT a part of any SHAMap. It
@@ -105,8 +111,8 @@ public:
/** @{ */
/** Returns the SHAMap that owns this node.

@return the ID of the SHAMap that owns this node, or 0 if the node
is not owned by any SHAMap and is a candidate for sharing.
@return the ID of the SHAMap that owns this node, or 0 if the
node is not owned by any SHAMap and is a candidate for sharing.
*/
std::uint32_t
cowid() const
@@ -126,7 +132,7 @@ public:
}

/** Make a copy of this node, setting the owner. */
virtual std::shared_ptr<SHAMapTreeNode>
virtual intr_ptr::SharedPtr<SHAMapTreeNode>
clone(std::uint32_t cowid) const = 0;
/** @} */

@@ -167,20 +173,20 @@ public:
virtual void
invariants(bool is_root = false) const = 0;

static std::shared_ptr<SHAMapTreeNode>
static intr_ptr::SharedPtr<SHAMapTreeNode>
makeFromPrefix(Slice rawNode, SHAMapHash const& hash);

static std::shared_ptr<SHAMapTreeNode>
static intr_ptr::SharedPtr<SHAMapTreeNode>
makeFromWire(Slice rawNode);

private:
static std::shared_ptr<SHAMapTreeNode>
static intr_ptr::SharedPtr<SHAMapTreeNode>
makeTransaction(Slice data, SHAMapHash const& hash, bool hashValid);

static std::shared_ptr<SHAMapTreeNode>
static intr_ptr::SharedPtr<SHAMapTreeNode>
makeAccountState(Slice data, SHAMapHash const& hash, bool hashValid);

static std::shared_ptr<SHAMapTreeNode>
static intr_ptr::SharedPtr<SHAMapTreeNode>
makeTransactionWithMeta(Slice data, SHAMapHash const& hash, bool hashValid);
};
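
The clone/cowid pair declared in this header is the copy-on-write contract the rest of the diff builds on: before mutating a node it does not uniquely own, a SHAMap clones the node and stamps the clone with its own owner id. A hedged sketch of that check (SHAMap::preFlushNode later in this diff applies the same pattern; the helper name here is illustrative, not from the commit):

#include <cstdint>

template <class Node>
intr_ptr::SharedPtr<Node>
ensureUnshared(intr_ptr::SharedPtr<Node> node, std::uint32_t myCowid)
{
    // A node whose cowid matches ours is already private to this map;
    // anything else (including cowid 0, a shareable node) must be cloned
    // before modification so other maps keep seeing the old contents.
    if (node->cowid() != myCowid)
        node = intr_ptr::static_pointer_cast<Node>(node->clone(myCowid));
    return node;
}
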


@@ -50,10 +50,10 @@ public:
{
}

std::shared_ptr<SHAMapTreeNode>
intr_ptr::SharedPtr<SHAMapTreeNode>
clone(std::uint32_t cowid) const final override
{
return std::make_shared<SHAMapTxLeafNode>(item_, cowid, hash_);
return intr_ptr::make_shared<SHAMapTxLeafNode>(item_, cowid, hash_);
}

SHAMapNodeType

@@ -51,10 +51,11 @@ public:
{
}

std::shared_ptr<SHAMapTreeNode>
intr_ptr::SharedPtr<SHAMapTreeNode>
clone(std::uint32_t cowid) const override
{
return std::make_shared<SHAMapTxPlusMetaLeafNode>(item_, cowid, hash_);
return intr_ptr::make_shared<SHAMapTxPlusMetaLeafNode>(
item_, cowid, hash_);
}

SHAMapNodeType

@@ -22,12 +22,17 @@

#include <xrpld/shamap/SHAMapTreeNode.h>

#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/basics/TaggedCache.h>

namespace ripple {

using TreeNodeCache = TaggedCache<uint256, SHAMapTreeNode>;

using TreeNodeCache = TaggedCache<
uint256,
SHAMapTreeNode,
/*IsKeyCache*/ false,
intr_ptr::SharedWeakUnionPtr<SHAMapTreeNode>,
intr_ptr::SharedPtr<SHAMapTreeNode>>;
} // namespace ripple

#endif
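
With this alias, the cache stores a SharedWeakUnionPtr per entry and hands out an intrusive SharedPtr on lookup. A hedged usage sketch of what a caller sees (fetch() is the accessor SHAMap::cacheLookup uses later in this diff; cache construction is elided):

intr_ptr::SharedPtr<SHAMapTreeNode>
lookup(TreeNodeCache& cache, uint256 const& key)
{
    // fetch() yields an empty pointer on a miss; holding the result keeps
    // a strong intrusive reference even if the cache later demotes its own
    // union pointer to weak.
    return cache.fetch(key);
}
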

@@ -22,7 +22,7 @@
#include <xrpld/app/main/Tuning.h>
#include <xrpld/shamap/NodeFamily.h>

#include <sstream>
#include <xrpl/basics/TaggedCache.ipp>

namespace ripple {


@@ -24,25 +24,26 @@
#include <xrpld/shamap/SHAMapTxLeafNode.h>
#include <xrpld/shamap/SHAMapTxPlusMetaLeafNode.h>

#include <xrpl/basics/TaggedCache.ipp>
#include <xrpl/basics/contract.h>

namespace ripple {

[[nodiscard]] std::shared_ptr<SHAMapLeafNode>
[[nodiscard]] intr_ptr::SharedPtr<SHAMapLeafNode>
makeTypedLeaf(
SHAMapNodeType type,
boost::intrusive_ptr<SHAMapItem const> item,
std::uint32_t owner)
{
if (type == SHAMapNodeType::tnTRANSACTION_NM)
return std::make_shared<SHAMapTxLeafNode>(std::move(item), owner);
return intr_ptr::make_shared<SHAMapTxLeafNode>(std::move(item), owner);

if (type == SHAMapNodeType::tnTRANSACTION_MD)
return std::make_shared<SHAMapTxPlusMetaLeafNode>(
return intr_ptr::make_shared<SHAMapTxPlusMetaLeafNode>(
std::move(item), owner);

if (type == SHAMapNodeType::tnACCOUNT_STATE)
return std::make_shared<SHAMapAccountStateLeafNode>(
return intr_ptr::make_shared<SHAMapAccountStateLeafNode>(
std::move(item), owner);

LogicError(
@@ -54,7 +55,7 @@ makeTypedLeaf(
SHAMap::SHAMap(SHAMapType t, Family& f)
: f_(f), journal_(f.journal()), state_(SHAMapState::Modifying), type_(t)
{
root_ = std::make_shared<SHAMapInnerNode>(cowid_);
root_ = intr_ptr::make_shared<SHAMapInnerNode>(cowid_);
}

// The `hash` parameter is unused. It is part of the interface so it's clear
@@ -64,7 +65,7 @@ SHAMap::SHAMap(SHAMapType t, Family& f)
SHAMap::SHAMap(SHAMapType t, uint256 const& hash, Family& f)
: f_(f), journal_(f.journal()), state_(SHAMapState::Synching), type_(t)
{
root_ = std::make_shared<SHAMapInnerNode>(cowid_);
root_ = intr_ptr::make_shared<SHAMapInnerNode>(cowid_);
}

SHAMap::SHAMap(SHAMap const& other, bool isMutable)
@@ -95,7 +96,7 @@ void
SHAMap::dirtyUp(
SharedPtrNodeStack& stack,
uint256 const& target,
std::shared_ptr<SHAMapTreeNode> child)
intr_ptr::SharedPtr<SHAMapTreeNode> child)
{
// walk the tree up through the inner nodes to the root_
// update hashes and links
@@ -112,7 +113,7 @@ SHAMap::dirtyUp(
|
||||
while (!stack.empty())
|
||||
{
|
||||
auto node =
|
||||
std::dynamic_pointer_cast<SHAMapInnerNode>(stack.top().first);
|
||||
intr_ptr::dynamic_pointer_cast<SHAMapInnerNode>(stack.top().first);
|
||||
SHAMapNodeID nodeID = stack.top().second;
|
||||
stack.pop();
|
||||
XRPL_ASSERT(node, "ripple::SHAMap::dirtyUp : non-null node");
|
||||
@@ -141,12 +142,13 @@ SHAMap::walkTowardsKey(uint256 const& id, SharedPtrNodeStack* stack) const
|
||||
if (stack != nullptr)
|
||||
stack->push({inNode, nodeID});
|
||||
|
||||
auto const inner = std::static_pointer_cast<SHAMapInnerNode>(inNode);
|
||||
auto const inner =
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(inNode);
|
||||
auto const branch = selectBranch(nodeID, id);
|
||||
if (inner->isEmptyBranch(branch))
|
||||
return nullptr;
|
||||
|
||||
inNode = descendThrow(inner, branch);
|
||||
inNode = descendThrow(*inner, branch);
|
||||
nodeID = nodeID.getChildNodeID(branch);
|
||||
}
|
||||
|
||||
@@ -164,7 +166,7 @@ SHAMap::findKey(uint256 const& id) const
|
||||
return leaf;
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const
|
||||
{
|
||||
XRPL_ASSERT(backed_, "ripple::SHAMap::fetchNodeFromDB : is backed");
|
||||
@@ -172,7 +174,7 @@ SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const
|
||||
return finishFetch(hash, obj);
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::finishFetch(
|
||||
SHAMapHash const& hash,
|
||||
std::shared_ptr<NodeObject> const& object) const
|
||||
@@ -211,7 +213,7 @@ SHAMap::finishFetch(
|
||||
}
|
||||
|
||||
// See if a sync filter has a node
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
|
||||
{
|
||||
if (auto nodeData = filter->getNode(hash))
|
||||
@@ -244,7 +246,7 @@ SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
|
||||
|
||||
// Get a node without throwing
|
||||
// Used on maps where missing nodes are expected
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
|
||||
{
|
||||
auto node = cacheLookup(hash);
|
||||
@@ -267,7 +269,7 @@ SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
|
||||
return node;
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::fetchNodeNT(SHAMapHash const& hash) const
|
||||
{
|
||||
auto node = cacheLookup(hash);
|
||||
@@ -279,7 +281,7 @@ SHAMap::fetchNodeNT(SHAMapHash const& hash) const
|
||||
}
|
||||
|
||||
// Throw if the node is missing
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::fetchNode(SHAMapHash const& hash) const
|
||||
{
|
||||
auto node = fetchNodeNT(hash);
|
||||
@@ -301,14 +303,13 @@ SHAMap::descendThrow(SHAMapInnerNode* parent, int branch) const
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
SHAMap::descendThrow(std::shared_ptr<SHAMapInnerNode> const& parent, int branch)
|
||||
const
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::descendThrow(SHAMapInnerNode& parent, int branch) const
|
||||
{
|
||||
std::shared_ptr<SHAMapTreeNode> ret = descend(parent, branch);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> ret = descend(parent, branch);
|
||||
|
||||
if (!ret && !parent->isEmptyBranch(branch))
|
||||
Throw<SHAMapMissingNode>(type_, parent->getChildHash(branch));
|
||||
if (!ret && !parent.isEmptyBranch(branch))
|
||||
Throw<SHAMapMissingNode>(type_, parent.getChildHash(branch));
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -320,7 +321,7 @@ SHAMap::descend(SHAMapInnerNode* parent, int branch) const
|
||||
if (ret || !backed_)
|
||||
return ret;
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode> node =
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node =
|
||||
fetchNodeNT(parent->getChildHash(branch));
|
||||
if (!node)
|
||||
return nullptr;
|
||||
@@ -329,32 +330,29 @@ SHAMap::descend(SHAMapInnerNode* parent, int branch) const
|
||||
return node.get();
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
SHAMap::descend(std::shared_ptr<SHAMapInnerNode> const& parent, int branch)
|
||||
const
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::descend(SHAMapInnerNode& parent, int branch) const
|
||||
{
|
||||
std::shared_ptr<SHAMapTreeNode> node = parent->getChild(branch);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node = parent.getChild(branch);
|
||||
if (node || !backed_)
|
||||
return node;
|
||||
|
||||
node = fetchNode(parent->getChildHash(branch));
|
||||
node = fetchNode(parent.getChildHash(branch));
|
||||
if (!node)
|
||||
return nullptr;
|
||||
return {};
|
||||
|
||||
node = parent->canonicalizeChild(branch, std::move(node));
|
||||
node = parent.canonicalizeChild(branch, std::move(node));
|
||||
return node;
|
||||
}
|
||||
|
||||
// Gets the node that would be hooked to this branch,
|
||||
// but doesn't hook it up.
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
SHAMap::descendNoStore(
|
||||
std::shared_ptr<SHAMapInnerNode> const& parent,
|
||||
int branch) const
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::descendNoStore(SHAMapInnerNode& parent, int branch) const
|
||||
{
|
||||
std::shared_ptr<SHAMapTreeNode> ret = parent->getChild(branch);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> ret = parent.getChild(branch);
|
||||
if (!ret && backed_)
|
||||
ret = fetchNode(parent->getChildHash(branch));
|
||||
ret = fetchNode(parent.getChildHash(branch));
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -379,7 +377,7 @@ SHAMap::descend(
|
||||
if (!child)
|
||||
{
|
||||
auto const& childHash = parent->getChildHash(branch);
|
||||
std::shared_ptr<SHAMapTreeNode> childNode =
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> childNode =
|
||||
fetchNodeNT(childHash, filter);
|
||||
|
||||
if (childNode)
|
||||
@@ -436,8 +434,8 @@ SHAMap::descendAsync(
|
||||
}
|
||||
|
||||
template <class Node>
|
||||
std::shared_ptr<Node>
|
||||
SHAMap::unshareNode(std::shared_ptr<Node> node, SHAMapNodeID const& nodeID)
|
||||
intr_ptr::SharedPtr<Node>
|
||||
SHAMap::unshareNode(intr_ptr::SharedPtr<Node> node, SHAMapNodeID const& nodeID)
|
||||
{
|
||||
// make sure the node is suitable for the intended operation (copy on write)
|
||||
XRPL_ASSERT(
|
||||
@@ -449,7 +447,7 @@ SHAMap::unshareNode(std::shared_ptr<Node> node, SHAMapNodeID const& nodeID)
|
||||
XRPL_ASSERT(
|
||||
state_ != SHAMapState::Immutable,
|
||||
"ripple::SHAMap::unshareNode : not immutable");
|
||||
node = std::static_pointer_cast<Node>(node->clone(cowid_));
|
||||
node = intr_ptr::static_pointer_cast<Node>(node->clone(cowid_));
|
||||
if (nodeID.isRoot())
|
||||
root_ = node;
|
||||
}
|
||||
@@ -458,7 +456,7 @@ SHAMap::unshareNode(std::shared_ptr<Node> node, SHAMapNodeID const& nodeID)
|
||||
|
||||
SHAMapLeafNode*
|
||||
SHAMap::belowHelper(
|
||||
std::shared_ptr<SHAMapTreeNode> node,
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node,
|
||||
SharedPtrNodeStack& stack,
|
||||
int branch,
|
||||
std::tuple<int, std::function<bool(int)>, std::function<void(int&)>> const&
|
||||
@@ -467,11 +465,11 @@ SHAMap::belowHelper(
|
||||
auto& [init, cmp, incr] = loopParams;
|
||||
if (node->isLeaf())
|
||||
{
|
||||
auto n = std::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
auto n = intr_ptr::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
stack.push({node, {leafDepth, n->peekItem()->key()}});
|
||||
return n.get();
|
||||
}
|
||||
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
auto inner = intr_ptr::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
if (stack.empty())
|
||||
stack.push({inner, SHAMapNodeID{}});
|
||||
else
|
||||
@@ -480,17 +478,17 @@ SHAMap::belowHelper(
|
||||
{
|
||||
if (!inner->isEmptyBranch(i))
|
||||
{
|
||||
node = descendThrow(inner, i);
|
||||
node.adopt(descendThrow(inner.get(), i));
|
||||
XRPL_ASSERT(
|
||||
!stack.empty(),
|
||||
"ripple::SHAMap::belowHelper : non-empty stack");
|
||||
if (node->isLeaf())
|
||||
{
|
||||
auto n = std::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
auto n = intr_ptr::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
stack.push({n, {leafDepth, n->peekItem()->key()}});
|
||||
return n.get();
|
||||
}
|
||||
inner = std::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
inner = intr_ptr::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
stack.push({inner, stack.top().second.getChildNodeID(branch)});
|
||||
i = init; // descend and reset loop
|
||||
}
|
||||
@@ -501,7 +499,7 @@ SHAMap::belowHelper(
|
||||
}
|
||||
SHAMapLeafNode*
|
||||
SHAMap::lastBelow(
|
||||
std::shared_ptr<SHAMapTreeNode> node,
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node,
|
||||
SharedPtrNodeStack& stack,
|
||||
int branch) const
|
||||
{
|
||||
@@ -513,7 +511,7 @@ SHAMap::lastBelow(
|
||||
}
|
||||
SHAMapLeafNode*
|
||||
SHAMap::firstBelow(
|
||||
std::shared_ptr<SHAMapTreeNode> node,
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node,
|
||||
SharedPtrNodeStack& stack,
|
||||
int branch) const
|
||||
{
|
||||
@@ -593,12 +591,12 @@ SHAMap::peekNextItem(uint256 const& id, SharedPtrNodeStack& stack) const
|
||||
XRPL_ASSERT(
|
||||
!node->isLeaf(),
|
||||
"ripple::SHAMap::peekNextItem : another node is not leaf");
|
||||
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
auto inner = intr_ptr::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
for (auto i = selectBranch(nodeID, id) + 1; i < branchFactor; ++i)
|
||||
{
|
||||
if (!inner->isEmptyBranch(i))
|
||||
{
|
||||
node = descendThrow(inner, i);
|
||||
node = descendThrow(*inner, i);
|
||||
auto leaf = firstBelow(node, stack, i);
|
||||
if (!leaf)
|
||||
Throw<SHAMapMissingNode>(type_, id);
|
||||
@@ -654,14 +652,14 @@ SHAMap::upper_bound(uint256 const& id) const
|
||||
}
|
||||
else
|
||||
{
|
||||
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
auto inner = intr_ptr::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
for (auto branch = selectBranch(nodeID, id) + 1;
|
||||
branch < branchFactor;
|
||||
++branch)
|
||||
{
|
||||
if (!inner->isEmptyBranch(branch))
|
||||
{
|
||||
node = descendThrow(inner, branch);
|
||||
node = descendThrow(*inner, branch);
|
||||
auto leaf = firstBelow(node, stack, branch);
|
||||
if (!leaf)
|
||||
Throw<SHAMapMissingNode>(type_, id);
|
||||
@@ -691,13 +689,13 @@ SHAMap::lower_bound(uint256 const& id) const
|
||||
}
|
||||
else
|
||||
{
|
||||
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
auto inner = intr_ptr::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
for (int branch = selectBranch(nodeID, id) - 1; branch >= 0;
|
||||
--branch)
|
||||
{
|
||||
if (!inner->isEmptyBranch(branch))
|
||||
{
|
||||
node = descendThrow(inner, branch);
|
||||
node = descendThrow(*inner, branch);
|
||||
auto leaf = lastBelow(node, stack, branch);
|
||||
if (!leaf)
|
||||
Throw<SHAMapMissingNode>(type_, id);
|
||||
@@ -732,7 +730,8 @@ SHAMap::delItem(uint256 const& id)
|
||||
if (stack.empty())
|
||||
Throw<SHAMapMissingNode>(type_, id);
|
||||
|
||||
auto leaf = std::dynamic_pointer_cast<SHAMapLeafNode>(stack.top().first);
|
||||
auto leaf =
|
||||
intr_ptr::dynamic_pointer_cast<SHAMapLeafNode>(stack.top().first);
|
||||
stack.pop();
|
||||
|
||||
if (!leaf || (leaf->peekItem()->key() != id))
|
||||
@@ -742,12 +741,12 @@ SHAMap::delItem(uint256 const& id)
|
||||
|
||||
// What gets attached to the end of the chain
|
||||
// (For now, nothing, since we deleted the leaf)
|
||||
std::shared_ptr<SHAMapTreeNode> prevNode;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> prevNode;
|
||||
|
||||
while (!stack.empty())
|
||||
{
|
||||
auto node =
|
||||
std::static_pointer_cast<SHAMapInnerNode>(stack.top().first);
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(stack.top().first);
|
||||
SHAMapNodeID nodeID = stack.top().second;
|
||||
stack.pop();
|
||||
|
||||
@@ -775,7 +774,8 @@ SHAMap::delItem(uint256 const& id)
|
||||
{
|
||||
if (!node->isEmptyBranch(i))
|
||||
{
|
||||
node->setChild(i, nullptr);
|
||||
node->setChild(
|
||||
i, intr_ptr::SharedPtr<SHAMapTreeNode>{});
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -824,7 +824,7 @@ SHAMap::addGiveItem(
|
||||
|
||||
if (node->isLeaf())
|
||||
{
|
||||
auto leaf = std::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
auto leaf = intr_ptr::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
if (leaf->peekItem()->key() == tag)
|
||||
return false;
|
||||
}
|
||||
@@ -832,7 +832,7 @@ SHAMap::addGiveItem(
|
||||
if (node->isInner())
|
||||
{
|
||||
// easy case, we end on an inner node
|
||||
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
auto inner = intr_ptr::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
int branch = selectBranch(nodeID, tag);
|
||||
XRPL_ASSERT(
|
||||
inner->isEmptyBranch(branch),
|
||||
@@ -843,13 +843,13 @@ SHAMap::addGiveItem(
|
||||
{
|
||||
// this is a leaf node that has to be made an inner node holding two
|
||||
// items
|
||||
auto leaf = std::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
auto leaf = intr_ptr::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
auto otherItem = leaf->peekItem();
|
||||
XRPL_ASSERT(
|
||||
otherItem && (tag != otherItem->key()),
|
||||
"ripple::SHAMap::addGiveItem : non-null item");
|
||||
|
||||
node = std::make_shared<SHAMapInnerNode>(node->cowid());
|
||||
node = intr_ptr::make_shared<SHAMapInnerNode>(node->cowid());
|
||||
|
||||
unsigned int b1, b2;
|
||||
|
||||
@@ -861,7 +861,7 @@ SHAMap::addGiveItem(
|
||||
// we need a new inner node, since both go on same branch at this
|
||||
// level
|
||||
nodeID = nodeID.getChildNodeID(b1);
|
||||
node = std::make_shared<SHAMapInnerNode>(cowid_);
|
||||
node = intr_ptr::make_shared<SHAMapInnerNode>(cowid_);
|
||||
}
|
||||
|
||||
// we can add the two leaf nodes here
|
||||
@@ -915,7 +915,8 @@ SHAMap::updateGiveItem(
|
||||
if (stack.empty())
|
||||
Throw<SHAMapMissingNode>(type_, tag);
|
||||
|
||||
auto node = std::dynamic_pointer_cast<SHAMapLeafNode>(stack.top().first);
|
||||
auto node =
|
||||
intr_ptr::dynamic_pointer_cast<SHAMapLeafNode>(stack.top().first);
|
||||
auto nodeID = stack.top().second;
|
||||
stack.pop();
|
||||
|
||||
@@ -987,8 +988,9 @@ SHAMap::fetchRoot(SHAMapHash const& hash, SHAMapSyncFilter* filter)
|
||||
@note The node must have already been unshared by having the caller
|
||||
first call SHAMapTreeNode::unshare().
|
||||
*/
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
SHAMap::writeNode(NodeObjectType t, std::shared_ptr<SHAMapTreeNode> node) const
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::writeNode(NodeObjectType t, intr_ptr::SharedPtr<SHAMapTreeNode> node)
|
||||
const
|
||||
{
|
||||
XRPL_ASSERT(
|
||||
node->cowid() == 0, "ripple::SHAMap::writeNode : valid input node");
|
||||
@@ -1007,8 +1009,8 @@ SHAMap::writeNode(NodeObjectType t, std::shared_ptr<SHAMapTreeNode> node) const
|
||||
// pointer to because flushing modifies inner nodes -- it
|
||||
// makes them point to canonical/shared nodes.
|
||||
template <class Node>
|
||||
std::shared_ptr<Node>
|
||||
SHAMap::preFlushNode(std::shared_ptr<Node> node) const
|
||||
intr_ptr::SharedPtr<Node>
|
||||
SHAMap::preFlushNode(intr_ptr::SharedPtr<Node> node) const
|
||||
{
|
||||
// A shared node should never need to be flushed
|
||||
// because that would imply someone modified it
|
||||
@@ -1019,7 +1021,7 @@ SHAMap::preFlushNode(std::shared_ptr<Node> node) const
|
||||
{
|
||||
// Node is not uniquely ours, so unshare it before
|
||||
// possibly modifying it
|
||||
node = std::static_pointer_cast<Node>(node->clone(cowid_));
|
||||
node = intr_ptr::static_pointer_cast<Node>(node->clone(cowid_));
|
||||
}
|
||||
return node;
|
||||
}
|
||||
@@ -1061,17 +1063,17 @@ SHAMap::walkSubTree(bool doWrite, NodeObjectType t)
|
||||
return 1;
|
||||
}
|
||||
|
||||
auto node = std::static_pointer_cast<SHAMapInnerNode>(root_);
|
||||
auto node = intr_ptr::static_pointer_cast<SHAMapInnerNode>(root_);
|
||||
|
||||
if (node->isEmpty())
|
||||
{ // replace empty root with a new empty root
|
||||
root_ = std::make_shared<SHAMapInnerNode>(0);
|
||||
root_ = intr_ptr::make_shared<SHAMapInnerNode>(0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Stack of {parent,index,child} pointers representing
|
||||
// inner nodes we are in the process of flushing
|
||||
using StackEntry = std::pair<std::shared_ptr<SHAMapInnerNode>, int>;
|
||||
using StackEntry = std::pair<intr_ptr::SharedPtr<SHAMapInnerNode>, int>;
|
||||
std::stack<StackEntry, std::vector<StackEntry>> stack;
|
||||
|
||||
node = preFlushNode(std::move(node));
|
||||
@@ -1108,7 +1110,7 @@ SHAMap::walkSubTree(bool doWrite, NodeObjectType t)
|
||||
// The semantics of this changes when we move to c++-20
|
||||
// Right now no move will occur; With c++-20 child will
|
||||
// be moved from.
|
||||
node = std::static_pointer_cast<SHAMapInnerNode>(
|
||||
node = intr_ptr::static_pointer_cast<SHAMapInnerNode>(
|
||||
std::move(child));
|
||||
pos = 0;
|
||||
}
|
||||
@@ -1140,7 +1142,7 @@ SHAMap::walkSubTree(bool doWrite, NodeObjectType t)
|
||||
node->unshare();
|
||||
|
||||
if (doWrite)
|
||||
node = std::static_pointer_cast<SHAMapInnerNode>(
|
||||
node = intr_ptr::static_pointer_cast<SHAMapInnerNode>(
|
||||
writeNode(t, std::move(node)));
|
||||
|
||||
++flushed;
|
||||
@@ -1214,7 +1216,7 @@ SHAMap::dump(bool hash) const
|
||||
JLOG(journal_.info()) << leafCount << " resident leaves";
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::cacheLookup(SHAMapHash const& hash) const
|
||||
{
|
||||
auto ret = f_.getTreeNodeCache()->fetch(hash.as_uint256());
|
||||
@@ -1227,7 +1229,7 @@ SHAMap::cacheLookup(SHAMapHash const& hash) const
|
||||
void
|
||||
SHAMap::canonicalize(
|
||||
SHAMapHash const& hash,
|
||||
std::shared_ptr<SHAMapTreeNode>& node) const
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>& node) const
|
||||
{
|
||||
XRPL_ASSERT(backed_, "ripple::SHAMap::canonicalize : is backed");
|
||||
XRPL_ASSERT(
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#include <xrpld/shamap/SHAMap.h>
|
||||
|
||||
#include <xrpl/basics/IntrusivePointer.ipp>
|
||||
#include <xrpl/basics/contract.h>
|
||||
|
||||
#include <array>
|
||||
@@ -242,28 +243,28 @@ SHAMap::walkMap(std::vector<SHAMapMissingNode>& missingNodes, int maxMissing)
|
||||
if (!root_->isInner()) // root_ is only node, and we have it
|
||||
return;
|
||||
|
||||
using StackEntry = std::shared_ptr<SHAMapInnerNode>;
|
||||
using StackEntry = intr_ptr::SharedPtr<SHAMapInnerNode>;
|
||||
std::stack<StackEntry, std::vector<StackEntry>> nodeStack;
|
||||
|
||||
nodeStack.push(std::static_pointer_cast<SHAMapInnerNode>(root_));
|
||||
nodeStack.push(intr_ptr::static_pointer_cast<SHAMapInnerNode>(root_));
|
||||
|
||||
while (!nodeStack.empty())
|
||||
{
|
||||
std::shared_ptr<SHAMapInnerNode> node = std::move(nodeStack.top());
|
||||
intr_ptr::SharedPtr<SHAMapInnerNode> node = std::move(nodeStack.top());
|
||||
nodeStack.pop();
|
||||
|
||||
for (int i = 0; i < 16; ++i)
|
||||
{
|
||||
if (!node->isEmptyBranch(i))
|
||||
{
|
||||
std::shared_ptr<SHAMapTreeNode> nextNode =
|
||||
descendNoStore(node, i);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> nextNode =
|
||||
descendNoStore(*node, i);
|
||||
|
||||
if (nextNode)
|
||||
{
|
||||
if (nextNode->isInner())
|
||||
nodeStack.push(
|
||||
std::static_pointer_cast<SHAMapInnerNode>(
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(
|
||||
nextNode));
|
||||
}
|
||||
else
|
||||
@@ -285,15 +286,15 @@ SHAMap::walkMapParallel(
|
||||
if (!root_->isInner()) // root_ is only node, and we have it
|
||||
return false;
|
||||
|
||||
using StackEntry = std::shared_ptr<SHAMapInnerNode>;
|
||||
std::array<std::shared_ptr<SHAMapTreeNode>, 16> topChildren;
|
||||
using StackEntry = intr_ptr::SharedPtr<SHAMapInnerNode>;
|
||||
std::array<intr_ptr::SharedPtr<SHAMapTreeNode>, 16> topChildren;
|
||||
{
|
||||
auto const& innerRoot =
|
||||
std::static_pointer_cast<SHAMapInnerNode>(root_);
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(root_);
|
||||
for (int i = 0; i < 16; ++i)
|
||||
{
|
||||
if (!innerRoot->isEmptyBranch(i))
|
||||
topChildren[i] = descendNoStore(innerRoot, i);
|
||||
topChildren[i] = descendNoStore(*innerRoot, i);
|
||||
}
|
||||
}
|
||||
std::vector<std::thread> workers;
|
||||
@@ -314,7 +315,7 @@ SHAMap::walkMapParallel(
|
||||
continue;
|
||||
|
||||
nodeStacks[rootChildIndex].push(
|
||||
std::static_pointer_cast<SHAMapInnerNode>(child));
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(child));
|
||||
|
||||
JLOG(journal_.debug()) << "starting worker " << rootChildIndex;
|
||||
workers.push_back(std::thread(
|
||||
@@ -324,7 +325,7 @@ SHAMap::walkMapParallel(
|
||||
{
|
||||
while (!nodeStack.empty())
|
||||
{
|
||||
std::shared_ptr<SHAMapInnerNode> node =
|
||||
intr_ptr::SharedPtr<SHAMapInnerNode> node =
|
||||
std::move(nodeStack.top());
|
||||
XRPL_ASSERT(
|
||||
node,
|
||||
@@ -335,14 +336,15 @@ SHAMap::walkMapParallel(
|
||||
{
|
||||
if (node->isEmptyBranch(i))
|
||||
continue;
|
||||
std::shared_ptr<SHAMapTreeNode> nextNode =
|
||||
descendNoStore(node, i);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> nextNode =
|
||||
descendNoStore(*node, i);
|
||||
|
||||
if (nextNode)
|
||||
{
|
||||
if (nextNode->isInner())
|
||||
nodeStack.push(std::static_pointer_cast<
|
||||
SHAMapInnerNode>(nextNode));
|
||||
nodeStack.push(
|
||||
intr_ptr::static_pointer_cast<
|
||||
SHAMapInnerNode>(nextNode));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
@@ -21,15 +21,13 @@
|
||||
#include <xrpld/shamap/SHAMapTreeNode.h>
|
||||
#include <xrpld/shamap/detail/TaggedPointer.ipp>
|
||||
|
||||
#include <xrpl/basics/IntrusivePointer.ipp>
|
||||
#include <xrpl/basics/Slice.h>
|
||||
#include <xrpl/basics/contract.h>
|
||||
#include <xrpl/basics/spinlock.h>
|
||||
#include <xrpl/protocol/HashPrefix.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <iterator>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
SHAMapInnerNode::SHAMapInnerNode(
|
||||
@@ -41,6 +39,17 @@ SHAMapInnerNode::SHAMapInnerNode(
|
||||
|
||||
SHAMapInnerNode::~SHAMapInnerNode() = default;
|
||||
|
||||
void
|
||||
SHAMapInnerNode::partialDestructor()
|
||||
{
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>* children;
|
||||
// structured bindings can't be captured in c++ 17; use tie instead
|
||||
std::tie(std::ignore, std::ignore, children) =
|
||||
hashesAndChildren_.getHashesAndChildren();
|
||||
iterNonEmptyChildIndexes(
|
||||
[&](auto branchNum, auto indexNum) { children[indexNum].reset(); });
|
||||
}
|
||||
|
||||
template <class F>
|
||||
void
|
||||
SHAMapInnerNode::iterChildren(F&& f) const
|
||||
@@ -68,17 +77,17 @@ SHAMapInnerNode::getChildIndex(int i) const
|
||||
return hashesAndChildren_.getChildIndex(isBranch_, i);
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapInnerNode::clone(std::uint32_t cowid) const
|
||||
{
|
||||
auto const branchCount = getBranchCount();
|
||||
auto const thisIsSparse = !hashesAndChildren_.isDense();
|
||||
auto p = std::make_shared<SHAMapInnerNode>(cowid, branchCount);
|
||||
auto p = intr_ptr::make_shared<SHAMapInnerNode>(cowid, branchCount);
|
||||
p->hash_ = hash_;
|
||||
p->isBranch_ = isBranch_;
|
||||
p->fullBelowGen_ = fullBelowGen_;
|
||||
SHAMapHash *cloneHashes, *thisHashes;
|
||||
std::shared_ptr<SHAMapTreeNode>*cloneChildren, *thisChildren;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>*cloneChildren, *thisChildren;
|
||||
// structured bindings can't be captured in c++ 17; use tie instead
|
||||
std::tie(std::ignore, cloneHashes, cloneChildren) =
|
||||
p->hashesAndChildren_.getHashesAndChildren();
|
||||
@@ -119,7 +128,7 @@ SHAMapInnerNode::clone(std::uint32_t cowid) const
|
||||
return p;
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapInnerNode::makeFullInner(
|
||||
Slice data,
|
||||
SHAMapHash const& hash,
|
||||
@@ -129,7 +138,7 @@ SHAMapInnerNode::makeFullInner(
|
||||
if (data.size() != branchFactor * uint256::bytes)
|
||||
Throw<std::runtime_error>("Invalid FI node");
|
||||
|
||||
auto ret = std::make_shared<SHAMapInnerNode>(0, branchFactor);
|
||||
auto ret = intr_ptr::make_shared<SHAMapInnerNode>(0, branchFactor);
|
||||
|
||||
SerialIter si(data);
|
||||
|
||||
@@ -153,7 +162,7 @@ SHAMapInnerNode::makeFullInner(
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapInnerNode::makeCompressedInner(Slice data)
|
||||
{
|
||||
// A compressed inner node is serialized as a series of 33 byte chunks,
|
||||
@@ -166,7 +175,7 @@ SHAMapInnerNode::makeCompressedInner(Slice data)
|
||||
|
||||
SerialIter si(data);
|
||||
|
||||
auto ret = std::make_shared<SHAMapInnerNode>(0, branchFactor);
|
||||
auto ret = intr_ptr::make_shared<SHAMapInnerNode>(0, branchFactor);
|
||||
|
||||
auto hashes = ret->hashesAndChildren_.getHashes();
|
||||
|
||||
@@ -208,13 +217,13 @@ void
|
||||
SHAMapInnerNode::updateHashDeep()
|
||||
{
|
||||
SHAMapHash* hashes;
|
||||
std::shared_ptr<SHAMapTreeNode>* children;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>* children;
|
||||
// structured bindings can't be captured in c++ 17; use tie instead
|
||||
std::tie(std::ignore, hashes, children) =
|
||||
hashesAndChildren_.getHashesAndChildren();
|
||||
iterNonEmptyChildIndexes([&](auto branchNum, auto indexNum) {
|
||||
if (children[indexNum] != nullptr)
|
||||
hashes[indexNum] = children[indexNum]->getHash();
|
||||
if (auto p = children[indexNum].get())
|
||||
hashes[indexNum] = p->getHash();
|
||||
});
|
||||
updateHash();
|
||||
}
|
||||
@@ -272,7 +281,7 @@ SHAMapInnerNode::getString(const SHAMapNodeID& id) const
|
||||
|
||||
// We are modifying an inner node
|
||||
void
|
||||
SHAMapInnerNode::setChild(int m, std::shared_ptr<SHAMapTreeNode> child)
|
||||
SHAMapInnerNode::setChild(int m, intr_ptr::SharedPtr<SHAMapTreeNode> child)
|
||||
{
|
||||
XRPL_ASSERT(
|
||||
(m >= 0) && (m < branchFactor),
|
||||
@@ -314,7 +323,9 @@ SHAMapInnerNode::setChild(int m, std::shared_ptr<SHAMapTreeNode> child)
|
||||
|
||||
// finished modifying, now make shareable
|
||||
void
|
||||
SHAMapInnerNode::shareChild(int m, std::shared_ptr<SHAMapTreeNode> const& child)
|
||||
SHAMapInnerNode::shareChild(
|
||||
int m,
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> const& child)
|
||||
{
|
||||
XRPL_ASSERT(
|
||||
(m >= 0) && (m < branchFactor),
|
||||
@@ -349,7 +360,7 @@ SHAMapInnerNode::getChildPointer(int branch)
|
||||
return hashesAndChildren_.getChildren()[index].get();
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapInnerNode::getChild(int branch)
|
||||
{
|
||||
XRPL_ASSERT(
|
||||
@@ -378,10 +389,10 @@ SHAMapInnerNode::getChildHash(int m) const
|
||||
return zeroSHAMapHash;
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapInnerNode::canonicalizeChild(
|
||||
int branch,
|
||||
std::shared_ptr<SHAMapTreeNode> node)
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node)
|
||||
{
|
||||
XRPL_ASSERT(
|
||||
branch >= 0 && branch < branchFactor,
|
||||
|
||||
@@ -47,10 +47,10 @@ SHAMap::visitNodes(std::function<bool(SHAMapTreeNode&)> const& function) const
|
||||
if (!root_->isInner())
|
||||
return;
|
||||
|
||||
using StackEntry = std::pair<int, std::shared_ptr<SHAMapInnerNode>>;
|
||||
using StackEntry = std::pair<int, intr_ptr::SharedPtr<SHAMapInnerNode>>;
|
||||
std::stack<StackEntry, std::vector<StackEntry>> stack;
|
||||
|
||||
auto node = std::static_pointer_cast<SHAMapInnerNode>(root_);
|
||||
auto node = intr_ptr::static_pointer_cast<SHAMapInnerNode>(root_);
|
||||
int pos = 0;
|
||||
|
||||
while (true)
|
||||
@@ -59,8 +59,8 @@ SHAMap::visitNodes(std::function<bool(SHAMapTreeNode&)> const& function) const
|
||||
{
|
||||
if (!node->isEmptyBranch(pos))
|
||||
{
|
||||
std::shared_ptr<SHAMapTreeNode> child =
|
||||
descendNoStore(node, pos);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> child =
|
||||
descendNoStore(*node, pos);
|
||||
if (!function(*child))
|
||||
return;
|
||||
|
||||
@@ -79,7 +79,8 @@ SHAMap::visitNodes(std::function<bool(SHAMapTreeNode&)> const& function) const
|
||||
}
|
||||
|
||||
// descend to the child's first position
|
||||
node = std::static_pointer_cast<SHAMapInnerNode>(child);
|
||||
node =
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(child);
|
||||
pos = 0;
|
||||
}
|
||||
}
|
||||
@@ -115,7 +116,7 @@ SHAMap::visitDifferences(
|
||||
|
||||
if (root_->isLeaf())
|
||||
{
|
||||
auto leaf = std::static_pointer_cast<SHAMapLeafNode>(root_);
|
||||
auto leaf = intr_ptr::static_pointer_cast<SHAMapLeafNode>(root_);
|
||||
if (!have ||
|
||||
!have->hasLeafNode(leaf->peekItem()->key(), leaf->getHash()))
|
||||
function(*root_);
|
||||
@@ -202,7 +203,8 @@ SHAMap::gmn_ProcessNodes(MissingNodes& mn, MissingNodes::StackEntry& se)
|
||||
mn.filter_,
|
||||
pending,
|
||||
[node, nodeID, branch, &mn](
|
||||
std::shared_ptr<SHAMapTreeNode> found, SHAMapHash const&) {
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> found,
|
||||
SHAMapHash const&) {
|
||||
// a read completed asynchronously
|
||||
std::unique_lock<std::mutex> lock{mn.deferLock_};
|
||||
mn.finishedReads_.emplace_back(
|
||||
@@ -271,7 +273,7 @@ SHAMap::gmn_ProcessDeferredReads(MissingNodes& mn)
|
||||
SHAMapInnerNode*,
|
||||
SHAMapNodeID,
|
||||
int,
|
||||
std::shared_ptr<SHAMapTreeNode>>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>>
|
||||
deferredNode;
|
||||
{
|
||||
std::unique_lock<std::mutex> lock{mn.deferLock_};
|
||||
@@ -327,7 +329,7 @@ SHAMap::getMissingNodes(int max, SHAMapSyncFilter* filter)
|
||||
f_.getFullBelowCache()->getGeneration());
|
||||
|
||||
if (!root_->isInner() ||
|
||||
std::static_pointer_cast<SHAMapInnerNode>(root_)->isFullBelow(
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(root_)->isFullBelow(
|
||||
mn.generation_))
|
||||
{
|
||||
clearSynching();
|
||||
@@ -807,8 +809,9 @@ SHAMap::getProofPath(uint256 const& key) const
|
||||
}
|
||||
|
||||
if (auto const& node = stack.top().first; !node || node->isInner() ||
|
||||
std::static_pointer_cast<SHAMapLeafNode>(node)->peekItem()->key() !=
|
||||
key)
|
||||
intr_ptr::static_pointer_cast<SHAMapLeafNode>(node)
|
||||
->peekItem()
|
||||
->key() != key)
|
||||
{
|
||||
JLOG(journal_.debug()) << "no path to " << key;
|
||||
return {};
|
||||
|
||||
@@ -23,18 +23,16 @@
|
||||
#include <xrpld/shamap/SHAMapTxLeafNode.h>
|
||||
#include <xrpld/shamap/SHAMapTxPlusMetaLeafNode.h>
|
||||
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/basics/IntrusivePointer.ipp>
|
||||
#include <xrpl/basics/Slice.h>
|
||||
#include <xrpl/basics/contract.h>
|
||||
#include <xrpl/basics/safe_cast.h>
|
||||
#include <xrpl/protocol/HashPrefix.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
|
||||
#include <openssl/sha.h>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapTreeNode::makeTransaction(
|
||||
Slice data,
|
||||
SHAMapHash const& hash,
|
||||
@@ -44,12 +42,13 @@ SHAMapTreeNode::makeTransaction(
|
||||
make_shamapitem(sha512Half(HashPrefix::transactionID, data), data);
|
||||
|
||||
if (hashValid)
|
||||
return std::make_shared<SHAMapTxLeafNode>(std::move(item), 0, hash);
|
||||
return intr_ptr::make_shared<SHAMapTxLeafNode>(
|
||||
std::move(item), 0, hash);
|
||||
|
||||
return std::make_shared<SHAMapTxLeafNode>(std::move(item), 0);
|
||||
return intr_ptr::make_shared<SHAMapTxLeafNode>(std::move(item), 0);
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapTreeNode::makeTransactionWithMeta(
|
||||
Slice data,
|
||||
SHAMapHash const& hash,
|
||||
@@ -72,13 +71,13 @@ SHAMapTreeNode::makeTransactionWithMeta(
|
||||
auto item = make_shamapitem(tag, s.slice());
|
||||
|
||||
if (hashValid)
|
||||
return std::make_shared<SHAMapTxPlusMetaLeafNode>(
|
||||
return intr_ptr::make_shared<SHAMapTxPlusMetaLeafNode>(
|
||||
std::move(item), 0, hash);
|
||||
|
||||
return std::make_shared<SHAMapTxPlusMetaLeafNode>(std::move(item), 0);
|
||||
return intr_ptr::make_shared<SHAMapTxPlusMetaLeafNode>(std::move(item), 0);
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapTreeNode::makeAccountState(
|
||||
Slice data,
|
||||
SHAMapHash const& hash,
|
||||
@@ -104,13 +103,14 @@ SHAMapTreeNode::makeAccountState(
|
||||
auto item = make_shamapitem(tag, s.slice());
|
||||
|
||||
if (hashValid)
|
||||
return std::make_shared<SHAMapAccountStateLeafNode>(
|
||||
return intr_ptr::make_shared<SHAMapAccountStateLeafNode>(
|
||||
std::move(item), 0, hash);
|
||||
|
||||
return std::make_shared<SHAMapAccountStateLeafNode>(std::move(item), 0);
|
||||
return intr_ptr::make_shared<SHAMapAccountStateLeafNode>(
|
||||
std::move(item), 0);
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapTreeNode::makeFromWire(Slice rawNode)
|
||||
{
|
||||
if (rawNode.empty())
|
||||
@@ -142,7 +142,7 @@ SHAMapTreeNode::makeFromWire(Slice rawNode)
|
||||
"wire: Unknown type (" + std::to_string(type) + ")");
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapTreeNode::makeFromPrefix(Slice rawNode, SHAMapHash const& hash)
|
||||
{
|
||||
if (rawNode.size() < 4)
|
||||
|
||||
@@ -22,6 +22,8 @@
 
 #include <xrpld/shamap/SHAMapTreeNode.h>
 
+#include <xrpl/basics/IntrusivePointer.h>
+
 #include <array>
 #include <cstdint>
 #include <optional>
@@ -57,6 +59,7 @@ namespace ripple {
  */
 class TaggedPointer
 {
 private:
     static_assert(
         alignof(SHAMapHash) >= 4,
         "Bad alignment: Tag pointer requires low two bits to be zero.");
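The static_assert guards the trick that gives TaggedPointer its name: storage aligned to at least 4 bytes always has the low two bits of its address clear, so those bits can carry a small tag that is masked off before the pointer is used. A standalone, runnable sketch of that invariant (stand-in types, not rippled code):

#include <cassert>
#include <cstdint>

int main()
{
    alignas(4) static std::uint32_t storage = 42;
    auto const raw = reinterpret_cast<std::uintptr_t>(&storage);
    assert((raw & 0b11) == 0);  // low two bits free, by alignment

    std::uintptr_t const tagged = raw | 0b10;  // stash a 2-bit tag
    auto const* p =
        reinterpret_cast<std::uint32_t const*>(tagged & ~std::uintptr_t{0b11});
    assert(*p == 42 && (tagged & 0b11) == 0b10);  // both recovered intact
}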
@@ -170,7 +173,7 @@ public:
         of each array.
      */
     [[nodiscard]] std::
-        tuple<std::uint8_t, SHAMapHash*, std::shared_ptr<SHAMapTreeNode>*>
+        tuple<std::uint8_t, SHAMapHash*, intr_ptr::SharedPtr<SHAMapTreeNode>*>
     getHashesAndChildren() const;
 
     /** Get the `hashes` array */
@@ -178,7 +181,7 @@ public:
     getHashes() const;
 
     /** Get the `children` array */
-    [[nodiscard]] std::shared_ptr<SHAMapTreeNode>*
+    [[nodiscard]] intr_ptr::SharedPtr<SHAMapTreeNode>*
     getChildren() const;
 
     /** Call the `f` callback for all 16 (branchFactor) branches - even if
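A typical consumer of getHashesAndChildren unpacks the tuple with structured bindings and walks the two parallel arrays together. Illustrative fragment only (tp and visit are placeholders; the tuple shape is the one declared above):

auto [numAllocated, hashes, children] = tp.getHashesAndChildren();
for (std::uint8_t i = 0; i < numAllocated; ++i)
{
    if (children[i])
        visit(hashes[i], *children[i]);
}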
@@ -50,7 +50,7 @@ static_assert(
 // contains multiple chunks. This is the terminology the boost documentation
 // uses. Pools use "Simple Segregated Storage" as their storage format.
 constexpr size_t elementSizeBytes =
-    (sizeof(SHAMapHash) + sizeof(std::shared_ptr<SHAMapTreeNode>));
+    (sizeof(SHAMapHash) + sizeof(intr_ptr::SharedPtr<SHAMapTreeNode>));
 
 constexpr size_t blockSizeBytes = kilobytes(512);
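elementSizeBytes is where the commit's memory goal becomes concrete: each allocated slot of an inner node holds one hash plus one child pointer, and on typical 64-bit ABIs a std::shared_ptr is two machine words (object pointer plus control-block pointer) while an intrusive pointer is one raw pointer. With up to 16 (branchFactor) slots per node, that is up to 16 * 8 = 128 bytes saved per fully allocated inner node, before counting the eliminated control blocks. A standalone sketch of the size difference (sizes are implementation-defined, hence prints rather than asserts):

#include <cstdio>
#include <memory>

struct Node;  // an incomplete type is enough to size the pointers

int main()
{
    std::printf("sizeof(std::shared_ptr<Node>) = %zu\n",
        sizeof(std::shared_ptr<Node>));
    std::printf("sizeof(Node*)                 = %zu\n", sizeof(Node*));
}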
@@ -240,7 +240,7 @@ TaggedPointer::destroyHashesAndChildren()
     for (std::size_t i = 0; i < numAllocated; ++i)
     {
         hashes[i].~SHAMapHash();
-        children[i].~shared_ptr<SHAMapTreeNode>();
+        std::destroy_at(&children[i]);
     }
 
     auto [tag, ptr] = decode();
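std::destroy_at invokes the destructor of whatever object the pointer names, so the call site no longer spells out the pointer type in an explicit pseudo-destructor call, and it stays correct if the child type changes again. A standalone sketch of the idiom against raw storage (stand-in type, not rippled code):

#include <memory>
#include <new>
#include <string>

int main()
{
    alignas(std::string) unsigned char buf[sizeof(std::string)];
    auto* s = new (buf) std::string("placement-constructed");
    std::destroy_at(s);  // same as s->~basic_string(), with the type deduced
}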
@@ -397,8 +397,10 @@ inline TaggedPointer::TaggedPointer(
                 {
                     // keep
                     new (&dstHashes[dstIndex]) SHAMapHash{srcHashes[srcIndex]};
-                    new (&dstChildren[dstIndex]) std::shared_ptr<SHAMapTreeNode>{
-                        std::move(srcChildren[srcIndex])};
+                    new (&dstChildren[dstIndex])
+                        intr_ptr::SharedPtr<SHAMapTreeNode>{
+                            std::move(srcChildren[srcIndex])};
                     ++dstIndex;
                     ++srcIndex;
                 }
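The "keep" branch moves existing children into freshly allocated raw storage with placement new. Moving an intrusive SharedPtr is a single pointer copy plus nulling the source, the same no-refcount-traffic property std::shared_ptr's move has, but in half the storage. A standalone sketch of the placement-new-from-move pattern itself (stand-in element type):

#include <memory>
#include <new>
#include <string>
#include <utility>

int main()
{
    std::string src[2] = {"a", "b"};
    alignas(std::string) unsigned char raw[2 * sizeof(std::string)];
    auto* dst = reinterpret_cast<std::string*>(raw);
    for (int i = 0; i < 2; ++i)
        new (&dst[i]) std::string{std::move(src[i])};  // as in the hunk above
    std::destroy(dst, dst + 2);  // mirrors destroyHashesAndChildren
}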
@@ -410,7 +412,7 @@ inline TaggedPointer::TaggedPointer(
                 {
                     new (&dstHashes[dstIndex]) SHAMapHash{};
                     new (&dstChildren[dstIndex])
-                        std::shared_ptr<SHAMapTreeNode>{};
+                        intr_ptr::SharedPtr<SHAMapTreeNode>{};
                     ++dstIndex;
                 }
             }
@@ -418,7 +420,8 @@ inline TaggedPointer::TaggedPointer(
             {
                 // add
                 new (&dstHashes[dstIndex]) SHAMapHash{};
-                new (&dstChildren[dstIndex]) std::shared_ptr<SHAMapTreeNode>{};
+                new (&dstChildren[dstIndex])
+                    intr_ptr::SharedPtr<SHAMapTreeNode>{};
                 ++dstIndex;
                 if (srcIsDense)
                 {
@@ -432,7 +435,7 @@ inline TaggedPointer::TaggedPointer(
                 {
                     new (&dstHashes[dstIndex]) SHAMapHash{};
                     new (&dstChildren[dstIndex])
-                        std::shared_ptr<SHAMapTreeNode>{};
+                        intr_ptr::SharedPtr<SHAMapTreeNode>{};
                     ++dstIndex;
                 }
                 if (srcIsDense)
@@ -449,7 +452,7 @@ inline TaggedPointer::TaggedPointer(
         for (int i = dstIndex; i < dstNumAllocated; ++i)
         {
             new (&dstHashes[i]) SHAMapHash{};
-            new (&dstChildren[i]) std::shared_ptr<SHAMapTreeNode>{};
+            new (&dstChildren[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
         }
         *this = std::move(dst);
     }
@@ -469,7 +472,7 @@ inline TaggedPointer::TaggedPointer(
     // allocate hashes and children, but do not run constructors
     TaggedPointer newHashesAndChildren{RawAllocateTag{}, toAllocate};
     SHAMapHash *newHashes, *oldHashes;
-    std::shared_ptr<SHAMapTreeNode>*newChildren, *oldChildren;
+    intr_ptr::SharedPtr<SHAMapTreeNode>*newChildren, *oldChildren;
     std::uint8_t newNumAllocated;
     // structured bindings can't be captured in c++ 17; use tie instead
     std::tie(newNumAllocated, newHashes, newChildren) =
@@ -481,7 +484,7 @@ inline TaggedPointer::TaggedPointer(
         // new arrays are dense, old arrays are sparse
         iterNonEmptyChildIndexes(isBranch, [&](auto branchNum, auto indexNum) {
             new (&newHashes[branchNum]) SHAMapHash{oldHashes[indexNum]};
-            new (&newChildren[branchNum]) std::shared_ptr<SHAMapTreeNode>{
+            new (&newChildren[branchNum]) intr_ptr::SharedPtr<SHAMapTreeNode>{
                 std::move(oldChildren[indexNum])};
         });
         // Run the constructors for the remaining elements
@@ -490,7 +493,7 @@ inline TaggedPointer::TaggedPointer(
             if ((1 << i) & isBranch)
                 continue;
             new (&newHashes[i]) SHAMapHash{};
-            new (&newChildren[i]) std::shared_ptr<SHAMapTreeNode>{};
+            new (&newChildren[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
         }
     }
     else
@@ -501,7 +504,7 @@ inline TaggedPointer::TaggedPointer(
             new (&newHashes[curCompressedIndex])
                 SHAMapHash{oldHashes[indexNum]};
             new (&newChildren[curCompressedIndex])
-                std::shared_ptr<SHAMapTreeNode>{
+                intr_ptr::SharedPtr<SHAMapTreeNode>{
                     std::move(oldChildren[indexNum])};
             ++curCompressedIndex;
         });
@@ -509,7 +512,7 @@ inline TaggedPointer::TaggedPointer(
         for (int i = curCompressedIndex; i < newNumAllocated; ++i)
         {
             new (&newHashes[i]) SHAMapHash{};
-            new (&newChildren[i]) std::shared_ptr<SHAMapTreeNode>{};
+            new (&newChildren[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
        }
    }
@@ -523,7 +526,7 @@ inline TaggedPointer::TaggedPointer(std::uint8_t numChildren)
     for (std::size_t i = 0; i < numAllocated; ++i)
     {
         new (&hashes[i]) SHAMapHash{};
-        new (&children[i]) std::shared_ptr<SHAMapTreeNode>{};
+        new (&children[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
     }
 }
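This constructor value-initializes every slot: empty hashes and null children. For each array individually, the loop is equivalent to std::uninitialized_value_construct_n over raw storage; the explicit loop remains because the two arrays are interleaved in one pass. A standalone sketch of the standard-library equivalent (stand-in element type):

#include <memory>

int main()
{
    alignas(int) unsigned char raw[16 * sizeof(int)];
    auto* first = reinterpret_cast<int*>(raw);
    std::uninitialized_value_construct_n(first, 16);  // all zeros, like {}
    std::destroy_n(first, 16);  // trivial for int, shown for symmetry
}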
@@ -562,14 +565,15 @@ TaggedPointer::isDense() const
 }
 
 [[nodiscard]] inline std::
-    tuple<std::uint8_t, SHAMapHash*, std::shared_ptr<SHAMapTreeNode>*>
+    tuple<std::uint8_t, SHAMapHash*, intr_ptr::SharedPtr<SHAMapTreeNode>*>
 TaggedPointer::getHashesAndChildren() const
 {
     auto const [tag, ptr] = decode();
     auto const hashes = reinterpret_cast<SHAMapHash*>(ptr);
     std::uint8_t numAllocated = boundaries[tag];
-    auto const children = reinterpret_cast<std::shared_ptr<SHAMapTreeNode>*>(
-        hashes + numAllocated);
+    auto const children =
+        reinterpret_cast<intr_ptr::SharedPtr<SHAMapTreeNode>*>(
+            hashes + numAllocated);
     return {numAllocated, hashes, children};
 };
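getHashesAndChildren decodes the single allocation that backs a node: numAllocated hashes followed immediately by numAllocated child pointers, so the children array's base is just hashes + numAllocated. A standalone sketch of that layout arithmetic with stand-in types:

#include <cstdint>
#include <cstdio>

struct Hash { std::uint8_t bytes[32]; };
struct ChildPtr { void* p; };

int main()
{
    constexpr std::uint8_t n = 16;
    alignas(ChildPtr) unsigned char block[n * (sizeof(Hash) + sizeof(ChildPtr))];
    auto* hashes = reinterpret_cast<Hash*>(block);
    auto* children = reinterpret_cast<ChildPtr*>(hashes + n);  // as above
    std::printf("children begin %zu bytes into the block\n",
        static_cast<std::size_t>(
            reinterpret_cast<unsigned char*>(children) - block));
}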
@@ -579,7 +583,7 @@ TaggedPointer::getHashes() const
     return reinterpret_cast<SHAMapHash*>(tp_ & ptrMask);
 };
 
-[[nodiscard]] inline std::shared_ptr<SHAMapTreeNode>*
+[[nodiscard]] inline intr_ptr::SharedPtr<SHAMapTreeNode>*
 TaggedPointer::getChildren() const
 {
     auto [unused1, unused2, result] = getHashesAndChildren();