diff --git a/CMakeLists.txt b/CMakeLists.txt index 03dba51d0c..a9f063db57 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,6 +16,18 @@ set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) +if(CMAKE_CXX_COMPILER_ID MATCHES "GNU") + # GCC-specific fixes + add_compile_options(-Wno-unknown-pragmas -Wno-subobject-linkage) + # -Wno-subobject-linkage can be removed when we upgrade GCC version to at least 13.3 +elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + # Clang-specific fixes + add_compile_options(-Wno-unknown-warning-option) # Ignore unknown warning options +elseif(MSVC) + # MSVC-specific fixes + add_compile_options(/wd4068) # Ignore unknown pragmas +endif() + # make GIT_COMMIT_HASH define available to all sources find_package(Git) if(Git_FOUND) diff --git a/include/xrpl/basics/IntrusivePointer.h b/include/xrpl/basics/IntrusivePointer.h new file mode 100644 index 0000000000..b28ea4e6b8 --- /dev/null +++ b/include/xrpl/basics/IntrusivePointer.h @@ -0,0 +1,515 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_BASICS_INTRUSIVEPOINTER_H_INCLUDED +#define RIPPLE_BASICS_INTRUSIVEPOINTER_H_INCLUDED + +#include +#include +#include +#include + +namespace ripple { + +//------------------------------------------------------------------------------ + +/** Tag to create an intrusive pointer from another intrusive pointer by using a + static cast. This is useful to create an intrusive pointer to a derived + class from an intrusive pointer to a base class. +*/ +struct StaticCastTagSharedIntrusive +{ +}; + +/** Tag to create an intrusive pointer from another intrusive pointer by using a + dynamic cast. This is useful to create an intrusive pointer to a derived + class from an intrusive pointer to a base class. If the cast fails an empty + (null) intrusive pointer is created. +*/ +struct DynamicCastTagSharedIntrusive +{ +}; + +/** When creating or adopting a raw pointer, controls whether the strong count + is incremented or not. Use this tag to increment the strong count. +*/ +struct SharedIntrusiveAdoptIncrementStrongTag +{ +}; + +/** When creating or adopting a raw pointer, controls whether the strong count + is incremented or not. Use this tag to leave the strong count unchanged. 
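For illustration, a minimal sketch of the difference between the two adopt tags (the `Node` type here is hypothetical; it stands for any class that inherits IntrusiveRefCounts, introduced later in this patch). A freshly constructed IntrusiveRefCounts object already starts with a strong count of one, so adopting the pointer returned by `new` uses the no-increment tag, while adopting a raw pointer that is already co-owned elsewhere uses the increment tag:

    SharedIntrusive<Node> a{new Node{}, SharedIntrusiveAdoptNoIncrementTag{}};
    SharedIntrusive<Node> b{a.get(), SharedIntrusiveAdoptIncrementStrongTag{}};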
+*/ +struct SharedIntrusiveAdoptNoIncrementTag +{ +}; + +//------------------------------------------------------------------------------ +// + +template +concept CAdoptTag = std::is_same_v || + std::is_same_v; + +//------------------------------------------------------------------------------ + +/** A shared intrusive pointer class that supports weak pointers. + + This is meant to be used for SHAMapInnerNodes, but may be useful for other + cases. Since the reference counts are stored on the pointee, the pointee is + not destroyed until both the strong _and_ weak pointer counts go to zero. + When the strong pointer count goes to zero, the "partialDestructor" is + called. This can be used to destroy as much of the object as possible while + still retaining the reference counts. For example, for SHAMapInnerNodes the + children may be reset in that function. Note that std::shared_poiner WILL + run the destructor when the strong count reaches zero, but may not free the + memory used by the object until the weak count reaches zero. In rippled, we + typically allocate shared pointers with the `make_shared` function. When + that is used, the memory is not reclaimed until the weak count reaches zero. +*/ +template +class SharedIntrusive +{ +public: + SharedIntrusive() = default; + + template + SharedIntrusive(T* p, TAdoptTag) noexcept; + + SharedIntrusive(SharedIntrusive const& rhs); + + template + // TODO: convertible_to isn't quite right. That include a static castable. + // Find the right concept. + requires std::convertible_to + SharedIntrusive(SharedIntrusive const& rhs); + + SharedIntrusive(SharedIntrusive&& rhs); + + template + requires std::convertible_to + SharedIntrusive(SharedIntrusive&& rhs); + + SharedIntrusive& + operator=(SharedIntrusive const& rhs); + + bool + operator!=(std::nullptr_t) const; + + bool + operator==(std::nullptr_t) const; + + template + requires std::convertible_to + SharedIntrusive& + operator=(SharedIntrusive const& rhs); + + SharedIntrusive& + operator=(SharedIntrusive&& rhs); + + template + requires std::convertible_to + SharedIntrusive& + operator=(SharedIntrusive&& rhs); + + /** Adopt the raw pointer. The strong reference may or may not be + incremented, depending on the TAdoptTag + */ + template + void + adopt(T* p); + + ~SharedIntrusive(); + + /** Create a new SharedIntrusive by statically casting the pointer + controlled by the rhs param. + */ + template + SharedIntrusive( + StaticCastTagSharedIntrusive, + SharedIntrusive const& rhs); + + /** Create a new SharedIntrusive by statically casting the pointer + controlled by the rhs param. + */ + template + SharedIntrusive(StaticCastTagSharedIntrusive, SharedIntrusive&& rhs); + + /** Create a new SharedIntrusive by dynamically casting the pointer + controlled by the rhs param. + */ + template + SharedIntrusive( + DynamicCastTagSharedIntrusive, + SharedIntrusive const& rhs); + + /** Create a new SharedIntrusive by dynamically casting the pointer + controlled by the rhs param. + */ + template + SharedIntrusive(DynamicCastTagSharedIntrusive, SharedIntrusive&& rhs); + + T& + operator*() const noexcept; + + T* + operator->() const noexcept; + + explicit + operator bool() const noexcept; + + /** Set the pointer to null, decrement the strong count, and run the + appropriate release action. + */ + void + reset(); + + /** Get the raw pointer */ + T* + get() const; + + /** Return the strong count */ + std::size_t + use_count() const; + + template + friend SharedIntrusive + make_SharedIntrusive(Args&&... 
args); + + template + friend class SharedIntrusive; + + template + friend class SharedWeakUnion; + + template + friend class WeakIntrusive; + +private: + /** Return the raw pointer held by this object. */ + T* + unsafeGetRawPtr() const; + + /** Exchange the current raw pointer held by this object with the given + pointer. Decrement the strong count of the raw pointer previously held + by this object and run the appropriate release action. + */ + void + unsafeReleaseAndStore(T* next); + + /** Set the raw pointer directly. This is wrapped in a function so the class + can support both atomic and non-atomic pointers in a future patch. + */ + void + unsafeSetRawPtr(T* p); + + /** Exchange the raw pointer directly. + This sets the raw pointer to the given value and returns the previous + value. This is wrapped in a function so the class can support both + atomic and non-atomic pointers in a future patch. + */ + T* + unsafeExchange(T* p); + + /** pointer to the type with an intrusive count */ + T* ptr_{nullptr}; +}; + +//------------------------------------------------------------------------------ + +/** A weak intrusive pointer class for the SharedIntrusive pointer class. + +Note that this weak pointer class asks differently from normal weak pointer +classes. When the strong pointer count goes to zero, the "partialDestructor" +is called. See the comment on SharedIntrusive for a fuller explanation. +*/ +template +class WeakIntrusive +{ +public: + WeakIntrusive() = default; + + WeakIntrusive(WeakIntrusive const& rhs); + + WeakIntrusive(WeakIntrusive&& rhs); + + WeakIntrusive(SharedIntrusive const& rhs); + + // There is no move constructor from a strong intrusive ptr because + // moving would be move expensive than copying in this case (the strong + // ref would need to be decremented) + WeakIntrusive(SharedIntrusive const&& rhs) = delete; + + // Since there are no current use cases for copy assignment in + // WeakIntrusive, we delete this operator to simplify the implementation. If + // a need arises in the future, we can reintroduce it with proper + // consideration." + WeakIntrusive& + operator=(WeakIntrusive const&) = delete; + + template + requires std::convertible_to + WeakIntrusive& + operator=(SharedIntrusive const& rhs); + + /** Adopt the raw pointer and increment the weak count. */ + void + adopt(T* ptr); + + ~WeakIntrusive(); + + /** Get a strong pointer from the weak pointer, if possible. This will + only return a seated pointer if the strong count on the raw pointer + is non-zero before locking. + */ + SharedIntrusive + lock() const; + + /** Return true if the strong count is zero. */ + bool + expired() const; + + /** Set the pointer to null and decrement the weak count. + + Note: This may run the destructor if the strong count is zero. + */ + void + reset(); + +private: + T* ptr_ = nullptr; + + /** Decrement the weak count. This does _not_ set the raw pointer to + null. + + Note: This may run the destructor if the strong count is zero. + */ + void + unsafeReleaseNoStore(); +}; + +//------------------------------------------------------------------------------ + +/** A combination of a strong and a weak intrusive pointer stored in the + space of a single pointer. + + This class is similar to a `std::variant` + with some optimizations. In particular, it uses a low-order bit to + determine if the raw pointer represents a strong pointer or a weak + pointer. It can also be quickly switched between its strong pointer and + weak pointer representations. 
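A sketch of the tagging scheme described above (the helper names are hypothetical; it relies on alignof(T) >= 2 so the low bit of any valid pointer is free):

    template <class T>
    std::uintptr_t
    tagAsWeak(T* p)
    {
        static_assert(alignof(T) >= 2, "low bit must be free for the tag");
        return reinterpret_cast<std::uintptr_t>(p) | 1u;  // low bit 1 => weak
    }

    template <class T>
    T*
    stripTag(std::uintptr_t tp)
    {
        return reinterpret_cast<T*>(tp & ~std::uintptr_t{1});  // mask tag off
    }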
This class is useful for storing intrusive + pointers in tagged caches. + */ + +template +class SharedWeakUnion +{ + // Tagged pointer. Low bit determines if this is a strong or a weak + // pointer. The low bit must be masked to zero when converting back to a + // pointer. If the low bit is '1', this is a weak pointer. + static_assert( + alignof(T) >= 2, + "Bad alignment: Combo pointer requires low bit to be zero"); + +public: + SharedWeakUnion() = default; + + SharedWeakUnion(SharedWeakUnion const& rhs); + + template + requires std::convertible_to + SharedWeakUnion(SharedIntrusive const& rhs); + + SharedWeakUnion(SharedWeakUnion&& rhs); + + template + requires std::convertible_to + SharedWeakUnion(SharedIntrusive&& rhs); + + SharedWeakUnion& + operator=(SharedWeakUnion const& rhs); + + template + requires std::convertible_to + SharedWeakUnion& + operator=(SharedIntrusive const& rhs); + + template + requires std::convertible_to + SharedWeakUnion& + operator=(SharedIntrusive&& rhs); + + ~SharedWeakUnion(); + + /** Return a strong pointer if this is already a strong pointer (i.e. + don't lock the weak pointer. Use the `lock` method if that's what's + needed) + */ + SharedIntrusive + getStrong() const; + + /** Return true if this is a strong pointer and the strong pointer is + seated. + */ + explicit + operator bool() const noexcept; + + /** Set the pointer to null, decrement the appropriate ref count, and + run the appropriate release action. + */ + void + reset(); + + /** If this is a strong pointer, return the raw pointer. Otherwise + return null. + */ + T* + get() const; + + /** If this is a strong pointer, return the strong count. Otherwise + * return 0 + */ + std::size_t + use_count() const; + + /** Return true if there is a non-zero strong count. */ + bool + expired() const; + + /** If this is a strong pointer, return the strong pointer. Otherwise + attempt to lock the weak pointer. + */ + SharedIntrusive + lock() const; + + /** Return true is this represents a strong pointer. */ + bool + isStrong() const; + + /** Return true is this represents a weak pointer. */ + bool + isWeak() const; + + /** If this is a weak pointer, attempt to convert it to a strong + pointer. + + @return true if successfully converted to a strong pointer (or was + already a strong pointer). Otherwise false. + */ + bool + convertToStrong(); + + /** If this is a strong pointer, attempt to convert it to a weak + pointer. + + @return false if the pointer is null. Otherwise return true. + */ + bool + convertToWeak(); + +private: + // Tagged pointer. Low bit determines if this is a strong or a weak + // pointer. The low bit must be masked to zero when converting back to a + // pointer. If the low bit is '1', this is a weak pointer. + std::uintptr_t tp_{0}; + static constexpr std::uintptr_t tagMask = 1; + static constexpr std::uintptr_t ptrMask = ~tagMask; + +private: + /** Return the raw pointer held by this object. + */ + T* + unsafeGetRawPtr() const; + + enum class RefStrength { strong, weak }; + /** Set the raw pointer and tag bit directly. + */ + void + unsafeSetRawPtr(T* p, RefStrength rs); + + /** Set the raw pointer and tag bit to all zeros (strong null pointer). + */ + void unsafeSetRawPtr(std::nullptr_t); + + /** Decrement the appropriate ref count, and run the appropriate release + action. Note: this does _not_ set the raw pointer to null. + */ + void + unsafeReleaseNoStore(); +}; + +//------------------------------------------------------------------------------ + +/** Create a shared intrusive pointer. 
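A possible usage sketch (the `Node` type is hypothetical; it must inherit IntrusiveRefCounts and provide a partialDestructor() member):

    struct Node : IntrusiveRefCounts
    {
        int value = 0;
        void partialDestructor() {}  // release what can be released early
    };

    SharedIntrusive<Node> strong = make_SharedIntrusive<Node>();
    WeakIntrusive<Node> weak{strong};   // bumps only the weak count
    if (auto locked = weak.lock())      // strong count still non-zero here
        locked->value = 42;
    strong.reset();  // last strong ref with a weak ref outstanding:
                     // runs Node::partialDestructor()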
+ + Note: unlike std::shared_ptr, where there is an advantage of allocating + the pointer and control block together, there is no benefit for intrusive + pointers. +*/ +template +SharedIntrusive +make_SharedIntrusive(Args&&... args) +{ + auto p = new TT(std::forward(args)...); + + static_assert( + noexcept(SharedIntrusive( + std::declval(), + std::declval())), + "SharedIntrusive constructor should not throw or this can leak " + "memory"); + + return SharedIntrusive(p, SharedIntrusiveAdoptNoIncrementTag{}); +} + +//------------------------------------------------------------------------------ + +namespace intr_ptr { +template +using SharedPtr = SharedIntrusive; + +template +using WeakPtr = WeakIntrusive; + +template +using SharedWeakUnionPtr = SharedWeakUnion; + +template +SharedPtr +make_shared(A&&... args) +{ + return make_SharedIntrusive(std::forward(args)...); +} + +template +SharedPtr +static_pointer_cast(TT const& v) +{ + return SharedPtr(StaticCastTagSharedIntrusive{}, v); +} + +template +SharedPtr +dynamic_pointer_cast(TT const& v) +{ + return SharedPtr(DynamicCastTagSharedIntrusive{}, v); +} +} // namespace intr_ptr +} // namespace ripple +#endif diff --git a/include/xrpl/basics/IntrusivePointer.ipp b/include/xrpl/basics/IntrusivePointer.ipp new file mode 100644 index 0000000000..1ac3f2bab4 --- /dev/null +++ b/include/xrpl/basics/IntrusivePointer.ipp @@ -0,0 +1,740 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_BASICS_INTRUSIVEPOINTER_IPP_INCLUDED +#define RIPPLE_BASICS_INTRUSIVEPOINTER_IPP_INCLUDED + +#include +#include + +#include + +namespace ripple { + +template +template +SharedIntrusive::SharedIntrusive(T* p, TAdoptTag) noexcept : ptr_{p} +{ + if constexpr (std::is_same_v< + TAdoptTag, + SharedIntrusiveAdoptIncrementStrongTag>) + { + if (p) + p->addStrongRef(); + } +} + +template +SharedIntrusive::SharedIntrusive(SharedIntrusive const& rhs) + : ptr_{[&] { + auto p = rhs.unsafeGetRawPtr(); + if (p) + p->addStrongRef(); + return p; + }()} +{ +} + +template +template + requires std::convertible_to +SharedIntrusive::SharedIntrusive(SharedIntrusive const& rhs) + : ptr_{[&] { + auto p = rhs.unsafeGetRawPtr(); + if (p) + p->addStrongRef(); + return p; + }()} +{ +} + +template +SharedIntrusive::SharedIntrusive(SharedIntrusive&& rhs) + : ptr_{rhs.unsafeExchange(nullptr)} +{ +} + +template +template + requires std::convertible_to +SharedIntrusive::SharedIntrusive(SharedIntrusive&& rhs) + : ptr_{rhs.unsafeExchange(nullptr)} +{ +} +template +SharedIntrusive& +SharedIntrusive::operator=(SharedIntrusive const& rhs) +{ + if (this == &rhs) + return *this; + auto p = rhs.unsafeGetRawPtr(); + if (p) + p->addStrongRef(); + unsafeReleaseAndStore(p); + return *this; +} + +template +template +// clang-format off +requires std::convertible_to +// clang-format on +SharedIntrusive& +SharedIntrusive::operator=(SharedIntrusive const& rhs) +{ + if constexpr (std::is_same_v) + { + // This case should never be hit. The operator above will run instead. + // (The normal operator= is needed or it will be marked `deleted`) + if (this == &rhs) + return *this; + } + auto p = rhs.unsafeGetRawPtr(); + if (p) + p->addStrongRef(); + unsafeReleaseAndStore(p); + return *this; +} + +template +SharedIntrusive& +SharedIntrusive::operator=(SharedIntrusive&& rhs) +{ + if (this == &rhs) + return *this; + + unsafeReleaseAndStore(rhs.unsafeExchange(nullptr)); + return *this; +} + +template +template +// clang-format off +requires std::convertible_to +// clang-format on +SharedIntrusive& +SharedIntrusive::operator=(SharedIntrusive&& rhs) +{ + static_assert( + !std::is_same_v, + "This overload should not be instantiated for T == TT"); + + unsafeReleaseAndStore(rhs.unsafeExchange(nullptr)); + return *this; +} + +template +bool +SharedIntrusive::operator!=(std::nullptr_t) const +{ + return this->get() != nullptr; +} + +template +bool +SharedIntrusive::operator==(std::nullptr_t) const +{ + return this->get() == nullptr; +} + +template +template +void +SharedIntrusive::adopt(T* p) +{ + if constexpr (std::is_same_v< + TAdoptTag, + SharedIntrusiveAdoptIncrementStrongTag>) + { + if (p) + p->addStrongRef(); + } + unsafeReleaseAndStore(p); +} + +template +SharedIntrusive::~SharedIntrusive() +{ + unsafeReleaseAndStore(nullptr); +}; + +template +template +SharedIntrusive::SharedIntrusive( + StaticCastTagSharedIntrusive, + SharedIntrusive const& rhs) + : ptr_{[&] { + auto p = static_cast(rhs.unsafeGetRawPtr()); + if (p) + p->addStrongRef(); + return p; + }()} +{ +} + +template +template +SharedIntrusive::SharedIntrusive( + StaticCastTagSharedIntrusive, + SharedIntrusive&& rhs) + : ptr_{static_cast(rhs.unsafeExchange(nullptr))} +{ +} + +template +template +SharedIntrusive::SharedIntrusive( + DynamicCastTagSharedIntrusive, + SharedIntrusive const& rhs) + : ptr_{[&] { + auto p = dynamic_cast(rhs.unsafeGetRawPtr()); + if (p) + p->addStrongRef(); + return 
p; + }()} +{ +} + +template +template +SharedIntrusive::SharedIntrusive( + DynamicCastTagSharedIntrusive, + SharedIntrusive&& rhs) +{ + // This can be simplified without the `exchange`, but the `exchange` is kept + // in anticipation of supporting atomic operations. + auto toSet = rhs.unsafeExchange(nullptr); + if (toSet) + { + ptr_ = dynamic_cast(toSet); + if (!ptr_) + // need to set the pointer back or will leak + rhs.unsafeExchange(toSet); + } +} + +template +T& +SharedIntrusive::operator*() const noexcept +{ + return *unsafeGetRawPtr(); +} + +template +T* +SharedIntrusive::operator->() const noexcept +{ + return unsafeGetRawPtr(); +} + +template +SharedIntrusive::operator bool() const noexcept +{ + return bool(unsafeGetRawPtr()); +} + +template +void +SharedIntrusive::reset() +{ + unsafeReleaseAndStore(nullptr); +} + +template +T* +SharedIntrusive::get() const +{ + return unsafeGetRawPtr(); +} + +template +std::size_t +SharedIntrusive::use_count() const +{ + if (auto p = unsafeGetRawPtr()) + return p->use_count(); + return 0; +} + +template +T* +SharedIntrusive::unsafeGetRawPtr() const +{ + return ptr_; +} + +template +void +SharedIntrusive::unsafeSetRawPtr(T* p) +{ + ptr_ = p; +} + +template +T* +SharedIntrusive::unsafeExchange(T* p) +{ + return std::exchange(ptr_, p); +} + +template +void +SharedIntrusive::unsafeReleaseAndStore(T* next) +{ + auto prev = unsafeExchange(next); + if (!prev) + return; + + using enum ReleaseStrongRefAction; + auto action = prev->releaseStrongRef(); + switch (action) + { + case noop: + break; + case destroy: + delete prev; + break; + case partialDestroy: + prev->partialDestructor(); + partialDestructorFinished(&prev); + // prev is null and may no longer be used + break; + } +} + +//------------------------------------------------------------------------------ + +template +WeakIntrusive::WeakIntrusive(WeakIntrusive const& rhs) : ptr_{rhs.ptr_} +{ + if (ptr_) + ptr_->addWeakRef(); +} + +template +WeakIntrusive::WeakIntrusive(WeakIntrusive&& rhs) : ptr_{rhs.ptr_} +{ + rhs.ptr_ = nullptr; +} + +template +WeakIntrusive::WeakIntrusive(SharedIntrusive const& rhs) + : ptr_{rhs.unsafeGetRawPtr()} +{ + if (ptr_) + ptr_->addWeakRef(); +} + +template +template +// clang-format off +requires std::convertible_to +// clang-format on +WeakIntrusive& +WeakIntrusive::operator=(SharedIntrusive const& rhs) +{ + unsafeReleaseNoStore(); + auto p = rhs.unsafeGetRawPtr(); + if (p) + p->addWeakRef(); + return *this; +} + +template +void +WeakIntrusive::adopt(T* ptr) +{ + unsafeReleaseNoStore(); + if (ptr) + ptr->addWeakRef(); + ptr_ = ptr; +} + +template +WeakIntrusive::~WeakIntrusive() +{ + unsafeReleaseNoStore(); +} + +template +SharedIntrusive +WeakIntrusive::lock() const +{ + if (ptr_ && ptr_->checkoutStrongRefFromWeak()) + { + return SharedIntrusive{ptr_, SharedIntrusiveAdoptNoIncrementTag{}}; + } + return {}; +} + +template +bool +WeakIntrusive::expired() const +{ + return (!ptr_ || ptr_->expired()); +} + +template +void +WeakIntrusive::reset() +{ + unsafeReleaseNoStore(); + ptr_ = nullptr; +} + +template +void +WeakIntrusive::unsafeReleaseNoStore() +{ + if (!ptr_) + return; + + using enum ReleaseWeakRefAction; + auto action = ptr_->releaseWeakRef(); + switch (action) + { + case noop: + break; + case destroy: + delete ptr_; + break; + } +} + +//------------------------------------------------------------------------------ + +template +SharedWeakUnion::SharedWeakUnion(SharedWeakUnion const& rhs) : tp_{rhs.tp_} +{ + auto p = rhs.unsafeGetRawPtr(); + if (!p) + return; + + if 
(rhs.isStrong()) + p->addStrongRef(); + else + p->addWeakRef(); +} + +template +template + requires std::convertible_to +SharedWeakUnion::SharedWeakUnion(SharedIntrusive const& rhs) +{ + auto p = rhs.unsafeGetRawPtr(); + if (p) + p->addStrongRef(); + unsafeSetRawPtr(p, RefStrength::strong); +} + +template +SharedWeakUnion::SharedWeakUnion(SharedWeakUnion&& rhs) : tp_{rhs.tp_} +{ + rhs.unsafeSetRawPtr(nullptr); +} + +template +template + requires std::convertible_to +SharedWeakUnion::SharedWeakUnion(SharedIntrusive&& rhs) +{ + auto p = rhs.unsafeGetRawPtr(); + if (p) + unsafeSetRawPtr(p, RefStrength::strong); + rhs.unsafeSetRawPtr(nullptr); +} + +template +SharedWeakUnion& +SharedWeakUnion::operator=(SharedWeakUnion const& rhs) +{ + if (this == &rhs) + return *this; + unsafeReleaseNoStore(); + + if (auto p = rhs.unsafeGetRawPtr()) + { + if (rhs.isStrong()) + { + p->addStrongRef(); + unsafeSetRawPtr(p, RefStrength::strong); + } + else + { + p->addWeakRef(); + unsafeSetRawPtr(p, RefStrength::weak); + } + } + else + { + unsafeSetRawPtr(nullptr); + } + return *this; +} + +template +template +// clang-format off +requires std::convertible_to +// clang-format on +SharedWeakUnion& +SharedWeakUnion::operator=(SharedIntrusive const& rhs) +{ + unsafeReleaseNoStore(); + auto p = rhs.unsafeGetRawPtr(); + if (p) + p->addStrongRef(); + unsafeSetRawPtr(p, RefStrength::strong); + return *this; +} + +template +template +// clang-format off +requires std::convertible_to +// clang-format on +SharedWeakUnion& +SharedWeakUnion::operator=(SharedIntrusive&& rhs) +{ + unsafeReleaseNoStore(); + unsafeSetRawPtr(rhs.unsafeGetRawPtr(), RefStrength::strong); + rhs.unsafeSetRawPtr(nullptr); + return *this; +} + +template +SharedWeakUnion::~SharedWeakUnion() +{ + unsafeReleaseNoStore(); +}; + +// Return a strong pointer if this is already a strong pointer (i.e. don't +// lock the weak pointer. Use the `lock` method if that's what's needed) +template +SharedIntrusive +SharedWeakUnion::getStrong() const +{ + SharedIntrusive result; + auto p = unsafeGetRawPtr(); + if (p && isStrong()) + { + result.template adopt(p); + } + return result; +} + +template +SharedWeakUnion::operator bool() const noexcept +{ + return bool(get()); +} + +template +void +SharedWeakUnion::reset() +{ + unsafeReleaseNoStore(); + unsafeSetRawPtr(nullptr); +} + +template +T* +SharedWeakUnion::get() const +{ + return isStrong() ? 
unsafeGetRawPtr() : nullptr; +} + +template +std::size_t +SharedWeakUnion::use_count() const +{ + if (auto p = get()) + return p->use_count(); + return 0; +} + +template +bool +SharedWeakUnion::expired() const +{ + auto p = unsafeGetRawPtr(); + return (!p || p->expired()); +} + +template +SharedIntrusive +SharedWeakUnion::lock() const +{ + SharedIntrusive result; + auto p = unsafeGetRawPtr(); + if (!p) + return result; + + if (isStrong()) + { + result.template adopt(p); + return result; + } + + if (p->checkoutStrongRefFromWeak()) + { + result.template adopt(p); + return result; + } + return result; +} + +template +bool +SharedWeakUnion::isStrong() const +{ + return !(tp_ & tagMask); +} + +template +bool +SharedWeakUnion::isWeak() const +{ + return tp_ & tagMask; +} + +template +bool +SharedWeakUnion::convertToStrong() +{ + if (isStrong()) + return true; + + auto p = unsafeGetRawPtr(); + if (p && p->checkoutStrongRefFromWeak()) + { + [[maybe_unused]] auto action = p->releaseWeakRef(); + XRPL_ASSERT( + (action == ReleaseWeakRefAction::noop), + "ripple::SharedWeakUnion::convertToStrong : " + "action is noop"); + unsafeSetRawPtr(p, RefStrength::strong); + return true; + } + return false; +} + +template +bool +SharedWeakUnion::convertToWeak() +{ + if (isWeak()) + return true; + + auto p = unsafeGetRawPtr(); + if (!p) + return false; + + using enum ReleaseStrongRefAction; + auto action = p->addWeakReleaseStrongRef(); + switch (action) + { + case noop: + break; + case destroy: + // We just added a weak ref. How could we destroy? + UNREACHABLE( + "ripple::SharedWeakUnion::convertToWeak : destroying freshly " + "added ref"); + delete p; + unsafeSetRawPtr(nullptr); + return true; // Should never happen + case partialDestroy: + // This is a weird case. We just converted the last strong + // pointer to a weak pointer. + p->partialDestructor(); + partialDestructorFinished(&p); + // p is null and may no longer be used + break; + } + unsafeSetRawPtr(p, RefStrength::weak); + return true; +} + +template +T* +SharedWeakUnion::unsafeGetRawPtr() const +{ + return reinterpret_cast(tp_ & ptrMask); +} + +template +void +SharedWeakUnion::unsafeSetRawPtr(T* p, RefStrength rs) +{ + tp_ = reinterpret_cast(p); + if (tp_ && rs == RefStrength::weak) + tp_ |= tagMask; +} + +template +void +SharedWeakUnion::unsafeSetRawPtr(std::nullptr_t) +{ + tp_ = 0; +} + +template +void +SharedWeakUnion::unsafeReleaseNoStore() +{ + auto p = unsafeGetRawPtr(); + if (!p) + return; + + if (isStrong()) + { + using enum ReleaseStrongRefAction; + auto strongAction = p->releaseStrongRef(); + switch (strongAction) + { + case noop: + break; + case destroy: + delete p; + break; + case partialDestroy: + p->partialDestructor(); + partialDestructorFinished(&p); + // p is null and may no longer be used + break; + } + } + else + { + using enum ReleaseWeakRefAction; + auto weakAction = p->releaseWeakRef(); + switch (weakAction) + { + case noop: + break; + case destroy: + delete p; + break; + } + } +} + +} // namespace ripple +#endif diff --git a/include/xrpl/basics/IntrusiveRefCounts.h b/include/xrpl/basics/IntrusiveRefCounts.h new file mode 100644 index 0000000000..f3c707422b --- /dev/null +++ b/include/xrpl/basics/IntrusiveRefCounts.h @@ -0,0 +1,502 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. 
+ + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED +#define RIPPLE_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED + +#include + +#include +#include + +namespace ripple { + +/** Action to perform when releasing a strong pointer. + + noop: Do nothing. For example, a `noop` action will occur when a count is + decremented to a non-zero value. + + partialDestroy: Run the `partialDestructor`. This action will happen when a + strong count is decremented to zero and the weak count is non-zero. + + destroy: Run the destructor. This action will occur when either the strong + count or weak count is decremented and the other count is also zero. + */ +enum class ReleaseStrongRefAction { noop, partialDestroy, destroy }; + +/** Action to perform when releasing a weak pointer. + + noop: Do nothing. For example, a `noop` action will occur when a count is + decremented to a non-zero value. + + destroy: Run the destructor. This action will occur when either the strong + count or weak count is decremented and the other count is also zero. + */ +enum class ReleaseWeakRefAction { noop, destroy }; + +/** Implement the strong count, weak count, and bit flags for an intrusive + pointer. + + A class can satisfy the requirements of a ripple::IntrusivePointer by + inheriting from this class. + */ +struct IntrusiveRefCounts +{ + virtual ~IntrusiveRefCounts() noexcept; + + // This must be `noexcept` or the make_SharedIntrusive function could leak + // memory. + void + addStrongRef() const noexcept; + + void + addWeakRef() const noexcept; + + ReleaseStrongRefAction + releaseStrongRef() const; + + // Same as: + // { + // addWeakRef(); + // return releaseStrongRef; + // } + // done as one atomic operation + ReleaseStrongRefAction + addWeakReleaseStrongRef() const; + + ReleaseWeakRefAction + releaseWeakRef() const; + + // Returns true is able to checkout a strong ref. False otherwise + bool + checkoutStrongRefFromWeak() const noexcept; + + bool + expired() const noexcept; + + std::size_t + use_count() const noexcept; + + // This function MUST be called after a partial destructor finishes running. + // Calling this function may cause other threads to delete the object + // pointed to by `o`, so `o` should never be used after calling this + // function. The parameter will be set to a `nullptr` after calling this + // function to emphasize that it should not be used. + // Note: This is intentionally NOT called at the end of `partialDestructor`. + // The reason for this is if new classes are written to support this smart + // pointer class, they need to write their own `partialDestructor` function + // and ensure `partialDestructorFinished` is called at the end. 
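For example, the release path in the pointer classes follows this pattern (a condensed sketch of the sequence, not a verbatim excerpt from this patch):

    // Last strong reference released while weak references remain:
    if (obj->releaseStrongRef() == ReleaseStrongRefAction::partialDestroy)
    {
        obj->partialDestructor();         // written by the pointee class
        partialDestructorFinished(&obj);  // called by the smart pointer
        // obj is now nullptr and must not be touched again
    }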
Putting this + // call inside the smart pointer class itself is expected to be less error + // prone. + // Note: The "two-star" programming is intentional. It emphasizes that `o` + // may be deleted and the unergonomic API is meant to signal the special + // nature of this function call to callers. + // Note: This is a template to support incompletely defined classes. + template + friend void + partialDestructorFinished(T** o); + +private: + // TODO: We may need to use a uint64_t for both counts. This will reduce the + // memory savings. We need to audit the code to make sure 16 bit counts are + // enough for strong pointers and 14 bit counts are enough for weak + // pointers. Use type aliases to make it easy to switch types. + using CountType = std::uint16_t; + static constexpr size_t StrongCountNumBits = sizeof(CountType) * 8; + static constexpr size_t WeakCountNumBits = StrongCountNumBits - 2; + using FieldType = std::uint32_t; + static constexpr size_t FieldTypeBits = sizeof(FieldType) * 8; + static constexpr FieldType one = 1; + + /** `refCounts` consists of four fields that are treated atomically: + + 1. Strong count. This is a count of the number of shared pointers that + hold a reference to this object. When the strong counts goes to zero, + if the weak count is zero, the destructor is run. If the weak count is + non-zero when the strong count goes to zero then the partialDestructor + is run. + + 2. Weak count. This is a count of the number of weak pointer that hold + a reference to this object. When the weak count goes to zero and the + strong count is also zero, then the destructor is run. + + 3. Partial destroy started bit. This bit is set if the + `partialDestructor` function has been started (or is about to be + started). This is used to prevent the destructor from running + concurrently with the partial destructor. This can easily happen when + the last strong pointer release its reference in one thread and starts + the partialDestructor, while in another thread the last weak pointer + goes out of scope and starts the destructor while the partialDestructor + is still running. Both a start and finished bit is needed to handle a + corner-case where the last strong pointer goes out of scope, then then + last `weakPointer` goes out of scope, but this happens before the + `partialDestructor` bit is set. It would be possible to use a single + bit if it could also be set atomically when the strong count goes to + zero and the weak count is non-zero, but that would add complexity (and + likely slow down common cases as well). + + 4. Partial destroy finished bit. This bit is set when the + `partialDestructor` has finished running. See (3) above for more + information. + + */ + + mutable std::atomic refCounts{strongDelta}; + + /** Amount to change the strong count when adding or releasing a reference + + Note: The strong count is stored in the low `StrongCountNumBits` bits + of refCounts + */ + static constexpr FieldType strongDelta = 1; + + /** Amount to change the weak count when adding or releasing a reference + + Note: The weak count is stored in the high `WeakCountNumBits` bits of + refCounts + */ + static constexpr FieldType weakDelta = (one << StrongCountNumBits); + + /** Flag that is set when the partialDestroy function has started running + (or is about to start running). + + See description of the `refCounts` field for a fuller description of + this field. 
+ */ + static constexpr FieldType partialDestroyStartedMask = + (one << (FieldTypeBits - 1)); + + /** Flag that is set when the partialDestroy function has finished running + + See description of the `refCounts` field for a fuller description of + this field. + */ + static constexpr FieldType partialDestroyFinishedMask = + (one << (FieldTypeBits - 2)); + + /** Mask that will zero out all the `count` bits and leave the tag bits + unchanged. + */ + static constexpr FieldType tagMask = + partialDestroyStartedMask | partialDestroyFinishedMask; + + /** Mask that will zero out the `tag` bits and leave the count bits + unchanged. + */ + static constexpr FieldType valueMask = ~tagMask; + + /** Mask that will zero out everything except the strong count. + */ + static constexpr FieldType strongMask = + ((one << StrongCountNumBits) - 1) & valueMask; + + /** Mask that will zero out everything except the weak count. + */ + static constexpr FieldType weakMask = + (((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask; + + /** Unpack the count and tag fields from the packed atomic integer form. */ + struct RefCountPair + { + CountType strong; + CountType weak; + /** The `partialDestroyStartedBit` is set to on when the partial + destroy function is started. It is not a boolean; it is a uint32 + with all bits zero with the possible exception of the + `partialDestroyStartedMask` bit. This is done so it can be directly + masked into the `combinedValue`. + */ + FieldType partialDestroyStartedBit{0}; + /** The `partialDestroyFinishedBit` is set to on when the partial + destroy function has finished. + */ + FieldType partialDestroyFinishedBit{0}; + RefCountPair(FieldType v) noexcept; + RefCountPair(CountType s, CountType w) noexcept; + + /** Convert back to the packed integer form. */ + FieldType + combinedValue() const noexcept; + + static constexpr CountType maxStrongValue = + static_cast((one << StrongCountNumBits) - 1); + static constexpr CountType maxWeakValue = + static_cast((one << WeakCountNumBits) - 1); + /** Put an extra margin to detect when running up against limits. + This is only used in debug code, and is useful if we reduce the + number of bits in the strong and weak counts (to 16 and 14 bits). + */ + static constexpr CountType checkStrongMaxValue = maxStrongValue - 32; + static constexpr CountType checkWeakMaxValue = maxWeakValue - 32; + }; +}; + +inline void +IntrusiveRefCounts::addStrongRef() const noexcept +{ + refCounts.fetch_add(strongDelta, std::memory_order_acq_rel); +} + +inline void +IntrusiveRefCounts::addWeakRef() const noexcept +{ + refCounts.fetch_add(weakDelta, std::memory_order_acq_rel); +} + +inline ReleaseStrongRefAction +IntrusiveRefCounts::releaseStrongRef() const +{ + // Subtract `strongDelta` from refCounts. If this releases the last strong + // ref, set the `partialDestroyStarted` bit. It is important that the ref + // count and the `partialDestroyStartedBit` are changed atomically (hence + // the loop and `compare_exchange` op). If this didn't need to be done + // atomically, the loop could be replaced with a `fetch_sub` and a + // conditional `fetch_or`. This loop will almost always run once. 
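    // Worked example of the arithmetic in the loop below (illustrative values,
    // using the bit layout defined above): strong == 1, weak == 2, no flags.
    //
    //   prevIntVal  = (2u << 16) | 1u           // 0x0002'0001
    //   nextIntVal  = prevIntVal - strongDelta  // 0x0002'0000, strong now 0
    //   nextIntVal |= partialDestroyStartedMask // weak != 0 => partialDestroy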
+ + using enum ReleaseStrongRefAction; + auto prevIntVal = refCounts.load(std::memory_order_acquire); + while (1) + { + RefCountPair const prevVal{prevIntVal}; + XRPL_ASSERT( + (prevVal.strong >= strongDelta), + "ripple::IntrusiveRefCounts::releaseStrongRef : previous ref " + "higher than new"); + auto nextIntVal = prevIntVal - strongDelta; + ReleaseStrongRefAction action = noop; + if (prevVal.strong == 1) + { + if (prevVal.weak == 0) + { + action = destroy; + } + else + { + nextIntVal |= partialDestroyStartedMask; + action = partialDestroy; + } + } + + if (refCounts.compare_exchange_weak( + prevIntVal, nextIntVal, std::memory_order_release)) + { + // Can't be in partial destroy because only decrementing the strong + // count to zero can start a partial destroy, and that can't happen + // twice. + XRPL_ASSERT( + (action == noop) || !(prevIntVal & partialDestroyStartedMask), + "ripple::IntrusiveRefCounts::releaseStrongRef : not in partial " + "destroy"); + return action; + } + } +} + +inline ReleaseStrongRefAction +IntrusiveRefCounts::addWeakReleaseStrongRef() const +{ + using enum ReleaseStrongRefAction; + + static_assert(weakDelta > strongDelta); + auto constexpr delta = weakDelta - strongDelta; + auto prevIntVal = refCounts.load(std::memory_order_acquire); + // This loop will almost always run once. The loop is needed to atomically + // change the counts and flags (the count could be atomically changed, but + // the flags depend on the current value of the counts). + // + // Note: If this becomes a perf bottleneck, the `partialDestoryStartedMask` + // may be able to be set non-atomically. But it is easier to reason about + // the code if the flag is set atomically. + while (1) + { + RefCountPair const prevVal{prevIntVal}; + // Converted the last strong pointer to a weak pointer. + // + // Can't be in partial destroy because only decrementing the + // strong count to zero can start a partial destroy, and that + // can't happen twice. + XRPL_ASSERT( + (!prevVal.partialDestroyStartedBit), + "ripple::IntrusiveRefCounts::addWeakReleaseStrongRef : not in " + "partial destroy"); + + auto nextIntVal = prevIntVal + delta; + ReleaseStrongRefAction action = noop; + if (prevVal.strong == 1) + { + if (prevVal.weak == 0) + { + action = noop; + } + else + { + nextIntVal |= partialDestroyStartedMask; + action = partialDestroy; + } + } + if (refCounts.compare_exchange_weak( + prevIntVal, nextIntVal, std::memory_order_release)) + { + XRPL_ASSERT( + (!(prevIntVal & partialDestroyStartedMask)), + "ripple::IntrusiveRefCounts::addWeakReleaseStrongRef : not " + "started partial destroy"); + return action; + } + } +} + +inline ReleaseWeakRefAction +IntrusiveRefCounts::releaseWeakRef() const +{ + auto prevIntVal = refCounts.fetch_sub(weakDelta, std::memory_order_acq_rel); + RefCountPair prev = prevIntVal; + if (prev.weak == 1 && prev.strong == 0) + { + if (!prev.partialDestroyStartedBit) + { + // This case should only be hit if the partialDestroyStartedBit is + // set non-atomically (and even then very rarely). The code is kept + // in case we need to set the flag non-atomically for perf reasons. 
+ refCounts.wait(prevIntVal, std::memory_order_acq_rel); + prevIntVal = refCounts.load(std::memory_order_acquire); + prev = RefCountPair{prevIntVal}; + } + if (!prev.partialDestroyFinishedBit) + { + // partial destroy MUST finish before running a full destroy (when + // using weak pointers) + refCounts.wait(prevIntVal - weakDelta, std::memory_order_acq_rel); + } + return ReleaseWeakRefAction::destroy; + } + return ReleaseWeakRefAction::noop; +} + +inline bool +IntrusiveRefCounts::checkoutStrongRefFromWeak() const noexcept +{ + auto curValue = RefCountPair{1, 1}.combinedValue(); + auto desiredValue = RefCountPair{2, 1}.combinedValue(); + + while (!refCounts.compare_exchange_weak( + curValue, desiredValue, std::memory_order_release)) + { + RefCountPair const prev{curValue}; + if (!prev.strong) + return false; + + desiredValue = curValue + strongDelta; + } + return true; +} + +inline bool +IntrusiveRefCounts::expired() const noexcept +{ + RefCountPair const val = refCounts.load(std::memory_order_acquire); + return val.strong == 0; +} + +inline std::size_t +IntrusiveRefCounts::use_count() const noexcept +{ + RefCountPair const val = refCounts.load(std::memory_order_acquire); + return val.strong; +} + +inline IntrusiveRefCounts::~IntrusiveRefCounts() noexcept +{ +#ifndef NDEBUG + auto v = refCounts.load(std::memory_order_acquire); + XRPL_ASSERT( + (!(v & valueMask)), + "ripple::IntrusiveRefCounts::~IntrusiveRefCounts : count must be zero"); + auto t = v & tagMask; + XRPL_ASSERT( + (!t || t == tagMask), + "ripple::IntrusiveRefCounts::~IntrusiveRefCounts : valid tag"); +#endif +} + +//------------------------------------------------------------------------------ + +inline IntrusiveRefCounts::RefCountPair::RefCountPair( + IntrusiveRefCounts::FieldType v) noexcept + : strong{static_cast(v & strongMask)} + , weak{static_cast((v & weakMask) >> StrongCountNumBits)} + , partialDestroyStartedBit{v & partialDestroyStartedMask} + , partialDestroyFinishedBit{v & partialDestroyFinishedMask} +{ + XRPL_ASSERT( + (strong < checkStrongMaxValue && weak < checkWeakMaxValue), + "ripple::IntrusiveRefCounts::RefCountPair(FieldType) : inputs inside " + "range"); +} + +inline IntrusiveRefCounts::RefCountPair::RefCountPair( + IntrusiveRefCounts::CountType s, + IntrusiveRefCounts::CountType w) noexcept + : strong{s}, weak{w} +{ + XRPL_ASSERT( + (strong < checkStrongMaxValue && weak < checkWeakMaxValue), + "ripple::IntrusiveRefCounts::RefCountPair(CountType, CountType) : " + "inputs inside range"); +} + +inline IntrusiveRefCounts::FieldType +IntrusiveRefCounts::RefCountPair::combinedValue() const noexcept +{ + XRPL_ASSERT( + (strong < checkStrongMaxValue && weak < checkWeakMaxValue), + "ripple::IntrusiveRefCounts::RefCountPair::combinedValue : inputs " + "inside range"); + return (static_cast(weak) + << IntrusiveRefCounts::StrongCountNumBits) | + static_cast(strong) | + partialDestroyStartedBit | partialDestroyFinishedBit; +} + +template +inline void +partialDestructorFinished(T** o) +{ + T& self = **o; + IntrusiveRefCounts::RefCountPair p = + self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask); + XRPL_ASSERT( + (!p.partialDestroyFinishedBit && p.partialDestroyStartedBit && + !p.strong), + "ripple::partialDestructorFinished : not a weak ref"); + if (!p.weak) + { + // There was a weak count before the partial destructor ran (or we would + // have run the full destructor) and now there isn't a weak count. Some + // thread is waiting to run the destructor. 
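    // Illustrative interleaving of this wait/notify pair (a sketch, not an
    // excerpt from the source):
    //
    //   thread A (drops last strong ref)    thread B (drops last weak ref)
    //   --------------------------------    ------------------------------
    //   releaseStrongRef -> partialDestroy
    //   runs partialDestructor()            releaseWeakRef(): strong == 0,
    //                                       finished bit clear -> wait(...)
    //   partialDestructorFinished():
    //     sets finished bit, notify_one() -> wakes and runs the destructor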
+ self.refCounts.notify_one(); + } + // Set the pointer to null to emphasize that the object shouldn't be used + // after calling this function as it may be destroyed in another thread. + *o = nullptr; +} +//------------------------------------------------------------------------------ + +} // namespace ripple +#endif diff --git a/include/xrpl/basics/SharedWeakCachePointer.h b/include/xrpl/basics/SharedWeakCachePointer.h new file mode 100644 index 0000000000..78e25b4485 --- /dev/null +++ b/include/xrpl/basics/SharedWeakCachePointer.h @@ -0,0 +1,135 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_H_INCLUDED +#define RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_H_INCLUDED + +#include +#include + +namespace ripple { + +/** A combination of a std::shared_ptr and a std::weak_pointer. + + +This class is a wrapper to a `std::variant` +This class is useful for storing intrusive pointers in tagged caches using less +memory than storing both pointers directly. +*/ + +template +class SharedWeakCachePointer +{ +public: + SharedWeakCachePointer() = default; + + SharedWeakCachePointer(SharedWeakCachePointer const& rhs); + + template + requires std::convertible_to + SharedWeakCachePointer(std::shared_ptr const& rhs); + + SharedWeakCachePointer(SharedWeakCachePointer&& rhs); + + template + requires std::convertible_to + SharedWeakCachePointer(std::shared_ptr&& rhs); + + SharedWeakCachePointer& + operator=(SharedWeakCachePointer const& rhs); + + template + requires std::convertible_to + SharedWeakCachePointer& + operator=(std::shared_ptr const& rhs); + + template + requires std::convertible_to + SharedWeakCachePointer& + operator=(std::shared_ptr&& rhs); + + ~SharedWeakCachePointer(); + + /** Return a strong pointer if this is already a strong pointer (i.e. don't + lock the weak pointer. Use the `lock` method if that's what's needed) + */ + std::shared_ptr const& + getStrong() const; + + /** Return true if this is a strong pointer and the strong pointer is + seated. + */ + explicit + operator bool() const noexcept; + + /** Set the pointer to null, decrement the appropriate ref count, and run + the appropriate release action. + */ + void + reset(); + + /** If this is a strong pointer, return the raw pointer. Otherwise return + null. + */ + T* + get() const; + + /** If this is a strong pointer, return the strong count. Otherwise return 0 + */ + std::size_t + use_count() const; + + /** Return true if there is a non-zero strong count. */ + bool + expired() const; + + /** If this is a strong pointer, return the strong pointer. 
Otherwise + attempt to lock the weak pointer. + */ + std::shared_ptr + lock() const; + + /** Return true is this represents a strong pointer. */ + bool + isStrong() const; + + /** Return true is this represents a weak pointer. */ + bool + isWeak() const; + + /** If this is a weak pointer, attempt to convert it to a strong pointer. + + @return true if successfully converted to a strong pointer (or was + already a strong pointer). Otherwise false. + */ + bool + convertToStrong(); + + /** If this is a strong pointer, attempt to convert it to a weak pointer. + + @return false if the pointer is null. Otherwise return true. + */ + bool + convertToWeak(); + +private: + std::variant, std::weak_ptr> combo_; +}; +} // namespace ripple +#endif diff --git a/include/xrpl/basics/SharedWeakCachePointer.ipp b/include/xrpl/basics/SharedWeakCachePointer.ipp new file mode 100644 index 0000000000..5a3b6e72a1 --- /dev/null +++ b/include/xrpl/basics/SharedWeakCachePointer.ipp @@ -0,0 +1,192 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_IPP_INCLUDED +#define RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_IPP_INCLUDED + +#include + +namespace ripple { +template +SharedWeakCachePointer::SharedWeakCachePointer( + SharedWeakCachePointer const& rhs) = default; + +template +template + requires std::convertible_to +SharedWeakCachePointer::SharedWeakCachePointer( + std::shared_ptr const& rhs) + : combo_{rhs} +{ +} + +template +SharedWeakCachePointer::SharedWeakCachePointer( + SharedWeakCachePointer&& rhs) = default; + +template +template + requires std::convertible_to +SharedWeakCachePointer::SharedWeakCachePointer(std::shared_ptr&& rhs) + : combo_{std::move(rhs)} +{ +} + +template +SharedWeakCachePointer& +SharedWeakCachePointer::operator=(SharedWeakCachePointer const& rhs) = + default; + +template +template + requires std::convertible_to +SharedWeakCachePointer& +SharedWeakCachePointer::operator=(std::shared_ptr const& rhs) +{ + combo_ = rhs; + return *this; +} + +template +template + requires std::convertible_to +SharedWeakCachePointer& +SharedWeakCachePointer::operator=(std::shared_ptr&& rhs) +{ + combo_ = std::move(rhs); + return *this; +} + +template +SharedWeakCachePointer::~SharedWeakCachePointer() = default; + +// Return a strong pointer if this is already a strong pointer (i.e. don't +// lock the weak pointer. 
Use the `lock` method if that's what's needed) +template +std::shared_ptr const& +SharedWeakCachePointer::getStrong() const +{ + static std::shared_ptr const empty; + if (auto p = std::get_if>(&combo_)) + return *p; + return empty; +} + +template +SharedWeakCachePointer::operator bool() const noexcept +{ + return !!std::get_if>(&combo_); +} + +template +void +SharedWeakCachePointer::reset() +{ + combo_ = std::shared_ptr{}; +} + +template +T* +SharedWeakCachePointer::get() const +{ + return std::get_if>(&combo_).get(); +} + +template +std::size_t +SharedWeakCachePointer::use_count() const +{ + if (auto p = std::get_if>(&combo_)) + return p->use_count(); + return 0; +} + +template +bool +SharedWeakCachePointer::expired() const +{ + if (auto p = std::get_if>(&combo_)) + return p->expired(); + return !std::get_if>(&combo_); +} + +template +std::shared_ptr +SharedWeakCachePointer::lock() const +{ + if (auto p = std::get_if>(&combo_)) + return *p; + + if (auto p = std::get_if>(&combo_)) + return p->lock(); + + return {}; +} + +template +bool +SharedWeakCachePointer::isStrong() const +{ + if (auto p = std::get_if>(&combo_)) + return !!p->get(); + return false; +} + +template +bool +SharedWeakCachePointer::isWeak() const +{ + return !isStrong(); +} + +template +bool +SharedWeakCachePointer::convertToStrong() +{ + if (isStrong()) + return true; + + if (auto p = std::get_if>(&combo_)) + { + if (auto s = p->lock()) + { + combo_ = std::move(s); + return true; + } + } + return false; +} + +template +bool +SharedWeakCachePointer::convertToWeak() +{ + if (isWeak()) + return true; + + if (auto p = std::get_if>(&combo_)) + { + combo_ = std::weak_ptr(*p); + return true; + } + + return false; +} +} // namespace ripple +#endif diff --git a/include/xrpl/basics/TaggedCache.h b/include/xrpl/basics/TaggedCache.h index dcc7c0def9..64570ae061 100644 --- a/include/xrpl/basics/TaggedCache.h +++ b/include/xrpl/basics/TaggedCache.h @@ -20,7 +20,9 @@ #ifndef RIPPLE_BASICS_TAGGEDCACHE_H_INCLUDED #define RIPPLE_BASICS_TAGGEDCACHE_H_INCLUDED +#include #include +#include #include #include #include @@ -51,6 +53,8 @@ template < class Key, class T, bool IsKeyCache = false, + class SharedWeakUnionPointerType = SharedWeakCachePointer, + class SharedPointerType = std::shared_ptr, class Hash = hardened_hash<>, class KeyEqual = std::equal_to, class Mutex = std::recursive_mutex> @@ -61,6 +65,8 @@ public: using key_type = Key; using mapped_type = T; using clock_type = beast::abstract_clock; + using shared_weak_combo_pointer_type = SharedWeakUnionPointerType; + using shared_pointer_type = SharedPointerType; public: TaggedCache( @@ -70,231 +76,48 @@ public: clock_type& clock, beast::Journal journal, beast::insight::Collector::ptr const& collector = - beast::insight::NullCollector::New()) - : m_journal(journal) - , m_clock(clock) - , m_stats( - name, - std::bind(&TaggedCache::collect_metrics, this), - collector) - , m_name(name) - , m_target_size(size) - , m_target_age(expiration) - , m_cache_count(0) - , m_hits(0) - , m_misses(0) - { - } + beast::insight::NullCollector::New()); public: /** Return the clock associated with the cache. */ clock_type& - clock() - { - return m_clock; - } + clock(); /** Returns the number of items in the container. 
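For illustration, the cache can now be instantiated with either pointer family (a sketch; `Node` is a hypothetical type deriving from IntrusiveRefCounts, and the defaults mirror the template declaration above):

    // Default: std::shared_ptr values wrapped in a SharedWeakCachePointer.
    using SharedPtrCache = TaggedCache<uint256, Node>;

    // Intrusive variant: strong/weak union stored in a single tagged word.
    using IntrusiveCache = TaggedCache<
        uint256,
        Node,
        /*IsKeyCache=*/false,
        SharedWeakUnion<Node>,
        SharedIntrusive<Node>>;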
*/ std::size_t - size() const - { - std::lock_guard lock(m_mutex); - return m_cache.size(); - } - - void - setTargetSize(int s) - { - std::lock_guard lock(m_mutex); - m_target_size = s; - - if (s > 0) - { - for (auto& partition : m_cache.map()) - { - partition.rehash(static_cast( - (s + (s >> 2)) / - (partition.max_load_factor() * m_cache.partitions()) + - 1)); - } - } - - JLOG(m_journal.debug()) << m_name << " target size set to " << s; - } - - clock_type::duration - getTargetAge() const - { - std::lock_guard lock(m_mutex); - return m_target_age; - } - - void - setTargetAge(clock_type::duration s) - { - std::lock_guard lock(m_mutex); - m_target_age = s; - JLOG(m_journal.debug()) - << m_name << " target age set to " << m_target_age.count(); - } + size() const; int - getCacheSize() const - { - std::lock_guard lock(m_mutex); - return m_cache_count; - } + getCacheSize() const; int - getTrackSize() const - { - std::lock_guard lock(m_mutex); - return m_cache.size(); - } + getTrackSize() const; float - getHitRate() - { - std::lock_guard lock(m_mutex); - auto const total = static_cast(m_hits + m_misses); - return m_hits * (100.0f / std::max(1.0f, total)); - } + getHitRate(); void - clear() - { - std::lock_guard lock(m_mutex); - m_cache.clear(); - m_cache_count = 0; - } + clear(); void - reset() - { - std::lock_guard lock(m_mutex); - m_cache.clear(); - m_cache_count = 0; - m_hits = 0; - m_misses = 0; - } + reset(); /** Refresh the last access time on a key if present. @return `true` If the key was found. */ template bool - touch_if_exists(KeyComparable const& key) - { - std::lock_guard lock(m_mutex); - auto const iter(m_cache.find(key)); - if (iter == m_cache.end()) - { - ++m_stats.misses; - return false; - } - iter->second.touch(m_clock.now()); - ++m_stats.hits; - return true; - } + touch_if_exists(KeyComparable const& key); - using SweptPointersVector = std::pair< - std::vector>, - std::vector>>; + using SweptPointersVector = std::vector; void - sweep() - { - // Keep references to all the stuff we sweep - // For performance, each worker thread should exit before the swept data - // is destroyed but still within the main cache lock. - std::vector allStuffToSweep(m_cache.partitions()); - - clock_type::time_point const now(m_clock.now()); - clock_type::time_point when_expire; - - auto const start = std::chrono::steady_clock::now(); - { - std::lock_guard lock(m_mutex); - - if (m_target_size == 0 || - (static_cast(m_cache.size()) <= m_target_size)) - { - when_expire = now - m_target_age; - } - else - { - when_expire = - now - m_target_age * m_target_size / m_cache.size(); - - clock_type::duration const minimumAge(std::chrono::seconds(1)); - if (when_expire > (now - minimumAge)) - when_expire = now - minimumAge; - - JLOG(m_journal.trace()) - << m_name << " is growing fast " << m_cache.size() << " of " - << m_target_size << " aging at " - << (now - when_expire).count() << " of " - << m_target_age.count(); - } - - std::vector workers; - workers.reserve(m_cache.partitions()); - std::atomic allRemovals = 0; - - for (std::size_t p = 0; p < m_cache.partitions(); ++p) - { - workers.push_back(sweepHelper( - when_expire, - now, - m_cache.map()[p], - allStuffToSweep[p], - allRemovals, - lock)); - } - for (std::thread& worker : workers) - worker.join(); - - m_cache_count -= allRemovals; - } - // At this point allStuffToSweep will go out of scope outside the lock - // and decrement the reference count on each strong pointer. 
- JLOG(m_journal.debug()) - << m_name << " TaggedCache sweep lock duration " - << std::chrono::duration_cast( - std::chrono::steady_clock::now() - start) - .count() - << "ms"; - } + sweep(); bool - del(const key_type& key, bool valid) - { - // Remove from cache, if !valid, remove from map too. Returns true if - // removed from cache - std::lock_guard lock(m_mutex); - - auto cit = m_cache.find(key); - - if (cit == m_cache.end()) - return false; - - Entry& entry = cit->second; - - bool ret = false; - - if (entry.isCached()) - { - --m_cache_count; - entry.ptr.reset(); - ret = true; - } - - if (!valid || entry.isExpired()) - m_cache.erase(cit); - - return ret; - } + del(const key_type& key, bool valid); +public: /** Replace aliased objects with originals. Due to concurrency it is possible for two separate objects with @@ -308,100 +131,23 @@ public: @return `true` If the key already existed. */ -public: + template bool canonicalize( const key_type& key, - std::shared_ptr& data, - std::function const&)>&& replace) - { - // Return canonical value, store if needed, refresh in cache - // Return values: true=we had the data already - std::lock_guard lock(m_mutex); - - auto cit = m_cache.find(key); - - if (cit == m_cache.end()) - { - m_cache.emplace( - std::piecewise_construct, - std::forward_as_tuple(key), - std::forward_as_tuple(m_clock.now(), data)); - ++m_cache_count; - return false; - } - - Entry& entry = cit->second; - entry.touch(m_clock.now()); - - if (entry.isCached()) - { - if (replace(entry.ptr)) - { - entry.ptr = data; - entry.weak_ptr = data; - } - else - { - data = entry.ptr; - } - - return true; - } - - auto cachedData = entry.lock(); - - if (cachedData) - { - if (replace(entry.ptr)) - { - entry.ptr = data; - entry.weak_ptr = data; - } - else - { - entry.ptr = cachedData; - data = cachedData; - } - - ++m_cache_count; - return true; - } - - entry.ptr = data; - entry.weak_ptr = data; - ++m_cache_count; - - return false; - } + SharedPointerType& data, + R&& replaceCallback); bool canonicalize_replace_cache( const key_type& key, - std::shared_ptr const& data) - { - return canonicalize( - key, - const_cast&>(data), - [](std::shared_ptr const&) { return true; }); - } + SharedPointerType const& data); bool - canonicalize_replace_client(const key_type& key, std::shared_ptr& data) - { - return canonicalize( - key, data, [](std::shared_ptr const&) { return false; }); - } + canonicalize_replace_client(const key_type& key, SharedPointerType& data); - std::shared_ptr - fetch(const key_type& key) - { - std::lock_guard l(m_mutex); - auto ret = initialFetch(key, l); - if (!ret) - ++m_misses; - return ret; - } + SharedPointerType + fetch(const key_type& key); /** Insert the element into the container. If the key already exists, nothing happens. @@ -410,26 +156,11 @@ public: template auto insert(key_type const& key, T const& value) - -> std::enable_if_t - { - auto p = std::make_shared(std::cref(value)); - return canonicalize_replace_client(key, p); - } + -> std::enable_if_t; template auto - insert(key_type const& key) -> std::enable_if_t - { - std::lock_guard lock(m_mutex); - clock_type::time_point const now(m_clock.now()); - auto [it, inserted] = m_cache.emplace( - std::piecewise_construct, - std::forward_as_tuple(key), - std::forward_as_tuple(now)); - if (!inserted) - it->second.last_access = now; - return inserted; - } + insert(key_type const& key) -> std::enable_if_t; // VFALCO NOTE It looks like this returns a copy of the data in // the output parameter 'data'. This could be expensive. 
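// Reference sketch, not rippled code: a stand-alone illustration of the
// canonicalize contract declared above (a return value of true means "the cache
// already had this key"). CacheMap, canonicalize_client and canonicalize_cache
// are hypothetical stand-ins; the real TaggedCache adds locking, weak entries
// and expiry on top of this idea and selects the behaviour through the
// replaceCallback parameter.
#include <cassert>
#include <map>
#include <memory>
#include <string>

using Key = int;
using Value = std::string;
using CacheMap = std::map<Key, std::shared_ptr<Value>>;

// "replace_client" flavour: the cached copy wins and the caller's pointer is
// redirected to it, so every caller ends up sharing one canonical object.
bool
canonicalize_client(CacheMap& cache, Key k, std::shared_ptr<Value>& data)
{
    auto [it, inserted] = cache.try_emplace(k, data);
    if (inserted)
        return false;   // we stored the caller's object
    data = it->second;  // caller now points at the canonical copy
    return true;        // we already had the data
}

// "replace_cache" flavour: the caller's copy wins and overwrites the cache.
bool
canonicalize_cache(CacheMap& cache, Key k, std::shared_ptr<Value> const& data)
{
    auto [it, inserted] = cache.try_emplace(k, data);
    if (!inserted)
        it->second = data;
    return !inserted;
}

int
main()
{
    CacheMap cache;
    auto a = std::make_shared<Value>("a");
    auto b = std::make_shared<Value>("b");
    assert(!canonicalize_client(cache, 1, a));  // first store
    assert(canonicalize_client(cache, 1, b));   // cached copy wins...
    assert(*b == "a");                          // ...so b was redirected
    assert(canonicalize_cache(cache, 1, std::make_shared<Value>("c")));
    assert(*cache.at(1) == "c");                // caller's copy replaced it
}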
@@ -437,50 +168,18 @@ public: // simply return an iterator. // bool - retrieve(const key_type& key, T& data) - { - // retrieve the value of the stored data - auto entry = fetch(key); - - if (!entry) - return false; - - data = *entry; - return true; - } + retrieve(const key_type& key, T& data); mutex_type& - peekMutex() - { - return m_mutex; - } + peekMutex(); std::vector - getKeys() const - { - std::vector v; - - { - std::lock_guard lock(m_mutex); - v.reserve(m_cache.size()); - for (auto const& _ : m_cache) - v.push_back(_.first); - } - - return v; - } + getKeys() const; // CachedSLEs functions. /** Returns the fraction of cache hits. */ double - rate() const - { - std::lock_guard lock(m_mutex); - auto const tot = m_hits + m_misses; - if (tot == 0) - return 0; - return double(m_hits) / tot; - } + rate() const; /** Fetch an item from the cache. If the digest was not found, Handler @@ -488,73 +187,16 @@ public: std::shared_ptr(void) */ template - std::shared_ptr - fetch(key_type const& digest, Handler const& h) - { - { - std::lock_guard l(m_mutex); - if (auto ret = initialFetch(digest, l)) - return ret; - } - - auto sle = h(); - if (!sle) - return {}; - - std::lock_guard l(m_mutex); - ++m_misses; - auto const [it, inserted] = - m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle))); - if (!inserted) - it->second.touch(m_clock.now()); - return it->second.ptr; - } + SharedPointerType + fetch(key_type const& digest, Handler const& h); // End CachedSLEs functions. private: - std::shared_ptr - initialFetch(key_type const& key, std::lock_guard const& l) - { - auto cit = m_cache.find(key); - if (cit == m_cache.end()) - return {}; - - Entry& entry = cit->second; - if (entry.isCached()) - { - ++m_hits; - entry.touch(m_clock.now()); - return entry.ptr; - } - entry.ptr = entry.lock(); - if (entry.isCached()) - { - // independent of cache size, so not counted as a hit - ++m_cache_count; - entry.touch(m_clock.now()); - return entry.ptr; - } - - m_cache.erase(cit); - return {}; - } + SharedPointerType + initialFetch(key_type const& key, std::lock_guard const& l); void - collect_metrics() - { - m_stats.size.set(getCacheSize()); - - { - beast::insight::Gauge::value_type hit_rate(0); - { - std::lock_guard lock(m_mutex); - auto const total(m_hits + m_misses); - if (total != 0) - hit_rate = (m_hits * 100) / total; - } - m_stats.hit_rate.set(hit_rate); - } - } + collect_metrics(); private: struct Stats @@ -600,36 +242,37 @@ private: class ValueEntry { public: - std::shared_ptr ptr; - std::weak_ptr weak_ptr; + shared_weak_combo_pointer_type ptr; clock_type::time_point last_access; ValueEntry( clock_type::time_point const& last_access_, - std::shared_ptr const& ptr_) - : ptr(ptr_), weak_ptr(ptr_), last_access(last_access_) + shared_pointer_type const& ptr_) + : ptr(ptr_), last_access(last_access_) { } bool isWeak() const { - return ptr == nullptr; + if (!ptr) + return true; + return ptr.isWeak(); } bool isCached() const { - return ptr != nullptr; + return ptr && ptr.isStrong(); } bool isExpired() const { - return weak_ptr.expired(); + return ptr.expired(); } - std::shared_ptr + SharedPointerType lock() { - return weak_ptr.lock(); + return ptr.lock(); } void touch(clock_type::time_point const& now) @@ -658,72 +301,7 @@ private: typename KeyValueCacheType::map_type& partition, SweptPointersVector& stuffToSweep, std::atomic& allRemovals, - std::lock_guard const&) - { - return std::thread([&, this]() { - int cacheRemovals = 0; - int mapRemovals = 0; - - // Keep references to all the stuff we sweep - // so that 
we can destroy them outside the lock. - stuffToSweep.first.reserve(partition.size()); - stuffToSweep.second.reserve(partition.size()); - { - auto cit = partition.begin(); - while (cit != partition.end()) - { - if (cit->second.isWeak()) - { - // weak - if (cit->second.isExpired()) - { - stuffToSweep.second.push_back( - std::move(cit->second.weak_ptr)); - ++mapRemovals; - cit = partition.erase(cit); - } - else - { - ++cit; - } - } - else if (cit->second.last_access <= when_expire) - { - // strong, expired - ++cacheRemovals; - if (cit->second.ptr.use_count() == 1) - { - stuffToSweep.first.push_back( - std::move(cit->second.ptr)); - ++mapRemovals; - cit = partition.erase(cit); - } - else - { - // remains weakly cached - cit->second.ptr.reset(); - ++cit; - } - } - else - { - // strong, not expired - ++cit; - } - } - } - - if (mapRemovals || cacheRemovals) - { - JLOG(m_journal.debug()) - << "TaggedCache partition sweep " << m_name - << ": cache = " << partition.size() << "-" << cacheRemovals - << ", map-=" << mapRemovals; - } - - allRemovals += cacheRemovals; - }); - } + std::lock_guard const&); [[nodiscard]] std::thread sweepHelper( @@ -732,45 +310,7 @@ private: typename KeyOnlyCacheType::map_type& partition, SweptPointersVector&, std::atomic& allRemovals, - std::lock_guard const&) - { - return std::thread([&, this]() { - int cacheRemovals = 0; - int mapRemovals = 0; - - // Keep references to all the stuff we sweep - // so that we can destroy them outside the lock. - { - auto cit = partition.begin(); - while (cit != partition.end()) - { - if (cit->second.last_access > now) - { - cit->second.last_access = now; - ++cit; - } - else if (cit->second.last_access <= when_expire) - { - cit = partition.erase(cit); - } - else - { - ++cit; - } - } - } - - if (mapRemovals || cacheRemovals) - { - JLOG(m_journal.debug()) - << "TaggedCache partition sweep " << m_name - << ": cache = " << partition.size() << "-" << cacheRemovals - << ", map-=" << mapRemovals; - } - - allRemovals += cacheRemovals; - }); - }; + std::lock_guard const&); beast::Journal m_journal; clock_type& m_clock; @@ -782,10 +322,10 @@ private: std::string m_name; // Desired number of cache entries (0 = ignore) - int m_target_size; + const int m_target_size; // Desired maximum cache age - clock_type::duration m_target_age; + const clock_type::duration m_target_age; // Number of items cached int m_cache_count; diff --git a/include/xrpl/basics/TaggedCache.ipp b/include/xrpl/basics/TaggedCache.ipp new file mode 100644 index 0000000000..0108061680 --- /dev/null +++ b/include/xrpl/basics/TaggedCache.ipp @@ -0,0 +1,1029 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012, 2013 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_BASICS_TAGGEDCACHE_IPP_INCLUDED +#define RIPPLE_BASICS_TAGGEDCACHE_IPP_INCLUDED + +#include +#include + +namespace ripple { + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>:: + TaggedCache( + std::string const& name, + int size, + clock_type::duration expiration, + clock_type& clock, + beast::Journal journal, + beast::insight::Collector::ptr const& collector) + : m_journal(journal) + , m_clock(clock) + , m_stats(name, std::bind(&TaggedCache::collect_metrics, this), collector) + , m_name(name) + , m_target_size(size) + , m_target_age(expiration) + , m_cache_count(0) + , m_hits(0) + , m_misses(0) +{ +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline auto +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::clock() -> clock_type& +{ + return m_clock; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline std::size_t +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::size() const +{ + std::lock_guard lock(m_mutex); + return m_cache.size(); +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline int +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::getCacheSize() const +{ + std::lock_guard lock(m_mutex); + return m_cache_count; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline int +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::getTrackSize() const +{ + std::lock_guard lock(m_mutex); + return m_cache.size(); +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline float +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::getHitRate() +{ + std::lock_guard lock(m_mutex); + auto const total = static_cast(m_hits + m_misses); + return m_hits * (100.0f / std::max(1.0f, total)); +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline void +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::clear() +{ + std::lock_guard lock(m_mutex); + m_cache.clear(); + m_cache_count = 0; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline void +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::reset() +{ 
+ std::lock_guard lock(m_mutex); + m_cache.clear(); + m_cache_count = 0; + m_hits = 0; + m_misses = 0; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +template +inline bool +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::touch_if_exists(KeyComparable const& key) +{ + std::lock_guard lock(m_mutex); + auto const iter(m_cache.find(key)); + if (iter == m_cache.end()) + { + ++m_stats.misses; + return false; + } + iter->second.touch(m_clock.now()); + ++m_stats.hits; + return true; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline void +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::sweep() +{ + // Keep references to all the stuff we sweep + // For performance, each worker thread should exit before the swept data + // is destroyed but still within the main cache lock. + std::vector allStuffToSweep(m_cache.partitions()); + + clock_type::time_point const now(m_clock.now()); + clock_type::time_point when_expire; + + auto const start = std::chrono::steady_clock::now(); + { + std::lock_guard lock(m_mutex); + + if (m_target_size == 0 || + (static_cast(m_cache.size()) <= m_target_size)) + { + when_expire = now - m_target_age; + } + else + { + when_expire = now - m_target_age * m_target_size / m_cache.size(); + + clock_type::duration const minimumAge(std::chrono::seconds(1)); + if (when_expire > (now - minimumAge)) + when_expire = now - minimumAge; + + JLOG(m_journal.trace()) + << m_name << " is growing fast " << m_cache.size() << " of " + << m_target_size << " aging at " << (now - when_expire).count() + << " of " << m_target_age.count(); + } + + std::vector workers; + workers.reserve(m_cache.partitions()); + std::atomic allRemovals = 0; + + for (std::size_t p = 0; p < m_cache.partitions(); ++p) + { + workers.push_back(sweepHelper( + when_expire, + now, + m_cache.map()[p], + allStuffToSweep[p], + allRemovals, + lock)); + } + for (std::thread& worker : workers) + worker.join(); + + m_cache_count -= allRemovals; + } + // At this point allStuffToSweep will go out of scope outside the lock + // and decrement the reference count on each strong pointer. + JLOG(m_journal.debug()) + << m_name << " TaggedCache sweep lock duration " + << std::chrono::duration_cast( + std::chrono::steady_clock::now() - start) + .count() + << "ms"; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline bool +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::del(const key_type& key, bool valid) +{ + // Remove from cache, if !valid, remove from map too. 
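// Reference sketch, not rippled code: worked numbers for the ageing rule in the
// sweep() implementation above. At or under the target size the full target age
// applies; over the target the allowed age shrinks in proportion, but never
// below the one-second minimum. Plain arithmetic, no cache types; the numbers
// below are made up for the example.
#include <algorithm>
#include <chrono>
#include <iostream>

int
main()
{
    using namespace std::chrono;
    seconds const target_age{300};  // keep entries for roughly five minutes
    int const target_size = 1000;
    int const cache_size = 4000;    // four times over target

    seconds age = target_age;       // default when at or under target
    if (target_size != 0 && cache_size > target_size)
    {
        age = target_age * target_size / cache_size;  // 75s here
        age = std::max(age, seconds{1});              // never below one second
    }

    std::cout << "entries idle for more than " << age.count()
              << "s are swept\n";
}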
Returns true if + // removed from cache + std::lock_guard lock(m_mutex); + + auto cit = m_cache.find(key); + + if (cit == m_cache.end()) + return false; + + Entry& entry = cit->second; + + bool ret = false; + + if (entry.isCached()) + { + --m_cache_count; + entry.ptr.convertToWeak(); + ret = true; + } + + if (!valid || entry.isExpired()) + m_cache.erase(cit); + + return ret; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +template +inline bool +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>:: + canonicalize( + const key_type& key, + SharedPointerType& data, + R&& replaceCallback) +{ + // Return canonical value, store if needed, refresh in cache + // Return values: true=we had the data already + std::lock_guard lock(m_mutex); + + auto cit = m_cache.find(key); + + if (cit == m_cache.end()) + { + m_cache.emplace( + std::piecewise_construct, + std::forward_as_tuple(key), + std::forward_as_tuple(m_clock.now(), data)); + ++m_cache_count; + return false; + } + + Entry& entry = cit->second; + entry.touch(m_clock.now()); + + auto shouldReplace = [&] { + if constexpr (std::is_invocable_r_v) + { + // The reason for this extra complexity is for intrusive + // strong/weak combo getting a strong is relatively expensive + // and not needed for many cases. + return replaceCallback(); + } + else + { + return replaceCallback(entry.ptr.getStrong()); + } + }; + + if (entry.isCached()) + { + if (shouldReplace()) + { + entry.ptr = data; + } + else + { + data = entry.ptr.getStrong(); + } + + return true; + } + + auto cachedData = entry.lock(); + + if (cachedData) + { + if (shouldReplace()) + { + entry.ptr = data; + } + else + { + entry.ptr.convertToStrong(); + data = cachedData; + } + + ++m_cache_count; + return true; + } + + entry.ptr = data; + ++m_cache_count; + + return false; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline bool +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>:: + canonicalize_replace_cache( + const key_type& key, + SharedPointerType const& data) +{ + return canonicalize( + key, const_cast(data), []() { return true; }); +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline bool +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>:: + canonicalize_replace_client(const key_type& key, SharedPointerType& data) +{ + return canonicalize(key, data, []() { return false; }); +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline SharedPointerType +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::fetch(const key_type& key) +{ + std::lock_guard l(m_mutex); + auto ret = initialFetch(key, l); + if (!ret) + ++m_misses; + return ret; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +template +inline auto +TaggedCache< + Key, + T, + IsKeyCache, + 
SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::insert(key_type const& key, T const& value) + -> std::enable_if_t +{ + static_assert( + std::is_same_v, SharedPointerType> || + std::is_same_v, SharedPointerType>); + + if constexpr (std::is_same_v, SharedPointerType>) + { + auto p = std::make_shared(std::cref(value)); + return canonicalize_replace_client(key, p); + } + if constexpr (std::is_same_v, SharedPointerType>) + { + auto p = intr_ptr::make_shared(std::cref(value)); + return canonicalize_replace_client(key, p); + } +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +template +inline auto +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::insert(key_type const& key) + -> std::enable_if_t +{ + std::lock_guard lock(m_mutex); + clock_type::time_point const now(m_clock.now()); + auto [it, inserted] = m_cache.emplace( + std::piecewise_construct, + std::forward_as_tuple(key), + std::forward_as_tuple(now)); + if (!inserted) + it->second.last_access = now; + return inserted; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline bool +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::retrieve(const key_type& key, T& data) +{ + // retrieve the value of the stored data + auto entry = fetch(key); + + if (!entry) + return false; + + data = *entry; + return true; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline auto +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::peekMutex() -> mutex_type& +{ + return m_mutex; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline auto +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::getKeys() const -> std::vector +{ + std::vector v; + + { + std::lock_guard lock(m_mutex); + v.reserve(m_cache.size()); + for (auto const& _ : m_cache) + v.push_back(_.first); + } + + return v; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline double +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::rate() const +{ + std::lock_guard lock(m_mutex); + auto const tot = m_hits + m_misses; + if (tot == 0) + return 0; + return double(m_hits) / tot; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +template +inline SharedPointerType +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::fetch(key_type const& digest, Handler const& h) +{ + { + std::lock_guard l(m_mutex); + if (auto ret = initialFetch(digest, l)) + return ret; + } + + auto sle = h(); + if (!sle) + return {}; + + std::lock_guard l(m_mutex); + ++m_misses; + auto const [it, inserted] = + 
m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle))); + if (!inserted) + it->second.touch(m_clock.now()); + return it->second.ptr.getStrong(); +} +// End CachedSLEs functions. + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline SharedPointerType +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>:: + initialFetch(key_type const& key, std::lock_guard const& l) +{ + auto cit = m_cache.find(key); + if (cit == m_cache.end()) + return {}; + + Entry& entry = cit->second; + if (entry.isCached()) + { + ++m_hits; + entry.touch(m_clock.now()); + return entry.ptr.getStrong(); + } + entry.ptr = entry.lock(); + if (entry.isCached()) + { + // independent of cache size, so not counted as a hit + ++m_cache_count; + entry.touch(m_clock.now()); + return entry.ptr.getStrong(); + } + + m_cache.erase(cit); + return {}; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline void +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::collect_metrics() +{ + m_stats.size.set(getCacheSize()); + + { + beast::insight::Gauge::value_type hit_rate(0); + { + std::lock_guard lock(m_mutex); + auto const total(m_hits + m_misses); + if (total != 0) + hit_rate = (m_hits * 100) / total; + } + m_stats.hit_rate.set(hit_rate); + } +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline std::thread +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>:: + sweepHelper( + clock_type::time_point const& when_expire, + [[maybe_unused]] clock_type::time_point const& now, + typename KeyValueCacheType::map_type& partition, + SweptPointersVector& stuffToSweep, + std::atomic& allRemovals, + std::lock_guard const&) +{ + return std::thread([&, this]() { + int cacheRemovals = 0; + int mapRemovals = 0; + + // Keep references to all the stuff we sweep + // so that we can destroy them outside the lock. 
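// Reference sketch, not rippled code: the fetch paths above combine three cases,
// a plain strong hit, resurrection of an entry the cache only holds weakly
// (initialFetch), and a loader called on a genuine miss (the Handler overload of
// fetch). The same control flow with plain std::shared_ptr/std::weak_ptr; Entry,
// fetch_or_load and the loader are hypothetical stand-ins.
#include <cassert>
#include <functional>
#include <map>
#include <memory>

struct Entry
{
    std::shared_ptr<int> strong;  // empty while the cache holds it weakly
    std::weak_ptr<int> weak;
};

std::shared_ptr<int>
fetch_or_load(
    std::map<int, Entry>& cache,
    int key,
    std::function<std::shared_ptr<int>()> const& loader)
{
    if (auto it = cache.find(key); it != cache.end())
    {
        if (it->second.strong)
            return it->second.strong;  // strong hit
        if (auto s = it->second.weak.lock())
        {
            it->second.strong = s;  // resurrect the weak entry
            return s;
        }
        cache.erase(it);  // expired entry: fall through to the loader
    }
    auto loaded = loader();  // miss: ask the backing store
    if (loaded)
        cache[key] = Entry{loaded, loaded};
    return loaded;
}

int
main()
{
    std::map<int, Entry> cache;
    int loads = 0;
    auto loader = [&] {
        ++loads;
        return std::make_shared<int>(7);
    };

    auto outside = fetch_or_load(cache, 1, loader);      // miss: loader runs
    cache[1].strong.reset();                             // demote to weak only
    assert(fetch_or_load(cache, 1, loader) == outside);  // resurrected, no load
    assert(loads == 1);
}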
+ stuffToSweep.reserve(partition.size()); + { + auto cit = partition.begin(); + while (cit != partition.end()) + { + if (cit->second.isWeak()) + { + // weak + if (cit->second.isExpired()) + { + stuffToSweep.emplace_back(std::move(cit->second.ptr)); + ++mapRemovals; + cit = partition.erase(cit); + } + else + { + ++cit; + } + } + else if (cit->second.last_access <= when_expire) + { + // strong, expired + ++cacheRemovals; + if (cit->second.ptr.use_count() == 1) + { + stuffToSweep.emplace_back(std::move(cit->second.ptr)); + ++mapRemovals; + cit = partition.erase(cit); + } + else + { + // remains weakly cached + cit->second.ptr.convertToWeak(); + ++cit; + } + } + else + { + // strong, not expired + ++cit; + } + } + } + + if (mapRemovals || cacheRemovals) + { + JLOG(m_journal.debug()) + << "TaggedCache partition sweep " << m_name + << ": cache = " << partition.size() << "-" << cacheRemovals + << ", map-=" << mapRemovals; + } + + allRemovals += cacheRemovals; + }); +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline std::thread +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>:: + sweepHelper( + clock_type::time_point const& when_expire, + clock_type::time_point const& now, + typename KeyOnlyCacheType::map_type& partition, + SweptPointersVector&, + std::atomic& allRemovals, + std::lock_guard const&) +{ + return std::thread([&, this]() { + int cacheRemovals = 0; + int mapRemovals = 0; + + // Keep references to all the stuff we sweep + // so that we can destroy them outside the lock. + { + auto cit = partition.begin(); + while (cit != partition.end()) + { + if (cit->second.last_access > now) + { + cit->second.last_access = now; + ++cit; + } + else if (cit->second.last_access <= when_expire) + { + cit = partition.erase(cit); + } + else + { + ++cit; + } + } + } + + if (mapRemovals || cacheRemovals) + { + JLOG(m_journal.debug()) + << "TaggedCache partition sweep " << m_name + << ": cache = " << partition.size() << "-" << cacheRemovals + << ", map-=" << mapRemovals; + } + + allRemovals += cacheRemovals; + }); +} + +} // namespace ripple + +#endif diff --git a/src/test/basics/IntrusiveShared_test.cpp b/src/test/basics/IntrusiveShared_test.cpp new file mode 100644 index 0000000000..fe0cdba777 --- /dev/null +++ b/src/test/basics/IntrusiveShared_test.cpp @@ -0,0 +1,842 @@ +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace tests { + +namespace { +enum class TrackedState : std::uint8_t { + uninitialized, + alive, + partiallyDeletedStarted, + partiallyDeleted, + deletedStarted, + deleted +}; + +class TIBase : public IntrusiveRefCounts +{ +public: + static constexpr std::size_t maxStates = 128; + static std::array, maxStates> state; + static std::atomic nextId; + static TrackedState + getState(int id) + { + assert(id < state.size()); + return state[id].load(std::memory_order_acquire); + } + static void + resetStates(bool resetCallback) + { + for (int i = 0; i < maxStates; ++i) + { + state[i].store( + TrackedState::uninitialized, std::memory_order_release); + } + nextId.store(0, std::memory_order_release); + if (resetCallback) + TIBase::tracingCallback_ = [](TrackedState, + std::optional) {}; + } + + struct ResetStatesGuard + { + bool resetCallback_{false}; + + ResetStatesGuard(bool 
resetCallback) : resetCallback_{resetCallback} + { + TIBase::resetStates(resetCallback_); + } + ~ResetStatesGuard() + { + TIBase::resetStates(resetCallback_); + } + }; + + TIBase() : id_{checkoutID()} + { + assert(state.size() > id_); + state[id_].store(TrackedState::alive, std::memory_order_relaxed); + } + ~TIBase() + { + using enum TrackedState; + + assert(state.size() > id_); + tracingCallback_( + state[id_].load(std::memory_order_relaxed), deletedStarted); + + assert(state.size() > id_); + // Use relaxed memory order to try to avoid atomic operations from + // adding additional memory synchronizations that may hide threading + // errors in the underlying shared pointer class. + state[id_].store(deletedStarted, std::memory_order_relaxed); + + tracingCallback_(deletedStarted, deleted); + + assert(state.size() > id_); + state[id_].store(TrackedState::deleted, std::memory_order_relaxed); + + tracingCallback_(TrackedState::deleted, std::nullopt); + } + + void + partialDestructor() + { + using enum TrackedState; + + assert(state.size() > id_); + tracingCallback_( + state[id_].load(std::memory_order_relaxed), + partiallyDeletedStarted); + + assert(state.size() > id_); + state[id_].store(partiallyDeletedStarted, std::memory_order_relaxed); + + tracingCallback_(partiallyDeletedStarted, partiallyDeleted); + + assert(state.size() > id_); + state[id_].store(partiallyDeleted, std::memory_order_relaxed); + + tracingCallback_(partiallyDeleted, std::nullopt); + } + + static std::function)> + tracingCallback_; + + int id_; + +private: + static int + checkoutID() + { + return nextId.fetch_add(1, std::memory_order_acq_rel); + } +}; + +std::array, TIBase::maxStates> TIBase::state; +std::atomic TIBase::nextId{0}; + +std::function)> + TIBase::tracingCallback_ = [](TrackedState, std::optional) {}; + +} // namespace + +class IntrusiveShared_test : public beast::unit_test::suite +{ +public: + void + testBasics() + { + testcase("Basics"); + + { + TIBase::ResetStatesGuard rsg{true}; + + TIBase b; + BEAST_EXPECT(b.use_count() == 1); + b.addWeakRef(); + BEAST_EXPECT(b.use_count() == 1); + auto s = b.releaseStrongRef(); + BEAST_EXPECT(s == ReleaseStrongRefAction::partialDestroy); + BEAST_EXPECT(b.use_count() == 0); + TIBase* pb = &b; + partialDestructorFinished(&pb); + BEAST_EXPECT(!pb); + auto w = b.releaseWeakRef(); + BEAST_EXPECT(w == ReleaseWeakRefAction::destroy); + } + + std::vector> strong; + std::vector> weak; + { + TIBase::ResetStatesGuard rsg{true}; + + using enum TrackedState; + auto b = make_SharedIntrusive(); + auto id = b->id_; + BEAST_EXPECT(TIBase::getState(id) == alive); + BEAST_EXPECT(b->use_count() == 1); + for (int i = 0; i < 10; ++i) + { + strong.push_back(b); + } + b.reset(); + BEAST_EXPECT(TIBase::getState(id) == alive); + strong.resize(strong.size() - 1); + BEAST_EXPECT(TIBase::getState(id) == alive); + strong.clear(); + BEAST_EXPECT(TIBase::getState(id) == deleted); + + b = make_SharedIntrusive(); + id = b->id_; + BEAST_EXPECT(TIBase::getState(id) == alive); + BEAST_EXPECT(b->use_count() == 1); + for (int i = 0; i < 10; ++i) + { + weak.push_back(b); + BEAST_EXPECT(b->use_count() == 1); + } + BEAST_EXPECT(TIBase::getState(id) == alive); + weak.resize(weak.size() - 1); + BEAST_EXPECT(TIBase::getState(id) == alive); + b.reset(); + BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted); + while (!weak.empty()) + { + weak.resize(weak.size() - 1); + if (weak.size()) + BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted); + } + BEAST_EXPECT(TIBase::getState(id) == deleted); + } + { + 
TIBase::ResetStatesGuard rsg{true}; + + using enum TrackedState; + auto b = make_SharedIntrusive(); + auto id = b->id_; + BEAST_EXPECT(TIBase::getState(id) == alive); + WeakIntrusive w{b}; + BEAST_EXPECT(TIBase::getState(id) == alive); + auto s = w.lock(); + BEAST_EXPECT(s && s->use_count() == 2); + b.reset(); + BEAST_EXPECT(TIBase::getState(id) == alive); + BEAST_EXPECT(s && s->use_count() == 1); + s.reset(); + BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted); + BEAST_EXPECT(w.expired()); + s = w.lock(); + // Cannot convert a weak pointer to a strong pointer if object is + // already partially deleted + BEAST_EXPECT(!s); + w.reset(); + BEAST_EXPECT(TIBase::getState(id) == deleted); + } + { + TIBase::ResetStatesGuard rsg{true}; + + using enum TrackedState; + using swu = SharedWeakUnion; + swu b = make_SharedIntrusive(); + BEAST_EXPECT(b.isStrong() && b.use_count() == 1); + auto id = b.get()->id_; + BEAST_EXPECT(TIBase::getState(id) == alive); + swu w = b; + BEAST_EXPECT(TIBase::getState(id) == alive); + BEAST_EXPECT(w.isStrong() && b.use_count() == 2); + w.convertToWeak(); + BEAST_EXPECT(w.isWeak() && b.use_count() == 1); + swu s = w; + BEAST_EXPECT(s.isWeak() && b.use_count() == 1); + s.convertToStrong(); + BEAST_EXPECT(s.isStrong() && b.use_count() == 2); + b.reset(); + BEAST_EXPECT(TIBase::getState(id) == alive); + BEAST_EXPECT(s.use_count() == 1); + BEAST_EXPECT(!w.expired()); + s.reset(); + BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted); + BEAST_EXPECT(w.expired()); + w.convertToStrong(); + // Cannot convert a weak pointer to a strong pointer if object is + // already partially deleted + BEAST_EXPECT(w.isWeak()); + w.reset(); + BEAST_EXPECT(TIBase::getState(id) == deleted); + } + { + // Testing SharedWeakUnion assignment operator + + TIBase::ResetStatesGuard rsg{true}; + + auto strong1 = make_SharedIntrusive(); + auto strong2 = make_SharedIntrusive(); + + auto id1 = strong1->id_; + auto id2 = strong2->id_; + + BEAST_EXPECT(id1 != id2); + + SharedWeakUnion union1 = strong1; + SharedWeakUnion union2 = strong2; + + BEAST_EXPECT(union1.isStrong()); + BEAST_EXPECT(union2.isStrong()); + BEAST_EXPECT(union1.get() == strong1.get()); + BEAST_EXPECT(union2.get() == strong2.get()); + + // 1) Normal assignment: explicitly calls SharedWeakUnion assignment + union1 = union2; + BEAST_EXPECT(union1.isStrong()); + BEAST_EXPECT(union2.isStrong()); + BEAST_EXPECT(union1.get() == union2.get()); + BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive); + BEAST_EXPECT(TIBase::getState(id2) == TrackedState::alive); + + // 2) Test self-assignment + BEAST_EXPECT(union1.isStrong()); + BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive); + int initialRefCount = strong1->use_count(); +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wself-assign-overloaded" + union1 = union1; // Self-assignment +#pragma clang diagnostic pop + BEAST_EXPECT(union1.isStrong()); + BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive); + BEAST_EXPECT(strong1->use_count() == initialRefCount); + + // 3) Test assignment from null union pointer + union1 = SharedWeakUnion(); + BEAST_EXPECT(union1.get() == nullptr); + + // 4) Test assignment to expired union pointer + strong2.reset(); + union2.reset(); + union1 = union2; + BEAST_EXPECT(union1.get() == nullptr); + BEAST_EXPECT(TIBase::getState(id2) == TrackedState::deleted); + } + } + + void + testPartialDelete() + { + testcase("Partial Delete"); + + // This test creates two threads. One with a strong pointer and one + // with a weak pointer. 
The strong pointer is reset while the weak + // pointer still holds a reference, triggering a partial delete. + // While the partial delete function runs (a sleep is inserted) the + // weak pointer is reset. The destructor should wait to run until + // after the partial delete function has completed running. + + using enum TrackedState; + + TIBase::ResetStatesGuard rsg{true}; + + auto strong = make_SharedIntrusive(); + WeakIntrusive weak{strong}; + bool destructorRan = false; + bool partialDeleteRan = false; + std::latch partialDeleteStartedSyncPoint{2}; + strong->tracingCallback_ = [&](TrackedState cur, + std::optional next) { + using enum TrackedState; + if (next == deletedStarted) + { + // strong goes out of scope while weak is still in scope + // This checks that partialDelete has run to completion + // before the desturctor is called. A sleep is inserted + // inside the partial delete to make sure the destructor is + // given an opportunity to run durring partial delete. + BEAST_EXPECT(cur == partiallyDeleted); + } + if (next == partiallyDeletedStarted) + { + partialDeleteStartedSyncPoint.arrive_and_wait(); + using namespace std::chrono_literals; + // Sleep and let the weak pointer go out of scope, + // potentially triggering a destructor while partial delete + // is running. The test is to make sure that doesn't happen. + std::this_thread::sleep_for(800ms); + } + if (next == partiallyDeleted) + { + BEAST_EXPECT(!partialDeleteRan && !destructorRan); + partialDeleteRan = true; + } + if (next == deleted) + { + BEAST_EXPECT(!destructorRan); + destructorRan = true; + } + }; + std::thread t1{[&] { + partialDeleteStartedSyncPoint.arrive_and_wait(); + weak.reset(); // Trigger a full delete as soon as the partial + // delete starts + }}; + std::thread t2{[&] { + strong.reset(); // Trigger a partial delete + }}; + t1.join(); + t2.join(); + + BEAST_EXPECT(destructorRan && partialDeleteRan); + } + + void + testDestructor() + { + testcase("Destructor"); + + // This test creates two threads. One with a strong pointer and one + // with a weak pointer. The weak pointer is reset while the strong + // pointer still holds a reference. Then the strong pointer is + // reset. Only the destructor should run. The partial destructor + // should not be called. Since the weak reset runs to completion + // before the strong pointer is reset, threading doesn't add much to + // this test, but there is no harm in keeping it. + + using enum TrackedState; + + TIBase::ResetStatesGuard rsg{true}; + + auto strong = make_SharedIntrusive(); + WeakIntrusive weak{strong}; + bool destructorRan = false; + bool partialDeleteRan = false; + std::latch weakResetSyncPoint{2}; + strong->tracingCallback_ = [&](TrackedState cur, + std::optional next) { + using enum TrackedState; + if (next == partiallyDeleted) + { + BEAST_EXPECT(!partialDeleteRan && !destructorRan); + partialDeleteRan = true; + } + if (next == deleted) + { + BEAST_EXPECT(!destructorRan); + destructorRan = true; + } + }; + std::thread t1{[&] { + weak.reset(); + weakResetSyncPoint.arrive_and_wait(); + }}; + std::thread t2{[&] { + weakResetSyncPoint.arrive_and_wait(); + strong.reset(); // Trigger a partial delete + }}; + t1.join(); + t2.join(); + + BEAST_EXPECT(destructorRan && !partialDeleteRan); + } + + void + testMultithreadedClearMixedVariant() + { + testcase("Multithreaded Clear Mixed Variant"); + + // This test creates and destroys many strong and weak pointers in a + // loop. 
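// Reference sketch, not rippled code: the two tests above rely on std::latch as
// a two-party sync point so the interleaving they want to exercise actually
// happens (for example, the weak reset racing the partial delete). The bare
// pattern, detached from the intrusive pointer types (C++20 <latch>):
#include <iostream>
#include <latch>
#include <thread>

int
main()
{
    std::latch bothReady{2};

    std::thread t1{[&] {
        bothReady.arrive_and_wait();  // neither side proceeds alone
        std::cout << "t1 past the sync point\n";
    }};
    std::thread t2{[&] {
        bothReady.arrive_and_wait();
        std::cout << "t2 past the sync point\n";
    }};

    t1.join();
    t2.join();
}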
There is a random mix of strong and weak pointers stored in + // a vector (held as a variant). Both threads clear all the pointers + // and check that the invariants hold. + + using enum TrackedState; + TIBase::ResetStatesGuard rsg{true}; + + std::atomic destructionState{0}; + // returns destructorRan and partialDestructorRan (in that order) + auto getDestructorState = [&]() -> std::pair { + int s = destructionState.load(std::memory_order_relaxed); + return {(s & 1) != 0, (s & 2) != 0}; + }; + auto setDestructorRan = [&]() -> void { + destructionState.fetch_or(1, std::memory_order_acq_rel); + }; + auto setPartialDeleteRan = [&]() -> void { + destructionState.fetch_or(2, std::memory_order_acq_rel); + }; + auto tracingCallback = [&](TrackedState cur, + std::optional next) { + using enum TrackedState; + auto [destructorRan, partialDeleteRan] = getDestructorState(); + if (next == partiallyDeleted) + { + BEAST_EXPECT(!partialDeleteRan && !destructorRan); + setPartialDeleteRan(); + } + if (next == deleted) + { + BEAST_EXPECT(!destructorRan); + setDestructorRan(); + } + }; + auto createVecOfPointers = [&](auto const& toClone, + std::default_random_engine& eng) + -> std::vector< + std::variant, WeakIntrusive>> { + std::vector< + std::variant, WeakIntrusive>> + result; + std::uniform_int_distribution<> toCreateDist(4, 64); + std::uniform_int_distribution<> isStrongDist(0, 1); + auto numToCreate = toCreateDist(eng); + result.reserve(numToCreate); + for (int i = 0; i < numToCreate; ++i) + { + if (isStrongDist(eng)) + { + result.push_back(SharedIntrusive(toClone)); + } + else + { + result.push_back(WeakIntrusive(toClone)); + } + } + return result; + }; + constexpr int loopIters = 2 * 1024; + constexpr int numThreads = 16; + std::vector> toClone; + std::barrier loopStartSyncPoint{numThreads}; + std::barrier postCreateToCloneSyncPoint{numThreads}; + std::barrier postCreateVecOfPointersSyncPoint{numThreads}; + auto engines = [&]() -> std::vector { + std::random_device rd; + std::vector result; + result.reserve(numThreads); + for (int i = 0; i < numThreads; ++i) + result.emplace_back(rd()); + return result; + }(); + + // cloneAndDestroy clones the strong pointer into a vector of mixed + // strong and weak pointers and destroys them all at once. + // threadId==0 is special. + auto cloneAndDestroy = [&](int threadId) { + for (int i = 0; i < loopIters; ++i) + { + // ------ Sync Point ------ + loopStartSyncPoint.arrive_and_wait(); + + // only thread 0 should reset the state + std::optional rsg; + if (threadId == 0) + { + // Thread 0 is the genesis thread. It creates the strong + // pointers to be cloned by the other threads. This + // thread will also check that the destructor ran and + // clear the temporary variables. 
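// Reference sketch, not rippled code: the destructionState flag used above packs
// two booleans ("destructor ran", "partial destructor ran") into one
// std::atomic<int> so both can be set and read from any thread without a lock.
// The pattern in isolation:
#include <atomic>
#include <cassert>
#include <utility>

std::atomic<int> state{0};

void
setDestructorRan()
{
    state.fetch_or(1, std::memory_order_acq_rel);
}

void
setPartialDeleteRan()
{
    state.fetch_or(2, std::memory_order_acq_rel);
}

// returns {destructorRan, partialDeleteRan}
std::pair<bool, bool>
getState()
{
    int const s = state.load(std::memory_order_acquire);
    return {(s & 1) != 0, (s & 2) != 0};
}

int
main()
{
    setPartialDeleteRan();
    auto [dtor, partial] = getState();
    assert(!dtor && partial);
    setDestructorRan();
    assert(getState() == std::make_pair(true, true));
}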
+ + rsg.emplace(false); + auto [destructorRan, partialDeleteRan] = + getDestructorState(); + BEAST_EXPECT(!i || destructorRan); + destructionState.store(0, std::memory_order_release); + + toClone.clear(); + toClone.resize(numThreads); + auto strong = make_SharedIntrusive(); + strong->tracingCallback_ = tracingCallback; + std::fill(toClone.begin(), toClone.end(), strong); + } + + // ------ Sync Point ------ + postCreateToCloneSyncPoint.arrive_and_wait(); + + auto v = + createVecOfPointers(toClone[threadId], engines[threadId]); + toClone[threadId].reset(); + + // ------ Sync Point ------ + postCreateVecOfPointersSyncPoint.arrive_and_wait(); + + v.clear(); + } + }; + std::vector threads; + for (int i = 0; i < numThreads; ++i) + { + threads.emplace_back(cloneAndDestroy, i); + } + for (int i = 0; i < numThreads; ++i) + { + threads[i].join(); + } + } + + void + testMultithreadedClearMixedUnion() + { + testcase("Multithreaded Clear Mixed Union"); + + // This test creates and destroys many SharedWeak pointers in a + // loop. All the pointers start as strong and a loop randomly + // convert them between strong and weak pointers. Both threads clear + // all the pointers and check that the invariants hold. + // + // Note: This test also differs from the test above in that the pointers + // randomly change from strong to weak and from weak to strong in a + // loop. This can't be done in the variant test above because variant is + // not thread safe while the SharedWeakUnion is thread safe. + + using enum TrackedState; + + TIBase::ResetStatesGuard rsg{true}; + + std::atomic destructionState{0}; + // returns destructorRan and partialDestructorRan (in that order) + auto getDestructorState = [&]() -> std::pair { + int s = destructionState.load(std::memory_order_relaxed); + return {(s & 1) != 0, (s & 2) != 0}; + }; + auto setDestructorRan = [&]() -> void { + destructionState.fetch_or(1, std::memory_order_acq_rel); + }; + auto setPartialDeleteRan = [&]() -> void { + destructionState.fetch_or(2, std::memory_order_acq_rel); + }; + auto tracingCallback = [&](TrackedState cur, + std::optional next) { + using enum TrackedState; + auto [destructorRan, partialDeleteRan] = getDestructorState(); + if (next == partiallyDeleted) + { + BEAST_EXPECT(!partialDeleteRan && !destructorRan); + setPartialDeleteRan(); + } + if (next == deleted) + { + BEAST_EXPECT(!destructorRan); + setDestructorRan(); + } + }; + auto createVecOfPointers = [&](auto const& toClone, + std::default_random_engine& eng) + -> std::vector> { + std::vector> result; + std::uniform_int_distribution<> toCreateDist(4, 64); + auto numToCreate = toCreateDist(eng); + result.reserve(numToCreate); + for (int i = 0; i < numToCreate; ++i) + result.push_back(SharedIntrusive(toClone)); + return result; + }; + constexpr int loopIters = 2 * 1024; + constexpr int flipPointersLoopIters = 256; + constexpr int numThreads = 16; + std::vector> toClone; + std::barrier loopStartSyncPoint{numThreads}; + std::barrier postCreateToCloneSyncPoint{numThreads}; + std::barrier postCreateVecOfPointersSyncPoint{numThreads}; + std::barrier postFlipPointersLoopSyncPoint{numThreads}; + auto engines = [&]() -> std::vector { + std::random_device rd; + std::vector result; + result.reserve(numThreads); + for (int i = 0; i < numThreads; ++i) + result.emplace_back(rd()); + return result; + }(); + + // cloneAndDestroy clones the strong pointer into a vector of + // mixed strong and weak pointers, runs a loop that randomly + // changes strong pointers to weak pointers, and destroys them + // all 
at once. + auto cloneAndDestroy = [&](int threadId) { + for (int i = 0; i < loopIters; ++i) + { + // ------ Sync Point ------ + loopStartSyncPoint.arrive_and_wait(); + + // only thread 0 should reset the state + std::optional rsg; + if (threadId == 0) + { + // threadId 0 is the genesis thread. It creates the + // strong point to be cloned by the other threads. This + // thread will also check that the destructor ran and + // clear the temporary variables. + rsg.emplace(false); + auto [destructorRan, partialDeleteRan] = + getDestructorState(); + BEAST_EXPECT(!i || destructorRan); + destructionState.store(0, std::memory_order_release); + + toClone.clear(); + toClone.resize(numThreads); + auto strong = make_SharedIntrusive(); + strong->tracingCallback_ = tracingCallback; + std::fill(toClone.begin(), toClone.end(), strong); + } + + // ------ Sync Point ------ + postCreateToCloneSyncPoint.arrive_and_wait(); + + auto v = + createVecOfPointers(toClone[threadId], engines[threadId]); + toClone[threadId].reset(); + + // ------ Sync Point ------ + postCreateVecOfPointersSyncPoint.arrive_and_wait(); + + std::uniform_int_distribution<> isStrongDist(0, 1); + for (int f = 0; f < flipPointersLoopIters; ++f) + { + for (auto& p : v) + { + if (isStrongDist(engines[threadId])) + { + p.convertToStrong(); + } + else + { + p.convertToWeak(); + } + } + } + + // ------ Sync Point ------ + postFlipPointersLoopSyncPoint.arrive_and_wait(); + + v.clear(); + } + }; + std::vector threads; + for (int i = 0; i < numThreads; ++i) + { + threads.emplace_back(cloneAndDestroy, i); + } + for (int i = 0; i < numThreads; ++i) + { + threads[i].join(); + } + } + + void + testMultithreadedLockingWeak() + { + testcase("Multithreaded Locking Weak"); + + // This test creates a single shared atomic pointer that multiple thread + // create weak pointers from. The threads then lock the weak pointers. + // Both threads clear all the pointers and check that the invariants + // hold. + + using enum TrackedState; + + TIBase::ResetStatesGuard rsg{true}; + + std::atomic destructionState{0}; + // returns destructorRan and partialDestructorRan (in that order) + auto getDestructorState = [&]() -> std::pair { + int s = destructionState.load(std::memory_order_relaxed); + return {(s & 1) != 0, (s & 2) != 0}; + }; + auto setDestructorRan = [&]() -> void { + destructionState.fetch_or(1, std::memory_order_acq_rel); + }; + auto setPartialDeleteRan = [&]() -> void { + destructionState.fetch_or(2, std::memory_order_acq_rel); + }; + auto tracingCallback = [&](TrackedState cur, + std::optional next) { + using enum TrackedState; + auto [destructorRan, partialDeleteRan] = getDestructorState(); + if (next == partiallyDeleted) + { + BEAST_EXPECT(!partialDeleteRan && !destructorRan); + setPartialDeleteRan(); + } + if (next == deleted) + { + BEAST_EXPECT(!destructorRan); + setDestructorRan(); + } + }; + + constexpr int loopIters = 2 * 1024; + constexpr int lockWeakLoopIters = 256; + constexpr int numThreads = 16; + std::vector> toLock; + std::barrier loopStartSyncPoint{numThreads}; + std::barrier postCreateToLockSyncPoint{numThreads}; + std::barrier postLockWeakLoopSyncPoint{numThreads}; + + // lockAndDestroy creates weak pointers from the strong pointer + // and runs a loop that locks the weak pointer. At the end of the loop + // all the pointers are destroyed all at once. 
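// Reference sketch, not rippled code: unlike the single-shot std::latch used
// earlier, the loops in these multithreaded tests rendezvous on std::barrier
// objects that are reused on every iteration. A minimal version of that
// per-iteration sync point (C++20 <barrier>):
#include <barrier>
#include <iostream>
#include <thread>
#include <vector>

int
main()
{
    constexpr int numThreads = 4;
    constexpr int iters = 3;
    std::barrier loopStart{numThreads};

    auto worker = [&](int id) {
        for (int i = 0; i < iters; ++i)
        {
            loopStart.arrive_and_wait();  // all threads enter iteration i together
            if (id == 0)
                std::cout << "iteration " << i << " started by all threads\n";
        }
    };

    std::vector<std::thread> threads;
    for (int id = 0; id < numThreads; ++id)
        threads.emplace_back(worker, id);
    for (auto& t : threads)
        t.join();
}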
+ auto lockAndDestroy = [&](int threadId) { + for (int i = 0; i < loopIters; ++i) + { + // ------ Sync Point ------ + loopStartSyncPoint.arrive_and_wait(); + + // only thread 0 should reset the state + std::optional rsg; + if (threadId == 0) + { + // threadId 0 is the genesis thread. It creates the + // strong point to be locked by the other threads. This + // thread will also check that the destructor ran and + // clear the temporary variables. + rsg.emplace(false); + auto [destructorRan, partialDeleteRan] = + getDestructorState(); + BEAST_EXPECT(!i || destructorRan); + destructionState.store(0, std::memory_order_release); + + toLock.clear(); + toLock.resize(numThreads); + auto strong = make_SharedIntrusive(); + strong->tracingCallback_ = tracingCallback; + std::fill(toLock.begin(), toLock.end(), strong); + } + + // ------ Sync Point ------ + postCreateToLockSyncPoint.arrive_and_wait(); + + // Multiple threads all create a weak pointer from the same + // strong pointer + WeakIntrusive weak{toLock[threadId]}; + for (int wi = 0; wi < lockWeakLoopIters; ++wi) + { + BEAST_EXPECT(!weak.expired()); + auto strong = weak.lock(); + BEAST_EXPECT(strong); + } + + // ------ Sync Point ------ + postLockWeakLoopSyncPoint.arrive_and_wait(); + + toLock[threadId].reset(); + } + }; + std::vector threads; + for (int i = 0; i < numThreads; ++i) + { + threads.emplace_back(lockAndDestroy, i); + } + for (int i = 0; i < numThreads; ++i) + { + threads[i].join(); + } + } + + void + run() override + { + testBasics(); + testPartialDelete(); + testDestructor(); + testMultithreadedClearMixedVariant(); + testMultithreadedClearMixedUnion(); + testMultithreadedLockingWeak(); + } +}; // namespace tests + +BEAST_DEFINE_TESTSUITE(IntrusiveShared, ripple_basics, ripple); +} // namespace tests +} // namespace ripple diff --git a/src/test/basics/KeyCache_test.cpp b/src/test/basics/KeyCache_test.cpp index d39fd4a716..d12dd59af0 100644 --- a/src/test/basics/KeyCache_test.cpp +++ b/src/test/basics/KeyCache_test.cpp @@ -20,9 +20,8 @@ #include #include +#include #include -#include -#include #include namespace ripple { diff --git a/src/test/basics/TaggedCache_test.cpp b/src/test/basics/TaggedCache_test.cpp index 519bc235cd..797838fcfa 100644 --- a/src/test/basics/TaggedCache_test.cpp +++ b/src/test/basics/TaggedCache_test.cpp @@ -20,9 +20,8 @@ #include #include +#include #include -#include -#include #include namespace ripple { diff --git a/src/xrpld/app/ledger/detail/LedgerMaster.cpp b/src/xrpld/app/ledger/detail/LedgerMaster.cpp index 0c6a5e786c..65b3257208 100644 --- a/src/xrpld/app/ledger/detail/LedgerMaster.cpp +++ b/src/xrpld/app/ledger/detail/LedgerMaster.cpp @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -35,15 +34,12 @@ #include #include #include -#include -#include #include #include #include #include #include -#include #include #include #include @@ -57,7 +53,6 @@ #include #include #include -#include #include #include diff --git a/src/xrpld/app/ledger/detail/TransactionMaster.cpp b/src/xrpld/app/ledger/detail/TransactionMaster.cpp index 7acc3579b0..ea13ad53e4 100644 --- a/src/xrpld/app/ledger/detail/TransactionMaster.cpp +++ b/src/xrpld/app/ledger/detail/TransactionMaster.cpp @@ -21,6 +21,7 @@ #include #include +#include #include #include diff --git a/src/xrpld/app/main/Application.h b/src/xrpld/app/main/Application.h index 27d1840766..1bc4998aa1 100644 --- a/src/xrpld/app/main/Application.h +++ b/src/xrpld/app/main/Application.h @@ -55,6 +55,8 @@ template < class Key, class T, bool 
IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, class Hash, class KeyEqual, class Mutex> diff --git a/src/xrpld/ledger/detail/CachedView.cpp b/src/xrpld/ledger/detail/CachedView.cpp index 5d6d354688..365d63e400 100644 --- a/src/xrpld/ledger/detail/CachedView.cpp +++ b/src/xrpld/ledger/detail/CachedView.cpp @@ -19,6 +19,8 @@ #include +#include + namespace ripple { namespace detail { diff --git a/src/xrpld/nodestore/Database.h b/src/xrpld/nodestore/Database.h index e1aa801c44..403a5ea5ee 100644 --- a/src/xrpld/nodestore/Database.h +++ b/src/xrpld/nodestore/Database.h @@ -26,6 +26,7 @@ #include #include +#include #include #include diff --git a/src/xrpld/shamap/SHAMap.h b/src/xrpld/shamap/SHAMap.h index e63245b51c..5771f3ec1d 100644 --- a/src/xrpld/shamap/SHAMap.h +++ b/src/xrpld/shamap/SHAMap.h @@ -29,7 +29,10 @@ #include #include #include +#include +#include +#include #include #include @@ -103,7 +106,7 @@ private: /** The sequence of the ledger that this map references, if any. */ std::uint32_t ledgerSeq_ = 0; - std::shared_ptr root_; + intr_ptr::SharedPtr root_; mutable SHAMapState state_; SHAMapType const type_; bool backed_ = true; // Map is backed by the database @@ -365,29 +368,30 @@ public: invariants() const; private: - using SharedPtrNodeStack = - std::stack, SHAMapNodeID>>; + using SharedPtrNodeStack = std::stack< + std::pair, SHAMapNodeID>>; using DeltaRef = std::pair< boost::intrusive_ptr, boost::intrusive_ptr>; // tree node cache operations - std::shared_ptr + intr_ptr::SharedPtr cacheLookup(SHAMapHash const& hash) const; + void - canonicalize(SHAMapHash const& hash, std::shared_ptr&) + canonicalize(SHAMapHash const& hash, intr_ptr::SharedPtr&) const; // database operations - std::shared_ptr + intr_ptr::SharedPtr fetchNodeFromDB(SHAMapHash const& hash) const; - std::shared_ptr + intr_ptr::SharedPtr fetchNodeNT(SHAMapHash const& hash) const; - std::shared_ptr + intr_ptr::SharedPtr fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const; - std::shared_ptr + intr_ptr::SharedPtr fetchNode(SHAMapHash const& hash) const; - std::shared_ptr + intr_ptr::SharedPtr checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const; /** Update hashes up to the root */ @@ -395,7 +399,7 @@ private: dirtyUp( SharedPtrNodeStack& stack, uint256 const& target, - std::shared_ptr terminal); + intr_ptr::SharedPtr terminal); /** Walk towards the specified id, returning the node. 
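// Reference sketch, not rippled code: the two template parameters added to
// TaggedCache (given defaults of SharedWeakCachePointer and std::shared_ptr in
// the TaggedCache.h hunk earlier) let existing instantiations keep compiling
// unchanged while TreeNodeCache, later in this diff, opts into the intrusive
// pointer types. The mechanism in miniature; Cache and FancyPtr are hypothetical.
#include <memory>
#include <string>
#include <type_traits>

template <class T>
struct FancyPtr  // stand-in for an alternative smart-pointer type
{
    std::shared_ptr<T> impl;
};

template <class Key, class T, class Ptr = std::shared_ptr<T>>
struct Cache
{
    using pointer_type = Ptr;
};

using OldStyle = Cache<int, std::string>;  // existing call sites: unchanged
using NewStyle = Cache<int, std::string, FancyPtr<std::string>>;

static_assert(
    std::is_same_v<OldStyle::pointer_type, std::shared_ptr<std::string>>);
static_assert(std::is_same_v<NewStyle::pointer_type, FancyPtr<std::string>>);

int
main()
{
}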
Caller must check if the return is nullptr, and if not, if the node->peekItem()->key() == @@ -409,36 +413,36 @@ private: /** Unshare the node, allowing it to be modified */ template - std::shared_ptr - unshareNode(std::shared_ptr, SHAMapNodeID const& nodeID); + intr_ptr::SharedPtr + unshareNode(intr_ptr::SharedPtr, SHAMapNodeID const& nodeID); /** prepare a node to be modified before flushing */ template - std::shared_ptr - preFlushNode(std::shared_ptr node) const; + intr_ptr::SharedPtr + preFlushNode(intr_ptr::SharedPtr node) const; /** write and canonicalize modified node */ - std::shared_ptr - writeNode(NodeObjectType t, std::shared_ptr node) const; + intr_ptr::SharedPtr + writeNode(NodeObjectType t, intr_ptr::SharedPtr node) const; // returns the first item at or below this node SHAMapLeafNode* firstBelow( - std::shared_ptr, + intr_ptr::SharedPtr, SharedPtrNodeStack& stack, int branch = 0) const; // returns the last item at or below this node SHAMapLeafNode* lastBelow( - std::shared_ptr node, + intr_ptr::SharedPtr node, SharedPtrNodeStack& stack, int branch = branchFactor) const; // helper function for firstBelow and lastBelow SHAMapLeafNode* belowHelper( - std::shared_ptr node, + intr_ptr::SharedPtr node, SharedPtrNodeStack& stack, int branch, std::tuple< @@ -452,15 +456,15 @@ private: descend(SHAMapInnerNode*, int branch) const; SHAMapTreeNode* descendThrow(SHAMapInnerNode*, int branch) const; - std::shared_ptr - descend(std::shared_ptr const&, int branch) const; - std::shared_ptr - descendThrow(std::shared_ptr const&, int branch) const; + intr_ptr::SharedPtr + descend(SHAMapInnerNode&, int branch) const; + intr_ptr::SharedPtr + descendThrow(SHAMapInnerNode&, int branch) const; // Descend with filter // If pending, callback is called as if it called fetchNodeNT - using descendCallback = - std::function, SHAMapHash const&)>; + using descendCallback = std::function< + void(intr_ptr::SharedPtr, SHAMapHash const&)>; SHAMapTreeNode* descendAsync( SHAMapInnerNode* parent, @@ -478,8 +482,8 @@ private: // Non-storing // Does not hook the returned node to its parent - std::shared_ptr - descendNoStore(std::shared_ptr const&, int branch) const; + intr_ptr::SharedPtr + descendNoStore(SHAMapInnerNode&, int branch) const; /** If there is only one leaf below this node, get its contents */ boost::intrusive_ptr const& @@ -540,10 +544,10 @@ private: // nodes we may have acquired from deferred reads using DeferredNode = std::tuple< - SHAMapInnerNode*, // parent node - SHAMapNodeID, // parent node ID - int, // branch - std::shared_ptr>; // node + SHAMapInnerNode*, // parent node + SHAMapNodeID, // parent node ID + int, // branch + intr_ptr::SharedPtr>; // node int deferred_; std::mutex deferLock_; @@ -577,7 +581,7 @@ private: gmn_ProcessDeferredReads(MissingNodes&); // fetch from DB helper function - std::shared_ptr + intr_ptr::SharedPtr finishFetch( SHAMapHash const& hash, std::shared_ptr const& object) const; diff --git a/src/xrpld/shamap/SHAMapAccountStateLeafNode.h b/src/xrpld/shamap/SHAMapAccountStateLeafNode.h index 1069c363ec..f6b5e0827c 100644 --- a/src/xrpld/shamap/SHAMapAccountStateLeafNode.h +++ b/src/xrpld/shamap/SHAMapAccountStateLeafNode.h @@ -51,10 +51,10 @@ public: { } - std::shared_ptr + intr_ptr::SharedPtr clone(std::uint32_t cowid) const final override { - return std::make_shared( + return intr_ptr::make_shared( item_, cowid, hash_); } diff --git a/src/xrpld/shamap/SHAMapInnerNode.h b/src/xrpld/shamap/SHAMapInnerNode.h index c45621d0c1..5c064fd9da 100644 --- 
a/src/xrpld/shamap/SHAMapInnerNode.h +++ b/src/xrpld/shamap/SHAMapInnerNode.h @@ -21,9 +21,10 @@ #define RIPPLE_SHAMAP_SHAMAPINNERNODE_H_INCLUDED #include -#include #include +#include + #include #include #include @@ -41,7 +42,7 @@ public: private: /** Opaque type that contains the `hashes` array (array of type `SHAMapHash`) and the `children` array (array of type - `std::shared_ptr`). + `intr_ptr::SharedPtr`). */ TaggedPointer hashesAndChildren_; @@ -106,7 +107,11 @@ public: operator=(SHAMapInnerNode const&) = delete; ~SHAMapInnerNode(); - std::shared_ptr + // Needed to support intrusive weak pointers + void + partialDestructor() override; + + intr_ptr::SharedPtr clone(std::uint32_t cowid) const override; SHAMapNodeType @@ -140,19 +145,19 @@ public: getChildHash(int m) const; void - setChild(int m, std::shared_ptr child); + setChild(int m, intr_ptr::SharedPtr child); void - shareChild(int m, std::shared_ptr const& child); + shareChild(int m, intr_ptr::SharedPtr const& child); SHAMapTreeNode* getChildPointer(int branch); - std::shared_ptr + intr_ptr::SharedPtr getChild(int branch); - std::shared_ptr - canonicalizeChild(int branch, std::shared_ptr node); + intr_ptr::SharedPtr + canonicalizeChild(int branch, intr_ptr::SharedPtr node); // sync functions bool @@ -180,10 +185,10 @@ public: void invariants(bool is_root = false) const override; - static std::shared_ptr + static intr_ptr::SharedPtr makeFullInner(Slice data, SHAMapHash const& hash, bool hashValid); - static std::shared_ptr + static intr_ptr::SharedPtr makeCompressedInner(Slice data); }; diff --git a/src/xrpld/shamap/SHAMapLeafNode.h b/src/xrpld/shamap/SHAMapLeafNode.h index 9ff46a9bb7..c0f9422a38 100644 --- a/src/xrpld/shamap/SHAMapLeafNode.h +++ b/src/xrpld/shamap/SHAMapLeafNode.h @@ -21,7 +21,6 @@ #define RIPPLE_SHAMAP_SHAMAPLEAFNODE_H_INCLUDED #include -#include #include #include diff --git a/src/xrpld/shamap/SHAMapTreeNode.h b/src/xrpld/shamap/SHAMapTreeNode.h index 6f209d13d7..2c4a349019 100644 --- a/src/xrpld/shamap/SHAMapTreeNode.h +++ b/src/xrpld/shamap/SHAMapTreeNode.h @@ -23,6 +23,8 @@ #include #include +#include +#include #include #include @@ -46,7 +48,7 @@ enum class SHAMapNodeType { tnACCOUNT_STATE = 4 }; -class SHAMapTreeNode +class SHAMapTreeNode : public IntrusiveRefCounts { protected: SHAMapHash hash_; @@ -85,15 +87,19 @@ protected: public: virtual ~SHAMapTreeNode() noexcept = default; + // Needed to support weak intrusive pointers + virtual void + partialDestructor() {}; + /** \defgroup SHAMap Copy-on-Write Support - By nature, a node may appear in multiple SHAMap instances. Rather than - actually duplicating these nodes, SHAMap opts to be memory efficient - and uses copy-on-write semantics for nodes. + By nature, a node may appear in multiple SHAMap instances. Rather + than actually duplicating these nodes, SHAMap opts to be memory + efficient and uses copy-on-write semantics for nodes. - Only nodes that are not modified and don't need to be flushed back can - be shared. Once a node needs to be changed, it must first be copied and - the copy must marked as not shareable. + Only nodes that are not modified and don't need to be flushed back + can be shared. Once a node needs to be changed, it must first be + copied and the copy must marked as not shareable. Note that just because a node may not be *owned* by a given SHAMap instance does not mean that the node is NOT a part of any SHAMap. It @@ -105,8 +111,8 @@ public: /** @{ */ /** Returns the SHAMap that owns this node. 
- @return the ID of the SHAMap that owns this node, or 0 if the node - is not owned by any SHAMap and is a candidate for sharing. + @return the ID of the SHAMap that owns this node, or 0 if the + node is not owned by any SHAMap and is a candidate for sharing. */ std::uint32_t cowid() const @@ -126,7 +132,7 @@ public: } /** Make a copy of this node, setting the owner. */ - virtual std::shared_ptr + virtual intr_ptr::SharedPtr clone(std::uint32_t cowid) const = 0; /** @} */ @@ -167,20 +173,20 @@ public: virtual void invariants(bool is_root = false) const = 0; - static std::shared_ptr + static intr_ptr::SharedPtr makeFromPrefix(Slice rawNode, SHAMapHash const& hash); - static std::shared_ptr + static intr_ptr::SharedPtr makeFromWire(Slice rawNode); private: - static std::shared_ptr + static intr_ptr::SharedPtr makeTransaction(Slice data, SHAMapHash const& hash, bool hashValid); - static std::shared_ptr + static intr_ptr::SharedPtr makeAccountState(Slice data, SHAMapHash const& hash, bool hashValid); - static std::shared_ptr + static intr_ptr::SharedPtr makeTransactionWithMeta(Slice data, SHAMapHash const& hash, bool hashValid); }; diff --git a/src/xrpld/shamap/SHAMapTxLeafNode.h b/src/xrpld/shamap/SHAMapTxLeafNode.h index 7444e61813..50f426a581 100644 --- a/src/xrpld/shamap/SHAMapTxLeafNode.h +++ b/src/xrpld/shamap/SHAMapTxLeafNode.h @@ -50,10 +50,10 @@ public: { } - std::shared_ptr + intr_ptr::SharedPtr clone(std::uint32_t cowid) const final override { - return std::make_shared(item_, cowid, hash_); + return intr_ptr::make_shared(item_, cowid, hash_); } SHAMapNodeType diff --git a/src/xrpld/shamap/SHAMapTxPlusMetaLeafNode.h b/src/xrpld/shamap/SHAMapTxPlusMetaLeafNode.h index 78cc919b4e..cc34d8f4ae 100644 --- a/src/xrpld/shamap/SHAMapTxPlusMetaLeafNode.h +++ b/src/xrpld/shamap/SHAMapTxPlusMetaLeafNode.h @@ -51,10 +51,11 @@ public: { } - std::shared_ptr + intr_ptr::SharedPtr clone(std::uint32_t cowid) const override { - return std::make_shared(item_, cowid, hash_); + return intr_ptr::make_shared( + item_, cowid, hash_); } SHAMapNodeType diff --git a/src/xrpld/shamap/TreeNodeCache.h b/src/xrpld/shamap/TreeNodeCache.h index 3eead799a3..b41ae5e99e 100644 --- a/src/xrpld/shamap/TreeNodeCache.h +++ b/src/xrpld/shamap/TreeNodeCache.h @@ -22,12 +22,17 @@ #include +#include #include namespace ripple { -using TreeNodeCache = TaggedCache; - +using TreeNodeCache = TaggedCache< + uint256, + SHAMapTreeNode, + /*IsKeyCache*/ false, + intr_ptr::SharedWeakUnionPtr, + intr_ptr::SharedPtr>; } // namespace ripple #endif diff --git a/src/xrpld/shamap/detail/NodeFamily.cpp b/src/xrpld/shamap/detail/NodeFamily.cpp index 21dbe7469e..6126534966 100644 --- a/src/xrpld/shamap/detail/NodeFamily.cpp +++ b/src/xrpld/shamap/detail/NodeFamily.cpp @@ -22,7 +22,7 @@ #include #include -#include +#include namespace ripple { diff --git a/src/xrpld/shamap/detail/SHAMap.cpp b/src/xrpld/shamap/detail/SHAMap.cpp index 7c3fadbec9..ab511f343f 100644 --- a/src/xrpld/shamap/detail/SHAMap.cpp +++ b/src/xrpld/shamap/detail/SHAMap.cpp @@ -24,25 +24,26 @@ #include #include +#include #include namespace ripple { -[[nodiscard]] std::shared_ptr +[[nodiscard]] intr_ptr::SharedPtr makeTypedLeaf( SHAMapNodeType type, boost::intrusive_ptr item, std::uint32_t owner) { if (type == SHAMapNodeType::tnTRANSACTION_NM) - return std::make_shared(std::move(item), owner); + return intr_ptr::make_shared(std::move(item), owner); if (type == SHAMapNodeType::tnTRANSACTION_MD) - return std::make_shared( + return intr_ptr::make_shared( std::move(item), owner); if 
(type == SHAMapNodeType::tnACCOUNT_STATE) - return std::make_shared( + return intr_ptr::make_shared( std::move(item), owner); LogicError( @@ -54,7 +55,7 @@ makeTypedLeaf( SHAMap::SHAMap(SHAMapType t, Family& f) : f_(f), journal_(f.journal()), state_(SHAMapState::Modifying), type_(t) { - root_ = std::make_shared(cowid_); + root_ = intr_ptr::make_shared(cowid_); } // The `hash` parameter is unused. It is part of the interface so it's clear @@ -64,7 +65,7 @@ SHAMap::SHAMap(SHAMapType t, Family& f) SHAMap::SHAMap(SHAMapType t, uint256 const& hash, Family& f) : f_(f), journal_(f.journal()), state_(SHAMapState::Synching), type_(t) { - root_ = std::make_shared(cowid_); + root_ = intr_ptr::make_shared(cowid_); } SHAMap::SHAMap(SHAMap const& other, bool isMutable) @@ -95,7 +96,7 @@ void SHAMap::dirtyUp( SharedPtrNodeStack& stack, uint256 const& target, - std::shared_ptr child) + intr_ptr::SharedPtr child) { // walk the tree up from through the inner nodes to the root_ // update hashes and links @@ -112,7 +113,7 @@ SHAMap::dirtyUp( while (!stack.empty()) { auto node = - std::dynamic_pointer_cast(stack.top().first); + intr_ptr::dynamic_pointer_cast(stack.top().first); SHAMapNodeID nodeID = stack.top().second; stack.pop(); XRPL_ASSERT(node, "ripple::SHAMap::dirtyUp : non-null node"); @@ -141,12 +142,13 @@ SHAMap::walkTowardsKey(uint256 const& id, SharedPtrNodeStack* stack) const if (stack != nullptr) stack->push({inNode, nodeID}); - auto const inner = std::static_pointer_cast(inNode); + auto const inner = + intr_ptr::static_pointer_cast(inNode); auto const branch = selectBranch(nodeID, id); if (inner->isEmptyBranch(branch)) return nullptr; - inNode = descendThrow(inner, branch); + inNode = descendThrow(*inner, branch); nodeID = nodeID.getChildNodeID(branch); } @@ -164,7 +166,7 @@ SHAMap::findKey(uint256 const& id) const return leaf; } -std::shared_ptr +intr_ptr::SharedPtr SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const { XRPL_ASSERT(backed_, "ripple::SHAMap::fetchNodeFromDB : is backed"); @@ -172,7 +174,7 @@ SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const return finishFetch(hash, obj); } -std::shared_ptr +intr_ptr::SharedPtr SHAMap::finishFetch( SHAMapHash const& hash, std::shared_ptr const& object) const @@ -211,7 +213,7 @@ SHAMap::finishFetch( } // See if a sync filter has a node -std::shared_ptr +intr_ptr::SharedPtr SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const { if (auto nodeData = filter->getNode(hash)) @@ -244,7 +246,7 @@ SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const // Get a node without throwing // Used on maps where missing nodes are expected -std::shared_ptr +intr_ptr::SharedPtr SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const { auto node = cacheLookup(hash); @@ -267,7 +269,7 @@ SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const return node; } -std::shared_ptr +intr_ptr::SharedPtr SHAMap::fetchNodeNT(SHAMapHash const& hash) const { auto node = cacheLookup(hash); @@ -279,7 +281,7 @@ SHAMap::fetchNodeNT(SHAMapHash const& hash) const } // Throw if the node is missing -std::shared_ptr +intr_ptr::SharedPtr SHAMap::fetchNode(SHAMapHash const& hash) const { auto node = fetchNodeNT(hash); @@ -301,14 +303,13 @@ SHAMap::descendThrow(SHAMapInnerNode* parent, int branch) const return ret; } -std::shared_ptr -SHAMap::descendThrow(std::shared_ptr const& parent, int branch) - const +intr_ptr::SharedPtr +SHAMap::descendThrow(SHAMapInnerNode& parent, int branch) const { - 
std::shared_ptr ret = descend(parent, branch); + intr_ptr::SharedPtr ret = descend(parent, branch); - if (!ret && !parent->isEmptyBranch(branch)) - Throw(type_, parent->getChildHash(branch)); + if (!ret && !parent.isEmptyBranch(branch)) + Throw(type_, parent.getChildHash(branch)); return ret; } @@ -320,7 +321,7 @@ SHAMap::descend(SHAMapInnerNode* parent, int branch) const if (ret || !backed_) return ret; - std::shared_ptr node = + intr_ptr::SharedPtr node = fetchNodeNT(parent->getChildHash(branch)); if (!node) return nullptr; @@ -329,32 +330,29 @@ SHAMap::descend(SHAMapInnerNode* parent, int branch) const return node.get(); } -std::shared_ptr -SHAMap::descend(std::shared_ptr const& parent, int branch) - const +intr_ptr::SharedPtr +SHAMap::descend(SHAMapInnerNode& parent, int branch) const { - std::shared_ptr node = parent->getChild(branch); + intr_ptr::SharedPtr node = parent.getChild(branch); if (node || !backed_) return node; - node = fetchNode(parent->getChildHash(branch)); + node = fetchNode(parent.getChildHash(branch)); if (!node) - return nullptr; + return {}; - node = parent->canonicalizeChild(branch, std::move(node)); + node = parent.canonicalizeChild(branch, std::move(node)); return node; } // Gets the node that would be hooked to this branch, // but doesn't hook it up. -std::shared_ptr -SHAMap::descendNoStore( - std::shared_ptr const& parent, - int branch) const +intr_ptr::SharedPtr +SHAMap::descendNoStore(SHAMapInnerNode& parent, int branch) const { - std::shared_ptr ret = parent->getChild(branch); + intr_ptr::SharedPtr ret = parent.getChild(branch); if (!ret && backed_) - ret = fetchNode(parent->getChildHash(branch)); + ret = fetchNode(parent.getChildHash(branch)); return ret; } @@ -379,7 +377,7 @@ SHAMap::descend( if (!child) { auto const& childHash = parent->getChildHash(branch); - std::shared_ptr childNode = + intr_ptr::SharedPtr childNode = fetchNodeNT(childHash, filter); if (childNode) @@ -436,8 +434,8 @@ SHAMap::descendAsync( } template -std::shared_ptr -SHAMap::unshareNode(std::shared_ptr node, SHAMapNodeID const& nodeID) +intr_ptr::SharedPtr +SHAMap::unshareNode(intr_ptr::SharedPtr node, SHAMapNodeID const& nodeID) { // make sure the node is suitable for the intended operation (copy on write) XRPL_ASSERT( @@ -449,7 +447,7 @@ SHAMap::unshareNode(std::shared_ptr node, SHAMapNodeID const& nodeID) XRPL_ASSERT( state_ != SHAMapState::Immutable, "ripple::SHAMap::unshareNode : not immutable"); - node = std::static_pointer_cast(node->clone(cowid_)); + node = intr_ptr::static_pointer_cast(node->clone(cowid_)); if (nodeID.isRoot()) root_ = node; } @@ -458,7 +456,7 @@ SHAMap::unshareNode(std::shared_ptr node, SHAMapNodeID const& nodeID) SHAMapLeafNode* SHAMap::belowHelper( - std::shared_ptr node, + intr_ptr::SharedPtr node, SharedPtrNodeStack& stack, int branch, std::tuple, std::function> const& @@ -467,11 +465,11 @@ SHAMap::belowHelper( auto& [init, cmp, incr] = loopParams; if (node->isLeaf()) { - auto n = std::static_pointer_cast(node); + auto n = intr_ptr::static_pointer_cast(node); stack.push({node, {leafDepth, n->peekItem()->key()}}); return n.get(); } - auto inner = std::static_pointer_cast(node); + auto inner = intr_ptr::static_pointer_cast(node); if (stack.empty()) stack.push({inner, SHAMapNodeID{}}); else @@ -480,17 +478,17 @@ SHAMap::belowHelper( { if (!inner->isEmptyBranch(i)) { - node = descendThrow(inner, i); + node.adopt(descendThrow(inner.get(), i)); XRPL_ASSERT( !stack.empty(), "ripple::SHAMap::belowHelper : non-empty stack"); if (node->isLeaf()) { - auto n = 
std::static_pointer_cast(node); + auto n = intr_ptr::static_pointer_cast(node); stack.push({n, {leafDepth, n->peekItem()->key()}}); return n.get(); } - inner = std::static_pointer_cast(node); + inner = intr_ptr::static_pointer_cast(node); stack.push({inner, stack.top().second.getChildNodeID(branch)}); i = init; // descend and reset loop } @@ -501,7 +499,7 @@ SHAMap::belowHelper( } SHAMapLeafNode* SHAMap::lastBelow( - std::shared_ptr node, + intr_ptr::SharedPtr node, SharedPtrNodeStack& stack, int branch) const { @@ -513,7 +511,7 @@ SHAMap::lastBelow( } SHAMapLeafNode* SHAMap::firstBelow( - std::shared_ptr node, + intr_ptr::SharedPtr node, SharedPtrNodeStack& stack, int branch) const { @@ -593,12 +591,12 @@ SHAMap::peekNextItem(uint256 const& id, SharedPtrNodeStack& stack) const XRPL_ASSERT( !node->isLeaf(), "ripple::SHAMap::peekNextItem : another node is not leaf"); - auto inner = std::static_pointer_cast(node); + auto inner = intr_ptr::static_pointer_cast(node); for (auto i = selectBranch(nodeID, id) + 1; i < branchFactor; ++i) { if (!inner->isEmptyBranch(i)) { - node = descendThrow(inner, i); + node = descendThrow(*inner, i); auto leaf = firstBelow(node, stack, i); if (!leaf) Throw(type_, id); @@ -654,14 +652,14 @@ SHAMap::upper_bound(uint256 const& id) const } else { - auto inner = std::static_pointer_cast(node); + auto inner = intr_ptr::static_pointer_cast(node); for (auto branch = selectBranch(nodeID, id) + 1; branch < branchFactor; ++branch) { if (!inner->isEmptyBranch(branch)) { - node = descendThrow(inner, branch); + node = descendThrow(*inner, branch); auto leaf = firstBelow(node, stack, branch); if (!leaf) Throw(type_, id); @@ -691,13 +689,13 @@ SHAMap::lower_bound(uint256 const& id) const } else { - auto inner = std::static_pointer_cast(node); + auto inner = intr_ptr::static_pointer_cast(node); for (int branch = selectBranch(nodeID, id) - 1; branch >= 0; --branch) { if (!inner->isEmptyBranch(branch)) { - node = descendThrow(inner, branch); + node = descendThrow(*inner, branch); auto leaf = lastBelow(node, stack, branch); if (!leaf) Throw(type_, id); @@ -732,7 +730,8 @@ SHAMap::delItem(uint256 const& id) if (stack.empty()) Throw(type_, id); - auto leaf = std::dynamic_pointer_cast(stack.top().first); + auto leaf = + intr_ptr::dynamic_pointer_cast(stack.top().first); stack.pop(); if (!leaf || (leaf->peekItem()->key() != id)) @@ -742,12 +741,12 @@ SHAMap::delItem(uint256 const& id) // What gets attached to the end of the chain // (For now, nothing, since we deleted the leaf) - std::shared_ptr prevNode; + intr_ptr::SharedPtr prevNode; while (!stack.empty()) { auto node = - std::static_pointer_cast(stack.top().first); + intr_ptr::static_pointer_cast(stack.top().first); SHAMapNodeID nodeID = stack.top().second; stack.pop(); @@ -775,7 +774,8 @@ SHAMap::delItem(uint256 const& id) { if (!node->isEmptyBranch(i)) { - node->setChild(i, nullptr); + node->setChild( + i, intr_ptr::SharedPtr{}); break; } } @@ -824,7 +824,7 @@ SHAMap::addGiveItem( if (node->isLeaf()) { - auto leaf = std::static_pointer_cast(node); + auto leaf = intr_ptr::static_pointer_cast(node); if (leaf->peekItem()->key() == tag) return false; } @@ -832,7 +832,7 @@ SHAMap::addGiveItem( if (node->isInner()) { // easy case, we end on an inner node - auto inner = std::static_pointer_cast(node); + auto inner = intr_ptr::static_pointer_cast(node); int branch = selectBranch(nodeID, tag); XRPL_ASSERT( inner->isEmptyBranch(branch), @@ -843,13 +843,13 @@ SHAMap::addGiveItem( { // this is a leaf node that has to be made an inner node 
holding two // items - auto leaf = std::static_pointer_cast(node); + auto leaf = intr_ptr::static_pointer_cast(node); auto otherItem = leaf->peekItem(); XRPL_ASSERT( otherItem && (tag != otherItem->key()), "ripple::SHAMap::addGiveItem : non-null item"); - node = std::make_shared(node->cowid()); + node = intr_ptr::make_shared(node->cowid()); unsigned int b1, b2; @@ -861,7 +861,7 @@ SHAMap::addGiveItem( // we need a new inner node, since both go on same branch at this // level nodeID = nodeID.getChildNodeID(b1); - node = std::make_shared(cowid_); + node = intr_ptr::make_shared(cowid_); } // we can add the two leaf nodes here @@ -915,7 +915,8 @@ SHAMap::updateGiveItem( if (stack.empty()) Throw(type_, tag); - auto node = std::dynamic_pointer_cast(stack.top().first); + auto node = + intr_ptr::dynamic_pointer_cast(stack.top().first); auto nodeID = stack.top().second; stack.pop(); @@ -987,8 +988,9 @@ SHAMap::fetchRoot(SHAMapHash const& hash, SHAMapSyncFilter* filter) @note The node must have already been unshared by having the caller first call SHAMapTreeNode::unshare(). */ -std::shared_ptr -SHAMap::writeNode(NodeObjectType t, std::shared_ptr node) const +intr_ptr::SharedPtr +SHAMap::writeNode(NodeObjectType t, intr_ptr::SharedPtr node) + const { XRPL_ASSERT( node->cowid() == 0, "ripple::SHAMap::writeNode : valid input node"); @@ -1007,8 +1009,8 @@ SHAMap::writeNode(NodeObjectType t, std::shared_ptr node) const // pointer to because flushing modifies inner nodes -- it // makes them point to canonical/shared nodes. template -std::shared_ptr -SHAMap::preFlushNode(std::shared_ptr node) const +intr_ptr::SharedPtr +SHAMap::preFlushNode(intr_ptr::SharedPtr node) const { // A shared node should never need to be flushed // because that would imply someone modified it @@ -1019,7 +1021,7 @@ SHAMap::preFlushNode(std::shared_ptr node) const { // Node is not uniquely ours, so unshare it before // possibly modifying it - node = std::static_pointer_cast(node->clone(cowid_)); + node = intr_ptr::static_pointer_cast(node->clone(cowid_)); } return node; } @@ -1061,17 +1063,17 @@ SHAMap::walkSubTree(bool doWrite, NodeObjectType t) return 1; } - auto node = std::static_pointer_cast(root_); + auto node = intr_ptr::static_pointer_cast(root_); if (node->isEmpty()) { // replace empty root with a new empty root - root_ = std::make_shared(0); + root_ = intr_ptr::make_shared(0); return 1; } // Stack of {parent,index,child} pointers representing // inner nodes we are in the process of flushing - using StackEntry = std::pair, int>; + using StackEntry = std::pair, int>; std::stack> stack; node = preFlushNode(std::move(node)); @@ -1108,7 +1110,7 @@ SHAMap::walkSubTree(bool doWrite, NodeObjectType t) // The semantics of this changes when we move to c++-20 // Right now no move will occur; With c++-20 child will // be moved from. 
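// --- Editorial aside (not part of the patch) ---------------------------------
// The comment above refers to the rvalue overloads that the pointer casts
// gained in C++20. A minimal, self-contained sketch of the difference using
// std::shared_ptr (the types below are illustrative only):

#include <cassert>
#include <memory>

struct Base { virtual ~Base() = default; };
struct Derived : Base {};

int main()
{
    std::shared_ptr<Base> child = std::make_shared<Derived>();
    auto node = std::static_pointer_cast<Derived>(std::move(child));
    // C++17: only the const& overload exists, so `child` is copied and stays
    // non-null; the strong count is bumped and then dropped again later.
    // C++20: the && overload steals the control block, `child` is left empty,
    // and the reference count is never touched.
#if __cplusplus >= 202002L
    assert(child == nullptr);
#endif
    assert(node != nullptr);
}
// -----------------------------------------------------------------------------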
- node = std::static_pointer_cast( + node = intr_ptr::static_pointer_cast( std::move(child)); pos = 0; } @@ -1140,7 +1142,7 @@ SHAMap::walkSubTree(bool doWrite, NodeObjectType t) node->unshare(); if (doWrite) - node = std::static_pointer_cast( + node = intr_ptr::static_pointer_cast( writeNode(t, std::move(node))); ++flushed; @@ -1214,7 +1216,7 @@ SHAMap::dump(bool hash) const JLOG(journal_.info()) << leafCount << " resident leaves"; } -std::shared_ptr +intr_ptr::SharedPtr SHAMap::cacheLookup(SHAMapHash const& hash) const { auto ret = f_.getTreeNodeCache()->fetch(hash.as_uint256()); @@ -1227,7 +1229,7 @@ SHAMap::cacheLookup(SHAMapHash const& hash) const void SHAMap::canonicalize( SHAMapHash const& hash, - std::shared_ptr& node) const + intr_ptr::SharedPtr& node) const { XRPL_ASSERT(backed_, "ripple::SHAMap::canonicalize : is backed"); XRPL_ASSERT( diff --git a/src/xrpld/shamap/detail/SHAMapDelta.cpp b/src/xrpld/shamap/detail/SHAMapDelta.cpp index a6c54b6f22..2adce62efc 100644 --- a/src/xrpld/shamap/detail/SHAMapDelta.cpp +++ b/src/xrpld/shamap/detail/SHAMapDelta.cpp @@ -19,6 +19,7 @@ #include +#include #include #include @@ -242,28 +243,28 @@ SHAMap::walkMap(std::vector& missingNodes, int maxMissing) if (!root_->isInner()) // root_ is only node, and we have it return; - using StackEntry = std::shared_ptr; + using StackEntry = intr_ptr::SharedPtr; std::stack> nodeStack; - nodeStack.push(std::static_pointer_cast(root_)); + nodeStack.push(intr_ptr::static_pointer_cast(root_)); while (!nodeStack.empty()) { - std::shared_ptr node = std::move(nodeStack.top()); + intr_ptr::SharedPtr node = std::move(nodeStack.top()); nodeStack.pop(); for (int i = 0; i < 16; ++i) { if (!node->isEmptyBranch(i)) { - std::shared_ptr nextNode = - descendNoStore(node, i); + intr_ptr::SharedPtr nextNode = + descendNoStore(*node, i); if (nextNode) { if (nextNode->isInner()) nodeStack.push( - std::static_pointer_cast( + intr_ptr::static_pointer_cast( nextNode)); } else @@ -285,15 +286,15 @@ SHAMap::walkMapParallel( if (!root_->isInner()) // root_ is only node, and we have it return false; - using StackEntry = std::shared_ptr; - std::array, 16> topChildren; + using StackEntry = intr_ptr::SharedPtr; + std::array, 16> topChildren; { auto const& innerRoot = - std::static_pointer_cast(root_); + intr_ptr::static_pointer_cast(root_); for (int i = 0; i < 16; ++i) { if (!innerRoot->isEmptyBranch(i)) - topChildren[i] = descendNoStore(innerRoot, i); + topChildren[i] = descendNoStore(*innerRoot, i); } } std::vector workers; @@ -314,7 +315,7 @@ SHAMap::walkMapParallel( continue; nodeStacks[rootChildIndex].push( - std::static_pointer_cast(child)); + intr_ptr::static_pointer_cast(child)); JLOG(journal_.debug()) << "starting worker " << rootChildIndex; workers.push_back(std::thread( @@ -324,7 +325,7 @@ SHAMap::walkMapParallel( { while (!nodeStack.empty()) { - std::shared_ptr node = + intr_ptr::SharedPtr node = std::move(nodeStack.top()); XRPL_ASSERT( node, @@ -335,14 +336,15 @@ SHAMap::walkMapParallel( { if (node->isEmptyBranch(i)) continue; - std::shared_ptr nextNode = - descendNoStore(node, i); + intr_ptr::SharedPtr nextNode = + descendNoStore(*node, i); if (nextNode) { if (nextNode->isInner()) - nodeStack.push(std::static_pointer_cast< - SHAMapInnerNode>(nextNode)); + nodeStack.push( + intr_ptr::static_pointer_cast< + SHAMapInnerNode>(nextNode)); } else { diff --git a/src/xrpld/shamap/detail/SHAMapInnerNode.cpp b/src/xrpld/shamap/detail/SHAMapInnerNode.cpp index 7d3c3c8fb2..8ec581b475 100644 --- 
a/src/xrpld/shamap/detail/SHAMapInnerNode.cpp +++ b/src/xrpld/shamap/detail/SHAMapInnerNode.cpp @@ -21,15 +21,13 @@ #include #include +#include #include #include #include #include #include -#include -#include - namespace ripple { SHAMapInnerNode::SHAMapInnerNode( @@ -41,6 +39,17 @@ SHAMapInnerNode::SHAMapInnerNode( SHAMapInnerNode::~SHAMapInnerNode() = default; +void +SHAMapInnerNode::partialDestructor() +{ + intr_ptr::SharedPtr* children; + // structured bindings can't be captured in c++ 17; use tie instead + std::tie(std::ignore, std::ignore, children) = + hashesAndChildren_.getHashesAndChildren(); + iterNonEmptyChildIndexes( + [&](auto branchNum, auto indexNum) { children[indexNum].reset(); }); +} + template void SHAMapInnerNode::iterChildren(F&& f) const @@ -68,17 +77,17 @@ SHAMapInnerNode::getChildIndex(int i) const return hashesAndChildren_.getChildIndex(isBranch_, i); } -std::shared_ptr +intr_ptr::SharedPtr SHAMapInnerNode::clone(std::uint32_t cowid) const { auto const branchCount = getBranchCount(); auto const thisIsSparse = !hashesAndChildren_.isDense(); - auto p = std::make_shared(cowid, branchCount); + auto p = intr_ptr::make_shared(cowid, branchCount); p->hash_ = hash_; p->isBranch_ = isBranch_; p->fullBelowGen_ = fullBelowGen_; SHAMapHash *cloneHashes, *thisHashes; - std::shared_ptr*cloneChildren, *thisChildren; + intr_ptr::SharedPtr*cloneChildren, *thisChildren; // structured bindings can't be captured in c++ 17; use tie instead std::tie(std::ignore, cloneHashes, cloneChildren) = p->hashesAndChildren_.getHashesAndChildren(); @@ -119,7 +128,7 @@ SHAMapInnerNode::clone(std::uint32_t cowid) const return p; } -std::shared_ptr +intr_ptr::SharedPtr SHAMapInnerNode::makeFullInner( Slice data, SHAMapHash const& hash, @@ -129,7 +138,7 @@ SHAMapInnerNode::makeFullInner( if (data.size() != branchFactor * uint256::bytes) Throw("Invalid FI node"); - auto ret = std::make_shared(0, branchFactor); + auto ret = intr_ptr::make_shared(0, branchFactor); SerialIter si(data); @@ -153,7 +162,7 @@ SHAMapInnerNode::makeFullInner( return ret; } -std::shared_ptr +intr_ptr::SharedPtr SHAMapInnerNode::makeCompressedInner(Slice data) { // A compressed inner node is serialized as a series of 33 byte chunks, @@ -166,7 +175,7 @@ SHAMapInnerNode::makeCompressedInner(Slice data) SerialIter si(data); - auto ret = std::make_shared(0, branchFactor); + auto ret = intr_ptr::make_shared(0, branchFactor); auto hashes = ret->hashesAndChildren_.getHashes(); @@ -208,13 +217,13 @@ void SHAMapInnerNode::updateHashDeep() { SHAMapHash* hashes; - std::shared_ptr* children; + intr_ptr::SharedPtr* children; // structured bindings can't be captured in c++ 17; use tie instead std::tie(std::ignore, hashes, children) = hashesAndChildren_.getHashesAndChildren(); iterNonEmptyChildIndexes([&](auto branchNum, auto indexNum) { - if (children[indexNum] != nullptr) - hashes[indexNum] = children[indexNum]->getHash(); + if (auto p = children[indexNum].get()) + hashes[indexNum] = p->getHash(); }); updateHash(); } @@ -272,7 +281,7 @@ SHAMapInnerNode::getString(const SHAMapNodeID& id) const // We are modifying an inner node void -SHAMapInnerNode::setChild(int m, std::shared_ptr child) +SHAMapInnerNode::setChild(int m, intr_ptr::SharedPtr child) { XRPL_ASSERT( (m >= 0) && (m < branchFactor), @@ -314,7 +323,9 @@ SHAMapInnerNode::setChild(int m, std::shared_ptr child) // finished modifying, now make shareable void -SHAMapInnerNode::shareChild(int m, std::shared_ptr const& child) +SHAMapInnerNode::shareChild( + int m, + intr_ptr::SharedPtr const& 
child) { XRPL_ASSERT( (m >= 0) && (m < branchFactor), @@ -349,7 +360,7 @@ SHAMapInnerNode::getChildPointer(int branch) return hashesAndChildren_.getChildren()[index].get(); } -std::shared_ptr +intr_ptr::SharedPtr SHAMapInnerNode::getChild(int branch) { XRPL_ASSERT( @@ -378,10 +389,10 @@ SHAMapInnerNode::getChildHash(int m) const return zeroSHAMapHash; } -std::shared_ptr +intr_ptr::SharedPtr SHAMapInnerNode::canonicalizeChild( int branch, - std::shared_ptr node) + intr_ptr::SharedPtr node) { XRPL_ASSERT( branch >= 0 && branch < branchFactor, diff --git a/src/xrpld/shamap/detail/SHAMapSync.cpp b/src/xrpld/shamap/detail/SHAMapSync.cpp index 092b1efc5a..d43b1ff024 100644 --- a/src/xrpld/shamap/detail/SHAMapSync.cpp +++ b/src/xrpld/shamap/detail/SHAMapSync.cpp @@ -47,10 +47,10 @@ SHAMap::visitNodes(std::function const& function) const if (!root_->isInner()) return; - using StackEntry = std::pair>; + using StackEntry = std::pair>; std::stack> stack; - auto node = std::static_pointer_cast(root_); + auto node = intr_ptr::static_pointer_cast(root_); int pos = 0; while (true) @@ -59,8 +59,8 @@ SHAMap::visitNodes(std::function const& function) const { if (!node->isEmptyBranch(pos)) { - std::shared_ptr child = - descendNoStore(node, pos); + intr_ptr::SharedPtr child = + descendNoStore(*node, pos); if (!function(*child)) return; @@ -79,7 +79,8 @@ SHAMap::visitNodes(std::function const& function) const } // descend to the child's first position - node = std::static_pointer_cast(child); + node = + intr_ptr::static_pointer_cast(child); pos = 0; } } @@ -115,7 +116,7 @@ SHAMap::visitDifferences( if (root_->isLeaf()) { - auto leaf = std::static_pointer_cast(root_); + auto leaf = intr_ptr::static_pointer_cast(root_); if (!have || !have->hasLeafNode(leaf->peekItem()->key(), leaf->getHash())) function(*root_); @@ -202,7 +203,8 @@ SHAMap::gmn_ProcessNodes(MissingNodes& mn, MissingNodes::StackEntry& se) mn.filter_, pending, [node, nodeID, branch, &mn]( - std::shared_ptr found, SHAMapHash const&) { + intr_ptr::SharedPtr found, + SHAMapHash const&) { // a read completed asynchronously std::unique_lock lock{mn.deferLock_}; mn.finishedReads_.emplace_back( @@ -271,7 +273,7 @@ SHAMap::gmn_ProcessDeferredReads(MissingNodes& mn) SHAMapInnerNode*, SHAMapNodeID, int, - std::shared_ptr> + intr_ptr::SharedPtr> deferredNode; { std::unique_lock lock{mn.deferLock_}; @@ -327,7 +329,7 @@ SHAMap::getMissingNodes(int max, SHAMapSyncFilter* filter) f_.getFullBelowCache()->getGeneration()); if (!root_->isInner() || - std::static_pointer_cast(root_)->isFullBelow( + intr_ptr::static_pointer_cast(root_)->isFullBelow( mn.generation_)) { clearSynching(); @@ -807,8 +809,9 @@ SHAMap::getProofPath(uint256 const& key) const } if (auto const& node = stack.top().first; !node || node->isInner() || - std::static_pointer_cast(node)->peekItem()->key() != - key) + intr_ptr::static_pointer_cast(node) + ->peekItem() + ->key() != key) { JLOG(journal_.debug()) << "no path to " << key; return {}; diff --git a/src/xrpld/shamap/detail/SHAMapTreeNode.cpp b/src/xrpld/shamap/detail/SHAMapTreeNode.cpp index c50683213c..6acf3f3bfc 100644 --- a/src/xrpld/shamap/detail/SHAMapTreeNode.cpp +++ b/src/xrpld/shamap/detail/SHAMapTreeNode.cpp @@ -23,18 +23,16 @@ #include #include -#include +#include #include #include #include #include #include -#include - namespace ripple { -std::shared_ptr +intr_ptr::SharedPtr SHAMapTreeNode::makeTransaction( Slice data, SHAMapHash const& hash, @@ -44,12 +42,13 @@ SHAMapTreeNode::makeTransaction( 
make_shamapitem(sha512Half(HashPrefix::transactionID, data), data); if (hashValid) - return std::make_shared(std::move(item), 0, hash); + return intr_ptr::make_shared( + std::move(item), 0, hash); - return std::make_shared(std::move(item), 0); + return intr_ptr::make_shared(std::move(item), 0); } -std::shared_ptr +intr_ptr::SharedPtr SHAMapTreeNode::makeTransactionWithMeta( Slice data, SHAMapHash const& hash, @@ -72,13 +71,13 @@ SHAMapTreeNode::makeTransactionWithMeta( auto item = make_shamapitem(tag, s.slice()); if (hashValid) - return std::make_shared( + return intr_ptr::make_shared( std::move(item), 0, hash); - return std::make_shared(std::move(item), 0); + return intr_ptr::make_shared(std::move(item), 0); } -std::shared_ptr +intr_ptr::SharedPtr SHAMapTreeNode::makeAccountState( Slice data, SHAMapHash const& hash, @@ -104,13 +103,14 @@ SHAMapTreeNode::makeAccountState( auto item = make_shamapitem(tag, s.slice()); if (hashValid) - return std::make_shared( + return intr_ptr::make_shared( std::move(item), 0, hash); - return std::make_shared(std::move(item), 0); + return intr_ptr::make_shared( + std::move(item), 0); } -std::shared_ptr +intr_ptr::SharedPtr SHAMapTreeNode::makeFromWire(Slice rawNode) { if (rawNode.empty()) @@ -142,7 +142,7 @@ SHAMapTreeNode::makeFromWire(Slice rawNode) "wire: Unknown type (" + std::to_string(type) + ")"); } -std::shared_ptr +intr_ptr::SharedPtr SHAMapTreeNode::makeFromPrefix(Slice rawNode, SHAMapHash const& hash) { if (rawNode.size() < 4) diff --git a/src/xrpld/shamap/detail/TaggedPointer.h b/src/xrpld/shamap/detail/TaggedPointer.h index beee32a945..11ab1f57fc 100644 --- a/src/xrpld/shamap/detail/TaggedPointer.h +++ b/src/xrpld/shamap/detail/TaggedPointer.h @@ -22,6 +22,8 @@ #include +#include + #include #include #include @@ -57,6 +59,7 @@ namespace ripple { */ class TaggedPointer { +private: static_assert( alignof(SHAMapHash) >= 4, "Bad alignment: Tag pointer requires low two bits to be zero."); @@ -170,7 +173,7 @@ public: of each array. */ [[nodiscard]] std:: - tuple*> + tuple*> getHashesAndChildren() const; /** Get the `hashes` array */ @@ -178,7 +181,7 @@ public: getHashes() const; /** Get the `children` array */ - [[nodiscard]] std::shared_ptr* + [[nodiscard]] intr_ptr::SharedPtr* getChildren() const; /** Call the `f` callback for all 16 (branchFactor) branches - even if diff --git a/src/xrpld/shamap/detail/TaggedPointer.ipp b/src/xrpld/shamap/detail/TaggedPointer.ipp index bed821b5c9..f5d40f24fa 100644 --- a/src/xrpld/shamap/detail/TaggedPointer.ipp +++ b/src/xrpld/shamap/detail/TaggedPointer.ipp @@ -50,7 +50,7 @@ static_assert( // contains multiple chunks. This is the terminology the boost documentation // uses. Pools use "Simple Segregated Storage" as their storage format. 
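// --- Editorial aside (not part of the patch) ---------------------------------
// The comment above describes boost's "Simple Segregated Storage" pools. A
// hedged sketch of the idea, assuming boost::pool purely for illustration;
// the stand-in types and sizes below are not the ones the patch uses:

#include <boost/pool/pool.hpp>
#include <cstddef>

struct FakeHash { unsigned char bytes[32]; };  // stand-in for SHAMapHash
struct FakeChild { void* p; };                 // stand-in for intr_ptr::SharedPtr

// One {hash, child} slot per element, mirroring elementSizeBytes below.
constexpr std::size_t elementSize = sizeof(FakeHash) + sizeof(FakeChild);

int main()
{
    // Each malloc() returns one element-sized chunk carved out of a larger
    // block; freed chunks go back onto the pool's free list.
    boost::pool<> pool(elementSize);
    void* slot = pool.malloc();
    pool.free(slot);
}
// -----------------------------------------------------------------------------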
constexpr size_t elementSizeBytes = - (sizeof(SHAMapHash) + sizeof(std::shared_ptr)); + (sizeof(SHAMapHash) + sizeof(intr_ptr::SharedPtr)); constexpr size_t blockSizeBytes = kilobytes(512); @@ -240,7 +240,7 @@ TaggedPointer::destroyHashesAndChildren() for (std::size_t i = 0; i < numAllocated; ++i) { hashes[i].~SHAMapHash(); - children[i].~shared_ptr(); + std::destroy_at(&children[i]); } auto [tag, ptr] = decode(); @@ -397,8 +397,10 @@ inline TaggedPointer::TaggedPointer( { // keep new (&dstHashes[dstIndex]) SHAMapHash{srcHashes[srcIndex]}; - new (&dstChildren[dstIndex]) std::shared_ptr{ - std::move(srcChildren[srcIndex])}; + + new (&dstChildren[dstIndex]) + intr_ptr::SharedPtr{ + std::move(srcChildren[srcIndex])}; ++dstIndex; ++srcIndex; } @@ -410,7 +412,7 @@ inline TaggedPointer::TaggedPointer( { new (&dstHashes[dstIndex]) SHAMapHash{}; new (&dstChildren[dstIndex]) - std::shared_ptr{}; + intr_ptr::SharedPtr{}; ++dstIndex; } } @@ -418,7 +420,8 @@ inline TaggedPointer::TaggedPointer( { // add new (&dstHashes[dstIndex]) SHAMapHash{}; - new (&dstChildren[dstIndex]) std::shared_ptr{}; + new (&dstChildren[dstIndex]) + intr_ptr::SharedPtr{}; ++dstIndex; if (srcIsDense) { @@ -432,7 +435,7 @@ inline TaggedPointer::TaggedPointer( { new (&dstHashes[dstIndex]) SHAMapHash{}; new (&dstChildren[dstIndex]) - std::shared_ptr{}; + intr_ptr::SharedPtr{}; ++dstIndex; } if (srcIsDense) @@ -449,7 +452,7 @@ inline TaggedPointer::TaggedPointer( for (int i = dstIndex; i < dstNumAllocated; ++i) { new (&dstHashes[i]) SHAMapHash{}; - new (&dstChildren[i]) std::shared_ptr{}; + new (&dstChildren[i]) intr_ptr::SharedPtr{}; } *this = std::move(dst); } @@ -469,7 +472,7 @@ inline TaggedPointer::TaggedPointer( // allocate hashes and children, but do not run constructors TaggedPointer newHashesAndChildren{RawAllocateTag{}, toAllocate}; SHAMapHash *newHashes, *oldHashes; - std::shared_ptr*newChildren, *oldChildren; + intr_ptr::SharedPtr*newChildren, *oldChildren; std::uint8_t newNumAllocated; // structured bindings can't be captured in c++ 17; use tie instead std::tie(newNumAllocated, newHashes, newChildren) = @@ -481,7 +484,7 @@ inline TaggedPointer::TaggedPointer( // new arrays are dense, old arrays are sparse iterNonEmptyChildIndexes(isBranch, [&](auto branchNum, auto indexNum) { new (&newHashes[branchNum]) SHAMapHash{oldHashes[indexNum]}; - new (&newChildren[branchNum]) std::shared_ptr{ + new (&newChildren[branchNum]) intr_ptr::SharedPtr{ std::move(oldChildren[indexNum])}; }); // Run the constructors for the remaining elements @@ -490,7 +493,7 @@ inline TaggedPointer::TaggedPointer( if ((1 << i) & isBranch) continue; new (&newHashes[i]) SHAMapHash{}; - new (&newChildren[i]) std::shared_ptr{}; + new (&newChildren[i]) intr_ptr::SharedPtr{}; } } else @@ -501,7 +504,7 @@ inline TaggedPointer::TaggedPointer( new (&newHashes[curCompressedIndex]) SHAMapHash{oldHashes[indexNum]}; new (&newChildren[curCompressedIndex]) - std::shared_ptr{ + intr_ptr::SharedPtr{ std::move(oldChildren[indexNum])}; ++curCompressedIndex; }); @@ -509,7 +512,7 @@ inline TaggedPointer::TaggedPointer( for (int i = curCompressedIndex; i < newNumAllocated; ++i) { new (&newHashes[i]) SHAMapHash{}; - new (&newChildren[i]) std::shared_ptr{}; + new (&newChildren[i]) intr_ptr::SharedPtr{}; } } @@ -523,7 +526,7 @@ inline TaggedPointer::TaggedPointer(std::uint8_t numChildren) for (std::size_t i = 0; i < numAllocated; ++i) { new (&hashes[i]) SHAMapHash{}; - new (&children[i]) std::shared_ptr{}; + new (&children[i]) intr_ptr::SharedPtr{}; } } @@ -562,14 +565,15 @@ 
TaggedPointer::isDense() const } [[nodiscard]] inline std:: - tuple*> + tuple*> TaggedPointer::getHashesAndChildren() const { auto const [tag, ptr] = decode(); auto const hashes = reinterpret_cast(ptr); std::uint8_t numAllocated = boundaries[tag]; - auto const children = reinterpret_cast*>( - hashes + numAllocated); + auto const children = + reinterpret_cast*>( + hashes + numAllocated); return {numAllocated, hashes, children}; }; @@ -579,7 +583,7 @@ TaggedPointer::getHashes() const return reinterpret_cast(tp_ & ptrMask); }; -[[nodiscard]] inline std::shared_ptr* +[[nodiscard]] inline intr_ptr::SharedPtr* TaggedPointer::getChildren() const { auto [unused1, unused2, result] = getHashesAndChildren();
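// --- Editorial aside (end of excerpt, not part of the patch) -----------------
// getHashesAndChildren() above recovers two parallel arrays that live in one
// raw allocation: the hashes first, the child pointers immediately after. A
// simplified sketch of that layout with stand-in types (the real code checks
// alignment with static_asserts and pools the allocations):

#include <cstddef>
#include <memory>
#include <new>

struct Hash { unsigned char bytes[32] = {}; };  // stand-in for SHAMapHash
using Child = std::shared_ptr<int>;             // stand-in for an intrusive child pointer

int main()
{
    constexpr std::size_t n = 16;  // branchFactor
    void* raw = ::operator new(n * (sizeof(Hash) + sizeof(Child)));

    auto* hashes = static_cast<Hash*>(raw);
    auto* children = reinterpret_cast<Child*>(hashes + n);

    // Construct every element in place, as the TaggedPointer constructors do.
    for (std::size_t i = 0; i < n; ++i)
    {
        new (&hashes[i]) Hash{};
        new (&children[i]) Child{};
    }

    // Tear down in place; std::destroy_at spells the destructor call without
    // naming the (templated) pointer type.
    for (std::size_t i = 0; i < n; ++i)
    {
        hashes[i].~Hash();
        std::destroy_at(&children[i]);
    }
    ::operator delete(raw);
}
// -----------------------------------------------------------------------------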