Intrusive SHAMap smart pointers for efficient memory use and lock-free synchronization (#5152)

The main goal of this optimisation is to reduce memory use in SHAMapTreeNodes by introducing intrusive pointers in place of the standard std::shared_ptr and std::weak_ptr.
Valentin Balaschenko authored 2025-03-25 18:40:25 +00:00
committed by GitHub
parent 2bc5cb240f
commit fc204773d6
32 changed files with 4294 additions and 743 deletions


@@ -0,0 +1,515 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2023 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_INTRUSIVEPOINTER_H_INCLUDED
#define RIPPLE_BASICS_INTRUSIVEPOINTER_H_INCLUDED
#include <concepts>
#include <cstdint>
#include <type_traits>
#include <utility>
namespace ripple {
//------------------------------------------------------------------------------
/** Tag to create an intrusive pointer from another intrusive pointer by using a
static cast. This is useful to create an intrusive pointer to a derived
class from an intrusive pointer to a base class.
*/
struct StaticCastTagSharedIntrusive
{
};
/** Tag to create an intrusive pointer from another intrusive pointer by using a
dynamic cast. This is useful to create an intrusive pointer to a derived
class from an intrusive pointer to a base class. If the cast fails an empty
(null) intrusive pointer is created.
*/
struct DynamicCastTagSharedIntrusive
{
};
/** When creating or adopting a raw pointer, controls whether the strong count
is incremented or not. Use this tag to increment the strong count.
*/
struct SharedIntrusiveAdoptIncrementStrongTag
{
};
/** When creating or adopting a raw pointer, controls whether the strong count
is incremented or not. Use this tag to leave the strong count unchanged.
*/
struct SharedIntrusiveAdoptNoIncrementTag
{
};
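/** Example (an illustrative sketch; `Node` is a hypothetical type
    inheriting IntrusiveRefCounts, whose counts start at strong == 1):

    @code
    Node* raw = new Node;
    // Take over the reference that `new` already established:
    SharedIntrusive<Node> owner{raw, SharedIntrusiveAdoptNoIncrementTag{}};
    // Share ownership of an already-counted object (strong becomes 2):
    SharedIntrusive<Node> alias{
        raw, SharedIntrusiveAdoptIncrementStrongTag{}};
    @endcode
*/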
//------------------------------------------------------------------------------
//
template <class T>
concept CAdoptTag = std::is_same_v<T, SharedIntrusiveAdoptIncrementStrongTag> ||
std::is_same_v<T, SharedIntrusiveAdoptNoIncrementTag>;
//------------------------------------------------------------------------------
/** A shared intrusive pointer class that supports weak pointers.
This is meant to be used for SHAMapInnerNodes, but may be useful for other
cases. Since the reference counts are stored on the pointee, the pointee is
not destroyed until both the strong _and_ weak pointer counts go to zero.
When the strong pointer count goes to zero, the "partialDestructor" is
called. This can be used to destroy as much of the object as possible while
still retaining the reference counts. For example, for SHAMapInnerNodes the
children may be reset in that function. Note that std::shared_ptr WILL
run the destructor when the strong count reaches zero, but may not free the
memory used by the object until the weak count reaches zero. In rippled, we
typically allocate shared pointers with the `make_shared` function. When
that is used, the memory is not reclaimed until the weak count reaches zero.
*/
template <class T>
class SharedIntrusive
{
public:
SharedIntrusive() = default;
template <CAdoptTag TAdoptTag>
SharedIntrusive(T* p, TAdoptTag) noexcept;
SharedIntrusive(SharedIntrusive const& rhs);
template <class TT>
// TODO: convertible_to isn't quite right; it also admits types that are
// merely static-castable. Find the right concept.
requires std::convertible_to<TT*, T*>
SharedIntrusive(SharedIntrusive<TT> const& rhs);
SharedIntrusive(SharedIntrusive&& rhs);
template <class TT>
requires std::convertible_to<TT*, T*>
SharedIntrusive(SharedIntrusive<TT>&& rhs);
SharedIntrusive&
operator=(SharedIntrusive const& rhs);
bool
operator!=(std::nullptr_t) const;
bool
operator==(std::nullptr_t) const;
template <class TT>
requires std::convertible_to<TT*, T*>
SharedIntrusive&
operator=(SharedIntrusive<TT> const& rhs);
SharedIntrusive&
operator=(SharedIntrusive&& rhs);
template <class TT>
requires std::convertible_to<TT*, T*>
SharedIntrusive&
operator=(SharedIntrusive<TT>&& rhs);
/** Adopt the raw pointer. The strong reference may or may not be
incremented, depending on the TAdoptTag
*/
template <CAdoptTag TAdoptTag = SharedIntrusiveAdoptIncrementStrongTag>
void
adopt(T* p);
~SharedIntrusive();
/** Create a new SharedIntrusive by statically casting the pointer
controlled by the rhs param.
*/
template <class TT>
SharedIntrusive(
StaticCastTagSharedIntrusive,
SharedIntrusive<TT> const& rhs);
/** Create a new SharedIntrusive by statically casting the pointer
controlled by the rhs param.
*/
template <class TT>
SharedIntrusive(StaticCastTagSharedIntrusive, SharedIntrusive<TT>&& rhs);
/** Create a new SharedIntrusive by dynamically casting the pointer
controlled by the rhs param.
*/
template <class TT>
SharedIntrusive(
DynamicCastTagSharedIntrusive,
SharedIntrusive<TT> const& rhs);
/** Create a new SharedIntrusive by dynamically casting the pointer
controlled by the rhs param.
*/
template <class TT>
SharedIntrusive(DynamicCastTagSharedIntrusive, SharedIntrusive<TT>&& rhs);
T&
operator*() const noexcept;
T*
operator->() const noexcept;
explicit
operator bool() const noexcept;
/** Set the pointer to null, decrement the strong count, and run the
appropriate release action.
*/
void
reset();
/** Get the raw pointer */
T*
get() const;
/** Return the strong count */
std::size_t
use_count() const;
template <class TT, class... Args>
friend SharedIntrusive<TT>
make_SharedIntrusive(Args&&... args);
template <class TT>
friend class SharedIntrusive;
template <class TT>
friend class SharedWeakUnion;
template <class TT>
friend class WeakIntrusive;
private:
/** Return the raw pointer held by this object. */
T*
unsafeGetRawPtr() const;
/** Exchange the current raw pointer held by this object with the given
pointer. Decrement the strong count of the raw pointer previously held
by this object and run the appropriate release action.
*/
void
unsafeReleaseAndStore(T* next);
/** Set the raw pointer directly. This is wrapped in a function so the class
can support both atomic and non-atomic pointers in a future patch.
*/
void
unsafeSetRawPtr(T* p);
/** Exchange the raw pointer directly.
This sets the raw pointer to the given value and returns the previous
value. This is wrapped in a function so the class can support both
atomic and non-atomic pointers in a future patch.
*/
T*
unsafeExchange(T* p);
/** pointer to the type with an intrusive count */
T* ptr_{nullptr};
};
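/** Example usage (an illustrative sketch; `Node` is a hypothetical type
    that publicly inherits IntrusiveRefCounts):

    @code
    struct Node : IntrusiveRefCounts
    {
        int value = 0;

        void
        partialDestructor()
        {
            // Release owned resources early; called when the strong
            // count hits zero while weak refs still exist.
        }
    };

    void
    example()
    {
        auto a = make_SharedIntrusive<Node>(); // strong count == 1
        a->value = 42;
        SharedIntrusive<Node> b = a; // strong count == 2
        a.reset();                   // strong count == 1
    } // b releases the last strong ref; with no weak refs, Node is deleted
    @endcode
*/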
//------------------------------------------------------------------------------
/** A weak intrusive pointer class for the SharedIntrusive pointer class.
Note that this weak pointer class acts differently from normal weak pointer
classes. When the strong pointer count goes to zero, the "partialDestructor"
is called. See the comment on SharedIntrusive for a fuller explanation.
*/
template <class T>
class WeakIntrusive
{
public:
WeakIntrusive() = default;
WeakIntrusive(WeakIntrusive const& rhs);
WeakIntrusive(WeakIntrusive&& rhs);
WeakIntrusive(SharedIntrusive<T> const& rhs);
// There is no move constructor from a strong intrusive ptr because
// moving would be more expensive than copying in this case (the strong
// ref would need to be decremented)
WeakIntrusive(SharedIntrusive<T> const&& rhs) = delete;
// Since there are no current use cases for copy assignment in
// WeakIntrusive, we delete this operator to simplify the implementation. If
// a need arises in the future, we can reintroduce it with proper
// consideration.
WeakIntrusive&
operator=(WeakIntrusive const&) = delete;
template <class TT>
requires std::convertible_to<TT*, T*>
WeakIntrusive&
operator=(SharedIntrusive<TT> const& rhs);
/** Adopt the raw pointer and increment the weak count. */
void
adopt(T* ptr);
~WeakIntrusive();
/** Get a strong pointer from the weak pointer, if possible. This will
only return a seated pointer if the strong count on the raw pointer
is non-zero before locking.
*/
SharedIntrusive<T>
lock() const;
/** Return true if the strong count is zero. */
bool
expired() const;
/** Set the pointer to null and decrement the weak count.
Note: This may run the destructor if the strong count is zero.
*/
void
reset();
private:
T* ptr_ = nullptr;
/** Decrement the weak count. This does _not_ set the raw pointer to
null.
Note: This may run the destructor if the strong count is zero.
*/
void
unsafeReleaseNoStore();
};
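/** Example (an illustrative sketch, reusing the hypothetical `Node` above):

    @code
    void
    example(SharedIntrusive<Node> const& strong)
    {
        WeakIntrusive<Node> weak{strong}; // bumps only the weak count
        if (auto locked = weak.lock())
        {
            // The strong count was non-zero; `locked` keeps Node alive.
        }
        // Once every strong ref is gone, weak.expired() returns true and
        // lock() returns an empty pointer.
    }
    @endcode
*/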
//------------------------------------------------------------------------------
/** A combination of a strong and a weak intrusive pointer stored in the
space of a single pointer.
This class is similar to a `std::variant<SharedIntrusive,WeakIntrusive>`
with some optimizations. In particular, it uses a low-order bit to
determine if the raw pointer represents a strong pointer or a weak
pointer. It can also be quickly switched between its strong pointer and
weak pointer representations. This class is useful for storing intrusive
pointers in tagged caches.
*/
template <class T>
class SharedWeakUnion
{
// Tagged pointer. Low bit determines if this is a strong or a weak
// pointer. The low bit must be masked to zero when converting back to a
// pointer. If the low bit is '1', this is a weak pointer.
static_assert(
alignof(T) >= 2,
"Bad alignment: Combo pointer requires low bit to be zero");
public:
SharedWeakUnion() = default;
SharedWeakUnion(SharedWeakUnion const& rhs);
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakUnion(SharedIntrusive<TT> const& rhs);
SharedWeakUnion(SharedWeakUnion&& rhs);
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakUnion(SharedIntrusive<TT>&& rhs);
SharedWeakUnion&
operator=(SharedWeakUnion const& rhs);
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakUnion&
operator=(SharedIntrusive<TT> const& rhs);
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakUnion&
operator=(SharedIntrusive<TT>&& rhs);
~SharedWeakUnion();
/** Return a strong pointer if this is already a strong pointer (i.e.
don't lock the weak pointer. Use the `lock` method if that's what's
needed)
*/
SharedIntrusive<T>
getStrong() const;
/** Return true if this is a strong pointer and the strong pointer is
seated.
*/
explicit
operator bool() const noexcept;
/** Set the pointer to null, decrement the appropriate ref count, and
run the appropriate release action.
*/
void
reset();
/** If this is a strong pointer, return the raw pointer. Otherwise
return null.
*/
T*
get() const;
/** If this is a strong pointer, return the strong count. Otherwise
return 0.
*/
std::size_t
use_count() const;
/** Return true if the strong count is zero. */
bool
expired() const;
/** If this is a strong pointer, return the strong pointer. Otherwise
attempt to lock the weak pointer.
*/
SharedIntrusive<T>
lock() const;
/** Return true if this represents a strong pointer. */
bool
isStrong() const;
/** Return true if this represents a weak pointer. */
bool
isWeak() const;
/** If this is a weak pointer, attempt to convert it to a strong
pointer.
@return true if successfully converted to a strong pointer (or was
already a strong pointer). Otherwise false.
*/
bool
convertToStrong();
/** If this is a strong pointer, attempt to convert it to a weak
pointer.
@return false if the pointer is null. Otherwise return true.
*/
bool
convertToWeak();
private:
// Tagged pointer. Low bit determines if this is a strong or a weak
// pointer. The low bit must be masked to zero when converting back to a
// pointer. If the low bit is '1', this is a weak pointer.
std::uintptr_t tp_{0};
static constexpr std::uintptr_t tagMask = 1;
static constexpr std::uintptr_t ptrMask = ~tagMask;
private:
/** Return the raw pointer held by this object.
*/
T*
unsafeGetRawPtr() const;
enum class RefStrength { strong, weak };
/** Set the raw pointer and tag bit directly.
*/
void
unsafeSetRawPtr(T* p, RefStrength rs);
/** Set the raw pointer and tag bit to all zeros (strong null pointer).
*/
void unsafeSetRawPtr(std::nullptr_t);
/** Decrement the appropriate ref count, and run the appropriate release
action. Note: this does _not_ set the raw pointer to null.
*/
void
unsafeReleaseNoStore();
};
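/** Example (an illustrative sketch, reusing the hypothetical `Node`
    above): a cache entry can be demoted to weak and later promoted back
    while never occupying more than one pointer of storage.

    @code
    void
    example(SharedIntrusive<Node> const& strong)
    {
        SharedWeakUnion<Node> entry{strong}; // strong; low tag bit clear
        entry.convertToWeak();               // weak; low tag bit set
        if (auto locked = entry.lock())
        {
            // The object was still alive; use `locked`.
        }
    }
    @endcode
*/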
//------------------------------------------------------------------------------
/** Create a shared intrusive pointer.
Note: unlike std::shared_ptr, where there is an advantage to allocating
the object and control block together, there is no such benefit for
intrusive pointers: the reference counts already live inside the object.
*/
template <class TT, class... Args>
SharedIntrusive<TT>
make_SharedIntrusive(Args&&... args)
{
auto p = new TT(std::forward<Args>(args)...);
static_assert(
noexcept(SharedIntrusive<TT>(
std::declval<TT*>(),
std::declval<SharedIntrusiveAdoptNoIncrementTag>())),
"SharedIntrusive constructor should not throw or this can leak "
"memory");
return SharedIntrusive<TT>(p, SharedIntrusiveAdoptNoIncrementTag{});
}
//------------------------------------------------------------------------------
namespace intr_ptr {
template <class T>
using SharedPtr = SharedIntrusive<T>;
template <class T>
using WeakPtr = WeakIntrusive<T>;
template <class T>
using SharedWeakUnionPtr = SharedWeakUnion<T>;
template <class T, class... A>
SharedPtr<T>
make_shared(A&&... args)
{
return make_SharedIntrusive<T>(std::forward<A>(args)...);
}
template <class T, class TT>
SharedPtr<T>
static_pointer_cast(TT const& v)
{
return SharedPtr<T>(StaticCastTagSharedIntrusive{}, v);
}
template <class T, class TT>
SharedPtr<T>
dynamic_pointer_cast(TT const& v)
{
return SharedPtr<T>(DynamicCastTagSharedIntrusive{}, v);
}
} // namespace intr_ptr
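/** Example (an illustrative sketch; `Base` and `Derived` are hypothetical
    types, where `Derived` inherits from a polymorphic `Base` that in turn
    inherits IntrusiveRefCounts):

    @code
    void
    example(intr_ptr::SharedPtr<Base> const& base)
    {
        // Like std::dynamic_pointer_cast: empty on failure.
        if (auto d = intr_ptr::dynamic_pointer_cast<Derived>(base))
        {
            // Upcasts can use the static variant.
            auto b = intr_ptr::static_pointer_cast<Base>(d);
        }
    }
    @endcode
*/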
} // namespace ripple
#endif


@@ -0,0 +1,740 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2023 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_INTRUSIVEPOINTER_IPP_INCLUDED
#define RIPPLE_BASICS_INTRUSIVEPOINTER_IPP_INCLUDED
#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/basics/IntrusiveRefCounts.h>
#include <utility>
namespace ripple {
template <class T>
template <CAdoptTag TAdoptTag>
SharedIntrusive<T>::SharedIntrusive(T* p, TAdoptTag) noexcept : ptr_{p}
{
if constexpr (std::is_same_v<
TAdoptTag,
SharedIntrusiveAdoptIncrementStrongTag>)
{
if (p)
p->addStrongRef();
}
}
template <class T>
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive const& rhs)
: ptr_{[&] {
auto p = rhs.unsafeGetRawPtr();
if (p)
p->addStrongRef();
return p;
}()}
{
}
template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive<TT> const& rhs)
: ptr_{[&] {
auto p = rhs.unsafeGetRawPtr();
if (p)
p->addStrongRef();
return p;
}()}
{
}
template <class T>
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive&& rhs)
: ptr_{rhs.unsafeExchange(nullptr)}
{
}
template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive<TT>&& rhs)
: ptr_{rhs.unsafeExchange(nullptr)}
{
}
template <class T>
SharedIntrusive<T>&
SharedIntrusive<T>::operator=(SharedIntrusive const& rhs)
{
if (this == &rhs)
return *this;
auto p = rhs.unsafeGetRawPtr();
if (p)
p->addStrongRef();
unsafeReleaseAndStore(p);
return *this;
}
template <class T>
template <class TT>
// clang-format off
requires std::convertible_to<TT*, T*>
// clang-format on
SharedIntrusive<T>&
SharedIntrusive<T>::operator=(SharedIntrusive<TT> const& rhs)
{
if constexpr (std::is_same_v<T, TT>)
{
// This case should never be hit. The operator above will run instead.
// (The normal operator= is needed or it will be marked `deleted`)
if (this == &rhs)
return *this;
}
auto p = rhs.unsafeGetRawPtr();
if (p)
p->addStrongRef();
unsafeReleaseAndStore(p);
return *this;
}
template <class T>
SharedIntrusive<T>&
SharedIntrusive<T>::operator=(SharedIntrusive&& rhs)
{
if (this == &rhs)
return *this;
unsafeReleaseAndStore(rhs.unsafeExchange(nullptr));
return *this;
}
template <class T>
template <class TT>
// clang-format off
requires std::convertible_to<TT*, T*>
// clang-format on
SharedIntrusive<T>&
SharedIntrusive<T>::operator=(SharedIntrusive<TT>&& rhs)
{
static_assert(
!std::is_same_v<T, TT>,
"This overload should not be instantiated for T == TT");
unsafeReleaseAndStore(rhs.unsafeExchange(nullptr));
return *this;
}
template <class T>
bool
SharedIntrusive<T>::operator!=(std::nullptr_t) const
{
return this->get() != nullptr;
}
template <class T>
bool
SharedIntrusive<T>::operator==(std::nullptr_t) const
{
return this->get() == nullptr;
}
template <class T>
template <CAdoptTag TAdoptTag>
void
SharedIntrusive<T>::adopt(T* p)
{
if constexpr (std::is_same_v<
TAdoptTag,
SharedIntrusiveAdoptIncrementStrongTag>)
{
if (p)
p->addStrongRef();
}
unsafeReleaseAndStore(p);
}
template <class T>
SharedIntrusive<T>::~SharedIntrusive()
{
unsafeReleaseAndStore(nullptr);
};
template <class T>
template <class TT>
SharedIntrusive<T>::SharedIntrusive(
StaticCastTagSharedIntrusive,
SharedIntrusive<TT> const& rhs)
: ptr_{[&] {
auto p = static_cast<T*>(rhs.unsafeGetRawPtr());
if (p)
p->addStrongRef();
return p;
}()}
{
}
template <class T>
template <class TT>
SharedIntrusive<T>::SharedIntrusive(
StaticCastTagSharedIntrusive,
SharedIntrusive<TT>&& rhs)
: ptr_{static_cast<T*>(rhs.unsafeExchange(nullptr))}
{
}
template <class T>
template <class TT>
SharedIntrusive<T>::SharedIntrusive(
DynamicCastTagSharedIntrusive,
SharedIntrusive<TT> const& rhs)
: ptr_{[&] {
auto p = dynamic_cast<T*>(rhs.unsafeGetRawPtr());
if (p)
p->addStrongRef();
return p;
}()}
{
}
template <class T>
template <class TT>
SharedIntrusive<T>::SharedIntrusive(
DynamicCastTagSharedIntrusive,
SharedIntrusive<TT>&& rhs)
{
// This can be simplified without the `exchange`, but the `exchange` is kept
// in anticipation of supporting atomic operations.
auto toSet = rhs.unsafeExchange(nullptr);
if (toSet)
{
ptr_ = dynamic_cast<T*>(toSet);
if (!ptr_)
// need to set the pointer back or the object will leak
rhs.unsafeExchange(toSet);
}
}
template <class T>
T&
SharedIntrusive<T>::operator*() const noexcept
{
return *unsafeGetRawPtr();
}
template <class T>
T*
SharedIntrusive<T>::operator->() const noexcept
{
return unsafeGetRawPtr();
}
template <class T>
SharedIntrusive<T>::operator bool() const noexcept
{
return bool(unsafeGetRawPtr());
}
template <class T>
void
SharedIntrusive<T>::reset()
{
unsafeReleaseAndStore(nullptr);
}
template <class T>
T*
SharedIntrusive<T>::get() const
{
return unsafeGetRawPtr();
}
template <class T>
std::size_t
SharedIntrusive<T>::use_count() const
{
if (auto p = unsafeGetRawPtr())
return p->use_count();
return 0;
}
template <class T>
T*
SharedIntrusive<T>::unsafeGetRawPtr() const
{
return ptr_;
}
template <class T>
void
SharedIntrusive<T>::unsafeSetRawPtr(T* p)
{
ptr_ = p;
}
template <class T>
T*
SharedIntrusive<T>::unsafeExchange(T* p)
{
return std::exchange(ptr_, p);
}
template <class T>
void
SharedIntrusive<T>::unsafeReleaseAndStore(T* next)
{
auto prev = unsafeExchange(next);
if (!prev)
return;
using enum ReleaseStrongRefAction;
auto action = prev->releaseStrongRef();
switch (action)
{
case noop:
break;
case destroy:
delete prev;
break;
case partialDestroy:
prev->partialDestructor();
partialDestructorFinished(&prev);
// prev is null and may no longer be used
break;
}
}
//------------------------------------------------------------------------------
template <class T>
WeakIntrusive<T>::WeakIntrusive(WeakIntrusive const& rhs) : ptr_{rhs.ptr_}
{
if (ptr_)
ptr_->addWeakRef();
}
template <class T>
WeakIntrusive<T>::WeakIntrusive(WeakIntrusive&& rhs) : ptr_{rhs.ptr_}
{
rhs.ptr_ = nullptr;
}
template <class T>
WeakIntrusive<T>::WeakIntrusive(SharedIntrusive<T> const& rhs)
: ptr_{rhs.unsafeGetRawPtr()}
{
if (ptr_)
ptr_->addWeakRef();
}
template <class T>
template <class TT>
// clang-format off
requires std::convertible_to<TT*, T*>
// clang-format on
WeakIntrusive<T>&
WeakIntrusive<T>::operator=(SharedIntrusive<TT> const& rhs)
{
unsafeReleaseNoStore();
ptr_ = rhs.unsafeGetRawPtr();
if (ptr_)
ptr_->addWeakRef();
return *this;
}
template <class T>
void
WeakIntrusive<T>::adopt(T* ptr)
{
unsafeReleaseNoStore();
if (ptr)
ptr->addWeakRef();
ptr_ = ptr;
}
template <class T>
WeakIntrusive<T>::~WeakIntrusive()
{
unsafeReleaseNoStore();
}
template <class T>
SharedIntrusive<T>
WeakIntrusive<T>::lock() const
{
if (ptr_ && ptr_->checkoutStrongRefFromWeak())
{
return SharedIntrusive<T>{ptr_, SharedIntrusiveAdoptNoIncrementTag{}};
}
return {};
}
template <class T>
bool
WeakIntrusive<T>::expired() const
{
return (!ptr_ || ptr_->expired());
}
template <class T>
void
WeakIntrusive<T>::reset()
{
unsafeReleaseNoStore();
ptr_ = nullptr;
}
template <class T>
void
WeakIntrusive<T>::unsafeReleaseNoStore()
{
if (!ptr_)
return;
using enum ReleaseWeakRefAction;
auto action = ptr_->releaseWeakRef();
switch (action)
{
case noop:
break;
case destroy:
delete ptr_;
break;
}
}
//------------------------------------------------------------------------------
template <class T>
SharedWeakUnion<T>::SharedWeakUnion(SharedWeakUnion const& rhs) : tp_{rhs.tp_}
{
auto p = rhs.unsafeGetRawPtr();
if (!p)
return;
if (rhs.isStrong())
p->addStrongRef();
else
p->addWeakRef();
}
template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakUnion<T>::SharedWeakUnion(SharedIntrusive<TT> const& rhs)
{
auto p = rhs.unsafeGetRawPtr();
if (p)
p->addStrongRef();
unsafeSetRawPtr(p, RefStrength::strong);
}
template <class T>
SharedWeakUnion<T>::SharedWeakUnion(SharedWeakUnion&& rhs) : tp_{rhs.tp_}
{
rhs.unsafeSetRawPtr(nullptr);
}
template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakUnion<T>::SharedWeakUnion(SharedIntrusive<TT>&& rhs)
{
auto p = rhs.unsafeGetRawPtr();
if (p)
unsafeSetRawPtr(p, RefStrength::strong);
rhs.unsafeSetRawPtr(nullptr);
}
template <class T>
SharedWeakUnion<T>&
SharedWeakUnion<T>::operator=(SharedWeakUnion const& rhs)
{
if (this == &rhs)
return *this;
unsafeReleaseNoStore();
if (auto p = rhs.unsafeGetRawPtr())
{
if (rhs.isStrong())
{
p->addStrongRef();
unsafeSetRawPtr(p, RefStrength::strong);
}
else
{
p->addWeakRef();
unsafeSetRawPtr(p, RefStrength::weak);
}
}
else
{
unsafeSetRawPtr(nullptr);
}
return *this;
}
template <class T>
template <class TT>
// clang-format off
requires std::convertible_to<TT*, T*>
// clang-format on
SharedWeakUnion<T>&
SharedWeakUnion<T>::operator=(SharedIntrusive<TT> const& rhs)
{
unsafeReleaseNoStore();
auto p = rhs.unsafeGetRawPtr();
if (p)
p->addStrongRef();
unsafeSetRawPtr(p, RefStrength::strong);
return *this;
}
template <class T>
template <class TT>
// clang-format off
requires std::convertible_to<TT*, T*>
// clang-format on
SharedWeakUnion<T>&
SharedWeakUnion<T>::operator=(SharedIntrusive<TT>&& rhs)
{
unsafeReleaseNoStore();
unsafeSetRawPtr(rhs.unsafeGetRawPtr(), RefStrength::strong);
rhs.unsafeSetRawPtr(nullptr);
return *this;
}
template <class T>
SharedWeakUnion<T>::~SharedWeakUnion()
{
unsafeReleaseNoStore();
};
// Return a strong pointer if this is already a strong pointer (i.e. don't
// lock the weak pointer. Use the `lock` method if that's what's needed)
template <class T>
SharedIntrusive<T>
SharedWeakUnion<T>::getStrong() const
{
SharedIntrusive<T> result;
auto p = unsafeGetRawPtr();
if (p && isStrong())
{
result.template adopt<SharedIntrusiveAdoptIncrementStrongTag>(p);
}
return result;
}
template <class T>
SharedWeakUnion<T>::operator bool() const noexcept
{
return bool(get());
}
template <class T>
void
SharedWeakUnion<T>::reset()
{
unsafeReleaseNoStore();
unsafeSetRawPtr(nullptr);
}
template <class T>
T*
SharedWeakUnion<T>::get() const
{
return isStrong() ? unsafeGetRawPtr() : nullptr;
}
template <class T>
std::size_t
SharedWeakUnion<T>::use_count() const
{
if (auto p = get())
return p->use_count();
return 0;
}
template <class T>
bool
SharedWeakUnion<T>::expired() const
{
auto p = unsafeGetRawPtr();
return (!p || p->expired());
}
template <class T>
SharedIntrusive<T>
SharedWeakUnion<T>::lock() const
{
SharedIntrusive<T> result;
auto p = unsafeGetRawPtr();
if (!p)
return result;
if (isStrong())
{
result.template adopt<SharedIntrusiveAdoptIncrementStrongTag>(p);
return result;
}
if (p->checkoutStrongRefFromWeak())
{
result.template adopt<SharedIntrusiveAdoptNoIncrementTag>(p);
return result;
}
return result;
}
template <class T>
bool
SharedWeakUnion<T>::isStrong() const
{
return !(tp_ & tagMask);
}
template <class T>
bool
SharedWeakUnion<T>::isWeak() const
{
return tp_ & tagMask;
}
template <class T>
bool
SharedWeakUnion<T>::convertToStrong()
{
if (isStrong())
return true;
auto p = unsafeGetRawPtr();
if (p && p->checkoutStrongRefFromWeak())
{
[[maybe_unused]] auto action = p->releaseWeakRef();
XRPL_ASSERT(
(action == ReleaseWeakRefAction::noop),
"ripple::SharedWeakUnion::convertToStrong : "
"action is noop");
unsafeSetRawPtr(p, RefStrength::strong);
return true;
}
return false;
}
template <class T>
bool
SharedWeakUnion<T>::convertToWeak()
{
if (isWeak())
return true;
auto p = unsafeGetRawPtr();
if (!p)
return false;
using enum ReleaseStrongRefAction;
auto action = p->addWeakReleaseStrongRef();
switch (action)
{
case noop:
break;
case destroy:
// We just added a weak ref. How could we destroy?
UNREACHABLE(
"ripple::SharedWeakUnion::convertToWeak : destroying freshly "
"added ref");
delete p;
unsafeSetRawPtr(nullptr);
return true; // Should never happen
case partialDestroy:
// This is a weird case. We just converted the last strong
// pointer to a weak pointer.
p->partialDestructor();
partialDestructorFinished(&p);
// p is null and may no longer be used
break;
}
unsafeSetRawPtr(p, RefStrength::weak);
return true;
}
template <class T>
T*
SharedWeakUnion<T>::unsafeGetRawPtr() const
{
return reinterpret_cast<T*>(tp_ & ptrMask);
}
template <class T>
void
SharedWeakUnion<T>::unsafeSetRawPtr(T* p, RefStrength rs)
{
tp_ = reinterpret_cast<std::uintptr_t>(p);
if (tp_ && rs == RefStrength::weak)
tp_ |= tagMask;
}
template <class T>
void
SharedWeakUnion<T>::unsafeSetRawPtr(std::nullptr_t)
{
tp_ = 0;
}
template <class T>
void
SharedWeakUnion<T>::unsafeReleaseNoStore()
{
auto p = unsafeGetRawPtr();
if (!p)
return;
if (isStrong())
{
using enum ReleaseStrongRefAction;
auto strongAction = p->releaseStrongRef();
switch (strongAction)
{
case noop:
break;
case destroy:
delete p;
break;
case partialDestroy:
p->partialDestructor();
partialDestructorFinished(&p);
// p is null and may no longer be used
break;
}
}
else
{
using enum ReleaseWeakRefAction;
auto weakAction = p->releaseWeakRef();
switch (weakAction)
{
case noop:
break;
case destroy:
delete p;
break;
}
}
}
} // namespace ripple
#endif


@@ -0,0 +1,502 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2023 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED
#define RIPPLE_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED
#include <xrpl/beast/utility/instrumentation.h>
#include <atomic>
#include <cstdint>
namespace ripple {
/** Action to perform when releasing a strong pointer.
noop: Do nothing. For example, a `noop` action will occur when a count is
decremented to a non-zero value.
partialDestroy: Run the `partialDestructor`. This action will happen when a
strong count is decremented to zero and the weak count is non-zero.
destroy: Run the destructor. This action will occur when the strong count
is decremented to zero and the weak count is also zero.
*/
enum class ReleaseStrongRefAction { noop, partialDestroy, destroy };
/** Action to perform when releasing a weak pointer.
noop: Do nothing. For example, a `noop` action will occur when a count is
decremented to a non-zero value.
destroy: Run the destructor. This action will occur when the weak count
is decremented to zero and the strong count is also zero.
*/
enum class ReleaseWeakRefAction { noop, destroy };
/** Implement the strong count, weak count, and bit flags for an intrusive
pointer.
A class can satisfy the requirements of a ripple::IntrusivePointer by
inheriting from this class.
*/
struct IntrusiveRefCounts
{
virtual ~IntrusiveRefCounts() noexcept;
// This must be `noexcept` or the make_SharedIntrusive function could leak
// memory.
void
addStrongRef() const noexcept;
void
addWeakRef() const noexcept;
ReleaseStrongRefAction
releaseStrongRef() const;
// Same as:
// {
// addWeakRef();
// return releaseStrongRef();
// }
// done as one atomic operation
ReleaseStrongRefAction
addWeakReleaseStrongRef() const;
ReleaseWeakRefAction
releaseWeakRef() const;
// Returns true if able to check out a strong ref, false otherwise.
bool
checkoutStrongRefFromWeak() const noexcept;
bool
expired() const noexcept;
std::size_t
use_count() const noexcept;
// This function MUST be called after a partial destructor finishes running.
// Calling this function may cause other threads to delete the object
// pointed to by `o`, so `o` should never be used after calling this
// function. The parameter will be set to a `nullptr` after calling this
// function to emphasize that it should not be used.
// Note: This is intentionally NOT called at the end of `partialDestructor`.
// If it were, every new class written to support this smart pointer
// would have to remember to call `partialDestructorFinished` at the end
// of its own `partialDestructor`. Putting the call inside the smart
// pointer class itself is expected to be less error prone.
// Note: The "two-star" programming is intentional. It emphasizes that `o`
// may be deleted and the unergonomic API is meant to signal the special
// nature of this function call to callers.
// Note: This is a template to support incompletely defined classes.
template <class T>
friend void
partialDestructorFinished(T** o);
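/** Example of the expected protocol (an illustrative sketch; `InnerNode`
    is a hypothetical class): the pointee releases what it can in
    `partialDestructor`, and the smart pointer classes invoke
    `partialDestructorFinished` immediately afterwards.

    @code
    struct InnerNode : IntrusiveRefCounts
    {
        std::vector<SharedIntrusive<InnerNode>> children;

        void
        partialDestructor()
        {
            // The strong count is zero but weak refs remain: free the
            // bulk of the object now. The counts themselves must survive
            // until the weak count also reaches zero.
            children.clear();
        }
    };
    @endcode
*/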
private:
// TODO: We may need to use a uint64_t for both counts. This will reduce the
// memory savings. We need to audit the code to make sure 16 bit counts are
// enough for strong pointers and 14 bit counts are enough for weak
// pointers. Use type aliases to make it easy to switch types.
using CountType = std::uint16_t;
static constexpr size_t StrongCountNumBits = sizeof(CountType) * 8;
static constexpr size_t WeakCountNumBits = StrongCountNumBits - 2;
using FieldType = std::uint32_t;
static constexpr size_t FieldTypeBits = sizeof(FieldType) * 8;
static constexpr FieldType one = 1;
/** `refCounts` consists of four fields that are treated atomically:
1. Strong count. This is a count of the number of shared pointers that
hold a reference to this object. When the strong count goes to zero,
if the weak count is zero, the destructor is run. If the weak count is
non-zero when the strong count goes to zero then the partialDestructor
is run.
2. Weak count. This is a count of the number of weak pointers that hold
a reference to this object. When the weak count goes to zero and the
strong count is also zero, then the destructor is run.
3. Partial destroy started bit. This bit is set if the
`partialDestructor` function has been started (or is about to be
started). This is used to prevent the destructor from running
concurrently with the partial destructor. This can easily happen when
the last strong pointer releases its reference in one thread and starts
the partialDestructor, while in another thread the last weak pointer
goes out of scope and starts the destructor while the partialDestructor
is still running. Both a started and a finished bit are needed to handle
a corner case where the last strong pointer goes out of scope, then the
last weak pointer goes out of scope, but this happens before the
`partialDestroyStarted` bit is set. It would be possible to use a single
bit if it could also be set atomically when the strong count goes to
zero and the weak count is non-zero, but that would add complexity (and
likely slow down common cases as well).
4. Partial destroy finished bit. This bit is set when the
`partialDestructor` has finished running. See (3) above for more
information.
*/
mutable std::atomic<FieldType> refCounts{strongDelta};
/** Amount to change the strong count when adding or releasing a reference
Note: The strong count is stored in the low `StrongCountNumBits` bits
of refCounts
*/
static constexpr FieldType strongDelta = 1;
/** Amount to change the weak count when adding or releasing a reference
Note: The weak count is stored in the high `WeakCountNumBits` bits of
refCounts
*/
static constexpr FieldType weakDelta = (one << StrongCountNumBits);
/** Flag that is set when the partialDestroy function has started running
(or is about to start running).
See description of the `refCounts` field for a fuller description of
this field.
*/
static constexpr FieldType partialDestroyStartedMask =
(one << (FieldTypeBits - 1));
/** Flag that is set when the partialDestroy function has finished running
See description of the `refCounts` field for a fuller description of
this field.
*/
static constexpr FieldType partialDestroyFinishedMask =
(one << (FieldTypeBits - 2));
/** Mask that will zero out all the `count` bits and leave the tag bits
unchanged.
*/
static constexpr FieldType tagMask =
partialDestroyStartedMask | partialDestroyFinishedMask;
/** Mask that will zero out the `tag` bits and leave the count bits
unchanged.
*/
static constexpr FieldType valueMask = ~tagMask;
/** Mask that will zero out everything except the strong count.
*/
static constexpr FieldType strongMask =
((one << StrongCountNumBits) - 1) & valueMask;
/** Mask that will zero out everything except the weak count.
*/
static constexpr FieldType weakMask =
(((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask;
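// Resulting layout with the aliases above (CountType = uint16_t,
// FieldType = uint32_t):
//   bit  31       : partialDestroyStartedMask
//   bit  30       : partialDestroyFinishedMask
//   bits 16 .. 29 : weak count   (WeakCountNumBits = 14)
//   bits  0 .. 15 : strong count (StrongCountNumBits = 16)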
/** Unpack the count and tag fields from the packed atomic integer form. */
struct RefCountPair
{
CountType strong;
CountType weak;
/** The `partialDestroyStartedBit` is set when the partial destroy
function is started. It is not a boolean; it is a FieldType that is
either zero or has only the `partialDestroyStartedMask` bit set. This
is done so it can be directly masked into the `combinedValue`.
*/
FieldType partialDestroyStartedBit{0};
/** The `partialDestroyFinishedBit` is set when the partial
destroy function has finished.
*/
FieldType partialDestroyFinishedBit{0};
RefCountPair(FieldType v) noexcept;
RefCountPair(CountType s, CountType w) noexcept;
/** Convert back to the packed integer form. */
FieldType
combinedValue() const noexcept;
static constexpr CountType maxStrongValue =
static_cast<CountType>((one << StrongCountNumBits) - 1);
static constexpr CountType maxWeakValue =
static_cast<CountType>((one << WeakCountNumBits) - 1);
/** Leave an extra margin to detect when counts run up against limits.
This is only used in debug code, and is useful if we reduce the
number of bits in the strong and weak counts (to 16 and 14 bits).
*/
static constexpr CountType checkStrongMaxValue = maxStrongValue - 32;
static constexpr CountType checkWeakMaxValue = maxWeakValue - 32;
};
};
inline void
IntrusiveRefCounts::addStrongRef() const noexcept
{
refCounts.fetch_add(strongDelta, std::memory_order_acq_rel);
}
inline void
IntrusiveRefCounts::addWeakRef() const noexcept
{
refCounts.fetch_add(weakDelta, std::memory_order_acq_rel);
}
inline ReleaseStrongRefAction
IntrusiveRefCounts::releaseStrongRef() const
{
// Subtract `strongDelta` from refCounts. If this releases the last strong
// ref, set the `partialDestroyStarted` bit. It is important that the ref
// count and the `partialDestroyStartedBit` are changed atomically (hence
// the loop and `compare_exchange` op). If this didn't need to be done
// atomically, the loop could be replaced with a `fetch_sub` and a
// conditional `fetch_or`. This loop will almost always run once.
using enum ReleaseStrongRefAction;
auto prevIntVal = refCounts.load(std::memory_order_acquire);
while (1)
{
RefCountPair const prevVal{prevIntVal};
XRPL_ASSERT(
(prevVal.strong >= strongDelta),
"ripple::IntrusiveRefCounts::releaseStrongRef : previous ref "
"higher than new");
auto nextIntVal = prevIntVal - strongDelta;
ReleaseStrongRefAction action = noop;
if (prevVal.strong == 1)
{
if (prevVal.weak == 0)
{
action = destroy;
}
else
{
nextIntVal |= partialDestroyStartedMask;
action = partialDestroy;
}
}
if (refCounts.compare_exchange_weak(
prevIntVal, nextIntVal, std::memory_order_release))
{
// Can't be in partial destroy because only decrementing the strong
// count to zero can start a partial destroy, and that can't happen
// twice.
XRPL_ASSERT(
(action == noop) || !(prevIntVal & partialDestroyStartedMask),
"ripple::IntrusiveRefCounts::releaseStrongRef : not in partial "
"destroy");
return action;
}
}
}
inline ReleaseStrongRefAction
IntrusiveRefCounts::addWeakReleaseStrongRef() const
{
using enum ReleaseStrongRefAction;
static_assert(weakDelta > strongDelta);
auto constexpr delta = weakDelta - strongDelta;
auto prevIntVal = refCounts.load(std::memory_order_acquire);
// This loop will almost always run once. The loop is needed to atomically
// change the counts and flags (the count could be atomically changed, but
// the flags depend on the current value of the counts).
//
// Note: If this becomes a perf bottleneck, the `partialDestroyStartedMask`
// may be able to be set non-atomically. But it is easier to reason about
// the code if the flag is set atomically.
while (1)
{
RefCountPair const prevVal{prevIntVal};
// A strong pointer is being converted to a weak pointer.
//
// Can't be in partial destroy because only decrementing the
// strong count to zero can start a partial destroy, and that
// can't happen twice.
XRPL_ASSERT(
(!prevVal.partialDestroyStartedBit),
"ripple::IntrusiveRefCounts::addWeakReleaseStrongRef : not in "
"partial destroy");
auto nextIntVal = prevIntVal + delta;
ReleaseStrongRefAction action = noop;
if (prevVal.strong == 1)
{
if (prevVal.weak == 0)
{
action = noop;
}
else
{
nextIntVal |= partialDestroyStartedMask;
action = partialDestroy;
}
}
if (refCounts.compare_exchange_weak(
prevIntVal, nextIntVal, std::memory_order_release))
{
XRPL_ASSERT(
(!(prevIntVal & partialDestroyStartedMask)),
"ripple::IntrusiveRefCounts::addWeakReleaseStrongRef : not "
"started partial destroy");
return action;
}
}
}
inline ReleaseWeakRefAction
IntrusiveRefCounts::releaseWeakRef() const
{
auto prevIntVal = refCounts.fetch_sub(weakDelta, std::memory_order_acq_rel);
RefCountPair prev = prevIntVal;
if (prev.weak == 1 && prev.strong == 0)
{
if (!prev.partialDestroyStartedBit)
{
// This case should only be hit if the partialDestroyStartedBit is
// set non-atomically (and even then very rarely). The code is kept
// in case we need to set the flag non-atomically for perf reasons.
refCounts.wait(prevIntVal, std::memory_order_acquire);
prevIntVal = refCounts.load(std::memory_order_acquire);
prev = RefCountPair{prevIntVal};
}
if (!prev.partialDestroyFinishedBit)
{
// partial destroy MUST finish before running a full destroy (when
// using weak pointers)
refCounts.wait(prevIntVal - weakDelta, std::memory_order_acquire);
}
return ReleaseWeakRefAction::destroy;
}
return ReleaseWeakRefAction::noop;
}
inline bool
IntrusiveRefCounts::checkoutStrongRefFromWeak() const noexcept
{
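// Optimistic fast path: start from the common state (strong == 1,
// weak == 1) and try to bump the strong count with a single CAS. On
// failure, retry with the observed value, bailing out once the strong
// count has reached zero: the object is already being (or has been)
// partially destroyed and must not be revived.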
auto curValue = RefCountPair{1, 1}.combinedValue();
auto desiredValue = RefCountPair{2, 1}.combinedValue();
while (!refCounts.compare_exchange_weak(
curValue, desiredValue, std::memory_order_release))
{
RefCountPair const prev{curValue};
if (!prev.strong)
return false;
desiredValue = curValue + strongDelta;
}
return true;
}
inline bool
IntrusiveRefCounts::expired() const noexcept
{
RefCountPair const val = refCounts.load(std::memory_order_acquire);
return val.strong == 0;
}
inline std::size_t
IntrusiveRefCounts::use_count() const noexcept
{
RefCountPair const val = refCounts.load(std::memory_order_acquire);
return val.strong;
}
inline IntrusiveRefCounts::~IntrusiveRefCounts() noexcept
{
#ifndef NDEBUG
auto v = refCounts.load(std::memory_order_acquire);
XRPL_ASSERT(
(!(v & valueMask)),
"ripple::IntrusiveRefCounts::~IntrusiveRefCounts : count must be zero");
auto t = v & tagMask;
XRPL_ASSERT(
(!t || t == tagMask),
"ripple::IntrusiveRefCounts::~IntrusiveRefCounts : valid tag");
#endif
}
//------------------------------------------------------------------------------
inline IntrusiveRefCounts::RefCountPair::RefCountPair(
IntrusiveRefCounts::FieldType v) noexcept
: strong{static_cast<CountType>(v & strongMask)}
, weak{static_cast<CountType>((v & weakMask) >> StrongCountNumBits)}
, partialDestroyStartedBit{v & partialDestroyStartedMask}
, partialDestroyFinishedBit{v & partialDestroyFinishedMask}
{
XRPL_ASSERT(
(strong < checkStrongMaxValue && weak < checkWeakMaxValue),
"ripple::IntrusiveRefCounts::RefCountPair(FieldType) : inputs inside "
"range");
}
inline IntrusiveRefCounts::RefCountPair::RefCountPair(
IntrusiveRefCounts::CountType s,
IntrusiveRefCounts::CountType w) noexcept
: strong{s}, weak{w}
{
XRPL_ASSERT(
(strong < checkStrongMaxValue && weak < checkWeakMaxValue),
"ripple::IntrusiveRefCounts::RefCountPair(CountType, CountType) : "
"inputs inside range");
}
inline IntrusiveRefCounts::FieldType
IntrusiveRefCounts::RefCountPair::combinedValue() const noexcept
{
XRPL_ASSERT(
(strong < checkStrongMaxValue && weak < checkWeakMaxValue),
"ripple::IntrusiveRefCounts::RefCountPair::combinedValue : inputs "
"inside range");
return (static_cast<IntrusiveRefCounts::FieldType>(weak)
<< IntrusiveRefCounts::StrongCountNumBits) |
static_cast<IntrusiveRefCounts::FieldType>(strong) |
partialDestroyStartedBit | partialDestroyFinishedBit;
}
template <class T>
inline void
partialDestructorFinished(T** o)
{
T& self = **o;
IntrusiveRefCounts::RefCountPair p =
self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask);
XRPL_ASSERT(
(!p.partialDestroyFinishedBit && p.partialDestroyStartedBit &&
!p.strong),
"ripple::partialDestructorFinished : not a weak ref");
if (!p.weak)
{
// There was a weak count before the partial destructor ran (or we would
// have run the full destructor) and now there isn't a weak count. Some
// thread is waiting to run the destructor.
self.refCounts.notify_one();
}
// Set the pointer to null to emphasize that the object shouldn't be used
// after calling this function as it may be destroyed in another thread.
*o = nullptr;
}
//------------------------------------------------------------------------------
} // namespace ripple
#endif


@@ -0,0 +1,135 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2023 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_H_INCLUDED
#define RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_H_INCLUDED
#include <memory>
#include <variant>
namespace ripple {
/** A combination of a std::shared_ptr and a std::weak_ptr.
This class is a wrapper around a
`std::variant<std::shared_ptr<T>, std::weak_ptr<T>>`.
It is useful for storing pointers in tagged caches using less
memory than storing both pointers directly.
*/
template <class T>
class SharedWeakCachePointer
{
public:
SharedWeakCachePointer() = default;
SharedWeakCachePointer(SharedWeakCachePointer const& rhs);
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer(std::shared_ptr<TT> const& rhs);
SharedWeakCachePointer(SharedWeakCachePointer&& rhs);
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer(std::shared_ptr<TT>&& rhs);
SharedWeakCachePointer&
operator=(SharedWeakCachePointer const& rhs);
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer&
operator=(std::shared_ptr<TT> const& rhs);
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer&
operator=(std::shared_ptr<TT>&& rhs);
~SharedWeakCachePointer();
/** Return a strong pointer if this is already a strong pointer (i.e. don't
lock the weak pointer. Use the `lock` method if that's what's needed)
*/
std::shared_ptr<T> const&
getStrong() const;
/** Return true if this is a strong pointer and the strong pointer is
seated.
*/
explicit
operator bool() const noexcept;
/** Set the pointer to null, decrement the appropriate ref count, and run
the appropriate release action.
*/
void
reset();
/** If this is a strong pointer, return the raw pointer. Otherwise return
null.
*/
T*
get() const;
/** If this is a strong pointer, return the strong count. Otherwise return 0
*/
std::size_t
use_count() const;
/** Return true if the strong count is zero. */
bool
expired() const;
/** If this is a strong pointer, return the strong pointer. Otherwise
attempt to lock the weak pointer.
*/
std::shared_ptr<T>
lock() const;
/** Return true if this represents a strong pointer. */
bool
isStrong() const;
/** Return true if this represents a weak pointer. */
bool
isWeak() const;
/** If this is a weak pointer, attempt to convert it to a strong pointer.
@return true if successfully converted to a strong pointer (or was
already a strong pointer). Otherwise false.
*/
bool
convertToStrong();
/** If this is a strong pointer, attempt to convert it to a weak pointer.
@return false if the pointer is null. Otherwise return true.
*/
bool
convertToWeak();
private:
std::variant<std::shared_ptr<T>, std::weak_ptr<T>> combo_;
};
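/** Example (an illustrative sketch; `Node` is a hypothetical type):

    @code
    void
    example(std::shared_ptr<Node> const& sp)
    {
        SharedWeakCachePointer<Node> entry{sp}; // holds the shared_ptr
        entry.convertToWeak();                  // now holds a weak_ptr
        if (auto locked = entry.lock())
        {
            // The object was still alive; `locked` is a shared_ptr.
        }
    }
    @endcode
*/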
} // namespace ripple
#endif


@@ -0,0 +1,192 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2023 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_IPP_INCLUDED
#define RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_IPP_INCLUDED
#include <xrpl/basics/SharedWeakCachePointer.h>
namespace ripple {
template <class T>
SharedWeakCachePointer<T>::SharedWeakCachePointer(
SharedWeakCachePointer const& rhs) = default;
template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>::SharedWeakCachePointer(
std::shared_ptr<TT> const& rhs)
: combo_{rhs}
{
}
template <class T>
SharedWeakCachePointer<T>::SharedWeakCachePointer(
SharedWeakCachePointer&& rhs) = default;
template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>::SharedWeakCachePointer(std::shared_ptr<TT>&& rhs)
: combo_{std::move(rhs)}
{
}
template <class T>
SharedWeakCachePointer<T>&
SharedWeakCachePointer<T>::operator=(SharedWeakCachePointer const& rhs) =
default;
template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>&
SharedWeakCachePointer<T>::operator=(std::shared_ptr<TT> const& rhs)
{
combo_ = rhs;
return *this;
}
template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>&
SharedWeakCachePointer<T>::operator=(std::shared_ptr<TT>&& rhs)
{
combo_ = std::move(rhs);
return *this;
}
template <class T>
SharedWeakCachePointer<T>::~SharedWeakCachePointer() = default;
// Return a strong pointer if this is already a strong pointer (i.e. don't
// lock the weak pointer. Use the `lock` method if that's what's needed)
template <class T>
std::shared_ptr<T> const&
SharedWeakCachePointer<T>::getStrong() const
{
static std::shared_ptr<T> const empty;
if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
return *p;
return empty;
}
template <class T>
SharedWeakCachePointer<T>::operator bool() const noexcept
{
return isStrong();
}
template <class T>
void
SharedWeakCachePointer<T>::reset()
{
combo_ = std::shared_ptr<T>{};
}
template <class T>
T*
SharedWeakCachePointer<T>::get() const
{
if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
return p->get();
return nullptr;
}
template <class T>
std::size_t
SharedWeakCachePointer<T>::use_count() const
{
if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
return p->use_count();
return 0;
}
template <class T>
bool
SharedWeakCachePointer<T>::expired() const
{
if (auto p = std::get_if<std::weak_ptr<T>>(&combo_))
return p->expired();
return !isStrong();
}
template <class T>
std::shared_ptr<T>
SharedWeakCachePointer<T>::lock() const
{
if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
return *p;
if (auto p = std::get_if<std::weak_ptr<T>>(&combo_))
return p->lock();
return {};
}
template <class T>
bool
SharedWeakCachePointer<T>::isStrong() const
{
if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
return !!p->get();
return false;
}
template <class T>
bool
SharedWeakCachePointer<T>::isWeak() const
{
return !isStrong();
}
template <class T>
bool
SharedWeakCachePointer<T>::convertToStrong()
{
if (isStrong())
return true;
if (auto p = std::get_if<std::weak_ptr<T>>(&combo_))
{
if (auto s = p->lock())
{
combo_ = std::move(s);
return true;
}
}
return false;
}
template <class T>
bool
SharedWeakCachePointer<T>::convertToWeak()
{
if (isWeak())
return true;
if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
{
combo_ = std::weak_ptr<T>(*p);
return true;
}
return false;
}
} // namespace ripple
#endif


@@ -20,7 +20,9 @@
#ifndef RIPPLE_BASICS_TAGGEDCACHE_H_INCLUDED
#define RIPPLE_BASICS_TAGGEDCACHE_H_INCLUDED
#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/SharedWeakCachePointer.ipp>
#include <xrpl/basics/UnorderedContainers.h>
#include <xrpl/basics/hardened_hash.h>
#include <xrpl/beast/clock/abstract_clock.h>
@@ -51,6 +53,8 @@ template <
class Key,
class T,
bool IsKeyCache = false,
class SharedWeakUnionPointerType = SharedWeakCachePointer<T>,
class SharedPointerType = std::shared_ptr<T>,
class Hash = hardened_hash<>,
class KeyEqual = std::equal_to<Key>,
class Mutex = std::recursive_mutex>
@@ -61,6 +65,8 @@ public:
using key_type = Key;
using mapped_type = T;
using clock_type = beast::abstract_clock<std::chrono::steady_clock>;
using shared_weak_combo_pointer_type = SharedWeakUnionPointerType;
using shared_pointer_type = SharedPointerType;
public:
TaggedCache(
@@ -70,231 +76,48 @@ public:
clock_type& clock,
beast::Journal journal,
beast::insight::Collector::ptr const& collector =
beast::insight::NullCollector::New())
: m_journal(journal)
, m_clock(clock)
, m_stats(
name,
std::bind(&TaggedCache::collect_metrics, this),
collector)
, m_name(name)
, m_target_size(size)
, m_target_age(expiration)
, m_cache_count(0)
, m_hits(0)
, m_misses(0)
{
}
beast::insight::NullCollector::New());
public:
/** Return the clock associated with the cache. */
clock_type&
clock()
{
return m_clock;
}
clock();
/** Returns the number of items in the container. */
std::size_t
size() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
}
void
setTargetSize(int s)
{
std::lock_guard lock(m_mutex);
m_target_size = s;
if (s > 0)
{
for (auto& partition : m_cache.map())
{
partition.rehash(static_cast<std::size_t>(
(s + (s >> 2)) /
(partition.max_load_factor() * m_cache.partitions()) +
1));
}
}
JLOG(m_journal.debug()) << m_name << " target size set to " << s;
}
clock_type::duration
getTargetAge() const
{
std::lock_guard lock(m_mutex);
return m_target_age;
}
void
setTargetAge(clock_type::duration s)
{
std::lock_guard lock(m_mutex);
m_target_age = s;
JLOG(m_journal.debug())
<< m_name << " target age set to " << m_target_age.count();
}
size() const;
int
getCacheSize() const
{
std::lock_guard lock(m_mutex);
return m_cache_count;
}
getCacheSize() const;
int
getTrackSize() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
}
getTrackSize() const;
float
getHitRate()
{
std::lock_guard lock(m_mutex);
auto const total = static_cast<float>(m_hits + m_misses);
return m_hits * (100.0f / std::max(1.0f, total));
}
getHitRate();
void
clear()
{
std::lock_guard lock(m_mutex);
m_cache.clear();
m_cache_count = 0;
}
clear();
void
reset()
{
std::lock_guard lock(m_mutex);
m_cache.clear();
m_cache_count = 0;
m_hits = 0;
m_misses = 0;
}
reset();
/** Refresh the last access time on a key if present.
@return `true` if the key was found.
*/
template <class KeyComparable>
bool
touch_if_exists(KeyComparable const& key)
{
std::lock_guard lock(m_mutex);
auto const iter(m_cache.find(key));
if (iter == m_cache.end())
{
++m_stats.misses;
return false;
}
iter->second.touch(m_clock.now());
++m_stats.hits;
return true;
}
touch_if_exists(KeyComparable const& key);
using SweptPointersVector = std::pair<
std::vector<std::shared_ptr<mapped_type>>,
std::vector<std::weak_ptr<mapped_type>>>;
using SweptPointersVector = std::vector<SharedWeakUnionPointerType>;
void
sweep()
{
// Keep references to all the stuff we sweep
// For performance, each worker thread should exit before the swept data
// is destroyed but still within the main cache lock.
std::vector<SweptPointersVector> allStuffToSweep(m_cache.partitions());
clock_type::time_point const now(m_clock.now());
clock_type::time_point when_expire;
auto const start = std::chrono::steady_clock::now();
{
std::lock_guard lock(m_mutex);
if (m_target_size == 0 ||
(static_cast<int>(m_cache.size()) <= m_target_size))
{
when_expire = now - m_target_age;
}
else
{
when_expire =
now - m_target_age * m_target_size / m_cache.size();
clock_type::duration const minimumAge(std::chrono::seconds(1));
if (when_expire > (now - minimumAge))
when_expire = now - minimumAge;
JLOG(m_journal.trace())
<< m_name << " is growing fast " << m_cache.size() << " of "
<< m_target_size << " aging at "
<< (now - when_expire).count() << " of "
<< m_target_age.count();
}
std::vector<std::thread> workers;
workers.reserve(m_cache.partitions());
std::atomic<int> allRemovals = 0;
for (std::size_t p = 0; p < m_cache.partitions(); ++p)
{
workers.push_back(sweepHelper(
when_expire,
now,
m_cache.map()[p],
allStuffToSweep[p],
allRemovals,
lock));
}
for (std::thread& worker : workers)
worker.join();
m_cache_count -= allRemovals;
}
// At this point allStuffToSweep will go out of scope outside the lock
// and decrement the reference count on each strong pointer.
JLOG(m_journal.debug())
<< m_name << " TaggedCache sweep lock duration "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - start)
.count()
<< "ms";
}
sweep();
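(Editorial sketch of the locking pattern sweep() relies on: swept pointers are collected under the lock but destroyed after it is released, keeping destructor work off the critical section. Self-contained model, names illustrative:)
#include <memory>
#include <mutex>
#include <vector>

template <class T>
void
sweepPattern(std::mutex& m, std::vector<std::shared_ptr<T>>& cache)
{
    std::vector<std::shared_ptr<T>> swept;  // declared before the lock
    {
        std::lock_guard lock(m);
        swept.swap(cache);  // stand-in for moving out expired entries
    }
    // `swept` goes out of scope here, so the (possibly expensive)
    // destructors run with the lock already released.
}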
bool
del(const key_type& key, bool valid)
{
// Remove from the cache; if !valid, remove from the map too.
// Returns true if the entry was removed from the cache.
std::lock_guard lock(m_mutex);
auto cit = m_cache.find(key);
if (cit == m_cache.end())
return false;
Entry& entry = cit->second;
bool ret = false;
if (entry.isCached())
{
--m_cache_count;
entry.ptr.reset();
ret = true;
}
if (!valid || entry.isExpired())
m_cache.erase(cit);
return ret;
}
del(const key_type& key, bool valid);
public:
/** Replace aliased objects with originals.
Due to concurrency it is possible for two separate objects with
@@ -308,100 +131,23 @@ public:
@return `true` If the key already existed.
*/
public:
template <class R>
bool
canonicalize(
const key_type& key,
std::shared_ptr<T>& data,
std::function<bool(std::shared_ptr<T> const&)>&& replace)
{
// Return the canonical value, storing it if needed and refreshing it
// in the cache. Returns true if we already had the data.
std::lock_guard lock(m_mutex);
auto cit = m_cache.find(key);
if (cit == m_cache.end())
{
m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
std::forward_as_tuple(m_clock.now(), data));
++m_cache_count;
return false;
}
Entry& entry = cit->second;
entry.touch(m_clock.now());
if (entry.isCached())
{
if (replace(entry.ptr))
{
entry.ptr = data;
entry.weak_ptr = data;
}
else
{
data = entry.ptr;
}
return true;
}
auto cachedData = entry.lock();
if (cachedData)
{
if (replace(entry.ptr))
{
entry.ptr = data;
entry.weak_ptr = data;
}
else
{
entry.ptr = cachedData;
data = cachedData;
}
++m_cache_count;
return true;
}
entry.ptr = data;
entry.weak_ptr = data;
++m_cache_count;
return false;
}
SharedPointerType& data,
R&& replaceCallback);
bool
canonicalize_replace_cache(
const key_type& key,
std::shared_ptr<T> const& data)
{
return canonicalize(
key,
const_cast<std::shared_ptr<T>&>(data),
[](std::shared_ptr<T> const&) { return true; });
}
SharedPointerType const& data);
bool
canonicalize_replace_client(const key_type& key, std::shared_ptr<T>& data)
{
return canonicalize(
key, data, [](std::shared_ptr<T> const&) { return false; });
}
canonicalize_replace_client(const key_type& key, SharedPointerType& data);
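(Editorial sketch, not from the diff: how a caller typically uses the canonicalize family. `LedgerCache`, `Ledger`, and `uint256` are hypothetical stand-ins, assuming a std::shared_ptr-based SharedPointerType with default template arguments:)
#include <memory>

using LedgerCache = TaggedCache<uint256, Ledger>;  // hypothetical alias

void
example(LedgerCache& cache, uint256 const& key)
{
    auto mine = std::make_shared<Ledger>();
    // If another thread already canonicalized an object under `key`,
    // `mine` is replaced by that instance and this returns true;
    // otherwise `mine` itself becomes the canonical object.
    bool const existed = cache.canonicalize_replace_client(key, mine);
    (void)existed;
}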
std::shared_ptr<T>
fetch(const key_type& key)
{
std::lock_guard<mutex_type> l(m_mutex);
auto ret = initialFetch(key, l);
if (!ret)
++m_misses;
return ret;
}
SharedPointerType
fetch(const key_type& key);
/** Insert the element into the container.
If the key already exists, nothing happens.
@@ -410,26 +156,11 @@ public:
template <class ReturnType = bool>
auto
insert(key_type const& key, T const& value)
-> std::enable_if_t<!IsKeyCache, ReturnType>
{
auto p = std::make_shared<T>(std::cref(value));
return canonicalize_replace_client(key, p);
}
-> std::enable_if_t<!IsKeyCache, ReturnType>;
template <class ReturnType = bool>
auto
insert(key_type const& key) -> std::enable_if_t<IsKeyCache, ReturnType>
{
std::lock_guard lock(m_mutex);
clock_type::time_point const now(m_clock.now());
auto [it, inserted] = m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
std::forward_as_tuple(now));
if (!inserted)
it->second.last_access = now;
return inserted;
}
insert(key_type const& key) -> std::enable_if_t<IsKeyCache, ReturnType>;
// VFALCO NOTE It looks like this returns a copy of the data in
// the output parameter 'data'. This could be expensive.
@@ -437,50 +168,18 @@ public:
// simply return an iterator.
//
bool
retrieve(const key_type& key, T& data)
{
// retrieve the value of the stored data
auto entry = fetch(key);
if (!entry)
return false;
data = *entry;
return true;
}
retrieve(const key_type& key, T& data);
mutex_type&
peekMutex()
{
return m_mutex;
}
peekMutex();
std::vector<key_type>
getKeys() const
{
std::vector<key_type> v;
{
std::lock_guard lock(m_mutex);
v.reserve(m_cache.size());
for (auto const& _ : m_cache)
v.push_back(_.first);
}
return v;
}
getKeys() const;
// CachedSLEs functions.
/** Returns the fraction of cache hits. */
double
rate() const
{
std::lock_guard lock(m_mutex);
auto const tot = m_hits + m_misses;
if (tot == 0)
return 0;
return double(m_hits) / tot;
}
rate() const;
/** Fetch an item from the cache.
If the digest was not found, Handler
@@ -488,73 +187,16 @@ public:
std::shared_ptr<SLE const>(void)
*/
template <class Handler>
std::shared_ptr<T>
fetch(key_type const& digest, Handler const& h)
{
{
std::lock_guard l(m_mutex);
if (auto ret = initialFetch(digest, l))
return ret;
}
auto sle = h();
if (!sle)
return {};
std::lock_guard l(m_mutex);
++m_misses;
auto const [it, inserted] =
m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
if (!inserted)
it->second.touch(m_clock.now());
return it->second.ptr;
}
SharedPointerType
fetch(key_type const& digest, Handler const& h);
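(Editorial sketch: the handler overload invokes `h` only on a miss, and a null result is returned without being inserted. A thin, self-contained wrapper showing the calling shape; `fetchOrLoad` is hypothetical:)
#include <utility>

template <class Cache, class Digest, class Loader>
auto
fetchOrLoad(Cache& cache, Digest const& digest, Loader&& load)
{
    // `load` must return something convertible to the cache's shared
    // pointer type; it runs only when `digest` is not already cached.
    return cache.fetch(digest, std::forward<Loader>(load));
}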
// End CachedSLEs functions.
private:
std::shared_ptr<T>
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
{
auto cit = m_cache.find(key);
if (cit == m_cache.end())
return {};
Entry& entry = cit->second;
if (entry.isCached())
{
++m_hits;
entry.touch(m_clock.now());
return entry.ptr;
}
entry.ptr = entry.lock();
if (entry.isCached())
{
// independent of cache size, so not counted as a hit
++m_cache_count;
entry.touch(m_clock.now());
return entry.ptr;
}
m_cache.erase(cit);
return {};
}
SharedPointerType
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l);
void
collect_metrics()
{
m_stats.size.set(getCacheSize());
{
beast::insight::Gauge::value_type hit_rate(0);
{
std::lock_guard lock(m_mutex);
auto const total(m_hits + m_misses);
if (total != 0)
hit_rate = (m_hits * 100) / total;
}
m_stats.hit_rate.set(hit_rate);
}
}
collect_metrics();
private:
struct Stats
@@ -600,36 +242,37 @@ private:
class ValueEntry
{
public:
std::shared_ptr<mapped_type> ptr;
std::weak_ptr<mapped_type> weak_ptr;
shared_weak_combo_pointer_type ptr;
clock_type::time_point last_access;
ValueEntry(
clock_type::time_point const& last_access_,
std::shared_ptr<mapped_type> const& ptr_)
: ptr(ptr_), weak_ptr(ptr_), last_access(last_access_)
shared_pointer_type const& ptr_)
: ptr(ptr_), last_access(last_access_)
{
}
bool
isWeak() const
{
return ptr == nullptr;
if (!ptr)
return true;
return ptr.isWeak();
}
bool
isCached() const
{
return ptr != nullptr;
return ptr && ptr.isStrong();
}
bool
isExpired() const
{
return weak_ptr.expired();
return ptr.expired();
}
std::shared_ptr<mapped_type>
SharedPointerType
lock()
{
return weak_ptr.lock();
return ptr.lock();
}
void
touch(clock_type::time_point const& now)
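(Editorial model, not from the diff: the entry's single `ptr` now carries what the shared_ptr/weak_ptr pair used to. A std::variant stand-in captures the observable states; the real SharedWeakUnionPointer instead keeps tagged reference counts in the pointee:)
#include <memory>
#include <variant>

template <class T>
struct SharedWeakModel
{
    std::variant<std::shared_ptr<T>, std::weak_ptr<T>> p;

    bool
    isStrong() const
    {
        return p.index() == 0 && std::get<0>(p) != nullptr;
    }
    bool
    isWeak() const
    {
        return !isStrong();
    }
    bool
    expired() const
    {
        return p.index() == 1 && std::get<1>(p).expired();
    }
    std::shared_ptr<T>
    lock() const
    {
        // A strong slot locks trivially; a weak slot may come back null.
        return p.index() == 0 ? std::get<0>(p) : std::get<1>(p).lock();
    }
};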
@@ -658,72 +301,7 @@ private:
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
{
return std::thread([&, this]() {
int cacheRemovals = 0;
int mapRemovals = 0;
// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
stuffToSweep.first.reserve(partition.size());
stuffToSweep.second.reserve(partition.size());
{
auto cit = partition.begin();
while (cit != partition.end())
{
if (cit->second.isWeak())
{
// weak
if (cit->second.isExpired())
{
stuffToSweep.second.push_back(
std::move(cit->second.weak_ptr));
++mapRemovals;
cit = partition.erase(cit);
}
else
{
++cit;
}
}
else if (cit->second.last_access <= when_expire)
{
// strong, expired
++cacheRemovals;
if (cit->second.ptr.use_count() == 1)
{
stuffToSweep.first.push_back(
std::move(cit->second.ptr));
++mapRemovals;
cit = partition.erase(cit);
}
else
{
// remains weakly cached
cit->second.ptr.reset();
++cit;
}
}
else
{
// strong, not expired
++cit;
}
}
}
if (mapRemovals || cacheRemovals)
{
JLOG(m_journal.debug())
<< "TaggedCache partition sweep " << m_name
<< ": cache = " << partition.size() << "-" << cacheRemovals
<< ", map-=" << mapRemovals;
}
allRemovals += cacheRemovals;
});
}
std::lock_guard<std::recursive_mutex> const&);
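(Editorial sketch distilling the retired key/value sweep loop above into its three per-entry rules; the enum and function names are illustrative, not the PR's code:)
enum class SweepAction { Keep, Erase, DemoteToWeak };

template <class Entry, class TimePoint>
SweepAction
classify(Entry const& e, TimePoint const& whenExpire)
{
    if (e.isWeak())  // weakly cached: erase only once the object is gone
        return e.isExpired() ? SweepAction::Erase : SweepAction::Keep;
    if (e.last_access > whenExpire)  // strong and still fresh
        return SweepAction::Keep;
    // Strong and stale: erase outright if we hold the only reference;
    // otherwise drop the strong reference and keep it weakly cached.
    return e.ptr.use_count() == 1 ? SweepAction::Erase
                                  : SweepAction::DemoteToWeak;
}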
[[nodiscard]] std::thread
sweepHelper(
@@ -732,45 +310,7 @@ private:
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
{
return std::thread([&, this]() {
int cacheRemovals = 0;
int mapRemovals = 0;
// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
{
auto cit = partition.begin();
while (cit != partition.end())
{
if (cit->second.last_access > now)
{
cit->second.last_access = now;
++cit;
}
else if (cit->second.last_access <= when_expire)
{
cit = partition.erase(cit);
}
else
{
++cit;
}
}
}
if (mapRemovals || cacheRemovals)
{
JLOG(m_journal.debug())
<< "TaggedCache partition sweep " << m_name
<< ": cache = " << partition.size() << "-" << cacheRemovals
<< ", map-=" << mapRemovals;
}
allRemovals += cacheRemovals;
});
};
std::lock_guard<std::recursive_mutex> const&);
beast::Journal m_journal;
clock_type& m_clock;
@@ -782,10 +322,10 @@ private:
std::string m_name;
// Desired number of cache entries (0 = ignore)
int m_target_size;
const int m_target_size;
// Desired maximum cache age
clock_type::duration m_target_age;
const clock_type::duration m_target_age;
// Number of items cached
int m_cache_count;

File diff suppressed because it is too large