Mirror of https://github.com/Xahau/xahaud.git (synced 2025-11-04 18:55:49 +00:00)

Compare commits: patch-rema... → shamap-upd... (7 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 2185401dd5 |  |
|  | 95fe3672a8 |  |
|  | d8475f50c2 |  |
|  | b284d43dd9 |  |
|  | 5b93d08a3e |  |
|  | 0b65657a09 |  |
|  | 11931751a8 |  |
@@ -179,6 +179,8 @@ install (
src/ripple/basics/contract.h
src/ripple/basics/FeeUnits.h
src/ripple/basics/hardened_hash.h
src/ripple/basics/IntrusivePointer.h
src/ripple/basics/SharedWeakCachePointer.h
src/ripple/basics/strHex.h
DESTINATION include/ripple/basics)
install (

@@ -769,6 +771,7 @@ if (tests)
src/test/basics/DetectCrash_test.cpp
src/test/basics/Expected_test.cpp
src/test/basics/FileUtilities_test.cpp
src/test/basics/IntrusiveShared_test.cpp
src/test/basics/IOUAmount_test.cpp
src/test/basics/KeyCache_test.cpp
src/test/basics/Number_test.cpp
reverse_migrate.py (new file, 53 lines)

@@ -0,0 +1,53 @@
import os


def replace_in_file(file_path):
    """Replace xrpld/xrpl-style include paths with ripple include paths in the given file."""
    try:
        with open(file_path, "r") as file:
            content = file.read()

        # Replace the text
        new_content = (
            content.replace("xrpld/app/", "ripple/app/")
            .replace("xrpld/core/", "ripple/core/")
            .replace("xrpld/nodestore/", "ripple/nodestore/")
            .replace("xrpl/basics/", "ripple/basics/")
            .replace("xrpl/protocol/", "ripple/protocol/")
            .replace("xrpl/json/", "ripple/json/")
            .replace("xrpld/overlay/", "ripple/overlay/")
            .replace("xrpl/resource/", "ripple/resource/")
            .replace("xrpl/crypto/", "ripple/crypto/")
            .replace("xrpl/beast/", "ripple/beast/")
            .replace("xrpld/shamap/", "ripple/shamap/")
            .replace("xrpld/rpc/", "ripple/rpc/")
            .replace("xrpld/perflog/", "ripple/perflog/")
            .replace("xrpld/nodestore/detail/", "ripple/nodestore/impl/")
            .replace("xrpld/ledger/", "ripple/ledger/")
            .replace("xrpld/app/misc/detail/AccountTxPaging.h", "ripple/app/misc/impl/AccountTxPaging.h")
            .replace("xrpld/perflog/PerfLog.h", "ripple/basics/PerfLog.h")
            .replace("xrpld/rpc/detail/RPCHelpers.h", "ripple/rpc/impl/RPCHelpers.h")
            .replace("xrpld/protocol/RPCErr.h", "ripple/net/RPCErr.h")
        )

        # Write the changes back to the file
        with open(file_path, "w") as file:
            file.write(new_content)

        print(f"Updated: {file_path}")

    except Exception as e:
        print(f"Error processing file {file_path}: {e}")


def search_and_replace_in_folders(folder_paths):
    """Search for C++ source and header files in the given folders and replace text."""
    for folder_path in folder_paths:
        for root, dirs, files in os.walk(folder_path):
            for file in files:
                if file.endswith(".cpp") or file.endswith(".h"):
                    file_path = os.path.join(root, file)
                    replace_in_file(file_path)


if __name__ == "__main__":
    folder_list = ["src/ripple", "src/test"]
    search_and_replace_in_folders(folder_list)
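The script is presumably run from the repository root (the folder paths are relative), e.g. `python reverse_migrate.py`; it rewrites xrpld/- and xrpl/-style include paths back to the ripple/ layout in every .cpp and .h file under src/ripple and src/test.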
@@ -23,6 +23,7 @@
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/app/misc/Transaction.h>
#include <ripple/basics/Log.h>
#include <ripple/basics/TaggedCache.ipp>
#include <ripple/core/JobQueue.h>
#include <ripple/nodestore/Database.h>
#include <ripple/protocol/HashPrefix.h>

@@ -20,6 +20,7 @@
#include <ripple/app/ledger/LedgerHistory.h>
#include <ripple/app/ledger/LedgerToJson.h>
#include <ripple/basics/Log.h>
#include <ripple/basics/TaggedCache.ipp>
#include <ripple/basics/chrono.h>
#include <ripple/basics/contract.h>
#include <ripple/json/to_string.h>

@@ -34,18 +34,16 @@
#include <ripple/app/misc/TxQ.h>
#include <ripple/app/misc/ValidatorList.h>
#include <ripple/app/paths/PathRequests.h>
#include <ripple/app/rdb/backend/PostgresDatabase.h>
#include <ripple/app/rdb/RelationalDatabase.h>
#include <ripple/app/tx/apply.h>
#include <ripple/basics/Log.h>
#include <ripple/basics/MathUtilities.h>
#include <ripple/basics/TaggedCache.h>
#include <ripple/basics/TaggedCache.ipp>
#include <ripple/basics/UptimeClock.h>
#include <ripple/basics/contract.h>
#include <ripple/basics/safe_cast.h>
#include <ripple/core/DatabaseCon.h>
#include <ripple/core/Pg.h>
#include <ripple/core/TimeKeeper.h>
#include <ripple/nodestore/DatabaseShard.h>
#include <ripple/overlay/Overlay.h>
#include <ripple/overlay/Peer.h>
#include <ripple/protocol/BuildInfo.h>

@@ -20,6 +20,7 @@
#include <ripple/app/ledger/TransactionMaster.h>
#include <ripple/app/main/Application.h>
#include <ripple/app/misc/Transaction.h>
#include <ripple/basics/TaggedCache.ipp>
#include <ripple/basics/chrono.h>
#include <ripple/protocol/STTx.h>

@@ -45,6 +45,7 @@
#include <ripple/app/misc/ValidatorKeys.h>
#include <ripple/app/misc/ValidatorSite.h>
#include <ripple/app/paths/PathRequests.h>
#include <ripple/app/rdb/RelationalDatabase.h>
#include <ripple/app/rdb/Wallet.h>
#include <ripple/app/rdb/backend/PostgresDatabase.h>
#include <ripple/app/reporting/ReportingETL.h>
@@ -52,6 +53,7 @@
#include <ripple/basics/ByteUtilities.h>
#include <ripple/basics/PerfLog.h>
#include <ripple/basics/ResolverAsio.h>
#include <ripple/basics/TaggedCache.ipp>
#include <ripple/basics/random.h>
#include <ripple/basics/safe_cast.h>
#include <ripple/beast/asio/io_latency_probe.h>
@@ -59,7 +61,6 @@
#include <ripple/core/DatabaseCon.h>
#include <ripple/crypto/csprng.h>
#include <ripple/json/json_reader.h>
#include <ripple/nodestore/DatabaseShard.h>
#include <ripple/nodestore/DummyScheduler.h>
#include <ripple/overlay/Cluster.h>
#include <ripple/overlay/PeerReservationTable.h>

@@ -58,6 +58,8 @@ template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>

@@ -30,6 +30,7 @@
#include <ripple/app/ledger/TransactionMaster.h>
#include <ripple/app/main/LoadManager.h>
#include <ripple/app/misc/AmendmentTable.h>
// #include <ripple/app/misc/DeliverMax.h>
#include <ripple/app/misc/HashRouter.h>
#include <ripple/app/misc/LoadFeeTrack.h>
#include <ripple/app/misc/NetworkOPs.h>
@@ -38,11 +39,10 @@
#include <ripple/app/misc/ValidatorKeys.h>
#include <ripple/app/misc/ValidatorList.h>
#include <ripple/app/misc/impl/AccountTxPaging.h>
#include <ripple/app/rdb/backend/PostgresDatabase.h>
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
#include <ripple/app/reporting/ReportingETL.h>
#include <ripple/app/tx/apply.h>
#include <ripple/basics/PerfLog.h>
#include <ripple/basics/TaggedCache.ipp>
#include <ripple/basics/UptimeClock.h>
#include <ripple/basics/mulDiv.h>
#include <ripple/basics/safe_cast.h>
@@ -53,19 +53,24 @@
#include <ripple/crypto/RFC1751.h>
#include <ripple/crypto/csprng.h>
#include <ripple/json/to_string.h>
#include <ripple/net/RPCErr.h>
#include <ripple/nodestore/DatabaseShard.h>
#include <ripple/overlay/Cluster.h>
#include <ripple/overlay/Overlay.h>
#include <ripple/overlay/predicates.h>
#include <ripple/protocol/BuildInfo.h>
#include <ripple/protocol/Feature.h>
#include <ripple/rpc/BookChanges.h>
#include <ripple/rpc/DeliveredAmount.h>
#include <ripple/rpc/ServerHandler.h>
// #include <ripple/protocol/MultiApiJson.h>
#include <ripple/app/rdb/backend/PostgresDatabase.h>
#include <ripple/app/reporting/ReportingETL.h>
#include <ripple/net/RPCErr.h>
#include <ripple/nodestore/DatabaseShard.h>
#include <ripple/protocol/STParsedJSON.h>
#include <ripple/protocol/jss.h>
#include <ripple/resource/Fees.h>
#include <ripple/resource/ResourceManager.h>
#include <ripple/rpc/BookChanges.h>
#include <ripple/rpc/CTID.h>
#include <ripple/rpc/DeliveredAmount.h>
#include <ripple/rpc/impl/RPCHelpers.h>
#include <boost/asio/ip/host_name.hpp>
#include <boost/asio/steady_timer.hpp>

@@ -24,9 +24,9 @@
#include <ripple/app/misc/SHAMapStore.h>
#include <ripple/app/rdb/RelationalDatabase.h>
#include <ripple/app/rdb/State.h>
#include <ripple/basics/TaggedCache.ipp>
#include <ripple/core/DatabaseCon.h>
#include <ripple/nodestore/DatabaseRotating.h>

#include <ripple/nodestore/Scheduler.h>
#include <atomic>
#include <chrono>

@@ -27,6 +27,7 @@
#include <ripple/app/rdb/backend/detail/Node.h>
#include <ripple/basics/BasicConfig.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/basics/TaggedCache.ipp>
#include <ripple/core/DatabaseCon.h>
#include <ripple/core/SociDB.h>
#include <ripple/json/to_string.h>
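Several of the hunks above only add `#include <ripple/basics/TaggedCache.ipp>` alongside existing TaggedCache users, which suggests the TaggedCache template definitions were moved out of the header into an .ipp file that each instantiating translation unit now includes directly.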
src/ripple/basics/IntrusivePointer.h (new file, 494 lines)

@@ -0,0 +1,494 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2023 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_BASICS_INTRUSIVEPOINTER_H_INCLUDED
|
||||
#define RIPPLE_BASICS_INTRUSIVEPOINTER_H_INCLUDED
|
||||
|
||||
// shared pointer class for tree pointers
|
||||
// The ref counts are kept on the tree pointers themselves
|
||||
// I.e. this is an intrusive pointer type.
|
||||
|
||||
#include <cassert>
|
||||
#include <concepts>
|
||||
#include <cstdint>
|
||||
#include <type_traits>
|
||||
#include <utility>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/** Tag to create an intrusive pointer from another intrusive pointer by using a
|
||||
static cast. This is useful to create an intrusive pointer to a derived
|
||||
class from an intrusive pointer to a base class.
|
||||
*/
|
||||
struct StaticCastTagSharedIntrusive
|
||||
{
|
||||
};
|
||||
|
||||
/** Tag to create an intrusive pointer from another intrusive pointer by using a
dynamic cast. This is useful to create an intrusive pointer to a derived
class from an intrusive pointer to a base class. If the cast fails, an empty
(null) intrusive pointer is created.
|
||||
*/
|
||||
struct DynamicCastTagSharedIntrusive
|
||||
{
|
||||
};
|
||||
|
||||
/** When creating or adopting a raw pointer, controls whether the strong count
|
||||
is incremented or not. Use this tag to increment the strong count.
|
||||
*/
|
||||
struct SharedIntrusiveAdoptIncrementStrongTag
|
||||
{
|
||||
};
|
||||
|
||||
/** When creating or adopting a raw pointer, controls whether the strong count
|
||||
is incremented or not. Use this tag to leave the strong count unchanged.
|
||||
*/
|
||||
struct SharedIntrusiveAdoptNoIncrementTag
|
||||
{
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
//
|
||||
|
||||
// clang-format off
|
||||
template <class T>
|
||||
concept CAdoptTag =
|
||||
std::is_same_v<T, SharedIntrusiveAdoptIncrementStrongTag> ||
|
||||
std::is_same_v<T, SharedIntrusiveAdoptNoIncrementTag>;
|
||||
// clang-format on
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/** A shared intrusive pointer class that supports weak pointers.

This is meant to be used for SHAMapInnerNodes, but may be useful for other
cases. Since the reference counts are stored on the pointee, the pointee is
not destroyed until both the strong _and_ weak pointer counts go to zero.
When the strong pointer count goes to zero, the "partialDestructor" is
called. This can be used to destroy as much of the object as possible while
still retaining the reference counts. For example, for SHAMapInnerNodes the
children may be reset in that function. Note that std::shared_ptr WILL
run the destructor when the strong count reaches zero, but may not free the
memory used by the object until the weak count reaches zero. In rippled, we
typically allocate shared pointers with the `make_shared` function. When
that is used, the memory is not reclaimed until the weak count reaches zero.
*/
|
||||
template <class T>
|
||||
class SharedIntrusive
|
||||
{
|
||||
public:
|
||||
SharedIntrusive() = default;
|
||||
|
||||
template <CAdoptTag TAdoptTag>
|
||||
SharedIntrusive(T* p, TAdoptTag) noexcept;
|
||||
|
||||
SharedIntrusive(SharedIntrusive const& rhs);
|
||||
|
||||
template <class TT>
|
||||
// TODO: convertible_to isn't quite right. That includes a static castable.
|
||||
// Find the right concept.
|
||||
requires std::convertible_to<TT*, T*>
|
||||
SharedIntrusive(SharedIntrusive<TT> const& rhs);
|
||||
|
||||
SharedIntrusive(SharedIntrusive&& rhs);
|
||||
|
||||
template <class TT>
|
||||
requires std::convertible_to<TT*, T*>
|
||||
SharedIntrusive(SharedIntrusive<TT>&& rhs);
|
||||
|
||||
SharedIntrusive&
|
||||
operator=(SharedIntrusive const& rhs);
|
||||
|
||||
template <class TT>
|
||||
// clang-format off
|
||||
requires std::convertible_to<TT*, T*>
|
||||
// clang-format on
|
||||
SharedIntrusive&
|
||||
operator=(SharedIntrusive<TT> const& rhs);
|
||||
|
||||
SharedIntrusive&
|
||||
operator=(SharedIntrusive&& rhs);
|
||||
|
||||
template <class TT>
|
||||
// clang-format off
|
||||
requires std::convertible_to<TT*, T*>
|
||||
// clang-format on
|
||||
SharedIntrusive&
|
||||
operator=(SharedIntrusive<TT>&& rhs);
|
||||
|
||||
/** Adopt the raw pointer. The strong reference may or may not be
|
||||
incremented, depending on the TAdoptTag
|
||||
*/
|
||||
template <CAdoptTag TAdoptTag = SharedIntrusiveAdoptIncrementStrongTag>
|
||||
void
|
||||
adopt(T* p);
|
||||
|
||||
~SharedIntrusive();
|
||||
|
||||
/** Create a new SharedIntrusive by statically casting the pointer
|
||||
controlled by the rhs param.
|
||||
*/
|
||||
template <class TT>
|
||||
SharedIntrusive(
|
||||
StaticCastTagSharedIntrusive,
|
||||
SharedIntrusive<TT> const& rhs);
|
||||
|
||||
/** Create a new SharedIntrusive by statically casting the pointer
|
||||
controlled by the rhs param.
|
||||
*/
|
||||
template <class TT>
|
||||
SharedIntrusive(StaticCastTagSharedIntrusive, SharedIntrusive<TT>&& rhs);
|
||||
|
||||
/** Create a new SharedIntrusive by dynamically casting the pointer
|
||||
controlled by the rhs param.
|
||||
*/
|
||||
template <class TT>
|
||||
SharedIntrusive(
|
||||
DynamicCastTagSharedIntrusive,
|
||||
SharedIntrusive<TT> const& rhs);
|
||||
|
||||
/** Create a new SharedIntrusive by dynamically casting the pointer
|
||||
controlled by the rhs param.
|
||||
*/
|
||||
template <class TT>
|
||||
SharedIntrusive(DynamicCastTagSharedIntrusive, SharedIntrusive<TT>&& rhs);
|
||||
|
||||
T&
|
||||
operator*() const noexcept;
|
||||
|
||||
T*
|
||||
operator->() const noexcept;
|
||||
|
||||
explicit operator bool() const noexcept;
|
||||
|
||||
/** Set the pointer to null, decrement the strong count, and run the
|
||||
appropriate release action.
|
||||
*/
|
||||
void
|
||||
reset();
|
||||
|
||||
/** Get the raw pointer */
|
||||
T*
|
||||
get() const;
|
||||
|
||||
/** Return the strong count */
|
||||
std::size_t
|
||||
use_count() const;
|
||||
|
||||
template <class TT, class... Args>
|
||||
friend SharedIntrusive<TT>
|
||||
make_SharedIntrusive(Args&&... args);
|
||||
|
||||
/** Return the raw pointer held by this object. */
|
||||
T*
|
||||
unsafeGetRawPtr() const;
|
||||
|
||||
/** Exchange the current raw pointer held by this object with the given
|
||||
pointer. Decrement the strong count of the raw pointer previously held
|
||||
by this object and run the appropriate release action.
|
||||
*/
|
||||
void
|
||||
unsafeReleaseAndStore(T* next);
|
||||
|
||||
/** Set the raw pointer directly. This is wrapped in a function so the class
|
||||
can support both atomic and non-atomic pointers in a future patch.
|
||||
*/
|
||||
void
|
||||
unsafeSetRawPtr(T* p);
|
||||
|
||||
/** Exchange the raw pointer directly.
|
||||
This sets the raw pointer to the given value and returns the previous
|
||||
value. This is wrapped in a function so the class can support both
|
||||
atomic and non-atomic pointers in a future patch.
|
||||
*/
|
||||
T*
|
||||
unsafeExchange(T* p);
|
||||
|
||||
private:
|
||||
/** pointer to the type with an intrusive count */
|
||||
T* ptr_{nullptr};
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/** A weak intrusive pointer class for the SharedIntrusive pointer class.

Note that this weak pointer class acts differently from normal weak pointer
classes. When the strong pointer count goes to zero, the "partialDestructor"
is called. See the comment on SharedIntrusive for a fuller explanation.
*/
|
||||
template <class T>
|
||||
class WeakIntrusive
|
||||
{
|
||||
public:
|
||||
WeakIntrusive() = default;
|
||||
|
||||
WeakIntrusive(WeakIntrusive const& rhs);
|
||||
|
||||
WeakIntrusive(WeakIntrusive&& rhs);
|
||||
|
||||
WeakIntrusive(SharedIntrusive<T> const& rhs);
|
||||
|
||||
// There is no move constructor from a strong intrusive ptr because
// moving would be more expensive than copying in this case (the strong
// ref would need to be decremented)
|
||||
WeakIntrusive(SharedIntrusive<T> const&& rhs) = delete;
|
||||
|
||||
template <class TT>
|
||||
requires std::convertible_to<TT*, T*> WeakIntrusive&
|
||||
operator=(SharedIntrusive<TT> const& rhs);
|
||||
|
||||
/** Adopt the raw pointer and increment the weak count. */
|
||||
void
|
||||
adopt(T* ptr);
|
||||
|
||||
~WeakIntrusive();
|
||||
|
||||
/** Get a strong pointer from the weak pointer, if possible. This will
|
||||
only return a seated pointer if the strong count on the raw pointer
|
||||
is non-zero before locking.
|
||||
*/
|
||||
SharedIntrusive<T>
|
||||
lock() const;
|
||||
|
||||
/** Return true if the strong count is zero. */
|
||||
bool
|
||||
expired() const;
|
||||
|
||||
/** Set the pointer to null and decrement the weak count.
|
||||
|
||||
Note: This may run the destructor if the strong count is zero.
|
||||
*/
|
||||
void
|
||||
reset();
|
||||
|
||||
private:
|
||||
T* ptr_ = nullptr;
|
||||
|
||||
/** Decrement the weak count. This does _not_ set the raw pointer to
|
||||
null.
|
||||
|
||||
Note: This may run the destructor if the strong count is zero.
|
||||
*/
|
||||
void
|
||||
unsafeReleaseNoStore();
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/** A combination of a strong and a weak intrusive pointer stored in the
|
||||
space of a single pointer.
|
||||
|
||||
This class is similar to a `std::variant<SharedIntrusive,WeakIntrusive>`
|
||||
with some optimizations. In particular, it uses a low-order bit to
|
||||
determine if the raw pointer represents a strong pointer or a weak
|
||||
pointer. It can also be quickly switched between its strong pointer and
|
||||
weak pointer representations. This class is useful for storing intrusive
|
||||
pointers in tagged caches.
|
||||
*/
|
||||
|
||||
template <class T>
|
||||
class SharedWeakUnion
|
||||
{
|
||||
static_assert(
|
||||
alignof(T) >= 2,
|
||||
"Bad alignment: Combo pointer requires low bit to be zero");
|
||||
|
||||
public:
|
||||
SharedWeakUnion() = default;
|
||||
|
||||
SharedWeakUnion(SharedWeakUnion const& rhs);
|
||||
|
||||
template <class TT>
|
||||
requires std::convertible_to<TT*, T*>
|
||||
SharedWeakUnion(SharedIntrusive<TT> const& rhs);
|
||||
|
||||
SharedWeakUnion(SharedWeakUnion&& rhs);
|
||||
|
||||
template <class TT>
|
||||
requires std::convertible_to<TT*, T*>
|
||||
SharedWeakUnion(SharedIntrusive<TT>&& rhs);
|
||||
|
||||
SharedWeakUnion&
|
||||
operator=(SharedWeakUnion const& rhs);
|
||||
|
||||
template <class TT>
|
||||
requires std::convertible_to<TT*, T*> SharedWeakUnion&
|
||||
operator=(SharedIntrusive<TT> const& rhs);
|
||||
|
||||
template <class TT>
|
||||
requires std::convertible_to<TT*, T*> SharedWeakUnion&
|
||||
operator=(SharedIntrusive<TT>&& rhs);
|
||||
|
||||
~SharedWeakUnion();
|
||||
|
||||
/** Return a strong pointer if this is already a strong pointer (i.e.
|
||||
don't lock the weak pointer. Use the `lock` method if that's what's
|
||||
needed)
|
||||
*/
|
||||
SharedIntrusive<T>
|
||||
getStrong() const;
|
||||
|
||||
/** Return true if this is a strong pointer and the strong pointer is
|
||||
seated.
|
||||
*/
|
||||
explicit operator bool() const noexcept;
|
||||
|
||||
/** Set the pointer to null, decrement the appropriate ref count, and
|
||||
run the appropriate release action.
|
||||
*/
|
||||
void
|
||||
reset();
|
||||
|
||||
/** If this is a strong pointer, return the raw pointer. Otherwise
|
||||
return null.
|
||||
*/
|
||||
T*
|
||||
get() const;
|
||||
|
||||
/** If this is a strong pointer, return the strong count. Otherwise
|
||||
* return 0
|
||||
*/
|
||||
std::size_t
|
||||
use_count() const;
|
||||
|
||||
/** Return true if the strong count is zero. */
|
||||
bool
|
||||
expired() const;
|
||||
|
||||
/** If this is a strong pointer, return the strong pointer. Otherwise
|
||||
attempt to lock the weak pointer.
|
||||
*/
|
||||
SharedIntrusive<T>
|
||||
lock() const;
|
||||
|
||||
/** Return true if this represents a strong pointer. */
|
||||
bool
|
||||
isStrong() const;
|
||||
|
||||
/** Return true if this represents a weak pointer. */
|
||||
bool
|
||||
isWeak() const;
|
||||
|
||||
/** If this is a weak pointer, attempt to convert it to a strong
|
||||
pointer.
|
||||
|
||||
@return true if successfully converted to a strong pointer (or was
|
||||
already a strong pointer). Otherwise false.
|
||||
*/
|
||||
bool
|
||||
convertToStrong();
|
||||
|
||||
/** If this is a strong pointer, attempt to convert it to a weak
|
||||
pointer.
|
||||
|
||||
@return false if the pointer is null. Otherwise return true.
|
||||
*/
|
||||
bool
|
||||
convertToWeak();
|
||||
|
||||
private:
|
||||
// Tagged pointer. Low bit determines if this is a strong or a weak
|
||||
// pointer. The low bit must be masked to zero when converting back to a
|
||||
// pointer. If the low bit is '1', this is a weak pointer.
|
||||
std::uintptr_t tp_{0};
|
||||
static constexpr std::uintptr_t tagMask = 1;
|
||||
static constexpr std::uintptr_t ptrMask = ~tagMask;
|
||||
|
||||
private:
|
||||
/** Return the raw pointer held by this object.
|
||||
*/
|
||||
T*
|
||||
unsafeGetRawPtr() const;
|
||||
|
||||
enum class RefStrength { strong, weak };
|
||||
/** Set the raw pointer and tag bit directly.
|
||||
*/
|
||||
void
|
||||
unsafeSetRawPtr(T* p, RefStrength rs);
|
||||
|
||||
/** Set the raw pointer and tag bit to all zeros (strong null pointer).
|
||||
*/
|
||||
void unsafeSetRawPtr(std::nullptr_t);
|
||||
|
||||
/** Decrement the appropriate ref count, and run the appropriate release
|
||||
action. Note: this does _not_ set the raw pointer to null.
|
||||
*/
|
||||
void
|
||||
unsafeReleaseNoStore();
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/** Create a shared intrusive pointer.
|
||||
|
||||
Note: unlike std::shared_ptr, where there is an advantage of allocating
|
||||
the pointer and control block together, there is no benefit for intrusive
|
||||
pointers.
|
||||
*/
|
||||
template <class TT, class... Args>
|
||||
SharedIntrusive<TT>
|
||||
make_SharedIntrusive(Args&&... args)
|
||||
{
|
||||
auto p = new TT(std::forward<Args>(args)...);
|
||||
|
||||
static_assert(
|
||||
noexcept(SharedIntrusive<TT>(
|
||||
std::declval<TT*>(),
|
||||
std::declval<SharedIntrusiveAdoptNoIncrementTag>())),
|
||||
"SharedIntrusive constructor should not throw or this can leak "
|
||||
"memory");
|
||||
|
||||
return SharedIntrusive<TT>(p, SharedIntrusiveAdoptNoIncrementTag{});
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
namespace intr_ptr {
|
||||
template <class T>
|
||||
using SharedPtr = SharedIntrusive<T>;
|
||||
|
||||
template <class T>
|
||||
using WeakPtr = WeakIntrusive<T>;
|
||||
|
||||
template <class T, class... A>
|
||||
SharedPtr<T>
|
||||
make_shared(A&&... args)
|
||||
{
|
||||
return make_SharedIntrusive<T>(std::forward<A>(args)...);
|
||||
}
|
||||
|
||||
template <class T, class TT>
|
||||
SharedPtr<T>
|
||||
static_pointer_cast(TT const& v)
|
||||
{
|
||||
return SharedPtr<T>(StaticCastTagSharedIntrusive{}, v);
|
||||
}
|
||||
|
||||
template <class T, class TT>
|
||||
SharedPtr<T>
|
||||
dynamic_pointer_cast(TT const& v)
|
||||
{
|
||||
return SharedPtr<T>(DynamicCastTagSharedIntrusive{}, v);
|
||||
}
|
||||
} // namespace intr_ptr
|
||||
} // namespace ripple
|
||||
#endif
|
||||
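For orientation, here is a minimal usage sketch of the new intrusive pointer types. It is an editorial illustration, not code from this commit; it assumes a user type derived from ripple::IntrusiveRefCounts (added below) that supplies the partialDestructor() hook, and that IntrusivePointer.ipp is included so the templates are defined.

```cpp
// Illustrative sketch only; Node and example() are hypothetical.
#include <ripple/basics/IntrusivePointer.ipp>
#include <ripple/basics/IntrusiveRefCounts.h>

#include <cassert>

struct Node : public ripple::IntrusiveRefCounts
{
    int value = 0;
    explicit Node(int v) : value(v) {}

    // Invoked when the strong count reaches zero while weak refs remain;
    // release whatever can be released early (children, buffers, ...).
    void partialDestructor() {}
};

void
example()
{
    // The reference counts live inside Node itself (intrusive).
    ripple::SharedIntrusive<Node> strong =
        ripple::intr_ptr::make_shared<Node>(42);

    ripple::WeakIntrusive<Node> weak{strong};

    if (auto locked = weak.lock())  // seated only while a strong ref exists
        locked->value += 1;

    strong.reset();          // strong count -> 0: partialDestructor() runs
    assert(weak.expired());  // the weak pointer can still be queried safely
}
```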
src/ripple/basics/IntrusivePointer.ipp (new file, 720 lines)

@@ -0,0 +1,720 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2023 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_BASICS_INTRUSIVEPOINTER_IPP_INCLUDED
|
||||
#define RIPPLE_BASICS_INTRUSIVEPOINTER_IPP_INCLUDED
|
||||
|
||||
#include <ripple/basics/IntrusivePointer.h>
|
||||
|
||||
#include <ripple/basics/IntrusiveRefCounts.h>
|
||||
|
||||
#include <utility>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
template <class T>
|
||||
template <CAdoptTag TAdoptTag>
|
||||
SharedIntrusive<T>::SharedIntrusive(T* p, TAdoptTag) noexcept : ptr_{p}
|
||||
{
|
||||
if constexpr (std::is_same_v<
|
||||
TAdoptTag,
|
||||
SharedIntrusiveAdoptIncrementStrongTag>)
|
||||
{
|
||||
if (p)
|
||||
p->addStrongRef();
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive const& rhs)
|
||||
: ptr_{[&] {
|
||||
auto p = rhs.unsafeGetRawPtr();
|
||||
if (p)
|
||||
p->addStrongRef();
|
||||
return p;
|
||||
}()}
|
||||
{
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
requires std::convertible_to<TT*, T*>
|
||||
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive<TT> const& rhs)
|
||||
: ptr_{[&] {
|
||||
auto p = rhs.unsafeGetRawPtr();
|
||||
if (p)
|
||||
p->addStrongRef();
|
||||
return p;
|
||||
}()}
|
||||
{
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive&& rhs)
|
||||
: ptr_{rhs.unsafeExchange(nullptr)}
|
||||
{
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
requires std::convertible_to<TT*, T*>
|
||||
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive<TT>&& rhs)
|
||||
: ptr_{rhs.unsafeExchange(nullptr)}
|
||||
{
|
||||
}
|
||||
template <class T>
|
||||
SharedIntrusive<T>&
|
||||
SharedIntrusive<T>::operator=(SharedIntrusive const& rhs)
|
||||
{
|
||||
if (this == &rhs)
|
||||
return *this;
|
||||
auto p = rhs.unsafeGetRawPtr();
|
||||
if (p)
|
||||
p->addStrongRef();
|
||||
unsafeReleaseAndStore(p);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
// clang-format off
|
||||
requires std::convertible_to<TT*, T*>
|
||||
// clang-format on
|
||||
SharedIntrusive<T>&
|
||||
SharedIntrusive<T>::operator=(SharedIntrusive<TT> const& rhs)
|
||||
{
|
||||
if constexpr (std::is_same_v<T, TT>)
|
||||
{
|
||||
// This case should never be hit. The operator above will run instead.
|
||||
// (The normal operator= is needed or it will be marked `deleted`)
|
||||
if (this == &rhs)
|
||||
return *this;
|
||||
}
|
||||
auto p = rhs.unsafeGetRawPtr();
|
||||
if (p)
|
||||
p->addStrongRef();
|
||||
unsafeReleaseAndStore(p);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SharedIntrusive<T>&
|
||||
SharedIntrusive<T>::operator=(SharedIntrusive&& rhs)
|
||||
{
|
||||
if (this == &rhs)
|
||||
return *this;
|
||||
|
||||
unsafeReleaseAndStore(rhs.unsafeExchange(nullptr));
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
// clang-format off
|
||||
requires std::convertible_to<TT*, T*>
|
||||
// clang-format on
|
||||
SharedIntrusive<T>&
|
||||
SharedIntrusive<T>::operator=(SharedIntrusive<TT>&& rhs)
|
||||
{
|
||||
if constexpr (std::is_same_v<T, TT>)
|
||||
{
|
||||
// This case should never be hit. The operator above will run instead.
|
||||
// (The normal operator= is needed or it will be marked `deleted`)
|
||||
if (this == &rhs)
|
||||
return *this;
|
||||
}
|
||||
|
||||
unsafeReleaseAndStore(rhs.unsafeExchange(nullptr));
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <CAdoptTag TAdoptTag>
|
||||
void
|
||||
SharedIntrusive<T>::adopt(T* p)
|
||||
{
|
||||
if constexpr (std::is_same_v<
|
||||
TAdoptTag,
|
||||
SharedIntrusiveAdoptIncrementStrongTag>)
|
||||
{
|
||||
if (p)
|
||||
p->addStrongRef();
|
||||
}
|
||||
unsafeReleaseAndStore(p);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SharedIntrusive<T>::~SharedIntrusive()
|
||||
{
|
||||
unsafeReleaseAndStore(nullptr);
|
||||
};
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
SharedIntrusive<T>::SharedIntrusive(
|
||||
StaticCastTagSharedIntrusive,
|
||||
SharedIntrusive<TT> const& rhs)
|
||||
: ptr_{[&] {
|
||||
auto p = static_cast<T*>(rhs.unsafeGetRawPtr());
|
||||
if (p)
|
||||
p->addStrongRef();
|
||||
return p;
|
||||
}()}
|
||||
{
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
SharedIntrusive<T>::SharedIntrusive(
|
||||
StaticCastTagSharedIntrusive,
|
||||
SharedIntrusive<TT>&& rhs)
|
||||
: ptr_{static_cast<T*>(rhs.unsafeExchange(nullptr))}
|
||||
{
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
SharedIntrusive<T>::SharedIntrusive(
|
||||
DynamicCastTagSharedIntrusive,
|
||||
SharedIntrusive<TT> const& rhs)
|
||||
: ptr_{[&] {
|
||||
auto p = dynamic_cast<T*>(rhs.unsafeGetRawPtr());
|
||||
if (p)
|
||||
p->addStrongRef();
|
||||
return p;
|
||||
}()}
|
||||
{
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
SharedIntrusive<T>::SharedIntrusive(
|
||||
DynamicCastTagSharedIntrusive,
|
||||
SharedIntrusive<TT>&& rhs)
|
||||
{
|
||||
// This can be simplified without the `exchange`, but the `exchange` is kept
|
||||
// in anticipation of supporting atomic operations.
|
||||
auto toSet = rhs.unsafeExchange(nullptr);
|
||||
if (toSet)
|
||||
{
|
||||
ptr_ = dynamic_cast<T*>(toSet);
|
||||
if (!ptr_)
|
||||
// need to set the pointer back or will leak
|
||||
rhs.unsafeExchange(toSet);
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
T&
|
||||
SharedIntrusive<T>::operator*() const noexcept
|
||||
{
|
||||
return *unsafeGetRawPtr();
|
||||
}
|
||||
|
||||
template <class T>
|
||||
T*
|
||||
SharedIntrusive<T>::operator->() const noexcept
|
||||
{
|
||||
return unsafeGetRawPtr();
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SharedIntrusive<T>::operator bool() const noexcept
|
||||
{
|
||||
return bool(unsafeGetRawPtr());
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void
|
||||
SharedIntrusive<T>::reset()
|
||||
{
|
||||
unsafeReleaseAndStore(nullptr);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
T*
|
||||
SharedIntrusive<T>::get() const
|
||||
{
|
||||
return unsafeGetRawPtr();
|
||||
}
|
||||
|
||||
template <class T>
|
||||
std::size_t
|
||||
SharedIntrusive<T>::use_count() const
|
||||
{
|
||||
if (auto p = unsafeGetRawPtr())
|
||||
return p->use_count();
|
||||
return 0;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
T*
|
||||
SharedIntrusive<T>::unsafeGetRawPtr() const
|
||||
{
|
||||
return ptr_;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void
|
||||
SharedIntrusive<T>::unsafeSetRawPtr(T* p)
|
||||
{
|
||||
ptr_ = p;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
T*
|
||||
SharedIntrusive<T>::unsafeExchange(T* p)
|
||||
{
|
||||
return std::exchange(ptr_, p);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void
|
||||
SharedIntrusive<T>::unsafeReleaseAndStore(T* next)
|
||||
{
|
||||
auto prev = unsafeExchange(next);
|
||||
if (!prev)
|
||||
return;
|
||||
|
||||
using enum ReleaseRefAction;
|
||||
auto action = prev->releaseStrongRef();
|
||||
switch (action)
|
||||
{
|
||||
case noop:
|
||||
break;
|
||||
case destroy:
|
||||
delete prev;
|
||||
break;
|
||||
case partialDestroy:
|
||||
prev->partialDestructor();
|
||||
partialDestructorFinished(&prev);
|
||||
// prev is null and may no longer be used
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
template <class T>
|
||||
WeakIntrusive<T>::WeakIntrusive(WeakIntrusive const& rhs) : ptr_{rhs.ptr_}
|
||||
{
|
||||
if (ptr_)
|
||||
ptr_->addWeakRef();
|
||||
}
|
||||
|
||||
template <class T>
|
||||
WeakIntrusive<T>::WeakIntrusive(WeakIntrusive&& rhs) : ptr_{rhs.ptr_}
|
||||
{
|
||||
rhs.ptr_ = nullptr;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
WeakIntrusive<T>::WeakIntrusive(SharedIntrusive<T> const& rhs)
|
||||
: ptr_{rhs.unsafeGetRawPtr()}
|
||||
{
|
||||
if (ptr_)
|
||||
ptr_->addWeakRef();
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
// clang-format off
|
||||
requires std::convertible_to<TT*, T*>
|
||||
// clang-format on
|
||||
WeakIntrusive<T>&
|
||||
WeakIntrusive<T>::operator=(SharedIntrusive<TT> const& rhs)
|
||||
{
|
||||
unsafeReleaseNoStore();
ptr_ = rhs.unsafeGetRawPtr();
if (ptr_)
ptr_->addWeakRef();
return *this;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void
|
||||
WeakIntrusive<T>::adopt(T* ptr)
|
||||
{
|
||||
unsafeReleaseNoStore();
|
||||
if (ptr)
|
||||
ptr->addWeakRef();
|
||||
ptr_ = ptr;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
WeakIntrusive<T>::~WeakIntrusive()
|
||||
{
|
||||
unsafeReleaseNoStore();
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SharedIntrusive<T>
|
||||
WeakIntrusive<T>::lock() const
|
||||
{
|
||||
if (ptr_ && ptr_->checkoutStrongRefFromWeak())
|
||||
{
|
||||
return SharedIntrusive<T>{ptr_, SharedIntrusiveAdoptNoIncrementTag{}};
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
template <class T>
|
||||
bool
|
||||
WeakIntrusive<T>::expired() const
|
||||
{
|
||||
return (!ptr_ || ptr_->expired());
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void
|
||||
WeakIntrusive<T>::reset()
|
||||
{
|
||||
if (!ptr_)
|
||||
return;
|
||||
|
||||
unsafeReleaseNoStore();
|
||||
ptr_ = nullptr;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void
|
||||
WeakIntrusive<T>::unsafeReleaseNoStore()
|
||||
{
|
||||
if (!ptr_)
|
||||
return;
|
||||
|
||||
using enum ReleaseRefAction;
|
||||
auto action = ptr_->releaseWeakRef();
|
||||
switch (action)
|
||||
{
|
||||
case noop:
|
||||
break;
|
||||
case destroy:
|
||||
delete ptr_;
|
||||
break;
|
||||
case partialDestroy:
|
||||
assert(0);  // only a strong pointer should cause a
// partialDestruction
|
||||
ptr_->partialDestructor();
|
||||
partialDestructorFinished(&ptr_);
|
||||
// ptr_ is null and may no longer be used
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
template <class T>
|
||||
SharedWeakUnion<T>::SharedWeakUnion(SharedWeakUnion const& rhs) : tp_{rhs.tp_}
|
||||
{
|
||||
auto p = rhs.unsafeGetRawPtr();
|
||||
if (!p)
|
||||
return;
|
||||
|
||||
if (rhs.isStrong())
|
||||
p->addStrongRef();
|
||||
else
|
||||
p->addWeakRef();
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
requires std::convertible_to<TT*, T*>
|
||||
SharedWeakUnion<T>::SharedWeakUnion(SharedIntrusive<TT> const& rhs)
|
||||
{
|
||||
auto p = rhs.unsafeGetRawPtr();
|
||||
if (p)
|
||||
p->addStrongRef();
|
||||
unsafeSetRawPtr(p, RefStrength::strong);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SharedWeakUnion<T>::SharedWeakUnion(SharedWeakUnion&& rhs) : tp_{rhs.tp_}
|
||||
{
|
||||
rhs.unsafeSetRawPtr(nullptr);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
requires std::convertible_to<TT*, T*>
|
||||
SharedWeakUnion<T>::SharedWeakUnion(SharedIntrusive<TT>&& rhs)
|
||||
{
|
||||
auto p = rhs.unsafeGetRawPtr();
|
||||
if (p)
|
||||
unsafeSetRawPtr(p, RefStrength::strong);
|
||||
rhs.unsafeSetRawPtr(nullptr);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SharedWeakUnion<T>&
|
||||
SharedWeakUnion<T>::operator=(SharedWeakUnion const& rhs)
|
||||
{
|
||||
if (this == &rhs)
|
||||
return *this;
|
||||
unsafeReleaseNoStore();
|
||||
|
||||
if (auto p = rhs.unsafeGetRawPtr())
|
||||
{
|
||||
if (rhs.isStrong())
|
||||
{
|
||||
p->addStrongRef();
|
||||
unsafeSetRawPtr(p, RefStrength::strong);
|
||||
}
|
||||
else
|
||||
{
|
||||
p->addWeakRef();
|
||||
unsafeSetRawPtr(p, RefStrength::weak);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
unsafeSetRawPtr(nullptr);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
// clang-format off
|
||||
requires std::convertible_to<TT*, T*>
|
||||
// clang-format on
|
||||
SharedWeakUnion<T>&
|
||||
SharedWeakUnion<T>::operator=(SharedIntrusive<TT> const& rhs)
|
||||
{
|
||||
unsafeReleaseNoStore();
|
||||
auto p = rhs.unsafeGetRawPtr();
|
||||
if (p)
|
||||
p->addStrongRef();
|
||||
unsafeSetRawPtr(p, RefStrength::strong);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
template <class TT>
|
||||
// clang-format off
|
||||
requires std::convertible_to<TT*, T*>
|
||||
// clang-format on
|
||||
SharedWeakUnion<T>&
|
||||
SharedWeakUnion<T>::operator=(SharedIntrusive<TT>&& rhs)
|
||||
{
|
||||
unsafeReleaseNoStore();
|
||||
unsafeSetRawPtr(rhs.unsafeGetRawPtr(), RefStrength::strong);
|
||||
rhs.unsafeSetRawPtr(nullptr);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SharedWeakUnion<T>::~SharedWeakUnion()
|
||||
{
|
||||
unsafeReleaseNoStore();
|
||||
};
|
||||
|
||||
// Return a strong pointer if this is already a strong pointer (i.e. don't
|
||||
// lock the weak pointer. Use the `lock` method if that's what's needed)
|
||||
template <class T>
|
||||
SharedIntrusive<T>
|
||||
SharedWeakUnion<T>::getStrong() const
|
||||
{
|
||||
SharedIntrusive<T> result;
|
||||
auto p = unsafeGetRawPtr();
|
||||
if (p && isStrong())
|
||||
{
|
||||
result.template adopt<SharedIntrusiveAdoptIncrementStrongTag>(p);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SharedWeakUnion<T>::operator bool() const noexcept
|
||||
{
|
||||
return bool(get());
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void
|
||||
SharedWeakUnion<T>::reset()
|
||||
{
|
||||
unsafeReleaseNoStore();
|
||||
unsafeSetRawPtr(nullptr);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
T*
|
||||
SharedWeakUnion<T>::get() const
|
||||
{
|
||||
return isStrong() ? unsafeGetRawPtr() : nullptr;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
std::size_t
|
||||
SharedWeakUnion<T>::use_count() const
|
||||
{
|
||||
if (auto p = get())
|
||||
return p->use_count();
|
||||
return 0;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
bool
|
||||
SharedWeakUnion<T>::expired() const
|
||||
{
|
||||
auto p = unsafeGetRawPtr();
|
||||
return (!p || p->expired());
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SharedIntrusive<T>
|
||||
SharedWeakUnion<T>::lock() const
|
||||
{
|
||||
SharedIntrusive<T> result;
|
||||
auto p = unsafeGetRawPtr();
|
||||
if (!p)
|
||||
return result;
|
||||
|
||||
if (isStrong())
|
||||
{
|
||||
result.template adopt<SharedIntrusiveAdoptIncrementStrongTag>(p);
|
||||
return result;
|
||||
}
|
||||
|
||||
if (p->checkoutStrongRefFromWeak())
|
||||
{
|
||||
result.template adopt<SharedIntrusiveAdoptNoIncrementTag>(p);
|
||||
return result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
bool
|
||||
SharedWeakUnion<T>::isStrong() const
|
||||
{
|
||||
return !(tp_ & tagMask);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
bool
|
||||
SharedWeakUnion<T>::isWeak() const
|
||||
{
|
||||
return tp_ & tagMask;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
bool
|
||||
SharedWeakUnion<T>::convertToStrong()
|
||||
{
|
||||
if (isStrong())
|
||||
return true;
|
||||
|
||||
auto p = unsafeGetRawPtr();
|
||||
if (p && p->checkoutStrongRefFromWeak())
|
||||
{
|
||||
auto action = p->releaseWeakRef();
|
||||
(void)action;
|
||||
assert(action == ReleaseRefAction::noop);
|
||||
unsafeSetRawPtr(p, RefStrength::strong);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
bool
|
||||
SharedWeakUnion<T>::convertToWeak()
|
||||
{
|
||||
if (isWeak())
|
||||
return true;
|
||||
|
||||
auto p = unsafeGetRawPtr();
|
||||
if (!p)
|
||||
return false;
|
||||
|
||||
using enum ReleaseRefAction;
|
||||
auto action = p->addWeakReleaseStrongRef();
|
||||
switch (action)
|
||||
{
|
||||
case noop:
|
||||
break;
|
||||
case destroy:
|
||||
// We just added a weak ref. How could we destroy?
|
||||
assert(0);
|
||||
delete p;
|
||||
unsafeSetRawPtr(nullptr);
|
||||
return true; // Should never happen
|
||||
case partialDestroy:
|
||||
// This is a weird case. We just converted the last strong
|
||||
// pointer to a weak pointer.
|
||||
p->partialDestructor();
|
||||
partialDestructorFinished(&p);
|
||||
// p is null and may no longer be used
|
||||
break;
|
||||
}
|
||||
unsafeSetRawPtr(p, RefStrength::weak);
|
||||
return true;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
T*
|
||||
SharedWeakUnion<T>::unsafeGetRawPtr() const
|
||||
{
|
||||
return reinterpret_cast<T*>(tp_ & ptrMask);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void
|
||||
SharedWeakUnion<T>::unsafeSetRawPtr(T* p, RefStrength rs)
|
||||
{
|
||||
tp_ = reinterpret_cast<std::uintptr_t>(p);
|
||||
if (tp_ && rs == RefStrength::weak)
|
||||
tp_ |= tagMask;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void SharedWeakUnion<T>::unsafeSetRawPtr(std::nullptr_t)
|
||||
{
|
||||
tp_ = 0;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void
|
||||
SharedWeakUnion<T>::unsafeReleaseNoStore()
|
||||
{
|
||||
auto p = unsafeGetRawPtr();
|
||||
if (!p)
|
||||
return;
|
||||
|
||||
using enum ReleaseRefAction;
|
||||
auto action = isStrong() ? p->releaseStrongRef() : p->releaseWeakRef();
|
||||
switch (action)
|
||||
{
|
||||
case noop:
|
||||
break;
|
||||
case destroy:
|
||||
delete p;
|
||||
break;
|
||||
case partialDestroy:
|
||||
p->partialDestructor();
|
||||
partialDestructorFinished(&p);
|
||||
// p is null and may no longer be used
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace ripple
|
||||
#endif
|
||||
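The SharedWeakUnion implementation above stores the strong/weak discriminator in the low bit of the pointer value, which is why the class static_asserts that alignof(T) >= 2. A self-contained sketch of that pointer-tagging trick, independent of the classes in this diff:

```cpp
#include <cassert>
#include <cstdint>

struct alignas(2) Obj
{
    int x;
};

int
main()
{
    static Obj o{7};

    // Pack: pointer bits up high, "is weak" flag in bit 0 (the bit is always
    // zero in the address of an object aligned to at least 2 bytes).
    std::uintptr_t tp = reinterpret_cast<std::uintptr_t>(&o) | 1u;

    bool const isWeak = (tp & 1u) != 0;                         // read the tag
    Obj* p = reinterpret_cast<Obj*>(tp & ~std::uintptr_t{1});   // strip the tag

    assert(isWeak && p->x == 7);
    return 0;
}
```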
src/ripple/basics/IntrusiveRefCounts.h (new file, 466 lines)

@@ -0,0 +1,466 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2023 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED
|
||||
#define RIPPLE_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED
|
||||
|
||||
#include <atomic>
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
/** Action to perform when releasing a strong or weak pointer.
|
||||
|
||||
noop: Do nothing. For example, a `noop` action will occur when a count is
|
||||
decremented to a non-zero value.
|
||||
|
||||
partialDestroy: Run the `partialDestructor`. This action will happen when a
|
||||
strong count is decremented to zero and the weak count is non-zero.
|
||||
|
||||
destroy: Run the destructor. This action will occur when either the strong
|
||||
count or weak count is decremented and the other count is also zero.
|
||||
*/
|
||||
enum class ReleaseRefAction { noop, partialDestroy, destroy };
|
||||
|
||||
/** Implement the strong count, weak count, and bit flags for an intrusive
|
||||
pointer.
|
||||
|
||||
A class can satisfy the requirements of a ripple::IntrusivePointer by
|
||||
inheriting from this class.
|
||||
*/
|
||||
struct IntrusiveRefCounts
|
||||
{
|
||||
virtual ~IntrusiveRefCounts() noexcept;
|
||||
|
||||
// This must be `noexcept` or the make_SharedIntrusive function could leak
|
||||
// memory.
|
||||
void
|
||||
addStrongRef() const noexcept;
|
||||
|
||||
void
|
||||
addWeakRef() const noexcept;
|
||||
|
||||
ReleaseRefAction
|
||||
releaseStrongRef() const;
|
||||
|
||||
// Same as:
|
||||
// {
|
||||
// addWeakRef();
|
||||
// return releaseStrongRef;
|
||||
// }
|
||||
// done as one atomic operation
|
||||
ReleaseRefAction
|
||||
addWeakReleaseStrongRef() const;
|
||||
|
||||
ReleaseRefAction
|
||||
releaseWeakRef() const;
|
||||
|
||||
// Returns true if able to check out a strong ref. False otherwise
|
||||
bool
|
||||
checkoutStrongRefFromWeak() const noexcept;
|
||||
|
||||
bool
|
||||
expired() const noexcept;
|
||||
|
||||
std::size_t
|
||||
use_count() const noexcept;
|
||||
|
||||
// This function MUST be called after a partial destructor finishes running.
|
||||
// Calling this function may cause other threads to delete the object
|
||||
// pointed to by `o`, so `o` should never be used after calling this
|
||||
// function. The parameter will be set to a `nullptr` after calling this
|
||||
// function to emphasize that it should not be used.
|
||||
// Note: This is intentionally NOT called at the end of `partialDestructor`.
|
||||
// The reason for this is if new classes are written to support this smart
|
||||
// pointer class, they need to write their own `partialDestructor` function
|
||||
// and ensure `partialDestructorFinished` is called at the end. Putting this
|
||||
// call inside the smart pointer class itself is expected to be less error
|
||||
// prone.
|
||||
// Note: The "two-star" programming is intentional. It emphasizes that `o`
|
||||
// may be deleted and the unergonomic API is meant to signal the special
|
||||
// nature of this function call to callers.
|
||||
// Note: This is a template to support incompletely defined classes.
|
||||
template <class T>
|
||||
friend void
|
||||
partialDestructorFinished(T** o);
|
||||
|
||||
private:
|
||||
// TODO: We may need to use a uint64_t for both counts. This will reduce the
|
||||
// memory savings. We need to audit the code to make sure 16 bit counts are
|
||||
// enough for strong pointers and 14 bit counts are enough for weak
|
||||
// pointers. Use type aliases to make it easy to switch types.
|
||||
using CountType = std::uint16_t;
|
||||
static constexpr size_t StrongCountNumBits = sizeof(CountType) * 8;
|
||||
static constexpr size_t WeakCountNumBits = StrongCountNumBits - 2;
|
||||
using FieldType = std::uint32_t;
|
||||
static constexpr size_t FieldTypeBits = sizeof(FieldType) * 8;
|
||||
static constexpr FieldType one = 1;
|
||||
|
||||
/** `refCounts` consists of four fields that are treated atomically:
|
||||
|
||||
1. Strong count. This is a count of the number of shared pointers that
|
||||
hold a reference to this object. When the strong counts goes to zero,
|
||||
if the weak count is zero, the destructor is run. If the weak count is
|
||||
non-zero when the strong count goes to zero then the partialDestructor
|
||||
is run.
|
||||
|
||||
2. Weak count. This is a count of the number of weak pointer that hold
|
||||
a reference to this object. When the weak count goes to zero and the
|
||||
strong count is also zero, then the destructor is run.
|
||||
|
||||
3. Partial destroy started bit. This bit is set if the
|
||||
`partialDestructor` function has been started (or is about to be
|
||||
started). This is used to prevent the destructor from running
|
||||
concurrently with the partial destructor. This can easily happen when
|
||||
the last strong pointer release its reference in one thread and starts
|
||||
the partialDestructor, while in another thread the last weak pointer
|
||||
goes out of scope and starts the destructor while the partialDestructor
|
||||
is still running. Both a start and finished bit is needed to handle a
|
||||
corner-case where the last strong pointer goes out of scope, then the
|
||||
last `weakPointer` goes out of scope, but this happens before the
|
||||
`partialDestructor` bit is set. It would be possible to use a single
|
||||
bit if it could also be set atomically when the strong count goes to
|
||||
zero and the weak count is non-zero, but that would add complexity (and
|
||||
likely slow down common cases as well).
|
||||
|
||||
4. Partial destroy finished bit. This bit is set when the
|
||||
`partialDestructor` has finished running. See (3) above for more
|
||||
information.
|
||||
|
||||
*/
|
||||
|
||||
mutable std::atomic<FieldType> refCounts{strongDelta};
|
||||
|
||||
/** Amount to change the strong count when adding or releasing a reference
|
||||
|
||||
Note: The strong count is stored in the low `StrongCountNumBits` bits
|
||||
of refCounts
|
||||
*/
|
||||
static constexpr FieldType strongDelta = 1;
|
||||
|
||||
/** Amount to change the weak count when adding or releasing a reference
|
||||
|
||||
Note: The weak count is stored in the high `WeakCountNumBits` bits of
|
||||
refCounts
|
||||
*/
|
||||
static constexpr FieldType weakDelta = (one << StrongCountNumBits);
|
||||
|
||||
/** Flag that is set when the partialDestroy function has started running
|
||||
(or is about to start running).
|
||||
|
||||
See description of the `refCounts` field for a fuller description of
|
||||
this field.
|
||||
*/
|
||||
static constexpr FieldType partialDestroyStartedMask =
|
||||
(one << (FieldTypeBits - 1));
|
||||
|
||||
/** Flag that is set when the partialDestroy function has finished running
|
||||
|
||||
See description of the `refCounts` field for a fuller description of
|
||||
this field.
|
||||
*/
|
||||
static constexpr FieldType partialDestroyFinishedMask =
|
||||
(one << (FieldTypeBits - 2));
|
||||
|
||||
/** Mask that will zero out all the `count` bits and leave the tag bits
|
||||
unchanged.
|
||||
*/
|
||||
static constexpr FieldType tagMask =
|
||||
partialDestroyStartedMask | partialDestroyFinishedMask;
|
||||
|
||||
/** Mask that will zero out the `tag` bits and leave the count bits
|
||||
unchanged.
|
||||
*/
|
||||
static constexpr FieldType valueMask = ~tagMask;
|
||||
|
||||
/** Mask that will zero out everything except the strong count.
|
||||
*/
|
||||
static constexpr FieldType strongMask =
|
||||
((one << StrongCountNumBits) - 1) & valueMask;
|
||||
|
||||
/** Mask that will zero out everything except the weak count.
|
||||
*/
|
||||
static constexpr FieldType weakMask =
|
||||
(((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask;
|
||||
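// Illustration of the constants defined above, assuming CountType = uint16_t
// and FieldType = uint32_t as declared: StrongCountNumBits = 16,
// WeakCountNumBits = 14, FieldTypeBits = 32, strongDelta = 0x00000001,
// weakDelta = 0x00010000, partialDestroyStartedMask = 0x80000000,
// partialDestroyFinishedMask = 0x40000000, tagMask = 0xC0000000,
// valueMask = 0x3FFFFFFF, strongMask = 0x0000FFFF, weakMask = 0x3FFF0000.
// For example, a packed refCounts value of 0x00020003 encodes a strong count
// of 3 and a weak count of 2.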
|
||||
/** Unpack the count and tag fields from the packed atomic integer form. */
|
||||
struct RefCountPair
|
||||
{
|
||||
CountType strong;
|
||||
CountType weak;
|
||||
/** The `partialDestroyStartedBit` is set to on when the partial
|
||||
destroy function is started. It is not a boolean; it is a uint32
|
||||
with all bits zero with the possible exception of the
|
||||
`partialDestroyStartedMask` bit. This is done so it can be directly
|
||||
masked into the `combinedValue`.
|
||||
*/
|
||||
FieldType partialDestroyStartedBit{0};
|
||||
/** The `partialDestroyFinishedBit` is set to on when the partial
|
||||
destroy function has finished.
|
||||
*/
|
||||
FieldType partialDestroyFinishedBit{0};
|
||||
RefCountPair(FieldType v) noexcept;
|
||||
RefCountPair(CountType s, CountType w) noexcept;
|
||||
|
||||
/** Convert back to the packed integer form. */
|
||||
FieldType
|
||||
combinedValue() const noexcept;
|
||||
|
||||
static constexpr CountType maxStrongValue =
|
||||
static_cast<CountType>((one << StrongCountNumBits) - 1);
|
||||
static constexpr CountType maxWeakValue =
|
||||
static_cast<CountType>((one << WeakCountNumBits) - 1);
|
||||
/** Put an extra margin to detect when running up against limits.
|
||||
This is only used in debug code, and is useful if we reduce the
|
||||
number of bits in the strong and weak counts (to 16 and 14 bits).
|
||||
*/
|
||||
static constexpr CountType checkStrongMaxValue = maxStrongValue - 32;
|
||||
static constexpr CountType checkWeakMaxValue = maxWeakValue - 32;
|
||||
};
|
||||
};
|
||||
|
||||
inline void
IntrusiveRefCounts::addStrongRef() const noexcept
{
    refCounts.fetch_add(strongDelta, std::memory_order_acq_rel);
}

inline void
IntrusiveRefCounts::addWeakRef() const noexcept
{
    refCounts.fetch_add(weakDelta, std::memory_order_acq_rel);
}

inline ReleaseRefAction
IntrusiveRefCounts::releaseStrongRef() const
{
    // Subtract `strongDelta` from refCounts. If this releases the last strong
    // ref, set the `partialDestroyStarted` bit. It is important that the ref
    // count and the `partialDestroyStartedBit` are changed atomically (hence
    // the loop and `compare_exchange` op). If this didn't need to be done
    // atomically, the loop could be replaced with a `fetch_sub` and a
    // conditional `fetch_or`. This loop will almost always run once.

    using enum ReleaseRefAction;
    auto prevIntVal = refCounts.load(std::memory_order_acquire);
    while (1)
    {
        RefCountPair const prevVal{prevIntVal};
        assert(prevVal.strong >= strongDelta);
        auto nextIntVal = prevIntVal - strongDelta;
        ReleaseRefAction action = noop;
        if (prevVal.strong == 1)
        {
            if (prevVal.weak == 0)
            {
                action = destroy;
            }
            else
            {
                nextIntVal |= partialDestroyStartedMask;
                action = partialDestroy;
            }
        }

        if (refCounts.compare_exchange_weak(
                prevIntVal, nextIntVal, std::memory_order_release))
        {
            // Can't be in partial destroy because only decrementing the strong
            // count to zero can start a partial destroy, and that can't happen
            // twice.
            assert(
                (action == noop) || !(prevIntVal & partialDestroyStartedMask));
            return action;
        }
    }
}

inline ReleaseRefAction
IntrusiveRefCounts::addWeakReleaseStrongRef() const
{
    using enum ReleaseRefAction;

    static_assert(weakDelta > strongDelta);
    auto constexpr delta = weakDelta - strongDelta;
    auto prevIntVal = refCounts.load(std::memory_order_acquire);
    // This loop will almost always run once. The loop is needed to atomically
    // change the counts and flags (the count could be atomically changed, but
    // the flags depend on the current value of the counts).
    //
    // Note: If this becomes a perf bottleneck, the `partialDestroyStartedMask`
    // may be able to be set non-atomically. But it is easier to reason about
    // the code if the flag is set atomically.
    while (1)
    {
        RefCountPair const prevVal{prevIntVal};
        // Converted the last strong pointer to a weak pointer.
        //
        // Can't be in partial destroy because only decrementing the
        // strong count to zero can start a partial destroy, and that
        // can't happen twice.
        assert(!prevVal.partialDestroyStartedBit);

        auto nextIntVal = prevIntVal + delta;
        ReleaseRefAction action = noop;
        if (prevVal.strong == 1)
        {
            if (prevVal.weak == 0)
            {
                action = noop;
            }
            else
            {
                nextIntVal |= partialDestroyStartedMask;
                action = partialDestroy;
            }
        }
        if (refCounts.compare_exchange_weak(
                prevIntVal, nextIntVal, std::memory_order_release))
        {
            assert(!(prevIntVal & partialDestroyStartedMask));
            return action;
        }
    }
}

inline ReleaseRefAction
IntrusiveRefCounts::releaseWeakRef() const
{
    auto prevIntVal = refCounts.fetch_sub(weakDelta, std::memory_order_acq_rel);
    RefCountPair prev = prevIntVal;
    if (prev.weak == 1 && prev.strong == 0)
    {
        if (!prev.partialDestroyStartedBit)
        {
            // This case should only be hit if the partialDestroyStartedBit is
            // set non-atomically (and even then very rarely). The code is kept
            // in case we need to set the flag non-atomically for perf reasons.
            refCounts.wait(prevIntVal, std::memory_order_acq_rel);
            prevIntVal = refCounts.load(std::memory_order_acquire);
            prev = RefCountPair{prevIntVal};
        }
        if (!prev.partialDestroyFinishedBit)
        {
            // partial destroy MUST finish before running a full destroy (when
            // using weak pointers)
            refCounts.wait(prevIntVal - weakDelta, std::memory_order_acq_rel);
        }
        return ReleaseRefAction::destroy;
    }
    return ReleaseRefAction::noop;
}

inline bool
IntrusiveRefCounts::checkoutStrongRefFromWeak() const noexcept
{
    auto curValue = RefCountPair{1, 1}.combinedValue();
    auto desiredValue = RefCountPair{2, 1}.combinedValue();

    while (!refCounts.compare_exchange_weak(
        curValue, desiredValue, std::memory_order_release))
    {
        RefCountPair const prev{curValue};
        if (!prev.strong)
            return false;

        desiredValue = curValue + strongDelta;
    }
    return true;
}

inline bool
IntrusiveRefCounts::expired() const noexcept
{
    RefCountPair const val = refCounts.load(std::memory_order_acquire);
    return val.strong == 0;
}

inline std::size_t
IntrusiveRefCounts::use_count() const noexcept
{
    RefCountPair const val = refCounts.load(std::memory_order_acquire);
    return val.strong;
}

inline IntrusiveRefCounts::~IntrusiveRefCounts() noexcept
{
#ifndef NDEBUG
    auto v = refCounts.load(std::memory_order_acquire);
    assert(!(v & valueMask));
    auto t = v & tagMask;
    assert(!t || t == tagMask);
#endif
}

//------------------------------------------------------------------------------

inline IntrusiveRefCounts::RefCountPair::RefCountPair(
    IntrusiveRefCounts::FieldType v) noexcept
    : strong{static_cast<CountType>(v & strongMask)}
    , weak{static_cast<CountType>((v & weakMask) >> StrongCountNumBits)}
    , partialDestroyStartedBit{v & partialDestroyStartedMask}
    , partialDestroyFinishedBit{v & partialDestroyFinishedMask}
{
    assert(strong < checkStrongMaxValue && weak < checkWeakMaxValue);
}

inline IntrusiveRefCounts::RefCountPair::RefCountPair(
    IntrusiveRefCounts::CountType s,
    IntrusiveRefCounts::CountType w) noexcept
    : strong{s}, weak{w}
{
    assert(strong < checkStrongMaxValue && weak < checkWeakMaxValue);
}

inline IntrusiveRefCounts::FieldType
IntrusiveRefCounts::RefCountPair::combinedValue() const noexcept
{
    assert(strong < checkStrongMaxValue && weak < checkWeakMaxValue);
    return (static_cast<IntrusiveRefCounts::FieldType>(weak)
            << IntrusiveRefCounts::StrongCountNumBits) |
        static_cast<IntrusiveRefCounts::FieldType>(strong) |
        partialDestroyStartedBit | partialDestroyFinishedBit;
}

template <class T>
inline void
partialDestructorFinished(T** o)
{
    T& self = **o;
    IntrusiveRefCounts::RefCountPair p =
        self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask);
    assert(
        !p.partialDestroyFinishedBit && p.partialDestroyStartedBit &&
        !p.strong);
    if (!p.weak)
    {
        // There was a weak count before the partial destructor ran (or we would
        // have run the full destructor) and now there isn't a weak count. Some
        // thread is waiting to run the destructor.
        self.refCounts.notify_one();
    }
    // Set the pointer to null to emphasize that the object shouldn't be used
    // after calling this function as it may be destroyed in another thread.
    *o = nullptr;
}
//------------------------------------------------------------------------------

} // namespace ripple
#endif
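To make the packed layout above concrete, here is a small standalone sketch (not part of the header; the 64-bit word and the 32/30-bit count widths are assumptions made for illustration) of how a strong count, a weak count, and the tag bits round-trip through a single word, which is the invariant that RefCountPair and combinedValue() rely on.

// --- illustrative sketch, not part of the repository diff ---
#include <cassert>
#include <cstdint>

int main()
{
    using Field = std::uint64_t;
    constexpr Field one = 1;
    constexpr int fieldBits = 64;
    constexpr int strongBits = 32;  // assumed width of the strong count
    constexpr int weakBits = 30;    // assumed width of the weak count

    constexpr Field startedMask = one << (fieldBits - 1);
    constexpr Field finishedMask = one << (fieldBits - 2);
    constexpr Field tagMask = startedMask | finishedMask;
    constexpr Field valueMask = ~tagMask;
    constexpr Field strongMask = ((one << strongBits) - 1) & valueMask;
    constexpr Field weakMask =
        (((one << weakBits) - 1) << strongBits) & valueMask;

    // Pack: the weak count sits above the strong count, tag bits at the top.
    Field const packed = (Field{3} << strongBits) | Field{5} | startedMask;

    // Unpack, mirroring what RefCountPair's FieldType constructor does.
    auto const strong = packed & strongMask;
    auto const weak = (packed & weakMask) >> strongBits;

    assert(strong == 5);
    assert(weak == 3);
    assert((packed & tagMask) == startedMask);  // only the "started" bit is on
    return 0;
}
// --- end of sketch ---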
src/ripple/basics/SharedWeakCachePointer.h (new file, 132 lines)
@@ -0,0 +1,132 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2023 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_H_INCLUDED
#define RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_H_INCLUDED

#include <memory>
#include <variant>

namespace ripple {

/** A combination of a std::shared_ptr and a std::weak_ptr.

    This class is a wrapper to a `std::variant<std::shared_ptr, std::weak_ptr>`.
    This class is useful for storing pointers in tagged caches using less
    memory than storing both pointers directly.
*/

template <class T>
class SharedWeakCachePointer
{
public:
    SharedWeakCachePointer() = default;

    SharedWeakCachePointer(SharedWeakCachePointer const& rhs);

    template <class TT>
    requires std::convertible_to<TT*, T*>
    SharedWeakCachePointer(std::shared_ptr<TT> const& rhs);

    SharedWeakCachePointer(SharedWeakCachePointer&& rhs);

    template <class TT>
    requires std::convertible_to<TT*, T*>
    SharedWeakCachePointer(std::shared_ptr<TT>&& rhs);

    SharedWeakCachePointer&
    operator=(SharedWeakCachePointer const& rhs);

    template <class TT>
    requires std::convertible_to<TT*, T*> SharedWeakCachePointer&
    operator=(std::shared_ptr<TT> const& rhs);

    template <class TT>
    requires std::convertible_to<TT*, T*> SharedWeakCachePointer&
    operator=(std::shared_ptr<TT>&& rhs);

    ~SharedWeakCachePointer();

    /** Return a strong pointer if this is already a strong pointer (i.e. don't
        lock the weak pointer. Use the `lock` method if that's what's needed)
    */
    std::shared_ptr<T> const&
    getStrong() const;

    /** Return true if this is a strong pointer and the strong pointer is
        seated.
    */
    explicit operator bool() const noexcept;

    /** Set the pointer to null, decrement the appropriate ref count, and run
        the appropriate release action.
    */
    void
    reset();

    /** If this is a strong pointer, return the raw pointer. Otherwise return
        null.
    */
    T*
    get() const;

    /** If this is a strong pointer, return the strong count. Otherwise return
        0.
    */
    std::size_t
    use_count() const;

    /** Return true if there is not a non-zero strong count. */
    bool
    expired() const;

    /** If this is a strong pointer, return the strong pointer. Otherwise
        attempt to lock the weak pointer.
    */
    std::shared_ptr<T>
    lock() const;

    /** Return true if this represents a strong pointer. */
    bool
    isStrong() const;

    /** Return true if this represents a weak pointer. */
    bool
    isWeak() const;

    /** If this is a weak pointer, attempt to convert it to a strong pointer.

        @return true if successfully converted to a strong pointer (or was
                already a strong pointer). Otherwise false.
    */
    bool
    convertToStrong();

    /** If this is a strong pointer, attempt to convert it to a weak pointer.

        @return false if the pointer is null. Otherwise return true.
    */
    bool
    convertToWeak();

private:
    std::variant<std::shared_ptr<T>, std::weak_ptr<T>> combo_;
};
} // namespace ripple
#endif
src/ripple/basics/SharedWeakCachePointer.ipp (new file, 190 lines)
@@ -0,0 +1,190 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2023 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_IPP_INCLUDED
#define RIPPLE_BASICS_SHAREDWEAKCACHEPOINTER_IPP_INCLUDED

#include <ripple/basics/SharedWeakCachePointer.h>

namespace ripple {
template <class T>
SharedWeakCachePointer<T>::SharedWeakCachePointer(
    SharedWeakCachePointer const& rhs) = default;

template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>::SharedWeakCachePointer(
    std::shared_ptr<TT> const& rhs)
    : combo_{rhs}
{
}

template <class T>
SharedWeakCachePointer<T>::SharedWeakCachePointer(
    SharedWeakCachePointer&& rhs) = default;

template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>::SharedWeakCachePointer(std::shared_ptr<TT>&& rhs)
    : combo_{std::move(rhs)}
{
}

template <class T>
SharedWeakCachePointer<T>&
SharedWeakCachePointer<T>::operator=(SharedWeakCachePointer const& rhs) =
    default;

template <class T>
template <class TT>
requires std::convertible_to<TT*, T*> SharedWeakCachePointer<T>&
SharedWeakCachePointer<T>::operator=(std::shared_ptr<TT> const& rhs)
{
    combo_ = rhs;
    return *this;
}

template <class T>
template <class TT>
requires std::convertible_to<TT*, T*> SharedWeakCachePointer<T>&
SharedWeakCachePointer<T>::operator=(std::shared_ptr<TT>&& rhs)
{
    combo_ = std::move(rhs);
    return *this;
}

template <class T>
SharedWeakCachePointer<T>::~SharedWeakCachePointer() = default;

// Return a strong pointer if this is already a strong pointer (i.e. don't
// lock the weak pointer. Use the `lock` method if that's what's needed)
template <class T>
std::shared_ptr<T> const&
SharedWeakCachePointer<T>::getStrong() const
{
    static std::shared_ptr<T> const empty;
    if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
        return *p;
    return empty;
}

template <class T>
SharedWeakCachePointer<T>::operator bool() const noexcept
{
    return !!std::get_if<std::shared_ptr<T>>(&combo_);
}

template <class T>
void
SharedWeakCachePointer<T>::reset()
{
    combo_ = std::shared_ptr<T>{};
}

template <class T>
T*
SharedWeakCachePointer<T>::get() const
{
    // Only a held (strong) shared_ptr yields a raw pointer; a weak or empty
    // entry returns null.
    if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
        return p->get();
    return nullptr;
}

template <class T>
std::size_t
SharedWeakCachePointer<T>::use_count() const
{
    if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
        return p->use_count();
    return 0;
}

template <class T>
bool
SharedWeakCachePointer<T>::expired() const
{
    if (auto p = std::get_if<std::weak_ptr<T>>(&combo_))
        return p->expired();
    return !std::get_if<std::shared_ptr<T>>(&combo_);
}

template <class T>
std::shared_ptr<T>
SharedWeakCachePointer<T>::lock() const
{
    if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
        return *p;

    if (auto p = std::get_if<std::weak_ptr<T>>(&combo_))
        return p->lock();

    return {};
}

template <class T>
bool
SharedWeakCachePointer<T>::isStrong() const
{
    if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
        return !!p->get();
    return false;
}

template <class T>
bool
SharedWeakCachePointer<T>::isWeak() const
{
    return !isStrong();
}

template <class T>
bool
SharedWeakCachePointer<T>::convertToStrong()
{
    if (isStrong())
        return true;

    if (auto p = std::get_if<std::weak_ptr<T>>(&combo_))
    {
        if (auto s = p->lock())
        {
            combo_ = std::move(s);
            return true;
        }
    }
    return false;
}

template <class T>
bool
SharedWeakCachePointer<T>::convertToWeak()
{
    if (isWeak())
        return true;

    if (auto p = std::get_if<std::shared_ptr<T>>(&combo_))
    {
        combo_ = std::weak_ptr<T>(*p);
        return true;
    }

    return false;
}
} // namespace ripple
#endif
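A minimal usage sketch for the class implemented above (the std::string payload and the main harness are illustrative only): an entry starts out strong, can be demoted to a weak reference when the holder no longer wants to keep the object alive on its own, and can be re-promoted while some other owner still exists.

// --- illustrative sketch, not part of the repository diff ---
#include <cassert>
#include <memory>
#include <string>

#include <ripple/basics/SharedWeakCachePointer.ipp>

int main()
{
    auto owner = std::make_shared<std::string>("cached value");

    ripple::SharedWeakCachePointer<std::string> entry{owner};
    assert(entry.isStrong() && entry.use_count() == 2);

    // Demote to weak: the entry no longer keeps the object alive by itself.
    entry.convertToWeak();
    assert(entry.isWeak() && !entry.expired());

    // Re-promote while `owner` still holds a strong reference.
    if (auto strong = entry.lock())
        assert(*strong == "cached value");

    owner.reset();            // the last strong reference goes away
    assert(entry.expired());  // the weak entry can no longer be locked
    return 0;
}
// --- end of sketch ---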
@@ -20,7 +20,9 @@
#ifndef RIPPLE_BASICS_TAGGEDCACHE_H_INCLUDED
#define RIPPLE_BASICS_TAGGEDCACHE_H_INCLUDED

#include <ripple/basics/IntrusivePointer.h>
#include <ripple/basics/Log.h>
#include <ripple/basics/SharedWeakCachePointer.ipp>
#include <ripple/basics/UnorderedContainers.h>
#include <ripple/basics/hardened_hash.h>
#include <ripple/beast/clock/abstract_clock.h>
@@ -50,6 +52,8 @@ template <
    class Key,
    class T,
    bool IsKeyCache = false,
    class SharedWeakUnionPointerType = SharedWeakCachePointer<T>,
    class SharedPointerType = std::shared_ptr<T>,
    class Hash = hardened_hash<>,
    class KeyEqual = std::equal_to<Key>,
    class Mutex = std::recursive_mutex>
@@ -60,6 +64,8 @@ public:
    using key_type = Key;
    using mapped_type = T;
    using clock_type = beast::abstract_clock<std::chrono::steady_clock>;
    using shared_weak_combo_pointer_type = SharedWeakUnionPointerType;
    using shared_pointer_type = SharedPointerType;

public:
    TaggedCache(
@@ -69,230 +75,55 @@ public:
|
||||
clock_type& clock,
|
||||
beast::Journal journal,
|
||||
beast::insight::Collector::ptr const& collector =
|
||||
beast::insight::NullCollector::New())
|
||||
: m_journal(journal)
|
||||
, m_clock(clock)
|
||||
, m_stats(
|
||||
name,
|
||||
std::bind(&TaggedCache::collect_metrics, this),
|
||||
collector)
|
||||
, m_name(name)
|
||||
, m_target_size(size)
|
||||
, m_target_age(expiration)
|
||||
, m_cache_count(0)
|
||||
, m_hits(0)
|
||||
, m_misses(0)
|
||||
{
|
||||
}
|
||||
beast::insight::NullCollector::New());
|
||||
|
||||
public:
|
||||
/** Return the clock associated with the cache. */
|
||||
clock_type&
|
||||
clock()
|
||||
{
|
||||
return m_clock;
|
||||
}
|
||||
clock();
|
||||
|
||||
/** Returns the number of items in the container. */
|
||||
std::size_t
|
||||
size() const
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
return m_cache.size();
|
||||
}
|
||||
size() const;
|
||||
|
||||
void
|
||||
setTargetSize(int s)
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
m_target_size = s;
|
||||
|
||||
if (s > 0)
|
||||
{
|
||||
for (auto& partition : m_cache.map())
|
||||
{
|
||||
partition.rehash(static_cast<std::size_t>(
|
||||
(s + (s >> 2)) /
|
||||
(partition.max_load_factor() * m_cache.partitions()) +
|
||||
1));
|
||||
}
|
||||
}
|
||||
|
||||
JLOG(m_journal.debug()) << m_name << " target size set to " << s;
|
||||
}
|
||||
setTargetSize(int s);
|
||||
|
||||
clock_type::duration
|
||||
getTargetAge() const
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
return m_target_age;
|
||||
}
|
||||
getTargetAge() const;
|
||||
|
||||
void
|
||||
setTargetAge(clock_type::duration s)
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
m_target_age = s;
|
||||
JLOG(m_journal.debug())
|
||||
<< m_name << " target age set to " << m_target_age.count();
|
||||
}
|
||||
setTargetAge(clock_type::duration s);
|
||||
|
||||
int
|
||||
getCacheSize() const
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
return m_cache_count;
|
||||
}
|
||||
getCacheSize() const;
|
||||
|
||||
int
|
||||
getTrackSize() const
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
return m_cache.size();
|
||||
}
|
||||
getTrackSize() const;
|
||||
|
||||
float
|
||||
getHitRate()
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
auto const total = static_cast<float>(m_hits + m_misses);
|
||||
return m_hits * (100.0f / std::max(1.0f, total));
|
||||
}
|
||||
getHitRate();
|
||||
|
||||
void
|
||||
clear()
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
m_cache.clear();
|
||||
m_cache_count = 0;
|
||||
}
|
||||
clear();
|
||||
|
||||
void
|
||||
reset()
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
m_cache.clear();
|
||||
m_cache_count = 0;
|
||||
m_hits = 0;
|
||||
m_misses = 0;
|
||||
}
|
||||
reset();
|
||||
|
||||
/** Refresh the last access time on a key if present.
|
||||
@return `true` If the key was found.
|
||||
*/
|
||||
template <class KeyComparable>
|
||||
bool
|
||||
touch_if_exists(KeyComparable const& key)
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
auto const iter(m_cache.find(key));
|
||||
if (iter == m_cache.end())
|
||||
{
|
||||
++m_stats.misses;
|
||||
return false;
|
||||
}
|
||||
iter->second.touch(m_clock.now());
|
||||
++m_stats.hits;
|
||||
return true;
|
||||
}
|
||||
touch_if_exists(KeyComparable const& key);
|
||||
|
||||
using SweptPointersVector = std::pair<
|
||||
std::vector<std::shared_ptr<mapped_type>>,
|
||||
std::vector<std::weak_ptr<mapped_type>>>;
|
||||
using SweptPointersVector = std::vector<SharedWeakUnionPointerType>;
|
||||
|
||||
void
|
||||
sweep()
|
||||
{
|
||||
// Keep references to all the stuff we sweep
|
||||
// For performance, each worker thread should exit before the swept data
|
||||
// is destroyed but still within the main cache lock.
|
||||
std::vector<SweptPointersVector> allStuffToSweep(m_cache.partitions());
|
||||
|
||||
clock_type::time_point const now(m_clock.now());
|
||||
clock_type::time_point when_expire;
|
||||
|
||||
auto const start = std::chrono::steady_clock::now();
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
|
||||
if (m_target_size == 0 ||
|
||||
(static_cast<int>(m_cache.size()) <= m_target_size))
|
||||
{
|
||||
when_expire = now - m_target_age;
|
||||
}
|
||||
else
|
||||
{
|
||||
when_expire =
|
||||
now - m_target_age * m_target_size / m_cache.size();
|
||||
|
||||
clock_type::duration const minimumAge(std::chrono::seconds(1));
|
||||
if (when_expire > (now - minimumAge))
|
||||
when_expire = now - minimumAge;
|
||||
|
||||
JLOG(m_journal.trace())
|
||||
<< m_name << " is growing fast " << m_cache.size() << " of "
|
||||
<< m_target_size << " aging at "
|
||||
<< (now - when_expire).count() << " of "
|
||||
<< m_target_age.count();
|
||||
}
|
||||
|
||||
std::vector<std::thread> workers;
|
||||
workers.reserve(m_cache.partitions());
|
||||
std::atomic<int> allRemovals = 0;
|
||||
|
||||
for (std::size_t p = 0; p < m_cache.partitions(); ++p)
|
||||
{
|
||||
workers.push_back(sweepHelper(
|
||||
when_expire,
|
||||
now,
|
||||
m_cache.map()[p],
|
||||
allStuffToSweep[p],
|
||||
allRemovals,
|
||||
lock));
|
||||
}
|
||||
for (std::thread& worker : workers)
|
||||
worker.join();
|
||||
|
||||
m_cache_count -= allRemovals;
|
||||
}
|
||||
// At this point allStuffToSweep will go out of scope outside the lock
|
||||
// and decrement the reference count on each strong pointer.
|
||||
JLOG(m_journal.debug())
|
||||
<< m_name << " TaggedCache sweep lock duration "
|
||||
<< std::chrono::duration_cast<std::chrono::milliseconds>(
|
||||
std::chrono::steady_clock::now() - start)
|
||||
.count()
|
||||
<< "ms";
|
||||
}
|
||||
sweep();
|
||||
|
||||
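The removed inline sweep body above scales the expiration window down once the cache grows past its target size: when_expire = now - target_age * target_size / size, never closer to now than one second. A small numeric sketch of that scaling (the values are invented for illustration):

// --- illustrative sketch, not part of the repository diff ---
#include <cassert>
#include <chrono>

int main()
{
    using namespace std::chrono;

    seconds const targetAge{300};
    int const targetSize = 1000;
    int const currentSize = 3000;  // cache has grown past its target

    // Entries now age out three times faster (300s -> 100s), but never
    // faster than the one-second floor used by the real sweep.
    auto age = targetAge * targetSize / currentSize;
    if (age < seconds{1})
        age = seconds{1};

    assert(age == seconds{100});
    return 0;
}
// --- end of sketch ---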
bool
|
||||
del(const key_type& key, bool valid)
|
||||
{
|
||||
// Remove from cache, if !valid, remove from map too. Returns true if
|
||||
// removed from cache
|
||||
std::lock_guard lock(m_mutex);
|
||||
|
||||
auto cit = m_cache.find(key);
|
||||
|
||||
if (cit == m_cache.end())
|
||||
return false;
|
||||
|
||||
Entry& entry = cit->second;
|
||||
|
||||
bool ret = false;
|
||||
|
||||
if (entry.isCached())
|
||||
{
|
||||
--m_cache_count;
|
||||
entry.ptr.reset();
|
||||
ret = true;
|
||||
}
|
||||
|
||||
if (!valid || entry.isExpired())
|
||||
m_cache.erase(cit);
|
||||
|
||||
return ret;
|
||||
}
|
||||
del(const key_type& key, bool valid);
|
||||
|
||||
/** Replace aliased objects with originals.
|
||||
|
||||
@@ -308,99 +139,23 @@ public:
|
||||
@return `true` If the key already existed.
|
||||
*/
|
||||
public:
|
||||
template <class R>
|
||||
bool
|
||||
canonicalize(
|
||||
const key_type& key,
|
||||
std::shared_ptr<T>& data,
|
||||
std::function<bool(std::shared_ptr<T> const&)>&& replace)
|
||||
{
|
||||
// Return canonical value, store if needed, refresh in cache
|
||||
// Return values: true=we had the data already
|
||||
std::lock_guard lock(m_mutex);
|
||||
|
||||
auto cit = m_cache.find(key);
|
||||
|
||||
if (cit == m_cache.end())
|
||||
{
|
||||
m_cache.emplace(
|
||||
std::piecewise_construct,
|
||||
std::forward_as_tuple(key),
|
||||
std::forward_as_tuple(m_clock.now(), data));
|
||||
++m_cache_count;
|
||||
return false;
|
||||
}
|
||||
|
||||
Entry& entry = cit->second;
|
||||
entry.touch(m_clock.now());
|
||||
|
||||
if (entry.isCached())
|
||||
{
|
||||
if (replace(entry.ptr))
|
||||
{
|
||||
entry.ptr = data;
|
||||
entry.weak_ptr = data;
|
||||
}
|
||||
else
|
||||
{
|
||||
data = entry.ptr;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
auto cachedData = entry.lock();
|
||||
|
||||
if (cachedData)
|
||||
{
|
||||
if (replace(entry.ptr))
|
||||
{
|
||||
entry.ptr = data;
|
||||
entry.weak_ptr = data;
|
||||
}
|
||||
else
|
||||
{
|
||||
entry.ptr = cachedData;
|
||||
data = cachedData;
|
||||
}
|
||||
|
||||
++m_cache_count;
|
||||
return true;
|
||||
}
|
||||
|
||||
entry.ptr = data;
|
||||
entry.weak_ptr = data;
|
||||
++m_cache_count;
|
||||
|
||||
return false;
|
||||
}
|
||||
SharedPointerType& data,
|
||||
R&& replaceCallback);
|
||||
|
||||
bool
|
||||
canonicalize_replace_cache(
|
||||
const key_type& key,
|
||||
std::shared_ptr<T> const& data)
|
||||
{
|
||||
return canonicalize(
|
||||
key,
|
||||
const_cast<std::shared_ptr<T>&>(data),
|
||||
[](std::shared_ptr<T> const&) { return true; });
|
||||
}
|
||||
SharedPointerType const& data);
|
||||
|
||||
bool
|
||||
canonicalize_replace_client(const key_type& key, std::shared_ptr<T>& data)
|
||||
{
|
||||
return canonicalize(
|
||||
key, data, [](std::shared_ptr<T> const&) { return false; });
|
||||
}
|
||||
canonicalize_replace_client(const key_type& key, SharedPointerType& data);
|
||||
|
||||
std::shared_ptr<T>
|
||||
fetch(const key_type& key)
|
||||
{
|
||||
std::lock_guard<mutex_type> l(m_mutex);
|
||||
auto ret = initialFetch(key, l);
|
||||
if (!ret)
|
||||
++m_misses;
|
||||
return ret;
|
||||
}
|
||||
SharedPointerType
|
||||
fetch(const key_type& key);
|
||||
|
||||
/** Insert the element into the container.
|
||||
If the key already exists, nothing happens.
|
||||
@@ -409,26 +164,11 @@ public:
|
||||
template <class ReturnType = bool>
|
||||
auto
|
||||
insert(key_type const& key, T const& value)
|
||||
-> std::enable_if_t<!IsKeyCache, ReturnType>
|
||||
{
|
||||
auto p = std::make_shared<T>(std::cref(value));
|
||||
return canonicalize_replace_client(key, p);
|
||||
}
|
||||
-> std::enable_if_t<!IsKeyCache, ReturnType>;
|
||||
|
||||
template <class ReturnType = bool>
|
||||
auto
|
||||
insert(key_type const& key) -> std::enable_if_t<IsKeyCache, ReturnType>
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
clock_type::time_point const now(m_clock.now());
|
||||
auto [it, inserted] = m_cache.emplace(
|
||||
std::piecewise_construct,
|
||||
std::forward_as_tuple(key),
|
||||
std::forward_as_tuple(now));
|
||||
if (!inserted)
|
||||
it->second.last_access = now;
|
||||
return inserted;
|
||||
}
|
||||
insert(key_type const& key) -> std::enable_if_t<IsKeyCache, ReturnType>;
|
||||
|
||||
// VFALCO NOTE It looks like this returns a copy of the data in
|
||||
// the output parameter 'data'. This could be expensive.
|
||||
@@ -436,50 +176,18 @@ public:
|
||||
// simply return an iterator.
|
||||
//
|
||||
bool
|
||||
retrieve(const key_type& key, T& data)
|
||||
{
|
||||
// retrieve the value of the stored data
|
||||
auto entry = fetch(key);
|
||||
|
||||
if (!entry)
|
||||
return false;
|
||||
|
||||
data = *entry;
|
||||
return true;
|
||||
}
|
||||
retrieve(const key_type& key, T& data);
|
||||
|
||||
mutex_type&
|
||||
peekMutex()
|
||||
{
|
||||
return m_mutex;
|
||||
}
|
||||
peekMutex();
|
||||
|
||||
std::vector<key_type>
|
||||
getKeys() const
|
||||
{
|
||||
std::vector<key_type> v;
|
||||
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
v.reserve(m_cache.size());
|
||||
for (auto const& _ : m_cache)
|
||||
v.push_back(_.first);
|
||||
}
|
||||
|
||||
return v;
|
||||
}
|
||||
getKeys() const;
|
||||
|
||||
// CachedSLEs functions.
|
||||
/** Returns the fraction of cache hits. */
|
||||
double
|
||||
rate() const
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
auto const tot = m_hits + m_misses;
|
||||
if (tot == 0)
|
||||
return 0;
|
||||
return double(m_hits) / tot;
|
||||
}
|
||||
rate() const;
|
||||
|
||||
/** Fetch an item from the cache.
|
||||
If the digest was not found, Handler
|
||||
@@ -487,73 +195,16 @@ public:
|
||||
std::shared_ptr<SLE const>(void)
|
||||
*/
|
||||
template <class Handler>
|
||||
std::shared_ptr<T>
|
||||
fetch(key_type const& digest, Handler const& h)
|
||||
{
|
||||
{
|
||||
std::lock_guard l(m_mutex);
|
||||
if (auto ret = initialFetch(digest, l))
|
||||
return ret;
|
||||
}
|
||||
|
||||
auto sle = h();
|
||||
if (!sle)
|
||||
return {};
|
||||
|
||||
std::lock_guard l(m_mutex);
|
||||
++m_misses;
|
||||
auto const [it, inserted] =
|
||||
m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
|
||||
if (!inserted)
|
||||
it->second.touch(m_clock.now());
|
||||
return it->second.ptr;
|
||||
}
|
||||
SharedPointerType
|
||||
fetch(key_type const& digest, Handler const& h);
|
||||
// End CachedSLEs functions.
|
||||
|
||||
private:
|
||||
std::shared_ptr<T>
|
||||
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
|
||||
{
|
||||
auto cit = m_cache.find(key);
|
||||
if (cit == m_cache.end())
|
||||
return {};
|
||||
|
||||
Entry& entry = cit->second;
|
||||
if (entry.isCached())
|
||||
{
|
||||
++m_hits;
|
||||
entry.touch(m_clock.now());
|
||||
return entry.ptr;
|
||||
}
|
||||
entry.ptr = entry.lock();
|
||||
if (entry.isCached())
|
||||
{
|
||||
// independent of cache size, so not counted as a hit
|
||||
++m_cache_count;
|
||||
entry.touch(m_clock.now());
|
||||
return entry.ptr;
|
||||
}
|
||||
|
||||
m_cache.erase(cit);
|
||||
return {};
|
||||
}
|
||||
SharedPointerType
|
||||
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l);
|
||||
|
||||
void
|
||||
collect_metrics()
|
||||
{
|
||||
m_stats.size.set(getCacheSize());
|
||||
|
||||
{
|
||||
beast::insight::Gauge::value_type hit_rate(0);
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
auto const total(m_hits + m_misses);
|
||||
if (total != 0)
|
||||
hit_rate = (m_hits * 100) / total;
|
||||
}
|
||||
m_stats.hit_rate.set(hit_rate);
|
||||
}
|
||||
}
|
||||
collect_metrics();
|
||||
|
||||
private:
|
||||
struct Stats
|
||||
@@ -599,36 +250,35 @@ private:
|
||||
class ValueEntry
|
||||
{
|
||||
public:
|
||||
std::shared_ptr<mapped_type> ptr;
|
||||
std::weak_ptr<mapped_type> weak_ptr;
|
||||
shared_weak_combo_pointer_type ptr;
|
||||
clock_type::time_point last_access;
|
||||
|
||||
ValueEntry(
|
||||
clock_type::time_point const& last_access_,
|
||||
std::shared_ptr<mapped_type> const& ptr_)
|
||||
: ptr(ptr_), weak_ptr(ptr_), last_access(last_access_)
|
||||
shared_pointer_type const& ptr_)
|
||||
: ptr(ptr_), last_access(last_access_)
|
||||
{
|
||||
}
|
||||
|
||||
bool
|
||||
isWeak() const
|
||||
{
|
||||
return ptr == nullptr;
|
||||
return !ptr;
|
||||
}
|
||||
bool
|
||||
isCached() const
|
||||
{
|
||||
return ptr != nullptr;
|
||||
return !!ptr;
|
||||
}
|
||||
bool
|
||||
isExpired() const
|
||||
{
|
||||
return weak_ptr.expired();
|
||||
return ptr.expired();
|
||||
}
|
||||
std::shared_ptr<mapped_type>
|
||||
SharedPointerType
|
||||
lock()
|
||||
{
|
||||
return weak_ptr.lock();
|
||||
return ptr.lock();
|
||||
}
|
||||
void
|
||||
touch(clock_type::time_point const& now)
|
||||
@@ -657,72 +307,7 @@ private:
|
||||
typename KeyValueCacheType::map_type& partition,
|
||||
SweptPointersVector& stuffToSweep,
|
||||
std::atomic<int>& allRemovals,
|
||||
std::lock_guard<std::recursive_mutex> const&)
|
||||
{
|
||||
return std::thread([&, this]() {
|
||||
int cacheRemovals = 0;
|
||||
int mapRemovals = 0;
|
||||
|
||||
// Keep references to all the stuff we sweep
|
||||
// so that we can destroy them outside the lock.
|
||||
stuffToSweep.first.reserve(partition.size());
|
||||
stuffToSweep.second.reserve(partition.size());
|
||||
{
|
||||
auto cit = partition.begin();
|
||||
while (cit != partition.end())
|
||||
{
|
||||
if (cit->second.isWeak())
|
||||
{
|
||||
// weak
|
||||
if (cit->second.isExpired())
|
||||
{
|
||||
stuffToSweep.second.push_back(
|
||||
std::move(cit->second.weak_ptr));
|
||||
++mapRemovals;
|
||||
cit = partition.erase(cit);
|
||||
}
|
||||
else
|
||||
{
|
||||
++cit;
|
||||
}
|
||||
}
|
||||
else if (cit->second.last_access <= when_expire)
|
||||
{
|
||||
// strong, expired
|
||||
++cacheRemovals;
|
||||
if (cit->second.ptr.use_count() == 1)
|
||||
{
|
||||
stuffToSweep.first.push_back(
|
||||
std::move(cit->second.ptr));
|
||||
++mapRemovals;
|
||||
cit = partition.erase(cit);
|
||||
}
|
||||
else
|
||||
{
|
||||
// remains weakly cached
|
||||
cit->second.ptr.reset();
|
||||
++cit;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// strong, not expired
|
||||
++cit;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (mapRemovals || cacheRemovals)
|
||||
{
|
||||
JLOG(m_journal.debug())
|
||||
<< "TaggedCache partition sweep " << m_name
|
||||
<< ": cache = " << partition.size() << "-" << cacheRemovals
|
||||
<< ", map-=" << mapRemovals;
|
||||
}
|
||||
|
||||
allRemovals += cacheRemovals;
|
||||
});
|
||||
}
|
||||
std::lock_guard<std::recursive_mutex> const&);
|
||||
|
||||
[[nodiscard]] std::thread
|
||||
sweepHelper(
|
||||
@@ -731,45 +316,7 @@ private:
|
||||
typename KeyOnlyCacheType::map_type& partition,
|
||||
SweptPointersVector&,
|
||||
std::atomic<int>& allRemovals,
|
||||
std::lock_guard<std::recursive_mutex> const&)
|
||||
{
|
||||
return std::thread([&, this]() {
|
||||
int cacheRemovals = 0;
|
||||
int mapRemovals = 0;
|
||||
|
||||
// Keep references to all the stuff we sweep
|
||||
// so that we can destroy them outside the lock.
|
||||
{
|
||||
auto cit = partition.begin();
|
||||
while (cit != partition.end())
|
||||
{
|
||||
if (cit->second.last_access > now)
|
||||
{
|
||||
cit->second.last_access = now;
|
||||
++cit;
|
||||
}
|
||||
else if (cit->second.last_access <= when_expire)
|
||||
{
|
||||
cit = partition.erase(cit);
|
||||
}
|
||||
else
|
||||
{
|
||||
++cit;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (mapRemovals || cacheRemovals)
|
||||
{
|
||||
JLOG(m_journal.debug())
|
||||
<< "TaggedCache partition sweep " << m_name
|
||||
<< ": cache = " << partition.size() << "-" << cacheRemovals
|
||||
<< ", map-=" << mapRemovals;
|
||||
}
|
||||
|
||||
allRemovals += cacheRemovals;
|
||||
});
|
||||
};
|
||||
std::lock_guard<std::recursive_mutex> const&);
|
||||
|
||||
beast::Journal m_journal;
|
||||
clock_type& m_clock;
|
||||
|
||||
src/ripple/basics/TaggedCache.ipp (new file, 1117 lines; diff suppressed because it is too large)
@@ -145,111 +145,78 @@ private:
|
||||
};
|
||||
|
||||
// VFALCO TODO This should only be enabled for maps.
|
||||
class pair_value_compare
|
||||
: public beast::detail::empty_base_optimization<Compare>
|
||||
#ifdef _LIBCPP_VERSION
|
||||
,
|
||||
public std::binary_function<value_type, value_type, bool>
|
||||
#endif
|
||||
class pair_value_compare : public Compare
|
||||
{
|
||||
public:
|
||||
#ifndef _LIBCPP_VERSION
|
||||
using first_argument = value_type;
|
||||
using second_argument = value_type;
|
||||
using result_type = bool;
|
||||
#endif
|
||||
|
||||
bool
|
||||
operator()(value_type const& lhs, value_type const& rhs) const
|
||||
{
|
||||
return this->member()(lhs.first, rhs.first);
|
||||
return Compare::operator()(lhs.first, rhs.first);
|
||||
}
|
||||
|
||||
pair_value_compare()
|
||||
{
|
||||
}
|
||||
|
||||
pair_value_compare(pair_value_compare const& other)
|
||||
: beast::detail::empty_base_optimization<Compare>(other)
|
||||
pair_value_compare(pair_value_compare const& other) : Compare(other)
|
||||
{
|
||||
}
|
||||
|
||||
private:
|
||||
friend aged_ordered_container;
|
||||
|
||||
pair_value_compare(Compare const& compare)
|
||||
: beast::detail::empty_base_optimization<Compare>(compare)
|
||||
pair_value_compare(Compare const& compare) : Compare(compare)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
// Compares value_type against element, used in insert_check
|
||||
// VFALCO TODO hoist to remove template argument dependencies
|
||||
class KeyValueCompare
|
||||
: public beast::detail::empty_base_optimization<Compare>
|
||||
#ifdef _LIBCPP_VERSION
|
||||
,
|
||||
public std::binary_function<Key, element, bool>
|
||||
#endif
|
||||
class KeyValueCompare : public Compare
|
||||
{
|
||||
public:
|
||||
#ifndef _LIBCPP_VERSION
|
||||
using first_argument = Key;
|
||||
using second_argument = element;
|
||||
using result_type = bool;
|
||||
#endif
|
||||
|
||||
KeyValueCompare() = default;
|
||||
|
||||
KeyValueCompare(Compare const& compare)
|
||||
: beast::detail::empty_base_optimization<Compare>(compare)
|
||||
KeyValueCompare(Compare const& compare) : Compare(compare)
|
||||
{
|
||||
}
|
||||
|
||||
// VFALCO NOTE WE might want only to enable these overloads
|
||||
// if Compare has is_transparent
|
||||
#if 0
|
||||
template <class K>
|
||||
bool operator() (K const& k, element const& e) const
|
||||
{
|
||||
return this->member() (k, extract (e.value));
|
||||
}
|
||||
|
||||
template <class K>
|
||||
bool operator() (element const& e, K const& k) const
|
||||
{
|
||||
return this->member() (extract (e.value), k);
|
||||
}
|
||||
#endif
|
||||
|
||||
bool
|
||||
operator()(Key const& k, element const& e) const
|
||||
{
|
||||
return this->member()(k, extract(e.value));
|
||||
return Compare::operator()(k, extract(e.value));
|
||||
}
|
||||
|
||||
bool
|
||||
operator()(element const& e, Key const& k) const
|
||||
{
|
||||
return this->member()(extract(e.value), k);
|
||||
return Compare::operator()(extract(e.value), k);
|
||||
}
|
||||
|
||||
bool
|
||||
operator()(element const& x, element const& y) const
|
||||
{
|
||||
return this->member()(extract(x.value), extract(y.value));
|
||||
return Compare::operator()(extract(x.value), extract(y.value));
|
||||
}
|
||||
|
||||
Compare&
|
||||
compare()
|
||||
{
|
||||
return beast::detail::empty_base_optimization<Compare>::member();
|
||||
return *this;
|
||||
}
|
||||
|
||||
Compare const&
|
||||
compare() const
|
||||
{
|
||||
return beast::detail::empty_base_optimization<Compare>::member();
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -148,115 +148,84 @@ private:
|
||||
};
|
||||
|
||||
// VFALCO TODO hoist to remove template argument dependencies
|
||||
class ValueHash : private beast::detail::empty_base_optimization<Hash>
|
||||
#ifdef _LIBCPP_VERSION
|
||||
,
|
||||
public std::unary_function<element, std::size_t>
|
||||
#endif
|
||||
class ValueHash : public Hash
|
||||
{
|
||||
public:
|
||||
#ifndef _LIBCPP_VERSION
|
||||
using argument_type = element;
|
||||
using result_type = size_t;
|
||||
#endif
|
||||
|
||||
ValueHash()
|
||||
{
|
||||
}
|
||||
|
||||
ValueHash(Hash const& h)
|
||||
: beast::detail::empty_base_optimization<Hash>(h)
|
||||
ValueHash(Hash const& h) : Hash(h)
|
||||
{
|
||||
}
|
||||
|
||||
std::size_t
|
||||
operator()(element const& e) const
|
||||
{
|
||||
return this->member()(extract(e.value));
|
||||
return Hash::operator()(extract(e.value));
|
||||
}
|
||||
|
||||
Hash&
|
||||
hash_function()
|
||||
{
|
||||
return this->member();
|
||||
return *this;
|
||||
}
|
||||
|
||||
Hash const&
|
||||
hash_function() const
|
||||
{
|
||||
return this->member();
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
// Compares value_type against element, used in find/insert_check
|
||||
// VFALCO TODO hoist to remove template argument dependencies
|
||||
class KeyValueEqual
|
||||
: private beast::detail::empty_base_optimization<KeyEqual>
|
||||
#ifdef _LIBCPP_VERSION
|
||||
,
|
||||
public std::binary_function<Key, element, bool>
|
||||
#endif
|
||||
class KeyValueEqual : public KeyEqual
|
||||
{
|
||||
public:
|
||||
#ifndef _LIBCPP_VERSION
|
||||
using first_argument_type = Key;
|
||||
using second_argument_type = element;
|
||||
using result_type = bool;
|
||||
#endif
|
||||
|
||||
KeyValueEqual()
|
||||
{
|
||||
}
|
||||
|
||||
KeyValueEqual(KeyEqual const& keyEqual)
|
||||
: beast::detail::empty_base_optimization<KeyEqual>(keyEqual)
|
||||
KeyValueEqual(KeyEqual const& keyEqual) : KeyEqual(keyEqual)
|
||||
{
|
||||
}
|
||||
|
||||
// VFALCO NOTE WE might want only to enable these overloads
|
||||
// if KeyEqual has is_transparent
|
||||
#if 0
|
||||
template <class K>
|
||||
bool operator() (K const& k, element const& e) const
|
||||
{
|
||||
return this->member() (k, extract (e.value));
|
||||
}
|
||||
|
||||
template <class K>
|
||||
bool operator() (element const& e, K const& k) const
|
||||
{
|
||||
return this->member() (extract (e.value), k);
|
||||
}
|
||||
#endif
|
||||
|
||||
bool
|
||||
operator()(Key const& k, element const& e) const
|
||||
{
|
||||
return this->member()(k, extract(e.value));
|
||||
return KeyEqual::operator()(k, extract(e.value));
|
||||
}
|
||||
|
||||
bool
|
||||
operator()(element const& e, Key const& k) const
|
||||
{
|
||||
return this->member()(extract(e.value), k);
|
||||
return KeyEqual::operator()(extract(e.value), k);
|
||||
}
|
||||
|
||||
bool
|
||||
operator()(element const& lhs, element const& rhs) const
|
||||
{
|
||||
return this->member()(extract(lhs.value), extract(rhs.value));
|
||||
return KeyEqual::operator()(extract(lhs.value), extract(rhs.value));
|
||||
}
|
||||
|
||||
KeyEqual&
|
||||
key_eq()
|
||||
{
|
||||
return this->member();
|
||||
return *this;
|
||||
}
|
||||
|
||||
KeyEqual const&
|
||||
key_eq() const
|
||||
{
|
||||
return this->member();
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
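These hunks drop the beast::detail::empty_base_optimization wrapper (and the removed std::unary_function/std::binary_function bases) in favour of inheriting directly from the comparator, hasher, or key-equality functor. A standalone sketch of why nothing is lost (illustrative types only; the size equality is expected on mainstream ABIs): an empty functor used as a base class takes no storage, while the same functor stored as a member still does.

// --- illustrative sketch, not part of the repository diff ---
#include <functional>

// Storing an empty comparator as a member still costs space...
template <class Compare>
struct AsMember
{
    Compare cmp;
    int* data;
};

// ...but inheriting from it adds no storage at all (empty base optimization).
template <class Compare>
struct AsBase : Compare
{
    int* data;
};

static_assert(sizeof(AsMember<std::less<int>>) > sizeof(int*));
static_assert(
    sizeof(AsBase<std::less<int>>) == sizeof(int*),
    "expected the empty base optimization on this ABI");

int main()
{
    return 0;
}
// --- end of sketch ---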
@@ -17,6 +17,7 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/basics/TaggedCache.ipp>
|
||||
#include <ripple/basics/contract.h>
|
||||
#include <ripple/ledger/CachedView.h>
|
||||
#include <ripple/protocol/Serializer.h>
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
#ifndef RIPPLE_NODESTORE_DATABASE_H_INCLUDED
|
||||
#define RIPPLE_NODESTORE_DATABASE_H_INCLUDED
|
||||
|
||||
#include <ripple/basics/TaggedCache.h>
|
||||
#include <ripple/basics/TaggedCache.ipp>
|
||||
#include <ripple/nodestore/Backend.h>
|
||||
#include <ripple/nodestore/NodeObject.h>
|
||||
#include <ripple/nodestore/Scheduler.h>
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/ledger/Ledger.h>
|
||||
#include <ripple/basics/TaggedCache.ipp>
|
||||
#include <ripple/nodestore/impl/DatabaseNodeImp.h>
|
||||
#include <ripple/protocol/HashPrefix.h>
|
||||
|
||||
|
||||
@@ -91,17 +91,10 @@ private:
|
||||
using value_type = map_type::value_type;
|
||||
|
||||
struct Transform
|
||||
#ifdef _LIBCPP_VERSION
|
||||
: std::unary_function<
|
||||
map_type::right_map::const_iterator::value_type const&,
|
||||
beast::IP::Endpoint const&>
|
||||
#endif
|
||||
{
|
||||
#ifndef _LIBCPP_VERSION
|
||||
using first_argument_type =
|
||||
map_type::right_map::const_iterator::value_type const&;
|
||||
using result_type = beast::IP::Endpoint const&;
|
||||
#endif
|
||||
|
||||
explicit Transform() = default;
|
||||
|
||||
|
||||
@@ -69,14 +69,9 @@ public:
|
||||
public:
|
||||
// Iterator transformation to extract the endpoint from Element
|
||||
struct Transform
|
||||
#ifdef _LIBCPP_VERSION
|
||||
: public std::unary_function<Element, Endpoint>
|
||||
#endif
|
||||
{
|
||||
#ifndef _LIBCPP_VERSION
|
||||
using first_argument = Element;
|
||||
using result_type = Endpoint;
|
||||
#endif
|
||||
|
||||
explicit Transform() = default;
|
||||
|
||||
@@ -239,15 +234,9 @@ public:
|
||||
|
||||
template <bool IsConst>
|
||||
struct Transform
|
||||
#ifdef _LIBCPP_VERSION
|
||||
: public std::
|
||||
unary_function<typename lists_type::value_type, Hop<IsConst>>
|
||||
#endif
|
||||
{
|
||||
#ifndef _LIBCPP_VERSION
|
||||
using first_argument = typename lists_type::value_type;
|
||||
using result_type = Hop<IsConst>;
|
||||
#endif
|
||||
|
||||
explicit Transform() = default;
|
||||
|
||||
|
||||
@@ -23,12 +23,12 @@
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/app/misc/NetworkOPs.h>
|
||||
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
|
||||
#include <ripple/basics/TaggedCache.ipp>
|
||||
#include <ripple/basics/UptimeClock.h>
|
||||
#include <ripple/json/json_value.h>
|
||||
#include <ripple/ledger/CachedSLEs.h>
|
||||
#include <ripple/net/RPCErr.h>
|
||||
#include <ripple/nodestore/Database.h>
|
||||
#include <ripple/nodestore/DatabaseShard.h>
|
||||
#include <ripple/protocol/ErrorCodes.h>
|
||||
#include <ripple/protocol/jss.h>
|
||||
#include <ripple/rpc/Context.h>
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
|
||||
#include <ripple/basics/KeyCache.h>
|
||||
#include <ripple/basics/TaggedCache.h>
|
||||
#include <ripple/basics/TaggedCache.ipp>
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/beast/insight/Collector.h>
|
||||
#include <ripple/beast/utility/Journal.h>
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#ifndef RIPPLE_SHAMAP_SHAMAP_H_INCLUDED
|
||||
#define RIPPLE_SHAMAP_SHAMAP_H_INCLUDED
|
||||
|
||||
#include <ripple/basics/IntrusivePointer.h>
|
||||
#include <ripple/basics/UnorderedContainers.h>
|
||||
#include <ripple/beast/utility/Journal.h>
|
||||
#include <ripple/nodestore/Database.h>
|
||||
@@ -104,7 +105,7 @@ private:
|
||||
/** The sequence of the ledger that this map references, if any. */
|
||||
std::uint32_t ledgerSeq_ = 0;
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode> root_;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> root_;
|
||||
mutable SHAMapState state_;
|
||||
SHAMapType const type_;
|
||||
bool backed_ = true; // Map is backed by the database
|
||||
@@ -358,29 +359,30 @@ public:
|
||||
invariants() const;
|
||||
|
||||
private:
|
||||
using SharedPtrNodeStack =
|
||||
std::stack<std::pair<std::shared_ptr<SHAMapTreeNode>, SHAMapNodeID>>;
|
||||
using SharedPtrNodeStack = std::stack<
|
||||
std::pair<intr_ptr::SharedPtr<SHAMapTreeNode>, SHAMapNodeID>>;
|
||||
using DeltaRef = std::pair<
|
||||
std::shared_ptr<SHAMapItem const> const&,
|
||||
std::shared_ptr<SHAMapItem const> const&>;
|
||||
|
||||
// tree node cache operations
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
cacheLookup(SHAMapHash const& hash) const;
|
||||
|
||||
void
|
||||
canonicalize(SHAMapHash const& hash, std::shared_ptr<SHAMapTreeNode>&)
|
||||
canonicalize(SHAMapHash const& hash, intr_ptr::SharedPtr<SHAMapTreeNode>&)
|
||||
const;
|
||||
|
||||
// database operations
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
fetchNodeFromDB(SHAMapHash const& hash) const;
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
fetchNodeNT(SHAMapHash const& hash) const;
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const;
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
fetchNode(SHAMapHash const& hash) const;
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const;
|
||||
|
||||
/** Update hashes up to the root */
|
||||
@@ -388,7 +390,7 @@ private:
|
||||
dirtyUp(
|
||||
SharedPtrNodeStack& stack,
|
||||
uint256 const& target,
|
||||
std::shared_ptr<SHAMapTreeNode> terminal);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> terminal);
|
||||
|
||||
/** Walk towards the specified id, returning the node. Caller must check
|
||||
if the return is nullptr, and if not, if the node->peekItem()->key() ==
|
||||
@@ -402,36 +404,36 @@ private:
|
||||
|
||||
/** Unshare the node, allowing it to be modified */
|
||||
template <class Node>
|
||||
std::shared_ptr<Node>
|
||||
unshareNode(std::shared_ptr<Node>, SHAMapNodeID const& nodeID);
|
||||
intr_ptr::SharedPtr<Node>
|
||||
unshareNode(intr_ptr::SharedPtr<Node>, SHAMapNodeID const& nodeID);
|
||||
|
||||
/** prepare a node to be modified before flushing */
|
||||
template <class Node>
|
||||
std::shared_ptr<Node>
|
||||
preFlushNode(std::shared_ptr<Node> node) const;
|
||||
intr_ptr::SharedPtr<Node>
|
||||
preFlushNode(intr_ptr::SharedPtr<Node> node) const;
|
||||
|
||||
/** write and canonicalize modified node */
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
writeNode(NodeObjectType t, std::shared_ptr<SHAMapTreeNode> node) const;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
writeNode(NodeObjectType t, intr_ptr::SharedPtr<SHAMapTreeNode> node) const;
|
||||
|
||||
// returns the first item at or below this node
|
||||
SHAMapLeafNode*
|
||||
firstBelow(
|
||||
std::shared_ptr<SHAMapTreeNode>,
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>,
|
||||
SharedPtrNodeStack& stack,
|
||||
int branch = 0) const;
|
||||
|
||||
// returns the last item at or below this node
|
||||
SHAMapLeafNode*
|
||||
lastBelow(
|
||||
std::shared_ptr<SHAMapTreeNode> node,
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node,
|
||||
SharedPtrNodeStack& stack,
|
||||
int branch = branchFactor) const;
|
||||
|
||||
// helper function for firstBelow and lastBelow
|
||||
SHAMapLeafNode*
|
||||
belowHelper(
|
||||
std::shared_ptr<SHAMapTreeNode> node,
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node,
|
||||
SharedPtrNodeStack& stack,
|
||||
int branch,
|
||||
std::tuple<
|
||||
@@ -445,15 +447,15 @@ private:
|
||||
descend(SHAMapInnerNode*, int branch) const;
|
||||
SHAMapTreeNode*
|
||||
descendThrow(SHAMapInnerNode*, int branch) const;
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
descend(std::shared_ptr<SHAMapInnerNode> const&, int branch) const;
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
descendThrow(std::shared_ptr<SHAMapInnerNode> const&, int branch) const;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
descend(SHAMapInnerNode&, int branch) const;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
descendThrow(SHAMapInnerNode&, int branch) const;
|
||||
|
||||
// Descend with filter
|
||||
// If pending, callback is called as if it called fetchNodeNT
|
||||
using descendCallback =
|
||||
std::function<void(std::shared_ptr<SHAMapTreeNode>, SHAMapHash const&)>;
|
||||
using descendCallback = std::function<
|
||||
void(intr_ptr::SharedPtr<SHAMapTreeNode>, SHAMapHash const&)>;
|
||||
SHAMapTreeNode*
|
||||
descendAsync(
|
||||
SHAMapInnerNode* parent,
|
||||
@@ -471,8 +473,8 @@ private:
|
||||
|
||||
// Non-storing
|
||||
// Does not hook the returned node to its parent
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
descendNoStore(std::shared_ptr<SHAMapInnerNode> const&, int branch) const;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
descendNoStore(SHAMapInnerNode&, int branch) const;
|
||||
|
||||
/** If there is only one leaf below this node, get its contents */
|
||||
std::shared_ptr<SHAMapItem const> const&
|
||||
@@ -533,10 +535,10 @@ private:
|
||||
|
||||
// nodes we may have acquired from deferred reads
|
||||
using DeferredNode = std::tuple<
|
||||
SHAMapInnerNode*, // parent node
|
||||
SHAMapNodeID, // parent node ID
|
||||
int, // branch
|
||||
std::shared_ptr<SHAMapTreeNode>>; // node
|
||||
SHAMapInnerNode*, // parent node
|
||||
SHAMapNodeID, // parent node ID
|
||||
int, // branch
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>>; // node
|
||||
|
||||
int deferred_;
|
||||
std::mutex deferLock_;
|
||||
@@ -570,7 +572,7 @@ private:
|
||||
gmn_ProcessDeferredReads(MissingNodes&);
|
||||
|
||||
// fetch from DB helper function
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
finishFetch(
|
||||
SHAMapHash const& hash,
|
||||
std::shared_ptr<NodeObject> const& object) const;
|
||||
|
||||
@@ -51,10 +51,10 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
clone(std::uint32_t cowid) const final override
|
||||
{
|
||||
return std::make_shared<SHAMapAccountStateLeafNode>(
|
||||
return intr_ptr::make_shared<SHAMapAccountStateLeafNode>(
|
||||
item_, cowid, hash_);
|
||||
}
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#ifndef RIPPLE_SHAMAP_SHAMAPINNERNODE_H_INCLUDED
|
||||
#define RIPPLE_SHAMAP_SHAMAPINNERNODE_H_INCLUDED
|
||||
|
||||
#include <ripple/basics/IntrusivePointer.h>
|
||||
#include <ripple/basics/TaggedCache.h>
|
||||
#include <ripple/beast/utility/Journal.h>
|
||||
#include <ripple/shamap/SHAMapItem.h>
|
||||
@@ -48,7 +49,7 @@ public:
|
||||
private:
|
||||
/** Opaque type that contains the `hashes` array (array of type
|
||||
`SHAMapHash`) and the `children` array (array of type
|
||||
`std::shared_ptr<SHAMapInnerNode>`).
|
||||
`intr_ptr::SharedPtr<SHAMapInnerNode>`).
|
||||
*/
|
||||
TaggedPointer hashesAndChildren_;
|
||||
|
||||
@@ -113,7 +114,11 @@ public:
|
||||
operator=(SHAMapInnerNode const&) = delete;
|
||||
~SHAMapInnerNode();
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
// Needed to support intrusive weak pointers
|
||||
void
|
||||
partialDestructor() override;
|
||||
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
clone(std::uint32_t cowid) const override;
|
||||
|
||||
SHAMapNodeType
|
||||
@@ -147,19 +152,20 @@ public:
|
||||
getChildHash(int m) const;
|
||||
|
||||
void
|
||||
setChild(int m, std::shared_ptr<SHAMapTreeNode> child);
|
||||
setChild(int m, intr_ptr::SharedPtr<SHAMapTreeNode> child);
|
||||
|
||||
void
|
||||
shareChild(int m, std::shared_ptr<SHAMapTreeNode> const& child);
|
||||
template <class T>
|
||||
requires std::derived_from<T, SHAMapTreeNode> void
|
||||
shareChild(int m, SharedIntrusive<T> const& child);
|
||||
|
||||
SHAMapTreeNode*
|
||||
getChildPointer(int branch);
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
getChild(int branch);
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
canonicalizeChild(int branch, std::shared_ptr<SHAMapTreeNode> node);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
canonicalizeChild(int branch, intr_ptr::SharedPtr<SHAMapTreeNode> node);
|
||||
|
||||
// sync functions
|
||||
bool
|
||||
@@ -187,10 +193,10 @@ public:
|
||||
void
|
||||
invariants(bool is_root = false) const override;
|
||||
|
||||
static std::shared_ptr<SHAMapTreeNode>
|
||||
static intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
makeFullInner(Slice data, SHAMapHash const& hash, bool hashValid);
|
||||
|
||||
static std::shared_ptr<SHAMapTreeNode>
|
||||
static intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
makeCompressedInner(Slice data);
|
||||
};
|
||||
|
||||
|
||||
@@ -21,6 +21,8 @@
 #define RIPPLE_SHAMAP_SHAMAPTREENODE_H_INCLUDED
 
 #include <ripple/basics/CountedObject.h>
+#include <ripple/basics/IntrusivePointer.h>
+#include <ripple/basics/IntrusiveRefCounts.h>
 #include <ripple/basics/SHAMapHash.h>
 #include <ripple/basics/TaggedCache.h>
 #include <ripple/beast/utility/Journal.h>
@@ -50,7 +52,7 @@ enum class SHAMapNodeType {
     tnACCOUNT_STATE = 4
 };
 
-class SHAMapTreeNode
+class SHAMapTreeNode : public IntrusiveRefCounts
 {
 protected:
     SHAMapHash hash_;
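
Deriving SHAMapTreeNode from IntrusiveRefCounts moves the strong and weak reference counts into the node itself, so no separate shared_ptr control block is allocated per node. A minimal usage sketch follows, assuming a hypothetical Widget type; the operations shown (make_SharedIntrusive, WeakIntrusive::lock) are the ones exercised by the unit test added later in this change, not a complete description of the pointer API.

    #include <ripple/basics/IntrusivePointer.ipp>
    #include <ripple/basics/IntrusiveRefCounts.h>

    namespace ripple {

    struct Widget : IntrusiveRefCounts
    {
        int value = 0;
    };

    void
    intrusiveBasicsSketch()
    {
        // Plays the role of std::make_shared for intrusively counted types.
        SharedIntrusive<Widget> strong = make_SharedIntrusive<Widget>();
        WeakIntrusive<Widget> weak{strong};  // non-owning observer
        if (auto locked = weak.lock())       // promote while still alive
            locked->value = 42;
    }

    }  // namespace ripple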
|
||||
@@ -89,15 +91,19 @@ protected:
 public:
     virtual ~SHAMapTreeNode() noexcept = default;
 
+    // Needed to support weak intrusive pointers
+    virtual void
+    partialDestructor(){};
+
     /** \defgroup SHAMap Copy-on-Write Support
 
-        By nature, a node may appear in multiple SHAMap instances. Rather than
-        actually duplicating these nodes, SHAMap opts to be memory efficient
-        and uses copy-on-write semantics for nodes.
+        By nature, a node may appear in multiple SHAMap instances. Rather
+        than actually duplicating these nodes, SHAMap opts to be memory
+        efficient and uses copy-on-write semantics for nodes.
 
-        Only nodes that are not modified and don't need to be flushed back can
-        be shared. Once a node needs to be changed, it must first be copied and
-        the copy must marked as not shareable.
+        Only nodes that are not modified and don't need to be flushed back
+        can be shared. Once a node needs to be changed, it must first be
+        copied and the copy must marked as not shareable.
 
         Note that just because a node may not be *owned* by a given SHAMap
         instance does not mean that the node is NOT a part of any SHAMap. It
@@ -109,8 +115,8 @@ public:
     /** @{ */
     /** Returns the SHAMap that owns this node.
 
-        @return the ID of the SHAMap that owns this node, or 0 if the node
-                is not owned by any SHAMap and is a candidate for sharing.
+        @return the ID of the SHAMap that owns this node, or 0 if the
+                node is not owned by any SHAMap and is a candidate for sharing.
     */
     std::uint32_t
     cowid() const
@@ -130,7 +136,7 @@ public:
     }
 
     /** Make a copy of this node, setting the owner. */
-    virtual std::shared_ptr<SHAMapTreeNode>
+    virtual intr_ptr::SharedPtr<SHAMapTreeNode>
     clone(std::uint32_t cowid) const = 0;
     /** @} */
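
The copy-on-write rule documented above is what cowid() and clone() implement together: a node with cowid() == 0 is shareable, and a map may only modify nodes stamped with its own id. A minimal sketch of the unshare step, modelled on SHAMap::unshareNode further down in this diff; the helper name here is illustrative, not part of the codebase.

    #include <ripple/basics/IntrusivePointer.ipp>
    #include <ripple/shamap/SHAMapTreeNode.h>

    namespace ripple {

    // Illustrative helper only; SHAMap::unshareNode is the real implementation.
    template <class Node>
    intr_ptr::SharedPtr<Node>
    unshareForEdit(intr_ptr::SharedPtr<Node> node, std::uint32_t mapCowid)
    {
        // cowid() == 0 means "shared, owned by no map"; any other value names
        // the owning map. A node this map does not own must be cloned, and the
        // clone stamped with this map's id, before it may be modified.
        if (node->cowid() != mapCowid)
            node = intr_ptr::static_pointer_cast<Node>(node->clone(mapCowid));
        return node;
    }

    }  // namespace ripple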
|
||||
|
||||
@@ -171,20 +177,20 @@ public:
|
||||
virtual void
|
||||
invariants(bool is_root = false) const = 0;
|
||||
|
||||
static std::shared_ptr<SHAMapTreeNode>
|
||||
static intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
makeFromPrefix(Slice rawNode, SHAMapHash const& hash);
|
||||
|
||||
static std::shared_ptr<SHAMapTreeNode>
|
||||
static intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
makeFromWire(Slice rawNode);
|
||||
|
||||
private:
|
||||
static std::shared_ptr<SHAMapTreeNode>
|
||||
static intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
makeTransaction(Slice data, SHAMapHash const& hash, bool hashValid);
|
||||
|
||||
static std::shared_ptr<SHAMapTreeNode>
|
||||
static intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
makeAccountState(Slice data, SHAMapHash const& hash, bool hashValid);
|
||||
|
||||
static std::shared_ptr<SHAMapTreeNode>
|
||||
static intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
makeTransactionWithMeta(Slice data, SHAMapHash const& hash, bool hashValid);
|
||||
};
|
||||
|
||||
|
||||
@@ -50,10 +50,10 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
clone(std::uint32_t cowid) const final override
|
||||
{
|
||||
return std::make_shared<SHAMapTxLeafNode>(item_, cowid, hash_);
|
||||
return intr_ptr::make_shared<SHAMapTxLeafNode>(item_, cowid, hash_);
|
||||
}
|
||||
|
||||
SHAMapNodeType
|
||||
|
||||
@@ -51,10 +51,11 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
clone(std::uint32_t cowid) const override
|
||||
{
|
||||
return std::make_shared<SHAMapTxPlusMetaLeafNode>(item_, cowid, hash_);
|
||||
return intr_ptr::make_shared<SHAMapTxPlusMetaLeafNode>(
|
||||
item_, cowid, hash_);
|
||||
}
|
||||
|
||||
SHAMapNodeType
|
||||
|
||||
@@ -20,11 +20,18 @@
 #ifndef RIPPLE_SHAMAP_TREENODECACHE_H_INCLUDED
 #define RIPPLE_SHAMAP_TREENODECACHE_H_INCLUDED
 
+#include <ripple/basics/IntrusivePointer.h>
 #include <ripple/basics/TaggedCache.h>
 #include <ripple/shamap/SHAMapTreeNode.h>
 
 namespace ripple {
 
-using TreeNodeCache = TaggedCache<uint256, SHAMapTreeNode>;
+using TreeNodeCache = TaggedCache<
+    uint256,
+    SHAMapTreeNode,
+    /*IsKeyCache*/ false,
+    SharedWeakUnion<SHAMapTreeNode>,
+    SharedIntrusive<SHAMapTreeNode>>;
 
 } // namespace ripple
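
The extra template arguments let the cache store each entry as a SharedWeakUnion, a single pointer-sized handle that is either a strong or a weak reference, while lookups hand out SharedIntrusive strong pointers. A small sketch of the demotion idea, assuming a hypothetical Widget type and using only operations that appear in the unit test added by this change; the actual TaggedCache interface is not shown here.

    #include <ripple/basics/IntrusivePointer.ipp>
    #include <ripple/basics/IntrusiveRefCounts.h>
    #include <cassert>

    namespace ripple {

    struct Widget : IntrusiveRefCounts
    {
    };

    void
    cacheDemotionSketch()
    {
        SharedWeakUnion<Widget> entry = make_SharedIntrusive<Widget>();
        assert(entry.isStrong() && entry.use_count() == 1);  // keeps it alive
        // A cache sweep can demote the entry in place instead of erasing it.
        entry.convertToWeak();
        assert(entry.isWeak());
        // Once the object has expired the entry can finally be dropped.
        if (entry.expired())
            entry.reset();
    }

    }  // namespace ripple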
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include <ripple/app/ledger/LedgerMaster.h>
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/app/main/Tuning.h>
|
||||
#include <ripple/basics/TaggedCache.ipp>
|
||||
#include <ripple/shamap/NodeFamily.h>
|
||||
#include <sstream>
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/basics/TaggedCache.ipp>
|
||||
#include <ripple/basics/contract.h>
|
||||
#include <ripple/shamap/SHAMap.h>
|
||||
#include <ripple/shamap/SHAMapAccountStateLeafNode.h>
|
||||
@@ -27,21 +28,21 @@
|
||||
|
||||
namespace ripple {
|
||||
|
||||
[[nodiscard]] std::shared_ptr<SHAMapLeafNode>
|
||||
[[nodiscard]] intr_ptr::SharedPtr<SHAMapLeafNode>
|
||||
makeTypedLeaf(
|
||||
SHAMapNodeType type,
|
||||
std::shared_ptr<SHAMapItem const> item,
|
||||
std::uint32_t owner)
|
||||
{
|
||||
if (type == SHAMapNodeType::tnTRANSACTION_NM)
|
||||
return std::make_shared<SHAMapTxLeafNode>(std::move(item), owner);
|
||||
return intr_ptr::make_shared<SHAMapTxLeafNode>(std::move(item), owner);
|
||||
|
||||
if (type == SHAMapNodeType::tnTRANSACTION_MD)
|
||||
return std::make_shared<SHAMapTxPlusMetaLeafNode>(
|
||||
return intr_ptr::make_shared<SHAMapTxPlusMetaLeafNode>(
|
||||
std::move(item), owner);
|
||||
|
||||
if (type == SHAMapNodeType::tnACCOUNT_STATE)
|
||||
return std::make_shared<SHAMapAccountStateLeafNode>(
|
||||
return intr_ptr::make_shared<SHAMapAccountStateLeafNode>(
|
||||
std::move(item), owner);
|
||||
|
||||
LogicError(
|
||||
@@ -53,7 +54,7 @@ makeTypedLeaf(
|
||||
SHAMap::SHAMap(SHAMapType t, Family& f)
|
||||
: f_(f), journal_(f.journal()), state_(SHAMapState::Modifying), type_(t)
|
||||
{
|
||||
root_ = std::make_shared<SHAMapInnerNode>(cowid_);
|
||||
root_ = intr_ptr::make_shared<SHAMapInnerNode>(cowid_);
|
||||
}
|
||||
|
||||
// The `hash` parameter is unused. It is part of the interface so it's clear
|
||||
@@ -63,7 +64,7 @@ SHAMap::SHAMap(SHAMapType t, Family& f)
|
||||
SHAMap::SHAMap(SHAMapType t, uint256 const& hash, Family& f)
|
||||
: f_(f), journal_(f.journal()), state_(SHAMapState::Synching), type_(t)
|
||||
{
|
||||
root_ = std::make_shared<SHAMapInnerNode>(cowid_);
|
||||
root_ = intr_ptr::make_shared<SHAMapInnerNode>(cowid_);
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMap>
|
||||
@@ -94,7 +95,7 @@ void
|
||||
SHAMap::dirtyUp(
|
||||
SharedPtrNodeStack& stack,
|
||||
uint256 const& target,
|
||||
std::shared_ptr<SHAMapTreeNode> child)
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> child)
|
||||
{
|
||||
// walk the tree up from through the inner nodes to the root_
|
||||
// update hashes and links
|
||||
@@ -109,10 +110,10 @@ SHAMap::dirtyUp(
|
||||
while (!stack.empty())
|
||||
{
|
||||
auto node =
|
||||
std::dynamic_pointer_cast<SHAMapInnerNode>(stack.top().first);
|
||||
intr_ptr::dynamic_pointer_cast<SHAMapInnerNode>(stack.top().first);
|
||||
SHAMapNodeID nodeID = stack.top().second;
|
||||
stack.pop();
|
||||
assert(node != nullptr);
|
||||
assert(node);
|
||||
|
||||
int branch = selectBranch(nodeID, target);
|
||||
assert(branch >= 0);
|
||||
@@ -136,12 +137,13 @@ SHAMap::walkTowardsKey(uint256 const& id, SharedPtrNodeStack* stack) const
|
||||
if (stack != nullptr)
|
||||
stack->push({inNode, nodeID});
|
||||
|
||||
auto const inner = std::static_pointer_cast<SHAMapInnerNode>(inNode);
|
||||
auto const inner =
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(inNode);
|
||||
auto const branch = selectBranch(nodeID, id);
|
||||
if (inner->isEmptyBranch(branch))
|
||||
return nullptr;
|
||||
|
||||
inNode = descendThrow(inner, branch);
|
||||
inNode = descendThrow(*inner, branch);
|
||||
nodeID = nodeID.getChildNodeID(branch);
|
||||
}
|
||||
|
||||
@@ -159,7 +161,7 @@ SHAMap::findKey(uint256 const& id) const
|
||||
return leaf;
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const
|
||||
{
|
||||
assert(backed_);
|
||||
@@ -167,14 +169,14 @@ SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const
|
||||
return finishFetch(hash, obj);
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::finishFetch(
|
||||
SHAMapHash const& hash,
|
||||
std::shared_ptr<NodeObject> const& object) const
|
||||
{
|
||||
assert(backed_);
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode> node;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node;
|
||||
try
|
||||
{
|
||||
if (!object)
|
||||
@@ -207,11 +209,11 @@ SHAMap::finishFetch(
|
||||
JLOG(journal_.warn()) << "Invalid DB node " << hash;
|
||||
}
|
||||
|
||||
return std::shared_ptr<SHAMapTreeNode>();
|
||||
return intr_ptr::SharedPtr<SHAMapTreeNode>();
|
||||
}
|
||||
|
||||
// See if a sync filter has a node
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
|
||||
{
|
||||
if (auto nodeData = filter->getNode(hash))
|
||||
@@ -244,7 +246,7 @@ SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
|
||||
|
||||
// Get a node without throwing
|
||||
// Used on maps where missing nodes are expected
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
|
||||
{
|
||||
auto node = cacheLookup(hash);
|
||||
@@ -267,7 +269,7 @@ SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
|
||||
return node;
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::fetchNodeNT(SHAMapHash const& hash) const
|
||||
{
|
||||
auto node = cacheLookup(hash);
|
||||
@@ -279,7 +281,7 @@ SHAMap::fetchNodeNT(SHAMapHash const& hash) const
|
||||
}
|
||||
|
||||
// Throw if the node is missing
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::fetchNode(SHAMapHash const& hash) const
|
||||
{
|
||||
auto node = fetchNodeNT(hash);
|
||||
@@ -301,14 +303,13 @@ SHAMap::descendThrow(SHAMapInnerNode* parent, int branch) const
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
SHAMap::descendThrow(std::shared_ptr<SHAMapInnerNode> const& parent, int branch)
|
||||
const
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::descendThrow(SHAMapInnerNode& parent, int branch) const
|
||||
{
|
||||
std::shared_ptr<SHAMapTreeNode> ret = descend(parent, branch);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> ret = descend(parent, branch);
|
||||
|
||||
if (!ret && !parent->isEmptyBranch(branch))
|
||||
Throw<SHAMapMissingNode>(type_, parent->getChildHash(branch));
|
||||
if (!ret && !parent.isEmptyBranch(branch))
|
||||
Throw<SHAMapMissingNode>(type_, parent.getChildHash(branch));
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -320,7 +321,7 @@ SHAMap::descend(SHAMapInnerNode* parent, int branch) const
|
||||
if (ret || !backed_)
|
||||
return ret;
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode> node =
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node =
|
||||
fetchNodeNT(parent->getChildHash(branch));
|
||||
if (!node)
|
||||
return nullptr;
|
||||
@@ -329,32 +330,29 @@ SHAMap::descend(SHAMapInnerNode* parent, int branch) const
|
||||
return node.get();
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
SHAMap::descend(std::shared_ptr<SHAMapInnerNode> const& parent, int branch)
|
||||
const
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::descend(SHAMapInnerNode& parent, int branch) const
|
||||
{
|
||||
std::shared_ptr<SHAMapTreeNode> node = parent->getChild(branch);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node = parent.getChild(branch);
|
||||
if (node || !backed_)
|
||||
return node;
|
||||
|
||||
node = fetchNode(parent->getChildHash(branch));
|
||||
node = fetchNode(parent.getChildHash(branch));
|
||||
if (!node)
|
||||
return nullptr;
|
||||
return {};
|
||||
|
||||
node = parent->canonicalizeChild(branch, std::move(node));
|
||||
node = parent.canonicalizeChild(branch, std::move(node));
|
||||
return node;
|
||||
}
|
||||
|
||||
// Gets the node that would be hooked to this branch,
|
||||
// but doesn't hook it up.
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
SHAMap::descendNoStore(
|
||||
std::shared_ptr<SHAMapInnerNode> const& parent,
|
||||
int branch) const
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::descendNoStore(SHAMapInnerNode& parent, int branch) const
|
||||
{
|
||||
std::shared_ptr<SHAMapTreeNode> ret = parent->getChild(branch);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> ret = parent.getChild(branch);
|
||||
if (!ret && backed_)
|
||||
ret = fetchNode(parent->getChildHash(branch));
|
||||
ret = fetchNode(parent.getChildHash(branch));
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -374,7 +372,7 @@ SHAMap::descend(
|
||||
if (!child)
|
||||
{
|
||||
auto const& childHash = parent->getChildHash(branch);
|
||||
std::shared_ptr<SHAMapTreeNode> childNode =
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> childNode =
|
||||
fetchNodeNT(childHash, filter);
|
||||
|
||||
if (childNode)
|
||||
@@ -431,8 +429,8 @@ SHAMap::descendAsync(
|
||||
}
|
||||
|
||||
template <class Node>
|
||||
std::shared_ptr<Node>
|
||||
SHAMap::unshareNode(std::shared_ptr<Node> node, SHAMapNodeID const& nodeID)
|
||||
intr_ptr::SharedPtr<Node>
|
||||
SHAMap::unshareNode(intr_ptr::SharedPtr<Node> node, SHAMapNodeID const& nodeID)
|
||||
{
|
||||
// make sure the node is suitable for the intended operation (copy on write)
|
||||
assert(node->cowid() <= cowid_);
|
||||
@@ -440,7 +438,7 @@ SHAMap::unshareNode(std::shared_ptr<Node> node, SHAMapNodeID const& nodeID)
|
||||
{
|
||||
// have a CoW
|
||||
assert(state_ != SHAMapState::Immutable);
|
||||
node = std::static_pointer_cast<Node>(node->clone(cowid_));
|
||||
node = intr_ptr::static_pointer_cast<Node>(node->clone(cowid_));
|
||||
if (nodeID.isRoot())
|
||||
root_ = node;
|
||||
}
|
||||
@@ -449,7 +447,7 @@ SHAMap::unshareNode(std::shared_ptr<Node> node, SHAMapNodeID const& nodeID)
|
||||
|
||||
SHAMapLeafNode*
|
||||
SHAMap::belowHelper(
|
||||
std::shared_ptr<SHAMapTreeNode> node,
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node,
|
||||
SharedPtrNodeStack& stack,
|
||||
int branch,
|
||||
std::tuple<int, std::function<bool(int)>, std::function<void(int&)>> const&
|
||||
@@ -458,11 +456,11 @@ SHAMap::belowHelper(
|
||||
auto& [init, cmp, incr] = loopParams;
|
||||
if (node->isLeaf())
|
||||
{
|
||||
auto n = std::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
auto n = intr_ptr::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
stack.push({node, {leafDepth, n->peekItem()->key()}});
|
||||
return n.get();
|
||||
}
|
||||
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
auto inner = intr_ptr::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
if (stack.empty())
|
||||
stack.push({inner, SHAMapNodeID{}});
|
||||
else
|
||||
@@ -471,15 +469,15 @@ SHAMap::belowHelper(
|
||||
{
|
||||
if (!inner->isEmptyBranch(i))
|
||||
{
|
||||
node = descendThrow(inner, i);
|
||||
node = descendThrow(*inner, i);
|
||||
assert(!stack.empty());
|
||||
if (node->isLeaf())
|
||||
{
|
||||
auto n = std::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
auto n = intr_ptr::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
stack.push({n, {leafDepth, n->peekItem()->key()}});
|
||||
return n.get();
|
||||
}
|
||||
inner = std::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
inner = intr_ptr::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
stack.push({inner, stack.top().second.getChildNodeID(branch)});
|
||||
i = init; // descend and reset loop
|
||||
}
|
||||
@@ -490,7 +488,7 @@ SHAMap::belowHelper(
|
||||
}
|
||||
SHAMapLeafNode*
|
||||
SHAMap::lastBelow(
|
||||
std::shared_ptr<SHAMapTreeNode> node,
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node,
|
||||
SharedPtrNodeStack& stack,
|
||||
int branch) const
|
||||
{
|
||||
@@ -502,7 +500,7 @@ SHAMap::lastBelow(
|
||||
}
|
||||
SHAMapLeafNode*
|
||||
SHAMap::firstBelow(
|
||||
std::shared_ptr<SHAMapTreeNode> node,
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node,
|
||||
SharedPtrNodeStack& stack,
|
||||
int branch) const
|
||||
{
|
||||
@@ -574,12 +572,12 @@ SHAMap::peekNextItem(uint256 const& id, SharedPtrNodeStack& stack) const
|
||||
{
|
||||
auto [node, nodeID] = stack.top();
|
||||
assert(!node->isLeaf());
|
||||
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
auto inner = intr_ptr::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
for (auto i = selectBranch(nodeID, id) + 1; i < branchFactor; ++i)
|
||||
{
|
||||
if (!inner->isEmptyBranch(i))
|
||||
{
|
||||
node = descendThrow(inner, i);
|
||||
node = descendThrow(*inner, i);
|
||||
auto leaf = firstBelow(node, stack, i);
|
||||
if (!leaf)
|
||||
Throw<SHAMapMissingNode>(type_, id);
|
||||
@@ -633,14 +631,14 @@ SHAMap::upper_bound(uint256 const& id) const
|
||||
}
|
||||
else
|
||||
{
|
||||
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
auto inner = intr_ptr::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
for (auto branch = selectBranch(nodeID, id) + 1;
|
||||
branch < branchFactor;
|
||||
++branch)
|
||||
{
|
||||
if (!inner->isEmptyBranch(branch))
|
||||
{
|
||||
node = descendThrow(inner, branch);
|
||||
node = descendThrow(*inner, branch);
|
||||
auto leaf = firstBelow(node, stack, branch);
|
||||
if (!leaf)
|
||||
Throw<SHAMapMissingNode>(type_, id);
|
||||
@@ -670,13 +668,13 @@ SHAMap::lower_bound(uint256 const& id) const
|
||||
}
|
||||
else
|
||||
{
|
||||
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
auto inner = intr_ptr::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
for (int branch = selectBranch(nodeID, id) - 1; branch >= 0;
|
||||
--branch)
|
||||
{
|
||||
if (!inner->isEmptyBranch(branch))
|
||||
{
|
||||
node = descendThrow(inner, branch);
|
||||
node = descendThrow(*inner, branch);
|
||||
auto leaf = lastBelow(node, stack, branch);
|
||||
if (!leaf)
|
||||
Throw<SHAMapMissingNode>(type_, id);
|
||||
@@ -709,7 +707,8 @@ SHAMap::delItem(uint256 const& id)
|
||||
if (stack.empty())
|
||||
Throw<SHAMapMissingNode>(type_, id);
|
||||
|
||||
auto leaf = std::dynamic_pointer_cast<SHAMapLeafNode>(stack.top().first);
|
||||
auto leaf =
|
||||
intr_ptr::dynamic_pointer_cast<SHAMapLeafNode>(stack.top().first);
|
||||
stack.pop();
|
||||
|
||||
if (!leaf || (leaf->peekItem()->key() != id))
|
||||
@@ -719,12 +718,12 @@ SHAMap::delItem(uint256 const& id)
|
||||
|
||||
// What gets attached to the end of the chain
|
||||
// (For now, nothing, since we deleted the leaf)
|
||||
std::shared_ptr<SHAMapTreeNode> prevNode;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> prevNode;
|
||||
|
||||
while (!stack.empty())
|
||||
{
|
||||
auto node =
|
||||
std::static_pointer_cast<SHAMapInnerNode>(stack.top().first);
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(stack.top().first);
|
||||
SHAMapNodeID nodeID = stack.top().second;
|
||||
stack.pop();
|
||||
|
||||
@@ -752,7 +751,8 @@ SHAMap::delItem(uint256 const& id)
|
||||
{
|
||||
if (!node->isEmptyBranch(i))
|
||||
{
|
||||
node->setChild(i, nullptr);
|
||||
node->setChild(
|
||||
i, intr_ptr::SharedPtr<SHAMapTreeNode>{});
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -795,7 +795,7 @@ SHAMap::addGiveItem(SHAMapNodeType type, std::shared_ptr<SHAMapItem const> item)
|
||||
|
||||
if (node->isLeaf())
|
||||
{
|
||||
auto leaf = std::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
auto leaf = intr_ptr::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
if (leaf->peekItem()->key() == tag)
|
||||
return false;
|
||||
}
|
||||
@@ -803,7 +803,7 @@ SHAMap::addGiveItem(SHAMapNodeType type, std::shared_ptr<SHAMapItem const> item)
|
||||
if (node->isInner())
|
||||
{
|
||||
// easy case, we end on an inner node
|
||||
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
auto inner = intr_ptr::static_pointer_cast<SHAMapInnerNode>(node);
|
||||
int branch = selectBranch(nodeID, tag);
|
||||
assert(inner->isEmptyBranch(branch));
|
||||
inner->setChild(branch, makeTypedLeaf(type, std::move(item), cowid_));
|
||||
@@ -812,11 +812,11 @@ SHAMap::addGiveItem(SHAMapNodeType type, std::shared_ptr<SHAMapItem const> item)
|
||||
{
|
||||
// this is a leaf node that has to be made an inner node holding two
|
||||
// items
|
||||
auto leaf = std::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
std::shared_ptr<SHAMapItem const> otherItem = leaf->peekItem();
|
||||
auto leaf = intr_ptr::static_pointer_cast<SHAMapLeafNode>(node);
|
||||
auto otherItem = leaf->peekItem();
|
||||
assert(otherItem && (tag != otherItem->key()));
|
||||
|
||||
node = std::make_shared<SHAMapInnerNode>(node->cowid());
|
||||
node = intr_ptr::make_shared<SHAMapInnerNode>(node->cowid());
|
||||
|
||||
unsigned int b1, b2;
|
||||
|
||||
@@ -828,7 +828,7 @@ SHAMap::addGiveItem(SHAMapNodeType type, std::shared_ptr<SHAMapItem const> item)
|
||||
// we need a new inner node, since both go on same branch at this
|
||||
// level
|
||||
nodeID = nodeID.getChildNodeID(b1);
|
||||
node = std::make_shared<SHAMapInnerNode>(cowid_);
|
||||
node = intr_ptr::make_shared<SHAMapInnerNode>(cowid_);
|
||||
}
|
||||
|
||||
// we can add the two leaf nodes here
|
||||
@@ -877,7 +877,8 @@ SHAMap::updateGiveItem(
|
||||
if (stack.empty())
|
||||
Throw<SHAMapMissingNode>(type_, tag);
|
||||
|
||||
auto node = std::dynamic_pointer_cast<SHAMapLeafNode>(stack.top().first);
|
||||
auto node =
|
||||
intr_ptr::dynamic_pointer_cast<SHAMapLeafNode>(stack.top().first);
|
||||
auto nodeID = stack.top().second;
|
||||
stack.pop();
|
||||
|
||||
@@ -947,8 +948,9 @@ SHAMap::fetchRoot(SHAMapHash const& hash, SHAMapSyncFilter* filter)
|
||||
@note The node must have already been unshared by having the caller
|
||||
first call SHAMapTreeNode::unshare().
|
||||
*/
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
SHAMap::writeNode(NodeObjectType t, std::shared_ptr<SHAMapTreeNode> node) const
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::writeNode(NodeObjectType t, intr_ptr::SharedPtr<SHAMapTreeNode> node)
|
||||
const
|
||||
{
|
||||
assert(node->cowid() == 0);
|
||||
assert(backed_);
|
||||
@@ -966,8 +968,8 @@ SHAMap::writeNode(NodeObjectType t, std::shared_ptr<SHAMapTreeNode> node) const
|
||||
// pointer to because flushing modifies inner nodes -- it
|
||||
// makes them point to canonical/shared nodes.
|
||||
template <class Node>
|
||||
std::shared_ptr<Node>
|
||||
SHAMap::preFlushNode(std::shared_ptr<Node> node) const
|
||||
intr_ptr::SharedPtr<Node>
|
||||
SHAMap::preFlushNode(intr_ptr::SharedPtr<Node> node) const
|
||||
{
|
||||
// A shared node should never need to be flushed
|
||||
// because that would imply someone modified it
|
||||
@@ -977,7 +979,7 @@ SHAMap::preFlushNode(std::shared_ptr<Node> node) const
|
||||
{
|
||||
// Node is not uniquely ours, so unshare it before
|
||||
// possibly modifying it
|
||||
node = std::static_pointer_cast<Node>(node->clone(cowid_));
|
||||
node = intr_ptr::static_pointer_cast<Node>(node->clone(cowid_));
|
||||
}
|
||||
return node;
|
||||
}
|
||||
@@ -1018,17 +1020,17 @@ SHAMap::walkSubTree(bool doWrite, NodeObjectType t)
|
||||
return 1;
|
||||
}
|
||||
|
||||
auto node = std::static_pointer_cast<SHAMapInnerNode>(root_);
|
||||
auto node = intr_ptr::static_pointer_cast<SHAMapInnerNode>(root_);
|
||||
|
||||
if (node->isEmpty())
|
||||
{ // replace empty root with a new empty root
|
||||
root_ = std::make_shared<SHAMapInnerNode>(0);
|
||||
root_ = intr_ptr::make_shared<SHAMapInnerNode>(0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Stack of {parent,index,child} pointers representing
|
||||
// inner nodes we are in the process of flushing
|
||||
using StackEntry = std::pair<std::shared_ptr<SHAMapInnerNode>, int>;
|
||||
using StackEntry = std::pair<intr_ptr::SharedPtr<SHAMapInnerNode>, int>;
|
||||
std::stack<StackEntry, std::vector<StackEntry>> stack;
|
||||
|
||||
node = preFlushNode(std::move(node));
|
||||
@@ -1065,7 +1067,7 @@ SHAMap::walkSubTree(bool doWrite, NodeObjectType t)
|
||||
// The semantics of this changes when we move to c++-20
|
||||
// Right now no move will occur; With c++-20 child will
|
||||
// be moved from.
|
||||
node = std::static_pointer_cast<SHAMapInnerNode>(
|
||||
node = intr_ptr::static_pointer_cast<SHAMapInnerNode>(
|
||||
std::move(child));
|
||||
pos = 0;
|
||||
}
|
||||
@@ -1094,7 +1096,7 @@ SHAMap::walkSubTree(bool doWrite, NodeObjectType t)
|
||||
node->unshare();
|
||||
|
||||
if (doWrite)
|
||||
node = std::static_pointer_cast<SHAMapInnerNode>(
|
||||
node = intr_ptr::static_pointer_cast<SHAMapInnerNode>(
|
||||
writeNode(t, std::move(node)));
|
||||
|
||||
++flushed;
|
||||
@@ -1164,7 +1166,7 @@ SHAMap::dump(bool hash) const
|
||||
JLOG(journal_.info()) << leafCount << " resident leaves";
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMap::cacheLookup(SHAMapHash const& hash) const
|
||||
{
|
||||
auto ret = f_.getTreeNodeCache(ledgerSeq_)->fetch(hash.as_uint256());
|
||||
@@ -1175,7 +1177,7 @@ SHAMap::cacheLookup(SHAMapHash const& hash) const
|
||||
void
|
||||
SHAMap::canonicalize(
|
||||
SHAMapHash const& hash,
|
||||
std::shared_ptr<SHAMapTreeNode>& node) const
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>& node) const
|
||||
{
|
||||
assert(backed_);
|
||||
assert(node->cowid() == 0);
|
||||
@@ -1190,7 +1192,7 @@ SHAMap::invariants() const
|
||||
{
|
||||
(void)getHash(); // update node hashes
|
||||
auto node = root_.get();
|
||||
assert(node != nullptr);
|
||||
assert(node);
|
||||
assert(!node->isLeaf());
|
||||
SharedPtrNodeStack stack;
|
||||
for (auto leaf = peekFirstItem(stack); leaf != nullptr;
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/basics/IntrusivePointer.ipp>
|
||||
#include <ripple/basics/contract.h>
|
||||
#include <ripple/shamap/SHAMap.h>
|
||||
|
||||
@@ -255,28 +256,28 @@ SHAMap::walkMap(std::vector<SHAMapMissingNode>& missingNodes, int maxMissing)
|
||||
if (!root_->isInner()) // root_ is only node, and we have it
|
||||
return;
|
||||
|
||||
using StackEntry = std::shared_ptr<SHAMapInnerNode>;
|
||||
using StackEntry = intr_ptr::SharedPtr<SHAMapInnerNode>;
|
||||
std::stack<StackEntry, std::vector<StackEntry>> nodeStack;
|
||||
|
||||
nodeStack.push(std::static_pointer_cast<SHAMapInnerNode>(root_));
|
||||
nodeStack.push(intr_ptr::static_pointer_cast<SHAMapInnerNode>(root_));
|
||||
|
||||
while (!nodeStack.empty())
|
||||
{
|
||||
std::shared_ptr<SHAMapInnerNode> node = std::move(nodeStack.top());
|
||||
intr_ptr::SharedPtr<SHAMapInnerNode> node = std::move(nodeStack.top());
|
||||
nodeStack.pop();
|
||||
|
||||
for (int i = 0; i < 16; ++i)
|
||||
{
|
||||
if (!node->isEmptyBranch(i))
|
||||
{
|
||||
std::shared_ptr<SHAMapTreeNode> nextNode =
|
||||
descendNoStore(node, i);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> nextNode =
|
||||
descendNoStore(*node, i);
|
||||
|
||||
if (nextNode)
|
||||
{
|
||||
if (nextNode->isInner())
|
||||
nodeStack.push(
|
||||
std::static_pointer_cast<SHAMapInnerNode>(
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(
|
||||
nextNode));
|
||||
}
|
||||
else
|
||||
@@ -298,15 +299,15 @@ SHAMap::walkMapParallel(
|
||||
if (!root_->isInner()) // root_ is only node, and we have it
|
||||
return false;
|
||||
|
||||
using StackEntry = std::shared_ptr<SHAMapInnerNode>;
|
||||
std::array<std::shared_ptr<SHAMapTreeNode>, 16> topChildren;
|
||||
using StackEntry = intr_ptr::SharedPtr<SHAMapInnerNode>;
|
||||
std::array<intr_ptr::SharedPtr<SHAMapTreeNode>, 16> topChildren;
|
||||
{
|
||||
auto const& innerRoot =
|
||||
std::static_pointer_cast<SHAMapInnerNode>(root_);
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(root_);
|
||||
for (int i = 0; i < 16; ++i)
|
||||
{
|
||||
if (!innerRoot->isEmptyBranch(i))
|
||||
topChildren[i] = descendNoStore(innerRoot, i);
|
||||
topChildren[i] = descendNoStore(*innerRoot, i);
|
||||
}
|
||||
}
|
||||
std::vector<std::thread> workers;
|
||||
@@ -327,7 +328,7 @@ SHAMap::walkMapParallel(
|
||||
continue;
|
||||
|
||||
nodeStacks[rootChildIndex].push(
|
||||
std::static_pointer_cast<SHAMapInnerNode>(child));
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(child));
|
||||
|
||||
JLOG(journal_.debug()) << "starting worker " << rootChildIndex;
|
||||
workers.push_back(std::thread(
|
||||
@@ -337,7 +338,7 @@ SHAMap::walkMapParallel(
|
||||
{
|
||||
while (!nodeStack.empty())
|
||||
{
|
||||
std::shared_ptr<SHAMapInnerNode> node =
|
||||
intr_ptr::SharedPtr<SHAMapInnerNode> node =
|
||||
std::move(nodeStack.top());
|
||||
assert(node);
|
||||
nodeStack.pop();
|
||||
@@ -346,14 +347,15 @@ SHAMap::walkMapParallel(
|
||||
{
|
||||
if (node->isEmptyBranch(i))
|
||||
continue;
|
||||
std::shared_ptr<SHAMapTreeNode> nextNode =
|
||||
descendNoStore(node, i);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> nextNode =
|
||||
descendNoStore(*node, i);
|
||||
|
||||
if (nextNode)
|
||||
{
|
||||
if (nextNode->isInner())
|
||||
nodeStack.push(std::static_pointer_cast<
|
||||
SHAMapInnerNode>(nextNode));
|
||||
nodeStack.push(
|
||||
intr_ptr::static_pointer_cast<
|
||||
SHAMapInnerNode>(nextNode));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#include <ripple/shamap/SHAMapInnerNode.h>
|
||||
|
||||
#include <ripple/basics/IntrusivePointer.ipp>
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/basics/Slice.h>
|
||||
#include <ripple/basics/contract.h>
|
||||
@@ -44,6 +45,17 @@ SHAMapInnerNode::SHAMapInnerNode(
 
 SHAMapInnerNode::~SHAMapInnerNode() = default;
 
+void
+SHAMapInnerNode::partialDestructor()
+{
+    intr_ptr::SharedPtr<SHAMapTreeNode>* children;
+    // structured bindings can't be captured in c++ 17; use tie instead
+    std::tie(std::ignore, std::ignore, children) =
+        hashesAndChildren_.getHashesAndChildren();
+    iterNonEmptyChildIndexes(
+        [&](auto branchNum, auto indexNum) { children[indexNum].reset(); });
+}
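
partialDestructor() exists because an intrusive weak reference keeps the object's memory alive after the last strong reference is gone; releasing the children here lets the rest of the subtree be reclaimed immediately. A rough sketch of the two-phase teardown, using only the primitives exercised by the new IntrusiveShared_test (Node is a hypothetical type, and in production code the pointer classes, not user code, drive these calls):

    #include <ripple/basics/IntrusivePointer.ipp>
    #include <ripple/basics/IntrusiveRefCounts.h>

    namespace ripple {

    struct Node : IntrusiveRefCounts
    {
        void
        partialDestructor()
        {
            // phase 1: release owned resources (children), keep the shell
        }
    };

    void
    twoPhaseTeardownSketch()
    {
        Node n;          // constructed with one strong reference
        n.addWeakRef();  // a weak observer still exists
        if (n.releaseStrongRef() == ReleaseRefAction::partialDestroy)
        {
            // Run when the last strong reference disappears while weak
            // references remain.
            n.partialDestructor();
            Node* p = &n;
            partialDestructorFinished(&p);  // p is nulled once the object
                                            // is marked partially deleted
        }
        // phase 2: the full destructor may only run after the last weak
        // reference is released (here `n` dies at scope exit).
        n.releaseWeakRef();
    }

    }  // namespace ripple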
|
||||
|
||||
template <class F>
|
||||
void
|
||||
SHAMapInnerNode::iterChildren(F&& f) const
|
||||
@@ -71,17 +83,17 @@ SHAMapInnerNode::getChildIndex(int i) const
|
||||
return hashesAndChildren_.getChildIndex(isBranch_, i);
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapInnerNode::clone(std::uint32_t cowid) const
|
||||
{
|
||||
auto const branchCount = getBranchCount();
|
||||
auto const thisIsSparse = !hashesAndChildren_.isDense();
|
||||
auto p = std::make_shared<SHAMapInnerNode>(cowid, branchCount);
|
||||
auto p = intr_ptr::make_shared<SHAMapInnerNode>(cowid, branchCount);
|
||||
p->hash_ = hash_;
|
||||
p->isBranch_ = isBranch_;
|
||||
p->fullBelowGen_ = fullBelowGen_;
|
||||
SHAMapHash *cloneHashes, *thisHashes;
|
||||
std::shared_ptr<SHAMapTreeNode>*cloneChildren, *thisChildren;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>*cloneChildren, *thisChildren;
|
||||
// structured bindings can't be captured in c++ 17; use tie instead
|
||||
std::tie(std::ignore, cloneHashes, cloneChildren) =
|
||||
p->hashesAndChildren_.getHashesAndChildren();
|
||||
@@ -122,7 +134,7 @@ SHAMapInnerNode::clone(std::uint32_t cowid) const
|
||||
return p;
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapInnerNode::makeFullInner(
|
||||
Slice data,
|
||||
SHAMapHash const& hash,
|
||||
@@ -132,7 +144,7 @@ SHAMapInnerNode::makeFullInner(
|
||||
if (data.size() != branchFactor * uint256::bytes)
|
||||
Throw<std::runtime_error>("Invalid FI node");
|
||||
|
||||
auto ret = std::make_shared<SHAMapInnerNode>(0, branchFactor);
|
||||
auto ret = intr_ptr::make_shared<SHAMapInnerNode>(0, branchFactor);
|
||||
|
||||
SerialIter si(data);
|
||||
|
||||
@@ -156,7 +168,7 @@ SHAMapInnerNode::makeFullInner(
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapInnerNode::makeCompressedInner(Slice data)
|
||||
{
|
||||
// A compressed inner node is serialized as a series of 33 byte chunks,
|
||||
@@ -169,7 +181,7 @@ SHAMapInnerNode::makeCompressedInner(Slice data)
|
||||
|
||||
SerialIter si(data);
|
||||
|
||||
auto ret = std::make_shared<SHAMapInnerNode>(0, branchFactor);
|
||||
auto ret = intr_ptr::make_shared<SHAMapInnerNode>(0, branchFactor);
|
||||
|
||||
auto hashes = ret->hashesAndChildren_.getHashes();
|
||||
|
||||
@@ -211,13 +223,13 @@ void
|
||||
SHAMapInnerNode::updateHashDeep()
|
||||
{
|
||||
SHAMapHash* hashes;
|
||||
std::shared_ptr<SHAMapTreeNode>* children;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>* children;
|
||||
// structured bindings can't be captured in c++ 17; use tie instead
|
||||
std::tie(std::ignore, hashes, children) =
|
||||
hashesAndChildren_.getHashesAndChildren();
|
||||
iterNonEmptyChildIndexes([&](auto branchNum, auto indexNum) {
|
||||
if (children[indexNum] != nullptr)
|
||||
hashes[indexNum] = children[indexNum]->getHash();
|
||||
if (auto p = children[indexNum].get())
|
||||
hashes[indexNum] = p->getHash();
|
||||
});
|
||||
updateHash();
|
||||
}
|
||||
@@ -284,7 +296,7 @@ SHAMapInnerNode::getString(const SHAMapNodeID& id) const
|
||||
|
||||
// We are modifying an inner node
|
||||
void
|
||||
SHAMapInnerNode::setChild(int m, std::shared_ptr<SHAMapTreeNode> child)
|
||||
SHAMapInnerNode::setChild(int m, intr_ptr::SharedPtr<SHAMapTreeNode> child)
|
||||
{
|
||||
assert((m >= 0) && (m < branchFactor));
|
||||
assert(cowid_ != 0);
|
||||
@@ -319,8 +331,9 @@ SHAMapInnerNode::setChild(int m, std::shared_ptr<SHAMapTreeNode> child)
|
||||
}
|
||||
|
||||
// finished modifying, now make shareable
|
||||
void
|
||||
SHAMapInnerNode::shareChild(int m, std::shared_ptr<SHAMapTreeNode> const& child)
|
||||
template <class T>
|
||||
requires std::derived_from<T, SHAMapTreeNode> void
|
||||
SHAMapInnerNode::shareChild(int m, SharedIntrusive<T> const& child)
|
||||
{
|
||||
assert((m >= 0) && (m < branchFactor));
|
||||
assert(cowid_ != 0);
|
||||
@@ -344,7 +357,7 @@ SHAMapInnerNode::getChildPointer(int branch)
|
||||
return hashesAndChildren_.getChildren()[index].get();
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapInnerNode::getChild(int branch)
|
||||
{
|
||||
assert(branch >= 0 && branch < branchFactor);
|
||||
@@ -367,10 +380,10 @@ SHAMapInnerNode::getChildHash(int m) const
|
||||
return zeroSHAMapHash;
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapInnerNode::canonicalizeChild(
|
||||
int branch,
|
||||
std::shared_ptr<SHAMapTreeNode> node)
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> node)
|
||||
{
|
||||
assert(branch >= 0 && branch < branchFactor);
|
||||
assert(node);
|
||||
@@ -398,7 +411,7 @@ SHAMapInnerNode::canonicalizeChild(
|
||||
void
|
||||
SHAMapInnerNode::invariants(bool is_root) const
|
||||
{
|
||||
unsigned count = 0;
|
||||
[[maybe_unused]] unsigned count = 0;
|
||||
auto [numAllocated, hashes, children] =
|
||||
hashesAndChildren_.getHashesAndChildren();
|
||||
|
||||
@@ -408,8 +421,8 @@ SHAMapInnerNode::invariants(bool is_root) const
|
||||
for (int i = 0; i < branchCount; ++i)
|
||||
{
|
||||
assert(hashes[i].isNonZero());
|
||||
if (children[i] != nullptr)
|
||||
children[i]->invariants();
|
||||
if (auto p = children[i].get())
|
||||
p->invariants();
|
||||
++count;
|
||||
}
|
||||
}
|
||||
@@ -420,8 +433,8 @@ SHAMapInnerNode::invariants(bool is_root) const
|
||||
if (hashes[i].isNonZero())
|
||||
{
|
||||
assert((isBranch_ & (1 << i)) != 0);
|
||||
if (children[i] != nullptr)
|
||||
children[i]->invariants();
|
||||
if (auto p = children[i].get())
|
||||
p->invariants();
|
||||
++count;
|
||||
}
|
||||
else
|
||||
@@ -439,4 +452,14 @@ SHAMapInnerNode::invariants(bool is_root) const
|
||||
assert((count == 0) ? hash_.isZero() : hash_.isNonZero());
|
||||
}
|
||||
|
||||
+template void
+ripple::SHAMapInnerNode::shareChild<ripple::SHAMapTreeNode>(
+    int,
+    ripple::SharedIntrusive<ripple::SHAMapTreeNode> const&);
+
+template void
+ripple::SHAMapInnerNode::shareChild<ripple::SHAMapInnerNode>(
+    int,
+    ripple::SharedIntrusive<ripple::SHAMapInnerNode> const&);
+
 } // namespace ripple
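
These explicit instantiations are needed because shareChild is now a member template whose definition stays in this .cpp file; without them, other translation units that call it would have no symbol to link against. The general pattern, with purely illustrative names:

    // registry.h: declaration only; the definition is hidden from callers.
    template <class T>
    void registerThing(T const& t);

    // registry.cpp: definition plus explicit instantiations for the types
    // callers are allowed to use.
    template <class T>
    void
    registerThing(T const& t)
    {
        // ... implementation ...
    }

    template void registerThing<int>(int const&);
    template void registerThing<double>(double const&);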
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/basics/IntrusivePointer.ipp>
|
||||
#include <ripple/basics/contract.h>
|
||||
#include <ripple/beast/core/LexicalCast.h>
|
||||
#include <ripple/shamap/SHAMapLeafNode.h>
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/basics/TaggedCache.ipp>
|
||||
#include <ripple/basics/random.h>
|
||||
#include <ripple/shamap/SHAMap.h>
|
||||
#include <ripple/shamap/SHAMapSyncFilter.h>
|
||||
@@ -46,10 +47,10 @@ SHAMap::visitNodes(std::function<bool(SHAMapTreeNode&)> const& function) const
|
||||
if (!root_->isInner())
|
||||
return;
|
||||
|
||||
using StackEntry = std::pair<int, std::shared_ptr<SHAMapInnerNode>>;
|
||||
using StackEntry = std::pair<int, intr_ptr::SharedPtr<SHAMapInnerNode>>;
|
||||
std::stack<StackEntry, std::vector<StackEntry>> stack;
|
||||
|
||||
auto node = std::static_pointer_cast<SHAMapInnerNode>(root_);
|
||||
auto node = intr_ptr::static_pointer_cast<SHAMapInnerNode>(root_);
|
||||
int pos = 0;
|
||||
|
||||
while (true)
|
||||
@@ -58,8 +59,8 @@ SHAMap::visitNodes(std::function<bool(SHAMapTreeNode&)> const& function) const
|
||||
{
|
||||
if (!node->isEmptyBranch(pos))
|
||||
{
|
||||
std::shared_ptr<SHAMapTreeNode> child =
|
||||
descendNoStore(node, pos);
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> child =
|
||||
descendNoStore(*node, pos);
|
||||
if (!function(*child))
|
||||
return;
|
||||
|
||||
@@ -78,7 +79,8 @@ SHAMap::visitNodes(std::function<bool(SHAMapTreeNode&)> const& function) const
|
||||
}
|
||||
|
||||
// descend to the child's first position
|
||||
node = std::static_pointer_cast<SHAMapInnerNode>(child);
|
||||
node =
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(child);
|
||||
pos = 0;
|
||||
}
|
||||
}
|
||||
@@ -114,7 +116,7 @@ SHAMap::visitDifferences(
|
||||
|
||||
if (root_->isLeaf())
|
||||
{
|
||||
auto leaf = std::static_pointer_cast<SHAMapLeafNode>(root_);
|
||||
auto leaf = intr_ptr::static_pointer_cast<SHAMapLeafNode>(root_);
|
||||
if (!have ||
|
||||
!have->hasLeafNode(leaf->peekItem()->key(), leaf->getHash()))
|
||||
function(*root_);
|
||||
@@ -202,7 +204,8 @@ SHAMap::gmn_ProcessNodes(MissingNodes& mn, MissingNodes::StackEntry& se)
|
||||
mn.filter_,
|
||||
pending,
|
||||
[node, nodeID, branch, &mn](
|
||||
std::shared_ptr<SHAMapTreeNode> found, SHAMapHash const&) {
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode> found,
|
||||
SHAMapHash const&) {
|
||||
// a read completed asynchronously
|
||||
std::unique_lock<std::mutex> lock{mn.deferLock_};
|
||||
mn.finishedReads_.emplace_back(
|
||||
@@ -272,7 +275,7 @@ SHAMap::gmn_ProcessDeferredReads(MissingNodes& mn)
|
||||
SHAMapInnerNode*,
|
||||
SHAMapNodeID,
|
||||
int,
|
||||
std::shared_ptr<SHAMapTreeNode>>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>>
|
||||
deferredNode;
|
||||
{
|
||||
std::unique_lock<std::mutex> lock{mn.deferLock_};
|
||||
@@ -326,7 +329,7 @@ SHAMap::getMissingNodes(int max, SHAMapSyncFilter* filter)
|
||||
f_.getFullBelowCache(ledgerSeq_)->getGeneration());
|
||||
|
||||
if (!root_->isInner() ||
|
||||
std::static_pointer_cast<SHAMapInnerNode>(root_)->isFullBelow(
|
||||
intr_ptr::static_pointer_cast<SHAMapInnerNode>(root_)->isFullBelow(
|
||||
mn.generation_))
|
||||
{
|
||||
clearSynching();
|
||||
@@ -800,8 +803,9 @@ SHAMap::getProofPath(uint256 const& key) const
|
||||
}
|
||||
|
||||
if (auto const& node = stack.top().first; !node || node->isInner() ||
|
||||
std::static_pointer_cast<SHAMapLeafNode>(node)->peekItem()->key() !=
|
||||
key)
|
||||
intr_ptr::static_pointer_cast<SHAMapLeafNode>(node)
|
||||
->peekItem()
|
||||
->key() != key)
|
||||
{
|
||||
JLOG(journal_.debug()) << "no path to " << key;
|
||||
return {};
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/basics/IntrusivePointer.ipp>
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/basics/Slice.h>
|
||||
#include <ripple/basics/contract.h>
|
||||
@@ -36,7 +37,7 @@
|
||||
|
||||
namespace ripple {
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapTreeNode::makeTransaction(
|
||||
Slice data,
|
||||
SHAMapHash const& hash,
|
||||
@@ -46,12 +47,13 @@ SHAMapTreeNode::makeTransaction(
|
||||
sha512Half(HashPrefix::transactionID, data), data);
|
||||
|
||||
if (hashValid)
|
||||
return std::make_shared<SHAMapTxLeafNode>(std::move(item), 0, hash);
|
||||
return intr_ptr::make_shared<SHAMapTxLeafNode>(
|
||||
std::move(item), 0, hash);
|
||||
|
||||
return std::make_shared<SHAMapTxLeafNode>(std::move(item), 0);
|
||||
return intr_ptr::make_shared<SHAMapTxLeafNode>(std::move(item), 0);
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapTreeNode::makeTransactionWithMeta(
|
||||
Slice data,
|
||||
SHAMapHash const& hash,
|
||||
@@ -74,13 +76,13 @@ SHAMapTreeNode::makeTransactionWithMeta(
|
||||
auto item = std::make_shared<SHAMapItem const>(tag, s.slice());
|
||||
|
||||
if (hashValid)
|
||||
return std::make_shared<SHAMapTxPlusMetaLeafNode>(
|
||||
return intr_ptr::make_shared<SHAMapTxPlusMetaLeafNode>(
|
||||
std::move(item), 0, hash);
|
||||
|
||||
return std::make_shared<SHAMapTxPlusMetaLeafNode>(std::move(item), 0);
|
||||
return intr_ptr::make_shared<SHAMapTxPlusMetaLeafNode>(std::move(item), 0);
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapTreeNode::makeAccountState(
|
||||
Slice data,
|
||||
SHAMapHash const& hash,
|
||||
@@ -106,13 +108,14 @@ SHAMapTreeNode::makeAccountState(
|
||||
auto item = std::make_shared<SHAMapItem const>(tag, s.slice());
|
||||
|
||||
if (hashValid)
|
||||
return std::make_shared<SHAMapAccountStateLeafNode>(
|
||||
return intr_ptr::make_shared<SHAMapAccountStateLeafNode>(
|
||||
std::move(item), 0, hash);
|
||||
|
||||
return std::make_shared<SHAMapAccountStateLeafNode>(std::move(item), 0);
|
||||
return intr_ptr::make_shared<SHAMapAccountStateLeafNode>(
|
||||
std::move(item), 0);
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapTreeNode::makeFromWire(Slice rawNode)
|
||||
{
|
||||
if (rawNode.empty())
|
||||
@@ -144,7 +147,7 @@ SHAMapTreeNode::makeFromWire(Slice rawNode)
|
||||
"wire: Unknown type (" + std::to_string(type) + ")");
|
||||
}
|
||||
|
||||
std::shared_ptr<SHAMapTreeNode>
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>
|
||||
SHAMapTreeNode::makeFromPrefix(Slice rawNode, SHAMapHash const& hash)
|
||||
{
|
||||
if (rawNode.size() < 4)
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#ifndef RIPPLE_SHAMAP_TAGGEDPOINTER_H_INCLUDED
|
||||
#define RIPPLE_SHAMAP_TAGGEDPOINTER_H_INCLUDED
|
||||
|
||||
#include <ripple/basics/IntrusivePointer.h>
|
||||
#include <ripple/shamap/SHAMapTreeNode.h>
|
||||
|
||||
#include <cstdint>
|
||||
@@ -56,6 +57,7 @@ namespace ripple {
|
||||
*/
|
||||
class TaggedPointer
|
||||
{
|
||||
private:
|
||||
static_assert(
|
||||
alignof(SHAMapHash) >= 4,
|
||||
"Bad alignment: Tag pointer requires low two bits to be zero.");
|
||||
@@ -169,7 +171,7 @@ public:
|
||||
of each array.
|
||||
*/
|
||||
[[nodiscard]] std::
|
||||
tuple<std::uint8_t, SHAMapHash*, std::shared_ptr<SHAMapTreeNode>*>
|
||||
tuple<std::uint8_t, SHAMapHash*, intr_ptr::SharedPtr<SHAMapTreeNode>*>
|
||||
getHashesAndChildren() const;
|
||||
|
||||
/** Get the `hashes` array */
|
||||
@@ -177,7 +179,7 @@ public:
|
||||
getHashes() const;
|
||||
|
||||
/** Get the `children` array */
|
||||
[[nodiscard]] std::shared_ptr<SHAMapTreeNode>*
|
||||
[[nodiscard]] intr_ptr::SharedPtr<SHAMapTreeNode>*
|
||||
getChildren() const;
|
||||
|
||||
/** Call the `f` callback for all 16 (branchFactor) branches - even if
|
||||
|
||||
@@ -49,7 +49,7 @@ static_assert(
|
||||
// contains multiple chunks. This is the terminology the boost documentation
|
||||
// uses. Pools use "Simple Segregated Storage" as their storage format.
|
||||
constexpr size_t elementSizeBytes =
|
||||
(sizeof(SHAMapHash) + sizeof(std::shared_ptr<SHAMapTreeNode>));
|
||||
(sizeof(SHAMapHash) + sizeof(intr_ptr::SharedPtr<SHAMapTreeNode>));
|
||||
|
||||
constexpr size_t blockSizeBytes = kilobytes(512);
|
||||
|
||||
@@ -256,7 +256,7 @@ TaggedPointer::destroyHashesAndChildren()
|
||||
for (std::size_t i = 0; i < numAllocated; ++i)
|
||||
{
|
||||
hashes[i].~SHAMapHash();
|
||||
children[i].~shared_ptr<SHAMapTreeNode>();
|
||||
std::destroy_at(&children[i]);
|
||||
}
|
||||
|
||||
auto [tag, ptr] = decode();
|
||||
@@ -405,8 +405,10 @@ inline TaggedPointer::TaggedPointer(
|
||||
{
|
||||
// keep
|
||||
new (&dstHashes[dstIndex]) SHAMapHash{srcHashes[srcIndex]};
|
||||
new (&dstChildren[dstIndex]) std::shared_ptr<SHAMapTreeNode>{
|
||||
std::move(srcChildren[srcIndex])};
|
||||
|
||||
new (&dstChildren[dstIndex])
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>{
|
||||
std::move(srcChildren[srcIndex])};
|
||||
++dstIndex;
|
||||
++srcIndex;
|
||||
}
|
||||
@@ -418,7 +420,7 @@ inline TaggedPointer::TaggedPointer(
|
||||
{
|
||||
new (&dstHashes[dstIndex]) SHAMapHash{};
|
||||
new (&dstChildren[dstIndex])
|
||||
std::shared_ptr<SHAMapTreeNode>{};
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>{};
|
||||
++dstIndex;
|
||||
}
|
||||
}
|
||||
@@ -426,7 +428,8 @@ inline TaggedPointer::TaggedPointer(
|
||||
{
|
||||
// add
|
||||
new (&dstHashes[dstIndex]) SHAMapHash{};
|
||||
new (&dstChildren[dstIndex]) std::shared_ptr<SHAMapTreeNode>{};
|
||||
new (&dstChildren[dstIndex])
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>{};
|
||||
++dstIndex;
|
||||
if (srcIsDense)
|
||||
{
|
||||
@@ -440,7 +443,7 @@ inline TaggedPointer::TaggedPointer(
|
||||
{
|
||||
new (&dstHashes[dstIndex]) SHAMapHash{};
|
||||
new (&dstChildren[dstIndex])
|
||||
std::shared_ptr<SHAMapTreeNode>{};
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>{};
|
||||
++dstIndex;
|
||||
}
|
||||
if (srcIsDense)
|
||||
@@ -454,7 +457,7 @@ inline TaggedPointer::TaggedPointer(
|
||||
for (int i = dstIndex; i < dstNumAllocated; ++i)
|
||||
{
|
||||
new (&dstHashes[i]) SHAMapHash{};
|
||||
new (&dstChildren[i]) std::shared_ptr<SHAMapTreeNode>{};
|
||||
new (&dstChildren[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
|
||||
}
|
||||
*this = std::move(dst);
|
||||
}
|
||||
@@ -474,7 +477,7 @@ inline TaggedPointer::TaggedPointer(
|
||||
// allocate hashes and children, but do not run constructors
|
||||
TaggedPointer newHashesAndChildren{RawAllocateTag{}, toAllocate};
|
||||
SHAMapHash *newHashes, *oldHashes;
|
||||
std::shared_ptr<SHAMapTreeNode>*newChildren, *oldChildren;
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>*newChildren, *oldChildren;
|
||||
std::uint8_t newNumAllocated;
|
||||
// structured bindings can't be captured in c++ 17; use tie instead
|
||||
std::tie(newNumAllocated, newHashes, newChildren) =
|
||||
@@ -486,7 +489,7 @@ inline TaggedPointer::TaggedPointer(
|
||||
// new arrays are dense, old arrays are sparse
|
||||
iterNonEmptyChildIndexes(isBranch, [&](auto branchNum, auto indexNum) {
|
||||
new (&newHashes[branchNum]) SHAMapHash{oldHashes[indexNum]};
|
||||
new (&newChildren[branchNum]) std::shared_ptr<SHAMapTreeNode>{
|
||||
new (&newChildren[branchNum]) intr_ptr::SharedPtr<SHAMapTreeNode>{
|
||||
std::move(oldChildren[indexNum])};
|
||||
});
|
||||
// Run the constructors for the remaining elements
|
||||
@@ -495,7 +498,7 @@ inline TaggedPointer::TaggedPointer(
|
||||
if ((1 << i) & isBranch)
|
||||
continue;
|
||||
new (&newHashes[i]) SHAMapHash{};
|
||||
new (&newChildren[i]) std::shared_ptr<SHAMapTreeNode>{};
|
||||
new (&newChildren[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
|
||||
}
|
||||
}
|
||||
else
|
||||
@@ -506,7 +509,7 @@ inline TaggedPointer::TaggedPointer(
|
||||
new (&newHashes[curCompressedIndex])
|
||||
SHAMapHash{oldHashes[indexNum]};
|
||||
new (&newChildren[curCompressedIndex])
|
||||
std::shared_ptr<SHAMapTreeNode>{
|
||||
intr_ptr::SharedPtr<SHAMapTreeNode>{
|
||||
std::move(oldChildren[indexNum])};
|
||||
++curCompressedIndex;
|
||||
});
|
||||
@@ -514,7 +517,7 @@ inline TaggedPointer::TaggedPointer(
|
||||
for (int i = curCompressedIndex; i < newNumAllocated; ++i)
|
||||
{
|
||||
new (&newHashes[i]) SHAMapHash{};
|
||||
new (&newChildren[i]) std::shared_ptr<SHAMapTreeNode>{};
|
||||
new (&newChildren[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -528,7 +531,7 @@ inline TaggedPointer::TaggedPointer(std::uint8_t numChildren)
|
||||
for (std::size_t i = 0; i < numAllocated; ++i)
|
||||
{
|
||||
new (&hashes[i]) SHAMapHash{};
|
||||
new (&children[i]) std::shared_ptr<SHAMapTreeNode>{};
|
||||
new (&children[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -567,14 +570,15 @@ TaggedPointer::isDense() const
|
||||
}
|
||||
|
||||
[[nodiscard]] inline std::
|
||||
tuple<std::uint8_t, SHAMapHash*, std::shared_ptr<SHAMapTreeNode>*>
|
||||
tuple<std::uint8_t, SHAMapHash*, intr_ptr::SharedPtr<SHAMapTreeNode>*>
|
||||
TaggedPointer::getHashesAndChildren() const
|
||||
{
|
||||
auto const [tag, ptr] = decode();
|
||||
auto const hashes = reinterpret_cast<SHAMapHash*>(ptr);
|
||||
std::uint8_t numAllocated = boundaries[tag];
|
||||
auto const children = reinterpret_cast<std::shared_ptr<SHAMapTreeNode>*>(
|
||||
hashes + numAllocated);
|
||||
auto const children =
|
||||
reinterpret_cast<intr_ptr::SharedPtr<SHAMapTreeNode>*>(
|
||||
hashes + numAllocated);
|
||||
return {numAllocated, hashes, children};
|
||||
};
|
||||
|
||||
@@ -584,7 +588,7 @@ TaggedPointer::getHashes() const
|
||||
return reinterpret_cast<SHAMapHash*>(tp_ & ptrMask);
|
||||
};
|
||||
|
||||
[[nodiscard]] inline std::shared_ptr<SHAMapTreeNode>*
|
||||
[[nodiscard]] inline intr_ptr::SharedPtr<SHAMapTreeNode>*
|
||||
TaggedPointer::getChildren() const
|
||||
{
|
||||
auto [unused1, unused2, result] = getHashesAndChildren();
|
||||
|
||||
787
src/test/basics/IntrusiveShared_test.cpp
Normal file
@@ -0,0 +1,787 @@
|
||||
#include <ripple/basics/IntrusivePointer.ipp>
|
||||
#include <ripple/basics/IntrusiveRefCounts.h>
|
||||
#include <ripple/beast/unit_test.h>
|
||||
#include <ripple/beast/utility/Journal.h>
|
||||
#include <test/unit_test/SuiteJournal.h>
|
||||
|
||||
#include <array>
|
||||
#include <atomic>
|
||||
#include <barrier>
|
||||
#include <chrono>
|
||||
#include <latch>
|
||||
#include <random>
|
||||
#include <string>
|
||||
#include <thread>
|
||||
|
||||
namespace ripple {
|
||||
namespace tests {
|
||||
|
||||
namespace {
|
||||
enum class TrackedState : std::uint8_t {
|
||||
uninitialized,
|
||||
alive,
|
||||
partiallyDeletedStarted,
|
||||
partiallyDeleted,
|
||||
deletedStarted,
|
||||
deleted
|
||||
};
|
||||
|
||||
class TIBase : public IntrusiveRefCounts
|
||||
{
|
||||
public:
|
||||
static constexpr std::size_t maxStates = 128;
|
||||
static std::array<std::atomic<TrackedState>, maxStates> state;
|
||||
static std::atomic<int> nextId;
|
||||
static TrackedState
|
||||
getState(int id)
|
||||
{
|
||||
assert(id < state.size());
|
||||
return state[id].load(std::memory_order_acquire);
|
||||
}
|
||||
static void
|
||||
resetStates(bool resetCallback)
|
||||
{
|
||||
for (int i = 0; i < maxStates; ++i)
|
||||
{
|
||||
state[i].store(
|
||||
TrackedState::uninitialized, std::memory_order_release);
|
||||
}
|
||||
nextId.store(0, std::memory_order_release);
|
||||
if (resetCallback)
|
||||
TIBase::tracingCallback_ = [](TrackedState,
|
||||
std::optional<TrackedState>) {};
|
||||
}
|
||||
|
||||
struct ResetStatesGuard
|
||||
{
|
||||
bool resetCallback_{false};
|
||||
|
||||
ResetStatesGuard(bool resetCallback) : resetCallback_{resetCallback}
|
||||
{
|
||||
TIBase::resetStates(resetCallback_);
|
||||
}
|
||||
~ResetStatesGuard()
|
||||
{
|
||||
TIBase::resetStates(resetCallback_);
|
||||
}
|
||||
};
|
||||
|
||||
TIBase() : id_{checkoutID()}
|
||||
{
|
||||
assert(state.size() > id_);
|
||||
state[id_].store(TrackedState::alive, std::memory_order_relaxed);
|
||||
}
|
||||
~TIBase()
|
||||
{
|
||||
using enum TrackedState;
|
||||
|
||||
assert(state.size() > id_);
|
||||
tracingCallback_(
|
||||
state[id_].load(std::memory_order_relaxed), deletedStarted);
|
||||
|
||||
assert(state.size() > id_);
|
||||
// Use relaxed memory order to try to avoid atomic operations from
|
||||
// adding additional memory synchronizations that may hide threading
|
||||
// errors in the underlying shared pointer class.
|
||||
state[id_].store(deletedStarted, std::memory_order_relaxed);
|
||||
|
||||
tracingCallback_(deletedStarted, deleted);
|
||||
|
||||
assert(state.size() > id_);
|
||||
state[id_].store(TrackedState::deleted, std::memory_order_relaxed);
|
||||
|
||||
tracingCallback_(TrackedState::deleted, std::nullopt);
|
||||
}
|
||||
|
||||
void
|
||||
partialDestructor()
|
||||
{
|
||||
using enum TrackedState;
|
||||
|
||||
assert(state.size() > id_);
|
||||
tracingCallback_(
|
||||
state[id_].load(std::memory_order_relaxed),
|
||||
partiallyDeletedStarted);
|
||||
|
||||
assert(state.size() > id_);
|
||||
state[id_].store(partiallyDeletedStarted, std::memory_order_relaxed);
|
||||
|
||||
tracingCallback_(partiallyDeletedStarted, partiallyDeleted);
|
||||
|
||||
assert(state.size() > id_);
|
||||
state[id_].store(partiallyDeleted, std::memory_order_relaxed);
|
||||
|
||||
tracingCallback_(partiallyDeleted, std::nullopt);
|
||||
}
|
||||
|
||||
static std::function<void(TrackedState, std::optional<TrackedState>)>
|
||||
tracingCallback_;
|
||||
|
||||
int id_;
|
||||
|
||||
private:
|
||||
static int
|
||||
checkoutID()
|
||||
{
|
||||
return nextId.fetch_add(1, std::memory_order_acq_rel);
|
||||
}
|
||||
};
|
||||
|
||||
std::array<std::atomic<TrackedState>, TIBase::maxStates> TIBase::state;
std::atomic<int> TIBase::nextId{0};

std::function<void(TrackedState, std::optional<TrackedState>)>
    TIBase::tracingCallback_ = [](TrackedState, std::optional<TrackedState>) {};

}  // namespace

class IntrusiveShared_test : public beast::unit_test::suite
{
public:
    void
    testBasics()
    {
        testcase("Basics");

        {
            TIBase::ResetStatesGuard rsg{true};

            TIBase b;
            BEAST_EXPECT(b.use_count() == 1);
            b.addWeakRef();
            BEAST_EXPECT(b.use_count() == 1);
            auto a = b.releaseStrongRef();
            BEAST_EXPECT(a == ReleaseRefAction::partialDestroy);
            BEAST_EXPECT(b.use_count() == 0);
            TIBase* pb = &b;
            partialDestructorFinished(&pb);
            BEAST_EXPECT(!pb);
            a = b.releaseWeakRef();
            BEAST_EXPECT(a == ReleaseRefAction::destroy);
        }

        std::vector<SharedIntrusive<TIBase>> strong;
        std::vector<WeakIntrusive<TIBase>> weak;
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            auto b = make_SharedIntrusive<TIBase>();
            auto id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(b->use_count() == 1);
            for (int i = 0; i < 10; ++i)
            {
                strong.push_back(b);
            }
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            strong.resize(strong.size() - 1);
            BEAST_EXPECT(TIBase::getState(id) == alive);
            strong.clear();
            BEAST_EXPECT(TIBase::getState(id) == deleted);

            b = make_SharedIntrusive<TIBase>();
            id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(b->use_count() == 1);
            for (int i = 0; i < 10; ++i)
            {
                weak.push_back(b);
                BEAST_EXPECT(b->use_count() == 1);
            }
            BEAST_EXPECT(TIBase::getState(id) == alive);
            weak.resize(weak.size() - 1);
            BEAST_EXPECT(TIBase::getState(id) == alive);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            while (!weak.empty())
            {
                weak.resize(weak.size() - 1);
                if (weak.size())
                    BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            }
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            auto b = make_SharedIntrusive<TIBase>();
            auto id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            WeakIntrusive<TIBase> w{b};
            BEAST_EXPECT(TIBase::getState(id) == alive);
            auto s = w.lock();
            BEAST_EXPECT(s && s->use_count() == 2);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(s && s->use_count() == 1);
            s.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            BEAST_EXPECT(w.expired());
            s = w.lock();
            // Cannot convert a weak pointer to a strong pointer if object is
            // already partially deleted
            BEAST_EXPECT(!s);
            w.reset();
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            using swu = SharedWeakUnion<TIBase>;
            swu b = make_SharedIntrusive<TIBase>();
            BEAST_EXPECT(b.isStrong() && b.use_count() == 1);
            auto id = b.get()->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            swu w = b;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(w.isStrong() && b.use_count() == 2);
            w.convertToWeak();
            BEAST_EXPECT(w.isWeak() && b.use_count() == 1);
            swu s = w;
            BEAST_EXPECT(s.isWeak() && b.use_count() == 1);
            s.convertToStrong();
            BEAST_EXPECT(s.isStrong() && b.use_count() == 2);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(s.use_count() == 1);
            BEAST_EXPECT(!w.expired());
            s.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            BEAST_EXPECT(w.expired());
            w.convertToStrong();
            // Cannot convert a weak pointer to a strong pointer if object is
            // already partially deleted
            BEAST_EXPECT(w.isWeak());
            w.reset();
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
    }

    void
    testPartialDelete()
    {
        testcase("Partial Delete");

        // This test creates two threads. One with a strong pointer and one
        // with a weak pointer. The strong pointer is reset while the weak
        // pointer still holds a reference, triggering a partial delete.
        // While the partial delete function runs (a sleep is inserted) the
        // weak pointer is reset. The destructor should wait to run until
        // after the partial delete function has completed running.
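        //
        // Roughly, the interleaving this test drives is (t2 owns the strong
        // pointer, t1 owns the weak pointer):
        //   t2: strong.reset()      -> partial delete starts
        //   t1: weak.reset()        -> last reference dropped mid partial delete
        //   t2: partial delete ends -> only after this may the destructor run
        // The latch releases t1 only once the partial delete has begun, and
        // the sleep widens the window in which a buggy implementation could
        // run the destructor too early.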

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        auto strong = make_SharedIntrusive<TIBase>();
        WeakIntrusive<TIBase> weak{strong};
        bool destructorRan = false;
        bool partialDeleteRan = false;
        std::latch partialDeleteStartedSyncPoint{2};
        strong->tracingCallback_ = [&](TrackedState cur,
                                       std::optional<TrackedState> next) {
            using enum TrackedState;
            if (next == deletedStarted)
            {
                // strong goes out of scope while weak is still in scope.
                // This checks that partialDelete has run to completion
                // before the destructor is called. A sleep is inserted
                // inside the partial delete to make sure the destructor is
                // given an opportunity to run during the partial delete.
                BEAST_EXPECT(cur == partiallyDeleted);
            }
            if (next == partiallyDeletedStarted)
            {
                partialDeleteStartedSyncPoint.arrive_and_wait();
                using namespace std::chrono_literals;
                // Sleep and let the weak pointer go out of scope,
                // potentially triggering a destructor while partial delete
                // is running. The test is to make sure that doesn't happen.
                std::this_thread::sleep_for(800ms);
            }
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                partialDeleteRan = true;
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                destructorRan = true;
            }
        };
        std::thread t1{[&] {
            partialDeleteStartedSyncPoint.arrive_and_wait();
            weak.reset();  // Trigger a full delete as soon as the partial
                           // delete starts
        }};
        std::thread t2{[&] {
            strong.reset();  // Trigger a partial delete
        }};
        t1.join();
        t2.join();

        BEAST_EXPECT(destructorRan && partialDeleteRan);
    }

    void
    testDestructor()
    {
        testcase("Destructor");

        // This test creates two threads. One with a strong pointer and one
        // with a weak pointer. The weak pointer is reset while the strong
        // pointer still holds a reference. Then the strong pointer is
        // reset. Only the destructor should run. The partial destructor
        // should not be called. Since the weak reset runs to completion
        // before the strong pointer is reset, threading doesn't add much to
        // this test, but there is no harm in keeping it.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        auto strong = make_SharedIntrusive<TIBase>();
        WeakIntrusive<TIBase> weak{strong};
        bool destructorRan = false;
        bool partialDeleteRan = false;
        std::latch weakResetSyncPoint{2};
        strong->tracingCallback_ = [&](TrackedState cur,
                                       std::optional<TrackedState> next) {
            using enum TrackedState;
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                partialDeleteRan = true;
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                destructorRan = true;
            }
        };
        std::thread t1{[&] {
            weak.reset();
            weakResetSyncPoint.arrive_and_wait();
        }};
        std::thread t2{[&] {
            weakResetSyncPoint.arrive_and_wait();
            strong.reset();  // Trigger the delete; the weak pointer is
                             // already gone, so no partial delete is expected
        }};
        t1.join();
        t2.join();

        BEAST_EXPECT(destructorRan && !partialDeleteRan);
    }

    void
    testMultithreadedClearMixedVariant()
    {
        testcase("Multithreaded Clear Mixed Variant");

        // This test creates and destroys many strong and weak pointers in a
        // loop. There is a random mix of strong and weak pointers stored in
        // a vector (held as a variant). All threads clear all the pointers
        // and check that the invariants hold.

        using enum TrackedState;
        TIBase::ResetStatesGuard rsg{true};

        std::atomic<int> destructionState{0};
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void {
            destructionState.fetch_or(1, std::memory_order_acq_rel);
        };
        auto setPartialDeleteRan = [&]() -> void {
            destructionState.fetch_or(2, std::memory_order_acq_rel);
        };
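        // destructionState packs both flags into one atomic int so the
        // tracing callback can update them from any thread: bit 0 records
        // that the destructor ran, bit 1 that the partial destructor ran.
        // E.g. after setPartialDeleteRan() alone the value is 0b10 and
        // getDestructorState() returns {false, true}.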
        auto tracingCallback = [&](TrackedState cur,
                                   std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };
        auto createVecOfPointers = [&](auto const& toClone,
                                       std::default_random_engine& eng)
            -> std::vector<
                std::variant<SharedIntrusive<TIBase>, WeakIntrusive<TIBase>>> {
            std::vector<
                std::variant<SharedIntrusive<TIBase>, WeakIntrusive<TIBase>>>
                result;
            std::uniform_int_distribution<> toCreateDist(4, 64);
            std::uniform_int_distribution<> isStrongDist(0, 1);
            auto numToCreate = toCreateDist(eng);
            result.reserve(numToCreate);
            for (int i = 0; i < numToCreate; ++i)
            {
                if (isStrongDist(eng))
                {
                    result.push_back(SharedIntrusive<TIBase>(toClone));
                }
                else
                {
                    result.push_back(WeakIntrusive<TIBase>(toClone));
                }
            }
            return result;
        };
        constexpr int loopIters = 2 * 1024;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toClone;
        std::barrier loopStartSyncPoint{numThreads};
        std::barrier postCreateToCloneSyncPoint{numThreads};
        std::barrier postCreateVecOfPointersSyncPoint{numThreads};
        auto engines = [&]() -> std::vector<std::default_random_engine> {
            std::random_device rd;
            std::vector<std::default_random_engine> result;
            result.reserve(numThreads);
            for (int i = 0; i < numThreads; ++i)
                result.emplace_back(rd());
            return result;
        }();

        // cloneAndDestroy clones the strong pointer into a vector of mixed
        // strong and weak pointers and destroys them all at once.
        // threadId==0 is special.
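        // Per iteration: thread 0 publishes a fresh TIBase via toClone, every
        // thread clones it into its own random mix of strong/weak pointers,
        // drops its toClone slot, and then all threads clear their vectors
        // concurrently. The barriers keep those phases aligned, so the last
        // reference is always dropped somewhere inside the racy clear phase.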
        auto cloneAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // Thread 0 is the genesis thread. It creates the strong
                    // pointers to be cloned by the other threads. This
                    // thread will also check that the destructor ran and
                    // clear the temporary variables.

                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] =
                        getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toClone.clear();
                    toClone.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toClone.begin(), toClone.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToCloneSyncPoint.arrive_and_wait();

                auto v =
                    createVecOfPointers(toClone[threadId], engines[threadId]);
                toClone[threadId].reset();

                // ------ Sync Point ------
                postCreateVecOfPointersSyncPoint.arrive_and_wait();

                v.clear();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(cloneAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }

    void
    testMultithreadedClearMixedUnion()
    {
        testcase("Multithreaded Clear Mixed Union");

        // This test creates and destroys many SharedWeak pointers in a
        // loop. All the pointers start as strong, and a loop randomly
        // converts them between strong and weak pointers. All threads clear
        // all the pointers and check that the invariants hold.
        //
        // Note: This test also differs from the test above in that the
        // pointers randomly change from strong to weak and from weak to
        // strong in a loop. This can't be done in the variant test above
        // because variant is not thread safe, while the SharedWeakUnion is
        // thread safe.
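        //
        // As exercised in testBasics above: convertToWeak() drops the
        // pointer's strong count while keeping weak access to the object,
        // and convertToStrong() re-acquires a strong reference only if the
        // object has not already been partially deleted (otherwise the
        // pointer stays weak).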

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        std::atomic<int> destructionState{0};
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void {
            destructionState.fetch_or(1, std::memory_order_acq_rel);
        };
        auto setPartialDeleteRan = [&]() -> void {
            destructionState.fetch_or(2, std::memory_order_acq_rel);
        };
        auto tracingCallback = [&](TrackedState cur,
                                   std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };
        auto createVecOfPointers = [&](auto const& toClone,
                                       std::default_random_engine& eng)
            -> std::vector<SharedWeakUnion<TIBase>> {
            std::vector<SharedWeakUnion<TIBase>> result;
            std::uniform_int_distribution<> toCreateDist(4, 64);
            auto numToCreate = toCreateDist(eng);
            result.reserve(numToCreate);
            for (int i = 0; i < numToCreate; ++i)
                result.push_back(SharedIntrusive<TIBase>(toClone));
            return result;
        };
        constexpr int loopIters = 2 * 1024;
        constexpr int flipPointersLoopIters = 256;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toClone;
        std::barrier loopStartSyncPoint{numThreads};
        std::barrier postCreateToCloneSyncPoint{numThreads};
        std::barrier postCreateVecOfPointersSyncPoint{numThreads};
        std::barrier postFlipPointersLoopSyncPoint{numThreads};
        auto engines = [&]() -> std::vector<std::default_random_engine> {
            std::random_device rd;
            std::vector<std::default_random_engine> result;
            result.reserve(numThreads);
            for (int i = 0; i < numThreads; ++i)
                result.emplace_back(rd());
            return result;
        }();

        // cloneAndDestroy clones the strong pointer into a vector of
        // mixed strong and weak pointers, runs a loop that randomly
        // flips the pointers between strong and weak, and destroys them
        // all at once.
        auto cloneAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // threadId 0 is the genesis thread. It creates the
                    // strong pointer to be cloned by the other threads. This
                    // thread will also check that the destructor ran and
                    // clear the temporary variables.
                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] =
                        getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toClone.clear();
                    toClone.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toClone.begin(), toClone.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToCloneSyncPoint.arrive_and_wait();

                auto v =
                    createVecOfPointers(toClone[threadId], engines[threadId]);
                toClone[threadId].reset();

                // ------ Sync Point ------
                postCreateVecOfPointersSyncPoint.arrive_and_wait();

                std::uniform_int_distribution<> isStrongDist(0, 1);
                for (int f = 0; f < flipPointersLoopIters; ++f)
                {
                    for (auto& p : v)
                    {
                        if (isStrongDist(engines[threadId]))
                        {
                            p.convertToStrong();
                        }
                        else
                        {
                            p.convertToWeak();
                        }
                    }
                }

                // ------ Sync Point ------
                postFlipPointersLoopSyncPoint.arrive_and_wait();

                v.clear();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(cloneAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }

    void
    testMultithreadedLockingWeak()
    {
        testcase("Multithreaded Locking Weak");

        // This test creates a single shared pointer that multiple threads
        // create weak pointers from. The threads then lock the weak
        // pointers. All threads clear their pointers and check that the
        // invariants hold.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        std::atomic<int> destructionState{0};
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void {
            destructionState.fetch_or(1, std::memory_order_acq_rel);
        };
        auto setPartialDeleteRan = [&]() -> void {
            destructionState.fetch_or(2, std::memory_order_acq_rel);
        };
        auto tracingCallback = [&](TrackedState cur,
                                   std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };

        constexpr int loopIters = 2 * 1024;
        constexpr int lockWeakLoopIters = 256;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toLock;
        std::barrier loopStartSyncPoint{numThreads};
        std::barrier postCreateToLockSyncPoint{numThreads};
        std::barrier postLockWeakLoopSyncPoint{numThreads};

        // lockAndDestroy creates weak pointers from the strong pointer
        // and runs a loop that locks the weak pointer. At the end of the loop
        // all the pointers are destroyed all at once.
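        // Each thread keeps its own strong pointer in toLock until after the
        // lock loop, so the object always has at least one strong reference
        // while the weak pointers are being locked; every lock() is therefore
        // expected to succeed and expired() to stay false.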
        auto lockAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // threadId 0 is the genesis thread. It creates the
                    // strong pointer to be locked by the other threads. This
                    // thread will also check that the destructor ran and
                    // clear the temporary variables.
                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] =
                        getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toLock.clear();
                    toLock.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toLock.begin(), toLock.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToLockSyncPoint.arrive_and_wait();

                // Multiple threads all create a weak pointer from the same
                // strong pointer
                WeakIntrusive weak{toLock[threadId]};
                for (int wi = 0; wi < lockWeakLoopIters; ++wi)
                {
                    BEAST_EXPECT(!weak.expired());
                    auto strong = weak.lock();
                    BEAST_EXPECT(strong);
                }

                // ------ Sync Point ------
                postLockWeakLoopSyncPoint.arrive_and_wait();

                toLock[threadId].reset();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(lockAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }

    void
    run() override
    {
        testBasics();
        testPartialDelete();
        testDestructor();
        testMultithreadedClearMixedVariant();
        testMultithreadedClearMixedUnion();
        testMultithreadedLockingWeak();
    }
};

BEAST_DEFINE_TESTSUITE(IntrusiveShared, ripple_basics, ripple);
} // namespace tests
} // namespace ripple
@@ -18,6 +18,7 @@
//==============================================================================

#include <ripple/basics/TaggedCache.h>
#include <ripple/basics/TaggedCache.ipp>
#include <ripple/basics/chrono.h>
#include <ripple/beast/clock/manual_clock.h>
#include <ripple/beast/unit_test.h>

@@ -18,6 +18,7 @@
//==============================================================================

#include <ripple/basics/TaggedCache.h>
#include <ripple/basics/TaggedCache.ipp>
#include <ripple/basics/chrono.h>
#include <ripple/beast/clock/manual_clock.h>
#include <ripple/beast/unit_test.h>