IntrusiveRefCounts.h
#ifndef XRPL_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED
#define XRPL_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED

#include <xrpl/beast/utility/instrumentation.h>

#include <atomic>
#include <cstddef>
#include <cstdint>

namespace ripple {

/** Action to perform when releasing a strong pointer. */
enum class ReleaseStrongRefAction { noop, partialDestroy, destroy };

/** Action to perform when releasing a weak pointer. */
enum class ReleaseWeakRefAction { noop, destroy };

/** Implement the strong count, weak count, and bit flags for an intrusive
    pointer.
 */
struct IntrusiveRefCounts
{
    virtual ~IntrusiveRefCounts() noexcept;

    // This must be `noexcept` or the make_SharedIntrusive function could leak
    // memory.
    void
    addStrongRef() const noexcept;

    void
    addWeakRef() const noexcept;

    ReleaseStrongRefAction
    releaseStrongRef() const;

    // Same as:
    // {
    //     addWeakRef();
    //     return releaseStrongRef();
    // }
    // done as one atomic operation
    ReleaseStrongRefAction
    addWeakReleaseStrongRef() const;

    ReleaseWeakRefAction
    releaseWeakRef() const;

    // Returns true if able to check out a strong ref, false otherwise.
    bool
    checkoutStrongRefFromWeak() const noexcept;

    bool
    expired() const noexcept;

    std::size_t
    use_count() const noexcept;

    // This function MUST be called after a partial destructor finishes running.
    // Calling this function may cause other threads to delete the object
    // pointed to by `o`, so `o` should never be used after calling this
    // function. The parameter will be set to a `nullptr` after calling this
    // function to emphasize that it should not be used.
    // Note: This is intentionally NOT called at the end of `partialDestructor`.
    // The reason for this is if new classes are written to support this smart
    // pointer class, they need to write their own `partialDestructor` function
    // and ensure `partialDestructorFinished` is called at the end. Putting this
    // call inside the smart pointer class itself is expected to be less error
    // prone.
    // Note: The "two-star" programming is intentional. It emphasizes that `o`
    // may be deleted and the unergonomic API is meant to signal the special
    // nature of this function call to callers.
    // Note: This is a template to support incompletely defined classes.
    template <class T>
    friend void
    partialDestructorFinished(T** o);

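    // Illustrative sketch (not part of this header): a hypothetical type
    // `Blob` that supports partial destruction might look like the following.
    // The name `Blob` and its members are assumptions for illustration only.
    //
    //     struct Blob : IntrusiveRefCounts
    //     {
    //         std::vector<int> data;
    //
    //         // Release expensive resources while weak refs may still exist.
    //         void
    //         partialDestructor()
    //         {
    //             data.clear();
    //         }
    //     };
    //
    //     // The owning smart pointer (not this class) is expected to call:
    //     //   Blob* p = ...;
    //     //   p->partialDestructor();
    //     //   partialDestructorFinished(&p);
    //     //   // `p` is now nullptr and must not be used.
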
private:
    // TODO: We may need to use a uint64_t for both counts. This will reduce
    // the memory savings. We need to audit the code to make sure 16 bit counts
    // are enough for strong pointers and 14 bit counts are enough for weak
    // pointers. Use type aliases to make it easy to switch types.
    using CountType = std::uint16_t;
    static constexpr size_t StrongCountNumBits = sizeof(CountType) * 8;
    static constexpr size_t WeakCountNumBits = StrongCountNumBits - 2;
    using FieldType = std::uint32_t;
    static constexpr size_t FieldTypeBits = sizeof(FieldType) * 8;
    static constexpr FieldType one = 1;

    /** refCounts consists of four fields that are treated atomically: the
        strong count, the weak count, the `partialDestroyStartedBit`, and the
        `partialDestroyFinishedBit`.
     */
    mutable std::atomic<FieldType> refCounts{strongDelta};

    /** Amount to change the strong count when adding or releasing a reference.
     */
    static constexpr FieldType strongDelta = 1;

    /** Amount to change the weak count when adding or releasing a reference.
     */
    static constexpr FieldType weakDelta = (one << StrongCountNumBits);

    /** Flag that is set when the partialDestroy function has started running
        (or is about to start running).
     */
    static constexpr FieldType partialDestroyStartedMask =
        (one << (FieldTypeBits - 1));

    /** Flag that is set when the partialDestroy function has finished running.
     */
    static constexpr FieldType partialDestroyFinishedMask =
        (one << (FieldTypeBits - 2));

    /** Mask that will zero out all the count bits and leave the tag bits
        unchanged.
     */
    static constexpr FieldType tagMask =
        partialDestroyStartedMask | partialDestroyFinishedMask;

    /** Mask that will zero out the tag bits and leave the count bits unchanged.
     */
    static constexpr FieldType valueMask = ~tagMask;

    /** Mask that will zero out everything except the strong count.
     */
    static constexpr FieldType strongMask =
        ((one << StrongCountNumBits) - 1) & valueMask;

    /** Mask that will zero out everything except the weak count.
     */
    static constexpr FieldType weakMask =
        (((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask;

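    // Illustrative layout with the CountType/FieldType sizes assumed above
    // (16-bit counts packed into a 32-bit field); not asserted by this header:
    //
    //     bit 31       : partialDestroyStartedMask
    //     bit 30       : partialDestroyFinishedMask
    //     bits 16..29  : weak count
    //     bits  0..15  : strong count
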
    /** Unpack the count and tag fields from the packed atomic integer form. */
    struct RefCountPair
    {
        CountType strong;
        CountType weak;
        /** The partialDestroyStartedBit is set to on when the partial destroy
            function is started.
         */
        FieldType partialDestroyStartedBit{0};
        /** The partialDestroyFinishedBit is set to on when the partial destroy
            function has finished.
         */
        FieldType partialDestroyFinishedBit{0};

        RefCountPair(FieldType v) noexcept;
        RefCountPair(CountType s, CountType w) noexcept;

        /** Convert back to the packed integer form. */
        FieldType
        combinedValue() const noexcept;

        static constexpr CountType maxStrongValue =
            static_cast<CountType>((one << StrongCountNumBits) - 1);
        static constexpr CountType maxWeakValue =
            static_cast<CountType>((one << WeakCountNumBits) - 1);
        /** Put an extra margin to detect when running up against limits. */
        static constexpr CountType checkStrongMaxValue = maxStrongValue - 32;
        static constexpr CountType checkWeakMaxValue = maxWeakValue - 32;
    };
};

inline void
IntrusiveRefCounts::addStrongRef() const noexcept
{
    refCounts.fetch_add(strongDelta, std::memory_order_acq_rel);
}

inline void
IntrusiveRefCounts::addWeakRef() const noexcept
{
    refCounts.fetch_add(weakDelta, std::memory_order_acq_rel);
}

inline ReleaseStrongRefAction
IntrusiveRefCounts::releaseStrongRef() const
{
    // Subtract `strongDelta` from refCounts. If this releases the last strong
    // ref, set the `partialDestroyStarted` bit. It is important that the ref
    // count and the `partialDestroyStartedBit` are changed atomically (hence
    // the loop and `compare_exchange` op). If this didn't need to be done
    // atomically, the loop could be replaced with a `fetch_sub` and a
    // conditional `fetch_or`. This loop will almost always run once.

    using enum ReleaseStrongRefAction;
    auto prevIntVal = refCounts.load(std::memory_order_acquire);
    while (1)
    {
        RefCountPair const prevVal{prevIntVal};
        XRPL_ASSERT(
            (prevVal.strong >= strongDelta),
            "ripple::IntrusiveRefCounts::releaseStrongRef : previous ref "
            "higher than new");
        auto nextIntVal = prevIntVal - strongDelta;
        ReleaseStrongRefAction action = noop;
        if (prevVal.strong == 1)
        {
            if (prevVal.weak == 0)
            {
                action = destroy;
            }
            else
            {
                nextIntVal |= partialDestroyStartedMask;
                action = partialDestroy;
            }
        }

        if (refCounts.compare_exchange_weak(
                prevIntVal, nextIntVal, std::memory_order_acq_rel))
        {
            // Can't be in partial destroy because only decrementing the strong
            // count to zero can start a partial destroy, and that can't happen
            // twice.
            XRPL_ASSERT(
                (action == noop) || !(prevIntVal & partialDestroyStartedMask),
                "ripple::IntrusiveRefCounts::releaseStrongRef : not in partial "
                "destroy");
            return action;
        }
    }
}
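
// Illustrative sketch of how an owning smart pointer might act on the result
// of releaseStrongRef(). The type `T` and pointer `p` are assumptions for
// illustration; the actual wrapper class lives elsewhere in the codebase.
//
//     T* p = ...;
//     switch (p->releaseStrongRef())
//     {
//         case ReleaseStrongRefAction::noop:
//             break;
//         case ReleaseStrongRefAction::partialDestroy:
//             p->partialDestructor();
//             partialDestructorFinished(&p);
//             // `p` is nullptr here and must not be used.
//             break;
//         case ReleaseStrongRefAction::destroy:
//             delete p;
//             break;
//     }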

inline ReleaseStrongRefAction
IntrusiveRefCounts::addWeakReleaseStrongRef() const
{
    using enum ReleaseStrongRefAction;

    static_assert(weakDelta > strongDelta);
    auto constexpr delta = weakDelta - strongDelta;
    auto prevIntVal = refCounts.load(std::memory_order_acquire);
    // This loop will almost always run once. The loop is needed to atomically
    // change the counts and flags (the count could be atomically changed, but
    // the flags depend on the current value of the counts).
    //
    // Note: If this becomes a perf bottleneck, the `partialDestroyStartedMask`
    // may be able to be set non-atomically. But it is easier to reason about
    // the code if the flag is set atomically.
    while (1)
    {
        RefCountPair const prevVal{prevIntVal};
        // Converted the last strong pointer to a weak pointer.
        //
        // Can't be in partial destroy because only decrementing the
        // strong count to zero can start a partial destroy, and that
        // can't happen twice.
        XRPL_ASSERT(
            (!prevVal.partialDestroyStartedBit),
            "ripple::IntrusiveRefCounts::addWeakReleaseStrongRef : not in "
            "partial destroy");

        auto nextIntVal = prevIntVal + delta;
        ReleaseStrongRefAction action = noop;
        if (prevVal.strong == 1)
        {
            if (prevVal.weak == 0)
            {
                action = noop;
            }
            else
            {
                nextIntVal |= partialDestroyStartedMask;
                action = partialDestroy;
            }
        }
        if (refCounts.compare_exchange_weak(
                prevIntVal, nextIntVal, std::memory_order_acq_rel))
        {
            XRPL_ASSERT(
                (!(prevIntVal & partialDestroyStartedMask)),
                "ripple::IntrusiveRefCounts::addWeakReleaseStrongRef : not "
                "started partial destroy");
            return action;
        }
    }
}

inline ReleaseWeakRefAction
IntrusiveRefCounts::releaseWeakRef() const
{
    auto prevIntVal = refCounts.fetch_sub(weakDelta, std::memory_order_acq_rel);
    RefCountPair prev = prevIntVal;
    if (prev.weak == 1 && prev.strong == 0)
    {
        if (!prev.partialDestroyStartedBit)
        {
            // This case should only be hit if the partialDestroyStartedBit is
            // set non-atomically (and even then very rarely). The code is kept
            // in case we need to set the flag non-atomically for perf reasons.
            refCounts.wait(prevIntVal, std::memory_order_acquire);
            prevIntVal = refCounts.load(std::memory_order_acquire);
            prev = RefCountPair{prevIntVal};
        }
        if (!prev.partialDestroyFinishedBit)
        {
            // partial destroy MUST finish before running a full destroy (when
            // using weak pointers)
            refCounts.wait(prevIntVal - weakDelta, std::memory_order_acquire);
        }
        return ReleaseWeakRefAction::destroy;
    }
    return ReleaseWeakRefAction::noop;
}
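
// Illustrative sketch of how a weak-pointer wrapper might act on the result
// of releaseWeakRef(). `T` and `p` are assumptions for illustration only.
//
//     T* p = ...;
//     if (p->releaseWeakRef() == ReleaseWeakRefAction::destroy)
//         delete p;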

inline bool
IntrusiveRefCounts::checkoutStrongRefFromWeak() const noexcept
{
    auto curValue = RefCountPair{1, 1}.combinedValue();
    auto desiredValue = RefCountPair{2, 1}.combinedValue();

    while (!refCounts.compare_exchange_weak(
        curValue, desiredValue, std::memory_order_acq_rel))
    {
        RefCountPair const prev{curValue};
        if (!prev.strong)
            return false;

        desiredValue = curValue + strongDelta;
    }
    return true;
}
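
// Illustrative sketch of how a weak pointer's `lock`-like operation might use
// checkoutStrongRefFromWeak(). `T`, `p`, and the return convention are
// assumptions for illustration only.
//
//     T* lockedOrNull(T* p)
//     {
//         if (p && p->checkoutStrongRefFromWeak())
//             return p;  // now holds an additional strong ref
//         return nullptr;
//     }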

inline bool
IntrusiveRefCounts::expired() const noexcept
{
    RefCountPair const val{refCounts.load(std::memory_order_acquire)};
    return val.strong == 0;
}

inline std::size_t
IntrusiveRefCounts::use_count() const noexcept
{
    RefCountPair const val{refCounts.load(std::memory_order_acquire)};
    return val.strong;
}

inline IntrusiveRefCounts::~IntrusiveRefCounts() noexcept
{
#ifndef NDEBUG
    auto v = refCounts.load(std::memory_order_acquire);
    XRPL_ASSERT(
        (!(v & valueMask)),
        "ripple::IntrusiveRefCounts::~IntrusiveRefCounts : count must be zero");
    auto t = v & tagMask;
    XRPL_ASSERT(
        (!t || t == tagMask),
        "ripple::IntrusiveRefCounts::~IntrusiveRefCounts : valid tag");
#endif
}

//------------------------------------------------------------------------------

inline IntrusiveRefCounts::RefCountPair::RefCountPair(FieldType v) noexcept
    : strong{static_cast<CountType>(v & strongMask)}
    , weak{static_cast<CountType>((v & weakMask) >> StrongCountNumBits)}
    , partialDestroyStartedBit{v & partialDestroyStartedMask}
    , partialDestroyFinishedBit{v & partialDestroyFinishedMask}
{
    XRPL_ASSERT(
        (strong < checkStrongMaxValue && weak < checkWeakMaxValue),
        "ripple::IntrusiveRefCounts::RefCountPair(FieldType) : inputs inside "
        "range");
}

inline IntrusiveRefCounts::RefCountPair::RefCountPair(
    CountType s,
    CountType w) noexcept
    : strong{s}, weak{w}
{
    XRPL_ASSERT(
        (strong < checkStrongMaxValue && weak < checkWeakMaxValue),
        "ripple::IntrusiveRefCounts::RefCountPair(CountType, CountType) : "
        "inputs inside range");
}

inline IntrusiveRefCounts::FieldType
IntrusiveRefCounts::RefCountPair::combinedValue() const noexcept
{
    XRPL_ASSERT(
        (strong < checkStrongMaxValue && weak < checkWeakMaxValue),
        "ripple::IntrusiveRefCounts::RefCountPair::combinedValue : inputs "
        "inside range");
    return (static_cast<IntrusiveRefCounts::FieldType>(weak)
            << IntrusiveRefCounts::StrongCountNumBits) |
        static_cast<IntrusiveRefCounts::FieldType>(strong) |
        partialDestroyStartedBit | partialDestroyFinishedBit;
}
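
// Worked example of the packing, assuming 16-bit counts in a 32-bit field as
// above: RefCountPair{2, 1}.combinedValue() is (1 << 16) | 2 = 0x00010002,
// i.e. weak count 1 in the upper count bits and strong count 2 in the lower
// bits, with both tag bits clear.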

template <class T>
inline void
partialDestructorFinished(T** o)
{
    T& self = **o;
    IntrusiveRefCounts::RefCountPair p =
        self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask);
    XRPL_ASSERT(
        (!p.partialDestroyFinishedBit && p.partialDestroyStartedBit &&
         !p.strong),
        "ripple::partialDestructorFinished : not a weak ref");
    if (!p.weak)
    {
        // There was a weak count before the partial destructor ran (or we
        // would have run the full destructor) and now there isn't a weak
        // count. Some thread is waiting to run the destructor.
        self.refCounts.notify_one();
    }
    // Set the pointer to null to emphasize that the object shouldn't be used
    // after calling this function as it may be destroyed in another thread.
    *o = nullptr;
}
//------------------------------------------------------------------------------

}  // namespace ripple
#endif