IntrusiveShared_test.cpp
#include <test/unit_test/SuiteJournal.h>

#include <xrpl/basics/IntrusivePointer.ipp>
#include <xrpl/basics/IntrusiveRefCounts.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/beast/utility/Journal.h>

#include <algorithm>
#include <array>
#include <atomic>
#include <barrier>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <latch>
#include <mutex>
#include <optional>
#include <random>
#include <string>
#include <thread>
#include <utility>
#include <variant>
#include <vector>

namespace xrpl {
namespace tests {

// Barrier implemented with a mutex and condition_variable.
// Experimentally, we discovered that using std::barrier performs extremely
// poorly (~1 hour vs ~1 minute), so this implementation is used instead.
struct Barrier
{
    std::mutex mtx;
    std::condition_variable cv;
    int count;
    int const initial;

    Barrier(int n) : count(n), initial(n)
    {
    }

    void
    arrive_and_wait()
    {
        std::unique_lock<std::mutex> lock{mtx};
        if (--count == 0)
        {
            count = initial;
            cv.notify_all();
        }
        else
        {
            cv.wait(lock, [&] { return count == initial; });
        }
    }
};
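
// A minimal usage sketch (illustrative only, not part of the test): N threads
// share one Barrier{N} and each calls arrive_and_wait(); all of them block
// until the Nth arrival, which resets the count and wakes the group, so the
// same barrier can be reused for the next phase. For example:
//
//     Barrier barrier{2};
//     std::thread t{[&] { barrier.arrive_and_wait(); }};
//     barrier.arrive_and_wait();  // both threads proceed together
//     t.join();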

namespace {
enum class TrackedState : std::uint8_t {
    uninitialized,
    alive,
    partiallyDeletedStarted,
    partiallyDeleted,
    deletedStarted,
    deleted
};
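
// The lifecycle transitions exercised by the tests below:
//
//   alive -> deletedStarted -> deleted
//     (last strong ref dropped with no weak refs outstanding)
//
//   alive -> partiallyDeletedStarted -> partiallyDeleted
//         -> deletedStarted -> deleted
//     (last strong ref dropped while weak refs remain: partialDestructor()
//      runs first; the destructor runs when the last weak ref is dropped)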

class TIBase : public IntrusiveRefCounts
{
public:
    static constexpr std::size_t maxStates = 128;
    static std::array<std::atomic<TrackedState>, maxStates> state;
    static std::atomic<int> nextId;

    static TrackedState
    getState(int id)
    {
        assert(id < state.size());
        return state[id].load(std::memory_order_acquire);
    }

    static void
    resetStates(bool resetCallback)
    {
        for (std::size_t i = 0; i < maxStates; ++i)
        {
            state[i].store(TrackedState::uninitialized, std::memory_order_release);
        }
        if (resetCallback)
            TIBase::tracingCallback_ = [](TrackedState, std::optional<TrackedState>) {};
    }

    // RAII guard that resets the tracked states (and optionally the tracing
    // callback) both when the guard is created and when it goes out of scope.
    struct ResetStatesGuard
    {
        bool resetCallback_{false};

        ResetStatesGuard(bool resetCallback) : resetCallback_{resetCallback}
        {
            TIBase::resetStates(resetCallback_);
        }
        ~ResetStatesGuard()
        {
            TIBase::resetStates(resetCallback_);
        }
    };

    TIBase() : id_{checkoutID()}
    {
        assert(state.size() > id_);
        state[id_].store(TrackedState::alive, std::memory_order_relaxed);
    }
    ~TIBase()
    {
        using enum TrackedState;

        assert(state.size() > id_);
        tracingCallback_(state[id_].load(std::memory_order_relaxed), deletedStarted);

        // Use relaxed memory order so these atomic operations do not add
        // memory synchronization of their own, which could hide threading
        // errors in the underlying shared pointer class.
        assert(state.size() > id_);
        state[id_].store(deletedStarted, std::memory_order_relaxed);

        tracingCallback_(deletedStarted, deleted);

        assert(state.size() > id_);
        state[id_].store(deleted, std::memory_order_relaxed);

        tracingCallback_(deleted, std::nullopt);
    }

    void
    partialDestructor()
    {
        using enum TrackedState;

        assert(state.size() > id_);
        tracingCallback_(state[id_].load(std::memory_order_relaxed), partiallyDeletedStarted);

        assert(state.size() > id_);
        state[id_].store(partiallyDeletedStarted, std::memory_order_relaxed);

        tracingCallback_(partiallyDeletedStarted, partiallyDeleted);

        assert(state.size() > id_);
        state[id_].store(partiallyDeleted, std::memory_order_relaxed);

        tracingCallback_(partiallyDeleted, std::nullopt);
    }

    static std::function<void(TrackedState, std::optional<TrackedState>)> tracingCallback_;

    int id_;

private:
    static int
    checkoutID()
    {
        return nextId.fetch_add(1, std::memory_order_acq_rel);
    }
};

std::array<std::atomic<TrackedState>, TIBase::maxStates> TIBase::state;
std::atomic<int> TIBase::nextId{0};

std::function<void(TrackedState, std::optional<TrackedState>)> TIBase::tracingCallback_ =
    [](TrackedState, std::optional<TrackedState>) {};

} // namespace
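
// Note on the invariants checked below: use_count() reports strong references
// only, so creating or destroying weak references must not change it. A
// partial delete may begin only once the strong count reaches zero, and the
// full destructor may run only after any in-progress partial delete has
// finished.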

class IntrusiveShared_test : public beast::unit_test::suite
{
public:
    void
    testBasics()
    {
        testcase("Basics");

        {
            TIBase::ResetStatesGuard rsg{true};

            TIBase b;
            BEAST_EXPECT(b.use_count() == 1);
            b.addWeakRef();
            BEAST_EXPECT(b.use_count() == 1);
            auto s = b.releaseStrongRef();
            BEAST_EXPECT(s == ReleaseStrongRefAction::partialDestroy);
            BEAST_EXPECT(b.use_count() == 0);
            TIBase* pb = &b;
            partialDestructorFinished(&pb);
            BEAST_EXPECT(!pb);
            auto w = b.releaseWeakRef();
            BEAST_EXPECT(w == ReleaseWeakRefAction::destroy);
        }

        std::vector<SharedIntrusive<TIBase>> strong;
        std::vector<WeakIntrusive<TIBase>> weak;
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            auto b = make_SharedIntrusive<TIBase>();
            auto id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(b->use_count() == 1);
            for (int i = 0; i < 10; ++i)
            {
                strong.push_back(b);
            }
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            strong.resize(strong.size() - 1);
            BEAST_EXPECT(TIBase::getState(id) == alive);
            strong.clear();
            BEAST_EXPECT(TIBase::getState(id) == deleted);

            b = make_SharedIntrusive<TIBase>();
            id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(b->use_count() == 1);
            for (int i = 0; i < 10; ++i)
            {
                weak.push_back(b);
                BEAST_EXPECT(b->use_count() == 1);
            }
            BEAST_EXPECT(TIBase::getState(id) == alive);
            weak.resize(weak.size() - 1);
            BEAST_EXPECT(TIBase::getState(id) == alive);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            while (!weak.empty())
            {
                weak.resize(weak.size() - 1);
                if (weak.size())
                    BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            }
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            auto b = make_SharedIntrusive<TIBase>();
            auto id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            WeakIntrusive<TIBase> w{b};
            BEAST_EXPECT(TIBase::getState(id) == alive);
            auto s = w.lock();
            BEAST_EXPECT(s && s->use_count() == 2);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(s && s->use_count() == 1);
            s.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            BEAST_EXPECT(w.expired());
            s = w.lock();
            // Cannot convert a weak pointer to a strong pointer if the
            // object is already partially deleted
            BEAST_EXPECT(!s);
            w.reset();
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            using swu = SharedWeakUnion<TIBase>;
            swu b = make_SharedIntrusive<TIBase>();
            BEAST_EXPECT(b.isStrong() && b.use_count() == 1);
            auto id = b.get()->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            swu w = b;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(w.isStrong() && b.use_count() == 2);
            w.convertToWeak();
            BEAST_EXPECT(w.isWeak() && b.use_count() == 1);
            swu s = w;
            BEAST_EXPECT(s.isWeak() && b.use_count() == 1);
            s.convertToStrong();
            BEAST_EXPECT(s.isStrong() && b.use_count() == 2);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(s.use_count() == 1);
            BEAST_EXPECT(!w.expired());
            s.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            BEAST_EXPECT(w.expired());
            w.convertToStrong();
            // Cannot convert a weak pointer to a strong pointer if the
            // object is already partially deleted
            BEAST_EXPECT(w.isWeak());
            w.reset();
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            // Testing the SharedWeakUnion assignment operator

            TIBase::ResetStatesGuard rsg{true};

            auto strong1 = make_SharedIntrusive<TIBase>();
            auto strong2 = make_SharedIntrusive<TIBase>();

            auto id1 = strong1->id_;
            auto id2 = strong2->id_;

            BEAST_EXPECT(id1 != id2);

            SharedWeakUnion<TIBase> union1 = strong1;
            SharedWeakUnion<TIBase> union2 = strong2;

            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(union2.isStrong());
            BEAST_EXPECT(union1.get() == strong1.get());
            BEAST_EXPECT(union2.get() == strong2.get());

            // 1) Normal assignment: explicitly calls the SharedWeakUnion
            // assignment operator
            union1 = union2;
            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(union2.isStrong());
            BEAST_EXPECT(union1.get() == union2.get());
            BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
            BEAST_EXPECT(TIBase::getState(id2) == TrackedState::alive);

            // 2) Test self-assignment
            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
            int initialRefCount = strong1->use_count();
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wself-assign-overloaded"
            union1 = union1;  // Self-assignment
#pragma clang diagnostic pop
            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
            BEAST_EXPECT(strong1->use_count() == initialRefCount);

            // 3) Test assignment from a null union pointer
            union1 = SharedWeakUnion<TIBase>();
            BEAST_EXPECT(union1.get() == nullptr);

            // 4) Test assignment from a reset (expired) union pointer
            strong2.reset();
            union2.reset();
            union1 = union2;
            BEAST_EXPECT(union1.get() == nullptr);
            BEAST_EXPECT(TIBase::getState(id2) == TrackedState::deleted);
        }
    }

    void
    testPartialDelete()
    {
        testcase("Partial Delete");

        // This test creates two threads: one with a strong pointer and one
        // with a weak pointer. The strong pointer is reset while the weak
        // pointer still holds a reference, triggering a partial delete.
        // While the partial delete function runs (a sleep is inserted) the
        // weak pointer is reset. The destructor should wait to run until
        // after the partial delete function has completed running.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        auto strong = make_SharedIntrusive<TIBase>();
        WeakIntrusive<TIBase> weak{strong};
        bool destructorRan = false;
        bool partialDeleteRan = false;
        std::latch partialDeleteStartedSyncPoint{2};
        strong->tracingCallback_ = [&](TrackedState cur, std::optional<TrackedState> next) {
            using enum TrackedState;
            if (next == deletedStarted)
            {
                // strong goes out of scope while weak is still in scope.
                // This checks that partialDelete has run to completion
                // before the destructor is called. A sleep is inserted
                // inside the partial delete to make sure the destructor is
                // given an opportunity to run during partial delete.
                BEAST_EXPECT(cur == partiallyDeleted);
            }
            if (next == partiallyDeletedStarted)
            {
                partialDeleteStartedSyncPoint.arrive_and_wait();
                using namespace std::chrono_literals;
                // Sleep and let the weak pointer go out of scope,
                // potentially triggering a destructor while partial delete
                // is running. The test is to make sure that doesn't happen.
                // (The exact duration is not critical; it only needs to give
                // the destructor an opportunity to run.)
                std::this_thread::sleep_for(100ms);
            }
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                partialDeleteRan = true;
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                destructorRan = true;
            }
        };
        std::thread t1{[&] {
            partialDeleteStartedSyncPoint.arrive_and_wait();
            weak.reset();  // Trigger a full delete as soon as the partial
                           // delete starts
        }};
        std::thread t2{[&] {
            strong.reset();  // Trigger a partial delete
        }};
        t1.join();
        t2.join();

        BEAST_EXPECT(destructorRan && partialDeleteRan);
    }

    void
    testDestructor()
    {
        testcase("Destructor");

        // This test creates two threads: one with a strong pointer and one
        // with a weak pointer. The weak pointer is reset while the strong
        // pointer still holds a reference. Then the strong pointer is
        // reset. Only the destructor should run; the partial destructor
        // should not be called. Since the weak reset runs to completion
        // before the strong pointer is reset, threading doesn't add much to
        // this test, but there is no harm in keeping it.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        auto strong = make_SharedIntrusive<TIBase>();
        WeakIntrusive<TIBase> weak{strong};
        bool destructorRan = false;
        bool partialDeleteRan = false;
        std::latch weakResetSyncPoint{2};
        strong->tracingCallback_ = [&](TrackedState cur, std::optional<TrackedState> next) {
            using enum TrackedState;
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                partialDeleteRan = true;
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                destructorRan = true;
            }
        };
        std::thread t1{[&] {
            weak.reset();
            weakResetSyncPoint.arrive_and_wait();
        }};
        std::thread t2{[&] {
            weakResetSyncPoint.arrive_and_wait();
            strong.reset();  // Trigger the full destructor (no weak refs
                             // remain at this point)
        }};
        t1.join();
        t2.join();

        BEAST_EXPECT(destructorRan && !partialDeleteRan);
    }

    void
    testMultithreadedClearMixedVariant()
    {
        testcase("Multithreaded Clear Mixed Variant");

        // This test creates and destroys many strong and weak pointers in a
        // loop. There is a random mix of strong and weak pointers stored in
        // a vector (held as a variant). All threads clear their pointers
        // and check that the invariants hold.

        using enum TrackedState;
        TIBase::ResetStatesGuard rsg{true};

        std::atomic<int> destructionState{0};
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void { destructionState.fetch_or(1, std::memory_order_acq_rel); };
        auto setPartialDeleteRan = [&]() -> void { destructionState.fetch_or(2, std::memory_order_acq_rel); };
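        // destructionState packs both flags into one atomic int: bit 0 is
        // set once the destructor has run, bit 1 once the partial destructor
        // has run, so both can be updated and read without a separate lock.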
        auto tracingCallback = [&](TrackedState cur, std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };
        auto createVecOfPointers = [&](auto const& toClone, std::default_random_engine& eng)
            -> std::vector<std::variant<SharedIntrusive<TIBase>, WeakIntrusive<TIBase>>> {
            std::vector<std::variant<SharedIntrusive<TIBase>, WeakIntrusive<TIBase>>> result;
            std::uniform_int_distribution<> toCreateDist(4, 64);
            std::uniform_int_distribution<> isStrongDist(0, 1);
            auto numToCreate = toCreateDist(eng);
            result.reserve(numToCreate);
            for (int i = 0; i < numToCreate; ++i)
            {
                if (isStrongDist(eng))
                {
                    result.push_back(SharedIntrusive<TIBase>(toClone));
                }
                else
                {
                    result.push_back(WeakIntrusive<TIBase>(toClone));
                }
            }
            return result;
        };
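        // Note: destroying the vector returned here releases each variant's
        // active alternative in turn, so every loop iteration drops a random
        // mix of strong and weak references (see v.clear() below).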
        constexpr int loopIters = 2 * 1024;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toClone;
        Barrier loopStartSyncPoint{numThreads};
        Barrier postCreateToCloneSyncPoint{numThreads};
        Barrier postCreateVecOfPointersSyncPoint{numThreads};
        auto engines = [&]() -> std::vector<std::default_random_engine> {
            std::random_device rd;
            std::vector<std::default_random_engine> result;
            result.reserve(numThreads);
            for (int i = 0; i < numThreads; ++i)
                result.emplace_back(rd());
            return result;
        }();

        // cloneAndDestroy clones the strong pointer into a vector of mixed
        // strong and weak pointers and destroys them all at once.
        // threadId==0 is special.
        auto cloneAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // Thread 0 is the genesis thread. It creates the strong
                    // pointers to be cloned by the other threads. This
                    // thread will also check that the destructor ran and
                    // clear the temporary variables.

                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] = getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toClone.clear();
                    toClone.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toClone.begin(), toClone.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToCloneSyncPoint.arrive_and_wait();

                auto v = createVecOfPointers(toClone[threadId], engines[threadId]);
                toClone[threadId].reset();

                // ------ Sync Point ------
                postCreateVecOfPointersSyncPoint.arrive_and_wait();

                v.clear();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(cloneAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }

    void
    testMultithreadedClearMixedUnion()
    {
        testcase("Multithreaded Clear Mixed Union");

        // This test creates and destroys many SharedWeakUnion pointers in a
        // loop. All the pointers start out strong, and a loop randomly
        // converts them between strong and weak. All threads clear their
        // pointers and check that the invariants hold.
        //
        // Note: This test differs from the test above in that the pointers
        // randomly flip between strong and weak in a loop. This can't be
        // done in the variant test above because variant is not thread safe
        // while SharedWeakUnion is thread safe.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        std::atomic<int> destructionState{0};
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void { destructionState.fetch_or(1, std::memory_order_acq_rel); };
        auto setPartialDeleteRan = [&]() -> void { destructionState.fetch_or(2, std::memory_order_acq_rel); };
        auto tracingCallback = [&](TrackedState cur, std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };
        auto createVecOfPointers = [&](auto const& toClone,
                                       std::default_random_engine& eng)
            -> std::vector<SharedWeakUnion<TIBase>> {
            std::vector<SharedWeakUnion<TIBase>> result;
            std::uniform_int_distribution<> toCreateDist(4, 64);
            auto numToCreate = toCreateDist(eng);
            result.reserve(numToCreate);
            for (int i = 0; i < numToCreate; ++i)
                result.push_back(SharedIntrusive<TIBase>(toClone));
            return result;
        };
        constexpr int loopIters = 2 * 1024;
        constexpr int flipPointersLoopIters = 256;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toClone;
        Barrier loopStartSyncPoint{numThreads};
        Barrier postCreateToCloneSyncPoint{numThreads};
        Barrier postCreateVecOfPointersSyncPoint{numThreads};
        Barrier postFlipPointersLoopSyncPoint{numThreads};
        auto engines = [&]() -> std::vector<std::default_random_engine> {
            std::random_device rd;
            std::vector<std::default_random_engine> result;
            result.reserve(numThreads);
            for (int i = 0; i < numThreads; ++i)
                result.emplace_back(rd());
            return result;
        }();

        // cloneAndDestroy clones the strong pointer into a vector of
        // mixed strong and weak pointers, runs a loop that randomly
        // flips the pointers between strong and weak, and destroys them
        // all at once.
        auto cloneAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // Thread 0 is the genesis thread. It creates the
                    // strong pointers to be cloned by the other threads.
                    // This thread will also check that the destructor ran
                    // and clear the temporary variables.
                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] = getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toClone.clear();
                    toClone.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toClone.begin(), toClone.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToCloneSyncPoint.arrive_and_wait();

                auto v = createVecOfPointers(toClone[threadId], engines[threadId]);
                toClone[threadId].reset();

                // ------ Sync Point ------
                postCreateVecOfPointersSyncPoint.arrive_and_wait();

                std::uniform_int_distribution<> isStrongDist(0, 1);
                for (int f = 0; f < flipPointersLoopIters; ++f)
                {
                    for (auto& p : v)
                    {
                        if (isStrongDist(engines[threadId]))
                        {
                            p.convertToStrong();
                        }
                        else
                        {
                            p.convertToWeak();
                        }
                    }
                }

                // ------ Sync Point ------
                postFlipPointersLoopSyncPoint.arrive_and_wait();

                v.clear();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(cloneAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }

    void
    testMultithreadedLockingWeak()
    {
        testcase("Multithreaded Locking Weak");

        // This test creates a single shared pointer from which multiple
        // threads create weak pointers. The threads then repeatedly lock
        // their weak pointers. Finally, all threads clear their pointers
        // and check that the invariants hold.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        std::atomic<int> destructionState{0};
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void { destructionState.fetch_or(1, std::memory_order_acq_rel); };
        auto setPartialDeleteRan = [&]() -> void { destructionState.fetch_or(2, std::memory_order_acq_rel); };
        auto tracingCallback = [&](TrackedState cur, std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };

        constexpr int loopIters = 2 * 1024;
        constexpr int lockWeakLoopIters = 256;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toLock;
        Barrier loopStartSyncPoint{numThreads};
        Barrier postCreateToLockSyncPoint{numThreads};
        Barrier postLockWeakLoopSyncPoint{numThreads};

        // lockAndDestroy creates weak pointers from the strong pointer
        // and runs a loop that locks the weak pointer. At the end of the
        // loop all the pointers are destroyed at once.
        auto lockAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // Thread 0 is the genesis thread. It creates the
                    // strong pointers to be locked by the other threads.
                    // This thread will also check that the destructor ran
                    // and clear the temporary variables.
                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] = getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toLock.clear();
                    toLock.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toLock.begin(), toLock.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToLockSyncPoint.arrive_and_wait();

                // Multiple threads all create a weak pointer from the same
                // strong pointer
                WeakIntrusive weak{toLock[threadId]};
                for (int wi = 0; wi < lockWeakLoopIters; ++wi)
                {
                    BEAST_EXPECT(!weak.expired());
                    auto strong = weak.lock();
                    BEAST_EXPECT(strong);
                }

                // ------ Sync Point ------
                postLockWeakLoopSyncPoint.arrive_and_wait();

                toLock[threadId].reset();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(lockAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }

    void
    run() override
    {
        testBasics();
        testPartialDelete();
        testDestructor();
        testMultithreadedClearMixedVariant();
        testMultithreadedClearMixedUnion();
        testMultithreadedLockingWeak();
    }
};

BEAST_DEFINE_TESTSUITE(IntrusiveShared, basics, xrpl);
} // namespace tests
} // namespace xrpl