IntrusiveShared_test.cpp
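
// Unit tests for the intrusive smart pointer classes: SharedIntrusive,
// WeakIntrusive, SharedWeakUnion, and the IntrusiveRefCounts base class.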
#include <test/unit_test/SuiteJournal.h>

#include <xrpl/basics/IntrusivePointer.ipp>
#include <xrpl/basics/IntrusiveRefCounts.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/beast/utility/Journal.h>

#include <algorithm>
#include <array>
#include <atomic>
#include <barrier>
#include <cassert>
#include <chrono>
#include <cstdint>
#include <functional>
#include <latch>
#include <optional>
#include <random>
#include <string>
#include <thread>
#include <utility>
#include <variant>
#include <vector>

namespace ripple {
namespace tests {

namespace {
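// Tracks where a test object is in its lifecycle. The tests use these
// states to assert that the partial destructor and the full destructor
// each run exactly once and in the expected order.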
enum class TrackedState : std::uint8_t {
    uninitialized,
    alive,
    partiallyDeletedStarted,
    partiallyDeleted,
    deletedStarted,
    deleted
};

class TIBase : public IntrusiveRefCounts
{
public:
    static constexpr std::size_t maxStates = 128;
    static std::array<std::atomic<TrackedState>, maxStates> state;
    static std::atomic<int> nextId;
    static TrackedState
    getState(int id)
    {
        assert(id < state.size());
        return state[id].load(std::memory_order_acquire);
    }
    static void
    resetStates(bool resetCallback)
    {
        for (int i = 0; i < maxStates; ++i)
        {
            state[i].store(
                TrackedState::uninitialized, std::memory_order_release);
        }
        nextId.store(0, std::memory_order_release);
        if (resetCallback)
            TIBase::tracingCallback_ =
                [](TrackedState, std::optional<TrackedState>) {};
    }

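    // RAII guard that resets the static tracking state on construction and
    // again on destruction, so each test starts and finishes with a clean
    // slate. Optionally resets the tracing callback as well.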
    struct ResetStatesGuard
    {
        bool resetCallback_{false};

        ResetStatesGuard(bool resetCallback) : resetCallback_{resetCallback}
        {
            TIBase::resetStates(resetCallback_);
        }
        ~ResetStatesGuard()
        {
            TIBase::resetStates(resetCallback_);
        }
    };

    TIBase() : id_{checkoutID()}
    {
        assert(state.size() > id_);
        state[id_].store(TrackedState::alive, std::memory_order_relaxed);
    }
    ~TIBase()
    {
        using enum TrackedState;

        assert(state.size() > id_);
        tracingCallback_(
            state[id_].load(std::memory_order_relaxed), deletedStarted);

        assert(state.size() > id_);
        // Use relaxed memory order to try to avoid atomic operations from
        // adding additional memory synchronizations that may hide threading
        // errors in the underlying shared pointer class.
        state[id_].store(deletedStarted, std::memory_order_relaxed);

        tracingCallback_(deletedStarted, deleted);

        assert(state.size() > id_);
        state[id_].store(TrackedState::deleted, std::memory_order_relaxed);

        tracingCallback_(TrackedState::deleted, std::nullopt);
    }

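    // Called when the strong count reaches zero while weak references are
    // still outstanding. The object's memory must stay alive until the
    // last weak reference is released; only then does the destructor run.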
    void
    partialDestructor()
    {
        using enum TrackedState;

        assert(state.size() > id_);
        tracingCallback_(
            state[id_].load(std::memory_order_relaxed),
            partiallyDeletedStarted);

        assert(state.size() > id_);
        state[id_].store(partiallyDeletedStarted, std::memory_order_relaxed);

        tracingCallback_(partiallyDeletedStarted, partiallyDeleted);

        assert(state.size() > id_);
        state[id_].store(partiallyDeleted, std::memory_order_relaxed);

        tracingCallback_(partiallyDeleted, std::nullopt);
    }

    static std::function<void(TrackedState, std::optional<TrackedState>)>
        tracingCallback_;

    int id_;

private:
    static int
    checkoutID()
    {
        return nextId.fetch_add(1, std::memory_order_acq_rel);
    }
};

std::array<std::atomic<TrackedState>, TIBase::maxStates> TIBase::state;
std::atomic<int> TIBase::nextId{0};

std::function<void(TrackedState, std::optional<TrackedState>)>
    TIBase::tracingCallback_ =
        [](TrackedState, std::optional<TrackedState>) {};

} // namespace

class IntrusiveShared_test : public beast::unit_test::suite
{
public:
    void
    testBasics()
    {
        testcase("Basics");

        {
            TIBase::ResetStatesGuard rsg{true};

            TIBase b;
            BEAST_EXPECT(b.use_count() == 1);
            b.addWeakRef();
            BEAST_EXPECT(b.use_count() == 1);
            auto s = b.releaseStrongRef();
            BEAST_EXPECT(s == ReleaseStrongRefAction::partialDestroy);
            BEAST_EXPECT(b.use_count() == 0);
            TIBase* pb = &b;
            partialDestructorFinished(&pb);
            BEAST_EXPECT(!pb);
            auto w = b.releaseWeakRef();
            BEAST_EXPECT(w == ReleaseWeakRefAction::destroy);
        }

        std::vector<SharedIntrusive<TIBase>> strong;
        std::vector<WeakIntrusive<TIBase>> weak;
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            auto b = make_SharedIntrusive<TIBase>();
            auto id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(b->use_count() == 1);
            for (int i = 0; i < 10; ++i)
            {
                strong.push_back(b);
            }
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            strong.resize(strong.size() - 1);
            BEAST_EXPECT(TIBase::getState(id) == alive);
            strong.clear();
            BEAST_EXPECT(TIBase::getState(id) == deleted);

            b = make_SharedIntrusive<TIBase>();
            id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(b->use_count() == 1);
            for (int i = 0; i < 10; ++i)
            {
                weak.push_back(b);
                BEAST_EXPECT(b->use_count() == 1);
            }
            BEAST_EXPECT(TIBase::getState(id) == alive);
            weak.resize(weak.size() - 1);
            BEAST_EXPECT(TIBase::getState(id) == alive);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            while (!weak.empty())
            {
                weak.resize(weak.size() - 1);
                if (weak.size())
                    BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            }
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            auto b = make_SharedIntrusive<TIBase>();
            auto id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            WeakIntrusive<TIBase> w{b};
            BEAST_EXPECT(TIBase::getState(id) == alive);
            auto s = w.lock();
            BEAST_EXPECT(s && s->use_count() == 2);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(s && s->use_count() == 1);
            s.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            BEAST_EXPECT(w.expired());
            s = w.lock();
            // Cannot convert a weak pointer to a strong pointer if the
            // object is already partially deleted
            BEAST_EXPECT(!s);
            w.reset();
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            using swu = SharedWeakUnion<TIBase>;
            swu b = make_SharedIntrusive<TIBase>();
            BEAST_EXPECT(b.isStrong() && b.use_count() == 1);
            auto id = b.get()->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            swu w = b;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(w.isStrong() && b.use_count() == 2);
            w.convertToWeak();
            BEAST_EXPECT(w.isWeak() && b.use_count() == 1);
            swu s = w;
            BEAST_EXPECT(s.isWeak() && b.use_count() == 1);
            s.convertToStrong();
            BEAST_EXPECT(s.isStrong() && b.use_count() == 2);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(s.use_count() == 1);
            BEAST_EXPECT(!w.expired());
            s.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            BEAST_EXPECT(w.expired());
            w.convertToStrong();
            // Cannot convert a weak pointer to a strong pointer if the
            // object is already partially deleted
            BEAST_EXPECT(w.isWeak());
            w.reset();
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            // Testing SharedWeakUnion assignment operator

            TIBase::ResetStatesGuard rsg{true};

            auto strong1 = make_SharedIntrusive<TIBase>();
            auto strong2 = make_SharedIntrusive<TIBase>();

            auto id1 = strong1->id_;
            auto id2 = strong2->id_;

            BEAST_EXPECT(id1 != id2);

            SharedWeakUnion<TIBase> union1 = strong1;
            SharedWeakUnion<TIBase> union2 = strong2;

            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(union2.isStrong());
            BEAST_EXPECT(union1.get() == strong1.get());
            BEAST_EXPECT(union2.get() == strong2.get());

            // 1) Normal assignment: explicitly calls SharedWeakUnion
            // assignment
            union1 = union2;
            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(union2.isStrong());
            BEAST_EXPECT(union1.get() == union2.get());
            BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
            BEAST_EXPECT(TIBase::getState(id2) == TrackedState::alive);

            // 2) Test self-assignment
            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
            int initialRefCount = strong1->use_count();
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wself-assign-overloaded"
            union1 = union1;  // Self-assignment
#pragma clang diagnostic pop
            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
            BEAST_EXPECT(strong1->use_count() == initialRefCount);

            // 3) Test assignment from null union pointer
            union1 = SharedWeakUnion<TIBase>();
            BEAST_EXPECT(union1.get() == nullptr);

            // 4) Test assignment to expired union pointer
            strong2.reset();
            union2.reset();
            union1 = union2;
            BEAST_EXPECT(union1.get() == nullptr);
            BEAST_EXPECT(TIBase::getState(id2) == TrackedState::deleted);
        }
    }

    void
    testPartialDelete()
    {
        testcase("Partial Delete");

        // This test creates two threads. One with a strong pointer and one
        // with a weak pointer. The strong pointer is reset while the weak
        // pointer still holds a reference, triggering a partial delete.
        // While the partial delete function runs (a sleep is inserted) the
        // weak pointer is reset. The destructor should wait to run until
        // after the partial delete function has completed running.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        auto strong = make_SharedIntrusive<TIBase>();
        WeakIntrusive<TIBase> weak{strong};
        bool destructorRan = false;
        bool partialDeleteRan = false;
        std::latch partialDeleteStartedSyncPoint{2};
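        // The latch has two participants: the tracing callback running
        // inside the partial delete, and the thread that resets the weak
        // pointer. It guarantees the weak reset races with the partial
        // delete rather than happening before it starts.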
        strong->tracingCallback_ = [&](TrackedState cur,
                                       std::optional<TrackedState> next) {
            using enum TrackedState;
            if (next == deletedStarted)
            {
                // strong goes out of scope while weak is still in scope.
                // This checks that partialDelete has run to completion
                // before the destructor is called. A sleep is inserted
                // inside the partial delete to make sure the destructor is
                // given an opportunity to run during partial delete.
                BEAST_EXPECT(cur == partiallyDeleted);
            }
            if (next == partiallyDeletedStarted)
            {
                partialDeleteStartedSyncPoint.arrive_and_wait();
                using namespace std::chrono_literals;
                // Sleep and let the weak pointer go out of scope,
                // potentially triggering a destructor while partial delete
                // is running. The test is to make sure that doesn't happen.
                std::this_thread::sleep_for(800ms);
            }
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                partialDeleteRan = true;
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                destructorRan = true;
            }
        };
        std::thread t1{[&] {
            partialDeleteStartedSyncPoint.arrive_and_wait();
            weak.reset();  // Trigger a full delete as soon as the partial
                           // delete starts
        }};
        std::thread t2{[&] {
            strong.reset();  // Trigger a partial delete
        }};
        t1.join();
        t2.join();

        BEAST_EXPECT(destructorRan && partialDeleteRan);
    }

    void
    testDestructor()
    {
        testcase("Destructor");

        // This test creates two threads. One with a strong pointer and one
        // with a weak pointer. The weak pointer is reset while the strong
        // pointer still holds a reference. Then the strong pointer is
        // reset. Only the destructor should run. The partial destructor
        // should not be called. Since the weak reset runs to completion
        // before the strong pointer is reset, threading doesn't add much to
        // this test, but there is no harm in keeping it.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        auto strong = make_SharedIntrusive<TIBase>();
        WeakIntrusive<TIBase> weak{strong};
        bool destructorRan = false;
        bool partialDeleteRan = false;
        std::latch weakResetSyncPoint{2};
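        // The latch makes thread 2 wait until the weak pointer has been
        // fully reset before it resets the strong pointer, so only the
        // full destructor path should run.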
        strong->tracingCallback_ = [&](TrackedState cur,
                                       std::optional<TrackedState> next) {
            using enum TrackedState;
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                partialDeleteRan = true;
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                destructorRan = true;
            }
        };
        std::thread t1{[&] {
            weak.reset();
            weakResetSyncPoint.arrive_and_wait();
        }};
        std::thread t2{[&] {
            weakResetSyncPoint.arrive_and_wait();
            strong.reset();  // Trigger a full delete (no weak refs remain)
        }};
        t1.join();
        t2.join();

        BEAST_EXPECT(destructorRan && !partialDeleteRan);
    }

    void
    testMultithreadedClearMixedVariant()
    {
        testcase("Multithreaded Clear Mixed Variant");

        // This test creates and destroys many strong and weak pointers in a
        // loop. There is a random mix of strong and weak pointers stored in
        // a vector (held as a variant). All threads clear all the pointers
        // and check that the invariants hold.

        using enum TrackedState;
        TIBase::ResetStatesGuard rsg{true};

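        // Bit 0 records that the destructor ran; bit 1 records that the
        // partial destructor ran. Packing both flags into one atomic lets
        // any thread update them without a data race.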
        std::atomic<int> destructionState{0};
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void {
            destructionState.fetch_or(1, std::memory_order_acq_rel);
        };
        auto setPartialDeleteRan = [&]() -> void {
            destructionState.fetch_or(2, std::memory_order_acq_rel);
        };
        auto tracingCallback = [&](TrackedState cur,
                                   std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };
        auto createVecOfPointers = [&](auto const& toClone,
                                       std::default_random_engine& eng)
            -> std::vector<
                std::variant<SharedIntrusive<TIBase>, WeakIntrusive<TIBase>>> {
            std::vector<
                std::variant<SharedIntrusive<TIBase>, WeakIntrusive<TIBase>>>
                result;
            std::uniform_int_distribution<> toCreateDist(4, 64);
            std::uniform_int_distribution<> isStrongDist(0, 1);
            auto numToCreate = toCreateDist(eng);
            result.reserve(numToCreate);
            for (int i = 0; i < numToCreate; ++i)
            {
                if (isStrongDist(eng))
                {
                    result.push_back(SharedIntrusive<TIBase>(toClone));
                }
                else
                {
                    result.push_back(WeakIntrusive<TIBase>(toClone));
                }
            }
            return result;
        };
        constexpr int loopIters = 2 * 1024;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toClone;
        std::barrier loopStartSyncPoint{numThreads};
        std::barrier postCreateToCloneSyncPoint{numThreads};
        std::barrier postCreateVecOfPointersSyncPoint{numThreads};
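        // The barriers sequence each iteration: every thread starts the
        // iteration together, waits for thread 0 to publish the pointers to
        // clone, and waits until all threads have made their clones before
        // any thread starts releasing them.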
        auto engines = [&]() -> std::vector<std::default_random_engine> {
            std::random_device rd;
            std::vector<std::default_random_engine> result;
            result.reserve(numThreads);
            for (int i = 0; i < numThreads; ++i)
                result.emplace_back(rd());
            return result;
        }();

        // cloneAndDestroy clones the strong pointer into a vector of mixed
        // strong and weak pointers and destroys them all at once.
        // threadId==0 is special.
        auto cloneAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // Thread 0 is the genesis thread. It creates the strong
                    // pointers to be cloned by the other threads. This
                    // thread will also check that the destructor ran and
                    // clear the temporary variables.

                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] =
                        getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toClone.clear();
                    toClone.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toClone.begin(), toClone.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToCloneSyncPoint.arrive_and_wait();

                auto v =
                    createVecOfPointers(toClone[threadId], engines[threadId]);
                toClone[threadId].reset();

                // ------ Sync Point ------
                postCreateVecOfPointersSyncPoint.arrive_and_wait();

                v.clear();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(cloneAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }

    void
    testMultithreadedClearMixedUnion()
    {
        testcase("Multithreaded Clear Mixed Union");

        // This test creates and destroys many SharedWeakUnion pointers in a
        // loop. All the pointers start as strong and a loop randomly
        // converts them between strong and weak pointers. All threads clear
        // all the pointers and check that the invariants hold.
        //
        // Note: This test also differs from the test above in that the
        // pointers randomly change from strong to weak and from weak to
        // strong in a loop. This can't be done in the variant test above
        // because variant is not thread safe while the SharedWeakUnion is
        // thread safe.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        std::atomic<int> destructionState{0};
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void {
            destructionState.fetch_or(1, std::memory_order_acq_rel);
        };
        auto setPartialDeleteRan = [&]() -> void {
            destructionState.fetch_or(2, std::memory_order_acq_rel);
        };
        auto tracingCallback = [&](TrackedState cur,
                                   std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };
        auto createVecOfPointers = [&](auto const& toClone,
                                       std::default_random_engine& eng)
            -> std::vector<SharedWeakUnion<TIBase>> {
            std::vector<SharedWeakUnion<TIBase>> result;
            std::uniform_int_distribution<> toCreateDist(4, 64);
            auto numToCreate = toCreateDist(eng);
            result.reserve(numToCreate);
            for (int i = 0; i < numToCreate; ++i)
                result.push_back(SharedIntrusive<TIBase>(toClone));
            return result;
        };
        constexpr int loopIters = 2 * 1024;
        constexpr int flipPointersLoopIters = 256;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toClone;
        std::barrier loopStartSyncPoint{numThreads};
        std::barrier postCreateToCloneSyncPoint{numThreads};
        std::barrier postCreateVecOfPointersSyncPoint{numThreads};
        std::barrier postFlipPointersLoopSyncPoint{numThreads};
        auto engines = [&]() -> std::vector<std::default_random_engine> {
            std::random_device rd;
            std::vector<std::default_random_engine> result;
            result.reserve(numThreads);
            for (int i = 0; i < numThreads; ++i)
                result.emplace_back(rd());
            return result;
        }();

        // cloneAndDestroy clones the strong pointer into a vector of
        // mixed strong and weak pointers, runs a loop that randomly
        // changes strong pointers to weak pointers, and destroys them
        // all at once.
        auto cloneAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // threadId 0 is the genesis thread. It creates the
                    // strong pointer to be cloned by the other threads.
                    // This thread will also check that the destructor ran
                    // and clear the temporary variables.
                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] =
                        getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toClone.clear();
                    toClone.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toClone.begin(), toClone.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToCloneSyncPoint.arrive_and_wait();

                auto v =
                    createVecOfPointers(toClone[threadId], engines[threadId]);
                toClone[threadId].reset();

                // ------ Sync Point ------
                postCreateVecOfPointersSyncPoint.arrive_and_wait();

                std::uniform_int_distribution<> isStrongDist(0, 1);
                for (int f = 0; f < flipPointersLoopIters; ++f)
                {
                    for (auto& p : v)
                    {
                        if (isStrongDist(engines[threadId]))
                        {
                            p.convertToStrong();
                        }
                        else
                        {
                            p.convertToWeak();
                        }
                    }
                }

                // ------ Sync Point ------
                postFlipPointersLoopSyncPoint.arrive_and_wait();

                v.clear();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(cloneAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }

    void
    testMultithreadedLockingWeak()
    {
        testcase("Multithreaded Locking Weak");

        // This test creates a single shared intrusive pointer that multiple
        // threads create weak pointers from. The threads then lock the weak
        // pointers. All threads clear all the pointers and check that the
        // invariants hold.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        std::atomic<int> destructionState{0};
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void {
            destructionState.fetch_or(1, std::memory_order_acq_rel);
        };
        auto setPartialDeleteRan = [&]() -> void {
            destructionState.fetch_or(2, std::memory_order_acq_rel);
        };
        auto tracingCallback = [&](TrackedState cur,
                                   std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };

        constexpr int loopIters = 2 * 1024;
        constexpr int lockWeakLoopIters = 256;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toLock;
        std::barrier loopStartSyncPoint{numThreads};
        std::barrier postCreateToLockSyncPoint{numThreads};
        std::barrier postLockWeakLoopSyncPoint{numThreads};

        // lockAndDestroy creates weak pointers from the strong pointer
        // and runs a loop that locks the weak pointer. At the end of the
        // loop all the pointers are destroyed at once.
        auto lockAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // threadId 0 is the genesis thread. It creates the
                    // strong pointer to be locked by the other threads.
                    // This thread will also check that the destructor ran
                    // and clear the temporary variables.
                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] =
                        getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toLock.clear();
                    toLock.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toLock.begin(), toLock.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToLockSyncPoint.arrive_and_wait();

                // Multiple threads all create a weak pointer from the same
                // strong pointer
                WeakIntrusive weak{toLock[threadId]};
                for (int wi = 0; wi < lockWeakLoopIters; ++wi)
                {
                    BEAST_EXPECT(!weak.expired());
                    auto strong = weak.lock();
                    BEAST_EXPECT(strong);
                }

                // ------ Sync Point ------
                postLockWeakLoopSyncPoint.arrive_and_wait();

                toLock[threadId].reset();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(lockAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }

    void
    run() override
    {
        testBasics();
        testPartialDelete();
        testDestructor();
        testMultithreadedClearMixedVariant();
        testMultithreadedClearMixedUnion();
        testMultithreadedLockingWeak();
    }
};

BEAST_DEFINE_TESTSUITE(IntrusiveShared, ripple_basics, ripple);
} // namespace tests
} // namespace ripple