rippled
Loading...
Searching...
No Matches
spinlock.h
1/*
2 This file is part of rippled: https://github.com/ripple/rippled
3 Copyright 2022, Nikolaos D. Bougalis <nikb@bougalis.net>
4
5 Permission to use, copy, modify, and/or distribute this software for any
6 purpose with or without fee is hereby granted, provided that the above
7 copyright notice and this permission notice appear in all copies.
8
9 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 12 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16*/
17
18#ifndef RIPPLE_BASICS_SPINLOCK_H_INCLUDED
19#define RIPPLE_BASICS_SPINLOCK_H_INCLUDED
20
21#include <xrpl/beast/utility/instrumentation.h>
22
23#include <atomic>
24#include <limits>
25#include <type_traits>
26
27#ifndef __aarch64__
28#include <immintrin.h>
29#endif
30
31namespace ripple {
32
33namespace detail {
/** Inform the processor that we are in a tight spin-wait loop.

    Emits the architecture's spin-wait hint: the `yield` instruction on
    AArch64 and `_mm_pause()` (the x86 `pause` instruction) elsewhere.
    This can reduce power usage and cross-core contention while a thread
    busy-waits for a lock to become available.
 */
inline void
spin_pause() noexcept
{
#ifdef __aarch64__
    asm volatile("yield");
#else
    _mm_pause();
#endif
}
53
54} // namespace detail
55
89template <class T>
91{
92 // clang-format off
93 static_assert(std::is_unsigned_v<T>);
95 static_assert(
96 std::is_same_v<decltype(std::declval<std::atomic<T>&>().fetch_or(0)), T> &&
98 "std::atomic<T>::fetch_and(T) and std::atomic<T>::fetch_and(T) are required by packed_spinlock");
99 // clang-format on
100
101private:
103 T const mask_;
104
105public:
108 operator=(packed_spinlock const&) = delete;
109
119 : bits_(lock), mask_(static_cast<T>(1) << index)
120 {
121 XRPL_ASSERT(
122 index >= 0 && (mask_ != 0),
123 "ripple::packed_spinlock::packed_spinlock : valid index and mask");
124 }
125
126 [[nodiscard]] bool
128 {
129 return (bits_.fetch_or(mask_, std::memory_order_acquire) & mask_) == 0;
130 }
131
132 void
134 {
135 while (!try_lock())
136 {
137 // The use of relaxed memory ordering here is intentional and
138 // serves to help reduce cache coherency traffic during times
139 // of contention by avoiding writes that would definitely not
140 // result in the lock being acquired.
141 while ((bits_.load(std::memory_order_relaxed) & mask_) != 0)
143 }
144 }
145
146 void
148 {
149 bits_.fetch_and(~mask_, std::memory_order_release);
150 }
151};
152
165template <class T>
167{
168 static_assert(std::is_unsigned_v<T>);
170
171private:
173
174public:
175 spinlock(spinlock const&) = delete;
176 spinlock&
177 operator=(spinlock const&) = delete;
178
187 {
188 }
189
190 [[nodiscard]] bool
192 {
193 T expected = 0;
194
195 return lock_.compare_exchange_weak(
196 expected,
198 std::memory_order_acquire,
199 std::memory_order_relaxed);
200 }
201
202 void
204 {
205 while (!try_lock())
206 {
207 // The use of relaxed memory ordering here is intentional and
208 // serves to help reduce cache coherency traffic during times
209 // of contention by avoiding writes that would definitely not
210 // result in the lock being acquired.
211 while (lock_.load(std::memory_order_relaxed) != 0)
213 }
214 }
215
216 void
218 {
219 lock_.store(0, std::memory_order_release);
220 }
221};
224} // namespace ripple
225
226#endif
Classes to handle arrays of spinlocks packed into a single atomic integer:
Definition: spinlock.h:91
std::atomic< T > & bits_
Definition: spinlock.h:102
packed_spinlock(std::atomic< T > &lock, int index)
A single spinlock packed inside the specified atomic.
Definition: spinlock.h:118
packed_spinlock(packed_spinlock const &)=delete
packed_spinlock & operator=(packed_spinlock const &)=delete
A spinlock implemented on top of an atomic integer.
Definition: spinlock.h:167
spinlock & operator=(spinlock const &)=delete
spinlock(spinlock const &)=delete
std::atomic< T > & lock_
Definition: spinlock.h:172
bool try_lock()
Definition: spinlock.h:191
spinlock(std::atomic< T > &lock)
Grabs the spinlock.
Definition: spinlock.h:186
T declval(T... args)
T fetch_and(T... args)
T is_same_v
void spin_pause() noexcept
Inform the processor that we are in a tight spin-wait loop.
Definition: spinlock.h:45
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26