spinlock.h
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright 2022, Nikolaos D. Bougalis <nikb@bougalis.net>

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

#ifndef RIPPLE_BASICS_SPINLOCK_H_INCLUDED
#define RIPPLE_BASICS_SPINLOCK_H_INCLUDED

#include <xrpl/beast/utility/instrumentation.h>
#include <atomic>
#include <limits>
#include <type_traits>

#ifndef __aarch64__
#include <immintrin.h>
#endif

namespace ripple {

namespace detail {
/** Inform the processor that we are in a tight spin-wait loop. */
inline void
spin_pause() noexcept
{
#ifdef __aarch64__
    asm volatile("yield");
#else
    _mm_pause();
#endif
}

} // namespace detail
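
// Illustrative sketch (not part of the original header): a minimal spin-wait
// that polls a flag with relaxed loads and calls detail::spin_pause() between
// polls, exactly as the lock() loops below do. The names `busy` and
// `wait_until_free` are hypothetical.
//
//     std::atomic<bool> busy{false};
//
//     void wait_until_free()
//     {
//         while (busy.load(std::memory_order_relaxed))
//             detail::spin_pause();
//     }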

/** Classes to handle arrays of spinlocks packed into a single atomic integer:

    A single std::atomic<T> can host up to std::numeric_limits<T>::digits
    independent spinlocks, one per bit, allowing for very space-efficient
    lock sharding.
 */
template <class T>
class packed_spinlock
{
    // clang-format off
    static_assert(std::is_unsigned_v<T>);
    static_assert(std::atomic<T>::is_always_lock_free);
    static_assert(
        std::is_same_v<decltype(std::declval<std::atomic<T>&>().fetch_or(0)), T> &&
        std::is_same_v<decltype(std::declval<std::atomic<T>&>().fetch_and(0)), T>,
        "std::atomic<T>::fetch_or(T) and std::atomic<T>::fetch_and(T) are required by packed_spinlock");
    // clang-format on

private:
    std::atomic<T>& bits_;
    T const mask_;

public:
    packed_spinlock(packed_spinlock const&) = delete;
    packed_spinlock&
    operator=(packed_spinlock const&) = delete;

    /** A single spinlock packed inside the specified atomic.

        @param lock The atomic integer inside which the spinlock is packed.
        @param index The index of the bit that this spinlock uses.
     */
    packed_spinlock(std::atomic<T>& lock, int index)
        : bits_(lock), mask_(static_cast<T>(1) << index)
    {
        XRPL_ASSERT(
            index >= 0 && (mask_ != 0),
            "ripple::packed_spinlock::packed_spinlock : valid index and mask");
    }

    [[nodiscard]] bool
    try_lock()
    {
        // Set our bit with fetch_or; if it was previously clear, the lock
        // is now ours.
        return (bits_.fetch_or(mask_, std::memory_order_acquire) & mask_) == 0;
    }

    void
    lock()
    {
        while (!try_lock())
        {
            // The use of relaxed memory ordering here is intentional and
            // serves to help reduce cache coherency traffic during times
            // of contention by avoiding writes that would definitely not
            // result in the lock being acquired.
            while ((bits_.load(std::memory_order_relaxed) & mask_) != 0)
                detail::spin_pause();
        }
    }

    void
    unlock()
    {
        bits_.fetch_and(~mask_, std::memory_order_release);
    }
};
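
// Usage sketch (illustrative, not part of the original header): sixteen
// independent locks packed into one 16-bit atomic, one bit per lock. The
// names `shard_bits` and `with_shard` are hypothetical; <cstdint> and a
// callable F are assumed.
//
//     std::atomic<std::uint16_t> shard_bits{0};
//
//     template <class F>
//     void with_shard(int shard, F&& f)  // shard in [0, 15]
//     {
//         packed_spinlock<std::uint16_t> sl(shard_bits, shard);
//         sl.lock();
//         f();  // critical section for this shard only
//         sl.unlock();
//     }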

/** A spinlock implemented on top of an atomic integer. */
template <class T>
class spinlock
{
    static_assert(std::is_unsigned_v<T>);
    static_assert(std::atomic<T>::is_always_lock_free);

private:
    std::atomic<T>& lock_;

public:
    spinlock(spinlock const&) = delete;
    spinlock&
    operator=(spinlock const&) = delete;

    /** Grabs the given atomic integer and uses it as the lock. */
    spinlock(std::atomic<T>& lock) : lock_(lock)
    {
    }

    [[nodiscard]] bool
    try_lock()
    {
        T expected = 0;

        return lock_.compare_exchange_weak(
            expected,
            std::numeric_limits<T>::max(),
            std::memory_order_acquire,
            std::memory_order_relaxed);
    }

    void
    lock()
    {
        while (!try_lock())
        {
            // The use of relaxed memory ordering here is intentional and
            // serves to help reduce cache coherency traffic during times
            // of contention by avoiding writes that would definitely not
            // result in the lock being acquired.
            while (lock_.load(std::memory_order_relaxed) != 0)
                detail::spin_pause();
        }
    }

    void
    unlock()
    {
        lock_.store(0, std::memory_order_release);
    }
};
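
// Usage sketch (illustrative, not part of the original header): spinlock
// provides lock()/unlock(), so it satisfies BasicLockable and works with
// std::lock_guard (requires <mutex>). The names `spin_word`, `counter`,
// and `bump` are hypothetical; <cstdint> is assumed.
//
//     std::atomic<std::uint32_t> spin_word{0};
//     unsigned counter = 0;
//
//     void bump()
//     {
//         spinlock<std::uint32_t> sl(spin_word);
//         std::lock_guard guard(sl);  // unlocks automatically at scope exit
//         ++counter;
//     }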

} // namespace ripple

#endif