SlabAllocator.h
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright 2022, Nikolaos D. Bougalis <nikb@bougalis.net>

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_BASICS_SLABALLOCATOR_H_INCLUDED
#define RIPPLE_BASICS_SLABALLOCATOR_H_INCLUDED

#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/type_name.h>
#include <xrpl/beast/utility/instrumentation.h>

#include <boost/align.hpp>
#include <boost/container/static_vector.hpp>
#include <boost/predef.h>

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <stdexcept>
#include <vector>

#if BOOST_OS_LINUX
#include <sys/mman.h>
#endif
namespace ripple {

template <typename Type>
class SlabAllocator
{
    static_assert(
        sizeof(Type) >= sizeof(std::uint8_t*),
        "SlabAllocator: the requested object must be larger than a pointer.");

    static_assert(alignof(Type) == 8 || alignof(Type) == 4);

    /** A block of memory that is owned by a slab allocator */
    struct SlabBlock
    {
        // A mutex to protect the freelist for this block:
        std::mutex m_;

        // A linked list of appropriately sized free buffers:
        std::uint8_t* l_ = nullptr;

        // The next memory block
        SlabBlock* next_;

        // The underlying memory block:
        std::uint8_t const* const p_ = nullptr;

        // The extent of the underlying memory block:
        std::size_t const size_;

        SlabBlock(
            SlabBlock* next,
            std::uint8_t* data,
            std::size_t size,
            std::size_t item)
            : next_(next), p_(data), size_(size)
        {
            // We don't need to grab the mutex here, since we're the only
            // ones with access at this moment.

            // Carve the block into item-sized buffers, threading the
            // freelist through the buffers themselves:
            while (data + item <= p_ + size_)
            {
                // Use memcpy to avoid unaligned UB
                // (will optimize to equivalent code)
                std::memcpy(data, &l_, sizeof(std::uint8_t*));
                l_ = data;
                data += item;
            }
        }

        ~SlabBlock()
        {
            // Calling this destructor will release the allocated memory but
            // will not properly destroy any objects that are constructed in
            // the block itself.
        }

        SlabBlock(SlabBlock const& other) = delete;
        SlabBlock&
        operator=(SlabBlock const& other) = delete;

        SlabBlock(SlabBlock&& other) = delete;
        SlabBlock&
        operator=(SlabBlock&& other) = delete;

        /** Determines whether the given pointer belongs to this allocator */
        bool
        own(std::uint8_t const* p) const noexcept
        {
            return (p >= p_) && (p < p_ + size_);
        }

        std::uint8_t*
        allocate() noexcept
        {
            std::uint8_t* ret;

            {
                std::lock_guard l(m_);

                ret = l_;

                if (ret)
                {
                    // Use memcpy to avoid unaligned UB
                    // (will optimize to equivalent code)
                    std::memcpy(&l_, ret, sizeof(std::uint8_t*));
                }
            }

            return ret;
        }

        /** Return an item to this allocator's freelist.

            @param ptr The pointer to the chunk of memory being deallocated.
         */
        void
        deallocate(std::uint8_t* ptr) noexcept
        {
            XRPL_ASSERT(
                own(ptr),
                "ripple::SlabAllocator::SlabBlock::deallocate : own input");

            std::lock_guard l(m_);

            // Use memcpy to avoid unaligned UB
            // (will optimize to equivalent code)
            std::memcpy(ptr, &l_, sizeof(std::uint8_t*));
            l_ = ptr;
        }
    };

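    // Illustrative note (an addition, not in the original header): the
    // freelist is intrusive. Each free buffer's first bytes store the
    // address of the next free buffer, so no side storage is needed:
    //
    //     l_ -> [buf N] -> [buf N-1] -> ... -> [buf 0] -> nullptr
    //
    // memcpy is used to read and write these links because a buffer is only
    // guaranteed to be aligned to the item alignment, which may be smaller
    // than alignof(std::uint8_t*).
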
private:
    // A linked list of slabs
    std::atomic<SlabBlock*> slabs_ = nullptr;

    // The alignment requirements of the item we're allocating:
    std::size_t const itemAlignment_;

    // The size of an item, including the extra bytes requested and
    // any padding needed for alignment purposes:
    std::size_t const itemSize_;

    // The size of each individual slab:
    std::size_t const slabSize_;

public:
    /** Constructs a slab allocator able to allocate objects of a fixed size

        @param extra The number of extra bytes per item, above and beyond
                     sizeof(Type).
        @param alloc The size, in bytes, of each slab.
        @param align The alignment of returned pointers; if 0, alignof(Type)
                     is used.
     */
    constexpr explicit SlabAllocator(
        std::size_t extra,
        std::size_t alloc = 0,
        std::size_t align = 0)
        : itemAlignment_(align ? align : alignof(Type))
        , itemSize_(
              boost::alignment::align_up(sizeof(Type) + extra, itemAlignment_))
        , slabSize_(alloc)
    {
        XRPL_ASSERT(
            (itemAlignment_ & (itemAlignment_ - 1)) == 0,
            "ripple::SlabAllocator::SlabAllocator : valid alignment");
    }
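
    // Worked example (illustrative, not from the original header): with
    // sizeof(Type) == 40, extra == 12, and the default alignment of 8,
    // itemSize_ = align_up(40 + 12, 8) = 56, so each item occupies 56 bytes.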

    SlabAllocator(SlabAllocator const& other) = delete;
    SlabAllocator&
    operator=(SlabAllocator const& other) = delete;

    SlabAllocator(SlabAllocator&& other) = delete;
    SlabAllocator&
    operator=(SlabAllocator&& other) = delete;

    ~SlabAllocator()
    {
        // FIXME: We can't destroy the memory blocks we've allocated, because
        //        we can't be sure that they are not being used. Cleaning the
        //        shutdown process up could make this possible.
    }

    /** Returns the size of the memory block this allocator returns. */
    constexpr std::size_t
    size() const noexcept
    {
        return itemSize_;
    }

    /** Returns a suitably aligned pointer, if one is available.

        @return a pointer to a block of memory from the allocator, or
                nullptr if the allocator can't satisfy this request.
     */
    std::uint8_t*
    allocate() noexcept
    {
        auto slab = slabs_.load();

        while (slab != nullptr)
        {
            if (auto ret = slab->allocate())
                return ret;

            slab = slab->next_;
        }

        // No slab can satisfy our request, so we attempt to allocate a new
        // one here:
        std::size_t size = slabSize_;

        // We want to allocate the memory at a 2 MiB boundary, to make it
        // possible to use hugepage mappings on Linux:
        auto buf =
            boost::alignment::aligned_alloc(megabytes(std::size_t(2)), size);

        // clang-format off
        if (!buf) [[unlikely]]
            return nullptr;
        // clang-format on

#if BOOST_OS_LINUX
        // When allocating large blocks, attempt to leverage Linux's
        // transparent hugepage support. It is unclear and difficult
        // to accurately determine if doing this impacts performance
        // enough to justify using platform-specific tricks.
        if (size >= megabytes(std::size_t(4)))
            madvise(buf, size, MADV_HUGEPAGE);
#endif

        // We need to carve out a bit of memory for the slab header
        // and then align the rest appropriately:
        auto slabData = reinterpret_cast<void*>(
            reinterpret_cast<std::uint8_t*>(buf) + sizeof(SlabBlock));
        auto slabSize = size - sizeof(SlabBlock);

        // This operation is essentially guaranteed not to fail but
        // let's be careful anyways.
        if (!boost::alignment::align(
                itemAlignment_, itemSize_, slabData, slabSize))
        {
            boost::alignment::aligned_free(buf);
            return nullptr;
        }

        slab = new (buf) SlabBlock(
            slabs_.load(),
            reinterpret_cast<std::uint8_t*>(slabData),
            slabSize,
            itemSize_);

        // Link the new slab at the head of the list. On failure,
        // compare_exchange_weak reloads the current head into slab->next_,
        // so the loop simply retries with the updated value.
        while (!slabs_.compare_exchange_weak(
            slab->next_,
            slab,
            std::memory_order_release,
            std::memory_order_relaxed))
        {
            ;  // Nothing to do
        }

        return slab->allocate();
    }

    /** Returns the memory block to the allocator.

        @param ptr A pointer to the memory block to return.

        @return true if this memory block belonged to the allocator and has
                     been released; false otherwise.
     */
    bool
    deallocate(std::uint8_t* ptr) noexcept
    {
        XRPL_ASSERT(
            ptr,
            "ripple::SlabAllocator::SlabAllocator::deallocate : non-null "
            "input");

        for (auto slab = slabs_.load(); slab != nullptr; slab = slab->next_)
        {
            if (slab->own(ptr))
            {
                slab->deallocate(ptr);
                return true;
            }
        }

        return false;
    }
};

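// Usage sketch (illustrative; `Payload` is a hypothetical type, not part of
// this header). The allocator returns raw, suitably aligned buffers, so
// callers placement-construct and manually destroy objects:
//
//     SlabAllocator<Payload> alloc(0, megabytes(std::size_t(2)));
//
//     if (std::uint8_t* buf = alloc.allocate())
//     {
//         auto* p = new (buf) Payload();  // construct in the raw buffer
//         // ... use p ...
//         p->~Payload();                  // destroy before releasing
//         alloc.deallocate(buf);
//     }
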
/** A collection of slab allocators of various sizes for a given type. */
template <typename Type>
class SlabAllocatorSet
{
private:
    // The list of allocators that belong to this set
    boost::container::static_vector<SlabAllocator<Type>, 64> allocators_;

    // The largest size that any allocator in this set can return:
    std::size_t maxSize_ = 0;

public:
    struct SlabConfig
    {
        friend class SlabAllocatorSet;

    private:
        std::size_t extra;
        std::size_t alloc;
        std::size_t align;

    public:
        constexpr SlabConfig(
            std::size_t extra_,
            std::size_t alloc_ = 0,
            std::size_t align_ = alignof(Type))
            : extra(extra_), alloc(alloc_), align(align_)
        {
        }
    };

    constexpr SlabAllocatorSet(std::vector<SlabConfig> cfg)
    {
        // Ensure that the specified allocators are sorted from smallest to
        // largest by size:
        std::sort(
            std::begin(cfg),
            std::end(cfg),
            [](SlabConfig const& a, SlabConfig const& b) {
                return a.extra < b.extra;
            });

        // We should never have two slabs of the same size
        if (std::adjacent_find(
                std::begin(cfg),
                std::end(cfg),
                [](SlabConfig const& a, SlabConfig const& b) {
                    return a.extra == b.extra;
                }) != cfg.end())
        {
            throw std::runtime_error(
                "SlabAllocatorSet<" + beast::type_name<Type>() +
                ">: duplicate slab size");
        }

        for (auto const& c : cfg)
        {
            auto& a = allocators_.emplace_back(c.extra, c.alloc, c.align);

            if (a.size() > maxSize_)
                maxSize_ = a.size();
        }
    }

    SlabAllocatorSet(SlabAllocatorSet const& other) = delete;
    SlabAllocatorSet&
    operator=(SlabAllocatorSet const& other) = delete;

    SlabAllocatorSet(SlabAllocatorSet&& other) = delete;
    SlabAllocatorSet&
    operator=(SlabAllocatorSet&& other) = delete;

    ~SlabAllocatorSet()
    {
    }

    /** Returns a suitably aligned pointer, if one is available.

        @param extra The number of extra bytes, above and beyond the size of
                     the object, that should be returned by the allocator.

        @return a pointer to a block of memory from the allocator, or
                nullptr if no allocator can satisfy this request.
     */
    std::uint8_t*
    allocate(std::size_t extra) noexcept
    {
        if (auto const size = sizeof(Type) + extra; size <= maxSize_)
        {
            for (auto& a : allocators_)
            {
                if (a.size() >= size)
                    return a.allocate();
            }
        }

        return nullptr;
    }

    /** Returns the memory block to the allocator.

        @param ptr A pointer to the memory block to return.

        @return true if this memory block belonged to one of the allocators
                     in this set and has been released; false otherwise.
     */
    bool
    deallocate(std::uint8_t* ptr) noexcept
    {
        for (auto& a : allocators_)
        {
            if (a.deallocate(ptr))
                return true;
        }

        return false;
    }
};

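// Usage sketch (illustrative; `Payload` and the sizes are hypothetical). A
// set serves variable-sized requests by picking the smallest allocator
// whose item size can fit the request:
//
//     SlabAllocatorSet<Payload> set(
//         {{64, megabytes(std::size_t(2))},
//          {256, megabytes(std::size_t(2))},
//          {1024, megabytes(std::size_t(4))}});
//
//     // sizeof(Payload) + 100 bytes: served by the "256 extra" allocator.
//     if (std::uint8_t* buf = set.allocate(100))
//         set.deallocate(buf);
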
}  // namespace ripple

#endif  // RIPPLE_BASICS_SLABALLOCATOR_H_INCLUDED