SlabAllocator.h
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright 2022, Nikolaos D. Bougalis <nikb@bougalis.net>

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_BASICS_SLABALLOCATOR_H_INCLUDED
#define RIPPLE_BASICS_SLABALLOCATOR_H_INCLUDED

#include <ripple/basics/ByteUtilities.h>
#include <ripple/beast/type_name.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <mutex>
#include <stdexcept>
#include <vector>

#include <boost/align.hpp>
#include <boost/container/static_vector.hpp>
#include <boost/predef.h>

#if BOOST_OS_LINUX
#include <sys/mman.h>
#endif

namespace ripple {

template <typename Type>
class SlabAllocator
{
    static_assert(
        sizeof(Type) >= sizeof(std::uint8_t*),
        "SlabAllocator: the requested object must be larger than a pointer.");

    static_assert(alignof(Type) == 8 || alignof(Type) == 4);

    /** A block of memory that is owned by a slab allocator */
    struct SlabBlock
    {
        // A mutex to protect the freelist for this block:
        std::mutex m_;

        // A linked list of appropriately sized free buffers:
        std::uint8_t* l_ = nullptr;

        // The next memory block
        SlabBlock* next_;

        // The underlying memory block:
        std::uint8_t const* const p_ = nullptr;

        // The extent of the underlying memory block:
        std::size_t const size_;

        SlabBlock(
            SlabBlock* next,
            std::uint8_t* data,
            std::size_t size,
            std::size_t item)
            : next_(next), p_(data), size_(size)
        {
            // We don't need to grab the mutex here, since we're the only
            // ones with access at this moment.

            while (data + item <= p_ + size_)
            {
                *reinterpret_cast<std::uint8_t**>(data) = l_;
                l_ = data;
                data += item;
            }
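
            // The freelist is threaded through the block itself: each free
            // item's first pointer-sized bytes hold the address of the next
            // free item, and l_ points at the most recently linked item:
            //
            //   l_ -> [last item] -> ... -> [first item] -> nullptr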
        }

        ~SlabBlock()
        {
            // Calling this destructor will release the allocated memory but
            // will not properly destroy any objects that are constructed in
            // the block itself.
        }

        SlabBlock(SlabBlock const& other) = delete;
        SlabBlock&
        operator=(SlabBlock const& other) = delete;

        SlabBlock(SlabBlock&& other) = delete;
        SlabBlock&
        operator=(SlabBlock&& other) = delete;

        /** Determines whether the given pointer belongs to this allocator */
        bool
        own(std::uint8_t const* p) const noexcept
        {
            return (p >= p_) && (p < p_ + size_);
        }

        std::uint8_t*
        allocate() noexcept
        {
            std::uint8_t* ret;

            {
                std::lock_guard l(m_);

                ret = l_;

                if (ret)
                    l_ = *reinterpret_cast<std::uint8_t**>(ret);
            }

            return ret;
        }

        /** Return an item to this allocator's freelist.

            @param ptr The pointer to the item being returned; it must have
                       been allocated from this block.
         */
        void
        deallocate(std::uint8_t* ptr) noexcept
        {
            assert(own(ptr));

            std::lock_guard l(m_);
            *reinterpret_cast<std::uint8_t**>(ptr) = l_;
            l_ = ptr;
        }
    };

private:
    // A linked list of slabs
    std::atomic<SlabBlock*> slabs_ = nullptr;

    // The alignment requirements of the item we're allocating:
    std::size_t const itemAlignment_;

    // The size of an item, including the extra bytes requested and
    // any padding needed for alignment purposes:
    std::size_t const itemSize_;

    // The size of each individual slab:
    std::size_t const slabSize_;

public:
    /** Constructs a slab allocator able to allocate objects of a fixed size.

        @param extra The number of extra bytes per item, beyond sizeof(Type),
                     that each allocated item should provide.
        @param alloc The size, in bytes, of each slab.
        @param align The alignment of returned pointers; if 0, the alignment
                     of Type is used.
     */
    constexpr explicit SlabAllocator(
        std::size_t extra,
        std::size_t alloc = 0,
        std::size_t align = 0)
        : itemAlignment_(align ? align : alignof(Type))
        , itemSize_(
              boost::alignment::align_up(sizeof(Type) + extra, itemAlignment_))
        , slabSize_(alloc)
    {
        assert((itemAlignment_ & (itemAlignment_ - 1)) == 0);
    }
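
    // For example (illustrative numbers only): with sizeof(Type) == 40,
    // extra == 20, and the default alignment of 8, each item occupies
    // align_up(40 + 20, 8) == 64 bytes in the slab.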

    SlabAllocator(SlabAllocator const& other) = delete;
    SlabAllocator&
    operator=(SlabAllocator const& other) = delete;

    SlabAllocator(SlabAllocator&& other) = delete;
    SlabAllocator&
    operator=(SlabAllocator&& other) = delete;

    ~SlabAllocator()
    {
        // FIXME: We can't destroy the memory blocks we've allocated, because
        // we can't be sure that they are not being used. Cleaning the
        // shutdown process up could make this possible.
    }

    /** Returns the size of the memory block this allocator returns. */
    constexpr std::size_t
    size() const noexcept
    {
        return itemSize_;
    }

    /** Returns a suitably aligned pointer, if one is available.

        @return a pointer to a block of memory from the allocator, or
                nullptr if none is available.
     */
    std::uint8_t*
    allocate() noexcept
    {
        auto slab = slabs_.load();

        while (slab != nullptr)
        {
            if (auto ret = slab->allocate())
                return ret;

            slab = slab->next_;
        }

        // No slab can satisfy our request, so we attempt to allocate a new
        // one here:
        auto const size = slabSize_;

        // We want to allocate the memory at a 2 MiB boundary, to make it
        // possible to use hugepage mappings on Linux:
        auto buf =
            boost::alignment::aligned_alloc(megabytes(std::size_t(2)), size);

        // clang-format off
        if (!buf) [[unlikely]]
            return nullptr;
        // clang-format on

#if BOOST_OS_LINUX
        // When allocating large blocks, attempt to leverage Linux's
        // transparent hugepage support. It is unclear and difficult
        // to accurately determine if doing this impacts performance
        // enough to justify using platform-specific tricks.
        if (size >= megabytes(std::size_t(4)))
            madvise(buf, size, MADV_HUGEPAGE);
#endif

        // We need to carve out a bit of memory for the slab header
        // and then align the rest appropriately:
        auto slabData = reinterpret_cast<void*>(
            reinterpret_cast<std::uint8_t*>(buf) + sizeof(SlabBlock));
        auto slabSize = size - sizeof(SlabBlock);

        // This operation is essentially guaranteed not to fail, but
        // let's be careful anyway.
        if (!boost::alignment::align(
                itemAlignment_, itemSize_, slabData, slabSize))
        {
            boost::alignment::aligned_free(buf);
            return nullptr;
        }

        slab = new (buf) SlabBlock(
            slabs_.load(),
            reinterpret_cast<std::uint8_t*>(slabData),
            slabSize,
            itemSize_);

        // Link the new slab at the head of the list. On a failed exchange,
        // compare_exchange_weak reloads the current head into slab->next_,
        // so each retry relinks the slab against the up-to-date head:
        while (!slabs_.compare_exchange_weak(
            slab->next_,
            slab,
            std::memory_order_release,
            std::memory_order_relaxed))
        {
            ; // Nothing to do
        }

        return slab->allocate();
    }

    /** Returns the memory block to the allocator.

        @param ptr A pointer to a memory block.

        @return true if this memory block belonged to the allocator and has
                     been released; false otherwise.
     */
    bool
    deallocate(std::uint8_t* ptr) noexcept
    {
        assert(ptr);

        for (auto slab = slabs_.load(); slab != nullptr; slab = slab->next_)
        {
            if (slab->own(ptr))
            {
                slab->deallocate(ptr);
                return true;
            }
        }

        return false;
    }
};
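
// Example (added illustration): a minimal usage sketch. `Item` is a
// hypothetical type; the sizes are illustrative. The allocator hands out
// raw memory, so callers placement-construct and manually destroy objects:
//
//     SlabAllocator<Item> alloc(0, megabytes(std::size_t(16)));
//
//     if (std::uint8_t* buf = alloc.allocate())
//     {
//         Item* item = new (buf) Item();  // construct in place
//         // ... use *item ...
//         item->~Item();                  // destroy manually
//         alloc.deallocate(buf);          // return the raw memory
//     }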

/** A collection of slab allocators of various sizes for a given type. */
template <typename Type>
class SlabAllocatorSet
{
private:
    // The list of allocators that belong to this set
    boost::container::static_vector<SlabAllocator<Type>, 64> allocators_;

    std::size_t maxSize_ = 0;

public:
    struct SlabConfig
    {
        friend class SlabAllocatorSet;

    private:
        std::size_t extra;
        std::size_t alloc;
        std::size_t align;

    public:
        constexpr SlabConfig(
            std::size_t extra_,
            std::size_t alloc_ = 0,
            std::size_t align_ = alignof(Type))
            : extra(extra_), alloc(alloc_), align(align_)
        {
        }
    };

    constexpr SlabAllocatorSet(std::vector<SlabConfig> cfg)
    {
        // Ensure that the specified allocators are sorted from smallest to
        // largest by size:
        std::sort(
            std::begin(cfg),
            std::end(cfg),
            [](SlabConfig const& a, SlabConfig const& b) {
                return a.extra < b.extra;
            });

        // We should never have two slabs of the same size
        if (std::adjacent_find(
                std::begin(cfg),
                std::end(cfg),
                [](SlabConfig const& a, SlabConfig const& b) {
                    return a.extra == b.extra;
                }) != cfg.end())
        {
            throw std::runtime_error(
                "SlabAllocatorSet<" + beast::type_name<Type>() +
                ">: duplicate slab size");
        }

        for (auto const& c : cfg)
        {
            auto& a = allocators_.emplace_back(c.extra, c.alloc, c.align);

            if (a.size() > maxSize_)
                maxSize_ = a.size();
        }
    }

    SlabAllocatorSet(SlabAllocatorSet const& other) = delete;
    SlabAllocatorSet&
    operator=(SlabAllocatorSet const& other) = delete;

    SlabAllocatorSet(SlabAllocatorSet&& other) = delete;
    SlabAllocatorSet&
    operator=(SlabAllocatorSet&& other) = delete;

    ~SlabAllocatorSet()
    {
    }

    /** Returns a suitably aligned pointer, if one is available.

        @param extra The number of extra bytes, above and beyond the size of
                     the object, that should be returned by the allocator.

        @return a pointer to a block of memory, or nullptr if this set cannot
                satisfy the request.
     */
    std::uint8_t*
    allocate(std::size_t extra) noexcept
    {
        if (auto const size = sizeof(Type) + extra; size <= maxSize_)
        {
            for (auto& a : allocators_)
            {
                if (a.size() >= size)
                    return a.allocate();
            }
        }

        return nullptr;
    }

    /** Returns the memory block to the allocator.

        @param ptr A pointer to a memory block.

        @return true if the memory block belonged to one of the allocators in
                     this set and has been released; false otherwise.
     */
    bool
    deallocate(std::uint8_t* ptr) noexcept
    {
        for (auto& a : allocators_)
        {
            if (a.deallocate(ptr))
                return true;
        }

        return false;
    }
};
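
// Example (added illustration): a minimal usage sketch for a set. `Item` is
// a hypothetical type and the configurations are illustrative; each
// SlabConfig specifies the extra bytes per object and the per-slab size:
//
//     SlabAllocatorSet<Item> set({
//         {0, megabytes(std::size_t(16))},   // sizeof(Item) + 0 extra bytes
//         {64, megabytes(std::size_t(32))},  // sizeof(Item) + 64 extra bytes
//     });
//
//     // Served by the smallest allocator whose item size fits the request;
//     // returns nullptr if even the largest cannot satisfy it:
//     if (std::uint8_t* buf = set.allocate(48))
//         set.deallocate(buf);  // routed back to the owning allocator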

} // namespace ripple

#endif // RIPPLE_BASICS_SLABALLOCATOR_H_INCLUDED