join 1.0
lightweight network framework library
Loading...
Searching...
No Matches
allocator.hpp
Go to the documentation of this file.
1
25#ifndef JOIN_CORE_ALLOCATOR_HPP
26#define JOIN_CORE_ALLOCATOR_HPP
27
// libjoin.
#include <join/backoff.hpp>
#include <join/memory.hpp>
#include <join/utils.hpp>

// C++.
#include <type_traits>
#include <utility>
#include <atomic>
#include <array>
#include <tuple>

// C.
#include <cassert>
#include <cstdint>
#include <cstddef>
43
44namespace join
45{
/**
 * @brief 64-bit head word packing a free-list index together with an ABA generation counter.
 *
 * The union lets the pool manipulate idx/gen separately while performing
 * atomic compare-and-swap on the single 64-bit raw value.
 */
union alignas (uint64_t) TaggedIndex
{
    struct
    {
        /// free-list head index.
        uint32_t idx;

        /// generation counter, bumped on every successful CAS to defeat ABA.
        uint32_t gen;
    };

    /// raw 64-bit value used for atomic CAS.
    uint64_t raw;
};
63
/**
 * @brief basic chunk: Size bytes of payload, or — while the chunk sits in the
 *        free list — the index of the next free chunk.
 * @tparam Size payload size in bytes (power of two, max_align_t-aligned, >= 8).
 */
template <size_t Size>
union alignas (std::max_align_t) BasicChunk
{
    static_assert ((Size & (Size - 1)) == 0, "size must be a power of 2");
    // diagnostics below had grammar typos ("must respects"); wording fixed.
    static_assert (Size % alignof (std::max_align_t) == 0, "size must respect maximum alignment requirement");
    static_assert (Size >= sizeof (uint64_t), "size must respect minimum size for storing index");

    /// index of next free chunk (meaningful only while the chunk is free).
    uint32_t _next;

    /// user payload.
    uint8_t _data[Size];
};
80
// NOTE(review): Doxygen extraction artifact — the numeric prefixes below are the
// original source line numbers. The struct declaration (original line 85 — presumably
// the segment-header type that BasicPool aliases as `Segment`) and the `using Chunk = ...`
// alias around line 87 were lost in extraction; confirm against the real header.
// Code bytes are left untouched.
84 template <size_t Size>
// segment header placed at the start of the shared region, followed by the chunk array.
86 {
88
// magic value published (release) once free-list initialization completes.
90 static constexpr uint64_t MAGIC = 0x9F7E3B2A8D5C4E1B;
91
// sentinel index marking the end of the free list (pool empty when it reaches the head).
93 static constexpr uint32_t NULL_IDX = UINT32_MAX;
94
// init state word: 0 = untouched, all-ones = initialization in progress, MAGIC = ready
// (see BasicPool's constructor). alignas(64) presumably separates it from _head to
// avoid false sharing — confirm 64 matches the target cache-line size.
96 alignas (64) std::atomic_uint64_t _magic;
97
// free-list head, stored as TaggedIndex::raw (index + ABA generation).
99 alignas (64) std::atomic_uint64_t _head;
100
// flexible array member (non-standard C++ extension): chunks live directly after the header.
102 Chunk _chunks[];
103 };
104
// NOTE(review): Doxygen extraction artifact — numeric prefixes are original source
// line numbers. The class/struct declaration of BasicPool (original line 109) and the
// type aliases around lines 113-117 (presumably `using Chunk = BasicChunk<Size>;` and
// the `Segment` alias used below) were lost in extraction; confirm against the real
// header. Code bytes are left untouched.
108 template <size_t Count, size_t Size>
110 {
// a pool must manage at least one chunk.
111 static_assert (Count > 0, "count must be at least 1");
// chunk indices live in 32 bits (TaggedIndex::idx), with UINT32_MAX reserved as NULL_IDX.
112 static_assert (Count <= UINT32_MAX, "count exceeds tagged index capacity (~256 GB)");
113
116
// number of chunks managed by the pool.
118 static constexpr size_t _count = Count;
// payload size of a single chunk, in bytes.
121 static constexpr size_t _size = Size;
// distance between consecutive chunks.
124 static constexpr size_t _stride = sizeof (Chunk);
// total memory footprint: segment header followed by the chunk array.
127 static constexpr size_t _total = sizeof (Segment) + _stride * _count;
128
133 explicit BasicPool (void* ptr) noexcept
134 : _segment (static_cast<Segment*> (ptr))
135 {
136 uint64_t expected = 0;
137
138 if (_segment->_magic.compare_exchange_strong (expected, 0xFFFFFFFFFFFFFFFF, std::memory_order_acq_rel))
139 {
140 for (uint32_t i = 0; i < _count - 1; ++i)
141 {
142 _segment->_chunks[i]._next = i + 1;
143 }
144 _segment->_chunks[_count - 1]._next = Segment::NULL_IDX;
145
146 TaggedIndex head;
147 head.idx = 0;
148 head.gen = 0;
149 _segment->_head.store (head.raw, std::memory_order_relaxed);
150
151 _segment->_magic.store (Segment::MAGIC, std::memory_order_release);
152 }
153 else
154 {
155 Backoff backoff;
156 while (_segment->_magic.load (std::memory_order_acquire) != Segment::MAGIC)
157 {
158 backoff ();
159 }
160 }
161 }
162
167 BasicPool (const BasicPool& other) = delete;
168
173 BasicPool& operator= (const BasicPool& other) = delete;
174
179 BasicPool (BasicPool&& other) noexcept
180 : _segment (other._segment)
181 {
182 other._segment = nullptr;
183 }
184
190 BasicPool& operator= (BasicPool&& other) noexcept
191 {
192 _segment = other._segment;
193 other._segment = nullptr;
194 return *this;
195 }
196
200 ~BasicPool () = default;
201
206 void* pop () noexcept
207 {
208 TaggedIndex cur, next;
209 cur.raw = _segment->_head.load (std::memory_order_acquire);
210
211 for (;;)
212 {
213 if (JOIN_UNLIKELY (cur.idx == Segment::NULL_IDX))
214 {
215 return nullptr;
216 }
217
218 next.idx = _segment->_chunks[cur.idx]._next;
219 next.gen = cur.gen + 1;
220
221 if (JOIN_LIKELY (_segment->_head.compare_exchange_weak (cur.raw, next.raw, std::memory_order_acq_rel,
222 std::memory_order_acquire)))
223 {
224 return &_segment->_chunks[cur.idx];
225 }
226 }
227 }
228
233 void push (void* p) noexcept
234 {
235 TaggedIndex cur, next;
236 next.idx = static_cast<uint32_t> (reinterpret_cast<Chunk*> (p) - _segment->_chunks);
237 cur.raw = _segment->_head.load (std::memory_order_relaxed);
238
239 for (;;)
240 {
241 _segment->_chunks[next.idx]._next = cur.idx;
242 next.gen = cur.gen + 1;
243
244 if (JOIN_LIKELY (_segment->_head.compare_exchange_weak (cur.raw, next.raw, std::memory_order_release,
245 std::memory_order_relaxed)))
246 {
247 return;
248 }
249 }
250 }
251
257 bool owns (void* p) const noexcept
258 {
259 auto base = reinterpret_cast<std::uintptr_t> (_segment->_chunks);
260 auto end = base + (_count * _stride);
261 auto ptr = reinterpret_cast<std::uintptr_t> (p);
262 return ((ptr >= base) && (ptr < end));
263 }
264
266 Segment* _segment = nullptr;
267 };
268
272 template <size_t Count, size_t... Sizes>
273 struct TotalSize;
274
278 template <size_t Count, size_t First, size_t... Rest>
279 struct TotalSize<Count, First, Rest...>
280 {
281 static constexpr size_t value = BasicPool<Count, First>::_total + TotalSize<Count, Rest...>::value;
282 };
283
287 template <size_t Count, size_t Last>
288 struct TotalSize<Count, Last>
289 {
290 static constexpr size_t value = BasicPool<Count, Last>::_total;
291 };
292
/**
 * @brief compile-time check that a size_t parameter pack is in non-decreasing order.
 */
template <size_t... Sizes>
struct Sorted;

/// recursive case: the leading pair must be ordered and the tail must be sorted.
template <size_t First, size_t Second, size_t... Rest>
struct Sorted<First, Second, Rest...>
    : std::integral_constant<bool, (First <= Second) && Sorted<Second, Rest...>::value>
{
};

/// a single element is trivially sorted.
template <size_t Last>
struct Sorted<Last> : std::true_type
{
};

/// an empty pack is trivially sorted.
template <>
struct Sorted<> : std::true_type
{
};
323
327 template <typename Backend, size_t Count, size_t... Sizes>
328 class BasicArena
329 {
330 static_assert (sizeof...(Sizes) > 0, "arena must have at least one pool size");
331 static_assert (Sorted<Sizes...>::value, "pool sizes must be provided in ascending order");
332
333 public:
335 static constexpr size_t _total = TotalSize<Count, Sizes...>::value;
336
341 template <typename... Args>
342 BasicArena (Args&&... args)
343 : _backend (_total, std::forward<Args> (args)...)
344 , _pools (makePools (std::make_index_sequence<sizeof...(Sizes)>{}))
345 {
346 }
347
352 BasicArena (const BasicArena& other) = delete;
353
358 BasicArena& operator= (const BasicArena& other) = delete;
359
364 BasicArena (BasicArena&& other) noexcept
365 : _backend (std::move (other._backend))
366 , _pools (std::move (other._pools))
367 {
368 }
369
375 BasicArena& operator= (BasicArena&& other) noexcept
376 {
377 _backend = std::move (other._backend);
378 _pools = std::move (other._pools);
379 return *this;
380 }
381
385 ~BasicArena () = default;
386
392 void* allocate (size_t size) noexcept
393 {
394 return allocateImplem<0> (size);
395 }
396
402 void* tryAllocate (size_t size) noexcept
403 {
404 return tryAllocateImplem<0> (size);
405 }
406
411 void deallocate (void* p) noexcept
412 {
413 if (p == nullptr)
414 {
415 return;
416 }
417 deallocateImplem<0> (p);
418 }
419
420#ifdef JOIN_HAS_NUMA
426 int mbind (int numa) const noexcept
427 {
428 return _backend.mbind (numa);
429 }
430#endif
431
436 int mlock () const noexcept
437 {
438 return _backend.mlock ();
439 }
440
441 private:
446 static std::array<size_t, sizeof...(Sizes)> makeOffsets ()
447 {
448 size_t totals[] = {BasicPool<Count, Sizes>::_total...};
449 std::array<size_t, sizeof...(Sizes)> offsets{};
450 for (size_t i = 1; i < sizeof...(Sizes); ++i)
451 {
452 offsets[i] = offsets[i - 1] + totals[i - 1];
453 }
454 return offsets;
455 }
456
462 template <size_t... Is>
463 std::tuple<BasicPool<Count, Sizes>...> makePools (std::index_sequence<Is...>) noexcept
464 {
465 auto offsets = makeOffsets ();
466 return std::tuple<BasicPool<Count, Sizes>...>{
467 BasicPool<Count, Sizes> (static_cast<char*> (_backend.get ()) + offsets[Is])...};
468 }
469
473 template <size_t I>
474 typename std::enable_if<(I < sizeof...(Sizes)), void*>::type allocateImplem (size_t size) noexcept
475 {
476 auto& pool = std::get<I> (_pools);
477 if (size <= pool._size)
478 {
479 void* p = pool.pop ();
480 if (p != nullptr)
481 {
482 return p;
483 }
484 }
485 return allocateImplem<I + 1> (size);
486 }
487
491 template <size_t I>
492 typename std::enable_if<(I >= sizeof...(Sizes)), void*>::type allocateImplem (size_t) noexcept
493 {
494 return nullptr;
495 }
496
500 template <size_t I>
501 typename std::enable_if<(I < sizeof...(Sizes)), void*>::type tryAllocateImplem (size_t size) noexcept
502 {
503 auto& pool = std::get<I> (_pools);
504 if (size <= pool._size)
505 {
506 return pool.pop ();
507 }
508 return tryAllocateImplem<I + 1> (size);
509 }
510
514 template <size_t I>
515 typename std::enable_if<(I >= sizeof...(Sizes)), void*>::type tryAllocateImplem (size_t) noexcept
516 {
517 return nullptr;
518 }
519
523 template <size_t I>
524 typename std::enable_if<(I < sizeof...(Sizes))>::type deallocateImplem (void* p) noexcept
525 {
526 auto& pool = std::get<I> (_pools);
527 if (pool.owns (p))
528 {
529 pool.push (p);
530 return;
531 }
532 deallocateImplem<I + 1> (p);
533 }
534
538 template <size_t I>
539 typename std::enable_if<(I >= sizeof...(Sizes))>::type deallocateImplem (void*) noexcept
540 {
541 }
542
544 Backend _backend;
545
547 std::tuple<BasicPool<Count, Sizes>...> _pools;
548 };
549}
550
551#endif
adaptive backoff strategy for busy-wait loops.
Definition backoff.hpp:45
memory arena owning backend and managing one or more pools.
Definition memory.hpp:53
~BasicArena()=default
destroy instance.
BasicArena(const BasicArena &other)=delete
copy constructor.
int mlock() const noexcept
lock memory in RAM.
Definition allocator.hpp:436
void * allocate(size_t size) noexcept
allocate memory from the first pool that fits (promotes if exhausted).
Definition allocator.hpp:392
void * tryAllocate(size_t size) noexcept
allocate memory from the exact pool that fits, without promotion.
Definition allocator.hpp:402
BasicArena(BasicArena &&other) noexcept
move constructor.
Definition allocator.hpp:364
void deallocate(void *p) noexcept
return memory to the appropriate pool.
Definition allocator.hpp:411
Definition acceptor.hpp:32
std::string base(const std::string &filepath)
get base path of the specified file.
Definition filesystem.hpp:41
free-list pool operating over a pre-existing memory region.
Definition allocator.hpp:110
void push(void *p) noexcept
push a chunk back to the pool.
Definition allocator.hpp:233
void * pop() noexcept
pop a chunk from the pool.
Definition allocator.hpp:206
BasicPool(const BasicPool &other)=delete
copy constructor.
BasicPool(BasicPool &&other) noexcept
move constructor.
Definition allocator.hpp:179
bool owns(void *p) const noexcept
check if the pointer belongs to this pool.
Definition allocator.hpp:257
BasicPool(void *ptr) noexcept
initialize the pool over an existing memory region.
Definition allocator.hpp:133
~BasicPool()=default
destroy instance.
segment header.
Definition allocator.hpp:86
is sequence sorted.
Definition allocator.hpp:297
total size computation.
Definition allocator.hpp:273
basic chunk.
Definition allocator.hpp:69
uint32_t _next
index of next free chunk.
Definition allocator.hpp:75
tagged index.
Definition allocator.hpp:50
uint32_t gen
generation counter.
Definition allocator.hpp:57
uint32_t idx
free-list head index.
Definition allocator.hpp:54
uint64_t raw
raw value for atomic CAS.
Definition allocator.hpp:61
#define JOIN_LIKELY(x)
Definition utils.hpp:46
#define JOIN_UNLIKELY(x)
Definition utils.hpp:47