25#ifndef JOIN_CORE_ALLOCATOR_HPP
26#define JOIN_CORE_ALLOCATOR_HPP
67 template <
size_t Size>
70 static_assert ((Size & (Size - 1)) == 0,
"size must be a power of 2");
71 static_assert (Size %
alignof (std::max_align_t) == 0,
"size must respects maximum alignment requirement");
72 static_assert (Size >=
sizeof (uint64_t),
"size must respects minimum size for storing index");
84 template <
size_t Size>
90 static constexpr uint64_t MAGIC = 0x9F7E3B2A8D5C4E1B;
93 static constexpr uint32_t NULL_IDX = UINT32_MAX;
96 alignas (64) std::atomic_uint64_t _magic;
99 alignas (64) std::atomic_uint64_t _head;
108 template <
size_t Count,
size_t Size>
111 static_assert (Count > 0,
"count must be at least 1");
112 static_assert (Count <= UINT32_MAX,
"count exceeds tagged index capacity (~256 GB)");
118 static constexpr size_t _count = Count;
121 static constexpr size_t _size = Size;
124 static constexpr size_t _stride =
sizeof (
Chunk);
127 static constexpr size_t _total =
sizeof (
Segment) + _stride * _count;
134 : _segment (
static_cast<Segment*
> (ptr))
136 uint64_t expected = 0;
138 if (_segment->_magic.compare_exchange_strong (expected, 0xFFFFFFFFFFFFFFFF, std::memory_order_acq_rel))
140 for (uint32_t i = 0; i < _count - 1; ++i)
142 _segment->_chunks[i]._next = i + 1;
144 _segment->_chunks[_count - 1]._next = Segment::NULL_IDX;
149 _segment->_head.store (head.
raw, std::memory_order_relaxed);
151 _segment->_magic.store (Segment::MAGIC, std::memory_order_release);
156 while (_segment->_magic.load (std::memory_order_acquire) != Segment::MAGIC)
180 : _segment (other._segment)
182 other._segment =
nullptr;
192 _segment = other._segment;
193 other._segment =
nullptr;
209 cur.
raw = _segment->_head.load (std::memory_order_acquire);
218 next.
idx = _segment->_chunks[cur.
idx]._next;
221 if (
JOIN_LIKELY (_segment->_head.compare_exchange_weak (cur.
raw, next.
raw, std::memory_order_acq_rel,
222 std::memory_order_acquire)))
224 return &_segment->_chunks[cur.
idx];
236 next.
idx =
static_cast<uint32_t
> (
reinterpret_cast<Chunk*
> (p) - _segment->_chunks);
237 cur.
raw = _segment->_head.load (std::memory_order_relaxed);
241 _segment->_chunks[next.
idx]._next = cur.
idx;
244 if (
JOIN_LIKELY (_segment->_head.compare_exchange_weak (cur.
raw, next.
raw, std::memory_order_release,
245 std::memory_order_relaxed)))
257 bool owns (
void* p)
const noexcept
259 auto base =
reinterpret_cast<std::uintptr_t
> (_segment->_chunks);
260 auto end =
base + (_count * _stride);
261 auto ptr =
reinterpret_cast<std::uintptr_t
> (p);
262 return ((ptr >=
base) && (ptr < end));
272 template <
size_t Count,
size_t... Sizes>
278 template <
size_t Count,
size_t First,
size_t... Rest>
287 template <
size_t Count,
size_t Last>
296 template <
size_t... Sizes>
302 template <
size_t First,
size_t Second,
size_t... Rest>
304 : std::integral_constant<bool, (First <= Second) && Sorted<Second, Rest...>::value>
311 template <size_t Last>
312 struct Sorted<Last> : std::true_type
320 struct Sorted<> : std::true_type
327 template <typename Backend, size_t Count, size_t... Sizes>
330 static_assert (sizeof...(Sizes) > 0, "arena must have at least one pool size");
331 static_assert (Sorted<Sizes...>::value, "pool sizes must be provided in ascending order");
335 static constexpr size_t _total =
TotalSize<Count, Sizes...>::value;
341 template <typename... Args>
343 : _backend (_total, std::forward<Args> (args)...)
344 , _pools (makePools (std::make_index_sequence<sizeof...(Sizes)>{}))
365 : _backend (std::move (other._backend))
366 , _pools (std::move (other._pools))
377 _backend = std::move (other._backend);
378 _pools = std::move (other._pools);
394 return allocateImplem<0> (size);
404 return tryAllocateImplem<0> (size);
417 deallocateImplem<0> (p);
427 return _backend.mbind (numa);
436 return _backend.mlock ();
444 static std::array<size_t,
sizeof...(Sizes)> makeOffsets ()
447 std::array<size_t,
sizeof...(Sizes)> offsets{};
448 for (
size_t i = 1; i <
sizeof...(Sizes); ++i)
450 offsets[i] = offsets[i - 1] + totals[i - 1];
460 template <
size_t... Is>
461 std::tuple<BasicPool<Count, Sizes>...> makePools (std::index_sequence<Is...>)
noexcept
463 auto offsets = makeOffsets ();
464 return std::tuple<BasicPool<Count, Sizes>...>{
465 BasicPool<Count, Sizes> (
static_cast<char*
> (_backend.get ()) + offsets[Is])...};
472 typename std::enable_if<(I <
sizeof...(Sizes)),
void*>::type allocateImplem (
size_t size)
noexcept
474 auto& pool = std::get<I> (_pools);
475 if (size <= pool._size)
477 void* p = pool.pop ();
483 return allocateImplem<I + 1> (size);
490 typename std::enable_if<(I >=
sizeof...(Sizes)),
void*>::type allocateImplem (
size_t)
noexcept
499 typename std::enable_if<(I <
sizeof...(Sizes)),
void*>::type tryAllocateImplem (
size_t size)
noexcept
501 auto& pool = std::get<I> (_pools);
502 if (size <= pool._size)
506 return tryAllocateImplem<I + 1> (size);
513 typename std::enable_if<(I >=
sizeof...(Sizes)),
void*>::type tryAllocateImplem (
size_t)
noexcept
522 typename std::enable_if<(I <
sizeof...(Sizes))>::type deallocateImplem (
void* p)
noexcept
524 auto& pool = std::get<I> (_pools);
530 deallocateImplem<I + 1> (p);
537 typename std::enable_if<(I >=
sizeof...(Sizes))>::type deallocateImplem (
void*)
noexcept
545 std::tuple<BasicPool<Count, Sizes>...> _pools;
adaptive backoff strategy for busy-wait loops.
Definition backoff.hpp:45
memory arena owning backend and managing one or more pools.
Definition memory.hpp:51
~BasicArena()=default
destroy instance.
BasicArena(const BasicArena &other)=delete
copy constructor.
int mlock() const noexcept
lock memory in RAM.
Definition allocator.hpp:434
void * allocate(size_t size) noexcept
allocate memory from the first pool that fits (promotes if exhausted).
Definition allocator.hpp:392
void * tryAllocate(size_t size) noexcept
allocate memory from the exact pool that fits, without promotion.
Definition allocator.hpp:402
BasicArena(BasicArena &&other) noexcept
move constructor.
Definition allocator.hpp:364
int mbind(int numa) const noexcept
bind memory to a NUMA node.
Definition allocator.hpp:425
void deallocate(void *p) noexcept
return memory to the appropriate pool.
Definition allocator.hpp:411
Definition acceptor.hpp:32
std::string base(const std::string &filepath)
get base path of the specified file.
Definition filesystem.hpp:41
free-list pool operating over a pre-existing memory region.
Definition allocator.hpp:110
void push(void *p) noexcept
push a chunk back to the pool.
Definition allocator.hpp:233
void * pop() noexcept
pop a chunk from the pool.
Definition allocator.hpp:206
BasicPool(const BasicPool &other)=delete
copy constructor.
BasicPool(BasicPool &&other) noexcept
move constructor.
Definition allocator.hpp:179
bool owns(void *p) const noexcept
check if the pointer belongs to this pool.
Definition allocator.hpp:257
BasicPool(void *ptr) noexcept
initialize the pool over an existing memory region.
Definition allocator.hpp:133
~BasicPool()=default
destroy instance.
segment header.
Definition allocator.hpp:86
checks whether a sequence of sizes is in ascending order.
Definition allocator.hpp:297
total size computation.
Definition allocator.hpp:273
basic chunk.
Definition allocator.hpp:69
uint32_t _next
index of next free chunk.
Definition allocator.hpp:75
tagged index.
Definition allocator.hpp:50
uint32_t gen
generation counter.
Definition allocator.hpp:57
uint32_t idx
free-list head index.
Definition allocator.hpp:54
uint64_t raw
raw value for atomic CAS.
Definition allocator.hpp:61
#define JOIN_LIKELY(x)
Definition utils.hpp:46
#define JOIN_UNLIKELY(x)
Definition utils.hpp:47