carb/extras/HandleDatabase.h
// Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../Defines.h"
#include "../container/LocklessQueue.h"
#include "../logging/Log.h"
#include "../math/Util.h"
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <memory>
#include <mutex>
#include <stack>
#include <vector>
CARB_IGNOREWARNING_MSC_WITH_PUSH(4201) // nonstandard extension used: nameless struct/union
namespace carb
{
namespace extras
{
template <class Mapped, class Handle, class Allocator>
struct HandleRef;
template <class Mapped, class Handle, class Allocator>
struct ConstHandleRef;
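// HandleDatabase maps lightweight 64-bit handles of type `Handle` to reference-counted, in-place
// constructed instances of `Mapped`. Storage grows in power-of-two buckets that are only freed at
// destruction, so handle lookups do not require a mutex; a per-entry lifecycle counter allows stale
// handles to be rejected after their slot has been recycled.
//
// A minimal usage sketch (illustrative only; `Thing`, `ThingHandle` and `doSomething()` are
// hypothetical caller-side names):
//
//     using ThingHandle = uint64_t; // any 64-bit handle type is acceptable
//     carb::extras::HandleDatabase<Thing, ThingHandle> db;
//
//     auto created = db.createHandle(/* Thing constructor args */); // {handle, Thing*}, refcount == 1
//     ThingHandle h = created.first;
//
//     if (auto ref = db.makeScopedRef(h)) // holds an additional reference for the enclosing scope
//         ref->doSomething();
//
//     db.release(h); // drops the creation reference; the Thing is destroyed when the count reaches zero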
template <class Mapped, class Handle, class Allocator = std::allocator<Mapped>>
class CARB_VIZ HandleDatabase
{
public:
using MappedType = Mapped;
using HandleType = Handle;
using AllocatorType = Allocator;
using HandleRefType = HandleRef<Mapped, Handle, Allocator>;
using ConstHandleRefType = ConstHandleRef<Mapped, Handle, Allocator>;
using scoped_handle_ref_type = HandleRefType;
constexpr HandleDatabase() noexcept;
~HandleDatabase();
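// handleWasValid() reports whether a handle has ever been valid for this database, even if it has since
// been released; handleIsValid() reports whether the handle currently refers to a live entry.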
bool handleWasValid(Handle handle) const;
bool handleIsValid(Handle handle) const;
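// Constructs a Mapped value in-place from `args` and returns the new handle together with a pointer to
// the constructed value; the new entry starts with a reference count of one.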
template <class... Args>
std::pair<Handle, Mapped*> createHandle(Args&&... args);
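// getValueFromHandle() returns the mapped value for a currently valid handle without adding a reference
// (or nullptr); getHandleFromValue() performs the reverse lookup for a value owned by this database.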
Mapped* getValueFromHandle(Handle handle) const;
Handle getHandleFromValue(const Mapped* mapped) const;
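// tryAddRef() atomically adds a reference and returns the mapped value if the handle is still valid,
// otherwise nullptr; the addRef() overloads add a reference and require a currently valid handle or a
// live mapped value.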
Mapped* tryAddRef(Handle handle);
void addRef(Handle handle);
void addRef(Mapped* mapped);
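// release() removes a reference; when the last reference is removed the mapped value is destroyed, its
// slot is recycled, and true is returned. releaseIfLastRef() releases only if the caller holds the final
// reference and returns whether the value was destroyed.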
bool release(Handle handle);
bool release(Mapped* mapped);
bool releaseIfLastRef(Handle handle);
bool releaseIfLastRef(Mapped* mapped);
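// makeScopedRef() returns a RAII reference (HandleRef or ConstHandleRef) that holds a reference to the
// entry for its own lifetime; the result tests as false if the handle was not valid.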
HandleRefType makeScopedRef(Handle handle);
ConstHandleRefType makeScopedRef(Handle handle) const;
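// forEachHandle() invokes `f(Handle, Mapped*)` for every currently valid entry. The overload whose
// functor returns bool stops early when the functor returns false and reports whether the iteration
// ran to completion. For example (illustrative only, reusing the hypothetical types from above):
//
//     db.forEachHandle([](ThingHandle, Thing* t) { t->doSomething(); });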
template <class Func CARB_NO_DOC(
,
std::enable_if_t<!std::is_same<bool, decltype(std::declval<Func>()(std::declval<Handle>(), std::declval<Mapped*>()))>::value,
bool> = false)>
void forEachHandle(Func&& f) const
{
for (uint32_t i = 0; i < kBuckets; ++i)
{
// We are done once we encounter a null pointer
HandleData* data = getDbIndex(i, std::memory_order_acquire);
if (!data)
break;
uint32_t const bucketSize = 1 << i;
for (uint32_t j = 0; j != bucketSize; ++j)
{
Metadata meta = data[j].metadata.load(std::memory_order_acquire);
if (meta.refCount != 0)
{
// Valid handle
HandleUnion hu((bucketSize - 1) + j, meta.lifecycle);
// Call the functor
f(hu.handle, std::addressof(data[j].val));
}
}
}
}
template <class Func CARB_NO_DOC(
,
std::enable_if_t<std::is_same<bool, decltype(std::declval<Func>()(std::declval<Handle>(), std::declval<Mapped*>()))>::value,
bool> = true)>
bool forEachHandle(Func&& f) const
{
for (uint32_t i = 0; i < kBuckets; ++i)
{
// We are done once we encounter a null pointer
HandleData* data = getDbIndex(i, std::memory_order_acquire);
if (!data)
break;
uint32_t const bucketSize = 1 << i;
for (uint32_t j = 0; j != bucketSize; ++j)
{
Metadata meta = data[j].metadata.load(std::memory_order_acquire);
if (meta.refCount != 0)
{
// Valid handle
HandleUnion hu((bucketSize - 1) + j, meta.lifecycle);
// Call the functor
if (!f(hu.handle, std::addressof(data[j].val)))
return false;
}
}
}
return true;
}
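// clear() forcibly releases every live entry regardless of outstanding references, destroying the mapped
// values, and returns the number of entries destroyed. awaitFinalRelease() blocks the calling thread
// until the given handle is no longer valid.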
size_t clear();
void awaitFinalRelease(Handle h) const;
private:
CARB_VIZ static constexpr uint32_t kBuckets = sizeof(uint32_t) * 8 - 1;
static constexpr uint32_t kMaxSize = (1U << kBuckets) - 1;
// NOTE: Making *any* change to how rollover works should enable HANDLE_DATABASE_EXTENDED_TESTS in TestExtras.cpp
static constexpr uint32_t kRolloverFlag = 0x80000000u;
static constexpr uint32_t kLifecycleMask = 0x7fffffffu;
// A struct for mapping type Handle into its basic components of index and lifecycle
struct HandleUnion
{
union
{
Handle handle;
struct
{
uint32_t index;
uint32_t lifecycle;
};
};
constexpr HandleUnion(Handle h) noexcept : handle(h)
{
}
constexpr HandleUnion(uint32_t idx, uint32_t life) noexcept : index(idx), lifecycle(life & kLifecycleMask)
{
}
};
// A struct for tracking the refcount and lifecycle.
struct alignas(uint64_t) Metadata
{
uint32_t refCount{ 0 };
CARB_VIZ uint32_t lifecycle{ 0 };
constexpr Metadata() noexcept = default;
};
struct HandleData
{
// This union will either be the free link and index, or the constructed mapped type
union
{
struct
{
container::LocklessQueueLink<HandleData> link;
uint32_t index;
};
// This `val` is only constructed when the HandleData is in use
Mapped val;
};
// Metadata; the union allows the refCount to be accessed atomically on its own.
union
{
CARB_VIZ std::atomic<Metadata> metadata;
std::atomic_uint32_t refCount; // Must share memory address with metadata.refCount
};
constexpr HandleData(uint32_t idx) noexcept : index(idx), metadata{}
{
}
// Empty destructor, required because of the union with non-trivial types
~HandleData() noexcept
{
}
};
static_assert(alignof(HandleData) >= alignof(Mapped),
"Invalid assumption: HandleData alignment should meet or exceed Mapped alignment");
static_assert(sizeof(Handle) == sizeof(uint64_t), "Handle should be 64-bit");
using HandleDataAllocator = typename std::allocator_traits<Allocator>::template rebind_alloc<HandleData>;
using AllocatorTraits = std::allocator_traits<HandleDataAllocator>;
static void indexToBucketAndOffset(uint32_t index, uint32_t& bucket, uint32_t& offset);
HandleData* expandDatabase();
HandleData* getHandleData(uint32_t index) const;
HandleData* getHandleData(const Mapped* mapped) const;
void destroyMapped(HandleData* mapped, uint32_t index);
HandleData* getDbIndex(size_t index, std::memory_order = std::memory_order_seq_cst) const;
// m_database is fixed size and written atomically in expandDatabase(). It never contracts and therefore
// does not require a mutex
CARB_VIZ std::atomic<HandleData*> m_database[kBuckets] = {};
struct Members : HandleDataAllocator
{
// The "tasking timing" test performs much better with m_free as a queue instead of a stack.
container::LocklessQueue<HandleData, &HandleData::link> m_free;
} m_emo;
// An atomic used as a condition variable of sorts to notify about destruction
carb::cpp::atomic_size_t m_destructNotifier{ 0 };
};
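// ConstHandleRef is a move-only RAII reference to a HandleDatabase entry: constructing it from a database
// and handle attempts to add a reference via tryAddRef(), and destruction releases it. It grants const
// access to the mapped value; use clone() when an additional reference is explicitly desired.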
template <class Mapped, class Handle, class Allocator = std::allocator<Mapped>>
struct ConstHandleRef
{
public:
using MappedType = const Mapped;
using ReferenceType = const Mapped&;
using PointerType = const Mapped*;
using HandleType = Handle;
using AllocatorType = Allocator;
using Database = HandleDatabase<Mapped, Handle, Allocator>;
using element_type = MappedType;
using handle_type = HandleType;
ConstHandleRef() = default;
ConstHandleRef(Database& database, Handle handle) noexcept : m_mapped(database.tryAddRef(handle))
{
if (m_mapped)
{
m_database = std::addressof(database);
m_handle = handle;
}
}
~ConstHandleRef() noexcept
{
reset();
}
ConstHandleRef(ConstHandleRef&& other) noexcept
{
swap(other);
}
ConstHandleRef& operator=(ConstHandleRef&& other) noexcept
{
swap(other);
return *this;
}
// Specifically non-Copyable in order to reduce implicit usage.
// Use the explicit `clone()` function if a copy is desired
CARB_PREVENT_COPY(ConstHandleRef);
ConstHandleRef clone() const noexcept
{
if (m_mapped)
m_database->addRef(m_mapped);
return ConstHandleRef{ m_database, m_handle, m_mapped };
}
PointerType operator->() noexcept
{
return get();
}
PointerType operator->() const noexcept
{
return get();
}
ReferenceType operator*() noexcept
{
CARB_ASSERT(get());
return *get();
}
ReferenceType operator*() const noexcept
{
CARB_ASSERT(get());
return *get();
}
PointerType get() noexcept
{
return this->m_mapped;
}
PointerType get() const noexcept
{
return this->m_mapped;
}
Handle handle() const noexcept
{
return this->m_handle;
}
explicit operator bool() const noexcept
{
return get() != nullptr;
}
void reset()
{
if (auto mapped = std::exchange(m_mapped, nullptr))
m_database->release(mapped);
m_database = nullptr;
m_handle = HandleType{};
}
void swap(ConstHandleRef& rhs)
{
std::swap(m_database, rhs.m_database);
std::swap(m_handle, rhs.m_handle);
std::swap(m_mapped, rhs.m_mapped);
}
protected:
Database* m_database{};
Handle m_handle{};
Mapped* m_mapped{};
ConstHandleRef(Database* db, Handle h, Mapped* m) noexcept : m_database(db), m_handle(h), m_mapped(m)
{
}
};
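// HandleRef is the mutable counterpart to ConstHandleRef, granting non-const access to the mapped value
// while a reference is held.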
template <class Mapped, class Handle, class Allocator = std::allocator<Mapped>>
struct HandleRef : public ConstHandleRef<Mapped, Handle, Allocator>
{
using BaseType = ConstHandleRef<Mapped, Handle, Allocator>;
public:
using MappedType = Mapped;
using ReferenceType = Mapped&;
using PointerType = Mapped*;
using HandleType = Handle;
using AllocatorType = Allocator;
using Database = HandleDatabase<Mapped, Handle, Allocator>;
using element_type = MappedType;
using handle_type = HandleType;
HandleRef() = default;
HandleRef(Database& database, Handle handle) noexcept : BaseType(database, handle)
{
}
HandleRef clone() const noexcept
{
if (this->m_mapped)
this->m_database->addRef(this->m_mapped);
return HandleRef{ this->m_database, this->m_handle, this->m_mapped };
}
PointerType operator->() noexcept
{
return get();
}
PointerType operator->() const noexcept
{
return get();
}
ReferenceType operator*() noexcept
{
CARB_ASSERT(get());
return *get();
}
ReferenceType operator*() const noexcept
{
CARB_ASSERT(get());
return *get();
}
PointerType get() noexcept
{
return this->m_mapped;
}
PointerType get() const noexcept
{
return this->m_mapped;
}
void swap(HandleRef& rhs)
{
BaseType::swap(rhs);
}
private:
HandleRef(Database* db, Handle h, Mapped* m) noexcept : BaseType(db, h, m)
{
}
};
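// Alias of HandleRef.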
template <class Mapped, class Handle, class Allocator = std::allocator<Mapped>>
using ScopedHandleRef = HandleRef<Mapped, Handle, Allocator>;
template <class Mapped, class Handle, class Allocator>
inline constexpr HandleDatabase<Mapped, Handle, Allocator>::HandleDatabase() noexcept
{
}
template <class Mapped, class Handle, class Allocator>
inline HandleDatabase<Mapped, Handle, Allocator>::~HandleDatabase()
{
// Make sure everything is removed from the free list before we destroy it
m_emo.m_free.popAll();
size_t leaks = 0;
for (size_t i = 0; i < CARB_COUNTOF(m_database); i++)
{
HandleData* handleData = m_database[i].exchange(nullptr, std::memory_order_relaxed);
if (!handleData)
break;
// Go through each entry and destruct any outstanding ones with a non-zero refcount
size_t const kBucketSize = size_t(1) << i;
for (size_t j = 0; j != kBucketSize; ++j)
{
Metadata meta = handleData[j].metadata.load(std::memory_order_relaxed);
if (meta.refCount != 0)
{
handleData[j].val.~Mapped();
++leaks;
}
AllocatorTraits::destroy(m_emo, handleData + j);
}
AllocatorTraits::deallocate(m_emo, handleData, kBucketSize);
}
if (leaks != 0)
{
// We don't notify m_destructNotifier here because destructors are inherently non-threadsafe. No other threads
// should be in any HandleDatabase functions with *this, and if they are then UB will result.
// If we *did* notify here, then a thread in awaitFinalRelease() will attempt unsafe memory accesses, therefore
// this is specifically disallowed.
CARB_LOG_WARN("%s: had %zu outstanding handle(s) at shutdown", CARB_PRETTY_FUNCTION, leaks);
}
}
template <class Mapped, class Handle, class Allocator>
inline auto HandleDatabase<Mapped, Handle, Allocator>::getHandleData(uint32_t index) const -> HandleData*
{
uint32_t bucketIndex, offsetInBucket;
indexToBucketAndOffset(index, bucketIndex, offsetInBucket);
if (bucketIndex >= CARB_COUNTOF(m_database))
{
return nullptr;
}
auto bucket = getDbIndex(bucketIndex, std::memory_order_acquire);
return bucket ? bucket + offsetInBucket : nullptr;
}
template <class Mapped, class Handle, class Allocator>
inline auto HandleDatabase<Mapped, Handle, Allocator>::getHandleData(const Mapped* mapped) const -> HandleData*
{
// HandleData must have `val` as its first member; otherwise we could not cast `mapped` back to
// HandleData without ugly pointer math. A static_assert using offsetof() is not possible because
// HandleData is not a standard-layout type, so the layout is checked with the CARB_ASSERT below.
auto pHandleData = reinterpret_cast<HandleData*>(const_cast<Mapped*>(mapped));
CARB_ASSERT(reinterpret_cast<intptr_t>(&pHandleData->val) - reinterpret_cast<intptr_t>(pHandleData) == 0);
return pHandleData;
}
template <class Mapped, class Handle, class Allocator>
inline void HandleDatabase<Mapped, Handle, Allocator>::destroyMapped(HandleData* hd, uint32_t index)
{
// Destroy the mapped type and add it to the free list.
hd->val.~Mapped();
hd->index = index;
m_emo.m_free.push(hd);
// Only atomicity is needed here, so relaxed ordering suffices
m_destructNotifier.fetch_add(1, std::memory_order_relaxed);
m_destructNotifier.notify_all();
}
template <class Mapped, class Handle, class Allocator>
inline auto HandleDatabase<Mapped, Handle, Allocator>::getDbIndex(size_t index, std::memory_order order) const
-> HandleData*
{
static HandleData* const kLocked = reinterpret_cast<HandleData*>(size_t(-1));
CARB_ASSERT(index < kBuckets);
// Spin briefly if the entry is locked
HandleData* bucket = m_database[index].load(order);
while (CARB_UNLIKELY(bucket == kLocked))
{
CARB_HARDWARE_PAUSE();
bucket = m_database[index].load(order);
}
return bucket;
}
template <class Mapped, class Handle, class Allocator>
inline bool HandleDatabase<Mapped, Handle, Allocator>::handleWasValid(Handle handle) const
{
HandleUnion hu(handle);
HandleData* pHandleData = getHandleData(hu.index);
if (pHandleData == nullptr)
{
return false;
}
Metadata meta = pHandleData->metadata.load(std::memory_order_acquire);
// The zero lifecycle count isn't used
if (hu.lifecycle == 0 || meta.lifecycle == 0)
{
return false;
}
// The kRolloverFlag value is always unset in hu, but possibly present in meta.lifecycle. However, this is still a
// valid comparison because if the kRolloverFlag is set in meta.lifecycle, it means that every possible value has
// been a valid handle at one point and the expression will always result in `true`.
return hu.lifecycle <= meta.lifecycle;
}
template <class Mapped, class Handle, class Allocator>
inline bool HandleDatabase<Mapped, Handle, Allocator>::handleIsValid(Handle handle) const
{
HandleUnion hu(handle);
HandleData* pHandleData = getHandleData(hu.index);
if (pHandleData == nullptr)
{
return false;
}
// Fence/atomic synchronization: we are only loading, not modifying, so a seq-cst fence here allows the
// subsequent load to be relaxed.
carb::this_thread::atomic_fence_seq_cst();
Metadata meta = pHandleData->metadata.load(std::memory_order_relaxed);
// We return true if there is a non-zero refcount on the handle data and the lifecycle matches current.
return (meta.refCount != 0 && (meta.lifecycle & kLifecycleMask) == hu.lifecycle);
}
template <class Mapped, class Handle, class Allocator>
template <class... Args>
inline std::pair<Handle, Mapped*> HandleDatabase<Mapped, Handle, Allocator>::createHandle(Args&&... args)
{
HandleData* handleData = m_emo.m_free.pop();
if (!handleData)
handleData = expandDatabase();
CARB_ASSERT(handleData);
Metadata meta = handleData->metadata.load(std::memory_order_acquire);
CARB_ASSERT(((void*)std::addressof(handleData->metadata) == (void*)std::addressof(handleData->refCount)) &&
offsetof(Metadata, refCount) == 0);
meta.refCount = 1;
// Don't allow the 0 lifecycle.
meta.lifecycle++;
if ((meta.lifecycle & kLifecycleMask) == 0)
{
meta.lifecycle = 1 | kRolloverFlag;
}
uint32_t indexToAllocateFrom = handleData->index;
Mapped* p = new (std::addressof(handleData->val)) Mapped(std::forward<Args>(args)...);
handleData->metadata.store(meta, std::memory_order_release);
HandleUnion hu(indexToAllocateFrom, meta.lifecycle);
return std::make_pair(hu.handle, p);
}
template <class Mapped, class Handle, class Allocator>
inline Mapped* HandleDatabase<Mapped, Handle, Allocator>::tryAddRef(Handle handle)
{
HandleUnion hu(handle);
HandleData* pHandleData = getHandleData(hu.index);
if (pHandleData == nullptr)
{
return nullptr;
}
Metadata meta = pHandleData->metadata.load(std::memory_order_acquire);
for (;;)
{
if (meta.refCount == 0 || (meta.lifecycle & kLifecycleMask) != hu.lifecycle)
{
return nullptr;
}
Metadata desired = meta;
desired.refCount++;
if (pHandleData->metadata.compare_exchange_weak(
meta, desired, std::memory_order_release, std::memory_order_relaxed))
{
return std::addressof(pHandleData->val);
}
}
}
template <class Mapped, class Handle, class Allocator>
inline void HandleDatabase<Mapped, Handle, Allocator>::addRef(Handle handle)
{
Mapped* mapped = tryAddRef(handle);
CARB_FATAL_UNLESS(mapped != nullptr, "Attempt to addRef released handle");
}
template <class Mapped, class Handle, class Allocator>
inline void HandleDatabase<Mapped, Handle, Allocator>::addRef(Mapped* mapped)
{
HandleData* pHandleData = getHandleData(mapped);
// We're working with the Mapped type here, so if it's not valid we're in UB.
uint32_t prev = pHandleData->refCount.fetch_add(1, std::memory_order_relaxed);
CARB_CHECK((prev + 1) > 1); // no resurrection and no rollover
}
template <class Mapped, class Handle, class Allocator>
inline bool HandleDatabase<Mapped, Handle, Allocator>::release(Handle handle)
{
HandleUnion hu(handle);
HandleData* pHandleData = getHandleData(hu.index);
CARB_ASSERT(pHandleData);
if (pHandleData == nullptr)
{
return false;
}
Metadata meta = pHandleData->metadata.load(std::memory_order_acquire);
for (;;)
{
if (meta.refCount == 0 || (meta.lifecycle & kLifecycleMask) != hu.lifecycle)
{
CARB_ASSERT(0);
return false;
}
Metadata desired = meta;
bool released = --desired.refCount == 0;
if (CARB_LIKELY(pHandleData->metadata.compare_exchange_weak(
meta, desired, std::memory_order_release, std::memory_order_relaxed)))
{
if (released)
{
std::atomic_thread_fence(std::memory_order_acquire);
destroyMapped(pHandleData, hu.index);
}
return released;
}
}
}
template <class Mapped, class Handle, class Allocator>
inline bool HandleDatabase<Mapped, Handle, Allocator>::release(Mapped* mapped)
{
HandleData* pHandleData = getHandleData(mapped);
// We're working with the Mapped type here, so if it's not valid we're in UB.
uint32_t prev = pHandleData->refCount.fetch_sub(1, std::memory_order_release);
CARB_CHECK(prev >= 1); // No underflow
if (prev == 1)
{
// Released
std::atomic_thread_fence(std::memory_order_acquire);
destroyMapped(pHandleData, HandleUnion(getHandleFromValue(mapped)).index);
return true;
}
return false;
}
template <class Mapped, class Handle, class Allocator>
inline bool HandleDatabase<Mapped, Handle, Allocator>::releaseIfLastRef(Handle handle)
{
HandleUnion hu(handle);
HandleData* pHandleData = getHandleData(hu.index);
CARB_ASSERT(pHandleData);
if (pHandleData == nullptr)
{
return false;
}
Metadata meta = pHandleData->metadata.load(std::memory_order_acquire);
for (;;)
{
if (meta.refCount == 0 || (meta.lifecycle & kLifecycleMask) != hu.lifecycle)
{
CARB_ASSERT(0);
return false;
}
if (meta.refCount > 1)
{
return false;
}
Metadata desired = meta;
desired.refCount = 0;
if (CARB_LIKELY(pHandleData->metadata.compare_exchange_weak(
meta, desired, std::memory_order_release, std::memory_order_relaxed)))
{
std::atomic_thread_fence(std::memory_order_acquire);
destroyMapped(pHandleData, hu.index);
return true;
}
}
}
template <class Mapped, class Handle, class Allocator>
inline bool HandleDatabase<Mapped, Handle, Allocator>::releaseIfLastRef(Mapped* mapped)
{
HandleData* pHandleData = getHandleData(mapped);
// We're working with the Mapped type here, so if it's not valid we're in UB.
uint32_t refCount = pHandleData->refCount.load(std::memory_order_acquire);
for (;;)
{
CARB_CHECK(refCount != 0);
if (refCount > 1)
{
return false;
}
if (CARB_LIKELY(pHandleData->refCount.compare_exchange_weak(
refCount, 0, std::memory_order_release, std::memory_order_relaxed)))
{
std::atomic_thread_fence(std::memory_order_acquire);
destroyMapped(pHandleData, HandleUnion(getHandleFromValue(mapped)).index);
return true;
}
}
}
template <class Mapped, class Handle, class Allocator>
inline Mapped* HandleDatabase<Mapped, Handle, Allocator>::getValueFromHandle(Handle handle) const
{
HandleUnion hu(handle);
HandleData* pHandleData = getHandleData(hu.index);
if (pHandleData == nullptr)
{
return nullptr;
}
Metadata meta = pHandleData->metadata.load(std::memory_order_acquire);
return (meta.refCount != 0 && (meta.lifecycle & kLifecycleMask) == hu.lifecycle) ? std::addressof(pHandleData->val) :
nullptr;
}
template <class Mapped, class Handle, class Allocator>
inline Handle HandleDatabase<Mapped, Handle, Allocator>::getHandleFromValue(const Mapped* mapped) const
{
// Resolve mapped to the handle data
// We're working with the Mapped type here, so if it's not valid we're in UB.
HandleData* pHandleData = getHandleData(mapped);
// Start setting up the handle, but we don't know the index yet.
uint32_t lifecycle = pHandleData->metadata.load(std::memory_order_acquire).lifecycle;
// Find the index by searching all of the buckets.
for (uint32_t i = 0; i < kBuckets; ++i)
{
HandleData* p = getDbIndex(i, std::memory_order_acquire);
if (!p)
break;
size_t offset = size_t(pHandleData - p);
if (offset < (size_t(1) << i))
{
// Found the index!
uint32_t index = (uint32_t(1) << i) - 1 + uint32_t(offset);
return HandleUnion(index, lifecycle).handle;
}
}
// Given mapped doesn't exist in this HandleDatabase
CARB_CHECK(0);
return Handle{};
}
template <class Mapped, class Handle, class Allocator>
inline void HandleDatabase<Mapped, Handle, Allocator>::indexToBucketAndOffset(uint32_t index,
uint32_t& bucket,
uint32_t& offset)
{
// Bucket id
bucket = 31 - math::numLeadingZeroBits(index + 1);
offset = index + 1 - (1 << CARB_MIN(31u, bucket));
}
template <class Mapped, class Handle, class Allocator>
inline auto HandleDatabase<Mapped, Handle, Allocator>::expandDatabase() -> HandleData*
{
static HandleData* const kLocked = reinterpret_cast<HandleData*>(size_t(-1));
// Scan for the first empty bucket (currently O(n), but expansion is rare and n is small).
size_t bucketToAllocateFrom;
for (bucketToAllocateFrom = 0; bucketToAllocateFrom != CARB_COUNTOF(m_database); bucketToAllocateFrom++)
{
HandleData* mem = m_database[bucketToAllocateFrom].load(std::memory_order_acquire);
for (;;)
{
while (mem == kLocked)
{
// Another thread is trying to allocate this. Try to pull from the free list until it stabilizes.
mem = m_emo.m_free.pop();
if (mem)
return mem;
mem = m_database[bucketToAllocateFrom].load(std::memory_order_acquire);
}
if (!mem)
{
// Try to lock it
if (m_database[bucketToAllocateFrom].compare_exchange_strong(
mem, kLocked, std::memory_order_release, std::memory_order_relaxed))
{
// Successfully locked
break;
}
// Continue if another thread has locked it
if (mem == kLocked)
continue;
// Failed to lock, but it's not locked anymore, so see if there's a free entry now
HandleData* hd = m_emo.m_free.pop();
if (hd)
return hd;
}
CARB_ASSERT(mem != kLocked);
break;
}
if (!mem)
break;
}
CARB_FATAL_UNLESS(bucketToAllocateFrom < CARB_COUNTOF(m_database), "Out of handles!");
uint32_t const allocateCount = 1 << bucketToAllocateFrom;
uint32_t const offset = allocateCount - 1;
HandleData* handleData = AllocatorTraits::allocate(m_emo, allocateCount);
// Construct the new entries and set their indexes. The first entry is skipped here: it is reserved for
// the caller and constructed just before returning, below.
for (uint32_t i = 1; i < allocateCount; ++i)
{
AllocatorTraits::construct(m_emo, handleData + i, offset + i);
}
// Add entries (after the first) to the free list
m_emo.m_free.push(handleData + 1, handleData + allocateCount);
// Overwrite the lock with the actual pointer now
HandleData* expect = m_database[bucketToAllocateFrom].exchange(handleData, std::memory_order_release);
CARB_ASSERT(expect == kLocked);
CARB_UNUSED(expect);
// Construct and return the first entry, which was reserved for the caller
AllocatorTraits::construct(m_emo, handleData, offset);
return handleData;
}
template <class Mapped, class Handle, class Allocator>
inline size_t HandleDatabase<Mapped, Handle, Allocator>::clear()
{
size_t count = 0;
for (uint32_t i = 0; i != kBuckets; ++i)
{
HandleData* data = getDbIndex(i, std::memory_order_acquire);
if (!data)
break;
uint32_t const bucketSize = 1u << i;
for (uint32_t j = 0; j != bucketSize; ++j)
{
if (data[j].refCount.exchange(0, std::memory_order_release) != 0)
{
// Successfully set the refcount to 0. Destroy and add to the free list
std::atomic_thread_fence(std::memory_order_acquire);
++count;
data[j].val.~Mapped();
data[j].index = bucketSize - 1 + j;
m_emo.m_free.push(std::addressof(data[j]));
}
}
}
// Notify once at the end
if (count)
{
// Just need atomicity, so use relaxed semantics.
m_destructNotifier.fetch_add(1, std::memory_order_relaxed);
m_destructNotifier.notify_all();
}
return count;
}
template <class Mapped, class Handle, class Allocator>
inline void HandleDatabase<Mapped, Handle, Allocator>::awaitFinalRelease(Handle h) const
{
// handleIsValid() performs seq-cst ordering, so we can use relaxed semantics here.
auto val = m_destructNotifier.load(std::memory_order_relaxed);
while (handleIsValid(h))
{
m_destructNotifier.wait(val, std::memory_order_relaxed);
val = m_destructNotifier.load(std::memory_order_relaxed);
}
}
template <class Mapped, class Handle, class Allocator>
inline auto HandleDatabase<Mapped, Handle, Allocator>::makeScopedRef(Handle handle) -> HandleRefType
{
return HandleRef<Mapped, Handle, Allocator>(*this, handle);
}
template <class Mapped, class Handle, class Allocator>
inline auto HandleDatabase<Mapped, Handle, Allocator>::makeScopedRef(Handle handle) const -> ConstHandleRefType
{
return ConstHandleRef<Mapped, Handle, Allocator>(const_cast<HandleDatabase&>(*this), handle);
}
// Global operators.
template <class Mapped, class Handle, class Allocator>
inline bool operator==(const HandleRef<Mapped, Handle, Allocator>& ref, std::nullptr_t)
{
return ref.get() == nullptr;
}
template <class Mapped, class Handle, class Allocator>
inline bool operator==(std::nullptr_t, const HandleRef<Mapped, Handle, Allocator>& ref)
{
return ref.get() == nullptr;
}
template <class Mapped, class Handle, class Allocator>
inline bool operator!=(const HandleRef<Mapped, Handle, Allocator>& ref, std::nullptr_t)
{
return ref.get() != nullptr;
}
template <class Mapped, class Handle, class Allocator>
inline bool operator!=(std::nullptr_t, const HandleRef<Mapped, Handle, Allocator>& ref)
{
return ref.get() != nullptr;
}
} // namespace extras
} // namespace carb
CARB_IGNOREWARNING_MSC_POP