Merge orbis-kernel submodule

This commit is contained in:
Ivan Chikish 2023-07-03 14:10:16 +03:00
parent 91f48cdf77
commit 1ee6b7c970
97 changed files with 8134 additions and 1 deletion

View file

@ -0,0 +1,74 @@
#pragma once
#include <atomic>
#include <utility>
#include <functional>
namespace orbis {
inline namespace utils {
// Atomic read-modify-write built on a CAS loop.
//
// The functor mutates a local copy of the stored value; the store only
// happens when the CAS succeeds. Returns the previous value, or — when the
// functor returns something — a pair of {previous value, functor result}.
// A falsy functor result cancels the operation (nothing is written).
template <typename T, typename F, typename RT = std::invoke_result_t<F, T &>>
inline std::conditional_t<std::is_void_v<RT>, T, std::pair<T, RT>>
atomic_fetch_op(std::atomic<T> &v, F func) {
  T expected = v.load();
  for (;;) {
    // Work on a scratch copy; `expected` is refreshed by a failed CAS.
    T desired = expected;
    if constexpr (std::is_void_v<RT>) {
      std::invoke(func, desired);
      if (v.compare_exchange_strong(expected, desired)) [[likely]] {
        return expected;
      }
    } else {
      RT outcome = std::invoke(func, desired);
      // A falsy outcome aborts without writing; otherwise retry until the
      // exchange lands.
      if (!outcome || v.compare_exchange_strong(expected, desired)) [[likely]] {
        return {expected, std::move(outcome)};
      }
    }
  }
}
// Atomic read-modify-write returning the functor's own result.
//
// Like atomic_fetch_op, but the caller gets whatever the functor computed
// for the iteration whose CAS succeeded (or nothing, for a void functor).
// The functor may run multiple times under contention.
template <typename T, typename F, typename RT = std::invoke_result_t<F, T &>>
inline RT atomic_op(std::atomic<T> &v, F func) {
  T expected = v.load();
  for (;;) {
    T desired = expected; // scratch copy mutated by the functor
    if constexpr (std::is_void_v<RT>) {
      std::invoke(func, desired);
      if (v.compare_exchange_strong(expected, desired)) [[likely]] {
        return;
      }
    } else {
      RT result = std::invoke(func, desired);
      if (v.compare_exchange_strong(expected, desired)) [[likely]] {
        return result;
      }
    }
  }
}
// Memory-order constants for the HLE (Hardware Lock Elision) helpers below.
// When the compiler exposes the x86 HLE hint flags, OR them into SEQ_CST;
// otherwise fall back to plain sequential consistency.
#if defined(__ATOMIC_HLE_ACQUIRE) && defined(__ATOMIC_HLE_RELEASE)
static constexpr int s_hle_ack = __ATOMIC_SEQ_CST | __ATOMIC_HLE_ACQUIRE;
static constexpr int s_hle_rel = __ATOMIC_SEQ_CST | __ATOMIC_HLE_RELEASE;
#else
static constexpr int s_hle_ack = __ATOMIC_SEQ_CST;
static constexpr int s_hle_rel = __ATOMIC_SEQ_CST;
#endif

// compare_exchange with an HLE-acquire hint. Goes through the GCC/Clang
// __atomic builtin because std::atomic offers no way to pass the HLE flags.
// NOTE(review): the reinterpret_cast from std::atomic<T>* to T* assumes the
// atomic wrapper is layout-compatible with T; the lock-free static_assert is
// the guard for that assumption.
template <typename T>
inline bool compare_exchange_hle_acq(std::atomic<T> &dest, T &comp, T exch) {
  static_assert(sizeof(T) == 4 || sizeof(T) == 8);
  static_assert(std::atomic<T>::is_always_lock_free);
  return __atomic_compare_exchange(reinterpret_cast<T *>(&dest), &comp, &exch,
                                   false, s_hle_ack, s_hle_ack);
}

// fetch_add with an HLE-release hint; returns the previous value.
template <typename T>
inline T fetch_add_hle_rel(std::atomic<T> &dest, T value) {
  static_assert(sizeof(T) == 4 || sizeof(T) == 8);
  static_assert(std::atomic<T>::is_always_lock_free);
  return __atomic_fetch_add(reinterpret_cast<T *>(&dest), value, s_hle_rel);
}
} // namespace utils
} // namespace orbis

View file

@ -0,0 +1,104 @@
#pragma once
#include <bit>
#include <cstddef>
#include <cstdint>
namespace orbis {
inline namespace utils {
// Fixed-size bit set backed by an array of 64-bit chunks.
// Unlike std::bitset it exposes the raw chunk array (used by OwningIdMap's
// destructor) and provides countr_one/countr_zero scans for lowest-free-slot
// searches.
template <std::size_t Count> struct BitSet {
  using chunk_type = std::uint64_t;
  static constexpr auto BitsPerChunk = sizeof(chunk_type) * 8;
  static constexpr auto ChunkCount = (Count + BitsPerChunk - 1) / BitsPerChunk;

  chunk_type _bits[ChunkCount]{};

  // Number of consecutive set bits starting at bit 0, i.e. the index of the
  // first clear bit (may exceed Count when all leading chunks are saturated).
  constexpr std::size_t countr_one() const {
    std::size_t result = 0;
    for (auto bits : _bits) {
      auto count = std::countr_one(bits);
      result += count;
      if (count != BitsPerChunk) {
        break;
      }
    }
    return result;
  }

  // Index of the first set bit at or after `offset`; returns a value past
  // the last chunk when no such bit exists.
  constexpr std::size_t countr_zero(std::size_t offset = 0) const {
    std::size_t result = 0;
    if (auto chunkOffset = offset % BitsPerChunk) {
      // Unaligned start: scan the remainder of the first chunk separately.
      auto count =
          std::countr_zero(_bits[offset / BitsPerChunk] >> chunkOffset);
      if (count != BitsPerChunk) {
        return count + offset;
      }
      // Nothing left in this chunk; resume at the next chunk boundary.
      offset = (offset + BitsPerChunk - 1) & ~(BitsPerChunk - 1);
    }
    for (auto i = offset / BitsPerChunk; i < ChunkCount; ++i) {
      auto count = std::countr_zero(_bits[i]);
      result += count;
      if (count != BitsPerChunk) {
        break;
      }
    }
    return result + offset;
  }

  // True when no bit is set.
  bool empty() const {
    for (auto bits : _bits) {
      if (bits != 0) {
        return false;
      }
    }
    return true;
  }

  // True when all Count bits are set. The last chunk is compared against a
  // mask of only the valid low bits, so sizes that are not a multiple of 64
  // work correctly. (The previous implementation compared every chunk to
  // all-ones, requiring unused high bits of the tail chunk to be set —
  // unreachable via set(), so full() could never return true for e.g.
  // BitSet<100>.)
  bool full() const {
    constexpr auto TailBits = Count % BitsPerChunk;
    for (std::size_t i = 0; i + 1 < ChunkCount; ++i) {
      if (_bits[i] != ~static_cast<chunk_type>(0)) {
        return false;
      }
    }
    if constexpr (TailBits != 0) {
      return _bits[ChunkCount - 1] ==
             (static_cast<chunk_type>(1) << TailBits) - 1;
    } else {
      return _bits[ChunkCount - 1] == ~static_cast<chunk_type>(0);
    }
  }

  constexpr void clear(std::size_t index) {
    _bits[index / BitsPerChunk] &=
        ~(static_cast<chunk_type>(1) << (index % BitsPerChunk));
  }

  constexpr void set(std::size_t index) {
    _bits[index / BitsPerChunk] |= static_cast<chunk_type>(1)
                                   << (index % BitsPerChunk);
  }

  constexpr bool test(std::size_t index) const {
    return (_bits[index / BitsPerChunk] &
            (static_cast<chunk_type>(1) << (index % BitsPerChunk))) != 0;
  }
};
} // namespace utils
} // namespace orbis

View file

@ -0,0 +1,306 @@
#pragma once
#include "BitSet.hpp"
#include "Rc.hpp"
#include <algorithm>
#include <bit>
#include <cstddef>
#include <cstdint>
#include <type_traits>
namespace orbis {
inline namespace utils {
// Map from integer IDs in [MinId, MaxId) to reference-counted objects.
// The map holds one reference on every stored object (incRef on insert,
// decRef on remove/destruction). Free slots are tracked with per-chunk
// bitmaps and ID allocation always picks the lowest free ID.
// NOTE(review): no internal locking — callers must synchronize access.
template <WithRc T, typename IdT = int, std::size_t MaxId = 4096,
          std::size_t MinId = 0>
  requires(MaxId > MinId)
class RcIdMap {
  static constexpr auto ChunkSize = std::min<std::size_t>(MaxId - MinId, 64);
  static constexpr auto ChunkCount =
      (MaxId - MinId + ChunkSize - 1) / ChunkSize;

  struct IdMapChunk {
    BitSet<ChunkSize> mask = {}; // set bit = slot occupied
    T *objects[ChunkSize]{};

    // Releases the reference held on every remaining object.
    ~IdMapChunk() {
      std::size_t index = mask.countr_zero();
      while (index < ChunkSize) {
        objects[index]->decRef();
        index = mask.countr_zero(index + 1);
      }
    }

    // Stores `object` in the lowest free slot and returns its index.
    // NOTE(review): assumes the chunk has a free slot — insert_impl checks
    // m_fullChunks before calling this.
    std::size_t insert(T *object) {
      std::size_t index = mask.countr_one();
      mask.set(index);
      objects[index] = object;
      return index;
    }

    T *get(std::size_t index) { return objects[index]; }

    // Frees the slot and drops the map's reference on the object.
    void remove(std::size_t index) {
      objects[index]->decRef();
      objects[index] = nullptr;
      mask.clear(index);
    }
  };

  IdMapChunk m_chunks[ChunkCount]{};
  BitSet<ChunkCount> m_fullChunks; // set bit = chunk has no free slot

public:
  // Sentinel returned by insert when the map is full.
  static constexpr auto npos = static_cast<IdT>(~static_cast<std::size_t>(0));

  struct end_iterator {};

  // Forward iterator yielding {id, object} pairs for occupied slots.
  class iterator {
    const IdMapChunk *chunks = nullptr;
    std::size_t chunk = 0;
    std::size_t index = 0;

  public:
    iterator(const IdMapChunk *chunks) : chunks(chunks) { findNext(); }

    iterator &operator++() {
      ++index;
      findNext();
      return *this;
    }

    std::pair<IdT, T *> operator*() const {
      return {static_cast<IdT>(chunk * ChunkSize + index + MinId),
              chunks[chunk].objects[index]};
    }

    bool operator!=(const end_iterator &) const { return chunk < ChunkCount; }
    bool operator==(const end_iterator &) const { return chunk >= ChunkCount; }

  private:
    // Advances (chunk, index) to the next occupied slot, or past the end.
    void findNext() {
      while (chunk < ChunkCount) {
        index = chunks[chunk].mask.countr_zero(index);
        if (index < ChunkSize) {
          break;
        }
        index = 0;
        chunk++;
      }
    }
  };

  // Invokes cb(id, object) for every stored object.
  void walk(auto cb) {
    for (std::size_t chunk = 0; chunk < ChunkCount; ++chunk) {
      std::size_t index = m_chunks[chunk].mask.countr_zero();
      while (index < ChunkSize) {
        cb(static_cast<IdT>(index + chunk * ChunkSize + MinId),
           m_chunks[chunk].objects[index]);
        index = m_chunks[chunk].mask.countr_zero(index + 1);
      }
    }
  }

  iterator begin() const { return iterator{m_chunks}; }
  end_iterator end() const { return {}; }

private:
  // Places `object` into the lowest free slot without touching its
  // reference count; returns npos when every chunk is full.
  // NOTE(review): when (MaxId - MinId) is not a multiple of ChunkSize the
  // tail chunk could hand out IDs >= MaxId (which get() would then reject)
  // — confirm all instantiations use a multiple; the defaults (4096, 0) do.
  IdT insert_impl(T *object) {
    auto page = m_fullChunks.countr_one();
    if (page == ChunkCount) {
      return npos;
    }
    auto index = m_chunks[page].insert(object);
    if (m_chunks[page].mask.full()) {
      m_fullChunks.set(page);
    }
    return {static_cast<IdT>(page * ChunkSize + index + MinId)};
  }

public:
  // Inserts and takes a reference; returns npos if the map is full.
  IdT insert(T *object) {
    auto result = insert_impl(object);
    if (result != npos) {
      object->incRef();
    }
    return result;
  }

  IdT insert(const Ref<T> &ref) { return insert(ref.get()); }

  // Moves the reference held by `ref` into the map (no extra incRef);
  // drops it again when insertion fails.
  IdT insert(Ref<T> &&ref) {
    auto object = ref.release();
    auto result = insert_impl(object);
    if (result == npos) {
      object->decRef();
    }
    return result;
  }

  // Returns the object for `id`, or nullptr when out of range / absent.
  // Does not add a reference.
  T *get(IdT id) {
    const auto rawId = static_cast<std::size_t>(id) - MinId;
    if (rawId >= MaxId - MinId) {
      return nullptr;
    }
    const auto chunk = rawId / ChunkSize;
    const auto index = rawId % ChunkSize;
    if (!m_chunks[chunk].mask.test(index)) {
      return nullptr;
    }
    return m_chunks[chunk].get(index);
  }

  // Removes `id`, releasing the map's reference. Returns false when the ID
  // is out of range or not present.
  bool remove(IdT id) {
    const auto rawId = static_cast<std::size_t>(id) - MinId;
    if (rawId >= MaxId - MinId) {
      return false;
    }
    const auto chunk = rawId / ChunkSize;
    const auto index = rawId % ChunkSize;
    if (!m_chunks[chunk].mask.test(index)) {
      return false;
    }
    m_chunks[chunk].remove(index);
    m_fullChunks.clear(chunk);
    return true;
  }
};
// Map from integer IDs in [MinId, MaxId) to objects stored inline: the map
// owns the storage and constructs/destroys elements in place. Free slots
// are tracked with per-chunk bitmaps; allocation picks the lowest free ID.
// NOTE(review): no internal locking — callers must synchronize access.
template <typename T, typename IdT = int, std::size_t MaxId = 4096,
          std::size_t MinId = 0>
  requires(MaxId > MinId)
struct OwningIdMap {
  static constexpr auto ChunkSize = std::min<std::size_t>(MaxId - MinId, 64);
  static constexpr auto ChunkCount =
      (MaxId - MinId + ChunkSize - 1) / ChunkSize;

  struct IdMapChunk {
    BitSet<ChunkSize> mask = {}; // set bit = slot holds a live object
    // Raw storage; elements are created via construct_at in emplace_new.
    alignas(T) std::byte objects[sizeof(T) * ChunkSize];

    // Destroys every live object by walking the set bits of the mask.
    ~IdMapChunk() {
      std::size_t pageOffset = 0;
      for (auto page : mask._bits) {
        auto tmp = page; // iterate a copy: destroy() mutates mask
        while (true) {
          const auto index = std::countr_zero(tmp);
          if (index >= 64) {
            break; // no set bits remain in this 64-bit word
          }
          tmp &= ~(static_cast<std::uint64_t>(1) << index);
          destroy(pageOffset + index);
        }
        pageOffset += 64;
      }
    }

    // Constructs T in the lowest free slot; returns {0, nullptr} when the
    // chunk is full.
    // NOTE(review): std::construct_at/std::destroy_at require <memory>,
    // which this header does not include directly — confirm it arrives
    // transitively.
    template <typename... ArgsT>
    std::pair<std::size_t, T *> emplace_new(ArgsT &&...args) {
      std::size_t index = mask.countr_one();
      if (index >= ChunkSize) {
        return {};
      }
      mask.set(index);
      return {index,
              std::construct_at(get(index), std::forward<ArgsT>(args)...)};
    }

    T *get(std::size_t index) {
      return reinterpret_cast<T *>(objects + sizeof(T) * index);
    }

    void destroy(std::size_t index) {
      std::destroy_at(get(index));
      mask.clear(index);
    }
  };

  IdMapChunk chunks[ChunkCount]{};
  BitSet<ChunkCount> fullChunks; // set bit = chunk has no free slot

  // Constructs an element and returns {id, pointer}. Returns a
  // value-initialized pair ({IdT{}, nullptr}) when the map is full —
  // callers must check the pointer, since IdT{} may also be a valid ID.
  template <typename... ArgsT>
    requires(std::is_constructible_v<T, ArgsT...>)
  std::pair<IdT, T *> emplace(ArgsT &&...args) {
    auto page = fullChunks.countr_one();
    if (page == ChunkCount) {
      return {};
    }
    auto newElem = chunks[page].emplace_new(std::forward<ArgsT>(args)...);
    if (chunks[page].mask.full()) {
      fullChunks.set(page);
    }
    return {static_cast<IdT>(page * ChunkSize + newElem.first + MinId),
            newElem.second};
  }

  // Returns the element for `id`, or nullptr when out of range / absent.
  T *get(IdT id) {
    const auto rawId = static_cast<std::size_t>(id) - MinId;
    const auto chunk = rawId / ChunkSize;
    const auto index = rawId % ChunkSize;
    if (chunk >= ChunkCount) {
      return nullptr;
    }
    if (!chunks[chunk].mask.test(index)) {
      return nullptr;
    }
    return chunks[chunk].get(index);
  }

  // Destroys the element with `id`; returns false when out of range or
  // not present.
  bool destroy(IdT id) {
    const auto rawId = static_cast<std::size_t>(id) - MinId;
    const auto chunk = rawId / ChunkSize;
    const auto index = rawId % ChunkSize;
    if (chunk >= ChunkCount) {
      return false;
    }
    if (!chunks[chunk].mask.test(index)) {
      return false;
    }
    chunks[chunk].destroy(index);
    fullChunks.clear(chunk);
    return true;
  }
};
} // namespace utils
} // namespace orbis

View file

@ -0,0 +1,48 @@
#pragma once
namespace orbis {
inline namespace utils {
// Intrusive doubly-linked list node carrying its payload by value.
// The node itself is the list: there is no separate container object, and
// no node ever owns another.
template <typename T> struct LinkedNode {
  T object;
  LinkedNode *next = nullptr;
  LinkedNode *prev = nullptr;

  // Splices `other` in directly after this node.
  void insertNext(LinkedNode &other) {
    other.prev = this;
    other.next = next;
    if (auto *successor = next) {
      successor->prev = &other;
    }
    next = &other;
  }

  // Splices `other` in directly before this node.
  void insertPrev(LinkedNode &other) {
    other.next = this;
    other.prev = prev;
    if (auto *predecessor = prev) {
      predecessor->next = &other;
    }
    prev = &other;
  }

  // Unlinks this node from its neighbors and returns the old successor
  // (useful for iterating while erasing). Both link pointers are reset.
  LinkedNode *erase() {
    auto *successor = next;
    if (prev != nullptr) {
      prev->next = next;
    }
    if (next != nullptr) {
      next->prev = prev;
    }
    prev = nullptr;
    next = nullptr;
    return successor;
  }
};
} // namespace utils
} // namespace orbis

View file

@ -0,0 +1,104 @@
#pragma once
#include <atomic>
#include <span>
#include <string>
namespace orbis {
inline namespace logs {
// Severity levels, ordered from most to least important; a message is
// emitted when its level compares <= the currently enabled level.
enum class LogLevel : unsigned char {
  Always,
  Fatal,
  Error,
  Todo,
  Success,
  Warning,
  Notice,
  Trace
};

// Currently enabled log level
inline std::atomic<LogLevel> logs_level = LogLevel::Notice;
// Type-erased argument formatter: for a logged argument of type T,
// format() receives a pointer to the value and appends its textual form to
// `out`. The format() implementations are defined out of line (in the
// logging .cpp).
template <typename T, typename = void> struct log_class_string {
  static const T &get_object(const void *arg) {
    return *static_cast<const T *>(arg);
  }
  static void format(std::string &out, const void *arg);
};

// All object pointers funnel through the const void* specialization...
template <> struct log_class_string<const void *, void> {
  static void format(std::string &out, const void *arg);
};
template <typename T>
struct log_class_string<T *, void> : log_class_string<const void *, void> {};

// ...except character pointers, which get their own treatment.
template <> struct log_class_string<const char *, void> {
  static void format(std::string &out, const void *arg);
};
template <>
struct log_class_string<char *, void> : log_class_string<const char *> {};
template <>
struct log_class_string<const char8_t *, void>
    : log_class_string<const char *> {};
template <>
struct log_class_string<char8_t *, void> : log_class_string<const char8_t *> {};

// Array type for the type-erased argument pointers (one extra slot).
template <typename... Args>
using log_args_t = const void *(&&)[sizeof...(Args) + 1];

// Per-argument-type entry: currently just the format function pointer.
struct log_type_info {
  decltype(&log_class_string<int>::format) log_string;

  template <typename T> static constexpr log_type_info make() {
    return log_type_info{
        &log_class_string<T>::format,
    };
  }
};

// One log_type_info per argument type; the array is one element longer
// than the pack, so the last entry is value-initialized (null log_string)
// and can serve as a terminator.
template <typename... Args>
constexpr const log_type_info type_info_v[sizeof...(Args) + 1]{
    log_type_info::make<std::decay_t<Args>>()...};
// Backend: formats and prints one message. `names` carries the stringized
// argument list (from #__VA_ARGS__), `sup` the per-argument formatter
// table, followed by one type-erased pointer per argument.
// Defined out of line in the logging .cpp.
// NOTE(review): std::string_view is used here but <string_view> is not
// included directly (only <string>) — confirm it arrives transitively.
void _orbis_log_print(LogLevel lvl, const char *msg, std::string_view names,
                      const log_type_info *sup, ...);

// Frontend invoked by the ORBIS_LOG_* macros below.
template <typename... Args>
void _orbis_log_impl(LogLevel lvl, const char *msg, const char *names,
                     const Args &...args) {
  // Fast filtering: drop the message before any formatting work when the
  // enabled level is below the message's level.
  if (logs_level.load(std::memory_order::relaxed) < lvl)
    return;
  _orbis_log_print(lvl, msg, names, type_info_v<Args...>,
                   static_cast<const void *>(&args)...);
}
} // namespace logs
} // namespace orbis
// Convenience wrappers: ORBIS_LOG_<LEVEL>(msg, vars...) logs `msg` together
// with the names (#__VA_ARGS__) and current values of the listed variables.
// ##__VA_ARGS__ (GNU extension) swallows the trailing comma when the
// variable list is empty.
#define ORBIS_LOG_FATAL(msg, ...)                                              \
  ::orbis::_orbis_log_impl(::orbis::LogLevel::Fatal, (msg), #__VA_ARGS__,      \
                           ##__VA_ARGS__)
#define ORBIS_LOG_ERROR(msg, ...)                                              \
  ::orbis::_orbis_log_impl(::orbis::LogLevel::Error, (msg), #__VA_ARGS__,      \
                           ##__VA_ARGS__)
#define ORBIS_LOG_TODO(msg, ...)                                               \
  ::orbis::_orbis_log_impl(::orbis::LogLevel::Todo, (msg), #__VA_ARGS__,       \
                           ##__VA_ARGS__)
#define ORBIS_LOG_SUCCESS(msg, ...)                                            \
  ::orbis::_orbis_log_impl(::orbis::LogLevel::Success, (msg), #__VA_ARGS__,    \
                           ##__VA_ARGS__)
#define ORBIS_LOG_WARNING(msg, ...)                                            \
  ::orbis::_orbis_log_impl(::orbis::LogLevel::Warning, (msg), #__VA_ARGS__,    \
                           ##__VA_ARGS__)
#define ORBIS_LOG_NOTICE(msg, ...)                                             \
  ::orbis::_orbis_log_impl(::orbis::LogLevel::Notice, (msg), #__VA_ARGS__,     \
                           ##__VA_ARGS__)
#define ORBIS_LOG_TRACE(msg, ...)                                              \
  ::orbis::_orbis_log_impl(::orbis::LogLevel::Trace, (msg), #__VA_ARGS__,      \
                           ##__VA_ARGS__)

View file

@ -0,0 +1,124 @@
#pragma once
#include <atomic>
#include <cassert>
#include <type_traits>
#include <utility>
namespace orbis {
inline namespace utils {
struct RcBase {
std::atomic<unsigned> references{0};
virtual ~RcBase() = default;
void incRef() {
if (references.fetch_add(1, std::memory_order::relaxed) > 512) {
assert(!"too many references");
}
}
// returns true if object was destroyed
bool decRef() {
if (references.fetch_sub(1, std::memory_order::relaxed) == 1) {
delete this;
return true;
}
return false;
}
};
// Satisfied by types exposing intrusive reference counting
// (incRef/decRef), e.g. anything derived from RcBase.
template <typename T>
concept WithRc = requires(T t) {
  t.incRef();
  t.decRef();
};
// Intrusive smart pointer: holds one reference on the pointee (incRef when
// acquired, decRef when released). T must provide incRef()/decRef().
template <typename T> class Ref {
  T *m_ref = nullptr;

public:
  Ref() = default;

  // Takes a reference on `ref`. Now tolerates nullptr (previously the
  // pointer was dereferenced unconditionally), matching the null handling
  // of the copy constructors.
  template <typename OT>
    requires(std::is_base_of_v<T, OT>)
  Ref(OT *ref) : m_ref(ref) {
    if (m_ref != nullptr) {
      m_ref->incRef();
    }
  }

  template <typename OT>
    requires(std::is_base_of_v<T, OT>)
  Ref(const Ref<OT> &other) : m_ref(other.get()) {
    if (m_ref != nullptr) {
      m_ref->incRef();
    }
  }

  // Steals the reference held by `other` (no recount traffic).
  template <typename OT>
    requires(std::is_base_of_v<T, OT>)
  Ref(Ref<OT> &&other) : m_ref(other.release()) {}

  Ref(const Ref &other) : m_ref(other.get()) {
    if (m_ref != nullptr) {
      m_ref->incRef();
    }
  }

  Ref(Ref &&other) : m_ref(other.release()) {}

  // Converting move assignment. Fixed: the previous body called
  // other.swap(*this), which does not compile when OT != T because
  // Ref<OT>::swap only accepts Ref<OT>&; route through a Ref<T> temporary
  // instead (the exact-type case still uses the non-template overload).
  template <typename OT>
    requires(std::is_base_of_v<T, OT>)
  Ref &operator=(Ref<OT> &&other) {
    Ref(std::move(other)).swap(*this);
    return *this;
  }

  template <typename OT>
    requires(std::is_base_of_v<T, OT>)
  Ref &operator=(OT *other) {
    *this = Ref(other);
    return *this;
  }

  template <typename OT>
    requires(std::is_base_of_v<T, OT>)
  Ref &operator=(const Ref<OT> &other) {
    *this = Ref(other);
    return *this;
  }

  Ref &operator=(const Ref &other) {
    *this = Ref(other);
    return *this;
  }

  Ref &operator=(Ref &&other) {
    other.swap(*this);
    return *this;
  }

  ~Ref() {
    if (m_ref != nullptr) {
      m_ref->decRef();
    }
  }

  void swap(Ref<T> &other) { std::swap(m_ref, other.m_ref); }
  T *get() const { return m_ref; }
  // Gives up ownership without decRef; the caller takes over the reference.
  T *release() { return std::exchange(m_ref, nullptr); }
  T *operator->() const { return m_ref; }
  explicit operator bool() const { return m_ref != nullptr; }
  bool operator==(std::nullptr_t) const { return m_ref == nullptr; }
  bool operator!=(std::nullptr_t) const { return m_ref != nullptr; }
  auto operator<=>(const T *other) const { return m_ref <=> other; }
  auto operator<=>(const Ref &other) const = default;
};
// Allocates a new T and returns it wrapped in a Ref; the Ref holds the
// only reference, so the object is destroyed when the last Ref goes away.
template <WithRc T, typename... ArgsT>
  requires(std::is_constructible_v<T, ArgsT...>)
Ref<T> create(ArgsT &&...args) {
  auto result = new T(std::forward<ArgsT>(args)...);
  return Ref<T>(result);
}
} // namespace utils
} // namespace orbis

View file

@ -0,0 +1,144 @@
#pragma once
#include <atomic>
#include <mutex>
#include <orbis/utils/AtomicOp.hpp>
namespace orbis {
inline namespace utils {
// IPC-ready shared mutex, using only writer lock is recommended
// Readers occupy the low bits of m_value; a writer adds c_one (fixed-point
// 1.0). The contended slow paths (impl_*) are defined out of line.
struct shared_mutex final {
  enum : unsigned {
    c_one = 1u << 14, // Fixed-point 1.0 value (one writer)
    c_sig = 1u << 30,
    c_err = 1u << 31,
  };

  std::atomic<unsigned> m_value{};

  // Out-of-line slow paths (not visible in this header).
  void impl_lock_shared(unsigned val);
  void impl_unlock_shared(unsigned old);
  void impl_wait();
  void impl_signal();
  void impl_lock(unsigned val);
  void impl_unlock(unsigned old);
  void impl_lock_upgrade();

public:
  constexpr shared_mutex() = default;

  bool try_lock_shared() {
    // Conditional increment
    unsigned value = m_value.load();
    return value < c_one - 1 &&
           m_value.compare_exchange_strong(value, value + 1);
  }

  // Lock with HLE acquire hint
  void lock_shared() {
    unsigned value = m_value.load();
    // Fast path: no writer present, grab a reader slot with a single CAS.
    if (value < c_one - 1) [[likely]] {
      unsigned old = value;
      if (compare_exchange_hle_acq(m_value, old, value + 1)) [[likely]] {
        return;
      }
    }
    impl_lock_shared(value + 1);
  }

  // Unlock with HLE release hint
  void unlock_shared() {
    const unsigned value = fetch_add_hle_rel(m_value, -1u);
    // value >= c_one means a writer is involved; slow path handles signaling.
    if (value >= c_one) [[unlikely]] {
      impl_unlock_shared(value);
    }
  }

  bool try_lock() {
    // A writer may only enter from the completely free state.
    unsigned value = 0;
    return m_value.compare_exchange_strong(value, c_one);
  }

  // Lock with HLE acquire hint
  void lock() {
    unsigned value = 0;
    if (!compare_exchange_hle_acq(m_value, value, +c_one)) [[unlikely]] {
      impl_lock(value);
    }
  }

  // Unlock with HLE release hint
  void unlock() {
    const unsigned value = fetch_add_hle_rel(m_value, 0u - c_one);
    // Any value other than exactly one writer means waiters need signaling.
    if (value != c_one) [[unlikely]] {
      impl_unlock(value);
    }
  }

  bool try_lock_upgrade() {
    unsigned value = m_value.load();
    // Conditional increment, try to convert a single reader into a writer,
    // ignoring other writers
    return (value + c_one - 1) % c_one == 0 &&
           m_value.compare_exchange_strong(value, value + c_one - 1);
  }

  void lock_upgrade() {
    if (!try_lock_upgrade()) [[unlikely]] {
      impl_lock_upgrade();
    }
  }

  void lock_downgrade() {
    // Convert to reader lock (can result in broken state)
    m_value -= c_one - 1;
  }

  // Check whether can immediately obtain an exclusive (writer) lock
  bool is_free() const { return m_value.load() == 0; }

  // Check whether can immediately obtain a shared (reader) lock
  bool is_lockable() const { return m_value.load() < c_one - 1; }
};
// RAII guard taking a shared (reader) lock, with optional one-way upgrade
// to an exclusive lock; the destructor releases whichever lock is held.
class reader_lock final {
  shared_mutex &m_mutex;
  bool m_upgraded = false;

public:
  reader_lock(const reader_lock &) = delete;
  reader_lock &operator=(const reader_lock &) = delete;

  // Acquires the shared lock for the guard's lifetime.
  explicit reader_lock(shared_mutex &mutex) : m_mutex(mutex) {
    m_mutex.lock_shared();
  }

  // One-way upgrade to an exclusive lock; the protected state may have
  // changed while the upgrade was in progress.
  void upgrade() {
    if (m_upgraded) {
      return;
    }
    m_mutex.lock_upgrade();
    m_upgraded = true;
  }

  // Attempts an upgrade; on success the observed state has NOT changed.
  bool try_upgrade() {
    if (!m_upgraded) {
      m_upgraded = m_mutex.try_lock_upgrade();
    }
    return m_upgraded;
  }

  ~reader_lock() {
    if (m_upgraded) {
      m_mutex.unlock();
    } else {
      m_mutex.unlock_shared();
    }
  }
};
// RAII guard holding the exclusive (writer) lock for its whole lifetime.
class writer_lock final {
  shared_mutex &m_mutex;

public:
  writer_lock(const writer_lock &) = delete;
  writer_lock &operator=(const writer_lock &) = delete;

  // Blocks until the exclusive lock is acquired.
  explicit writer_lock(shared_mutex &mutex) : m_mutex(mutex) {
    m_mutex.lock();
  }

  ~writer_lock() { m_mutex.unlock(); }
};
} // namespace utils
} // namespace orbis