move IPC utilities from orbis-kernel to rx

This commit is contained in:
DH 2025-10-05 00:09:42 +03:00
parent 30469f7fb9
commit e73a0b962d
41 changed files with 558 additions and 172 deletions

View file

@@ -0,0 +1,72 @@
#pragma once
#include <atomic>
#include <functional>
#include <utility>
namespace rx {
// Atomic operation; returns old value, or pair of old value and return value
// (cancel op if evaluates to false)
// Apply `func` to a private copy of the stored value and publish the result
// with compare_exchange_strong, retrying while other threads win the race.
//
// If `func` returns void the update always commits and the previous value is
// returned. Otherwise `func`'s result acts as a go/no-go flag: a falsy result
// cancels the store, and the caller receives {previous value, func's result}.
template <typename T, typename F, typename RT = std::invoke_result_t<F, T &>>
inline std::conditional_t<std::is_void_v<RT>, T, std::pair<T, RT>>
atomic_fetch_op(std::atomic<T> &v, F func) {
  T expected = v.load();

  for (;;) {
    T desired = expected;

    if constexpr (std::is_void_v<RT>) {
      std::invoke(func, desired);

      if (v.compare_exchange_strong(expected, desired)) [[likely]] {
        return expected;
      }
    } else {
      RT outcome = std::invoke(func, desired);

      // Short-circuit: a falsy outcome cancels the CAS entirely.
      if (!outcome || v.compare_exchange_strong(expected, desired))
          [[likely]] {
        return {expected, std::move(outcome)};
      }
    }
  }
}
// Atomic operation; returns function result value, function is the lambda
// Read-modify-write helper: run `func` on a private copy of the value and
// commit the copy via compare_exchange_strong, retrying until the commit
// wins. Unlike atomic_fetch_op the update is unconditional; the caller gets
// whatever `func` returned (or nothing when `func` returns void).
template <typename T, typename F, typename RT = std::invoke_result_t<F, T &>>
inline RT atomic_op(std::atomic<T> &v, F func) {
  T expected = v.load();

  for (;;) {
    T desired = expected;

    if constexpr (std::is_void_v<RT>) {
      std::invoke(func, desired);

      if (v.compare_exchange_strong(expected, desired)) [[likely]] {
        return;
      }
    } else {
      RT outcome = std::invoke(func, desired);

      if (v.compare_exchange_strong(expected, desired)) [[likely]] {
        return outcome;
      }
    }
  }
}
// Memory-order constants for Hardware Lock Elision (HLE) hints on x86 TSX.
// When the compiler exposes the HLE hint bits, OR them into seq_cst;
// otherwise fall back to plain seq_cst (the hint is purely an optimization).
#if defined(__ATOMIC_HLE_ACQUIRE) && defined(__ATOMIC_HLE_RELEASE)
static constexpr int s_hle_ack = __ATOMIC_SEQ_CST | __ATOMIC_HLE_ACQUIRE;
static constexpr int s_hle_rel = __ATOMIC_SEQ_CST | __ATOMIC_HLE_RELEASE;
#else
static constexpr int s_hle_ack = __ATOMIC_SEQ_CST;
static constexpr int s_hle_rel = __ATOMIC_SEQ_CST;
#endif

// compare_exchange_strong with an HLE-acquire hint.
// On failure, `comp` is updated with the currently stored value.
template <typename T>
inline bool compare_exchange_hle_acq(std::atomic<T> &dest, T &comp, T exch) {
  static_assert(sizeof(T) == 4 || sizeof(T) == 8);
  static_assert(std::atomic<T>::is_always_lock_free);
  // The reinterpret_cast below assumes std::atomic<T> has exactly T's
  // size/representation; verify that assumption instead of relying on it.
  static_assert(sizeof(std::atomic<T>) == sizeof(T));
  return __atomic_compare_exchange(reinterpret_cast<T *>(&dest), &comp, &exch,
                                   false, s_hle_ack, s_hle_ack);
}

// fetch_add with an HLE-release hint; returns the previous value.
template <typename T>
inline T fetch_add_hle_rel(std::atomic<T> &dest, T value) {
  static_assert(sizeof(T) == 4 || sizeof(T) == 8);
  static_assert(std::atomic<T>::is_always_lock_free);
  static_assert(sizeof(std::atomic<T>) == sizeof(T));
  return __atomic_fetch_add(reinterpret_cast<T *>(&dest), value, s_hle_rel);
}
} // namespace rx

View file

@@ -0,0 +1,165 @@
#pragma once
#include <atomic>
#include <chrono>
#include <cstdint>
#include <functional>
#include <limits>
#include <system_error>
#include <thread>
#include <type_traits>
namespace rx {
// Give up the rest of this thread's timeslice.
inline void yield() { std::this_thread::yield(); }

// Busy-wait hint: `pause` on x86 with GCC/Clang, otherwise a full yield.
inline void relax() {
#if defined(__GNUC__) && (defined __i386__ || defined __x86_64__)
  __builtin_ia32_pause();
#else
  yield();
#endif
}

// The first kRelaxSpinCount polls use the cheap `pause` hint; the remaining
// polls up to kSpinCount give the scheduler a chance to run someone else.
static constexpr auto kRelaxSpinCount = 12;
static constexpr auto kSpinCount = 16;

// Optional per-thread hook invoked around blocking waits; nullptr when unused.
inline thread_local bool (*g_scopedUnblock)(bool) = nullptr;

// Poll `pred` up to kSpinCount times, relaxing (then yielding) between polls.
// Returns true as soon as `pred` does; false once the spin budget runs out.
template <typename Pred> bool try_spin_wait(Pred &&pred) {
  for (std::size_t attempt = 0; attempt < kSpinCount; ++attempt) {
    if (pred()) {
      return true;
    }
    attempt < kRelaxSpinCount ? relax() : yield();
  }
  return false;
}

// Spin on `pred` while `spinCond` allows it; a bounded try_spin_wait runs
// first. Returns true when `pred` was satisfied, false when `spinCond`
// terminated the wait.
template <typename Pred, typename Cond>
bool spin_wait(Pred &&pred, Cond &&spinCond) {
  if (try_spin_wait(pred)) {
    return true;
  }
  while (spinCond()) {
    if (pred()) {
      return true;
    }
  }
  return false;
}
// 32-bit atomic with futex-like wait/notify; the blocking/notification
// syscalls (wait_impl, notify_n) are implemented out-of-line in the .cpp.
struct shared_atomic32 : std::atomic<std::uint32_t> {
  using atomic::atomic;
  using atomic::operator=;

  // Wait until the value differs from `oldValue` or the absolute `timeout`
  // expires. Returns std::errc{} on wake-up, std::errc::timed_out otherwise.
  template <typename Clock, typename Dur>
  std::errc wait(std::uint32_t oldValue,
                 std::chrono::time_point<Clock, Dur> timeout) {
    // Bounded spin first: most waits are short.
    if (try_spin_wait(
            [&] { return load(std::memory_order::acquire) != oldValue; })) {
      return {};
    }

    auto now = Clock::now();
    if (timeout < now) {
      return std::errc::timed_out;
    }

    return wait_impl(
        oldValue,
        std::chrono::duration_cast<std::chrono::microseconds>(timeout - now));
  }

  // Wait with a relative timeout; goes straight to the blocking path.
  std::errc wait(std::uint32_t oldValue,
                 std::chrono::microseconds usec_timeout) {
    return wait_impl(oldValue, usec_timeout);
  }

  // Wait indefinitely until the value differs from `oldValue`.
  std::errc wait(std::uint32_t oldValue) {
    if (try_spin_wait(
            [&] { return load(std::memory_order::acquire) != oldValue; })) {
      return {};
    }
    return wait_impl(oldValue);
  }

  // Wait until the predicate `fn(currentValue)` returns true.
  // NOTE: the trailing return type is only a SFINAE constraint. The function
  // returns void: the previous `-> decltype(fn(...))` form was ill-formed for
  // every usable `fn` (`fn` must yield a bool-convertible result to serve as
  // a predicate, yet the body exits with a value-less `return;`).
  template <typename F>
  auto wait(F &fn)
      -> std::void_t<decltype(fn(std::declval<std::uint32_t &>()))> {
    while (true) {
      std::uint32_t lastValue;
      if (try_spin_wait([&] {
            lastValue = load(std::memory_order::acquire);
            return fn(lastValue);
          })) {
        return;
      }
      // Block until the value changes, then re-check the predicate.
      while (wait_impl(lastValue) != std::errc{}) {
      }
    }
  }

  int notify_one() const { return notify_n(1); }
  int notify_all() const { return notify_n(std::numeric_limits<int>::max()); }

  // Wake up to `count` waiters (out-of-line); returns the reported count.
  int notify_n(int count) const;

  // Atomic operation; returns old value, or pair of old value and return
  // value (cancel op if evaluates to false).
  template <typename F, typename RT = std::invoke_result_t<F, std::uint32_t &>>
  std::conditional_t<std::is_void_v<RT>, std::uint32_t,
                     std::pair<std::uint32_t, RT>>
  fetch_op(F &&func) {
    std::uint32_t _new;
    std::uint32_t old = load(std::memory_order::relaxed);

    while (true) {
      _new = old;

      if constexpr (std::is_void_v<RT>) {
        // Invoke without std::forward: on CAS contention the functor is
        // called again, so it must never be moved-from.
        std::invoke(func, _new);

        if (compare_exchange_strong(old, _new)) [[likely]] {
          return old;
        }
      } else {
        RT ret = std::invoke(func, _new);

        // A falsy result cancels the store and reports the observed value.
        if (!ret || compare_exchange_strong(old, _new)) [[likely]] {
          return {old, std::move(ret)};
        }
      }
    }
  }

  // Atomic operation; returns the function result once the store wins.
  template <typename F, typename RT = std::invoke_result_t<F, std::uint32_t &>>
  RT op(F &&func) {
    std::uint32_t _new;
    std::uint32_t old = load(std::memory_order::relaxed);

    while (true) {
      _new = old;

      if constexpr (std::is_void_v<RT>) {
        std::invoke(func, _new);

        if (compare_exchange_strong(old, _new)) [[likely]] {
          return;
        }
      } else {
        RT result = std::invoke(func, _new);

        if (compare_exchange_strong(old, _new)) [[likely]] {
          return result;
        }
      }
    }
  }

private:
  // Futex-style blocking wait (out-of-line); default timeout = effectively
  // infinite.
  [[nodiscard]] std::errc wait_impl(std::uint32_t oldValue,
                                    std::chrono::microseconds usec_timeout =
                                        std::chrono::microseconds::max());
};
} // namespace rx

View file

@@ -0,0 +1,89 @@
#pragma once
#include "SharedAtomic.hpp"
#include "SharedMutex.hpp"

#include <chrono>
#include <cstdint>
#include <limits>
#include <mutex>
#include <system_error>
namespace rx {
// IPC-ready lightweight condition variable
class shared_cv final {
  // Packed 32-bit state:
  enum : unsigned {
    c_waiter_mask = 0xffff,     // low 16 bits: number of waiters
    c_signal_mask = 0x7fff0000, // pending-signal counter
#ifdef __linux__ // was `__linux`, which strict -std= modes do not predefine
    c_locked_mask = 0x80000000,
#endif
    c_signal_one = c_waiter_mask + 1, // one signal increment
  };

  shared_atomic32 m_value{0};

protected:
  // Increment waiter count. Returns the new packed value, or 0 on signal or
  // waiter counter overflow (in which case the caller must not wait).
  unsigned add_waiter() noexcept {
    return m_value.op([](unsigned &value) -> unsigned {
      if ((value & c_signal_mask) == c_signal_mask ||
          (value & c_waiter_mask) == c_waiter_mask) {
        // Signal or waiter overflow, return immediately
        return 0;
      }

      // Add waiter (c_waiter_mask)
      value += 1;
      return value;
    });
  }

  // Internal waiting function
  std::errc impl_wait(shared_mutex &mutex, unsigned _val,
                      std::uint64_t usec_timeout) noexcept;

  // Try to notify up to _count threads
  void impl_wake(shared_mutex &mutex, int _count) noexcept;

public:
  constexpr shared_cv() = default;

  // Wait with a microseconds timeout; the lock is released while waiting.
  std::errc
  wait(std::unique_lock<shared_mutex> &lock,
       std::chrono::microseconds timeout = std::chrono::microseconds::max()) {
    return wait(*lock.mutex(), timeout.count());
  }

  // Wait with an arbitrary duration, converted to microseconds.
  template <typename Rep, typename Period>
  std::errc wait(std::unique_lock<shared_mutex> &lock,
                 std::chrono::duration<Rep, Period> timeout) {
    // Forward the duration itself. Passing `.count()` (a raw integer) cannot
    // select the microseconds overload — std::chrono::duration's converting
    // constructor from a representation is explicit — so the previous form
    // failed to compile on instantiation.
    return wait(
        lock, std::chrono::duration_cast<std::chrono::microseconds>(timeout));
  }

  // Core wait: registers this thread as a waiter, releases the mutex, then
  // blocks. usec_timeout of -1 (all bits set) means "no timeout".
  std::errc wait(shared_mutex &mutex,
                 std::uint64_t usec_timeout = -1) noexcept {
    const unsigned _val = add_waiter();
    if (!_val) {
      // Counter overflow: treat as an immediate (spurious) wake-up.
      return {};
    }

    mutex.unlock();
    return impl_wait(mutex, _val, usec_timeout);
  }

  // Wake one thread
  void notify_one(shared_mutex &mutex) noexcept {
    if (m_value) {
      impl_wake(mutex, 1);
    }
  }

  // Wake all threads
  void notify_all(shared_mutex &mutex) noexcept {
    if (m_value) {
      // std::numeric_limits replaces the INT_MAX macro, which was used
      // without including <climits>.
      impl_wake(mutex, std::numeric_limits<int>::max());
    }
  }
};
} // namespace rx

View file

@@ -0,0 +1,148 @@
#pragma once
#include "AtomicOp.hpp"
#include "SharedAtomic.hpp"
#include <system_error>
namespace rx {
// IPC-ready shared mutex, using only writer lock is recommended
// State is packed into one 32-bit word: values in [0, c_one) count readers
// (fixed point, c_one == "1.0" == one writer); c_sig/c_err are flags used by
// the out-of-line slow paths — see the .cpp for their exact semantics.
class shared_mutex final {
  friend class shared_cv;
  enum : unsigned {
    c_one = 1u << 14, // Fixed-point 1.0 value (one writer)
    c_sig = 1u << 30,
    c_err = 1u << 31,
  };
  shared_atomic32 m_value{};
  // Blocking/waking slow paths, implemented out-of-line in the .cpp.
  void impl_lock_shared(unsigned val);
  void impl_unlock_shared(unsigned old);
  std::errc impl_wait();
  void impl_signal();
  void impl_lock(unsigned val);
  void impl_unlock(unsigned old);
  void impl_lock_upgrade();
public:
  constexpr shared_mutex() = default;
  // Non-blocking reader lock: succeeds only while no writer is present and
  // the reader count has room.
  bool try_lock_shared() {
    // Conditional increment
    unsigned value = m_value.load();
    return value < c_one - 1 &&
           m_value.compare_exchange_strong(value, value + 1);
  }
  // Lock with HLE acquire hint
  void lock_shared() {
    unsigned value = m_value.load();
    if (value < c_one - 1) [[likely]] {
      unsigned old = value;
      if (compare_exchange_hle_acq(m_value, old, value + 1)) [[likely]] {
        return;
      }
    }
    // Fast path lost the race (or a writer holds the lock): block.
    impl_lock_shared(value + 1);
  }
  // Unlock with HLE release hint
  void unlock_shared() {
    const unsigned value = fetch_add_hle_rel(m_value, -1u);
    // A writer bit (or flag) was set in the pre-decrement value: wake it.
    if (value >= c_one) [[unlikely]] {
      impl_unlock_shared(value);
    }
  }
  // Non-blocking writer lock: only succeeds from the completely free state.
  bool try_lock() {
    unsigned value = 0;
    return m_value.compare_exchange_strong(value, c_one);
  }
  // Lock with HLE acquire hint
  void lock() {
    unsigned value = 0;
    if (!compare_exchange_hle_acq(m_value, value, +c_one)) [[unlikely]] {
      // `value` now holds the observed state for the slow path.
      impl_lock(value);
    }
  }
  // Unlock with HLE release hint
  void unlock() {
    const unsigned value = fetch_add_hle_rel(m_value, 0u - c_one);
    // Anything other than a bare writer bit means waiters/flags to process.
    if (value != c_one) [[unlikely]] {
      impl_unlock(value);
    }
  }
  bool try_lock_upgrade() {
    unsigned value = m_value.load();
    // Conditional increment, try to convert a single reader into a writer,
    // ignoring other writers
    return (value + c_one - 1) % c_one == 0 &&
           m_value.compare_exchange_strong(value, value + c_one - 1);
  }
  // Blocking reader->writer upgrade; falls back to the slow path on failure.
  void lock_upgrade() {
    if (!try_lock_upgrade()) [[unlikely]] {
      impl_lock_upgrade();
    }
  }
  void lock_downgrade() {
    // Convert to reader lock (can result in broken state)
    m_value -= c_one - 1;
  }
  // Check whether can immediately obtain an exclusive (writer) lock
  [[nodiscard]] bool is_free() const { return m_value.load() == 0; }
  // Check whether can immediately obtain a shared (reader) lock
  [[nodiscard]] bool is_lockable() const { return m_value.load() < c_one - 1; }
private:
  // For CV
  bool lock_forced(int count = 1);
};
// Simplified shared (reader) lock implementation.
// Holds a shared lock for its lifetime; may be upgraded to exclusive once,
// in which case the destructor releases the exclusive lock instead.
class reader_lock final {
  shared_mutex &m_mutex;
  bool m_upgraded = false;

public:
  reader_lock(const reader_lock &) = delete;
  reader_lock &operator=(const reader_lock &) = delete;

  explicit reader_lock(shared_mutex &mutex) : m_mutex(mutex) {
    m_mutex.lock_shared();
  }

  // One-way lock upgrade; note that the observed state could have been changed
  void upgrade() {
    if (m_upgraded) {
      return;
    }
    m_mutex.lock_upgrade();
    m_upgraded = true;
  }

  // Try to upgrade; if it succeeds, the observed state has NOT been changed
  bool try_upgrade() {
    if (!m_upgraded) {
      m_upgraded = m_mutex.try_lock_upgrade();
    }
    return m_upgraded;
  }

  // Release whichever lock flavor is currently held.
  ~reader_lock() {
    if (m_upgraded) {
      m_mutex.unlock();
    } else {
      m_mutex.unlock_shared();
    }
  }
};
// Scoped exclusive (writer) lock: acquires in the constructor, releases in
// the destructor. Non-copyable.
class writer_lock final {
  shared_mutex &m_mutex;

public:
  writer_lock(const writer_lock &) = delete;
  writer_lock &operator=(const writer_lock &) = delete;

  explicit writer_lock(shared_mutex &mutex) : m_mutex(mutex) {
    m_mutex.lock();
  }

  ~writer_lock() { m_mutex.unlock(); }
};
} // namespace rx