move IPC utilities from orbis-kernel to rx

This commit is contained in:
DH 2025-10-05 00:09:42 +03:00
parent 30469f7fb9
commit e73a0b962d
41 changed files with 558 additions and 172 deletions

View file

@ -5,10 +5,13 @@ find_package(Git)
# Object library with the shared rx utilities.
# NOTE: keep the source list sorted and free of duplicates —
# src/FileLock.cpp was previously listed twice.
add_library(${PROJECT_NAME} OBJECT
  src/debug.cpp
  src/die.cpp
  src/FileLock.cpp
  src/hexdump.cpp
  src/mem.cpp
  src/SharedAtomic.cpp
  src/SharedCV.cpp
  src/SharedMutex.cpp
  src/Version.cpp
)
target_include_directories(${PROJECT_NAME}

View file

@ -0,0 +1,72 @@
#pragma once
#include <atomic>
#include <functional>
#include <utility>
namespace rx {
// Atomic read-modify-write built on a compare-exchange retry loop.
// - void functor: applies `func` and returns the previous value.
// - non-void functor: returns {previous value, functor result}; when the
//   result converts to false the store is skipped (operation cancelled).
template <typename T, typename F, typename RT = std::invoke_result_t<F, T &>>
inline std::conditional_t<std::is_void_v<RT>, T, std::pair<T, RT>>
atomic_fetch_op(std::atomic<T> &v, F func) {
  T expected = v.load();
  while (true) {
    // Work on a scratch copy; compare_exchange refreshes `expected` on
    // failure, so the next iteration starts from the current value.
    T desired = expected;
    if constexpr (std::is_void_v<RT>) {
      std::invoke(func, desired);
      if (v.compare_exchange_strong(expected, desired)) [[likely]] {
        return expected;
      }
    } else {
      RT ret = std::invoke(func, desired);
      // A falsy result cancels the store but still reports the old value.
      if (!ret || v.compare_exchange_strong(expected, desired)) [[likely]] {
        return {expected, std::move(ret)};
      }
    }
  }
}
// Apply `func` atomically via a compare-exchange retry loop and return
// whatever `func` produced on the iteration whose store succeeded.
template <typename T, typename F, typename RT = std::invoke_result_t<F, T &>>
inline RT atomic_op(std::atomic<T> &v, F func) {
  for (T expected = v.load();;) {
    T desired = expected;
    if constexpr (std::is_void_v<RT>) {
      std::invoke(func, desired);
      if (v.compare_exchange_strong(expected, desired)) [[likely]] {
        return;
      }
    } else {
      RT result = std::invoke(func, desired);
      if (v.compare_exchange_strong(expected, desired)) [[likely]] {
        return result;
      }
    }
    // CAS failure refreshed `expected`; retry with the current value.
  }
}
#if defined(__ATOMIC_HLE_ACQUIRE) && defined(__ATOMIC_HLE_RELEASE)
// Intel TSX hardware lock elision hints, OR-ed into the memory-order
// argument of the GCC __atomic builtins when the compiler exposes them.
static constexpr int s_hle_ack = __ATOMIC_SEQ_CST | __ATOMIC_HLE_ACQUIRE;
static constexpr int s_hle_rel = __ATOMIC_SEQ_CST | __ATOMIC_HLE_RELEASE;
#else
// No HLE support: fall back to plain sequentially-consistent ordering.
static constexpr int s_hle_ack = __ATOMIC_SEQ_CST;
static constexpr int s_hle_rel = __ATOMIC_SEQ_CST;
#endif

// compare_exchange with an HLE-acquire hint (lock acquisition path).
// Restricted to 4/8-byte always-lock-free atomics so the raw builtin is
// valid. NOTE(review): casting std::atomic<T>* to T* assumes identical
// layout — true on the supported compilers, not guaranteed by the standard.
template <typename T>
inline bool compare_exchange_hle_acq(std::atomic<T> &dest, T &comp, T exch) {
  static_assert(sizeof(T) == 4 || sizeof(T) == 8);
  static_assert(std::atomic<T>::is_always_lock_free);
  return __atomic_compare_exchange(reinterpret_cast<T *>(&dest), &comp, &exch,
                                   false, s_hle_ack, s_hle_ack);
}

// fetch_add with an HLE-release hint (lock release path); returns the
// previous value.
template <typename T>
inline T fetch_add_hle_rel(std::atomic<T> &dest, T value) {
  static_assert(sizeof(T) == 4 || sizeof(T) == 8);
  static_assert(std::atomic<T>::is_always_lock_free);
  return __atomic_fetch_add(reinterpret_cast<T *>(&dest), value, s_hle_rel);
}
} // namespace rx

View file

@ -0,0 +1,165 @@
#pragma once
#include <atomic>
#include <chrono>
#include <cstdint>
#include <functional>
#include <limits>
#include <system_error>
#include <thread>
#include <type_traits>
namespace rx {
// Give up the rest of this thread's timeslice to the scheduler.
inline void yield() { std::this_thread::yield(); }

// Spin-loop hint: `pause` on x86 (GNU compilers), otherwise a full yield.
inline void relax() {
#if defined(__GNUC__) && (defined __i386__ || defined __x86_64__)
  __builtin_ia32_pause();
#else
  yield();
#endif
}

// Spin budget used by try_spin_wait: kSpinCount iterations total, of which
// the first kRelaxSpinCount use the cheap `pause` hint and the remainder
// yield the timeslice.
static constexpr auto kRelaxSpinCount = 12;
static constexpr auto kSpinCount = 16;

// Optional per-thread hook wrapped around blocking waits: invoked with true
// before blocking and false after; a false return aborts/interrupts the
// wait (see wait_impl). NOTE(review): semantics inferred from the wait_impl
// call sites — confirm with the hook's installers.
inline thread_local bool (*g_scopedUnblock)(bool) = nullptr;
// Poll `pred` for a bounded number of spins before the caller falls back to
// a blocking wait. Returns true as soon as `pred` holds, false once the spin
// budget (kSpinCount) is exhausted. Early iterations use the cheap CPU relax
// hint; later ones yield the timeslice.
bool try_spin_wait(auto &&pred) {
  std::size_t spin = 0;
  while (spin < kSpinCount) {
    if (pred()) {
      return true;
    }
    if (spin < kRelaxSpinCount) {
      relax();
    } else {
      yield();
    }
    ++spin;
  }
  return false;
}
// Spin until `pred` holds: first the bounded fast spin, then for as long as
// `spinCond()` keeps returning true. Returns false only when spinCond gives
// up before the predicate became true.
bool spin_wait(auto &&pred, auto &&spinCond) {
  bool satisfied = try_spin_wait(pred);
  while (!satisfied && spinCond()) {
    satisfied = pred();
  }
  return satisfied;
}
// Futex-style 32-bit atomic suitable for memory shared between processes.
// Adds blocking wait/notify (implemented per-platform in SharedAtomic.cpp)
// plus CAS-loop helpers on top of std::atomic<uint32_t>.
struct shared_atomic32 : std::atomic<std::uint32_t> {
  using atomic::atomic;
  using atomic::operator=;

  // Wait until the value differs from `oldValue` or the absolute `timeout`
  // time point is reached. Spins briefly before blocking.
  template <typename Clock, typename Dur>
  std::errc wait(std::uint32_t oldValue,
                 std::chrono::time_point<Clock, Dur> timeout) {
    if (try_spin_wait(
            [&] { return load(std::memory_order::acquire) != oldValue; })) {
      return {};
    }

    auto now = Clock::now();
    if (timeout < now) {
      return std::errc::timed_out;
    }

    return wait_impl(
        oldValue,
        std::chrono::duration_cast<std::chrono::microseconds>(timeout - now));
  }

  // Relative-timeout wait; goes straight to the blocking path.
  std::errc wait(std::uint32_t oldValue,
                 std::chrono::microseconds usec_timeout) {
    return wait_impl(oldValue, usec_timeout);
  }

  // Untimed wait: spin briefly, then block until notified with a new value.
  std::errc wait(std::uint32_t oldValue) {
    if (try_spin_wait(
            [&] { return load(std::memory_order::acquire) != oldValue; })) {
      return {};
    }

    return wait_impl(oldValue);
  }

  // Wait until predicate `fn(currentValue)` returns true.
  // The trailing return type only constrains this overload to callables;
  // the function itself yields nothing. (Fixed: it was previously spelled
  // `decltype(fn(...))`, which made the plain `return;` below ill-formed
  // for any usable bool-returning predicate.)
  auto wait(auto &fn)
      -> std::void_t<decltype(fn(std::declval<std::uint32_t &>()))> {
    while (true) {
      std::uint32_t lastValue;
      if (try_spin_wait([&] {
            lastValue = load(std::memory_order::acquire);
            return fn(lastValue);
          })) {
        return;
      }

      // Block until the value changes, then re-evaluate the predicate.
      while (wait_impl(lastValue) != std::errc{}) {
      }
    }
  }

  // Wake waiters; the return value is platform-specific (see notify_n).
  int notify_one() const { return notify_n(1); }
  int notify_all() const { return notify_n(std::numeric_limits<int>::max()); }
  int notify_n(int count) const;

  // Atomic operation; returns old value, or pair of old value and return
  // value (the store is cancelled when that value converts to false).
  template <typename F, typename RT = std::invoke_result_t<F, std::uint32_t &>>
  std::conditional_t<std::is_void_v<RT>, std::uint32_t,
                     std::pair<std::uint32_t, RT>>
  fetch_op(F &&func) {
    std::uint32_t _new;
    std::uint32_t old = load(std::memory_order::relaxed);
    while (true) {
      _new = old;
      if constexpr (std::is_void_v<RT>) {
        // Invoke through an lvalue: forwarding inside the retry loop could
        // move from `func` more than once.
        std::invoke(func, _new);
        if (compare_exchange_strong(old, _new)) [[likely]] {
          return old;
        }
      } else {
        RT ret = std::invoke(func, _new);
        if (!ret || compare_exchange_strong(old, _new)) [[likely]] {
          return {old, std::move(ret)};
        }
      }
    }
  }

  // Atomic operation; returns the result of `func` from the iteration whose
  // store succeeded.
  template <typename F, typename RT = std::invoke_result_t<F, std::uint32_t &>>
  RT op(F &&func) {
    std::uint32_t _new;
    std::uint32_t old = load(std::memory_order::relaxed);
    while (true) {
      _new = old;
      if constexpr (std::is_void_v<RT>) {
        std::invoke(func, _new);
        if (compare_exchange_strong(old, _new)) [[likely]] {
          return;
        }
      } else {
        RT result = std::invoke(func, _new);
        if (compare_exchange_strong(old, _new)) [[likely]] {
          return result;
        }
      }
    }
  }

private:
  // Platform-specific blocking wait (futex / __ulock / WaitOnAddress),
  // defined in SharedAtomic.cpp. microseconds::max() means "no timeout".
  [[nodiscard]] std::errc wait_impl(std::uint32_t oldValue,
                                    std::chrono::microseconds usec_timeout =
                                        std::chrono::microseconds::max());
};
} // namespace rx

View file

@ -0,0 +1,89 @@
#pragma once
#include "SharedAtomic.hpp"
#include "SharedMutex.hpp"
#include <chrono>
#include <cstdint>
#include <mutex>
#include <system_error>
namespace rx {
// IPC-ready lightweight condition variable.
// Packs the waiter count, pending-signal count and (on Linux) a
// "mutex ownership transferred" flag into a single 32-bit futex word.
class shared_cv final {
  enum : unsigned {
    c_waiter_mask = 0xffff,     // low 16 bits: number of waiters
    c_signal_mask = 0x7fff0000, // next 15 bits: pending signals
#ifdef __linux
    // NOTE(review): other files in this set test __linux__
    // (SharedAtomic.cpp); both macros are defined by gcc/clang on Linux,
    // but the spelling should be unified.
    c_locked_mask = 0x80000000, // mutex handed over to a waiter
#endif
    c_signal_one = c_waiter_mask + 1, // lowest signal bit (one signal)
  };

  shared_atomic32 m_value{0};

protected:
  // Register the calling thread as a waiter. Returns the new counter state,
  // or 0 when the waiter/signal count would overflow (caller skips waiting).
  unsigned add_waiter() noexcept {
    return m_value.op([](unsigned &value) -> unsigned {
      if ((value & c_signal_mask) == c_signal_mask ||
          (value & c_waiter_mask) == c_waiter_mask) {
        // Signal or waiter overflow, return immediately
        return 0;
      }

      // Add one waiter (low c_waiter_mask bits)
      value += 1;
      return value;
    });
  }

  // Internal waiting function; called with `mutex` already unlocked.
  std::errc impl_wait(shared_mutex &mutex, unsigned _val,
                      std::uint64_t usec_timeout) noexcept;

  // Try to notify up to _count threads
  void impl_wake(shared_mutex &mutex, int _count) noexcept;

public:
  constexpr shared_cv() = default;

  // Wait with an explicit microsecond timeout (max() = wait forever).
  std::errc
  wait(std::unique_lock<shared_mutex> &lock,
       std::chrono::microseconds timeout = std::chrono::microseconds::max()) {
    return wait(*lock.mutex(), timeout.count());
  }

  // Generic-duration overload: convert to microseconds and forward.
  // (Fixed: previously forwarded the raw `.count()` integer, which no
  // overload accepts — chrono constructors are explicit — so any
  // instantiation failed to compile.)
  template <typename Rep, typename Period>
  std::errc wait(std::unique_lock<shared_mutex> &lock,
                 std::chrono::duration<Rep, Period> timeout) {
    return wait(
        lock, std::chrono::duration_cast<std::chrono::microseconds>(timeout));
  }

  // Core wait: register a waiter, release `mutex`, block, reacquire.
  // usec_timeout of -1 (the default) means wait forever.
  std::errc wait(shared_mutex &mutex,
                 std::uint64_t usec_timeout = -1) noexcept {
    const unsigned _val = add_waiter();
    if (!_val) {
      // Counter overflow: degrade to a no-op wait rather than corrupt state.
      return {};
    }

    mutex.unlock();
    return impl_wait(mutex, _val, usec_timeout);
  }

  // Wake one thread
  void notify_one(shared_mutex &mutex) noexcept {
    if (m_value) {
      impl_wake(mutex, 1);
    }
  }

  // Wake all threads. (Uses numeric_limits instead of INT_MAX: <climits>
  // was never included; <limits> arrives via SharedAtomic.hpp.)
  void notify_all(shared_mutex &mutex) noexcept {
    if (m_value) {
      impl_wake(mutex, std::numeric_limits<int>::max());
    }
  }
};
} // namespace rx

View file

@ -0,0 +1,148 @@
#pragma once
#include "AtomicOp.hpp"
#include "SharedAtomic.hpp"
#include <system_error>
namespace rx {
// IPC-ready shared mutex, using only writer lock is recommended.
// State is one 32-bit word: values below c_one count readers, multiples of
// c_one count queued/active writers, c_sig is a wakeup token passed between
// waiters, and values at/above c_err indicate counter corruption.
class shared_mutex final {
  friend class shared_cv;

  enum : unsigned {
    c_one = 1u << 14, // Fixed-point 1.0 value (one writer)
    c_sig = 1u << 30, // Pending wakeup token (see impl_signal/impl_wait)
    c_err = 1u << 31, // Guard region: reaching it aborts (under/overflow)
  };

  shared_atomic32 m_value{};

  // Slow paths, defined in SharedMutex.cpp.
  void impl_lock_shared(unsigned val);
  void impl_unlock_shared(unsigned old);
  std::errc impl_wait();
  void impl_signal();
  void impl_lock(unsigned val);
  void impl_unlock(unsigned old);
  void impl_lock_upgrade();

public:
  constexpr shared_mutex() = default;

  // Reader lock succeeds only while no writer is present or queued
  // (counter still below c_one - 1).
  bool try_lock_shared() {
    // Conditional increment
    unsigned value = m_value.load();
    return value < c_one - 1 &&
           m_value.compare_exchange_strong(value, value + 1);
  }

  // Lock with HLE acquire hint
  void lock_shared() {
    unsigned value = m_value.load();
    if (value < c_one - 1) [[likely]] {
      unsigned old = value;
      if (compare_exchange_hle_acq(m_value, old, value + 1)) [[likely]] {
        return;
      }
    }

    impl_lock_shared(value + 1);
  }

  // Unlock with HLE release hint
  void unlock_shared() {
    const unsigned value = fetch_add_hle_rel(m_value, -1u);

    // A writer is queued (old count >= c_one): slow path signals it.
    if (value >= c_one) [[unlikely]] {
      impl_unlock_shared(value);
    }
  }

  // Writer lock is only available when the mutex is completely free.
  bool try_lock() {
    unsigned value = 0;
    return m_value.compare_exchange_strong(value, c_one);
  }

  // Lock with HLE acquire hint
  void lock() {
    unsigned value = 0;
    if (!compare_exchange_hle_acq(m_value, value, +c_one)) [[unlikely]] {
      impl_lock(value);
    }
  }

  // Unlock with HLE release hint
  void unlock() {
    const unsigned value = fetch_add_hle_rel(m_value, 0u - c_one);

    // Someone else was queued (old value != exactly one writer): wake them.
    if (value != c_one) [[unlikely]] {
      impl_unlock(value);
    }
  }

  bool try_lock_upgrade() {
    unsigned value = m_value.load();

    // Conditional increment, try to convert a single reader into a writer,
    // ignoring other writers
    return (value + c_one - 1) % c_one == 0 &&
           m_value.compare_exchange_strong(value, value + c_one - 1);
  }

  void lock_upgrade() {
    if (!try_lock_upgrade()) [[unlikely]] {
      impl_lock_upgrade();
    }
  }

  void lock_downgrade() {
    // Convert to reader lock (can result in broken state)
    m_value -= c_one - 1;
  }

  // Check whether can immediately obtain an exclusive (writer) lock
  [[nodiscard]] bool is_free() const { return m_value.load() == 0; }

  // Check whether can immediately obtain a shared (reader) lock
  [[nodiscard]] bool is_lockable() const { return m_value.load() < c_one - 1; }

private:
  // For CV: forcibly add `count` writer units (negative removes them);
  // returns whether the mutex was acquired outright. See shared_cv.
  bool lock_forced(int count = 1);
};
// RAII shared (reader) lock with an optional one-way upgrade to writer.
class reader_lock final {
  shared_mutex &m_mutex;
  bool m_upgraded = false;

public:
  reader_lock(const reader_lock &) = delete;
  reader_lock &operator=(const reader_lock &) = delete;

  explicit reader_lock(shared_mutex &mutex) : m_mutex(mutex) {
    m_mutex.lock_shared();
  }

  // One-way lock upgrade; note that the observed state could have been
  // changed while the lock was being converted.
  void upgrade() {
    if (m_upgraded) {
      return;
    }
    m_mutex.lock_upgrade();
    m_upgraded = true;
  }

  // Try to upgrade; if it succeeds, the observed state has NOT been changed.
  bool try_upgrade() {
    if (!m_upgraded && m_mutex.try_lock_upgrade()) {
      m_upgraded = true;
    }
    return m_upgraded;
  }

  // Release whichever lock flavor we currently hold.
  ~reader_lock() {
    if (m_upgraded) {
      m_mutex.unlock();
    } else {
      m_mutex.unlock_shared();
    }
  }
};
// Scoped exclusive (writer) lock over a shared_mutex.
class writer_lock final {
  shared_mutex &m_mutex;

public:
  writer_lock(const writer_lock &) = delete;
  writer_lock &operator=(const writer_lock &) = delete;

  explicit writer_lock(shared_mutex &mutex) : m_mutex(mutex) {
    m_mutex.lock();
  }

  ~writer_lock() { m_mutex.unlock(); }
};
} // namespace rx

204
rx/src/SharedAtomic.cpp Normal file
View file

@ -0,0 +1,204 @@
#include "SharedAtomic.hpp"

#include <cerrno>

using namespace rx;

#ifdef __linux__
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
// Linux: block in FUTEX_WAIT until *this != oldValue, the (relative)
// timeout expires, or the wait is interrupted. max() means wait forever.
std::errc shared_atomic32::wait_impl(std::uint32_t oldValue,
                                     std::chrono::microseconds usec_timeout) {
  auto usec_timeout_count = usec_timeout.count();
  struct timespec timeout{};
  bool useTimeout = usec_timeout != std::chrono::microseconds::max();

  if (useTimeout) {
    // FUTEX_WAIT takes a relative timespec.
    timeout.tv_nsec = (usec_timeout_count % 1000'000) * 1000;
    timeout.tv_sec = (usec_timeout_count / 1000'000);
  }

  // Only engage the unblock hook for long/infinite waits (> 1ms); it is
  // called with true before blocking and false after (see g_scopedUnblock).
  bool unblock = (!useTimeout || usec_timeout.count() > 1000) &&
                 g_scopedUnblock != nullptr;
  if (unblock) {
    if (!g_scopedUnblock(true)) {
      return std::errc::interrupted;
    }
  }

  int result = syscall(SYS_futex, this, FUTEX_WAIT, oldValue,
                       useTimeout ? &timeout : nullptr);

  // Capture errno before the hook can run code that clobbers it.
  auto errorCode = result < 0 ? static_cast<std::errc>(errno) : std::errc{};

  if (unblock) {
    if (!g_scopedUnblock(false)) {
      // Hook reports cancellation: surface it as interrupted unless the
      // wait itself already completed successfully.
      if (result < 0) {
        return std::errc::interrupted;
      }

      return {};
    }
  }

  if (result < 0) {
    if (errorCode == std::errc::interrupted) {
      // Map EINTR to EAGAIN so callers treat it as a spurious wakeup.
      return std::errc::resource_unavailable_try_again;
    }

    // EAGAIN (value already changed) and ETIMEDOUT pass through directly.
    return errorCode;
  }

  return {};
}
// Wake up to `count` threads blocked in FUTEX_WAIT on this word; returns
// the number of waiters the kernel actually woke (or -1 on error).
int shared_atomic32::notify_n(int count) const {
  long woken = syscall(SYS_futex, this, FUTEX_WAKE, count);
  return static_cast<int>(woken);
}
#elif defined(__APPLE__)
#include <limits>
#define UL_COMPARE_AND_WAIT 1
#define UL_UNFAIR_LOCK 2
#define UL_COMPARE_AND_WAIT_SHARED 3
#define UL_UNFAIR_LOCK64_SHARED 4
#define UL_COMPARE_AND_WAIT64 5
#define UL_COMPARE_AND_WAIT64_SHARED 6
#define ULF_WAKE_ALL 0x00000100
#define ULF_WAKE_THREAD 0x00000200
#define ULF_WAKE_ALLOW_NON_OWNER 0x00000400
#define ULF_WAIT_WORKQ_DATA_CONTENTION 0x00010000
#define ULF_WAIT_CANCEL_POINT 0x00020000
#define ULF_WAIT_ADAPTIVE_SPIN 0x00040000
#define ULF_NO_ERRNO 0x01000000
#define UL_OPCODE_MASK 0x000000FF
#define UL_FLAGS_MASK 0xFFFFFF00
#define ULF_GENERIC_MASK 0xFFFF0000
extern int __ulock_wait(uint32_t operation, void *addr, uint64_t value,
uint32_t timeout);
extern int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value);
// Darwin: block in __ulock_wait until *this != oldValue or the timeout
// expires. A usec_timeout of microseconds::max() means wait forever.
std::errc shared_atomic32::wait_impl(std::uint32_t oldValue,
                                     std::chrono::microseconds usec_timeout) {
  bool useTimeout = usec_timeout != std::chrono::microseconds::max();
  bool unblock = (!useTimeout || usec_timeout.count() > 1000) &&
                 g_scopedUnblock != nullptr;
  if (unblock) {
    if (!g_scopedUnblock(true)) {
      return std::errc::interrupted;
    }
  }

  // __ulock_wait takes a uint32_t microsecond timeout where 0 means "wait
  // forever". Previously microseconds::max().count() was truncated to
  // 0xffffffff (~71 minutes) instead of requesting an infinite wait; also
  // clamp finite values into the representable, non-zero range.
  uint32_t timeout_us = 0; // 0 = infinite
  if (useTimeout) {
    auto usec = static_cast<std::uint64_t>(usec_timeout.count());
    if (usec == 0) {
      usec = 1; // 0 would mean "forever"
    } else if (usec > 0xffffffffull) {
      usec = 0xffffffffull; // clamp to the maximum finite timeout
    }
    timeout_us = static_cast<uint32_t>(usec);
  }

  int result = __ulock_wait(UL_COMPARE_AND_WAIT_SHARED, (void *)this, oldValue,
                            timeout_us);
  if (unblock) {
    if (!g_scopedUnblock(false)) {
      if (result < 0) {
        return std::errc::interrupted;
      }

      return {};
    }
  }

  if (result < 0) {
    // Without ULF_NO_ERRNO the error is reported via errno.
    return static_cast<std::errc>(errno);
  }

  return {};
}
// Darwin: wake up to `count` waiters via the __ulock API.
// NOTE(review): the return-value contract is ad hoc (0 / wake count /
// negative ulock error depending on the branch); callers in this set
// ignore it.
int shared_atomic32::notify_n(int count) const {
  int result = 0;
  // ULF_NO_ERRNO makes __ulock_wake return -errno instead of setting errno.
  uint32_t operation = UL_COMPARE_AND_WAIT_SHARED | ULF_NO_ERRNO;
  if (count == 1) {
    result = __ulock_wake(operation, (void *)this, 0);
  } else if (count == std::numeric_limits<int>::max()) {
    result = __ulock_wake(ULF_WAKE_ALL | operation, (void *)this, 0);
  } else {
    // Wake one waiter at a time; stop at the first failure (e.g. when no
    // more waiters exist), preserving the first error code seen.
    for (int i = 0; i < count; ++i) {
      auto ret = __ulock_wake(operation, (void *)this, 0);
      if (ret != 0) {
        if (result == 0) {
          result = ret;
        }
        break;
      }

      result++;
    }
  }

  return result;
}
#elif defined(_WIN32)
#include <cmath>
#include <windows.h>
std::errc shared_atomic32::wait_impl(std::uint32_t oldValue,
std::chrono::microseconds usec_timeout) {
bool useTimeout = usec_timeout != std::chrono::microseconds::max();
bool unblock = (!useTimeout || usec_timeout.count() > 1000) &&
g_scopedUnblock != nullptr;
if (unblock) {
if (!g_scopedUnblock(true)) {
return std::errc::interrupted;
}
}
BOOL result = WaitOnAddress(
this, &oldValue, sizeof(std::uint32_t),
useTimeout
? std::chrono::duration_cast<std::chrono::milliseconds>(usec_timeout)
.count()
: INFINITY);
DWORD error = 0;
if (!result) {
error = GetLastError();
} else {
if (load(std::memory_order::relaxed) == oldValue) {
error = ERROR_ALERTED; // dummy error
}
}
if (unblock) {
if (!g_scopedUnblock(false)) {
if (result != TRUE) {
return std::errc::interrupted;
}
return {};
}
}
if (error == ERROR_TIMEOUT) {
return std::errc::timed_out;
}
return std::errc::resource_unavailable_try_again;
}
int shared_atomic32::notify_n(int count) const {
if (count == 1) {
WakeByAddressSingle(const_cast<shared_atomic32 *>(this));
} else if (count == std::numeric_limits<int>::max()) {
WakeByAddressAll(const_cast<shared_atomic32 *>(this));
} else {
for (int i = 0; i < count; ++i) {
WakeByAddressSingle(const_cast<shared_atomic32 *>(this));
}
}
}
#else
#error Unimplemented atomic for this platform
#endif

158
rx/src/SharedCV.cpp Normal file
View file

@ -0,0 +1,158 @@
#include "SharedCV.hpp"
#include <chrono>
#ifdef __linux
#include <linux/futex.h>
#include <syscall.h>
#include <unistd.h>
#endif
namespace rx {
// Block on the CV word until a signal arrives (or timeout/interrupt), then
// reacquire `mutex`. `_val` is the counter snapshot returned by add_waiter;
// the caller has already unlocked `mutex`.
std::errc shared_cv::impl_wait(shared_mutex &mutex, unsigned _val,
                               std::uint64_t usec_timeout) noexcept {
  // Not supposed to fail
  if (!_val) {
    std::abort();
  }

  std::errc result = {};
  // usec_timeout == -1 encodes "wait forever" (see shared_cv::wait).
  bool useTimeout = usec_timeout != static_cast<std::uint64_t>(-1);

  while (true) {
    result =
        m_value.wait(_val, useTimeout ? std::chrono::microseconds(usec_timeout)
                                      : std::chrono::microseconds::max());
    bool spurious = result == std::errc::resource_unavailable_try_again;

    // Cleanup
    const auto old = m_value.fetch_op([&](unsigned &value) {
      // Remove waiter if no signals
      if ((value & ~c_waiter_mask) == 0) {
        if (!spurious) {
          value -= 1;
        }
      }

      // Try to remove signal
      if (value & c_signal_mask) {
        value -= c_signal_one;
      }

#ifdef __linux
      // Consume the "mutex ownership transferred" flag if present.
      if (value & c_locked_mask) {
        value -= c_locked_mask;
      }
#endif
    });

#ifdef __linux
    // Lock is already acquired (handed over by impl_wake via FUTEX_REQUEUE).
    if (old & c_locked_mask) {
      return {};
    }

    // Wait directly (waiter has been added)
    if (old & c_signal_mask) {
      return mutex.impl_wait();
    }
#else
    if (old & c_signal_mask) {
      result = {};
      break;
    }
#endif

    // Possibly spurious wakeup
    if (!spurious) {
      break;
    }

    // Retry the wait against the updated counter snapshot.
    _val = old;
  }

  mutex.lock();
  return result;
}
// Wake up to `_count` waiters. On Linux this hands mutex ownership to one
// waiter and requeues the rest onto the mutex futex (FUTEX_REQUEUE);
// elsewhere it posts signals and wakes waiters directly.
// (Fixed: INT_MAX was used without including <climits>; replaced with
// std::numeric_limits<int>::max(), which matches the header's usage and is
// available via SharedAtomic.hpp's <limits> include.)
// NOTE(review): std::min below relies on a transitive <algorithm> include.
void shared_cv::impl_wake(shared_mutex &mutex, int _count) noexcept {
#ifdef __linux
  while (true) {
    unsigned _old = m_value.load();
    const bool is_one = _count == 1;

    // Enqueue _count waiters
    _count = std::min<int>(_count, _old & c_waiter_mask);
    if (_count <= 0)
      return;

    // Try to lock the mutex
    const bool locked = mutex.lock_forced(_count);

    const int max_sig = m_value.op([&](unsigned &value) {
      // Verify the number of waiters
      int max_sig = std::min<int>(_count, value & c_waiter_mask);

      // Add lock signal (mutex was immediately locked)
      if (locked && max_sig)
        value |= c_locked_mask;
      else if (locked)
        std::abort();
      // Add normal signals
      value += c_signal_one * max_sig;
      // Remove waiters
      value -= max_sig;
      _old = value;
      return max_sig;
    });

    if (max_sig < _count) {
      // Fixup mutex: undo the writer units we over-reserved above.
      mutex.lock_forced(max_sig - _count);
      _count = max_sig;
    }

    if (_count) {
      // Wake up one thread + requeue remaining waiters onto the mutex
      unsigned awake_count = locked ? 1 : 0;
      if (auto r = syscall(SYS_futex, &m_value, FUTEX_REQUEUE, awake_count,
                           _count - awake_count, &mutex, 0);
          r < _count) {
        // Keep awaking waiters
        _count = is_one ? 1 : std::numeric_limits<int>::max();
        continue;
      }
    }

    break;
  }
#else
  unsigned _old = m_value.load();
  _count = std::min<int>(_count, _old & c_waiter_mask);
  if (_count <= 0)
    return;

  mutex.lock_forced(1);
  const int wakeupWaiters = m_value.op([&](unsigned &value) {
    int max_sig = std::min<int>(_count, value & c_waiter_mask);
    // Add normal signals
    value += c_signal_one * max_sig;
    // Remove waiters
    value -= max_sig;
    _old = value;
    return max_sig;
  });

  if (wakeupWaiters > 0) {
    m_value.notify_n(wakeupWaiters);
  }
  mutex.unlock();
#endif
}
} // namespace rx

181
rx/src/SharedMutex.cpp Normal file
View file

@ -0,0 +1,181 @@
#include "SharedMutex.hpp"
#include <syscall.h>
#include <unistd.h>
#include <xmmintrin.h>
// Spin for roughly `cycles` TSC ticks as a contention back-off.
// Fixed: the rdtsc/_mm_pause intrinsics were used unconditionally even
// though the headers in this set support non-x86 platforms; guard them and
// fall back to CPU relax hints elsewhere. (NOTE(review): the <xmmintrin.h>
// include at the top of this file still needs the same guard.)
static void busy_wait(unsigned long long cycles = 3000) {
#if defined(__i386__) || defined(__x86_64__)
  const auto stop = __builtin_ia32_rdtsc() + cycles;
  do
    _mm_pause();
  while (__builtin_ia32_rdtsc() < stop);
#else
  // No cycle counter available: issue a comparable number of relax hints.
  for (unsigned long long i = 0; i < cycles / 16 + 1; ++i) {
    rx::relax();
  }
#endif
}
namespace rx {
// Slow path of lock_shared(); `val` is the fast-path counter snapshot + 1.
void shared_mutex::impl_lock_shared(unsigned val) {
  if (val >= c_err)
    std::abort(); // "shared_mutex underflow"

  // Try to steal the notification bit
  unsigned _old = val;
  if (val & c_sig && m_value.compare_exchange_strong(_old, val - c_sig + 1)) {
    return;
  }

  // Bounded spin: retry the fast reader path before queueing as a writer.
  for (int i = 0; i < 10; i++) {
    if (try_lock_shared()) {
      return;
    }

    unsigned old = m_value;
    if (old & c_sig && m_value.compare_exchange_strong(old, old - c_sig + 1)) {
      return;
    }

    busy_wait();
  }

  // Acquire writer lock and downgrade
  const unsigned old = m_value.fetch_add(c_one);

  if (old == 0) {
    // Mutex was free: we own it outright; convert to a reader lock.
    lock_downgrade();
    return;
  }

  if ((old % c_sig) + c_one >= c_sig)
    std::abort(); // "shared_mutex overflow"

  // Queue behind existing owners; retry on interrupt.
  while (impl_wait() != std::errc{}) {
  }
  lock_downgrade();
}

// Slow path of unlock_shared(); `old` is the counter before the decrement.
void shared_mutex::impl_unlock_shared(unsigned old) {
  if (old - 1 >= c_err)
    std::abort(); // "shared_mutex underflow"

  // Check reader count, notify the writer if necessary
  if ((old - 1) % c_one == 0) {
    impl_signal();
  }
}
// Block until a c_sig wakeup token can be consumed; returns interrupted if
// the underlying futex wait was cancelled.
std::errc shared_mutex::impl_wait() {
  while (true) {
    // Atomically consume one signal token if available.
    const auto [old, ok] = m_value.fetch_op([](unsigned &value) {
      if (value >= c_sig) {
        value -= c_sig;
        return true;
      }

      return false;
    });

    if (ok) {
      break;
    }

    auto result = m_value.wait(old);
    if (result == std::errc::interrupted) {
      return result;
    }
  }
  return {};
}

// Post one c_sig wakeup token and wake a single blocked thread.
void shared_mutex::impl_signal() {
  m_value += c_sig;
  m_value.notify_one();
}
// Slow path of lock(); `val` is the counter observed by the failed CAS.
void shared_mutex::impl_lock(unsigned val) {
  if (val >= c_err)
    std::abort(); // "shared_mutex underflow"

  // Try to steal the notification bit
  unsigned _old = val;
  if (val & c_sig &&
      m_value.compare_exchange_strong(_old, val - c_sig + c_one)) {
    return;
  }

  // Bounded spin before queueing as a writer.
  for (int i = 0; i < 10; i++) {
    busy_wait();

    unsigned old = m_value;
    if (!old && try_lock()) {
      return;
    }

    if (old & c_sig &&
        m_value.compare_exchange_strong(old, old - c_sig + c_one)) {
      return;
    }
  }

  // Queue as a writer (add one fixed-point 1.0 unit).
  const unsigned old = m_value.fetch_add(c_one);

  if (old == 0) {
    // Mutex was free: acquired outright.
    return;
  }

  if ((old % c_sig) + c_one >= c_sig)
    std::abort(); // "shared_mutex overflow"

  // Wait for a signal from the previous owner; retry on interrupt.
  while (impl_wait() != std::errc{}) {
  }
}
// Slow path of unlock(); `old` is the counter before releasing c_one.
void shared_mutex::impl_unlock(unsigned old) {
  if (old - c_one >= c_err)
    std::abort(); // "shared_mutex underflow"

  // 1) Notify the next writer if necessary
  // 2) Notify all readers otherwise if necessary (currently indistinguishable
  // from writers)
  if (old - c_one) {
    impl_signal();
  }
}

// Slow path of lock_upgrade(): reader -> writer conversion under contention.
void shared_mutex::impl_lock_upgrade() {
  for (int i = 0; i < 10; i++) {
    busy_wait();

    if (try_lock_upgrade()) {
      return;
    }
  }

  // Convert to writer lock
  const unsigned old = m_value.fetch_add(c_one - 1);

  if ((old % c_sig) + c_one - 1 >= c_sig)
    std::abort(); // "shared_mutex overflow"

  // We were the only reader: the upgrade completed immediately.
  if (old % c_one == 1) {
    return;
  }

  // Otherwise wait until the remaining readers release; retry on interrupt.
  while (impl_wait() != std::errc{}) {
  }
}

// For shared_cv: forcibly add `count` writer units (negative removes them).
// Returns true when the mutex was acquired outright (it was free, or a
// pending signal token could be consumed).
bool shared_mutex::lock_forced(int count) {
  if (count == 0)
    return false;

  if (count > 0) {
    // Lock
    return m_value.op([&](std::uint32_t &v) {
      if (v & c_sig) {
        // Consume the wakeup token in the same step as taking the lock.
        v -= c_sig;
        v += c_one * count;
        return true;
      }

      bool firstLock = v == 0;
      v += c_one * count;
      return firstLock;
    });
  }

  // Remove waiters (negative count wraps to an unsigned subtraction).
  m_value.fetch_add(c_one * count);
  return true;
}
} // namespace rx