Some things improved

shared_mutex_t implemented
GUI Emu Callbacks rewritten
fxm::import, fxm::import_always implemented
cellMsgDialog rewritten
Emu.CallAfter improved (returns std::future)
This commit is contained in:
Nekotekina 2015-09-18 01:41:14 +03:00
parent 9d68c16c62
commit 8ae3401ffa
77 changed files with 1814 additions and 1831 deletions

717
Utilities/Atomic.h Normal file
View file

@ -0,0 +1,717 @@
#pragma once
#if defined(__GNUG__)
// GCC/Clang path: map the portable sync_* helper API used by atomic_t<> onto
// the legacy __sync_* builtins. Restricted to integral types via IS_INTEGRAL.

// Compare *dest with comp; if equal, store exch. Returns the previous value of *dest either way (full barrier).
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_val_compare_and_swap(volatile T* dest, T2 comp, T2 exch)
{
return __sync_val_compare_and_swap(dest, comp, exch);
}
// Compare *dest with comp; if equal, store exch. Returns true if the store happened (full barrier).
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), bool> sync_bool_compare_and_swap(volatile T* dest, T2 comp, T2 exch)
{
return __sync_bool_compare_and_swap(dest, comp, exch);
}
// Unconditionally store value into *dest; returns the previous value.
// NOTE(review): per GCC docs __sync_lock_test_and_set is only an acquire
// barrier, unlike the other __sync_* builtins which are full barriers — confirm
// callers do not rely on release ordering here.
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_lock_test_and_set(volatile T* dest, T2 value)
{
return __sync_lock_test_and_set(dest, value);
}
// Atomically *dest += value; returns the previous value.
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_fetch_and_add(volatile T* dest, T2 value)
{
return __sync_fetch_and_add(dest, value);
}
// Atomically *dest -= value; returns the previous value.
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_fetch_and_sub(volatile T* dest, T2 value)
{
return __sync_fetch_and_sub(dest, value);
}
// Atomically *dest |= value; returns the previous value.
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_fetch_and_or(volatile T* dest, T2 value)
{
return __sync_fetch_and_or(dest, value);
}
// Atomically *dest &= value; returns the previous value.
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_fetch_and_and(volatile T* dest, T2 value)
{
return __sync_fetch_and_and(dest, value);
}
// Atomically *dest ^= value; returns the previous value.
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_fetch_and_xor(volatile T* dest, T2 value)
{
return __sync_fetch_and_xor(dest, value);
}
#elif defined(_MSC_VER)
// atomic compare and swap functions
// MSVC path: built on the _Interlocked* intrinsics (full-barrier variants).
// sync_val_compare_and_swap: store exch into *dest if *dest == comp; return
// the previous value of *dest either way.
inline u8 sync_val_compare_and_swap(volatile u8* dest, u8 comp, u8 exch)
{
return _InterlockedCompareExchange8((volatile char*)dest, exch, comp);
}
inline u16 sync_val_compare_and_swap(volatile u16* dest, u16 comp, u16 exch)
{
return _InterlockedCompareExchange16((volatile short*)dest, exch, comp);
}
inline u32 sync_val_compare_and_swap(volatile u32* dest, u32 comp, u32 exch)
{
return _InterlockedCompareExchange((volatile long*)dest, exch, comp);
}
inline u64 sync_val_compare_and_swap(volatile u64* dest, u64 comp, u64 exch)
{
return _InterlockedCompareExchange64((volatile long long*)dest, exch, comp);
}
inline u128 sync_val_compare_and_swap(volatile u128* dest, u128 comp, u128 exch)
{
// The 128-bit intrinsic writes the previous value of *dest back into comp,
// so returning comp yields the old value regardless of success
_InterlockedCompareExchange128((volatile long long*)dest, exch.hi, exch.lo, (long long*)&comp);
return comp;
}
// sync_bool_compare_and_swap: same operation, but reports success.
// For scalars: the intrinsic returns the previous value; equality with comp
// means the exchange took place.
inline bool sync_bool_compare_and_swap(volatile u8* dest, u8 comp, u8 exch)
{
return (u8)_InterlockedCompareExchange8((volatile char*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile u16* dest, u16 comp, u16 exch)
{
return (u16)_InterlockedCompareExchange16((volatile short*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile u32* dest, u32 comp, u32 exch)
{
return (u32)_InterlockedCompareExchange((volatile long*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile u64* dest, u64 comp, u64 exch)
{
return (u64)_InterlockedCompareExchange64((volatile long long*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile u128* dest, u128 comp, u128 exch)
{
// The 128-bit intrinsic itself returns 1 on success, 0 on failure
return _InterlockedCompareExchange128((volatile long long*)dest, exch.hi, exch.lo, (long long*)&comp) != 0;
}
// atomic exchange functions
// Unconditionally store value into *dest and return the previous value
// (full barrier on MSVC).
inline u8 sync_lock_test_and_set(volatile u8* dest, u8 value)
{
return _InterlockedExchange8((volatile char*)dest, value);
}
inline u16 sync_lock_test_and_set(volatile u16* dest, u16 value)
{
return _InterlockedExchange16((volatile short*)dest, value);
}
inline u32 sync_lock_test_and_set(volatile u32* dest, u32 value)
{
return _InterlockedExchange((volatile long*)dest, value);
}
inline u64 sync_lock_test_and_set(volatile u64* dest, u64 value)
{
return _InterlockedExchange64((volatile long long*)dest, value);
}
// Atomically replace a 128-bit value, returning the previous contents.
// There is no 128-bit exchange intrinsic, so emulate it with a CAS loop.
inline u128 sync_lock_test_and_set(volatile u128* dest, u128 value)
{
	u128 prev;

	do
	{
		// A torn read here is harmless: the CAS below fails on any mismatch
		prev.lo = dest->lo;
		prev.hi = dest->hi;
	}
	while (!sync_bool_compare_and_swap(dest, prev, value));

	return prev;
}
// atomic add functions
// Atomically *dest += value; returns the previous value (full barrier).
inline u8 sync_fetch_and_add(volatile u8* dest, u8 value)
{
return _InterlockedExchangeAdd8((volatile char*)dest, value);
}
inline u16 sync_fetch_and_add(volatile u16* dest, u16 value)
{
return _InterlockedExchangeAdd16((volatile short*)dest, value);
}
inline u32 sync_fetch_and_add(volatile u32* dest, u32 value)
{
return _InterlockedExchangeAdd((volatile long*)dest, value);
}
inline u64 sync_fetch_and_add(volatile u64* dest, u64 value)
{
return _InterlockedExchangeAdd64((volatile long long*)dest, value);
}
// Atomic 128-bit addition emulated with a CAS loop; returns the previous value.
inline u128 sync_fetch_and_add(volatile u128* dest, u128 value)
{
	u128 prev, sum;

	do
	{
		prev.lo = dest->lo;
		prev.hi = dest->hi;
		sum.lo = prev.lo + value.lo;
		// Propagate the carry out of the low half into the high half
		sum.hi = prev.hi + value.hi + (sum.lo < value.lo);
	}
	while (!sync_bool_compare_and_swap(dest, prev, sum));

	return prev;
}
// atomic sub functions
// Subtraction is implemented as ExchangeAdd of the two's-complement negation;
// returns the previous value of *dest (full barrier).
inline u8 sync_fetch_and_sub(volatile u8* dest, u8 value)
{
return _InterlockedExchangeAdd8((volatile char*)dest, -(char)value);
}
inline u16 sync_fetch_and_sub(volatile u16* dest, u16 value)
{
return _InterlockedExchangeAdd16((volatile short*)dest, -(short)value);
}
inline u32 sync_fetch_and_sub(volatile u32* dest, u32 value)
{
return _InterlockedExchangeAdd((volatile long*)dest, -(long)value);
}
inline u64 sync_fetch_and_sub(volatile u64* dest, u64 value)
{
return _InterlockedExchangeAdd64((volatile long long*)dest, -(long long)value);
}
// Atomic 128-bit subtraction emulated with a CAS loop; returns the previous value.
inline u128 sync_fetch_and_sub(volatile u128* dest, u128 value)
{
	u128 prev, diff;

	do
	{
		prev.lo = dest->lo;
		prev.hi = dest->hi;
		diff.lo = prev.lo - value.lo;
		// Borrow from the high half when the low half underflows
		diff.hi = prev.hi - value.hi - (prev.lo < value.lo);
	}
	while (!sync_bool_compare_and_swap(dest, prev, diff));

	return prev;
}
// atomic `bitwise or` functions
// Atomically *dest |= value; returns the previous value (full barrier).
inline u8 sync_fetch_and_or(volatile u8* dest, u8 value)
{
return _InterlockedOr8((volatile char*)dest, value);
}
inline u16 sync_fetch_and_or(volatile u16* dest, u16 value)
{
return _InterlockedOr16((volatile short*)dest, value);
}
inline u32 sync_fetch_and_or(volatile u32* dest, u32 value)
{
return _InterlockedOr((volatile long*)dest, value);
}
inline u64 sync_fetch_and_or(volatile u64* dest, u64 value)
{
return _InterlockedOr64((volatile long long*)dest, value);
}
// Atomic 128-bit bitwise OR emulated with a CAS loop; returns the previous value.
inline u128 sync_fetch_and_or(volatile u128* dest, u128 value)
{
	u128 prev, result;

	do
	{
		prev.lo = dest->lo;
		prev.hi = dest->hi;
		result.lo = prev.lo | value.lo;
		result.hi = prev.hi | value.hi;
	}
	while (!sync_bool_compare_and_swap(dest, prev, result));

	return prev;
}
// atomic `bitwise and` functions
// Atomically *dest &= value; returns the previous value (full barrier).
inline u8 sync_fetch_and_and(volatile u8* dest, u8 value)
{
return _InterlockedAnd8((volatile char*)dest, value);
}
inline u16 sync_fetch_and_and(volatile u16* dest, u16 value)
{
return _InterlockedAnd16((volatile short*)dest, value);
}
inline u32 sync_fetch_and_and(volatile u32* dest, u32 value)
{
return _InterlockedAnd((volatile long*)dest, value);
}
inline u64 sync_fetch_and_and(volatile u64* dest, u64 value)
{
return _InterlockedAnd64((volatile long long*)dest, value);
}
// Atomic 128-bit bitwise AND emulated with a CAS loop; returns the previous value.
inline u128 sync_fetch_and_and(volatile u128* dest, u128 value)
{
	u128 prev, result;

	do
	{
		prev.lo = dest->lo;
		prev.hi = dest->hi;
		result.lo = prev.lo & value.lo;
		result.hi = prev.hi & value.hi;
	}
	while (!sync_bool_compare_and_swap(dest, prev, result));

	return prev;
}
// atomic `bitwise xor` functions
// Atomically *dest ^= value; returns the previous value (full barrier).
inline u8 sync_fetch_and_xor(volatile u8* dest, u8 value)
{
return _InterlockedXor8((volatile char*)dest, value);
}
inline u16 sync_fetch_and_xor(volatile u16* dest, u16 value)
{
return _InterlockedXor16((volatile short*)dest, value);
}
inline u32 sync_fetch_and_xor(volatile u32* dest, u32 value)
{
return _InterlockedXor((volatile long*)dest, value);
}
inline u64 sync_fetch_and_xor(volatile u64* dest, u64 value)
{
return _InterlockedXor64((volatile long long*)dest, value);
}
// Atomic 128-bit bitwise XOR emulated with a CAS loop; returns the previous value.
inline u128 sync_fetch_and_xor(volatile u128* dest, u128 value)
{
	u128 prev, result;

	do
	{
		prev.lo = dest->lo;
		prev.hi = dest->hi;
		result.lo = prev.lo ^ value.lo;
		result.hi = prev.hi ^ value.hi;
	}
	while (!sync_bool_compare_and_swap(dest, prev, result));

	return prev;
}
#endif /* _MSC_VER */
// Maps a value type T (by size) to the unsigned integral type used as its raw
// in-memory representation for the sync_* primitives. Unsupported sizes hit
// the static_assert in the primary template.
template<typename T, std::size_t Size = sizeof(T)> struct atomic_storage
{
static_assert(!Size, "Invalid atomic type");
};
template<typename T> struct atomic_storage<T, 1>
{
using type = u8;
};
template<typename T> struct atomic_storage<T, 2>
{
using type = u16;
};
template<typename T> struct atomic_storage<T, 4>
{
using type = u32;
};
template<typename T> struct atomic_storage<T, 8>
{
using type = u64;
};
template<typename T> struct atomic_storage<T, 16>
{
using type = u128;
};
// Convenience alias: raw storage type for T
template<typename T> using atomic_storage_t = typename atomic_storage<T>::type;
// result wrapper to deal with void result type
// Captures the result of calling func(var, args...) so atomic_op() can retry
// the CAS loop and only hand the result out once it commits.
template<typename T, typename RT, typename VT> struct atomic_op_result_t
{
RT result;
// Invoke the callable immediately and keep its return value
template<typename... Args> inline atomic_op_result_t(T func, VT& var, Args&&... args)
: result(std::move(func(var, std::forward<Args>(args)...)))
{
}
// Hand the stored result out (moved, so call at most once)
inline RT move()
{
return std::move(result);
}
};
// void specialization: result is the initial value of the first arg
template<typename T, typename VT> struct atomic_op_result_t<T, void, VT>
{
VT result;
// Snapshot var BEFORE running func, so the caller observes the old value
template<typename... Args> inline atomic_op_result_t(T func, VT& var, Args&&... args)
: result(var)
{
func(var, std::forward<Args>(args)...);
}
inline VT move()
{
return std::move(result);
}
};
// member function specialization
// Same contract as the primary template, but the callable is a pointer to a
// member function of the atomic value type, invoked as (var.*func)(...).
template<typename CT, typename... FArgs, typename RT, typename VT> struct atomic_op_result_t<RT(CT::*)(FArgs...), RT, VT>
{
RT result;
template<typename... Args> inline atomic_op_result_t(RT(CT::*func)(FArgs...), VT& var, Args&&... args)
: result(std::move((var.*func)(std::forward<Args>(args)...)))
{
}
inline RT move()
{
return std::move(result);
}
};
// member function void specialization
// Void-returning member function: the result is the value of var before the call.
template<typename CT, typename... FArgs, typename VT> struct atomic_op_result_t<void(CT::*)(FArgs...), void, VT>
{
VT result;
template<typename... Args> inline atomic_op_result_t(void(CT::*func)(FArgs...), VT& var, Args&&... args)
: result(var)
{
(var.*func)(std::forward<Args>(args)...);
}
inline VT move()
{
return std::move(result);
}
};
// Generic lock-free atomic wrapper. T is stored as an unsigned integral
// representation (atomic_storage_t) and all operations are routed through the
// sync_* primitives above, so T may be any trivially-copyable 1/2/4/8/16-byte type.
template<typename T> class atomic_t
{
using type = std::remove_cv_t<T>;
using stype = atomic_storage_t<type>;
using storage = atomic_storage<type>;
static_assert(alignof(type) <= alignof(stype), "atomic_t<> error: unexpected alignment");
stype m_data;
// Plain volatile store, no explicit barrier.
// NOTE(review): relies on naturally-aligned stores of <= 8 bytes being atomic
// on the target (true on x86/x64) — confirm for other architectures.
template<typename T2> static inline void write_relaxed(volatile T2& data, const T2& value)
{
data = value;
}
// 128-bit values cannot be written atomically with a plain store: use exchange
static inline void write_relaxed(volatile u128& data, const u128& value)
{
sync_lock_test_and_set(&data, value);
}
// Plain volatile load (same atomicity caveat as write_relaxed above)
template<typename T2> static inline T2 read_relaxed(const volatile T2& data)
{
return data;
}
// 128-bit atomic read via CAS(0, 0): returns the current value; it only
// "writes" when the value is already zero (0 over 0), which is harmless
static inline u128 read_relaxed(const volatile u128& value)
{
return sync_val_compare_and_swap(const_cast<volatile u128*>(&value), u128{0}, u128{0});
}
public:
// Reinterpret a T value as its raw storage representation (no conversion)
static inline const stype to_subtype(const type& value)
{
return reinterpret_cast<const stype&>(value);
}
// Reinterpret raw storage back into a T value (no conversion)
static inline const type from_subtype(const stype value)
{
return reinterpret_cast<const type&>(value);
}
atomic_t() = default;
atomic_t(const atomic_t&) = delete;
atomic_t(atomic_t&&) = delete;
inline atomic_t(type value)
: m_data(to_subtype(value))
{
}
atomic_t& operator =(const atomic_t&) = delete;
atomic_t& operator =(atomic_t&&) = delete;
// Relaxed store (see store())
inline atomic_t& operator =(type value)
{
return write_relaxed(m_data, to_subtype(value)), *this;
}
// Relaxed load (see load())
operator type() const volatile
{
return from_subtype(read_relaxed(m_data));
}
// Unsafe direct access
stype* raw_data()
{
return reinterpret_cast<stype*>(&m_data);
}
// Unsafe direct access
type& raw()
{
return reinterpret_cast<type&>(m_data);
}
// Atomically compare data with cmp, replace with exch if equal, return previous data value anyway
inline const type compare_and_swap(const type& cmp, const type& exch) volatile
{
return from_subtype(sync_val_compare_and_swap(&m_data, to_subtype(cmp), to_subtype(exch)));
}
// Atomically compare data with cmp, replace with exch if equal, return true if data was replaced
inline bool compare_and_swap_test(const type& cmp, const type& exch) volatile
{
return sync_bool_compare_and_swap(&m_data, to_subtype(cmp), to_subtype(exch));
}
// Atomically replace data with exch, return previous data value
inline const type exchange(const type& exch) volatile
{
return from_subtype(sync_lock_test_and_set(&m_data, to_subtype(exch)));
}
// Atomically read data, possibly without memory barrier (not for 128 bit)
inline const type load() const volatile
{
return from_subtype(read_relaxed(m_data));
}
// Atomically write data, possibly without memory barrier (not for 128 bit)
inline void store(const type& value) volatile
{
write_relaxed(m_data, to_subtype(value));
}
// Perform an atomic operation on data (func is either pointer to member function or callable object with a T& first arg);
// Returns the result of the callable object call or previous (old) value of the atomic variable if the return type is void
template<typename F, typename... Args, typename RT = std::result_of_t<F(T&, Args...)>> auto atomic_op(F func, Args&&... args) volatile -> decltype(atomic_op_result_t<F, RT, T>::result)
{
while (true)
{
// Read the old value from memory
const stype old = read_relaxed(m_data);
// Copy the old value
stype _new = old;
// Call atomic op for the local copy of the old value and save the return value of the function
// (args are passed as lvalues, not forwarded, because func may run again on CAS failure)
atomic_op_result_t<F, RT, T> result(func, reinterpret_cast<type&>(_new), args...);
// Atomically compare value with `old`, replace with `_new` and return on success
if (sync_bool_compare_and_swap(&m_data, old, _new)) return result.move();
}
}
// Atomic bitwise OR, returns previous data
inline const type _or(const type& right) volatile
{
return from_subtype(sync_fetch_and_or(&m_data, to_subtype(right)));
}
// Atomic bitwise AND, returns previous data
inline const type _and(const type& right) volatile
{
return from_subtype(sync_fetch_and_and(&m_data, to_subtype(right)));
}
// Atomic bitwise AND NOT (inverts right argument), returns previous data
inline const type _and_not(const type& right) volatile
{
return from_subtype(sync_fetch_and_and(&m_data, ~to_subtype(right)));
}
// Atomic bitwise XOR, returns previous data
inline const type _xor(const type& right) volatile
{
return from_subtype(sync_fetch_and_xor(&m_data, to_subtype(right)));
}
// Compound ops return the NEW value, recomputed from the fetched old value
inline const type operator |=(const type& right) volatile
{
return from_subtype(sync_fetch_and_or(&m_data, to_subtype(right)) | to_subtype(right));
}
inline const type operator &=(const type& right) volatile
{
return from_subtype(sync_fetch_and_and(&m_data, to_subtype(right)) & to_subtype(right));
}
inline const type operator ^=(const type& right) volatile
{
return from_subtype(sync_fetch_and_xor(&m_data, to_subtype(right)) ^ to_subtype(right));
}
};
// Free arithmetic operators for atomic_t over plain integral types, built
// directly on the lock-free fetch intrinsics.
// Pre-increment/decrement return the NEW value; post forms return the OLD value.
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), T> operator ++(atomic_t<T>& left)
{
return left.from_subtype(sync_fetch_and_add(left.raw_data(), 1) + 1);
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), T> operator --(atomic_t<T>& left)
{
return left.from_subtype(sync_fetch_and_sub(left.raw_data(), 1) - 1);
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), T> operator ++(atomic_t<T>& left, int)
{
return left.from_subtype(sync_fetch_and_add(left.raw_data(), 1));
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), T> operator --(atomic_t<T>& left, int)
{
return left.from_subtype(sync_fetch_and_sub(left.raw_data(), 1));
}
// += and -= return the NEW value, recomputed from the fetched old value
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<T2, T>::value, T> operator +=(atomic_t<T>& left, const T2& right)
{
return left.from_subtype(sync_fetch_and_add(left.raw_data(), right) + right);
}
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<T2, T>::value, T> operator -=(atomic_t<T>& left, const T2& right)
{
return left.from_subtype(sync_fetch_and_sub(left.raw_data(), right) - right);
}
// Same operator set for nse_t<T> (native-endian storage): the raw layout
// matches T, so the same intrinsic forms apply unchanged.
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), nse_t<T>> operator ++(atomic_t<nse_t<T>>& left)
{
return left.from_subtype(sync_fetch_and_add(left.raw_data(), 1) + 1);
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), nse_t<T>> operator --(atomic_t<nse_t<T>>& left)
{
return left.from_subtype(sync_fetch_and_sub(left.raw_data(), 1) - 1);
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), nse_t<T>> operator ++(atomic_t<nse_t<T>>& left, int)
{
return left.from_subtype(sync_fetch_and_add(left.raw_data(), 1));
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), nse_t<T>> operator --(atomic_t<nse_t<T>>& left, int)
{
return left.from_subtype(sync_fetch_and_sub(left.raw_data(), 1));
}
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<T2, T>::value, nse_t<T>> operator +=(atomic_t<nse_t<T>>& left, const T2& right)
{
return left.from_subtype(sync_fetch_and_add(left.raw_data(), right) + right);
}
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<T2, T>::value, nse_t<T>> operator -=(atomic_t<nse_t<T>>& left, const T2& right)
{
return left.from_subtype(sync_fetch_and_sub(left.raw_data(), right) - right);
}
// Operators for atomic_t over se_t<T> (byte-swapped storage): the fetch
// intrinsics cannot be applied to swapped bytes directly, so each operation
// goes through the generic atomic_op() CAS loop instead.
// Pre-increment/decrement return the NEW value; post forms return the OLD value.
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), se_t<T>> operator ++(atomic_t<se_t<T>>& left)
{
return left.atomic_op([](se_t<T>& value) -> se_t<T>
{
return ++value;
});
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), se_t<T>> operator --(atomic_t<se_t<T>>& left)
{
return left.atomic_op([](se_t<T>& value) -> se_t<T>
{
return --value;
});
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), se_t<T>> operator ++(atomic_t<se_t<T>>& left, int)
{
return left.atomic_op([](se_t<T>& value) -> se_t<T>
{
return value++;
});
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), se_t<T>> operator --(atomic_t<se_t<T>>& left, int)
{
return left.atomic_op([](se_t<T>& value) -> se_t<T>
{
return value--;
});
}
// += and -= return the NEW value
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<T2, T>::value, se_t<T>> operator +=(atomic_t<se_t<T>>& left, const T2& right)
{
return left.atomic_op([&](se_t<T>& value) -> se_t<T>
{
return value += right;
});
}
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<T2, T>::value, se_t<T>> operator -=(atomic_t<se_t<T>>& left, const T2& right)
{
return left.atomic_op([&](se_t<T>& value) -> se_t<T>
{
return value -= right;
});
}
template<typename T> using atomic_be_t = atomic_t<be_t<T>>; // Atomic BE Type (for PS3 virtual memory)
template<typename T> using atomic_le_t = atomic_t<le_t<T>>; // Atomic LE Type (for PSV virtual memory)
// Algorithm for std::atomic; similar to atomic_t::atomic_op().
// Applies func to a local copy of the value and commits it with a CAS loop;
// returns func's result, or the previous value of var when func returns void.
// Note: args are passed as lvalues (not forwarded) because func may be invoked
// again whenever the CAS fails.
template<typename T, typename F, typename... Args, typename RT = std::result_of_t<F(T&, Args...)>> auto atomic_op(std::atomic<T>& var, F func, Args&&... args) -> decltype(atomic_op_result_t<F, RT, T>::result)
{
	// compare_exchange_strong refreshes `expected` on failure, so one initial load suffices
	for (auto expected = var.load();;)
	{
		auto desired = expected;
		atomic_op_result_t<F, RT, T> result(func, desired, args...);

		if (var.compare_exchange_strong(expected, desired))
		{
			return result.move();
		}
	}
}

View file

@ -374,9 +374,6 @@ inline v128 operator ~(const v128& other)
return v128::from64(~other._u64[0], ~other._u64[1]);
}
#define IS_INTEGER(t) (std::is_integral<t>::value || std::is_enum<t>::value)
#define IS_BINARY_COMPARABLE(t1, t2) (IS_INTEGER(t1) && IS_INTEGER(t2) && sizeof(t1) == sizeof(t2))
template<typename T, std::size_t Size = sizeof(T)> struct se_storage
{
static_assert(!Size, "Bad se_storage<> type");
@ -386,7 +383,7 @@ template<typename T> struct se_storage<T, 2>
{
using type = u16;
static constexpr u16 swap(u16 src)
[[deprecated]] static constexpr u16 swap(u16 src) // for reference
{
return (src >> 8) | (src << 8);
}
@ -407,7 +404,7 @@ template<typename T> struct se_storage<T, 4>
{
using type = u32;
static constexpr u32 swap(u32 src)
[[deprecated]] static constexpr u32 swap(u32 src) // for reference
{
return (src >> 24) | (src << 24) | ((src >> 8) & 0x0000ff00) | ((src << 8) & 0x00ff0000);
}
@ -428,7 +425,7 @@ template<typename T> struct se_storage<T, 8>
{
using type = u64;
static constexpr u64 swap(u64 src)
[[deprecated]] static constexpr u64 swap(u64 src) // for reference
{
return (src >> 56) | (src << 56) |
((src >> 40) & 0x000000000000ff00) |
@ -491,7 +488,10 @@ template<typename T1, typename T2> struct se_convert
static struct se_raw_tag_t {} const se_raw{};
template<typename T, bool Se = true> class se_t
template<typename T, bool Se = true> class se_t;
// se_t with switched endianness
template<typename T> class se_t<T, true>
{
using type = std::remove_cv_t<T>;
using stype = se_storage_t<type>;
@ -506,16 +506,32 @@ template<typename T, bool Se = true> class se_t
static_assert(!std::is_enum<type>::value, "se_t<> error: invalid type (enumeration), use integral type instead");
static_assert(alignof(type) == alignof(stype), "se_t<> error: unexpected alignment");
template<typename T2, bool = std::is_integral<T2>::value> struct bool_converter
{
static inline bool to_bool(const se_t<T2>& value)
{
return static_cast<bool>(value.value());
}
};
template<typename T2> struct bool_converter<T2, true>
{
static inline bool to_bool(const se_t<T2>& value)
{
return value.m_data != 0;
}
};
public:
se_t() = default;
se_t(const se_t& right) = default;
template<typename CT, typename = std::enable_if_t<std::is_constructible<type, CT>::value && !std::is_same<se_t<type>, std::decay_t<CT>>::value>> inline se_t(const CT& value)
inline se_t(type value)
: m_data(storage::to(value))
{
}
// construct directly from raw data (don't use)
inline se_t(const stype& raw_value, const se_raw_tag_t&)
: m_data(raw_value)
@ -528,29 +544,27 @@ public:
}
// access underlying raw data (don't use)
inline const stype& raw_data() const
inline const stype& raw_data() const noexcept
{
return m_data;
}
se_t& operator =(const se_t&) = default;
template<typename CT> inline std::enable_if_t<std::is_assignable<type&, CT>::value && !std::is_same<se_t<type>, std::decay_t<CT>>::value, se_t&> operator =(const CT& value)
inline se_t& operator =(type value)
{
return m_data = storage::to(value), *this;
}
inline operator type() const
{
return value();
return storage::from(m_data);
}
// optimization
explicit inline operator bool() const
{
static_assert(std::is_convertible<T, bool>::value, "Illegal conversion to bool");
return m_data != 0;
return bool_converter<type>::to_bool(*this);
}
// optimization
@ -560,7 +574,7 @@ public:
}
// optimization
template<typename CT> inline std::enable_if_t<std::is_integral<T>::value && std::is_convertible<CT, T>::value, se_t&> operator &=(const CT& right)
template<typename CT> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<CT, T>::value, se_t&> operator &=(CT right)
{
return m_data &= storage::to(right), *this;
}
@ -572,7 +586,7 @@ public:
}
// optimization
template<typename CT> inline std::enable_if_t<std::is_integral<T>::value && std::is_convertible<CT, T>::value, se_t&> operator |=(const CT& right)
template<typename CT> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<CT, T>::value, se_t&> operator |=(CT right)
{
return m_data |= storage::to(right), *this;
}
@ -584,12 +598,13 @@ public:
}
// optimization
template<typename CT> inline std::enable_if_t<std::is_integral<T>::value && std::is_convertible<CT, T>::value, se_t&> operator ^=(const CT& right)
template<typename CT> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<CT, T>::value, se_t&> operator ^=(CT right)
{
return m_data ^= storage::to(right), *this;
}
};
// se_t with native endianness
template<typename T> class se_t<T, false>
{
using type = std::remove_cv_t<T>;
@ -607,8 +622,8 @@ public:
se_t(const se_t&) = default;
template<typename CT, typename = std::enable_if_t<std::is_constructible<type, CT>::value && !std::is_same<se_t<type>, std::decay_t<CT>>::value>> inline se_t(CT&& value)
: m_data(std::forward<CT>(value))
inline se_t(type value)
: m_data(value)
{
}
@ -619,32 +634,35 @@ public:
se_t& operator =(const se_t& value) = default;
template<typename CT> inline std::enable_if_t<std::is_assignable<type&, CT>::value && !std::is_same<se_t<type>, std::decay_t<CT>>::value, se_t&> operator =(const CT& value)
inline se_t& operator =(type value)
{
return m_data = value, *this;
}
inline operator type() const
{
return value();
return m_data;
}
template<typename CT> inline std::enable_if_t<std::is_integral<T>::value && std::is_convertible<CT, T>::value, se_t&> operator &=(const CT& right)
template<typename CT> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<CT, T>::value, se_t&> operator &=(const CT& right)
{
return m_data &= right, *this;
}
template<typename CT> inline std::enable_if_t<std::is_integral<T>::value && std::is_convertible<CT, T>::value, se_t&> operator |=(const CT& right)
template<typename CT> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<CT, T>::value, se_t&> operator |=(const CT& right)
{
return m_data |= right, *this;
}
template<typename CT> inline std::enable_if_t<std::is_integral<T>::value && std::is_convertible<CT, T>::value, se_t&> operator ^=(const CT& right)
template<typename CT> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<CT, T>::value, se_t&> operator ^=(const CT& right)
{
return m_data ^= right, *this;
}
};
// se_t with native endianness (alias)
template<typename T> using nse_t = se_t<T, false>;
template<typename T, bool Se, typename T1> inline se_t<T, Se>& operator +=(se_t<T, Se>& left, const T1& right)
{
auto value = left.value();
@ -722,15 +740,15 @@ template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2), bool> operator ==(const se_t<T1>& left, const T2& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGRAL(T1) && IS_INTEGER(T2) && sizeof(T1) >= sizeof(T2), bool> operator ==(const se_t<T1>& left, T2 right)
{
return left.raw_data() == se_storage<T2>::to(right);
return left.raw_data() == se_storage<T1>::to(right);
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2), bool> operator ==(const T1& left, const se_t<T2>& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGER(T1) && IS_INTEGRAL(T2) && sizeof(T1) <= sizeof(T2), bool> operator ==(T1 left, const se_t<T2>& right)
{
return se_storage<T1>::to(left) == right.raw_data();
return se_storage<T2>::to(left) == right.raw_data();
}
// optimization
@ -740,75 +758,75 @@ template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2), bool> operator !=(const se_t<T1>& left, const T2& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGRAL(T1) && IS_INTEGER(T2) && sizeof(T1) >= sizeof(T2), bool> operator !=(const se_t<T1>& left, T2 right)
{
return left.raw_data() != se_storage<T2>::to(right);
return left.raw_data() != se_storage<T1>::to(right);
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2), bool> operator !=(const T1& left, const se_t<T2>& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGER(T1) && IS_INTEGRAL(T2) && sizeof(T1) <= sizeof(T2), bool> operator !=(T1 left, const se_t<T2>& right)
{
return se_storage<T1>::to(left) != right.raw_data();
return se_storage<T2>::to(left) != right.raw_data();
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() & T2())>> operator &(const se_t<T1>& left, const se_t<T2>& right)
{
return{ static_cast<se_storage_t<T1>>(left.raw_data() & right.raw_data()), se_raw };
return{ left.raw_data() & right.raw_data(), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() & T2())>> operator &(const se_t<T1>& left, const T2& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGRAL(T1) && IS_INTEGER(T2) && sizeof(T1) >= sizeof(T2) && sizeof(T1) >= 4, se_t<decltype(T1() & T2())>> operator &(const se_t<T1>& left, T2 right)
{
return{ static_cast<se_storage_t<T1>>(left.raw_data() & se_storage<T2>::to(right)), se_raw };
return{ left.raw_data() & se_storage<T1>::to(right), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() & T2())>> operator &(const T1& left, const se_t<T2>& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGER(T1) && IS_INTEGRAL(T2) && sizeof(T1) <= sizeof(T2) && sizeof(T2) >= 4, se_t<decltype(T1() & T2())>> operator &(T1 left, const se_t<T2>& right)
{
return{ static_cast<se_storage_t<T1>>(se_storage<T1>::to(left) & right.raw_data()), se_raw };
return{ se_storage<T2>::to(left) & right.raw_data(), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() | T2())>> operator |(const se_t<T1>& left, const se_t<T2>& right)
{
return{ static_cast<se_storage_t<T1>>(left.raw_data() | right.raw_data()), se_raw };
return{ left.raw_data() | right.raw_data(), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() | T2())>> operator |(const se_t<T1>& left, const T2& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGRAL(T1) && IS_INTEGER(T2) && sizeof(T1) >= sizeof(T2) && sizeof(T1) >= 4, se_t<decltype(T1() | T2())>> operator |(const se_t<T1>& left, T2 right)
{
return{ static_cast<se_storage_t<T1>>(left.raw_data() | se_storage<T2>::to(right)), se_raw };
return{ left.raw_data() | se_storage<T1>::to(right), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() | T2())>> operator |(const T1& left, const se_t<T2>& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGER(T1) && IS_INTEGRAL(T2) && sizeof(T1) <= sizeof(T2) && sizeof(T2) >= 4, se_t<decltype(T1() | T2())>> operator |(T1 left, const se_t<T2>& right)
{
return{ static_cast<se_storage_t<T1>>(se_storage<T1>::to(left) | right.raw_data()), se_raw };
return{ se_storage<T2>::to(left) | right.raw_data(), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() ^ T2())>> operator ^(const se_t<T1>& left, const se_t<T2>& right)
{
return{ static_cast<se_storage_t<T1>>(left.raw_data() ^ right.raw_data()), se_raw };
return{ left.raw_data() ^ right.raw_data(), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() ^ T2())>> operator ^(const se_t<T1>& left, const T2& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGRAL(T1) && IS_INTEGER(T2) && sizeof(T1) >= sizeof(T2) && sizeof(T1) >= 4, se_t<decltype(T1() ^ T2())>> operator ^(const se_t<T1>& left, T2 right)
{
return{ static_cast<se_storage_t<T1>>(left.raw_data() ^ se_storage<T2>::to(right)), se_raw };
return{ left.raw_data() ^ se_storage<T1>::to(right), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() ^ T2())>> operator ^(const T1& left, const se_t<T2>& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGER(T1) && IS_INTEGRAL(T2) && sizeof(T1) <= sizeof(T2) && sizeof(T2) >= 4, se_t<decltype(T1() ^ T2())>> operator ^(T1 left, const se_t<T2>& right)
{
return{ static_cast<se_storage_t<T1>>(se_storage<T1>::to(left) ^ right.raw_data()), se_raw };
return{ se_storage<T2>::to(left) ^ right.raw_data(), se_raw };
}
// optimization
template<typename T> inline std::enable_if_t<IS_INTEGER(T) && sizeof(T) >= 4, se_t<decltype(~T())>> operator ~(const se_t<T>& right)
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T) && sizeof(T) >= 4, se_t<decltype(~T())>> operator ~(const se_t<T>& right)
{
return{ static_cast<se_storage_t<T>>(~right.raw_data()), se_raw };
return{ ~right.raw_data(), se_raw };
}
#ifdef IS_LE_MACHINE

View file

@ -232,334 +232,6 @@ struct alignas(16) uint128_t
using __uint128_t = uint128_t;
#endif
// SFINAE Helper type
// Enables an overload only for built-in integral types, plus the project's
// 128-bit integer type __uint128_t (aliased to uint128_t above); yields TT.
template<typename T, typename TT = void> using if_integral_t = std::enable_if_t<std::is_integral<T>::value || std::is_same<std::remove_cv_t<T>, __uint128_t>::value, TT>;
#if defined(__GNUG__)
// GCC/Clang path: thin wrappers over the legacy __sync builtins, giving the
// same names/signatures as the MSVC overload set below so callers are
// platform-agnostic. All return the PREVIOUS value of *dest (except the
// bool CAS, which reports success). These builtins imply a full barrier.
template<typename T, typename T2> inline if_integral_t<T, T> sync_val_compare_and_swap(volatile T* dest, T2 comp, T2 exch)
{
return __sync_val_compare_and_swap(dest, comp, exch);
}
// Returns true if *dest was equal to comp and was replaced with exch.
template<typename T, typename T2> inline if_integral_t<T, bool> sync_bool_compare_and_swap(volatile T* dest, T2 comp, T2 exch)
{
return __sync_bool_compare_and_swap(dest, comp, exch);
}
// Atomic exchange: stores value, returns the previous contents of *dest.
template<typename T, typename T2> inline if_integral_t<T, T> sync_lock_test_and_set(volatile T* dest, T2 value)
{
return __sync_lock_test_and_set(dest, value);
}
template<typename T, typename T2> inline if_integral_t<T, T> sync_fetch_and_add(volatile T* dest, T2 value)
{
return __sync_fetch_and_add(dest, value);
}
template<typename T, typename T2> inline if_integral_t<T, T> sync_fetch_and_sub(volatile T* dest, T2 value)
{
return __sync_fetch_and_sub(dest, value);
}
template<typename T, typename T2> inline if_integral_t<T, T> sync_fetch_and_or(volatile T* dest, T2 value)
{
return __sync_fetch_and_or(dest, value);
}
template<typename T, typename T2> inline if_integral_t<T, T> sync_fetch_and_and(volatile T* dest, T2 value)
{
return __sync_fetch_and_and(dest, value);
}
template<typename T, typename T2> inline if_integral_t<T, T> sync_fetch_and_xor(volatile T* dest, T2 value)
{
return __sync_fetch_and_xor(dest, value);
}
#endif /* __GNUG__ */
#if defined(_MSC_VER)
// atomic compare and swap functions
// Each returns the PREVIOUS value of *dest. Note the argument-order shim:
// these helpers take (dest, comp, exch) to mirror GCC's
// __sync_val_compare_and_swap, while the underlying MSVC
// _InterlockedCompareExchange* intrinsics take (dest, exch, comp).
inline uint8_t sync_val_compare_and_swap(volatile uint8_t* dest, uint8_t comp, uint8_t exch)
{
return _InterlockedCompareExchange8((volatile char*)dest, exch, comp);
}
inline uint16_t sync_val_compare_and_swap(volatile uint16_t* dest, uint16_t comp, uint16_t exch)
{
return _InterlockedCompareExchange16((volatile short*)dest, exch, comp);
}
inline uint32_t sync_val_compare_and_swap(volatile uint32_t* dest, uint32_t comp, uint32_t exch)
{
return _InterlockedCompareExchange((volatile long*)dest, exch, comp);
}
inline uint64_t sync_val_compare_and_swap(volatile uint64_t* dest, uint64_t comp, uint64_t exch)
{
return _InterlockedCompareExchange64((volatile long long*)dest, exch, comp);
}
// 128-bit variant: _InterlockedCompareExchange128 stores the previous value
// of *dest into comp (passed by pointer), which is then returned by value.
inline uint128_t sync_val_compare_and_swap(volatile uint128_t* dest, uint128_t comp, uint128_t exch)
{
_InterlockedCompareExchange128((volatile long long*)dest, exch.hi, exch.lo, (long long*)&comp);
return comp;
}
// Boolean CAS: returns true iff *dest equalled comp and was replaced with
// exch. The scalar versions infer success by comparing the intrinsic's
// returned snapshot (the previous value) against comp, so the check is
// race-free; the 128-bit version uses the intrinsic's success flag directly.
inline bool sync_bool_compare_and_swap(volatile uint8_t* dest, uint8_t comp, uint8_t exch)
{
return (uint8_t)_InterlockedCompareExchange8((volatile char*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile uint16_t* dest, uint16_t comp, uint16_t exch)
{
return (uint16_t)_InterlockedCompareExchange16((volatile short*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile uint32_t* dest, uint32_t comp, uint32_t exch)
{
return (uint32_t)_InterlockedCompareExchange((volatile long*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile uint64_t* dest, uint64_t comp, uint64_t exch)
{
return (uint64_t)_InterlockedCompareExchange64((volatile long long*)dest, exch, comp) == comp;
}
// _InterlockedCompareExchange128 returns nonzero on successful exchange.
inline bool sync_bool_compare_and_swap(volatile uint128_t* dest, uint128_t comp, uint128_t exch)
{
return _InterlockedCompareExchange128((volatile long long*)dest, exch.hi, exch.lo, (long long*)&comp) != 0;
}
// atomic exchange functions
// Store value into *dest and return the previous contents.
inline uint8_t sync_lock_test_and_set(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedExchange8((volatile char*)dest, value);
}
inline uint16_t sync_lock_test_and_set(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedExchange16((volatile short*)dest, value);
}
inline uint32_t sync_lock_test_and_set(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedExchange((volatile long*)dest, value);
}
inline uint64_t sync_lock_test_and_set(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedExchange64((volatile long long*)dest, value);
}
// 128-bit exchange emulated with a CAS loop: the non-atomic lo/hi read may
// be torn, but a torn snapshot simply fails the CAS and the loop retries.
inline uint128_t sync_lock_test_and_set(volatile uint128_t* dest, uint128_t value)
{
while (true)
{
uint128_t old;
old.lo = dest->lo;
old.hi = dest->hi;
if (sync_bool_compare_and_swap(dest, old, value)) return old;
}
}
// atomic add functions
// Add value to *dest and return the PREVIOUS contents (fetch-then-add).
inline uint8_t sync_fetch_and_add(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedExchangeAdd8((volatile char*)dest, value);
}
inline uint16_t sync_fetch_and_add(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedExchangeAdd16((volatile short*)dest, value);
}
inline uint32_t sync_fetch_and_add(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedExchangeAdd((volatile long*)dest, value);
}
inline uint64_t sync_fetch_and_add(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedExchangeAdd64((volatile long long*)dest, value);
}
// 128-bit add via CAS loop; the (_new.lo < value.lo) term propagates the
// carry out of the low 64-bit word into the high word.
inline uint128_t sync_fetch_and_add(volatile uint128_t* dest, uint128_t value)
{
while (true)
{
uint128_t old;
old.lo = dest->lo;
old.hi = dest->hi;
uint128_t _new;
_new.lo = old.lo + value.lo;
_new.hi = old.hi + value.hi + (_new.lo < value.lo);
if (sync_bool_compare_and_swap(dest, old, _new)) return old;
}
}
// atomic sub functions
// Subtract value from *dest and return the PREVIOUS contents; the scalar
// versions reuse the add intrinsic with a negated operand (two's complement).
inline uint8_t sync_fetch_and_sub(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedExchangeAdd8((volatile char*)dest, -(char)value);
}
inline uint16_t sync_fetch_and_sub(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedExchangeAdd16((volatile short*)dest, -(short)value);
}
inline uint32_t sync_fetch_and_sub(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedExchangeAdd((volatile long*)dest, -(long)value);
}
inline uint64_t sync_fetch_and_sub(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedExchangeAdd64((volatile long long*)dest, -(long long)value);
}
// 128-bit subtract via CAS loop; (old.lo < value.lo) is the borrow out of
// the low 64-bit word, subtracted from the high word.
inline uint128_t sync_fetch_and_sub(volatile uint128_t* dest, uint128_t value)
{
while (true)
{
uint128_t old;
old.lo = dest->lo;
old.hi = dest->hi;
uint128_t _new;
_new.lo = old.lo - value.lo;
_new.hi = old.hi - value.hi - (old.lo < value.lo);
if (sync_bool_compare_and_swap(dest, old, _new)) return old;
}
}
// atomic `bitwise or` functions
// OR value into *dest and return the PREVIOUS contents.
inline uint8_t sync_fetch_and_or(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedOr8((volatile char*)dest, value);
}
inline uint16_t sync_fetch_and_or(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedOr16((volatile short*)dest, value);
}
inline uint32_t sync_fetch_and_or(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedOr((volatile long*)dest, value);
}
inline uint64_t sync_fetch_and_or(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedOr64((volatile long long*)dest, value);
}
// 128-bit OR via CAS loop; words are independent so no carry handling.
inline uint128_t sync_fetch_and_or(volatile uint128_t* dest, uint128_t value)
{
while (true)
{
uint128_t old;
old.lo = dest->lo;
old.hi = dest->hi;
uint128_t _new;
_new.lo = old.lo | value.lo;
_new.hi = old.hi | value.hi;
if (sync_bool_compare_and_swap(dest, old, _new)) return old;
}
}
// atomic `bitwise and` functions
// AND value into *dest and return the PREVIOUS contents.
inline uint8_t sync_fetch_and_and(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedAnd8((volatile char*)dest, value);
}
inline uint16_t sync_fetch_and_and(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedAnd16((volatile short*)dest, value);
}
inline uint32_t sync_fetch_and_and(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedAnd((volatile long*)dest, value);
}
inline uint64_t sync_fetch_and_and(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedAnd64((volatile long long*)dest, value);
}
// 128-bit AND via CAS loop; words are independent so no carry handling.
inline uint128_t sync_fetch_and_and(volatile uint128_t* dest, uint128_t value)
{
while (true)
{
uint128_t old;
old.lo = dest->lo;
old.hi = dest->hi;
uint128_t _new;
_new.lo = old.lo & value.lo;
_new.hi = old.hi & value.hi;
if (sync_bool_compare_and_swap(dest, old, _new)) return old;
}
}
// atomic `bitwise xor` functions
// XOR value into *dest and return the PREVIOUS contents.
inline uint8_t sync_fetch_and_xor(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedXor8((volatile char*)dest, value);
}
inline uint16_t sync_fetch_and_xor(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedXor16((volatile short*)dest, value);
}
inline uint32_t sync_fetch_and_xor(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedXor((volatile long*)dest, value);
}
inline uint64_t sync_fetch_and_xor(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedXor64((volatile long long*)dest, value);
}
// 128-bit XOR via CAS loop; words are independent so no carry handling.
inline uint128_t sync_fetch_and_xor(volatile uint128_t* dest, uint128_t value)
{
while (true)
{
uint128_t old;
old.lo = dest->lo;
old.hi = dest->hi;
uint128_t _new;
_new.lo = old.lo ^ value.lo;
_new.hi = old.hi ^ value.hi;
if (sync_bool_compare_and_swap(dest, old, _new)) return old;
}
}
#endif /* _MSC_VER */
inline uint32_t cntlz32(uint32_t arg)
{
#if defined(_MSC_VER)

View file

@ -22,7 +22,7 @@ public:
const u32 max_value;
semaphore_t(u32 max_value = 1, u32 value = 0)
: m_var({ value, 0 })
: m_var(sync_var_t{ value, 0 })
, max_value(max_value)
{
}

152
Utilities/SharedMutex.cpp Normal file
View file

@ -0,0 +1,152 @@
#include "stdafx.h"
#include "SharedMutex.h"
static const u32 MAX_READERS = 0x7fffffff; // 2^31-1
// Try to acquire the lock in shared (reader) mode without blocking.
// Succeeds only in the fully uncontended case: no writer holds the lock, no
// writer or reader is queued, and the reader count has not saturated at
// MAX_READERS. On success the reader count is incremented atomically.
// Returns true if the shared lock was taken.
//
// FIX: dropped the erroneous `inline` — this is the only definition of a
// public member declared in SharedMutex.h, and an inline function must be
// defined in every translation unit that uses it; with `inline` here, calls
// from any other TU would fail to link.
bool shared_mutex_t::try_lock_shared()
{
	return m_info.atomic_op([](ownership_info_t& info) -> bool
	{
		if (info.readers < MAX_READERS && !info.writers && !info.waiting_readers && !info.waiting_writers)
		{
			info.readers++;
			return true;
		}

		return false;
	});
}
// Acquire the lock in shared (reader) mode, blocking if necessary.
// Fast path: the lock-free try_lock_shared(). Slow path: a two-stage wait
// under m_mutex.
void shared_mutex_t::lock_shared()
{
if (!try_lock_shared())
{
std::unique_lock<std::mutex> lock(m_mutex);
// Stage 1: register as a waiting reader. The u16 counter saturates at
// UINT16_MAX, so wait on m_wrcv until a registration slot is free.
m_wrcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (info.waiting_readers < UINT16_MAX)
{
info.waiting_readers++;
return true;
}
return false;
})));
// Stage 2: wait on m_rcv until no writer holds or waits for the lock
// (writers have priority) and the reader count can be incremented.
m_rcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (!info.writers && !info.waiting_writers && info.readers < MAX_READERS)
{
info.readers++;
return true;
}
return false;
})));
// Deregister from the waiting-reader count; zero here means unbalanced
// bookkeeping, hence the hard failure.
const auto info = m_info.atomic_op([](ownership_info_t& info)
{
if (!info.waiting_readers--)
{
throw EXCEPTION("Invalid value");
}
});
// If the counter was saturated before our decrement, wake one thread
// blocked in stage 1. (Assumes atomic_op returns the pre-op snapshot —
// TODO confirm against atomic_t::atomic_op in Atomic.h.)
if (info.waiting_readers == UINT16_MAX)
{
m_wrcv.notify_one();
}
}
}
// Release a shared (reader) hold on the lock and wake waiters if needed.
void shared_mutex_t::unlock_shared()
{
// Decrement the reader count; a zero count here means unlock without a
// matching lock, hence the hard failure.
const auto info = m_info.atomic_op([](ownership_info_t& info)
{
if (!info.readers--)
{
throw EXCEPTION("Not locked");
}
});
// Wake a waiting writer when the last reader leaves (pre-op snapshot
// readers == 1) and a writer is queued/holding.
const bool notify_writers = info.readers == 1 && info.writers;
// NOTE(review): `readers` is a 31-bit bitfield (max 0x7FFFFFFF), so this
// comparison against UINT32_MAX can never be true and notify_readers is
// always false — likely MAX_READERS was intended (wake readers blocked by
// count saturation). Not changed here: the correct fix depends on whether
// atomic_op returns the pre- or post-op value (defined in Atomic.h).
const bool notify_readers = info.readers == UINT32_MAX && info.waiting_readers;
if (notify_writers || notify_readers)
{
std::lock_guard<std::mutex> lock(m_mutex);
if (notify_writers) m_wcv.notify_one();
if (notify_readers) m_rcv.notify_one();
}
}
// Try to acquire the lock exclusively (writer mode) without blocking.
// Succeeds only when the whole ownership word is zero (no readers, no
// writer, nobody queued), atomically setting the single writers bit.
// Returns true if the exclusive lock was taken.
//
// FIX: dropped the erroneous `inline` — this is the only definition of a
// public member declared in SharedMutex.h, and an inline function must be
// defined in every translation unit that uses it; with `inline` here, calls
// from any other TU would fail to link.
bool shared_mutex_t::try_lock()
{
	return m_info.compare_and_swap_test({ 0, 0, 0, 0 }, { 0, 1, 0, 0 });
}
// Acquire the lock exclusively (writer mode), blocking if necessary.
// Fast path: the lock-free try_lock(). Slow path: register as a waiting
// writer, claim the writer bit, then drain the readers.
void shared_mutex_t::lock()
{
if (!try_lock())
{
std::unique_lock<std::mutex> lock(m_mutex);
// Stage 1: register as a waiting writer; the u16 counter saturates at
// UINT16_MAX, so wait on m_wwcv until a registration slot is free.
m_wwcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (info.waiting_writers < UINT16_MAX)
{
info.waiting_writers++;
return true;
}
return false;
})));
// Stage 2: claim the (single-bit) writer flag once no other writer holds
// it. New readers are fenced off from this point because lock_shared()
// checks both writers and waiting_writers.
m_wcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (!info.writers)
{
info.writers++;
return true;
}
return false;
})));
// Stage 3: wait for all existing readers to drain before proceeding.
m_wcv.wait(lock, WRAP_EXPR(m_info.load().readers == 0));
// Deregister from the waiting-writer count; zero here means unbalanced
// bookkeeping, hence the hard failure.
const auto info = m_info.atomic_op([](ownership_info_t& info)
{
if (!info.waiting_writers--)
{
throw EXCEPTION("Invalid value");
}
});
// If the counter was saturated before our decrement, wake one thread
// blocked in stage 1. (Assumes atomic_op returns the pre-op snapshot —
// TODO confirm against atomic_t::atomic_op in Atomic.h.)
if (info.waiting_writers == UINT16_MAX)
{
m_wwcv.notify_one();
}
}
}
// Release the exclusive (writer) hold and hand the lock over: one queued
// writer first (writer priority), otherwise all queued readers.
void shared_mutex_t::unlock()
{
// Clear the writer flag; zero here means unlock without a matching lock,
// hence the hard failure.
const auto info = m_info.atomic_op([](ownership_info_t& info)
{
if (!info.writers--)
{
throw EXCEPTION("Not locked");
}
});
if (info.waiting_writers || info.waiting_readers)
{
std::lock_guard<std::mutex> lock(m_mutex);
// Writers take precedence: wake exactly one writer, or else release the
// whole reader herd at once.
if (info.waiting_writers) m_wcv.notify_one();
else if (info.waiting_readers) m_rcv.notify_all();
}
}

46
Utilities/SharedMutex.h Normal file
View file

@ -0,0 +1,46 @@
#pragma once
#include <shared_mutex>
// An attempt to create lock-free (in optimistic case) implementation similar to std::shared_mutex;
// MSVC implementation of std::shared_timed_mutex is not lock-free and thus may be slow, and std::shared_mutex is not available.
class shared_mutex_t
{
// Whole ownership state packed into one atomically-updated 64-bit word so
// the uncontended paths are a single CAS/atomic_op.
struct ownership_info_t
{
u32 readers : 31; // active shared holders (capped at MAX_READERS)
u32 writers : 1; // 1 while a writer holds (or is draining readers)
u16 waiting_readers; // readers registered in lock_shared()'s slow path
u16 waiting_writers; // writers registered in lock()'s slow path
};
atomic_t<ownership_info_t> m_info{};
// Mutex and condition variables back only the contended slow paths.
std::mutex m_mutex;
std::condition_variable m_rcv; // readers waiting to acquire
std::condition_variable m_wcv; // writers waiting to acquire / drain readers
std::condition_variable m_wrcv; // readers waiting for a registration slot
std::condition_variable m_wwcv; // writers waiting for a registration slot
public:
shared_mutex_t() = default;
// Lock in shared mode
void lock_shared();
// Try to lock in shared mode
bool try_lock_shared();
// Unlock in shared mode
void unlock_shared();
// Lock exclusively
void lock();
// Try to lock exclusively
bool try_lock();
// Unlock exclusively
void unlock();
};

View file

@ -145,8 +145,7 @@ namespace fmt
std::string to_udec(u64 value);
std::string to_sdec(s64 value);
template<typename T, bool is_enum = std::is_enum<T>::value>
struct unveil
template<typename T, bool is_enum = std::is_enum<T>::value> struct unveil
{
using result_type = T;
@ -156,8 +155,7 @@ namespace fmt
}
};
template<>
struct unveil<char*, false>
template<> struct unveil<char*, false>
{
using result_type = const char*;
@ -167,8 +165,7 @@ namespace fmt
}
};
template<size_t N>
struct unveil<const char[N], false>
template<std::size_t N> struct unveil<const char[N], false>
{
using result_type = const char*;
@ -178,8 +175,7 @@ namespace fmt
}
};
template<>
struct unveil<std::string, false>
template<> struct unveil<std::string, false>
{
using result_type = const char*;
@ -189,8 +185,7 @@ namespace fmt
}
};
template<typename T>
struct unveil<T, true>
template<typename T> struct unveil<T, true>
{
using result_type = std::underlying_type_t<T>;
@ -200,25 +195,13 @@ namespace fmt
}
};
template<typename T>
struct unveil<be_t<T>, false>
template<typename T, bool Se> struct unveil<se_t<T, Se>, false>
{
using result_type = typename unveil<T>::result_type;
force_inline static result_type get_value(const be_t<T>& arg)
force_inline static result_type get_value(const se_t<T, Se>& arg)
{
return unveil<T>::get_value(arg.value());
}
};
template<typename T>
struct unveil<le_t<T>, false>
{
using result_type = typename unveil<T>::result_type;
force_inline static result_type get_value(const le_t<T>& arg)
{
return unveil<T>::get_value(arg.value());
return unveil<T>::get_value(arg);
}
};
@ -270,11 +253,11 @@ namespace fmt
}
}
struct exception
struct exception : public std::exception
{
std::unique_ptr<char[]> message;
template<typename... Args> never_inline safe_buffers exception(const char* file, int line, const char* func, const char* text, Args... args)
template<typename... Args> never_inline safe_buffers exception(const char* file, int line, const char* func, const char* text, Args... args) noexcept
{
const std::string data = format(text, args...) + format("\n(in file %s:%d, in function %s)", file, line, func);
@ -283,16 +266,16 @@ namespace fmt
std::memcpy(message.get(), data.c_str(), data.size() + 1);
}
exception(const exception& other)
exception(const exception& other) noexcept
{
const std::size_t size = std::strlen(other);
const std::size_t size = std::strlen(other.message.get());
message.reset(new char[size + 1]);
std::memcpy(message.get(), other, size + 1);
std::memcpy(message.get(), other.message.get(), size + 1);
}
operator const char*() const
virtual const char* what() const noexcept override
{
return message.get();
}

View file

@ -1121,6 +1121,8 @@ void _se_translator(unsigned int u, EXCEPTION_POINTERS* pExp)
{
throw EXCEPTION("Access violation %s location 0x%llx", is_writing ? "writing" : "reading", addr64);
}
//__int2c(); // if it crashed there, check the callstack for the actual source of the crash
}
const PVOID exception_handler = (atexit([]{ RemoveVectoredExceptionHandler(exception_handler); }), AddVectoredExceptionHandler(1, [](PEXCEPTION_POINTERS pExp) -> LONG
@ -1281,14 +1283,9 @@ void named_thread_t::start(std::function<std::string()> name, std::function<void
LOG_NOTICE(GENERAL, "Thread ended");
}
}
catch (const fmt::exception& e)
{
LOG_ERROR(GENERAL, "Exception: %s", e.message.get());
Emu.Pause();
}
catch (const std::exception& e)
{
LOG_ERROR(GENERAL, "STD Exception: %s", e.what());
LOG_ERROR(GENERAL, "Exception: %s", e.what());
Emu.Pause();
}
catch (EmulationStopped)

View file

@ -147,7 +147,7 @@ class squeue_t
public:
squeue_t()
: m_sync({})
: m_sync(squeue_sync_var_t{})
{
}
@ -156,9 +156,9 @@ public:
return sq_size;
}
bool is_full() const volatile
bool is_full() const
{
return m_sync.data.count == sq_size;
return m_sync.load().count == sq_size;
}
bool push(const T& data, const std::function<bool()>& test_exit)