#pragma once

#include <cstddef>
#include <type_traits>
#include <utility>

// Note: u8/u16/u32/u64/u128, force_inline, the sync_* wrappers (sync_val_compare_and_swap,
// sync_bool_compare_and_swap, sync_lock_test_and_set, sync_fetch_and_*) and the be_t/le_t
// endianness helpers are expected to be provided by the project's common headers.

template<std::size_t size> struct _to_atomic_subtype
{
	static_assert(size == 1 || size == 2 || size == 4 || size == 8 || size == 16, "Invalid atomic type");
};

template<> struct _to_atomic_subtype<1> { using type = u8; };
template<> struct _to_atomic_subtype<2> { using type = u16; };
template<> struct _to_atomic_subtype<4> { using type = u32; };
template<> struct _to_atomic_subtype<8> { using type = u64; };
template<> struct _to_atomic_subtype<16> { using type = u128; };

template<typename T> using atomic_subtype_t = typename _to_atomic_subtype<sizeof(T)>::type;

// result wrapper to deal with void result type
template<typename T, typename RT, typename VT> struct atomic_op_result_t
{
	RT result;

	template<typename... Args> inline atomic_op_result_t(T func, VT& var, Args&&... args)
		: result(std::move(func(var, std::forward<Args>(args)...)))
	{
	}

	inline RT move()
	{
		return std::move(result);
	}
};

// void specialization: result is the initial value of the first arg
template<typename T, typename VT> struct atomic_op_result_t<T, void, VT>
{
	VT result;

	template<typename... Args> inline atomic_op_result_t(T func, VT& var, Args&&... args)
		: result(var)
	{
		func(var, std::forward<Args>(args)...);
	}

	inline VT move()
	{
		return std::move(result);
	}
};

// member function specialization
template<typename CT, typename... FArgs, typename RT, typename VT> struct atomic_op_result_t<RT(CT::*)(FArgs...), RT, VT>
{
	RT result;

	template<typename... Args> inline atomic_op_result_t(RT(CT::*func)(FArgs...), VT& var, Args&&... args)
		: result(std::move((var.*func)(std::forward<Args>(args)...)))
	{
	}

	inline RT move()
	{
		return std::move(result);
	}
};

// member function void specialization
template<typename CT, typename... FArgs, typename VT> struct atomic_op_result_t<void(CT::*)(FArgs...), void, VT>
{
	VT result;

	template<typename... Args> inline atomic_op_result_t(void(CT::*func)(FArgs...), VT& var, Args&&... args)
		: result(var)
	{
		(var.*func)(std::forward<Args>(args)...);
	}

	inline VT move()
	{
		return std::move(result);
	}
};
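// Illustrative note (documentation only, not part of the original interface): the
// specializations above dispatch on the callable's shape. A non-void callable propagates
// its own return value; a void callable makes move() yield the value captured before the
// call, e.g. (hypothetical standalone use, normally only atomic_op() instantiates this):
//
//   int v = 5;
//   atomic_op_result_t<void(*)(int&), void, int> r([](int& x) { ++x; }, v);
//   // r.move() == 5 (value before the call), v == 6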
template<typename T> union _atomic_base
{
	using type = std::remove_cv_t<T>;
	using subtype = atomic_subtype_t<type>;

	type data; // unsafe direct access
	subtype sub_data; // unsafe direct access to substitute type

	force_inline static const subtype to_subtype(const type& value)
	{
		return reinterpret_cast<const subtype&>(value);
	}

	force_inline static const type from_subtype(const subtype value)
	{
		return reinterpret_cast<const type&>(value);
	}

	force_inline static type& to_type(subtype& value)
	{
		return reinterpret_cast<type&>(value);
	}

private:
	template<typename T2> force_inline static void write_relaxed(volatile T2& data, const T2& value)
	{
		data = value;
	}

	force_inline static void write_relaxed(volatile u128& data, const u128& value)
	{
		sync_lock_test_and_set(&data, value);
	}

	template<typename T2> force_inline static T2 read_relaxed(const volatile T2& data)
	{
		return data;
	}

	force_inline static u128 read_relaxed(const volatile u128& value)
	{
		return sync_val_compare_and_swap(const_cast<volatile u128*>(&value), {}, {});
	}

public:
	// atomically compare data with cmp, replace with exch if equal, return previous data value anyway
	force_inline const type compare_and_swap(const type& cmp, const type& exch) volatile
	{
		return from_subtype(sync_val_compare_and_swap(&sub_data, to_subtype(cmp), to_subtype(exch)));
	}

	// atomically compare data with cmp, replace with exch if equal, return true if data was replaced
	force_inline bool compare_and_swap_test(const type& cmp, const type& exch) volatile
	{
		return sync_bool_compare_and_swap(&sub_data, to_subtype(cmp), to_subtype(exch));
	}

	// read data with memory barrier
	force_inline const type load_sync() const volatile
	{
		const subtype zero = {};
		return from_subtype(sync_val_compare_and_swap(const_cast<volatile subtype*>(&sub_data), zero, zero));
	}

	// atomically replace data with exch, return previous data value
	force_inline const type exchange(const type& exch) volatile
	{
		return from_subtype(sync_lock_test_and_set(&sub_data, to_subtype(exch)));
	}

	// read data without memory barrier (works as load_sync() for 128 bit)
	force_inline const type load() const volatile
	{
		return from_subtype(read_relaxed(sub_data));
	}

	// write data without memory barrier (works as exchange() for 128 bit, discarding result)
	force_inline void store(const type& value) volatile
	{
		write_relaxed(sub_data, to_subtype(value));
	}

	// perform an atomic operation on data (callable object version, first arg is a reference to atomic type)
	template<typename F, typename... Args, typename RT = std::result_of_t<F(T&, Args...)>>
	auto atomic_op(F func, Args&&... args) volatile -> decltype(atomic_op_result_t<F, RT, type>::result)
	{
		while (true)
		{
			// read the old value from memory
			const subtype old = read_relaxed(sub_data);

			// copy the old value
			subtype _new = old;

			// call atomic op for the local copy of the old value and save the return value of the function
			atomic_op_result_t<F, RT, type> result(func, to_type(_new), args...);

			// atomically compare value with `old`, replace with `_new` and return on success
			if (sync_bool_compare_and_swap(&sub_data, old, _new)) return result.move();
		}
	}

	// atomic bitwise OR, returns previous data
	force_inline const type _or(const type& right) volatile
	{
		return from_subtype(sync_fetch_and_or(&sub_data, to_subtype(right)));
	}

	// atomic bitwise AND, returns previous data
	force_inline const type _and(const type& right) volatile
	{
		return from_subtype(sync_fetch_and_and(&sub_data, to_subtype(right)));
	}

	// atomic bitwise AND NOT (inverts right argument), returns previous data
	force_inline const type _and_not(const type& right) volatile
	{
		return from_subtype(sync_fetch_and_and(&sub_data, ~to_subtype(right)));
	}

	// atomic bitwise XOR, returns previous data
	force_inline const type _xor(const type& right) volatile
	{
		return from_subtype(sync_fetch_and_xor(&sub_data, to_subtype(right)));
	}

	force_inline const type operator |=(const type& right) volatile
	{
		return from_subtype(sync_fetch_and_or(&sub_data, to_subtype(right)) | to_subtype(right));
	}

	force_inline const type operator &=(const type& right) volatile
	{
		return from_subtype(sync_fetch_and_and(&sub_data, to_subtype(right)) & to_subtype(right));
	}

	force_inline const type operator ^=(const type& right) volatile
	{
		return from_subtype(sync_fetch_and_xor(&sub_data, to_subtype(right)) ^ to_subtype(right));
	}
};
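// Illustrative usage sketch (documentation only, not part of the original interface;
// assumes u32 and the sync_* wrappers come from the project's common headers):
//
//   _atomic_base<u32> value{};
//   value.store(1);                                       // relaxed write
//   u32 seen  = value.compare_and_swap(1, 2);             // CAS, returns the previous value (1)
//   bool ok   = value.compare_and_swap_test(2, 3);        // CAS, returns whether the swap happened
//   u32 prev  = value.atomic_op([](u32& v) { v *= 2; });  // CAS loop; void functor returns the value before the op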
template<typename T> using if_integral_t = std::enable_if_t<std::is_integral<T>::value>;

template<typename T, typename = if_integral_t<T>> inline T operator ++(_atomic_base<T>& left)
{
	return left.from_subtype(sync_fetch_and_add(&left.sub_data, 1) + 1);
}

template<typename T, typename = if_integral_t<T>> inline T operator --(_atomic_base<T>& left)
{
	return left.from_subtype(sync_fetch_and_sub(&left.sub_data, 1) - 1);
}

template<typename T, typename = if_integral_t<T>> inline T operator ++(_atomic_base<T>& left, int)
{
	return left.from_subtype(sync_fetch_and_add(&left.sub_data, 1));
}

template<typename T, typename = if_integral_t<T>> inline T operator --(_atomic_base<T>& left, int)
{
	return left.from_subtype(sync_fetch_and_sub(&left.sub_data, 1));
}

template<typename T, typename T2, typename = if_integral_t<T>> inline auto operator +=(_atomic_base<T>& left, T2 right) -> decltype(std::declval<T>() + std::declval<T2>())
{
	return left.from_subtype(sync_fetch_and_add(&left.sub_data, right) + right);
}

template<typename T, typename T2, typename = if_integral_t<T>> inline auto operator -=(_atomic_base<T>& left, T2 right) -> decltype(std::declval<T>() - std::declval<T2>())
{
	return left.from_subtype(sync_fetch_and_sub(&left.sub_data, right) - right);
}

template<typename T, typename = if_integral_t<T>> inline le_t<T> operator ++(_atomic_base<le_t<T>>& left)
{
	return left.from_subtype(sync_fetch_and_add(&left.sub_data, 1) + 1);
}

template<typename T, typename = if_integral_t<T>> inline le_t<T> operator --(_atomic_base<le_t<T>>& left)
{
	return left.from_subtype(sync_fetch_and_sub(&left.sub_data, 1) - 1);
}

template<typename T, typename = if_integral_t<T>> inline le_t<T> operator ++(_atomic_base<le_t<T>>& left, int)
{
	return left.from_subtype(sync_fetch_and_add(&left.sub_data, 1));
}

template<typename T, typename = if_integral_t<T>> inline le_t<T> operator --(_atomic_base<le_t<T>>& left, int)
{
	return left.from_subtype(sync_fetch_and_sub(&left.sub_data, 1));
}

template<typename T, typename T2, typename = if_integral_t<T>> inline auto operator +=(_atomic_base<le_t<T>>& left, T2 right) -> decltype(std::declval<T>() + std::declval<T2>())
{
	return left.from_subtype(sync_fetch_and_add(&left.sub_data, right) + right);
}

template<typename T, typename T2, typename = if_integral_t<T>> inline auto operator -=(_atomic_base<le_t<T>>& left, T2 right) -> decltype(std::declval<T>() - std::declval<T2>())
{
	return left.from_subtype(sync_fetch_and_sub(&left.sub_data, right) - right);
}

template<typename T, typename = if_integral_t<T>> inline be_t<T> operator ++(_atomic_base<be_t<T>>& left)
{
	return left.atomic_op([](be_t<T>& value) -> be_t<T> { return ++value; });
}

template<typename T, typename = if_integral_t<T>> inline be_t<T> operator --(_atomic_base<be_t<T>>& left)
{
	return left.atomic_op([](be_t<T>& value) -> be_t<T> { return --value; });
}

template<typename T, typename = if_integral_t<T>> inline be_t<T> operator ++(_atomic_base<be_t<T>>& left, int)
{
	return left.atomic_op([](be_t<T>& value) -> be_t<T> { return value++; });
}

template<typename T, typename = if_integral_t<T>> inline be_t<T> operator --(_atomic_base<be_t<T>>& left, int)
{
	return left.atomic_op([](be_t<T>& value) -> be_t<T> { return value--; });
}

template<typename T, typename T2, typename = if_integral_t<T>> inline auto operator +=(_atomic_base<be_t<T>>& left, T2 right) -> be_t<decltype(std::declval<T>() + std::declval<T2>())>
{
	return left.atomic_op([right](be_t<T>& value) -> be_t<T> { return value += right; });
}

template<typename T, typename T2, typename = if_integral_t<T>> inline auto operator -=(_atomic_base<be_t<T>>& left, T2 right) -> be_t<decltype(std::declval<T>() - std::declval<T2>())>
{
	return left.atomic_op([right](be_t<T>& value) -> be_t<T> { return value -= right; });
}

template<typename T> using atomic_t = _atomic_base<T>; // Atomic Type with native endianness (for emulator memory)

template<typename T> using atomic_be_t = _atomic_base<be_t<T>>; // Atomic BE Type (for PS3 virtual memory)

template<typename T> using atomic_le_t = _atomic_base<le_t<T>>; // Atomic LE Type (for PSV virtual memory)
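// Illustrative usage of the aliases above (documentation only; assumes be_t/le_t come from
// the project's endianness helpers):
//
//   atomic_t<u32>    counter{}; // native endianness
//   atomic_be_t<u32> be_ctr{};  // big-endian storage (e.g. PS3 guest memory)
//
//   counter++;       // post-increment via sync_fetch_and_add, returns the previous value
//   counter += 10;   // returns the new value
//   be_ctr++;        // implemented via atomic_op and a CAS loop, preserving the BE representation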