#pragma once

template<typename T, size_t size = sizeof(T)> struct _to_atomic_subtype
{
    static_assert(size == 1 || size == 2 || size == 4 || size == 8 || size == 16, "Invalid atomic type");
};

template<typename T> struct _to_atomic_subtype<T, 1>
{
    using type = u8;
};

template<typename T> struct _to_atomic_subtype<T, 2>
{
    using type = u16;
};

template<typename T> struct _to_atomic_subtype<T, 4>
{
    using type = u32;
};

template<typename T> struct _to_atomic_subtype<T, 8>
{
    using type = u64;
};

template<typename T> struct _to_atomic_subtype<T, 16>
{
    using type = u128;
};

template<typename T> using atomic_subtype_t = typename _to_atomic_subtype<T>::type;

template<typename T> union _atomic_base
{
    using type = std::remove_cv_t<T>;
    using subtype = atomic_subtype_t<type>;

    type data; // unsafe direct access
    subtype sub_data; // unsafe direct access to substitute type

    force_inline static const subtype to_subtype(const type& value)
    {
        return reinterpret_cast<const subtype&>(value);
    }

    force_inline static const type from_subtype(const subtype value)
    {
        return reinterpret_cast<const type&>(value);
    }

    force_inline static type& to_type(subtype& value)
    {
        return reinterpret_cast<type&>(value);
    }

public:
    // atomically compare data with cmp, replace with exch if equal, return previous data value anyway
    force_inline const type compare_and_swap(const type& cmp, const type& exch) volatile
    {
        return from_subtype(sync_val_compare_and_swap(&sub_data, to_subtype(cmp), to_subtype(exch)));
    }

    // atomically compare data with cmp, replace with exch if equal, return true if data was replaced
    force_inline bool compare_and_swap_test(const type& cmp, const type& exch) volatile
    {
        return sync_bool_compare_and_swap(&sub_data, to_subtype(cmp), to_subtype(exch));
    }

    // read data with memory barrier
    force_inline const type read_sync() const volatile
    {
        const subtype zero = {};
        return from_subtype(sync_val_compare_and_swap(const_cast<subtype*>(&sub_data), zero, zero));
    }

    // atomically replace data with exch, return previous data value
    force_inline const type exchange(const type& exch) volatile
    {
        return from_subtype(sync_lock_test_and_set(&sub_data, to_subtype(exch)));
    }

    // read data without memory barrier
    force_inline const type read_relaxed() const volatile
    {
        const subtype value = const_cast<const subtype&>(sub_data);
        return from_subtype(value);
    }

    // write data without memory barrier
    force_inline void write_relaxed(const type& value) volatile
    {
        const_cast<subtype&>(sub_data) = to_subtype(value);
    }

    // perform atomic operation on data
    template<typename FT> force_inline void atomic_op(const FT atomic_proc) volatile
    {
        while (true)
        {
            const subtype old = const_cast<const subtype&>(sub_data);
            subtype _new = old;
            atomic_proc(to_type(_new)); // function should accept reference to T type
            if (sync_bool_compare_and_swap(&sub_data, old, _new)) return;
        }
    }

    // perform atomic operation on data with special exit condition (if intermediate result != proceed_value)
    template<typename RT, typename FT> force_inline RT atomic_op(const RT proceed_value, const FT atomic_proc) volatile
    {
        while (true)
        {
            const subtype old = const_cast<const subtype&>(sub_data);
            subtype _new = old;
            auto res = static_cast<RT>(atomic_proc(to_type(_new))); // function should accept reference to T type and return some value
            if (res != proceed_value) return res;
            if (sync_bool_compare_and_swap(&sub_data, old, _new)) return proceed_value;
        }
    }

    // perform atomic operation on data with additional memory barrier
    template<typename FT> force_inline void atomic_op_sync(const FT atomic_proc) volatile
    {
        const subtype zero = {};
        subtype old = sync_val_compare_and_swap(&sub_data, zero, zero);
        while (true)
        {
            subtype _new = old;
            atomic_proc(to_type(_new)); // function should accept reference to T type
            const subtype val = sync_val_compare_and_swap(&sub_data, old, _new);
            if (val == old) return;
            old = val;
        }
    }

    // perform atomic operation on data with additional memory barrier and special exit condition (if intermediate result != proceed_value)
    template<typename RT, typename FT> force_inline RT atomic_op_sync(const RT proceed_value, const FT atomic_proc) volatile
    {
        const subtype zero = {};
        subtype old = sync_val_compare_and_swap(&sub_data, zero, zero);
        while (true)
        {
            subtype _new = old;
            auto res = static_cast<RT>(atomic_proc(to_type(_new))); // function should accept reference to T type and return some value
            if (res != proceed_value) return res;
            const subtype val = sync_val_compare_and_swap(&sub_data, old, _new);
            if (val == old) return proceed_value;
            old = val;
        }
    }

    // atomic bitwise OR, returns previous data
    force_inline const type _or(const type& right) volatile
    {
        return from_subtype(sync_fetch_and_or(&sub_data, to_subtype(right)));
    }

    // atomic bitwise AND, returns previous data
    force_inline const type _and(const type& right) volatile
    {
        return from_subtype(sync_fetch_and_and(&sub_data, to_subtype(right)));
    }

    // atomic bitwise AND NOT (inverts right argument), returns previous data
    force_inline const type _and_not(const type& right) volatile
    {
        return from_subtype(sync_fetch_and_and(&sub_data, ~to_subtype(right)));
    }

    // atomic bitwise XOR, returns previous data
    force_inline const type _xor(const type& right) volatile
    {
        return from_subtype(sync_fetch_and_xor(&sub_data, to_subtype(right)));
    }

    force_inline const type operator |= (const type& right) volatile
    {
        return from_subtype(sync_fetch_and_or(&sub_data, to_subtype(right)) | to_subtype(right));
    }

    force_inline const type operator &= (const type& right) volatile
    {
        return from_subtype(sync_fetch_and_and(&sub_data, to_subtype(right)) & to_subtype(right));
    }

    force_inline const type operator ^= (const type& right) volatile
    {
        return from_subtype(sync_fetch_and_xor(&sub_data, to_subtype(right)) ^ to_subtype(right));
    }
};

template<typename T, typename T2 = T> using if_integral_le_t = std::enable_if_t<std::is_integral<T>::value && std::is_integral<T2>::value, le_t<T>>;

template<typename T, typename T2 = T> using if_integral_be_t = std::enable_if_t<std::is_integral<T>::value && std::is_integral<T2>::value, be_t<T>>;

template<typename T> inline if_integral_le_t<T> operator ++(_atomic_base<le_t<T>>& left)
{
    return left.from_subtype(sync_fetch_and_add(&left.sub_data, 1) + 1);
}

template<typename T> inline if_integral_le_t<T> operator --(_atomic_base<le_t<T>>& left)
{
    return left.from_subtype(sync_fetch_and_sub(&left.sub_data, 1) - 1);
}

template<typename T> inline if_integral_le_t<T> operator ++(_atomic_base<le_t<T>>& left, int)
{
    return left.from_subtype(sync_fetch_and_add(&left.sub_data, 1));
}

template<typename T> inline if_integral_le_t<T> operator --(_atomic_base<le_t<T>>& left, int)
{
    return left.from_subtype(sync_fetch_and_sub(&left.sub_data, 1));
}

template<typename T, typename T2> inline if_integral_le_t<T, T2> operator +=(_atomic_base<le_t<T>>& left, T2 right)
{
    return left.from_subtype(sync_fetch_and_add(&left.sub_data, right) + right);
}

template<typename T, typename T2> inline if_integral_le_t<T, T2> operator -=(_atomic_base<le_t<T>>& left, T2 right)
{
    return left.from_subtype(sync_fetch_and_sub(&left.sub_data, right) - right);
}

template<typename T> inline if_integral_be_t<T> operator ++(_atomic_base<be_t<T>>& left)
{
    be_t<T> result;

    left.atomic_op([&result](be_t<T>& value)
    {
        result = ++value;
    });

    return result;
}

template<typename T> inline if_integral_be_t<T> operator --(_atomic_base<be_t<T>>& left)
{
    be_t<T> result;

    left.atomic_op([&result](be_t<T>& value)
    {
        result = --value;
    });

    return result;
}

template<typename T> inline if_integral_be_t<T> operator ++(_atomic_base<be_t<T>>& left, int)
{
    be_t<T> result;

    left.atomic_op([&result](be_t<T>& value)
    {
        result = value++;
    });

    return result;
}

template<typename T> inline if_integral_be_t<T> operator --(_atomic_base<be_t<T>>& left, int)
{
    be_t<T> result;

    left.atomic_op([&result](be_t<T>& value)
    {
        result = value--;
    });

    return result;
}
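// Note on the integral operator overloads: on a little-endian host the le_t<>
// storage already matches native byte order, so increments map onto a single
// sync_fetch_and_add; the be_t<> overloads instead go through atomic_op, i.e. a
// compare-and-swap retry loop that reads, modifies in native order and writes
// back. A rough usage sketch (illustrative only; `counter` is a hypothetical
// shared variable, not declared in this header):
//
//     _atomic_base<be_t<u32>> counter;   // e.g. a counter living in guest memory
//     ++counter;                         // CAS loop: load, increment, try to store
//     counter += 10;                     // same pattern, returns the new value as be_t<u32>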
template<typename T, typename T2> inline if_integral_be_t<T, T2> operator +=(_atomic_base<be_t<T>>& left, T2 right)
{
    be_t<T> result;

    left.atomic_op([&result, right](be_t<T>& value)
    {
        result = (value += right);
    });

    return result;
}

template<typename T, typename T2> inline if_integral_be_t<T, T2> operator -=(_atomic_base<be_t<T>>& left, T2 right)
{
    be_t<T> result;

    left.atomic_op([&result, right](be_t<T>& value)
    {
        result = (value -= right);
    });

    return result;
}

template<typename T> using atomic = _atomic_base<T>; // Atomic Type with native endianness (for emulator memory)

template<typename T> using atomic_be_t = _atomic_base<be_t<T>>; // Atomic BE Type (for PS3 virtual memory)

template<typename T> using atomic_le_t = _atomic_base<le_t<T>>; // Atomic LE Type (for PSV virtual memory)
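// Minimal usage sketch for the aliases above (illustrative only, not part of the
// header; assumes u32 is defined as elsewhere in the codebase):
//
//     atomic<u32> flags;                  // native-endian atomic (emulator-side state)
//     flags.write_relaxed(0);             // plain store, no barrier
//     flags._or(0x1);                     // atomic fetch-OR, returns the previous value
//     flags.compare_and_swap_test(1, 2);  // CAS: 1 -> 2, true if the swap happened
//
//     // arbitrary read-modify-write through a lambda, retried on contention:
//     flags.atomic_op([](u32& value)
//     {
//         value = (value << 1) | 1;
//     });
//
//     atomic_be_t<u32> guest_counter;     // big-endian view, e.g. PS3 guest memory
//     guest_counter++;                    // goes through the CAS-loop operators above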