#pragma once

#include <cstddef>
#include <cstdint>
#include <type_traits>

// Note: u128, be_t/le_t, to_be_t/to_le_t, __forceinline and the sync_* atomic
// wrappers used below are expected to be provided by other headers in this project.

template<typename T, size_t size = sizeof(T)> struct _to_atomic_subtype
{
    static_assert(size == 1 || size == 2 || size == 4 || size == 8 || size == 16, "Invalid atomic type");
};

template<typename T> struct _to_atomic_subtype<T, 1> { using type = uint8_t; };
template<typename T> struct _to_atomic_subtype<T, 2> { using type = uint16_t; };
template<typename T> struct _to_atomic_subtype<T, 4> { using type = uint32_t; };
template<typename T> struct _to_atomic_subtype<T, 8> { using type = uint64_t; };
template<typename T> struct _to_atomic_subtype<T, 16> { using type = u128; };

template<typename T> union _atomic_base
{
    using type = typename std::remove_cv<T>::type;
    using subtype = typename _to_atomic_subtype<type, sizeof(type)>::type;

    type data; // unsafe direct access
    subtype sub_data; // unsafe direct access to substitute type

    __forceinline static const subtype to_subtype(const type& value)
    {
        return reinterpret_cast<const subtype&>(value);
    }

    __forceinline static const type from_subtype(const subtype value)
    {
        return reinterpret_cast<const type&>(value);
    }

    __forceinline static type& to_type(subtype& value)
    {
        return reinterpret_cast<type&>(value);
    }

public:
    // atomically compare data with cmp, replace with exch if equal, return the previous value either way
    __forceinline const type compare_and_swap(const type& cmp, const type& exch) volatile
    {
        return from_subtype(sync_val_compare_and_swap(&sub_data, to_subtype(cmp), to_subtype(exch)));
    }

    // atomically compare data with cmp, replace with exch if equal, return true if data was replaced
    __forceinline bool compare_and_swap_test(const type& cmp, const type& exch) volatile
    {
        return sync_bool_compare_and_swap(&sub_data, to_subtype(cmp), to_subtype(exch));
    }

    // read data with a memory barrier
    __forceinline const type read_sync() const volatile
    {
        return from_subtype(sync_val_compare_and_swap(const_cast<subtype*>(&sub_data), 0, 0));
    }

    // atomically replace data with exch, return the previous value
    __forceinline const type exchange(const type& exch) volatile
    {
        return from_subtype(sync_lock_test_and_set(&sub_data, to_subtype(exch)));
    }

    // read data without a memory barrier
    __forceinline const type read_relaxed() const volatile
    {
        const subtype value = const_cast<const subtype&>(sub_data);
        return from_subtype(value);
    }

    // write data without a memory barrier
    __forceinline void write_relaxed(const type& value) volatile
    {
        const_cast<subtype&>(sub_data) = to_subtype(value);
    }

    // perform an atomic operation on data
    template<typename FT> __forceinline void atomic_op(const FT atomic_proc) volatile
    {
        while (true)
        {
            const subtype old = const_cast<const subtype&>(sub_data);
            subtype _new = old;
            atomic_proc(to_type(_new)); // function should accept a reference to the value type
            if (sync_bool_compare_and_swap(&sub_data, old, _new)) return;
        }
    }

    // perform an atomic operation on data with a special exit condition (if the intermediate result != proceed_value)
    template<typename RT, typename FT> __forceinline RT atomic_op(const RT proceed_value, const FT atomic_proc) volatile
    {
        while (true)
        {
            const subtype old = const_cast<const subtype&>(sub_data);
            subtype _new = old;
            auto res = static_cast<RT>(atomic_proc(to_type(_new))); // function should accept a reference to the value type and return some value
            if (res != proceed_value) return res;
            if (sync_bool_compare_and_swap(&sub_data, old, _new)) return proceed_value;
        }
    }

    // perform an atomic operation on data with an additional memory barrier
    template<typename FT> __forceinline void atomic_op_sync(const FT atomic_proc) volatile
    {
        subtype old = sync_val_compare_and_swap(&sub_data, 0, 0);
        while (true)
        {
            subtype _new = old;
            atomic_proc(to_type(_new)); // function should accept a reference to the value type
            const subtype val = sync_val_compare_and_swap(&sub_data, old, _new);
            if (val == old) return;
            old = val;
        }
    }
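    // Both atomic_op and atomic_op_sync follow the usual compare-and-swap retry
    // loop: snapshot the raw value, apply the functor to a copy, then try to
    // publish the copy with a CAS, retrying if another thread modified the data
    // in between. atomic_op_sync additionally takes its initial snapshot with a
    // CAS(0, 0), which acts as a full barrier, and reuses the value returned by
    // a failed CAS instead of re-reading. Illustrative sketch, assuming a
    // hypothetical atomic<uint32_t> named "flags":
    //
    //     flags.atomic_op([](uint32_t& value)
    //     {
    //         value = (value << 1) | 1; // arbitrary non-atomic transformation
    //     });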
    // perform an atomic operation on data with an additional memory barrier and a special exit condition (if the intermediate result != proceed_value)
    template<typename RT, typename FT> __forceinline RT atomic_op_sync(const RT proceed_value, const FT atomic_proc) volatile
    {
        subtype old = sync_val_compare_and_swap(&sub_data, 0, 0);
        while (true)
        {
            subtype _new = old;
            auto res = static_cast<RT>(atomic_proc(to_type(_new))); // function should accept a reference to the value type and return some value
            if (res != proceed_value) return res;
            const subtype val = sync_val_compare_and_swap(&sub_data, old, _new);
            if (val == old) return proceed_value;
            old = val;
        }
    }

    // atomic bitwise OR, returns previous data
    __forceinline const type _or(const type& right) volatile
    {
        return from_subtype(sync_fetch_and_or(&sub_data, to_subtype(right)));
    }

    // atomic bitwise AND, returns previous data
    __forceinline const type _and(const type& right) volatile
    {
        return from_subtype(sync_fetch_and_and(&sub_data, to_subtype(right)));
    }

    // atomic bitwise AND NOT (inverts the right argument), returns previous data
    __forceinline const type _and_not(const type& right) volatile
    {
        return from_subtype(sync_fetch_and_and(&sub_data, ~to_subtype(right)));
    }

    // atomic bitwise XOR, returns previous data
    __forceinline const type _xor(const type& right) volatile
    {
        return from_subtype(sync_fetch_and_xor(&sub_data, to_subtype(right)));
    }

    __forceinline const type operator |= (const type& right) volatile
    {
        return from_subtype(sync_fetch_and_or(&sub_data, to_subtype(right)) | to_subtype(right));
    }

    __forceinline const type operator &= (const type& right) volatile
    {
        return from_subtype(sync_fetch_and_and(&sub_data, to_subtype(right)) & to_subtype(right));
    }

    __forceinline const type operator ^= (const type& right) volatile
    {
        return from_subtype(sync_fetch_and_xor(&sub_data, to_subtype(right)) ^ to_subtype(right));
    }
};

// Helper definitions
template<typename T, typename T2 = T> using if_arithmetic_le_t = const typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<T2>::value, le_t<T>>::type;
template<typename T, typename T2 = T> using if_arithmetic_be_t = const typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<T2>::value, be_t<T>>::type;

template<typename T> inline static if_arithmetic_le_t<T> operator ++(_atomic_base<le_t<T>>& left)
{
    return left.from_subtype(sync_fetch_and_add(&left.sub_data, 1) + 1);
}

template<typename T> inline static if_arithmetic_le_t<T> operator --(_atomic_base<le_t<T>>& left)
{
    return left.from_subtype(sync_fetch_and_sub(&left.sub_data, 1) - 1);
}

template<typename T> inline static if_arithmetic_le_t<T> operator ++(_atomic_base<le_t<T>>& left, int)
{
    return left.from_subtype(sync_fetch_and_add(&left.sub_data, 1));
}

template<typename T> inline static if_arithmetic_le_t<T> operator --(_atomic_base<le_t<T>>& left, int)
{
    return left.from_subtype(sync_fetch_and_sub(&left.sub_data, 1));
}

template<typename T, typename T2> inline static if_arithmetic_le_t<T, T2> operator +=(_atomic_base<le_t<T>>& left, T2 right)
{
    return left.from_subtype(sync_fetch_and_add(&left.sub_data, right) + right);
}

template<typename T, typename T2> inline static if_arithmetic_le_t<T, T2> operator -=(_atomic_base<le_t<T>>& left, T2 right)
{
    return left.from_subtype(sync_fetch_and_sub(&left.sub_data, right) - right);
}
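// The le_t overloads above can use sync_fetch_and_add/sync_fetch_and_sub on the
// raw storage directly, because on a little-endian host (which this code assumes)
// an le_t payload already has native integer layout. A be_t value is stored
// byte-swapped, so plain integer arithmetic on its raw bits would be wrong; the
// be_t overloads below therefore route the operation through atomic_op, which
// applies it to a copy in value space and publishes the result with a CAS loop.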
template<typename T> inline static if_arithmetic_be_t<T> operator ++(_atomic_base<be_t<T>>& left)
{
    be_t<T> result;
    left.atomic_op([&result](be_t<T>& value)
    {
        result = ++value;
    });
    return result;
}

template<typename T> inline static if_arithmetic_be_t<T> operator --(_atomic_base<be_t<T>>& left)
{
    be_t<T> result;
    left.atomic_op([&result](be_t<T>& value)
    {
        result = --value;
    });
    return result;
}

template<typename T> inline static if_arithmetic_be_t<T> operator ++(_atomic_base<be_t<T>>& left, int)
{
    be_t<T> result;
    left.atomic_op([&result](be_t<T>& value)
    {
        result = value++;
    });
    return result;
}

template<typename T> inline static if_arithmetic_be_t<T> operator --(_atomic_base<be_t<T>>& left, int)
{
    be_t<T> result;
    left.atomic_op([&result](be_t<T>& value)
    {
        result = value--;
    });
    return result;
}

template<typename T, typename T2> inline static if_arithmetic_be_t<T, T2> operator +=(_atomic_base<be_t<T>>& left, T2 right)
{
    be_t<T> result;
    left.atomic_op([&result, right](be_t<T>& value)
    {
        result = (value += right);
    });
    return result;
}

template<typename T, typename T2> inline static if_arithmetic_be_t<T, T2> operator -=(_atomic_base<be_t<T>>& left, T2 right)
{
    be_t<T> result;
    left.atomic_op([&result, right](be_t<T>& value)
    {
        result = (value -= right);
    });
    return result;
}

template<typename T> using atomic = _atomic_base<T>; // Atomic Type with native endianness (for emulator memory)
template<typename T> using atomic_be_t = _atomic_base<typename to_be_t<T>::type>; // Atomic BE Type (for PS3 virtual memory)
template<typename T> using atomic_le_t = _atomic_base<typename to_le_t<T>::type>; // Atomic LE Type (for PSV virtual memory)
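// Usage sketch (illustrative only; uint32_t shown for brevity, and be_t/le_t
// construction details depend on the project's endian helpers):
//
//     atomic<uint32_t> counter;
//     counter.write_relaxed(0);             // plain store, no barrier
//     uint32_t prev = counter.exchange(5);  // atomic swap, prev == 0
//     counter._or(0x10);                    // fetch-OR, returns previous value (5)
//     uint32_t seen = counter.read_sync();  // load with full barrier: 0x15
//
//     atomic_le_t<uint32_t> ticket;         // LE payload: ++/--/+=/-= map to
//     ticket++;                             // sync_fetch_and_add on the raw storage
//
//     atomic_be_t<uint32_t> be_value;       // BE payload: the same operators go
//     be_value += 3;                        // through an atomic_op CAS loop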