split rpcs3 and hle libraries

merge rpcs3 utilities
This commit is contained in:
DH 2025-04-08 19:46:57 +03:00
parent b33e2662b6
commit 62ad27d1e2
1233 changed files with 7004 additions and 3819 deletions

2481
rpcs3/Emu/Memory/vm.cpp Normal file

File diff suppressed because it is too large Load diff

386
rpcs3/Emu/Memory/vm.h Normal file
View file

@ -0,0 +1,386 @@
#pragma once
#include <memory>
#include <map>
#include "util/types.hpp"
#include "util/atomic.hpp"
#include "util/auto_typemap.hpp"
#include "util/to_endian.hpp"
#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
#include "rpcs3qt/breakpoint_handler.h"
#include "util/logs.hpp"
LOG_CHANNEL(debugbp_log, "DebugBP");
class ppu_thread;
void ppubreak(ppu_thread& ppu);
#endif
namespace utils
{
class shm;
class address_range;
} // namespace utils
namespace vm
{
extern u8* const g_base_addr;
extern u8* const g_sudo_addr;
extern u8* const g_exec_addr;
extern u8* const g_stat_addr;
extern u8* const g_free_addr;
extern u8 g_reservations[65536 / 128 * 64];
struct writer_lock;
// Identifiers for the distinct allocation areas of the guest address space
enum memory_location_t : uint
{
main,
user64k, // user area using 64K pages (cf. page_size_64k)
user1m, // user area using 1M pages (cf. page_size_1m)
rsx_context,
video,
stack,
spu,
memory_location_max, // count of real locations
any = 0xffffffff, // wildcard used as default by alloc/dealloc/falloc
};
// Per-4K-page state flags (one atomic byte per page, stored in g_pages)
enum page_info_t : u8
{
page_readable = (1 << 0),
page_writable = (1 << 1),
page_executable = (1 << 2),
page_fault_notification = (1 << 3), // access faults generate a notification
page_no_reservations = (1 << 4),
page_64k_size = (1 << 5),
page_1m_size = (1 << 6),
page_allocated = (1 << 7), // page is mapped; required by check_addr()
};
// Address type: strong typedef of u32 for guest virtual addresses
// (empty enum prevents accidental implicit conversion from plain integers)
enum addr_t : u32
{
};
// Page information
using memory_page = atomic_t<u8>;
// Change memory protection of specified memory region
bool page_protect(u32 addr, u32 size, u8 flags_test = 0, u8 flags_set = 0, u8 flags_clear = 0);
// Check flags for specified memory range (unsafe)
bool check_addr(u32 addr, u8 flags, u32 size);
// Fast compile-time-sized variant of check_addr: single page-table probe when the
// access is a power-of-two size <= 4096 and naturally aligned
template <u32 Size = 1>
bool check_addr(u32 addr, u8 flags = page_readable)
{
extern std::array<memory_page, 0x100000000 / 4096> g_pages;
// Slow path when the access may span pages: Size == 0 or > 4096 (Size - 1 wraps for 0),
// Size not a power of two, or addr misaligned with respect to Size
if (Size - 1 >= 4095u || Size & (Size - 1) || addr % Size)
{
// TODO
return check_addr(addr, flags, Size);
}
// Single page: all requested flags plus page_allocated must be set
return !(~g_pages[addr / 4096] & (flags | page_allocated));
}
// Read string in a safe manner (page aware) (bool true = if null-termination)
bool read_string(u32 addr, u32 max_size, std::string& out_string, bool check_pages = true) noexcept;
// Search and map memory in specified memory location (min alignment is 0x10000)
u32 alloc(u32 size, memory_location_t location, u32 align = 0x10000);
// Map memory at specified address (in optionally specified memory location)
bool falloc(u32 addr, u32 size, memory_location_t location = any, const std::shared_ptr<utils::shm>* src = nullptr);
// Unmap memory at specified address (in optionally specified memory location), return size
u32 dealloc(u32 addr, memory_location_t location = any, const std::shared_ptr<utils::shm>* src = nullptr);
// utils::memory_lock wrapper for locking sudo memory
void lock_sudo(u32 addr, u32 size);
// Block flags (byte 0x0F00 mirrors the sys_memory page size constants; low nibble is raw)
enum block_flags_3
{
page_size_4k = 0x100, // SYS_MEMORY_PAGE_SIZE_4K
page_size_64k = 0x200, // SYS_MEMORY_PAGE_SIZE_64K
page_size_1m = 0x400, // SYS_MEMORY_PAGE_SIZE_1M
page_size_mask = 0xF00, // SYS_MEMORY_PAGE_SIZE_MASK
stack_guarded = 0x10, // presumably adds guard pages around stack allocations - TODO confirm
preallocated = 0x20, // nonshareable
bf0_0x1 = 0x1, // TODO: document
bf0_0x2 = 0x2, // TODO: document
bf0_mask = bf0_0x1 | bf0_0x2,
};
// Additional protection flags accepted by block_t::alloc/falloc (combined with block flags)
enum alloc_flags
{
alloc_hidden = 0x1000,
alloc_unwritable = 0x2000,
alloc_executable = 0x4000,
alloc_prot_mask = alloc_hidden | alloc_unwritable | alloc_executable, // all protection bits
};
// Object that handles memory allocations inside specific constant bounds ("location")
class block_t final
{
// Per-block auxiliary storage (project typemap utility)
auto_typemap<block_t> m;
// Common mapped region for special cases
std::shared_ptr<utils::shm> m_common;
// Nonzero while the block is valid (see is_valid())
atomic_t<u64> m_id = 0;
// Try to map memory at a fixed address within the block
bool try_alloc(u32 addr, u64 bflags, u32 size, std::shared_ptr<utils::shm>&&) const;
// Unmap block
bool unmap(std::vector<std::pair<u64, u64>>* unmapped = nullptr);
friend bool _unmap_block(const std::shared_ptr<block_t>&, std::vector<std::pair<u64, u64>>* unmapped);
public:
block_t(u32 addr, u32 size, u64 flags);
~block_t();
public:
const u32 addr; // Start address
const u32 size; // Total size
const u64 flags; // Byte 0xF000: block_flags_3
// Byte 0x0F00: block_flags_2_page_size (SYS_MEMORY_PAGE_SIZE_*)
// Byte 0x00F0: block_flags_1
// Byte 0x000F: block_flags_0
// Search and map memory (min alignment is 0x10000)
u32 alloc(u32 size, const std::shared_ptr<utils::shm>* = nullptr, u32 align = 0x10000, u64 flags = 0);
// Try to map memory at fixed location
bool falloc(u32 addr, u32 size, const std::shared_ptr<utils::shm>* = nullptr, u64 flags = 0);
// Unmap memory at specified location previously returned by alloc(), return size
u32 dealloc(u32 addr, const std::shared_ptr<utils::shm>* = nullptr) const;
// Get memory at specified address (if size = 0, addr assumed exact)
std::pair<u32, std::shared_ptr<utils::shm>> peek(u32 addr, u32 size = 0) const;
// Get allocated memory count
u32 used();
// Internal
u32 imp_used(const vm::writer_lock&) const;
// Returns 0 if invalid, non-zero unique id if valid
u64 is_valid() const
{
return m_id;
}
// Serialization helper for shared memory
void get_shared_memory(std::vector<std::pair<utils::shm*, u32>>& shared);
// Returns sample address for shared memory, 0 on failure
u32 get_shm_addr(const std::shared_ptr<utils::shm>& shared);
// Serialization
void save(utils::serial& ar, std::map<utils::shm*, usz>& shared);
block_t(utils::serial& ar, std::vector<std::shared_ptr<utils::shm>>& shared);
};
// Create new memory block with specified parameters and return it
std::shared_ptr<block_t> map(u32 addr, u32 size, u64 flags = 0);
// Create new memory block with at arbitrary position with specified alignment
std::shared_ptr<block_t> find_map(u32 size, u32 align, u64 flags = 0);
// Delete existing memory block with specified start address, .first=its ptr, .second=success
std::pair<std::shared_ptr<block_t>, bool> unmap(u32 addr, bool must_be_empty = false, const std::shared_ptr<block_t>* ptr = nullptr);
// Get memory block associated with optionally specified memory location or optionally specified address
std::shared_ptr<block_t> get(memory_location_t location, u32 addr = 0);
// Allocate segment at specified location, does nothing if exists already
std::shared_ptr<block_t> reserve_map(memory_location_t location, u32 addr, u32 area_size, u64 flags = page_size_64k);
// Get PS3 virtual memory address from the provided pointer (nullptr or pointer from outside is always converted to 0)
// Super memory is allowed as well
inline std::pair<vm::addr_t, bool> try_get_addr(const void* real_ptr)
{
	// Unsigned distance from the start of the guest memory mirror
	const auto offset = static_cast<std::make_unsigned_t<std::ptrdiff_t>>(static_cast<const u8*>(real_ptr) - g_base_addr);

	// Anything beyond the first 8 GiB (base + super mirrors) is foreign memory
	if (offset > u64{u32{umax}} * 2 + 1)
	{
		return {};
	}

	return {vm::addr_t{static_cast<u32>(offset)}, true};
}
// Unsafe convert host ptr to PS3 VM address (clamp with 4GiB alignment assumption)
inline vm::addr_t get_addr(const void* ptr)
{
	// Keep only the low 32 bits of the host address
	const uptr raw = uptr(ptr);
	return vm::addr_t{static_cast<u32>(raw)};
}
// Checked conversion of a wide or signed integer to a guest address;
// narrowing is verified at runtime via ::narrow (reports the caller's source location)
template <typename T>
requires(std::is_integral_v<decltype(+T{})> && (sizeof(+T{}) > 4 || std::is_signed_v<decltype(+T{})>))
vm::addr_t cast(const T& addr, std::source_location src_loc = std::source_location::current())
{
return vm::addr_t{::narrow<u32>(+addr, src_loc)};
}
// Trivial conversion of an unsigned integer already <= 32 bits: no check needed
template <typename T>
requires(std::is_integral_v<decltype(+T{})> && (sizeof(+T{}) <= 4 && !std::is_signed_v<decltype(+T{})>))
vm::addr_t cast(const T& addr, u32 = 0, u32 = 0, const char* = nullptr, const char* = nullptr)
{
return vm::addr_t{static_cast<u32>(+addr)};
}
// Convert specified PS3/PSV virtual memory address to a pointer for common access
template <typename T>
requires(std::is_integral_v<decltype(+T{})>)
inline void* base(T addr)
{
// vm::cast validates/narrows the address before indexing the base mirror
return g_base_addr + static_cast<u32>(vm::cast(addr));
}
// Read a byte from guest memory (no endianness concerns for u8)
inline const u8& read8(u32 addr)
{
return g_base_addr[addr];
}
// Write a byte to guest memory; the breakpoint-aware build variant additionally
// checks write breakpoints for the (optional) writing PPU thread after the store
#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
inline void write8(u32 addr, u8 value, ppu_thread* ppu = nullptr)
#else
inline void write8(u32 addr, u8 value)
#endif
{
g_base_addr[addr] = value;
#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
if (ppu && g_breakpoint_handler.HasBreakpoint(addr, breakpoint_types::bp_write))
{
debugbp_log.success("BPMW: breakpoint writing(8) 0x%x at 0x%x", value, addr);
ppubreak(*ppu);
}
#endif
}
// Read or write virtual memory in a safe manner, returns false on failure
bool try_access(u32 addr, void* ptr, u32 size, bool is_write);
inline namespace ps3_
{
// Convert specified PS3 address to a pointer of specified (possibly converted to BE) type
template <typename T, typename U>
inline to_be_t<T>* _ptr(const U& addr)
{
return static_cast<to_be_t<T>*>(base(addr));
}
// Convert specified PS3 address to a reference of specified (possibly converted to BE) type
template <typename T, typename U>
inline to_be_t<T>& _ref(const U& addr)
{
return *static_cast<to_be_t<T>*>(base(addr));
}
// Access memory bypassing memory protection
template <typename T = u8>
inline to_be_t<T>* get_super_ptr(u32 addr)
{
return reinterpret_cast<to_be_t<T>*>(g_sudo_addr + addr);
}
// Read big-endian 16-bit value at the given guest address
inline const be_t<u16>& read16(u32 addr)
{
return _ref<u16>(addr);
}
// Write big-endian 16-bit value; breakpoint-aware variant checks write breakpoints
#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
inline void write16(u32 addr, be_t<u16> value, ppu_thread* ppu = nullptr)
#else
inline void write16(u32 addr, be_t<u16> value)
#endif
{
_ref<u16>(addr) = value;
#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
if (ppu && g_breakpoint_handler.HasBreakpoint(addr, breakpoint_types::bp_write))
{
debugbp_log.success("BPMW: breakpoint writing(16) 0x%x at 0x%x", value, addr);
ppubreak(*ppu);
}
#endif
}
// Read big-endian 32-bit value at the given guest address
inline const be_t<u32>& read32(u32 addr)
{
return _ref<u32>(addr);
}
// Write big-endian 32-bit value; breakpoint-aware variant checks write breakpoints
#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
inline void write32(u32 addr, be_t<u32> value, ppu_thread* ppu = nullptr)
#else
inline void write32(u32 addr, be_t<u32> value)
#endif
{
_ref<u32>(addr) = value;
#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
if (ppu && g_breakpoint_handler.HasBreakpoint(addr, breakpoint_types::bp_write))
{
debugbp_log.success("BPMW: breakpoint writing(32) 0x%x at 0x%x", value, addr);
ppubreak(*ppu);
}
#endif
}
// Read big-endian 64-bit value at the given guest address
inline const be_t<u64>& read64(u32 addr)
{
return _ref<u64>(addr);
}
// Write big-endian 64-bit value; breakpoint-aware variant checks write breakpoints
#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
inline void write64(u32 addr, be_t<u64> value, ppu_thread* ppu = nullptr)
#else
inline void write64(u32 addr, be_t<u64> value)
#endif
{
_ref<u64>(addr) = value;
#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
if (ppu && g_breakpoint_handler.HasBreakpoint(addr, breakpoint_types::bp_write))
{
debugbp_log.success("BPMW: breakpoint writing(64) 0x%x at 0x%x", value, addr);
ppubreak(*ppu);
}
#endif
}
// Initialize the virtual memory subsystem
void init();
} // namespace ps3_
void close();
void load(utils::serial& ar);
void save(utils::serial& ar);
// Returns sample address for shared memory, 0 on failure (wraps block_t::get_shm_addr)
u32 get_shm_addr(const std::shared_ptr<utils::shm>& shared);
template <typename T, typename AT>
class _ptr_base;
template <typename T, typename AT>
class _ref_base;
} // namespace vm

View file

@ -0,0 +1,102 @@
#pragma once
#include "vm.h"
#include "Emu/RSX/rsx_utils.h"
class cpu_thread;
class shared_mutex;
namespace vm
{
extern thread_local atomic_t<cpu_thread*>* g_tls_locked;
// Flags packed into the top 3 bits of a range-lock word (low bits hold addr/size)
enum range_lock_flags : u64
{
/* flags (3 bits, W + R + Reserved) */
range_writable = 4ull << 61,
range_readable = 2ull << 61,
range_reserved = 1ull << 61,
range_full_mask = 7ull << 61,
/* flag combinations with special meaning */
range_locked = 4ull << 61, // R+W as well, but being exclusively accessed (size extends addr)
range_allocation = 0, // Allocation, no safe access, g_shmem may change at ANY location
range_pos = 61, // bit position of the flag field
range_bits = 3, // width of the flag field
};
extern atomic_t<u64, 64> g_range_lock_bits[2];
extern atomic_t<u64> g_shmem[];
// Register reader
void passive_lock(cpu_thread& cpu);
// Register range lock for further use
atomic_t<u64, 64>* alloc_range_lock();
void range_lock_internal(atomic_t<u64, 64>* range_lock, u32 begin, u32 size);
// Lock memory range ignoring memory protection (Size!=0 also implies aligned begin)
template <uint Size = 0>
FORCE_INLINE void range_lock(atomic_t<u64, 64>* range_lock, u32 begin, u32 _size)
{
if constexpr (Size == 0)
{
// Skip locking for RSX local memory (matches by high nibble of the address)
// NOTE(review): assumes the whole local memory region shares local_mem_base >> 28 - confirm
if (begin >> 28 == rsx::constants::local_mem_base >> 28)
{
return;
}
}
// Optimistic locking.
// Note that we store the range we will be accessing, without any clamping.
range_lock->store(begin | (u64{_size} << 32));
// Old-style conditional constexpr
const u32 size = Size ? Size : _size;
// After publishing the lock, verify page flags; single-page accesses use the fast check
if (Size == 1 || (begin % 4096 + size % 4096) / 4096 == 0 ? !vm::check_addr(begin) : !vm::check_addr(begin, vm::page_readable, size))
{
// Retract the optimistic lock and go through the slow path
range_lock->release(0);
range_lock_internal(range_lock, begin, _size);
return;
}
#ifndef _MSC_VER
__asm__(""); // Tiny barrier
#endif
// Fast path succeeds only if no global range lock is currently held
if (!g_range_lock_bits[1]) [[likely]]
{
return;
}
// Fallback to slow path
range_lock_internal(range_lock, begin, size);
}
// Release it
void free_range_lock(atomic_t<u64, 64>*) noexcept;
// Unregister reader
void passive_unlock(cpu_thread& cpu);
// Optimization (set cpu_flag::memory)
bool temporary_unlock(cpu_thread& cpu) noexcept;
void temporary_unlock() noexcept;
// RAII exclusive writer lock over guest memory (non-copyable)
struct writer_lock final
{
atomic_t<u64, 64>* range_lock; // optional associated per-thread range lock
writer_lock(const writer_lock&) = delete;
writer_lock& operator=(const writer_lock&) = delete;
writer_lock() noexcept;
writer_lock(u32 addr, atomic_t<u64, 64>* range_lock = nullptr, u32 size = 128, u64 flags = range_locked) noexcept;
~writer_lock() noexcept;
};
} // namespace vm

522
rpcs3/Emu/Memory/vm_ptr.h Normal file
View file

@ -0,0 +1,522 @@
#pragma once
#include "util/types.hpp"
#include "util/to_endian.hpp"
#include "util/StrFmt.h"
#include "vm.h"
class ppu_thread;
struct ppu_func_opd_t;
namespace vm
{
template <typename T, typename AT>
class _ref_base;
// Enables comparison between comparable types of pointers
// (satisfied when host pointers T1* and T2* can themselves be compared with ==)
template <typename T1, typename T2>
concept PtrComparable = requires(T1* t1, T2* t2) { t1 == t2; };
// Typed pointer into guest memory: stores only a guest address (AT, usually u32),
// dereferencing resolves through the host base mirror
template <typename T, typename AT>
class _ptr_base
{
AT m_addr;
static_assert(!std::is_pointer_v<T>, "vm::_ptr_base<> error: invalid type (pointer)");
static_assert(!std::is_reference_v<T>, "vm::_ptr_base<> error: invalid type (reference)");
public:
using type = T;
using addr_type = std::remove_cv_t<AT>;
ENABLE_BITWISE_SERIALIZATION;
_ptr_base() = default;
_ptr_base(vm::addr_t addr) noexcept
: m_addr(addr)
{
}
// Guest address of the pointee
addr_type addr() const
{
return m_addr;
}
void set(addr_type addr)
{
this->m_addr = addr;
}
static _ptr_base make(addr_type addr)
{
_ptr_base result;
result.m_addr = addr;
return result;
}
// Enable only the conversions which are originally possible between pointer types
template <typename T2, typename AT2>
requires(std::is_convertible_v<T*, T2*>)
operator _ptr_base<T2, AT2>() const noexcept
{
return vm::cast(m_addr);
}
// True for non-null pointers
explicit operator bool() const noexcept
{
return m_addr != 0u;
}
// Get vm pointer to a struct member
template <typename MT, typename T2>
requires PtrComparable<T, T2>
_ptr_base<MT, u32> ptr(MT T2::* const mptr) const
{
return vm::cast(vm::cast(m_addr) + offset32(mptr));
}
// Get vm pointer to a struct member with array subscription
template <typename MT, typename T2, typename ET = std::remove_extent_t<MT>>
requires PtrComparable<T, T2>
_ptr_base<ET, u32> ptr(MT T2::* const mptr, u32 index) const
{
return vm::cast(vm::cast(m_addr) + offset32(mptr) + u32{sizeof(ET)} * index);
}
// Get vm reference to a struct member
template <typename MT, typename T2>
requires PtrComparable<T, T2> && (!std::is_void_v<T>)
_ref_base<MT, u32> ref(MT T2::* const mptr) const
{
return vm::cast(vm::cast(m_addr) + offset32(mptr));
}
// Get vm reference to a struct member with array subscription
template <typename MT, typename T2, typename ET = std::remove_extent_t<MT>>
requires PtrComparable<T, T2> && (!std::is_void_v<T>)
_ref_base<ET, u32> ref(MT T2::* const mptr, u32 index) const
{
return vm::cast(vm::cast(m_addr) + offset32(mptr) + u32{sizeof(ET)} * index);
}
// Get vm reference
_ref_base<T, u32> ref() const
requires(!std::is_void_v<T>)
{
return vm::cast(m_addr);
}
// Resolve to a host pointer; Strict additionally audits that the address is non-null
template <bool Strict = false>
T* get_ptr() const
{
if constexpr (Strict)
{
AUDIT(m_addr);
}
return static_cast<T*>(vm::base(vm::cast(m_addr)));
}
T* operator->() const
requires(!std::is_void_v<T>)
{
return get_ptr<true>();
}
std::add_lvalue_reference_t<T> operator*() const
requires(!std::is_void_v<T>)
{
return *get_ptr<true>();
}
std::add_lvalue_reference_t<T> operator[](u32 index) const
requires(!std::is_void_v<T>)
{
AUDIT(m_addr);
return *static_cast<T*>(vm::base(vm::cast(m_addr) + u32{sizeof(T)} * index));
}
// Test address for arbitrary alignment: (addr & (align - 1)) == 0
bool aligned(u32 align = alignof(T)) const
{
return (m_addr & (align - 1)) == 0u;
}
// Get type size
static constexpr u32 size() noexcept
requires(!std::is_void_v<T>)
{
return sizeof(T);
}
// Get type alignment
static constexpr u32 align() noexcept
requires(!std::is_void_v<T>)
{
return alignof(T);
}
// Unary plus: normalize to the canonical u32-addressed pointer type
_ptr_base<T, u32> operator+() const
{
return vm::cast(m_addr);
}
_ptr_base<T, u32> operator+(u32 count) const
requires(!std::is_void_v<T>)
{
return vm::cast(vm::cast(m_addr) + count * size());
}
_ptr_base<T, u32> operator-(u32 count) const
requires(!std::is_void_v<T>)
{
return vm::cast(vm::cast(m_addr) - count * size());
}
friend _ptr_base<T, u32> operator+(u32 count, const _ptr_base& ptr)
requires(!std::is_void_v<T>)
{
return vm::cast(vm::cast(ptr.m_addr) + count * size());
}
// Pointer difference operator
template <typename T2, typename AT2>
requires(std::is_object_v<T2> && std::is_same_v<std::decay_t<T>, std::decay_t<T2>>)
s32 operator-(const _ptr_base<T2, AT2>& right) const
{
return static_cast<s32>(vm::cast(m_addr) - vm::cast(right.m_addr)) / size();
}
_ptr_base operator++(int)
requires(!std::is_void_v<T>)
{
_ptr_base result = *this;
m_addr = vm::cast(m_addr) + size();
return result;
}
_ptr_base& operator++()
requires(!std::is_void_v<T>)
{
m_addr = vm::cast(m_addr) + size();
return *this;
}
_ptr_base operator--(int)
requires(!std::is_void_v<T>)
{
_ptr_base result = *this;
m_addr = vm::cast(m_addr) - size();
return result;
}
_ptr_base& operator--()
requires(!std::is_void_v<T>)
{
m_addr = vm::cast(m_addr) - size();
return *this;
}
_ptr_base& operator+=(s32 count)
requires(!std::is_void_v<T>)
{
m_addr = vm::cast(m_addr) + count * size();
return *this;
}
_ptr_base& operator-=(s32 count)
requires(!std::is_void_v<T>)
{
m_addr = vm::cast(m_addr) - count * size();
return *this;
}
// Page-aware read into a temporary; .first indicates success
std::pair<bool, std::conditional_t<std::is_void_v<T>, char, std::remove_const_t<T>>> try_read() const
requires(std::is_copy_constructible_v<T>)
{
alignas(sizeof(T) >= 16 ? 16 : 8) char buf[sizeof(T)]{};
const bool ok = vm::try_access(vm::cast(m_addr), buf, sizeof(T), false);
return {ok, std::bit_cast<decltype(try_read().second)>(buf)};
}
// Page-aware read into caller storage; returns false on access failure
bool try_read(std::conditional_t<std::is_void_v<T>, char, std::remove_const_t<T>>& out) const
requires(!std::is_void_v<T>)
{
return vm::try_access(vm::cast(m_addr), std::addressof(out), sizeof(T), false);
}
// Page-aware write; returns false on access failure
bool try_write(const std::conditional_t<std::is_void_v<T>, char, T>& _in) const
requires(!std::is_void_v<T>)
{
return vm::try_access(vm::cast(m_addr), const_cast<T*>(std::addressof(_in)), sizeof(T), true);
}
// Don't use
auto& raw()
{
return m_addr;
}
};
// Specialization for guest function pointers (RT(T...)); stores the guest address
// of the callable descriptor (see opd())
template <typename AT, typename RT, typename... T>
class _ptr_base<RT(T...), AT>
{
AT m_addr;
public:
using addr_type = std::remove_cv_t<AT>;
ENABLE_BITWISE_SERIALIZATION;
_ptr_base() = default;
_ptr_base(vm::addr_t addr)
: m_addr(addr)
{
}
addr_type addr() const
{
return m_addr;
}
void set(addr_type addr)
{
m_addr = addr;
}
static _ptr_base make(addr_type addr)
{
_ptr_base result;
result.m_addr = addr;
return result;
}
// Conversion to another function pointer
template <typename AT2>
operator _ptr_base<RT(T...), AT2>() const
{
return vm::cast(m_addr);
}
explicit operator bool() const
{
return m_addr != 0u;
}
// Normalize to the canonical u32-addressed pointer type
_ptr_base<RT(T...), u32> operator+() const
{
return vm::cast(m_addr);
}
// Don't use
auto& raw()
{
return m_addr;
}
// Callback; defined in PPUCallback.h, passing context is mandatory
RT operator()(ppu_thread& ppu, T... args) const;
const ppu_func_opd_t& opd() const;
};
// Reject the RT(*)(T...) spelling at compile time; guest functions use RT(T...)
template <typename AT, typename RT, typename... T>
class _ptr_base<RT (*)(T...), AT>
{
static_assert(!sizeof(AT), "vm::_ptr_base<> error: use RT(T...) format for functions instead of RT(*)(T...)");
};
// Native endianness pointer to LE data
template <typename T, typename AT = u32>
using ptrl = _ptr_base<to_le_t<T>, AT>;
// Const variant of ptrl
template <typename T, typename AT = u32>
using cptrl = ptrl<const T, AT>;
// Native endianness pointer to BE data
template <typename T, typename AT = u32>
using ptrb = _ptr_base<to_be_t<T>, AT>;
// Const variant of ptrb
template <typename T, typename AT = u32>
using cptrb = ptrb<const T, AT>;
// BE pointer to LE data
template <typename T, typename AT = u32>
using bptrl = _ptr_base<to_le_t<T>, to_be_t<AT>>;
// BE pointer to BE data
template <typename T, typename AT = u32>
using bptrb = _ptr_base<to_be_t<T>, to_be_t<AT>>;
// LE pointer to LE data
template <typename T, typename AT = u32>
using lptrl = _ptr_base<to_le_t<T>, to_le_t<AT>>;
// LE pointer to BE data
template <typename T, typename AT = u32>
using lptrb = _ptr_base<to_be_t<T>, to_le_t<AT>>;
inline namespace ps3_
{
// Default pointer type for PS3 HLE functions (Native endianness pointer to BE data)
template <typename T, typename AT = u32>
using ptr = ptrb<T, AT>;
// Const variant of ptr
template <typename T, typename AT = u32>
using cptr = ptr<const T, AT>;
// Default pointer to pointer type for PS3 HLE functions (Native endianness pointer to BE pointer to BE data)
template <typename T, typename AT = u32, typename AT2 = u32>
using pptr = ptr<ptr<T, AT2>, AT>;
// Const variant of pptr
template <typename T, typename AT = u32, typename AT2 = u32>
using cpptr = pptr<const T, AT, AT2>;
// Default pointer type for PS3 HLE structures (BE pointer to BE data)
template <typename T, typename AT = u32>
using bptr = bptrb<T, AT>;
// Const variant of bptr
template <typename T, typename AT = u32>
using bcptr = bptr<const T, AT>;
// Default pointer to pointer type for PS3 HLE structures (BE pointer to BE pointer to BE data)
template <typename T, typename AT = u32, typename AT2 = u32>
using bpptr = bptr<ptr<T, AT2>, AT>;
// Const variant of bpptr
template <typename T, typename AT = u32, typename AT2 = u32>
using bcpptr = bpptr<const T, AT, AT2>;
// Perform static_cast (for example, vm::ptr<void> to vm::ptr<char>)
template <typename CT, typename T, typename AT>
requires requires(T* t) { static_cast<to_be_t<CT>*>(t); }
inline _ptr_base<to_be_t<CT>, u32> static_ptr_cast(const _ptr_base<T, AT>& other)
{
return vm::cast(other.addr());
}
// Perform const_cast (for example, vm::cptr<char> to vm::ptr<char>)
template <typename CT, typename T, typename AT>
requires requires(T* t) { const_cast<to_be_t<CT>*>(t); }
inline _ptr_base<to_be_t<CT>, u32> const_ptr_cast(const _ptr_base<T, AT>& other)
{
return vm::cast(other.addr());
}
// Perform reinterpret cast
template <typename CT, typename T, typename AT>
requires requires(T* t) { reinterpret_cast<to_be_t<CT>*>(t); }
inline _ptr_base<to_be_t<CT>, u32> unsafe_ptr_cast(const _ptr_base<T, AT>& other)
{
return vm::cast(other.addr());
}
} // namespace ps3_
// Tag type representing the null guest pointer (see vm::null below)
struct null_t
{
// Converts to a default-constructed (zero address) pointer of any kind
template <typename T, typename AT>
operator _ptr_base<T, AT>() const
{
return _ptr_base<T, AT>{};
}
// Null equals a pointer iff that pointer is null
template <typename T, typename AT>
constexpr bool operator==(const _ptr_base<T, AT>& ptr) const
{
return !ptr;
}
// Null orders before every non-null pointer
template <typename T, typename AT>
constexpr bool operator<(const _ptr_base<T, AT>& ptr) const
{
return 0 < ptr.addr();
}
};
// Null pointer convertible to any vm::ptr* type
constexpr null_t null{};
} // namespace vm
// Comparisons between compatible vm pointers compare their guest addresses
template <typename T1, typename AT1, typename T2, typename AT2>
requires vm::PtrComparable<T1, T2>
inline bool operator==(const vm::_ptr_base<T1, AT1>& left, const vm::_ptr_base<T2, AT2>& right)
{
	const auto lhs = left.addr();
	const auto rhs = right.addr();
	return lhs == rhs;
}
template <typename T1, typename AT1, typename T2, typename AT2>
requires vm::PtrComparable<T1, T2>
inline bool operator<(const vm::_ptr_base<T1, AT1>& left, const vm::_ptr_base<T2, AT2>& right)
{
	const auto lhs = left.addr();
	const auto rhs = right.addr();
	return lhs < rhs;
}
template <typename T1, typename AT1, typename T2, typename AT2>
requires vm::PtrComparable<T1, T2>
inline bool operator<=(const vm::_ptr_base<T1, AT1>& left, const vm::_ptr_base<T2, AT2>& right)
{
	const auto lhs = left.addr();
	const auto rhs = right.addr();
	return lhs <= rhs;
}
template <typename T1, typename AT1, typename T2, typename AT2>
requires vm::PtrComparable<T1, T2>
inline bool operator>(const vm::_ptr_base<T1, AT1>& left, const vm::_ptr_base<T2, AT2>& right)
{
	const auto lhs = left.addr();
	const auto rhs = right.addr();
	return lhs > rhs;
}
template <typename T1, typename AT1, typename T2, typename AT2>
requires vm::PtrComparable<T1, T2>
inline bool operator>=(const vm::_ptr_base<T1, AT1>& left, const vm::_ptr_base<T2, AT2>& right)
{
	const auto lhs = left.addr();
	const auto rhs = right.addr();
	return lhs >= rhs;
}
// Change AT endianness to BE/LE
template <typename T, typename AT, bool Se>
struct to_se<vm::_ptr_base<T, AT>, Se>
{
using type = vm::_ptr_base<T, typename to_se<AT, Se>::type>;
};
// Format pointer
template <typename T, typename AT>
struct fmt_unveil<vm::_ptr_base<T, AT>>
{
using type = vm::_ptr_base<T, u32>; // Use only T, ignoring AT
static inline auto get(const vm::_ptr_base<T, AT>& arg)
{
return fmt_unveil<AT>::get(arg.addr());
}
};
// Base formatter for all pointers (format() declared here, defined elsewhere)
template <>
struct fmt_class_string<vm::_ptr_base<const void, u32>>
{
static void format(std::string& out, u64 arg);
};
template <typename T>
struct fmt_class_string<vm::_ptr_base<T, u32>> : fmt_class_string<vm::_ptr_base<const void, u32>>
{
// Classify all pointers as const void*
};
// Formatter for char pointers (format() declared here, defined elsewhere)
template <>
struct fmt_class_string<vm::_ptr_base<const char, u32>>
{
static void format(std::string& out, u64 arg);
};
template <>
struct fmt_class_string<vm::_ptr_base<char, u32>> : fmt_class_string<vm::_ptr_base<const char, u32>>
{
// Classify char* as const char*
};
template <usz Size>
struct fmt_class_string<vm::_ptr_base<const char[Size], u32>> : fmt_class_string<vm::_ptr_base<const char, u32>>
{
// Classify const char[] as const char*
};
template <usz Size>
struct fmt_class_string<vm::_ptr_base<char[Size], u32>> : fmt_class_string<vm::_ptr_base<const char, u32>>
{
// Classify char[] as const char*
};

208
rpcs3/Emu/Memory/vm_ref.h Normal file
View file

@ -0,0 +1,208 @@
#pragma once
#include <type_traits>
#include "vm.h"
#include "util/to_endian.hpp"
#ifndef _MSC_VER
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Weffc++"
#endif
namespace vm
{
template <typename T, typename AT>
class _ptr_base;
// Typed reference into guest memory: stores a guest address and resolves to a
// host reference on every access; all operators forward to the referenced object
template <typename T, typename AT>
class _ref_base
{
AT m_addr;
static_assert(!std::is_pointer_v<T>, "vm::_ref_base<> error: invalid type (pointer)");
static_assert(!std::is_reference_v<T>, "vm::_ref_base<> error: invalid type (reference)");
static_assert(!std::is_function_v<T>, "vm::_ref_base<> error: invalid type (function)");
static_assert(!std::is_void_v<T>, "vm::_ref_base<> error: invalid type (void)");
public:
using type = T;
using addr_type = std::remove_cv_t<AT>;
_ref_base(const _ref_base&) = default;
_ref_base(vm::addr_t addr)
: m_addr(addr)
{
}
// Guest address of the referenced object
addr_type addr() const
{
return m_addr;
}
// Resolve to a host reference through the base mirror
T& get_ref() const
{
return *static_cast<T*>(vm::base(vm::cast(m_addr)));
}
// convert to vm pointer
vm::_ptr_base<T, u32> ptr() const
{
return vm::cast(m_addr);
}
operator std::common_type_t<T>() const
{
return get_ref();
}
operator T&() const
{
return get_ref();
}
// Assigns the referenced value (not the stored address)
T& operator=(const _ref_base& right)
{
return get_ref() = right.get_ref();
}
T& operator=(const std::common_type_t<T>& right) const
{
return get_ref() = right;
}
// Increment/decrement and compound assignments all act on the referenced object
decltype(auto) operator++(int) const
{
return get_ref()++;
}
decltype(auto) operator++() const
{
return ++get_ref();
}
decltype(auto) operator--(int) const
{
return get_ref()--;
}
decltype(auto) operator--() const
{
return --get_ref();
}
template <typename T2>
decltype(auto) operator+=(const T2& right)
{
return get_ref() += right;
}
template <typename T2>
decltype(auto) operator-=(const T2& right)
{
return get_ref() -= right;
}
template <typename T2>
decltype(auto) operator*=(const T2& right)
{
return get_ref() *= right;
}
template <typename T2>
decltype(auto) operator/=(const T2& right)
{
return get_ref() /= right;
}
template <typename T2>
decltype(auto) operator%=(const T2& right)
{
return get_ref() %= right;
}
template <typename T2>
decltype(auto) operator&=(const T2& right)
{
return get_ref() &= right;
}
template <typename T2>
decltype(auto) operator|=(const T2& right)
{
return get_ref() |= right;
}
template <typename T2>
decltype(auto) operator^=(const T2& right)
{
return get_ref() ^= right;
}
template <typename T2>
decltype(auto) operator<<=(const T2& right)
{
return get_ref() <<= right;
}
template <typename T2>
decltype(auto) operator>>=(const T2& right)
{
return get_ref() >>= right;
}
};
// Native endianness reference to LE data
template <typename T, typename AT = u32>
using refl = _ref_base<to_le_t<T>, AT>;
// Native endianness reference to BE data
template <typename T, typename AT = u32>
using refb = _ref_base<to_be_t<T>, AT>;
// BE reference to LE data
template <typename T, typename AT = u32>
using brefl = _ref_base<to_le_t<T>, to_be_t<AT>>;
// BE reference to BE data
template <typename T, typename AT = u32>
using brefb = _ref_base<to_be_t<T>, to_be_t<AT>>;
// LE reference to LE data
template <typename T, typename AT = u32>
using lrefl = _ref_base<to_le_t<T>, to_le_t<AT>>;
// LE reference to BE data
template <typename T, typename AT = u32>
using lrefb = _ref_base<to_be_t<T>, to_le_t<AT>>;
inline namespace ps3_
{
// Default reference for PS3 HLE functions (Native endianness reference to BE data)
template <typename T, typename AT = u32>
using ref = refb<T, AT>;
// Default reference for PS3 HLE structures (BE reference to BE data)
template <typename T, typename AT = u32>
using bref = brefb<T, AT>;
} // namespace ps3_
} // namespace vm
#ifndef _MSC_VER
#pragma GCC diagnostic pop
#endif
// Change AT endianness to BE/LE
template <typename T, typename AT, bool Se>
struct to_se<vm::_ref_base<T, AT>, Se>
{
using type = vm::_ref_base<T, typename to_se<AT, Se>::type>;
};
// Forbid formatting (a reference is ambiguous: address or referenced value?)
template <typename T, typename AT>
struct fmt_unveil<vm::_ref_base<T, AT>>
{
static_assert(!sizeof(T), "vm::_ref_base<>: ambiguous format argument");
};

View file

@ -0,0 +1,545 @@
#pragma once
#include "vm.h"
#include "vm_locking.h"
#include "util/atomic.hpp"
#include "util/tsc.hpp"
#include <functional>
extern bool g_use_rtm;
extern u64 g_rtm_tx_limit2;
#ifdef _MSC_VER
extern "C"
{
u32 _xbegin();
void _xend();
}
#endif
namespace vm
{
// Lock bits stored in the low 7 bits of the reservation stamp
enum : u64
{
rsrv_lock_mask = 127, // all lock bits
rsrv_unique_lock = 64, // exclusive (writer) lock bit
rsrv_putunc_flag = 32, // presumably marks an unconditional put in progress - TODO confirm
};
// Get reservation status for further atomic update: last update timestamp
inline atomic_t<u64>& reservation_acquire(u32 addr)
{
	// One 64-bit stamp/lock word covers each 128-byte granule within a 64K page
	const u32 offset = (addr & 0xff80) / 2;
	return *reinterpret_cast<atomic_t<u64>*>(g_reservations + offset);
}
// Update reservation status
void reservation_update(u32 addr);
// Per-slot waiter bookkeeping for reservation notifications
struct reservation_waiter_t
{
u32 wait_flag = 0; // word waiters sleep on (notified via notify_all); nonzero while waiters exist
u8 waiters_count = 0; // number of registered waiters on this slot
u8 waiters_index = 0; // meaningful in a group's base slot: selects the active wait variable
};
// Map a reservation address to {currently active wait variable, base slot of its group}
static inline std::pair<atomic_t<reservation_waiter_t>*, atomic_t<reservation_waiter_t>*> reservation_notifier(u32 raddr)
{
extern std::array<atomic_t<reservation_waiter_t>, 1024> g_resrv_waiters_count;
// Storage efficient method to distinguish different nearby addresses (which are likely)
constexpr u32 wait_vars_for_each = 8;
constexpr u32 unique_address_bit_mask = 0b11;
// Group index mixes a popcount of the high address bits with two low granule bits
const usz index = std::popcount(raddr & -1024) + ((raddr / 128) & unique_address_bit_mask) * 32;
auto& waiter = g_resrv_waiters_count[index * wait_vars_for_each];
// The group's base slot stores which of its wait variables is currently active
return {&g_resrv_waiters_count[index * wait_vars_for_each + waiter.load().waiters_index % wait_vars_for_each], &waiter};
}
// Returns waiter count and index
static inline std::pair<u32, u32> reservation_notifier_count_index(u32 raddr)
{
	const auto [current, base_slot] = reservation_notifier(raddr);
	const u32 count = current->load().waiters_count;
	const u32 index = static_cast<u32>(current - base_slot);
	return {count, index};
}
// Returns waiter count
static inline u32 reservation_notifier_count(u32 raddr)
{
	const auto* wait_var = reservation_notifier(raddr).first;
	return wait_var->load().waiters_count;
}
// Unregister one waiter; the last waiter out also clears the wait flag
static inline void reservation_notifier_end_wait(atomic_t<reservation_waiter_t>& waiter)
{
	waiter.atomic_op([](reservation_waiter_t& value)
	{
		value.waiters_count -= 1;

		if (value.waiters_count == 0)
		{
			value.wait_flag = 0;
		}
	});
}
// Register the current thread as a waiter on the reservation at raddr.
// Returns the waiter slot on success, or nullptr if the reservation timestamp
// no longer matches rtime (in which case registration is rolled back).
static inline atomic_t<reservation_waiter_t>* reservation_notifier_begin_wait(u32 raddr, u64 rtime)
{
	atomic_t<reservation_waiter_t>& waiter = *reservation_notifier(raddr).first;

	// Publish the waiter BEFORE re-checking the timestamp so a concurrent notify cannot be missed
	waiter.atomic_op([](reservation_waiter_t& value)
	{
		value.wait_flag = 1;
		value.waiters_count++;
	});

	// Compare timestamps with the low 7 lock bits masked off
	if ((reservation_acquire(raddr) & -128) != rtime)
	{
		reservation_notifier_end_wait(waiter);
		return nullptr;
	}

	return &waiter;
}
// Wake threads waiting on the reservation at raddr, rotating the active waiter
// slot so late sleepers on the retired slot are not stranded.
// If pospone [sic] is set, returns the wait word for the caller to notify later;
// otherwise notifies immediately. Returns nullptr when there is nothing to do
// (no waiters, or another thread won the rotation race).
static inline atomic_t<u32>* reservation_notifier_notify(u32 raddr, bool pospone = false)
{
	const auto notifiers = reservation_notifier(raddr);

	if (notifiers.first->load().wait_flag)
	{
		if (notifiers.first == notifiers.second)
		{
			// Active slot is the group head: atomically claim it (clear state, advance index)
			if (!notifiers.first->fetch_op([](reservation_waiter_t& value)
				{
					if (value.waiters_index == 0)
					{
						value.wait_flag = 0;
						value.waiters_count = 0;
						value.waiters_index++;
						return true;
					}

					// Another thread already rotated this slot
					return false;
				})
				.second)
			{
				return nullptr;
			}
		}
		else
		{
			// Advance the rotating index stored in the group head; losing the CAS race means
			// someone else is notifying.
			// NOTE(review): rotation is modulo 4 although each group has 8 slots - confirm intended.
			u8 old_index = static_cast<u8>(notifiers.first - notifiers.second);

			if (!atomic_storage<u8>::compare_exchange(notifiers.second->raw().waiters_index, old_index, (old_index + 1) % 4))
			{
				return nullptr;
			}

			// Reset the slot we are about to notify
			notifiers.first->release(reservation_waiter_t{});
		}

		if (pospone)
		{
			return utils::bless<atomic_t<u32>>(&notifiers.first->raw().wait_flag);
		}

		utils::bless<atomic_t<u32>>(&notifiers.first->raw().wait_flag)->notify_all();
	}

	return nullptr;
}
u64 reservation_lock_internal(u32, atomic_t<u64>&);
void reservation_shared_lock_internal(atomic_t<u64>&);
// Try to acquire the unique (writer) lock with a single CAS.
// rtime must be the fully unlocked timestamp value (low 7 bits clear).
// Returns true if the lock bit was set by this call.
inline bool reservation_try_lock(atomic_t<u64>& res, u64 rtime)
{
	// Direct return of the CAS result; the redundant if/return-true/return-false was folded away
	return res.compare_and_swap_test(rtime, rtime | rsrv_unique_lock);
}
// Acquire the unique lock on the reservation covering addr.
// Returns {reservation word, pre-lock timestamp}. If the slow path gives up
// (reservation_lock_internal returns umax), a dummy atomic is substituted so
// the caller's subsequent updates do not touch the real reservation.
inline std::pair<atomic_t<u64>&, u64> reservation_lock(u32 addr)
{
	auto res = &vm::reservation_acquire(addr);
	auto rtime = res->load();

	// Fast path: currently unlocked and the CAS succeeds
	if (rtime & 127 || !reservation_try_lock(*res, rtime)) [[unlikely]]
	{
		// Shared dummy word handed out when locking fails permanently
		static atomic_t<u64> no_lock{};

		rtime = reservation_lock_internal(addr, *res);

		if (rtime == umax)
		{
			res = &no_lock;
		}
	}

	return {*res, rtime};
}
// TODO: remove and make it external
void reservation_op_internal(u32 addr, std::function<bool()> func);
// Perform an atomic read-modify-write on up to 128 bytes of guest memory under
// the reservation protocol. On x64 with TSX enabled the strategy is:
//   Stage 1: one optimistic hardware transaction (no lock taken);
//   Stage 2: take a shared lock (+1) and retry bounded lightweight transactions;
//   Stage 3: heavyweight software fallback via reservation_op_internal.
// Non-TSX builds (or non-x64) go straight to the writer-lock fallback at the bottom.
// F may return void (update always commits) or a truthy/falsy result (commits
// only when truthy). If Ack is set, reservation waiters are notified on success.
template <bool Ack = false, typename CPU, typename T, typename AT = u32, typename F>
inline SAFE_BUFFERS(auto) reservation_op(CPU& cpu, _ptr_base<T, AT> ptr, F op)
{
	// Atomic operation will be performed on aligned 128 bytes of data, so the data size and alignment must comply
	static_assert(sizeof(T) <= 128 && alignof(T) == sizeof(T), "vm::reservation_op: unsupported type");
	static_assert(std::is_trivially_copyable_v<T>, "vm::reservation_op: not triv copyable (optimization)");

	// Use "super" pointer to prevent access violation handling during atomic op
	const auto sptr = vm::get_super_ptr<T>(static_cast<u32>(ptr.addr()));

	// Prefetch some data
	//_m_prefetchw(sptr);
	//_m_prefetchw(reinterpret_cast<char*>(sptr) + 64);

	// Use 128-byte aligned addr
	const u32 addr = static_cast<u32>(ptr.addr()) & -128;
	auto& res = vm::reservation_acquire(addr);
	//_m_prefetchw(&res);

#if defined(ARCH_X64)
	if (g_use_rtm)
	{
		// Stage 1: single optimistic transaction attempt
		unsigned status = -1;
		u64 _old = 0;
		auto stamp0 = utils::get_tsc(), stamp1 = stamp0, stamp2 = stamp0;

#ifndef _MSC_VER
		__asm__ goto("xbegin %l[stage2];" ::: "memory" : stage2);
#else
		status = _xbegin();

		if (status == umax)
#endif
		{
			// Unique lock held: commit the empty transaction and fall through to stage 2
			if (res & rsrv_unique_lock)
			{
#ifndef _MSC_VER
				__asm__ volatile("xend; mov $-1, %%eax;" ::: "memory");
#else
				_xend();
#endif
				goto stage2;
			}

			if constexpr (std::is_void_v<std::invoke_result_t<F, T&>>)
			{
				std::invoke(op, *sptr);

				// No lock was taken, so a full +128 advances the timestamp in one go
				res += 128;
#ifndef _MSC_VER
				__asm__ volatile("xend;" ::: "memory");
#else
				_xend();
#endif
				if constexpr (Ack)
					res.notify_all();
				return;
			}
			else
			{
				if (auto result = std::invoke(op, *sptr))
				{
					res += 128;
#ifndef _MSC_VER
					__asm__ volatile("xend;" ::: "memory");
#else
					_xend();
#endif
					if constexpr (Ack)
						res.notify_all();
					return result;
				}
				else
				{
					// Operation declined: commit without touching the timestamp
#ifndef _MSC_VER
					__asm__ volatile("xend;" ::: "memory");
#else
					_xend();
#endif
					return result;
				}
			}
		}

	stage2:
#ifndef _MSC_VER
		__asm__ volatile("mov %%eax, %0;" : "=r"(status)::"memory");
#endif
		stamp1 = utils::get_tsc();

		// Stage 2: try to lock reservation first
		_old = res.fetch_add(1);

		// Compute stamps excluding memory touch
		stamp2 = utils::get_tsc() - (stamp1 - stamp0);

		// Start lightened transaction
		for (; !(_old & vm::rsrv_unique_lock) && stamp2 - stamp0 <= g_rtm_tx_limit2; stamp2 = utils::get_tsc())
		{
			if (cpu.has_pause_flag())
			{
				break;
			}

#ifndef _MSC_VER
			__asm__ goto("xbegin %l[retry];" ::: "memory" : retry);
#else
			status = _xbegin();

			if (status != umax) [[unlikely]]
			{
				goto retry;
			}
#endif
			if constexpr (std::is_void_v<std::invoke_result_t<F, T&>>)
			{
				std::invoke(op, *sptr);
#ifndef _MSC_VER
				__asm__ volatile("xend;" ::: "memory");
#else
				_xend();
#endif
				// +127 combines with the earlier fetch_add(1) into a full +128 step
				res += 127;

				if (Ack)
					res.notify_all();
				return;
			}
			else
			{
				if (auto result = std::invoke(op, *sptr))
				{
#ifndef _MSC_VER
					__asm__ volatile("xend;" ::: "memory");
#else
					_xend();
#endif
					res += 127;

					if (Ack)
						res.notify_all();
					return result;
				}
				else
				{
					// NOTE(review): declined path returns with the +1 from fetch_add still
					// applied (unlike the -1 rollback in the fallback below) - confirm intended
#ifndef _MSC_VER
					__asm__ volatile("xend;" ::: "memory");
#else
					_xend();
#endif
					return result;
				}
			}

		retry:
#ifndef _MSC_VER
			__asm__ volatile("mov %%eax, %0;" : "=r"(status)::"memory");
#endif
			// Status 0 indicates an abort that will not succeed on retry
			if (!status)
			{
				break;
			}
		}

		// Stage 3: all failed, heavyweight fallback (see comments at the bottom)
		if constexpr (std::is_void_v<std::invoke_result_t<F, T&>>)
		{
			vm::reservation_op_internal(addr, [&]
			{
				std::invoke(op, *sptr);
				return true;
			});

			if constexpr (Ack)
				res.notify_all();
			return;
		}
		else
		{
			auto result = std::invoke_result_t<F, T&>();

			vm::reservation_op_internal(addr, [&]
			{
				if ((result = std::invoke(op, *sptr)))
				{
					return true;
				}
				else
				{
					return false;
				}
			});

			if (Ack && result)
				res.notify_all();
			return result;
		}
	}
#else
	static_cast<void>(cpu);
#endif /* ARCH_X64 */

	// Lock reservation and perform heavyweight lock
	reservation_shared_lock_internal(res);

	if constexpr (std::is_void_v<std::invoke_result_t<F, T&>>)
	{
		{
			vm::writer_lock lock(addr);
			std::invoke(op, *sptr);

			// Completes the +1 taken by the shared lock into a full timestamp step
			res += 127;
		}

		if constexpr (Ack)
			res.notify_all();
		return;
	}
	else
	{
		auto result = std::invoke_result_t<F, T&>();
		{
			vm::writer_lock lock(addr);

			if ((result = std::invoke(op, *sptr)))
			{
				res += 127;
			}
			else
			{
				// Declined: undo only the shared-lock increment, timestamp unchanged
				res -= 1;
			}
		}

		if (Ack && result)
			res.notify_all();
		return result;
	}
}
// For internal usage
[[noreturn]] void reservation_escape_internal();
// Read memory value in pseudo-atomic manner: retry until op observes a stable
// snapshot (reservation unlocked before the read and timestamp unchanged after).
// CPU may be a thread object (polled for stop requests each iteration) or a
// non-class dummy, in which case the stop check is compiled out.
template <typename CPU, typename T, typename AT = u32, typename F>
inline SAFE_BUFFERS(auto) peek_op(CPU&& cpu, _ptr_base<T, AT> ptr, F op)
{
	// Atomic operation will be performed on aligned 128 bytes of data, so the data size and alignment must comply
	static_assert(sizeof(T) <= 128 && alignof(T) == sizeof(T), "vm::peek_op: unsupported type");

	// Use 128-byte aligned addr
	const u32 addr = static_cast<u32>(ptr.addr()) & -128;

	while (true)
	{
		if constexpr (std::is_class_v<std::remove_cvref_t<CPU>>)
		{
			// Leave through the escape path if the thread is being stopped
			if (cpu.test_stopped())
			{
				reservation_escape_internal();
			}
		}

		const u64 rtime = vm::reservation_acquire(addr);

		// Any lock bit set: a writer is in progress, retry
		if (rtime & 127)
		{
			continue;
		}

		// Observe data non-atomically and make sure no reservation updates were made
		if constexpr (std::is_void_v<std::invoke_result_t<F, const T&>>)
		{
			std::invoke(op, *ptr);

			if (rtime == vm::reservation_acquire(addr))
			{
				return;
			}
		}
		else
		{
			auto res = std::invoke(op, *ptr);

			if (rtime == vm::reservation_acquire(addr))
			{
				return res;
			}
		}
	}
}
// Execute op on a vm-resident object under a shared reservation lock
// ("light" path: no full writer lock). data must point into vm memory
// (computed offset from g_base_addr is UB otherwise). If Ack is set,
// reservation waiters are notified after the update.
template <bool Ack = false, typename T, typename F>
inline SAFE_BUFFERS(auto) light_op(T& data, F op)
{
	// Optimized real ptr -> vm ptr conversion, simply UB if out of range
	const u32 addr = static_cast<u32>(reinterpret_cast<const u8*>(&data) - g_base_addr);

	// Use "super" pointer to prevent access violation handling during atomic op
	const auto sptr = vm::get_super_ptr<T>(addr);

	// "Lock" reservation: bump the shared-lock counter unless a unique lock is held
	auto& res = vm::reservation_acquire(addr);

	auto [_old, _ok] = res.fetch_op([&](u64& r)
	{
		if (r & vm::rsrv_unique_lock)
		{
			return false;
		}

		r += 1;
		return true;
	});

	if (!_ok) [[unlikely]]
	{
		// Contended: wait out the unique lock holder, then take the shared lock
		vm::reservation_shared_lock_internal(res);
	}

	if constexpr (std::is_void_v<std::invoke_result_t<F, T&>>)
	{
		std::invoke(op, *sptr);

		// +127 turns the earlier +1 into a full timestamp step (also releases the lock)
		res += 127;

		if constexpr (Ack)
		{
			res.notify_all();
		}
	}
	else
	{
		auto result = std::invoke(op, *sptr);
		res += 127;

		if constexpr (Ack)
		{
			res.notify_all();
		}

		return result;
	}
}
// Forward an atomic_op on a vm-resident atomic through the light reservation path
template <bool Ack = false, typename T, typename F>
inline SAFE_BUFFERS(auto) atomic_op(T& data, F op)
{
	// Named wrapper instead of an inline lambda argument
	auto invoke_atomic = [&](T& target)
	{
		return target.atomic_op(op);
	};

	return light_op<Ack, T>(data, invoke_atomic);
}
// Forward a fetch_op on a vm-resident atomic through the light reservation path
template <bool Ack = false, typename T, typename F>
inline SAFE_BUFFERS(auto) fetch_op(T& data, F op)
{
	// Named wrapper instead of an inline lambda argument
	auto invoke_fetch = [&](T& target)
	{
		return target.fetch_op(op);
	};

	return light_op<Ack, T>(data, invoke_fetch);
}
} // namespace vm

175
rpcs3/Emu/Memory/vm_var.h Normal file
View file

@ -0,0 +1,175 @@
#pragma once
#include "vm_ptr.h"
#include "util/to_endian.hpp"
namespace vm
{
// Allocator policy: backs vm variables with page allocations from a memory location
template <memory_location_t Location = vm::main>
struct page_allocator
{
	static inline std::pair<vm::addr_t, u32> alloc(u32 size, u32 align)
	{
		// Page allocations are at least 64K-aligned
		const u32 real_align = std::max<u32>(align, 0x10000);
		const vm::addr_t addr = vm::cast(vm::alloc(size, Location, real_align));
		return {addr, size};
	}

	static inline void dealloc(u32 addr, u32 size) noexcept
	{
		ensure(vm::dealloc(addr, Location) >= size);
	}
};
// Allocator policy: backs vm variables with the guest thread's stack.
// T supplies static stack_push/stack_pop_verbose (e.g. ppu_thread).
template <typename T>
struct stack_allocator
{
	static inline std::pair<vm::addr_t, u32> alloc(u32 size, u32 align)
	{
		return T::stack_push(size, align);
	}

	// noexcept: invoked from variable destructors, possibly during unwinding
	static inline void dealloc(u32 addr, u32 size) noexcept
	{
		T::stack_pop_verbose(addr, size);
	}
};
// General variable base class: owns a guest-memory allocation of sizeof(T)
// obtained through allocator policy A and exposes it as a vm pointer.
template <typename T, typename A>
class _var_base final : public _ptr_base<T, const u32>
{
	using pointer = _ptr_base<T, const u32>;

	// Actual allocated size as reported by the allocator (may exceed sizeof(T))
	const u32 m_mem_size;

	_var_base(std::pair<vm::addr_t, u32> alloc_info)
		: pointer(alloc_info.first), m_mem_size(alloc_info.second)
	{
	}

public:
	// Unmoveable object
	_var_base(const _var_base&) = delete;

	_var_base& operator=(const _var_base&) = delete;

	using enable_bitcopy = std::false_type; // Disable bitcopy inheritence

	_var_base()
		: _var_base(A::alloc(sizeof(T), alignof(T)))
	{
	}

	// Allocate and copy-initialize guest memory from a host-side value
	_var_base(const T& right)
		: _var_base()
	{
		std::memcpy(pointer::get_ptr(), &right, sizeof(T));
	}

	~_var_base()
	{
		// Null address: nothing to free (presumably a failed allocation - see A::alloc)
		if (pointer::addr())
		{
			A::dealloc(pointer::addr(), m_mem_size);
		}
	}
};
// Dynamic length array variable specialization: owns `count` guest-memory
// elements of T allocated through policy A.
template <typename T, typename A>
class _var_base<T[], A> final : public _ptr_base<T, const u32>
{
	using pointer = _ptr_base<T, const u32>;

	// Actual allocated size as reported by the allocator
	const u32 m_mem_size;

	// Requested payload size in bytes (count * sizeof(T))
	const u32 m_size;

	_var_base(u32 count, std::pair<vm::addr_t, u32> alloc_info)
		: pointer(alloc_info.first), m_mem_size(alloc_info.second), m_size(u32{sizeof(T)} * count)
	{
	}

public:
	// Unmoveable object
	_var_base(const _var_base&) = delete;

	_var_base& operator=(const _var_base&) = delete;

	using enable_bitcopy = std::false_type; // Disable bitcopy inheritence

	_var_base(u32 count)
		: _var_base(count, A::alloc(u32{sizeof(T)} * count, alignof(T)))
	{
	}

	// Initialize via the iterator
	template <typename I>
	_var_base(u32 count, I&& it)
		: _var_base(count)
	{
		std::copy_n(std::forward<I>(it), count, pointer::get_ptr());
	}

	~_var_base()
	{
		// Null address: nothing to free
		if (pointer::addr())
		{
			A::dealloc(pointer::addr(), m_mem_size);
		}
	}

	// Remove operator ->
	T* operator->() const = delete;

	// Number of elements
	u32 get_count() const
	{
		return m_size / u32{sizeof(T)};
	}

	auto begin() const
	{
		return *this + 0;
	}

	auto end() const
	{
		return *this + get_count();
	}
};
// LE variable (stored little-endian in guest memory)
template <typename T, typename A>
using varl = _var_base<to_le_t<T>, A>;

// BE variable (stored big-endian in guest memory)
template <typename T, typename A>
using varb = _var_base<to_be_t<T>, A>;
inline namespace ps3_
{
	// BE variable, allocated on the PPU thread stack by default
	template <typename T, typename A = stack_allocator<ppu_thread>>
	using var = varb<T, A>;

	// Make BE variable initialized from value.
	// Returned by value; relies on C++17 guaranteed copy elision since _var_base is unmoveable.
	template <typename T, typename A = stack_allocator<ppu_thread>>
	[[nodiscard]] auto make_var(const T& value)
	{
		return (varb<T, A>(value));
	}

	// Make char[] variable initialized from std::string (size includes the null terminator)
	template <typename A = stack_allocator<ppu_thread>>
	[[nodiscard]] auto make_str(const std::string& str)
	{
		return (_var_base<char[], A>(size32(str) + 1, str.c_str()));
	}

	// Global HLE variable descriptor: alloc_size/alloc_align describe the
	// required storage (presumably consumed by the HLE allocator - see callers)
	template <typename T, uint Count = 1>
	struct gvar final : ptr<T>
	{
		static constexpr u32 alloc_size{sizeof(T) * Count};
		static constexpr u32 alloc_align{std::max<u32>(alignof(T), 16)};
	};
} // namespace ps3_
} // namespace vm