Mirror of https://github.com/RPCSX/rpcsx.git (synced 2025-12-06 07:12:14 +01:00)
Replaces `std::shared_ptr` with `stx::atomic_ptr` and `stx::shared_ptr`.

Notes to programmers:

* This PR kills the use of `dynamic_cast`, `std::dynamic_pointer_cast` and `std::weak_ptr` on IDM objects. A possible replacement is to save the object ID on the base object, then use `idm::check`/`idm::get_unlocked` with the saved ID to reach the destination type; the result may be null. A null-pointer check is how you tell apart a type mismatch (as with `dynamic_cast`) and object destruction (as with `std::weak_ptr` locking). A sketch of this pattern is given after the lists below.
* Double-inheritance on IDM objects should be used with care: `stx::shared_ptr` does not support constant-evaluated pointer offsetting to a parent/child type.
* `idm::check`/`idm::get_unlocked` can now be used anywhere.

Misc fixes:

* Fixes some segfaults in RPCN's interaction with IDM.
* Fixes deadlocks in the access violation handler caused by locking recursion.
* Fixes a race condition in process exit-spawn on memory container reads.
* Fixes a bug that could theoretically prevent RPCS3 from booting: the `id_manager::typeinfo` comparison now compares members instead of using `memcmp`, which can fail spuriously on padding bytes (see the padding example below).
* Ensures that every IDM type inheriting from a base has either `id_base` or `id_type` defined locally; this allows getters such as `idm::get_unlocked<lv2_socket, lv2_socket_raw>()`, which were broken before. (Requires save-state invalidation.)
* Removes the broken `operator[]` overload of `stx::shared_ptr` and `stx::single_ptr` for non-array types.
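As a minimal sketch of the ID-based lookup pattern from the first note: the base object stores its IDM ID, and the concrete type is recovered through `idm::get_unlocked` instead of `dynamic_cast` or `std::weak_ptr::lock`. The `socket_user` struct, `saved_sock_id` member and `use_socket` function are hypothetical names for illustration, and the getter signature is assumed from the description above:

```cpp
// Hypothetical sketch, not code from this PR: the object ID is saved on the
// base object, and idm::get_unlocked<Base, Derived>(id) is assumed to return
// a null pointer when the ID no longer resolves to a live Derived object.
struct socket_user
{
	u32 saved_sock_id = 0; // IDM ID saved at creation time (hypothetical member)
};

void use_socket(socket_user& user)
{
	if (auto sock = idm::get_unlocked<lv2_socket, lv2_socket_raw>(user.saved_sock_id))
	{
		// Non-null: the ID still refers to a live lv2_socket_raw
	}
	else
	{
		// Null: type mismatch (former dynamic_cast failure)
		// or object destroyed (former std::weak_ptr lock failure)
	}
}
```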
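The `memcmp` pitfall behind the `id_manager::typeinfo` fix can be reproduced with any struct containing internal padding; the `info` struct and both comparison helpers below are illustrative only, not the actual `typeinfo` definition:

```cpp
#include <cstring>

// 4 bytes of padding sit between 'base' and 'step'; their contents are
// indeterminate, so two objects with equal members may still differ there.
struct info
{
	unsigned int base;
	unsigned long long step;
};

// Unreliable: also compares the padding bytes, so it can fail spuriously
bool eq_memcmp(const info& a, const info& b)
{
	return std::memcmp(&a, &b, sizeof(info)) == 0;
}

// Correct: compares members only, as the fix above does for typeinfo
bool eq_members(const info& a, const info& b)
{
	return a.base == b.base && a.step == b.step;
}
```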
208 lines
3.5 KiB
C++
#pragma once

#include "sys_sync.h"

#include "Emu/Memory/vm_ptr.h"

#include "Emu/Cell/PPUThread.h"

struct sys_mutex_attribute_t
{
	be_t<u32> protocol; // SYS_SYNC_FIFO, SYS_SYNC_PRIORITY or SYS_SYNC_PRIORITY_INHERIT
	be_t<u32> recursive; // SYS_SYNC_RECURSIVE or SYS_SYNC_NOT_RECURSIVE
	be_t<u32> pshared;
	be_t<u32> adaptive;
	be_t<u64> ipc_key;
	be_t<s32> flags;
	be_t<u32> pad;

	union
	{
		nse_t<u64, 1> name_u64;
		char name[sizeof(u64)];
	};
};

class ppu_thread;

struct lv2_mutex final : lv2_obj
{
	static const u32 id_base = 0x85000000;

	const lv2_protocol protocol;
	const u32 recursive;
	const u32 adaptive;
	const u64 key;
	const u64 name;

	u32 cond_count = 0; // Condition Variables
	shared_mutex mutex;
	atomic_t<u32> lock_count{0}; // Recursive Locks

	struct alignas(16) control_data_t
	{
		u32 owner{};
		u32 reserved{};
		ppu_thread* sq{};
	};

	atomic_t<control_data_t> control{};

	lv2_mutex(u32 protocol, u32 recursive, u32 adaptive, u64 key, u64 name) noexcept
		: protocol{static_cast<u8>(protocol)}
		, recursive(recursive)
		, adaptive(adaptive)
		, key(key)
		, name(name)
	{
	}

	lv2_mutex(utils::serial& ar);
	static std::function<void(void*)> load(utils::serial& ar);
	void save(utils::serial& ar);

	// Attempt to acquire the mutex without blocking
	template <typename T>
	CellError try_lock(T& cpu)
	{
		auto it = control.load();

		if (!it.owner)
		{
			auto store = it;
			store.owner = cpu.id;

			if (!control.compare_and_swap_test(it, store))
			{
				return CELL_EBUSY;
			}

			return {};
		}

		if (it.owner == cpu.id)
		{
			// Recursive locking
			if (recursive == SYS_SYNC_RECURSIVE)
			{
				if (lock_count == 0xffffffffu)
				{
					return CELL_EKRESOURCE;
				}

				lock_count++;
				return {};
			}

			return CELL_EDEADLK;
		}

		return CELL_EBUSY;
	}

	// Take ownership if the mutex is free, otherwise enqueue the thread on the sleep queue
	template <typename T>
	bool try_own(T& cpu)
	{
		if (control.atomic_op([&](control_data_t& data)
			{
				if (data.owner)
				{
					cpu.prio.atomic_op([tag = ++g_priority_order_tag](std::common_type_t<decltype(T::prio)>& prio)
						{
							prio.order = tag;
						});

					cpu.next_cpu = data.sq;
					data.sq = &cpu;
					return false;
				}
				else
				{
					data.owner = cpu.id;
					return true;
				}
			}))
		{
			cpu.next_cpu = nullptr;
			return true;
		}

		return false;
	}

	// Drop one recursion level or release ownership; CELL_EBUSY means the sleep queue is non-empty and must be handled
	template <typename T>
	CellError try_unlock(T& cpu)
	{
		auto it = control.load();

		if (it.owner != cpu.id)
		{
			return CELL_EPERM;
		}

		if (lock_count)
		{
			lock_count--;
			return {};
		}

		if (!it.sq)
		{
			auto store = it;
			store.owner = 0;

			if (control.compare_and_swap_test(it, store))
			{
				return {};
			}
		}

		return CELL_EBUSY;
	}

	// Pass ownership to the next thread in the sleep queue, chosen according to the protocol
	template <typename T>
	T* reown()
	{
		T* res{};

		control.fetch_op([&](control_data_t& data)
			{
				res = nullptr;

				if (auto sq = static_cast<T*>(data.sq))
				{
					res = schedule<T>(data.sq, protocol, false);

					if (sq == data.sq)
					{
						atomic_storage<u32>::release(control.raw().owner, res->id);
						return false;
					}

					data.owner = res->id;
					return true;
				}
				else
				{
					data.owner = 0;
					return true;
				}
			});

		if (res && cpu_flag::again - res->state)
		{
			// Detach manually (fetch_op can fail, so avoid side-effects on the first node in this case)
			res->next_cpu = nullptr;
		}

		return res;
	}
};

// Syscalls

error_code sys_mutex_create(ppu_thread& ppu, vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t> attr);
error_code sys_mutex_destroy(ppu_thread& ppu, u32 mutex_id);
error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout);
error_code sys_mutex_trylock(ppu_thread& ppu, u32 mutex_id);
error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id);