// rpcsx/rpcs3/Emu/Cell/lv2/sys_mutex.h

#pragma once

#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/PPUThread.h"
struct sys_mutex_attribute_t
{
	be_t<u32> protocol; // SYS_SYNC_FIFO, SYS_SYNC_PRIORITY or SYS_SYNC_PRIORITY_INHERIT
	be_t<u32> recursive; // SYS_SYNC_RECURSIVE or SYS_SYNC_NOT_RECURSIVE
	be_t<u32> pshared;
	be_t<u32> adaptive;
	be_t<u64> ipc_key;
	be_t<s32> flags;
	be_t<u32> pad;

	union
	{
		nse_t<u64, 1> name_u64;
		char name[sizeof(u64)];
	};
};

class ppu_thread;
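
// LV2 kernel mutex object backing the sys_mutex_* syscalls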
struct lv2_mutex final : lv2_obj
{
	static const u32 id_base = 0x85000000;

	const lv2_protocol protocol;
	const u32 recursive;
	const u32 adaptive;
	const u64 key;
	const u64 name;

	u32 cond_count = 0; // Condition Variables
	shared_mutex mutex;

	atomic_t<u32> lock_count{0}; // Recursive Locks
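
	// Lock state packed into a single 16-byte atomic: owner thread id, a reserved word, and the head of the sleep queue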
	struct alignas(16) control_data_t
	{
		u32 owner{};
		u32 reserved{};
		ppu_thread* sq{};
	};

	atomic_t<control_data_t> control{};

	lv2_mutex(u32 protocol, u32 recursive, u32 adaptive, u64 key, u64 name) noexcept
		: protocol{static_cast<u8>(protocol)}
		, recursive(recursive)
		, adaptive(adaptive)
		, key(key)
		, name(name)
	{
	}
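
	// Savestate (de)serialization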
	lv2_mutex(utils::serial& ar);
	static std::shared_ptr<void> load(utils::serial& ar);
	void save(utils::serial& ar);
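
	// Attempt to acquire the mutex for thread 'cpu' without blocking.
	// Returns {} on success, CELL_EBUSY if owned by another thread (or the lock-free update failed),
	// CELL_EDEADLK on a non-recursive relock, CELL_EKRESOURCE on recursion counter overflow.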
	template <typename T>
	CellError try_lock(T& cpu)
	{
		auto it = control.load();

		if (!it.owner)
		{
			auto store = it;
			store.owner = cpu.id;

			if (!control.compare_and_swap_test(it, store))
			{
				return CELL_EBUSY;
			}

			return {};
		}

		if (it.owner == cpu.id)
		{
			// Recursive locking
			if (recursive == SYS_SYNC_RECURSIVE)
			{
				if (lock_count == 0xffffffffu)
				{
					return CELL_EKRESOURCE;
				}

				lock_count++;
				return {};
			}

			return CELL_EDEADLK;
		}

		return CELL_EBUSY;
	}
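
	// Acquire the mutex or push 'cpu' onto the sleep queue in a single atomic operation.
	// Returns true if ownership was taken, false if the thread was enqueued as a waiter.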
	template <typename T>
	bool try_own(T& cpu)
	{
		if (control.atomic_op([&](control_data_t& data)
			{
				if (data.owner)
				{
					cpu.prio.atomic_op([tag = ++g_priority_order_tag](std::common_type_t<decltype(T::prio)>& prio)
					{
						prio.order = tag;
					});

					cpu.next_cpu = data.sq;
					data.sq = &cpu;
					return false;
				}
				else
				{
					data.owner = cpu.id;
					return true;
				}
			}))
		{
			cpu.next_cpu = nullptr;
			return true;
		}

		return false;
	}
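
	// Release one level of ownership held by 'cpu'.
	// Returns {} on success, CELL_EPERM if 'cpu' is not the owner,
	// and CELL_EBUSY if waiters are queued (or the lock-free update failed).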
	template <typename T>
	CellError try_unlock(T& cpu)
	{
		auto it = control.load();

		if (it.owner != cpu.id)
		{
			return CELL_EPERM;
		}

		if (lock_count)
		{
			lock_count--;
			return {};
		}

		if (!it.sq)
		{
			auto store = it;
			store.owner = 0;

			if (control.compare_and_swap_test(it, store))
			{
				return {};
			}
		}

		return CELL_EBUSY;
	}
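
	// Transfer ownership to the next waiter chosen from the sleep queue by the mutex protocol,
	// or clear the owner if the queue is empty. Returns the woken thread, if any.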
	template <typename T>
	T* reown()
	{
		T* res{};

		control.fetch_op([&](control_data_t& data)
		{
			res = nullptr;

			if (auto sq = static_cast<T*>(data.sq))
			{
				res = schedule<T>(data.sq, protocol, false);

				if (sq == data.sq)
				{
					atomic_storage<u32>::release(control.raw().owner, res->id);
					return false;
				}

				data.owner = res->id;
				return true;
			}
			else
			{
				data.owner = 0;
				return true;
			}
		});

		if (res && cpu_flag::again - res->state)
		{
			// Detach manually (fetch_op can fail, so avoid side-effects on the first node in this case)
			res->next_cpu = nullptr;
		}

		return res;
	}
};

// Syscalls

error_code sys_mutex_create(ppu_thread& ppu, vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t> attr);
error_code sys_mutex_destroy(ppu_thread& ppu, u32 mutex_id);
error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout);
error_code sys_mutex_trylock(ppu_thread& ppu, u32 mutex_id);
error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id);