rpcsx/rpcs3/Emu/Cell/lv2/sys_mutex.h

#pragma once

#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"

struct sys_mutex_attribute_t
{
	be_t<u32> protocol; // SYS_SYNC_FIFO, SYS_SYNC_PRIORITY or SYS_SYNC_PRIORITY_INHERIT
	be_t<u32> recursive; // SYS_SYNC_RECURSIVE or SYS_SYNC_NOT_RECURSIVE
	be_t<u32> pshared;
	be_t<u32> adaptive;
	be_t<u64> ipc_key;
	be_t<s32> flags;
	be_t<u32> pad;

	union
	{
		nse_t<u64, 1> name_u64;
		char name[sizeof(u64)];
	};
};
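// Note: the union above exposes the 8-character mutex name both as raw bytes and as a single
// 64-bit value (name_u64), so the name can be compared or copied in one operation.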
struct lv2_mutex final : lv2_obj
{
	static const u32 id_base = 0x85000000;

	const lv2_protocol protocol;
	const u32 recursive;
	const u32 shared;
	const u32 adaptive;
	const u64 key;
	const u64 name;
	const s32 flags;

	struct alignas(8) count_info
	{
		u32 mutex_count; // Mutex copies count (0 means it no longer exists)
		u32 cond_count; // Condition variables attached to this mutex
	};

	shared_mutex mutex;
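	// Owner word layout (inferred from try_lock/try_own/reown below): bits 1..31 hold the owning
	// thread's id shifted left by one, bit 0 is set while threads are waiting in the sleep queue,
	// and a value of 0 means the mutex is free.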
	atomic_t<u32> owner{0};
	atomic_t<u32> lock_count{0}; // Recursive locks
	atomic_t<count_info> obj_count{};

	std::deque<cpu_thread*> sq;
	lv2_mutex(u32 protocol, u32 recursive, u32 shared, u32 adaptive, u64 key, s32 flags, u64 name)
		: protocol{protocol}
		, recursive(recursive)
		, shared(shared)
		, adaptive(adaptive)
		, key(key)
		, name(name)
		, flags(flags)
	{
	}
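	// Presumably invoked by the ID manager when a new ID is created for this object;
	// bumps the mutex existence counter.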
	CellError on_id_create()
	{
		obj_count.atomic_op([](count_info& info){ info.mutex_count++; });
		return {};
	}
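	// Non-blocking lock attempt for thread `id`: returns {} (no error) on success,
	// CELL_EDEADLK on a non-recursive relock, CELL_EKRESOURCE if the recursion counter
	// would overflow, and CELL_EBUSY if the mutex is (or just became) owned by another thread.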
	CellError try_lock(u32 id)
	{
		const u32 value = owner;

		if (value >> 1 == id)
		{
			// Recursive locking
			if (recursive == SYS_SYNC_RECURSIVE)
			{
				if (lock_count == 0xffffffffu)
				{
					return CELL_EKRESOURCE;
				}

				lock_count++;
				return {};
			}

			return CELL_EDEADLK;
		}

		if (value == 0)
		{
			if (owner.compare_and_swap_test(0, id << 1))
			{
				return {};
			}
		}

		return CELL_EBUSY;
	}
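	// Either takes ownership for thread `id` (returns true) or, if the mutex is already owned,
	// sets the contention bit, appends `cpu` to the sleep queue and returns false.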
	bool try_own(cpu_thread& cpu, u32 id)
	{
		if (owner.fetch_op([&](u32& val)
		{
			if (val == 0)
			{
				val = id << 1;
			}
			else
			{
				val |= 1;
			}
		}))
		{
			sq.emplace_back(&cpu);
			return false;
		}

		return true;
	}
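	// Unlock attempt for thread `id`: returns CELL_EPERM if the caller is not the owner,
	// otherwise pops one recursion level or releases the mutex. CELL_EBUSY indicates waiters
	// remain; the caller is then expected to hand ownership over (see reown() below).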
	CellError try_unlock(u32 id)
	{
		const u32 value = owner;

		if (value >> 1 != id)
		{
			return CELL_EPERM;
		}

		if (lock_count)
		{
			lock_count--;
			return {};
		}

		if (value == id << 1)
		{
			if (owner.compare_and_swap_test(value, 0))
			{
				return {};
			}
		}

		return CELL_EBUSY;
	}
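	// Picks the next waiter from the sleep queue according to the scheduling protocol and makes
	// it the new owner (keeping the contention bit if more waiters remain), or clears the owner
	// word when the queue is empty. Returns the thread to be resumed, if any.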
	template <typename T>
	T* reown()
	{
		if (auto cpu = schedule<T>(sq, protocol))
		{
			owner = cpu->id << 1 | !sq.empty();
			return static_cast<T*>(cpu);
		}
		else
		{
			owner = 0;
			return nullptr;
		}
	}
};
class ppu_thread;

// Syscalls

error_code sys_mutex_create(ppu_thread& ppu, vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t> attr);
error_code sys_mutex_destroy(ppu_thread& ppu, u32 mutex_id);
error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout);
error_code sys_mutex_trylock(ppu_thread& ppu, u32 mutex_id);
error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id);
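// Illustrative guest-side call sequence (a sketch, not taken from actual usage; the attribute
// values and the "timeout 0 means wait forever" convention are assumptions):
//
//   vm::var<u32> id;
//   vm::var<sys_mutex_attribute_t> attr{};
//   attr->protocol = SYS_SYNC_PRIORITY;
//   attr->recursive = SYS_SYNC_NOT_RECURSIVE;
//   sys_mutex_create(ppu, id, attr);
//   sys_mutex_lock(ppu, *id, 0); // assumed: 0 = no timeout
//   sys_mutex_unlock(ppu, *id);
//   sys_mutex_destroy(ppu, *id);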