#include "stdafx.h"
#include "sys_lwmutex.h"

#include "Emu/IdManager.h"

#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"

LOG_CHANNEL(sys_lwmutex);
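
// Savestate (de)serialization of the lwmutex kernel object: protocol, control area pointer, name and signaled value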
lv2_lwmutex::lv2_lwmutex(utils::serial& ar)
	: protocol(ar)
	, control(ar.operator decltype(control)())
	, name(ar.operator be_t<u64>())
	, signaled(ar)
{
}

void lv2_lwmutex::save(utils::serial& ar)
{
	ar(protocol, control, name, signaled);
}
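
// Create the kernel-side lv2 object backing a user-space sys_lwmutex_t; its id is returned in *lwmutex_id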
error_code _sys_lwmutex_create(ppu_thread& ppu, vm::ptr<u32> lwmutex_id, u32 protocol, vm::ptr<sys_lwmutex_t> control, s32 has_name, u64 name)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace(u8"_sys_lwmutex_create(lwmutex_id=*0x%x, protocol=0x%x, control=*0x%x, has_name=0x%x, name=0x%llx (“%s”))", lwmutex_id, protocol, control, has_name, name, lv2_obj::name64(std::bit_cast<be_t<u64>>(name)));

	if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_RETRY && protocol != SYS_SYNC_PRIORITY)
	{
		sys_lwmutex.error("_sys_lwmutex_create(): unknown protocol (0x%x)", protocol);
		return CELL_EINVAL;
	}

	if (!(has_name < 0))
	{
		name = 0;
	}

	if (const u32 id = idm::make<lv2_obj, lv2_lwmutex>(protocol, control, name))
	{
		*lwmutex_id = id;
		return CELL_OK;
	}

	return CELL_EAGAIN;
}
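
// Destroy the lwmutex: fails with EBUSY while threads are queued on it, and waits for any lwcond waiters to leave before the id is withdrawn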
error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace("_sys_lwmutex_destroy(lwmutex_id=0x%x)", lwmutex_id);

	std::shared_ptr<lv2_lwmutex> _mutex;

	while (true)
	{
		s32 old_val = 0;

		auto [ptr, ret] = idm::withdraw<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex) -> CellError
		{
			// Ignore check on first iteration
			if (_mutex && std::addressof(mutex) != _mutex.get())
			{
				// Other thread has destroyed the lwmutex earlier
				return CELL_ESRCH;
			}

			std::lock_guard lock(mutex.mutex);

			if (!mutex.sq.empty())
			{
				return CELL_EBUSY;
			}

			old_val = mutex.lwcond_waiters.or_fetch(smin);

			if (old_val != smin)
			{
				// Deschedule if waiters were found
				lv2_obj::sleep(ppu);

				// Repeat loop: there are lwcond waiters
				return CELL_EAGAIN;
			}

			return {};
		});

		if (!ptr)
		{
			return CELL_ESRCH;
		}

		if (ret)
		{
			if (ret != CELL_EAGAIN)
			{
				return ret;
			}
		}
		else
		{
			break;
		}

		_mutex = std::move(ptr);

		// Wait for all lwcond waiters to quit
		while (old_val + 0u > 1u << 31)
		{
			thread_ctrl::wait_on(_mutex->lwcond_waiters, old_val);

			if (ppu.is_stopped())
			{
				ppu.state += cpu_flag::again;
				return {};
			}

			old_val = _mutex->lwcond_waiters;
		}

		// Wake up from sleep
		ppu.check_state();
	}

	return CELL_OK;
}
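
// Slow-path lock: take a pending signal if one is available, otherwise queue the calling PPU thread and sleep until it is awoken or the timeout expires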
error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace("_sys_lwmutex_lock(lwmutex_id=0x%x, timeout=0x%llx)", lwmutex_id, timeout);

	ppu.gpr[3] = CELL_OK;

	const auto mutex = idm::get<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
	{
		if (mutex.signaled.try_dec(0))
		{
			return true;
		}

		lv2_obj::notify_all_t notify;

		std::lock_guard lock(mutex.mutex);

		auto [old, _] = mutex.signaled.fetch_op([](s32& value)
		{
			if (value)
			{
				value = 0;
				return true;
			}

			return false;
		});

		if (old)
		{
			if (old == smin)
			{
				ppu.gpr[3] = CELL_EBUSY;
			}

			return true;
		}

		mutex.add_waiter(&ppu);
		mutex.sleep(ppu, timeout, true);

		return false;
	});

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	if (mutex.ret)
	{
		return not_an_error(ppu.gpr[3]);
	}

	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			break;
		}

		if (is_stopped(state))
		{
			std::lock_guard lock(mutex->mutex);

			if (std::find(mutex->sq.begin(), mutex->sq.end(), &ppu) == mutex->sq.end())
			{
				break;
			}

			ppu.state += cpu_flag::again;
			return {};
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				std::lock_guard lock(mutex->mutex);

				if (!mutex->unqueue(mutex->sq, &ppu))
				{
					break;
				}

				ppu.gpr[3] = CELL_ETIMEDOUT;
				break;
			}
		}
		else
		{
			thread_ctrl::wait_on(ppu.state, state);
		}
	}

	return not_an_error(ppu.gpr[3]);
}
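
// Non-blocking lock attempt: succeeds only if the lwmutex is currently signaled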
error_code _sys_lwmutex_trylock(ppu_thread& ppu, u32 lwmutex_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace("_sys_lwmutex_trylock(lwmutex_id=0x%x)", lwmutex_id);

	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
	{
		auto [_, ok] = mutex.signaled.fetch_op([](s32& value)
		{
			if (value & 1)
			{
				value = 0;
				return true;
			}

			return false;
		});

		return ok;
	});

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	if (!mutex.ret)
	{
		return not_an_error(CELL_EBUSY);
	}

	return CELL_OK;
}
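
// Unlock: wake the next waiter selected by the sync protocol, or leave the lwmutex signaled if no thread is queued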
error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace("_sys_lwmutex_unlock(lwmutex_id=0x%x)", lwmutex_id);

	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
	{
		lv2_obj::notify_all_t notify;

		std::lock_guard lock(mutex.mutex);

		if (const auto cpu = mutex.schedule<ppu_thread>(mutex.sq, mutex.protocol))
		{
			if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
			{
				ppu.state += cpu_flag::again;
				return;
			}

			mutex.awake(cpu, true);
			return;
		}

		mutex.signaled |= 1;
	});

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}
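
// Unlock variant that reports CELL_EBUSY to the woken waiter; with no waiters queued it sets the "forced" signal bit (smin), so the next locker also observes CELL_EBUSY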
error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.warning("_sys_lwmutex_unlock2(lwmutex_id=0x%x)", lwmutex_id);

	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
	{
		lv2_obj::notify_all_t notify;

		std::lock_guard lock(mutex.mutex);

		if (const auto cpu = mutex.schedule<ppu_thread>(mutex.sq, mutex.protocol))
		{
			if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
			{
				ppu.state += cpu_flag::again;
				return;
			}

			static_cast<ppu_thread*>(cpu)->gpr[3] = CELL_EBUSY;
			mutex.awake(cpu, true);
			return;
		}

		mutex.signaled |= smin;
	});

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}