// rpcsx/rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp — lv2 lightweight mutex (lwmutex) syscall implementations
#include "stdafx.h"
#include "sys_lwmutex.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_lwmutex);
// Deserialization constructor: restores a lwmutex from a savestate stream.
// Field order must match save() below exactly.
lv2_lwmutex::lv2_lwmutex(utils::serial& ar)
: protocol(ar)
, control(ar.operator decltype(control)())
, name(ar.operator be_t<u64>())
{
// Restore only the signaled counter; the sleep queue is rebuilt at runtime
ar(lv2_control.raw().signaled);
}
// Serialize lwmutex state for a savestate (mirrors the ar-constructor's field order)
void lv2_lwmutex::save(utils::serial& ar)
{
ar(protocol, control, name, lv2_control.raw().signaled);
}
// Create the kernel-side companion object of a user-space lwmutex.
// lwmutex_id: out pointer receiving the new ID; protocol: SYS_SYNC_FIFO/RETRY/PRIORITY;
// control: guest pointer to the user-space sys_lwmutex_t; has_name/name: optional 8-char name.
// Returns CELL_EINVAL on bad protocol, CELL_EAGAIN when out of IDs.
error_code _sys_lwmutex_create(ppu_thread& ppu, vm::ptr<u32> lwmutex_id, u32 protocol, vm::ptr<sys_lwmutex_t> control, s32 has_name, u64 name)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace(u8"_sys_lwmutex_create(lwmutex_id=*0x%x, protocol=0x%x, control=*0x%x, has_name=0x%x, name=0x%llx (“%s”))", lwmutex_id, protocol, control, has_name, name, lv2_obj::name64(std::bit_cast<be_t<u64>>(name)));

	if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_RETRY && protocol != SYS_SYNC_PRIORITY)
	{
		sys_lwmutex.error("_sys_lwmutex_create(): unknown protocol (0x%x)", protocol);
		return CELL_EINVAL;
	}

	// Only a negative has_name marks the name as valid; otherwise discard it
	if (!(has_name < 0))
	{
		name = 0;
	}

	if (const u32 id = idm::make<lv2_obj, lv2_lwmutex>(protocol, control, name))
	{
		// Ensure a clean state before writing back to guest memory
		ppu.check_state();
		*lwmutex_id = id;
		return CELL_OK;
	}

	return CELL_EAGAIN;
}
// Destroy a lwmutex kernel object.
// Fails with CELL_EBUSY if the sleep queue is non-empty; if lwcond waiters exist,
// marks the counter with smin and blocks until they all quit before retrying.
error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace("_sys_lwmutex_destroy(lwmutex_id=0x%x)", lwmutex_id);

	// Keeps the object alive across retries so we can detect re-creation under the same ID
	std::shared_ptr<lv2_lwmutex> _mutex;

	while (true)
	{
		s32 old_val = 0;

		auto [ptr, ret] = idm::withdraw<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex) -> CellError
		{
			// Ignore check on first iteration
			if (_mutex && std::addressof(mutex) != _mutex.get())
			{
				// Other thread has destroyed the lwmutex earlier
				return CELL_ESRCH;
			}

			std::lock_guard lock(mutex.mutex);

			// Threads sleeping on the mutex forbid destruction
			if (mutex.load_sq())
			{
				return CELL_EBUSY;
			}

			// Set the "destroying" flag (smin) while fetching the waiter count
			old_val = mutex.lwcond_waiters.or_fetch(smin);

			if (old_val != smin)
			{
				// Deschedule if waiters were found
				lv2_obj::sleep(ppu);

				// Repeat loop: there are lwcond waiters
				return CELL_EAGAIN;
			}

			return {};
		});

		if (!ptr)
		{
			return CELL_ESRCH;
		}

		if (ret)
		{
			if (ret != CELL_EAGAIN)
			{
				return ret;
			}
		}
		else
		{
			// Withdrawn with no waiters: destruction complete
			break;
		}

		_mutex = std::move(ptr);

		// Wait for all lwcond waiters to quit
		// (unsigned compare: loop while any waiter bits are set alongside the smin flag)
		while (old_val + 0u > 1u << 31)
		{
			thread_ctrl::wait_on(_mutex->lwcond_waiters, old_val);

			if (ppu.is_stopped())
			{
				// Emulation is being stopped/saved: request the syscall to run again
				ppu.state += cpu_flag::again;
				return {};
			}

			old_val = _mutex->lwcond_waiters;
		}

		// Wake up from sleep
		ppu.check_state();
	}

	return CELL_OK;
}
// Slow-path lock: called when the user-space fast path failed to acquire the lwmutex.
// timeout is in microseconds (0 = infinite). Result is passed through ppu.gpr[3]
// (CELL_OK, CELL_EBUSY for SYS_SYNC_RETRY abandonment, or CELL_ETIMEDOUT).
error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace("_sys_lwmutex_lock(lwmutex_id=0x%x, timeout=0x%llx)", lwmutex_id, timeout);

	ppu.gpr[3] = CELL_OK;

	const auto mutex = idm::get<lv2_obj, lv2_lwmutex>(lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex& mutex)
	{
		// Try to consume a pending signal without queuing
		if (s32 signal = mutex.lv2_control.fetch_op([](auto& data)
		{
			if (data.signaled == 1)
			{
				data.signaled = 0;
				return true;
			}

			return false;
		}).first.signaled)
		{
			// smin signal value indicates a forced unlock (see _sys_lwmutex_unlock2)
			if (signal == smin)
			{
				ppu.gpr[3] = CELL_EBUSY;
			}

			return true;
		}

		lv2_obj::prepare_for_sleep(ppu);

		ppu.cancel_sleep = 1;

		// Race again under queue insertion: may still acquire instead of queuing
		if (s32 signal = mutex.try_own(&ppu))
		{
			if (signal == smin)
			{
				ppu.gpr[3] = CELL_EBUSY;
			}

			ppu.cancel_sleep = 0;
			return true;
		}

		const bool finished = !mutex.sleep(ppu, timeout);
		notify.cleanup();
		return finished;
	});

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	if (mutex.ret)
	{
		// Acquired (or refused) without sleeping
		return not_an_error(ppu.gpr[3]);
	}

	// Wait until signaled, stopped or timed out
	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			break;
		}

		if (is_stopped(state))
		{
			std::lock_guard lock(mutex->mutex);

			// Still queued? Then request the syscall to run again after resume
			for (auto cpu = mutex->load_sq(); cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					ppu.state += cpu_flag::again;
					return {};
				}
			}

			break;
		}

		// Brief busy-wait before falling back to a kernel wait
		for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++)
		{
			busy_wait(500);
		}

		if (ppu.state & cpu_flag::signal)
		{
			continue;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				std::lock_guard lock(mutex->mutex);

				// If not found in the queue, we were signaled concurrently: not a timeout
				if (!mutex->unqueue(mutex->lv2_control.raw().sq, &ppu))
				{
					break;
				}

				ppu.gpr[3] = CELL_ETIMEDOUT;
				break;
			}
		}
		else
		{
			thread_ctrl::wait_on(ppu.state, state);
		}
	}

	return not_an_error(ppu.gpr[3]);
}
// Non-blocking lock attempt: consume a pending signal if one exists.
// Returns CELL_OK on success, CELL_EBUSY (not_an_error) if unavailable, CELL_ESRCH on bad ID.
error_code _sys_lwmutex_trylock(ppu_thread& ppu, u32 lwmutex_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace("_sys_lwmutex_trylock(lwmutex_id=0x%x)", lwmutex_id);

	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
	{
		auto [_, ok] = mutex.lv2_control.fetch_op([](auto& data)
		{
			// NOTE: tests bit 0 only (unlike _sys_lwmutex_lock which tests == 1)
			if (data.signaled & 1)
			{
				data.signaled = 0;
				return true;
			}

			return false;
		});

		return ok;
	});

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	if (!mutex.ret)
	{
		return not_an_error(CELL_EBUSY);
	}

	return CELL_OK;
}
// Slow-path unlock: post a signal or transfer ownership to the next queued waiter.
error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace("_sys_lwmutex_unlock(lwmutex_id=0x%x)", lwmutex_id);

	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex& mutex)
	{
		// Fast path: no waiters, just set the signal
		if (mutex.try_unlock(false))
		{
			return;
		}

		std::lock_guard lock(mutex.mutex);

		if (const auto cpu = mutex.reown<ppu_thread>())
		{
			if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
			{
				// Waiter is being saved: request this syscall to run again
				ppu.state += cpu_flag::again;
				return;
			}

			mutex.awake(cpu);
			notify.cleanup(); // lv2_lwmutex::mutex is not really active 99% of the time, can be ignored
		}
	});

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}
// Forced unlock variant (used by SYS_SYNC_RETRY protocol): signals with the smin
// marker and wakes the next waiter with CELL_EBUSY in its result register.
error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.warning("_sys_lwmutex_unlock2(lwmutex_id=0x%x)", lwmutex_id);

	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex& mutex)
	{
		// Fast path: no waiters, post the "forced" signal
		if (mutex.try_unlock(true))
		{
			return;
		}

		std::lock_guard lock(mutex.mutex);

		if (const auto cpu = mutex.reown<ppu_thread>(true))
		{
			if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
			{
				// Waiter is being saved: request this syscall to run again
				ppu.state += cpu_flag::again;
				return;
			}

			// The awoken waiter observes the forced unlock as CELL_EBUSY
			static_cast<ppu_thread*>(cpu)->gpr[3] = CELL_EBUSY;
			mutex.awake(cpu);
			notify.cleanup(); // lv2_lwmutex::mutex is not really active 99% of the time, can be ignored
		}
	});

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}