2022-07-28 14:10:16 +03:00
|
|
|
|
#include "stdafx.h"
|
2018-09-29 01:12:00 +03:00
|
|
|
|
#include "sys_lwcond.h"
|
|
|
|
|
|
|
2015-03-07 01:58:42 +03:00
|
|
|
|
#include "Emu/IdManager.h"
|
2014-08-23 18:51:51 +04:00
|
|
|
|
|
2016-04-14 01:23:53 +03:00
|
|
|
|
#include "Emu/Cell/ErrorCodes.h"
|
2014-08-23 18:51:51 +04:00
|
|
|
|
#include "Emu/Cell/PPUThread.h"
|
2014-06-25 00:38:34 +02:00
|
|
|
|
#include "sys_lwmutex.h"
|
2014-01-30 00:31:09 +04:00
|
|
|
|
|
2022-07-21 17:25:02 +03:00
|
|
|
|
#include "util/asm.hpp"
|
|
|
|
|
|
|
2018-08-25 15:39:00 +03:00
|
|
|
|
// Dedicated log channel for the lwcond syscall family in this translation unit
LOG_CHANNEL(sys_lwcond);
|
2014-01-30 00:31:09 +04:00
|
|
|
|
|
2022-07-04 16:02:17 +03:00
|
|
|
|
// Savestate deserialization constructor: restores members in the exact order
// they were written by lv2_lwcond::save() (name, lwid, protocol, control).
lv2_lwcond::lv2_lwcond(utils::serial& ar)
	: name(ar.operator be_t<u64>())
	, lwid(ar)
	, protocol(ar)
	, control(ar.operator decltype(control)())
{
}
|
|
|
|
|
|
|
|
|
|
|
|
// Savestate serialization: member order must match the deserialization
// constructor above, under the shared lv2_sync serialization version.
void lv2_lwcond::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_sync);
	ar(name, lwid, protocol, control);
}
|
|
|
|
|
|
|
2020-03-17 09:59:28 +02:00
|
|
|
|
// Create a lightweight condition variable bound to an existing lwmutex.
// The lwcond inherits the mutex's scheduling protocol (except SYS_SYNC_RETRY).
// Returns: CELL_OK on success (id written to *lwcond_id), CELL_ESRCH if the
// lwmutex does not exist, CELL_EAGAIN if the id manager is exhausted.
error_code _sys_lwcond_create(ppu_thread& ppu, vm::ptr<u32> lwcond_id, u32 lwmutex_id, vm::ptr<sys_lwcond_t> control, u64 name)
{
	ppu.state += cpu_flag::wait;

	sys_lwcond.warning(u8"_sys_lwcond_create(lwcond_id=*0x%x, lwmutex_id=0x%x, control=*0x%x, name=0x%llx (“%s”))", lwcond_id, lwmutex_id, control, name, lv2_obj::name64(std::bit_cast<be_t<u64>>(name)));

	u32 protocol;

	// Extract protocol from lwmutex
	if (!idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&protocol](lv2_lwmutex& mutex)
	{
		protocol = mutex.protocol;
	}))
	{
		return CELL_ESRCH;
	}

	if (protocol == SYS_SYNC_RETRY)
	{
		// Lwcond can't have SYS_SYNC_RETRY protocol
		protocol = SYS_SYNC_PRIORITY;
	}

	if (const u32 id = idm::make<lv2_obj, lv2_lwcond>(name, lwmutex_id, protocol, control))
	{
		// Ensure guest-visible memory write happens in a schedulable state
		ppu.check_state();
		*lwcond_id = id;
		return CELL_OK;
	}

	return CELL_EAGAIN;
}
|
|
|
|
|
|
|
2019-06-20 14:45:17 +03:00
|
|
|
|
// Destroy a lightweight condition variable.
// Fails with CELL_EBUSY while any thread is still queued on it, and with
// CELL_ESRCH if the id is unknown; the withdraw is atomic with the busy check.
error_code _sys_lwcond_destroy(ppu_thread& ppu, u32 lwcond_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwcond.warning("_sys_lwcond_destroy(lwcond_id=0x%x)", lwcond_id);

	const auto cond = idm::withdraw<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> CellError
	{
		// Non-empty sleep queue means waiters still reference this object
		if (atomic_storage<ppu_thread*>::load(cond.sq))
		{
			return CELL_EBUSY;
		}

		return {};
	});

	if (!cond)
	{
		return CELL_ESRCH;
	}

	if (cond.ret)
	{
		return cond.ret;
	}

	return CELL_OK;
}
|
|
|
|
|
|
|
2020-05-14 22:33:41 +03:00
|
|
|
|
// Signal one waiter of a lwcond (optionally a specific thread by id).
// Lambda return protocol: -1 = lookup failure (ESRCH), 0 = nobody woken,
// 1 = a waiter was processed. Savestate interop: any target carrying
// cpu_flag::again aborts the operation and re-marks the caller for replay.
error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 ppu_thread_id, u32 mode)
{
	ppu.state += cpu_flag::wait;

	sys_lwcond.trace("_sys_lwcond_signal(lwcond_id=0x%x, lwmutex_id=0x%x, ppu_thread_id=0x%llx, mode=%d)", lwcond_id, lwmutex_id, ppu_thread_id, mode);

	// Mode 1: lwmutex was initially owned by the calling thread
	// Mode 2: lwmutex was not owned by the calling thread and waiter hasn't been increased
	// Mode 3: lwmutex was forcefully owned by the calling thread
	if (mode < 1 || mode > 3)
	{
		fmt::throw_exception("Unknown mode (%d)", mode);
	}

	const auto cond = idm::check<lv2_obj, lv2_lwcond>(lwcond_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwcond& cond) -> int
	{
		ppu_thread* cpu = nullptr;

		if (ppu_thread_id != u32{umax})
		{
			// Specific thread requested: resolve it first (still unlocked)
			cpu = idm::check_unlocked<named_thread<ppu_thread>>(static_cast<u32>(ppu_thread_id));

			if (!cpu)
			{
				return -1;
			}
		}

		lv2_lwmutex* mutex = nullptr;

		if (mode != 2)
		{
			// Modes 1 and 3 hand the mutex over, so it must exist
			mutex = idm::check_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);

			if (!mutex)
			{
				return -1;
			}
		}

		if (atomic_storage<ppu_thread*>::load(cond.sq))
		{
			std::lock_guard lock(cond.mutex);

			if (cpu)
			{
				if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
				{
					// Target thread is being saved: replay this syscall
					ppu.state += cpu_flag::again;
					return 0;
				}
			}

			auto result = cpu ? cond.unqueue(cond.sq, cpu) :
				cond.schedule<ppu_thread>(cond.sq, cond.protocol);

			if (result)
			{
				if (static_cast<ppu_thread*>(result)->state & cpu_flag::again)
				{
					ppu.state += cpu_flag::again;
					return 0;
				}

				if (mode == 2)
				{
					// Waiter count wasn't incremented: report EBUSY to the woken thread
					static_cast<ppu_thread*>(result)->gpr[3] = CELL_EBUSY;
				}

				if (mode != 2)
				{
					if (mode == 3 && mutex->load_sq()) [[unlikely]]
					{
						std::lock_guard lock(mutex->mutex);

						// Respect ordering of the sleep queue
						mutex->try_own(result, true);
						auto result2 = mutex->reown<ppu_thread>();

						if (result2->state & cpu_flag::again)
						{
							ppu.state += cpu_flag::again;
							return 0;
						}

						if (result2 != result)
						{
							// A different (earlier-queued) thread gets the mutex; wake it instead
							cond.awake(result2);
							result = nullptr;
						}
					}
					else if (mode == 1)
					{
						// Caller owned the mutex: queue the waiter on it instead of waking
						mutex->try_own(result, true);
						result = nullptr;
					}
				}

				if (result)
				{
					cond.awake(result);
				}

				return 1;
			}
		}

		return 0;
	});

	if (!cond || cond.ret == -1)
	{
		return CELL_ESRCH;
	}

	if (!cond.ret)
	{
		if (ppu_thread_id == u32{umax})
		{
			if (mode == 3)
			{
				return not_an_error(CELL_ENOENT);
			}
			else if (mode == 2)
			{
				return CELL_OK;
			}
		}

		return not_an_error(CELL_EPERM);
	}

	return CELL_OK;
}
|
|
|
|
|
|
|
2017-02-06 21:36:46 +03:00
|
|
|
|
// Signal every waiter of a lwcond.
// Lambda return protocol: -1 = lwmutex lookup failure (ESRCH), otherwise the
// number of threads processed. The whole sleep queue is detached atomically,
// then drained in protocol order.
error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u32 mode)
{
	ppu.state += cpu_flag::wait;

	sys_lwcond.trace("_sys_lwcond_signal_all(lwcond_id=0x%x, lwmutex_id=0x%x, mode=%d)", lwcond_id, lwmutex_id, mode);

	// Mode 1: lwmutex was initially owned by the calling thread
	// Mode 2: lwmutex was not owned by the calling thread and waiter hasn't been increased
	if (mode < 1 || mode > 2)
	{
		fmt::throw_exception("Unknown mode (%d)", mode);
	}

	const auto cond = idm::check<lv2_obj, lv2_lwcond>(lwcond_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwcond& cond) -> s32
	{
		lv2_lwmutex* mutex{};

		if (mode != 2)
		{
			mutex = idm::check_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);

			if (!mutex)
			{
				return -1;
			}
		}

		if (atomic_storage<ppu_thread*>::load(cond.sq))
		{
			std::lock_guard lock(cond.mutex);

			u32 result = 0;

			// Savestate interop: if any queued waiter is being saved, replay instead
			for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu)
			{
				if (cpu->state & cpu_flag::again)
				{
					ppu.state += cpu_flag::again;
					return 0;
				}
			}

			// Detach the entire sleep queue, then drain it in protocol order
			auto sq = cond.sq;
			atomic_storage<ppu_thread*>::release(cond.sq, nullptr);

			while (const auto cpu = cond.schedule<ppu_thread>(sq, cond.protocol))
			{
				if (mode == 2)
				{
					// Waiter count wasn't incremented: report EBUSY to the woken thread
					static_cast<ppu_thread*>(cpu)->gpr[3] = CELL_EBUSY;
				}

				if (mode == 1)
				{
					// Hand each waiter over to the lwmutex sleep queue
					mutex->try_own(cpu, true);
				}
				else
				{
					lv2_obj::append(cpu);
				}

				result++;
			}

			if (result && mode == 2)
			{
				lv2_obj::awake_all();
			}

			return result;
		}

		return 0;
	});

	if (!cond || cond.ret == -1)
	{
		return CELL_ESRCH;
	}

	if (mode == 1)
	{
		// Mode 1: return the amount of threads (TODO)
		return not_an_error(cond.ret);
	}

	return CELL_OK;
}
|
|
|
|
|
|
|
2017-02-03 02:16:09 +03:00
|
|
|
|
// Block the calling PPU thread on a lwcond, atomically releasing the paired
// lwmutex (handing it to the next mutex waiter when needed).
// timeout: in microseconds, 0 = infinite. Result is delivered via ppu.gpr[3]
// (CELL_OK on signal, CELL_ETIMEDOUT on timeout) and returned as not_an_error.
// Savestate interop: cpu_flag::again aborts mid-wait and records (via sstate)
// whether this thread was sleeping on the mutex or the cond queue.
error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_lwcond.trace("_sys_lwcond_queue_wait(lwcond_id=0x%x, lwmutex_id=0x%x, timeout=0x%llx)", lwcond_id, lwmutex_id, timeout);

	ppu.gpr[3] = CELL_OK;

	std::shared_ptr<lv2_lwmutex> mutex;

	auto& sstate = *ppu.optional_savestate_state;

	const auto cond = idm::get<lv2_obj, lv2_lwcond>(lwcond_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwcond& cond)
	{
		mutex = idm::get_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);

		if (!mutex)
		{
			return;
		}

		// Increment lwmutex's lwcond's waiters count
		mutex->lwcond_waiters++;

		lv2_obj::prepare_for_sleep(ppu);

		std::lock_guard lock(cond.mutex);

		// Savestate replay: was this thread saved while on the mutex sleep queue?
		const bool mutex_sleep = sstate.try_read<bool>().second;
		sstate.clear();

		if (mutex_sleep)
		{
			// Special: loading state from the point of waiting on lwmutex sleep queue
			mutex->try_own(&ppu, true);
		}
		else
		{
			// Add a waiter
			lv2_obj::emplace(cond.sq, &ppu);
		}

		if (!ppu.loaded_from_savestate && !mutex->try_unlock(false))
		{
			std::lock_guard lock2(mutex->mutex);

			// Process lwmutex sleep queue
			if (const auto cpu = mutex->reown<ppu_thread>())
			{
				if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
				{
					// Undo our enqueue and mark this syscall for replay
					ensure(cond.unqueue(cond.sq, &ppu));
					ppu.state += cpu_flag::again;
					return;
				}

				// Put the current thread to sleep and schedule lwmutex waiter atomically
				cond.append(cpu);
				cond.sleep(ppu, timeout);
				return;
			}
		}

		cond.sleep(ppu, timeout);
	});

	if (!cond || !mutex)
	{
		return CELL_ESRCH;
	}

	if (ppu.state & cpu_flag::again)
	{
		return CELL_OK;
	}

	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			break;
		}

		if (is_stopped(state))
		{
			// Emulation is stopping: record where (if anywhere) we are still queued
			std::scoped_lock lock(cond->mutex, mutex->mutex);

			bool mutex_sleep = false;
			bool cond_sleep = false;

			for (auto cpu = mutex->load_sq(); cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					mutex_sleep = true;
					break;
				}
			}

			for (auto cpu = atomic_storage<ppu_thread*>::load(cond->sq); cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					cond_sleep = true;
					break;
				}
			}

			if (!cond_sleep && !mutex_sleep)
			{
				// Already dequeued by a signaller: nothing to save
				break;
			}

			// Persist which queue we slept on for the replay path above
			sstate(mutex_sleep);
			ppu.state += cpu_flag::again;
			break;
		}

		// Short spin before committing to a full wait
		for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++)
		{
			busy_wait(500);
		}

		if (ppu.state & cpu_flag::signal)
		{
			continue;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				const u64 start_time = ppu.start_time;

				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				std::lock_guard lock(cond->mutex);

				if (cond->unqueue(cond->sq, &ppu))
				{
					// Still on the cond queue: genuine timeout
					ppu.gpr[3] = CELL_ETIMEDOUT;
					break;
				}

				reader_lock lock2(mutex->mutex);

				bool mutex_sleep = false;

				for (auto cpu = mutex->load_sq(); cpu; cpu = cpu->next_cpu)
				{
					if (cpu == &ppu)
					{
						mutex_sleep = true;
						break;
					}
				}

				if (!mutex_sleep)
				{
					break;
				}

				// Signalled onto the mutex queue: keep waiting there, without timeout
				mutex->sleep(ppu);
				ppu.start_time = start_time; // Restore start time because awake has been called
				timeout = 0;
				continue;
			}
		}
		else
		{
			ppu.state.wait(state);
		}
	}

	if (--mutex->lwcond_waiters == smin)
	{
		// Notify the thread destroying lwmutex on last waiter
		mutex->lwcond_waiters.notify_all();
	}

	// Return cause
	return not_an_error(ppu.gpr[3]);
}
|