// rpcs3/Emu/Cell/lv2/sys_lwcond.cpp — lv2 lightweight condition variable syscalls
#include "stdafx.h"
#include "sys_lwcond.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_lwmutex.h"
LOG_CHANNEL(sys_lwcond);
// Create a lwcond object bound to an existing lwmutex.
// Writes the new object id to *lwcond_id on success.
// Returns CELL_ESRCH if lwmutex_id is invalid, CELL_EAGAIN if the id table is full.
error_code _sys_lwcond_create(ppu_thread& ppu, vm::ptr<u32> lwcond_id, u32 lwmutex_id, vm::ptr<sys_lwcond_t> control, u64 name)
{
	ppu.state += cpu_flag::wait;

	sys_lwcond.warning(u8"_sys_lwcond_create(lwcond_id=*0x%x, lwmutex_id=0x%x, control=*0x%x, name=0x%llx (“%s”))", lwcond_id, lwmutex_id, control, name, lv2_obj::name64(std::bit_cast<be_t<u64>>(name)));

	u32 protocol;

	// Extract protocol from lwmutex (also validates lwmutex_id)
	if (!idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&protocol](lv2_lwmutex& mutex)
		{
			protocol = mutex.protocol;
		}))
	{
		return CELL_ESRCH;
	}

	if (protocol == SYS_SYNC_RETRY)
	{
		// Lwcond can't have SYS_SYNC_RETRY protocol
		protocol = SYS_SYNC_PRIORITY;
	}

	if (const u32 id = idm::make<lv2_obj, lv2_lwcond>(name, lwmutex_id, protocol, control))
	{
		*lwcond_id = id;
		return CELL_OK;
	}

	return CELL_EAGAIN;
}
// Destroy a lwcond object.
// Fails with CELL_EBUSY if any thread is still waiting on it,
// CELL_ESRCH if the id is invalid.
error_code _sys_lwcond_destroy(ppu_thread& ppu, u32 lwcond_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwcond.warning("_sys_lwcond_destroy(lwcond_id=0x%x)", lwcond_id);

	// Withdraw only succeeds (removes the object) when the lambda returns no error
	const auto cond = idm::withdraw<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> CellError
	{
		if (cond.waiters)
		{
			return CELL_EBUSY;
		}

		return {};
	});

	if (!cond)
	{
		return CELL_ESRCH;
	}

	if (cond.ret)
	{
		return cond.ret;
	}

	return CELL_OK;
}
// Signal one waiter of a lwcond (or a specific thread when ppu_thread_id != UINT32_MAX).
// Mode 1: lwmutex was initially owned by the calling thread
// Mode 2: lwmutex was not owned by the calling thread and waiter hasn't been increased
// Mode 3: lwmutex was forcefully owned by the calling thread
error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 ppu_thread_id, u32 mode)
{
	ppu.state += cpu_flag::wait;

	sys_lwcond.trace("_sys_lwcond_signal(lwcond_id=0x%x, lwmutex_id=0x%x, ppu_thread_id=0x%llx, mode=%d)", lwcond_id, lwmutex_id, ppu_thread_id, mode);

	if (mode < 1 || mode > 3)
	{
		fmt::throw_exception("Unknown mode (%d)", mode);
	}

	// Lambda result: -1 = lookup failure (ESRCH), 0 = no waiter dequeued, 1 = a waiter was dequeued
	const auto cond = idm::check<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> int
	{
		ppu_thread* cpu = nullptr;

		if (ppu_thread_id != UINT32_MAX)
		{
			// Signal a specific thread: it must exist and must not have exited
			cpu = idm::check_unlocked<named_thread<ppu_thread>>(static_cast<u32>(ppu_thread_id));

			if (!cpu || cpu->joiner == ppu_join_status::exited)
			{
				return -1;
			}
		}

		lv2_lwmutex* mutex = nullptr;

		if (mode != 2)
		{
			// Modes 1 and 3 hand the woken thread over to the lwmutex, so it must exist
			mutex = idm::check_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);

			if (!mutex)
			{
				return -1;
			}
		}

		if (cond.waiters)
		{
			std::lock_guard lock(cond.mutex);

			// Either remove the requested thread from the sleep queue,
			// or pick one according to the lwcond's wait protocol
			auto result = cpu ? cond.unqueue(cond.sq, cpu) :
				cond.schedule<ppu_thread>(cond.sq, cond.protocol);

			if (result)
			{
				cond.waiters--;

				if (mode == 2)
				{
					// Mode 2: the woken thread must retry acquiring the lwmutex in user space
					static_cast<ppu_thread*>(result)->gpr[3] = CELL_EBUSY;
				}

				if (mode != 2)
				{
					ensure(!mutex->signaled);
					std::lock_guard lock(mutex->mutex);

					if (mode == 3 && !mutex->sq.empty()) [[unlikely]]
					{
						// Respect ordering of the sleep queue
						mutex->sq.emplace_back(result);
						result = mutex->schedule<ppu_thread>(mutex->sq, mutex->protocol);
					}
					else if (mode == 1)
					{
						// Mode 1: transfer the waiter to the lwmutex sleep queue instead of waking it
						ensure(mutex->add_waiter(result));
						result = nullptr;
					}
				}

				if (result)
				{
					cond.awake(result);
				}

				return 1;
			}
		}

		return 0;
	});

	if (!cond || cond.ret == -1)
	{
		return CELL_ESRCH;
	}

	if (!cond.ret)
	{
		// Nothing was woken; exact error depends on mode and whether a specific thread was requested
		if (ppu_thread_id == UINT32_MAX)
		{
			if (mode == 3)
			{
				return not_an_error(CELL_ENOENT);
			}
			else if (mode == 2)
			{
				return CELL_OK;
			}
		}

		return not_an_error(CELL_EPERM);
	}

	return CELL_OK;
}
// Signal all waiters of a lwcond.
// Mode 1: lwmutex was initially owned by the calling thread
// Mode 2: lwmutex was not owned by the calling thread and waiter hasn't been increased
// In mode 1 the woken count is returned (as a non-error), otherwise CELL_OK.
error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u32 mode)
{
	ppu.state += cpu_flag::wait;

	sys_lwcond.trace("_sys_lwcond_signal_all(lwcond_id=0x%x, lwmutex_id=0x%x, mode=%d)", lwcond_id, lwmutex_id, mode);

	if (mode < 1 || mode > 2)
	{
		fmt::throw_exception("Unknown mode (%d)", mode);
	}

	bool need_awake = false;

	// Lambda result: -1 = lwmutex lookup failure (ESRCH), otherwise number of threads dequeued
	const auto cond = idm::check<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> s32
	{
		lv2_lwmutex* mutex;

		if (mode != 2)
		{
			mutex = idm::check_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);

			if (!mutex)
			{
				return -1;
			}
		}

		if (cond.waiters)
		{
			std::lock_guard lock(cond.mutex);

			u32 result = 0;

			// Drain the sleep queue in protocol order
			while (const auto cpu = cond.schedule<ppu_thread>(cond.sq, cond.protocol))
			{
				cond.waiters--;

				if (mode == 2)
				{
					// Mode 2: each woken thread must retry acquiring the lwmutex in user space
					static_cast<ppu_thread*>(cpu)->gpr[3] = CELL_EBUSY;
				}

				if (mode == 1)
				{
					// Mode 1: move the waiter onto the lwmutex sleep queue instead of waking it
					ensure(!mutex->signaled);
					std::lock_guard lock(mutex->mutex);
					ensure(mutex->add_waiter(cpu));
				}
				else
				{
					// Defer the actual wakeup until all waiters are queued
					lv2_obj::append(cpu);
					need_awake = true;
				}

				result++;
			}

			if (need_awake)
			{
				lv2_obj::awake_all();
			}

			return result;
		}

		return 0;
	});

	if (!cond || cond.ret == -1)
	{
		return CELL_ESRCH;
	}

	if (mode == 1)
	{
		// Mode 1: return the amount of threads (TODO)
		return not_an_error(cond.ret);
	}

	return CELL_OK;
}
// Block the calling thread on a lwcond, atomically releasing the associated lwmutex.
// timeout is in microseconds; 0 means wait forever (presumably — matches the untimed
// wait branch below; confirm against callers).
// The wait result is delivered via ppu.gpr[3] (CELL_OK / CELL_ETIMEDOUT / CELL_EBUSY set by signalers).
error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_lwcond.trace("_sys_lwcond_queue_wait(lwcond_id=0x%x, lwmutex_id=0x%x, timeout=0x%llx)", lwcond_id, lwmutex_id, timeout);

	ppu.gpr[3] = CELL_OK;

	std::shared_ptr<lv2_lwmutex> mutex;

	const auto cond = idm::get<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond)
	{
		mutex = idm::get_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);

		if (!mutex)
		{
			return;
		}

		// Try to increment lwmutex's lwcond's waiters count
		if (!mutex->lwcond_waiters.fetch_op([](s32& val)
			{
				if (val == INT32_MIN)
				{
					return false;
				}

				val++;
				return true;
			}).second)
		{
			// Failed - lwmutex was destroyed and all waiters have quit
			mutex.reset();
			return;
		}

		std::lock_guard lock(cond.mutex);

		// Add a waiter
		cond.waiters++;
		cond.sq.emplace_back(&ppu);

		{
			std::lock_guard lock2(mutex->mutex);

			// Process lwmutex sleep queue: hand the lwmutex to the next waiter,
			// or mark it signaled if nobody is queued
			if (const auto cpu = mutex->schedule<ppu_thread>(mutex->sq, mutex->protocol))
			{
				cond.append(cpu);
			}
			else
			{
				mutex->signaled |= 1;
			}
		}

		// Sleep current thread and schedule lwmutex waiter
		cond.sleep(ppu, timeout);
	});

	if (!cond || !mutex)
	{
		return CELL_ESRCH;
	}

	while (!ppu.state.test_and_reset(cpu_flag::signal))
	{
		if (ppu.is_stopped())
		{
			return 0;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					return 0;
				}

				std::lock_guard lock(cond->mutex);

				// If still queued on the lwcond, this is a genuine timeout
				if (cond->unqueue(cond->sq, &ppu))
				{
					cond->waiters--;
					ppu.gpr[3] = CELL_ETIMEDOUT;
					break;
				}

				reader_lock lock2(mutex->mutex);

				// Already moved off both queues: a signal won the race, treat as woken
				if (std::find(mutex->sq.cbegin(), mutex->sq.cend(), &ppu) == mutex->sq.cend())
				{
					break;
				}

				// Transferred to the lwmutex sleep queue: keep waiting there, untimed
				mutex->sleep(ppu);
				timeout = 0;
				continue;
			}
		}
		else
		{
			thread_ctrl::wait();
		}
	}

	if (--mutex->lwcond_waiters == INT32_MIN)
	{
		// Notify the thread destroying lwmutex on last waiter
		mutex->lwcond_waiters.notify_all();
	}

	// Return cause
	return not_an_error(ppu.gpr[3]);
}