rpcsx/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp


#include "stdafx.h"
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"

#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_lwmutex.h"
#include "sys_lwcond.h"

namespace vm { using namespace ps3; }

logs::channel sys_lwcond("sys_lwcond", logs::level::notice);

extern u64 get_system_time();
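
// Creates the kernel-side object backing a user-space sys_lwcond_t: verifies that the
// associated lwmutex exists, allocates an lv2_lwcond and returns its id through lwcond_id.
// The control pointer and arg5 are accepted but not used by this implementation.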
error_code _sys_lwcond_create(vm::ptr<u32> lwcond_id, u32 lwmutex_id, vm::ptr<sys_lwcond_t> control, u64 name, u32 arg5)
{
	sys_lwcond.warning("_sys_lwcond_create(lwcond_id=*0x%x, lwmutex_id=0x%x, control=*0x%x, name=0x%llx, arg5=0x%x)", lwcond_id, lwmutex_id, control, name, arg5);

	// Temporarily
	if (!idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id))
	{
		return CELL_ESRCH;
	}

	if (const u32 id = idm::make<lv2_obj, lv2_lwcond>(name, lwmutex_id))
	{
		*lwcond_id = id;
		return CELL_OK;
	}

	return CELL_EAGAIN;
}
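
// Destroys an lwcond: the object is withdrawn from the id manager only if no thread is
// currently waiting on it, otherwise CELL_EBUSY is reported back to the caller.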
error_code _sys_lwcond_destroy(u32 lwcond_id)
{
	sys_lwcond.warning("_sys_lwcond_destroy(lwcond_id=0x%x)", lwcond_id);

	const auto cond = idm::withdraw<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> CellError
	{
		if (cond.waiters)
		{
			return CELL_EBUSY;
		}

		return {};
	});

	if (!cond)
	{
		return CELL_ESRCH;
	}

	if (cond.ret)
	{
		return cond.ret;
	}

	return CELL_OK;
}
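
// Wakes at most one thread waiting on the lwcond. A specific waiter may be targeted via
// ppu_thread_id, or -1 selects the next waiter according to the lwmutex protocol. The mode
// (documented below) controls whether lwmutex ownership is handed over to the woken thread.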
error_code _sys_lwcond_signal(u32 lwcond_id, u32 lwmutex_id, u32 ppu_thread_id, u32 mode)
{
	sys_lwcond.trace("_sys_lwcond_signal(lwcond_id=0x%x, lwmutex_id=0x%x, ppu_thread_id=0x%x, mode=%d)", lwcond_id, lwmutex_id, ppu_thread_id, mode);

	// Mode 1: lwmutex was initially owned by the calling thread
	// Mode 2: lwmutex was not owned by the calling thread and waiter hasn't been increased
	// Mode 3: lwmutex was forcefully owned by the calling thread

	if (mode < 1 || mode > 3)
	{
		fmt::throw_exception("Unknown mode (%d)" HERE, mode);
	}
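
	// For modes 1 and 3 an available lwmutex "signaled" token is consumed so the woken
	// thread resumes holding the lwmutex; if none is available, the thread is parked on the
	// lwmutex sleep queue instead and the call still succeeds (mode is forced to 2).
	// gpr[3] tells the woken thread whether it has to re-acquire the lwmutex (mode 2).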

	lv2_lwmutex* mutex = nullptr;

	const auto cond = idm::check<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> cpu_thread*
	{
		mutex = idm::check_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);

		if (mutex && cond.waiters)
		{
			semaphore_lock lock(mutex->mutex);

			cpu_thread* result = nullptr;

			if (ppu_thread_id != -1)
			{
				for (auto cpu : cond.sq)
				{
					if (cpu->id == ppu_thread_id)
					{
						verify(HERE), cond.unqueue(cond.sq, cpu);
						result = cpu;
						break;
					}
				}
			}
			else
			{
				result = cond.schedule<ppu_thread>(cond.sq, mutex->protocol);
			}

			if (result)
			{
				cond.waiters--;

				static_cast<ppu_thread*>(result)->gpr[3] = mode == 2;

				if (mode != 2 && !mutex->signaled.fetch_op([](u32& v) { if (v) v--; }))
				{
					mutex->sq.emplace_back(result);
					result = nullptr;
					mode = 2; // Enforce CELL_OK
				}

				return result;
			}
		}

		return nullptr;
	});

	if ((lwmutex_id && !mutex) || !cond)
	{
		return CELL_ESRCH;
	}

	if (cond.ret)
	{
		cond.ret->set_signal();
	}
	else if (mode == 2)
	{
		return CELL_OK;
	}
	else if (mode == 1 || ppu_thread_id == -1)
	{
		return not_an_error(CELL_EPERM);
	}
	else
	{
		return not_an_error(CELL_ENOENT);
	}

	return CELL_OK;
}
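
// Wakes every thread currently waiting on the lwcond, applying the same per-waiter
// ownership handoff as _sys_lwcond_signal. In mode 1 the number of woken threads is returned.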
error_code _sys_lwcond_signal_all(u32 lwcond_id, u32 lwmutex_id, u32 mode)
{
	sys_lwcond.trace("_sys_lwcond_signal_all(lwcond_id=0x%x, lwmutex_id=0x%x, mode=%d)", lwcond_id, lwmutex_id, mode);

	// Mode 1: lwmutex was initially owned by the calling thread
	// Mode 2: lwmutex was not owned by the calling thread and waiter hasn't been increased

	if (mode < 1 || mode > 2)
	{
		fmt::throw_exception("Unknown mode (%d)" HERE, mode);
	}

	std::basic_string<cpu_thread*> threads;

	lv2_lwmutex* mutex = nullptr;

	const auto cond = idm::check<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> u32
	{
		mutex = idm::check_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);

		if (mutex && cond.waiters)
		{
			semaphore_lock lock(mutex->mutex);

			u32 result = 0;

			while (const auto cpu = cond.schedule<ppu_thread>(cond.sq, mutex->protocol))
			{
				cond.waiters--;

				static_cast<ppu_thread*>(cpu)->gpr[3] = mode == 2;

				if (mode != 2 && !mutex->signaled.fetch_op([](u32& v) { if (v) v--; }))
				{
					mutex->sq.emplace_back(cpu);
				}
				else
				{
					threads.push_back(cpu);
				}

				result++;
			}

			return result;
		}

		return 0;
	});

	if ((lwmutex_id && !mutex) || !cond)
	{
		return CELL_ESRCH;
	}

	// TODO: signal only one thread
	for (auto cpu : threads)
	{
		cpu->set_signal();
	}

	if (mode == 1)
	{
		// Mode 1: return the amount of threads (TODO)
		return not_an_error(cond.ret);
	}

	return CELL_OK;
}
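
// Called on behalf of a thread that is about to wait: it is queued on the lwcond and the
// lwmutex is released in the same operation (the next lwmutex waiter is resumed, or the
// "signaled" counter is incremented). The thread then sleeps until signalled or timed out.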
error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout)
{
	sys_lwcond.trace("_sys_lwcond_queue_wait(lwcond_id=0x%x, lwmutex_id=0x%x, timeout=0x%llx)", lwcond_id, lwmutex_id, timeout);

	const u64 start_time = ppu.gpr[10] = get_system_time();

	std::shared_ptr<lv2_lwmutex> mutex;

	const auto cond = idm::get<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> cpu_thread*
	{
		mutex = idm::get_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);

		if (!mutex)
		{
			return nullptr;
		}

		semaphore_lock lock(mutex->mutex);

		// Add a waiter
		cond.waiters++;
		cond.sq.emplace_back(&ppu);

		// Process lwmutex sleep queue
		if (const auto cpu = mutex->schedule<ppu_thread>(mutex->sq, mutex->protocol))
		{
			return cpu;
		}

		mutex->signaled++;
		return nullptr;
	});

	if (!cond || !mutex)
	{
		return CELL_ESRCH;
	}

	if (cond.ret)
	{
		cond.ret->set_signal();
	}
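
	// The loop below waits for cpu_flag::signal. With a timeout, expiry removes the thread
	// from the lwcond queue; if a signaller already dequeued it, the wait continues until
	// the signal actually arrives. A pending lwmutex "signaled" token consumed on the
	// timeout path is reported as CELL_EDEADLK rather than CELL_ETIMEDOUT.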

	// SLEEP
	while (!ppu.state.test_and_reset(cpu_flag::signal))
	{
		if (timeout)
		{
			const u64 passed = get_system_time() - start_time;

			if (passed >= timeout)
			{
				semaphore_lock lock(mutex->mutex);

				if (!cond->unqueue(cond->sq, &ppu))
				{
					timeout = 0;
					continue;
				}

				cond->waiters--;

				if (mutex->signaled.fetch_op([](u32& v) { if (v) v--; }))
				{
					return not_an_error(CELL_EDEADLK);
				}

				return not_an_error(CELL_ETIMEDOUT);
			}

			thread_ctrl::wait_for(timeout - passed);
		}
		else
		{
			thread_ctrl::wait();
		}
	}

	// Return cause
	return not_an_error(ppu.gpr[3] ? CELL_EBUSY : CELL_OK);
}