rpcsx/rpcs3/Emu/Cell/lv2/sys_cond.cpp

#include "stdafx.h"
#include "sys_cond.h"

#include "Emu/IdManager.h"
#include "Emu/IPC.h"

#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"

LOG_CHANNEL(sys_cond);

template<> DECLARE(ipc_manager<lv2_cond, u64>::g_ipc) {};
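
// Create an lv2 condition variable tied to an existing lv2 mutex (missing mutex -> CELL_ESRCH).
// The attribute block supplies pshared/ipc_key/flags/name; on success the new id is written to *cond_id.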
error_code sys_cond_create(ppu_thread& ppu, vm::ptr<u32> cond_id, u32 mutex_id, vm::ptr<sys_cond_attribute_t> attr)
{
	vm::temporary_unlock(ppu);

	sys_cond.warning("sys_cond_create(cond_id=*0x%x, mutex_id=0x%x, attr=*0x%x)", cond_id, mutex_id, attr);

	auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id);

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	const auto _attr = *attr;

	if (auto error = lv2_obj::create<lv2_cond>(_attr.pshared, _attr.ipc_key, _attr.flags, [&]
	{
		return std::make_shared<lv2_cond>(
			_attr.pshared,
			_attr.flags,
			_attr.ipc_key,
			_attr.name_u64,
			mutex_id,
			std::move(mutex));
	}))
	{
		return error;
	}

	*cond_id = idm::last_id();
	return CELL_OK;
}
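
// Destroy a condition variable. Fails with CELL_EBUSY while threads are still waiting on it;
// otherwise the owning mutex's condition-variable count is decremented and the object is withdrawn.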
error_code sys_cond_destroy(ppu_thread& ppu, u32 cond_id)
{
	vm::temporary_unlock(ppu);

	sys_cond.warning("sys_cond_destroy(cond_id=0x%x)", cond_id);

	const auto cond = idm::withdraw<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond) -> CellError
	{
		std::lock_guard lock(cond.mutex->mutex);

		if (cond.waiters)
		{
			return CELL_EBUSY;
		}

		cond.mutex->obj_count.atomic_op([](typename lv2_mutex::count_info& info) { info.cond_count--; });

		return {};
	});

	if (!cond)
	{
		return CELL_ESRCH;
	}

	if (cond.ret)
	{
		return cond.ret;
	}

	return CELL_OK;
}
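
// Wake at most one waiter, chosen by the mutex protocol. Mutex ownership is passed via try_own:
// if the mutex is free the waiter is awoken as its new owner, otherwise the waiter is left queued
// on the mutex by try_own and resumes once ownership is handed to it.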
error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id)
{
	vm::temporary_unlock(ppu);

	sys_cond.trace("sys_cond_signal(cond_id=0x%x)", cond_id);

	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [](lv2_cond& cond)
	{
		if (cond.waiters)
		{
			std::lock_guard lock(cond.mutex->mutex);

			if (const auto cpu = cond.schedule<ppu_thread>(cond.sq, cond.mutex->protocol))
			{
				// TODO: Is EBUSY returned after requeueing, on sys_cond_destroy?
				cond.waiters--;

				if (cond.mutex->try_own(*cpu, cpu->id))
				{
					cond.awake(cpu);
				}
			}
		}
	});

	if (!cond)
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}
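
// Wake all waiters: each one is moved onto the mutex via try_own; at most one can actually become
// the owner (verified below), and only that thread is awoken immediately.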
error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
{
	vm::temporary_unlock(ppu);

	sys_cond.trace("sys_cond_signal_all(cond_id=0x%x)", cond_id);

	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [](lv2_cond& cond)
	{
		if (cond.waiters)
		{
			std::lock_guard lock(cond.mutex->mutex);

			cpu_thread* result = nullptr;
			cond.waiters -= ::size32(cond.sq);

			while (const auto cpu = cond.schedule<ppu_thread>(cond.sq, SYS_SYNC_PRIORITY))
			{
				if (cond.mutex->try_own(*cpu, cpu->id))
				{
					verify(HERE), !std::exchange(result, cpu);
				}
			}

			if (result)
			{
				lv2_obj::awake(result);
			}
		}
	});

	if (!cond)
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}
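
// Signal one specific thread. Returns CELL_ESRCH if the condition variable or the target thread does
// not exist, and CELL_EPERM (as a non-error) if that thread is not currently waiting on this variable.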
error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
{
	vm::temporary_unlock(ppu);

	sys_cond.trace("sys_cond_signal_to(cond_id=0x%x, thread_id=0x%x)", cond_id, thread_id);

	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond) -> int
	{
		if (const auto cpu = idm::check_unlocked<named_thread<ppu_thread>>(thread_id);
			!cpu || cpu->joiner == ppu_join_status::exited)
		{
			return -1;
		}

		if (cond.waiters)
		{
			std::lock_guard lock(cond.mutex->mutex);

			for (auto cpu : cond.sq)
			{
				if (cpu->id == thread_id)
				{
					verify(HERE), cond.unqueue(cond.sq, cpu);

					cond.waiters--;

					if (cond.mutex->try_own(*cpu, cpu->id))
					{
						cond.awake(cpu);
					}

					return 1;
				}
			}
		}

		return 0;
	});

	if (!cond || cond.ret == -1)
	{
		return CELL_ESRCH;
	}

	if (!cond.ret)
	{
		return not_an_error(CELL_EPERM);
	}

	return CELL_OK;
}
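
// Atomically release the associated mutex and wait on the condition variable, then reacquire the
// mutex before returning. The caller must own the mutex (CELL_EPERM otherwise); its recursive lock
// count is saved and restored across the wait.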
error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
{
	vm::temporary_unlock(ppu);

	sys_cond.trace("sys_cond_wait(cond_id=0x%x, timeout=%lld)", cond_id, timeout);

	const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond)
	{
		if (cond.mutex->owner >> 1 == ppu.id)
		{
			// Add a "promise" to add a waiter
			cond.waiters++;
		}

		// Save the recursive value
		return cond.mutex->lock_count.load();
	});

	if (!cond)
	{
		return CELL_ESRCH;
	}

	// Verify ownership
	if (cond->mutex->owner >> 1 != ppu.id)
	{
		return CELL_EPERM;
	}
	else
	{
		// Further function result
		ppu.gpr[3] = CELL_OK;

		std::lock_guard lock(cond->mutex->mutex);

		// Register waiter
		cond->sq.emplace_back(&ppu);

		// Unlock the mutex
		cond->mutex->lock_count = 0;

		if (auto cpu = cond->mutex->reown<ppu_thread>())
		{
			cond->mutex->append(cpu);
		}

		// Sleep current thread and schedule mutex waiter
		cond->sleep(ppu, timeout);
	}
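
	// Wait for a signal. On timeout, try to leave the condition variable's sleep queue; if this
	// thread was already signalled (and therefore moved to the mutex), keep waiting with the timeout
	// disabled until mutex ownership is handed over, so ETIMEDOUT is not reported after a signal.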
	while (!ppu.state.test_and_reset(cpu_flag::signal))
	{
		if (ppu.is_stopped())
		{
			return 0;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				std::lock_guard lock(cond->mutex->mutex);

				// Try to cancel the waiting
				if (cond->unqueue(cond->sq, &ppu))
				{
					// TODO: Is EBUSY returned after requeueing, on sys_cond_destroy?
					cond->waiters--;

					ppu.gpr[3] = CELL_ETIMEDOUT;

					// Own or requeue
					if (cond->mutex->try_own(ppu, ppu.id))
					{
						break;
					}
				}
				else if (cond->mutex->owner >> 1 == ppu.id)
				{
					break;
				}

				cond->mutex->sleep(ppu);
				timeout = 0;
				continue;
			}
		}
		else
		{
			thread_ctrl::wait();
		}
	}

	// Verify ownership
	verify(HERE), cond->mutex->owner >> 1 == ppu.id;

	// Restore the recursive value
	cond->mutex->lock_count = cond.ret;

	return not_an_error(ppu.gpr[3]);
}