// rpcsx/rpcs3/Emu/Cell/lv2/sys_event_flag.cpp — lv2 event flag syscall implementations
#include "stdafx.h"
#include "sys_event_flag.h"
2015-03-06 23:58:42 +01:00
#include "Emu/IdManager.h"
2017-07-24 17:59:48 +02:00
#include "Emu/IPC.h"
2014-08-08 17:55:12 +02:00
2016-04-14 00:23:53 +02:00
#include "Emu/Cell/ErrorCodes.h"
2014-08-23 16:51:51 +02:00
#include "Emu/Cell/PPUThread.h"
2014-08-08 17:55:12 +02:00
2016-05-13 15:55:34 +02:00
#include <algorithm>
LOG_CHANNEL(sys_event_flag);
2014-08-08 17:55:12 +02:00
// Deserialization constructor: restores an event flag from a savestate stream.
// The four header fields are consumed by the member initializers in stream
// order; the current bit pattern is read explicitly afterwards.
// Field order must match lv2_event_flag::save().
lv2_event_flag::lv2_event_flag(utils::serial& ar)
	: protocol(ar)
	, key(ar)
	, type(ar)
	, name(ar)
{
	ar(pattern);
}
// Savestate load entry point: reconstruct the event flag object and register
// it under its IPC key (lv2_obj::load deduplicates shared/IPC objects).
std::shared_ptr<void> lv2_event_flag::load(utils::serial& ar)
{
	auto obj = std::make_shared<lv2_event_flag>(ar);

	return lv2_obj::load(obj->key, obj);
}
// Serialize the event flag into a savestate.
// Field order must match the utils::serial constructor: the four header
// fields first, then the current bit pattern.
void lv2_event_flag::save(utils::serial& ar)
{
	ar(protocol, key, type, name, pattern);
}
// Create an event flag object with the given attributes and initial pattern.
// Writes the new object id to *id on success.
error_code sys_event_flag_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<sys_event_flag_attribute_t> attr, u64 init)
{
	ppu.state += cpu_flag::wait;

	sys_event_flag.warning("sys_event_flag_create(id=*0x%x, attr=*0x%x, init=0x%llx)", id, attr, init);

	// Both the output pointer and the attribute struct must be present
	if (!id || !attr)
	{
		return CELL_EFAULT;
	}

	// Snapshot guest attributes before validating them
	const auto attrs = *attr;

	// Only FIFO and priority-ordered wakeup are valid for event flags
	switch (const u32 proto = attrs.protocol)
	{
	case SYS_SYNC_FIFO:
	case SYS_SYNC_PRIORITY:
	{
		break;
	}
	default:
	{
		sys_event_flag.error("sys_event_flag_create(): unknown protocol (0x%x)", proto);
		return CELL_EINVAL;
	}
	}

	const u32 waiter_type = attrs.type;

	if (waiter_type != SYS_SYNC_WAITER_SINGLE && waiter_type != SYS_SYNC_WAITER_MULTIPLE)
	{
		sys_event_flag.error("sys_event_flag_create(): unknown type (0x%x)", waiter_type);
		return CELL_EINVAL;
	}

	const u64 ipc_key = lv2_obj::get_key(attrs);

	// Factory invoked by lv2_obj::create only if id/IPC checks pass
	const auto make = [&]
	{
		return std::make_shared<lv2_event_flag>(
			attrs.protocol,
			ipc_key,
			attrs.type,
			attrs.name_u64,
			init);
	};

	if (const auto error = lv2_obj::create<lv2_event_flag>(attrs.pshared, ipc_key, attrs.flags, make))
	{
		return error;
	}

	*id = idm::last_id();
	return CELL_OK;
}
// Destroy an event flag. Fails with EBUSY while any thread is still waiting.
error_code sys_event_flag_destroy(ppu_thread& ppu, u32 id)
{
	ppu.state += cpu_flag::wait;

	sys_event_flag.warning("sys_event_flag_destroy(id=0x%x)", id);

	// Withdraw the object from the id manager only if it has no waiters
	const auto flag = idm::withdraw<lv2_obj, lv2_event_flag>(id, [&](lv2_event_flag& ef) -> CellError
	{
		if (ef.waiters)
		{
			return CELL_EBUSY;
		}

		lv2_obj::on_id_destroy(ef, ef.key);
		return {};
	});

	if (!flag)
	{
		return CELL_ESRCH;
	}

	if (flag.ret)
	{
		return flag.ret;
	}

	return CELL_OK;
}
// Block until the bits in `bitptn` are satisfied under `mode`, or until
// `timeout` (microsecond units per other lv2 waits — TODO confirm) expires.
// *result always receives the observed pattern (0 on early failure paths).
error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm::ptr<u64> result, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_event_flag.trace("sys_event_flag_wait(id=0x%x, bitptn=0x%llx, mode=0x%x, result=*0x%x, timeout=0x%llx)", id, bitptn, mode, result, timeout);

	// Fix function arguments for external access:
	// while this thread sleeps, sys_event_flag_set/cancel read gpr[4] (pattern)
	// and gpr[5] (mode) and deliver the outcome through gpr[3] (return code)
	// and gpr[6] (resulting pattern).
	ppu.gpr[3] = -1;
	ppu.gpr[4] = bitptn;
	ppu.gpr[5] = mode;
	ppu.gpr[6] = 0;

	// Always set result: RAII helper writes `val` back to guest memory on
	// every exit path (if the pointer is non-null).
	struct store_result
	{
		vm::ptr<u64> ptr;
		u64 val = 0;

		~store_result() noexcept
		{
			if (ptr) *ptr = val;
		}
	} store{result};

	if (!lv2_event_flag::check_mode(mode))
	{
		sys_event_flag.error("sys_event_flag_wait(): unknown mode (0x%x)", mode);
		return CELL_EINVAL;
	}

	const auto flag = idm::get<lv2_obj, lv2_event_flag>(id, [&](lv2_event_flag& flag) -> CellError
	{
		// Fast path: try to satisfy the wait atomically without taking the lock
		if (flag.pattern.fetch_op([&](u64& pat)
		{
			return lv2_event_flag::check_pattern(pat, bitptn, mode, &ppu.gpr[6]);
		}).second)
		{
			// TODO: is it possible to return EPERM in this case?
			return {};
		}

		lv2_obj::notify_all_t notify;

		std::lock_guard lock(flag.mutex);

		// Re-check under the lock: the pattern may have been set in between
		if (flag.pattern.fetch_op([&](u64& pat)
		{
			return lv2_event_flag::check_pattern(pat, bitptn, mode, &ppu.gpr[6]);
		}).second)
		{
			return {};
		}

		// Single-waiter flags reject a second concurrent waiter
		if (flag.type == SYS_SYNC_WAITER_SINGLE && !flag.sq.empty())
		{
			return CELL_EPERM;
		}

		// Enqueue and go to sleep; EBUSY marks the "actually waiting" outcome
		flag.waiters++;
		flag.sq.emplace_back(&ppu);
		flag.sleep(ppu, timeout, true);
		return CELL_EBUSY;
	});

	if (!flag)
	{
		return CELL_ESRCH;
	}

	if (flag.ret)
	{
		if (flag.ret != CELL_EBUSY)
		{
			return flag.ret;
		}
	}
	else
	{
		// Satisfied immediately: gpr[6] holds the pattern from check_pattern
		store.val = ppu.gpr[6];
		return CELL_OK;
	}

	// Sleep loop: wait for a signal, emulator stop, or timeout
	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			// Woken by a setter/canceller; gpr[3]/gpr[6] were filled in by it
			break;
		}

		if (is_stopped(state))
		{
			std::lock_guard lock(flag->mutex);

			// Already dequeued (signalled) — proceed normally
			if (std::find(flag->sq.begin(), flag->sq.end(), &ppu) == flag->sq.end())
			{
				break;
			}

			// Still queued at emulator stop: mark for savestate replay
			ppu.state += cpu_flag::again;
			return {};
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				std::lock_guard lock(flag->mutex);

				// Lost the race with a signaller: it already removed us
				if (!flag->unqueue(flag->sq, &ppu))
				{
					break;
				}

				flag->waiters--;
				ppu.gpr[3] = CELL_ETIMEDOUT;
				ppu.gpr[6] = flag->pattern;
				break;
			}
		}
		else
		{
			// Infinite wait: block until the state word changes
			thread_ctrl::wait_on(ppu.state, state);
		}
	}

	store.val = ppu.gpr[6];
	return not_an_error(ppu.gpr[3]);
}
// Non-blocking variant of sys_event_flag_wait: succeed immediately if the
// pattern is satisfied, otherwise return EBUSY without queueing.
// *result receives the matched pattern on success, 0 otherwise.
error_code sys_event_flag_trywait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm::ptr<u64> result)
{
	ppu.state += cpu_flag::wait;

	sys_event_flag.trace("sys_event_flag_trywait(id=0x%x, bitptn=0x%llx, mode=0x%x, result=*0x%x)", id, bitptn, mode, result);

	// Predefine the output so it is deterministic on all failure paths
	if (result) *result = 0;

	if (!lv2_event_flag::check_mode(mode))
	{
		sys_event_flag.error("sys_event_flag_trywait(): unknown mode (0x%x)", mode);
		return CELL_EINVAL;
	}

	// Fix: zero-initialize — check_pattern only writes this on a match, so the
	// variable previously held an indeterminate value until then
	u64 pattern = 0;

	// Atomically test (and, on match, update) the pattern
	const auto flag = idm::check<lv2_obj, lv2_event_flag>(id, [&](lv2_event_flag& flag)
	{
		return flag.pattern.fetch_op([&](u64& pat)
		{
			return lv2_event_flag::check_pattern(pat, bitptn, mode, &pattern);
		}).second;
	});

	if (!flag)
	{
		return CELL_ESRCH;
	}

	if (!flag.ret)
	{
		// Pattern not satisfied; EBUSY is an expected outcome, not an error
		return not_an_error(CELL_EBUSY);
	}

	if (result) *result = pattern;
	return CELL_OK;
}
// OR the given bits into the flag's pattern and wake every waiter whose
// pattern/mode request is now satisfied.
// Takes cpu_thread& (not ppu_thread&) — may be called from an SPU thread.
error_code sys_event_flag_set(cpu_thread& cpu, u32 id, u64 bitptn)
{
	cpu.state += cpu_flag::wait;

	// Warning: may be called from SPU thread.
	sys_event_flag.trace("sys_event_flag_set(id=0x%x, bitptn=0x%llx)", id, bitptn);

	const auto flag = idm::get<lv2_obj, lv2_event_flag>(id);

	if (!flag)
	{
		return CELL_ESRCH;
	}

	// Fast path: all bits already set, nothing can change
	if ((flag->pattern & bitptn) == bitptn)
	{
		return CELL_OK;
	}

	// Fix: dead `if (true)` removed; plain scope retained for the lock guard
	{
		std::lock_guard lock(flag->mutex);

		// If any waiter is marked for savestate replay, abort and retry later
		for (auto ppu : flag->sq)
		{
			if (ppu->state & cpu_flag::again)
			{
				cpu.state += cpu_flag::again;

				// Fake error for abort
				return not_an_error(CELL_EAGAIN);
			}
		}

		// Sort sleep queue in required order
		if (flag->protocol != SYS_SYNC_FIFO)
		{
			std::stable_sort(flag->sq.begin(), flag->sq.end(), [](cpu_thread* a, cpu_thread* b)
			{
				return static_cast<ppu_thread*>(a)->prio < static_cast<ppu_thread*>(b)->prio;
			});
		}

		// Process all waiters in single atomic op: set the bits, then evaluate
		// each waiter's request (gpr[4]=pattern, gpr[5]=mode) against the new
		// value, recording the outcome in gpr[3]/gpr[6]
		const u32 count = flag->pattern.atomic_op([&](u64& value)
		{
			value |= bitptn;
			u32 count = 0;

			for (auto cpu : flag->sq)
			{
				auto& ppu = static_cast<ppu_thread&>(*cpu);

				const u64 pattern = ppu.gpr[4];
				const u64 mode = ppu.gpr[5];

				if (lv2_event_flag::check_pattern(value, pattern, mode, &ppu.gpr[6]))
				{
					ppu.gpr[3] = CELL_OK;
					count++;
				}
				else
				{
					ppu.gpr[3] = -1;
				}
			}

			return count;
		});

		if (!count)
		{
			return CELL_OK;
		}

		// Remove waiters that were satisfied and hand them to the scheduler
		const auto tail = std::remove_if(flag->sq.begin(), flag->sq.end(), [&](cpu_thread* cpu)
		{
			auto& ppu = static_cast<ppu_thread&>(*cpu);

			if (ppu.gpr[3] == CELL_OK)
			{
				flag->waiters--;
				flag->append(cpu);
				return true;
			}

			return false;
		});

		if (tail != flag->sq.end())
		{
			flag->sq.erase(tail, flag->sq.end());
			lv2_obj::awake_all();
		}
	}

	return CELL_OK;
}
// Clear pattern bits: the new pattern is (old & bitptn). Waiters are not
// re-evaluated here — clearing bits can never satisfy a wait condition.
error_code sys_event_flag_clear(ppu_thread& ppu, u32 id, u64 bitptn)
{
	ppu.state += cpu_flag::wait;

	sys_event_flag.trace("sys_event_flag_clear(id=0x%x, bitptn=0x%llx)", id, bitptn);

	// Atomically AND the stored pattern with the supplied mask
	const auto flag = idm::check<lv2_obj, lv2_event_flag>(id, [&](lv2_event_flag& ef)
	{
		ef.pattern &= bitptn;
	});

	if (!flag)
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}
// Wake every thread currently waiting on the flag; each woken waiter receives
// CELL_ECANCELED. Optionally reports the number of cancelled waiters in *num.
error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ptr<u32> num)
{
	ppu.state += cpu_flag::wait;

	sys_event_flag.trace("sys_event_flag_cancel(id=0x%x, num=*0x%x)", id, num);

	// Predefine the output so it is deterministic on the ESRCH path
	if (num) *num = 0;

	const auto flag = idm::get<lv2_obj, lv2_event_flag>(id);

	if (!flag)
	{
		return CELL_ESRCH;
	}

	u32 value = 0;
	{
		std::lock_guard lock(flag->mutex);

		// If any waiter is marked for savestate replay, abort and retry later
		for (auto cpu : flag->sq)
		{
			if (cpu->state & cpu_flag::again)
			{
				ppu.state += cpu_flag::again;
				return {};
			}
		}

		// Get current pattern
		const u64 pattern = flag->pattern;

		// Set count
		value = ::size32(flag->sq);

		// Signal all threads to return CELL_ECANCELED (protocol does not matter)
		// The whole sleep queue is moved out and drained; gpr[3]/gpr[6] carry
		// the return code and observed pattern back to each waiter.
		for (auto thread : ::as_rvalue(std::move(flag->sq)))
		{
			auto& ppu = static_cast<ppu_thread&>(*thread);

			ppu.gpr[3] = CELL_ECANCELED;
			ppu.gpr[6] = pattern;

			flag->waiters--;
			flag->append(thread);
		}

		// Only reschedule if at least one waiter was cancelled
		if (value)
		{
			lv2_obj::awake_all();
		}
	}

	static_cast<void>(ppu.test_stopped());

	if (num) *num = value;
	return CELL_OK;
}
// Read the flag's current bit pattern into *flags without modifying it.
error_code sys_event_flag_get(ppu_thread& ppu, u32 id, vm::ptr<u64> flags)
{
	ppu.state += cpu_flag::wait;

	sys_event_flag.trace("sys_event_flag_get(id=0x%x, flags=*0x%x)", id, flags);

	// Atomically load the pattern (unary + reads the atomic's value)
	const auto flag = idm::check<lv2_obj, lv2_event_flag>(id, [](lv2_event_flag& ef)
	{
		return +ef.pattern;
	});

	if (!flag)
	{
		// Bad id takes precedence over a bad pointer; still zero the output
		if (flags)
		{
			*flags = 0;
		}

		return CELL_ESRCH;
	}

	if (!flags)
	{
		return CELL_EFAULT;
	}

	*flags = flag.ret;
	return CELL_OK;
}