// rpcsx/rpcs3/Emu/Cell/lv2/sys_rwlock.cpp

#include "stdafx.h"
#include "sys_rwlock.h"

#include "Emu/IdManager.h"
#include "Emu/IPC.h"

#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"

LOG_CHANNEL(sys_rwlock);

lv2_rwlock::lv2_rwlock(utils::serial& ar)
	: protocol(ar)
	, key(ar)
	, name(ar)
{
	ar(owner);
}

std::shared_ptr<void> lv2_rwlock::load(utils::serial& ar)
{
	auto rwlock = std::make_shared<lv2_rwlock>(ar);
	return lv2_obj::load(rwlock->key, rwlock);
}

void lv2_rwlock::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_sync);
	ar(protocol, key, name, owner);
}
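
// Note on the lv2_rwlock::owner encoding as manipulated by the syscalls below:
//   0              -> the lock is free
//   (ppu id) << 1  -> held exclusively by that writer thread
//   -2 * N         -> held by N concurrent readers (negative, even values)
//   bit 0          -> set while other threads are queued waiting on the lock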
error_code sys_rwlock_create(ppu_thread& ppu, vm::ptr<u32> rw_lock_id, vm::ptr<sys_rwlock_attribute_t> attr)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.warning("sys_rwlock_create(rw_lock_id=*0x%x, attr=*0x%x)", rw_lock_id, attr);

	if (!rw_lock_id || !attr)
	{
		return CELL_EFAULT;
	}

	const auto _attr = *attr;

	const u32 protocol = _attr.protocol;

	if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_PRIORITY)
	{
		sys_rwlock.error("sys_rwlock_create(): unknown protocol (0x%x)", protocol);
		return CELL_EINVAL;
	}

	const u64 ipc_key = lv2_obj::get_key(_attr);

	if (auto error = lv2_obj::create<lv2_rwlock>(_attr.pshared, ipc_key, _attr.flags, [&]
	{
		return std::make_shared<lv2_rwlock>(protocol, ipc_key, _attr.name_u64);
	}))
	{
		return error;
	}

	*rw_lock_id = idm::last_id();
	return CELL_OK;
}
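
// Destruction is only allowed while the rwlock is completely free (owner == 0);
// a held or contended lock yields CELL_EBUSY.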
error_code sys_rwlock_destroy(ppu_thread& ppu, u32 rw_lock_id)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.warning("sys_rwlock_destroy(rw_lock_id=0x%x)", rw_lock_id);

	const auto rwlock = idm::withdraw<lv2_obj, lv2_rwlock>(rw_lock_id, [](lv2_rwlock& rw) -> CellError
	{
		if (rw.owner)
		{
			return CELL_EBUSY;
		}

		lv2_obj::on_id_destroy(rw, rw.key);
		return {};
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (rwlock.ret)
	{
		return rwlock.ret;
	}

	return CELL_OK;
}
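
// Read lock: first try a lock-free acquisition (add one reader if no writer owns
// or waits on the lock), otherwise queue on rq, set the wait bit and sleep until
// signalled or until the timeout expires.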
error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.trace("sys_rwlock_rlock(rw_lock_id=0x%x, timeout=0x%llx)", rw_lock_id, timeout);

	const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [&](lv2_rwlock& rwlock)
	{
		const s64 val = rwlock.owner;

		if (val <= 0 && !(val & 1))
		{
			if (rwlock.owner.compare_and_swap_test(val, val - 2))
			{
				return true;
			}
		}

		lv2_obj::notify_all_t notify;
		std::lock_guard lock(rwlock.mutex);

		const s64 _old = rwlock.owner.fetch_op([&](s64& val)
		{
			if (val <= 0 && !(val & 1))
			{
				val -= 2;
			}
			else
			{
				val |= 1;
			}
		});

		if (_old > 0 || _old & 1)
		{
			rwlock.rq.emplace_back(&ppu);
			rwlock.sleep(ppu, timeout, true);
			return false;
		}

		return true;
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (rwlock.ret)
	{
		return CELL_OK;
	}

	ppu.gpr[3] = CELL_OK;

	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			break;
		}

		if (is_stopped(state))
		{
			std::lock_guard lock(rwlock->mutex);

			if (std::find(rwlock->rq.begin(), rwlock->rq.end(), &ppu) == rwlock->rq.end())
			{
				break;
			}

			ppu.state += cpu_flag::again;
			break;
		}

		if (state & cpu_flag::signal)
		{
			break;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				std::lock_guard lock(rwlock->mutex);

				if (!rwlock->unqueue(rwlock->rq, &ppu))
				{
					break;
				}

				ppu.gpr[3] = CELL_ETIMEDOUT;
				break;
			}
		}
		else
		{
			thread_ctrl::wait_on(ppu.state, state);
		}
	}

	return not_an_error(ppu.gpr[3]);
}
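
// Non-blocking read lock: succeeds only when no writer owns the lock and the wait
// bit is clear, otherwise returns CELL_EBUSY.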
error_code sys_rwlock_tryrlock(ppu_thread& ppu, u32 rw_lock_id)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.trace("sys_rwlock_tryrlock(rw_lock_id=0x%x)", rw_lock_id);

	const auto rwlock = idm::check<lv2_obj, lv2_rwlock>(rw_lock_id, [](lv2_rwlock& rwlock)
	{
		auto [_, ok] = rwlock.owner.fetch_op([](s64& val)
		{
			if (val <= 0 && !(val & 1))
			{
				val -= 2;
				return true;
			}

			return false;
		});

		return ok;
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (!rwlock.ret)
	{
		return not_an_error(CELL_EBUSY);
	}

	return CELL_OK;
}
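
// Read unlock: drop one reader; when the last reader leaves and the wait bit is set,
// hand the lock to the next writer chosen by the scheduling protocol.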
error_code sys_rwlock_runlock(ppu_thread& ppu, u32 rw_lock_id)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.trace("sys_rwlock_runlock(rw_lock_id=0x%x)", rw_lock_id);

	const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [](lv2_rwlock& rwlock)
	{
		const s64 val = rwlock.owner;

		if (val < 0 && !(val & 1))
		{
			if (rwlock.owner.compare_and_swap_test(val, val + 2))
			{
				return true;
			}
		}

		return false;
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (rwlock.ret)
	{
		return CELL_OK;
	}
	else
	{
		std::lock_guard lock(rwlock->mutex);

		// Remove one reader
		const s64 _old = rwlock->owner.fetch_op([](s64& val)
		{
			if (val < -1)
			{
				val += 2;
			}
		});

		if (_old >= 0)
		{
			return CELL_EPERM;
		}

		if (_old == -1)
		{
			if (const auto cpu = rwlock->schedule<ppu_thread>(rwlock->wq, rwlock->protocol))
			{
				if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
				{
					ppu.state += cpu_flag::again;
					return {};
				}

				rwlock->owner = cpu->id << 1 | !rwlock->wq.empty() | !rwlock->rq.empty();

				rwlock->awake(cpu);
			}
			else
			{
				rwlock->owner = 0;

				ensure(rwlock->rq.empty());
			}
		}
	}

	return CELL_OK;
}
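
// Write lock: try to claim a free lock with a single CAS, detect recursive locking
// (CELL_EDEADLK), otherwise queue on wq and sleep. On timeout, if this was the last
// queued writer, any blocked readers are released.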
error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.trace("sys_rwlock_wlock(rw_lock_id=0x%x, timeout=0x%llx)", rw_lock_id, timeout);

	const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [&](lv2_rwlock& rwlock) -> s64
	{
		const s64 val = rwlock.owner;

		if (val == 0)
		{
			if (rwlock.owner.compare_and_swap_test(0, ppu.id << 1))
			{
				return 0;
			}
		}
		else if (val >> 1 == ppu.id)
		{
			return val;
		}

		lv2_obj::notify_all_t notify;
		std::lock_guard lock(rwlock.mutex);

		const s64 _old = rwlock.owner.fetch_op([&](s64& val)
		{
			if (val == 0)
			{
				val = ppu.id << 1;
			}
			else
			{
				val |= 1;
			}
		});

		if (_old != 0)
		{
			rwlock.wq.emplace_back(&ppu);
			rwlock.sleep(ppu, timeout, true);
		}

		return _old;
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (rwlock.ret == 0)
	{
		return CELL_OK;
	}

	if (rwlock.ret >> 1 == ppu.id)
	{
		return CELL_EDEADLK;
	}

	ppu.gpr[3] = CELL_OK;

	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			break;
		}

		if (is_stopped(state))
		{
			std::lock_guard lock(rwlock->mutex);

			if (std::find(rwlock->wq.begin(), rwlock->wq.end(), &ppu) == rwlock->wq.end())
			{
				break;
			}

			ppu.state += cpu_flag::again;
			break;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				std::lock_guard lock(rwlock->mutex);

				if (!rwlock->unqueue(rwlock->wq, &ppu))
				{
					break;
				}

				// If the last waiter quit the writer sleep queue, wake blocked readers
				if (!rwlock->rq.empty() && rwlock->wq.empty() && rwlock->owner < 0)
				{
					rwlock->owner.atomic_op([&](s64& owner)
					{
						owner -= 2 * static_cast<s64>(rwlock->rq.size()); // Add readers to value
						owner &= -2; // Clear wait bit
					});

					// Protocol doesn't matter here since they are all enqueued anyways
					for (auto cpu : ::as_rvalue(std::move(rwlock->rq)))
					{
						rwlock->append(cpu);
					}

					lv2_obj::awake_all();
				}
				else if (rwlock->rq.empty() && rwlock->wq.empty())
				{
					rwlock->owner &= -2;
				}

				ppu.gpr[3] = CELL_ETIMEDOUT;
				break;
			}
		}
		else
		{
			thread_ctrl::wait_on(ppu.state, state);
		}
	}

	return not_an_error(ppu.gpr[3]);
}
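
// Non-blocking write lock: only succeeds if the lock is completely free; recursive
// attempts return CELL_EDEADLK, any other owner state returns CELL_EBUSY.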
error_code sys_rwlock_trywlock(ppu_thread& ppu, u32 rw_lock_id)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.trace("sys_rwlock_trywlock(rw_lock_id=0x%x)", rw_lock_id);

	const auto rwlock = idm::check<lv2_obj, lv2_rwlock>(rw_lock_id, [&](lv2_rwlock& rwlock)
	{
		const s64 val = rwlock.owner;

		// Return previous value
		return val ? val : rwlock.owner.compare_and_swap(0, ppu.id << 1);
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (rwlock.ret != 0)
	{
		if (rwlock.ret >> 1 == ppu.id)
		{
			return CELL_EDEADLK;
		}

		return not_an_error(CELL_EBUSY);
	}

	return CELL_OK;
}
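
// Write unlock: release ownership; if the wait bit was set, either wake the next
// writer (per protocol) or, if no writer is queued, release all queued readers at once.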
error_code sys_rwlock_wunlock(ppu_thread& ppu, u32 rw_lock_id)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.trace("sys_rwlock_wunlock(rw_lock_id=0x%x)", rw_lock_id);

	const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [&](lv2_rwlock& rwlock)
	{
		const s64 val = rwlock.owner;

		// Return previous value
		return val != ppu.id << 1 ? val : rwlock.owner.compare_and_swap(val, 0);
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (rwlock.ret >> 1 != ppu.id)
	{
		return CELL_EPERM;
	}

	if (rwlock.ret & 1)
	{
		std::lock_guard lock(rwlock->mutex);

		if (auto cpu = rwlock->schedule<ppu_thread>(rwlock->wq, rwlock->protocol))
		{
			if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
			{
				ppu.state += cpu_flag::again;
				return {};
			}

			rwlock->owner = cpu->id << 1 | !rwlock->wq.empty() | !rwlock->rq.empty();

			rwlock->awake(cpu);
		}
		else if (auto readers = rwlock->rq.size())
		{
			for (auto cpu : rwlock->rq)
			{
				if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
				{
					ppu.state += cpu_flag::again;
					return {};
				}
			}

			for (auto cpu : ::as_rvalue(std::move(rwlock->rq)))
			{
				rwlock->append(cpu);
			}

			rwlock->owner.release(-2 * static_cast<s64>(readers));
			lv2_obj::awake_all();
		}
		else
		{
			rwlock->owner = 0;
		}
	}

	return CELL_OK;
}