#include "umtx.hpp"
#include "orbis/KernelContext.hpp"
#include "orbis/thread.hpp"
#include "orbis/utils/AtomicOp.hpp"
#include "orbis/utils/Logs.hpp"
#include "time.hpp"

namespace orbis {
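// Registers `thr` as a waiter under `key`. Nodes are recycled through
// spare_queue, so the hot path usually avoids allocating while the chain
// mutex is held.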
std::pair<const UmtxKey, UmtxCond> *UmtxChain::enqueue(UmtxKey &key,
                                                       Thread *thr) {
  if (!spare_queue.empty()) {
    auto node = spare_queue.extract(spare_queue.begin());
    node.key() = key;
    node.mapped().thr = thr;
    return &*sleep_queue.insert(std::move(node));
  }
  return &*sleep_queue.emplace(key, thr);
}
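// Removes a waiter produced by enqueue(). The node is not destroyed: it is
// reset and parked on spare_queue for reuse by the next enqueue().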
void UmtxChain::erase(std::pair<const UmtxKey, UmtxCond> *obj) {
  for (auto [it, e] = sleep_queue.equal_range(obj->first); it != e; it++) {
    if (&*it == obj) {
      auto node = sleep_queue.extract(it);
      node.key() = {};
      spare_queue.insert(spare_queue.begin(), std::move(node));
      return;
    }
  }
}
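// Wakes at most one waiter registered under `key`. Clearing `thr` before
// notifying is the wakeup marker: the sleep loops treat `thr != this thread`
// as a delivered wakeup rather than a timeout.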
uint UmtxChain::notify_one(const UmtxKey &key) {
  auto it = sleep_queue.find(key);
  if (it == sleep_queue.end())
    return 0;
  it->second.thr = nullptr;
  it->second.cv.notify_one(mtx);
  this->erase(&*it);
  return 1;
}

uint UmtxChain::notify_all(const UmtxKey &key) {
  uint n = 0;
  while (notify_one(key))
    n++;
  return n;
}
} // namespace orbis

orbis::ErrorCode orbis::umtx_lock_umtx(Thread *thread, ptr<umtx> umtx, ulong id,
                                       std::uint64_t ut) {
  ORBIS_LOG_TODO(__FUNCTION__, thread->tid, umtx, id, ut);
  return ErrorCode::NOSYS;
}

orbis::ErrorCode orbis::umtx_unlock_umtx(Thread *thread, ptr<umtx> umtx,
                                         ulong id) {
  ORBIS_LOG_TODO(__FUNCTION__, thread->tid, umtx, id);
  return ErrorCode::NOSYS;
}
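// FreeBSD-style UMTX_OP_WAIT: sleep on `addr` while the value stored there
// still equals `id` (32- or 64-bit compare, selected by is32). `ut` appears
// to be a relative timeout in microseconds; all bits set (~0) means wait
// forever. Wakeups arrive via umtx_wake on the same chain.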
orbis::ErrorCode orbis::umtx_wait(Thread *thread, ptr<void> addr, ulong id,
                                  std::uint64_t ut, bool is32) {
  ORBIS_LOG_TRACE(__FUNCTION__, thread->tid, addr, id, ut, is32);
  auto [chain, key, lock] = g_context.getUmtxChain0(thread->tproc->pid, addr);
  auto node = chain.enqueue(key, thread);
  ErrorCode result = {};
  ulong val = 0;
  if (is32)
    val = reinterpret_cast<ptr<std::atomic<uint>>>(addr)->load();
  else
    val = reinterpret_cast<ptr<std::atomic<ulong>>>(addr)->load();
  if (val == id) {
    if (ut + 1 == 0) { // ut == ~0: no timeout requested
      while (true) {
        node->second.cv.wait(chain.mtx);
        if (node->second.thr != thread)
          break; // thr was cleared by notify_one: a real wakeup
      }
    } else {
      auto start = std::chrono::steady_clock::now();
      std::uint64_t udiff = 0;
      while (true) {
        node->second.cv.wait(chain.mtx, ut - udiff);
        if (node->second.thr != thread)
          break;
        // elapsed time in microseconds (assumes a nanosecond clock period)
        udiff = (std::chrono::steady_clock::now() - start).count() / 1000;
        if (udiff >= ut) {
          result = ErrorCode::TIMEDOUT;
          break;
        }
      }
    }
  }
  if (node->second.thr == thread)
    chain.erase(node); // timed out or never slept: dequeue ourselves
  return result;
}
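// UMTX_OP_WAKE counterpart: wake up to n_wake threads currently sleeping on
// `addr`, bounded by the number of waiters queued at entry (the TODO below
// questions the exact accounting).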
orbis::ErrorCode orbis::umtx_wake(Thread *thread, ptr<void> addr, sint n_wake) {
  ORBIS_LOG_TRACE(__FUNCTION__, thread->tid, addr, n_wake);
  auto [chain, key, lock] = g_context.getUmtxChain0(thread->tproc->pid, addr);
  std::size_t count = chain.sleep_queue.count(key);
  // TODO: check this
  while (count--) {
    chain.notify_one(key);
    if (n_wake-- <= 1)
      break;
  }
  return {};
}

namespace orbis {
// Lock-acquisition variants shared by the umutex entry points below.
enum class umutex_lock_mode {
  lock,
  try_,
  wait,
};

// Lets umutex_lock_mode values be printed by the ORBIS_LOG_* macros.
template <>
void log_class_string<umutex_lock_mode>::format(std::string &out,
                                                const void *arg) {
  switch (get_object(arg)) {
  case umutex_lock_mode::lock:
    out += "lock";
    break;
  case umutex_lock_mode::try_:
    out += "try";
    break;
  case umutex_lock_mode::wait:
    out += "wait";
    break;
  }
}
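// Plain (no priority protocol) umutex lock, modeled on FreeBSD's
// do_lock_normal. The owner word holds the owning tid plus kUmutexContested
// while waiters are queued. Lock/try first attempt an uncontested CAS; on
// failure the thread publishes the contested bit, sleeps on the chain, and
// retries until it wins the word or the timeout in `ut` expires.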
static ErrorCode do_lock_normal(Thread *thread, ptr<umutex> m, uint flags,
                                std::uint64_t ut, umutex_lock_mode mode) {
  ORBIS_LOG_TRACE(__FUNCTION__, thread->tid, m, flags, ut, mode);

  ErrorCode error = {};
  while (true) {
    int owner = m->owner.load(std::memory_order_acquire);
    if (mode == umutex_lock_mode::wait) {
      if (owner == kUmutexUnowned || owner == kUmutexContested)
        return {};
    } else {
      owner = kUmutexUnowned;
      if (m->owner.compare_exchange_strong(owner, thread->tid))
        return {};
      if (owner == kUmutexContested) {
        if (m->owner.compare_exchange_strong(owner,
                                             thread->tid | kUmutexContested))
          return {};
        continue;
      }
    }

    if ((flags & kUmutexErrorCheck) != 0 &&
        (owner & ~kUmutexContested) == thread->tid)
      return ErrorCode::DEADLK;

    if (mode == umutex_lock_mode::try_)
      return ErrorCode::BUSY;

    if (error != ErrorCode{})
      return error;

    auto [chain, key, lock] = g_context.getUmtxChain1(thread->tproc->pid, m);
    auto node = chain.enqueue(key, thread);
    if (m->owner.compare_exchange_strong(owner, owner | kUmutexContested)) {
      node->second.cv.wait(chain.mtx, ut);
      if (node->second.thr == thread) {
        error = ErrorCode::TIMEDOUT;
      }
    }
    if (node->second.thr == thread)
      chain.erase(node);
  }

  return {}; // not reached: the loop above only exits via return
}

// Priority inheritance is not implemented yet; fall back to the normal path.
static ErrorCode do_lock_pi(Thread *thread, ptr<umutex> m, uint flags,
                            std::uint64_t ut, umutex_lock_mode mode) {
  ORBIS_LOG_TODO(__FUNCTION__, m, flags, ut, mode);
  return do_lock_normal(thread, m, flags, ut, mode);
}

static ErrorCode do_lock_pp(Thread *thread, ptr<umutex> m, uint flags,
                            std::uint64_t ut, umutex_lock_mode mode) {
  ORBIS_LOG_TODO(__FUNCTION__, m, flags, ut, mode);
  return ErrorCode::NOSYS;
}
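// Plain umutex unlock: only the recorded owner may release. Without the
// contested bit a single CAS back to kUmutexUnowned suffices; otherwise the
// word is rewritten based on how many waiters remain and one sleeper on the
// chain is notified.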
static ErrorCode do_unlock_normal(Thread *thread, ptr<umutex> m, uint flags) {
  ORBIS_LOG_TRACE(__FUNCTION__, thread->tid, m, flags);

  int owner = m->owner.load(std::memory_order_acquire);
  if ((owner & ~kUmutexContested) != thread->tid)
    return ErrorCode::PERM;

  if ((owner & kUmutexContested) == 0) {
    if (m->owner.compare_exchange_strong(owner, kUmutexUnowned))
      return {};
  }

  auto [chain, key, lock] = g_context.getUmtxChain1(thread->tproc->pid, m);
  std::size_t count = chain.sleep_queue.count(key);
  bool ok = m->owner.compare_exchange_strong(
      owner, count <= 1 ? kUmutexUnowned : kUmutexContested);
  if (count)
    chain.notify_one(key);
  if (!ok)
    return ErrorCode::INVAL;
  return {};
}

// Priority inheritance is not implemented yet; fall back to the normal path.
static ErrorCode do_unlock_pi(Thread *thread, ptr<umutex> m, uint flags) {
  return do_unlock_normal(thread, m, flags);
}

static ErrorCode do_unlock_pp(Thread *thread, ptr<umutex> m, uint flags) {
  ORBIS_LOG_TODO(__FUNCTION__, m, flags);
  return ErrorCode::NOSYS;
}
} // namespace orbis
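// The public umutex entry points dispatch on the protocol bits of m->flags:
// plain mutexes use the do_*_normal paths, kUmutexPrioInherit the *_pi paths,
// kUmutexPrioProtect the *_pp paths; any other combination yields INVAL.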
orbis::ErrorCode orbis::umtx_trylock_umutex(Thread *thread, ptr<umutex> m) {
  ORBIS_LOG_TRACE(__FUNCTION__, thread->tid, m);
  uint flags;
  if (ErrorCode err = uread(flags, &m->flags); err != ErrorCode{})
    return err;
  switch (flags & (kUmutexPrioInherit | kUmutexPrioProtect)) {
  case 0:
    return do_lock_normal(thread, m, flags, 0, umutex_lock_mode::try_);
  case kUmutexPrioInherit:
    return do_lock_pi(thread, m, flags, 0, umutex_lock_mode::try_);
  case kUmutexPrioProtect:
    return do_lock_pp(thread, m, flags, 0, umutex_lock_mode::try_);
  }
  return ErrorCode::INVAL;
}

orbis::ErrorCode orbis::umtx_lock_umutex(Thread *thread, ptr<umutex> m,
                                         std::uint64_t ut) {
  ORBIS_LOG_TRACE(__FUNCTION__, thread->tid, m, ut);
  uint flags;
  if (ErrorCode err = uread(flags, &m->flags); err != ErrorCode{})
    return err;
  switch (flags & (kUmutexPrioInherit | kUmutexPrioProtect)) {
  case 0:
    return do_lock_normal(thread, m, flags, ut, umutex_lock_mode::lock);
  case kUmutexPrioInherit:
    return do_lock_pi(thread, m, flags, ut, umutex_lock_mode::lock);
  case kUmutexPrioProtect:
    return do_lock_pp(thread, m, flags, ut, umutex_lock_mode::lock);
  }
  return ErrorCode::INVAL;
}

orbis::ErrorCode orbis::umtx_unlock_umutex(Thread *thread, ptr<umutex> m) {
  ORBIS_LOG_TRACE(__FUNCTION__, thread->tid, m);
  uint flags;
  if (ErrorCode err = uread(flags, &m->flags); err != ErrorCode{})
    return err;
  switch (flags & (kUmutexPrioInherit | kUmutexPrioProtect)) {
  case 0:
    return do_unlock_normal(thread, m, flags);
  case kUmutexPrioInherit:
    return do_unlock_pi(thread, m, flags);
  case kUmutexPrioProtect:
    return do_unlock_pp(thread, m, flags);
  }
  return ErrorCode::INVAL;
}

orbis::ErrorCode orbis::umtx_set_ceiling(Thread *thread, ptr<umutex> m,
                                         std::uint32_t ceiling,
                                         ptr<uint32_t> oldCeiling) {
  ORBIS_LOG_TODO(__FUNCTION__, m, ceiling, oldCeiling);
  return ErrorCode::NOSYS;
}
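// UMTX_OP_CV_WAIT: queue the caller on `cv` under the chain lock, release the
// associated umutex, then sleep until signal/broadcast or until the relative
// timeout `ut` expires. Absolute-time and alternate-clock requests are
// recognized but not implemented. The mutex is not re-acquired here; that is
// left to the caller.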
orbis::ErrorCode orbis::umtx_cv_wait(Thread *thread, ptr<ucond> cv,
                                     ptr<umutex> m, std::uint64_t ut,
                                     ulong wflags) {
  ORBIS_LOG_TRACE(__FUNCTION__, thread->tid, cv, m, ut, wflags);
  uint flags;
  if (ErrorCode err = uread(flags, &m->flags); err != ErrorCode{})
    return err;
  if ((wflags & ~(kCvWaitAbsTime | kCvWaitClockId))) {
    ORBIS_LOG_FATAL("umtx_cv_wait: UNKNOWN wflags", wflags);
    return ErrorCode::INVAL;
  }
  if ((wflags & kCvWaitClockId) != 0 && ut + 1) {
    ORBIS_LOG_FATAL("umtx_cv_wait: CLOCK_ID unimplemented", wflags);
    return ErrorCode::NOSYS;
  }
  if ((wflags & kCvWaitAbsTime) != 0 && ut + 1) {
    ORBIS_LOG_FATAL("umtx_cv_wait: ABSTIME unimplemented", wflags);
    return ErrorCode::NOSYS;
  }

  auto [chain, key, lock] = g_context.getUmtxChain0(thread->tproc->pid, cv);
  auto node = chain.enqueue(key, thread);

  if (!cv->has_waiters.load(std::memory_order::relaxed))
    cv->has_waiters.store(1, std::memory_order::relaxed);

  ErrorCode result = umtx_unlock_umutex(thread, m);
  if (result == ErrorCode{}) {
    if (ut + 1 == 0) { // ut == ~0: no timeout requested
      while (true) {
        node->second.cv.wait(chain.mtx, ut); // ~0 presumably means no timeout
        if (node->second.thr != thread)
          break; // thr was cleared by notify_one: a real wakeup
      }
    } else {
      auto start = std::chrono::steady_clock::now();
      std::uint64_t udiff = 0;
      while (true) {
        node->second.cv.wait(chain.mtx, ut - udiff);
        if (node->second.thr != thread)
          break;
        // elapsed time in microseconds (assumes a nanosecond clock period)
        udiff = (std::chrono::steady_clock::now() - start).count() / 1000;
        if (udiff >= ut) {
          result = ErrorCode::TIMEDOUT;
          break;
        }
      }
    }
  }

  if (node->second.thr != thread) {
    // A signal/broadcast won the race against the timeout: report success.
    result = {};
  } else {
    chain.erase(node);
    if (chain.sleep_queue.count(key) == 0)
      cv->has_waiters.store(0, std::memory_order::relaxed);
  }
  return result;
}
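// UMTX_OP_CV_SIGNAL: wake a single waiter; has_waiters is cleared only when
// this wakeup drains the last sleeper queued under the key.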
orbis::ErrorCode orbis::umtx_cv_signal(Thread *thread, ptr<ucond> cv) {
  ORBIS_LOG_TRACE(__FUNCTION__, thread->tid, cv);
  auto [chain, key, lock] = g_context.getUmtxChain0(thread->tproc->pid, cv);
  std::size_t count = chain.sleep_queue.count(key);
  if (chain.notify_one(key) >= count)
    cv->has_waiters.store(0, std::memory_order::relaxed);
  return {};
}

orbis::ErrorCode orbis::umtx_cv_broadcast(Thread *thread, ptr<ucond> cv) {
  ORBIS_LOG_TRACE(__FUNCTION__, thread->tid, cv);
  auto [chain, key, lock] = g_context.getUmtxChain0(thread->tproc->pid, cv);
  chain.notify_all(key);
  cv->has_waiters.store(0, std::memory_order::relaxed);
  return {};
}

orbis::ErrorCode orbis::umtx_rw_rdlock(Thread *thread, ptr<void> obj,
                                       std::int64_t val, ptr<void> uaddr1,
                                       ptr<void> uaddr2) {
  ORBIS_LOG_TODO(__FUNCTION__, obj, val, uaddr1, uaddr2);
  return ErrorCode::NOSYS;
}

orbis::ErrorCode orbis::umtx_rw_wrlock(Thread *thread, ptr<void> obj,
                                       std::int64_t val, ptr<void> uaddr1,
                                       ptr<void> uaddr2) {
  ORBIS_LOG_TODO(__FUNCTION__, obj, val, uaddr1, uaddr2);
  return ErrorCode::NOSYS;
}

orbis::ErrorCode orbis::umtx_rw_unlock(Thread *thread, ptr<void> obj,
                                       std::int64_t val, ptr<void> uaddr1,
                                       ptr<void> uaddr2) {
  ORBIS_LOG_TODO(__FUNCTION__, obj, val, uaddr1, uaddr2);
  return ErrorCode::NOSYS;
}

orbis::ErrorCode orbis::umtx_wake_private(Thread *thread, ptr<void> uaddr,
                                          sint n_wake) {
  return umtx_wake(thread, uaddr, n_wake);
}

orbis::ErrorCode orbis::umtx_wait_umutex(Thread *thread, ptr<umutex> m,
                                         std::uint64_t ut) {
  ORBIS_LOG_TRACE(__FUNCTION__, m, ut);
  uint flags;
  if (ErrorCode err = uread(flags, &m->flags); err != ErrorCode{})
    return err;
  switch (flags & (kUmutexPrioInherit | kUmutexPrioProtect)) {
  case 0:
    return do_lock_normal(thread, m, flags, ut, umutex_lock_mode::wait);
  case kUmutexPrioInherit:
    return do_lock_pi(thread, m, flags, ut, umutex_lock_mode::wait);
  case kUmutexPrioProtect:
    return do_lock_pp(thread, m, flags, ut, umutex_lock_mode::wait);
  }
  return ErrorCode::INVAL;
}
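// UMTX_OP_MUTEX_WAKE: presumably paired with a userspace unlock fast path.
// Acts only when the owner field carries no tid (at most a stale contested
// bit): clears that bit when at most one waiter remains and wakes a single
// queued thread.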
orbis::ErrorCode orbis::umtx_wake_umutex(Thread *thread, ptr<umutex> m) {
  ORBIS_LOG_TRACE(__FUNCTION__, m);
  int owner = m->owner.load(std::memory_order::acquire);
  if ((owner & ~kUmutexContested) != 0)
    return {};

  [[maybe_unused]] uint flags;
  if (ErrorCode err = uread(flags, &m->flags); err != ErrorCode{})
    return err;

  auto [chain, key, lock] = g_context.getUmtxChain1(thread->tproc->pid, m);
  std::size_t count = chain.sleep_queue.count(key);
  if (count <= 1) {
    owner = kUmutexContested;
    m->owner.compare_exchange_strong(owner, kUmutexUnowned);
  }
  if (count != 0 && (owner & ~kUmutexContested) == 0)
    chain.notify_one(key);
  return {};
}

orbis::ErrorCode orbis::umtx_sem_wait(Thread *thread, ptr<void> obj,
                                      std::int64_t val, ptr<void> uaddr1,
                                      ptr<void> uaddr2) {
  ORBIS_LOG_TODO(__FUNCTION__, obj, val, uaddr1, uaddr2);
  return ErrorCode::NOSYS;
}

orbis::ErrorCode orbis::umtx_sem_wake(Thread *thread, ptr<void> obj,
                                      std::int64_t val, ptr<void> uaddr1,
                                      ptr<void> uaddr2) {
  ORBIS_LOG_TODO(__FUNCTION__, obj, val, uaddr1, uaddr2);
  return ErrorCode::NOSYS;
}
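// UMTX_OP_NWAKE_PRIVATE: `uaddrs` is a userspace array of `count` addresses;
// each entry is fetched with uread (a faulting read aborts the batch) and
// receives a single-thread wake.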
orbis::ErrorCode orbis::umtx_nwake_private(Thread *thread, ptr<void *> uaddrs,
                                           std::int64_t count) {
  ORBIS_LOG_TRACE(__FUNCTION__, thread->tid, uaddrs, count);
  while (count-- > 0) {
    void *uaddr;
    auto error = uread(uaddr, uaddrs++);
    if (error != ErrorCode{})
      return error;
    umtx_wake_private(thread, uaddr, 1);
  }
  return {};
}

orbis::ErrorCode orbis::umtx_wake2_umutex(Thread *thread, ptr<void> obj,
                                          std::int64_t val, ptr<void> uaddr1,
                                          ptr<void> uaddr2) {
  ORBIS_LOG_TODO(__FUNCTION__, obj, val, uaddr1, uaddr2);
  return ErrorCode::NOSYS;
}