2014-01-20 01:19:37 +04:00
|
|
|
#pragma once
|
|
|
|
|
|
2016-04-14 01:23:53 +03:00
|
|
|
#include "sys_sync.h"
|
2015-06-19 18:49:38 +03:00
|
|
|
|
2018-09-26 01:14:10 +03:00
|
|
|
#include "Emu/Memory/vm_ptr.h"
|
|
|
|
|
|
2014-01-31 03:40:05 +04:00
|
|
|
// Guest-visible attribute block passed to _sys_lwmutex_create.
// All fields are big-endian (be_t) because this layout lives in PS3 guest memory.
struct sys_lwmutex_attribute_t
{
	be_t<u32> protocol;  // scheduling protocol constant (see sys_sync.h)
	be_t<u32> recursive; // recursion policy constant (see sys_sync.h)

	// 8-byte name, viewable either as raw characters or as a single u64.
	union
	{
		nse_t<u64, 1> name_u64;  // unaligned (alignment 1) integer view of the name bytes
		char name[sizeof(u64)];  // character view; not necessarily null-terminated
	};
};
|
|
|
|
|
|
2015-03-09 00:56:45 +03:00
|
|
|
// Sentinel values for the lwmutex owner field — presumably stored in
// sys_lwmutex_t::vars.owner where a real owner would be a thread id (TODO confirm against sys_lwmutex.cpp).
enum : u32
{
	lwmutex_free = 0xffffffffu,     // mutex is not owned by any thread
	lwmutex_dead = 0xfffffffeu,     // mutex has been destroyed
	lwmutex_reserved = 0xfffffffdu, // mutex temporarily reserved (transitional state)
};
|
|
|
|
|
|
2014-01-20 01:19:37 +04:00
|
|
|
// Guest-memory representation of a lightweight mutex (the userspace-visible part).
// The layout must match the PS3 ABI exactly; the union lets the emulator access the
// owner/waiter pair either atomically as a whole, per-field, or as one 64-bit value.
struct sys_lwmutex_t
{
	// 8-byte-aligned owner/waiter pair so it can be manipulated as a single atomic unit
	struct alignas(8) sync_var_t
	{
		be_t<u32> owner;  // owning thread id, or one of the lwmutex_* sentinels above
		be_t<u32> waiter; // waiter-related field (semantics defined by the syscall implementation)
	};

	// Three aliasing views of the same 64 bits of guest memory
	union
	{
		atomic_t<sync_var_t> lock_var; // whole-pair atomic view (used for 64-bit CAS)

		struct
		{
			atomic_be_t<u32> owner;  // per-field atomic view of sync_var_t::owner
			atomic_be_t<u32> waiter; // per-field atomic view of sync_var_t::waiter
		}
		vars;

		atomic_be_t<u64> all_info; // raw 64-bit view of the pair
	};

	be_t<u32> attribute;       // creation attributes (protocol/recursive flags)
	be_t<u32> recursive_count; // recursion depth when the mutex is recursive
	be_t<u32> sleep_queue; // lwmutex pseudo-id
	be_t<u32> pad;             // padding to keep the guest layout/size fixed
};
|
2014-01-31 03:40:05 +04:00
|
|
|
|
2017-01-29 19:50:18 +03:00
|
|
|
// Kernel-side (LV2) object backing a lightweight mutex. Holds the wait queue and
// signal state in a single lock-free 16-byte atomic (lv2_control); the guest-side
// sys_lwmutex_t is referenced via `control`.
struct lv2_lwmutex final : lv2_obj
{
	static const u32 id_base = 0x95000000; // base for ids handed out by the id manager

	const lv2_protocol protocol;          // scheduling protocol chosen at creation
	const vm::ptr<sys_lwmutex_t> control; // guest memory holding the userspace part
	const be_t<u64> name;                 // 8-character name, stored big-endian

	shared_mutex mutex;
	// Count of lwcond waiters associated with this lwmutex; the sign bit doubles as a
	// "destroyer is waiting for notification" flag (see try_own below).
	atomic_t<s32> lwcond_waiters{0};

	// Combined signal flag + intrusive wait queue, updated atomically as one unit
	// (16-byte alignment so the whole struct fits a double-width atomic).
	struct alignas(16) control_data_t
	{
		s32 signaled{0};  // 1 = signaled; sign bit (smin) marks the "unlock2" variant
		u32 reserved{};   // padding/reserved; keeps sq aligned
		ppu_thread* sq{}; // head of intrusive waiter list (linked via next_cpu)
	};

	atomic_t<control_data_t> lv2_control{};

	// protocol is narrowed to u8 to match lv2_protocol's storage
	lv2_lwmutex(u32 protocol, vm::ptr<sys_lwmutex_t> control, u64 name) noexcept
		: protocol{static_cast<u8>(protocol)}
		, control(control)
		, name(std::bit_cast<be_t<u64>>(name))
	{
	}

	// Savestate support
	lv2_lwmutex(utils::serial& ar);
	void save(utils::serial& ar);

	// Racy snapshot of the wait-queue head (atomic load of just the sq member)
	ppu_thread* load_sq() const
	{
		return atomic_storage<ppu_thread*>::load(lv2_control.raw().sq);
	}

	// Try to take ownership: consumes the signal if present, otherwise enqueues `cpu`
	// as a waiter. Returns the previous `signaled` value (nonzero = ownership acquired).
	// If wait_only is set, the caller asserts the mutex cannot be in the signaled state.
	template <typename T>
	s32 try_own(T* cpu, bool wait_only = false)
	{
		const s32 signal = lv2_control.fetch_op([&](control_data_t& data)
		{
			if (!data.signaled)
			{
				// Not signaled: stamp the thread's priority order (for protocol-based
				// scheduling) and push it onto the intrusive wait list
				cpu->prio.atomic_op([tag = ++g_priority_order_tag](std::common_type_t<decltype(T::prio)>& prio)
				{
					prio.order = tag;
				});

				cpu->next_cpu = data.sq;
				data.sq = cpu;
			}
			else
			{
				// Signaled: consume the signal and take ownership immediately
				ensure(!wait_only);
				data.signaled = 0;
			}
		}).signaled;

		if (signal)
		{
			// Ownership acquired: the speculative enqueue above did not commit for this
			// path, so make sure the link is cleared
			cpu->next_cpu = nullptr;
		}
		else
		{
			// We became a waiter; if a destroyer flagged lwcond_waiters (sign bit set),
			// clear that flag and wake it so it can observe the new waiter (-> EBUSY)
			const bool notify = lwcond_waiters.fetch_op([](s32& val)
			{
				// Unsigned compare: true for val in [0, INT32_MIN as u32]
				if (val + 0u <= 1u << 31)
				{
					// Value was either positive or INT32_MIN
					return false;
				}

				// lwmutex was set to be destroyed, but there are lwcond waiters
				// Turn off the "lwcond_waiters notification" bit as we are adding an lwmutex waiter
				val &= 0x7fff'ffff;
				return true;
			}).second;

			if (notify)
			{
				// Notify lwmutex destroyer (may cause EBUSY to be returned for it)
				lwcond_waiters.notify_all();
			}
		}

		return signal;
	}

	// Fast unlock path: only valid when no waiters are queued. Sets the signal flag
	// (smin for the unlock2 variant, 1 otherwise) via CAS. Returns false if a waiter
	// appeared or the CAS lost a race — caller must then take the slow path.
	bool try_unlock(bool unlock2)
	{
		if (!load_sq())
		{
			control_data_t old{};
			old.signaled = atomic_storage<s32>::load(lv2_control.raw().signaled);
			control_data_t store = old;
			store.signaled |= (unlock2 ? s32{smin} : 1);

			// CAS with sq == nullptr baked into `old`: fails if a waiter enqueued meanwhile
			if (lv2_control.compare_exchange(old, store))
			{
				return true;
			}
		}

		return false;
	}

	// Unlock with handoff: pick the next waiter per `protocol` and remove it from the
	// queue; if the queue is empty, just set the signal flag. Returns the thread that
	// received ownership, or nullptr.
	template <typename T>
	T* reown(bool unlock2 = false)
	{
		T* res = nullptr;

		lv2_control.fetch_op([&](control_data_t& data)
		{
			// Reset on each retry of the fetch_op loop
			res = nullptr;

			if (auto sq = static_cast<T*>(data.sq))
			{
				// schedule<> picks the next thread per protocol and unlinks it from data.sq
				res = schedule<T>(data.sq, protocol, false);

				if (sq == data.sq)
				{
					// Head unchanged: nothing to store, abort the atomic update
					return false;
				}

				return true;
			}
			else
			{
				// No waiters: leave the mutex signaled instead
				data.signaled |= (unlock2 ? s32{smin} : 1);
				return true;
			}
		});

		if (res && cpu_flag::again - res->state)
		{
			// Detach manually (fetch_op can fail, so avoid side-effects on the first node in this case)
			res->next_cpu = nullptr;
		}

		return res;
	}
};
|
2014-06-25 00:38:34 +02:00
|
|
|
|
2015-07-21 19:35:55 +03:00
|
|
|
// Aux
|
2016-07-28 00:43:22 +03:00
|
|
|
class ppu_thread;
|
2015-07-21 19:35:55 +03:00
|
|
|
|
2017-02-03 02:16:09 +03:00
|
|
|
// Syscalls
|
|
|
|
|
|
2019-06-20 14:42:06 +03:00
|
|
|
error_code _sys_lwmutex_create(ppu_thread& ppu, vm::ptr<u32> lwmutex_id, u32 protocol, vm::ptr<sys_lwmutex_t> control, s32 has_name, u64 name);
|
|
|
|
|
error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id);
|
2017-02-03 02:16:09 +03:00
|
|
|
error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout);
|
2019-06-20 14:42:06 +03:00
|
|
|
error_code _sys_lwmutex_trylock(ppu_thread& ppu, u32 lwmutex_id);
|
2017-02-06 21:36:46 +03:00
|
|
|
error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id);
|
2019-06-20 14:42:06 +03:00
|
|
|
error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id);
|