#pragma once

#include "Utilities/mutex.h"
#include "Utilities/sema.h"
#include "Utilities/cond.h"

#include "Emu/Memory/vm_locking.h"
#include "Emu/CPU/CPUThread.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/IdManager.h"
#include "Emu/IPC.h"
#include "Emu/system_config.h"
#include "Emu/System.h"

#include <deque>
#include <thread>

// attr_protocol (waiting scheduling policy)
enum
{
	SYS_SYNC_FIFO = 0x1, // First In, First Out Order
	SYS_SYNC_PRIORITY = 0x2, // Priority Order
	SYS_SYNC_PRIORITY_INHERIT = 0x3, // Basic Priority Inheritance Protocol
	SYS_SYNC_RETRY = 0x4, // Not selected while unlocking
	SYS_SYNC_ATTR_PROTOCOL_MASK = 0xf,
};

// attr_recursive (recursive locks policy)
enum
{
	SYS_SYNC_RECURSIVE = 0x10,
	SYS_SYNC_NOT_RECURSIVE = 0x20,
	SYS_SYNC_ATTR_RECURSIVE_MASK = 0xf0,
};

// attr_pshared (sharing among processes policy)
enum
{
	SYS_SYNC_PROCESS_SHARED = 0x100,
	SYS_SYNC_NOT_PROCESS_SHARED = 0x200,
	SYS_SYNC_ATTR_PSHARED_MASK = 0xf00,
};

// attr_flags (creation policy)
enum
{
	SYS_SYNC_NEWLY_CREATED = 0x1, // Create new object, fails if specified IPC key exists
	SYS_SYNC_NOT_CREATE = 0x2, // Reference existing object, fails if IPC key not found
	SYS_SYNC_NOT_CARE = 0x3, // Reference existing object, create new one if IPC key not found
	SYS_SYNC_ATTR_FLAGS_MASK = 0xf,
};

// attr_adaptive
enum
{
	SYS_SYNC_ADAPTIVE = 0x1000,
	SYS_SYNC_NOT_ADAPTIVE = 0x2000,
	SYS_SYNC_ATTR_ADAPTIVE_MASK = 0xf000,
};
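
// Illustrative sketch (assumption, not a quoted call site): a syscall receiving an
// attribute word would typically validate each policy by masking its nibble, e.g.
//
//   switch (attr & SYS_SYNC_ATTR_PROTOCOL_MASK)
//   {
//   case SYS_SYNC_FIFO:
//   case SYS_SYNC_PRIORITY: break;
//   default: return CELL_EINVAL;
//   }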

// Base class for some kernel objects (shared set of 8192 objects).
struct lv2_obj
{
	using id_type = lv2_obj;

	static const u32 id_step = 0x100;
	static const u32 id_count = 8192;
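
	// Note (inferred from id_step/id_count above, not from official docs): IDs are
	// handed out from one shared range in 0x100 steps, so at most 8192 lv2 objects
	// of all types can exist simultaneously.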

private:
	enum thread_cmd : s32
	{
		yield_cmd = INT32_MIN,
		enqueue_cmd,
	};

public:
	// Find and remove the object from the deque
	template <typename T, typename E>
	static bool unqueue(std::deque<T*>& queue, const E& object)
	{
		for (auto found = queue.cbegin(), end = queue.cend(); found != end; found++)
		{
			if (*found == object)
			{
				queue.erase(found);
				return true;
			}
		}

		return false;
	}

	template <typename E, typename T>
	static T* schedule(std::deque<T*>& queue, u32 protocol)
	{
		if (queue.empty())
		{
			return nullptr;
		}

		if (protocol == SYS_SYNC_FIFO)
		{
			const auto res = queue.front();
			queue.pop_front();
			return res;
		}

		// Priority scheduling: pick the first waiter with the lowest prio value
		s32 prio = 3071;
		auto it = queue.cbegin();

		for (auto found = it, end = queue.cend(); found != end; found++)
		{
			const s32 _prio = static_cast<E*>(*found)->prio;

			if (_prio < prio)
			{
				it = found;
				prio = _prio;
			}
		}

		const auto res = *it;
		queue.erase(it);
		return res;
	}
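
	// Illustrative usage (assumption, not a quoted call site): a sync object
	// releasing one waiter picks it according to its protocol attribute:
	//
	//   if (auto* cpu = schedule<ppu_thread>(sq, protocol & SYS_SYNC_ATTR_PROTOCOL_MASK))
	//       awake(cpu);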

private:
	// Remove the current thread from the scheduling queue, register timeout
	static void sleep_unlocked(cpu_thread&, u64 timeout);

	// Schedule the thread
	static void awake_unlocked(cpu_thread*, s32 prio = enqueue_cmd);

public:
	static void sleep(cpu_thread& cpu, const u64 timeout = 0)
	{
		vm::temporary_unlock(cpu);

		// The unnamed lock_guard lives until the end of the full expression,
		// so sleep_unlocked() executes with g_mutex held
		std::lock_guard{g_mutex}, sleep_unlocked(cpu, timeout);

		g_to_awake.clear();
	}

	static inline void awake(cpu_thread* const thread, s32 prio = enqueue_cmd)
	{
		std::lock_guard lock(g_mutex);
		awake_unlocked(thread, prio);
	}

	static void yield(cpu_thread& thread)
	{
		vm::temporary_unlock(thread);
		awake(&thread, yield_cmd);
	}

	static void set_priority(cpu_thread& thread, s32 prio)
	{
		// Unsigned comparison accepts the range [-512, 3199]
		verify(HERE), prio + 512u < 3712;
		awake(&thread, prio);
	}

	static inline void awake_all()
	{
		awake({});
		g_to_awake.clear();
	}

	static inline void append(cpu_thread* const thread)
	{
		g_to_awake.emplace_back(thread);
	}
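
	// Illustrative pattern (assumption based on this interface, not a quoted call
	// site): waking several threads at once batches them via append(), then
	// reschedules them in one pass:
	//
	//   while (auto* cpu = schedule<ppu_thread>(sq, protocol))
	//       append(cpu);
	//   awake_all();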

	static void cleanup();

	template <typename T, typename F>
	static error_code create(u32 pshared, u64 ipc_key, s32 flags, F&& make, bool key_not_zero = true)
	{
		switch (pshared)
		{
		case SYS_SYNC_PROCESS_SHARED:
		{
			if (key_not_zero && ipc_key == 0)
			{
				return CELL_EINVAL;
			}

			switch (flags)
			{
			case SYS_SYNC_NEWLY_CREATED:
			case SYS_SYNC_NOT_CARE:
			{
				// Create a new object and try to register it under the IPC key
				std::shared_ptr<T> result = make();

				if (!ipc_manager<T, u64>::add(ipc_key, [&] { if (!idm::import_existing<lv2_obj, T>(result)) result.reset(); return result; }, &result))
				{
					// IPC key already exists
					if (flags == SYS_SYNC_NEWLY_CREATED)
					{
						return CELL_EEXIST;
					}

					// SYS_SYNC_NOT_CARE: reference the existing object instead
					if (!idm::import_existing<lv2_obj, T>(result))
					{
						return CELL_EAGAIN;
					}

					return CELL_OK;
				}
				else if (!result)
				{
					return CELL_EAGAIN;
				}
				else
				{
					return CELL_OK;
				}
			}
			case SYS_SYNC_NOT_CREATE:
			{
				// Reference an existing object by its IPC key
				auto result = ipc_manager<T, u64>::get(ipc_key);

				if (!result)
				{
					return CELL_ESRCH;
				}

				if (!idm::import_existing<lv2_obj, T>(result))
				{
					return CELL_EAGAIN;
				}

				return CELL_OK;
			}
			default:
			{
				return CELL_EINVAL;
			}
			}
		}
		case SYS_SYNC_NOT_PROCESS_SHARED:
		{
			if (!idm::import<lv2_obj, T>(std::forward<F>(make)))
			{
				return CELL_EAGAIN;
			}

			return CELL_OK;
		}
		default:
		{
			return CELL_EINVAL;
		}
		}
	}
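
	// Illustrative usage (assumption; constructor arguments elided): a *_create
	// syscall forwards its attribute fields here, e.g.
	//
	//   error_code err = create<lv2_mutex>(attr->pshared, attr->ipc_key, attr->flags, [&]
	//   {
	//       return std::make_shared<lv2_mutex>(/* ... */);
	//   });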

	template <bool is_usleep = false, bool scale = true>
	static bool wait_timeout(u64 usec, cpu_thread* const cpu = {})
	{
		static_assert(UINT64_MAX / cond_variable::max_timeout >= 100, "max timeout is not valid for scaling");

		if constexpr (scale)
		{
			// Scale time
			usec = std::min<u64>(usec, UINT64_MAX / 100) * 100 / g_cfg.core.clocks_scale;
		}

		// Clamp
		usec = std::min<u64>(usec, cond_variable::max_timeout);

		extern u64 get_system_time();

		u64 passed = 0;
		u64 remaining;

		const u64 start_time = get_system_time();

		while (usec >= passed)
		{
			remaining = usec - passed;
#ifdef __linux__
			// NOTE: Assumes that timer initialization has succeeded
			const u64 host_min_quantum = is_usleep && remaining <= 1000 ? 10 : 50;
#else
			// Host scheduler quantum for Windows (worst case)
			// NOTE: On PS3 this function has very high accuracy
			constexpr u64 host_min_quantum = 500;
#endif
			// TODO: Tune for other non-Windows operating systems

			if (g_cfg.core.sleep_timers_accuracy < (is_usleep ? sleep_timers_accuracy_level::_usleep : sleep_timers_accuracy_level::_all_timers))
			{
				thread_ctrl::wait_for(remaining, !is_usleep);
			}
			else
			{
				if (remaining > host_min_quantum)
				{
#ifdef __linux__
					// Do not wait for the last quantum to avoid loss of accuracy
					thread_ctrl::wait_for(remaining - ((remaining % host_min_quantum) + host_min_quantum), !is_usleep);
#else
					// Wait on a multiple of the min quantum for large durations to avoid overloading CPUs with few threads
					thread_ctrl::wait_for(remaining - (remaining % host_min_quantum), !is_usleep);
#endif
				}
				else
				{
					// Try yielding. May cause long wake latency but helps weaker CPUs a lot by alleviating resource pressure
					std::this_thread::yield();
				}
			}

			if (thread_ctrl::state() == thread_state::aborting)
			{
				return false;
			}

			if (cpu && cpu->state & cpu_flag::signal)
			{
				return false;
			}

			passed = get_system_time() - start_time;
		}

		return true;
	}
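
	// Illustrative usage (assumption, not a quoted call site): a timed syscall
	// waits on this helper; a false result means the wait was interrupted:
	//
	//   if (wait_timeout(timeout, &ppu))
	//   {
	//       // full timeout elapsed without abort/signal -> report CELL_ETIMEDOUT
	//   }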

private:
	// Scheduler mutex
	static shared_mutex g_mutex;

	// Pending list of threads to run
	static thread_local std::vector<class cpu_thread*> g_to_awake;

	// Scheduler queue for active PPU threads
	static std::deque<class ppu_thread*> g_ppu;

	// Threads waiting for a response from the scheduler
	static std::deque<class cpu_thread*> g_pending;

	// Scheduler queue for timeouts (wait until -> thread)
	static std::deque<std::pair<u64, class cpu_thread*>> g_waiting;

	static void schedule_all();
};