#pragma once

#include "Utilities/mutex.h"
#include "Utilities/sema.h"

#include "Emu/CPU/CPUThread.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/timers.hpp"

#include "Emu/IdManager.h"
#include "Emu/IPC.h"

#include "Emu/system_config.h"

#include <algorithm>
#include <memory>
#include <string>
#include <thread>
#include <vector>

// attr_protocol (waiting scheduling policy)
enum lv2_protocol : u8
{
	SYS_SYNC_FIFO = 0x1, // First In, First Out Order
	SYS_SYNC_PRIORITY = 0x2, // Priority Order
	SYS_SYNC_PRIORITY_INHERIT = 0x3, // Basic Priority Inheritance Protocol
	SYS_SYNC_RETRY = 0x4, // Not selected while unlocking
};

enum : u32
{
	SYS_SYNC_ATTR_PROTOCOL_MASK = 0xf,
};

// attr_recursive (recursive locks policy)
enum
{
	SYS_SYNC_RECURSIVE = 0x10,
	SYS_SYNC_NOT_RECURSIVE = 0x20,
	SYS_SYNC_ATTR_RECURSIVE_MASK = 0xf0,
};

// attr_pshared (sharing among processes policy)
enum
{
	SYS_SYNC_PROCESS_SHARED = 0x100,
	SYS_SYNC_NOT_PROCESS_SHARED = 0x200,
	SYS_SYNC_ATTR_PSHARED_MASK = 0xf00,
};

// attr_flags (creation policy)
enum
{
	SYS_SYNC_NEWLY_CREATED = 0x1, // Create new object, fails if specified IPC key exists
	SYS_SYNC_NOT_CREATE = 0x2, // Reference existing object, fails if IPC key not found
	SYS_SYNC_NOT_CARE = 0x3, // Reference existing object, create new one if IPC key not found
	SYS_SYNC_ATTR_FLAGS_MASK = 0xf,
};

// attr_adaptive
enum
{
	SYS_SYNC_ADAPTIVE = 0x1000,
	SYS_SYNC_NOT_ADAPTIVE = 0x2000,
	SYS_SYNC_ATTR_ADAPTIVE_MASK = 0xf000,
};

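// The masks above select each policy group; a minimal decoding sketch
// (illustrative only; `attr` is a hypothetical packed attribute value):
//
//   const u32 protocol  = attr & SYS_SYNC_ATTR_PROTOCOL_MASK;  // e.g. SYS_SYNC_PRIORITY
//   const u32 recursive = attr & SYS_SYNC_ATTR_RECURSIVE_MASK; // e.g. SYS_SYNC_NOT_RECURSIVE
//   const u32 pshared   = attr & SYS_SYNC_ATTR_PSHARED_MASK;   // e.g. SYS_SYNC_PROCESS_SHARED
//   const u32 adaptive  = attr & SYS_SYNC_ATTR_ADAPTIVE_MASK;  // e.g. SYS_SYNC_NOT_ADAPTIVE
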
enum ppu_thread_status : u32;

// Base class for some kernel objects (shared set of 8192 objects).
struct lv2_obj
{
	static const u32 id_step = 0x100;
	static const u32 id_count = 8192;
	static constexpr std::pair<u32, u32> id_invl_range = {0, 8};

private:
	enum thread_cmd : s32
	{
		yield_cmd = smin,
		enqueue_cmd,
	};

2020-06-04 05:37:25 +02:00
// Function executed under IDM mutex, error will make the object creation fail and the error will be returned
2021-05-07 08:58:30 +02:00
CellError on_id_create ( )
2020-06-04 05:37:25 +02:00
{
2021-05-07 08:58:30 +02:00
exists + + ;
2020-06-04 05:37:25 +02:00
return { } ;
}
2019-11-01 20:21:15 +01:00
public:
	SAVESTATE_INIT_POS(4); // Dependency on PPUs

	// Existence validation (workaround for shared-ptr ref-counting)
	atomic_t<u32> exists = 0;

	template <typename Ptr>
	static bool check(Ptr&& ptr)
	{
		return ptr && ptr->exists;
	}

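	// Hedged usage sketch (caller side; the idm lookup shown is illustrative):
	//
	//   const auto mutex = idm::get_unlocked<lv2_obj, lv2_mutex>(mutex_id);
	//
	//   if (!lv2_obj::check(mutex))
	//   {
	//       return CELL_ESRCH; // object is gone (or was never there)
	//   }
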
	static std::string name64(u64 name_u64)
	{
		const auto ptr = reinterpret_cast<const char*>(&name_u64);

		// NTS string, ignore invalid/newline characters
		// Example: "lv2\n\0tx" will be printed as "lv2"
		std::string str{ptr, std::find(ptr, ptr + 7, '\0')};
		str.erase(std::remove_if(str.begin(), str.end(), [](uchar c) { return !std::isprint(c); }), str.end());

		return str;
	}

	// Find and remove the object from the linked list
	template <typename T>
	static T* unqueue(T*& first, T* object, T* T::* mem_ptr = &T::next_cpu)
	{
		auto it = +first;

		if (it == object)
		{
			atomic_storage<T*>::release(first, it->*mem_ptr);
			atomic_storage<T*>::release(it->*mem_ptr, nullptr);
			return it;
		}

		for (; it;)
		{
			const auto next = it->*mem_ptr + 0;

			if (next == object)
			{
				atomic_storage<T*>::release(it->*mem_ptr, next->*mem_ptr);
				atomic_storage<T*>::release(next->*mem_ptr, nullptr);
				return next;
			}

			it = next;
		}

		return {};
	}

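	// Sketch of the intrusive list unqueue() walks (next_cpu is the default link member):
	//
	//   first -> A -> B -> C -> nullptr
	//   unqueue(first, B); // unlinks B: first -> A -> C, returns B (B->next_cpu = nullptr)
	//   unqueue(first, X); // X not in the list: returns nullptr, list unchanged
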
	// Remove an object from the linked set according to the protocol
	template <typename E, typename T>
	static E* schedule(T& first, u32 protocol)
	{
		auto it = static_cast<E*>(first);

		if (!it)
		{
			return it;
		}

		auto parent_found = &first;

		if (protocol == SYS_SYNC_FIFO)
		{
			while (true)
			{
				const auto next = +it->next_cpu;

				if (next)
				{
					parent_found = &it->next_cpu;
					it = next;
					continue;
				}

				if (it && cpu_flag::again - it->state)
				{
					atomic_storage<T>::release(*parent_found, nullptr);
				}

				return it;
			}
		}

		s32 prio = it->prio;
		auto found = it;

		while (true)
		{
			auto& node = it->next_cpu;
			const auto next = static_cast<E*>(node);

			if (!next)
			{
				break;
			}

			const s32 _prio = static_cast<E*>(next)->prio;

			// This condition tests for equality as well, so the earliest-pushed element is the one popped
			if (_prio <= prio)
			{
				found = next;
				parent_found = &node;
				prio = _prio;
			}

			it = next;
		}

		if (cpu_flag::again - found->state)
		{
			atomic_storage<T>::release(*parent_found, found->next_cpu);
			atomic_storage<T>::release(found->next_cpu, nullptr);
		}

		return found;
	}

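	// Selection sketch: emplace() below pushes at the head, so the list runs newest -> oldest.
	// The FIFO path therefore walks to the tail (the earliest-pushed thread), while the
	// priority path keeps the lowest prio seen and, through the <= comparison, prefers the
	// element nearest the tail among equal priorities, i.e. again the earliest-pushed one.
	// An element whose state carries cpu_flag::again is returned without being unlinked.
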
	template <typename T>
	static void emplace(T& first, T object)
	{
		atomic_storage<T>::release(object->next_cpu, first);
		atomic_storage<T>::release(first, object);
	}

private:
	// Remove the current thread from the scheduling queue, register timeout
	static bool sleep_unlocked(cpu_thread&, u64 timeout, u64 current_time);

	// Schedule the thread
	static bool awake_unlocked(cpu_thread*, s32 prio = enqueue_cmd);

public:
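	// Assumption: timeouts are expressed in microseconds and clamped to max_timeout so
	// that later scaling (e.g. usec -> ns) cannot overflow a u64.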
	static constexpr u64 max_timeout = u64{umax} / 1000;

	static bool sleep(cpu_thread& cpu, const u64 timeout = 0);

	static bool awake(cpu_thread* thread, s32 prio = enqueue_cmd);

	// Returns true on successful context switch, false otherwise
	static bool yield(cpu_thread& thread);

	static void set_priority(cpu_thread& thread, s32 prio)
	{
		ensure(prio + 512u < 3712);
		awake(&thread, prio);
	}

	static inline void awake_all()
	{
		awake({});
		g_to_awake.clear();
	}

	static ppu_thread_status ppu_state(ppu_thread* ppu, bool lock_idm = true, bool lock_lv2 = true);

	static inline void append(cpu_thread* const thread)
	{
		g_to_awake.emplace_back(thread);
	}

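	// Hedged usage sketch (hypothetical broadcast-style waker):
	//
	//   std::lock_guard lock(lv2_obj::g_mutex);
	//
	//   while (auto* cpu = /* pop a waiter, e.g. via schedule<>() */)
	//   {
	//       lv2_obj::append(cpu); // batch the wakeups in g_to_awake
	//   }
	//
	//   lv2_obj::awake_all(); // commit the whole batch in one scheduling pass
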
	// Serialization related
	static void set_future_sleep(cpu_thread* cpu);
	static bool is_scheduler_ready();

	// Must be called under IDM lock
	static bool has_ppus_in_running_state();

	static void set_yield_frequency(u64 freq, u64 max_allowed_tsx);

	static void cleanup();

	template <typename T>
	static inline u64 get_key(const T& attr)
	{
		return (attr.pshared == SYS_SYNC_PROCESS_SHARED ? +attr.ipc_key : 0);
	}

	template <typename T, typename F>
	static error_code create(u32 pshared, u64 ipc_key, s32 flags, F&& make, bool key_not_zero = true)
	{
		switch (pshared)
		{
		case SYS_SYNC_PROCESS_SHARED:
		{
			if (key_not_zero && ipc_key == 0)
			{
				return CELL_EINVAL;
			}

			switch (flags)
			{
			case SYS_SYNC_NEWLY_CREATED:
			case SYS_SYNC_NOT_CARE:
			case SYS_SYNC_NOT_CREATE:
			{
				break;
			}
			default: return CELL_EINVAL;
			}

			break;
		}
		case SYS_SYNC_NOT_PROCESS_SHARED:
		{
			break;
		}
		default: return CELL_EINVAL;
		}

		// EAGAIN is returned on IDM ID shortage
		CellError error = CELL_EAGAIN;

		if (!idm::import<lv2_obj, T>([&]() -> std::shared_ptr<T>
		{
			std::shared_ptr<T> result = make();

			auto finalize_construct = [&]() -> std::shared_ptr<T>
			{
				if ((error = result->on_id_create()))
				{
					result.reset();
				}

				return std::move(result);
			};

			if (pshared != SYS_SYNC_PROCESS_SHARED)
			{
				// Creation of a unique (non-shared) object handle
				return finalize_construct();
			}

			auto& ipc_container = g_fxo->get<ipc_manager<T, u64>>();

			if (flags == SYS_SYNC_NOT_CREATE)
			{
				result = ipc_container.get(ipc_key);

				if (!result)
				{
					error = CELL_ESRCH;
					return result;
				}

				// Run on_id_create() on the existing object
				return finalize_construct();
			}

			bool added = false;
			std::tie(added, result) = ipc_container.add(ipc_key, finalize_construct, flags != SYS_SYNC_NEWLY_CREATED);

			if (!added)
			{
				if (flags == SYS_SYNC_NEWLY_CREATED)
				{
					// The object already exists but the flags do not allow referencing it
					error = CELL_EEXIST;

					// We specified we do not want to peek the pointer's value, so result must be empty
					AUDIT(!result);
					return result;
				}

				// Run on_id_create() on the existing object
				return finalize_construct();
			}

			return result;
		}))
		{
			return error;
		}

		return CELL_OK;
	}

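	// Hedged usage sketch (hypothetical syscall body; attribute and constructor details elided):
	//
	//   if (error_code err = lv2_obj::create<lv2_mutex>(attr.pshared, attr.ipc_key, attr.flags,
	//           [&] { return std::make_shared<lv2_mutex>(/* ... */); }))
	//   {
	//       return err;
	//   }
	//
	//   *mutex_id = idm::last_id(); // the ID allocated by idm::import on success
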
	template <typename T>
	static void on_id_destroy(T& obj, u64 ipc_key, u64 pshared = -1)
	{
		if (pshared == umax)
		{
			// Default is to check key
			pshared = ipc_key != 0;
		}

		if (obj.exists-- == 1u && pshared)
		{
			g_fxo->get<ipc_manager<T, u64>>().remove(ipc_key);
		}
	}

	template <typename T>
	static std::shared_ptr<T> load(u64 ipc_key, std::shared_ptr<T> make, u64 pshared = -1)
	{
		if (pshared == umax ? ipc_key != 0 : pshared != 0)
		{
			g_fxo->need<ipc_manager<T, u64>>();

			make = g_fxo->get<ipc_manager<T, u64>>().add(ipc_key, [&]()
			{
				return make;
			}, true).second;
		}

		// Ensure no error
		ensure(!make->on_id_create());
		return make;
	}

	static bool wait_timeout(u64 usec, ppu_thread* cpu = {}, bool scale = true, bool is_usleep = false);

	static inline void notify_all()
	{
		for (auto cpu : g_to_notify)
		{
			if (!cpu)
			{
				break;
			}

			if (cpu != &g_to_notify)
			{
				// Note: by the time of notification the thread could have been deallocated, which is why the direct function is used
				// TODO: Pass a narrower mask
				atomic_wait_engine::notify_one(cpu, 4, atomic_wait::default_mask<atomic_bs_t<cpu_flag>>);
			}
		}

		g_to_notify[0] = nullptr;
		g_postpone_notify_barrier = false;
	}

	// Can be called before the actual sleep call in order to move it out of mutex scope
	static void prepare_for_sleep(cpu_thread& cpu);

	struct notify_all_t
	{
		notify_all_t() noexcept
		{
			g_postpone_notify_barrier = true;
		}

		notify_all_t(const notify_all_t&) = delete;

		static void cleanup()
		{
			for (auto& cpu : g_to_notify)
			{
				if (!cpu)
				{
					return;
				}

				// While the IDM mutex is still locked (this function assumes so), check if the notification is still needed
				// The pending flag is meant for forced notification (if the CPU really has pending work, it can restore the flag in theory)
				if (cpu != &g_to_notify && static_cast<const decltype(cpu_thread::state)*>(cpu)->none_of(cpu_flag::signal + cpu_flag::pending))
				{
					// Omit it (this is a void pointer, it can hold anything)
					cpu = &g_to_notify;
				}
			}
		}

		~notify_all_t() noexcept
		{
			lv2_obj::notify_all();
		}
	};

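	// Hedged usage sketch: construct before taking the scheduler/IDM lock so notifications
	// raised while it is held are deferred to the destructor, after the lock is released
	//
	//   lv2_obj::notify_all_t notify;
	//   std::lock_guard lock(lv2_obj::g_mutex);
	//   /* state changes queue their targets into g_to_notify */
	//   // ~notify_all_t() runs lv2_obj::notify_all() here
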
	// Scheduler mutex
	static shared_mutex g_mutex;

private:
	// Pending list of threads to run
	static thread_local std::vector<class cpu_thread*> g_to_awake;

	// Scheduler queue for active PPU threads
	static class ppu_thread* g_ppu;

	// Number of threads the scheduler is still waiting for a response from
	static u32 g_pending;

	// Pending list of threads to notify (cpu_thread::state ptr)
	static thread_local std::add_pointer_t<const void> g_to_notify[4];

	// If a notify_all_t object exists locally, postpone notifications to its destructor (not recursive, notifies on the first destructor for safety)
	static thread_local bool g_postpone_notify_barrier;

	static void schedule_all(u64 current_time = 0);
};