#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"

#include "Utilities/bin_patch.h"
#include "Utilities/StrUtil.h"
#include "Utilities/address_range.h"
#include "util/serialization.hpp"
#include "Crypto/sha1.h"
#include "Crypto/unself.h"
#include "Loader/ELF.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "Emu/VFS.h"

#include "Emu/Cell/PPUOpcodes.h"
#include "Emu/Cell/SPUThread.h"
#include "Emu/Cell/PPUAnalyser.h"

#include "Emu/Cell/lv2/sys_process.h"
#include "Emu/Cell/lv2/sys_prx.h"
#include "Emu/Cell/lv2/sys_memory.h"
#include "Emu/Cell/lv2/sys_overlay.h"

#include "Emu/Cell/Modules/StaticHLE.h"

#include <map>
#include <span>
#include <set>
#include <algorithm>
#include <shared_mutex>

#include "util/asm.hpp"
LOG_CHANNEL(ppu_loader);

extern std::string ppu_get_function_name(const std::string& _module, u32 fnid);
extern std::string ppu_get_variable_name(const std::string& _module, u32 vnid);

extern void ppu_register_range(u32 addr, u32 size);
extern void ppu_register_function_at(u32 addr, u32 size, ppu_intrp_func_t ptr);

extern void sys_initialize_tls(ppu_thread&, u64, u32, u32, u32);

std::unordered_map<std::string, ppu_static_module*>& ppu_module_manager::get()
{
	// In C++ the order of static initialization is undefined if it happens in
	// separate compilation units, therefore we have to initialize the map on first use.
	static std::unordered_map<std::string, ppu_static_module*> s_module_map;
	return s_module_map;
}

// HLE function name cache
std::vector<std::string> g_ppu_function_names;

atomic_t<u32> liblv2_begin = 0, liblv2_end = 0;

extern u32 ppu_generate_id(std::string_view name)
{
	// Symbol name suffix
	constexpr auto suffix = "\x67\x59\x65\x99\x04\x25\x04\x90\x56\x64\x27\x49\x94\x89\x74\x1A"sv;

	sha1_context ctx;
	u8 output[20];

	// Compute SHA-1 hash
	sha1_starts(&ctx);
	sha1_update(&ctx, reinterpret_cast<const u8*>(name.data()), name.size());
	sha1_update(&ctx, reinterpret_cast<const u8*>(suffix.data()), suffix.size());
	sha1_finish(&ctx, output);

	le_t<u32> result = 0;
	std::memcpy(&result, output, sizeof(result));
	return result;
}
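
// Quick illustration (example only, not used by the loader): an export's NID is the first
// four bytes of SHA-1(symbol name + the fixed suffix above), read as a little-endian u32.
// Any symbol name works the same way; "sys_process_exit" is just a placeholder here.
//
//   const u32 nid = ppu_generate_id("sys_process_exit");
//   ppu_loader.trace("NID(sys_process_exit) = 0x%08x", nid);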

ppu_static_module::ppu_static_module(const char* name)
	: name(name)
{
	ppu_module_manager::register_module(this);
}

void ppu_static_module::add_init_func(void (*func)(ppu_static_module*))
{
	m_on_init.emplace_back(func);
}

void ppu_static_module::initialize()
{
	for (auto func : m_on_init)
	{
		func(this);
	}
}

void ppu_module_manager::register_module(ppu_static_module* _module)
{
	ppu_module_manager::get().emplace(_module->name, _module);
}

ppu_static_function& ppu_module_manager::access_static_function(const char* _module, u32 fnid)
{
	auto& res = ::at32(ppu_module_manager::get(), _module)->functions[fnid];

	if (res.name)
	{
		fmt::throw_exception("PPU FNID duplication in module %s (%s, 0x%x)", _module, res.name, fnid);
	}

	return res;
}

ppu_static_variable& ppu_module_manager::access_static_variable(const char* _module, u32 vnid)
{
	auto& res = ::at32(ppu_module_manager::get(), _module)->variables[vnid];

	if (res.name)
	{
		fmt::throw_exception("PPU VNID duplication in module %s (%s, 0x%x)", _module, res.name, vnid);
	}

	return res;
}

const ppu_static_module* ppu_module_manager::get_module(const std::string& name)
{
	const auto& map = ppu_module_manager::get();
	const auto found = map.find(name);
	return found != map.end() ? found->second : nullptr;
}

void ppu_module_manager::initialize_modules()
{
	for (auto& _module : ppu_module_manager::get())
	{
		_module.second->initialize();
	}
}
// Global linkage information
struct ppu_linkage_info
{
	ppu_linkage_info() = default;

	ppu_linkage_info(const ppu_linkage_info&) = delete;

	ppu_linkage_info& operator=(const ppu_linkage_info&) = delete;

	struct module_data
	{
		struct info
		{
			ppu_static_function* static_func = nullptr;
			ppu_static_variable* static_var = nullptr;
			u32 export_addr = 0;
			std::set<u32> imports{};
			std::set<u32> frefss{};
		};

		// FNID -> (export; [imports...])
		std::map<u32, info> functions{};
		std::map<u32, info> variables{};

		// Obsolete
		bool imported = false;
	};

	// Module map
	std::map<std::string, module_data> modules{};

	std::map<std::string, atomic_t<bool>, std::less<>> lib_lock;
	shared_mutex lib_lock_mutex;

	shared_mutex mutex;
};
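
// Informal sketch of how the maps above are used by the loaders further below (illustrative
// only; the identifiers mirror the real ones, but this snippet is not part of the code):
// imports record stub addresses under an FNID, and a later export retargets all of them.
//
//   auto& flink = link->modules["cellSysmodule"].functions[fnid];
//   flink.imports.emplace(stub_addr);                   // recorded by ppu_load_imports
//   flink.export_addr = faddr;                          // set by ppu_load_exports
//   for (u32 stub : flink.imports)
//       _module.get_ref<u32>(stub) = flink.export_addr; // every stub now points at the export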

// Initialize static modules.
static void ppu_initialize_modules(ppu_linkage_info* link, utils::serial* ar = nullptr)
{
	if (!link->modules.empty())
	{
		return;
	}

	ppu_module_manager::initialize_modules();

	const std::initializer_list<const ppu_static_module*> registered
	{
		&ppu_module_manager::cellAdec,
		&ppu_module_manager::cellAtrac,
		&ppu_module_manager::cellAtracMulti,
		&ppu_module_manager::cellAudio,
		&ppu_module_manager::cellAvconfExt,
		&ppu_module_manager::cellAuthDialogUtility,
		&ppu_module_manager::cellBGDL,
		&ppu_module_manager::cellCamera,
		&ppu_module_manager::cellCelp8Enc,
		&ppu_module_manager::cellCelpEnc,
		&ppu_module_manager::cellCrossController,
		&ppu_module_manager::cellDaisy,
		&ppu_module_manager::cellDmux,
		&ppu_module_manager::cellDtcpIpUtility,
		&ppu_module_manager::cellFiber,
		&ppu_module_manager::cellFont,
		&ppu_module_manager::cellFontFT,
		&ppu_module_manager::cell_FreeType2,
		&ppu_module_manager::cellFs,
		&ppu_module_manager::cellGame,
		&ppu_module_manager::cellGameExec,
		&ppu_module_manager::cellGcmSys,
		&ppu_module_manager::cellGem,
		&ppu_module_manager::cellGifDec,
		&ppu_module_manager::cellHttp,
		&ppu_module_manager::cellHttps,
		&ppu_module_manager::cellHttpUtil,
		&ppu_module_manager::cellImeJp,
		&ppu_module_manager::cellJpgDec,
		&ppu_module_manager::cellJpgEnc,
		&ppu_module_manager::cellKey2char,
		&ppu_module_manager::cellL10n,
		&ppu_module_manager::cellLibprof,
		&ppu_module_manager::cellMic,
		&ppu_module_manager::cellMusic,
		&ppu_module_manager::cellMusicDecode,
		&ppu_module_manager::cellMusicExport,
		&ppu_module_manager::cellNetAoi,
		&ppu_module_manager::cellNetCtl,
		&ppu_module_manager::cellOskDialog,
		&ppu_module_manager::cellOvis,
		&ppu_module_manager::cellPamf,
		&ppu_module_manager::cellPesmUtility,
		&ppu_module_manager::cellPhotoDecode,
		&ppu_module_manager::cellPhotoExport,
		&ppu_module_manager::cellPhotoImportUtil,
		&ppu_module_manager::cellPngDec,
		&ppu_module_manager::cellPngEnc,
		&ppu_module_manager::cellPrint,
		&ppu_module_manager::cellRec,
		&ppu_module_manager::cellRemotePlay,
		&ppu_module_manager::cellResc,
		&ppu_module_manager::cellRtc,
		&ppu_module_manager::cellRtcAlarm,
		&ppu_module_manager::cellRudp,
		&ppu_module_manager::cellSail,
		&ppu_module_manager::cellSailRec,
		&ppu_module_manager::cellSaveData,
		&ppu_module_manager::cellMinisSaveData,
		&ppu_module_manager::cellScreenShot,
		&ppu_module_manager::cellSearch,
		&ppu_module_manager::cellSheap,
		&ppu_module_manager::cellSpudll,
		&ppu_module_manager::cellSpurs,
		&ppu_module_manager::cellSpursJq,
		&ppu_module_manager::cellSsl,
		&ppu_module_manager::cellSubDisplay,
		&ppu_module_manager::cellSync,
		&ppu_module_manager::cellSync2,
		&ppu_module_manager::cellSysconf,
		&ppu_module_manager::cellSysmodule,
		&ppu_module_manager::cellSysutil,
		&ppu_module_manager::cellSysutilAp,
		&ppu_module_manager::cellSysutilAvc2,
		&ppu_module_manager::cellSysutilAvcExt,
		&ppu_module_manager::cellSysutilNpEula,
		&ppu_module_manager::cellSysutilMisc,
		&ppu_module_manager::cellUsbd,
		&ppu_module_manager::cellUsbPspcm,
		&ppu_module_manager::cellUserInfo,
		&ppu_module_manager::cellVdec,
		&ppu_module_manager::cellVideoExport,
		&ppu_module_manager::cellVideoPlayerUtility,
		&ppu_module_manager::cellVideoUpload,
		&ppu_module_manager::cellVoice,
		&ppu_module_manager::cellVpost,
		&ppu_module_manager::libad_async,
		&ppu_module_manager::libad_core,
		&ppu_module_manager::libfs_utility_init,
		&ppu_module_manager::libmedi,
		&ppu_module_manager::libmixer,
		&ppu_module_manager::libsnd3,
		&ppu_module_manager::libsynth2,
		&ppu_module_manager::sceNp,
		&ppu_module_manager::sceNp2,
		&ppu_module_manager::sceNpClans,
		&ppu_module_manager::sceNpCommerce2,
		&ppu_module_manager::sceNpMatchingInt,
		&ppu_module_manager::sceNpSns,
		&ppu_module_manager::sceNpTrophy,
		&ppu_module_manager::sceNpTus,
		&ppu_module_manager::sceNpUtil,
		&ppu_module_manager::sys_crashdump,
		&ppu_module_manager::sys_io,
		&ppu_module_manager::sys_net,
		&ppu_module_manager::sysPrxForUser,
		&ppu_module_manager::sys_libc,
		&ppu_module_manager::sys_lv2dbg,
		&ppu_module_manager::static_hle,
		&ppu_module_manager::hle_patches,
	};
	// Initialize double-purpose fake OPD array for HLE functions
	const auto& hle_funcs = ppu_function_manager::get(g_cfg.core.ppu_decoder != ppu_decoder_type::_static);

	u32& hle_funcs_addr = g_fxo->get<ppu_function_manager>().addr;

	// Allocate memory for the array (must be called after fixed allocations)
	if (!hle_funcs_addr)
		hle_funcs_addr = vm::alloc(::size32(hle_funcs) * 8, vm::main);
	else
		vm::page_protect(hle_funcs_addr, utils::align(::size32(hle_funcs) * 8, 0x1000), 0, vm::page_writable);

	// Initialize as PPU executable code
	ppu_register_range(hle_funcs_addr, ::size32(hle_funcs) * 8);

	// Fill the array (visible data: self address and function index)
	for (u32 addr = hle_funcs_addr, index = 0; index < hle_funcs.size(); addr += 8, index++)
	{
		// Function address = next CIA, RTOC = 0 (vm::null)
		vm::write32(addr + 0, addr + 4);
		vm::write32(addr + 4, 0);

		// Register the HLE function directly
		ppu_register_function_at(addr + 0, 4, nullptr);
		ppu_register_function_at(addr + 4, 4, hle_funcs[index]);
	}

	// Set memory protection to read-only
	vm::page_protect(hle_funcs_addr, utils::align(::size32(hle_funcs) * 8, 0x1000), 0, 0, vm::page_writable);

	// Initialize function names
	const bool is_first = g_ppu_function_names.empty();

	if (is_first)
	{
		g_ppu_function_names.resize(hle_funcs.size());
		g_ppu_function_names[0] = "INVALID";
		g_ppu_function_names[1] = "HLE RETURN";
	}

	// For HLE variable allocation
	u32 alloc_addr = 0;

	// "Use" all the modules for correct linkage
	if (ppu_loader.trace)
	{
		for (auto& _module : registered)
		{
			ppu_loader.trace("Registered static module: %s", _module->name);
		}
	}

	struct hle_vars_save
	{
		hle_vars_save() = default;

		hle_vars_save(const hle_vars_save&) = delete;

		hle_vars_save& operator=(const hle_vars_save&) = delete;

		hle_vars_save(utils::serial& ar)
		{
			auto& manager = ppu_module_manager::get();

			while (true)
			{
				const std::string name = ar.pop<std::string>();

				if (name.empty())
				{
					// Null termination
					break;
				}

				const auto _module = ::at32(manager, name);

				auto& variable = _module->variables;

				for (usz i = 0, end = ar.pop<usz>(); i < end; i++)
				{
					auto* ptr = &::at32(variable, ar.pop<u32>());
					ptr->addr = ar.pop<u32>();
					ensure(!!ptr->var);
				}
			}
		}

		void save(utils::serial& ar)
		{
			for (auto& pair : ppu_module_manager::get())
			{
				const auto _module = pair.second;

				if (_module->variables.empty())
				{
					continue;
				}

				ar(_module->name);
				ar(_module->variables.size());

				for (auto& variable : _module->variables)
				{
					ar(variable.first, variable.second.addr);
				}
			}

			// Null terminator
			ar(std::string{});
		}
	};

	if (ar)
	{
		g_fxo->init<hle_vars_save>(*ar);
	}
	else
	{
		g_fxo->init<hle_vars_save>();
	}
	for (auto& pair : ppu_module_manager::get())
	{
		const auto _module = pair.second;
		auto& linkage = link->modules[_module->name];

		for (auto& function : _module->functions)
		{
			ppu_loader.trace("** 0x%08X: %s", function.first, function.second.name);

			if (is_first)
			{
				g_ppu_function_names[function.second.index] = fmt::format("%s:%s", function.second.name, _module->name);
			}

			auto& flink = linkage.functions[function.first];

			flink.static_func = &function.second;
			flink.export_addr = g_fxo->get<ppu_function_manager>().func_addr(function.second.index);
			function.second.export_addr = &flink.export_addr;
		}

		for (auto& variable : _module->variables)
		{
			ppu_loader.trace("** &0x%08X: %s (size=0x%x, align=0x%x)", variable.first, variable.second.name, variable.second.size, variable.second.align);

			// Allocate HLE variable
			if (ar)
			{
				// Already loaded
			}
			else if (variable.second.size >= 0x10000 || variable.second.align >= 0x10000)
			{
				variable.second.addr = vm::alloc(variable.second.size, vm::main, std::max<u32>(variable.second.align, 0x10000));
			}
			else
			{
				const u32 next = utils::align(alloc_addr, variable.second.align);
				const u32 end = next + variable.second.size - 1;

				if (!next || (end >> 16 != alloc_addr >> 16))
				{
					alloc_addr = vm::alloc(0x10000, vm::main);
				}
				else
				{
					alloc_addr = next;
				}

				variable.second.addr = alloc_addr;
				alloc_addr += variable.second.size;
			}

			*variable.second.var = variable.second.addr;
			ppu_loader.trace("Allocated HLE variable %s.%s at 0x%x", _module->name, variable.second.name, *variable.second.var);

			// Initialize HLE variable
			if (variable.second.init)
			{
				variable.second.init();
			}

			if ((variable.second.flags & MFF_HIDDEN) == 0)
			{
				auto& vlink = linkage.variables[variable.first];

				vlink.static_var = &variable.second;
				vlink.export_addr = variable.second.addr;
				variable.second.export_addr = &vlink.export_addr;
			}
		}
	}
}
// For the debugger (g_ppu_function_names shouldn't change, string_view should suffice)
extern const std::unordered_map<u32, std::string_view>& get_exported_function_names_as_addr_indexed_map()
{
	struct info_t
	{
		std::unordered_map<u32, std::string_view> res;
		u64 update_time = 0;
	};

	static thread_local std::unique_ptr<info_t> info;

	if (!info)
	{
		info = std::make_unique<info_t>();
		info->res.reserve(ppu_module_manager::get().size());
	}

	auto& [res, update_time] = *info;

	const auto link = g_fxo->try_get<ppu_linkage_info>();
	const auto hle_funcs = g_fxo->try_get<ppu_function_manager>();

	if (!link || !hle_funcs)
	{
		res.clear();
		return res;
	}

	const u64 current_time = get_system_time();

	// Update list every >= 0.1 seconds
	if (current_time - update_time < 100'000)
	{
		return res;
	}

	update_time = current_time;
	res.clear();

	for (auto& pair : ppu_module_manager::get())
	{
		const auto _module = pair.second;
		auto& linkage = link->modules[_module->name];

		for (auto& function : _module->functions)
		{
			auto& flink = linkage.functions[function.first];

			u32 addr = flink.export_addr;

			if (vm::check_addr<4>(addr, vm::page_readable) && addr != hle_funcs->func_addr(function.second.index))
			{
				addr = vm::read32(addr);

				if (!(addr % 4) && vm::check_addr<4>(addr, vm::page_executable))
				{
					res.try_emplace(addr, g_ppu_function_names[function.second.index]);
				}
			}
		}
	}

	return res;
}
// Resolve relocations for variable/function linkage.
static void ppu_patch_refs(const ppu_module& _module, std::vector<ppu_reloc>* out_relocs, u32 fref, u32 faddr)
{
	struct ref_t
	{
		be_t<u32> type;
		be_t<u32> addr;
		be_t<u32> addend; // Note: Treating it as an addend seems to be correct for now, but it is still unknown whether there's more in this variable
	};

	for (const ref_t* ref = &_module.get_ref<ref_t>(fref); ref->type; fref += sizeof(ref_t), ref = &_module.get_ref<ref_t>(fref))
	{
		if (ref->addend) ppu_loader.warning("**** REF(%u): Addend value(0x%x, 0x%x)", ref->type, ref->addr, ref->addend);

		const u32 raddr = ref->addr;
		const u32 rtype = ref->type;
		const u32 rdata = faddr + ref->addend;

		if (out_relocs)
		{
			// Register relocation with unpredictable target (data=0)
			ppu_reloc _rel;
			_rel.addr = raddr;
			_rel.type = rtype;
			_rel.data = 0;
			out_relocs->emplace_back(_rel);
		}

		// OPs must be similar to relocations
		switch (rtype)
		{
		case 1:
		{
			const u32 value = _module.get_ref<u32>(ref->addr) = rdata;
			ppu_loader.trace("**** REF(1): 0x%x <- 0x%x", ref->addr, value);
			break;
		}

		case 4:
		{
			const u16 value = _module.get_ref<u16>(ref->addr) = static_cast<u16>(rdata);
			ppu_loader.trace("**** REF(4): 0x%x <- 0x%04x (0x%llx)", ref->addr, value, faddr);
			break;
		}

		case 6:
		{
			const u16 value = _module.get_ref<u16>(ref->addr) = static_cast<u16>(rdata >> 16) + (rdata & 0x8000 ? 1 : 0);
			ppu_loader.trace("**** REF(6): 0x%x <- 0x%04x (0x%llx)", ref->addr, value, faddr);
			break;
		}

		case 57:
		{
			const u16 value = _module.get_ref<ppu_bf_t<be_t<u16>, 0, 14>>(ref->addr) = static_cast<u16>(rdata) >> 2;
			ppu_loader.trace("**** REF(57): 0x%x <- 0x%04x (0x%llx)", ref->addr, value, faddr);
			break;
		}

		default: ppu_loader.error("**** REF(%u): Unknown/Illegal type (0x%x, 0x%x)", rtype, raddr, ref->addend);
		}
	}
}
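
// Layout note (inferred from the loop above, kept informal): fref points at a null-terminated
// array of 12-byte { type, addr, addend } records. The handled types patch the target with the
// resolved address in different widths:
//   1  -> full 32-bit word
//   4  -> low 16 bits
//   6  -> high 16 bits, adjusted for the sign of the low half (ha16)
//   57 -> 14-bit branch displacement field (address >> 2)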

enum PRX_EXPORT_ATTRIBUTES : u16
{
	PRX_EXPORT_LIBRARY_FLAG = 1,
	PRX_EXPORT_PRX_MANAGEMENT_FUNCTIONS_FLAG = 0x8000,
};

// Export or import module struct
struct ppu_prx_module_info
{
	u8 size;
	u8 unk0;
	be_t<u16> version;
	be_t<u16> attributes;
	be_t<u16> num_func;
	be_t<u16> num_var;
	be_t<u16> num_tlsvar;
	u8 info_hash;
	u8 info_tlshash;
	u8 unk1[2];
	vm::bcptr<char> name;
	vm::bcptr<u32> nids; // Imported FNIDs, Exported NIDs
	vm::bptr<u32> addrs;
	vm::bcptr<u32> vnids; // Imported VNIDs
	vm::bcptr<u32> vstubs;
	be_t<u32> unk4;
	be_t<u32> unk5;
};

bool ppu_form_branch_to_code(u32 entry, u32 target);

extern u32 ppu_get_exported_func_addr(u32 fnid, const std::string& module_name)
{
	return g_fxo->get<ppu_linkage_info>().modules[module_name].functions[fnid].export_addr;
}

extern bool ppu_register_library_lock(std::string_view libname, bool lock_lib)
{
	auto link = g_fxo->try_get<ppu_linkage_info>();

	if (!link || libname.empty())
	{
		return false;
	}

	reader_lock lock(link->lib_lock_mutex);

	if (auto it = link->lib_lock.find(libname); it != link->lib_lock.cend())
	{
		return lock_lib ? !it->second.test_and_set() : it->second.test_and_reset();
	}

	if (!lock_lib)
	{
		// If the lock hasn't been installed, the library wasn't locked in the first place
		return false;
	}

	lock.upgrade();

	auto& lib_lock = link->lib_lock.emplace(std::string{libname}, false).first->second;

	return !lib_lock.test_and_set();
}

// Load and register exports; return special exports found (nameless module)
static auto ppu_load_exports(const ppu_module& _module, ppu_linkage_info* link, u32 exports_start, u32 exports_end, bool for_observing_callbacks = false, std::basic_string<bool>* loaded_flags = nullptr)
{
	std::unordered_map<u32, u32> result;

	// Flags were already provided, meaning it's an unload operation
	const bool unload_exports = loaded_flags && !loaded_flags->empty();

	std::lock_guard lock(link->mutex);

	usz unload_index = 0;

	ppu_prx_module_info lib{};

	for (u32 addr = exports_start; addr < exports_end; unload_index++, addr += lib.size ? lib.size : sizeof(ppu_prx_module_info))
	{
		std::memcpy(&lib, &_module.get_ref<ppu_prx_module_info>(addr), sizeof(lib));

		const bool is_library = !!(lib.attributes & PRX_EXPORT_LIBRARY_FLAG);
		const bool is_management = !is_library && !!(lib.attributes & PRX_EXPORT_PRX_MANAGEMENT_FUNCTIONS_FLAG);

		if (loaded_flags && !unload_exports)
		{
			loaded_flags->push_back(false);
		}

		if (is_management)
		{
			// Set special exports
			for (u32 i = 0, end = lib.num_func + lib.num_var; i < end; i++)
			{
				const u32 nid = _module.get_ref<u32>(lib.nids, i);
				const u32 addr = _module.get_ref<u32>(lib.addrs, i);

				if (i < lib.num_func)
				{
					ppu_loader.notice("** Special: [%s] at 0x%x [0x%x, 0x%x]", ppu_get_function_name({}, nid), addr, _module.get_ref<u32>(addr), _module.get_ref<u32>(addr + 4));
				}
				else
				{
					ppu_loader.notice("** Special: &[%s] at 0x%x", ppu_get_variable_name({}, nid), addr);
				}

				result.emplace(nid, addr);
			}

			continue;
		}

		if (!is_library)
		{
			// Skipped if none of the flags is set
			continue;
		}

		if (for_observing_callbacks)
		{
			continue;
		}

		const std::string module_name(&_module.get_ref<const char>(lib.name));

		if (unload_exports)
		{
			if (::at32(*loaded_flags, unload_index))
			{
				ppu_register_library_lock(module_name, false);
			}

			continue;
		}

		ppu_loader.notice("** Exported module '%s' (vnids=0x%x, vstubs=0x%x, version=0x%x, attributes=0x%x, unk4=0x%x, unk5=0x%x)", module_name, lib.vnids, lib.vstubs, lib.version, lib.attributes, lib.unk4, lib.unk5);

		if (lib.num_tlsvar)
		{
			ppu_loader.error("Unexpected num_tlsvar (%u)!", lib.num_tlsvar);
		}

		const bool should_load = ppu_register_library_lock(module_name, true);

		if (loaded_flags)
		{
			loaded_flags->back() = should_load;
		}

		if (!should_load)
		{
			ppu_loader.notice("** Skipped module '%s' (already loaded)", module_name);
			continue;
		}
		// Static module
		const auto _sm = ppu_module_manager::get_module(module_name);

		// Module linkage
		auto& mlink = link->modules[module_name];

		const auto fnids = +lib.nids;
		const auto faddrs = +lib.addrs;

		// Get functions
		for (u32 i = 0, end = lib.num_func; i < end; i++)
		{
			const u32 fnid = _module.get_ref<u32>(fnids, i);
			const u32 faddr = _module.get_ref<u32>(faddrs, i);
			ppu_loader.notice("**** %s export: [%s] (0x%08x) at 0x%x [at:0x%x]", module_name, ppu_get_function_name(module_name, fnid), fnid, faddr, _module.get_ref<u32>(faddr));

			// Function linkage info
			auto& flink = mlink.functions[fnid];

			if (flink.static_func && flink.export_addr == g_fxo->get<ppu_function_manager>().func_addr(flink.static_func->index))
			{
				flink.export_addr = 0;
			}

			if (flink.export_addr)
			{
				ppu_loader.notice("Already linked function '%s' in module '%s'", ppu_get_function_name(module_name, fnid), module_name);
			}

			//else
			{
				// Static function
				const auto _sf = _sm && _sm->functions.count(fnid) ? &::at32(_sm->functions, fnid) : nullptr;

				if (_sf && (_sf->flags & MFF_FORCED_HLE))
				{
					// Inject a branch to the HLE implementation
					const u32 target = g_fxo->get<ppu_function_manager>().func_addr(_sf->index, true);

					// Set exported function
					flink.export_addr = target - 4;

					if (auto ptr = _module.get_ptr<u32>(faddr); vm::try_get_addr(ptr).first)
					{
						ppu_form_branch_to_code(*ptr, target);
					}
				}
				else
				{
					// Set exported function
					flink.export_addr = faddr;

					// Fix imports
					for (const u32 addr : flink.imports)
					{
						_module.get_ref<u32>(addr) = faddr;
						//ppu_loader.warning("Exported function '%s' in module '%s'", ppu_get_function_name(module_name, fnid), module_name);
					}

					for (const u32 fref : flink.frefss)
					{
						ppu_patch_refs(_module, nullptr, fref, faddr);
					}
				}
			}
		}

		const auto vnids = lib.nids + lib.num_func;
		const auto vaddrs = lib.addrs + lib.num_func;

		// Get variables
		for (u32 i = 0, end = lib.num_var; i < end; i++)
		{
			const u32 vnid = _module.get_ref<u32>(vnids, i);
			const u32 vaddr = _module.get_ref<u32>(vaddrs, i);
			ppu_loader.notice("**** %s export: &[%s] at 0x%x", module_name, ppu_get_variable_name(module_name, vnid), vaddr);

			// Variable linkage info
			auto& vlink = mlink.variables[vnid];

			if (vlink.static_var && vlink.export_addr == vlink.static_var->addr)
			{
				vlink.export_addr = 0;
			}

			if (vlink.export_addr)
			{
				ppu_loader.error("Already linked variable '%s' in module '%s'", ppu_get_variable_name(module_name, vnid), module_name);
			}

			//else
			{
				// Set exported variable
				vlink.export_addr = vaddr;

				// Fix imports
				for (const auto vref : vlink.imports)
				{
					ppu_patch_refs(_module, nullptr, vref, vaddr);
					//ppu_loader.warning("Exported variable '%s' in module '%s'", ppu_get_variable_name(module_name, vnid), module_name);
				}
			}
		}
	}

	return result;
}

static auto ppu_load_imports(const ppu_module& _module, std::vector<ppu_reloc>& relocs, ppu_linkage_info* link, u32 imports_start, u32 imports_end)
{
	std::unordered_map<u32, void*> result;

	std::lock_guard lock(link->mutex);

	for (u32 addr = imports_start; addr < imports_end;)
	{
		const auto& lib = _module.get_ref<const ppu_prx_module_info>(addr);

		const std::string module_name(&_module.get_ref<const char>(lib.name));

		ppu_loader.notice("** Imported module '%s' (ver=0x%x, attr=0x%x, 0x%x, 0x%x) [0x%x]", module_name, lib.version, lib.attributes, lib.unk4, lib.unk5, addr);

		if (lib.num_tlsvar)
		{
			ppu_loader.error("Unexpected num_tlsvar (%u)!", lib.num_tlsvar);
		}

		// Static module
		//const auto _sm = ppu_module_manager::get_module(module_name);

		// Module linkage
		auto& mlink = link->modules[module_name];

		const auto fnids = +lib.nids;
		const auto faddrs = +lib.addrs;

		for (u32 i = 0, end = lib.num_func; i < end; i++)
		{
			const u32 fnid = _module.get_ref<u32>(fnids, i);
			const u32 fstub = _module.get_ref<u32>(faddrs, i);
			const u32 faddr = (faddrs + i).addr();

			ppu_loader.notice("**** %s import: [%s] (0x%08x) -> 0x%x", module_name, ppu_get_function_name(module_name, fnid), fnid, fstub);

			// Function linkage info
			auto& flink = link->modules[module_name].functions[fnid];

			// Add new import
			result.emplace(faddr, &flink);
			flink.imports.emplace(faddr);
			mlink.imported = true;

			// Link address (special HLE function by default)
			const u32 link_addr = flink.export_addr ? flink.export_addr : g_fxo->get<ppu_function_manager>().addr;

			// Write import table
			_module.get_ref<u32>(faddr) = link_addr;

			// Patch refs if necessary (0x2000 seems to be the correct flag indicating the presence of additional info)
			if (const u32 frefs = (lib.attributes & 0x2000) ? +_module.get_ref<u32>(fnids, i + lib.num_func) : 0)
			{
				result.emplace(frefs, &flink);
				flink.frefss.emplace(frefs);
				ppu_patch_refs(_module, &relocs, frefs, link_addr);
			}

			//ppu_loader.warning("Imported function '%s' in module '%s' (0x%x)", ppu_get_function_name(module_name, fnid), module_name, faddr);
		}

		const auto vnids = +lib.vnids;
		const auto vstubs = +lib.vstubs;

		for (u32 i = 0, end = lib.num_var; i < end; i++)
		{
			const u32 vnid = _module.get_ref<u32>(vnids, i);
			const u32 vref = _module.get_ref<u32>(vstubs, i);
			ppu_loader.notice("**** %s import: &[%s] (ref=*0x%x)", module_name, ppu_get_variable_name(module_name, vnid), vref);

			// Variable linkage info
			auto& vlink = link->modules[module_name].variables[vnid];

			// Add new import
			result.emplace(vref, &vlink);
			vlink.imports.emplace(vref);
			mlink.imported = true;

			// Link if available
			ppu_patch_refs(_module, &relocs, vref, vlink.export_addr);

			//ppu_loader.warning("Imported variable '%s' in module '%s' (0x%x)", ppu_get_variable_name(module_name, vnid), module_name, vlink.first);
		}

		addr += lib.size ? lib.size : sizeof(ppu_prx_module_info);
	}

	return result;
}

// For _sys_prx_register_module
void ppu_manual_load_imports_exports(u32 imports_start, u32 imports_size, u32 exports_start, u32 exports_size, std::basic_string<bool>& loaded_flags)
{
	auto& _main = g_fxo->get<main_ppu_module>();
	auto& link = g_fxo->get<ppu_linkage_info>();

	ppu_module vm_all_fake_module{};
	vm_all_fake_module.segs.emplace_back(ppu_segment{0x10000, 0 - 0x10000u, 1 /*LOAD*/, 0, 0 - 0x1000u, vm::base(0x10000)});
	vm_all_fake_module.addr_to_seg_index.emplace(0x10000, 0);

	ppu_load_exports(vm_all_fake_module, &link, exports_start, exports_start + exports_size, false, &loaded_flags);

	if (!imports_size)
	{
		return;
	}

	ppu_load_imports(vm_all_fake_module, _main.relocs, &link, imports_start, imports_start + imports_size);
}

// For savestates
extern bool is_memory_compatible_for_copy_from_executable_optimization(u32 addr, u32 size)
{
	if (g_cfg.savestate.state_inspection_mode)
	{
		return false;
	}

	static ppu_exec_object s_ppu_exec;
	static std::vector<char> zeroes;

	if (!addr)
	{
		// A call for cleanup
		s_ppu_exec.clear();
		zeroes = {};
		return false;
	}

	if (s_ppu_exec != elf_error::ok)
	{
		if (s_ppu_exec != elf_error::stream)
		{
			// Failed before
			return false;
		}

		s_ppu_exec.open(decrypt_self(fs::file(Emu.GetBoot()), Emu.klic.empty() ? nullptr : reinterpret_cast<u8*>(&Emu.klic[0])));

		if (s_ppu_exec != elf_error::ok)
		{
			return false;
		}
	}

	for (const auto& prog : s_ppu_exec.progs)
	{
		const u32 vaddr = static_cast<u32>(prog.p_vaddr);
		const u32 seg_size = static_cast<u32>(prog.p_filesz);
		const u32 aligned_vaddr = vaddr & -0x10000;
		const u32 vaddr_offs = vaddr & 0xffff;

		// Check if the address is the start of a segment within the executable
		if (prog.p_type == 0x1u /* LOAD */ && seg_size && aligned_vaddr == addr && prog.p_vaddr == prog.p_paddr && vaddr_offs + seg_size <= size)
		{
			zeroes.resize(std::max<usz>({zeroes.size(), usz{addr + size - (vaddr + seg_size)}, usz{vaddr_offs}}));

			// Check if the gaps between the segment and the allocation bounds are still zeroes-only
			if (!std::memcmp(vm::_ptr<char>(aligned_vaddr), zeroes.data(), vaddr_offs) &&
				!std::memcmp(vm::_ptr<char>(vaddr + seg_size), zeroes.data(), (addr + size - (vaddr + seg_size))))
			{
				// Test memory equality
				return !std::memcmp(prog.bin.data(), vm::base(vaddr), seg_size);
			}
		}
	}

	return false;
}

void init_ppu_functions(utils::serial* ar, bool full = false)
{
	g_fxo->need<ppu_linkage_info>();

	if (ar)
	{
		const u32 addr = g_fxo->init<ppu_function_manager>(*ar)->addr;

		if (addr % 0x1000 || !vm::check_addr(addr))
		{
			fmt::throw_exception("init_ppu_functions(): Failure to initialize function manager. (addr=0x%x, %s)", addr, *ar);
		}
	}
	else
		g_fxo->init<ppu_function_manager>();

	if (full)
	{
		// Initialize HLE modules
		ppu_initialize_modules(&g_fxo->get<ppu_linkage_info>(), ar);
	}
}

static void ppu_check_patch_spu_images(const ppu_module& mod, const ppu_segment& seg)
{
	if (!seg.size)
	{
		return;
	}

	const bool is_firmware = mod.path.starts_with(vfs::get("/dev_flash/"));

	const auto _main = g_fxo->try_get<main_ppu_module>();

	const std::string_view seg_view{ensure(mod.get_ptr<char>(seg.addr)), seg.size};

	auto find_first_of_multiple = [](std::string_view data, std::initializer_list<std::string_view> values, usz index)
	{
		u32 pos = static_cast<u32>(data.size());

		for (std::string_view value : values)
		{
			if (usz pos0 = data.substr(index, pos - index).find(value); pos0 != umax && pos0 + index < pos)
			{
				pos = static_cast<u32>(pos0 + index);
			}
		}

		return pos;
	};

	extern void utilize_spu_data_segment(u32 vaddr, const void* ls_data_vaddr, u32 size);

	// Search for [stqd lr,0x10(sp)] instruction or ELF file signature, whichever comes first
	const std::initializer_list<std::string_view> prefixes = {"\177ELF"sv, "\x24\0\x40\x80"sv};

	u32 prev_bound = 0;

	for (u32 i = find_first_of_multiple(seg_view, prefixes, 0); i < seg.size; i = find_first_of_multiple(seg_view, prefixes, utils::align<u32>(i + 1, 4)))
	{
		const auto elf_header = ensure(mod.get_ptr<u8>(seg.addr + i));

		if (i % 4 == 0 && std::memcmp(elf_header, "\x24\0\x40\x80", 4) == 0)
		{
			bool next = true;
			const u32 old_i = i;

			for (u32 search = i & -128, tries = 10; tries && search >= prev_bound; tries--, search = utils::sub_saturate<u32>(search, 128))
			{
				if (seg_view[search] != 0x42 && seg_view[search] != 0x43)
				{
					continue;
				}

				const u32 inst1 = read_from_ptr<be_t<u32>>(seg_view, search);
				const u32 inst2 = read_from_ptr<be_t<u32>>(seg_view, search + 4);
				const u32 inst3 = read_from_ptr<be_t<u32>>(seg_view, search + 8);
				const u32 inst4 = read_from_ptr<be_t<u32>>(seg_view, search + 12);

				if ((inst1 & 0xfe'00'00'7f) != 0x42000002 || (inst2 & 0xfe'00'00'7f) != 0x42000002 || (inst3 & 0xfe'00'00'7f) != 0x42000002 || (inst4 & 0xfe'00'00'7f) != 0x42000002)
				{
					continue;
				}

				ppu_log.success("Found SPURS GUID Pattern at 0x%05x", search + seg.addr);
				i = search;
				next = false;
				break;
			}

			if (next)
			{
				continue;
			}

			std::string_view ls_segment = seg_view.substr(i);

			// Bound to a bit less than LS size
			ls_segment = ls_segment.substr(0, 0x38000);

			for (usz addr_last = 0, valid_count = 0, invalid_count = 0;;)
			{
				const usz instruction = ls_segment.find("\x24\0\x40\x80"sv, addr_last);

				if (instruction != umax)
				{
					if (instruction % 4 != i % 4)
					{
						// Unaligned, continue
						addr_last = instruction + (i % 4 - instruction % 4) % 4;
						continue;
					}

					// FIXME: This seems to terminate SPU code prematurely in some cases
					// Likely due to absolute branches
					if (spu_thread::is_exec_code(::narrow<u32>(instruction), {reinterpret_cast<const u8*>(ls_segment.data()), ls_segment.size()}, 0))
					{
						addr_last = instruction + 4;
						valid_count++;
						invalid_count = 0;
						continue;
					}

					if (invalid_count == 0)
					{
						// Allow a single case of invalid data
						addr_last = instruction + 4;
						invalid_count++;
						continue;
					}

					addr_last = instruction;
				}

				if (addr_last >= 0x80 && valid_count >= 2)
				{
					const u32 begin = i & -128;
					u32 end = std::min<u32>(seg.size, utils::align<u32>(::narrow<u32>(i + addr_last + 256), 128));

					u32 guessed_ls_addr = 0;

					// Try to guess the LS address by observing the pattern for disable/enable interrupts
					// ILA R2, PC + 8
					// BIE/BID R2
					for (u32 found = 0, last_vaddr = 0, it = begin + 16; it < end - 16; it += 4)
					{
						const u32 inst1 = read_from_ptr<be_t<u32>>(seg_view, it);
						const u32 inst2 = read_from_ptr<be_t<u32>>(seg_view, it + 4);
						const u32 inst3 = read_from_ptr<be_t<u32>>(seg_view, it + 8);
						const u32 inst4 = read_from_ptr<be_t<u32>>(seg_view, it + 12);

						if ((inst1 & 0xfe'00'00'7f) == 0x42000002 && (inst2 & 0xfe'00'00'7f) == 0x42000002 && (inst3 & 0xfe'00'00'7f) == 0x42000002 && (inst4 & 0xfe'00'00'7f) == 0x42000002)
						{
							// SPURS GUID pattern
							end = it;
							ppu_log.success("Found SPURS GUID Pattern for terminator at 0x%05x", end + seg.addr);
							break;
						}

						if ((inst1 >> 7) % 4 == 0 && (inst1 & 0xfe'00'00'7f) == 0x42000002 && (inst2 == 0x35040100 || inst2 == 0x35080100))
						{
							const u32 addr_inst = (inst1 >> 7) % 0x40000;

							if (u32 addr_seg = addr_inst - std::min<u32>(it + 8 - begin, addr_inst))
							{
								if (last_vaddr != addr_seg)
								{
									guessed_ls_addr = 0;
									found = 0;
								}

								found++;
								last_vaddr = addr_seg;

								if (found >= 2)
								{
									// Good segment address
									guessed_ls_addr = last_vaddr;
									ppu_log.notice("Found IENABLE/IDISABLE Pattern at 0x%05x", it + seg.addr);
								}
							}
						}
					}

					if (guessed_ls_addr)
					{
						end = begin + std::min<u32>(end - begin, SPU_LS_SIZE - guessed_ls_addr);
					}

					ppu_log.success("Found valid roaming SPU code at 0x%x..0x%x (guessed_ls_addr=0x%x)", seg.addr + begin, seg.addr + end, guessed_ls_addr);

					if (!is_firmware && _main == &mod)
					{
						// Signify that the base address is unknown by passing 0
						utilize_spu_data_segment(guessed_ls_addr ? guessed_ls_addr : 0x4000, seg_view.data() + begin, end - begin);
					}

					i = std::max<u32>(end, i + 4) - 4;
					prev_bound = i + 4;
				}
				else
				{
					i = old_i;
				}

				break;
			}

			continue;
		}
		// Try to load SPU image
		const spu_exec_object obj(fs::file(elf_header, seg.size - i));

		if (obj != elf_error::ok)
		{
			// This address does not have an SPU elf
			continue;
		}

		// Segment info dump
		std::string name;
		std::string dump;

		std::basic_string<u32> applied;

		// Executable hash
		sha1_context sha2;
		sha1_starts(&sha2);
		u8 sha1_hash[20];

		for (const auto& prog : obj.progs)
		{
			// Only hash the data, we are not loading it
			sha1_update(&sha2, reinterpret_cast<const uchar*>(&prog.p_vaddr), sizeof(prog.p_vaddr));
			sha1_update(&sha2, reinterpret_cast<const uchar*>(&prog.p_memsz), sizeof(prog.p_memsz));
			sha1_update(&sha2, reinterpret_cast<const uchar*>(&prog.p_filesz), sizeof(prog.p_filesz));

			fmt::append(dump, "\n\tSegment: p_type=0x%x, p_vaddr=0x%llx, p_filesz=0x%llx, p_memsz=0x%llx, p_offset=0x%llx", prog.p_type, prog.p_vaddr, prog.p_filesz, prog.p_memsz, prog.p_offset);

			if (prog.p_type == 0x1u /* LOAD */ && prog.p_filesz > 0u)
			{
				if (prog.p_vaddr && !is_firmware && _main == &mod)
				{
					extern void utilize_spu_data_segment(u32 vaddr, const void* ls_data_vaddr, u32 size);

					utilize_spu_data_segment(prog.p_vaddr, (elf_header + prog.p_offset), prog.p_filesz);
				}

				sha1_update(&sha2, (elf_header + prog.p_offset), prog.p_filesz);
			}
			else if (prog.p_type == 0x4u /* NOTE */ && prog.p_filesz > 0u)
			{
				sha1_update(&sha2, (elf_header + prog.p_offset), prog.p_filesz);

				// We assume that the string SPUNAME exists 0x14 bytes into the NOTE segment
				name = ensure(mod.get_ptr<const char>(seg.addr + i + prog.p_offset + 0x14));

				if (!name.empty())
				{
					fmt::append(dump, "\n\tSPUNAME: '%s'", name);
				}
			}
		}

		fmt::append(dump, " (image addr: 0x%x, size: 0x%x)", seg.addr + i, obj.highest_offset);

		sha1_finish(&sha2, sha1_hash);

		// Format patch name
		std::string hash("SPU-0000000000000000000000000000000000000000");

		for (u32 i = 0; i < sizeof(sha1_hash); i++)
		{
			constexpr auto pal = "0123456789abcdef";
			hash[4 + i * 2] = pal[sha1_hash[i] >> 4];
			hash[5 + i * 2] = pal[sha1_hash[i] & 15];
		}

		if (g_cfg.core.spu_debug)
		{
			fs::file temp(fs::get_cache_dir() + "/spu_progs/" + vfs::escape(name.substr(name.find_last_of('/') + 1)) + '_' + hash.substr(4) + ".elf", fs::rewrite);

			if (!temp || !temp.write(obj.save()))
			{
				ppu_loader.error("Failed to dump SPU program from PPU executable: name='%s', hash=%s", name, hash);
			}
		}

		// Try to patch each segment, will only succeed if the address exists in SPU local storage
		for (const auto& prog : obj.progs)
		{
			// Apply the patch
			applied += g_fxo->get<patch_engine>().apply(hash, [&](u32 addr, u32 /*size*/) { return addr + elf_header + prog.p_offset; }, prog.p_filesz, prog.p_vaddr);

			if (!Emu.GetTitleID().empty())
			{
				// Alternative patch
				applied += g_fxo->get<patch_engine>().apply(Emu.GetTitleID() + '-' + hash, [&](u32 addr, u32 /*size*/) { return addr + elf_header + prog.p_offset; }, prog.p_filesz, prog.p_vaddr);
			}
		}

		if (applied.empty())
		{
			ppu_loader.warning("SPU executable hash: %s%s", hash, dump);
		}
		else
		{
			ppu_loader.success("SPU executable hash: %s (<- %u)%s", hash, applied.size(), dump);
		}

		i += ::narrow<u32>(obj.highest_offset) - 4;
		prev_bound = i + 4;
	}
}

void try_spawn_ppu_if_exclusive_program(const ppu_module& m)
{
	// If only PRX/OVL has been loaded at Emu.BootGame(), launch a single PPU thread so its memory can be viewed
	if (Emu.IsReady() && g_fxo->get<main_ppu_module>().segs.empty() && !Emu.DeserialManager())
	{
		ppu_thread_params p
		{
			.stack_addr = vm::cast(vm::alloc(SYS_PROCESS_PARAM_STACK_SIZE_MAX, vm::stack, 4096)),
			.stack_size = SYS_PROCESS_PARAM_STACK_SIZE_MAX,
		};

		auto ppu = idm::make_ptr<named_thread<ppu_thread>>(p, "test_thread", 0);

		ppu->cia = m.funcs.empty() ? m.secs[0].addr : m.funcs[0].addr;

		// For kernel explorer
		g_fxo->init<lv2_memory_container>(4096);
	}
}

struct prx_names_table
{
	shared_mutex mutex;
	std::set<std::string, std::less<>> registered;
	atomic_t<const char*> lut[0x1000'0000 / 0x1'0000]{};

	SAVESTATE_INIT_POS(4.1); // Dependency on lv2_obj

	prx_names_table() noexcept
	{
		idm::select<lv2_obj, lv2_prx>([this](u32, lv2_prx& prx)
		{
			install(prx.name, prx);
		});
	}

	void install(std::string_view name, lv2_prx& prx)
	{
		if (name.empty())
		{
			return;
		}

		if (name.ends_with(".sprx"sv) && name.size() > (".sprx"sv).size())
		{
			name = name.substr(0, name.size() - (".sprx"sv).size());
		}

		std::lock_guard lock(mutex);

		const auto ptr = registered.emplace(name).first->c_str();

		for (auto& seg : prx.segs)
		{
			if (!seg.size)
			{
				continue;
			}

			// Addresses above 256MB are not supported because covering them wastes memory and they are very unlikely (if they somehow do occur, increase the table size)
			const u32 max0 = (seg.addr + seg.size - 1) >> 16;
			const u32 max = std::min<u32>(::size32(lut) - 1, max0);

			if (max0 > max)
			{
				ppu_loader.error("Skipping PRX name registration: %s, max=0x%x", name, max0 << 16);
			}

			for (u32 i = seg.addr >> 16; i <= max; i++)
			{
				lut[i].release(ptr);
			}
		}
	}
};

const char* get_prx_name_by_cia(u32 addr)
{
	if (auto t = g_fxo->try_get<prx_names_table>())
	{
		addr >>= 16;

		if (addr < std::size(t->lut))
		{
			return t->lut[addr];
		}
	}

	return nullptr;
}
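
// Example usage (illustrative only): resolving which PRX an address belongs to, e.g. when
// annotating a PPU backtrace. Returns nullptr when no PRX segment covers the address.
//
//   if (const char* prx_name = get_prx_name_by_cia(ppu.cia))
//       ppu_log.notice("PC 0x%x is inside %s", ppu.cia, prx_name);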
std : : shared_ptr < lv2_prx > ppu_load_prx ( const ppu_prx_object & elf , bool virtual_load , const std : : string & path , s64 file_offset , utils : : serial * ar )
2016-04-14 01:09:41 +02:00
{
2021-02-12 12:40:55 +01:00
if ( elf ! = elf_error : : ok )
{
return nullptr ;
}
2017-07-01 01:08:51 +02:00
// Create new PRX object
2023-06-25 14:53:42 +02:00
const auto prx = ! ar & & ! virtual_load ? idm : : make_ptr < lv2_obj , lv2_prx > ( ) : std : : make_shared < lv2_prx > ( ) ;
2017-07-01 01:08:51 +02:00
// Access linkage information object
2021-03-02 12:59:19 +01:00
auto & link = g_fxo - > get < ppu_linkage_info > ( ) ;
2016-04-14 01:09:41 +02:00
2017-07-13 17:35:37 +02:00
// Initialize HLE modules
2021-03-02 12:59:19 +01:00
ppu_initialize_modules ( & link ) ;
2017-07-13 17:35:37 +02:00
2018-03-20 16:53:15 +01:00
// Library hash
sha1_context sha ;
sha1_starts ( & sha ) ;
2021-02-01 16:33:19 +01:00
u32 end = 0 ;
2021-02-02 08:33:13 +01:00
u32 toc = 0 ;
2021-02-01 16:33:19 +01:00
2023-06-25 14:53:42 +02:00
// 0x100000: Workaround for analyser glitches
u32 allocating_address = 0x100000 ;
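// In virtual_load mode segments are not mapped into the guest VM at all: each LOAD segment is
// assigned a fake base address starting at 0x100000 (advanced by the 64 KiB-aligned segment size)
// and is backed by a plain host-side allocation further below, so the analyser can still walk it.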
2016-07-09 00:36:42 +02:00
for ( const auto & prog : elf . progs )
2016-04-14 01:09:41 +02:00
{
2020-02-01 05:36:53 +01:00
ppu_loader . notice ( " ** Segment: p_type=0x%x, p_vaddr=0x%llx, p_filesz=0x%llx, p_memsz=0x%llx, flags=0x%x " , prog . p_type , prog . p_vaddr , prog . p_filesz , prog . p_memsz , prog . p_flags ) ;
2016-04-14 01:09:41 +02:00
2018-03-20 16:53:15 +01:00
// Hash big-endian values
2019-11-29 23:28:06 +01:00
sha1_update ( & sha , reinterpret_cast < const uchar * > ( & prog . p_type ) , sizeof ( prog . p_type ) ) ;
sha1_update ( & sha , reinterpret_cast < const uchar * > ( & prog . p_flags ) , sizeof ( prog . p_flags ) ) ;
2018-03-20 16:53:15 +01:00
2016-04-14 01:09:41 +02:00
switch ( const u32 p_type = prog . p_type )
{
case 0x1 : // LOAD
{
2020-12-14 07:03:49 +01:00
auto & _seg = prx - > segs . emplace_back ( ) ;
_seg . flags = prog . p_flags ;
_seg . type = p_type ;
2016-04-14 01:09:41 +02:00
if ( prog . p_memsz )
{
2020-12-09 14:03:15 +01:00
const u32 mem_size = : : narrow < u32 > ( prog . p_memsz ) ;
const u32 file_size = : : narrow < u32 > ( prog . p_filesz ) ;
2021-01-12 11:01:06 +01:00
//const u32 init_addr = ::narrow<u32>(prog.p_vaddr);
2016-04-14 01:09:41 +02:00
// Alloc segment memory
2022-07-04 15:02:17 +02:00
// Or use saved address
2023-06-25 14:53:42 +02:00
u32 addr = 0 ;
if ( virtual_load )
{
addr = std : : exchange ( allocating_address , allocating_address + utils : : align < u32 > ( mem_size , 0x10000 ) ) ;
}
else
{
addr = ( ! ar ? vm : : alloc ( mem_size , vm : : main ) : ar - > operator u32 ( ) ) ;
}
_seg . ptr = vm : : base ( addr ) ;
2016-04-14 01:09:41 +02:00
2023-06-25 14:53:42 +02:00
if ( virtual_load )
{
// Leave some extra room so the analyser can safely read slightly past the segment limit
// With the VM any u32 address is valid, so the address space is not really a limit; here, however, creating a pointer past the allocation boundaries would be UB
// TODO: Use make_shared_for_overwrite when all compilers support it
const usz alloc_size = utils : : align < usz > ( mem_size , 0x10000 ) + 4096 ;
prx - > allocations . push_back ( std : : shared_ptr < u8 [ ] > ( new u8 [ alloc_size ] ) ) ;
_seg . ptr = prx - > allocations . back ( ) . get ( ) ;
std : : memset ( static_cast < u8 * > ( _seg . ptr ) + prog . bin . size ( ) , 0 , alloc_size - 4096 - prog . bin . size ( ) ) ;
}
else if ( ! vm : : check_addr ( addr ) )
2016-04-14 01:09:41 +02:00
{
2016-08-08 18:01:06 +02:00
fmt : : throw_exception ( " vm::alloc() failed (size=0x%x) " , mem_size ) ;
2016-04-14 01:09:41 +02:00
}
2023-06-25 14:53:42 +02:00
_seg . addr = addr ;
_seg . size = mem_size ;
_seg . filesz = file_size ;
prx - > addr_to_seg_index . emplace ( addr , prx - > segs . size ( ) - 1 ) ;
2017-02-10 13:20:54 +01:00
// Copy segment data
2023-06-25 14:53:42 +02:00
if ( ! ar ) std : : memcpy ( ensure ( prx - > get_ptr < void > ( addr ) ) , prog . bin . data ( ) , file_size ) ;
2020-08-27 17:43:23 +02:00
ppu_loader . warning ( " **** Loaded to 0x%x...0x%x (size=0x%x) " , addr , addr + mem_size - 1 , mem_size ) ;
2016-04-14 01:09:41 +02:00
2018-03-20 16:53:15 +01:00
// Hash segment
2019-11-29 23:28:06 +01:00
sha1_update ( & sha , reinterpret_cast < const uchar * > ( & prog . p_vaddr ) , sizeof ( prog . p_vaddr ) ) ;
sha1_update ( & sha , reinterpret_cast < const uchar * > ( & prog . p_memsz ) , sizeof ( prog . p_memsz ) ) ;
2018-03-20 16:53:15 +01:00
sha1_update ( & sha , prog . bin . data ( ) , prog . bin . size ( ) ) ;
2017-02-10 13:20:54 +01:00
// Initialize executable code if necessary
2023-06-25 14:53:42 +02:00
if ( prog . p_flags & 0x1 & & ! virtual_load )
2017-02-10 13:20:54 +01:00
{
ppu_register_range ( addr , mem_size ) ;
}
2016-04-14 01:09:41 +02:00
}
break ;
}
case 0x700000a4 : break ; // Relocations
2020-02-01 05:36:53 +01:00
default : ppu_loader . error ( " Unknown segment type! 0x%08x " , p_type ) ;
2016-04-14 01:09:41 +02:00
}
}
2016-07-09 00:36:42 +02:00
for ( const auto & s : elf . shdrs )
{
2022-04-27 18:46:09 +02:00
ppu_loader . notice ( " ** Section: sh_type=0x%x, addr=0x%llx, size=0x%llx, flags=0x%x " , std : : bit_cast < u32 > ( s . sh_type ) , s . sh_addr , s . sh_size , s . _sh_flags ) ;
2016-07-09 00:36:42 +02:00
2022-04-27 18:46:09 +02:00
if ( s . sh_type ! = sec_type : : sht_progbits ) continue ;
2021-01-05 14:15:48 +01:00
2016-07-09 00:36:42 +02:00
const u32 addr = vm : : cast ( s . sh_addr ) ;
const u32 size = vm : : cast ( s . sh_size ) ;
2021-01-05 14:15:48 +01:00
if ( addr & & size ) // TODO: some sections with addr=0 are valid
2016-07-09 00:36:42 +02:00
{
2020-12-18 08:39:54 +01:00
for ( usz i = 0 ; i < prx - > segs . size ( ) ; i + + )
2016-07-09 00:36:42 +02:00
{
const u32 saddr = static_cast < u32 > ( elf . progs [ i ] . p_vaddr ) ;
2016-07-24 01:59:50 +02:00
if ( addr > = saddr & & addr < saddr + elf . progs [ i ] . p_memsz )
2016-07-09 00:36:42 +02:00
{
// "Relocate" section
2017-07-01 01:08:51 +02:00
ppu_segment _sec ;
_sec . addr = addr - saddr + prx - > segs [ i ] . addr ;
_sec . size = size ;
2022-04-27 18:46:09 +02:00
_sec . type = std : : bit_cast < u32 > ( s . sh_type ) ;
_sec . flags = static_cast < u32 > ( s . _sh_flags & 7 ) ;
2017-08-22 23:42:12 +02:00
_sec . filesz = 0 ;
2017-07-01 01:08:51 +02:00
prx - > secs . emplace_back ( _sec ) ;
2021-02-01 16:33:19 +01:00
2021-02-02 17:54:43 +01:00
if ( _sec . flags & 0x4 & & i = = 0 )
2021-02-01 16:33:19 +01:00
{
end = std : : max < u32 > ( end , _sec . addr + _sec . size ) ;
}
2016-07-09 00:36:42 +02:00
break ;
}
}
}
}
2016-04-14 01:09:41 +02:00
// Do relocations
2016-07-09 00:36:42 +02:00
for ( auto & prog : elf . progs )
2016-04-14 01:09:41 +02:00
{
2021-01-12 11:01:06 +01:00
switch ( prog . p_type )
2016-04-14 01:09:41 +02:00
{
case 0x700000a4 :
{
// Relocation information of the SCE_PPURELA segment
struct ppu_prx_relocation_info
{
be_t < u64 > offset ;
be_t < u16 > unk0 ;
u8 index_value ;
u8 index_addr ;
be_t < u32 > type ;
vm : : bptr < void , u64 > ptr ;
} ;
for ( uint i = 0 ; i < prog . p_filesz ; i + = sizeof ( ppu_prx_relocation_info ) )
{
const auto & rel = reinterpret_cast < const ppu_prx_relocation_info & > ( prog . bin [ i ] ) ;
2023-10-12 09:21:18 +02:00
if ( rel . offset > = utils : : align < u64 > ( : : at32 ( prx - > segs , rel . index_addr ) . size , 0x100 ) )
2020-12-14 07:03:49 +01:00
{
2023-10-12 09:21:18 +02:00
fmt : : throw_exception ( " Relocation offset out of segment memory! (offset=0x%x, index_addr=%u, seg_size=0x%x) " , rel . offset , rel . index_addr , prx - > segs [ rel . index_addr ] . size ) ;
2020-12-14 07:03:49 +01:00
}
2022-09-19 14:57:51 +02:00
const u32 data_base = rel . index_value = = 0xFF ? 0 : : : at32 ( prx - > segs , rel . index_value ) . addr ;
2020-12-14 07:03:49 +01:00
if ( rel . index_value ! = 0xFF & & ! data_base )
{
fmt : : throw_exception ( " Empty segment has been referenced for relocation data! (reloc_offset=0x%x, index_value=%u) " , i , rel . index_value ) ;
}
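// raddr is the location to patch: base address of the segment selected by index_addr plus the
// relocation offset. rdata is the value to write: base address of the segment selected by
// index_value plus the addend stored in ptr (index_value == 0xFF means the addend is used as-is).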
2017-07-01 01:08:51 +02:00
ppu_reloc _rel ;
2022-09-19 14:57:51 +02:00
const u32 raddr = _rel . addr = vm : : cast ( : : at32 ( prx - > segs , rel . index_addr ) . addr + rel . offset ) ;
2017-07-01 01:08:51 +02:00
const u32 rtype = _rel . type = rel . type ;
2020-12-14 07:03:49 +01:00
const u64 rdata = _rel . data = data_base + rel . ptr . addr ( ) ;
2017-07-01 01:08:51 +02:00
prx - > relocs . emplace_back ( _rel ) ;
2016-04-14 01:09:41 +02:00
2022-07-04 15:02:17 +02:00
if ( ar )
{
break ;
}
2017-07-01 01:08:51 +02:00
switch ( rtype )
2016-04-14 01:09:41 +02:00
{
2017-06-19 06:59:02 +02:00
case 1 : // R_PPC64_ADDR32
2016-04-14 01:09:41 +02:00
{
2023-06-25 14:53:42 +02:00
const u32 value = * ensure ( prx - > get_ptr < u32 > ( raddr ) ) = static_cast < u32 > ( rdata ) ;
2020-02-01 05:36:53 +01:00
ppu_loader . trace ( " **** RELOCATION(1): 0x%x <- 0x%08x (0x%llx) " , raddr , value , rdata ) ;
2016-04-14 01:09:41 +02:00
break ;
}
2017-06-19 06:59:02 +02:00
case 4 : //R_PPC64_ADDR16_LO
2016-04-14 01:09:41 +02:00
{
2023-06-25 14:53:42 +02:00
const u16 value = * ensure ( prx - > get_ptr < u16 > ( raddr ) ) = static_cast < u16 > ( rdata ) ;
2020-02-01 05:36:53 +01:00
ppu_loader . trace ( " **** RELOCATION(4): 0x%x <- 0x%04x (0x%llx) " , raddr , value , rdata ) ;
2016-04-14 01:09:41 +02:00
break ;
}
2017-06-19 06:59:02 +02:00
case 5 : //R_PPC64_ADDR16_HI
2016-04-14 01:09:41 +02:00
{
2023-06-25 14:53:42 +02:00
const u16 value = * ensure ( prx - > get_ptr < u16 > ( raddr ) ) = static_cast < u16 > ( rdata > > 16 ) ;
2020-02-01 05:36:53 +01:00
ppu_loader . trace ( " **** RELOCATION(5): 0x%x <- 0x%04x (0x%llx) " , raddr , value , rdata ) ;
2016-04-14 01:09:41 +02:00
break ;
}
2017-06-19 06:59:02 +02:00
case 6 : //R_PPC64_ADDR16_HA
2016-04-14 01:09:41 +02:00
{
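// R_PPC64_ADDR16_HA is the "high adjusted" half: it rounds the high 16 bits up by one when
// bit 15 of the value is set, so that (HA << 16) + sign_extend(LO) reconstructs the full value.
// Illustrative example (not from any particular module): rdata = 0x12348000 gives LO = 0x8000
// (sign-extends to -0x8000) and HA = 0x1234 + 1 = 0x1235; 0x12350000 - 0x8000 = 0x12348000.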
2023-06-25 14:53:42 +02:00
const u16 value = * ensure ( prx - > get_ptr < u16 > ( raddr ) ) = static_cast < u16 > ( rdata > > 16 ) + ( rdata & 0x8000 ? 1 : 0 ) ;
2020-02-01 05:36:53 +01:00
ppu_loader . trace ( " **** RELOCATION(6): 0x%x <- 0x%04x (0x%llx) " , raddr , value , rdata ) ;
2016-04-14 01:09:41 +02:00
break ;
}
2017-06-19 06:59:02 +02:00
case 10 : //R_PPC64_REL24
2016-07-09 00:36:42 +02:00
{
2023-06-25 14:53:42 +02:00
const u32 value = * ensure ( prx - > get_ptr < ppu_bf_t < be_t < u32 > , 6 , 24 > > ( raddr ) ) = static_cast < u32 > ( rdata - raddr ) > > 2 ;
2020-02-01 05:36:53 +01:00
ppu_loader . warning ( " **** RELOCATION(10): 0x%x <- 0x%06x (0x%llx) " , raddr , value , rdata ) ;
2016-07-09 00:36:42 +02:00
break ;
}
2017-06-19 06:59:02 +02:00
case 11 : //R_PPC64_REL14
{
2023-06-25 14:53:42 +02:00
const u32 value = * ensure ( prx - > get_ptr < ppu_bf_t < be_t < u32 > , 16 , 14 > > ( raddr ) ) = static_cast < u32 > ( rdata - raddr ) > > 2 ;
2020-02-01 05:36:53 +01:00
ppu_loader . warning ( " **** RELOCATION(11): 0x%x <- 0x%06x (0x%llx) " , raddr , value , rdata ) ;
2017-06-19 06:59:02 +02:00
break ;
}
case 38 : //R_PPC64_ADDR64
{
2023-06-25 14:53:42 +02:00
const u64 value = * ensure ( prx - > get_ptr < u64 > ( raddr ) ) = rdata ;
2020-02-01 05:36:53 +01:00
ppu_loader . trace ( " **** RELOCATION(38): 0x%x <- 0x%016llx (0x%llx) " , raddr , value , rdata ) ;
2017-06-19 06:59:02 +02:00
break ;
}
case 44 : //R_PPC64_REL64
2016-07-09 00:36:42 +02:00
{
2023-06-25 14:53:42 +02:00
const u64 value = * ensure ( prx - > get_ptr < u64 > ( raddr ) ) = rdata - raddr ;
2020-02-01 05:36:53 +01:00
ppu_loader . trace ( " **** RELOCATION(44): 0x%x <- 0x%016llx (0x%llx) " , raddr , value , rdata ) ;
2016-07-09 00:36:42 +02:00
break ;
}
2017-06-19 06:59:02 +02:00
case 57 : //R_PPC64_ADDR16_LO_DS
2016-07-09 00:36:42 +02:00
{
2023-06-25 14:53:42 +02:00
const u16 value = * ensure ( prx - > get_ptr < ppu_bf_t < be_t < u16 > , 0 , 14 > > ( raddr ) ) = static_cast < u16 > ( rdata ) > > 2 ;
2020-02-01 05:36:53 +01:00
ppu_loader . trace ( " **** RELOCATION(57): 0x%x <- 0x%04x (0x%llx) " , raddr , value , rdata ) ;
2016-07-09 00:36:42 +02:00
break ;
}
2020-02-01 05:36:53 +01:00
default : ppu_loader . error ( " **** RELOCATION(%u): Illegal/Unknown type! (addr=0x%x; 0x%llx) " , rtype , raddr , rdata ) ;
2017-07-01 01:08:51 +02:00
}
if ( rdata = = 0 )
{
2020-02-01 05:36:53 +01:00
ppu_loader . todo ( " **** RELOCATION(%u): 0x%x <- (zero-based value) " , rtype , raddr ) ;
2016-04-14 01:09:41 +02:00
}
}
break ;
}
2021-04-09 21:12:47 +02:00
default : break ;
2016-04-14 01:09:41 +02:00
}
}
2016-07-09 00:36:42 +02:00
if ( ! elf . progs . empty ( ) & & elf . progs [ 0 ] . p_paddr )
2016-04-14 01:09:41 +02:00
{
struct ppu_prx_library_info
{
be_t < u16 > attributes ;
2017-08-22 23:42:12 +02:00
u8 version [ 2 ] ;
2016-04-14 01:09:41 +02:00
char name [ 28 ] ;
be_t < u32 > toc ;
be_t < u32 > exports_start ;
be_t < u32 > exports_end ;
be_t < u32 > imports_start ;
be_t < u32 > imports_end ;
} ;
// Access library information (TODO)
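// For PRX files, p_paddr of the first program header holds the file offset of this module info
// structure, so its virtual address is recovered below as segs[0].addr + (p_paddr - p_offset).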
2023-12-29 18:33:29 +01:00
const auto lib_info = ensure ( prx - > get_ptr < const ppu_prx_library_info > ( : : narrow < u32 > ( prx - > segs [ 0 ] . addr + elf . progs [ 0 ] . p_paddr - elf . progs [ 0 ] . p_offset ) ) ) ;
2020-03-05 15:12:40 +01:00
const std : : string lib_name = lib_info - > name ;
2016-04-14 01:09:41 +02:00
2020-03-05 15:12:40 +01:00
strcpy_trunc ( prx - > module_info_name , lib_name ) ;
2017-08-22 23:42:12 +02:00
prx - > module_info_version [ 0 ] = lib_info - > version [ 0 ] ;
prx - > module_info_version [ 1 ] = lib_info - > version [ 1 ] ;
prx - > module_info_attributes = lib_info - > attributes ;
2023-01-15 21:12:54 +01:00
2022-11-05 16:14:34 +01:00
prx - > exports_start = lib_info - > exports_start ;
prx - > exports_end = lib_info - > exports_end ;
2017-08-22 23:42:12 +02:00
2023-12-29 18:33:29 +01:00
for ( u32 start = prx - > exports_start , size = 0 ; ; size + + )
2023-01-15 21:12:54 +01:00
{
if ( start > = prx - > exports_end )
{
// Preallocate storage
prx - > m_external_loaded_flags . resize ( size ) ;
break ;
}
2023-06-25 14:53:42 +02:00
const u8 increment = * ensure ( prx - > get_ptr < u8 > ( start ) ) ;
start + = increment ? increment : sizeof ( ppu_prx_module_info ) ;
2023-01-15 21:12:54 +01:00
}
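// The export table consists of variable-length entries, each prefixed with its own size byte;
// a size byte of zero falls back to sizeof(ppu_prx_module_info). The walk above only counts the
// entries so that m_external_loaded_flags can be presized before the exports are parsed.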
2020-02-01 05:36:53 +01:00
ppu_loader . warning ( " Library %s (rtoc=0x%x): " , lib_name , lib_info - > toc ) ;
2016-04-14 01:09:41 +02:00
2023-08-06 18:47:23 +02:00
ppu_linkage_info dummy { } ;
prx - > specials = ppu_load_exports ( * prx , virtual_load ? & dummy : & link , prx - > exports_start , prx - > exports_end , true ) ;
prx - > imports = ppu_load_imports ( * prx , prx - > relocs , virtual_load ? & dummy : & link , lib_info - > imports_start , lib_info - > imports_end ) ;
if ( virtual_load )
2023-06-25 14:53:42 +02:00
{
2023-08-06 18:47:23 +02:00
prx - > imports . clear ( ) ;
2023-06-25 14:53:42 +02:00
}
2018-03-17 18:41:35 +01:00
std : : stable_sort ( prx - > relocs . begin ( ) , prx - > relocs . end ( ) ) ;
2021-02-02 08:33:13 +01:00
toc = lib_info - > toc ;
2016-04-14 01:09:41 +02:00
}
else
{
2023-06-25 14:53:42 +02:00
ppu_loader . error ( " Library %s: PRX library info not found " ) ;
2016-04-14 01:09:41 +02:00
}
prx - > start . set ( prx - > specials [ 0xbc9a0086 ] ) ;
prx - > stop . set ( prx - > specials [ 0xab779874 ] ) ;
prx - > exit . set ( prx - > specials [ 0x3ab9a95e ] ) ;
2018-03-22 20:57:20 +01:00
prx - > prologue . set ( prx - > specials [ 0x0d10fd3f ] ) ;
prx - > epilogue . set ( prx - > specials [ 0x330f7005 ] ) ;
2017-07-01 01:08:51 +02:00
prx - > name = path . substr ( path . find_last_of ( ' / ' ) + 1 ) ;
prx - > path = path ;
2021-05-26 22:38:17 +02:00
prx - > offset = file_offset ;
2017-07-13 17:35:37 +02:00
2022-10-02 11:59:41 +02:00
g_fxo - > need < prx_names_table > ( ) ;
g_fxo - > get < prx_names_table > ( ) . install ( prx - > name , * prx ) ;
2018-03-20 16:53:15 +01:00
sha1_finish ( & sha , prx - > sha1 ) ;
// Format patch name
2021-02-08 16:04:50 +01:00
std : : string hash = fmt : : format ( " PRX-%s " , fmt : : base57 ( prx - > sha1 ) ) ;
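// Patches are keyed per segment as "PRX-<base57 SHA-1>-<segment index>"; a "<title ID>-"-prefixed
// variant of the same key is also tried for game-specific patches (see the loop below).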
2018-03-20 16:53:15 +01:00
2022-09-03 05:46:16 +02:00
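// Record the address range of liblv2's first segment (cleared again in ppu_unload_prx),
// presumably so other code can quickly tell whether an address falls inside liblv2.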
if ( prx - > path . ends_with ( " sys/external/liblv2.sprx " sv ) )
{
liblv2_begin = prx - > segs [ 0 ] . addr ;
liblv2_end = prx - > segs [ 0 ] . addr + prx - > segs [ 0 ] . size ;
}
2021-02-08 16:04:50 +01:00
std : : basic_string < u32 > applied ;
2018-03-20 16:53:15 +01:00
2021-02-08 16:04:50 +01:00
for ( usz i = 0 ; i < prx - > segs . size ( ) ; i + + )
2018-03-20 16:53:15 +01:00
{
2021-02-08 16:04:50 +01:00
const auto & seg = prx - > segs [ i ] ;
if ( ! seg . size ) continue ;
const std : : string hash_seg = fmt : : format ( " %s-%u " , hash , i ) ;
// Apply the patch
2023-07-12 19:43:33 +02:00
auto _applied = g_fxo - > get < patch_engine > ( ) . apply ( hash_seg , [ & ] ( u32 addr , u32 size ) { return prx - > get_ptr < u8 > ( addr + seg . addr , size ) ; } , seg . size ) ;
2021-02-08 16:04:50 +01:00
if ( ! Emu . GetTitleID ( ) . empty ( ) )
{
// Alternative patch
2023-07-12 19:43:33 +02:00
_applied + = g_fxo - > get < patch_engine > ( ) . apply ( Emu . GetTitleID ( ) + ' - ' + hash_seg , [ & ] ( u32 addr , u32 size ) { return prx - > get_ptr < u8 > ( addr + seg . addr , size ) ; } , seg . size ) ;
2021-02-08 16:04:50 +01:00
}
// Rebase patch offsets
std : : for_each ( _applied . begin ( ) , _applied . end ( ) , [ & ] ( u32 & res ) { if ( res ! = umax ) res + = seg . addr ; } ) ;
2021-03-02 12:59:19 +01:00
2021-02-08 16:04:50 +01:00
applied + = _applied ;
2021-02-13 17:35:43 +01:00
if ( _applied . empty ( ) )
{
ppu_loader . warning ( " PRX hash of %s[%u]: %s " , prx - > name , i , hash_seg ) ;
}
else
{
ppu_loader . success ( " PRX hash of %s[%u]: %s (<- %u) " , prx - > name , i , hash_seg , _applied . size ( ) ) ;
}
2018-03-20 16:53:15 +01:00
}
2023-08-24 22:03:51 +02:00
// Disabled for PRX for now (problematic and does not seem to have any benefit)
end = 0 ;
if ( ! applied . empty ( ) | | ar )
{
// Compare memory changes in memory after executable code sections end
if ( end > = prx - > segs [ 0 ] . addr & & end < prx - > segs [ 0 ] . addr + prx - > segs [ 0 ] . size )
{
for ( const auto & prog : elf . progs )
{
// Find the first segment
if ( prog . p_type = = 0x1u /* LOAD */ & & prog . p_memsz )
{
std : : basic_string_view < uchar > elf_memory { prog . bin . data ( ) , prog . bin . size ( ) } ;
elf_memory . remove_prefix ( end - prx - > segs [ 0 ] . addr ) ;
if ( elf_memory ! = std : : basic_string_view < uchar > { & prx - > get_ref < uchar > ( end ) , elf_memory . size ( ) } )
{
// There are changes, disable analysis optimization
ppu_loader . notice ( " Disabling analysis optimization due to memory changes from original file " ) ;
end = 0 ;
}
break ;
}
}
}
}
2020-10-03 07:07:13 +02:00
// Embedded SPU elf patching
for ( const auto & seg : prx - > segs )
{
2023-06-25 14:53:42 +02:00
ppu_check_patch_spu_images ( * prx , seg ) ;
2020-10-03 07:07:13 +02:00
}
2021-02-02 19:14:35 +01:00
prx - > analyse ( toc , 0 , end , applied ) ;
2021-02-02 08:33:13 +01:00
2023-06-25 14:53:42 +02:00
if ( ! ar & & ! virtual_load )
{
try_spawn_ppu_if_exclusive_program ( * prx ) ;
}
2017-07-13 17:35:37 +02:00
2016-04-14 01:09:41 +02:00
return prx ;
}
2017-07-05 21:52:02 +02:00
void ppu_unload_prx ( const lv2_prx & prx )
{
2023-08-06 08:43:13 +02:00
if ( prx . segs . empty ( ) | | prx . segs [ 0 ] . ptr ! = vm : : base ( prx . segs [ 0 ] . addr ) )
2023-08-05 20:33:00 +02:00
{
return ;
}
2022-11-05 14:29:44 +01:00
std : : unique_lock lock ( g_fxo - > get < ppu_linkage_info > ( ) . mutex , std : : defer_lock ) ;
2017-07-12 20:28:33 +02:00
// Clean linkage info
for ( auto & imp : prx . imports )
{
2022-11-05 14:29:44 +01:00
if ( ! lock )
{
lock . lock ( ) ;
}
2020-05-06 17:18:30 +02:00
auto pinfo = static_cast < ppu_linkage_info : : module_data : : info * > ( imp . second ) ;
2017-07-12 20:28:33 +02:00
pinfo - > frefss . erase ( imp . first ) ;
pinfo - > imports . erase ( imp . first ) ;
}
2017-10-01 03:40:11 +02:00
//for (auto& exp : prx.exports)
//{
2020-05-06 17:18:30 +02:00
// auto pinfo = static_cast<ppu_linkage_info::module_data::info*>(exp.second);
2017-10-01 03:40:11 +02:00
// if (pinfo->static_func)
// {
2021-05-17 13:22:27 +02:00
// pinfo->export_addr = g_fxo->get<ppu_function_manager>().func_addr(pinfo->static_func->index);
2017-10-01 03:40:11 +02:00
// }
// else if (pinfo->static_var)
// {
// pinfo->export_addr = pinfo->static_var->addr;
// }
// else
// {
// pinfo->export_addr = 0;
// }
//}
2022-11-05 14:29:44 +01:00
if ( lock )
{
lock . unlock ( ) ;
}
2022-09-03 05:46:16 +02:00
if ( prx . path . ends_with ( " sys/external/liblv2.sprx " sv ) )
{
liblv2_begin = 0 ;
liblv2_end = 0 ;
}
2021-09-01 12:38:17 +02:00
// Format patch name
std : : string hash = fmt : : format ( " PRX-%s " , fmt : : base57 ( prx . sha1 ) ) ;
2017-07-05 21:52:02 +02:00
for ( auto & seg : prx . segs )
{
2021-09-01 12:38:17 +02:00
if ( ! seg . size ) continue ;
2023-08-05 20:33:00 +02:00
vm : : dealloc ( seg . addr , vm : : main ) ;
2021-09-01 12:38:17 +02:00
const std : : string hash_seg = fmt : : format ( " %s-%u " , hash , & seg - prx . segs . data ( ) ) ;
// Deallocate memory used for patches
g_fxo - > get < patch_engine > ( ) . unload ( hash_seg ) ;
if ( ! Emu . GetTitleID ( ) . empty ( ) )
{
// Alternative patch
g_fxo - > get < patch_engine > ( ) . unload ( Emu . GetTitleID ( ) + ' - ' + hash_seg ) ;
}
2017-07-05 21:52:02 +02:00
}
}
2023-06-25 14:53:42 +02:00
bool ppu_load_exec ( const ppu_exec_object & elf , bool virtual_load , const std : : string & elf_path , utils : : serial * ar )
2016-04-14 01:09:41 +02:00
{
2021-02-12 12:40:55 +01:00
if ( elf ! = elf_error : : ok )
{
return false ;
}
2021-01-30 15:25:21 +01:00
// Check if it is a standalone executable first
for ( const auto & prog : elf . progs )
{
if ( prog . p_type = = 0x1u /* LOAD */ & & prog . p_memsz )
{
using addr_range = utils : : address_range ;
const addr_range r = addr_range : : start_length ( static_cast < u32 > ( prog . p_vaddr ) , static_cast < u32 > ( prog . p_memsz ) ) ;
2021-05-22 09:35:15 +02:00
if ( ( prog . p_vaddr | prog . p_memsz ) > u32 { umax } | | ! r . valid ( ) | | ! r . inside ( addr_range : : start_length ( 0x00000000 , 0x30000000 ) ) )
2021-01-30 15:25:21 +01:00
{
return false ;
}
}
}
2022-07-04 15:02:17 +02:00
init_ppu_functions ( ar , false ) ;
2021-03-02 12:59:19 +01:00
2017-07-01 01:08:51 +02:00
// Set for delayed initialization in ppu_initialize()
2023-04-08 17:03:05 +02:00
auto & _main = g_fxo - > get < main_ppu_module > ( ) ;
2017-07-01 01:08:51 +02:00
2016-04-14 01:09:41 +02:00
// Access linkage information object
2021-03-02 12:59:19 +01:00
auto & link = g_fxo - > get < ppu_linkage_info > ( ) ;
2016-04-14 01:09:41 +02:00
2016-07-19 01:33:25 +02:00
// TLS information
2016-07-24 01:59:50 +02:00
u32 tls_vaddr = 0 ;
u32 tls_fsize = 0 ;
u32 tls_vsize = 0 ;
2016-07-19 01:33:25 +02:00
// Process information
2021-04-23 20:25:55 +02:00
u32 sdk_version = SYS_PROCESS_PARAM_SDK_VERSION_UNKNOWN ;
2019-01-17 17:31:25 +01:00
s32 primary_prio = 1001 ;
2021-04-23 20:25:55 +02:00
u32 primary_stacksize = SYS_PROCESS_PARAM_STACK_SIZE_MAX ;
u32 malloc_pagesize = SYS_PROCESS_PARAM_MALLOC_PAGE_SIZE_1M ;
2019-07-03 19:17:04 +02:00
u32 ppc_seg = 0 ;
2016-07-19 01:33:25 +02:00
2021-02-01 16:33:19 +01:00
// Limit for analysis
u32 end = 0 ;
2017-07-17 15:20:29 +02:00
// Executable hash
sha1_context sha ;
sha1_starts ( & sha ) ;
2021-01-30 15:25:21 +01:00
struct on_fatal_error
{
2021-03-02 12:59:19 +01:00
ppu_module & _main ;
2021-01-30 15:25:21 +01:00
bool errored = true ;
~ on_fatal_error ( )
{
if ( ! errored )
{
return ;
}
// Revert previous allocations on an error
2021-03-02 12:59:19 +01:00
for ( const auto & seg : _main . segs )
2021-01-30 15:25:21 +01:00
{
vm : : dealloc ( seg . addr ) ;
}
}
} error_handler { _main } ;
2023-06-25 14:53:42 +02:00
if ( virtual_load )
{
// No need for cleanup
error_handler . errored = false ;
}
2023-06-29 07:42:21 +02:00
const auto old_process_info = g_ps3_process_info ;
2016-04-14 01:09:41 +02:00
// Allocate memory at fixed positions
2016-07-09 00:36:42 +02:00
for ( const auto & prog : elf . progs )
2016-04-14 01:09:41 +02:00
{
2020-02-01 05:36:53 +01:00
ppu_loader . notice ( " ** Segment: p_type=0x%x, p_vaddr=0x%llx, p_filesz=0x%llx, p_memsz=0x%llx, flags=0x%x " , prog . p_type , prog . p_vaddr , prog . p_filesz , prog . p_memsz , prog . p_flags ) ;
2016-06-07 22:24:20 +02:00
2017-07-01 01:08:51 +02:00
ppu_segment _seg ;
2020-12-09 14:03:15 +01:00
const u32 addr = _seg . addr = vm : : cast ( prog . p_vaddr ) ;
const u32 size = _seg . size = : : narrow < u32 > ( prog . p_memsz ) ;
2017-07-01 01:08:51 +02:00
const u32 type = _seg . type = prog . p_type ;
2021-01-12 11:01:06 +01:00
_seg . flags = prog . p_flags ;
2020-12-09 14:03:15 +01:00
_seg . filesz = : : narrow < u32 > ( prog . p_filesz ) ;
2017-07-17 15:20:29 +02:00
// Hash big-endian values
2019-11-29 23:28:06 +01:00
sha1_update ( & sha , reinterpret_cast < const uchar * > ( & prog . p_type ) , sizeof ( prog . p_type ) ) ;
sha1_update ( & sha , reinterpret_cast < const uchar * > ( & prog . p_flags ) , sizeof ( prog . p_flags ) ) ;
2017-07-17 15:20:29 +02:00
2017-07-01 01:08:51 +02:00
if ( type = = 0x1 /* LOAD */ & & prog . p_memsz )
2016-04-14 01:09:41 +02:00
{
if ( prog . bin . size ( ) > size | | prog . bin . size ( ) ! = prog . p_filesz )
2021-01-30 15:25:21 +01:00
{
2023-06-25 14:53:42 +02:00
ppu_loader . error ( " ppu_load_exec(): Invalid binary size (0x%llx, memsz=0x%x) " , prog . bin . size ( ) , size ) ;
2021-01-30 15:25:21 +01:00
return false ;
}
2016-04-14 01:09:41 +02:00
2022-07-06 14:53:48 +02:00
const bool already_loaded = ar & & vm : : check_addr ( addr , vm : : page_readable , size ) ;
2022-07-04 15:02:17 +02:00
2023-06-25 14:53:42 +02:00
_seg . ptr = vm : : base ( addr ) ;
if ( virtual_load )
{
// Leave some extra room so the analyser can safely read slightly past the segment limit
// With the VM any u32 address is valid, so the address space is not really a limit; here, however, creating a pointer past the allocation boundaries would be UB
// TODO: Use make_shared_for_overwrite when all compilers support it
const usz alloc_size = utils : : align < usz > ( size , 0x10000 ) + 4096 ;
_main . allocations . push_back ( std : : shared_ptr < u8 [ ] > ( new u8 [ alloc_size ] ) ) ;
_seg . ptr = _main . allocations . back ( ) . get ( ) ;
std : : memset ( static_cast < u8 * > ( _seg . ptr ) + prog . bin . size ( ) , 0 , alloc_size - 4096 - prog . bin . size ( ) ) ;
}
else if ( already_loaded )
2022-07-04 15:02:17 +02:00
{
}
2023-08-21 11:43:53 +02:00
else if ( ! [ & ] ( ) - > bool
2020-02-28 18:47:22 +01:00
{
2023-08-21 11:43:53 +02:00
// 1M pages if it is RSX shared
const u32 area_flags = ( _seg . flags > > 28 ) ? vm : : page_size_1m : vm : : page_size_64k ;
const u32 alloc_at = std : : max < u32 > ( addr & - 0x10000000 , 0x10000 ) ;
2020-02-28 18:47:22 +01:00
2023-08-21 11:43:53 +02:00
const auto area = vm::reserve_map(vm::any, alloc_at, 0x10000000, area_flags);
if ( ! area )
{
return false ;
}
if ( area - > addr ! = alloc_at | | ( area - > flags & 0xf00 ) ! = area_flags )
2020-02-28 18:47:22 +01:00
{
2023-08-21 11:43:53 +02:00
ppu_loader . error ( " Failed to allocate memory at 0x%x - conflicting memory area exists: area->addr=0x%x, area->flags=0x%x " , addr , area - > addr , area - > flags ) ;
2021-01-30 15:25:21 +01:00
return false ;
2020-02-28 18:47:22 +01:00
}
2023-08-21 11:43:53 +02:00
return area - > falloc ( addr , size ) ;
} ( ) )
{
ppu_loader . error ( " ppu_load_exec(): vm::falloc() failed (addr=0x%x, memsz=0x%x) " , addr , size ) ;
return false ;
2020-02-28 18:47:22 +01:00
}
2016-04-14 01:09:41 +02:00
2023-06-25 14:53:42 +02:00
// Store only LOAD segments (TODO)
_main . segs . emplace_back ( _seg ) ;
_main . addr_to_seg_index . emplace ( addr , _main . segs . size ( ) - 1 ) ;
2017-07-17 15:20:29 +02:00
// Copy segment data, hash it
2022-07-04 15:02:17 +02:00
if ( ! already_loaded )
{
2023-06-25 14:53:42 +02:00
std : : memcpy ( _main . get_ptr < void > ( addr ) , prog . bin . data ( ) , prog . bin . size ( ) ) ;
2022-07-04 15:02:17 +02:00
}
else
{
// For backwards compatibility: already loaded memory will always be writable
const u32 size0 = utils : : align ( size + addr % 0x10000 , 0x10000 ) ;
const u32 addr0 = addr & - 0x10000 ;
vm : : page_protect ( addr0 , size0 , 0 , vm : : page_writable | vm : : page_readable , vm : : page_executable ) ;
}
2019-11-29 23:28:06 +01:00
sha1_update ( & sha , reinterpret_cast < const uchar * > ( & prog . p_vaddr ) , sizeof ( prog . p_vaddr ) ) ;
sha1_update ( & sha , reinterpret_cast < const uchar * > ( & prog . p_memsz ) , sizeof ( prog . p_memsz ) ) ;
2017-07-17 15:20:29 +02:00
sha1_update ( & sha , prog . bin . data ( ) , prog . bin . size ( ) ) ;
2016-06-07 22:24:20 +02:00
2017-02-10 13:20:54 +01:00
// Initialize executable code if necessary
2023-06-25 14:53:42 +02:00
if ( prog . p_flags & 0x1 & & ! virtual_load )
2017-02-10 13:20:54 +01:00
{
ppu_register_range ( addr , size ) ;
}
2016-07-07 20:42:39 +02:00
}
}
2017-03-22 21:23:47 +01:00
// Load section list, used by the analyser
2016-07-09 00:36:42 +02:00
for ( const auto & s : elf . shdrs )
2016-07-07 20:42:39 +02:00
{
2022-04-27 18:46:09 +02:00
ppu_loader . notice ( " ** Section: sh_type=0x%x, addr=0x%llx, size=0x%llx, flags=0x%x " , std : : bit_cast < u32 > ( s . sh_type ) , s . sh_addr , s . sh_size , s . _sh_flags ) ;
2016-07-07 20:42:39 +02:00
2022-04-27 18:46:09 +02:00
if ( s . sh_type ! = sec_type : : sht_progbits ) continue ;
2021-01-05 14:15:48 +01:00
2017-07-01 01:08:51 +02:00
ppu_segment _sec ;
const u32 addr = _sec . addr = vm : : cast ( s . sh_addr ) ;
const u32 size = _sec . size = vm : : cast ( s . sh_size ) ;
2021-01-12 11:01:06 +01:00
2022-04-27 18:46:09 +02:00
_sec . type = std : : bit_cast < u32 > ( s . sh_type ) ;
_sec . flags = static_cast < u32 > ( s . _sh_flags & 7 ) ;
2017-08-22 23:42:12 +02:00
_sec . filesz = 0 ;
2016-07-07 20:42:39 +02:00
2021-01-05 14:15:48 +01:00
if ( addr & & size )
2016-07-07 20:42:39 +02:00
{
2021-03-02 12:59:19 +01:00
_main . secs . emplace_back ( _sec ) ;
2021-02-01 16:33:19 +01:00
2021-03-02 12:59:19 +01:00
if ( _sec . flags & 0x4 & & addr > = _main . segs [ 0 ] . addr & & addr + size < = _main . segs [ 0 ] . addr + _main . segs [ 0 ] . size )
2021-02-01 16:33:19 +01:00
{
end = std : : max < u32 > ( end , addr + size ) ;
}
2016-04-14 01:09:41 +02:00
}
}
2021-03-02 12:59:19 +01:00
sha1_finish ( & sha , _main . sha1 ) ;
2017-07-17 15:20:29 +02:00
// Format patch name
std : : string hash ( " PPU-0000000000000000000000000000000000000000 " ) ;
2018-03-20 16:53:15 +01:00
for ( u32 i = 0 ; i < 20 ; i + + )
2017-07-17 15:20:29 +02:00
{
constexpr auto pal = " 0123456789abcdef " ;
2021-03-02 12:59:19 +01:00
hash [ 4 + i * 2 ] = pal [ _main . sha1 [ i ] > > 4 ] ;
hash [ 5 + i * 2 ] = pal [ _main . sha1 [ i ] & 15 ] ;
2017-07-17 15:20:29 +02:00
}
2022-06-18 21:30:38 +02:00
Emu . SetExecutableHash ( hash ) ;
2017-07-17 15:20:29 +02:00
// Apply the patch
2023-07-12 19:43:33 +02:00
auto applied = g_fxo - > get < patch_engine > ( ) . apply ( ! ar ? hash : std : : string { } , [ & ] ( u32 addr , u32 size ) { return _main . get_ptr < u8 > ( addr , size ) ; } ) ;
2017-07-17 15:20:29 +02:00
2022-07-06 14:53:48 +02:00
if ( ! ar & & ! Emu . GetTitleID ( ) . empty ( ) )
2017-07-17 15:20:29 +02:00
{
// Alternative patch
2023-07-12 19:43:33 +02:00
applied + = g_fxo - > get < patch_engine > ( ) . apply ( Emu . GetTitleID ( ) + ' - ' + hash , [ & ] ( u32 addr , u32 size ) { return _main . get_ptr < u8 > ( addr , size ) ; } ) ;
2017-07-17 15:20:29 +02:00
}
2023-08-24 22:03:51 +02:00
if ( ! applied . empty ( ) | | ar )
{
// Compare memory changes in memory after executable code sections end
if ( end > = _main . segs [ 0 ] . addr & & end < _main . segs [ 0 ] . addr + _main . segs [ 0 ] . size )
{
for ( const auto & prog : elf . progs )
{
// Find the first segment
if ( prog . p_type = = 0x1u /* LOAD */ & & prog . p_memsz )
{
std : : basic_string_view < uchar > elf_memory { prog . bin . data ( ) , prog . bin . size ( ) } ;
elf_memory . remove_prefix ( end - _main . segs [ 0 ] . addr ) ;
if ( elf_memory ! = std : : basic_string_view < uchar > { & _main . get_ref < u8 > ( end ) , elf_memory . size ( ) } )
{
// There are changes, disable analysis optimization
ppu_loader . notice ( " Disabling analysis optimization due to memory changes from original file " ) ;
end = 0 ;
}
break ;
}
}
}
}
2021-02-13 17:35:43 +01:00
if ( applied . empty ( ) )
{
ppu_loader . warning ( " PPU executable hash: %s " , hash ) ;
}
else
{
ppu_loader . success ( " PPU executable hash: %s (<- %u) " , hash , applied . size ( ) ) ;
}
2017-07-17 15:20:29 +02:00
2017-03-22 21:23:47 +01:00
// Initialize HLE modules
2022-07-04 15:02:17 +02:00
ppu_initialize_modules ( & link , ar ) ;
2017-03-22 21:23:47 +01:00
2020-01-07 10:10:23 +01:00
// Embedded SPU elf patching
2021-03-02 12:59:19 +01:00
for ( const auto & seg : _main . segs )
2020-01-07 10:10:23 +01:00
{
2023-06-25 14:53:42 +02:00
ppu_check_patch_spu_images ( _main , seg ) ;
2020-01-07 10:10:23 +01:00
}
2018-10-26 12:08:45 +02:00
// Static HLE patching
2023-06-25 14:53:42 +02:00
if ( g_cfg . core . hook_functions & & ! virtual_load )
2018-10-26 12:08:45 +02:00
{
2019-09-19 14:58:02 +02:00
auto shle = g_fxo - > init < statichle_handler > ( 0 ) ;
2018-10-26 12:08:45 +02:00
2021-03-02 12:59:19 +01:00
for ( u32 i = _main . segs [ 0 ] . addr ; i < ( _main . segs [ 0 ] . addr + _main . segs [ 0 ] . size ) ; i + = 4 )
2018-10-26 12:08:45 +02:00
{
vm : : cptr < u8 > _ptr = vm : : cast ( i ) ;
2021-03-02 12:59:19 +01:00
shle - > check_against_patterns ( _ptr , ( _main . segs [ 0 ] . addr + _main . segs [ 0 ] . size ) - i , i ) ;
2018-10-26 12:08:45 +02:00
}
}
2019-11-01 20:21:15 +01:00
// Read control flags (0 if doesn't exist)
g_ps3_process_info . ctrl_flags1 = 0 ;
2020-01-03 06:38:30 +01:00
if ( bool not_found = g_ps3_process_info . self_info . valid )
2019-11-01 20:21:15 +01:00
{
2023-04-19 21:04:54 +02:00
for ( const auto & ctrl : g_ps3_process_info . self_info . supplemental_hdr )
2019-11-01 20:21:15 +01:00
{
if ( ctrl . type = = 1 )
{
if ( ! std : : exchange ( not_found , false ) )
{
2020-02-01 05:36:53 +01:00
ppu_loader . error ( " More than one control flags header found! (flags1=0x%x) " ,
2023-04-19 21:04:54 +02:00
ctrl . PS3_plaintext_capability_header . ctrl_flag1 ) ;
2019-11-01 20:21:15 +01:00
break ;
}
2023-04-19 21:04:54 +02:00
g_ps3_process_info . ctrl_flags1 | = ctrl . PS3_plaintext_capability_header . ctrl_flag1 ;
2019-11-01 20:21:15 +01:00
}
}
2020-01-03 06:38:30 +01:00
2020-02-01 05:36:53 +01:00
ppu_loader . notice ( " SELF header information found: ctrl_flags1=0x%x, authid=0x%llx " ,
2023-04-19 20:27:01 +02:00
g_ps3_process_info . ctrl_flags1 , g_ps3_process_info . self_info . prog_id_hdr . program_authority_id ) ;
2019-11-01 20:21:15 +01:00
}
2016-04-14 01:09:41 +02:00
// Load other programs
2016-07-09 00:36:42 +02:00
for ( auto & prog : elf . progs )
2016-04-14 01:09:41 +02:00
{
switch ( const u32 p_type = prog . p_type )
{
2016-07-24 01:59:50 +02:00
case 0x00000001 : break ; // LOAD (already loaded)
2016-04-14 01:09:41 +02:00
2016-07-24 01:59:50 +02:00
case 0x00000007 : // TLS
2016-04-14 01:09:41 +02:00
{
2021-01-30 15:25:21 +01:00
ppu_loader . notice ( " TLS info segment found: tls-image=*0x%x, image-size=0x%x, tls-size=0x%x " , prog . p_vaddr , prog . p_filesz , prog . p_memsz ) ;
2021-02-01 16:33:19 +01:00
2021-05-22 09:35:15 +02:00
if ( ( prog . p_vaddr | prog . p_filesz | prog . p_memsz ) > u32 { umax } )
2021-01-30 15:25:21 +01:00
{
2023-06-25 14:53:42 +02:00
ppu_loader . error ( " ppu_load_exec(): TLS segment is invalid! " ) ;
2021-01-30 15:25:21 +01:00
return false ;
}
2020-12-09 16:04:52 +01:00
tls_vaddr = vm : : cast ( prog . p_vaddr ) ;
2020-12-09 14:03:15 +01:00
tls_fsize = : : narrow < u32 > ( prog . p_filesz ) ;
tls_vsize = : : narrow < u32 > ( prog . p_memsz ) ;
2016-04-14 01:09:41 +02:00
break ;
}
2016-07-24 01:59:50 +02:00
case 0x60000001 : // LOOS+1
2016-04-14 01:09:41 +02:00
{
if ( prog . p_filesz )
{
struct process_param_t
{
be_t < u32 > size ;
be_t < u32 > magic ;
be_t < u32 > version ;
be_t < u32 > sdk_version ;
be_t < s32 > primary_prio ;
be_t < u32 > primary_stacksize ;
be_t < u32 > malloc_pagesize ;
be_t < u32 > ppc_seg ;
//be_t<u32> crash_dump_param_addr;
} ;
2023-06-25 14:53:42 +02:00
const auto & info = * ensure ( _main . get_ptr < process_param_t > ( vm : : cast ( prog . p_vaddr ) ) ) ;
2016-04-14 01:09:41 +02:00
if ( info . size < sizeof ( process_param_t ) )
{
2020-02-01 05:36:53 +01:00
ppu_loader . warning ( " Bad process_param size! [0x%x : 0x%x] " , info . size , sizeof ( process_param_t ) ) ;
2016-04-14 01:09:41 +02:00
}
2016-07-19 01:33:25 +02:00
2021-04-23 20:25:55 +02:00
if ( info . magic ! = SYS_PROCESS_PARAM_MAGIC )
2016-04-14 01:09:41 +02:00
{
2020-02-01 05:36:53 +01:00
ppu_loader . error ( " Bad process_param magic! [0x%x] " , info . magic ) ;
2016-04-14 01:09:41 +02:00
}
else
{
2016-07-19 01:33:25 +02:00
sdk_version = info . sdk_version ;
2019-01-17 17:31:25 +01:00
2020-01-31 14:43:59 +01:00
if ( s32 prio = info . primary_prio ; prio < 3072
2019-11-01 20:21:15 +01:00
& & ( prio > = ( g_ps3_process_info . debug_or_root ( ) ? 0 : - 512 ) ) )
2019-01-17 17:31:25 +01:00
{
primary_prio = prio ;
}
2016-07-19 01:33:25 +02:00
primary_stacksize = info . primary_stacksize ;
malloc_pagesize = info . malloc_pagesize ;
2019-07-03 19:17:04 +02:00
ppc_seg = info . ppc_seg ;
2016-07-19 01:33:25 +02:00
2020-02-01 05:36:53 +01:00
ppu_loader . notice ( " *** sdk version: 0x%x " , info . sdk_version ) ;
ppu_loader . notice ( " *** primary prio: %d " , info . primary_prio ) ;
ppu_loader . notice ( " *** primary stacksize: 0x%x " , info . primary_stacksize ) ;
ppu_loader . notice ( " *** malloc pagesize: 0x%x " , info . malloc_pagesize ) ;
ppu_loader . notice ( " *** ppc seg: 0x%x " , info . ppc_seg ) ;
//ppu_loader.notice("*** crash dump param addr: 0x%x", info.crash_dump_param_addr);
2016-04-14 01:09:41 +02:00
}
}
break ;
}
2016-07-24 01:59:50 +02:00
case 0x60000002 : // LOOS+2
2016-04-14 01:09:41 +02:00
{
if ( prog . p_filesz )
{
struct ppu_proc_prx_param_t
{
be_t < u32 > size ;
be_t < u32 > magic ;
be_t < u32 > version ;
be_t < u32 > unk0 ;
be_t < u32 > libent_start ;
be_t < u32 > libent_end ;
be_t < u32 > libstub_start ;
be_t < u32 > libstub_end ;
be_t < u16 > ver ;
be_t < u16 > unk1 ;
be_t < u32 > unk2 ;
} ;
2023-06-25 14:53:42 +02:00
const auto & proc_prx_param = * ensure ( _main . get_ptr < const ppu_proc_prx_param_t > ( vm : : cast ( prog . p_vaddr ) ) ) ;
2016-04-14 01:09:41 +02:00
2020-02-01 05:36:53 +01:00
ppu_loader . notice ( " * libent_start = *0x%x " , proc_prx_param . libent_start ) ;
ppu_loader . notice ( " * libstub_start = *0x%x " , proc_prx_param . libstub_start ) ;
ppu_loader . notice ( " * unk0 = 0x%x " , proc_prx_param . unk0 ) ;
ppu_loader . notice ( " * unk2 = 0x%x " , proc_prx_param . unk2 ) ;
2016-06-07 22:24:20 +02:00
2020-02-19 16:26:41 +01:00
if ( proc_prx_param . magic ! = 0x1b434cecu )
2016-04-14 01:09:41 +02:00
{
2023-06-25 14:53:42 +02:00
ppu_loader . error ( " ppu_load_exec(): Bad magic! (0x%x) " , proc_prx_param . magic ) ;
2021-01-30 15:25:21 +01:00
return false ;
2016-04-14 01:09:41 +02:00
}
2023-08-06 18:47:23 +02:00
ppu_linkage_info dummy { } ;
2023-08-07 14:44:22 +02:00
ppu_load_exports ( _main , virtual_load ? & dummy : & link , proc_prx_param . libent_start , proc_prx_param . libent_end ) ;
ppu_load_imports ( _main , _main . relocs , virtual_load ? & dummy : & link , proc_prx_param . libstub_start , proc_prx_param . libstub_end ) ;
2023-06-25 14:53:42 +02:00
2021-03-02 12:59:19 +01:00
std : : stable_sort ( _main . relocs . begin ( ) , _main . relocs . end ( ) ) ;
2016-04-14 01:09:41 +02:00
}
break ;
}
default :
{
2020-02-01 05:36:53 +01:00
ppu_loader . error ( " Unknown phdr type (0x%08x) " , p_type ) ;
2016-04-14 01:09:41 +02:00
}
}
}
2022-07-04 15:02:17 +02:00
// Initialize memory stats (according to sdk version)
u32 mem_size ;
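// The amount of user memory given to the process depends on the SDK version the executable was
// built against (older SDKs get a smaller pool); vsh.self receives the largest pool because it is
// loaded before any generic application, and debug console mode adds another 0xC000000 on top of
// the selected value.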
2023-01-19 19:11:46 +01:00
if ( Emu . IsVsh ( ) )
2022-07-04 15:02:17 +02:00
{
// Because vsh.self comes before any generic application, more memory is available to it
mem_size = 0xF000000 ;
}
else if ( sdk_version > 0x0021FFFF )
{
mem_size = 0xD500000 ;
}
else if ( sdk_version > 0x00192FFF )
{
mem_size = 0xD300000 ;
}
else if ( sdk_version > 0x0018FFFF )
{
mem_size = 0xD100000 ;
}
else if ( sdk_version > 0x0017FFFF )
{
mem_size = 0xD000000 ;
}
else if ( sdk_version > 0x00154FFF )
{
mem_size = 0xCC00000 ;
}
else
{
mem_size = 0xC800000 ;
}
if ( g_cfg . core . debug_console_mode )
{
// TODO: Check for all sdk versions
mem_size + = 0xC000000 ;
}
2016-04-14 01:09:41 +02:00
// Initialize process
2017-02-26 16:56:31 +01:00
std : : vector < std : : shared_ptr < lv2_prx > > loaded_modules ;
2016-04-14 01:09:41 +02:00
2020-12-07 18:10:34 +01:00
// Module list to load at startup
2017-03-02 22:35:19 +01:00
std : : set < std : : string > load_libs ;
2016-04-14 01:09:41 +02:00
2020-12-08 20:22:08 +01:00
if ( g_cfg . core . libraries_control . get_set ( ) . count ( " liblv2.sprx:lle " ) | | ! g_cfg . core . libraries_control . get_set ( ) . count ( " liblv2.sprx:hle " ) )
2016-04-14 01:09:41 +02:00
{
2020-12-07 18:10:34 +01:00
// Will load libsysmodule.sprx internally
load_libs . emplace ( " liblv2.sprx " ) ;
2017-03-02 22:35:19 +01:00
}
2020-12-08 20:22:08 +01:00
else if ( g_cfg . core . libraries_control . get_set ( ) . count ( " libsysmodule.sprx:lle " ) | | ! g_cfg . core . libraries_control . get_set ( ) . count ( " libsysmodule.sprx:hle " ) )
2017-07-10 21:29:39 +02:00
{
2020-12-07 18:10:34 +01:00
// Load only libsysmodule.sprx
load_libs . emplace ( " libsysmodule.sprx " ) ;
}
2023-06-25 14:53:42 +02:00
if ( ar | | Emu . IsVsh ( ) | | virtual_load )
2021-04-02 19:39:47 +02:00
{
2022-07-04 15:02:17 +02:00
// Cannot be used with vsh.self or savestates (they manage this themselves)
2021-04-02 19:39:47 +02:00
load_libs . clear ( ) ;
}
2020-12-07 18:10:34 +01:00
const std : : string lle_dir = vfs : : get ( " /dev_flash/sys/external/ " ) ;
if ( ! fs : : is_file ( lle_dir + " liblv2.sprx " ) )
{
ppu_loader . error ( " PS3 firmware is not installed or the installed firmware is invalid. "
" \n You should install the PS3 Firmware (Menu: File -> Install Firmware). "
" \n Visit https://rpcs3.net/ for Quickstart Guide and more information. " ) ;
2017-07-10 21:29:39 +02:00
}
2017-12-25 19:06:09 +01:00
2019-08-07 17:41:27 +02:00
// Program entry
2023-06-07 13:34:39 +02:00
u32 entry = static_cast < u32 > ( elf . header . e_entry ) ; // Run entry from elf (HLE)
2019-08-07 17:41:27 +02:00
2022-07-04 15:02:17 +02:00
// Set path (TODO)
_main . name . clear ( ) ;
2023-06-24 19:44:21 +02:00
_main . path = elf_path ;
2022-07-04 15:02:17 +02:00
2023-04-08 17:03:05 +02:00
_main . elf_entry = static_cast < u32 > ( elf . header . e_entry ) ;
_main . seg0_code_end = end ;
2023-09-09 12:28:33 +02:00
_main . applied_patches = applied ;
2022-07-04 15:02:17 +02:00
2023-06-29 07:42:21 +02:00
if ( ! virtual_load )
{
// Set SDK version
g_ps3_process_info . sdk_ver = sdk_version ;
2022-07-04 15:02:17 +02:00
2023-06-29 07:42:21 +02:00
// Set ppc fixed allocations segment permission
g_ps3_process_info . ppc_seg = ppc_seg ;
2022-07-04 15:02:17 +02:00
2023-06-29 07:42:21 +02:00
if ( Emu . init_mem_containers )
{
// Refer to sys_process_exit2 for explanation
// Make init_mem_containers empty before call
const auto callback = std : : move ( Emu . init_mem_containers ) ;
callback ( mem_size ) ;
}
else if ( ! ar )
{
g_fxo - > init < id_manager : : id_map < lv2_memory_container > > ( ) ;
g_fxo - > init < lv2_memory_container > ( mem_size ) ;
}
2022-07-04 19:12:22 +02:00
2023-06-25 14:53:42 +02:00
void init_fxo_for_exec ( utils : : serial * ar , bool full ) ;
init_fxo_for_exec ( ar , false ) ;
2023-08-05 20:33:00 +02:00
liblv2_begin = 0 ;
liblv2_end = 0 ;
2023-06-25 14:53:42 +02:00
}
else
{
2023-06-29 07:42:21 +02:00
g_ps3_process_info = old_process_info ;
2023-06-25 14:53:42 +02:00
}
2022-07-04 15:02:17 +02:00
2017-03-02 22:35:19 +01:00
if ( ! load_libs . empty ( ) )
2016-04-14 01:09:41 +02:00
{
2017-03-02 22:35:19 +01:00
for ( const auto & name : load_libs )
2016-04-14 01:09:41 +02:00
{
2017-03-02 22:35:19 +01:00
const ppu_prx_object obj = decrypt_self ( fs : : file ( lle_dir + name ) ) ;
2016-04-14 01:09:41 +02:00
2016-07-09 00:36:42 +02:00
if ( obj = = elf_error : : ok )
2016-04-14 01:09:41 +02:00
{
2020-02-01 05:36:53 +01:00
ppu_loader . warning ( " Loading library: %s " , name ) ;
2016-04-14 01:09:41 +02:00
2023-06-25 14:53:42 +02:00
auto prx = ppu_load_prx ( obj , false , lle_dir + name , 0 , nullptr ) ;
2022-11-05 16:14:34 +01:00
prx - > state = PRX_STATE_STARTED ;
prx - > load_exports ( ) ;
2016-06-07 22:24:20 +02:00
2016-07-27 23:43:22 +02:00
if ( prx - > funcs . empty ( ) )
{
2023-06-25 14:53:42 +02:00
ppu_loader . error ( " Module %s has no functions! " , name ) ;
2016-07-27 23:43:22 +02:00
}
else
{
// TODO: fix arguments
2017-07-01 01:08:51 +02:00
prx - > validate ( prx - > funcs [ 0 ] . addr ) ;
2016-07-27 23:43:22 +02:00
}
2017-02-26 16:56:31 +01:00
2019-08-07 17:41:27 +02:00
if ( name = = " liblv2.sprx " )
{
// Run liblv2.sprx entry point (TODO)
entry = prx - > start . addr ( ) ;
}
else
{
loaded_modules . emplace_back ( std : : move ( prx ) ) ;
}
2016-04-14 01:09:41 +02:00
}
else
{
2020-12-07 18:10:34 +01:00
ppu_loader . error ( " Failed to load /dev_flash/sys/external/%s: %s (forcing HLE implementation) " , name , obj . get_error ( ) ) ;
2016-04-14 01:09:41 +02:00
}
}
}
2023-06-25 14:53:42 +02:00
if ( ar | | virtual_load )
2022-07-04 15:02:17 +02:00
{
error_handler . errored = false ;
return true ;
}
2019-07-03 19:17:04 +02:00
if ( ppc_seg ! = 0x0 )
{
if ( ppc_seg ! = 0x1 )
{
2020-02-01 05:36:53 +01:00
ppu_loader . todo ( " Unknown ppc_seg flag value = 0x%x " , ppc_seg ) ;
2019-07-03 19:17:04 +02:00
}
// Additional segment for fixed allocations
if ( ! vm : : map ( 0x30000000 , 0x10000000 , 0x200 ) )
{
2020-12-09 16:04:52 +01:00
fmt : : throw_exception ( " Failed to map ppc_seg's segment! " ) ;
2019-07-03 19:17:04 +02:00
}
}
2016-07-19 01:33:25 +02:00
2017-12-25 19:06:09 +01:00
// Fix primary stack size
2018-10-11 00:17:19 +02:00
switch ( u32 sz = primary_stacksize )
2017-12-25 19:06:09 +01:00
{
2021-04-23 20:25:55 +02:00
case SYS_PROCESS_PRIMARY_STACK_SIZE_32K : primary_stacksize = 32 * 1024 ; break ;
case SYS_PROCESS_PRIMARY_STACK_SIZE_64K : primary_stacksize = 64 * 1024 ; break ;
case SYS_PROCESS_PRIMARY_STACK_SIZE_96K : primary_stacksize = 96 * 1024 ; break ;
case SYS_PROCESS_PRIMARY_STACK_SIZE_128K : primary_stacksize = 128 * 1024 ; break ;
case SYS_PROCESS_PRIMARY_STACK_SIZE_256K : primary_stacksize = 256 * 1024 ; break ;
case SYS_PROCESS_PRIMARY_STACK_SIZE_512K : primary_stacksize = 512 * 1024 ; break ;
case SYS_PROCESS_PRIMARY_STACK_SIZE_1M : primary_stacksize = 1024 * 1024 ; break ;
2018-10-11 00:17:19 +02:00
default :
{
2021-04-23 20:25:55 +02:00
// According to elad335, the min value seems to be 64KB instead of the expected 4KB (SYS_PROCESS_PARAM_STACK_SIZE_MIN)
primary_stacksize = utils : : align < u32 > ( std : : clamp < u32 > ( sz , 0x10000 , SYS_PROCESS_PARAM_STACK_SIZE_MAX ) , 4096 ) ;
2018-10-11 00:17:19 +02:00
break ;
}
2017-12-25 19:06:09 +01:00
}
2016-07-27 23:43:22 +02:00
// Initialize main thread
2018-10-11 00:17:19 +02:00
ppu_thread_params p { } ;
p . stack_addr = vm : : cast ( vm : : alloc ( primary_stacksize , vm : : stack , 4096 ) ) ;
p . stack_size = primary_stacksize ;
2023-06-07 13:34:39 +02:00
p . entry = vm : : _ref < ppu_func_opd_t > ( entry ) ;
2018-10-11 00:17:19 +02:00
2021-05-01 08:34:52 +02:00
auto ppu = idm : : make_ptr < named_thread < ppu_thread > > ( p , " main_thread " , primary_prio , 1 ) ;
2016-07-27 23:43:22 +02:00
2017-09-18 18:16:36 +02:00
// Write initial data (exitspawn)
2020-02-26 21:13:54 +01:00
if ( ! Emu . data . empty ( ) )
2017-09-18 18:16:36 +02:00
{
std : : memcpy ( vm : : base ( ppu - > stack_addr + ppu - > stack_size - : : size32 ( Emu . data ) ) , Emu . data . data ( ) , Emu . data . size ( ) ) ;
2023-08-12 01:03:24 +02:00
ppu - > gpr [ 1 ] - = utils : : align < u32 > ( : : size32 ( Emu . data ) , 0x10 ) ;
2017-09-18 18:16:36 +02:00
}
2023-08-10 21:04:15 +02:00
// Initialize process arguments
// Calculate storage requirements on the stack
2023-08-12 01:03:24 +02:00
const u32 pointers_storage_size = u32 { sizeof ( u64 ) } * utils : : align < u32 > ( : : size32 ( Emu . envp ) + : : size32 ( Emu . argv ) + 2 , 2 ) ;
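// Stack layout for the process arguments: a pointer array holding the argv entries, a null
// terminator, the envp entries and another null terminator (the entry count is rounded up to an
// even number of u64, keeping the array a multiple of 16 bytes), immediately followed by the
// string data with every string padded to a 16-byte boundary.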
2023-08-10 21:04:15 +02:00
u32 stack_alloc_size = pointers_storage_size ;
for ( const auto & arg : Emu . argv )
{
stack_alloc_size + = utils : : align < u32 > ( : : size32 ( arg ) + 1 , 0x10 ) ;
}
for ( const auto & arg : Emu . envp )
{
stack_alloc_size + = utils : : align < u32 > ( : : size32 ( arg ) + 1 , 0x10 ) ;
}
ensure ( ppu - > stack_size > stack_alloc_size ) ;
2023-12-29 18:33:29 +01:00
vm : : ptr < u64 > args = vm : : cast ( static_cast < u32 > ( ppu - > stack_addr + ppu - > stack_size - stack_alloc_size - utils : : align < u32 > ( : : size32 ( Emu . data ) , 0x10 ) ) ) ;
2023-08-10 21:04:15 +02:00
vm : : ptr < u8 > args_data = vm : : cast ( args . addr ( ) + pointers_storage_size ) ;
const vm : : ptr < u64 > argv = args ;
for ( const auto & arg : Emu . argv )
{
const u32 arg_size = : : size32 ( arg ) + 1 ;
std : : memcpy ( args_data . get_ptr ( ) , arg . data ( ) , arg_size ) ;
* args + + = args_data . addr ( ) ;
args_data = vm : : cast ( args_data . addr ( ) + utils : : align < u32 > ( arg_size , 0x10 ) ) ;
}
* args + + = 0 ;
2023-08-12 01:03:24 +02:00
const vm : : ptr < u64 > envp = args ;
2023-08-10 21:04:15 +02:00
args = envp ;
for ( const auto & arg : Emu . envp )
{
const u32 arg_size = : : size32 ( arg ) + 1 ;
std : : memcpy ( args_data . get_ptr ( ) , arg . data ( ) , arg_size ) ;
* args + + = args_data . addr ( ) ;
args_data = vm : : cast ( args_data . addr ( ) + utils : : align < u32 > ( arg_size , 0x10 ) ) ;
}
* args + + = 0 ;
ppu - > gpr [ 1 ] - = stack_alloc_size ;
2022-06-11 14:12:42 +02:00
ensure ( g_fxo - > get < lv2_memory_container > ( ) . take ( primary_stacksize ) ) ;
2018-11-20 22:17:53 +01:00
2017-01-22 20:03:57 +01:00
ppu - > cmd_push ( { ppu_cmd : : initialize , 0 } ) ;
2023-06-07 13:34:39 +02:00
if ( entry = = static_cast < u32 > ( elf . header . e_entry ) & & ! Emu . IsVsh ( ) )
2016-07-27 23:43:22 +02:00
{
// Set TLS args, call sys_initialize_tls
ppu - > cmd_list
( {
{ ppu_cmd : : set_args , 4 } , u64 { ppu - > id } , u64 { tls_vaddr } , u64 { tls_fsize } , u64 { tls_vsize } ,
{ ppu_cmd : : hle_call , FIND_FUNC ( sys_initialize_tls ) } ,
} ) ;
2021-04-02 19:39:47 +02:00
}
2017-04-13 01:31:42 +02:00
2016-07-27 23:43:22 +02:00
// Run start functions
2017-02-26 16:56:31 +01:00
for ( const auto & prx : loaded_modules )
2016-07-27 23:43:22 +02:00
{
2017-02-26 16:56:31 +01:00
if ( ! prx - > start )
{
continue ;
}
2016-07-27 23:43:22 +02:00
// Reset arguments, run module entry point function
ppu - > cmd_list
( {
{ ppu_cmd : : set_args , 2 } , u64 { 0 } , u64 { 0 } ,
2017-02-26 16:56:31 +01:00
{ ppu_cmd : : lle_call , prx - > start . addr ( ) } ,
2016-07-27 23:43:22 +02:00
} ) ;
}
2016-04-14 01:09:41 +02:00
2016-07-27 23:43:22 +02:00
// Set command line arguments, run entry function
ppu - > cmd_list
( {
2023-08-10 21:04:15 +02:00
{ ppu_cmd : : set_args , 8 } , u64 { Emu . argv . size ( ) } , u64 { argv . addr ( ) } , u64 { envp . addr ( ) } , u64 { Emu . envp . size ( ) } , u64 { ppu - > id } , u64 { tls_vaddr } , u64 { tls_fsize } , u64 { tls_vsize } ,
2017-04-13 01:31:42 +02:00
{ ppu_cmd : : set_gpr , 11 } , u64 { elf . header . e_entry } ,
2016-07-27 23:43:22 +02:00
{ ppu_cmd : : set_gpr , 12 } , u64 { malloc_pagesize } ,
2023-06-07 13:34:39 +02:00
{ ppu_cmd : : entry_call , 0 } ,
2016-07-27 23:43:22 +02:00
} ) ;
2016-05-13 15:55:34 +02:00
2016-07-27 23:43:22 +02:00
// Set actual memory protection (experimental)
2016-07-24 01:59:50 +02:00
for ( const auto & prog : elf . progs )
{
const u32 addr = static_cast < u32 > ( prog . p_vaddr ) ;
const u32 size = static_cast < u32 > ( prog . p_memsz ) ;
2022-05-03 20:08:56 +02:00
if ( prog . p_type = = 0x1u /* LOAD */ & & prog . p_memsz & & ( prog . p_flags & 0x022000002 ) = = 0u /* W */ )
2016-07-24 01:59:50 +02:00
{
2022-05-03 20:08:56 +02:00
// Set memory protection to read-only when necessary (only if PPU-W, SPU-W, RSX-W are all disabled)
2020-12-18 15:43:34 +01:00
ensure ( vm : : page_protect ( addr , utils : : align ( size , 0x1000 ) , 0 , 0 , vm : : page_writable ) ) ;
2016-07-24 01:59:50 +02:00
}
}
2021-01-30 15:25:21 +01:00
error_handler . errored = false ;
return true ;
2016-04-14 01:09:41 +02:00
}
2017-12-31 15:38:02 +01:00
2023-06-25 14:53:42 +02:00
std : : pair < std : : shared_ptr < lv2_overlay > , CellError > ppu_load_overlay ( const ppu_exec_object & elf , bool virtual_load , const std : : string & path , s64 file_offset , utils : : serial * ar )
2017-12-31 15:38:02 +01:00
{
2021-02-12 12:40:55 +01:00
if ( elf ! = elf_error : : ok )
{
return { nullptr , CELL_ENOENT } ;
}
2017-12-31 15:38:02 +01:00
// Access linkage information object
2021-03-02 12:59:19 +01:00
auto & link = g_fxo - > get < ppu_linkage_info > ( ) ;
2017-12-31 15:38:02 +01:00
// Executable hash
sha1_context sha ;
sha1_starts ( & sha ) ;
2021-01-30 14:08:22 +01:00
// Check if it is an overlay executable first
for ( const auto & prog : elf . progs )
{
if ( prog . p_type = = 0x1u /* LOAD */ & & prog . p_memsz )
{
using addr_range = utils : : address_range ;
const addr_range r = addr_range : : start_length ( : : narrow < u32 > ( prog . p_vaddr ) , : : narrow < u32 > ( prog . p_memsz ) ) ;
if ( ! r . valid ( ) | | ! r . inside ( addr_range : : start_length ( 0x30000000 , 0x10000000 ) ) )
{
// TODO: Check the error code and whether there is a better way to report this error
return { nullptr , CELL_ENOEXEC } ;
}
}
}
2023-08-22 23:31:08 +02:00
std : : shared_ptr < lv2_overlay > ovlm = std : : make_shared < lv2_overlay > ( ) ;
2021-01-30 14:08:22 +01:00
2021-02-13 17:35:43 +01:00
// Set path (TODO)
ovlm - > name = path . substr ( path . find_last_of ( ' / ' ) + 1 ) ;
ovlm - > path = path ;
2021-05-26 22:38:17 +02:00
ovlm - > offset = file_offset ;
2021-02-13 17:35:43 +01:00
2021-02-01 16:33:19 +01:00
u32 end = 0 ;
2017-12-31 15:38:02 +01:00
// Allocate memory at fixed positions
for ( const auto & prog : elf . progs )
{
2020-02-01 05:36:53 +01:00
ppu_loader . notice ( " ** Segment: p_type=0x%x, p_vaddr=0x%llx, p_filesz=0x%llx, p_memsz=0x%llx, flags=0x%x " , prog . p_type , prog . p_vaddr , prog . p_filesz , prog . p_memsz , prog . p_flags ) ;
2017-12-31 15:38:02 +01:00
ppu_segment _seg ;
2020-12-09 16:04:52 +01:00
const u32 addr = _seg . addr = vm : : cast ( prog . p_vaddr ) ;
2020-12-09 14:03:15 +01:00
const u32 size = _seg . size = : : narrow < u32 > ( prog . p_memsz ) ;
2017-12-31 15:38:02 +01:00
const u32 type = _seg . type = prog . p_type ;
2021-01-12 11:01:06 +01:00
_seg . flags = prog . p_flags ;
2020-12-09 14:03:15 +01:00
_seg . filesz = : : narrow < u32 > ( prog . p_filesz ) ;
2017-12-31 15:38:02 +01:00
// Hash big-endian values
2019-11-29 23:28:06 +01:00
sha1_update ( & sha , reinterpret_cast < const uchar * > ( & prog . p_type ) , sizeof ( prog . p_type ) ) ;
sha1_update ( & sha , reinterpret_cast < const uchar * > ( & prog . p_flags ) , sizeof ( prog . p_flags ) ) ;
2017-12-31 15:38:02 +01:00
if ( type = = 0x1 /* LOAD */ & & prog . p_memsz )
{
if ( prog . bin . size ( ) > size | | prog . bin . size ( ) ! = prog . p_filesz )
fmt : : throw_exception ( " Invalid binary size (0x%llx, memsz=0x%x) " , prog . bin . size ( ) , size ) ;
2022-07-06 19:37:29 +02:00
const bool already_loaded = ! ! ar ; // Unimplemented optimization for savestates
2022-07-04 15:02:17 +02:00
2023-06-25 14:53:42 +02:00
_seg . ptr = vm : : base ( addr ) ;
if ( virtual_load )
{
// Leave some extra room so the analyser can safely read slightly past the segment limit
// With the VM any u32 address is valid, so the address space is not really a limit; here, however, creating a pointer past the allocation boundaries would be UB
// TODO: Use make_shared_for_overwrite when all compilers support it
const usz alloc_size = utils : : align < usz > ( size , 0x10000 ) + 4096 ;
ovlm - > allocations . push_back ( std : : shared_ptr < u8 [ ] > ( new u8 [ alloc_size ] ) ) ;
_seg . ptr = ovlm - > allocations . back ( ) . get ( ) ;
std : : memset ( static_cast < u8 * > ( _seg . ptr ) + prog . bin . size ( ) , 0 , alloc_size - 4096 - prog . bin . size ( ) ) ;
}
else if ( already_loaded )
2022-07-04 15:02:17 +02:00
{
if ( ! vm : : check_addr ( addr , vm : : page_readable , size ) )
{
2023-06-25 14:53:42 +02:00
ppu_loader . error ( " ppu_load_overlay(): Archived PPU overlay memory has not been found! (addr=0x%x, memsz=0x%x) " , addr , size ) ;
2022-07-04 15:02:17 +02:00
return { nullptr , CELL_EABORT } ;
}
}
else if ( ! vm : : get ( vm : : any , 0x30000000 ) - > falloc ( addr , size ) )
2021-01-30 14:08:22 +01:00
{
ppu_loader . error ( " ppu_load_overlay(): vm::falloc() failed (addr=0x%x, memsz=0x%x) " , addr , size ) ;
// Revert previous allocations
for ( const auto & seg : ovlm - > segs )
{
ensure ( vm : : dealloc ( seg . addr ) ) ;
}
// TODO: Check error code, maybe disallow more than one overlay instance completely
return { nullptr , CELL_EBUSY } ;
}
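// Worked sizing example for the virtual_load branch above (hypothetical size): with size = 0x12345,
// utils::align<usz>(0x12345, 0x10000) = 0x20000, so alloc_size = 0x20000 + 4096 = 0x21000;
// the memset then zero-fills the range [prog.bin.size(), 0x20000), leaving the trailing 4096-byte
// guard area for the analyser's out-of-bounds reads untouched.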
2017-12-31 15:38:02 +01:00
2023-06-25 14:53:42 +02:00
// Store only LOAD segments (TODO)
ovlm - > segs . emplace_back ( _seg ) ;
ovlm - > addr_to_seg_index . emplace ( addr , ovlm - > segs . size ( ) - 1 ) ;
2017-12-31 15:38:02 +01:00
// Copy segment data, hash it
2023-06-25 14:53:42 +02:00
if ( ! already_loaded ) std : : memcpy ( ensure ( ovlm - > get_ptr < void > ( addr ) ) , prog . bin . data ( ) , prog . bin . size ( ) ) ;
2019-11-29 23:28:06 +01:00
sha1_update ( & sha , reinterpret_cast < const uchar * > ( & prog . p_vaddr ) , sizeof ( prog . p_vaddr ) ) ;
sha1_update ( & sha , reinterpret_cast < const uchar * > ( & prog . p_memsz ) , sizeof ( prog . p_memsz ) ) ;
2017-12-31 15:38:02 +01:00
sha1_update ( & sha , prog . bin . data ( ) , prog . bin . size ( ) ) ;
// Initialize executable code if necessary
2023-06-25 14:53:42 +02:00
if ( prog . p_flags & 0x1 & & ! virtual_load )
2017-12-31 15:38:02 +01:00
{
ppu_register_range ( addr , size ) ;
}
}
}
// Load section list, used by the analyser
for ( const auto & s : elf . shdrs )
{
2022-04-27 18:46:09 +02:00
ppu_loader . notice ( " ** Section: sh_type=0x%x, addr=0x%llx, size=0x%llx, flags=0x%x " , std : : bit_cast < u32 > ( s . sh_type ) , s . sh_addr , s . sh_size , s . _sh_flags ) ;
2017-12-31 15:38:02 +01:00
2022-04-27 18:46:09 +02:00
if ( s . sh_type ! = sec_type : : sht_progbits ) continue ;
2021-01-05 14:15:48 +01:00
2017-12-31 15:38:02 +01:00
ppu_segment _sec ;
const u32 addr = _sec . addr = vm : : cast ( s . sh_addr ) ;
const u32 size = _sec . size = vm : : cast ( s . sh_size ) ;
2021-01-12 11:01:06 +01:00
2022-04-27 18:46:09 +02:00
_sec . type = std : : bit_cast < u32 > ( s . sh_type ) ;
_sec . flags = static_cast < u32 > ( s . _sh_flags & 7 ) ;
2017-12-31 15:38:02 +01:00
_sec . filesz = 0 ;
2021-01-05 14:15:48 +01:00
if ( addr & & size )
2017-12-31 15:38:02 +01:00
{
ovlm - > secs . emplace_back ( _sec ) ;
2021-02-01 16:33:19 +01:00
2021-02-02 17:54:43 +01:00
if ( _sec . flags & 0x4 & & addr > = ovlm - > segs [ 0 ] . addr & & addr + size < = ovlm - > segs [ 0 ] . addr + ovlm - > segs [ 0 ] . size )
2021-02-01 16:33:19 +01:00
{
end = std : : max < u32 > ( end , addr + size ) ;
}
2017-12-31 15:38:02 +01:00
}
}
sha1_finish ( & sha , ovlm - > sha1 ) ;
// Format patch name
std : : string hash ( " OVL-0000000000000000000000000000000000000000 " ) ;
for ( u32 i = 0 ; i < 20 ; i + + )
{
constexpr auto pal = " 0123456789abcdef " ;
hash [ 4 + i * 2 ] = pal [ ovlm - > sha1 [ i ] > > 4 ] ;
hash [ 5 + i * 2 ] = pal [ ovlm - > sha1 [ i ] & 15 ] ;
}
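// Illustration only (hypothetical digest): each byte of ovlm->sha1 becomes two lowercase hex digits
// after the "OVL-" prefix, so a digest starting with { 0xde, 0xad, 0xbe, 0xef, ... } produces a
// patch name starting with "OVL-deadbeef". The high nibble (byte >> 4) selects the first character
// and the low nibble (byte & 15) the second, matching the pal lookup above.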
// Apply the patch
2023-07-12 19:43:33 +02:00
auto applied = g_fxo - > get < patch_engine > ( ) . apply ( hash , [ ovlm ] ( u32 addr , u32 size ) { return ovlm - > get_ptr < u8 > ( addr , size ) ; } ) ;
2017-12-31 15:38:02 +01:00
if ( ! Emu . GetTitleID ( ) . empty ( ) )
{
// Alternative patch
2023-07-12 19:43:33 +02:00
applied + = g_fxo - > get < patch_engine > ( ) . apply ( Emu . GetTitleID ( ) + ' - ' + hash , [ ovlm ] ( u32 addr , u32 size ) { return ovlm - > get_ptr < u8 > ( addr , size ) ; } ) ;
2017-12-31 15:38:02 +01:00
}
2023-08-24 22:03:51 +02:00
if ( ! applied . empty ( ) | | ar )
{
// Compare the memory past the end of the executable code sections with the original file contents
if ( end > = ovlm - > segs [ 0 ] . addr & & end < ovlm - > segs [ 0 ] . addr + ovlm - > segs [ 0 ] . size )
{
for ( const auto & prog : elf . progs )
{
// Find the first segment
if ( prog . p_type = = 0x1u /* LOAD */ & & prog . p_memsz )
{
std : : basic_string_view < uchar > elf_memory { prog . bin . data ( ) , prog . bin . size ( ) } ;
elf_memory . remove_prefix ( end - ovlm - > segs [ 0 ] . addr ) ;
if ( elf_memory ! = std : : basic_string_view < uchar > { & ovlm - > get_ref < u8 > ( end ) , elf_memory . size ( ) } )
{
// There are changes, disable analysis optimization
ppu_loader . notice ( " Disabling analysis optimization due to memory changes from original file " ) ;
end = 0 ;
}
break ;
}
}
}
}
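// In other words: if anything past the end of the executable code within the first LOAD segment differs
// from the original file (e.g. a patch or restored savestate memory touched it), end is reset to 0,
// which disables the seg0_code_end hint the analyser would otherwise use as an optimization.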
2020-10-03 07:07:13 +02:00
// Embedded SPU ELF patching
for ( const auto & seg : ovlm - > segs )
{
2023-06-25 14:53:42 +02:00
ppu_check_patch_spu_images ( * ovlm , seg ) ;
2020-10-03 07:07:13 +02:00
}
2021-02-13 17:35:43 +01:00
if ( applied . empty ( ) )
{
ppu_loader . warning ( " OVL hash of %s: %s " , ovlm - > name , hash ) ;
}
else
{
ppu_loader . success ( " OVL hash of %s: %s (<- %u) " , ovlm - > name , hash , applied . size ( ) ) ;
}
2017-12-31 15:38:02 +01:00
// Load other programs
for ( auto & prog : elf . progs )
{
switch ( const u32 p_type = prog . p_type )
{
case 0x00000001 : break ; // LOAD (already loaded)
case 0x60000001 : // LOOS+1
{
if ( prog . p_filesz )
{
struct process_param_t
{
be_t < u32 > size ; //0x60
be_t < u32 > magic ; //string OVLM
be_t < u32 > version ; //0x17000
be_t < u32 > sdk_version ; //seems to be correct
//string "stage_ovlm"
//and a lot of zeros.
} ;
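// Note: the magic checked below is the big-endian constant 0x4f564c4d, i.e. the ASCII characters 'O' 'V' 'L' 'M'.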
2023-06-25 14:53:42 +02:00
const auto & info = * ensure ( ovlm - > get_ptr < process_param_t > ( vm : : cast ( prog . p_vaddr ) ) ) ;
2017-12-31 15:38:02 +01:00
if ( info . size < sizeof ( process_param_t ) )
{
2020-02-01 05:36:53 +01:00
ppu_loader . warning ( " Bad process_param size! [0x%x : 0x%x] " , info . size , u32 { sizeof ( process_param_t ) } ) ;
2017-12-31 15:38:02 +01:00
}
2020-02-19 18:03:59 +01:00
if ( info . magic ! = 0x4f564c4du ) //string "OVLM"
2017-12-31 15:38:02 +01:00
{
2020-02-01 05:36:53 +01:00
ppu_loader . error ( " Bad process_param magic! [0x%x] " , info . magic ) ;
2017-12-31 15:38:02 +01:00
}
else
{
2020-02-01 05:36:53 +01:00
ppu_loader . notice ( " *** sdk version: 0x%x " , info . sdk_version ) ;
2017-12-31 15:38:02 +01:00
}
}
break ;
}
case 0x60000002 : // LOOS+2 seems to have zero size in overlay ELFs, at least in known cases
{
if ( prog . p_filesz )
{
struct ppu_proc_prx_param_t
{
be_t < u32 > size ;
be_t < u32 > magic ;
be_t < u32 > version ;
be_t < u32 > unk0 ;
be_t < u32 > libent_start ;
be_t < u32 > libent_end ;
be_t < u32 > libstub_start ;
be_t < u32 > libstub_end ;
be_t < u16 > ver ;
be_t < u16 > unk1 ;
be_t < u32 > unk2 ;
} ;
2023-06-25 14:53:42 +02:00
const auto & proc_prx_param = * ensure ( ovlm - > get_ptr < const ppu_proc_prx_param_t > ( vm : : cast ( prog . p_vaddr ) ) ) ;
2017-12-31 15:38:02 +01:00
2020-02-01 05:36:53 +01:00
ppu_loader . notice ( " * libent_start = *0x%x " , proc_prx_param . libent_start ) ;
ppu_loader . notice ( " * libstub_start = *0x%x " , proc_prx_param . libstub_start ) ;
ppu_loader . notice ( " * unk0 = 0x%x " , proc_prx_param . unk0 ) ;
ppu_loader . notice ( " * unk2 = 0x%x " , proc_prx_param . unk2 ) ;
2017-12-31 15:38:02 +01:00
2020-02-19 16:26:41 +01:00
if ( proc_prx_param . magic ! = 0x1b434cecu )
2017-12-31 15:38:02 +01:00
{
fmt : : throw_exception ( " Bad magic! (0x%x) " , proc_prx_param . magic ) ;
}
2023-08-06 18:47:23 +02:00
ppu_linkage_info dummy { } ;
2023-08-07 14:44:22 +02:00
ppu_load_exports ( * ovlm , virtual_load ? & dummy : & link , proc_prx_param . libent_start , proc_prx_param . libent_end ) ;
ppu_load_imports ( * ovlm , ovlm - > relocs , virtual_load ? & dummy : & link , proc_prx_param . libstub_start , proc_prx_param . libstub_end ) ;
2017-12-31 15:38:02 +01:00
}
break ;
}
default :
{
2020-02-01 05:36:53 +01:00
ppu_loader . error ( " Unknown phdr type (0x%08x) " , p_type ) ;
2017-12-31 15:38:02 +01:00
}
}
}
ovlm - > entry = static_cast < u32 > ( elf . header . e_entry ) ;
2023-09-09 12:28:33 +02:00
ovlm - > seg0_code_end = end ;
ovlm - > applied_patches = std : : move ( applied ) ;
const bool is_being_used_in_emulation = ( vm : : base ( ovlm - > segs [ 0 ] . addr ) = = ovlm - > segs [ 0 ] . ptr ) ;
if ( ! is_being_used_in_emulation )
{
// Overlay is not mapped into emulated memory yet; postpone analysis and registration until later
return { std : : move ( ovlm ) , { } } ;
}
2017-12-31 15:38:02 +01:00
2023-07-14 16:57:43 +02:00
const auto cpu = cpu_thread : : get_current ( ) ;
2017-12-31 15:38:02 +01:00
// Analyse executable (TODO)
2023-09-09 12:28:33 +02:00
if ( ! ovlm - > analyse ( 0 , ovlm - > entry , end , ovlm - > applied_patches , ! cpu ? std : : function < bool ( ) > ( ) : [ cpu ] ( )
2023-07-14 16:57:43 +02:00
{
2023-09-09 12:28:33 +02:00
return ! ! ( cpu - > state & cpu_flag : : exit ) ;
2023-07-14 16:57:43 +02:00
} ) )
{
return { nullptr , CellError { CELL_CANCEL + 0u } } ;
}
2017-12-31 15:38:02 +01:00
// Validate analyser results (not required)
ovlm - > validate ( 0 ) ;
2023-06-25 14:53:42 +02:00
if ( ! ar & & ! virtual_load )
2022-07-04 15:02:17 +02:00
{
2023-12-28 18:37:24 +01:00
ensure ( idm : : import_existing < lv2_obj , lv2_overlay > ( ovlm ) ) ;
2022-07-04 15:02:17 +02:00
try_spawn_ppu_if_exclusive_program ( * ovlm ) ;
}
2021-01-30 15:25:21 +01:00
2021-01-30 14:08:22 +01:00
return { std : : move ( ovlm ) , { } } ;
2017-12-31 15:38:02 +01:00
}
2022-04-27 18:46:09 +02:00
bool ppu_load_rel_exec ( const ppu_rel_object & elf )
{
ppu_module relm { } ;
struct on_fatal_error
{
ppu_module & relm ;
bool errored = true ;
~ on_fatal_error ( )
{
if ( ! errored )
{
return ;
}
// Revert previous allocations on an error
for ( const auto & seg : relm . secs )
{
vm : : dealloc ( seg . addr ) ;
}
}
} error_handler { relm } ;
u32 memsize = 0 ;
for ( const auto & s : elf . shdrs )
{
if ( s . sh_type = = sec_type : : sht_progbits )
{
memsize = utils : : align < u32 > ( memsize + vm : : cast ( s . sh_size ) , 128 ) ;
}
}
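// Worked example of the accumulation above (hypothetical sizes): progbits sections of 0x50 and 0x100 bytes
// give memsize = align(0 + 0x50, 128) = 0x80, then align(0x80 + 0x100, 128) = 0x180, matching the
// per-section 128-byte alignment applied when the sections are copied in below.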
u32 addr = vm : : alloc ( memsize , vm : : main ) ;
if ( ! addr )
{
2023-06-25 14:53:42 +02:00
ppu_loader . error ( " ppu_load_rel_exec(): vm::alloc() failed (memsz=0x%x) " , memsize ) ;
2022-04-27 18:46:09 +02:00
return false ;
}
ppu_register_range ( addr , memsize ) ;
// Collect references to the sections so that executable sections can be sorted ahead of non-executable ones
std : : vector < const elf_shdata < elf_be , u64 > * > shdrs ( elf . shdrs . size ( ) ) ;
for ( auto & ref : shdrs )
{
ref = & elf . shdrs [ & ref - shdrs . data ( ) ] ;
}
std : : stable_sort ( shdrs . begin ( ) , shdrs . end ( ) , [ ] ( auto & a , auto & b ) - > bool
{
const bs_t < sh_flag > flags_a_has = a - > sh_flags ( ) - b - > sh_flags ( ) ;
return flags_a_has . all_of ( sh_flag : : shf_execinstr ) ;
} ) ;
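// The comparator above forms a strict weak ordering: (a->sh_flags() - b->sh_flags()) is the set of
// flags a has and b lacks, so it returns true exactly when a is executable and b is not; within each
// group std::stable_sort preserves the original section order. A minimal sketch of the same idea
// (hypothetical values):
//   bs_t<sh_flag> only_in_a = a_flags - b_flags;                  // set difference of the flag bitsets
//   bool a_before_b = only_in_a.all_of(sh_flag::shf_execinstr);   // true only if a is executable and b is not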
// Load sections
for ( auto ptr : shdrs )
{
const auto & s = * ptr ;
ppu_loader . notice ( " ** Section: sh_type=0x%x, addr=0x%llx, size=0x%llx, flags=0x%x " , std : : bit_cast < u32 > ( s . sh_type ) , s . sh_addr , s . sh_size , s . _sh_flags ) ;
2022-09-13 15:08:55 +02:00
2022-04-27 18:46:09 +02:00
if ( s . sh_type = = sec_type : : sht_progbits & & s . sh_size & & s . sh_flags ( ) . all_of ( sh_flag : : shf_alloc ) )
{
ppu_segment _sec ;
const u32 size = _sec . size = vm : : cast ( s . sh_size ) ;
_sec . type = std : : bit_cast < u32 > ( s . sh_type ) ;
_sec . flags = static_cast < u32 > ( s . _sh_flags & 7 ) ;
_sec . filesz = size ;
_sec . addr = addr ;
relm . secs . emplace_back ( _sec ) ;
2023-08-03 10:15:34 +02:00
std : : memcpy ( vm : : base ( addr ) , s . get_bin ( ) . data ( ) , size ) ;
2022-04-27 18:46:09 +02:00
addr = utils : : align < u32 > ( addr + size , 128 ) ;
}
}
try_spawn_ppu_if_exclusive_program ( relm ) ;
error_handler . errored = false ;
return true ;
}