#pragma once

#include "Emu/CPU/CPUThread.h"
#include "Emu/Cell/SPUInterpreter.h"
#include "Emu/Memory/vm.h"
#include "MFC.h"

#include "util/v128.hpp"
#include "util/logs.hpp"
#include "util/to_endian.hpp"

LOG_CHANNEL(spu_log, "SPU");

struct lv2_event_queue;
struct lv2_spu_group;
struct lv2_int_tag;

namespace utils
{
	class shm;
}

// JIT Block
using spu_function_t = void(*)(spu_thread&, void*, u8*);

// SPU Channels
enum : u32
{
	SPU_RdEventStat   = 0,  // Read event status with mask applied
	SPU_WrEventMask   = 1,  // Write event mask
	SPU_WrEventAck    = 2,  // Write end of event processing
	SPU_RdSigNotify1  = 3,  // Signal notification 1
	SPU_RdSigNotify2  = 4,  // Signal notification 2
	SPU_WrDec         = 7,  // Write decrementer count
	SPU_RdDec         = 8,  // Read decrementer count
	SPU_RdEventMask   = 11, // Read event mask
	SPU_RdMachStat    = 13, // Read SPU run status
	SPU_WrSRR0        = 14, // Write SPU machine state save/restore register 0 (SRR0)
	SPU_RdSRR0        = 15, // Read SPU machine state save/restore register 0 (SRR0)
	SPU_WrOutMbox     = 28, // Write outbound mailbox contents
	SPU_RdInMbox      = 29, // Read inbound mailbox contents
	SPU_WrOutIntrMbox = 30, // Write outbound interrupt mailbox contents (interrupting PPU)

	SPU_Set_Bkmk_Tag  = 69, // Causes an event that can be logged in the performance monitor logic if enabled in the SPU
	SPU_PM_Start_Ev   = 70, // Starts the performance monitor event if enabled
	SPU_PM_Stop_Ev    = 71, // Stops the performance monitor event if enabled
};

// MFC Channels
enum : u32
{
	MFC_WrMSSyncReq     = 9,  // Write multisource synchronization request
	MFC_RdTagMask       = 12, // Read tag mask
	MFC_LSA             = 16, // Write local memory address command parameter
	MFC_EAH             = 17, // Write high order DMA effective address command parameter
	MFC_EAL             = 18, // Write low order DMA effective address command parameter
	MFC_Size            = 19, // Write DMA transfer size command parameter
	MFC_TagID           = 20, // Write tag identifier command parameter
	MFC_Cmd             = 21, // Write and enqueue DMA command with associated class ID
	MFC_WrTagMask       = 22, // Write tag mask
	MFC_WrTagUpdate     = 23, // Write request for conditional or unconditional tag status update
	MFC_RdTagStat       = 24, // Read tag status with mask applied
	MFC_RdListStallStat = 25, // Read DMA list stall-and-notify status
	MFC_WrListStallAck  = 26, // Write DMA list stall-and-notify acknowledge
	MFC_RdAtomicStat    = 27, // Read completion status of last completed immediate MFC atomic update command
};
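
// Minimal sketch (illustrative; the values are made up) of the channel sequence a
// guest program uses to enqueue a DMA command. Roughly speaking, each guest wrch is
// dispatched to spu_thread::set_ch_value below, and writing MFC_Cmd queues the command:
//
//   spu.set_ch_value(MFC_LSA,   0x1000);      // local storage address
//   spu.set_ch_value(MFC_EAH,   0x0);         // effective address (high 32 bits)
//   spu.set_ch_value(MFC_EAL,   0x30100000);  // effective address (low 32 bits)
//   spu.set_ch_value(MFC_Size,  0x80);        // transfer size in bytes
//   spu.set_ch_value(MFC_TagID, 1);           // tag group (0-31)
//   spu.set_ch_value(MFC_Cmd,   MFC_GET_CMD); // enqueue a GET command (see MFC.h)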

// SPU Events
enum : u32
{
	SPU_EVENT_MS = 0x1000, // Multisource Synchronization event
	SPU_EVENT_A  = 0x800,  // Privileged Attention event
	SPU_EVENT_LR = 0x400,  // Lock Line Reservation Lost event
	SPU_EVENT_S1 = 0x200,  // Signal Notification Register 1 available
	SPU_EVENT_S2 = 0x100,  // Signal Notification Register 2 available
	SPU_EVENT_LE = 0x80,   // SPU Outbound Mailbox available
	SPU_EVENT_ME = 0x40,   // SPU Outbound Interrupt Mailbox available
	SPU_EVENT_TM = 0x20,   // SPU Decrementer became negative (?)
	SPU_EVENT_MB = 0x10,   // SPU Inbound Mailbox available
	SPU_EVENT_QV = 0x8,    // MFC SPU Command Queue available
	SPU_EVENT_SN = 0x2,    // MFC List Command stall-and-notify event
	SPU_EVENT_TG = 0x1,    // MFC Tag Group status update event

	SPU_EVENT_IMPLEMENTED = SPU_EVENT_LR | SPU_EVENT_TM | SPU_EVENT_SN | SPU_EVENT_S1 | SPU_EVENT_S2, // Mask of implemented events
	SPU_EVENT_INTR_IMPLEMENTED = SPU_EVENT_SN,

	SPU_EVENT_INTR_BUSY_CHECK = SPU_EVENT_IMPLEMENTED & ~SPU_EVENT_INTR_IMPLEMENTED,
};
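
// Rough sketch of the usual guest-side event loop (dispatched through
// spu_thread::set_ch_value/get_ch_value below): select events with the mask,
// block on the status channel, then acknowledge what was handled:
//
//   spu.set_ch_value(SPU_WrEventMask, SPU_EVENT_LR); // watch for lost reservations
//   spu.get_ch_value(SPU_RdEventStat);               // blocks until an enabled event is pending
//   spu.set_ch_value(SPU_WrEventAck,  SPU_EVENT_LR); // acknowledge before waiting again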

// SPU Class 0 Interrupts
enum : u64
{
	SPU_INT0_STAT_DMA_ALIGNMENT_INT   = (1ull << 0),
	SPU_INT0_STAT_INVALID_DMA_CMD_INT = (1ull << 1),
	SPU_INT0_STAT_SPU_ERROR_INT       = (1ull << 2),
};

// SPU Class 2 Interrupts
enum : u64
{
	SPU_INT2_STAT_MAILBOX_INT                  = (1ull << 0),
	SPU_INT2_STAT_SPU_STOP_AND_SIGNAL_INT      = (1ull << 1),
	SPU_INT2_STAT_SPU_HALT_OR_STEP_INT         = (1ull << 2),
	SPU_INT2_STAT_DMA_TAG_GROUP_COMPLETION_INT = (1ull << 3),
	SPU_INT2_STAT_SPU_MAILBOX_THRESHOLD_INT    = (1ull << 4),
};

enum : u32
{
	SPU_RUNCNTL_STOP_REQUEST = 0,
	SPU_RUNCNTL_RUN_REQUEST  = 1,
};

// SPU Status Register bits (not accurate)
enum : u32
{
	SPU_STATUS_STOPPED             = 0x0,
	SPU_STATUS_RUNNING             = 0x1,
	SPU_STATUS_STOPPED_BY_STOP     = 0x2,
	SPU_STATUS_STOPPED_BY_HALT     = 0x4,
	SPU_STATUS_WAITING_FOR_CHANNEL = 0x8,
	SPU_STATUS_SINGLE_STEP         = 0x10,
	SPU_STATUS_IS_ISOLATED         = 0x80,
};

enum : s32
{
	SPU_LS_SIZE = 0x40000,
};

enum : u32
{
	SYS_SPU_THREAD_BASE_LOW = 0xf0000000,
	SYS_SPU_THREAD_OFFSET   = 0x100000,
	SYS_SPU_THREAD_SNR1     = 0x5400c,
	SYS_SPU_THREAD_SNR2     = 0x5C00c,
};

enum
{
	MFC_LSA_offs          = 0x3004,
	MFC_EAH_offs          = 0x3008,
	MFC_EAL_offs          = 0x300C,
	MFC_Size_Tag_offs     = 0x3010,
	MFC_Class_CMD_offs    = 0x3014,
	MFC_CMDStatus_offs    = 0x3014,
	MFC_QStatus_offs      = 0x3104,
	Prxy_QueryType_offs   = 0x3204,
	Prxy_QueryMask_offs   = 0x321C,
	Prxy_TagStatus_offs   = 0x322C,
	SPU_Out_MBox_offs     = 0x4004,
	SPU_In_MBox_offs      = 0x400C,
	SPU_MBox_Status_offs  = 0x4014,
	SPU_RunCntl_offs      = 0x401C,
	SPU_Status_offs       = 0x4024,
	SPU_NPC_offs          = 0x4034,
	SPU_RdSigNotify1_offs = 0x1400C,
	SPU_RdSigNotify2_offs = 0x1C00C,
};

enum : u32
{
	RAW_SPU_BASE_ADDR   = 0xE0000000,
	RAW_SPU_OFFSET      = 0x00100000,
	RAW_SPU_LS_OFFSET   = 0x00000000,
	RAW_SPU_PROB_OFFSET = 0x00040000,

	SPU_FAKE_BASE_ADDR  = 0xE8000000,
};
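
// How these constants compose (example arithmetic, not additional definitions):
// the problem state area of Raw SPU 1 starts at
// RAW_SPU_BASE_ADDR + 1 * RAW_SPU_OFFSET + RAW_SPU_PROB_OFFSET, so its SPU_Out_MBox
// register (accessed via read_reg/write_reg below) is mapped at
// 0xE0000000 + 0x100000 + 0x40000 + 0x4004 = 0xE0144004.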

struct spu_channel
{
	// Low 32 bits contain value
	atomic_t<u64> data;

public:
	static const u32 off_wait = 32;
	static const u32 off_count = 63;
	static const u64 bit_wait = 1ull << off_wait;
	static const u64 bit_count = 1ull << off_count;
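
	// Bit layout of 'data': bits [0,31] hold the channel value, bit 32 (bit_wait) is
	// set while a thread is blocked on the channel (see pop_wait/push_wait), and
	// bit 63 (bit_count) is the channel count (1 if a value is present)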

	// Returns true on success
	bool try_push(u32 value)
	{
		return data.fetch_op([value](u64& data)
		{
			if (!(data & bit_count)) [[likely]]
			{
				data = bit_count | value;
				return true;
			}

			return false;
		}).second;
	}

	// Push performing bitwise OR with previous value, may require notification
	bool push_or(u32 value)
	{
		const u64 old = data.fetch_op([value](u64& data)
		{
			data &= ~bit_wait;
			data |= bit_count | value;
		});

		if (old & bit_wait)
		{
			data.notify_one();
		}

		return (old & bit_count) == 0;
	}

	bool push_and(u32 value)
	{
		return (data.fetch_and(~u64{value}) & value) != 0;
	}

	// Push unconditionally (overwriting previous value), may require notification
	bool push(u32 value)
	{
		const u64 old = data.exchange(bit_count | value);

		if (old & bit_wait)
		{
			data.notify_one();
		}

		return (old & bit_count) == 0;
	}

	// Returns true on success
	bool try_pop(u32& out)
	{
		return data.fetch_op([&](u64& data)
		{
			if (data & bit_count) [[likely]]
			{
				out = static_cast<u32>(data);
				data = 0;
				return true;
			}

			return false;
		}).second;
	}

	// Reading without modification
	bool try_read(u32& out) const
	{
		const u64 old = data.load();

		if (old & bit_count) [[likely]]
		{
			out = static_cast<u32>(old);
			return true;
		}

		return false;
	}

	// Pop unconditionally (loading last value), may require notification
	u32 pop()
	{
		// Value is not cleared and may be read again
		const u64 old = data.fetch_and(~(bit_count | bit_wait));

		if (old & bit_wait)
		{
			data.notify_one();
		}

		return static_cast<u32>(old);
	}

	// Waiting for channel pop state availability, actually popping if specified
	s64 pop_wait(cpu_thread& spu, bool pop = true)
	{
		while (true)
		{
			const u64 old = data.fetch_op([&](u64& data)
			{
				if (data & bit_count) [[likely]]
				{
					if (pop)
					{
						data = 0;
						return true;
					}

					return false;
				}

				data = bit_wait;
				return true;
			}).first;

			if (old & bit_count)
			{
				return static_cast<u32>(old);
			}

			if (spu.is_stopped())
			{
				return -1;
			}

			thread_ctrl::wait_on(data, bit_wait);
		}
	}

	// Waiting for channel push state availability, actually pushing if specified
	bool push_wait(cpu_thread& spu, u32 value, bool push = true)
	{
		while (true)
		{
			u64 state;
			data.fetch_op([&](u64& data)
			{
				if (data & bit_count) [[unlikely]]
				{
					data |= bit_wait;
				}
				else if (push)
				{
					data = bit_count | value;
				}
				else
				{
					state = data;
					return false;
				}

				state = data;
				return true;
			});

			if (!(state & bit_wait))
			{
				return true;
			}

			if (spu.is_stopped())
			{
				return false;
			}

			thread_ctrl::wait_on(data, state);
		}
	}

	void set_value(u32 value, bool count = true)
	{
		data.release(u64{count} << off_count | value);
	}

	u32 get_value() const
	{
		return static_cast<u32>(data);
	}

	u32 get_count() const
	{
		return static_cast<u32>(data >> off_count);
	}
};
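
// Minimal usage sketch (illustrative only, not part of the emulator):
//
//   spu_channel ch{};
//   ch.push(42);          // count becomes 1, any previous value is overwritten
//
//   u32 v;
//   if (ch.try_pop(v))    // succeeds: v == 42, count drops back to 0
//   {
//       // consume v
//   }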

struct spu_channel_4_t
{
	struct alignas(16) sync_var_t
	{
		u8 waiting;
		u8 count;
		u8 _pad[2];
		u32 value0;
		u32 value1;
		u32 value2;
	};

	atomic_t<sync_var_t> values;
	atomic_t<u32> value3;

public:
	void clear()
	{
		values.release({});
	}

	// Push unconditionally (overwriting the latest value when the queue is full), notifying the waiting thread if necessary
	void push(cpu_thread& spu, u32 value)
	{
		value3.release(value);

		if (values.atomic_op([value](sync_var_t& data) -> bool
		{
			switch (data.count++)
			{
			case 0: data.value0 = value; break;
			case 1: data.value1 = value; break;
			case 2: data.value2 = value; break;
			default: data.count = 4;
			}

			if (data.waiting)
			{
				data.waiting = 0;
				return true;
			}

			return false;
		}))
		{
			spu.notify();
		}
	}

	// Returns a non-zero value on success: the queue size before removal
	uint try_pop(u32& out)
	{
		return values.atomic_op([&](sync_var_t& data)
		{
			const uint result = data.count;

			if (result != 0)
			{
				data.waiting = 0;
				data.count--;
				out = data.value0;

				data.value0 = data.value1;
				data.value1 = data.value2;
				data.value2 = this->value3;
			}
			else
			{
				data.waiting = 1;
			}

			return result;
		});
	}

	// Returns the current queue size without modification
	uint try_read(u32 (&out)[4]) const
	{
		const sync_var_t data = values.load();
		const uint result = data.count;

		if (result != 0)
		{
			out[0] = data.value0;
			out[1] = data.value1;
			out[2] = data.value2;
			out[3] = value3;
		}

		return result;
	}

	u32 get_count() const
	{
		return std::as_const(values).raw().count;
	}

	void set_values(u32 count, u32 value0, u32 value1 = 0, u32 value2 = 0, u32 value3 = 0)
	{
		this->values.raw() = {0, static_cast<u8>(count), {}, value0, value1, value2};
		this->value3 = value3;
	}
};
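
// This models the 4-deep SPU inbound mailbox (ch_in_mbox below): value0 is the
// oldest entry, while value3 mirrors the most recent push and refills value2 on
// pop. A rough sketch, assuming 'spu' is some cpu_thread:
//
//   spu_channel_4_t mbox{};
//   mbox.push(spu, 1);
//   mbox.push(spu, 2);
//
//   u32 v;
//   if (mbox.try_pop(v)) // returns the previous size (2), v == 1
//   {
//       // ...
//   }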

struct spu_int_ctrl_t
{
	atomic_t<u64> mask;
	atomic_t<u64> stat;

	std::shared_ptr<struct lv2_int_tag> tag;

	void set(u64 ints);

	void clear(u64 ints)
	{
		stat &= ~ints;
	}

	void clear()
	{
		mask.release(0);
		stat.release(0);
		tag.reset();
	}
};

struct spu_imm_table_t
{
	v128 sldq_pshufb[32]; // table for SHLQBYBI, SHLQBY, SHLQBYI instructions
	v128 srdq_pshufb[32]; // table for ROTQMBYBI, ROTQMBY, ROTQMBYI instructions
	v128 rldq_pshufb[16]; // table for ROTQBYBI, ROTQBY, ROTQBYI instructions

	class scale_table_t
	{
		std::array<v128, 155 + 174> m_data;

	public:
		scale_table_t();

		FORCE_INLINE const auto& operator [](s32 scale) const
		{
			return m_data[scale + 155].vf;
		}
	}
	const scale;

	spu_imm_table_t();
};
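
// Note: scale_table_t accepts scale exponents in [-155, 173]; index 0 of m_data
// corresponds to scale -155. An illustrative use, assuming the out-of-line
// constructor fills the table with packed powers of two:
//
//   const auto& m = g_spu_imm.scale[-100]; // four packed floats equal to 2^-100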

extern const spu_imm_table_t g_spu_imm;

enum FPSCR_EX
{
	// Single-precision exceptions
	FPSCR_SOVF  = 1 << 2, // Overflow
	FPSCR_SUNF  = 1 << 1, // Underflow
	FPSCR_SDIFF = 1 << 0, // Different (could be IEEE non-compliant)

	// Double-precision exceptions
	FPSCR_DOVF    = 1 << 13, // Overflow
	FPSCR_DUNF    = 1 << 12, // Underflow
	FPSCR_DINX    = 1 << 11, // Inexact
	FPSCR_DINV    = 1 << 10, // Invalid operation
	FPSCR_DNAN    = 1 << 9,  // NaN
	FPSCR_DDENORM = 1 << 8,  // Denormal
};

// Is 128 bits, but bits 0-19, 24-28, 32-49, 56-60, 64-81, 88-92, 96-115, 120-124 are unused
class SPU_FPSCR
{
public:
	u32 _u32[4];

	SPU_FPSCR() {}

	std::string ToString() const
	{
		return fmt::format("%08x%08x%08x%08x", _u32[3], _u32[2], _u32[1], _u32[0]);
	}

	void Reset()
	{
		memset(this, 0, sizeof(*this));
	}

	// slice -> 0 - 1 (double-precision slice index)
	// NOTE: slices follow v128 indexing, i.e. slice 0 is the RIGHT end of the register!
	// roundTo -> FPSCR_RN_*
	void setSliceRounding(u8 slice, u8 roundTo)
	{
		int shift = 8 + 2 * slice;

		// Rounding is located in the left end of the FPSCR
		this->_u32[3] = (this->_u32[3] & ~(3 << shift)) | (roundTo << shift);
	}

	// Slice 0 or 1
	u8 checkSliceRounding(u8 slice) const
	{
		switch (slice)
		{
		case 0:
			return this->_u32[3] >> 8 & 0x3;

		case 1:
			return this->_u32[3] >> 10 & 0x3;

		default:
			fmt::throw_exception("Unexpected slice value (%d)", slice);
		}
	}

	// Single-precision exception flags (all 4 slices)
	// slice -> slice number (0-3)
	// exception: FPSCR_S* bitmask
	void setSinglePrecisionExceptionFlags(u8 slice, u32 exceptions)
	{
		_u32[slice] |= exceptions;
	}

	// Single-precision divide-by-zero flags (all 4 slices)
	// slice -> slice number (0-3)
	void setDivideByZeroFlag(u8 slice)
	{
		_u32[0] |= 1 << (8 + slice);
	}

	// Double-precision exception flags
	// slice -> slice number (0-1)
	// exception: FPSCR_D* bitmask
	void setDoublePrecisionExceptionFlags(u8 slice, u32 exceptions)
	{
		_u32[1 + slice] |= exceptions;
	}

	// Write the FPSCR
	void Write(const v128& r)
	{
		_u32[3] = r._u32[3] & 0x00000F07;
		_u32[2] = r._u32[2] & 0x00003F07;
		_u32[1] = r._u32[1] & 0x00003F07;
		_u32[0] = r._u32[0] & 0x00000F07;
	}

	// Read the FPSCR
	void Read(v128& r)
	{
		r._u32[3] = _u32[3];
		r._u32[2] = _u32[2];
		r._u32[1] = _u32[1];
		r._u32[0] = _u32[0];
	}
};
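
// Rough example of the rounding helpers above (following the setSliceRounding
// layout: two bits per double-precision slice, starting at bit 8 of word 3):
//
//   SPU_FPSCR fpscr{};
//   fpscr.Reset();
//   fpscr.setSliceRounding(1, 2);            // set slice 1 rounding mode to 2
//   u8 mode = fpscr.checkSliceRounding(1);   // mode == 2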

enum class spu_type : u32
{
	threaded,
	raw,
	isolated,
};

class spu_thread : public cpu_thread
{
public:
	virtual std::string dump_regs() const override;
	virtual std::string dump_callstack() const override;
	virtual std::vector<std::pair<u32, u32>> dump_callstack_list() const override;
	virtual std::string dump_misc() const override;
	virtual void cpu_task() override final;
	virtual void cpu_return() override;
	virtual void cpu_work() override;
	virtual ~spu_thread() override;
	void cleanup();
	void cpu_init();

	static const u32 id_base = 0x02000000; // TODO (used to determine thread type)
	static const u32 id_step = 1;
	static const u32 id_count = (0xFFFC0000 - SPU_FAKE_BASE_ADDR) / SPU_LS_SIZE;

	spu_thread(lv2_spu_group* group, u32 index, std::string_view name, u32 lv2_id, bool is_isolated = false, u32 option = 0);

	spu_thread(const spu_thread&) = delete;
	spu_thread& operator=(const spu_thread&) = delete;

	using cpu_thread::operator=;

	u32 pc = 0;
	u32 dbg_step_pc = 0;

	// May be used internally by recompilers.
	u32 base_pc = 0;

	// May be used by recompilers.
	u8* memory_base_addr = vm::g_base_addr;

	// General-Purpose Registers
	std::array<v128, 128> gpr;
	SPU_FPSCR fpscr;

	// MFC command data
	spu_mfc_cmd ch_mfc_cmd;

	// MFC command queue
	spu_mfc_cmd mfc_queue[16]{};
	u32 mfc_size = 0;
	u32 mfc_barrier = -1;
	u32 mfc_fence = -1;

	// Timestamp of the first postponed command (transfers shuffling related)
	u64 mfc_last_timestamp = 0;

	// MFC proxy command data
	spu_mfc_cmd mfc_prxy_cmd;
	shared_mutex mfc_prxy_mtx;
	atomic_t<u32> mfc_prxy_mask;

	// Tracks writes to MFC proxy command data
	union
	{
		u8 all;
		bf_t<u8, 0, 1> lsa;
		bf_t<u8, 1, 1> eal;
		bf_t<u8, 2, 1> eah;
		bf_t<u8, 3, 1> tag_size;
		bf_t<u8, 4, 1> cmd;
	} mfc_prxy_write_state{};

	// Reservation Data
	u64 rtime = 0;
	alignas(64) std::byte rdata[128]{};
	u32 raddr = 0;

	// Range Lock pointer
	atomic_t<u64, 64>* range_lock{};

	u32 srr0;
	u32 ch_tag_upd;
	u32 ch_tag_mask;
	spu_channel ch_tag_stat;
	u32 ch_stall_mask;
	spu_channel ch_stall_stat;
	spu_channel ch_atomic_stat;

	spu_channel_4_t ch_in_mbox{};

	spu_channel ch_out_mbox;
	spu_channel ch_out_intr_mbox;

	u64 snr_config = 0; // SPU SNR Config Register

	spu_channel ch_snr1{}; // SPU Signal Notification Register 1
	spu_channel ch_snr2{}; // SPU Signal Notification Register 2

	union ch_events_t
	{
		u64 all;
		bf_t<u64, 0, 16> events;
		bf_t<u64, 16, 8> locks;
		bf_t<u64, 30, 1> waiting;
		bf_t<u64, 31, 1> count;
		bf_t<u64, 32, 32> mask;
	};

	atomic_t<ch_events_t> ch_events;
	bool interrupts_enabled;

	u64 ch_dec_start_timestamp; // timestamp of writing decrementer value
	u32 ch_dec_value; // written decrementer value

	atomic_t<u32> run_ctrl; // SPU Run Control register (only provided to get latest data written)
	shared_mutex run_ctrl_mtx;

	struct alignas(8) status_npc_sync_var
	{
		u32 status; // SPU Status register
		u32 npc; // SPU Next Program Counter register
	};

	atomic_t<status_npc_sync_var> status_npc;
	std::array<spu_int_ctrl_t, 3> int_ctrl; // SPU Class 0, 1, 2 Interrupt Management

	std::array<std::pair<u32, std::shared_ptr<lv2_event_queue>>, 32> spuq; // Event Queue Keys for SPU Thread
	std::shared_ptr<lv2_event_queue> spup[64]; // SPU Ports

	spu_channel exit_status{}; // Threaded SPU exit status (not a channel, but the interface fits)
	atomic_t<u32> last_exit_status; // Value to be written in exit_status after checking group termination

	lv2_spu_group* const group; // SPU Thread Group (accessed by the SPU threads in the group only! From other threads, obtain a shared pointer to the group using its group ID)
	const u32 index; // SPU index
	std::shared_ptr<utils::shm> shm; // SPU memory
	const std::add_pointer_t<u8> ls; // SPU LS pointer
	const spu_type thread_type;
	const u32 option; // sys_spu_thread_initialize option
	const u32 lv2_id; // The actual id that is used by syscalls

	// Thread name
	atomic_ptr<std::string> spu_tname;

	std::unique_ptr<class spu_recompiler_base> jit; // Recompiler instance

	u64 block_counter = 0;
	u64 block_recover = 0;
	u64 block_failure = 0;

	u64 saved_native_sp = 0; // Host thread's stack pointer for emulated longjmp

	u64 ftx = 0; // Failed transactions
	u64 stx = 0; // Succeeded transactions (pure counters)

	u64 last_ftsc = 0;
	u64 last_ftime = 0;
	u32 last_faddr = 0;
	u64 last_fail = 0;
	u64 last_succ = 0;

	std::vector<mfc_cmd_dump> mfc_history;
	u64 mfc_dump_idx = 0;
	static constexpr u32 max_mfc_dump_idx = 2048;

	bool in_cpu_work = false;
	bool allow_interrupts_in_cpu_work = false;
	u8 cpu_work_iteration_count = 0;

	std::array<v128, 0x4000> stack_mirror; // Return address information

	const char* current_func{}; // Current STOP or RDCH blocking function
	u64 start_time{}; // Starting time of STOP or RDCH blocking function

	atomic_t<u8> debugger_float_mode = 0;

	void push_snr(u32 number, u32 value);

	static void do_dma_transfer(spu_thread* _this, const spu_mfc_cmd& args, u8* ls);
	bool do_dma_check(const spu_mfc_cmd& args);
	bool do_list_transfer(spu_mfc_cmd& args);
	void do_putlluc(const spu_mfc_cmd& args);
	bool do_putllc(const spu_mfc_cmd& args);
	bool do_mfc(bool can_escape = true, bool must_finish = true);
	u32 get_mfc_completed() const;

	bool process_mfc_cmd();
	ch_events_t get_events(u32 mask_hint = -1, bool waiting = false, bool reading = false);
	void set_events(u32 bits);
	void set_interrupt_status(bool enable);
	bool check_mfc_interrupts(u32 next_pc);
	bool is_exec_code(u32 addr) const; // Only a hint, do not rely on it other than for debugging purposes
	u32 get_ch_count(u32 ch);
	s64 get_ch_value(u32 ch);
	bool set_ch_value(u32 ch, u32 value);
	bool stop_and_signal(u32 code);
	void halt();

	void fast_call(u32 ls_addr);

	bool capture_local_storage() const;

	// Convert specified SPU LS address to a pointer of specified (possibly converted to BE) type
	template <typename T>
	to_be_t<T>* _ptr(u32 lsa) const
	{
		return reinterpret_cast<to_be_t<T>*>(ls + (lsa % SPU_LS_SIZE));
	}

	// Convert specified SPU LS address to a reference of specified (possibly converted to BE) type
	template <typename T>
	to_be_t<T>& _ref(u32 lsa) const
	{
		return *_pt<T>(lsa);
	}
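
	// Usage sketch (illustrative; the addresses are arbitrary): LS accesses go
	// through these helpers so scalar loads/stores are byte-swapped to/from
	// big-endian automatically by to_be_t:
	//
	//   const u32 insn = _ref<u32>(pc);     // read a BE instruction word at 'pc'
	//   _ref<u32>(0x3fff0) = 0xDEADBEEF;    // store a BE u32 near the top of LS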

	spu_type get_type() const
	{
		return thread_type;
	}

	u32 vm_offset() const
	{
		return group ? SPU_FAKE_BASE_ADDR + SPU_LS_SIZE * (id & 0xffffff) : RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index;
	}
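
	// Example of the mapping above: a Raw SPU with index == 2 has its LS base at
	// 0xE0000000 + 0x100000 * 2 = 0xE0200000, while threaded SPUs are placed in a
	// "fake" region starting at SPU_FAKE_BASE_ADDR (0xE8000000), spaced by
	// SPU_LS_SIZE (256 KiB) according to the low bits of the thread id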

	// Returns true if reservation existed but was just discovered to be lost
	// It is safe to use on any address, even if not directly accessed by SPU (so it's slower)
	bool reservation_check(u32 addr, const decltype(rdata)& data) const;

	bool read_reg(const u32 addr, u32& value);
	bool write_reg(const u32 addr, const u32 value);

	static atomic_t<u32> g_raw_spu_ctr;
	static atomic_t<u32> g_raw_spu_id[5];

	static u32 find_raw_spu(u32 id)
	{
		if (id < std::size(g_raw_spu_id)) [[likely]]
		{
			return g_raw_spu_id[id];
		}

		return -1;
	}

	// For named_thread ctor
	const struct thread_name_t
	{
		const spu_thread* _this;

		operator std::string() const;
	} thread_name{this};

	// For lv2_obj::schedule<spu_thread>
	const struct priority_t
	{
		const spu_thread* _this;

		operator s32() const;
	} prio{this};
};

class spu_function_logger
{
	spu_thread& spu;

public:
	spu_function_logger(spu_thread& spu, const char* func);

	~spu_function_logger()
	{
		spu.start_time = 0;
	}
};
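
// Hypothetical usage sketch (illustrative only; the concrete call sites live in
// the implementation, not this header): construct one at the start of a blocking
// STOP/RDCH path so the debugger can report which function the thread is stuck in;
// the destructor clears start_time when the call returns.
//
//   bool spu_thread::stop_and_signal(u32 code)
//   {
//       spu_function_logger logger(*this, "sys_spu_thread_receive_event");
//       // ... blocking work ...
//   }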