vm::var improved, cleanup

Mostly introduces vm::var initialization.
Added vm::make_var function.
Nekotekina 2015-09-26 23:46:04 +03:00
parent cc02a147d3
commit a974ee009e
116 changed files with 2763 additions and 3019 deletions
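Note on the changes below: the edits in this file are mechanical accessor renames. Typed guest-memory access moves from vm::get_ptr<T>() to vm::_ptr<T>(), untyped access used for memcpy/memset goes through vm::base() instead of vm::get_ptr(), memcpy/memset gain the std:: qualifier, and the privileged accessor priv_ptr() becomes get_ptr_priv(). The vm::var initialization and vm::make_var additions named in the commit message are not visible in this particular diff. The snippet below is only an illustrative sketch of the call-site pattern; the vm stubs, SomeCtx type, and example() function are hypothetical stand-ins, not code from the repository.

#include <cstdint>
#include <cstring>

// Hypothetical stand-in for the emulator's guest-memory accessors, kept only so
// this sketch compiles on its own; the real declarations live in the vm headers.
namespace vm
{
	inline unsigned char g_mem[0x10000]; // fake guest memory, for the example only

	inline void* base(std::uint32_t addr) { return g_mem + addr; }  // raw access (new name)
	template<typename T> T* _ptr(std::uint32_t addr) { return reinterpret_cast<T*>(g_mem + addr); } // typed access (new name)
}

struct SomeCtx { std::uint32_t value; }; // hypothetical guest-side structure

void example(std::uint32_t offset)
{
	// Before this commit: auto ctxt = vm::get_ptr<SomeCtx>(offset + 0x100);
	//                     memcpy(vm::get_ptr(offset + 0x200), ctxt, sizeof(*ctxt));
	// After this commit, matching the pattern in the diff below:
	auto ctxt = vm::_ptr<SomeCtx>(offset + 0x100);                // typed accessor renamed to _ptr
	ctxt->value = 1;
	std::memcpy(vm::base(offset + 0x200), ctxt, sizeof(*ctxt));   // raw copies go through vm::base
}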


@@ -88,7 +88,7 @@ void cellSpursModulePutTrace(CellSpursTracePacket * packet, u32 dmaTagId) {
/// Check for execution right requests
u32 cellSpursModulePollStatus(SPUThread & spu, u32 * status) {
- auto ctxt = vm::get_ptr<SpursKernelContext>(spu.offset + 0x100);
+ auto ctxt = vm::_ptr<SpursKernelContext>(spu.offset + 0x100);
spu.gpr[3]._u32[3] = 1;
if (ctxt->spurs->flags1 & SF1_32_WORKLOADS) {
@@ -108,7 +108,7 @@ u32 cellSpursModulePollStatus(SPUThread & spu, u32 * status) {
/// Exit current workload
void cellSpursModuleExit(SPUThread & spu) {
- auto ctxt = vm::get_ptr<SpursKernelContext>(spu.offset + 0x100);
+ auto ctxt = vm::_ptr<SpursKernelContext>(spu.offset + 0x100);
spu.pc = ctxt->exitToKernelAddr - 4;
throw SpursModuleExit();
}
@@ -159,7 +159,7 @@ void spursHalt(SPUThread & spu) {
/// Select a workload to run
bool spursKernel1SelectWorkload(SPUThread & spu) {
- auto ctxt = vm::get_ptr<SpursKernelContext>(spu.offset + 0x100);
+ auto ctxt = vm::_ptr<SpursKernelContext>(spu.offset + 0x100);
// The first and only argument to this function is a boolean that is set to false if the function
// is called by the SPURS kernel and set to true if called by cellSpursModulePollStatus.
@@ -171,7 +171,7 @@ bool spursKernel1SelectWorkload(SPUThread & spu) {
vm::reservation_op(VM_CAST(ctxt->spurs.addr()), 128, [&]() {
// lock the first 0x80 bytes of spurs
- auto spurs = ctxt->spurs.priv_ptr();
+ auto spurs = ctxt->spurs.get_ptr_priv();
// Calculate the contention (number of SPUs used) for each workload
u8 contention[CELL_SPURS_MAX_WORKLOAD];
@@ -305,7 +305,7 @@ bool spursKernel1SelectWorkload(SPUThread & spu) {
}
}
- memcpy(vm::get_ptr(spu.offset + 0x100), spurs, 128);
+ std::memcpy(vm::base(spu.offset + 0x100), spurs, 128);
});
u64 result = (u64)wklSelectedId << 32;
@@ -316,7 +316,7 @@ bool spursKernel1SelectWorkload(SPUThread & spu) {
/// Select a workload to run
bool spursKernel2SelectWorkload(SPUThread & spu) {
- auto ctxt = vm::get_ptr<SpursKernelContext>(spu.offset + 0x100);
+ auto ctxt = vm::_ptr<SpursKernelContext>(spu.offset + 0x100);
// The first and only argument to this function is a boolean that is set to false if the function
// is called by the SPURS kernel and set to true if called by cellSpursModulePollStatus.
@@ -328,7 +328,7 @@ bool spursKernel2SelectWorkload(SPUThread & spu) {
vm::reservation_op(VM_CAST(ctxt->spurs.addr()), 128, [&]() {
// lock the first 0x80 bytes of spurs
- auto spurs = ctxt->spurs.priv_ptr();
+ auto spurs = ctxt->spurs.get_ptr_priv();
// Calculate the contention (number of SPUs used) for each workload
u8 contention[CELL_SPURS_MAX_WORKLOAD2];
@@ -452,7 +452,7 @@ bool spursKernel2SelectWorkload(SPUThread & spu) {
}
}
- memcpy(vm::get_ptr(spu.offset + 0x100), spurs, 128);
+ std::memcpy(vm::base(spu.offset + 0x100), spurs, 128);
});
u64 result = (u64)wklSelectedId << 32;
@@ -463,7 +463,7 @@ bool spursKernel2SelectWorkload(SPUThread & spu) {
/// SPURS kernel dispatch workload
void spursKernelDispatchWorkload(SPUThread & spu, u64 widAndPollStatus) {
- auto ctxt = vm::get_ptr<SpursKernelContext>(spu.offset + 0x100);
+ auto ctxt = vm::_ptr<SpursKernelContext>(spu.offset + 0x100);
auto isKernel2 = ctxt->spurs->flags1 & SF1_32_WORKLOADS ? true : false;
auto pollStatus = (u32)widAndPollStatus;
@@ -474,12 +474,12 @@ void spursKernelDispatchWorkload(SPUThread & spu, u64 widAndPollStatus) {
wid < CELL_SPURS_MAX_WORKLOAD2 && isKernel2 ? &ctxt->spurs->wklInfo2[wid & 0xf] :
&ctxt->spurs->wklInfoSysSrv;
- memcpy(vm::get_ptr(spu.offset + 0x3FFE0), wklInfoOffset, 0x20);
+ std::memcpy(vm::base(spu.offset + 0x3FFE0), wklInfoOffset, 0x20);
// Load the workload to LS
- auto wklInfo = vm::get_ptr<CellSpurs::WorkloadInfo>(spu.offset + 0x3FFE0);
+ auto wklInfo = vm::_ptr<CellSpurs::WorkloadInfo>(spu.offset + 0x3FFE0);
if (ctxt->wklCurrentAddr != wklInfo->addr) {
- switch (wklInfo->addr.addr().value()) {
+ switch (wklInfo->addr.addr()) {
case SPURS_IMG_ADDR_SYS_SRV_WORKLOAD:
spu.RegisterHleFunction(0xA00, spursSysServiceEntry);
break;
@@ -487,7 +487,7 @@ void spursKernelDispatchWorkload(SPUThread & spu, u64 widAndPollStatus) {
spu.RegisterHleFunction(0xA00, spursTasksetEntry);
break;
default:
- memcpy(vm::get_ptr(spu.offset + 0xA00), wklInfo->addr.get_ptr(), wklInfo->size);
+ std::memcpy(vm::base(spu.offset + 0xA00), wklInfo->addr.get_ptr(), wklInfo->size);
break;
}
@@ -511,7 +511,7 @@ void spursKernelDispatchWorkload(SPUThread & spu, u64 widAndPollStatus) {
/// SPURS kernel workload exit
bool spursKernelWorkloadExit(SPUThread & spu) {
- auto ctxt = vm::get_ptr<SpursKernelContext>(spu.offset + 0x100);
+ auto ctxt = vm::_ptr<SpursKernelContext>(spu.offset + 0x100);
auto isKernel2 = ctxt->spurs->flags1 & SF1_32_WORKLOADS ? true : false;
// Select next workload to run
@@ -533,7 +533,7 @@ bool spursKernelEntry(SPUThread & spu) {
CHECK_EMU_STATUS;
}
- auto ctxt = vm::get_ptr<SpursKernelContext>(spu.offset + 0x100);
+ auto ctxt = vm::_ptr<SpursKernelContext>(spu.offset + 0x100);
memset(ctxt, 0, sizeof(SpursKernelContext));
// Save arguments
@@ -579,7 +579,7 @@ bool spursKernelEntry(SPUThread & spu) {
/// Entry point of the system service
bool spursSysServiceEntry(SPUThread & spu) {
- auto ctxt = vm::get_ptr<SpursKernelContext>(spu.offset + spu.gpr[3]._u32[3]);
+ auto ctxt = vm::_ptr<SpursKernelContext>(spu.offset + spu.gpr[3]._u32[3]);
auto arg = spu.gpr[4]._u64[1];
auto pollStatus = spu.gpr[5]._u32[3];
@@ -607,8 +607,8 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) {
std::unique_lock<std::mutex> lock(spu.mutex, std::defer_lock);
while (true) {
- vm::reservation_acquire(vm::get_ptr(spu.offset + 0x100), VM_CAST(ctxt->spurs.addr()), 128);
- auto spurs = vm::get_ptr<CellSpurs>(spu.offset + 0x100);
+ vm::reservation_acquire(vm::base(spu.offset + 0x100), VM_CAST(ctxt->spurs.addr()), 128);
+ auto spurs = vm::_ptr<CellSpurs>(spu.offset + 0x100);
// Find the number of SPUs that are idling in this SPURS instance
u32 nIdlingSpus = 0;
@@ -680,7 +680,7 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) {
continue;
}
- if (vm::reservation_update(VM_CAST(ctxt->spurs.addr()), vm::get_ptr(spu.offset + 0x100), 128) && (shouldExit || foundReadyWorkload)) {
+ if (vm::reservation_update(VM_CAST(ctxt->spurs.addr()), vm::base(spu.offset + 0x100), 128) && (shouldExit || foundReadyWorkload)) {
break;
}
}
@@ -692,7 +692,7 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) {
/// Main function for the system service
void spursSysServiceMain(SPUThread & spu, u32 pollStatus) {
- auto ctxt = vm::get_ptr<SpursKernelContext>(spu.offset + 0x100);
+ auto ctxt = vm::_ptr<SpursKernelContext>(spu.offset + 0x100);
if (!ctxt->spurs.aligned()) {
assert(!"spursSysServiceMain(): invalid spurs alignment");
@@ -703,10 +703,10 @@ void spursSysServiceMain(SPUThread & spu, u32 pollStatus) {
if (ctxt->sysSrvInitialised == 0) {
ctxt->sysSrvInitialised = 1;
- vm::reservation_acquire(vm::get_ptr(spu.offset + 0x100), VM_CAST(ctxt->spurs.addr()), 128);
+ vm::reservation_acquire(vm::base(spu.offset + 0x100), VM_CAST(ctxt->spurs.addr()), 128);
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
- auto spurs = ctxt->spurs.priv_ptr();
+ auto spurs = ctxt->spurs.get_ptr_priv();
// Halt if already initialised
if (spurs->sysSrvOnSpu & (1 << ctxt->spuNum)) {
@@ -716,7 +716,7 @@ void spursSysServiceMain(SPUThread & spu, u32 pollStatus) {
spurs->sysSrvOnSpu |= 1 << ctxt->spuNum;
- memcpy(vm::get_ptr(spu.offset + 0x2D80), spurs->wklState1, 128);
+ std::memcpy(vm::base(spu.offset + 0x2D80), spurs->wklState1, 128);
});
ctxt->traceBuffer = 0;
@@ -796,7 +796,7 @@ void spursSysServiceProcessRequests(SPUThread & spu, SpursKernelContext * ctxt)
bool terminate = false;
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
- auto spurs = ctxt->spurs.priv_ptr();
+ auto spurs = ctxt->spurs.get_ptr_priv();
// Terminate request
if (spurs->sysSrvMsgTerminate & (1 << ctxt->spuNum)) {
@@ -815,7 +815,7 @@ void spursSysServiceProcessRequests(SPUThread & spu, SpursKernelContext * ctxt)
updateTrace = true;
}
- memcpy(vm::get_ptr(spu.offset + 0x2D80), spurs->wklState1, 128);
+ std::memcpy(vm::base(spu.offset + 0x2D80), spurs->wklState1, 128);
});
// Process update workload message
@@ -836,24 +836,24 @@ void spursSysServiceProcessRequests(SPUThread & spu, SpursKernelContext * ctxt)
/// Activate a workload
void spursSysServiceActivateWorkload(SPUThread & spu, SpursKernelContext * ctxt) {
- auto spurs = vm::get_ptr<CellSpurs>(spu.offset + 0x100);
- memcpy(vm::get_ptr(spu.offset + 0x30000), vm::get_ptr(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklInfo1))), 0x200);
+ auto spurs = vm::_ptr<CellSpurs>(spu.offset + 0x100);
+ std::memcpy(vm::base(spu.offset + 0x30000), ctxt->spurs->wklInfo1, 0x200);
if (spurs->flags1 & SF1_32_WORKLOADS) {
- memcpy(vm::get_ptr(spu.offset + 0x30200), vm::get_ptr(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklInfo2))), 0x200);
+ std::memcpy(vm::base(spu.offset + 0x30200), ctxt->spurs->wklInfo2, 0x200);
}
u32 wklShutdownBitSet = 0;
ctxt->wklRunnable1 = 0;
ctxt->wklRunnable2 = 0;
for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++) {
- auto wklInfo1 = vm::get_ptr<CellSpurs::WorkloadInfo>(spu.offset + 0x30000);
+ auto wklInfo1 = vm::_ptr<CellSpurs::WorkloadInfo>(spu.offset + 0x30000);
// Copy the priority of the workload for this SPU and its unique id to the LS
ctxt->priority[i] = wklInfo1[i].priority[ctxt->spuNum] == 0 ? 0 : 0x10 - wklInfo1[i].priority[ctxt->spuNum];
ctxt->wklUniqueId[i] = wklInfo1[i].uniqueId;
if (spurs->flags1 & SF1_32_WORKLOADS) {
- auto wklInfo2 = vm::get_ptr<CellSpurs::WorkloadInfo>(spu.offset + 0x30200);
+ auto wklInfo2 = vm::_ptr<CellSpurs::WorkloadInfo>(spu.offset + 0x30200);
// Copy the priority of the workload for this SPU to the LS
if (wklInfo2[i].priority[ctxt->spuNum]) {
@@ -863,7 +863,7 @@ void spursSysServiceActivateWorkload(SPUThread & spu, SpursKernelContext * ctxt)
}
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
- auto spurs = ctxt->spurs.priv_ptr();
+ auto spurs = ctxt->spurs.get_ptr_priv();
for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++) {
// Update workload status and runnable flag based on the workload state
@@ -905,7 +905,7 @@ void spursSysServiceActivateWorkload(SPUThread & spu, SpursKernelContext * ctxt)
}
}
- memcpy(vm::get_ptr(spu.offset + 0x2D80), spurs->wklState1, 128);
+ std::memcpy(vm::base(spu.offset + 0x2D80), spurs->wklState1, 128);
});
if (wklShutdownBitSet) {
@@ -920,7 +920,7 @@ void spursSysServiceUpdateShutdownCompletionEvents(SPUThread & spu, SpursKernelC
u32 wklNotifyBitSet;
u8 spuPort;
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
- auto spurs = ctxt->spurs.priv_ptr();
+ auto spurs = ctxt->spurs.get_ptr_priv();
wklNotifyBitSet = 0;
spuPort = spurs->spuPort;;
@@ -940,7 +940,7 @@ void spursSysServiceUpdateShutdownCompletionEvents(SPUThread & spu, SpursKernelC
}
}
- memcpy(vm::get_ptr(spu.offset + 0x2D80), spurs->wklState1, 128);
+ std::memcpy(vm::base(spu.offset + 0x2D80), spurs->wklState1, 128);
});
if (wklNotifyBitSet) {
@@ -962,7 +962,7 @@ void spursSysServiceTraceUpdate(SPUThread & spu, SpursKernelContext * ctxt, u32
u8 sysSrvMsgUpdateTrace;
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
- auto spurs = ctxt->spurs.priv_ptr();
+ auto spurs = ctxt->spurs.get_ptr_priv();
auto& trace = spurs->sysSrvTrace.raw();
sysSrvMsgUpdateTrace = trace.sysSrvMsgUpdateTrace;
@@ -981,19 +981,19 @@ void spursSysServiceTraceUpdate(SPUThread & spu, SpursKernelContext * ctxt, u32
notify = true;
}
- memcpy(vm::get_ptr(spu.offset + 0x2D80), spurs->wklState1, 128);
+ std::memcpy(vm::base(spu.offset + 0x2D80), spurs->wklState1, 128);
});
// Get trace parameters from CellSpurs and store them in the LS
if (((sysSrvMsgUpdateTrace & (1 << ctxt->spuNum)) != 0) || (arg3 != 0)) {
- vm::reservation_acquire(vm::get_ptr(spu.offset + 0x80), VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, traceBuffer)), 128);
- auto spurs = vm::get_ptr<CellSpurs>(spu.offset + 0x80 - offsetof(CellSpurs, traceBuffer));
+ vm::reservation_acquire(vm::base(spu.offset + 0x80), VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, traceBuffer)), 128);
+ auto spurs = vm::_ptr<CellSpurs>(spu.offset + 0x80 - offsetof(CellSpurs, traceBuffer));
if (ctxt->traceMsgCount != 0xFF || spurs->traceBuffer.addr() == 0) {
spursSysServiceTraceSaveCount(spu, ctxt);
} else {
- memcpy(vm::get_ptr(spu.offset + 0x2C00), vm::get_ptr(spurs->traceBuffer.addr() & -0x4), 0x80);
- auto traceBuffer = vm::get_ptr<CellSpursTraceInfo>(spu.offset + 0x2C00);
+ std::memcpy(vm::base(spu.offset + 0x2C00), vm::base(spurs->traceBuffer.addr() & -0x4), 0x80);
+ auto traceBuffer = vm::_ptr<CellSpursTraceInfo>(spu.offset + 0x2C00);
ctxt->traceMsgCount = traceBuffer->count[ctxt->spuNum];
}
@@ -1005,7 +1005,7 @@ void spursSysServiceTraceUpdate(SPUThread & spu, SpursKernelContext * ctxt, u32
}
if (notify) {
- auto spurs = vm::get_ptr<CellSpurs>(spu.offset + 0x2D80 - offsetof(CellSpurs, wklState1));
+ auto spurs = vm::_ptr<CellSpurs>(spu.offset + 0x2D80 - offsetof(CellSpurs, wklState1));
sys_spu_thread_send_event(spu, spurs->spuPort, 2, 0);
}
}
@@ -1017,7 +1017,7 @@ void spursSysServiceCleanupAfterSystemWorkload(SPUThread & spu, SpursKernelConte
bool do_return = false;
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
- auto spurs = ctxt->spurs.priv_ptr();
+ auto spurs = ctxt->spurs.get_ptr_priv();
if (spurs->sysSrvPreemptWklId[ctxt->spuNum] == 0xFF) {
do_return = true;
@@ -1027,7 +1027,7 @@ void spursSysServiceCleanupAfterSystemWorkload(SPUThread & spu, SpursKernelConte
wklId = spurs->sysSrvPreemptWklId[ctxt->spuNum];
spurs->sysSrvPreemptWklId[ctxt->spuNum] = 0xFF;
- memcpy(vm::get_ptr(spu.offset + 0x2D80), spurs->wklState1, 128);
+ std::memcpy(vm::base(spu.offset + 0x2D80), spurs->wklState1, 128);
});
if (do_return) return;
@@ -1035,7 +1035,7 @@ void spursSysServiceCleanupAfterSystemWorkload(SPUThread & spu, SpursKernelConte
spursSysServiceActivateWorkload(spu, ctxt);
vm::reservation_op(VM_CAST(ctxt->spurs.addr()), 128, [&]() {
- auto spurs = ctxt->spurs.priv_ptr();
+ auto spurs = ctxt->spurs.get_ptr_priv();
if (wklId >= CELL_SPURS_MAX_WORKLOAD) {
spurs->wklCurrentContention[wklId & 0x0F] -= 0x10;
@@ -1045,7 +1045,7 @@ void spursSysServiceCleanupAfterSystemWorkload(SPUThread & spu, SpursKernelConte
spurs->wklIdleSpuCountOrReadyCount2[wklId & 0x0F].raw() -= 1;
}
- memcpy(vm::get_ptr(spu.offset + 0x100), spurs, 128);
+ std::memcpy(vm::base(spu.offset + 0x100), spurs, 128);
});
// Set the current workload id to the id of the pre-empted workload since cellSpursModulePutTrace
@@ -1080,8 +1080,8 @@ enum SpursTasksetRequest {
/// Taskset PM entry point
bool spursTasksetEntry(SPUThread & spu) {
- auto ctxt = vm::get_ptr<SpursTasksetContext>(spu.offset + 0x2700);
- auto kernelCtxt = vm::get_ptr<SpursKernelContext>(spu.offset + spu.gpr[3]._u32[3]);
+ auto ctxt = vm::_ptr<SpursTasksetContext>(spu.offset + 0x2700);
+ auto kernelCtxt = vm::_ptr<SpursKernelContext>(spu.offset + spu.gpr[3]._u32[3]);
auto arg = spu.gpr[4]._u64[1];
auto pollStatus = spu.gpr[5]._u32[3];
@@ -1117,7 +1117,7 @@ bool spursTasksetEntry(SPUThread & spu) {
/// Entry point into the Taskset PM for task syscalls
bool spursTasksetSyscallEntry(SPUThread & spu) {
- auto ctxt = vm::get_ptr<SpursTasksetContext>(spu.offset + 0x2700);
+ auto ctxt = vm::_ptr<SpursTasksetContext>(spu.offset + 0x2700);
try {
// Save task context
@@ -1145,7 +1145,7 @@ bool spursTasksetSyscallEntry(SPUThread & spu) {
/// Resume a task
void spursTasksetResumeTask(SPUThread & spu) {
- auto ctxt = vm::get_ptr<SpursTasksetContext>(spu.offset + 0x2700);
+ auto ctxt = vm::_ptr<SpursTasksetContext>(spu.offset + 0x2700);
// Restore task context
spu.gpr[0] = ctxt->savedContextLr;
@@ -1159,8 +1159,8 @@ void spursTasksetResumeTask(SPUThread & spu) {
/// Start a task
void spursTasksetStartTask(SPUThread & spu, CellSpursTaskArgument & taskArgs) {
- auto ctxt = vm::get_ptr<SpursTasksetContext>(spu.offset + 0x2700);
- auto taskset = vm::get_ptr<CellSpursTaskset>(spu.offset + 0x2700);
+ auto ctxt = vm::_ptr<SpursTasksetContext>(spu.offset + 0x2700);
+ auto taskset = vm::_ptr<CellSpursTaskset>(spu.offset + 0x2700);
spu.gpr[2].clear();
spu.gpr[3] = v128::from64r(taskArgs._u64[0], taskArgs._u64[1]);
@@ -1175,13 +1175,13 @@ void spursTasksetStartTask(SPUThread & spu, CellSpursTaskArgument & taskArgs) {
/// Process a request and update the state of the taskset
s32 spursTasksetProcessRequest(SPUThread & spu, s32 request, u32 * taskId, u32 * isWaiting) {
- auto kernelCtxt = vm::get_ptr<SpursKernelContext>(spu.offset + 0x100);
- auto ctxt = vm::get_ptr<SpursTasksetContext>(spu.offset + 0x2700);
+ auto kernelCtxt = vm::_ptr<SpursKernelContext>(spu.offset + 0x100);
+ auto ctxt = vm::_ptr<SpursTasksetContext>(spu.offset + 0x2700);
s32 rc = CELL_OK;
s32 numNewlyReadyTasks;
vm::reservation_op(VM_CAST(ctxt->taskset.addr()), 128, [&]() {
- auto taskset = ctxt->taskset.priv_ptr();
+ auto taskset = ctxt->taskset.get_ptr_priv();
// Verify taskset state is valid
be_t<v128> _0(v128::from32(0));
@@ -1315,12 +1315,12 @@ s32 spursTasksetProcessRequest(SPUThread & spu, s32 request, u32 * taskId, u32 *
taskset->signalled = signalled;
taskset->ready = ready;
- memcpy(vm::get_ptr(spu.offset + 0x2700), taskset, 128);
+ std::memcpy(vm::base(spu.offset + 0x2700), taskset, 128);
});
// Increment the ready count of the workload by the number of tasks that have become ready
vm::reservation_op(VM_CAST(kernelCtxt->spurs.addr()), 128, [&]() {
- auto spurs = kernelCtxt->spurs.priv_ptr();
+ auto spurs = kernelCtxt->spurs.get_ptr_priv();
s32 readyCount = kernelCtxt->wklCurrentId < CELL_SPURS_MAX_WORKLOAD ? spurs->wklReadyCount1[kernelCtxt->wklCurrentId].load() : spurs->wklIdleSpuCountOrReadyCount2[kernelCtxt->wklCurrentId & 0x0F].load();
readyCount += numNewlyReadyTasks;
@@ -1332,7 +1332,7 @@ s32 spursTasksetProcessRequest(SPUThread & spu, s32 request, u32 * taskId, u32 *
spurs->wklIdleSpuCountOrReadyCount2[kernelCtxt->wklCurrentId & 0x0F] = readyCount;
}
- memcpy(vm::get_ptr(spu.offset + 0x100), spurs, 128);
+ std::memcpy(vm::base(spu.offset + 0x100), spurs, 128);
});
return rc;
@@ -1359,7 +1359,7 @@ bool spursTasksetPollStatus(SPUThread & spu) {
/// Exit the Taskset PM
void spursTasksetExit(SPUThread & spu) {
- auto ctxt = vm::get_ptr<SpursTasksetContext>(spu.offset + 0x2700);
+ auto ctxt = vm::_ptr<SpursTasksetContext>(spu.offset + 0x2700);
// Trace - STOP
CellSpursTracePacket pkt;
@@ -1379,9 +1379,9 @@ void spursTasksetExit(SPUThread & spu) {
/// Invoked when a task exits
void spursTasksetOnTaskExit(SPUThread & spu, u64 addr, u32 taskId, s32 exitCode, u64 args) {
- auto ctxt = vm::get_ptr<SpursTasksetContext>(spu.offset + 0x2700);
+ auto ctxt = vm::_ptr<SpursTasksetContext>(spu.offset + 0x2700);
- memcpy(vm::get_ptr(spu.offset + 0x10000), vm::get_ptr(addr & -0x80), (addr & 0x7F) << 11);
+ std::memcpy(vm::base(spu.offset + 0x10000), vm::base(addr & -0x80), (addr & 0x7F) << 11);
spu.gpr[3]._u64[1] = ctxt->taskset.addr();
spu.gpr[4]._u32[3] = taskId;
@@ -1392,8 +1392,8 @@ void spursTasksetOnTaskExit(SPUThread & spu, u64 addr, u32 taskId, s32 exitCode,
/// Save the context of a task
s32 spursTasketSaveTaskContext(SPUThread & spu) {
- auto ctxt = vm::get_ptr<SpursTasksetContext>(spu.offset + 0x2700);
- auto taskInfo = vm::get_ptr<CellSpursTaskset::TaskInfo>(spu.offset + 0x2780);
+ auto ctxt = vm::_ptr<SpursTasksetContext>(spu.offset + 0x2700);
+ auto taskInfo = vm::_ptr<CellSpursTaskset::TaskInfo>(spu.offset + 0x2780);
//spursDmaWaitForCompletion(spu, 0xFFFFFFFF);
@@ -1430,13 +1430,13 @@ s32 spursTasketSaveTaskContext(SPUThread & spu) {
// Store the processor context
const u32 contextSaveStorage = VM_CAST(taskInfo->context_save_storage_and_alloc_ls_blocks & -0x80);
- memcpy(vm::get_ptr(contextSaveStorage), vm::get_ptr(spu.offset + 0x2C80), 0x380);
+ std::memcpy(vm::base(contextSaveStorage), vm::base(spu.offset + 0x2C80), 0x380);
// Save LS context
for (auto i = 6; i < 128; i++) {
if (ls_pattern._bit[i]) {
// TODO: Combine DMA requests for consecutive blocks into a single request
- memcpy(vm::get_ptr(contextSaveStorage + 0x400 + ((i - 6) << 11)), vm::get_ptr(spu.offset + CELL_SPURS_TASK_TOP + ((i - 6) << 11)), 0x800);
+ std::memcpy(vm::base(contextSaveStorage + 0x400 + ((i - 6) << 11)), vm::base(spu.offset + CELL_SPURS_TASK_TOP + ((i - 6) << 11)), 0x800);
}
}
@@ -1446,8 +1446,8 @@ s32 spursTasketSaveTaskContext(SPUThread & spu) {
/// Taskset dispatcher
void spursTasksetDispatch(SPUThread & spu) {
- auto ctxt = vm::get_ptr<SpursTasksetContext>(spu.offset + 0x2700);
- auto taskset = vm::get_ptr<CellSpursTaskset>(spu.offset + 0x2700);
+ auto ctxt = vm::_ptr<SpursTasksetContext>(spu.offset + 0x2700);
+ auto taskset = vm::_ptr<CellSpursTaskset>(spu.offset + 0x2700);
u32 taskId;
u32 isWaiting;
@@ -1460,10 +1460,10 @@ void spursTasksetDispatch(SPUThread & spu) {
ctxt->taskId = taskId;
// DMA in the task info for the selected task
- memcpy(vm::get_ptr(spu.offset + 0x2780), &ctxt->taskset->task_info[taskId], sizeof(CellSpursTaskset::TaskInfo));
- auto taskInfo = vm::get_ptr<CellSpursTaskset::TaskInfo>(spu.offset + 0x2780);
+ std::memcpy(vm::base(spu.offset + 0x2780), &ctxt->taskset->task_info[taskId], sizeof(CellSpursTaskset::TaskInfo));
+ auto taskInfo = vm::_ptr<CellSpursTaskset::TaskInfo>(spu.offset + 0x2780);
auto elfAddr = taskInfo->elf.addr().value();
- taskInfo->elf.set(taskInfo->elf.addr() & 0xFFFFFFFFFFFFFFF8ull);
+ taskInfo->elf.set(taskInfo->elf.addr() & 0xFFFFFFFFFFFFFFF8);
// Trace - Task: Incident=dispatch
CellSpursTracePacket pkt;
@@ -1475,7 +1475,7 @@ void spursTasksetDispatch(SPUThread & spu) {
if (isWaiting == 0) {
// If we reach here it means that the task is being started and not being resumed
- memset(vm::get_ptr<void>(spu.offset + CELL_SPURS_TASK_TOP), 0, CELL_SPURS_TASK_BOTTOM - CELL_SPURS_TASK_TOP);
+ std::memset(vm::base(spu.offset + CELL_SPURS_TASK_TOP), 0, CELL_SPURS_TASK_BOTTOM - CELL_SPURS_TASK_TOP);
ctxt->guidAddr = CELL_SPURS_TASK_TOP;
u32 entryPoint;
@@ -1495,7 +1495,7 @@ void spursTasksetDispatch(SPUThread & spu) {
ctxt->x2FD4 = elfAddr & 5; // TODO: Figure this out
if ((elfAddr & 5) == 1) {
- memcpy(vm::get_ptr(spu.offset + 0x2FC0), &((CellSpursTaskset2*)(ctxt->taskset.get_ptr()))->task_exit_code[taskId], 0x10);
+ std::memcpy(vm::base(spu.offset + 0x2FC0), &((CellSpursTaskset2*)(ctxt->taskset.get_ptr()))->task_exit_code[taskId], 0x10);
}
// Trace - GUID
@@ -1513,7 +1513,7 @@ void spursTasksetDispatch(SPUThread & spu) {
spursTasksetStartTask(spu, taskInfo->args);
} else {
if (taskset->enable_clear_ls) {
- memset(vm::get_ptr<void>(spu.offset + CELL_SPURS_TASK_TOP), 0, CELL_SPURS_TASK_BOTTOM - CELL_SPURS_TASK_TOP);
+ std::memset(vm::base(spu.offset + CELL_SPURS_TASK_TOP), 0, CELL_SPURS_TASK_BOTTOM - CELL_SPURS_TASK_TOP);
}
// If the entire LS is saved then there is no need to load the ELF as it will be be saved in the context save area as well
@@ -1529,11 +1529,11 @@ void spursTasksetDispatch(SPUThread & spu) {
// Load saved context from main memory to LS
const u32 contextSaveStorage = VM_CAST(taskInfo->context_save_storage_and_alloc_ls_blocks & -0x80);
- memcpy(vm::get_ptr(spu.offset + 0x2C80), vm::get_ptr(contextSaveStorage), 0x380);
+ std::memcpy(vm::base(spu.offset + 0x2C80), vm::base(contextSaveStorage), 0x380);
for (auto i = 6; i < 128; i++) {
if (ls_pattern._bit[i]) {
// TODO: Combine DMA requests for consecutive blocks into a single request
- memcpy(vm::get_ptr(spu.offset + CELL_SPURS_TASK_TOP + ((i - 6) << 11)), vm::get_ptr(contextSaveStorage + 0x400 + ((i - 6) << 11)), 0x800);
+ std::memcpy(vm::base(spu.offset + CELL_SPURS_TASK_TOP + ((i - 6) << 11)), vm::base(contextSaveStorage + 0x400 + ((i - 6) << 11)), 0x800);
}
}
@@ -1563,8 +1563,8 @@ void spursTasksetDispatch(SPUThread & spu) {
/// Process a syscall request
s32 spursTasksetProcessSyscall(SPUThread & spu, u32 syscallNum, u32 args) {
- auto ctxt = vm::get_ptr<SpursTasksetContext>(spu.offset + 0x2700);
- auto taskset = vm::get_ptr<CellSpursTaskset>(spu.offset + 0x2700);
+ auto ctxt = vm::_ptr<SpursTasksetContext>(spu.offset + 0x2700);
+ auto taskset = vm::_ptr<CellSpursTaskset>(spu.offset + 0x2700);
// If the 0x10 bit is set in syscallNum then its the 2nd version of the
// syscall (e.g. cellSpursYield2 instead of cellSpursYield) and so don't wait
@@ -1642,7 +1642,7 @@ s32 spursTasksetProcessSyscall(SPUThread & spu, u32 syscallNum, u32 args) {
cellSpursModulePutTrace(&pkt, ctxt->dmaTagId);
// Clear the GUID of the task
- memset(vm::get_ptr<void>(spu.offset + ctxt->guidAddr), 0, 0x10);
+ std::memset(vm::base(spu.offset + ctxt->guidAddr), 0, 0x10);
if (spursTasksetPollStatus(spu)) {
spursTasksetExit(spu);
@@ -1656,8 +1656,8 @@ s32 spursTasksetProcessSyscall(SPUThread & spu, u32 syscallNum, u32 args) {
/// Initialise the Taskset PM
void spursTasksetInit(SPUThread & spu, u32 pollStatus) {
- auto ctxt = vm::get_ptr<SpursTasksetContext>(spu.offset + 0x2700);
- auto kernelCtxt = vm::get_ptr<SpursKernelContext>(spu.offset + 0x100);
+ auto ctxt = vm::_ptr<SpursTasksetContext>(spu.offset + 0x2700);
+ auto kernelCtxt = vm::_ptr<SpursKernelContext>(spu.offset + 0x100);
kernelCtxt->moduleId[0] = 'T';
kernelCtxt->moduleId[1] = 'K';