#include "stdafx.h"
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/SysCalls/Modules.h"
#include "Emu/SysCalls/lv2/sys_sync.h"
#include "Emu/SysCalls/lv2/sys_event.h"
#include "Emu/SysCalls/lv2/sys_process.h"
#include "Emu/Event.h"
#include "cellSync.h"

extern Module<> cellSync;

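// cellSync provides the guest-visible synchronization primitives (mutex, barrier,
// read-write memory, queue and lock-free queue). All of their state lives in guest
// memory behind vm::ptr, so every entry point validates the pointer and its required
// alignment first and reports failures with the CELL_SYNC_ERROR_* codes.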
s32 cellSyncMutexInitialize(vm::ptr<CellSyncMutex> mutex)
{
	cellSync.Log("cellSyncMutexInitialize(mutex=*0x%x)", mutex);

	if (!mutex)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!mutex.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	mutex->exchange({});

	return CELL_OK;
}

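// Note: CellSyncMutex acts as a FIFO ticket lock. sync_mutex_t::acquire (see cellSync.h)
// hands back the previous `acq` value as a ticket, and the lock below waits until `rel`
// catches up to that ticket, so contending PPU threads are served in order.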
s32 cellSyncMutexLock(PPUThread& ppu, vm::ptr<CellSyncMutex> mutex)
{
	cellSync.Log("cellSyncMutexLock(mutex=*0x%x)", mutex);

	if (!mutex)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!mutex.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	// increase acq value and remember its old value
	const auto order = mutex->atomic_op(&sync_mutex_t::acquire);

	// wait until rel value is equal to old acq value
	vm::wait_op(ppu, mutex.addr(), 4, WRAP_EXPR(mutex->load().rel == order));

	_mm_mfence();

	return CELL_OK;
}

s32 cellSyncMutexTryLock(vm::ptr<CellSyncMutex> mutex)
{
	cellSync.Log("cellSyncMutexTryLock(mutex=*0x%x)", mutex);

	if (!mutex)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!mutex.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	if (!mutex->atomic_op(&sync_mutex_t::try_lock))
	{
		return CELL_SYNC_ERROR_BUSY;
	}

	return CELL_OK;
}

s32 cellSyncMutexUnlock(vm::ptr<CellSyncMutex> mutex)
{
	cellSync.Log("cellSyncMutexUnlock(mutex=*0x%x)", mutex);

	if (!mutex)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!mutex.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	mutex->atomic_op(&sync_mutex_t::unlock);

	vm::notify_at(mutex.addr(), 4);

	return CELL_OK;
}

s32 cellSyncBarrierInitialize(vm::ptr<CellSyncBarrier> barrier, u16 total_count)
{
	cellSync.Log("cellSyncBarrierInitialize(barrier=*0x%x, total_count=%d)", barrier, total_count);

	if (!barrier)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!barrier.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	if (!total_count || total_count > 32767)
	{
		return CELL_SYNC_ERROR_INVAL;
	}

	// clear current value, write total_count and sync
	barrier->exchange({ 0, total_count });

	return CELL_OK;
}

s32 cellSyncBarrierNotify(PPUThread& ppu, vm::ptr<CellSyncBarrier> barrier)
{
	cellSync.Log("cellSyncBarrierNotify(barrier=*0x%x)", barrier);

	if (!barrier)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!barrier.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	vm::wait_op(ppu, barrier.addr(), 4, WRAP_EXPR(barrier->atomic_op(&sync_barrier_t::try_notify)));

	vm::notify_at(barrier.addr(), 4);

	return CELL_OK;
}

s32 cellSyncBarrierTryNotify(vm::ptr<CellSyncBarrier> barrier)
{
	cellSync.Log("cellSyncBarrierTryNotify(barrier=*0x%x)", barrier);

	if (!barrier)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!barrier.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	_mm_mfence();

	if (!barrier->atomic_op(&sync_barrier_t::try_notify))
	{
		return CELL_SYNC_ERROR_BUSY;
	}

	vm::notify_at(barrier.addr(), 4);

	return CELL_OK;
}

s32 cellSyncBarrierWait(PPUThread& ppu, vm::ptr<CellSyncBarrier> barrier)
{
	cellSync.Log("cellSyncBarrierWait(barrier=*0x%x)", barrier);

	if (!barrier)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!barrier.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	_mm_mfence();

	vm::wait_op(ppu, barrier.addr(), 4, WRAP_EXPR(barrier->atomic_op(&sync_barrier_t::try_wait)));

	vm::notify_at(barrier.addr(), 4);

	return CELL_OK;
}

s32 cellSyncBarrierTryWait(vm::ptr<CellSyncBarrier> barrier)
{
	cellSync.Log("cellSyncBarrierTryWait(barrier=*0x%x)", barrier);

	if (!barrier)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!barrier.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	_mm_mfence();

	if (!barrier->atomic_op(&sync_barrier_t::try_wait))
	{
		return CELL_SYNC_ERROR_BUSY;
	}

	vm::notify_at(barrier.addr(), 4);

	return CELL_OK;
}

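// CellSyncRwm: a reader-writer lock guarding an external 128-byte aligned buffer of at
// most 0x4000 bytes. Read/TryRead copy the protected buffer out while holding a reader
// reference; Write/TryWrite copy new contents in under exclusive writer access.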
s32 cellSyncRwmInitialize(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer, u32 buffer_size)
{
	cellSync.Log("cellSyncRwmInitialize(rwm=*0x%x, buffer=*0x%x, buffer_size=0x%x)", rwm, buffer, buffer_size);

	if (!rwm || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!rwm.aligned() || buffer % 128)
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	if (buffer_size % 128 || buffer_size > 0x4000)
	{
		return CELL_SYNC_ERROR_INVAL;
	}

	// clear readers and writers, write buffer_size, buffer addr and sync
	rwm->ctrl.store({});
	rwm->size = buffer_size;
	rwm->buffer = buffer;

	_mm_mfence();

	return CELL_OK;
}

s32 cellSyncRwmRead(PPUThread& ppu, vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
{
	cellSync.Log("cellSyncRwmRead(rwm=*0x%x, buffer=*0x%x)", rwm, buffer);

	if (!rwm || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!rwm.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	// wait until `writers` is zero, increase `readers`
	vm::wait_op(ppu, rwm.addr(), 4, WRAP_EXPR(rwm->ctrl.atomic_op(&sync_rwm_t::try_read_begin)));

	// copy data to buffer
	std::memcpy(buffer.get_ptr(), rwm->buffer.get_ptr(), rwm->size);

	// decrease `readers`, return error if already zero
	if (!rwm->ctrl.atomic_op(&sync_rwm_t::try_read_end))
	{
		return CELL_SYNC_ERROR_ABORT;
	}

	vm::notify_at(rwm.addr(), 4);

	return CELL_OK;
}

s32 cellSyncRwmTryRead(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
{
	cellSync.Log("cellSyncRwmTryRead(rwm=*0x%x, buffer=*0x%x)", rwm, buffer);

	if (!rwm || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!rwm.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	// increase `readers` if `writers` is zero
	if (!rwm->ctrl.atomic_op(&sync_rwm_t::try_read_begin))
	{
		return CELL_SYNC_ERROR_BUSY;
	}

	// copy data to buffer
	std::memcpy(buffer.get_ptr(), rwm->buffer.get_ptr(), rwm->size);

	// decrease `readers`, return error if already zero
	if (!rwm->ctrl.atomic_op(&sync_rwm_t::try_read_end))
	{
		return CELL_SYNC_ERROR_ABORT;
	}

	vm::notify_at(rwm.addr(), 4);

	return CELL_OK;
}

s32 cellSyncRwmWrite(PPUThread& ppu, vm::ptr<CellSyncRwm> rwm, vm::cptr<void> buffer)
{
	cellSync.Log("cellSyncRwmWrite(rwm=*0x%x, buffer=*0x%x)", rwm, buffer);

	if (!rwm || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!rwm.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	// wait until `writers` is zero, set to 1
	vm::wait_op(ppu, rwm.addr(), 4, WRAP_EXPR(rwm->ctrl.atomic_op(&sync_rwm_t::try_write_begin)));

	// wait until `readers` is zero
	vm::wait_op(ppu, rwm.addr(), 4, WRAP_EXPR(!rwm->ctrl.load().readers));

	// copy data from buffer
	std::memcpy(rwm->buffer.get_ptr(), buffer.get_ptr(), rwm->size);

	// sync and clear `readers` and `writers`
	rwm->ctrl.exchange({});

	vm::notify_at(rwm.addr(), 4);

	return CELL_OK;
}

s32 cellSyncRwmTryWrite(vm::ptr<CellSyncRwm> rwm, vm::cptr<void> buffer)
{
	cellSync.Log("cellSyncRwmTryWrite(rwm=*0x%x, buffer=*0x%x)", rwm, buffer);

	if (!rwm || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!rwm.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	// set `writers` to 1 if `readers` and `writers` are zero
	if (!rwm->ctrl.compare_and_swap_test({ 0, 0 }, { 0, 1 }))
	{
		return CELL_SYNC_ERROR_BUSY;
	}

	// copy data from buffer
	std::memcpy(rwm->buffer.get_ptr(), buffer.get_ptr(), rwm->size);

	// sync and clear `readers` and `writers`
	rwm->ctrl.exchange({});

	vm::notify_at(rwm.addr(), 4);

	return CELL_OK;
}

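// CellSyncQueue: a bounded ring buffer of `depth` fixed-size elements kept in an external
// 16-byte aligned buffer. try_push_begin/try_pop_begin/try_peek_begin (see cellSync.h)
// atomically reserve a slot position; the masked AND on `ctrl` afterwards appears to clear
// the in-progress byte, publishing the completed push or pop to the other side.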
s32 cellSyncQueueInitialize(vm::ptr<CellSyncQueue> queue, vm::ptr<u8> buffer, u32 size, u32 depth)
{
	cellSync.Log("cellSyncQueueInitialize(queue=*0x%x, buffer=*0x%x, size=0x%x, depth=0x%x)", queue, buffer, size, depth);

	if (!queue)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (size && !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned() || buffer % 16)
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	if (!depth || size % 16)
	{
		return CELL_SYNC_ERROR_INVAL;
	}

	// clear sync var, write size, depth, buffer addr and sync
	queue->ctrl.store({});
	queue->size = size;
	queue->depth = depth;
	queue->buffer = buffer;

	_mm_mfence();

	return CELL_OK;
}

s32 cellSyncQueuePush(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, vm::cptr<void> buffer)
{
	cellSync.Log("cellSyncQueuePush(queue=*0x%x, buffer=*0x%x)", queue, buffer);

	if (!queue || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	const u32 depth = queue->check_depth();

	u32 position;

	vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_push_begin, depth, position)));

	// copy data from the buffer at the position
	std::memcpy(&queue->buffer[position * queue->size], buffer.get_ptr(), queue->size);

	// clear 5th byte
	queue->ctrl &= { 0xffffffff, 0x00ffffff };

	vm::notify_at(queue.addr(), 8);

	return CELL_OK;
}

s32 cellSyncQueueTryPush(vm::ptr<CellSyncQueue> queue, vm::cptr<void> buffer)
{
	cellSync.Log("cellSyncQueueTryPush(queue=*0x%x, buffer=*0x%x)", queue, buffer);

	if (!queue || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	const u32 depth = queue->check_depth();

	u32 position;

	if (!queue->ctrl.atomic_op(&sync_queue_t::try_push_begin, depth, position))
	{
		return CELL_SYNC_ERROR_BUSY;
	}

	// copy data from the buffer at the position
	std::memcpy(&queue->buffer[position * queue->size], buffer.get_ptr(), queue->size);

	// clear 5th byte
	queue->ctrl &= { 0xffffffff, 0x00ffffff };

	vm::notify_at(queue.addr(), 8);

	return CELL_OK;
}

s32 cellSyncQueuePop(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
{
	cellSync.Log("cellSyncQueuePop(queue=*0x%x, buffer=*0x%x)", queue, buffer);

	if (!queue || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	const u32 depth = queue->check_depth();

	u32 position;

	vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_pop_begin, depth, position)));

	// copy data at the position to the buffer
	std::memcpy(buffer.get_ptr(), &queue->buffer[position * queue->size], queue->size);

	// clear first byte
	queue->ctrl &= { 0x00ffffff, 0xffffffffu };

	vm::notify_at(queue.addr(), 8);

	return CELL_OK;
}

s32 cellSyncQueueTryPop(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
{
	cellSync.Log("cellSyncQueueTryPop(queue=*0x%x, buffer=*0x%x)", queue, buffer);

	if (!queue || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	const u32 depth = queue->check_depth();

	u32 position;

	if (!queue->ctrl.atomic_op(&sync_queue_t::try_pop_begin, depth, position))
	{
		return CELL_SYNC_ERROR_BUSY;
	}

	// copy data at the position to the buffer
	std::memcpy(buffer.get_ptr(), &queue->buffer[position * queue->size], queue->size);

	// clear first byte
	queue->ctrl &= { 0x00ffffff, 0xffffffffu };

	vm::notify_at(queue.addr(), 8);

	return CELL_OK;
}

s32 cellSyncQueuePeek(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
{
	cellSync.Log("cellSyncQueuePeek(queue=*0x%x, buffer=*0x%x)", queue, buffer);

	if (!queue || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	const u32 depth = queue->check_depth();

	u32 position;

	vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_peek_begin, depth, position)));

	// copy data at the position to the buffer
	std::memcpy(buffer.get_ptr(), &queue->buffer[position * queue->size], queue->size);

	// clear first byte
	queue->ctrl &= { 0x00ffffff, 0xffffffffu };

	vm::notify_at(queue.addr(), 8);

	return CELL_OK;
}

s32 cellSyncQueueTryPeek(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
{
	cellSync.Log("cellSyncQueueTryPeek(queue=*0x%x, buffer=*0x%x)", queue, buffer);

	if (!queue || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	const u32 depth = queue->check_depth();

	u32 position;

	if (!queue->ctrl.atomic_op(&sync_queue_t::try_peek_begin, depth, position))
	{
		return CELL_SYNC_ERROR_BUSY;
	}

	// copy data at the position to the buffer
	std::memcpy(buffer.get_ptr(), &queue->buffer[position * queue->size], queue->size);

	// clear first byte
	queue->ctrl &= { 0x00ffffff, 0xffffffffu };

	vm::notify_at(queue.addr(), 8);

	return CELL_OK;
}

s32 cellSyncQueueSize(vm::ptr<CellSyncQueue> queue)
{
	cellSync.Log("cellSyncQueueSize(queue=*0x%x)", queue);

	if (!queue)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	queue->check_depth();

	return queue->ctrl.load().m_v2 & 0xffffff;
}

s32 cellSyncQueueClear(PPUThread& ppu, vm::ptr<CellSyncQueue> queue)
{
	cellSync.Log("cellSyncQueueClear(queue=*0x%x)", queue);

	if (!queue)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	queue->check_depth();

	vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_clear_begin_1)));
	vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_clear_begin_2)));

	queue->ctrl.exchange({});

	vm::notify_at(queue.addr(), 8);

	return CELL_OK;
}

// LFQueue functions
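// CellSyncLFQueue is considerably more involved than CellSyncQueue: it is shared between
// PPU and SPU threads, its direction (PPU2SPU, SPU2PPU, ANY2ANY) restricts which side may
// push or pop, and blocking callers can sleep on a lv2 event queue (m_eq_id) instead of
// spinning. The helper below only fills in the queue header; element storage is the
// external buffer supplied by the guest.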
void syncLFQueueInitialize(vm::ptr<CellSyncLFQueue> queue, vm::cptr<void> buffer, u32 size, u32 depth, u32 direction, vm::ptr<void> eaSignal)
{
	queue->m_size = size;
	queue->m_depth = depth;
	queue->m_buffer = buffer;
	queue->m_direction = direction;

	memset(queue->m_hs1, 0, sizeof(queue->m_hs1));
	memset(queue->m_hs2, 0, sizeof(queue->m_hs2));

	queue->m_eaSignal = eaSignal;

	if (direction == CELL_SYNC_QUEUE_ANY2ANY)
	{
		queue->pop1 = {};
		queue->push1 = {};
		queue->m_buffer.set(queue->m_buffer.addr() | 1);
		queue->m_bs[0] = -1;
		queue->m_bs[1] = -1;
		//m_bs[2]
		//m_bs[3]
		queue->m_v1 = -1;
		queue->push2 = { { 0xffff } };
		queue->pop2 = { { 0xffff } };
	}
	else
	{
		queue->pop1 = { { 0, 0, queue->pop1.load().m_h3, 0 } };
		queue->push1 = { { 0, 0, queue->push1.load().m_h7, 0 } };
		queue->m_bs[0] = -1; // written as u32
		queue->m_bs[1] = -1;
		queue->m_bs[2] = -1;
		queue->m_bs[3] = -1;
		queue->m_v1 = 0;
		queue->push2 = {};
		queue->pop2 = {};
	}

	queue->m_v2 = 0;
	queue->m_eq_id = 0;
}

s32 cellSyncLFQueueInitialize(vm::ptr<CellSyncLFQueue> queue, vm::cptr<void> buffer, u32 size, u32 depth, u32 direction, vm::ptr<void> eaSignal)
{
	cellSync.Warning("cellSyncLFQueueInitialize(queue=*0x%x, buffer=*0x%x, size=0x%x, depth=0x%x, direction=%d, eaSignal=*0x%x)", queue, buffer, size, depth, direction, eaSignal);

	if (!queue)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (size)
	{
		if (!buffer)
		{
			return CELL_SYNC_ERROR_NULL_POINTER;
		}

		if (size > 0x4000 || size % 16)
		{
			return CELL_SYNC_ERROR_INVAL;
		}
	}

	if (!depth || (depth >> 15) || direction > 3)
	{
		return CELL_SYNC_ERROR_INVAL;
	}

	if (!queue.aligned() || buffer % 16)
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	// get sdk version of current process
	s32 sdk_ver;

	if (s32 ret = process_get_sdk_version(process_getpid(), sdk_ver))
	{
		return ret;
	}

	if (sdk_ver == -1)
	{
		sdk_ver = 0x460000;
	}

	// reserve `init`
	u32 old_value;

	while (true)
	{
		const auto old = queue->init.load();
		auto init = old;

		if (old)
		{
			if (sdk_ver > 0x17ffff && old != 2)
			{
				return CELL_SYNC_ERROR_STAT;
			}

			old_value = old;
		}
		else
		{
			if (sdk_ver > 0x17ffff)
			{
				auto data = vm::get_ptr<u64>(queue.addr());

				for (u32 i = 0; i < sizeof(CellSyncLFQueue) / sizeof(u64); i++)
				{
					if (data[i])
					{
						return CELL_SYNC_ERROR_STAT;
					}
				}
			}

			init = 1;
			old_value = 1;
		}

		if (queue->init.compare_and_swap_test(old, init)) break;
	}

	if (old_value == 2)
	{
		if (queue->m_size != size || queue->m_depth != depth || queue->m_buffer != buffer)
		{
			return CELL_SYNC_ERROR_INVAL;
		}

		if (sdk_ver > 0x17ffff)
		{
			if (queue->m_eaSignal != eaSignal || queue->m_direction != direction)
			{
				return CELL_SYNC_ERROR_INVAL;
			}
		}

		_mm_mfence();
	}
	else
	{
		syncLFQueueInitialize(queue, buffer, size, depth, direction, eaSignal);

		queue->init.exchange({});
	}

	return CELL_OK;
}

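// The Get*/Complete* pairs below appear to follow the original libsync PPU code very
// literally (note the PowerPC `slw` mentioned in the comments), which is why the var1..var12
// bit manipulation is left largely as-is; push1/push2/push3 and pop1/pop2/pop3 are the
// packed control words of CellSyncLFQueue declared in cellSync.h.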
s32 _cellSyncLFQueueGetPushPointer(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 useEventQueue)
{
	cellSync.Warning("_cellSyncLFQueueGetPushPointer(queue=*0x%x, pointer=*0x%x, isBlocking=%d, useEventQueue=%d)", queue, pointer, isBlocking, useEventQueue);

	if (queue->m_direction != CELL_SYNC_QUEUE_PPU2SPU)
	{
		return CELL_SYNC_ERROR_PERM;
	}

	const s32 depth = queue->m_depth;

	u32 var1 = 0;

	while (true)
	{
		while (true)
		{
			CHECK_EMU_STATUS;

			const auto old = queue->push1.load_sync();
			auto push = old;

			if (var1)
			{
				push.m_h7 = 0;
			}

			if (isBlocking && useEventQueue && *(u32*)queue->m_bs == -1)
			{
				return CELL_SYNC_ERROR_STAT;
			}

			s32 var2 = (s16)push.m_h8;
			s32 res;

			if (useEventQueue && ((s32)push.m_h5 != var2 || push.m_h7))
			{
				res = CELL_SYNC_ERROR_BUSY;
			}
			else
			{
				var2 -= (s32)(u16)queue->pop1.load().m_h1;

				if (var2 < 0)
				{
					var2 += depth * 2;
				}

				if (var2 < depth)
				{
					const s32 _pointer = (s16)push.m_h8;
					*pointer = _pointer;

					if (_pointer + 1 >= depth * 2)
					{
						push.m_h8 = 0;
					}
					else
					{
						push.m_h8++;
					}

					res = CELL_OK;
				}
				else if (!isBlocking)
				{
					return CELL_SYNC_ERROR_AGAIN;
				}
				else if (!useEventQueue)
				{
					continue;
				}
				else
				{
					res = CELL_OK;
					push.m_h7 = 3;

					if (isBlocking != 3)
					{
						break;
					}
				}
			}

			if (queue->push1.compare_and_swap_test(old, push))
			{
				if (!push.m_h7 || res)
				{
					return res;
				}

				break;
			}
		}

		if (s32 res = sys_event_queue_receive(ppu, queue->m_eq_id, vm::null, 0))
		{
			throw EXCEPTION("");
		}

		var1 = 1;
	}
}

s32 _cellSyncLFQueueGetPushPointer2(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 useEventQueue)
{
	// arguments copied from _cellSyncLFQueueGetPushPointer
	cellSync.Todo("_cellSyncLFQueueGetPushPointer2(queue=*0x%x, pointer=*0x%x, isBlocking=%d, useEventQueue=%d)", queue, pointer, isBlocking, useEventQueue);

	throw EXCEPTION("");
}

s32 _cellSyncLFQueueCompletePushPointer(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal)
{
	cellSync.Warning("_cellSyncLFQueueCompletePushPointer(queue=*0x%x, pointer=%d, fpSendSignal=*0x%x)", queue, pointer, fpSendSignal);

	if (queue->m_direction != CELL_SYNC_QUEUE_PPU2SPU)
	{
		return CELL_SYNC_ERROR_PERM;
	}

	const s32 depth = queue->m_depth;

	while (true)
	{
		const auto old = queue->push2.load_sync();
		auto push2 = old;

		const auto old2 = queue->push3.load();
		auto push3 = old2;

		s32 var1 = pointer - (u16)push3.m_h5;

		if (var1 < 0)
		{
			var1 += depth * 2;
		}

		s32 var2 = (s32)(s16)queue->pop1.load().m_h4 - (s32)(u16)queue->pop1.load().m_h1;

		if (var2 < 0)
		{
			var2 += depth * 2;
		}

		s32 var9_ = 15 - var1;

		// calculate (u16)(1 slw (15 - var1))
		if (var9_ & 0x30)
		{
			var9_ = 0;
		}
		else
		{
			var9_ = 1 << var9_;
		}

		s32 var9 = cntlz32((u32)(u16)~(var9_ | (u16)push3.m_h6)) - 16; // count leading zeros in u16

		s32 var5 = (s32)(u16)push3.m_h6 | var9_;

		if (var9 & 0x30)
		{
			var5 = 0;
		}
		else
		{
			var5 <<= var9;
		}

		s32 var3 = (u16)push3.m_h5 + var9;

		if (var3 >= depth * 2)
		{
			var3 -= depth * 2;
		}

		u16 pack = push2.pack; // three packed 5-bit fields

		s32 var4 = ((pack >> 10) & 0x1f) - ((pack >> 5) & 0x1f);

		if (var4 < 0)
		{
			var4 += 0x1e;
		}

		u32 var6;

		if (var2 + var4 <= 15 && ((pack >> 10) & 0x1f) != (pack & 0x1f))
		{
			s32 var8 = (pack & 0x1f) - ((pack >> 10) & 0x1f);

			if (var8 < 0)
			{
				var8 += 0x1e;
			}

			if (var9 > 1 && (u32)var8 > 1)
			{
				assert(16 - var2 <= 1);
			}

			s32 var11 = (pack >> 10) & 0x1f;

			if (var11 >= 15)
			{
				var11 -= 15;
			}

			u16 var12 = (pack >> 10) & 0x1f;

			if (var12 == 0x1d)
			{
				var12 = 0;
			}
			else
			{
				var12 = (var12 + 1) << 10;
			}

			push2.pack = (pack & 0x83ff) | var12;
			var6 = (u16)queue->m_hs1[var11];
		}
		else
		{
			var6 = -1;
		}

		push3.m_h5 = (u16)var3;
		push3.m_h6 = (u16)var5;

		if (queue->push2.compare_and_swap_test(old, push2))
		{
			assert(var2 + var4 < 16);

			if (var6 != -1)
			{
				bool exch = queue->push3.compare_and_swap_test(old2, push3);
				assert(exch);

				if (exch)
				{
					assert(fpSendSignal);
					return fpSendSignal(ppu, (u32)queue->m_eaSignal.addr(), var6);
				}
			}
			else
			{
				pack = queue->push2.load().pack;

				if ((pack & 0x1f) == ((pack >> 10) & 0x1f))
				{
					if (queue->push3.compare_and_swap_test(old2, push3))
					{
						return CELL_OK;
					}
				}
			}
		}
	}
}

s32 _cellSyncLFQueueCompletePushPointer2(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal)
{
	// arguments copied from _cellSyncLFQueueCompletePushPointer
	cellSync.Todo("_cellSyncLFQueueCompletePushPointer2(queue=*0x%x, pointer=%d, fpSendSignal=*0x%x)", queue, pointer, fpSendSignal);

	throw EXCEPTION("");
}

s32 _cellSyncLFQueuePushBody(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::cptr<void> buffer, u32 isBlocking)
{
	// cellSyncLFQueuePush has 1 in isBlocking param, cellSyncLFQueueTryPush has 0
	cellSync.Warning("_cellSyncLFQueuePushBody(queue=*0x%x, buffer=*0x%x, isBlocking=%d)", queue, buffer, isBlocking);

	if (!queue || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned() || buffer % 16)
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	const vm::var<s32> position(ppu);

	while (true)
	{
		CHECK_EMU_STATUS;

		s32 res;

		if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY)
		{
			res = _cellSyncLFQueueGetPushPointer(ppu, queue, position, isBlocking, 0);
		}
		else
		{
			res = _cellSyncLFQueueGetPushPointer2(ppu, queue, position, isBlocking, 0);
		}

		if (!isBlocking || res != CELL_SYNC_ERROR_AGAIN)
		{
			if (res) return res;

			break;
		}

		std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
	}

	const s32 depth = queue->m_depth;
	const s32 size = queue->m_size;
	const s32 pos = *position;
	const u32 addr = VM_CAST((u64)((queue->m_buffer.addr() & ~1ull) + size * (pos >= depth ? pos - depth : pos)));

	std::memcpy(vm::get_ptr<void>(addr), buffer.get_ptr(), size);

	if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY)
	{
		return _cellSyncLFQueueCompletePushPointer(ppu, queue, pos, vm::null);
	}
	else
	{
		return _cellSyncLFQueueCompletePushPointer2(ppu, queue, pos, vm::null);
	}
}

s32 _cellSyncLFQueueGetPopPointer(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 arg4, u32 useEventQueue)
{
	cellSync.Warning("_cellSyncLFQueueGetPopPointer(queue=*0x%x, pointer=*0x%x, isBlocking=%d, arg4=%d, useEventQueue=%d)", queue, pointer, isBlocking, arg4, useEventQueue);

	if (queue->m_direction != CELL_SYNC_QUEUE_SPU2PPU)
	{
		return CELL_SYNC_ERROR_PERM;
	}

	const s32 depth = queue->m_depth;

	u32 var1 = 0;

	while (true)
	{
		while (true)
		{
			CHECK_EMU_STATUS;

			const auto old = queue->pop1.load_sync();
			auto pop = old;

			if (var1)
			{
				pop.m_h3 = 0;
			}

			if (isBlocking && useEventQueue && *(u32*)queue->m_bs == -1)
			{
				return CELL_SYNC_ERROR_STAT;
			}

			s32 var2 = (s32)(s16)pop.m_h4;
			s32 res;

			if (useEventQueue && ((s32)(u16)pop.m_h1 != var2 || pop.m_h3))
			{
				res = CELL_SYNC_ERROR_BUSY;
			}
			else
			{
				var2 = (s32)(u16)queue->push1.load().m_h5 - var2;

				if (var2 < 0)
				{
					var2 += depth * 2;
				}

				if (var2 > 0)
				{
					const s32 _pointer = (s16)pop.m_h4;
					*pointer = _pointer;

					if (_pointer + 1 >= depth * 2)
					{
						pop.m_h4 = 0;
					}
					else
					{
						pop.m_h4++;
					}

					res = CELL_OK;
				}
				else if (!isBlocking)
				{
					return CELL_SYNC_ERROR_AGAIN;
				}
				else if (!useEventQueue)
				{
					continue;
				}
				else
				{
					res = CELL_OK;
					pop.m_h3 = 3;

					if (isBlocking != 3)
					{
						break;
					}
				}
			}

			if (queue->pop1.compare_and_swap_test(old, pop))
			{
				if (!pop.m_h3 || res)
				{
					return res;
				}

				break;
			}
		}

		if (s32 res = sys_event_queue_receive(ppu, queue->m_eq_id, vm::null, 0))
		{
			throw EXCEPTION("");
		}

		var1 = 1;
	}
}

s32 _cellSyncLFQueueGetPopPointer2(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 useEventQueue)
{
	// arguments copied from _cellSyncLFQueueGetPopPointer
	cellSync.Todo("_cellSyncLFQueueGetPopPointer2(queue=*0x%x, pointer=*0x%x, isBlocking=%d, useEventQueue=%d)", queue, pointer, isBlocking, useEventQueue);

	throw EXCEPTION("");
}

s32 _cellSyncLFQueueCompletePopPointer(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull)
{
	// arguments copied from _cellSyncLFQueueCompletePushPointer + unknown argument (noQueueFull taken from LFQueue2CompletePopPointer)
	cellSync.Warning("_cellSyncLFQueueCompletePopPointer(queue=*0x%x, pointer=%d, fpSendSignal=*0x%x, noQueueFull=%d)", queue, pointer, fpSendSignal, noQueueFull);

	if (queue->m_direction != CELL_SYNC_QUEUE_SPU2PPU)
	{
		return CELL_SYNC_ERROR_PERM;
	}

	const s32 depth = queue->m_depth;

	while (true)
	{
		const auto old = queue->pop2.load_sync();
		auto pop2 = old;

		const auto old2 = queue->pop3.load();
		auto pop3 = old2;

		s32 var1 = pointer - (u16)pop3.m_h1;

		if (var1 < 0)
		{
			var1 += depth * 2;
		}

		s32 var2 = (s32)(s16)queue->push1.load().m_h8 - (s32)(u16)queue->push1.load().m_h5;

		if (var2 < 0)
		{
			var2 += depth * 2;
		}

		s32 var9_ = 15 - var1;

		// calculate (u16)(1 slw (15 - var1))
		if (var9_ & 0x30)
		{
			var9_ = 0;
		}
		else
		{
			var9_ = 1 << var9_;
		}

		s32 var9 = cntlz32((u32)(u16)~(var9_ | (u16)pop3.m_h2)) - 16; // count leading zeros in u16

		s32 var5 = (s32)(u16)pop3.m_h2 | var9_;

		if (var9 & 0x30)
		{
			var5 = 0;
		}
		else
		{
			var5 <<= var9;
		}

		s32 var3 = (u16)pop3.m_h1 + var9;

		if (var3 >= depth * 2)
		{
			var3 -= depth * 2;
		}

		u16 pack = pop2.pack; // three packed 5-bit fields

		s32 var4 = ((pack >> 10) & 0x1f) - ((pack >> 5) & 0x1f);

		if (var4 < 0)
		{
			var4 += 0x1e;
		}

		u32 var6;

		if (noQueueFull || var2 + var4 > 15 || ((pack >> 10) & 0x1f) == (pack & 0x1f))
		{
			var6 = -1;
		}
		else
		{
			s32 var8 = (pack & 0x1f) - ((pack >> 10) & 0x1f);

			if (var8 < 0)
			{
				var8 += 0x1e;
			}

			if (var9 > 1 && (u32)var8 > 1)
			{
				assert(16 - var2 <= 1);
			}

			s32 var11 = (pack >> 10) & 0x1f;

			if (var11 >= 15)
			{
				var11 -= 15;
			}

			u16 var12 = (pack >> 10) & 0x1f;

			if (var12 == 0x1d)
			{
				var12 = 0;
			}
			else
			{
				var12 = (var12 + 1) << 10;
			}

			pop2.pack = (pack & 0x83ff) | var12;
			var6 = (u16)queue->m_hs2[var11];
		}

		pop3.m_h1 = (u16)var3;
		pop3.m_h2 = (u16)var5;

		if (queue->pop2.compare_and_swap_test(old, pop2))
		{
			if (var6 != -1)
			{
				bool exch = queue->pop3.compare_and_swap_test(old2, pop3);
				assert(exch);

				if (exch)
				{
					assert(fpSendSignal);
					return fpSendSignal(ppu, (u32)queue->m_eaSignal.addr(), var6);
				}
			}
			else
			{
				pack = queue->pop2.load().pack;

				if ((pack & 0x1f) == ((pack >> 10) & 0x1f))
				{
					if (queue->pop3.compare_and_swap_test(old2, pop3))
					{
						return CELL_OK;
					}
				}
			}
		}
	}
}

s32 _cellSyncLFQueueCompletePopPointer2(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull)
{
	// arguments copied from _cellSyncLFQueueCompletePopPointer
	cellSync.Todo("_cellSyncLFQueueCompletePopPointer2(queue=*0x%x, pointer=%d, fpSendSignal=*0x%x, noQueueFull=%d)", queue, pointer, fpSendSignal, noQueueFull);

	throw EXCEPTION("");
}

s32 _cellSyncLFQueuePopBody(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<void> buffer, u32 isBlocking)
{
	// cellSyncLFQueuePop has 1 in isBlocking param, cellSyncLFQueueTryPop has 0
	cellSync.Warning("_cellSyncLFQueuePopBody(queue=*0x%x, buffer=*0x%x, isBlocking=%d)", queue, buffer, isBlocking);

	if (!queue || !buffer)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned() || buffer % 16)
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	const vm::var<s32> position(ppu);

	while (true)
	{
		CHECK_EMU_STATUS;

		s32 res;

		if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY)
		{
			res = _cellSyncLFQueueGetPopPointer(ppu, queue, position, isBlocking, 0, 0);
		}
		else
		{
			res = _cellSyncLFQueueGetPopPointer2(ppu, queue, position, isBlocking, 0);
		}

		if (!isBlocking || res != CELL_SYNC_ERROR_AGAIN)
		{
			if (res) return res;

			break;
		}

		std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
	}

	const s32 depth = queue->m_depth;
	const s32 size = queue->m_size;
	const s32 pos = *position;
	const u32 addr = VM_CAST((u64)((queue->m_buffer.addr() & ~1) + size * (pos >= depth ? pos - depth : pos)));

	std::memcpy(buffer.get_ptr(), vm::get_ptr<void>(addr), size);

	if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY)
	{
		return _cellSyncLFQueueCompletePopPointer(ppu, queue, pos, vm::null, 0);
	}
	else
	{
		return _cellSyncLFQueueCompletePopPointer2(ppu, queue, pos, vm::null, 0);
	}
}

s32 cellSyncLFQueueClear(vm::ptr<CellSyncLFQueue> queue)
{
	cellSync.Warning("cellSyncLFQueueClear(queue=*0x%x)", queue);

	if (!queue)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	while (true)
	{
		const auto old = queue->pop1.load_sync();
		auto pop = old;

		const auto push = queue->push1.load();

		s32 var1, var2;

		if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY)
		{
			var1 = var2 = (u16)queue->pop2.load().pack;
		}
		else
		{
			var1 = (u16)push.m_h7;
			var2 = (u16)pop.m_h3;
		}

		if ((s32)(s16)pop.m_h4 != (s32)(u16)pop.m_h1 ||
			(s32)(s16)push.m_h8 != (s32)(u16)push.m_h5 ||
			((var2 >> 10) & 0x1f) != (var2 & 0x1f) ||
			((var1 >> 10) & 0x1f) != (var1 & 0x1f))
		{
			return CELL_SYNC_ERROR_BUSY;
		}

		pop.m_h1 = push.m_h5;
		pop.m_h2 = push.m_h6;
		pop.m_h3 = push.m_h7;
		pop.m_h4 = push.m_h8;

		if (queue->pop1.compare_and_swap_test(old, pop)) break;
	}

	return CELL_OK;
}

s32 cellSyncLFQueueSize(vm::ptr<CellSyncLFQueue> queue, vm::ptr<u32> size)
{
	cellSync.Warning("cellSyncLFQueueSize(queue=*0x%x, size=*0x%x)", queue, size);

	if (!queue || !size)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	while (true)
	{
		const auto old = queue->pop3.load_sync();

		u32 var1 = (u16)queue->pop1.load().m_h1;
		u32 var2 = (u16)queue->push1.load().m_h5;

		if (queue->pop3.compare_and_swap_test(old, old))
		{
			if (var1 <= var2)
			{
				*size = var2 - var1;
			}
			else
			{
				*size = var2 - var1 + (u32)queue->m_depth * 2;
			}

			return CELL_OK;
		}
	}
}

s32 cellSyncLFQueueDepth(vm::ptr<CellSyncLFQueue> queue, vm::ptr<u32> depth)
{
	cellSync.Log("cellSyncLFQueueDepth(queue=*0x%x, depth=*0x%x)", queue, depth);

	if (!queue || !depth)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	*depth = queue->m_depth;

	return CELL_OK;
}

s32 _cellSyncLFQueueGetSignalAddress(vm::cptr<CellSyncLFQueue> queue, vm::pptr<void> ppSignal)
{
	cellSync.Log("_cellSyncLFQueueGetSignalAddress(queue=*0x%x, ppSignal=**0x%x)", queue, ppSignal);

	if (!queue || !ppSignal)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	*ppSignal = queue->m_eaSignal;

	return CELL_OK;
}

s32 cellSyncLFQueueGetDirection(vm::cptr<CellSyncLFQueue> queue, vm::ptr<u32> direction)
{
	cellSync.Log("cellSyncLFQueueGetDirection(queue=*0x%x, direction=*0x%x)", queue, direction);

	if (!queue || !direction)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	*direction = queue->m_direction;

	return CELL_OK;
}

s32 cellSyncLFQueueGetEntrySize(vm::cptr<CellSyncLFQueue> queue, vm::ptr<u32> entry_size)
{
	cellSync.Log("cellSyncLFQueueGetEntrySize(queue=*0x%x, entry_size=*0x%x)", queue, entry_size);

	if (!queue || !entry_size)
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned())
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	*entry_size = queue->m_size;

	return CELL_OK;
}

s32 _cellSyncLFQueueAttachLv2EventQueue(vm::ptr<u32> spus, u32 num, vm::ptr<CellSyncLFQueue> queue)
{
	cellSync.Todo("_cellSyncLFQueueAttachLv2EventQueue(spus=*0x%x, num=%d, queue=*0x%x)", spus, num, queue);

	throw EXCEPTION("");
}

s32 _cellSyncLFQueueDetachLv2EventQueue(vm::ptr<u32> spus, u32 num, vm::ptr<CellSyncLFQueue> queue)
{
	cellSync.Todo("_cellSyncLFQueueDetachLv2EventQueue(spus=*0x%x, num=%d, queue=*0x%x)", spus, num, queue);

	throw EXCEPTION("");
}

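// Module registration: the on_error handler below translates CELL_SYNC_* return values
// (codes of the form 0x804101xx) into readable names when a registered function fails.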
Module<> cellSync("cellSync", []()
{
	// setup error handler
	cellSync.on_error = [](s64 value, ModuleFunc* func)
	{
		// get error name for CELL_SYNC errors
		auto get_error = [](u32 code) -> const char*
		{
			switch (code)
			{
			case CELL_SYNC_ERROR_AGAIN: return "CELL_SYNC_ERROR_AGAIN";
			case CELL_SYNC_ERROR_INVAL: return "CELL_SYNC_ERROR_INVAL";
			case CELL_SYNC_ERROR_NOSYS: return "CELL_SYNC_ERROR_NOSYS";
			case CELL_SYNC_ERROR_NOMEM: return "CELL_SYNC_ERROR_NOMEM";
			case CELL_SYNC_ERROR_SRCH: return "CELL_SYNC_ERROR_SRCH";
			case CELL_SYNC_ERROR_NOENT: return "CELL_SYNC_ERROR_NOENT";
			case CELL_SYNC_ERROR_NOEXEC: return "CELL_SYNC_ERROR_NOEXEC";
			case CELL_SYNC_ERROR_DEADLK: return "CELL_SYNC_ERROR_DEADLK";
			case CELL_SYNC_ERROR_PERM: return "CELL_SYNC_ERROR_PERM";
			case CELL_SYNC_ERROR_BUSY: return "CELL_SYNC_ERROR_BUSY";
			case CELL_SYNC_ERROR_ABORT: return "CELL_SYNC_ERROR_ABORT";
			case CELL_SYNC_ERROR_FAULT: return "CELL_SYNC_ERROR_FAULT";
			case CELL_SYNC_ERROR_CHILD: return "CELL_SYNC_ERROR_CHILD";
			case CELL_SYNC_ERROR_STAT: return "CELL_SYNC_ERROR_STAT";
			case CELL_SYNC_ERROR_ALIGN: return "CELL_SYNC_ERROR_ALIGN";
			}

			return "???";
		};

		// analyse error code
		if (u32 code = (value & 0xffffff00) == 0x80410100 ? static_cast<u32>(value) : 0)
		{
			cellSync.Error("%s() -> %s (0x%x)", func->name, get_error(code), code);
		}
	};

	REG_FUNC(cellSync, cellSyncMutexInitialize);
	REG_FUNC(cellSync, cellSyncMutexLock);
	REG_FUNC(cellSync, cellSyncMutexTryLock);
	REG_FUNC(cellSync, cellSyncMutexUnlock);

	REG_FUNC(cellSync, cellSyncBarrierInitialize);
	REG_FUNC(cellSync, cellSyncBarrierNotify);
	REG_FUNC(cellSync, cellSyncBarrierTryNotify);
	REG_FUNC(cellSync, cellSyncBarrierWait);
	REG_FUNC(cellSync, cellSyncBarrierTryWait);

	REG_FUNC(cellSync, cellSyncRwmInitialize);
	REG_FUNC(cellSync, cellSyncRwmRead);
	REG_FUNC(cellSync, cellSyncRwmTryRead);
	REG_FUNC(cellSync, cellSyncRwmWrite);
	REG_FUNC(cellSync, cellSyncRwmTryWrite);

	REG_FUNC(cellSync, cellSyncQueueInitialize);
	REG_FUNC(cellSync, cellSyncQueuePush);
	REG_FUNC(cellSync, cellSyncQueueTryPush);
	REG_FUNC(cellSync, cellSyncQueuePop);
	REG_FUNC(cellSync, cellSyncQueueTryPop);
	REG_FUNC(cellSync, cellSyncQueuePeek);
	REG_FUNC(cellSync, cellSyncQueueTryPeek);
	REG_FUNC(cellSync, cellSyncQueueSize);
	REG_FUNC(cellSync, cellSyncQueueClear);

	REG_FUNC(cellSync, cellSyncLFQueueGetEntrySize);
	REG_FUNC(cellSync, cellSyncLFQueueSize);
	REG_FUNC(cellSync, cellSyncLFQueueClear);
	REG_FUNC(cellSync, _cellSyncLFQueueCompletePushPointer2);
	REG_FUNC(cellSync, _cellSyncLFQueueGetPopPointer2);
	REG_FUNC(cellSync, _cellSyncLFQueueCompletePushPointer);
	REG_FUNC(cellSync, _cellSyncLFQueueAttachLv2EventQueue);
	REG_FUNC(cellSync, _cellSyncLFQueueGetPushPointer2);
	REG_FUNC(cellSync, _cellSyncLFQueueGetPopPointer);
	REG_FUNC(cellSync, _cellSyncLFQueueCompletePopPointer2);
	REG_FUNC(cellSync, _cellSyncLFQueueDetachLv2EventQueue);
	REG_FUNC(cellSync, cellSyncLFQueueInitialize);
	REG_FUNC(cellSync, _cellSyncLFQueueGetSignalAddress);
	REG_FUNC(cellSync, _cellSyncLFQueuePushBody);
	REG_FUNC(cellSync, cellSyncLFQueueGetDirection);
	REG_FUNC(cellSync, cellSyncLFQueueDepth);
	REG_FUNC(cellSync, _cellSyncLFQueuePopBody);
	REG_FUNC(cellSync, _cellSyncLFQueueGetPushPointer);
	REG_FUNC(cellSync, _cellSyncLFQueueCompletePopPointer);
});