// rpcsx/rpcs3/Emu/Cell/lv2/sys_spu.cpp


#include "stdafx.h"
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Crypto/unself.h"
#include "Loader/ELF.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/RawSPUThread.h"
#include "sys_interrupt.h"
#include "sys_event.h"
#include "sys_spu.h"
namespace vm { using namespace ps3; }
logs::channel sys_spu("sys_spu", logs::level::notice);
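// Load an SPU ELF: copy every PT_LOAD segment into guest memory at the given base address
// and return the entry point through spu_ep.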
void LoadSpuImage(const fs::file& stream, u32& spu_ep, u32 addr)
{
const spu_exec_object obj = stream;
if (obj != elf_error::ok)
{
fmt::throw_exception("Failed to load SPU image: %s" HERE, obj.get_error());
}
for (const auto& prog : obj.progs)
{
if (prog.p_type == 0x1 /* LOAD */)
{
std::memcpy(vm::base(addr + prog.p_vaddr), prog.bin.data(), prog.p_filesz);
}
}
spu_ep = obj.header.e_entry;
}
u32 LoadSpuImage(const fs::file& stream, u32& spu_ep)
{
const u32 alloc_size = 256 * 1024;
u32 spu_offset = (u32)vm::alloc(alloc_size, vm::main);
LoadSpuImage(stream, spu_ep, spu_offset);
return spu_offset;
}
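// Only validates the raw SPU limit; the requested counts are not stored here.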
error_code sys_spu_initialize(u32 max_usable_spu, u32 max_raw_spu)
{
sys_spu.warning("sys_spu_initialize(max_usable_spu=%d, max_raw_spu=%d)", max_usable_spu, max_raw_spu);
if (max_raw_spu > 5)
{
return CELL_EINVAL;
}
return CELL_OK;
}
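// Decrypts the (S)ELF at the given path and loads it as a single 256 KiB blob;
// real segment information is not generated yet (see the TODO below).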
error_code sys_spu_image_open(vm::ptr<sys_spu_image_t> img, vm::cptr<char> path)
{
sys_spu.warning("sys_spu_image_open(img=*0x%x, path=%s)", img, path);
const fs::file elf_file = decrypt_self(fs::file(vfs::get(path.get_ptr())));
if (!elf_file)
{
sys_spu.error("sys_spu_image_open() error: %s not found!", path);
return CELL_ENOENT;
}
u32 entry;
u32 offset = LoadSpuImage(elf_file, entry);
img->type = SYS_SPU_IMAGE_TYPE_USER;
img->entry_point = entry;
img->segs.set(offset); // TODO: writing actual segment info
img->nsegs = 1; // wrong value
return CELL_OK;
}
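// Creates an SPUThread inside an existing thread group slot and records its image and
// arguments; the group becomes INITIALIZED once every slot has been filled.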
error_code sys_spu_thread_initialize(vm::ptr<u32> thread, u32 group_id, u32 spu_num, vm::ptr<sys_spu_image_t> img, vm::ptr<sys_spu_thread_attribute> attr, vm::ptr<sys_spu_thread_argument> arg)
{
sys_spu.warning("sys_spu_thread_initialize(thread=*0x%x, group=0x%x, spu_num=%d, img=*0x%x, attr=*0x%x, arg=*0x%x)", thread, group_id, spu_num, img, attr, arg);
// Read thread name
const std::string thread_name(attr->name.get_ptr(), attr->name ? attr->name_len - 1 : 0);
const auto group = idm::get<lv2_spu_group>(group_id);
if (!group)
{
return CELL_ESRCH;
}
semaphore_lock lock(group->mutex);
if (spu_num >= group->threads.size())
{
return CELL_EINVAL;
}
if (group->threads[spu_num] || group->run_state != SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED)
{
return CELL_EBUSY;
}
if (u32 option = attr->option)
{
sys_spu.todo("Unimplemented SPU Thread options (0x%x)", option);
}
auto spu = idm::make_ptr<SPUThread>(thread_name, spu_num, group.get());
fxm::get_always<mfc_thread>()->add_spu(spu);
*thread = spu->id;
group->threads[spu_num] = std::move(spu);
group->args[spu_num] = {arg->arg1, arg->arg2, arg->arg3, arg->arg4};
group->images[spu_num] = img;
if (++group->init == group->num)
{
group->run_state = SPU_THREAD_GROUP_STATUS_INITIALIZED;
}
return CELL_OK;
}
error_code sys_spu_thread_set_argument(u32 id, vm::ptr<sys_spu_thread_argument> arg)
{
sys_spu.warning("sys_spu_thread_set_argument(id=0x%x, arg=*0x%x)", id, arg);
const auto thread = idm::get<SPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
const auto group = thread->group;
semaphore_lock lock(group->mutex);
group->args[thread->index].arg1 = arg->arg1;
group->args[thread->index].arg2 = arg->arg2;
group->args[thread->index].arg3 = arg->arg3;
group->args[thread->index].arg4 = arg->arg4;
return CELL_OK;
}
error_code sys_spu_thread_get_exit_status(u32 id, vm::ptr<u32> status)
{
sys_spu.warning("sys_spu_thread_get_exit_status(id=0x%x, status=*0x%x)", id, status);
const auto thread = idm::get<SPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
// TODO: check CELL_ESTAT condition
*status = thread->ch_out_mbox.pop(*thread);
return CELL_OK;
}
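// Note: the maximum thread count is currently hard-coded to 6 instead of being derived
// from sys_spu_initialize() (see the TODO below).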
error_code sys_spu_thread_group_create(vm::ptr<u32> id, u32 num, s32 prio, vm::ptr<sys_spu_thread_group_attribute> attr)
{
sys_spu.warning("sys_spu_thread_group_create(id=*0x%x, num=%d, prio=%d, attr=*0x%x)", id, num, prio, attr);
// TODO: max num value should be affected by sys_spu_initialize() settings
if (!num || num > 6 || prio < 16 || prio > 255)
{
return CELL_EINVAL;
}
if (attr->type)
{
sys_spu.todo("Unsupported SPU Thread Group type (0x%x)", attr->type);
}
*id = idm::make<lv2_spu_group>(std::string(attr->name.get_ptr(), attr->nsize - 1), num, prio, attr->type, attr->ct);
return CELL_OK;
}
error_code sys_spu_thread_group_destroy(u32 id)
{
sys_spu.warning("sys_spu_thread_group_destroy(id=0x%x)", id);
const auto group = idm::withdraw<lv2_spu_group>(id, [](lv2_spu_group& group) -> CellError
{
const auto _old = group.run_state.compare_and_swap(SPU_THREAD_GROUP_STATUS_INITIALIZED, SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED);
if (_old > SPU_THREAD_GROUP_STATUS_INITIALIZED)
{
return CELL_EBUSY;
}
return {};
});
if (!group)
{
return CELL_ESRCH;
}
if (group.ret)
{
return group.ret;
}
// Cleanup
for (auto& ptr : group->threads)
{
if (auto thread = std::move(ptr))
{
idm::remove<SPUThread>(thread->id);
}
}
fxm::check_unlocked<mfc_thread>()->add_spu(nullptr);
return CELL_OK;
}
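// Copies each thread's image into its local storage, loads r3-r6 with the stored
// arguments, delivers the run event and then resumes the SPU threads.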
error_code sys_spu_thread_group_start(ppu_thread& ppu, u32 id)
{
vm::temporary_unlock(ppu);
sys_spu.warning("sys_spu_thread_group_start(id=0x%x)", id);
const auto group = idm::get<lv2_spu_group>(id, [](lv2_spu_group& group)
{
// SPU_THREAD_GROUP_STATUS_READY state is not used
return group.run_state.compare_and_swap_test(SPU_THREAD_GROUP_STATUS_INITIALIZED, SPU_THREAD_GROUP_STATUS_RUNNING);
});
if (!group)
{
return CELL_ESRCH;
}
if (!group.ret)
{
return CELL_ESTAT;
}
semaphore_lock lock(group->mutex);
group->join_state = 0;
for (auto& thread : group->threads)
{
if (thread)
{
auto& args = group->args[thread->index];
auto& image = group->images[thread->index];
// Copy SPU image:
// TODO: use segment info
std::memcpy(vm::base(thread->offset), image->segs.get_ptr(), 256 * 1024);
thread->pc = image->entry_point;
thread->cpu_init();
thread->gpr[3] = v128::from64(0, args.arg1);
thread->gpr[4] = v128::from64(0, args.arg2);
thread->gpr[5] = v128::from64(0, args.arg3);
thread->gpr[6] = v128::from64(0, args.arg4);
thread->status.exchange(SPU_STATUS_RUNNING);
}
}
// Because SPU_THREAD_GROUP_STATUS_READY is not possible, run event is delivered immediately
// TODO: check data2 and data3
group->send_run_event(id, 0, 0);
for (auto& thread : group->threads)
{
if (thread)
{
thread->run();
}
}
return CELL_OK;
}
error_code sys_spu_thread_group_suspend(u32 id)
{
sys_spu.trace("sys_spu_thread_group_suspend(id=0x%x)", id);
const auto group = idm::get<lv2_spu_group>(id);
if (!group)
{
return CELL_ESRCH;
}
if (group->type & SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT) // this check may be inaccurate
{
return CELL_EINVAL;
}
semaphore_lock lock(group->mutex);
if (group->run_state <= SPU_THREAD_GROUP_STATUS_INITIALIZED || group->run_state == SPU_THREAD_GROUP_STATUS_STOPPED)
{
return CELL_ESTAT;
}
// SPU_THREAD_GROUP_STATUS_READY state is not used
if (group->run_state == SPU_THREAD_GROUP_STATUS_RUNNING)
{
group->run_state = SPU_THREAD_GROUP_STATUS_SUSPENDED;
}
else if (group->run_state == SPU_THREAD_GROUP_STATUS_WAITING)
{
group->run_state = SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED;
}
else if (group->run_state == SPU_THREAD_GROUP_STATUS_SUSPENDED || group->run_state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
{
return CELL_OK;
}
else
{
return CELL_ESTAT;
}
for (auto& thread : group->threads)
{
if (thread)
{
thread->state += cpu_flag::suspend;
}
}
return CELL_OK;
}
error_code sys_spu_thread_group_resume(u32 id)
{
sys_spu.trace("sys_spu_thread_group_resume(id=0x%x)", id);
const auto group = idm::get<lv2_spu_group>(id);
if (!group)
{
return CELL_ESRCH;
}
if (group->type & SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT) // this check may be inaccurate
{
return CELL_EINVAL;
}
semaphore_lock lock(group->mutex);
// SPU_THREAD_GROUP_STATUS_READY state is not used
if (group->run_state == SPU_THREAD_GROUP_STATUS_SUSPENDED)
{
group->run_state = SPU_THREAD_GROUP_STATUS_RUNNING;
}
else if (group->run_state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
{
group->run_state = SPU_THREAD_GROUP_STATUS_WAITING;
}
else
{
return CELL_ESTAT;
}
for (auto& thread : group->threads)
{
if (thread)
{
thread->state -= cpu_flag::suspend;
thread->notify();
}
}
return CELL_OK;
}
error_code sys_spu_thread_group_yield(u32 id)
{
sys_spu.trace("sys_spu_thread_group_yield(id=0x%x)", id);
const auto group = idm::get<lv2_spu_group>(id);
if (!group)
{
return CELL_ESRCH;
}
if (group->type & SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT) // this check may be inaccurate
{
return CELL_OK;
}
if (group->run_state != SPU_THREAD_GROUP_STATUS_RUNNING)
{
return CELL_ESTAT;
}
// SPU_THREAD_GROUP_STATUS_READY state is not used, so this function does nothing
return CELL_OK;
}
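// Accepts either a thread group ID or an SPU thread ID; in the latter case the thread must
// occupy the first used slot of its group (treated as the primary thread).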
error_code sys_spu_thread_group_terminate(u32 id, s32 value)
{
sys_spu.warning("sys_spu_thread_group_terminate(id=0x%x, value=0x%x)", id, value);
// The id can be either SPU Thread Group or SPU Thread
const auto thread = idm::get<SPUThread>(id);
const auto _group = idm::get<lv2_spu_group>(id);
const auto group = thread ? thread->group : _group.get();
if (!group && !thread)
{
return CELL_ESRCH;
}
if (thread)
{
for (auto& t : group->threads)
{
// find primary (?) thread and compare it with the one specified
if (t)
{
if (t == thread)
{
break;
}
else
{
return CELL_EPERM;
}
}
}
}
semaphore_lock lock(group->mutex);
if (group->run_state <= SPU_THREAD_GROUP_STATUS_INITIALIZED ||
group->run_state == SPU_THREAD_GROUP_STATUS_WAITING ||
group->run_state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
{
return CELL_ESTAT;
}
for (auto& thread : group->threads)
{
if (thread)
{
thread->state += cpu_flag::stop;
thread->notify();
}
}
group->run_state = SPU_THREAD_GROUP_STATUS_INITIALIZED;
group->exit_status = value;
group->join_state |= SPU_TGJSF_TERMINATED;
group->cv.notify_one();
return CELL_OK;
}
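// Blocks the calling PPU thread until every SPU thread in the group has stopped or the
// group is exited/terminated, then reports the join cause and exit status.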
error_code sys_spu_thread_group_join(ppu_thread& ppu, u32 id, vm::ptr<u32> cause, vm::ptr<u32> status)
{
vm::temporary_unlock(ppu);
sys_spu.warning("sys_spu_thread_group_join(id=0x%x, cause=*0x%x, status=*0x%x)", id, cause, status);
const auto group = idm::get<lv2_spu_group>(id);
if (!group)
{
return CELL_ESRCH;
}
u32 join_state = 0;
s32 exit_value = 0;
{
semaphore_lock lock(group->mutex);
if (group->run_state < SPU_THREAD_GROUP_STATUS_INITIALIZED)
{
return CELL_ESTAT;
}
if (group->join_state.fetch_or(SPU_TGJSF_IS_JOINING) & SPU_TGJSF_IS_JOINING)
{
// another PPU thread is joining this thread group
return CELL_EBUSY;
}
lv2_obj::sleep(ppu);
while ((group->join_state & ~SPU_TGJSF_IS_JOINING) == 0)
{
bool stopped = true;
for (auto& t : group->threads)
{
if (t)
{
if ((t->status & SPU_STATUS_STOPPED_BY_STOP) == 0)
{
stopped = false;
break;
}
}
}
if (stopped)
{
break;
}
// TODO
group->cv.wait(lock, 1000);
thread_ctrl::test();
}
join_state = group->join_state;
exit_value = group->exit_status;
group->join_state &= ~SPU_TGJSF_IS_JOINING;
group->run_state = SPU_THREAD_GROUP_STATUS_INITIALIZED; // hack
}
ppu.test_state();
switch (join_state & ~SPU_TGJSF_IS_JOINING)
{
case 0:
{
if (cause) *cause = SYS_SPU_THREAD_GROUP_JOIN_ALL_THREADS_EXIT;
break;
}
case SPU_TGJSF_GROUP_EXIT:
{
if (cause) *cause = SYS_SPU_THREAD_GROUP_JOIN_GROUP_EXIT;
break;
}
case SPU_TGJSF_TERMINATED:
{
if (cause) *cause = SYS_SPU_THREAD_GROUP_JOIN_TERMINATED;
break;
}
default:
{
fmt::throw_exception("Unexpected join_state" HERE);
}
}
if (status)
{
*status = group->exit_status;
}
return CELL_OK;
}
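// Local storage accessors: lsa must lie within the 256 KiB LS (0x40000 bytes) and be
// aligned to the access size (1, 2, 4 or 8 bytes).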
error_code sys_spu_thread_write_ls(u32 id, u32 lsa, u64 value, u32 type)
{
sys_spu.trace("sys_spu_thread_write_ls(id=0x%x, lsa=0x%05x, value=0x%llx, type=%d)", id, lsa, value, type);
const auto thread = idm::get<SPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
if (lsa >= 0x40000 || lsa + type > 0x40000 || lsa % type) // check range and alignment
{
return CELL_EINVAL;
}
const auto group = thread->group;
semaphore_lock lock(group->mutex);
if (group->run_state < SPU_THREAD_GROUP_STATUS_WAITING || group->run_state > SPU_THREAD_GROUP_STATUS_RUNNING)
{
return CELL_ESTAT;
}
switch (type)
{
case 1: thread->_ref<u8>(lsa) = (u8)value; break;
case 2: thread->_ref<u16>(lsa) = (u16)value; break;
case 4: thread->_ref<u32>(lsa) = (u32)value; break;
case 8: thread->_ref<u64>(lsa) = value; break;
default: return CELL_EINVAL;
}
return CELL_OK;
}
error_code sys_spu_thread_read_ls(u32 id, u32 lsa, vm::ptr<u64> value, u32 type)
{
sys_spu.trace("sys_spu_thread_read_ls(id=0x%x, lsa=0x%05x, value=*0x%x, type=%d)", id, lsa, value, type);
const auto thread = idm::get<SPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
if (lsa >= 0x40000 || lsa + type > 0x40000 || lsa % type) // check range and alignment
{
return CELL_EINVAL;
}
const auto group = thread->group;
semaphore_lock lock(group->mutex);
if (group->run_state < SPU_THREAD_GROUP_STATUS_WAITING || group->run_state > SPU_THREAD_GROUP_STATUS_RUNNING)
{
return CELL_ESTAT;
}
switch (type)
{
case 1: *value = thread->_ref<u8>(lsa); break;
case 2: *value = thread->_ref<u16>(lsa); break;
case 4: *value = thread->_ref<u32>(lsa); break;
case 8: *value = thread->_ref<u64>(lsa); break;
default: return CELL_EINVAL;
}
return CELL_OK;
}
error_code sys_spu_thread_write_spu_mb(u32 id, u32 value)
{
sys_spu.warning("sys_spu_thread_write_spu_mb(id=0x%x, value=0x%x)", id, value);
const auto thread = idm::get<SPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
const auto group = thread->group;
semaphore_lock lock(group->mutex);
if (group->run_state < SPU_THREAD_GROUP_STATUS_WAITING || group->run_state > SPU_THREAD_GROUP_STATUS_RUNNING)
{
return CELL_ESTAT;
}
thread->ch_in_mbox.push(*thread, value);
return CELL_OK;
}
error_code sys_spu_thread_set_spu_cfg(u32 id, u64 value)
{
sys_spu.warning("sys_spu_thread_set_spu_cfg(id=0x%x, value=0x%x)", id, value);
const auto thread = idm::get<SPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
if (value > 3)
{
return CELL_EINVAL;
}
thread->snr_config = value;
return CELL_OK;
}
error_code sys_spu_thread_get_spu_cfg(u32 id, vm::ptr<u64> value)
{
sys_spu.warning("sys_spu_thread_get_spu_cfg(id=0x%x, value=*0x%x)", id, value);
const auto thread = idm::get<SPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
*value = thread->snr_config;
return CELL_OK;
}
error_code sys_spu_thread_write_snr(u32 id, u32 number, u32 value)
{
sys_spu.trace("sys_spu_thread_write_snr(id=0x%x, number=%d, value=0x%x)", id, number, value);
const auto thread = idm::get<SPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
if (number > 1)
{
return CELL_EINVAL;
}
//if (group->state < SPU_THREAD_GROUP_STATUS_WAITING || group->state > SPU_THREAD_GROUP_STATUS_RUNNING) // ???
//{
// return CELL_ESTAT;
//}
thread->push_snr(number, value);
return CELL_OK;
}
error_code sys_spu_thread_group_connect_event(u32 id, u32 eq, u32 et)
{
sys_spu.warning("sys_spu_thread_group_connect_event(id=0x%x, eq=0x%x, et=%d)", id, eq, et);
const auto group = idm::get<lv2_spu_group>(id);
const auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
if (!group || !queue)
{
return CELL_ESRCH;
}
semaphore_lock lock(group->mutex);
switch (et)
{
case SYS_SPU_THREAD_GROUP_EVENT_RUN:
{
if (!group->ep_run.expired())
{
return CELL_EBUSY;
}
group->ep_run = queue;
break;
}
case SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION:
{
if (!group->ep_exception.expired())
{
return CELL_EBUSY;
}
group->ep_exception = queue;
break;
}
case SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE:
{
if (!group->ep_sysmodule.expired())
{
return CELL_EBUSY;
}
group->ep_sysmodule = queue;
break;
}
default:
{
sys_spu.error("sys_spu_thread_group_connect_event(): unknown event type (%d)", et);
return CELL_EINVAL;
}
}
return CELL_OK;
}
error_code sys_spu_thread_group_disconnect_event(u32 id, u32 et)
{
sys_spu.warning("sys_spu_thread_group_disconnect_event(id=0x%x, et=%d)", id, et);
const auto group = idm::get<lv2_spu_group>(id);
if (!group)
{
return CELL_ESRCH;
}
semaphore_lock lock(group->mutex);
switch (et)
{
case SYS_SPU_THREAD_GROUP_EVENT_RUN:
{
if (group->ep_run.expired())
{
return CELL_ENOTCONN;
}
group->ep_run.reset();
break;
}
case SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION:
{
if (group->ep_exception.expired())
{
return CELL_ENOTCONN;
}
group->ep_exception.reset();
break;
}
case SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE:
{
if (group->ep_sysmodule.expired())
{
return CELL_ENOTCONN;
}
group->ep_sysmodule.reset();
break;
}
default:
{
sys_spu.error("sys_spu_thread_group_disconnect_event(): unknown event type (%d)", et);
return CELL_EINVAL;
}
}
return CELL_OK;
}
error_code sys_spu_thread_connect_event(u32 id, u32 eq, u32 et, u8 spup)
{
sys_spu.warning("sys_spu_thread_connect_event(id=0x%x, eq=0x%x, et=%d, spup=%d)", id, eq, et, spup);
const auto thread = idm::get<SPUThread>(id);
const auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
if (!thread || !queue)
{
return CELL_ESRCH;
}
if (et != SYS_SPU_THREAD_EVENT_USER || spup > 63 || queue->type != SYS_PPU_QUEUE)
{
sys_spu.error("sys_spu_thread_connect_event(): invalid arguments (et=%d, spup=%d, queue->type=%d)", et, spup, queue->type);
return CELL_EINVAL;
}
semaphore_lock lock(thread->group->mutex);
auto& port = thread->spup[spup];
if (!port.expired())
{
return CELL_EISCONN;
}
port = queue;
return CELL_OK;
}
error_code sys_spu_thread_disconnect_event(u32 id, u32 et, u8 spup)
{
sys_spu.warning("sys_spu_thread_disconnect_event(id=0x%x, et=%d, spup=%d)", id, et, spup);
const auto thread = idm::get<SPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
if (et != SYS_SPU_THREAD_EVENT_USER || spup > 63)
{
sys_spu.error("sys_spu_thread_disconnect_event(): invalid arguments (et=%d, spup=%d)", et, spup);
return CELL_EINVAL;
}
semaphore_lock lock(thread->group->mutex);
auto& port = thread->spup[spup];
if (port.expired())
{
return CELL_ENOTCONN;
}
port.reset();
return CELL_OK;
}
error_code sys_spu_thread_bind_queue(u32 id, u32 spuq, u32 spuq_num)
{
sys_spu.warning("sys_spu_thread_bind_queue(id=0x%x, spuq=0x%x, spuq_num=0x%x)", id, spuq, spuq_num);
const auto thread = idm::get<SPUThread>(id);
const auto queue = idm::get<lv2_obj, lv2_event_queue>(spuq);
if (!thread || !queue)
{
return CELL_ESRCH;
}
if (queue->type != SYS_SPU_QUEUE)
{
return CELL_EINVAL;
}
semaphore_lock lock(thread->group->mutex);
for (auto& v : thread->spuq)
{
if (auto q = v.second.lock())
{
if (v.first == spuq_num || q == queue)
{
return CELL_EBUSY;
}
}
}
for (auto& v : thread->spuq)
{
if (v.second.expired())
{
v.first = spuq_num;
v.second = queue;
return CELL_OK;
}
}
return CELL_EAGAIN;
}
error_code sys_spu_thread_unbind_queue(u32 id, u32 spuq_num)
{
sys_spu.warning("sys_spu_thread_unbind_queue(id=0x%x, spuq_num=0x%x)", id, spuq_num);
const auto thread = idm::get<SPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
semaphore_lock lock(thread->group->mutex);
for (auto& v : thread->spuq)
{
if (v.first == spuq_num && !v.second.expired())
{
v.second.reset();
return CELL_OK;
}
}
return CELL_ESRCH;
}
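// Picks the lowest requested SPU port that is free on every thread of the group and
// connects it to the given event queue; the chosen port is returned via spup.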
error_code sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq, u64 req, vm::ptr<u8> spup)
{
sys_spu.warning("sys_spu_thread_group_connect_event_all_threads(id=0x%x, eq=0x%x, req=0x%llx, spup=*0x%x)", id, eq, req, spup);
const auto group = idm::get<lv2_spu_group>(id);
const auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
if (!group || !queue)
{
return CELL_ESRCH;
}
if (!req)
{
return CELL_EINVAL;
}
semaphore_lock lock(group->mutex);
if (group->run_state < SPU_THREAD_GROUP_STATUS_INITIALIZED)
{
return CELL_ESTAT;
}
u8 port = 0; // SPU Port number
for (; port < 64; port++)
{
if (!(req & (1ull << port)))
{
continue;
}
bool found = true;
for (auto& t : group->threads)
{
if (t)
{
if (!t->spup[port].expired())
{
found = false;
break;
}
}
}
if (found)
{
break;
}
}
if (port == 64)
{
return CELL_EISCONN;
}
for (auto& t : group->threads)
{
if (t)
{
t->spup[port] = queue;
}
}
*spup = port;
return CELL_OK;
}
error_code sys_spu_thread_group_disconnect_event_all_threads(u32 id, u8 spup)
{
sys_spu.warning("sys_spu_thread_group_disconnect_event_all_threads(id=0x%x, spup=%d)", id, spup);
const auto group = idm::get<lv2_spu_group>(id);
if (!group)
{
return CELL_ESRCH;
}
if (spup > 63)
{
return CELL_EINVAL;
}
semaphore_lock lock(group->mutex);
for (auto& t : group->threads)
{
if (t)
{
t->spup[spup].reset();
}
}
return CELL_OK;
}
error_code sys_raw_spu_create(vm::ptr<u32> id, vm::ptr<void> attr)
{
sys_spu.warning("sys_raw_spu_create(id=*0x%x, attr=*0x%x)", id, attr);
// TODO: check number set by sys_spu_initialize()
const auto thread = idm::make_ptr<RawSPUThread>("");
if (!thread)
{
return CELL_EAGAIN;
}
thread->cpu_init();
const u32 _id = thread->index;
fxm::get_always<mfc_thread>()->add_spu(std::move(thread));
*id = _id;
return CELL_OK;
}
error_code sys_raw_spu_destroy(ppu_thread& ppu, u32 id)
{
sys_spu.warning("sys_raw_spu_destroy(id=%d)", id);
const auto thread = idm::get<RawSPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
// TODO: CELL_EBUSY is not returned
// Stop thread
thread->state += cpu_flag::stop;
// Kernel objects which must be removed
std::unordered_map<lv2_obj*, u32, pointer_hash<lv2_obj, alignof(void*)>> to_remove;
// Clear interrupt handlers
for (auto& intr : thread->int_ctrl)
{
if (intr.tag)
{
if (auto handler = intr.tag->handler.lock())
{
// SLEEP
handler->join();
to_remove.emplace(handler.get(), 0);
}
to_remove.emplace(intr.tag.get(), 0);
}
}
// Scan all kernel objects to determine IDs
idm::select<lv2_obj>([&](u32 id, lv2_obj& obj)
{
const auto found = to_remove.find(&obj);
if (found != to_remove.end())
{
found->second = id;
}
});
// Remove IDs
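// The upper byte of an ID encodes its subtype (0xa = lv2_int_tag, 0xb = lv2_int_serv)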
for (auto&& pair : to_remove)
{
if (pair.second >> 24 == 0xa)
idm::remove<lv2_obj, lv2_int_tag>(pair.second);
if (pair.second >> 24 == 0xb)
idm::remove<lv2_obj, lv2_int_serv>(pair.second);
}
idm::remove<RawSPUThread>(thread->id);
fxm::check_unlocked<mfc_thread>()->add_spu(nullptr);
return CELL_OK;
}
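// Creates an interrupt tag for interrupt class 0 or 2 of a raw SPU; fails with EAGAIN if
// the class already has a tag attached.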
error_code sys_raw_spu_create_interrupt_tag(u32 id, u32 class_id, u32 hwthread, vm::ptr<u32> intrtag)
{
sys_spu.warning("sys_raw_spu_create_interrupt_tag(id=%d, class_id=%d, hwthread=0x%x, intrtag=*0x%x)", id, class_id, hwthread, intrtag);
if (class_id != 0 && class_id != 2)
{
return CELL_EINVAL;
}
CellError error = {};
const auto tag = idm::import<lv2_obj, lv2_int_tag>([&]()
{
std::shared_ptr<lv2_int_tag> result;
auto thread = idm::check_unlocked<RawSPUThread>(id);
if (!thread)
{
error = CELL_ESRCH;
return result;
}
auto& int_ctrl = thread->int_ctrl[class_id];
if (int_ctrl.tag)
{
error = CELL_EAGAIN;
return result;
}
result = std::make_shared<lv2_int_tag>();
int_ctrl.tag = result;
return result;
});
if (tag)
{
*intrtag = tag;
return CELL_OK;
}
return error;
}
error_code sys_raw_spu_set_int_mask(u32 id, u32 class_id, u64 mask)
{
sys_spu.trace("sys_raw_spu_set_int_mask(id=%d, class_id=%d, mask=0x%llx)", id, class_id, mask);
if (class_id != 0 && class_id != 2)
{
return CELL_EINVAL;
}
const auto thread = idm::get<RawSPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
thread->int_ctrl[class_id].mask.exchange(mask);
return CELL_OK;
}
error_code sys_raw_spu_get_int_mask(u32 id, u32 class_id, vm::ptr<u64> mask)
{
sys_spu.trace("sys_raw_spu_get_int_mask(id=%d, class_id=%d, mask=*0x%x)", id, class_id, mask);
if (class_id != 0 && class_id != 2)
{
return CELL_EINVAL;
}
const auto thread = idm::get<RawSPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
*mask = thread->int_ctrl[class_id].mask;
return CELL_OK;
}
error_code sys_raw_spu_set_int_stat(u32 id, u32 class_id, u64 stat)
{
sys_spu.trace("sys_raw_spu_set_int_stat(id=%d, class_id=%d, stat=0x%llx)", id, class_id, stat);
if (class_id != 0 && class_id != 2)
{
return CELL_EINVAL;
}
const auto thread = idm::get<RawSPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
thread->int_ctrl[class_id].clear(stat);
return CELL_OK;
}
error_code sys_raw_spu_get_int_stat(u32 id, u32 class_id, vm::ptr<u64> stat)
{
sys_spu.trace("sys_raw_spu_get_int_stat(id=%d, class_id=%d, stat=*0x%x)", id, class_id, stat);
if (class_id != 0 && class_id != 2)
{
return CELL_EINVAL;
}
const auto thread = idm::get<RawSPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
*stat = thread->int_ctrl[class_id].stat;
return CELL_OK;
}
error_code sys_raw_spu_read_puint_mb(u32 id, vm::ptr<u32> value)
{
sys_spu.trace("sys_raw_spu_read_puint_mb(id=%d, value=*0x%x)", id, value);
const auto thread = idm::get<RawSPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
*value = thread->ch_out_intr_mbox.pop(*thread);
return CELL_OK;
}
error_code sys_raw_spu_set_spu_cfg(u32 id, u32 value)
{
sys_spu.trace("sys_raw_spu_set_spu_cfg(id=%d, value=0x%x)", id, value);
if (value > 3)
{
fmt::throw_exception("Unexpected value (0x%x)" HERE, value);
}
const auto thread = idm::get<RawSPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
thread->snr_config = value;
return CELL_OK;
}
error_code sys_raw_spu_get_spu_cfg(u32 id, vm::ptr<u32> value)
{
sys_spu.trace("sys_raw_spu_get_spu_afg(id=%d, value=*0x%x)", id, value);
const auto thread = idm::get<RawSPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
*value = (u32)thread->snr_config;
return CELL_OK;
}