#include "stdafx.h" #include "sys_spu.h" #include "Emu/System.h" #include "Emu/IdManager.h" #include "Crypto/unself.h" #include "Crypto/unedat.h" #include "Crypto/sha1.h" #include "Loader/ELF.h" #include "Utilities/bin_patch.h" #include "Emu/Cell/ErrorCodes.h" #include "Emu/Cell/PPUThread.h" #include "Emu/Cell/RawSPUThread.h" #include "sys_interrupt.h" #include "sys_process.h" #include "sys_mmapper.h" #include "sys_event.h" LOG_CHANNEL(sys_spu); void sys_spu_image::load(const fs::file& stream) { const spu_exec_object obj{stream, 0, elf_opt::no_sections + elf_opt::no_data}; if (obj != elf_error::ok) { fmt::throw_exception("Failed to load SPU image: %s" HERE, obj.get_error()); } for (const auto& shdr : obj.shdrs) { LOG_NOTICE(SPU, "** Section: sh_type=0x%x, addr=0x%llx, size=0x%llx, flags=0x%x", shdr.sh_type, shdr.sh_addr, shdr.sh_size, shdr.sh_flags); } for (const auto& prog : obj.progs) { LOG_NOTICE(SPU, "** Segment: p_type=0x%x, p_vaddr=0x%llx, p_filesz=0x%llx, p_memsz=0x%llx, flags=0x%x", prog.p_type, prog.p_vaddr, prog.p_filesz, prog.p_memsz, prog.p_flags); if (prog.p_type != SYS_SPU_SEGMENT_TYPE_COPY && prog.p_type != SYS_SPU_SEGMENT_TYPE_INFO) { LOG_ERROR(SPU, "Unknown program type (0x%x)", prog.p_type); } } type = SYS_SPU_IMAGE_TYPE_KERNEL; nsegs = sys_spu_image::get_nsegs(obj.progs); const u32 mem_size = nsegs * sizeof(sys_spu_segment) + ::size32(stream); segs = vm::cast(vm::alloc(mem_size, vm::main)); // Write ID and save entry entry_point = idm::make(+obj.header.e_entry); const u32 src = segs.addr() + nsegs * sizeof(sys_spu_segment); stream.seek(0); stream.read(vm::base(src), stream.size()); if (nsegs < 0 || sys_spu_image::fill(segs, nsegs, obj.progs, src) != nsegs) { fmt::throw_exception("Failed to load SPU segments (%d)" HERE, nsegs); } vm::page_protect(segs.addr(), ::align(mem_size, 4096), 0, 0, vm::page_writable); } void sys_spu_image::free() { if (type == SYS_SPU_IMAGE_TYPE_KERNEL) { vm::dealloc_verbose_nothrow(segs.addr(), vm::main); } } void sys_spu_image::deploy(u32 loc, sys_spu_segment* segs, u32 nsegs) { // Segment info dump std::string dump; // Executable hash sha1_context sha; sha1_starts(&sha); u8 sha1_hash[20]; for (u32 i = 0; i < nsegs; i++) { auto& seg = segs[i]; fmt::append(dump, "\n\t[%d] t=0x%x, ls=0x%x, size=0x%x, addr=0x%x", i, seg.type, seg.ls, seg.size, seg.addr); sha1_update(&sha, reinterpret_cast(&seg.type), sizeof(seg.type)); // Hash big-endian values if (seg.type == SYS_SPU_SEGMENT_TYPE_COPY) { std::memcpy(vm::base(loc + seg.ls), vm::base(seg.addr), seg.size); sha1_update(&sha, reinterpret_cast(&seg.size), sizeof(seg.size)); sha1_update(&sha, reinterpret_cast(&seg.ls), sizeof(seg.ls)); sha1_update(&sha, vm::_ptr(seg.addr), seg.size); } else if (seg.type == SYS_SPU_SEGMENT_TYPE_FILL) { if ((seg.ls | seg.size) % 4) { LOG_ERROR(SPU, "Unaligned SPU FILL type segment (ls=0x%x, size=0x%x)", seg.ls, seg.size); } std::fill_n(vm::_ptr(loc + seg.ls), seg.size / 4, seg.addr); sha1_update(&sha, reinterpret_cast(&seg.size), sizeof(seg.size)); sha1_update(&sha, reinterpret_cast(&seg.ls), sizeof(seg.ls)); sha1_update(&sha, reinterpret_cast(&seg.addr), sizeof(seg.addr)); } else if (seg.type == SYS_SPU_SEGMENT_TYPE_INFO) { const be_t size = seg.size + 0x14; // Workaround sha1_update(&sha, reinterpret_cast(&size), sizeof(size)); } } sha1_finish(&sha, sha1_hash); // Format patch name std::string hash("SPU-0000000000000000000000000000000000000000"); for (u32 i = 0; i < sizeof(sha1_hash); i++) { constexpr auto pal = "0123456789abcdef"; hash[4 + i * 2] = pal[sha1_hash[i] >> 4]; hash[5 + i * 2] = pal[sha1_hash[i] & 15]; } // Apply the patch auto applied = g_fxo->get()->apply(hash, vm::_ptr(loc)); if (!Emu.GetTitleID().empty()) { // Alternative patch applied += g_fxo->get()->apply(Emu.GetTitleID() + '-' + hash, vm::_ptr(loc)); } LOG_NOTICE(LOADER, "Loaded SPU image: %s (<- %u)%s", hash, applied, dump); } // Get spu thread ptr, returns group ptr as well for refcounting std::pair*, std::shared_ptr> lv2_spu_group::get_thread(u32 id) { if (id >= 0x06000000) { // thread index is out of range (5 max) return {}; } // Bits 0-23 contain group id (without id base) decltype(get_thread(0)) res{nullptr, idm::get((id & 0xFFFFFF) | (lv2_spu_group::id_base & ~0xFFFFFF))}; // Bits 24-31 contain thread index within the group const u32 index = id >> 24; if (auto group = res.second.get(); group && group->init > index) { res.first = group->threads[index].get(); } return res; } error_code sys_spu_initialize(ppu_thread& ppu, u32 max_usable_spu, u32 max_raw_spu) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_initialize(max_usable_spu=%d, max_raw_spu=%d)", max_usable_spu, max_raw_spu); if (max_raw_spu > 5) { return CELL_EINVAL; } return CELL_OK; } error_code _sys_spu_image_get_information(ppu_thread& ppu, vm::ptr img, vm::ptr entry_point, vm::ptr nsegs) { vm::temporary_unlock(ppu); sys_spu.warning("_sys_spu_image_get_information(img=*0x%x, entry_point=*0x%x, nsegs=*0x%x)", img, entry_point, nsegs); if (img->type != SYS_SPU_IMAGE_TYPE_KERNEL) { return CELL_EINVAL; } const auto image = idm::get(img->entry_point); if (!image) { return CELL_ESRCH; } *entry_point = image->e_entry; *nsegs = img->nsegs; return CELL_OK; } error_code sys_spu_image_open(ppu_thread& ppu, vm::ptr img, vm::cptr path) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_image_open(img=*0x%x, path=%s)", img, path); const fs::file elf_file = decrypt_self(fs::file(vfs::get(path.get_ptr())), g_fxo->get()->devKlic.data()); if (!elf_file) { sys_spu.error("sys_spu_image_open() error: failed to open %s!", path); return CELL_ENOENT; } img->load(elf_file); return CELL_OK; } error_code _sys_spu_image_import(ppu_thread& ppu, vm::ptr img, u32 src, u32 size, u32 arg4) { vm::temporary_unlock(ppu); sys_spu.warning("_sys_spu_image_import(img=*0x%x, src=*0x%x, size=0x%x, arg4=0x%x)", img, src, size, arg4); img->load(fs::file{vm::base(src), size}); return CELL_OK; } error_code _sys_spu_image_close(ppu_thread& ppu, vm::ptr img) { vm::temporary_unlock(ppu); sys_spu.warning("_sys_spu_image_close(img=*0x%x)", img); if (img->type != SYS_SPU_IMAGE_TYPE_KERNEL) { return CELL_EINVAL; } if (!idm::remove(img->entry_point)) { return CELL_ESRCH; } vm::dealloc(img->segs.addr(), vm::main); return CELL_OK; } error_code _sys_spu_image_get_segments(ppu_thread& ppu, vm::ptr img, vm::ptr segments, s32 nseg) { vm::temporary_unlock(ppu); sys_spu.error("_sys_spu_image_get_segments(img=*0x%x, segments=*0x%x, nseg=%d)", img, segments, nseg); // TODO: apply SPU patches std::memcpy(segments.get_ptr(), img->segs.get_ptr(), sizeof(sys_spu_segment) * nseg); return CELL_OK; } error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr thread, u32 group_id, u32 spu_num, vm::ptr img, vm::ptr attr, vm::ptr arg) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_initialize(thread=*0x%x, group=0x%x, spu_num=%d, img=*0x%x, attr=*0x%x, arg=*0x%x)", thread, group_id, spu_num, img, attr, arg); if (attr->name_len > 0x80 || attr->option & ~(SYS_SPU_THREAD_OPTION_DEC_SYNC_TB_ENABLE | SYS_SPU_THREAD_OPTION_ASYNC_INTR_ENABLE)) { return CELL_EINVAL; } if (img->type != SYS_SPU_IMAGE_TYPE_KERNEL && img->type != SYS_SPU_IMAGE_TYPE_USER) { return CELL_EINVAL; } sys_spu_image image = *img; if (img->type == SYS_SPU_IMAGE_TYPE_KERNEL) { const auto handle = idm::get(img->entry_point); if (!handle) { return CELL_ESRCH; } // Save actual entry point image.entry_point = handle->e_entry; } // Read thread name const std::string thread_name(attr->name.get_ptr(), std::max(attr->name_len, 1) - 1); const auto group = idm::get(group_id); if (!group) { return CELL_ESRCH; } if (spu_num >= group->threads_map.size()) { return CELL_EINVAL; } std::lock_guard lock(group->mutex); if (group->threads_map[spu_num] != -1 || group->run_state != SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED) { return CELL_EBUSY; } if (u32 option = attr->option) { sys_spu.warning("Unimplemented SPU Thread options (0x%x)", option); } const vm::addr_t ls_addr{verify("SPU LS" HERE, vm::alloc(0x80000, vm::main))}; const u32 inited = group->init; const u32 tid = (inited << 24) | (group_id & 0xffffff); verify(HERE), idm::import>([&]() { std::string full_name = fmt::format("SPU[0x%07x] Thread", tid); if (!thread_name.empty()) { fmt::append(full_name, " (%s)", thread_name); } const auto spu = std::make_shared>(full_name, ls_addr, group.get(), spu_num, thread_name, tid); group->threads[inited] = spu; group->threads_map[spu_num] = static_cast(inited); return spu; }); *thread = tid; group->args[inited] = {arg->arg1, arg->arg2, arg->arg3, arg->arg4}; group->imgs[inited].first = image; group->imgs[inited].second.assign(img->segs.get_ptr(), img->segs.get_ptr() + img->nsegs); if (++group->init == group->max_num) { if (g_cfg.core.max_spurs_threads < 6 && group->max_num > 0u + g_cfg.core.max_spurs_threads) { if (group->name.size() >= 20 && group->name.compare(group->name.size() - 20, 20, "CellSpursKernelGroup", 20) == 0) { // Hack: don't run more SPURS threads than specified. group->max_run = g_cfg.core.max_spurs_threads; LOG_SUCCESS(SPU, "HACK: '%s' (0x%x) limited to %u threads.", group->name, group_id, +g_cfg.core.max_spurs_threads); } } group->run_state = SPU_THREAD_GROUP_STATUS_INITIALIZED; } sys_spu.warning(u8"sys_spu_thread_initialize(): Thread “%s” created (id=0x%x)", thread_name, tid); return CELL_OK; } error_code sys_spu_thread_set_argument(ppu_thread& ppu, u32 id, vm::ptr arg) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_set_argument(id=0x%x, arg=*0x%x)", id, arg); const auto [thread, group] = lv2_spu_group::get_thread(id); if (UNLIKELY(!thread)) { return CELL_ESRCH; } std::lock_guard lock(group->mutex); group->args[id >> 24] = {arg->arg1, arg->arg2, arg->arg3, arg->arg4}; return CELL_OK; } error_code sys_spu_thread_get_exit_status(ppu_thread& ppu, u32 id, vm::ptr status) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_get_exit_status(id=0x%x, status=*0x%x)", id, status); const auto [thread, group] = lv2_spu_group::get_thread(id); if (UNLIKELY(!thread)) { return CELL_ESRCH; } if (thread->status & SPU_STATUS_STOPPED_BY_STOP) { *status = thread->ch_out_mbox.get_value(); return CELL_OK; } return CELL_ESTAT; } error_code sys_spu_thread_group_create(ppu_thread& ppu, vm::ptr id, u32 num, s32 prio, vm::ptr attr) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_group_create(id=*0x%x, num=%d, prio=%d, attr=*0x%x)", id, num, prio, attr); // TODO: max num value should be affected by sys_spu_initialize() settings const s32 min_prio = g_ps3_process_info.has_root_perm() ? 0 : 16; if (attr->nsize > 0x80 || !num || num > 6 || ((prio < min_prio || prio > 255) && (attr->type != SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT && attr->type != SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM))) { return CELL_EINVAL; } if (attr->type) { sys_spu.warning("sys_spu_thread_group_create(): SPU Thread Group type (0x%x)", attr->type); } const auto group = idm::make_ptr(std::string(attr->name.get_ptr(), std::max(attr->nsize, 1) - 1), num, prio, attr->type, attr->ct); if (!group) { return CELL_EAGAIN; } *id = idm::last_id(); sys_spu.warning(u8"sys_spu_thread_group_create(): Thread group “%s” created (id=0x%x)", group->name, idm::last_id()); return CELL_OK; } error_code sys_spu_thread_group_destroy(ppu_thread& ppu, u32 id) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_group_destroy(id=0x%x)", id); const auto group = idm::withdraw(id, [](lv2_spu_group& group) -> CellError { const auto _old = group.run_state.compare_and_swap(SPU_THREAD_GROUP_STATUS_INITIALIZED, SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED); if (_old > SPU_THREAD_GROUP_STATUS_INITIALIZED) { return CELL_EBUSY; } return {}; }); if (!group) { return CELL_ESRCH; } if (group.ret) { return group.ret; } for (const auto& t : group->threads) { if (auto thread = t.get()) { // Remove ID from IDM (destruction will occur in group destructor) idm::remove>(thread->id); } } return CELL_OK; } error_code sys_spu_thread_group_start(ppu_thread& ppu, u32 id) { vm::temporary_unlock(ppu); sys_spu.trace("sys_spu_thread_group_start(id=0x%x)", id); const auto group = idm::get(id, [](lv2_spu_group& group) { // SPU_THREAD_GROUP_STATUS_READY state is not used return group.run_state.compare_and_swap_test(SPU_THREAD_GROUP_STATUS_INITIALIZED, SPU_THREAD_GROUP_STATUS_RUNNING); }); if (!group) { return CELL_ESRCH; } if (!group.ret) { return CELL_ESTAT; } std::lock_guard lock(group->mutex); const u32 max_threads = group->max_run; group->join_state = 0; group->running = max_threads; for (auto& thread : group->threads) { if (thread) { auto& args = group->args[thread->lv2_id >> 24]; auto& img = group->imgs[thread->lv2_id >> 24]; sys_spu_image::deploy(thread->offset, img.second.data(), img.first.nsegs); thread->cpu_init(); thread->npc = img.first.entry_point; thread->gpr[3] = v128::from64(0, args[0]); thread->gpr[4] = v128::from64(0, args[1]); thread->gpr[5] = v128::from64(0, args[2]); thread->gpr[6] = v128::from64(0, args[3]); thread->status.exchange(SPU_STATUS_RUNNING); } } // Because SPU_THREAD_GROUP_STATUS_READY is not possible, run event is delivered immediately // TODO: check data2 and data3 group->send_run_event(id, 0, 0); u32 ran_threads = max_threads; for (auto& thread : group->threads) { if (!ran_threads) { break; } if (thread && ran_threads--) { thread->state -= cpu_flag::stop; thread_ctrl::notify(*thread); } } return CELL_OK; } error_code sys_spu_thread_group_suspend(ppu_thread& ppu, u32 id) { vm::temporary_unlock(ppu); sys_spu.trace("sys_spu_thread_group_suspend(id=0x%x)", id); const auto group = idm::get(id); if (!group) { return CELL_ESRCH; } if (group->type & SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT) // this check may be inaccurate { return CELL_EINVAL; } std::lock_guard lock(group->mutex); if (group->run_state <= SPU_THREAD_GROUP_STATUS_INITIALIZED || group->run_state == SPU_THREAD_GROUP_STATUS_STOPPED) { return CELL_ESTAT; } // SPU_THREAD_GROUP_STATUS_READY state is not used if (group->run_state == SPU_THREAD_GROUP_STATUS_RUNNING) { group->run_state = SPU_THREAD_GROUP_STATUS_SUSPENDED; } else if (group->run_state == SPU_THREAD_GROUP_STATUS_WAITING) { group->run_state = SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED; } else if (group->run_state == SPU_THREAD_GROUP_STATUS_SUSPENDED || group->run_state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED) { return CELL_OK; } else { return CELL_ESTAT; } for (auto& thread : group->threads) { if (thread) { thread->state += cpu_flag::suspend; } } return CELL_OK; } error_code sys_spu_thread_group_resume(ppu_thread& ppu, u32 id) { vm::temporary_unlock(ppu); sys_spu.trace("sys_spu_thread_group_resume(id=0x%x)", id); const auto group = idm::get(id); if (!group) { return CELL_ESRCH; } if (group->type & SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT) // this check may be inaccurate { return CELL_EINVAL; } std::lock_guard lock(group->mutex); // SPU_THREAD_GROUP_STATUS_READY state is not used if (group->run_state == SPU_THREAD_GROUP_STATUS_SUSPENDED) { group->run_state = SPU_THREAD_GROUP_STATUS_RUNNING; } else if (group->run_state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED) { group->run_state = SPU_THREAD_GROUP_STATUS_WAITING; } else { return CELL_ESTAT; } for (auto& thread : group->threads) { if (thread) { thread->state -= cpu_flag::suspend; thread_ctrl::notify(*thread); } } return CELL_OK; } error_code sys_spu_thread_group_yield(ppu_thread& ppu, u32 id) { vm::temporary_unlock(ppu); sys_spu.trace("sys_spu_thread_group_yield(id=0x%x)", id); const auto group = idm::get(id); if (!group) { return CELL_ESRCH; } if (group->type & SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT) // this check may be inaccurate { return CELL_OK; } if (group->run_state != SPU_THREAD_GROUP_STATUS_RUNNING) { return CELL_ESTAT; } // SPU_THREAD_GROUP_STATUS_READY state is not used, so this function does nothing return CELL_OK; } error_code sys_spu_thread_group_terminate(ppu_thread& ppu, u32 id, s32 value) { vm::temporary_unlock(ppu); sys_spu.trace("sys_spu_thread_group_terminate(id=0x%x, value=0x%x)", id, value); const auto group = idm::get(id); if (!group) { return CELL_ESRCH; } std::unique_lock lock(group->mutex); if (group->run_state <= SPU_THREAD_GROUP_STATUS_INITIALIZED || group->run_state == SPU_THREAD_GROUP_STATUS_WAITING || group->run_state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED) { return CELL_ESTAT; } for (auto& thread : group->threads) { if (thread) { thread->state += cpu_flag::stop; } } for (auto& thread : group->threads) { if (thread && group->running) { thread_ctrl::notify(*thread); } } group->exit_status = value; group->join_state = SYS_SPU_THREAD_GROUP_JOIN_TERMINATED; // Wait until the threads are actually stopped const u64 last_stop = group->stop_count - !group->running; while (group->stop_count == last_stop) { group->cond.wait(lock); } return CELL_OK; } error_code sys_spu_thread_group_join(ppu_thread& ppu, u32 id, vm::ptr cause, vm::ptr status) { vm::temporary_unlock(ppu); sys_spu.trace("sys_spu_thread_group_join(id=0x%x, cause=*0x%x, status=*0x%x)", id, cause, status); const auto group = idm::get(id); if (!group) { return CELL_ESRCH; } do { std::unique_lock lock(group->mutex); if (group->run_state < SPU_THREAD_GROUP_STATUS_INITIALIZED) { return CELL_ESTAT; } if (group->waiter) { // another PPU thread is joining this thread group return CELL_EBUSY; } if (group->join_state && group->run_state == SPU_THREAD_GROUP_STATUS_INITIALIZED) { // Already signaled ppu.gpr[4] = group->join_state; ppu.gpr[5] = group->exit_status; group->join_state.release(0); break; } else { // Subscribe to receive status in r4-r5 ppu.gpr[4] = 0; group->waiter = &ppu; } lv2_obj::sleep(ppu); while (!ppu.gpr[4]) { if (ppu.is_stopped()) { return 0; } group->cond.wait(lock); } } while (0); if (ppu.test_stopped()) { return 0; } if (!cause) { if (status) { // Report unwritten data return CELL_EFAULT; } return not_an_error(CELL_EFAULT); } *cause = static_cast(ppu.gpr[4]); if (!status) { return not_an_error(CELL_EFAULT); } *status = static_cast(ppu.gpr[5]); return CELL_OK; } error_code sys_spu_thread_group_set_priority(ppu_thread& ppu, u32 id, s32 priority) { vm::temporary_unlock(ppu); sys_spu.trace("sys_spu_thread_group_set_priority(id=0x%x, priority=%d)", id, priority); const auto group = idm::get(id); if (!group) { return CELL_ESRCH; } if (priority < (g_ps3_process_info.has_root_perm() ? 0 : 16) || priority > 255) { return CELL_EINVAL; } if (group->type == SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT) { return CELL_EINVAL; } group->prio = priority; return CELL_OK; } error_code sys_spu_thread_group_get_priority(ppu_thread& ppu, u32 id, vm::ptr priority) { vm::temporary_unlock(ppu); sys_spu.trace("sys_spu_thread_group_get_priority(id=0x%x, priority=*0x%x)", id, priority); const auto group = idm::get(id); if (!group) { return CELL_ESRCH; } if (group->type == SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT) { *priority = 0; } else { *priority = group->prio; } return CELL_OK; } error_code sys_spu_thread_write_ls(ppu_thread& ppu, u32 id, u32 lsa, u64 value, u32 type) { vm::temporary_unlock(ppu); sys_spu.trace("sys_spu_thread_write_ls(id=0x%x, lsa=0x%05x, value=0x%llx, type=%d)", id, lsa, value, type); if (lsa >= 0x40000 || type > 8 || !type || (type | lsa) & (type - 1)) // check range and alignment { return CELL_EINVAL; } const auto [thread, group] = lv2_spu_group::get_thread(id); if (UNLIKELY(!thread)) { return CELL_ESRCH; } std::lock_guard lock(group->mutex); if (group->run_state < SPU_THREAD_GROUP_STATUS_WAITING || group->run_state > SPU_THREAD_GROUP_STATUS_RUNNING) { return CELL_ESTAT; } switch (type) { case 1: thread->_ref(lsa) = static_cast(value); break; case 2: thread->_ref(lsa) = static_cast(value); break; case 4: thread->_ref(lsa) = static_cast(value); break; case 8: thread->_ref(lsa) = value; break; default: ASSUME(0); } return CELL_OK; } error_code sys_spu_thread_read_ls(ppu_thread& ppu, u32 id, u32 lsa, vm::ptr value, u32 type) { vm::temporary_unlock(ppu); sys_spu.trace("sys_spu_thread_read_ls(id=0x%x, lsa=0x%05x, value=*0x%x, type=%d)", id, lsa, value, type); if (lsa >= 0x40000 || type > 8 || !type || (type | lsa) & (type - 1)) // check range and alignment { return CELL_EINVAL; } const auto [thread, group] = lv2_spu_group::get_thread(id); if (UNLIKELY(!thread)) { return CELL_ESRCH; } std::lock_guard lock(group->mutex); if (group->run_state < SPU_THREAD_GROUP_STATUS_WAITING || group->run_state > SPU_THREAD_GROUP_STATUS_RUNNING) { return CELL_ESTAT; } switch (type) { case 1: *value = thread->_ref(lsa); break; case 2: *value = thread->_ref(lsa); break; case 4: *value = thread->_ref(lsa); break; case 8: *value = thread->_ref(lsa); break; default: ASSUME(0); } return CELL_OK; } error_code sys_spu_thread_write_spu_mb(ppu_thread& ppu, u32 id, u32 value) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_write_spu_mb(id=0x%x, value=0x%x)", id, value); const auto [thread, group] = lv2_spu_group::get_thread(id); if (UNLIKELY(!thread)) { return CELL_ESRCH; } std::lock_guard lock(group->mutex); thread->ch_in_mbox.push(*thread, value); return CELL_OK; } error_code sys_spu_thread_set_spu_cfg(ppu_thread& ppu, u32 id, u64 value) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_set_spu_cfg(id=0x%x, value=0x%x)", id, value); if (value > 3) { return CELL_EINVAL; } const auto [thread, group] = lv2_spu_group::get_thread(id); if (UNLIKELY(!thread)) { return CELL_ESRCH; } thread->snr_config = value; return CELL_OK; } error_code sys_spu_thread_get_spu_cfg(ppu_thread& ppu, u32 id, vm::ptr value) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_get_spu_cfg(id=0x%x, value=*0x%x)", id, value); const auto [thread, group] = lv2_spu_group::get_thread(id); if (UNLIKELY(!thread)) { return CELL_ESRCH; } *value = thread->snr_config; return CELL_OK; } error_code sys_spu_thread_write_snr(ppu_thread& ppu, u32 id, u32 number, u32 value) { vm::temporary_unlock(ppu); sys_spu.trace("sys_spu_thread_write_snr(id=0x%x, number=%d, value=0x%x)", id, number, value); const auto [thread, group] = lv2_spu_group::get_thread(id); if (UNLIKELY(!thread)) { return CELL_ESRCH; } if (number > 1) { return CELL_EINVAL; } thread->push_snr(number, value); return CELL_OK; } error_code sys_spu_thread_group_connect_event(ppu_thread& ppu, u32 id, u32 eq, u32 et) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_group_connect_event(id=0x%x, eq=0x%x, et=%d)", id, eq, et); const auto group = idm::get(id); if (!group) { return CELL_ESRCH; } const auto ep = et == SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE ? &group->ep_sysmodule : et == SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION ? &group->ep_exception : et == SYS_SPU_THREAD_GROUP_EVENT_RUN ? &group->ep_run : nullptr; if (!ep) { sys_spu.error("sys_spu_thread_group_connect_event(): unknown event type (%d)", et); return CELL_EINVAL; } const auto queue = idm::get(eq); std::lock_guard lock(group->mutex); if (!ep->expired()) { return CELL_EBUSY; } // ESRCH of event queue after EBUSY if (!queue) { return CELL_ESRCH; } *ep = queue; return CELL_OK; } error_code sys_spu_thread_group_disconnect_event(ppu_thread& ppu, u32 id, u32 et) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_group_disconnect_event(id=0x%x, et=%d)", id, et); const auto group = idm::get(id); if (!group) { return CELL_ESRCH; } const auto ep = et == SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE ? &group->ep_sysmodule : et == SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION ? &group->ep_exception : et == SYS_SPU_THREAD_GROUP_EVENT_RUN ? &group->ep_run : nullptr; if (!ep) { sys_spu.error("sys_spu_thread_group_disconnect_event(): unknown event type (%d)", et); return CELL_EINVAL; } std::lock_guard lock(group->mutex); if (ep->expired()) { return CELL_EINVAL; } ep->reset(); return CELL_OK; } error_code sys_spu_thread_connect_event(ppu_thread& ppu, u32 id, u32 eq, u32 et, u8 spup) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_connect_event(id=0x%x, eq=0x%x, et=%d, spup=%d)", id, eq, et, spup); const auto [thread, group] = lv2_spu_group::get_thread(id); const auto queue = idm::get(eq); if (UNLIKELY(!queue || !thread)) { return CELL_ESRCH; } if (et != SYS_SPU_THREAD_EVENT_USER || spup > 63) { sys_spu.error("sys_spu_thread_connect_event(): invalid arguments (et=%d, spup=%d, queue->type=%d)", et, spup, queue->type); return CELL_EINVAL; } std::lock_guard lock(group->mutex); auto& port = thread->spup[spup]; if (!port.expired()) { return CELL_EISCONN; } port = queue; return CELL_OK; } error_code sys_spu_thread_disconnect_event(ppu_thread& ppu, u32 id, u32 et, u8 spup) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_disconnect_event(id=0x%x, et=%d, spup=%d)", id, et, spup); const auto [thread, group] = lv2_spu_group::get_thread(id); if (UNLIKELY(!thread)) { return CELL_ESRCH; } if (et != SYS_SPU_THREAD_EVENT_USER || spup > 63) { sys_spu.error("sys_spu_thread_disconnect_event(): invalid arguments (et=%d, spup=%d)", et, spup); return CELL_EINVAL; } std::lock_guard lock(group->mutex); auto& port = thread->spup[spup]; if (port.expired()) { return CELL_ENOTCONN; } port.reset(); return CELL_OK; } error_code sys_spu_thread_bind_queue(ppu_thread& ppu, u32 id, u32 spuq, u32 spuq_num) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_bind_queue(id=0x%x, spuq=0x%x, spuq_num=0x%x)", id, spuq, spuq_num); const auto [thread, group] = lv2_spu_group::get_thread(id); const auto queue = idm::get(spuq); if (UNLIKELY(!queue || !thread)) { return CELL_ESRCH; } if (queue->type != SYS_SPU_QUEUE) { return CELL_EINVAL; } std::lock_guard lock(group->mutex); decltype(std::data(thread->spuq)) q{}; for (auto& v : thread->spuq) { // Check if the entry is assigned at all if (const decltype(v.second) test{}; !v.second.owner_before(test) && !test.owner_before(v.second)) { if (!q) { q = &v; } continue; } if (v.first == spuq_num || (!v.second.owner_before(queue) && !queue.owner_before(v.second))) { return CELL_EBUSY; } } if (!q) { return CELL_EAGAIN; } q->first = spuq_num; q->second = queue; return CELL_OK; } error_code sys_spu_thread_unbind_queue(ppu_thread& ppu, u32 id, u32 spuq_num) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_unbind_queue(id=0x%x, spuq_num=0x%x)", id, spuq_num); const auto [thread, group] = lv2_spu_group::get_thread(id); if (UNLIKELY(!thread)) { return CELL_ESRCH; } std::lock_guard lock(group->mutex); for (auto& v : thread->spuq) { if (v.first != spuq_num) { continue; } if (const decltype(v.second) test{}; !v.second.owner_before(test) && !test.owner_before(v.second)) { continue; } v.second.reset(); return CELL_OK; } return CELL_ESRCH; } error_code sys_spu_thread_group_connect_event_all_threads(ppu_thread& ppu, u32 id, u32 eq, u64 req, vm::ptr spup) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_group_connect_event_all_threads(id=0x%x, eq=0x%x, req=0x%llx, spup=*0x%x)", id, eq, req, spup); const auto group = idm::get(id); const auto queue = idm::get(eq); if (!group || !queue) { return CELL_ESRCH; } if (!req) { return CELL_EINVAL; } std::lock_guard lock(group->mutex); if (group->run_state < SPU_THREAD_GROUP_STATUS_INITIALIZED) { return CELL_ESTAT; } u8 port = 0; // SPU Port number for (; port < 64; port++) { if (!(req & (1ull << port))) { continue; } bool found = true; for (auto& t : group->threads) { if (t) { if (!t->spup[port].expired()) { found = false; break; } } } if (found) { break; } } if (port == 64) { return CELL_EISCONN; } for (auto& t : group->threads) { if (t) { t->spup[port] = queue; } } *spup = port; return CELL_OK; } error_code sys_spu_thread_group_disconnect_event_all_threads(ppu_thread& ppu, u32 id, u8 spup) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_group_disconnect_event_all_threads(id=0x%x, spup=%d)", id, spup); const auto group = idm::get(id); if (!group) { return CELL_ESRCH; } if (spup > 63) { return CELL_EINVAL; } std::lock_guard lock(group->mutex); for (auto& t : group->threads) { if (t) { t->spup[spup].reset(); } } return CELL_OK; } error_code sys_spu_thread_group_log(ppu_thread& ppu, s32 command, vm::ptr stat) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_group_log(command=0x%x, stat=*0x%x)", command, stat); struct spu_group_log_state_t { atomic_t state = SYS_SPU_THREAD_GROUP_LOG_ON; }; const auto state = g_fxo->get(); switch (command) { case SYS_SPU_THREAD_GROUP_LOG_GET_STATUS: { if (!stat) { return CELL_EFAULT; } *stat = state->state; break; } case SYS_SPU_THREAD_GROUP_LOG_ON: case SYS_SPU_THREAD_GROUP_LOG_OFF: { state->state.release(command); break; } default: return CELL_EINVAL; } return CELL_OK; } error_code sys_spu_thread_recover_page_fault(ppu_thread& ppu, u32 id) { vm::temporary_unlock(ppu); sys_spu.warning("sys_spu_thread_recover_page_fault(id=0x%x)", id); const auto [thread, group] = lv2_spu_group::get_thread(id); if (UNLIKELY(!thread)) { return CELL_ESRCH; } return mmapper_thread_recover_page_fault(id); } error_code sys_raw_spu_recover_page_fault(ppu_thread& ppu, u32 id) { vm::temporary_unlock(ppu); sys_spu.warning("sys_raw_spu_recover_page_fault(id=0x%x)", id); const auto thread = idm::get>(spu_thread::find_raw_spu(id)); if (UNLIKELY(!thread)) { return CELL_ESRCH; } return mmapper_thread_recover_page_fault(id); } error_code sys_raw_spu_create(ppu_thread& ppu, vm::ptr id, vm::ptr attr) { vm::temporary_unlock(ppu); sys_spu.warning("sys_raw_spu_create(id=*0x%x, attr=*0x%x)", id, attr); // TODO: check number set by sys_spu_initialize() if (!spu_thread::g_raw_spu_ctr.try_inc(5)) { return CELL_EAGAIN; } u32 index = 0; // Find free RawSPU ID while (!spu_thread::g_raw_spu_id[index].try_inc(1)) { if (++index == 5) index = 0; } const vm::addr_t ls_addr{verify(HERE, vm::falloc(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index, 0x40000, vm::spu))}; const u32 tid = idm::make>(fmt::format("RawSPU[0x%x] Thread", index), ls_addr, nullptr, index, "", index); spu_thread::g_raw_spu_id[index] = verify("RawSPU ID" HERE, tid); *id = index; return CELL_OK; } error_code sys_raw_spu_destroy(ppu_thread& ppu, u32 id) { vm::temporary_unlock(ppu); sys_spu.warning("sys_raw_spu_destroy(id=%d)", id); const auto thread = idm::get>(spu_thread::find_raw_spu(id)); if (UNLIKELY(!thread)) { return CELL_ESRCH; } // TODO: CELL_EBUSY is not returned // Stop thread thread->state += cpu_flag::stop; // Kernel objects which must be removed std::unordered_map> to_remove; // Clear interrupt handlers for (auto& intr : thread->int_ctrl) { if (const auto tag = intr.tag.lock()) { if (auto handler = tag->handler.lock()) { // SLEEP handler->join(); to_remove.emplace(handler.get(), 0); } to_remove.emplace(tag.get(), 0); } } // Scan all kernel objects to determine IDs idm::select([&](u32 id, lv2_obj& obj) { const auto found = to_remove.find(&obj); if (found != to_remove.end()) { found->second = id; } }); // Remove IDs for (auto&& pair : to_remove) { if (pair.second >> 24 == 0xa) idm::remove(pair.second); if (pair.second >> 24 == 0xb) idm::remove(pair.second); } idm::remove>(thread->id); return CELL_OK; } error_code sys_raw_spu_create_interrupt_tag(ppu_thread& ppu, u32 id, u32 class_id, u32 hwthread, vm::ptr intrtag) { vm::temporary_unlock(ppu); sys_spu.warning("sys_raw_spu_create_interrupt_tag(id=%d, class_id=%d, hwthread=0x%x, intrtag=*0x%x)", id, class_id, hwthread, intrtag); if (class_id != 0 && class_id != 2) { return CELL_EINVAL; } CellError error = {}; const auto tag = idm::import([&]() { std::shared_ptr result; auto thread = idm::check_unlocked>(spu_thread::find_raw_spu(id)); if (!thread) { error = CELL_ESRCH; return result; } auto& int_ctrl = thread->int_ctrl[class_id]; if (!int_ctrl.tag.expired()) { error = CELL_EAGAIN; return result; } result = std::make_shared(); int_ctrl.tag = result; return result; }); if (tag) { *intrtag = tag; return CELL_OK; } return error; } error_code sys_raw_spu_set_int_mask(ppu_thread& ppu, u32 id, u32 class_id, u64 mask) { vm::temporary_unlock(ppu); sys_spu.trace("sys_raw_spu_set_int_mask(id=%d, class_id=%d, mask=0x%llx)", id, class_id, mask); if (class_id != 0 && class_id != 2) { return CELL_EINVAL; } const auto thread = idm::get>(spu_thread::find_raw_spu(id)); if (UNLIKELY(!thread)) { return CELL_ESRCH; } thread->int_ctrl[class_id].mask.exchange(mask); return CELL_OK; } error_code sys_raw_spu_get_int_mask(ppu_thread& ppu, u32 id, u32 class_id, vm::ptr mask) { vm::temporary_unlock(ppu); sys_spu.trace("sys_raw_spu_get_int_mask(id=%d, class_id=%d, mask=*0x%x)", id, class_id, mask); if (class_id != 0 && class_id != 2) { return CELL_EINVAL; } const auto thread = idm::get>(spu_thread::find_raw_spu(id)); if (UNLIKELY(!thread)) { return CELL_ESRCH; } *mask = thread->int_ctrl[class_id].mask; return CELL_OK; } error_code sys_raw_spu_set_int_stat(ppu_thread& ppu, u32 id, u32 class_id, u64 stat) { vm::temporary_unlock(ppu); sys_spu.trace("sys_raw_spu_set_int_stat(id=%d, class_id=%d, stat=0x%llx)", id, class_id, stat); if (class_id != 0 && class_id != 2) { return CELL_EINVAL; } const auto thread = idm::get>(spu_thread::find_raw_spu(id)); if (UNLIKELY(!thread)) { return CELL_ESRCH; } thread->int_ctrl[class_id].clear(stat); return CELL_OK; } error_code sys_raw_spu_get_int_stat(ppu_thread& ppu, u32 id, u32 class_id, vm::ptr stat) { vm::temporary_unlock(ppu); sys_spu.trace("sys_raw_spu_get_int_stat(id=%d, class_id=%d, stat=*0x%x)", id, class_id, stat); if (class_id != 0 && class_id != 2) { return CELL_EINVAL; } const auto thread = idm::get>(spu_thread::find_raw_spu(id)); if (UNLIKELY(!thread)) { return CELL_ESRCH; } *stat = thread->int_ctrl[class_id].stat; return CELL_OK; } error_code sys_raw_spu_read_puint_mb(ppu_thread& ppu, u32 id, vm::ptr value) { vm::temporary_unlock(ppu); sys_spu.trace("sys_raw_spu_read_puint_mb(id=%d, value=*0x%x)", id, value); const auto thread = idm::get>(spu_thread::find_raw_spu(id)); if (UNLIKELY(!thread)) { return CELL_ESRCH; } *value = thread->ch_out_intr_mbox.pop(*thread); return CELL_OK; } error_code sys_raw_spu_set_spu_cfg(ppu_thread& ppu, u32 id, u32 value) { vm::temporary_unlock(ppu); sys_spu.trace("sys_raw_spu_set_spu_cfg(id=%d, value=0x%x)", id, value); if (value > 3) { fmt::throw_exception("Unexpected value (0x%x)" HERE, value); } const auto thread = idm::get>(spu_thread::find_raw_spu(id)); if (UNLIKELY(!thread)) { return CELL_ESRCH; } thread->snr_config = value; return CELL_OK; } error_code sys_raw_spu_get_spu_cfg(ppu_thread& ppu, u32 id, vm::ptr value) { vm::temporary_unlock(ppu); sys_spu.trace("sys_raw_spu_get_spu_afg(id=%d, value=*0x%x)", id, value); const auto thread = idm::get>(spu_thread::find_raw_spu(id)); if (UNLIKELY(!thread)) { return CELL_ESRCH; } *value = static_cast(thread->snr_config); return CELL_OK; }