Move rpcs3/Emu/Cell/lv2 to kernel/cellos

This commit is contained in:
DH 2025-10-04 16:46:36 +03:00
parent fce4127c2e
commit dbfa5002e5
282 changed files with 40062 additions and 41342 deletions

2523
kernel/cellos/src/lv2.cpp Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,16 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_bdemu.h"
LOG_CHANNEL(sys_bdemu);
// Stub syscall: logs the BD emulator command and reports success without
// performing any device emulation.
// Fix: the log format had "cmd=0%llx" (missing 'x' in the "0x" hex prefix).
error_code sys_bdemu_send_command(u64 cmd, u64 a2, u64 a3, vm::ptr<void> buf,
                                  u64 buf_len) {
  sys_bdemu.todo("sys_bdemu_send_command(cmd=0x%llx, a2=0x%x, a3=0x%x, "
                 "buf=0x%x, buf_len=0x%x)",
                 cmd, a2, a3, buf, buf_len);
  return CELL_OK;
}

View file

@ -0,0 +1,12 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_btsetting.h"
LOG_CHANNEL(sys_btsetting);
// Stub syscall: Bluetooth settings interface; only logs the request.
error_code sys_btsetting_if(u64 cmd, vm::ptr<void> msg) {
  sys_btsetting.todo("sys_btsetting_if(cmd=0x%llx, msg=*0x%x)", cmd, msg);
  return CELL_OK;
}

View file

@ -0,0 +1,509 @@
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "util/serialization.hpp"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_cond.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_cond);
// Deserialization constructor (savestate load). The owning mutex may not have
// been deserialized yet, so both pointers may be null here; they are resolved
// later in on_id_create() via a postponed init callback.
lv2_cond::lv2_cond(utils::serial &ar) noexcept
    : key(ar), name(ar), mtx_id(ar),
      mutex(idm::check_unlocked<lv2_obj, lv2_mutex>(mtx_id)),
      _mutex(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id)) // May be nullptr
{}
// Regular constructor: binds the cond to its mutex, keeping both a raw
// lv2_mutex pointer (fast access) and the shared_ptr (ownership).
lv2_cond::lv2_cond(u64 key, u64 name, u32 mtx_id,
                   shared_ptr<lv2_obj> mutex0) noexcept
    : key(key), name(name), mtx_id(mtx_id),
      mutex(static_cast<lv2_mutex *>(mutex0.get())), _mutex(mutex0) {}
// ID-manager hook, called when this cond is registered. Increments the owning
// mutex's cond_count, or defers that step until after deserialization when the
// mutex has not been resolved yet. Returns CELL_ESRCH if the mutex is gone.
CellError lv2_cond::on_id_create() {
  exists++;
  // Shared between the immediate path and the deferred (savestate) path
  static auto do_it = [](lv2_cond *_this) -> CellError {
    if (lv2_obj::check(_this->mutex)) {
      _this->mutex->cond_count++;
      return {};
    }
    // Mutex has been destroyed, cannot create conditional variable
    return CELL_ESRCH;
  };
  if (mutex) {
    return do_it(this);
  }
  // Mutex unresolved: only legal while loading a savestate (see serial ctor)
  ensure(!!Emu.DeserialManager());
  Emu.PostponeInitCode([this]() {
    if (!mutex) {
      _mutex = static_cast<shared_ptr<lv2_obj>>(
          ensure(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id)));
    }
    // Defer function
    ensure(CellError{} == do_it(this));
  });
  return {};
}
// Savestate loader: constructs the cond from the archive and wraps it for
// insertion into the ID manager storage.
std::function<void(void *)> lv2_cond::load(utils::serial &ar) {
  return load_func(make_shared<lv2_cond>(exact_t<utils::serial &>(ar)));
}
// Savestate writer: persists only key, name and the mutex id (mirrors the
// serial constructor's read order).
void lv2_cond::save(utils::serial &ar) { ar(key, name, mtx_id); }
// Syscall: create a condition variable bound to an existing mutex.
// Writes the new id to *cond_id on success; CELL_ESRCH if the mutex is gone.
error_code sys_cond_create(ppu_thread &ppu, vm::ptr<u32> cond_id, u32 mutex_id,
                           vm::ptr<sys_cond_attribute_t> attr) {
  ppu.state += cpu_flag::wait;
  sys_cond.trace("sys_cond_create(cond_id=*0x%x, mutex_id=0x%x, attr=*0x%x)",
                 cond_id, mutex_id, attr);
  auto mutex = idm::get_unlocked<lv2_obj, lv2_mutex>(mutex_id);
  if (!mutex) {
    return CELL_ESRCH;
  }
  const auto _attr = *attr;
  // Non-zero IPC key means the object is shared between processes
  const u64 ipc_key = lv2_obj::get_key(_attr);
  if (ipc_key) {
    sys_cond.warning("sys_cond_create(cond_id=*0x%x, attr=*0x%x): IPC=0x%016x",
                     cond_id, attr, ipc_key);
  }
  if (const auto error =
          lv2_obj::create<lv2_cond>(_attr.pshared, ipc_key, _attr.flags, [&] {
            return make_single<lv2_cond>(ipc_key, _attr.name_u64, mutex_id,
                                         std::move(mutex));
          })) {
    return error;
  }
  // Synchronize PPU state before touching guest memory
  ppu.check_state();
  *cond_id = idm::last_id();
  return CELL_OK;
}
// Syscall: destroy a condition variable. Fails with CELL_EBUSY while any
// thread is still queued on it.
error_code sys_cond_destroy(ppu_thread &ppu, u32 cond_id) {
  ppu.state += cpu_flag::wait;
  sys_cond.trace("sys_cond_destroy(cond_id=0x%x)", cond_id);
  const auto cond = idm::withdraw<lv2_obj, lv2_cond>(
      cond_id, [&](lv2_cond &cond) -> CellError {
        std::lock_guard lock(cond.mutex->mutex);
        // Refuse destruction while the sleep queue is non-empty
        if (atomic_storage<ppu_thread *>::load(cond.sq)) {
          return CELL_EBUSY;
        }
        cond.mutex->cond_count--;
        lv2_obj::on_id_destroy(cond, cond.key);
        return {};
      });
  if (!cond) {
    return CELL_ESRCH;
  }
  if (cond->key) {
    sys_cond.warning("sys_cond_destroy(cond_id=0x%x): IPC=0x%016x", cond_id,
                     cond->key);
  }
  // Propagate EBUSY from the withdraw callback
  if (cond.ret) {
    return cond.ret;
  }
  return CELL_OK;
}
// Syscall: wake one waiter of the condition variable (by mutex protocol).
// Loops because a concurrent suspend can force the whole operation to retry.
error_code sys_cond_signal(ppu_thread &ppu, u32 cond_id) {
  ppu.state += cpu_flag::wait;
  sys_cond.trace("sys_cond_signal(cond_id=0x%x)", cond_id);
  while (true) {
    if (ppu.test_stopped()) {
      ppu.state += cpu_flag::again;
      return {};
    }
    bool finished = true;
    ppu.state += cpu_flag::wait;
    const auto cond = idm::check<lv2_obj, lv2_cond>(
        cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond &cond) {
          if (atomic_storage<ppu_thread *>::load(cond.sq)) {
            std::lock_guard lock(cond.mutex->mutex);
            if (ppu.state & cpu_flag::suspend) {
              // Test if another signal caused the current thread to be
              // suspended, in which case it needs to wait until the thread
              // wakes up (otherwise the signal may cause unexpected results)
              finished = false;
              return;
            }
            // Pick one waiter according to the mutex's wakeup protocol
            if (const auto cpu =
                    cond.schedule<ppu_thread>(cond.sq, cond.mutex->protocol)) {
              if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
                ppu.state += cpu_flag::again;
                return;
              }
              // TODO: Is EBUSY returned after requeueing, on sys_cond_destroy?
              // Wake immediately if the mutex is free, otherwise the waiter is
              // left queued on the mutex by try_own
              if (cond.mutex->try_own(*cpu)) {
                cond.awake(cpu);
              }
            }
          } else {
            // No waiters: still take/release the lock to serialize with them
            cond.mutex->mutex.lock_unlock();
            if (ppu.state & cpu_flag::suspend) {
              finished = false;
            }
          }
        });
    if (!finished) {
      continue;
    }
    if (!cond) {
      return CELL_ESRCH;
    }
    return CELL_OK;
  }
}
// Syscall: wake all waiters of the condition variable. Waiters that cannot
// take the mutex immediately are requeued onto the mutex; at most one thread
// is actually awakened here.
error_code sys_cond_signal_all(ppu_thread &ppu, u32 cond_id) {
  ppu.state += cpu_flag::wait;
  sys_cond.trace("sys_cond_signal_all(cond_id=0x%x)", cond_id);
  while (true) {
    if (ppu.test_stopped()) {
      ppu.state += cpu_flag::again;
      return {};
    }
    bool finished = true;
    ppu.state += cpu_flag::wait;
    const auto cond = idm::check<lv2_obj, lv2_cond>(
        cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond &cond) {
          if (atomic_storage<ppu_thread *>::load(cond.sq)) {
            std::lock_guard lock(cond.mutex->mutex);
            if (ppu.state & cpu_flag::suspend) {
              // Test if another signal caused the current thread to be
              // suspended, in which case it needs to wait until the thread
              // wakes up (otherwise the signal may cause unexpected results)
              finished = false;
              return;
            }
            // Abort if any queued waiter is being saved (savestate in progress)
            for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu) {
              if (cpu->state & cpu_flag::again) {
                ppu.state += cpu_flag::again;
                return;
              }
            }
            cpu_thread *result = nullptr;
            // Detach the whole sleep queue, then drain it
            auto sq = cond.sq;
            atomic_storage<ppu_thread *>::release(cond.sq, nullptr);
            while (const auto cpu =
                       cond.schedule<ppu_thread>(sq, SYS_SYNC_PRIORITY)) {
              // Only one thread can own the mutex outright; ensure exactly one
              if (cond.mutex->try_own(*cpu)) {
                ensure(!std::exchange(result, cpu));
              }
            }
            if (result) {
              cond.awake(result);
            }
          } else {
            // No waiters: still take/release the lock to serialize with them
            cond.mutex->mutex.lock_unlock();
            if (ppu.state & cpu_flag::suspend) {
              finished = false;
            }
          }
        });
    if (!finished) {
      continue;
    }
    if (!cond) {
      return CELL_ESRCH;
    }
    return CELL_OK;
  }
}
// Syscall: wake a specific thread waiting on the condition variable.
// Callback result: -1 = thread id invalid (ESRCH), 0 = thread not queued
// (EPERM) or retry, 1 = thread was signalled.
error_code sys_cond_signal_to(ppu_thread &ppu, u32 cond_id, u32 thread_id) {
  ppu.state += cpu_flag::wait;
  sys_cond.trace("sys_cond_signal_to(cond_id=0x%x, thread_id=0x%x)", cond_id,
                 thread_id);
  while (true) {
    if (ppu.test_stopped()) {
      ppu.state += cpu_flag::again;
      return {};
    }
    bool finished = true;
    ppu.state += cpu_flag::wait;
    const auto cond = idm::check<lv2_obj, lv2_cond>(
        cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond &cond) {
          // Target thread must exist at all
          if (!idm::check_unlocked<named_thread<ppu_thread>>(thread_id)) {
            return -1;
          }
          if (atomic_storage<ppu_thread *>::load(cond.sq)) {
            std::lock_guard lock(cond.mutex->mutex);
            if (ppu.state & cpu_flag::suspend) {
              // Test if another signal caused the current thread to be
              // suspended, in which case it needs to wait until the thread
              // wakes up (otherwise the signal may cause unexpected results)
              finished = false;
              return 0;
            }
            // Search the sleep queue for the requested thread
            for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu) {
              if (cpu->id == thread_id) {
                if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
                  ppu.state += cpu_flag::again;
                  return 0;
                }
                ensure(cond.unqueue(cond.sq, cpu));
                // Wake now if the mutex is free, else requeue onto the mutex
                if (cond.mutex->try_own(*cpu)) {
                  cond.awake(cpu);
                }
                return 1;
              }
            }
          } else {
            // No waiters: still take/release the lock to serialize with them
            cond.mutex->mutex.lock_unlock();
            if (ppu.state & cpu_flag::suspend) {
              finished = false;
              return 0;
            }
          }
          return 0;
        });
    if (!finished) {
      continue;
    }
    if (!cond || cond.ret == -1) {
      return CELL_ESRCH;
    }
    if (!cond.ret) {
      return not_an_error(CELL_EPERM);
    }
    return CELL_OK;
  }
}
// Syscall: atomically release the bound mutex and sleep on the cond, then
// re-acquire the mutex on wakeup. Supports timeouts, thread suspension and
// savestates (the 64-bit saved state packs: bit 0 = sleeping on the mutex,
// upper 32 bits = saved recursive lock count).
error_code sys_cond_wait(ppu_thread &ppu, u32 cond_id, u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_cond.trace("sys_cond_wait(cond_id=0x%x, timeout=%lld)", cond_id, timeout);
  // Further function result
  ppu.gpr[3] = CELL_OK;
  auto &sstate = *ppu.optional_savestate_state;
  const auto cond = idm::get<lv2_obj, lv2_cond>(
      cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond &cond) -> s64 {
        // Caller must own the mutex (unless restored from a savestate)
        if (!ppu.loaded_from_savestate &&
            atomic_storage<u32>::load(cond.mutex->control.raw().owner) !=
                ppu.id) {
          return -1;
        }
        lv2_obj::prepare_for_sleep(ppu);
        std::lock_guard lock(cond.mutex->mutex);
        const u64 syscall_state = sstate.try_read<u64>().second;
        sstate.clear();
        if (ppu.loaded_from_savestate) {
          if (syscall_state & 1) {
            // Mutex sleep
            ensure(!cond.mutex->try_own(ppu));
          } else {
            lv2_obj::emplace(cond.sq, &ppu);
          }
          cond.sleep(ppu, timeout);
          // Restore the saved recursive lock count
          return static_cast<u32>(syscall_state >> 32);
        }
        // Register waiter
        lv2_obj::emplace(cond.sq, &ppu);
        // Unlock the mutex
        const u32 count = cond.mutex->lock_count.exchange(0);
        if (const auto cpu = cond.mutex->reown<ppu_thread>()) {
          if (cpu->state & cpu_flag::again) {
            ensure(cond.unqueue(cond.sq, &ppu));
            ppu.state += cpu_flag::again;
            return 0;
          }
          cond.mutex->append(cpu);
        }
        // Sleep current thread and schedule mutex waiter
        cond.sleep(ppu, timeout);
        // Save the recursive value
        return count;
      });
  if (!cond) {
    return CELL_ESRCH;
  }
  if (ppu.state & cpu_flag::again) {
    return {};
  }
  // -1 from the callback: caller did not own the mutex
  if (cond.ret < 0) {
    return CELL_EPERM;
  }
  // Wait loop: runs until signalled, stopped, or timed out
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      // Emulator is stopping: record where this thread is queued so the
      // savestate can restore the wait later
      std::lock_guard lock(cond->mutex->mutex);
      bool mutex_sleep = false;
      bool cond_sleep = false;
      for (auto cpu = atomic_storage<ppu_thread *>::load(cond->sq); cpu;
           cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          cond_sleep = true;
          break;
        }
      }
      for (auto cpu = atomic_storage<ppu_thread *>::load(
               cond->mutex->control.raw().sq);
           cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          mutex_sleep = true;
          break;
        }
      }
      if (!cond_sleep && !mutex_sleep) {
        break;
      }
      const u64 optional_syscall_state =
          u32{mutex_sleep} | (u64{static_cast<u32>(cond.ret)} << 32);
      sstate(optional_syscall_state);
      ppu.state += cpu_flag::again;
      return {};
    }
    // Short busy-wait before falling back to a blocking wait
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        const u64 start_time = ppu.start_time;
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        std::lock_guard lock(cond->mutex->mutex);
        // Try to cancel the waiting
        if (cond->unqueue(cond->sq, &ppu)) {
          // TODO: Is EBUSY returned after requeueing, on sys_cond_destroy?
          ppu.gpr[3] = CELL_ETIMEDOUT;
          // Own or requeue
          if (cond->mutex->try_own(ppu)) {
            break;
          }
        } else if (atomic_storage<u32>::load(
                       cond->mutex->control.raw().owner) == ppu.id) {
          // Signalled and granted the mutex concurrently with the timeout
          break;
        }
        cond->mutex->sleep(ppu);
        ppu.start_time =
            start_time; // Restore start time because awake has been called
        timeout = 0;
        continue;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  // Verify ownership
  ensure(atomic_storage<u32>::load(cond->mutex->control.raw().owner) == ppu.id);
  // Restore the recursive value
  cond->mutex->lock_count.release(static_cast<u32>(cond.ret));
  return not_an_error(ppu.gpr[3]);
}

View file

@ -0,0 +1,464 @@
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "cellos/sys_event.h"
#include "sys_config.h"
LOG_CHANNEL(sys_config);
// Enums
// Formatter for sys_config_service_id: emits the symbolic name for known
// built-in services, otherwise a generic hex tag (user-defined service ids
// carry the sign bit, which is stripped for display).
template <>
void fmt_class_string<sys_config_service_id>::format(std::string &out, u64 id) {
  const s64 s_id = static_cast<s64>(id);
  const char *known = nullptr;
  switch (s_id) {
  case SYS_CONFIG_SERVICE_PADMANAGER:
    known = "SYS_CONFIG_SERVICE_PADMANAGER";
    break;
  case SYS_CONFIG_SERVICE_PADMANAGER2:
    known = "SYS_CONFIG_SERVICE_PADMANAGER2";
    break;
  case SYS_CONFIG_SERVICE_USER_LIBPAD:
    known = "SYS_CONFIG_SERVICE_USER_LIBPAD";
    break;
  case SYS_CONFIG_SERVICE_USER_LIBKB:
    known = "SYS_CONFIG_SERVICE_USER_LIBKB";
    break;
  case SYS_CONFIG_SERVICE_USER_LIBMOUSE:
    known = "SYS_CONFIG_SERVICE_USER_LIBMOUSE";
    break;
  }
  if (known) {
    out += known;
  } else if (s_id < 0) {
    fmt::append(out, "SYS_CONFIG_SERVICE_USER_%llx", id & ~(1ull << 63));
  } else {
    fmt::append(out, "SYS_CONFIG_SERVICE_%llx", id);
  }
}
// Formatter for sys_config_service_listener_type: prints the enumerator name,
// or the project's generic "unknown" fallback for out-of-range values.
template <>
void fmt_class_string<sys_config_service_listener_type>::format(
    std::string &out, u64 arg) {
  format_enum(out, arg, [](auto value) {
    switch (value) {
      STR_CASE(SYS_CONFIG_SERVICE_LISTENER_ONCE);
      STR_CASE(SYS_CONFIG_SERVICE_LISTENER_REPEATING);
    }
    return unknown;
  });
}
// Utilities
void dump_buffer(std::string &out, const std::vector<u8> &buffer) {
if (!buffer.empty()) {
out.reserve(out.size() + buffer.size() * 2 + 1);
fmt::append(out, "0x");
for (u8 x : buffer) {
fmt::append(out, "%02x", x);
}
} else {
fmt::append(out, "EMPTY");
}
}
// LV2 Config
// One-time global setup for the LV2 config subsystem. Guarded by m_state so
// concurrent callers run it at most once. Registers the built-in padmanager
// services so vsh sees a connected controller.
void lv2_config::initialize() {
  // compare_and_swap_test makes the 0 -> 1 transition exactly once
  if (m_state || !m_state.compare_and_swap_test(0, 1)) {
    return;
  }
  // Register padmanager service, notifying vsh that a controller is connected
  static const u8 hid_info[0x1a] = {
      0x01, 0x01, // 2 unk
      0x02, 0x02, // 4
      0x00, 0x00, // 6
      0x00, 0x00, // 8
      0x00, 0x00, // 10
      0x05, 0x4c, // 12 vid
      0x02, 0x68, // 14 pid
      0x00, 0x10, // 16 unk2
      0x91, 0x88, // 18
      0x04, 0x00, // 20
      0x00, 0x07, // 22
      0x00, 0x00, // 24
      0x00, 0x00  // 26
  };
  // user_id for the padmanager seems to signify the controller port number, and
  // the buffer contains some sort of HID descriptor
  lv2_config_service::create(SYS_CONFIG_SERVICE_PADMANAGER, 0, 1, 0, hid_info,
                             0x1a)
      ->notify();
  lv2_config_service::create(SYS_CONFIG_SERVICE_PADMANAGER2, 0, 1, 0, hid_info,
                             0x1a)
      ->notify();
}
// Register a service event in the global id -> event map (under m_mutex).
void lv2_config::add_service_event(shared_ptr<lv2_config_service_event> event) {
  std::lock_guard lock(m_mutex);
  const u32 event_id = event->id;
  events.emplace(event_id, std::move(event));
}
// Drop the global registration of a service event, if still present.
// The shared_ptr is moved into `ptr` (declared before the lock) so the event
// object itself is destroyed only after the lock has been released.
void lv2_config::remove_service_event(u32 id) {
  shared_ptr<lv2_config_service_event> ptr;
  std::lock_guard lock(m_mutex);
  const auto found = events.find(id);
  if (found != events.end()) {
    ptr = std::move(found->second);
    events.erase(found);
  }
}
// Thread-state assignment hook: on context destruction, unregister this event
// from the global map exactly once (m_destroyed guards against double removal,
// e.g. from both this operator and the destructor).
lv2_config_service_event &
lv2_config_service_event::operator=(thread_state s) noexcept {
  if (s == thread_state::destroying_context && !m_destroyed.exchange(true)) {
    if (auto global = g_fxo->try_get<lv2_config>()) {
      global->remove_service_event(id);
    }
  }
  return *this;
}
// Destructor delegates to the thread-state operator so the global map entry is
// removed through a single code path.
lv2_config_service_event::~lv2_config_service_event() noexcept {
  operator=(thread_state::destroying_context);
}
// Global teardown: mark all remaining events as destroyed so their own
// destructors do not call back into this (dying) object's map.
lv2_config::~lv2_config() noexcept {
  for (auto &[key, event] : events) {
    if (event) {
      // Avoid collision with lv2_config_service_event destructor
      event->m_destroyed = true;
    }
  }
}
// LV2 Config Service Listener
// Decide whether `service` passes this listener's filters (listener type,
// service id, verbosity, and the special padmanager opt-in byte).
bool lv2_config_service_listener::check_service(
    const lv2_config_service &service) const {
  // One-shot listeners stop matching once they have received any event
  if (type == SYS_CONFIG_SERVICE_LISTENER_ONCE && !service_events.empty()) {
    return false;
  }
  // The service id must match exactly
  if (service_id != service.id) {
    return false;
  }
  // The service must be at least as verbose as the listener requires
  if (min_verbosity > service.verbosity) {
    return false;
  }
  // realhw only seems to send the pad connected events to the listeners that
  // provided 0x01 as the first byte of their data buffer
  // TODO: Figure out how this filter works more properly
  if (service_id == SYS_CONFIG_SERVICE_PADMANAGER) {
    const bool opted_in = !data.empty() && data[0] == 0x01;
    if (!opted_in) {
      return false;
    }
  }
  // Event applies to this listener!
  return true;
}
// Record the event on this listener (used by the ONCE-type filter) and forward
// the notification to the event queue.
bool lv2_config_service_listener::notify(
    const shared_ptr<lv2_config_service_event> &event) {
  service_events.emplace_back(event);
  return event->notify();
}
// Notify this listener about a service, if it passes the filters.
// Returns false when filtered out or when event delivery fails.
bool lv2_config_service_listener::notify(
    const shared_ptr<lv2_config_service> &service) {
  if (!check_service(*service))
    return false;
  // Create service event and notify queue!
  const auto event = lv2_config_service_event::create(handle, service, *this);
  return notify(event);
}
// Replay all matching, already-registered services to this listener, in
// chronological order of registration.
void lv2_config_service_listener::notify_all() {
  std::vector<shared_ptr<lv2_config_service>> services;
  // Grab all services that pass this listener's filters
  idm::select<lv2_config_service>([&](u32 /*id*/, lv2_config_service &service) {
    if (check_service(service)) {
      services.push_back(service.get_shared_ptr());
    }
  });
  // Sort services by timestamp. Qualified as std::sort: the original relied on
  // ADL, which is not guaranteed to find std::sort when vector iterators are
  // implemented as raw pointers.
  std::sort(services.begin(), services.end(),
            [](const shared_ptr<lv2_config_service> &s1,
               const shared_ptr<lv2_config_service> &s2) {
              return s1->timestamp < s2->timestamp;
            });
  // Notify listener (now with services in sorted order)
  for (auto &service : services) {
    this->notify(service);
  }
}
// LV2 Config Service
// Unregister the service: flag it, tell listeners, and release the IDM slot.
void lv2_config_service::unregister() {
  registered = false;
  // Notify listeners
  notify();
  // Allow this object to be destroyed by withdrawing it from the IDM
  // Note that it won't be destroyed while there are service events that hold a
  // reference to it
  idm::remove<lv2_config_service>(idm_id);
}
// Broadcast this service's current state to every matching listener.
// Listeners are collected first (under idm::select), then notified outside it.
void lv2_config_service::notify() const {
  std::vector<shared_ptr<lv2_config_service_listener>> listeners;
  const shared_ptr<lv2_config_service> sptr = get_shared_ptr();
  idm::select<lv2_config_service_listener>(
      [&](u32 /*id*/, lv2_config_service_listener &listener) {
        if (listener.check_service(*sptr))
          listeners.push_back(listener.get_shared_ptr());
      });
  for (auto &listener : listeners) {
    listener->notify(sptr);
  }
}
// Push this event into the owning config handle's event queue.
// Event payload: source tag, (registered-flag << 32 | event id), data size.
// Returns false if the handle is gone or the send fails.
bool lv2_config_service_event::notify() const {
  const auto _handle = handle;
  if (!_handle) {
    return false;
  }
  // Send event
  return _handle->notify(SYS_CONFIG_EVENT_SOURCE_SERVICE,
                         (static_cast<u64>(service->is_registered()) << 32) |
                             id,
                         service->get_size());
}
// LV2 Config Service Event
// Serialize this event into the guest-visible sys_config_service_event_t.
// The data payload is only written for still-registered services; the caller
// is expected to have validated the destination size via check_buffer_size().
void lv2_config_service_event::write(sys_config_service_event_t *dst) const {
  const auto registered = service->is_registered();
  dst->service_listener_handle = listener.get_id();
  dst->registered = registered;
  dst->service_id = service->id;
  dst->user_id = service->user_id;
  if (registered) {
    dst->verbosity = service->verbosity;
    dst->padding = service->padding;
    const auto size = service->data.size();
    dst->data_size = static_cast<u32>(size);
    memcpy(dst->data, service->data.data(), size);
  }
}
/*
* Syscalls
*/
// Syscall: open a sys_config handle bound to an existing event queue.
// Writes the new handle id to *out_config_hdl on success.
error_code sys_config_open(u32 equeue_hdl, vm::ptr<u32> out_config_hdl) {
  sys_config.trace("sys_config_open(equeue_hdl=0x%x, out_config_hdl=*0x%x)",
                   equeue_hdl, out_config_hdl);
  // Find queue with the given ID
  const auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_hdl);
  if (!queue) {
    return CELL_ESRCH;
  }
  // Initialize lv2_config global state (idempotent, internally guarded).
  // The pointless `if (true)` wrapper around this call was removed.
  g_fxo->get<lv2_config>().initialize();
  // Create a lv2_config_handle object
  const auto config = lv2_config_handle::create(std::move(queue));
  if (config) {
    *out_config_hdl = idm::last_id();
    return CELL_OK;
  }
  // Failed to allocate sys_config object
  return CELL_EAGAIN;
}
// Syscall: close a sys_config handle by removing it from the ID manager.
error_code sys_config_close(u32 config_hdl) {
  sys_config.trace("sys_config_close(config_hdl=0x%x)", config_hdl);
  const bool removed = idm::remove<lv2_config_handle>(config_hdl);
  if (!removed) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}
// Syscall: copy a pending service event into a guest buffer.
// Returns ESRCH if the handle or event is unknown, EAGAIN if the buffer is
// too small for the event payload.
error_code sys_config_get_service_event(u32 config_hdl, u32 event_id,
                                        vm::ptr<sys_config_service_event_t> dst,
                                        u64 size) {
  sys_config.trace("sys_config_get_service_event(config_hdl=0x%x, "
                   "event_id=0x%llx, dst=*0x%llx, size=0x%llx)",
                   config_hdl, event_id, dst, size);
  // Find sys_config handle object with the given ID
  const auto cfg = idm::get_unlocked<lv2_config_handle>(config_hdl);
  if (!cfg) {
    return CELL_ESRCH;
  }
  // Find service_event object
  const auto event = g_fxo->get<lv2_config>().find_event(event_id);
  if (!event) {
    return CELL_ESRCH;
  }
  // Check buffer fits
  if (!event->check_buffer_size(size)) {
    return CELL_EAGAIN;
  }
  // Write event to buffer
  event->write(dst.get_ptr());
  return CELL_OK;
}
// Syscall: register a service listener on a config handle and immediately
// replay all matching past service registrations to it.
// Writes the new listener handle to *out_listener_hdl on success.
error_code sys_config_add_service_listener(
    u32 config_hdl, sys_config_service_id service_id, u64 min_verbosity,
    vm::ptr<void> in, u64 size, sys_config_service_listener_type type,
    vm::ptr<u32> out_listener_hdl) {
  sys_config.trace("sys_config_add_service_listener(config_hdl=0x%x, "
                   "service_id=0x%llx, min_verbosity=0x%llx, in=*0x%x, "
                   "size=%lld, type=0x%llx, out_listener_hdl=*0x%x)",
                   config_hdl, service_id, min_verbosity, in, size, type,
                   out_listener_hdl);
  // Find sys_config handle object with the given ID
  auto cfg = idm::get_unlocked<lv2_config_handle>(config_hdl);
  if (!cfg) {
    return CELL_ESRCH;
  }
  // Create service listener
  const auto listener = lv2_config_service_listener::create(
      cfg, service_id, min_verbosity, type, static_cast<u8 *>(in.get_ptr()),
      size);
  if (!listener) {
    return CELL_EAGAIN;
  }
  // Filter semantics of the listener data buffer are not fully understood yet;
  // log it for investigation (see check_service)
  if (size > 0) {
    std::string buf_str;
    dump_buffer(buf_str, listener->data);
    sys_config.todo(
        "Registered service listener for service %llx with non-zero buffer: %s",
        service_id, buf_str.c_str());
  }
  // Notify listener with all past events
  listener->notify_all();
  // Done!
  *out_listener_hdl = listener->get_id();
  return CELL_OK;
}
// Syscall: unregister a service listener by withdrawing it from the ID
// manager.
error_code sys_config_remove_service_listener(u32 config_hdl,
                                              u32 listener_hdl) {
  sys_config.trace(
      "sys_config_remove_service_listener(config_hdl=0x%x, listener_hdl=0x%x)",
      config_hdl, listener_hdl);
  const bool removed = idm::remove<lv2_config_service_listener>(listener_hdl);
  if (!removed) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}
// Syscall: register a service and broadcast it to all matching listeners.
// Writes the new service handle to *out_service_hdl on success.
// Fix: the trace format printed "data_but" for the data_buf parameter.
error_code sys_config_register_service(u32 config_hdl,
                                       sys_config_service_id service_id,
                                       u64 user_id, u64 verbosity,
                                       vm::ptr<u8> data_buf, u64 size,
                                       vm::ptr<u32> out_service_hdl) {
  sys_config.trace("sys_config_register_service(config_hdl=0x%x, "
                   "service_id=0x%llx, user_id=0x%llx, verbosity=0x%llx, "
                   "data_buf=*0x%llx, size=%lld, out_service_hdl=*0x%llx)",
                   config_hdl, service_id, user_id, verbosity, data_buf, size,
                   out_service_hdl);
  // Find sys_config handle object with the given ID
  const auto cfg = idm::get_unlocked<lv2_config_handle>(config_hdl);
  if (!cfg) {
    return CELL_ESRCH;
  }
  // Create service
  const auto service = lv2_config_service::create(
      service_id, user_id, verbosity, 0, data_buf.get_ptr(), size);
  if (!service) {
    return CELL_EAGAIN;
  }
  // Notify all listeners
  service->notify();
  // Done!
  *out_service_hdl = service->get_id();
  return CELL_OK;
}
// Syscall: withdraw a registered service and notify its listeners.
error_code sys_config_unregister_service(u32 config_hdl, u32 service_hdl) {
  sys_config.trace(
      "sys_config_unregister_service(config_hdl=0x%x, service_hdl=0x%x)",
      config_hdl, service_hdl);
  // Withdraw the service from the IDM (keeps it alive via the returned ptr)
  auto service = idm::withdraw<lv2_config_service>(service_hdl);
  if (!service) {
    return CELL_ESRCH;
  }
  // Unregister service
  service->unregister();
  // Done!
  return CELL_OK;
}
/*
* IO Events - TODO
*/
// Stub syscall: IO events are not implemented yet; only logs the request.
error_code sys_config_get_io_event(u32 config_hdl, u32 event_id /*?*/,
                                   vm::ptr<void> out_buf /*?*/,
                                   u64 size /*?*/) {
  sys_config.todo("sys_config_get_io_event(config_hdl=0x%x, event_id=0x%x, "
                  "out_buf=*0x%x, size=%lld)",
                  config_hdl, event_id, out_buf, size);
  return CELL_OK;
}
// Stub syscall: IO error listeners are not implemented yet; logs only.
error_code sys_config_register_io_error_listener(u32 config_hdl) {
  sys_config.todo("sys_config_register_io_error_listener(config_hdl=0x%x)",
                  config_hdl);
  return CELL_OK;
}
// Stub syscall: IO error listeners are not implemented yet; logs only.
error_code sys_config_unregister_io_error_listener(u32 config_hdl) {
  sys_config.todo("sys_config_unregister_io_error_listener(config_hdl=0x%x)",
                  config_hdl);
  return CELL_OK;
}

View file

@ -0,0 +1,13 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_console.h"
LOG_CHANNEL(sys_console);
// Stub syscall: system console output; only logs the request.
error_code sys_console_write(vm::cptr<char> buf, u32 len) {
  sys_console.todo("sys_console_write(buf=*0x%x, len=0x%x)", buf, len);
  return CELL_OK;
}

View file

@ -0,0 +1,28 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_crypto_engine.h"
LOG_CHANNEL(sys_crypto_engine);
// Stub syscall: crypto engine creation; only logs the request.
error_code sys_crypto_engine_create(vm::ptr<u32> id) {
  sys_crypto_engine.todo("sys_crypto_engine_create(id=*0x%x)", id);
  return CELL_OK;
}
// Stub syscall: crypto engine destruction; only logs the request.
error_code sys_crypto_engine_destroy(u32 id) {
  sys_crypto_engine.todo("sys_crypto_engine_destroy(id=0x%x)", id);
  return CELL_OK;
}
// Stub syscall: random generation is not implemented; only logs the request.
// Fix: the log format string was missing its closing parenthesis.
error_code sys_crypto_engine_random_generate(vm::ptr<void> buffer,
                                             u64 buffer_size) {
  sys_crypto_engine.todo(
      "sys_crypto_engine_random_generate(buffer=*0x%x, buffer_size=0x%x)",
      buffer, buffer_size);
  return CELL_OK;
}

View file

@ -0,0 +1,128 @@
#include "stdafx.h"
#include "sys_dbg.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUInterpreter.h"
#include "Emu/Memory/vm_locking.h"
#include "rpcsx/fw/ps3/sys_lv2dbg.h"
#include "util/asm.hpp"
void ppu_register_function_at(u32 addr, u32 size,
ppu_intrp_func_t ptr = nullptr);
LOG_CHANNEL(sys_dbg);
// Debug syscall: copy `size` bytes of guest memory at `address` into `data`.
// Only pid 1 is accepted (a single emulated process exists).
error_code sys_dbg_read_process_memory(s32 pid, u32 address, u32 size,
                                       vm::ptr<void> data) {
  sys_dbg.warning("sys_dbg_read_process_memory(pid=0x%x, address=0x%llx, "
                  "size=0x%x, data=*0x%x)",
                  pid, address, size, data);
  // Todo(TGEnigma): Process lookup (only 1 process exists right now)
  if (pid != 1) {
    return CELL_LV2DBG_ERROR_DEINVALIDARGUMENTS;
  }
  if (!size || !data) {
    return CELL_LV2DBG_ERROR_DEINVALIDARGUMENTS;
  }
  // Exclusive VM lock keeps page state stable for the checks and the copy
  vm::writer_lock lock;
  // Check if data destination is writable
  if (!vm::check_addr(data.addr(), vm::page_writable, size)) {
    return CELL_EFAULT;
  }
  // Check if the source is readable
  if (!vm::check_addr(address, vm::page_readable, size)) {
    return CELL_EFAULT;
  }
  // memmove: source and destination ranges may overlap in guest memory
  std::memmove(data.get_ptr(), vm::base(address), size);
  return CELL_OK;
}
// Debug syscall: copy `size` bytes from `data` into guest memory at `address`.
// Writes that land on executable pages are performed through super pointers in
// 64KiB-aligned runs and followed by re-registration of PPU functions so the
// recompiler picks up the new code. Only pid 1 is accepted.
error_code sys_dbg_write_process_memory(s32 pid, u32 address, u32 size,
                                        vm::cptr<void> data) {
  sys_dbg.warning("sys_dbg_write_process_memory(pid=0x%x, address=0x%llx, "
                  "size=0x%x, data=*0x%x)",
                  pid, address, size, data);
  // Todo(TGEnigma): Process lookup (only 1 process exists right now)
  if (pid != 1) {
    return CELL_LV2DBG_ERROR_DEINVALIDARGUMENTS;
  }
  if (!size || !data) {
    return CELL_LV2DBG_ERROR_DEINVALIDARGUMENTS;
  }
  // Check if data source is readable
  if (!vm::check_addr(data.addr(), vm::page_readable, size)) {
    return CELL_EFAULT;
  }
  // Check destination (can be read-only actually)
  if (!vm::check_addr(address, vm::page_readable, size)) {
    return CELL_EFAULT;
  }
  vm::writer_lock lock;
  // Again (re-check now that the exclusive lock is held)
  if (!vm::check_addr(data.addr(), vm::page_readable, size) ||
      !vm::check_addr(address, vm::page_readable, size)) {
    return CELL_EFAULT;
  }
  const u8 *data_ptr = static_cast<const u8 *>(data.get_ptr());
  if ((address >> 28) == 0xDu) {
    // Stack pages (4k pages is the exception here)
    std::memmove(vm::base(address), data_ptr, size);
    return CELL_OK;
  }
  // Walk the range in 64KiB-aligned chunks, batching consecutive executable
  // chunks into one super-pointer copy (exec_update_size accumulates them)
  const u32 end = address + size;
  for (u32 i = address, exec_update_size = 0; i < end;) {
    const u32 op_size =
        std::min<u32>(utils::align<u32>(i + 1, 0x10000), end) - i;
    const bool is_exec =
        vm::check_addr(i, vm::page_executable | vm::page_readable);
    if (is_exec) {
      exec_update_size += op_size;
      i += op_size;
    }
    if (!is_exec || i >= end) {
      // Commit executable data update
      // The read memory is also super ptr so memmove can work correctly on all
      // implementations
      const u32 before_addr = i - exec_update_size;
      std::memmove(vm::get_super_ptr(before_addr),
                   vm::get_super_ptr(data.addr() + (before_addr - address)),
                   exec_update_size);
      // Re-register the overwritten code range with the PPU
      ppu_register_function_at(before_addr, exec_update_size);
      exec_update_size = 0;
      if (i >= end) {
        break;
      }
    }
    if (!is_exec) {
      // Non-executable chunk: plain guest-memory copy
      std::memmove(vm::base(i), data_ptr + (i - address), op_size);
      i += op_size;
    }
  }
  return CELL_OK;
}

View file

@ -0,0 +1,732 @@
#include "stdafx.h"
#include "sys_event.h"
#include "Emu/IPC.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/SPUThread.h"
#include "sys_process.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_event);
// Regular constructor. `id` caches the IDM id assigned during creation;
// protocol/type/size are narrowed to u8 for storage.
lv2_event_queue::lv2_event_queue(u32 protocol, s32 type, s32 size, u64 name,
                                 u64 ipc_key) noexcept
    : id(idm::last_id()), protocol{static_cast<u8>(protocol)},
      type(static_cast<u8>(type)), size(static_cast<u8>(size)), name(name),
      key(ipc_key) {}
// Deserialization constructor (savestate load); restores pending events too.
lv2_event_queue::lv2_event_queue(utils::serial &ar) noexcept
    : id(idm::last_id()), protocol(ar), type(ar), size(ar), name(ar), key(ar) {
  ar(events);
}
// Savestate loader: constructs the queue from the archive, re-registers its
// IPC key via lv2_obj::load, and stores the result into the IDM slot.
std::function<void(void *)> lv2_event_queue::load(utils::serial &ar) {
  auto queue = make_shared<lv2_event_queue>(exact_t<utils::serial &>(ar));
  return [ptr = lv2_obj::load(queue->key, queue)](void *storage) {
    *static_cast<atomic_ptr<lv2_obj> *>(storage) = ptr;
  };
}
// Savestate writer: mirrors the serial constructor's read order exactly.
void lv2_event_queue::save(utils::serial &ar) {
  ar(protocol, type, size, name, key, events);
}
// Serialize a possibly-dead event queue reference as its IDM id; a stored id
// of 0 means "no queue / queue no longer exists".
void lv2_event_queue::save_ptr(utils::serial &ar, lv2_event_queue *q) {
  if (lv2_obj::check(q)) {
    ar(q->id);
  } else {
    ar(u32{0});
  }
}
// Deserialize a queue reference saved by save_ptr. Returns the queue if it is
// already registered; otherwise schedules a deferred lookup that writes into
// `queue` after all objects have been loaded (and returns null for now).
// Throws if the stored id is invalid or cannot be resolved. `msg` labels the
// call site for error reporting.
shared_ptr<lv2_event_queue>
lv2_event_queue::load_ptr(utils::serial &ar, shared_ptr<lv2_event_queue> &queue,
                          std::string_view msg) {
  const u32 id = ar.pop<u32>();
  if (!id) {
    return {};
  }
  if (auto q = idm::get_unlocked<lv2_obj, lv2_event_queue>(id)) {
    // Already initialized
    return q;
  }
  // Sanity-check the id against the lv2_obj id range before deferring
  if (id >> 24 != id_base >> 24) {
    fmt::throw_exception("Failed in event queue pointer deserialization "
                         "(invalid ID): location: %s, id=0x%x",
                         msg, id);
  }
  Emu.PostponeInitCode([id, &queue, msg_str = std::string{msg}]() {
    // Defer resolving
    queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(id);
    if (!queue) {
      fmt::throw_exception("Failed in event queue pointer deserialization (not "
                           "found): location: %s, id=0x%x",
                           msg_str, id);
    }
  });
  // Null until resolved
  return {};
}
// Deserialization constructor: the queue reference may be resolved lazily by
// load_ptr (writing into `queue` after init).
lv2_event_port::lv2_event_port(utils::serial &ar)
    : type(ar), name(ar),
      queue(lv2_event_queue::load_ptr(ar, queue, "eventport")) {}
// Savestate writer: persists type/name and the connected queue as an id.
void lv2_event_port::save(utils::serial &ar) {
  ar(type, name);
  lv2_event_queue::save_ptr(ar, queue.get());
}
// Look up a process-shared event queue by its IPC key.
shared_ptr<lv2_event_queue> lv2_event_queue::find(u64 ipc_key) {
  // Local (non-shared) queues have no IPC key to search by
  if (ipc_key == SYS_EVENT_QUEUE_LOCAL) {
    return {};
  }
  auto &manager = g_fxo->get<ipc_manager<lv2_event_queue, u64>>();
  return manager.get(ipc_key);
}
extern void resume_spu_thread_group_from_waiting(spu_thread &spu);
// Deliver an event to this queue. If no thread is waiting, the event is
// buffered (up to `size` entries, else CELL_EBUSY). Otherwise one waiter is
// woken: a PPU waiter receives the event in gpr[4..7]; an SPU waiter receives
// it through its inbound mailbox. *notified_thread is set when the woken PPU
// has lower priority than the sender and `port` is held busy for the handoff.
CellError lv2_event_queue::send(lv2_event event, bool *notified_thread,
                                lv2_event_port *port) {
  if (notified_thread) {
    *notified_thread = false;
  }
  std::lock_guard lock(mutex);
  if (!exists) {
    // Queue was destroyed
    return CELL_ENOTCONN;
  }
  if (!pq && !sq) {
    // No waiters: buffer the event if there is room
    if (events.size() < this->size + 0u) {
      // Save event
      events.emplace_back(event);
      return {};
    }
    return CELL_EBUSY;
  }
  if (type == SYS_PPU_QUEUE) {
    // Store event in registers
    auto &ppu = static_cast<ppu_thread &>(*schedule<ppu_thread>(pq, protocol));
    if (ppu.state & cpu_flag::again) {
      // Waiter is being saved (savestate); abort the whole operation
      if (auto cpu = get_current_cpu_thread()) {
        cpu->state += cpu_flag::again;
        cpu->state += cpu_flag::exit;
      }
      sys_event.warning("Ignored event!");
      // Fake error for abort
      return CELL_EAGAIN;
    }
    std::tie(ppu.gpr[4], ppu.gpr[5], ppu.gpr[6], ppu.gpr[7]) = event;
    awake(&ppu);
    if (port &&
        ppu.prio.load().prio <
            ensure(cpu_thread::get_current<ppu_thread>())->prio.load().prio) {
      // Block event port disconnection for the time being of sending events
      // PPU -> lower prio PPU is the only case that can cause thread blocking
      port->is_busy++;
      ensure(notified_thread);
      *notified_thread = true;
    }
  } else {
    // Store event in In_MBox
    auto &spu = static_cast<spu_thread &>(*schedule<spu_thread>(sq, protocol));
    if (spu.state & cpu_flag::again) {
      // Waiter is being saved (savestate); abort the whole operation
      if (auto cpu = get_current_cpu_thread()) {
        cpu->state += cpu_flag::exit + cpu_flag::again;
      }
      sys_event.warning("Ignored event!");
      // Fake error for abort
      return CELL_EAGAIN;
    }
    // SPU mailbox carries CELL_OK plus the low 32 bits of each data word
    const u32 data1 = static_cast<u32>(std::get<1>(event));
    const u32 data2 = static_cast<u32>(std::get<2>(event));
    const u32 data3 = static_cast<u32>(std::get<3>(event));
    spu.ch_in_mbox.set_values(4, CELL_OK, data1, data2, data3);
    resume_spu_thread_group_from_waiting(spu);
  }
  return {};
}
// Syscall: create an event queue (PPU or SPU type, FIFO or priority protocol,
// 1..127 buffered events). Writes the new id to *equeue_id on success.
error_code sys_event_queue_create(cpu_thread &cpu, vm::ptr<u32> equeue_id,
                                  vm::ptr<sys_event_queue_attribute_t> attr,
                                  u64 ipc_key, s32 size) {
  cpu.state += cpu_flag::wait;
  sys_event.warning("sys_event_queue_create(equeue_id=*0x%x, attr=*0x%x, "
                    "ipc_key=0x%llx, size=%d)",
                    equeue_id, attr, ipc_key, size);
  if (size <= 0 || size > 127) {
    return CELL_EINVAL;
  }
  const u32 protocol = attr->protocol;
  if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_PRIORITY) {
    sys_event.error("sys_event_queue_create(): unknown protocol (0x%x)",
                    protocol);
    return CELL_EINVAL;
  }
  const u32 type = attr->type;
  if (type != SYS_PPU_QUEUE && type != SYS_SPU_QUEUE) {
    sys_event.error("sys_event_queue_create(): unknown type (0x%x)", type);
    return CELL_EINVAL;
  }
  // A non-local IPC key makes the queue process-shared
  const u32 pshared = ipc_key == SYS_EVENT_QUEUE_LOCAL
                          ? SYS_SYNC_NOT_PROCESS_SHARED
                          : SYS_SYNC_PROCESS_SHARED;
  constexpr u32 flags = SYS_SYNC_NEWLY_CREATED;
  const u64 name = attr->name_u64;
  if (const auto error =
          lv2_obj::create<lv2_event_queue>(pshared, ipc_key, flags, [&]() {
            return make_shared<lv2_event_queue>(protocol, type, size, name,
                                                ipc_key);
          })) {
    return error;
  }
  // Synchronize CPU state before touching guest memory
  cpu.check_state();
  *equeue_id = idm::last_id();
  return CELL_OK;
}
// Destroys an event queue. Without SYS_EVENT_QUEUE_DESTROY_FORCE the call
// fails with CELL_EBUSY while threads are still waiting; with it, all waiters
// are woken with CELL_ECANCELED. Undelivered events are logged before being
// dropped.
error_code sys_event_queue_destroy(ppu_thread &ppu, u32 equeue_id, s32 mode) {
  ppu.state += cpu_flag::wait;
  sys_event.warning("sys_event_queue_destroy(equeue_id=0x%x, mode=%d)",
                    equeue_id, mode);
  // mode may only be 0 or SYS_EVENT_QUEUE_DESTROY_FORCE
  if (mode && mode != SYS_EVENT_QUEUE_DESTROY_FORCE) {
    return CELL_EINVAL;
  }
  std::vector<lv2_event> events;
  std::unique_lock<shared_mutex> qlock;
  // Head of the waiter list (PPU or SPU threads depending on queue type)
  cpu_thread *head{};
  const auto queue = idm::withdraw<lv2_obj, lv2_event_queue>(
      equeue_id, [&](lv2_event_queue &queue) -> CellError {
        // The lock is kept beyond the lambda so waiters can be woken below
        qlock = std::unique_lock{queue.mutex};
        head = queue.type == SYS_PPU_QUEUE
                   ? static_cast<cpu_thread *>(+queue.pq)
                   : +queue.sq;
        // Non-forced destruction fails while waiters exist
        if (!mode && head) {
          return CELL_EBUSY;
        }
        if (!queue.events.empty()) {
          // Copy events for logging, does not empty
          events.insert(events.begin(), queue.events.begin(),
                        queue.events.end());
        }
        lv2_obj::on_id_destroy(queue, queue.key);
        if (!head) {
          qlock.unlock();
        } else {
          // Savestate path: if any waiter must be re-suspended, abort the
          // syscall so it is retried after loading
          for (auto cpu = head; cpu; cpu = cpu->get_next_cpu()) {
            if (cpu->state & cpu_flag::again) {
              ppu.state += cpu_flag::again;
              return CELL_EAGAIN;
            }
          }
        }
        return {};
      });
  if (!queue) {
    return CELL_ESRCH;
  }
  if (ppu.state & cpu_flag::again) {
    return {};
  }
  if (queue.ret) {
    return queue.ret;
  }
  std::string lost_data;
  // qlock still held means there are waiters that need to be cancelled
  if (qlock.owns_lock()) {
    if (sys_event.warning) {
      u32 size = 0;
      for (auto cpu = head; cpu; cpu = cpu->get_next_cpu()) {
        size++;
      }
      fmt::append(lost_data, "Forcefully awaken waiters (%u):\n", size);
      for (auto cpu = head; cpu; cpu = cpu->get_next_cpu()) {
        lost_data += cpu->get_name();
        lost_data += '\n';
      }
    }
    if (queue->type == SYS_PPU_QUEUE) {
      // PPU waiters: report CELL_ECANCELED via the register ABI and requeue
      for (auto cpu = +queue->pq; cpu; cpu = cpu->next_cpu) {
        cpu->gpr[3] = CELL_ECANCELED;
        queue->append(cpu);
      }
      atomic_storage<ppu_thread *>::release(queue->pq, nullptr);
      lv2_obj::awake_all();
    } else {
      // SPU waiters: deliver CELL_ECANCELED through the inbound mailbox
      for (auto cpu = +queue->sq; cpu; cpu = cpu->next_cpu) {
        cpu->ch_in_mbox.set_values(1, CELL_ECANCELED);
        resume_spu_thread_group_from_waiting(*cpu);
      }
      atomic_storage<spu_thread *>::release(queue->sq, nullptr);
    }
    qlock.unlock();
  }
  // Log any events that were never delivered
  if (sys_event.warning) {
    if (!events.empty()) {
      fmt::append(lost_data, "Unread queue events (%u):\n", events.size());
    }
    for (const lv2_event &evt : events) {
      fmt::append(lost_data, "data0=0x%x, data1=0x%x, data2=0x%x, data3=0x%x\n",
                  std::get<0>(evt), std::get<1>(evt), std::get<2>(evt),
                  std::get<3>(evt));
    }
    if (!lost_data.empty()) {
      sys_event.warning("sys_event_queue_destroy(): %s", lost_data);
    }
  }
  return CELL_OK;
}
// Non-blocking receive: moves up to `size` pending events into event_array
// and stores the number of copied events in *number. PPU queues only.
error_code sys_event_queue_tryreceive(ppu_thread &ppu, u32 equeue_id,
                                      vm::ptr<sys_event_t> event_array,
                                      s32 size, vm::ptr<u32> number) {
  ppu.state += cpu_flag::wait;
  sys_event.trace("sys_event_queue_tryreceive(equeue_id=0x%x, "
                  "event_array=*0x%x, size=%d, number=*0x%x)",
                  equeue_id, event_array, size, number);
  const auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_id);
  if (!queue) {
    return CELL_ESRCH;
  }
  if (queue->type != SYS_PPU_QUEUE) {
    return CELL_EINVAL;
  }
  // Staging buffer; a queue holds at most 127 events
  std::array<sys_event_t, 127> events;
  std::unique_lock lock(queue->mutex);
  // The queue may have been destroyed between lookup and locking
  if (!queue->exists) {
    return CELL_ESRCH;
  }
  s32 count = 0;
  while (count < size && !queue->events.empty()) {
    auto &dest = events[count++];
    std::tie(dest.source, dest.data1, dest.data2, dest.data3) =
        queue->events.front();
    queue->events.pop_front();
  }
  // Write results to guest memory outside of the queue lock
  lock.unlock();
  ppu.check_state();
  std::copy_n(events.begin(), count, event_array.get_ptr());
  *number = count;
  return CELL_OK;
}
// Blocking receive: delivers the next event through gpr[4..7] (the PPU
// syscall ABI), sleeping up to `timeout` (0 = wait forever). dummy_event is
// not written by the kernel; the event travels in registers.
error_code sys_event_queue_receive(ppu_thread &ppu, u32 equeue_id,
                                   vm::ptr<sys_event_t> dummy_event,
                                   u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_event.trace(
      "sys_event_queue_receive(equeue_id=0x%x, *0x%x, timeout=0x%llx)",
      equeue_id, dummy_event, timeout);
  ppu.gpr[3] = CELL_OK;
  const auto queue = idm::get<lv2_obj, lv2_event_queue>(
      equeue_id,
      [&,
       notify = lv2_obj::notify_all_t()](lv2_event_queue &queue) -> CellError {
        if (queue.type != SYS_PPU_QUEUE) {
          return CELL_EINVAL;
        }
        lv2_obj::prepare_for_sleep(ppu);
        std::lock_guard lock(queue.mutex);
        // "/dev_flash/vsh/module/msmw2.sprx" seems to rely on some cryptic
        // shared memory behaviour that we don't emulate correctly
        // This is a hack to avoid waiting for 1m40s every time we boot vsh
        if (queue.key == 0x8005911000000012 && Emu.IsVsh()) {
          sys_event.todo("sys_event_queue_receive(equeue_id=0x%x, *0x%x, "
                         "timeout=0x%llx) Bypassing timeout for msmw2.sprx",
                         equeue_id, dummy_event, timeout);
          timeout = 1;
        }
        if (queue.events.empty()) {
          // No event pending: join the waiter queue and sleep
          queue.sleep(ppu, timeout);
          lv2_obj::emplace(queue.pq, &ppu);
          return CELL_EBUSY;
        }
        // Deliver the oldest event immediately via the register ABI
        std::tie(ppu.gpr[4], ppu.gpr[5], ppu.gpr[6], ppu.gpr[7]) =
            queue.events.front();
        queue.events.pop_front();
        return {};
      });
  if (!queue) {
    return CELL_ESRCH;
  }
  if (queue.ret) {
    if (queue.ret != CELL_EBUSY) {
      return queue.ret;
    }
  } else {
    return CELL_OK;
  }
  // If cancelled, gpr[3] will be non-zero. Other registers must contain event
  // data.
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      // Emulator is stopping/saving: if still queued, mark the thread so the
      // syscall is re-executed after loading
      std::lock_guard lock_rsx(queue->mutex);
      for (auto cpu = +queue->pq; cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }
      break;
    }
    // Brief spin in case the signal arrives promptly
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        if (!atomic_storage<ppu_thread *>::load(queue->pq)) {
          // Waiters queue is empty, so the thread must have been signaled
          queue->mutex.lock_unlock();
          break;
        }
        std::lock_guard lock(queue->mutex);
        if (!queue->unqueue(queue->pq, &ppu)) {
          // Removed by a sender concurrently: the event was delivered
          break;
        }
        ppu.gpr[3] = CELL_ETIMEDOUT;
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  return not_an_error(ppu.gpr[3]);
}
error_code sys_event_queue_drain(ppu_thread &ppu, u32 equeue_id) {
  // Discards all pending events of the queue without waking any waiters.
  ppu.state += cpu_flag::wait;

  sys_event.trace("sys_event_queue_drain(equeue_id=0x%x)", equeue_id);

  const auto queue =
      idm::check<lv2_obj, lv2_event_queue>(equeue_id, [&](lv2_event_queue &q) {
        std::lock_guard lock(q.mutex);
        q.events.clear();
      });

  if (!queue) {
    return CELL_ESRCH;
  }

  return CELL_OK;
}
error_code sys_event_port_create(cpu_thread &cpu, vm::ptr<u32> eport_id,
                                 s32 port_type, u64 name) {
  // Creates an LV2 event port and writes the new object id to *eport_id.
  cpu.state += cpu_flag::wait;

  sys_event.warning(
      "sys_event_port_create(eport_id=*0x%x, port_type=%d, name=0x%llx)",
      eport_id, port_type, name);

  // Only "local" ports and the IPC type (3) are accepted
  const bool known_type = port_type == SYS_EVENT_PORT_LOCAL || port_type == 3;

  if (!known_type) {
    sys_event.error("sys_event_port_create(): unknown port type (%d)",
                    port_type);
    return CELL_EINVAL;
  }

  const u32 id = idm::make<lv2_obj, lv2_event_port>(port_type, name);

  if (!id) {
    return CELL_EAGAIN;
  }

  cpu.check_state();
  *eport_id = id;
  return CELL_OK;
}
error_code sys_event_port_destroy(ppu_thread &ppu, u32 eport_id) {
  // Destroys an event port; fails while it is still connected to a queue.
  ppu.state += cpu_flag::wait;

  sys_event.warning("sys_event_port_destroy(eport_id=0x%x)", eport_id);

  const auto removed = idm::withdraw<lv2_obj, lv2_event_port>(
      eport_id, [](lv2_event_port &port) -> CellError {
        // A connected port cannot be removed
        if (lv2_obj::check(port.queue)) {
          return CELL_EISCONN;
        }

        return {};
      });

  if (!removed) {
    return CELL_ESRCH;
  }

  if (removed.ret) {
    return removed.ret;
  }

  return CELL_OK;
}
error_code sys_event_port_connect_local(cpu_thread &cpu, u32 eport_id,
                                        u32 equeue_id) {
  // Connects a local event port to an event queue of the same process.
  cpu.state += cpu_flag::wait;

  sys_event.warning(
      "sys_event_port_connect_local(eport_id=0x%x, equeue_id=0x%x)", eport_id,
      equeue_id);

  // Look up and link both objects under a single id_manager lock
  std::lock_guard lock(id_manager::g_mutex);

  const auto port = idm::check_unlocked<lv2_obj, lv2_event_port>(eport_id);

  if (!port || !idm::check_unlocked<lv2_obj, lv2_event_queue>(equeue_id)) {
    return CELL_ESRCH;
  }

  if (port->type != SYS_EVENT_PORT_LOCAL) {
    return CELL_EINVAL;
  }

  if (lv2_obj::check(port->queue)) {
    // Already connected somewhere
    return CELL_EISCONN;
  }

  port->queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_id);
  return CELL_OK;
}
error_code sys_event_port_connect_ipc(ppu_thread &ppu, u32 eport_id,
                                      u64 ipc_key) {
  // Connects an IPC-type event port to the process-shared queue registered
  // under ipc_key.
  ppu.state += cpu_flag::wait;

  sys_event.warning("sys_event_port_connect_ipc(eport_id=0x%x, ipc_key=0x%x)",
                    eport_id, ipc_key);

  if (!ipc_key) {
    return CELL_EINVAL;
  }

  // Resolve the shared queue before taking the id_manager lock
  auto queue = lv2_event_queue::find(ipc_key);

  std::lock_guard lock(id_manager::g_mutex);

  const auto port = idm::check_unlocked<lv2_obj, lv2_event_port>(eport_id);

  if (!queue || !port) {
    return CELL_ESRCH;
  }

  if (port->type != SYS_EVENT_PORT_IPC) {
    return CELL_EINVAL;
  }

  if (lv2_obj::check(port->queue)) {
    // Already connected somewhere
    return CELL_EISCONN;
  }

  port->queue = std::move(queue);
  return CELL_OK;
}
error_code sys_event_port_disconnect(ppu_thread &ppu, u32 eport_id) {
  // Detaches an event port from its queue; fails when it is not connected or
  // while a send operation is still in flight.
  ppu.state += cpu_flag::wait;

  sys_event.warning("sys_event_port_disconnect(eport_id=0x%x)", eport_id);

  std::lock_guard lock(id_manager::g_mutex);

  const auto port = idm::check_unlocked<lv2_obj, lv2_event_port>(eport_id);

  if (!port) {
    return CELL_ESRCH;
  }

  if (!lv2_obj::check(port->queue)) {
    // Nothing to disconnect
    return CELL_ENOTCONN;
  }

  if (port->is_busy) {
    // A sender still references the connection
    return CELL_EBUSY;
  }

  port->queue.reset();
  return CELL_OK;
}
// Sends a 3-word event through a connected port. The event's source field is
// the port name, or (pid << 32 | eport_id) for unnamed ports.
error_code sys_event_port_send(u32 eport_id, u64 data1, u64 data2, u64 data3) {
  // May be invoked from PPU, SPU or non-guest threads
  const auto cpu = cpu_thread::get_current();
  const auto ppu = cpu ? cpu->try_get<ppu_thread>() : nullptr;
  if (cpu) {
    cpu->state += cpu_flag::wait;
  }
  sys_event.trace("sys_event_port_send(eport_id=0x%x, data1=0x%llx, "
                  "data2=0x%llx, data3=0x%llx)",
                  eport_id, data1, data2, data3);
  bool notified_thread = false;
  const auto port = idm::check<lv2_obj, lv2_event_port>(
      eport_id,
      [&, notify = lv2_obj::notify_all_t()](lv2_event_port &port) -> CellError {
        if (ppu && ppu->loaded_from_savestate) {
          // Savestate resume path: skip the actual send, only mark the port
          // busy so the requeue wait below is entered
          port.is_busy++;
          notified_thread = true;
          return {};
        }
        if (lv2_obj::check(port.queue)) {
          const u64 source =
              port.name ? port.name
                        : (u64{process_getpid() + 0u} << 32) | u64{eport_id};
          return port.queue->send(
              source, data1, data2, data3, &notified_thread,
              ppu && port.queue->type == SYS_PPU_QUEUE ? &port : nullptr);
        }
        return CELL_ENOTCONN;
      });
  if (!port) {
    return CELL_ESRCH;
  }
  if (ppu && notified_thread) {
    // Wait to be requeued
    if (ppu->test_stopped()) {
      // Wait again on savestate load
      ppu->state += cpu_flag::again;
    }
    port->is_busy--;
    return CELL_OK;
  }
  if (port.ret) {
    if (port.ret == CELL_EAGAIN) {
      // Not really an error code exposed to games (thread has raised
      // cpu_flag::again)
      return not_an_error(CELL_EAGAIN);
    }
    if (port.ret == CELL_EBUSY) {
      return not_an_error(CELL_EBUSY);
    }
    return port.ret;
  }
  return CELL_OK;
}

View file

@ -0,0 +1,514 @@
#include "stdafx.h"
#include "sys_event_flag.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_event_flag);
// Deserializes an event flag from a savestate; field order must match save().
lv2_event_flag::lv2_event_flag(utils::serial &ar)
    : protocol(ar), key(ar), type(ar), name(ar) {
  // The current bit pattern is restored after the immutable fields
  ar(pattern);
}
// Savestate factory: constructs the event flag from the archive and returns
// the completion functor used by the deserialization framework.
std::function<void(void *)> lv2_event_flag::load(utils::serial &ar) {
  return load_func(make_shared<lv2_event_flag>(exact_t<utils::serial &>(ar)));
}
// Serializes the event flag into a savestate; order must match the
// deserializing constructor above.
void lv2_event_flag::save(utils::serial &ar) {
  ar(protocol, key, type, name, pattern);
}
error_code sys_event_flag_create(ppu_thread &ppu, vm::ptr<u32> id,
                                 vm::ptr<sys_event_flag_attribute_t> attr,
                                 u64 init) {
  // Creates an LV2 event flag with initial bit pattern `init` and writes the
  // new object id to *id.
  ppu.state += cpu_flag::wait;

  sys_event_flag.warning(
      "sys_event_flag_create(id=*0x%x, attr=*0x%x, init=0x%llx)", id, attr,
      init);

  if (!id || !attr) {
    return CELL_EFAULT;
  }

  // Snapshot the attributes once before validating
  const auto attr_data = *attr;

  const u32 proto = attr_data.protocol;

  if (proto != SYS_SYNC_FIFO && proto != SYS_SYNC_PRIORITY) {
    sys_event_flag.error("sys_event_flag_create(): unknown protocol (0x%x)",
                         proto);
    return CELL_EINVAL;
  }

  const u32 waiter_type = attr_data.type;

  if (waiter_type != SYS_SYNC_WAITER_SINGLE &&
      waiter_type != SYS_SYNC_WAITER_MULTIPLE) {
    sys_event_flag.error("sys_event_flag_create(): unknown type (0x%x)",
                         waiter_type);
    return CELL_EINVAL;
  }

  const u64 ipc_key = lv2_obj::get_key(attr_data);

  if (const auto error = lv2_obj::create<lv2_event_flag>(
          attr_data.pshared, ipc_key, attr_data.flags, [&] {
            return make_shared<lv2_event_flag>(attr_data.protocol, ipc_key,
                                               attr_data.type,
                                               attr_data.name_u64, init);
          })) {
    return error;
  }

  ppu.check_state();
  *id = idm::last_id();
  return CELL_OK;
}
error_code sys_event_flag_destroy(ppu_thread &ppu, u32 id) {
  // Destroys an event flag; fails while any thread still waits on it.
  ppu.state += cpu_flag::wait;

  sys_event_flag.warning("sys_event_flag_destroy(id=0x%x)", id);

  const auto removed = idm::withdraw<lv2_obj, lv2_event_flag>(
      id, [](lv2_event_flag &ef) -> CellError {
        // Refuse to destroy while the sleep queue is occupied
        if (ef.sq) {
          return CELL_EBUSY;
        }

        lv2_obj::on_id_destroy(ef, ef.key);
        return {};
      });

  if (!removed) {
    return CELL_ESRCH;
  }

  if (removed.ret) {
    return removed.ret;
  }

  return CELL_OK;
}
// Waits until the flag's pattern satisfies (bitptn, mode), optionally
// clearing bits on wakeup per the mode's clear policy. The matched pattern is
// written to *result (always written, even on failure). timeout is 0 for an
// infinite wait.
error_code sys_event_flag_wait(ppu_thread &ppu, u32 id, u64 bitptn, u32 mode,
                               vm::ptr<u64> result, u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_event_flag.trace("sys_event_flag_wait(id=0x%x, bitptn=0x%llx, mode=0x%x, "
                       "result=*0x%x, timeout=0x%llx)",
                       id, bitptn, mode, result, timeout);
  // Fix function arguments for external access
  // (sys_event_flag_set reads the request back from gpr[4]/gpr[5] and
  // delivers status/pattern through gpr[3]/gpr[6])
  ppu.gpr[3] = -1;
  ppu.gpr[4] = bitptn;
  ppu.gpr[5] = mode;
  ppu.gpr[6] = 0;
  // Always set result
  struct store_result {
    vm::ptr<u64> ptr;
    u64 val = 0;
    ~store_result() noexcept {
      if (ptr) {
        cpu_thread::get_current()->check_state();
        *ptr = val;
      }
    }
  } store{result};
  if (!lv2_event_flag::check_mode(mode)) {
    sys_event_flag.error("sys_event_flag_wait(): unknown mode (0x%x)", mode);
    return CELL_EINVAL;
  }
  const auto flag = idm::get<lv2_obj, lv2_event_flag>(
      id,
      [&, notify = lv2_obj::notify_all_t()](lv2_event_flag &flag) -> CellError {
        // Lock-free fast path: pattern already satisfied
        if (flag.pattern
                .fetch_op([&](u64 &pat) {
                  return lv2_event_flag::check_pattern(pat, bitptn, mode,
                                                       &ppu.gpr[6]);
                })
                .second) {
          // TODO: is it possible to return EPERM in this case?
          return {};
        }
        lv2_obj::prepare_for_sleep(ppu);
        std::lock_guard lock(flag.mutex);
        // Re-check under the lock before sleeping
        if (flag.pattern
                .fetch_op([&](u64 &pat) {
                  return lv2_event_flag::check_pattern(pat, bitptn, mode,
                                                       &ppu.gpr[6]);
                })
                .second) {
          return {};
        }
        // Single-waiter flags reject a second concurrent waiter
        if (flag.type == SYS_SYNC_WAITER_SINGLE && flag.sq) {
          return CELL_EPERM;
        }
        flag.sleep(ppu, timeout);
        lv2_obj::emplace(flag.sq, &ppu);
        return CELL_EBUSY;
      });
  if (!flag) {
    return CELL_ESRCH;
  }
  if (flag.ret) {
    if (flag.ret != CELL_EBUSY) {
      return flag.ret;
    }
  } else {
    store.val = ppu.gpr[6];
    return CELL_OK;
  }
  // Sleep loop: woken by a setter (signal), cancellation, or timeout
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      // Emulator is stopping/saving: if still queued, mark the thread so the
      // syscall is re-executed after loading
      std::lock_guard lock(flag->mutex);
      for (auto cpu = +flag->sq; cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }
      break;
    }
    // Brief spin in case the signal arrives promptly
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        if (!atomic_storage<ppu_thread *>::load(flag->sq)) {
          // Waiters queue is empty, so the thread must have been signaled
          flag->mutex.lock_unlock();
          break;
        }
        std::lock_guard lock(flag->mutex);
        if (!flag->unqueue(flag->sq, &ppu)) {
          // Removed by a setter concurrently: the wait was satisfied
          break;
        }
        ppu.gpr[3] = CELL_ETIMEDOUT;
        ppu.gpr[6] = flag->pattern;
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  store.val = ppu.gpr[6];
  return not_an_error(ppu.gpr[3]);
}
error_code sys_event_flag_trywait(ppu_thread &ppu, u32 id, u64 bitptn, u32 mode,
                                  vm::ptr<u64> result) {
  // Non-blocking variant of sys_event_flag_wait: succeeds only when the
  // requested bit pattern is already satisfied.
  ppu.state += cpu_flag::wait;

  sys_event_flag.trace(
      "sys_event_flag_trywait(id=0x%x, bitptn=0x%llx, mode=0x%x, result=*0x%x)",
      id, bitptn, mode, result);

  // *result is written unconditionally on scope exit
  struct result_writer {
    vm::ptr<u64> ptr;
    u64 val = 0;

    ~result_writer() noexcept {
      if (ptr) {
        cpu_thread::get_current()->check_state();
        *ptr = val;
      }
    }
  } store{result};

  if (!lv2_event_flag::check_mode(mode)) {
    sys_event_flag.error("sys_event_flag_trywait(): unknown mode (0x%x)", mode);
    return CELL_EINVAL;
  }

  u64 captured{};

  const auto flag =
      idm::check<lv2_obj, lv2_event_flag>(id, [&](lv2_event_flag &ef) {
        // Atomically test the pattern, applying the mode's clear policy on
        // success
        const auto op = ef.pattern.fetch_op([&](u64 &bits) {
          return lv2_event_flag::check_pattern(bits, bitptn, mode, &captured);
        });

        return op.second;
      });

  if (!flag) {
    return CELL_ESRCH;
  }

  if (!flag.ret) {
    // Pattern not satisfied right now
    return not_an_error(CELL_EBUSY);
  }

  store.val = captured;
  return CELL_OK;
}
// Sets bits of the flag's pattern and wakes every waiter whose request
// becomes satisfied, resolving all waiters in a single atomic commit.
error_code sys_event_flag_set(cpu_thread &cpu, u32 id, u64 bitptn) {
  cpu.state += cpu_flag::wait;
  // Warning: may be called from SPU thread.
  sys_event_flag.trace("sys_event_flag_set(id=0x%x, bitptn=0x%llx)", id,
                       bitptn);
  const auto flag = idm::get_unlocked<lv2_obj, lv2_event_flag>(id);
  if (!flag) {
    return CELL_ESRCH;
  }
  // Fast path: all requested bits are already set, nothing to do
  if ((flag->pattern & bitptn) == bitptn) {
    return CELL_OK;
  }
  if (lv2_obj::notify_all_t notify; true) {
    std::lock_guard lock(flag->mutex);
    // Savestate path: retry the syscall if any waiter must be re-suspended
    for (auto ppu = +flag->sq; ppu; ppu = ppu->next_cpu) {
      if (ppu->state & cpu_flag::again) {
        cpu.state += cpu_flag::again;
        // Fake error for abort
        return not_an_error(CELL_EAGAIN);
      }
    }
    u32 count = 0;
    // Process all waiters in single atomic op
    // (retry the whole selection when the commit below loses a race)
    for (u64 pattern = flag->pattern, to_write = pattern, dependant_mask = 0;;
         to_write = pattern, dependant_mask = 0) {
      count = 0;
      to_write |= bitptn;
      dependant_mask = 0;
      // gpr[7] doubles as a "visited" marker during waiter selection
      for (auto ppu = +flag->sq; ppu; ppu = ppu->next_cpu) {
        ppu->gpr[7] = 0;
      }
      auto first = +flag->sq;
      // Picks the next unvisited waiter: best priority first under the
      // PRIORITY protocol, queue order otherwise
      auto get_next = [&]() -> ppu_thread * {
        s32 prio = smax;
        ppu_thread *it{};
        for (auto ppu = first; ppu; ppu = ppu->next_cpu) {
          if (!ppu->gpr[7] && (flag->protocol != SYS_SYNC_PRIORITY ||
                               ppu->prio.load().prio <= prio)) {
            it = ppu;
            prio = ppu->prio.load().prio;
          }
        }
        if (it) {
          // Mark it so it won't reappear
          it->gpr[7] = 1;
        }
        return it;
      };
      while (auto it = get_next()) {
        auto &ppu = *it;
        // The waiter stashed its request in gpr[4] (pattern) and gpr[5] (mode)
        const u64 pattern = ppu.gpr[4];
        const u64 mode = ppu.gpr[5];
        // If it's OR mode, set bits must have waken up the thread therefore no
        // dependency on old value
        const u64 dependant_mask_or =
            ((mode & 0xf) == SYS_EVENT_FLAG_WAIT_OR ||
             (bitptn & pattern & to_write) == pattern
                 ? 0
                 : pattern);
        if (lv2_event_flag::check_pattern(to_write, pattern, mode,
                                          &ppu.gpr[6])) {
          dependant_mask |= dependant_mask_or;
          ppu.gpr[3] = CELL_OK;
          count++;
          if (!to_write) {
            break;
          }
        } else {
          ppu.gpr[3] = -1;
        }
      }
      dependant_mask &= ~bitptn;
      // Commit only if no depended-upon bit changed concurrently
      auto [new_val, ok] = flag->pattern.fetch_op([&](u64 &x) {
        if ((x ^ pattern) & dependant_mask) {
          return false;
        }
        x |= bitptn;
        // Clear the bit-wise difference
        x &= ~((pattern | bitptn) & ~to_write);
        return true;
      });
      if (ok) {
        break;
      }
      pattern = new_val;
    }
    if (!count) {
      return CELL_OK;
    }
    // Remove waiters
    // (unlink every satisfied waiter and hand it to the scheduler)
    for (auto next_cpu = &flag->sq; *next_cpu;) {
      auto &ppu = **next_cpu;
      if (ppu.gpr[3] == CELL_OK) {
        atomic_storage<ppu_thread *>::release(*next_cpu, ppu.next_cpu);
        ppu.next_cpu = nullptr;
        flag->append(&ppu);
        continue;
      }
      next_cpu = &ppu.next_cpu;
    };
    lv2_obj::awake_all();
  }
  return CELL_OK;
}
error_code sys_event_flag_clear(ppu_thread &ppu, u32 id, u64 bitptn) {
  // ANDs the flag's pattern with bitptn (bits absent from bitptn are cleared).
  ppu.state += cpu_flag::wait;

  sys_event_flag.trace("sys_event_flag_clear(id=0x%x, bitptn=0x%llx)", id,
                       bitptn);

  const auto found = idm::check<lv2_obj, lv2_event_flag>(
      id, [&](lv2_event_flag &ef) { ef.pattern &= bitptn; });

  if (!found) {
    return CELL_ESRCH;
  }

  return CELL_OK;
}
// Wakes every thread waiting on the flag with CELL_ECANCELED and optionally
// reports the number of woken threads via *num.
error_code sys_event_flag_cancel(ppu_thread &ppu, u32 id, vm::ptr<u32> num) {
  ppu.state += cpu_flag::wait;
  sys_event_flag.trace("sys_event_flag_cancel(id=0x%x, num=*0x%x)", id, num);
  if (num)
    *num = 0;
  const auto flag = idm::get_unlocked<lv2_obj, lv2_event_flag>(id);
  if (!flag) {
    return CELL_ESRCH;
  }
  u32 value = 0;
  {
    lv2_obj::notify_all_t notify;
    std::lock_guard lock(flag->mutex);
    // Savestate path: retry the syscall if any waiter must be re-suspended
    for (auto cpu = +flag->sq; cpu; cpu = cpu->next_cpu) {
      if (cpu->state & cpu_flag::again) {
        ppu.state += cpu_flag::again;
        return {};
      }
    }
    // Get current pattern
    const u64 pattern = flag->pattern;
    // Signal all threads to return CELL_ECANCELED (protocol does not matter)
    while (auto ppu = flag->schedule<ppu_thread>(flag->sq, SYS_SYNC_FIFO)) {
      ppu->gpr[3] = CELL_ECANCELED;
      ppu->gpr[6] = pattern;
      value++;
      flag->append(ppu);
    }
    if (value) {
      lv2_obj::awake_all();
    }
  }
  static_cast<void>(ppu.test_stopped());
  if (num)
    *num = value;
  return CELL_OK;
}
// Reads the current bit pattern of an event flag into *flags.
error_code sys_event_flag_get(ppu_thread &ppu, u32 id, vm::ptr<u64> flags) {
  ppu.state += cpu_flag::wait;
  sys_event_flag.trace("sys_event_flag_get(id=0x%x, flags=*0x%x)", id, flags);
  const auto flag = idm::check<lv2_obj, lv2_event_flag>(
      id, [](lv2_event_flag &flag) { return +flag.pattern; });
  ppu.check_state();
  if (!flag) {
    // Note: *flags is zeroed even when the id is invalid
    if (flags)
      *flags = 0;
    return CELL_ESRCH;
  }
  if (!flags) {
    // Checked after the lookup: ESRCH takes priority over EFAULT here
    return CELL_EFAULT;
  }
  *flags = flag.ret;
  return CELL_OK;
}

3248
kernel/cellos/src/sys_fs.cpp Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,267 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/IdManager.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/System.h"
#include "Emu/system_utils.hpp"
#include "cellos/sys_process.h"
#include "util/StrUtil.h"
#include "util/Thread.h"
#include "util/sysinfo.hpp"
#include "util/v128.hpp"
#include "Emu/Cell/timers.hpp"
#include "sys_game.h"
LOG_CHANNEL(sys_game);
// Holds the system software (firmware) version reported to guest code,
// stored scaled by 10000 (e.g. a parsed value of 4.88 becomes 48800).
struct system_sw_version {
  system_sw_version() {
    f64 version_f = 0;
    // Parse the configured firmware version string; falls back to 0 on error
    if (!try_to_float(&version_f, utils::get_firmware_version(), 0.0f,
                      99.9999f))
      sys_game.error("Error parsing firmware version");
    version = static_cast<usz>(version_f * 10000);
  }

  system_sw_version(const system_sw_version &) = delete;
  system_sw_version &operator=(const system_sw_version &) = delete;
  ~system_sw_version() = default;

  // Scaled version value; can be overridden via
  // _sys_game_set_system_sw_version()
  atomic_t<u64> version;
};
// Emulates the console's 16-byte board storage. Content is cached in an hdd1
// file and persisted on destruction only if it was written to.
struct board_storage {
public:
  // Copies the 16-byte storage into `buffer`; returns false on null buffer.
  bool read(u8 *buffer) {
    if (!buffer)
      return false;

    const auto data = storage.load();
    memcpy(buffer, &data, size);
    return true;
  }

  // Replaces the content with 16 bytes from `buffer` and marks the storage
  // dirty; returns false on null buffer.
  bool write(u8 *buffer) {
    if (!buffer)
      return false;

    storage.store(read_from_ptr<be_t<v128>>(buffer));
    written = true;
    return true;
  }

  board_storage() {
    // Default content is all 0xFF; overridden by the cache file if present
    memset(&storage.raw(), -1, size);

    if (fs::file file; file.open(file_path, fs::read))
      file.read(&storage.raw(), std::min(file.size(), size));
  }

  board_storage(const board_storage &) = delete;
  board_storage &operator=(const board_storage &) = delete;

  ~board_storage() {
    // Persist only if the guest actually wrote to the storage
    if (written) {
      if (fs::file file;
          file.open(file_path, fs::create + fs::write + fs::lock)) {
        file.write(&storage.raw(), size);
        file.trunc(size);
      }
    }
  }

private:
  // 128-bit storage kept as a single big-endian atomic value
  atomic_be_t<v128> storage;
  // Set on first write(); gates persistence in the destructor
  bool written = false;
  const std::string file_path =
      rpcs3::utils::get_hdd1_dir() + "/caches/board_storage.bin";
  static constexpr u64 size = sizeof(v128);
};
// LV2 watchdog emulation: a background thread that restarts the game when
// the timeout expires without the guest rearming (clearing) the watchdog.
struct watchdog_t {
  // Shared state updated atomically as one 8-byte unit
  struct alignas(8) control_t {
    bool needs_restart = false; // request to restart the countdown
    bool active = false;        // watchdog armed
    char pad[sizeof(u32) - sizeof(bool) * 2]{};
    u32 timeout = 0; // countdown length (microseconds, see _sys_game_watchdog_start)
  };

  atomic_t<control_t> control;

  // Thread entry point: polls every 50ms and restarts the game on expiry
  void operator()() {
    u64 start_time = get_system_time();
    u64 old_time = start_time;
    u64 current_time = old_time;
    constexpr u64 sleep_time = 50'000;

    while (thread_ctrl::state() != thread_state::aborting) {
      if (Emu.GetStatus(false) == system_state::paused) {
        // Shift the start time forward so paused time does not count against
        // the timeout
        start_time += current_time - old_time;
        old_time = current_time;
        thread_ctrl::wait_for(sleep_time);
        current_time = get_system_time();
        continue;
      }

      old_time = std::exchange(current_time, get_system_time());

      // Consume a pending "clear" request, if any
      const auto old = control
                           .fetch_op([&](control_t &data) {
                             if (data.needs_restart) {
                               data.needs_restart = false;
                               return true;
                             }

                             return false;
                           })
                           .first;

      if (old.active && old.needs_restart) {
        // Watchdog was rearmed: restart the countdown
        start_time = current_time;
        old_time = current_time;
        continue;
      }

      if (old.active && current_time - start_time >= old.timeout) {
        sys_game.success("Watchdog timeout! Restarting the game...");
        Emu.CallFromMainThread([]() { Emu.Restart(false); });
        return;
      }

      thread_ctrl::wait_for(sleep_time);
    }
  }

  static constexpr auto thread_name = "LV2 Watchdog Thread"sv;
};
void abort_lv2_watchdog() {
  // Requests termination of the watchdog thread if it was ever created.
  const auto thr = g_fxo->try_get<named_thread<watchdog_t>>();

  if (!thr) {
    return;
  }

  sys_game.notice("Aborting %s...", thr->thread_name);
  *thr = thread_state::aborting;
}
// Arms the watchdog with the given timeout (scaled to microseconds below,
// so presumably seconds — matches the real firmware's disassembly).
// Fails with CELL_EABORT if the watchdog is already running.
error_code _sys_game_watchdog_start(u32 timeout) {
  sys_game.trace("sys_game_watchdog_start(timeout=%d)", timeout);

  // According to disassembly
  timeout *= 1'000'000;
  timeout &= -64;

  if (!g_fxo->get<named_thread<watchdog_t>>()
           .control
           .fetch_op([&](watchdog_t::control_t &data) {
             if (data.active) {
               // Already armed: reject
               return false;
             }

             // needs_restart starts the countdown from "now"
             data.needs_restart = true;
             data.active = true;
             data.timeout = timeout;
             return true;
           })
           .second) {
    return CELL_EABORT;
  }

  return CELL_OK;
}
error_code _sys_game_watchdog_stop() {
  // Disarms the watchdog; succeeds even if it was not running.
  sys_game.trace("sys_game_watchdog_stop()");

  auto &wdog = g_fxo->get<named_thread<watchdog_t>>();

  wdog.control.fetch_op([](watchdog_t::control_t &data) {
    if (!data.active) {
      // Nothing to stop
      return false;
    }

    data.active = false;
    return true;
  });

  return CELL_OK;
}
error_code _sys_game_watchdog_clear() {
  // Rearms ("pets") the watchdog so the restart countdown begins anew.
  sys_game.trace("sys_game_watchdog_clear()");

  auto &wdog = g_fxo->get<named_thread<watchdog_t>>();

  wdog.control.fetch_op([](watchdog_t::control_t &data) {
    // Only meaningful while active and without a pending restart request
    if (!data.active || data.needs_restart) {
      return false;
    }

    data.needs_restart = true;
    return true;
  });

  return CELL_OK;
}
error_code _sys_game_set_system_sw_version(u64 version) {
  // Overrides the reported firmware version; requires root permissions.
  sys_game.trace("sys_game_set_system_sw_version(version=%d)", version);

  if (!g_ps3_process_info.has_root_perm()) {
    return CELL_ENOSYS;
  }

  g_fxo->get<system_sw_version>().version = version;
  return CELL_OK;
}
// Returns the firmware version in its scaled form (version * 10000).
u64 _sys_game_get_system_sw_version() {
  sys_game.trace("sys_game_get_system_sw_version()");

  return g_fxo->get<system_sw_version>().version;
}
error_code _sys_game_board_storage_read(vm::ptr<u8> buffer,
                                        vm::ptr<u8> status) {
  // Reads the board storage into the guest buffer; *status receives 0x00 on
  // success and 0xFF on failure.
  sys_game.trace("sys_game_board_storage_read(buffer=*0x%x, status=*0x%x)",
                 buffer, status);

  if (!buffer || !status) {
    return CELL_EFAULT;
  }

  const bool ok = g_fxo->get<board_storage>().read(buffer.get_ptr());
  *status = ok ? 0x00 : 0xFF;
  return CELL_OK;
}
error_code _sys_game_board_storage_write(vm::ptr<u8> buffer,
                                         vm::ptr<u8> status) {
  // Writes the guest buffer into the board storage; *status receives 0x00 on
  // success and 0xFF on failure.
  sys_game.trace("sys_game_board_storage_write(buffer=*0x%x, status=*0x%x)",
                 buffer, status);

  if (!buffer || !status) {
    return CELL_EFAULT;
  }

  const bool ok = g_fxo->get<board_storage>().write(buffer.get_ptr());
  *status = ok ? 0x00 : 0xFF;
  return CELL_OK;
}
error_code _sys_game_get_rtc_status(vm::ptr<s32> status) {
  // The emulated RTC is always reported as healthy (status 0).
  sys_game.trace("sys_game_get_rtc_status(status=*0x%x)", status);

  if (!status) {
    return CELL_EFAULT;
  }

  *status = 0;
  return CELL_OK;
}

View file

@ -0,0 +1,102 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_gamepad.h"
LOG_CHANNEL(sys_gamepad);
// Stub: logs the call and reports success.
u32 sys_gamepad_ycon_initalize(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_initalize(in=%d, out=%d) -> CELL_OK", in,
                   out);
  return CELL_OK;
}
// Stub: logs the call and reports success.
u32 sys_gamepad_ycon_finalize(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_finalize(in=%d, out=%d) -> CELL_OK", in,
                   out);
  return CELL_OK;
}
// Stub: logs the call and reports success.
u32 sys_gamepad_ycon_has_input_ownership(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo(
      "sys_gamepad_ycon_has_input_ownership(in=%d, out=%d) -> CELL_OK", in,
      out);
  return CELL_OK;
}
// Stub: logs the call and reports success.
u32 sys_gamepad_ycon_enumerate_device(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo(
      "sys_gamepad_ycon_enumerate_device(in=%d, out=%d) -> CELL_OK", in, out);
  return CELL_OK;
}
// Stub: logs the call and reports success.
u32 sys_gamepad_ycon_get_device_info(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_get_device_info(in=%d, out=%d) -> CELL_OK",
                   in, out);
  return CELL_OK;
}
// Stub: logs the call and reports success.
u32 sys_gamepad_ycon_read_raw_report(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_read_raw_report(in=%d, out=%d) -> CELL_OK",
                   in, out);
  return CELL_OK;
}
// Stub: logs the call and reports success.
u32 sys_gamepad_ycon_write_raw_report(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo(
      "sys_gamepad_ycon_write_raw_report(in=%d, out=%d) -> CELL_OK", in, out);
  return CELL_OK;
}
// Stub: logs the call and reports success.
u32 sys_gamepad_ycon_get_feature(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_get_feature(in=%d, out=%d) -> CELL_OK", in,
                   out);
  return CELL_OK;
}
// Stub: logs the call and reports success.
u32 sys_gamepad_ycon_set_feature(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_set_feature(in=%d, out=%d) -> CELL_OK", in,
                   out);
  return CELL_OK;
}
// Stub: logs the call and reports success.
u32 sys_gamepad_ycon_is_gem(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_is_gem(in=%d, out=%d) -> CELL_OK", in,
                   out);
  return CELL_OK;
}
// syscall(621,packet_id,u8 *in,u8 *out)
// Talk:LV2_Functions_and_Syscalls#Syscall_621_.280x26D.29 gamepad_if usage
u32 sys_gamepad_ycon_if(u8 packet_id, vm::ptr<u8> in, vm::ptr<u8> out) {
  // Dispatches a gamepad interface request to the matching handler;
  // packet ids 0..9 map to the sys_gamepad_ycon_* functions above.
  using ycon_handler = u32 (*)(vm::ptr<u8>, vm::ptr<u8>);

  static constexpr ycon_handler handlers[]{
      &sys_gamepad_ycon_initalize,
      &sys_gamepad_ycon_finalize,
      &sys_gamepad_ycon_has_input_ownership,
      &sys_gamepad_ycon_enumerate_device,
      &sys_gamepad_ycon_get_device_info,
      &sys_gamepad_ycon_read_raw_report,
      &sys_gamepad_ycon_write_raw_report,
      &sys_gamepad_ycon_get_feature,
      &sys_gamepad_ycon_set_feature,
      &sys_gamepad_ycon_is_gem,
  };

  if (packet_id < 10) {
    return handlers[packet_id](in, out);
  }

  sys_gamepad.error(
      "sys_gamepad_ycon_if(packet_id=*%d, in=%d, out=%d), unknown packet id",
      packet_id, in, out);
  return CELL_OK;
}

View file

@ -0,0 +1,41 @@
#include "stdafx.h"
#include "sys_gpio.h"
#include "Emu/Cell/ErrorCodes.h"
LOG_CHANNEL(sys_gpio);
error_code sys_gpio_get(u64 device_id, vm::ptr<u64> value) {
  // Reads a GPIO device. Retail consoles expose neither LEDs nor DIP
  // switches, so the reported value is always zero.
  sys_gpio.trace("sys_gpio_get(device_id=0x%llx, value=*0x%x)", device_id,
                 value);

  switch (device_id) {
  case SYS_GPIO_LED_DEVICE_ID:
  case SYS_GPIO_DIP_SWITCH_DEVICE_ID:
    break;
  default:
    return CELL_ESRCH;
  }

  if (!value.try_write(0)) {
    return CELL_EFAULT;
  }

  return CELL_OK;
}
error_code sys_gpio_set(u64 device_id, u64 mask, u64 value) {
  // Writes a GPIO device. Retail consoles have no LEDs or DIP switches, so
  // the write can never take effect.
  sys_gpio.trace("sys_gpio_set(device_id=0x%llx, mask=0x%llx, value=0x%llx)",
                 device_id, mask, value);

  if (device_id == SYS_GPIO_LED_DEVICE_ID) {
    // Writes to the (nonexistent) LEDs are silently accepted
    return CELL_OK;
  }

  if (device_id == SYS_GPIO_DIP_SWITCH_DEVICE_ID) {
    // DIP switches are read-only
    return CELL_EINVAL;
  }

  return CELL_ESRCH;
}

View file

@ -0,0 +1,193 @@
#include "stdafx.h"
#include "sys_hid.h"
#include "Emu/Memory/vm_var.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "rpcsx/fw/ps3/cellPad.h"
#include "sys_process.h"
LOG_CHANNEL(sys_hid);
// Opens a HID device handle (device_type: 1 = pad, 2 = keyboard, 3 = mouse)
// and returns it via *handle. For pads, this also initializes the pad
// subsystem on the guest's behalf.
error_code sys_hid_manager_open(ppu_thread &ppu, u64 device_type, u64 port_no,
                                vm::ptr<u32> handle) {
  sys_hid.todo("sys_hid_manager_open(device_type=0x%llx, port_no=0x%llx, "
               "handle=*0x%llx)",
               device_type, port_no, handle);
  // device type == 1 = pad, 2 = kb, 3 = mouse
  if (device_type > 3) {
    return CELL_EINVAL;
  }
  if (!handle) {
    return CELL_EFAULT;
  }
  // 'handle' starts at 0x100 in realhw, and increments every time
  // sys_hid_manager_open is called however, sometimes the handle is reused when
  // opening sys_hid_manager again (even when the previous one hasn't been
  // closed yet) - maybe when processes/threads get killed/finish they also
  // release their handles?
  static u32 ctr = 0x100;
  *handle = ctr++;
  if (device_type == 1) {
    // Bring up the pad subsystem and enable extended reporting for this port
    cellPadInit(ppu, 7);
    cellPadSetPortSetting(::narrow<u32>(port_no) /* 0 */,
                          CELL_PAD_SETTING_LDD | CELL_PAD_SETTING_PRESS_ON |
                              CELL_PAD_SETTING_SENSOR_ON);
  }
  return CELL_OK;
}
// HID ioctl dispatcher. Only a few pkg_ids observed on real hardware are
// handled (see the dump table below); everything else is logged and ignored.
// For pkg_id 2/5 the DualShock 3 vid/pid (0x054C/0x0268) is reported.
error_code sys_hid_manager_ioctl(u32 hid_handle, u32 pkg_id, vm::ptr<void> buf,
                                 u64 buf_size) {
  sys_hid.todo("sys_hid_manager_ioctl(hid_handle=0x%x, pkg_id=0x%llx, "
               "buf=*0x%x, buf_size=0x%llx)",
               hid_handle, pkg_id, buf, buf_size);
  // clang-format off
  // From realhw syscall dump when vsh boots
  // SC count | handle | pkg_id | *buf (in)                                                                 | *buf (out)                                                                | size -> ret
  // ---------|--------|--------|---------------------------------------------------------------------------|---------------------------------------------------------------------------|------------
  // 28893    | 0x101  | 0x2    | 000000000000000000000000000000000000000000                                | 054c02680102020000000000000008035000001c1f                                | 21 -> 0
  // 28894    | 0x101  | 0x3    | 00000000                                                                  | 00000000                                                                  | 4 -> 0
  // 28895    | 0x101  | 0x5    | 00000000                                                                  | 00000000                                                                  | 4 -> 0
  // 28896    | 0x101  | 0x68   | 01000000d0031cb020169e502006b7f80000000000606098000000000000000000000000d | 01000000d0031cb020169e502006b7f80000000000606098000000000000000000000000d | 64 -> 0
  //          |        |        | 0031c90000000002006bac400000000d0031cb0000000002006b4d0                   | 0031c90000000002006bac400000000d0031cb0000000002006b4d0                   |
  // 28898    | 0x102  | 0x2    | 000000000000000000000000000000000000000000                                | 054c02680102020000000000000008035000001c1f                                | 21 -> 0
  // 28901    | 0x100  | 0x64   | 00000001                                                                  | 00000001                                                                  | 4 -> 0xffffffff80010002 # x3::hidportassign
  // 2890     | 0x100  | 0x65   | 6b49d200                                                                  | 6b49d200                                                                  | 4 -> 0xffffffff80010002 # x3::hidportassign
  // 28903    | 0x100  | 0x66   | 00000001                                                                  | 00000001                                                                  | 4 -> 0 # x3::hidportassign
  // 28904    | 0x100  | 0x0    | 00000001000000ff000000ff000000ff000000ff000000010000000100000001000000010 | 00000001000000ff000000ff000000ff000000ff000000010000000100000001000000010 | 68 -> 0 # x3::hidportassign
  //          |        |        | 000000000000000000000000000000000000001000000010000000100000001           | 000000000000000000000000000000000000001000000010000000100000001           |
  // 28907    | 0x101  | 0x3    | 00000001                                                                  | 00000001                                                                  | 4 -> 0
  // 28908    | 0x101  | 0x5    | 00000001                                                                  | 00000001                                                                  | 4 -> 0
  // 29404    | 0x100  | 0x4    | 00                                                                        | ee                                                                        | 1 -> 0
  // *** repeats 30600, 31838, 33034, 34233, 35075 (35075 is x3::hidportassign) ***
  // 35076    | 0x100  | 0x0    | 00000001000000ff000000ff000000ff000000ff000000320000003200000032000000320 | 00000001000000ff000000ff000000ff000000ff000000320000003200000032000000320 | 68 -> 0
  //          |        |        | 000003200000032000000320000003200002710000027100000271000002710           | 000003200000032000000320000003200002710000027100000271000002710           |
  // *** more 0x4 that have buf(in)=00 and buf(out)=ee ***
  // clang-format on
  if (pkg_id == 2) {
    // Return what realhw seems to return
    // TODO: Figure out what this corresponds to
    auto info = vm::static_ptr_cast<sys_hid_info_2>(buf);
    info->vid = 0x054C;
    info->pid = 0x0268;
    u8 realhw[17] = {0x01, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                     0x00, 0x08, 0x03, 0x50, 0x00, 0x00, 0x1c, 0x1f};
    memcpy(info->unk, &realhw, 17);
  } else if (pkg_id == 5) {
    auto info = vm::static_ptr_cast<sys_hid_info_5>(buf);
    info->vid = 0x054C;
    info->pid = 0x0268;
  }
  // pkg_id == 6 == setpressmode?
  else if (pkg_id == 0x68) {
    [[maybe_unused]] auto info = vm::static_ptr_cast<sys_hid_ioctl_68>(buf);
    // info->unk2 = 0;
  }
  return CELL_OK;
}
// Focus query stub. Always reports 1; the log call is commented out because
// this syscall is invoked at a very high rate (see "spammy" note below).
// The non-zero result is wrapped in not_an_error() so it is not treated as
// a failure code.
error_code sys_hid_manager_check_focus() {
  // spammy sys_hid.todo("sys_hid_manager_check_focus()");
  return not_an_error(1);
}
// Unknown HID manager syscall 513 — parameters are not reverse-engineered yet.
// Stub: logs the raw arguments and reports success.
error_code sys_hid_manager_513(u64 a1, u64 a2, vm::ptr<void> buf,
                               u64 buf_size) {
  sys_hid.todo("sys_hid_manager_513(%llx, %llx, buf=%llx, buf_size=%llx)", a1,
               a2, buf, buf_size);
  return CELL_OK;
}
// Unknown HID manager syscall 514; only pkg_id 0xD and 0xE have been
// observed so far. Everything else is logged as TODO and ignored.
error_code sys_hid_manager_514(u32 pkg_id, vm::ptr<void> buf, u64 buf_size) {
  // pkg_id 0xE is queried constantly, so route it to trace to avoid log spam
  (pkg_id == 0xE ? sys_hid.trace : sys_hid.todo)(
      "sys_hid_manager_514(pkg_id=0x%x, buf=*0x%x, buf_size=0x%llx)", pkg_id,
      buf, buf_size);

  switch (pkg_id) {
  case 0xE: {
    // buf holds device_type
    // auto device_type = vm::static_ptr_cast<u8>(buf);
    // spammy sys_hid.todo("device_type: 0x%x", device_type[0]);
    // return 1 or 0? look like almost like another check_focus type check,
    // returning 0 looks to keep system focus
    break;
  }
  case 0xD: {
    auto inf = vm::static_ptr_cast<sys_hid_manager_514_pkg_d>(buf);
    // unk1 = (pad# << 24) | pad# | 0x100
    // return value doesn't seem to be used again
    sys_hid.todo("unk1: 0x%x, unk2:0x%x", inf->unk1, inf->unk2);
    break;
  }
  default:
    break;
  }

  return CELL_OK;
}
// Reports whether the process has root permission.
// NOTE(review): the pid argument is ignored — the check is made against the
// current process info (g_ps3_process_info), not the requested pid.
error_code sys_hid_manager_is_process_permission_root(u32 pid) {
  sys_hid.todo("sys_hid_manager_is_process_permission_root(pid=0x%x)", pid);
  return not_an_error(g_ps3_process_info.has_root_perm());
}
// Registers an event queue as a hot-key observer. Stub: the registration is
// not implemented, only logged; unconditionally succeeds.
error_code sys_hid_manager_add_hot_key_observer(u32 event_queue,
                                                vm::ptr<u32> unk) {
  sys_hid.todo(
      "sys_hid_manager_add_hot_key_observer(event_queue=0x%x, unk=*0x%x)",
      event_queue, unk);
  return CELL_OK;
}
// Reads input data for a HID handle.
// pkg_id 2: copies the raw button array from CellPadData and returns the
//           copied size in bytes.
// pkg_id 0x81 (possibly cellPadGetDataExtra): same copy, but the returned
//           size is in u16 units (bytes / 2).
// Any other pkg_id is a no-op returning CELL_OK.
// The two branches were previously duplicated line-for-line except for the
// returned unit; they are merged here.
error_code sys_hid_manager_read(u32 handle, u32 pkg_id, vm::ptr<void> buf,
                                u64 buf_size) {
  if (!buf) {
    return CELL_EFAULT;
  }

  // pkg_id 2/0x81 are polled every frame — keep them at trace level
  (pkg_id == 2 || pkg_id == 0x81 ? sys_hid.trace : sys_hid.todo)(
      "sys_hid_manager_read(handle=0x%x, pkg_id=0x%x, buf=*0x%x, "
      "buf_size=0x%llx)",
      handle, pkg_id, buf, buf_size);

  if (pkg_id == 2 || pkg_id == 0x81) {
    // cellPadGetData: it returns just the button array from 'CellPadData'
    // todo: use handle and dont call cellpad here
    vm::var<CellPadData> tmpData;

    if ((cellPadGetData(0, +tmpData) == CELL_OK) && tmpData->len > 0) {
      // Both sizes are expressed in bytes (u16 elements)
      const u64 cpySize = std::min(static_cast<u64>(tmpData->len) * sizeof(u16),
                                   buf_size * sizeof(u16));
      memcpy(buf.get_ptr(), &tmpData->button, cpySize);

      // pkg_id 0x81 reports the amount copied in u16 units instead of bytes
      return not_an_error(pkg_id == 2 ? cpySize : cpySize / 2);
    }
  }

  return CELL_OK;
}

View file

@ -0,0 +1,262 @@
#include "stdafx.h"
#include "sys_interrupt.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUOpcodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/SPUThread.h"
LOG_CHANNEL(sys_interrupt);
// Creates an interrupt tag; the ID comes from the IDM slot currently being
// constructed (idm::last_id()).
lv2_int_tag::lv2_int_tag() noexcept : lv2_obj(1), id(idm::last_id()) {}
// Savestate constructor: restores the attached handler by its saved ID.
// If a non-zero ID was saved but the lv2_int_serv object has not been
// deserialized yet, resolution is deferred via Emu.PostponeInitCode(),
// where the lookup must succeed (ensure).
lv2_int_tag::lv2_int_tag(utils::serial &ar) noexcept
    : lv2_obj(1), id(idm::last_id()), handler([&]() {
        const u32 id = ar;
        auto ptr = idm::get_unlocked<lv2_obj, lv2_int_serv>(id);
        if (!ptr && id) {
          Emu.PostponeInitCode([id, &handler = this->handler]() {
            handler = ensure(idm::get_unlocked<lv2_obj, lv2_int_serv>(id));
          });
        }
        return ptr;
      }()) {}
// Serializes the attached handler's ID, or 0 when no live handler exists.
void lv2_int_tag::save(utils::serial &ar) {
  ar(lv2_obj::check(handler) ? handler->id : 0);
}
// Creates an interrupt service bound to a PPU thread with two user args
// passed to the handler on each invocation.
lv2_int_serv::lv2_int_serv(shared_ptr<named_thread<ppu_thread>> thread,
                           u64 arg1, u64 arg2) noexcept
    : lv2_obj(1), id(idm::last_id()), thread(thread), arg1(arg1), arg2(arg2) {}
// Savestate constructor: restores the bound thread by saved ID (may resolve
// to null if 0 was saved), then the two handler arguments.
lv2_int_serv::lv2_int_serv(utils::serial &ar) noexcept
    : lv2_obj(1), id(idm::last_id()),
      thread(idm::get_unlocked<named_thread<ppu_thread>>(ar)), arg1(ar),
      arg2(ar) {}
// Serializes the bound thread's ID (0 when the thread is gone from IDM)
// followed by the handler arguments.
void lv2_int_serv::save(utils::serial &ar) {
  ar(thread && idm::check_unlocked<named_thread<ppu_thread>>(thread->id)
         ? thread->id
         : 0,
     arg1, arg2);
}
void ppu_interrupt_thread_entry(ppu_thread &, ppu_opcode_t, be_t<u32> *,
struct ppu_intrp_func *);
// Queues one handler invocation on the bound PPU thread. The command order
// is significant: reset the stack, load arg1/arg2, call the handler entry,
// sleep, then re-enter ppu_interrupt_thread_entry to wait for the next
// interrupt.
void lv2_int_serv::exec() const {
  thread->cmd_list({{ppu_cmd::reset_stack, 0},
                    {ppu_cmd::set_args, 2},
                    arg1,
                    arg2,
                    {ppu_cmd::entry_call, 0},
                    {ppu_cmd::sleep, 0},
                    {ppu_cmd::ptr_call, 0},
                    std::bit_cast<u64>(&ppu_interrupt_thread_entry)});
}
void ppu_thread_exit(ppu_thread &, ppu_opcode_t, be_t<u32> *,
struct ppu_intrp_func *);
// Terminates the interrupt thread: queues a ppu_thread_exit call, wakes the
// thread through cmd_notify, joins it by invoking operator() and finally
// removes it from IDM (verifying it is still the same object).
void lv2_int_serv::join() const {
  thread->cmd_list(
      {{ppu_cmd::ptr_call, 0}, std::bit_cast<u64>(&ppu_thread_exit)});
  thread->cmd_notify.store(1);
  thread->cmd_notify.notify_one();
  (*thread)();
  idm::remove_verify<named_thread<ppu_thread>>(thread->id, thread);
}
// Destroys an interrupt tag.
// Returns CELL_ESRCH if the tag does not exist and CELL_EBUSY if a handler
// is still established on it; otherwise the tag is withdrawn from IDM.
error_code sys_interrupt_tag_destroy(ppu_thread &ppu, u32 intrtag) {
  ppu.state += cpu_flag::wait;
  sys_interrupt.warning("sys_interrupt_tag_destroy(intrtag=0x%x)", intrtag);
  const auto tag = idm::withdraw<lv2_obj, lv2_int_tag>(
      intrtag, [](lv2_int_tag &tag) -> CellError {
        if (lv2_obj::check(tag.handler)) {
          return CELL_EBUSY;
        }
        // Mark the object as dead before it is withdrawn
        tag.exists.release(0);
        return {};
      });
  if (!tag) {
    return CELL_ESRCH;
  }
  if (tag.ret) {
    return tag.ret;
  }
  return CELL_OK;
}
// Establishes an interrupt handler thread on an interrupt tag.
// On success writes the new lv2_int_serv ID to *ih and starts the handler
// thread. Errors: CELL_ESRCH (bad tag or thread), CELL_EAGAIN (thread
// already running, i.e. established elsewhere), CELL_ESTAT (tag already has
// a handler).
error_code _sys_interrupt_thread_establish(ppu_thread &ppu, vm::ptr<u32> ih,
                                           u32 intrtag, u32 intrthread,
                                           u64 arg1, u64 arg2) {
  ppu.state += cpu_flag::wait;
  sys_interrupt.warning(
      "_sys_interrupt_thread_establish(ih=*0x%x, intrtag=0x%x, "
      "intrthread=0x%x, arg1=0x%llx, arg2=0x%llx)",
      ih, intrtag, intrthread, arg1, arg2);
  CellError error = CELL_EAGAIN;
  const u32 id = idm::import <lv2_obj, lv2_int_serv>([&]() {
    shared_ptr<lv2_int_serv> result;
    // Get interrupt tag
    const auto tag = idm::check_unlocked<lv2_obj, lv2_int_tag>(intrtag);
    if (!tag) {
      error = CELL_ESRCH;
      return result;
    }
    // Get interrupt thread
    const auto it = idm::get_unlocked<named_thread<ppu_thread>>(intrthread);
    if (!it) {
      error = CELL_ESRCH;
      return result;
    }
    // If interrupt thread is running, it's already established on another
    // interrupt tag
    if (cpu_flag::stop - it->state) {
      error = CELL_EAGAIN;
      return result;
    }
    // It's unclear if multiple handlers can be established on single interrupt
    // tag
    if (lv2_obj::check(tag->handler)) {
      error = CELL_ESTAT;
      return result;
    }
    result = make_shared<lv2_int_serv>(it, arg1, arg2);
    tag->handler = result;
    // Queue the interrupt loop entry and release the thread from its
    // stopped state
    it->cmd_list({{ppu_cmd::ptr_call, 0},
                  std::bit_cast<u64>(&ppu_interrupt_thread_entry)});
    it->state -= cpu_flag::stop;
    it->state.notify_one();
    return result;
  });
  if (id) {
    ppu.check_state();
    *ih = id;
    return CELL_OK;
  }
  return error;
}
// Disestablishes an interrupt handler, joining its thread and returning the
// thread's TLS base (r13) to the caller.
// If ih is not a handler ID, it is also tried as a raw PPU thread ID
// (fallback path) before returning CELL_ESRCH.
error_code _sys_interrupt_thread_disestablish(ppu_thread &ppu, u32 ih,
                                              vm::ptr<u64> r13) {
  ppu.state += cpu_flag::wait;
  sys_interrupt.warning(
      "_sys_interrupt_thread_disestablish(ih=0x%x, r13=*0x%x)", ih, r13);
  const auto handler = idm::withdraw<lv2_obj, lv2_int_serv>(
      ih, [](lv2_obj &obj) { obj.exists.release(0); });
  if (!handler) {
    if (const auto thread = idm::withdraw<named_thread<ppu_thread>>(ih)) {
      *r13 = thread->gpr[13];
      // It is detached from IDM now so join must be done explicitly now
      *thread = thread_state::finished;
      return CELL_OK;
    }
    return CELL_ESRCH;
  }
  lv2_obj::sleep(ppu);
  // Wait for sys_interrupt_thread_eoi() and destroy interrupt thread
  handler->join();
  // Save TLS base
  *r13 = handler->thread->gpr[13];
  return CELL_OK;
}
// End-of-interrupt: called by the handler to signal it is done. Flags the
// PPU to return from the handler call, puts it to sleep and clears the
// executing flag so the interrupt loop can pick up the next interrupt.
void sys_interrupt_thread_eoi(ppu_thread &ppu) {
  ppu.state += cpu_flag::wait;
  sys_interrupt.trace("sys_interrupt_thread_eoi()");
  ppu.state += cpu_flag::ret;
  lv2_obj::sleep(ppu);
  ppu.interrupt_thread_executing = false;
}
// Interrupt thread main loop. Scans all raw (non-threaded) SPU threads for a
// pending class-2 interrupt whose tag handler is bound to this PPU thread;
// when found, queues the handler via serv->exec() and returns (exec()
// re-enters this function after the handler finishes). Otherwise waits on
// cmd_notify until woken or stopped.
void ppu_interrupt_thread_entry(ppu_thread &ppu, ppu_opcode_t, be_t<u32> *,
                                struct ppu_intrp_func *) {
  while (true) {
    shared_ptr<lv2_int_serv> serv = null_ptr;
    // Loop endlessly trying to invoke an interrupt if required
    idm::select<named_thread<spu_thread>>([&](u32, spu_thread &spu) {
      if (spu.get_type() != spu_type::threaded) {
        auto &ctrl = spu.int_ctrl[2];
        if (lv2_obj::check(ctrl.tag)) {
          auto &handler = ctrl.tag->handler;
          if (lv2_obj::check(handler)) {
            if (handler->thread.get() == &ppu) {
              // Latch the mailbox interrupt status if unmasked and pending
              if (spu.ch_out_intr_mbox.get_count() &&
                  ctrl.mask & SPU_INT2_STAT_MAILBOX_INT) {
                ctrl.stat |= SPU_INT2_STAT_MAILBOX_INT;
              }
              if (ctrl.mask & ctrl.stat) {
                // At most one pending service is expected per scan
                ensure(!serv);
                serv = handler;
              }
            }
          }
        }
      }
    });
    if (serv) {
      // Queue interrupt, after the interrupt has finished the PPU returns to
      // this loop
      serv->exec();
      return;
    }
    const auto state = +ppu.state;
    if (::is_stopped(state) || ppu.cmd_notify.exchange(0)) {
      return;
    }
    thread_ctrl::wait_on(ppu.cmd_notify, 0);
  }
}

View file

@ -0,0 +1,70 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/IdManager.h"
#include "Emu/Memory/vm.h"
#include "sys_io.h"
LOG_CHANNEL(sys_io);
// Creates an lv2_io_buf object and writes its IDM handle to *handle.
// Semantics of the parameters are still unknown (stub — logged as TODO).
// Returns CELL_EFAULT for a null handle pointer and CELL_ESRCH when the
// object could not be created.
error_code sys_io_buffer_create(u32 block_count, u32 block_size, u32 blocks,
                                u32 unk1, vm::ptr<u32> handle) {
  sys_io.todo("sys_io_buffer_create(block_count=0x%x, block_size=0x%x, "
              "blocks=0x%x, unk1=0x%x, handle=*0x%x)",
              block_count, block_size, blocks, unk1, handle);

  if (!handle) {
    return CELL_EFAULT;
  }

  const u32 id = idm::make<lv2_io_buf>(block_count, block_size, blocks, unk1);

  if (!id) {
    return CELL_ESRCH;
  }

  *handle = id;
  return CELL_OK;
}
// Destroys an I/O buffer object.
// Previously the result of idm::remove was ignored, so an invalid handle
// returned CELL_OK; it now returns CELL_ESRCH for consistency with the
// lookup behaviour of sys_io_buffer_allocate/free.
error_code sys_io_buffer_destroy(u32 handle) {
  sys_io.todo("sys_io_buffer_destroy(handle=0x%x)", handle);

  if (!idm::remove<lv2_io_buf>(handle)) {
    return CELL_ESRCH;
  }

  return CELL_OK;
}
// Allocates a block from an I/O buffer and writes its address to *block.
// Stub implementation: allocates block_count * block_size bytes from main
// memory instead of carving a block out of a pre-reserved pool (see the
// in-line note); the allocation is not tracked by the lv2_io_buf object.
error_code sys_io_buffer_allocate(u32 handle, vm::ptr<u32> block) {
  sys_io.todo("sys_io_buffer_allocate(handle=0x%x, block=*0x%x)", handle,
              block);
  if (!block) {
    return CELL_EFAULT;
  }
  if (auto io = idm::get_unlocked<lv2_io_buf>(handle)) {
    // no idea what we actually need to allocate
    if (u32 addr = vm::alloc(io->block_count * io->block_size, vm::main)) {
      *block = addr;
      return CELL_OK;
    }
    return CELL_ENOMEM;
  }
  return CELL_ESRCH;
}
// Frees a block previously returned by sys_io_buffer_allocate.
// NOTE(review): the handle is only validated for existence — the block
// address is deallocated without verifying it belongs to this I/O buffer.
error_code sys_io_buffer_free(u32 handle, u32 block) {
  sys_io.todo("sys_io_buffer_free(handle=0x%x, block=0x%x)", handle, block);
  const auto io = idm::get_unlocked<lv2_io_buf>(handle);
  if (!io) {
    return CELL_ESRCH;
  }
  vm::dealloc(block);
  return CELL_OK;
}

View file

@ -0,0 +1,565 @@
#include "stdafx.h"
#include "sys_lwcond.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_lwmutex.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_lwcond);
// Savestate constructor: restores name, owning lwmutex ID, protocol and the
// userspace control pointer in the order written by save().
lv2_lwcond::lv2_lwcond(utils::serial &ar)
    : name(ar.pop<be_t<u64>>()), lwid(ar), protocol(ar),
      control(ar.pop<decltype(control)>()) {}
// Serializes the lwcond state; field order must match the deserializing
// constructor above.
void lv2_lwcond::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_sync);
  ar(name, lwid, protocol, control);
}
// Creates a lightweight condition variable bound to an existing lwmutex.
// The scheduling protocol is inherited from the lwmutex, except that
// SYS_SYNC_RETRY is coerced to SYS_SYNC_PRIORITY (lwcond does not support
// retry). On success writes the new ID to *lwcond_id.
// Errors: CELL_ESRCH (bad lwmutex), CELL_EAGAIN (ID allocation failed).
error_code _sys_lwcond_create(ppu_thread &ppu, vm::ptr<u32> lwcond_id,
                              u32 lwmutex_id, vm::ptr<sys_lwcond_t> control,
                              u64 name) {
  ppu.state += cpu_flag::wait;
  sys_lwcond.trace(u8"_sys_lwcond_create(lwcond_id=*0x%x, lwmutex_id=0x%x, "
                   u8"control=*0x%x, name=0x%llx (“%s”))",
                   lwcond_id, lwmutex_id, control, name,
                   lv2_obj::name_64{std::bit_cast<be_t<u64>>(name)});
  u32 protocol;
  // Extract protocol from lwmutex
  if (!idm::check<lv2_obj, lv2_lwmutex>(
          lwmutex_id,
          [&protocol](lv2_lwmutex &mutex) { protocol = mutex.protocol; })) {
    return CELL_ESRCH;
  }
  if (protocol == SYS_SYNC_RETRY) {
    // Lwcond can't have SYS_SYNC_RETRY protocol
    protocol = SYS_SYNC_PRIORITY;
  }
  if (const u32 id =
          idm::make<lv2_obj, lv2_lwcond>(name, lwmutex_id, protocol, control)) {
    ppu.check_state();
    *lwcond_id = id;
    return CELL_OK;
  }
  return CELL_EAGAIN;
}
// Destroys a lightweight condition variable.
// Fails with CELL_EBUSY if threads are sleeping on it. If threads are still
// inside _sys_lwcond_queue_wait (lwmutex_waiters non-zero), the destroy is
// retried: the sign bit of lwmutex_waiters is set to block new waiters, the
// caller sleeps until the count drains, then the withdraw loop repeats.
error_code _sys_lwcond_destroy(ppu_thread &ppu, u32 lwcond_id) {
  ppu.state += cpu_flag::wait;
  sys_lwcond.trace("_sys_lwcond_destroy(lwcond_id=0x%x)", lwcond_id);
  shared_ptr<lv2_lwcond> _cond;
  while (true) {
    s32 old_val = 0;
    auto [ptr, ret] = idm::withdraw<lv2_obj, lv2_lwcond>(
        lwcond_id, [&](lv2_lwcond &cond) -> CellError {
          // Ignore check on first iteration
          if (_cond && std::addressof(cond) != _cond.get()) {
            // Other thread has destroyed the lwcond earlier
            return CELL_ESRCH;
          }
          std::lock_guard lock(cond.mutex);
          if (atomic_storage<ppu_thread *>::load(cond.sq)) {
            return CELL_EBUSY;
          }
          // Set the sign bit to prevent new waiters from entering
          old_val = cond.lwmutex_waiters.or_fetch(smin);
          if (old_val != smin) {
            // De-schedule if waiters were found
            lv2_obj::sleep(ppu);
            // Repeat loop: there are lwmutex waiters inside
            // _sys_lwcond_queue_wait
            return CELL_EAGAIN;
          }
          return {};
        });
    if (!ptr) {
      return CELL_ESRCH;
    }
    if (ret) {
      if (ret != CELL_EAGAIN) {
        return ret;
      }
    } else {
      break;
    }
    _cond = std::move(ptr);
    // Wait for all lwcond waiters to quit
    while (old_val + 0u > 1u << 31) {
      thread_ctrl::wait_on(_cond->lwmutex_waiters, old_val);
      if (ppu.is_stopped()) {
        // Savestate: abort and mark for re-execution
        ppu.state += cpu_flag::again;
        return {};
      }
      old_val = _cond->lwmutex_waiters;
    }
    // Wake up from sleep
    ppu.check_state();
  }
  return CELL_OK;
}
// Signals one waiter of a lightweight condition variable.
// ppu_thread_id selects a specific waiter, or umax for "any" (chosen by the
// lwcond's protocol). Depending on mode, the woken thread either receives
// ownership of the lwmutex (1/3), or is resumed with CELL_EBUSY in r3 so it
// re-acquires the mutex itself (2). The outer loop retries when a concurrent
// signal suspended this thread mid-operation, or aborts with cpu_flag::again
// for savestates.
error_code _sys_lwcond_signal(ppu_thread &ppu, u32 lwcond_id, u32 lwmutex_id,
                              u64 ppu_thread_id, u32 mode) {
  ppu.state += cpu_flag::wait;
  sys_lwcond.trace("_sys_lwcond_signal(lwcond_id=0x%x, lwmutex_id=0x%x, "
                   "ppu_thread_id=0x%llx, mode=%d)",
                   lwcond_id, lwmutex_id, ppu_thread_id, mode);
  // Mode 1: lwmutex was initially owned by the calling thread
  // Mode 2: lwmutex was not owned by the calling thread and waiter hasn't been
  // increased Mode 3: lwmutex was forcefully owned by the calling thread
  if (mode < 1 || mode > 3) {
    fmt::throw_exception("Unknown mode (%d)", mode);
  }
  while (true) {
    if (ppu.test_stopped()) {
      ppu.state += cpu_flag::again;
      return {};
    }
    bool finished = true;
    ppu.state += cpu_flag::wait;
    const auto cond = idm::check<lv2_obj, lv2_lwcond>(
        lwcond_id,
        [&, notify = lv2_obj::notify_all_t()](lv2_lwcond &cond) -> int {
          // Resolve the target thread if a specific one was requested
          ppu_thread *cpu = nullptr;
          if (ppu_thread_id != u32{umax}) {
            cpu = idm::check_unlocked<named_thread<ppu_thread>>(
                static_cast<u32>(ppu_thread_id));
            if (!cpu) {
              return -1;
            }
          }
          // The lwmutex is only needed when ownership is transferred
          lv2_lwmutex *mutex = nullptr;
          if (mode != 2) {
            mutex = idm::check_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);
            if (!mutex) {
              return -1;
            }
          }
          if (atomic_storage<ppu_thread *>::load(cond.sq)) {
            std::lock_guard lock(cond.mutex);
            if (ppu.state & cpu_flag::suspend) {
              // Test if another signal caused the current thread to be
              // suspended, in which case it needs to wait until the thread
              // wakes up (otherwise the signal may cause unexpected results)
              finished = false;
              return 0;
            }
            if (cpu) {
              if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
                ppu.state += cpu_flag::again;
                return 0;
              }
            }
            // Pick the target: either the requested thread or one chosen by
            // the lwcond's scheduling protocol
            auto result =
                cpu ? cond.unqueue(cond.sq, cpu)
                    : cond.schedule<ppu_thread>(cond.sq, cond.protocol);
            if (result) {
              if (static_cast<ppu_thread *>(result)->state & cpu_flag::again) {
                ppu.state += cpu_flag::again;
                return 0;
              }
              if (mode == 2) {
                static_cast<ppu_thread *>(result)->gpr[3] = CELL_EBUSY;
              } else if (mode == 3 && mutex->load_sq()) [[unlikely]] {
                std::lock_guard lock(mutex->mutex);
                // Respect ordering of the sleep queue
                mutex->try_own(result, true);
                auto result2 = mutex->reown<ppu_thread>();
                if (result2->state & cpu_flag::again) {
                  ppu.state += cpu_flag::again;
                  return 0;
                }
                if (result2 != result) {
                  cond.awake(result2);
                  result = nullptr;
                }
              } else if (mode == 1) {
                // Transfer the woken thread onto the lwmutex sleep queue
                mutex->try_own(result, true);
                result = nullptr;
              }
              if (result) {
                cond.awake(result);
              }
              return 1;
            }
          } else {
            cond.mutex.lock_unlock();
            if (ppu.state & cpu_flag::suspend) {
              finished = false;
              return 0;
            }
          }
          return 0;
        });
    if (!finished) {
      continue;
    }
    if (!cond || cond.ret == -1) {
      return CELL_ESRCH;
    }
    if (!cond.ret) {
      // No waiter was woken; result depends on the targeting mode
      if (ppu_thread_id == u32{umax}) {
        if (mode == 3) {
          return not_an_error(CELL_ENOENT);
        } else if (mode == 2) {
          return CELL_OK;
        }
      }
      return not_an_error(CELL_EPERM);
    }
    return CELL_OK;
  }
}
// Signals all waiters of a lightweight condition variable.
// Mode 1 transfers every woken thread onto the lwmutex sleep queue; mode 2
// resumes them directly with CELL_EBUSY in r3. The loop retries when a
// concurrent signal suspended this thread, or aborts with cpu_flag::again
// for savestates.
error_code _sys_lwcond_signal_all(ppu_thread &ppu, u32 lwcond_id,
                                  u32 lwmutex_id, u32 mode) {
  ppu.state += cpu_flag::wait;
  sys_lwcond.trace(
      "_sys_lwcond_signal_all(lwcond_id=0x%x, lwmutex_id=0x%x, mode=%d)",
      lwcond_id, lwmutex_id, mode);
  // Mode 1: lwmutex was initially owned by the calling thread
  // Mode 2: lwmutex was not owned by the calling thread and waiter hasn't been
  // increased
  if (mode < 1 || mode > 2) {
    fmt::throw_exception("Unknown mode (%d)", mode);
  }
  while (true) {
    if (ppu.test_stopped()) {
      ppu.state += cpu_flag::again;
      return {};
    }
    bool finished = true;
    ppu.state += cpu_flag::wait;
    const auto cond = idm::check<lv2_obj, lv2_lwcond>(
        lwcond_id,
        [&, notify = lv2_obj::notify_all_t()](lv2_lwcond &cond) -> int {
          lv2_lwmutex *mutex{};
          if (mode != 2) {
            mutex = idm::check_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);
            if (!mutex) {
              return -1;
            }
          }
          if (atomic_storage<ppu_thread *>::load(cond.sq)) {
            std::lock_guard lock(cond.mutex);
            if (ppu.state & cpu_flag::suspend) {
              // Test if another signal caused the current thread to be
              // suspended, in which case it needs to wait until the thread
              // wakes up (otherwise the signal may cause unexpected results)
              finished = false;
              return 0;
            }
            u32 result = 0;
            // Abort (savestate) if any waiter cannot be resumed right now
            for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu) {
              if (cpu->state & cpu_flag::again) {
                ppu.state += cpu_flag::again;
                return 0;
              }
            }
            // Detach the whole sleep queue, then drain it in protocol order
            auto sq = cond.sq;
            atomic_storage<ppu_thread *>::release(cond.sq, nullptr);
            while (const auto cpu =
                       cond.schedule<ppu_thread>(sq, cond.protocol)) {
              if (mode == 2) {
                static_cast<ppu_thread *>(cpu)->gpr[3] = CELL_EBUSY;
              }
              if (mode == 1) {
                mutex->try_own(cpu, true);
              } else {
                lv2_obj::append(cpu);
              }
              result++;
            }
            if (result && mode == 2) {
              lv2_obj::awake_all();
            }
            return result;
          } else {
            cond.mutex.lock_unlock();
            if (ppu.state & cpu_flag::suspend) {
              finished = false;
              return 0;
            }
          }
          return 0;
        });
    if (!finished) {
      continue;
    }
    if (!cond || cond.ret == -1) {
      return CELL_ESRCH;
    }
    if (mode == 1) {
      // Mode 1: return the amount of threads (TODO)
      return not_an_error(cond.ret);
    }
    return CELL_OK;
  }
}
// Waits on a lightweight condition variable.
// Atomically: enqueues this thread on the lwcond, unlocks the associated
// lwmutex (waking its next owner if needed), and sleeps with an optional
// timeout. The wait loop below handles wake-by-signal, emulator stop
// (savestate path, which records whether the thread slept on the mutex or
// the cond queue) and timeout (removing the thread from whichever queue it
// is still on). The result is returned through ppu.gpr[3].
error_code _sys_lwcond_queue_wait(ppu_thread &ppu, u32 lwcond_id,
                                  u32 lwmutex_id, u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_lwcond.trace(
      "_sys_lwcond_queue_wait(lwcond_id=0x%x, lwmutex_id=0x%x, timeout=0x%llx)",
      lwcond_id, lwmutex_id, timeout);
  ppu.gpr[3] = CELL_OK;
  shared_ptr<lv2_lwmutex> mutex;
  auto &sstate = *ppu.optional_savestate_state;
  const auto cond = idm::get<lv2_obj, lv2_lwcond>(
      lwcond_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwcond &cond) {
        mutex = idm::get_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);
        if (!mutex) {
          return;
        }
        // Increment lwmutex's lwcond's waiters count
        mutex->lwcond_waiters++;
        lv2_obj::prepare_for_sleep(ppu);
        std::lock_guard lock(cond.mutex);
        cond.lwmutex_waiters++;
        const bool mutex_sleep = sstate.try_read<bool>().second;
        sstate.clear();
        if (mutex_sleep) {
          // Special: loading state from the point of waiting on lwmutex sleep
          // queue
          mutex->try_own(&ppu, true);
        } else {
          // Add a waiter
          lv2_obj::emplace(cond.sq, &ppu);
        }
        if (!ppu.loaded_from_savestate && !mutex->try_unlock(false)) {
          std::lock_guard lock2(mutex->mutex);
          // Process lwmutex sleep queue
          if (const auto cpu = mutex->reown<ppu_thread>()) {
            if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
              // Savestate: undo our enqueue and abort
              ensure(cond.unqueue(cond.sq, &ppu));
              ppu.state += cpu_flag::again;
              return;
            }
            // Put the current thread to sleep and schedule lwmutex waiter
            // atomically
            cond.append(cpu);
            cond.sleep(ppu, timeout);
            return;
          }
        }
        cond.sleep(ppu, timeout);
      });
  if (!cond || !mutex) {
    return CELL_ESRCH;
  }
  if (ppu.state & cpu_flag::again) {
    return CELL_OK;
  }
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      // Savestate path: record on which queue (if any) this thread still
      // sleeps so deserialization can restore the exact wait position
      std::scoped_lock lock(cond->mutex, mutex->mutex);
      bool mutex_sleep = false;
      bool cond_sleep = false;
      for (auto cpu = mutex->load_sq(); cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          mutex_sleep = true;
          break;
        }
      }
      for (auto cpu = atomic_storage<ppu_thread *>::load(cond->sq); cpu;
           cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          cond_sleep = true;
          break;
        }
      }
      if (!cond_sleep && !mutex_sleep) {
        break;
      }
      sstate(mutex_sleep);
      ppu.state += cpu_flag::again;
      break;
    }
    // Brief busy-wait for an imminent signal before blocking
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        std::lock_guard lock(cond->mutex);
        if (cond->unqueue(cond->sq, &ppu)) {
          ppu.gpr[3] = CELL_ETIMEDOUT;
          break;
        }
        // Not on the cond queue anymore: may have been moved to the lwmutex
        // sleep queue by a signal; try to remove it from there atomically
        std::lock_guard lock2(mutex->mutex);
        bool success = false;
        mutex->lv2_control.fetch_op([&](lv2_lwmutex::control_data_t &data) {
          success = false;
          ppu_thread *sq = static_cast<ppu_thread *>(data.sq);
          const bool retval = &ppu == sq;
          if (!mutex->unqueue<false>(sq, &ppu)) {
            return false;
          }
          success = true;
          if (!retval) {
            return false;
          }
          data.sq = sq;
          return true;
        });
        if (success) {
          ppu.next_cpu = nullptr;
          ppu.gpr[3] = CELL_ETIMEDOUT;
        }
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  if (--mutex->lwcond_waiters == smin) {
    // Notify the thread destroying lwmutex on last waiter
    mutex->lwcond_waiters.notify_all();
  }
  if (--cond->lwmutex_waiters == smin) {
    // Notify the thread destroying lwcond on last waiter
    cond->lwmutex_waiters.notify_all();
  }
  // Return cause
  return not_an_error(ppu.gpr[3]);
}

View file

@ -0,0 +1,353 @@
#include "stdafx.h"
#include "sys_lwmutex.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_lwmutex);
// Savestate constructor: restores protocol, userspace control pointer and
// name, then reads the signaled counter directly into the control data.
lv2_lwmutex::lv2_lwmutex(utils::serial &ar)
    : protocol(ar), control(ar.pop<decltype(control)>()),
      name(ar.pop<be_t<u64>>()) {
  ar(lv2_control.raw().signaled);
}
// Serializes the lwmutex state; field order must match the deserializing
// constructor above.
void lv2_lwmutex::save(utils::serial &ar) {
  ar(protocol, control, name, lv2_control.raw().signaled);
}
// Creates a lightweight mutex.
// Accepts SYS_SYNC_FIFO, SYS_SYNC_RETRY or SYS_SYNC_PRIORITY; the name is
// kept only when has_name is negative. On success writes the new ID to
// *lwmutex_id. Errors: CELL_EINVAL (bad protocol), CELL_EAGAIN (ID
// allocation failed).
error_code _sys_lwmutex_create(ppu_thread &ppu, vm::ptr<u32> lwmutex_id,
                               u32 protocol, vm::ptr<sys_lwmutex_t> control,
                               s32 has_name, u64 name) {
  ppu.state += cpu_flag::wait;
  sys_lwmutex.trace(u8"_sys_lwmutex_create(lwmutex_id=*0x%x, protocol=0x%x, "
                    u8"control=*0x%x, has_name=0x%x, name=0x%llx (“%s”))",
                    lwmutex_id, protocol, control, has_name, name,
                    lv2_obj::name_64{std::bit_cast<be_t<u64>>(name)});
  if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_RETRY &&
      protocol != SYS_SYNC_PRIORITY) {
    sys_lwmutex.error("_sys_lwmutex_create(): unknown protocol (0x%x)",
                      protocol);
    return CELL_EINVAL;
  }
  // The name is only stored when has_name is negative
  if (!(has_name < 0)) {
    name = 0;
  }
  if (const u32 id = idm::make<lv2_obj, lv2_lwmutex>(protocol, control, name)) {
    ppu.check_state();
    *lwmutex_id = id;
    return CELL_OK;
  }
  return CELL_EAGAIN;
}
// Destroys a lightweight mutex.
// Fails with CELL_EBUSY if threads sleep on its queue. If lwcond waiters are
// still attached (lwcond_waiters non-zero), the destroy is retried: the sign
// bit is set to block new waiters, the caller sleeps until the count drains,
// then the withdraw loop repeats. Mirrors _sys_lwcond_destroy.
error_code _sys_lwmutex_destroy(ppu_thread &ppu, u32 lwmutex_id) {
  ppu.state += cpu_flag::wait;
  sys_lwmutex.trace("_sys_lwmutex_destroy(lwmutex_id=0x%x)", lwmutex_id);
  shared_ptr<lv2_lwmutex> _mutex;
  while (true) {
    s32 old_val = 0;
    auto [ptr, ret] = idm::withdraw<lv2_obj, lv2_lwmutex>(
        lwmutex_id, [&](lv2_lwmutex &mutex) -> CellError {
          // Ignore check on first iteration
          if (_mutex && std::addressof(mutex) != _mutex.get()) {
            // Other thread has destroyed the lwmutex earlier
            return CELL_ESRCH;
          }
          std::lock_guard lock(mutex.mutex);
          if (mutex.load_sq()) {
            return CELL_EBUSY;
          }
          // Set the sign bit to prevent new lwcond waiters from entering
          old_val = mutex.lwcond_waiters.or_fetch(smin);
          if (old_val != smin) {
            // Deschedule if waiters were found
            lv2_obj::sleep(ppu);
            // Repeat loop: there are lwcond waiters
            return CELL_EAGAIN;
          }
          return {};
        });
    if (!ptr) {
      return CELL_ESRCH;
    }
    if (ret) {
      if (ret != CELL_EAGAIN) {
        return ret;
      }
    } else {
      break;
    }
    _mutex = std::move(ptr);
    // Wait for all lwcond waiters to quit
    while (old_val + 0u > 1u << 31) {
      thread_ctrl::wait_on(_mutex->lwcond_waiters, old_val);
      if (ppu.is_stopped()) {
        // Savestate: abort and mark for re-execution
        ppu.state += cpu_flag::again;
        return {};
      }
      old_val = _mutex->lwcond_waiters;
    }
    // Wake up from sleep
    ppu.check_state();
  }
  return CELL_OK;
}
// Locks a lightweight mutex (slow path, reached when the userspace fast path
// failed). First consumes a pending signal if any; otherwise tries to own
// the mutex or enqueues and sleeps with an optional timeout. A signaled
// value without bit 0 yields CELL_EBUSY in r3 (forced unlock2 semantics).
// The wait loop handles wake-by-signal, emulator stop (savestate) and
// timeout (removing this thread from the sleep queue atomically).
error_code _sys_lwmutex_lock(ppu_thread &ppu, u32 lwmutex_id, u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_lwmutex.trace("_sys_lwmutex_lock(lwmutex_id=0x%x, timeout=0x%llx)",
                    lwmutex_id, timeout);
  ppu.gpr[3] = CELL_OK;
  const auto mutex = idm::get<lv2_obj, lv2_lwmutex>(
      lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex &mutex) {
        // Try to consume a pending signal first
        if (s32 signal = mutex.lv2_control
                             .fetch_op([](lv2_lwmutex::control_data_t &data) {
                               if (data.signaled) {
                                 data.signaled = 0;
                                 return true;
                               }
                               return false;
                             })
                             .first.signaled) {
          if (~signal & 1) {
            ppu.gpr[3] = CELL_EBUSY;
          }
          return true;
        }
        lv2_obj::prepare_for_sleep(ppu);
        ppu.cancel_sleep = 1;
        if (s32 signal = mutex.try_own(&ppu)) {
          if (~signal & 1) {
            ppu.gpr[3] = CELL_EBUSY;
          }
          ppu.cancel_sleep = 0;
          return true;
        }
        const bool finished = !mutex.sleep(ppu, timeout);
        notify.cleanup();
        return finished;
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  if (mutex.ret) {
    // Acquired without sleeping (or sleep was cancelled)
    return not_an_error(ppu.gpr[3]);
  }
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      // Savestate path: only re-execute if still on the sleep queue
      std::lock_guard lock(mutex->mutex);
      for (auto cpu = mutex->load_sq(); cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }
      break;
    }
    // Brief busy-wait for an imminent signal before blocking
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        if (!mutex->load_sq()) {
          // Sleep queue is empty, so the thread must have been signaled
          mutex->mutex.lock_unlock();
          break;
        }
        std::lock_guard lock(mutex->mutex);
        bool success = false;
        mutex->lv2_control.fetch_op([&](lv2_lwmutex::control_data_t &data) {
          success = false;
          ppu_thread *sq = static_cast<ppu_thread *>(data.sq);
          const bool retval = &ppu == sq;
          if (!mutex->unqueue<false>(sq, &ppu)) {
            return false;
          }
          success = true;
          if (!retval) {
            return false;
          }
          data.sq = sq;
          return true;
        });
        if (success) {
          ppu.next_cpu = nullptr;
          ppu.gpr[3] = CELL_ETIMEDOUT;
        }
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  return not_an_error(ppu.gpr[3]);
}
// Attempts to lock a lightweight mutex without sleeping.
// Succeeds only by consuming a pending signal with bit 0 set; otherwise
// returns CELL_EBUSY (as not_an_error), or CELL_ESRCH for a bad ID.
error_code _sys_lwmutex_trylock(ppu_thread &ppu, u32 lwmutex_id) {
  ppu.state += cpu_flag::wait;
  sys_lwmutex.trace("_sys_lwmutex_trylock(lwmutex_id=0x%x)", lwmutex_id);
  const auto mutex =
      idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex &mutex) {
        auto [_, ok] =
            mutex.lv2_control.fetch_op([](lv2_lwmutex::control_data_t &data) {
              if (data.signaled & 1) {
                data.signaled = 0;
                return true;
              }
              return false;
            });
        return ok;
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  if (!mutex.ret) {
    return not_an_error(CELL_EBUSY);
  }
  return CELL_OK;
}
// Unlocks a lightweight mutex.
// Fast path: try_unlock when no waiter is queued. Otherwise passes ownership
// to the next sleeping thread and wakes it (aborting with cpu_flag::again on
// the savestate path).
error_code _sys_lwmutex_unlock(ppu_thread &ppu, u32 lwmutex_id) {
  ppu.state += cpu_flag::wait;
  sys_lwmutex.trace("_sys_lwmutex_unlock(lwmutex_id=0x%x)", lwmutex_id);
  const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(
      lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex &mutex) {
        if (mutex.try_unlock(false)) {
          return;
        }
        std::lock_guard lock(mutex.mutex);
        if (const auto cpu = mutex.reown<ppu_thread>()) {
          if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
            ppu.state += cpu_flag::again;
            return;
          }
          mutex.awake(cpu);
          notify.cleanup(); // lv2_lwmutex::mutex is not really active 99% of
                            // the time, can be ignored
        }
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}
// Forced unlock variant: like _sys_lwmutex_unlock but uses the "2" flavor of
// try_unlock/reown, and the woken thread receives CELL_EBUSY in r3 so its
// lock call reports the forced transfer.
error_code _sys_lwmutex_unlock2(ppu_thread &ppu, u32 lwmutex_id) {
  ppu.state += cpu_flag::wait;
  sys_lwmutex.warning("_sys_lwmutex_unlock2(lwmutex_id=0x%x)", lwmutex_id);
  const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(
      lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex &mutex) {
        if (mutex.try_unlock(true)) {
          return;
        }
        std::lock_guard lock(mutex.mutex);
        if (const auto cpu = mutex.reown<ppu_thread>(true)) {
          if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
            ppu.state += cpu_flag::again;
            return;
          }
          static_cast<ppu_thread *>(cpu)->gpr[3] = CELL_EBUSY;
          mutex.awake(cpu);
          notify.cleanup(); // lv2_lwmutex::mutex is not really active 99% of
                            // the time, can be ignored
        }
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}

View file

@ -0,0 +1,408 @@
#include "stdafx.h"
#include "sys_memory.h"
#include "Emu/CPU/CPUThread.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/SPUThread.h"
#include "Emu/IdManager.h"
#include "Emu/Memory/vm_locking.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_memory);
//
static shared_mutex s_memstats_mtx;
// Creates a memory container of the given size. Containers created outside
// IDM (e.g. the global default container) get the invalid-ID sentinel.
lv2_memory_container::lv2_memory_container(u32 size, bool from_idm) noexcept
    : size(size),
      id{from_idm ? idm::last_id() : SYS_MEMORY_CONTAINER_ID_INVALID} {}
// Savestate constructor: restores size and used counters; the ID is
// re-derived from the IDM slot when loaded through IDM.
lv2_memory_container::lv2_memory_container(utils::serial &ar,
                                           bool from_idm) noexcept
    : size(ar), id{from_idm ? idm::last_id() : SYS_MEMORY_CONTAINER_ID_INVALID},
      used(ar) {}
// Deserialization factory for IDM: constructs the container from the
// savestate and returns a callback that installs it into the IDM storage
// slot.
std::function<void(void *)> lv2_memory_container::load(utils::serial &ar) {
  // Use idm::last_id() only for the instances at IDM
  return [ptr = make_shared<lv2_memory_container>(exact_t<utils::serial &>(ar),
                                                  true)](void *storage) {
    *static_cast<atomic_ptr<lv2_memory_container> *>(storage) = ptr;
  };
}
// Serializes the container's capacity and current usage.
void lv2_memory_container::save(utils::serial &ar) { ar(size, used); }
// Resolves a container ID to its object. The invalid-ID sentinel maps to the
// global default container; any other ID is looked up in IDM (may yield
// nullptr for an unknown ID).
lv2_memory_container *lv2_memory_container::search(u32 id) {
  return id == SYS_MEMORY_CONTAINER_ID_INVALID
             ? &g_fxo->get<lv2_memory_container>()
             : idm::check_unlocked<lv2_memory_container>(id);
}
// Maps each 64 KiB address page (addr >> 16) to the memory container that
// backs it, so sys_memory_free can return the pages to the right container.
struct sys_memory_address_table {
  atomic_t<lv2_memory_container *> addrs[65536]{};
  sys_memory_address_table() = default;
  SAVESTATE_INIT_POS(
      id_manager::id_map<lv2_memory_container>::savestate_init_pos + 0.1);
  // Savestate constructor: rebuilds the page->container pointers from the
  // saved page->ID map.
  sys_memory_address_table(utils::serial &ar) {
    // First: address, second: container ID (SYS_MEMORY_CONTAINER_ID_INVALID for
    // global FXO memory container)
    std::unordered_map<u16, u32> mm;
    ar(mm);
    for (const auto &[addr, id] : mm) {
      addrs[addr] = ensure(lv2_memory_container::search(id));
    }
  }
  // Serializes only the occupied pages as a page->container-ID map.
  void save(utils::serial &ar) {
    std::unordered_map<u16, u32> mm;
    for (auto &ctr : addrs) {
      if (const auto ptr = +ctr) {
        mm[static_cast<u16>(&ctr - addrs)] = ptr->id;
      }
    }
    ar(mm);
  }
};
// Reserves a virtual memory block for a user allocation.
// A 64 KiB alignment selects the user64k area with a fixed 512 MiB size and
// 64 KiB pages; any other alignment selects user1m with the allocation size
// rounded up to 256 MiB and 1 MiB pages.
std::shared_ptr<vm::block_t> reserve_map(u32 alloc_size, u32 align) {
  const bool use_64k = align == 0x10000;
  const u64 flags = use_64k ? (vm::page_size_64k | vm::bf0_0x1)
                            : (vm::page_size_1m | vm::bf0_0x1);
  return vm::reserve_map(use_64k ? vm::user64k : vm::user1m, 0,
                         use_64k ? 0x20000000
                                 : utils::align(alloc_size, 0x10000000),
                         flags);
}
// Todo: fix order of error checks
// Allocates user memory from the default (global) memory container.
// flags select the page size (1M default/explicit, or 64K); size must be a
// multiple of the page size. Physical budget is taken from the container
// before mapping; on any later failure it is returned. The container backing
// each allocation is recorded in sys_memory_address_table for sys_memory_free.
// Todo: fix order of error checks
error_code sys_memory_allocate(cpu_thread &cpu, u64 size, u64 flags,
                               vm::ptr<u32> alloc_addr) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning(
      "sys_memory_allocate(size=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size,
      flags, alloc_addr);
  if (!size) {
    return {CELL_EALIGN, size};
  }
  // Check allocation size
  const u32 align = flags == SYS_MEMORY_PAGE_SIZE_1M    ? 0x100000
                    : flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000
                    : flags == 0                        ? 0x100000
                                                        : 0;
  if (!align) {
    return {CELL_EINVAL, flags};
  }
  if (size % align) {
    return {CELL_EALIGN, size};
  }
  // Get "default" memory container
  auto &dct = g_fxo->get<lv2_memory_container>();
  // Try to get "physical memory"
  if (!dct.take(size)) {
    return {CELL_ENOMEM, dct.size - dct.used};
  }
  if (const auto area = reserve_map(static_cast<u32>(size), align)) {
    if (const u32 addr = area->alloc(static_cast<u32>(size), nullptr, align)) {
      // Record the backing container; the page slot must have been empty
      ensure(!g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(
          &dct));
      if (alloc_addr) {
        sys_memory.notice(
            "sys_memory_allocate(): Allocated 0x%x address (size=0x%x)", addr,
            size);
        vm::lock_sudo(addr, static_cast<u32>(size));
        cpu.check_state();
        *alloc_addr = addr;
        return CELL_OK;
      }
      // Dealloc using the syscall
      sys_memory_free(cpu, addr);
      return CELL_EFAULT;
    }
  }
  // Mapping failed: return the physical budget to the container
  dct.free(size);
  return CELL_ENOMEM;
}
// Allocates user memory from a specific memory container (cid) instead of
// the default one; otherwise mirrors sys_memory_allocate. The physical
// budget is taken from the container under the IDM lock, and returned on
// any later mapping failure.
error_code sys_memory_allocate_from_container(cpu_thread &cpu, u64 size,
                                              u32 cid, u64 flags,
                                              vm::ptr<u32> alloc_addr) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning("sys_memory_allocate_from_container(size=0x%x, cid=0x%x, "
                     "flags=0x%llx, alloc_addr=*0x%x)",
                     size, cid, flags, alloc_addr);
  if (!size) {
    return {CELL_EALIGN, size};
  }
  // Check allocation size
  const u32 align = flags == SYS_MEMORY_PAGE_SIZE_1M    ? 0x100000
                    : flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000
                    : flags == 0                        ? 0x100000
                                                        : 0;
  if (!align) {
    return {CELL_EINVAL, flags};
  }
  if (size % align) {
    return {CELL_EALIGN, size};
  }
  const auto ct = idm::get<lv2_memory_container>(
      cid, [&](lv2_memory_container &ct) -> CellError {
        // Try to get "physical memory"
        if (!ct.take(size)) {
          return CELL_ENOMEM;
        }
        return {};
      });
  if (!ct) {
    return CELL_ESRCH;
  }
  if (ct.ret) {
    // Second error value reports the remaining free amount
    return {ct.ret, ct->size - ct->used};
  }
  if (const auto area = reserve_map(static_cast<u32>(size), align)) {
    if (const u32 addr = area->alloc(static_cast<u32>(size))) {
      // Record the backing container; the page slot must have been empty
      ensure(!g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(
          ct.ptr.get()));
      if (alloc_addr) {
        vm::lock_sudo(addr, static_cast<u32>(size));
        cpu.check_state();
        *alloc_addr = addr;
        return CELL_OK;
      }
      // Dealloc using the syscall
      sys_memory_free(cpu, addr);
      return CELL_EFAULT;
    }
  }
  // Mapping failed: return the physical budget to the container
  ct->free(size);
  return CELL_ENOMEM;
}
// Unmap a block previously allocated by sys_memory_allocate*() and return its
// "physical memory" to the owning container.
error_code sys_memory_free(cpu_thread &cpu, u32 addr) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning("sys_memory_free(addr=0x%x)", addr);
  // Atomically claim the container entry for this 64k-aligned address;
  // a misaligned address or an empty slot means it was not allocated here
  const auto ct =
      addr % 0x10000
          ? nullptr
          : g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(
                nullptr);
  if (!ct) {
    return {CELL_EINVAL, addr};
  }
  const auto size = (ensure(vm::dealloc(addr)));
  // Hold the id_manager lock for the duration of the free() call so the
  // container cannot be destroyed concurrently (comma-operator scoping)
  reader_lock{id_manager::g_mutex}, ct->free(size);
  return CELL_OK;
}
// Query the attributes (protection, access rights, page size) of the page
// containing addr, writing the result to *attr.
error_code sys_memory_get_page_attribute(cpu_thread &cpu, u32 addr,
                                         vm::ptr<sys_page_attr_t> attr) {
  cpu.state += cpu_flag::wait;
  sys_memory.trace("sys_memory_get_page_attribute(addr=0x%x, attr=*0x%x)", addr,
                   attr);
  vm::writer_lock rlock;
  // The address must be mapped and outside the SPU mirror range
  if (addr >= SPU_FAKE_BASE_ADDR || !vm::check_addr(addr)) {
    return CELL_EINVAL;
  }
  // The output structure itself must be readable
  if (!vm::check_addr(attr.addr(), vm::page_readable, attr.size())) {
    return CELL_EFAULT;
  }
  attr->attribute = 0x40000ull; // SYS_MEMORY_PROT_READ_WRITE (TODO)
  const bool is_stack_area = (addr >> 28) == 0xdu;
  attr->access_right = is_stack_area
                           ? SYS_MEMORY_ACCESS_RIGHT_PPU_THR
                           : SYS_MEMORY_ACCESS_RIGHT_ANY; // (TODO)
  u32 page_size = 4096;
  if (vm::check_addr(addr, vm::page_1m_size)) {
    page_size = 0x100000;
  } else if (vm::check_addr(addr, vm::page_64k_size)) {
    page_size = 0x10000;
  }
  attr->page_size = page_size;
  attr->pad = 0; // Always write 0
  return CELL_OK;
}
// Report total/available "user" memory as seen by the application: the
// default container's budget minus the budgets of all other containers.
error_code
sys_memory_get_user_memory_size(cpu_thread &cpu,
                                vm::ptr<sys_memory_info_t> mem_info) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning("sys_memory_get_user_memory_size(mem_info=*0x%x)",
                     mem_info);
  // Get "default" memory container
  auto &dct = g_fxo->get<lv2_memory_container>();
  sys_memory_info_t info{};
  {
    ::reader_lock lock(s_memstats_mtx);
    info.total_user_memory = dct.size;
    info.available_user_memory = dct.size - dct.used;
    // Subtract the budget of every other memory container
    idm::select<lv2_memory_container>([&](u32, lv2_memory_container &ct) {
      info.total_user_memory -= ct.size;
    });
  }
  cpu.check_state();
  *mem_info = info;
  return CELL_OK;
}
// Stub: per-container user memory statistics are not implemented yet.
error_code sys_memory_get_user_memory_stat(
    cpu_thread &cpu, vm::ptr<sys_memory_user_memory_stat_t> mem_stat) {
  cpu.state += cpu_flag::wait;
  sys_memory.todo("sys_memory_get_user_memory_stat(mem_stat=*0x%x)", mem_stat);
  return CELL_OK;
}
// Create a new memory container, carving its budget out of the default one.
// Writes the new container id to *cid.
error_code sys_memory_container_create(cpu_thread &cpu, vm::ptr<u32> cid,
                                       u64 size) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning("sys_memory_container_create(cid=*0x%x, size=0x%x)", cid,
                     size);
  // Round down to 1 MB granularity
  size &= ~0xfffff;
  if (!size) {
    return CELL_ENOMEM;
  }
  auto &dct = g_fxo->get<lv2_memory_container>();
  std::lock_guard lock(s_memstats_mtx);
  // Try to obtain "physical memory" from the default container
  if (!dct.take(size)) {
    return CELL_ENOMEM;
  }
  // Create the memory container
  const u32 id = idm::make<lv2_memory_container>(static_cast<u32>(size), true);
  if (!id) {
    // ID creation failed, give the memory back
    dct.free(size);
    return CELL_EAGAIN;
  }
  cpu.check_state();
  *cid = id;
  return CELL_OK;
}
// Destroy a memory container, returning its budget to the default container.
// Fails with EBUSY while any of its memory is still allocated.
error_code sys_memory_container_destroy(cpu_thread &cpu, u32 cid) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning("sys_memory_container_destroy(cid=0x%x)", cid);
  std::lock_guard lock(s_memstats_mtx);
  const auto ct = idm::withdraw<lv2_memory_container>(
      cid, [](lv2_memory_container &ct) -> CellError {
        // Check if some memory is not deallocated (the container cannot be
        // destroyed in this case)
        if (!ct.used.compare_and_swap_test(0, ct.size)) {
          return CELL_EBUSY;
        }
        return {};
      });
  if (!ct) {
    return CELL_ESRCH;
  }
  if (ct.ret) {
    return ct.ret;
  }
  // Return "physical memory" to the default container
  g_fxo->get<lv2_memory_container>().free(ct->size);
  return CELL_OK;
}
// Report the full budget and remaining free amount of a memory container.
error_code sys_memory_container_get_size(cpu_thread &cpu,
                                         vm::ptr<sys_memory_info_t> mem_info,
                                         u32 cid) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning("sys_memory_container_get_size(mem_info=*0x%x, cid=0x%x)",
                     mem_info, cid);
  const auto container = idm::get_unlocked<lv2_memory_container>(cid);
  if (!container) {
    return CELL_ESRCH;
  }
  cpu.check_state();
  mem_info->total_user_memory = container->size; // Total container memory
  mem_info->available_user_memory =
      container->size - container->used; // Available container memory
  return CELL_OK;
}
// Destroy a container together with its children. Since multi-process is not
// emulated, this simply forwards to sys_memory_container_destroy().
error_code sys_memory_container_destroy_parent_with_childs(
    cpu_thread &cpu, u32 cid, u32 must_0, vm::ptr<u32> mc_child) {
  sys_memory.warning("sys_memory_container_destroy_parent_with_childs(cid=0x%x,"
                     " must_0=%d, mc_child=*0x%x)",
                     cid, must_0, mc_child);
  // The second argument is reserved and must be zero
  if (must_0 != 0) {
    return CELL_EINVAL;
  }
  // Multi-process is not supported yet so child containers mean nothing at the
  // moment Simply destroy parent
  return sys_memory_container_destroy(cpu, cid);
}

View file

@ -0,0 +1,805 @@
#include "stdafx.h"
#include "sys_mmapper.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Memory/vm_var.h"
#include "cellos/sys_event.h"
#include "sys_memory.h"
#include "sys_process.h"
#include "sys_sync.h"
#include <span>
#include "util/vm.hpp"
LOG_CHANNEL(sys_mmapper);
// Formatter for lv2_mem_container_id: the invalid/global id gets a name,
// everything else falls back to hex.
template <>
void fmt_class_string<lv2_mem_container_id>::format(std::string &out, u64 arg) {
  format_enum(out, arg, [](auto value) {
    if (value == SYS_MEMORY_CONTAINER_ID_INVALID) {
      return "Global";
    }
    // Resort to hex formatting for other values
    return unknown;
  });
}
// Construct a fresh shared-memory object; the backing storage is a shareable
// utils::shm of the requested size, charged against container ct.
lv2_memory::lv2_memory(u32 size, u32 align, u64 flags, u64 key, bool pshared,
                       lv2_memory_container *ct)
    : size(size), align(align), flags(flags), key(key), pshared(pshared),
      ct(ct), shm(std::make_shared<utils::shm>(size, 1 /* shareable flag */)) {
#ifndef _WIN32
  // Optimization that's useless on Windows :puke:
  utils::memory_lock(shm->map_self(), size);
#endif
}
// Deserialize a shared-memory object. If the savestate recorded a mapped
// address, reuse the existing backing shm found at that address; otherwise
// create a fresh shm and restore its contents from the archive.
lv2_memory::lv2_memory(utils::serial &ar)
    : size(ar), align(ar), flags(ar), key(ar), pshared(ar),
      ct(lv2_memory_container::search(ar.pop<u32>())), shm([&](u32 addr) {
        if (addr) {
          return ensure(vm::get(vm::any, addr)->peek(addr).second);
        }
        const auto _shm = std::make_shared<utils::shm>(size, 1);
        ar(std::span(_shm->map_self(), size));
        return _shm;
      }(ar.pop<u32>())),
      counter(ar) {
#ifndef _WIN32
  // Optimization that's useless on Windows :puke:
  utils::memory_lock(shm->map_self(), size);
#endif
}
// Called when an ID is created for this object: on first creation, charge the
// "physical memory" against the owning container.
CellError lv2_memory::on_id_create() {
  if (!exists) {
    // First reference: the container must have enough free budget
    if (!ct->take(size)) {
      sys_mmapper.error("lv2_memory::on_id_create(): Cannot allocate 0x%x bytes "
                        "(0x%x available)",
                        size, ct->size - ct->used);
      return CELL_ENOMEM;
    }
  }
  exists++;
  return {};
}
// Deserialize and register an lv2_memory object during savestate loading.
std::function<void(void *)> lv2_memory::load(utils::serial &ar) {
  auto mem = make_shared<lv2_memory>(exact_t<utils::serial &>(ar));
  mem->exists++; // Disable on_id_create()
  auto func = load_func(mem, +mem->pshared);
  mem->exists--;
  return func;
}
// Serialize this shared-memory object. When the memory is currently mapped
// (counter != 0) only the mapped address is recorded; otherwise the raw
// contents are written out.
void lv2_memory::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_memory);
  ar(size, align, flags, key, pshared, ct->id);
  ar(counter ? vm::get_shm_addr(shm) : 0);
  if (!counter) {
    ar(std::span(shm->map_self(), size));
  }
  ar(counter);
}
// Deserialize the list of page fault notification entries.
page_fault_notification_entries::page_fault_notification_entries(
    utils::serial &ar) {
  ar(entries);
}
void page_fault_notification_entries::save(utils::serial &ar) { ar(entries); }
// Shared helper: create an lv2_memory object, optionally IPC-shared under
// ipc_key. When 'exclusive' is set, creation requires the key to be new
// (SYS_SYNC_NEWLY_CREATED); non-shared objects ignore the key entirely.
template <bool exclusive = false>
error_code create_lv2_shm(bool pshared, u64 ipc_key, u64 size, u32 align,
                          u64 flags, lv2_memory_container *ct) {
  const u32 _pshared =
      pshared ? SYS_SYNC_PROCESS_SHARED : SYS_SYNC_NOT_PROCESS_SHARED;
  if (!pshared) {
    ipc_key = 0;
  }
  if (auto error = lv2_obj::create<lv2_memory>(
          _pshared, ipc_key,
          exclusive ? SYS_SYNC_NEWLY_CREATED : SYS_SYNC_NOT_CARE,
          [&]() {
            return make_shared<lv2_memory>(static_cast<u32>(size), align, flags,
                                           ipc_key, pshared, ct);
          },
          false)) {
    return error;
  }
  return CELL_OK;
}
// Reserve an address region for later shared memory mappings. Size must be a
// multiple of 256 MiB and the alignment a power of two in [256 MiB, 2 GiB].
error_code sys_mmapper_allocate_address(ppu_thread &ppu, u64 size, u64 flags,
                                        u64 alignment,
                                        vm::ptr<u32> alloc_addr) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_allocate_address(size=0x%x, flags=0x%x, "
                      "alignment=0x%x, alloc_addr=*0x%x)",
                      size, flags, alignment, alloc_addr);
  if (size % 0x10000000) {
    return CELL_EALIGN;
  }
  if (size > u32{umax}) {
    return CELL_ENOMEM;
  }
  // This is a workaround for psl1ght, which gives us an alignment of 0, which
  // is technically invalid, but apparently is allowed on actual ps3
  // https://github.com/ps3dev/PSL1GHT/blob/534e58950732c54dc6a553910b653c99ba6e9edc/ppu/librt/sbrk.c#L71
  if (!alignment) {
    alignment = 0x10000000;
  }
  // Only power-of-two alignments between 256 MiB and 2 GiB are accepted
  if (alignment != 0x10000000 && alignment != 0x20000000 &&
      alignment != 0x40000000 && alignment != 0x80000000) {
    return CELL_EALIGN;
  }
  const auto area =
      vm::find_map(static_cast<u32>(size), static_cast<u32>(alignment),
                   flags & SYS_MEMORY_PAGE_SIZE_MASK);
  if (!area) {
    return CELL_ENOMEM;
  }
  sys_mmapper.warning(
      "sys_mmapper_allocate_address(): Found VM 0x%x area (vsize=0x%x)",
      area->addr, size);
  ppu.check_state();
  *alloc_addr = area->addr;
  return CELL_OK;
}
// Map the fixed 256 MiB region at 0xB0000000 with 1 MiB pages.
error_code sys_mmapper_allocate_fixed_address(ppu_thread &ppu) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_allocate_fixed_address()");
  // vm::map fails if the region is already mapped
  if (!vm::map(0xB0000000, 0x10000000, SYS_MEMORY_PAGE_SIZE_1M)) {
    return CELL_EEXIST;
  }
  return CELL_OK;
}
// Create a shared memory object backed by the default memory container; the
// new object id is written to *mem_id.
error_code sys_mmapper_allocate_shared_memory(ppu_thread &ppu, u64 ipc_key,
                                              u64 size, u64 flags,
                                              vm::ptr<u32> mem_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_allocate_shared_memory(ipc_key=0x%x, "
                      "size=0x%x, flags=0x%x, mem_id=*0x%x)",
                      ipc_key, size, flags, mem_id);
  if (size == 0) {
    return CELL_EALIGN;
  }
  // Check page granularity
  const u64 granularity = flags & SYS_MEMORY_GRANULARITY_MASK;
  if (granularity == 0 || granularity == SYS_MEMORY_GRANULARITY_1M) {
    if (size % 0x100000) {
      return CELL_EALIGN;
    }
  } else if (granularity == SYS_MEMORY_GRANULARITY_64K) {
    if (size % 0x10000) {
      return CELL_EALIGN;
    }
  } else {
    return CELL_EINVAL;
  }
  // Get "default" memory container
  auto &dct = g_fxo->get<lv2_memory_container>();
  if (auto error = create_lv2_shm(
          ipc_key != SYS_MMAPPER_NO_SHM_KEY, ipc_key, size,
          flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000, flags, &dct)) {
    return error;
  }
  ppu.check_state();
  *mem_id = idm::last_id();
  return CELL_OK;
}
// Create a shared memory object backed by a specific memory container (cid);
// the new object id is written to *mem_id.
error_code
sys_mmapper_allocate_shared_memory_from_container(ppu_thread &ppu, u64 ipc_key,
                                                  u64 size, u32 cid, u64 flags,
                                                  vm::ptr<u32> mem_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning(
      "sys_mmapper_allocate_shared_memory_from_container(ipc_key=0x%x, "
      "size=0x%x, cid=0x%x, flags=0x%x, mem_id=*0x%x)",
      ipc_key, size, cid, flags, mem_id);
  if (size == 0) {
    return CELL_EALIGN;
  }
  // Check page granularity.
  switch (flags & SYS_MEMORY_GRANULARITY_MASK) {
  case 0:
  case SYS_MEMORY_GRANULARITY_1M: {
    if (size % 0x100000) {
      return CELL_EALIGN;
    }
    break;
  }
  case SYS_MEMORY_GRANULARITY_64K: {
    if (size % 0x10000) {
      return CELL_EALIGN;
    }
    break;
  }
  default: {
    return CELL_EINVAL;
  }
  }
  const auto ct = idm::get_unlocked<lv2_memory_container>(cid);
  if (!ct) {
    return CELL_ESRCH;
  }
  if (auto error =
          create_lv2_shm(ipc_key != SYS_MMAPPER_NO_SHM_KEY, ipc_key, size,
                         flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000,
                         flags, ct.get())) {
    return error;
  }
  ppu.check_state();
  *mem_id = idm::last_id();
  return CELL_OK;
}
// Create an exclusive IPC shared memory object with extra attribute entries
// (structure contents mostly unknown). Entry type 5 additionally requires 64k
// pages and a debug/root process.
error_code sys_mmapper_allocate_shared_memory_ext(
    ppu_thread &ppu, u64 ipc_key, u64 size, u32 flags,
    vm::ptr<mmapper_unk_entry_struct0> entries, s32 entry_count,
    vm::ptr<u32> mem_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.todo(
      "sys_mmapper_allocate_shared_memory_ext(ipc_key=0x%x, size=0x%x, "
      "flags=0x%x, entries=*0x%x, entry_count=0x%x, mem_id=*0x%x)",
      ipc_key, size, flags, entries, entry_count, mem_id);
  if (size == 0) {
    return CELL_EALIGN;
  }
  // Check page granularity
  switch (flags & SYS_MEMORY_GRANULARITY_MASK) {
  case SYS_MEMORY_GRANULARITY_1M:
  case 0: {
    if (size % 0x100000) {
      return CELL_EALIGN;
    }
    break;
  }
  case SYS_MEMORY_GRANULARITY_64K: {
    if (size % 0x10000) {
      return CELL_EALIGN;
    }
    break;
  }
  default: {
    return CELL_EINVAL;
  }
  }
  if (flags & ~SYS_MEMORY_PAGE_SIZE_MASK) {
    return CELL_EINVAL;
  }
  if (entry_count <= 0 || entry_count > 0x10) {
    return CELL_EINVAL;
  }
  // Plain block scope for the permission flag. (Previously this used
  // `if constexpr (bool b = false; true)` purely for scoping — misleading,
  // since the flag is a runtime value; behavior is unchanged.)
  {
    bool to_perm_check = false;
    for (s32 i = 0; i < entry_count; i++) {
      const u64 type = entries[i].type;
      // The whole structure contents are unknown
      sys_mmapper.todo(
          "sys_mmapper_allocate_shared_memory_ext(): entry type = 0x%x", type);
      switch (type) {
      case 0:
      case 1:
      case 3: {
        break;
      }
      case 5: {
        // Requires a privileged process and 64k pages (checked below)
        to_perm_check = true;
        break;
      }
      default: {
        return CELL_EPERM;
      }
      }
    }
    if (to_perm_check) {
      if (flags != SYS_MEMORY_PAGE_SIZE_64K ||
          !g_ps3_process_info.debug_or_root()) {
        return CELL_EPERM;
      }
    }
  }
  // Get "default" memory container
  auto &dct = g_fxo->get<lv2_memory_container>();
  if (auto error = create_lv2_shm<true>(
          true, ipc_key, size,
          flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000, flags, &dct)) {
    return error;
  }
  ppu.check_state();
  *mem_id = idm::last_id();
  return CELL_OK;
}
// Container-backed variant of sys_mmapper_allocate_shared_memory_ext(): same
// attribute-entry validation, but memory is charged against container cid.
error_code sys_mmapper_allocate_shared_memory_from_container_ext(
    ppu_thread &ppu, u64 ipc_key, u64 size, u64 flags, u32 cid,
    vm::ptr<mmapper_unk_entry_struct0> entries, s32 entry_count,
    vm::ptr<u32> mem_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.todo("sys_mmapper_allocate_shared_memory_from_container_ext(ipc_"
                   "key=0x%x, size=0x%x, flags=0x%x, cid=0x%x, entries=*0x%x, "
                   "entry_count=0x%x, mem_id=*0x%x)",
                   ipc_key, size, flags, cid, entries, entry_count, mem_id);
  switch (flags & SYS_MEMORY_PAGE_SIZE_MASK) {
  case SYS_MEMORY_PAGE_SIZE_1M:
  case 0: {
    if (size % 0x100000) {
      return CELL_EALIGN;
    }
    break;
  }
  case SYS_MEMORY_PAGE_SIZE_64K: {
    if (size % 0x10000) {
      return CELL_EALIGN;
    }
    break;
  }
  default: {
    return CELL_EINVAL;
  }
  }
  if (flags & ~SYS_MEMORY_PAGE_SIZE_MASK) {
    return CELL_EINVAL;
  }
  if (entry_count <= 0 || entry_count > 0x10) {
    return CELL_EINVAL;
  }
  // Plain block scope for the permission flag. (Previously this used
  // `if constexpr (bool b = false; true)` purely for scoping — misleading,
  // since the flag is a runtime value; behavior is unchanged.)
  {
    bool to_perm_check = false;
    for (s32 i = 0; i < entry_count; i++) {
      const u64 type = entries[i].type;
      sys_mmapper.todo("sys_mmapper_allocate_shared_memory_from_container_ext()"
                       ": entry type = 0x%x",
                       type);
      switch (type) {
      case 0:
      case 1:
      case 3: {
        break;
      }
      case 5: {
        // Requires a privileged process and 64k pages (checked below)
        to_perm_check = true;
        break;
      }
      default: {
        return CELL_EPERM;
      }
      }
    }
    if (to_perm_check) {
      if (flags != SYS_MEMORY_PAGE_SIZE_64K ||
          !g_ps3_process_info.debug_or_root()) {
        return CELL_EPERM;
      }
    }
  }
  const auto ct = idm::get_unlocked<lv2_memory_container>(cid);
  if (!ct) {
    return CELL_ESRCH;
  }
  if (auto error = create_lv2_shm<true>(
          true, ipc_key, size,
          flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000, flags,
          ct.get())) {
    return error;
  }
  ppu.check_state();
  *mem_id = idm::last_id();
  return CELL_OK;
}
// Stub: access right changes are not implemented.
error_code sys_mmapper_change_address_access_right(ppu_thread &ppu, u32 addr,
                                                   u64 flags) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.todo(
      "sys_mmapper_change_address_access_right(addr=0x%x, flags=0x%x)", addr,
      flags);
  return CELL_OK;
}
// Unmap a region previously reserved via sys_mmapper_allocate_address().
// Refuses while a page fault inside the region is pending, and removes any
// page fault notification entry registered for it.
error_code sys_mmapper_free_address(ppu_thread &ppu, u32 addr) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_free_address(addr=0x%x)", addr);
  // mmapper regions live in [0x20000000, 0xC0000000)
  if (addr < 0x20000000 || addr >= 0xC0000000) {
    return {CELL_EINVAL, addr};
  }
  // If page fault notify exists and an address in this area is faulted, we
  // can't free the memory.
  auto &pf_events = g_fxo->get<page_fault_event_entries>();
  std::lock_guard pf_lock(pf_events.pf_mutex);
  const auto mem = vm::get(vm::any, addr);
  if (!mem || mem->addr != addr) {
    return {CELL_EINVAL, addr};
  }
  for (const auto &ev : pf_events.events) {
    if (addr <= ev.second && ev.second <= addr + mem->size - 1) {
      return CELL_EBUSY;
    }
  }
  // Try to unmap area
  const auto [area, success] = vm::unmap(addr, true, &mem);
  if (!area) {
    return {CELL_EINVAL, addr};
  }
  if (!success) {
    return CELL_EBUSY;
  }
  // If a memory block is freed, remove it from page notification table.
  auto &pf_entries = g_fxo->get<page_fault_notification_entries>();
  std::lock_guard lock(pf_entries.mutex);
  auto ind_to_remove = pf_entries.entries.begin();
  for (; ind_to_remove != pf_entries.entries.end(); ++ind_to_remove) {
    if (addr == ind_to_remove->start_addr) {
      break;
    }
  }
  if (ind_to_remove != pf_entries.entries.end()) {
    pf_entries.entries.erase(ind_to_remove);
  }
  return CELL_OK;
}
// Destroy a shared memory object; fails with EBUSY while it is still mapped
// anywhere (counter != 0).
error_code sys_mmapper_free_shared_memory(ppu_thread &ppu, u32 mem_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_free_shared_memory(mem_id=0x%x)", mem_id);
  // Conditionally remove memory ID
  const auto mem = idm::withdraw<lv2_obj, lv2_memory>(
      mem_id, [&](lv2_memory &mem) -> CellError {
        if (mem.counter) {
          return CELL_EBUSY;
        }
        lv2_obj::on_id_destroy(mem, mem.key, +mem.pshared);
        if (!mem.exists) {
          // Return "physical memory" to the memory container
          mem.ct->free(mem.size);
        }
        return {};
      });
  if (!mem) {
    return CELL_ESRCH;
  }
  if (mem.ret) {
    return mem.ret;
  }
  return CELL_OK;
}
// Map a shared memory object at a fixed address inside a previously reserved
// mmapper region.
error_code sys_mmapper_map_shared_memory(ppu_thread &ppu, u32 addr, u32 mem_id,
                                         u64 flags) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning(
      "sys_mmapper_map_shared_memory(addr=0x%x, mem_id=0x%x, flags=0x%x)", addr,
      mem_id, flags);
  const auto area = vm::get(vm::any, addr);
  if (!area || addr < 0x20000000 || addr >= 0xC0000000) {
    return CELL_EINVAL;
  }
  const auto mem =
      idm::get<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory &mem) -> CellError {
        const u32 page_alignment =
            area->flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000;
        if (mem.align < page_alignment) {
          return CELL_EINVAL;
        }
        if (addr % page_alignment) {
          return CELL_EALIGN;
        }
        // Count this mapping so the object can't be freed while in use
        mem.counter++;
        return {};
      });
  if (!mem) {
    return CELL_ESRCH;
  }
  if (mem.ret) {
    return mem.ret;
  }
  if (!area->falloc(addr, mem->size, &mem->shm,
                    mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K
                                          : SYS_MEMORY_PAGE_SIZE_1M)) {
    // Mapping failed, undo the reference taken above
    mem->counter--;
    if (!area->is_valid()) {
      return {CELL_EINVAL, addr};
    }
    return CELL_EBUSY;
  }
  vm::lock_sudo(addr, mem->size);
  return CELL_OK;
}
// Map a shared memory object at a system-chosen address inside the reserved
// region starting at start_addr; the chosen address is written to *alloc_addr.
error_code sys_mmapper_search_and_map(ppu_thread &ppu, u32 start_addr,
                                      u32 mem_id, u64 flags,
                                      vm::ptr<u32> alloc_addr) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_search_and_map(start_addr=0x%x, "
                      "mem_id=0x%x, flags=0x%x, alloc_addr=*0x%x)",
                      start_addr, mem_id, flags, alloc_addr);
  const auto area = vm::get(vm::any, start_addr);
  // start_addr must be the base of a reserved mmapper region
  if (!area || start_addr != area->addr || start_addr < 0x20000000 ||
      start_addr >= 0xC0000000) {
    return {CELL_EINVAL, start_addr};
  }
  const auto mem =
      idm::get<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory &mem) -> CellError {
        const u32 page_alignment =
            area->flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000;
        if (mem.align < page_alignment) {
          return CELL_EALIGN;
        }
        // Count this mapping so the object can't be freed while in use
        mem.counter++;
        return {};
      });
  if (!mem) {
    return CELL_ESRCH;
  }
  if (mem.ret) {
    return mem.ret;
  }
  const u32 addr = area->alloc(mem->size, &mem->shm, mem->align,
                               mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K
                                                     : SYS_MEMORY_PAGE_SIZE_1M);
  if (!addr) {
    // Allocation failed, undo the reference taken above
    mem->counter--;
    if (!area->is_valid()) {
      return {CELL_EINVAL, start_addr};
    }
    return CELL_ENOMEM;
  }
  sys_mmapper.notice("sys_mmapper_search_and_map(): Found 0x%x address", addr);
  vm::lock_sudo(addr, mem->size);
  ppu.check_state();
  *alloc_addr = addr;
  return CELL_OK;
}
// Unmap a shared memory object from the given address and report its object
// id via *mem_id.
error_code sys_mmapper_unmap_shared_memory(ppu_thread &ppu, u32 addr,
                                           vm::ptr<u32> mem_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning(
      "sys_mmapper_unmap_shared_memory(addr=0x%x, mem_id=*0x%x)", addr, mem_id);
  const auto area = vm::get(vm::any, addr);
  if (!area || addr < 0x20000000 || addr >= 0xC0000000) {
    return {CELL_EINVAL, addr};
  }
  // Look up the backing shm mapped at this exact address
  const auto shm = area->peek(addr);
  if (!shm.second) {
    return {CELL_EINVAL, addr};
  }
  // Find the lv2_memory object that owns this shm
  const auto mem =
      idm::select<lv2_obj, lv2_memory>([&](u32 id, lv2_memory &mem) -> u32 {
        if (mem.shm.get() == shm.second.get()) {
          return id;
        }
        return 0;
      });
  if (!mem) {
    return {CELL_EINVAL, addr};
  }
  if (!area->dealloc(addr, &shm.second)) {
    return {CELL_EINVAL, addr};
  }
  // Write out the ID
  ppu.check_state();
  *mem_id = mem.ret;
  // Acknowledge
  mem->counter--;
  return CELL_OK;
}
// Attach an event queue that will receive page fault notifications for the
// mmapper region starting at start_addr.
error_code sys_mmapper_enable_page_fault_notification(ppu_thread &ppu,
                                                      u32 start_addr,
                                                      u32 event_queue_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_enable_page_fault_notification(start_addr="
                      "0x%x, event_queue_id=0x%x)",
                      start_addr, event_queue_id);
  auto mem = vm::get(vm::any, start_addr);
  if (!mem || start_addr != mem->addr || start_addr < 0x20000000 ||
      start_addr >= 0xC0000000) {
    return {CELL_EINVAL, start_addr};
  }
  // TODO: Check memory region's flags to make sure the memory can be used for
  // page faults.
  auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(event_queue_id);
  if (!queue) { // Can't connect the queue if it doesn't exist.
    return CELL_ESRCH;
  }
  vm::var<u32> port_id(0);
  error_code res = sys_event_port_create(ppu, port_id, SYS_EVENT_PORT_LOCAL,
                                         SYS_MEMORY_PAGE_FAULT_EVENT_KEY);
  // NOTE(review): the port is connected before `res` is checked; on EAGAIN
  // the connect operates on a port that was never created — confirm intended
  sys_event_port_connect_local(ppu, *port_id, event_queue_id);
  if (res + 0u == CELL_EAGAIN) {
    // Not enough system resources.
    return CELL_EAGAIN;
  }
  auto &pf_entries = g_fxo->get<page_fault_notification_entries>();
  std::unique_lock lock(pf_entries.mutex);
  // Return error code if page fault notifications are already enabled
  for (const auto &entry : pf_entries.entries) {
    if (entry.start_addr == start_addr) {
      lock.unlock();
      // Tear down the port created above before failing
      sys_event_port_disconnect(ppu, *port_id);
      sys_event_port_destroy(ppu, *port_id);
      return CELL_EBUSY;
    }
  }
  page_fault_notification_entry entry{start_addr, event_queue_id,
                                      port_id->value()};
  pf_entries.entries.emplace_back(entry);
  return CELL_OK;
}
// Resume a thread that was suspended on a page fault for this region.
error_code mmapper_thread_recover_page_fault(cpu_thread *cpu) {
  // We can only wake a thread if it is being suspended for a page fault.
  auto &pf_events = g_fxo->get<page_fault_event_entries>();
  {
    std::lock_guard pf_lock(pf_events.pf_mutex);
    const auto found = pf_events.events.find(cpu);
    if (found == pf_events.events.end()) {
      // The thread is not waiting on a page fault
      return CELL_EINVAL;
    }
    pf_events.events.erase(found);
    // PPU threads go through the scheduler; other threads get signalled
    if (cpu->get_class() == thread_class::ppu) {
      lv2_obj::awake(cpu);
    } else {
      cpu->state += cpu_flag::signal;
    }
  }
  if (cpu->state & cpu_flag::signal) {
    cpu->state.notify_one();
  }
  return CELL_OK;
}

View file

@ -0,0 +1,338 @@
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
#include "sys_mutex.h"
LOG_CHANNEL(sys_mutex);
// Deserialize a mutex from a savestate.
lv2_mutex::lv2_mutex(utils::serial &ar)
    : protocol(ar), recursive(ar), adaptive(ar), key(ar), name(ar) {
  ar(lock_count, control.raw().owner);
  // For backwards compatibility
  control.raw().owner >>= 1;
}
// Deserialize a mutex object for savestate loading.
std::function<void(void *)> lv2_mutex::load(utils::serial &ar) {
  auto mtx = make_shared<lv2_mutex>(exact_t<utils::serial &>(ar));
  return load_func(std::move(mtx));
}
// Serialize; the owner is shifted left to keep the legacy on-disk format
// (the deserializing constructor shifts it back).
void lv2_mutex::save(utils::serial &ar) {
  ar(protocol, recursive, adaptive, key, name, lock_count,
     control.raw().owner << 1);
}
// Create a mutex described by *attr and write the new id to *mutex_id.
error_code sys_mutex_create(ppu_thread &ppu, vm::ptr<u32> mutex_id,
                            vm::ptr<sys_mutex_attribute_t> attr) {
  ppu.state += cpu_flag::wait;
  sys_mutex.trace("sys_mutex_create(mutex_id=*0x%x, attr=*0x%x)", mutex_id,
                  attr);
  if (!mutex_id || !attr) {
    return CELL_EFAULT;
  }
  const auto _attr = *attr;
  const u64 ipc_key = lv2_obj::get_key(_attr);
  if (ipc_key) {
    sys_mutex.warning(
        "sys_mutex_create(mutex_id=*0x%x, attr=*0x%x): IPC=0x%016x", mutex_id,
        attr, ipc_key);
  }
  // Validate the locking protocol
  if (_attr.protocol == SYS_SYNC_PRIORITY_INHERIT) {
    sys_mutex.warning("sys_mutex_create(): SYS_SYNC_PRIORITY_INHERIT");
  } else if (_attr.protocol != SYS_SYNC_FIFO &&
             _attr.protocol != SYS_SYNC_PRIORITY) {
    sys_mutex.error("sys_mutex_create(): unknown protocol (0x%x)",
                    _attr.protocol);
    return CELL_EINVAL;
  }
  // Validate the recursion mode
  if (_attr.recursive != SYS_SYNC_RECURSIVE &&
      _attr.recursive != SYS_SYNC_NOT_RECURSIVE) {
    sys_mutex.error("sys_mutex_create(): unknown recursive (0x%x)",
                    _attr.recursive);
    return CELL_EINVAL;
  }
  if (_attr.adaptive != SYS_SYNC_NOT_ADAPTIVE) {
    sys_mutex.todo("sys_mutex_create(): unexpected adaptive (0x%x)",
                   _attr.adaptive);
  }
  if (auto error = lv2_obj::create<lv2_mutex>(
          _attr.pshared, _attr.ipc_key, _attr.flags, [&]() {
            return make_shared<lv2_mutex>(_attr.protocol, _attr.recursive,
                                          _attr.adaptive, ipc_key,
                                          _attr.name_u64);
          })) {
    return error;
  }
  ppu.check_state();
  *mutex_id = idm::last_id();
  return CELL_OK;
}
// Destroy a mutex; fails with EBUSY while owned and EPERM while referenced by
// a condition variable.
error_code sys_mutex_destroy(ppu_thread &ppu, u32 mutex_id) {
  ppu.state += cpu_flag::wait;
  sys_mutex.trace("sys_mutex_destroy(mutex_id=0x%x)", mutex_id);
  const auto mutex = idm::withdraw<lv2_obj, lv2_mutex>(
      mutex_id, [](lv2_mutex &mutex) -> CellError {
        std::lock_guard lock(mutex.mutex);
        // Cannot destroy while a thread owns the lock
        if (atomic_storage<u32>::load(mutex.control.raw().owner)) {
          return CELL_EBUSY;
        }
        // Cannot destroy while condition variables reference it
        if (mutex.cond_count) {
          return CELL_EPERM;
        }
        lv2_obj::on_id_destroy(mutex, mutex.key);
        return {};
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  if (mutex->key) {
    sys_mutex.warning("sys_mutex_destroy(mutex_id=0x%x): IPC=0x%016x", mutex_id,
                      mutex->key);
  }
  if (mutex.ret) {
    return mutex.ret;
  }
  return CELL_OK;
}
// Lock a mutex, sleeping for up to `timeout` microseconds (0 = infinite).
error_code sys_mutex_lock(ppu_thread &ppu, u32 mutex_id, u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_mutex.trace("sys_mutex_lock(mutex_id=0x%x, timeout=0x%llx)", mutex_id,
                  timeout);
  const auto mutex = idm::get<lv2_obj, lv2_mutex>(
      mutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_mutex &mutex) {
        CellError result = mutex.try_lock(ppu);
        // Spin briefly before sleeping if no other thread is queued yet
        if (result == CELL_EBUSY &&
            !atomic_storage<ppu_thread *>::load(mutex.control.raw().sq)) {
          // Try busy waiting a bit if advantageous
          for (u32 i = 0, end = lv2_obj::has_ppus_in_running_state() ? 3 : 10;
               id_manager::g_mutex.is_lockable() && i < end; i++) {
            busy_wait(300);
            result = mutex.try_lock(ppu);
            if (!result ||
                atomic_storage<ppu_thread *>::load(mutex.control.raw().sq)) {
              break;
            }
          }
        }
        if (result == CELL_EBUSY) {
          lv2_obj::prepare_for_sleep(ppu);
          ppu.cancel_sleep = 1;
          // try_own may still succeed if the owner released in the meantime
          if (mutex.try_own(ppu) || !mutex.sleep(ppu, timeout)) {
            result = {};
          }
          if (ppu.cancel_sleep != 1) {
            notify.cleanup();
          }
          ppu.cancel_sleep = 0;
        }
        return result;
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  if (mutex.ret) {
    if (mutex.ret != CELL_EBUSY) {
      return mutex.ret;
    }
  } else {
    return CELL_OK;
  }
  // EBUSY: this thread was queued on the mutex, wait until signaled
  ppu.gpr[3] = CELL_OK;
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      // Emulation stop/savestate: if still queued, mark the syscall for retry
      std::lock_guard lock(mutex->mutex);
      for (auto cpu =
               atomic_storage<ppu_thread *>::load(mutex->control.raw().sq);
           cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }
      break;
    }
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 40; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        if (!atomic_storage<ppu_thread *>::load(mutex->control.raw().sq)) {
          // Waiters queue is empty, so the thread must have been signaled
          mutex->mutex.lock_unlock();
          break;
        }
        // Try to remove self from the sleep queue; on success report timeout
        std::lock_guard lock(mutex->mutex);
        bool success = false;
        mutex->control.fetch_op([&](lv2_mutex::control_data_t &data) {
          success = false;
          ppu_thread *sq = static_cast<ppu_thread *>(data.sq);
          const bool retval = &ppu == sq;
          if (!mutex->unqueue<false>(sq, &ppu)) {
            return false;
          }
          success = true;
          if (!retval) {
            return false;
          }
          data.sq = sq;
          return true;
        });
        if (success) {
          ppu.next_cpu = nullptr;
          ppu.gpr[3] = CELL_ETIMEDOUT;
        }
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  return not_an_error(ppu.gpr[3]);
}
// Attempt to lock a mutex without blocking.
error_code sys_mutex_trylock(ppu_thread &ppu, u32 mutex_id) {
  ppu.state += cpu_flag::wait;
  sys_mutex.trace("sys_mutex_trylock(mutex_id=0x%x)", mutex_id);
  const auto mutex = idm::check<lv2_obj, lv2_mutex>(
      mutex_id, [&](lv2_mutex &mutex) { return mutex.try_lock(ppu); });
  if (!mutex) {
    return CELL_ESRCH;
  }
  const auto err = mutex.ret;
  if (!err) {
    return CELL_OK;
  }
  // EBUSY is an expected outcome for trylock rather than a failure
  if (err == CELL_EBUSY) {
    return not_an_error(CELL_EBUSY);
  }
  return err;
}
// Unlock a mutex; if there are waiters, ownership is handed to the next
// queued thread, which is then awoken.
error_code sys_mutex_unlock(ppu_thread &ppu, u32 mutex_id) {
  ppu.state += cpu_flag::wait;
  sys_mutex.trace("sys_mutex_unlock(mutex_id=0x%x)", mutex_id);
  const auto mutex = idm::check<lv2_obj, lv2_mutex>(
      mutex_id,
      [&, notify = lv2_obj::notify_all_t()](lv2_mutex &mutex) -> CellError {
        auto result = mutex.try_unlock(ppu);
        if (result == CELL_EBUSY) {
          // There are waiters: pass ownership to the next queued thread
          std::lock_guard lock(mutex.mutex);
          if (auto cpu = mutex.reown<ppu_thread>()) {
            if (cpu->state & cpu_flag::again) {
              // Savestate in progress: retry the syscall later
              ppu.state += cpu_flag::again;
              return {};
            }
            mutex.awake(cpu);
          }
          result = {};
        }
        notify.cleanup();
        return result;
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  if (mutex.ret) {
    return mutex.ret;
  }
  return CELL_OK;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,157 @@
#include "stdafx.h"
#include "sys_net/lv2_socket.h"
#include "sys_net/network_context.h"
LOG_CHANNEL(sys_net);
// Construct a socket with the given family/type/protocol triple.
lv2_socket::lv2_socket(lv2_socket_family family, lv2_socket_type type,
                       lv2_ip_protocol protocol) {
  this->family = family;
  this->protocol = protocol;
  this->type = type;
}
// Acquire the socket mutex; the returned guard releases it on destruction.
std::unique_lock<shared_mutex> lv2_socket::lock() {
  std::unique_lock<shared_mutex> guard(mutex);
  return guard;
}
// Trivial accessors for socket identity and state.
lv2_socket_family lv2_socket::get_family() const { return family; }
lv2_socket_type lv2_socket::get_type() const { return type; }
lv2_ip_protocol lv2_socket::get_protocol() const { return protocol; }
std::size_t lv2_socket::get_queue_size() const { return queue.size(); }
socket_type lv2_socket::get_socket() const { return native_socket; }
#ifdef _WIN32
// Windows-only: track in-progress non-blocking connect() state manually.
bool lv2_socket::is_connecting() const { return connecting; }
void lv2_socket::set_connecting(bool connecting) {
  this->connecting = connecting;
}
#endif
// Store the lv2 object id assigned by the ID manager.
void lv2_socket::set_lv2_id(u32 id) { lv2_id = id; }
// Snapshot of the poll events currently being waited for.
bs_t<lv2_socket::poll_t> lv2_socket::get_events() const {
  return events.load();
}
// Add event bits to the set being polled.
void lv2_socket::set_poll_event(bs_t<lv2_socket::poll_t> event) {
  events += event;
}
// Register a PPU thread to be woken via poll_cb when `event` occurs.
void lv2_socket::poll_queue(
    shared_ptr<ppu_thread> ppu, bs_t<lv2_socket::poll_t> event,
    std::function<bool(bs_t<lv2_socket::poll_t>)> poll_cb) {
  set_poll_event(event);
  queue.emplace_back(std::move(ppu), poll_cb);
  // Makes sure network_context thread is awaken
  const bool is_native_sock =
      type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM;
  if (is_native_sock) {
    auto &nc = g_fxo->get<network_context>();
    if (nc.num_polls.fetch_add(1) == 0) {
      nc.num_polls.notify_one();
    }
  }
}
// Remove every queued poll entry belonging to `ppu`; returns the number of
// entries removed.
u32 lv2_socket::clear_queue(ppu_thread *ppu) {
  std::lock_guard lock(mutex);
  u32 removed = 0;
  auto it = queue.begin();
  while (it != queue.end()) {
    if (it->first.get() == ppu) {
      it = queue.erase(it);
      ++removed;
    } else {
      ++it;
    }
  }
  if (queue.empty()) {
    events.store({});
  }
  if (removed && (type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM)) {
    // Makes sure network_context thread can go back to sleep if there is no
    // active polling
    const u32 prev_value =
        g_fxo->get<network_context>().num_polls.fetch_sub(removed);
    ensure(prev_value >= removed);
  }
  return removed;
}
// Translate native poll() results into lv2 poll events and run the queued
// callbacks; entries whose callback reports completion are removed.
void lv2_socket::handle_events(const pollfd &native_pfd,
                               [[maybe_unused]] bool unset_connecting) {
  bs_t<lv2_socket::poll_t> events_happening{};
  // Only report events that were actually requested (test_and_reset clears
  // the request bit at the same time)
  if (native_pfd.revents & (POLLIN | POLLHUP) &&
      events.test_and_reset(lv2_socket::poll_t::read))
    events_happening += lv2_socket::poll_t::read;
  if (native_pfd.revents & POLLOUT &&
      events.test_and_reset(lv2_socket::poll_t::write))
    events_happening += lv2_socket::poll_t::write;
  if (native_pfd.revents & POLLERR &&
      events.test_and_reset(lv2_socket::poll_t::error))
    events_happening += lv2_socket::poll_t::error;
  // Callbacks also run with no events when send/recv timeouts are configured
  // so they get a chance to expire
  if (events_happening || (!queue.empty() && (so_rcvtimeo || so_sendtimeo))) {
    std::lock_guard lock(mutex);
#ifdef _WIN32
    if (unset_connecting)
      set_connecting(false);
#endif
    u32 handled = 0;
    for (auto it = queue.begin(); it != queue.end();) {
      // The callback returns true when the wait is satisfied
      if (it->second(events_happening)) {
        it = queue.erase(it);
        handled++;
        continue;
      }
      it++;
    }
    if (handled &&
        (type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM)) {
      const u32 prev_value =
          g_fxo->get<network_context>().num_polls.fetch_sub(handled);
      ensure(prev_value >= handled);
    }
    if (queue.empty()) {
      events.store({});
    }
  }
}
// Hand the PPU thread to whichever network worker services this socket type,
// so it is woken on that worker's next pass.
void lv2_socket::queue_wake(ppu_thread *ppu) {
  switch (type) {
  case SYS_NET_SOCK_STREAM:
  case SYS_NET_SOCK_DGRAM: {
    g_fxo->get<network_context>().add_ppu_to_awake(ppu);
    return;
  }
  case SYS_NET_SOCK_DGRAM_P2P:
  case SYS_NET_SOCK_STREAM_P2P: {
    g_fxo->get<p2p_context>().add_ppu_to_awake(ppu);
    return;
  }
  default:
    return;
  }
}
// Thread-state assignment: tearing down the owning context closes the socket.
lv2_socket &lv2_socket::operator=(thread_state s) noexcept {
  if (s == thread_state::destroying_context) {
    close();
  }

  return *this;
}

lv2_socket::~lv2_socket() noexcept = default;

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,400 @@
#include "stdafx.h"
#include "Emu/NP/np_helpers.h"
#include "sys_net/lv2_socket_p2p.h"
#include "sys_net/network_context.h"
#include "sys_net/sys_net_helpers.h"
LOG_CHANNEL(sys_net);
// Fresh P2P socket: pre-populate the sockopt cache so that a later
// getsockopt(SO_TYPE) reports SYS_NET_SOCK_DGRAM_P2P.
lv2_socket_p2p::lv2_socket_p2p(lv2_socket_family family, lv2_socket_type type,
                               lv2_ip_protocol protocol)
    : lv2_socket(family, type, protocol) {
  sockopt_cache type_entry;
  type_entry.data._int = SYS_NET_SOCK_DGRAM_P2P;
  type_entry.len = 4;

  const u64 type_key =
      (static_cast<u64>(SYS_NET_SOL_SOCKET) << 32ull) | SYS_NET_SO_TYPE;
  sockopts[type_key] = type_entry;
}
// Savestate restore: base state first, then the bound address and any packets
// that were queued when the state was taken.
lv2_socket_p2p::lv2_socket_p2p(utils::serial &ar, lv2_socket_type type)
    : lv2_socket(make_exact(ar), type) {
  ar(port, vport, bound_addr);

  // Packets are serialized as a deque; feed them back into the queue.
  auto pending =
      ar.pop<std::deque<std::pair<sys_net_sockaddr_in_p2p, std::vector<u8>>>>();
  while (!pending.empty()) {
    data.push(std::move(pending.front()));
    pending.pop_front();
  }
}
// Savestate capture: the packet queue is not directly serializable, so it is
// drained (from a copy) into a deque first.
void lv2_socket_p2p::save(utils::serial &ar) {
  lv2_socket::save(ar, true);
  ar(port, vport, bound_addr);

  std::deque<std::pair<sys_net_sockaddr_in_p2p, std::vector<u8>>> pending;
  for (auto snapshot = ::as_rvalue(data); !snapshot.empty(); snapshot.pop()) {
    pending.push_back(std::move(snapshot.front()));
  }
  ar(pending);
}
// Queue an incoming datagram for this socket and satisfy any poller that was
// waiting for a read event.
void lv2_socket_p2p::handle_new_data(sys_net_sockaddr_in_p2p p2p_addr,
                                     std::vector<u8> p2p_data) {
  std::lock_guard lock(mutex);
  sys_net.trace("Received a P2P packet for vport %d and saved it",
                p2p_addr.sin_vport);
  data.push(std::make_pair(std::move(p2p_addr), std::move(p2p_data)));

  // Check if poll is happening
  if (events.test_and_reset(lv2_socket::poll_t::read)) {
    bs_t<lv2_socket::poll_t> happened = lv2_socket::poll_t::read;
    for (auto waiter = queue.begin(); waiter != queue.end();) {
      if (waiter->second(happened)) {
        waiter = queue.erase(waiter);
      } else {
        ++waiter;
      }
    }

    if (queue.empty()) {
      events.store({});
    }
  }
}
// Connection-oriented operations are meaningless on a datagram P2P socket;
// reaching any of these indicates a dispatch bug, hence the fatal log.
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr>
lv2_socket_p2p::accept([[maybe_unused]] bool is_lock) {
  sys_net.fatal("[P2P] accept() called on a P2P socket");
  return {};
}
std::optional<s32>
lv2_socket_p2p::connect([[maybe_unused]] const sys_net_sockaddr &addr) {
  sys_net.fatal("[P2P] connect() called on a P2P socket");
  return {};
}
s32 lv2_socket_p2p::connect_followup() {
  sys_net.fatal("[P2P] connect_followup() called on a P2P socket");
  return {};
}
std::pair<s32, sys_net_sockaddr> lv2_socket_p2p::getpeername() {
  sys_net.fatal("[P2P] getpeername() called on a P2P socket");
  return {};
}
s32 lv2_socket_p2p::listen([[maybe_unused]] s32 backlog) {
  sys_net.fatal("[P2P] listen() called on a P2P socket");
  return {};
}
// Bind this P2P socket to a (UDP port, virtual port) pair. All P2P sockets on
// the same UDP port share one native socket owned by nt_p2p_port; binding only
// registers this lv2 socket id under the chosen vport.
s32 lv2_socket_p2p::bind(const sys_net_sockaddr &addr) {
  const auto *psa_in_p2p =
      reinterpret_cast<const sys_net_sockaddr_in_p2p *>(&addr);
  u16 p2p_port = psa_in_p2p->sin_port;
  u16 p2p_vport = psa_in_p2p->sin_vport;
  sys_net.notice("[P2P] Trying to bind %s:%d:%d",
                 np::ip_to_string(std::bit_cast<u32>(psa_in_p2p->sin_addr)),
                 p2p_port, p2p_vport);
  // Port 0 is invalid; any other non-default port is accepted with a warning.
  if (p2p_port != SCE_NP_PORT) {
    if (p2p_port == 0) {
      return -SYS_NET_EINVAL;
    }
    sys_net.warning("[P2P] Attempting to bind a socket to a port != %d",
                    +SCE_NP_PORT);
  }
  socket_type real_socket{};
  auto &nc = g_fxo->get<p2p_context>();
  {
    // Lock order: list_p2p_ports_mutex first, then the port's vport mutex.
    std::lock_guard list_lock(nc.list_p2p_ports_mutex);
    nc.create_p2p_port(p2p_port);
    auto &pport = ::at32(nc.list_p2p_ports, p2p_port);
    real_socket = pport.p2p_socket;
    {
      std::lock_guard lock(pport.bound_p2p_vports_mutex);
      if (p2p_vport == 0) {
        // Find a free vport starting at 30000
        // NOTE(review): assumes a free vport exists; loops forever if all
        // vports >= 30000 are taken — TODO confirm this is acceptable.
        p2p_vport = 30000;
        while (pport.bound_p2p_vports.contains(p2p_vport)) {
          p2p_vport++;
        }
      }
      if (pport.bound_p2p_vports.contains(p2p_vport)) {
        // Check that all other sockets are SO_REUSEADDR or SO_REUSEPORT
        auto &bound_sockets = ::at32(pport.bound_p2p_vports, p2p_vport);
        if (!sys_net_helpers::all_reusable(bound_sockets)) {
          return -SYS_NET_EADDRINUSE;
        }
        bound_sockets.insert(lv2_id);
      } else {
        std::set<s32> bound_ports{lv2_id};
        pport.bound_p2p_vports.insert(
            std::make_pair(p2p_vport, std::move(bound_ports)));
      }
    }
  }
  {
    // Publish the binding on this socket under its own mutex.
    std::lock_guard lock(mutex);
    port = p2p_port;
    vport = p2p_vport;
    native_socket = real_socket;
    bound_addr = psa_in_p2p->sin_addr;
  }
  return CELL_OK;
}
// Report the locally bound address of this socket; an unbound socket yields
// an all-zero address.
std::pair<s32, sys_net_sockaddr> lv2_socket_p2p::getsockname() {
  std::lock_guard lock(mutex);

  // Unbound socket
  if (!native_socket) {
    return {CELL_OK, {}};
  }

  sys_net_sockaddr sn_addr{};
  auto *out = reinterpret_cast<sys_net_sockaddr_in_p2p *>(&sn_addr);
  out->sin_family = SYS_NET_AF_INET;
  out->sin_len = sizeof(sys_net_sockaddr_in);
  out->sin_addr = bound_addr;
  out->sin_port = port;
  out->sin_vport = vport;

  return {CELL_OK, sn_addr};
}
// P2P sockets have no native backing options; answers come from the cache
// filled by setsockopt() (and the SO_TYPE entry seeded by the constructor).
std::tuple<s32, lv2_socket::sockopt_data, u32>
lv2_socket_p2p::getsockopt(s32 level, s32 optname, u32 len) {
  std::lock_guard lock(mutex);
  const u64 key = (static_cast<u64>(level) << 32) | static_cast<u64>(optname);

  const auto found = sockopts.find(key);
  if (found == sockopts.end()) {
    sys_net.error("Unhandled getsockopt(level=%d, optname=%d, len=%d)", level,
                  optname, len);
    return {};
  }

  return {CELL_OK, found->second.data, found->second.len};
}
// Record a socket option in the cache (P2P sockets have no native backing
// options). SO_NBIO additionally updates the non-blocking flag.
//
// Fix: the previous code unconditionally read 4 bytes from optval (OOB read
// for shorter buffers) and memcpy'd optval.size() bytes into the fixed-size
// sockopt_data union (OOB write for longer buffers). Both accesses are now
// bounded; cache.len still reports the caller-provided length as before.
s32 lv2_socket_p2p::setsockopt(s32 level, s32 optname,
                               const std::vector<u8> &optval) {
  std::lock_guard lock(mutex);

  // Only interpret the value as a 32-bit integer when enough bytes exist.
  if (optval.size() >= sizeof(be_t<s32>)) {
    const int native_int =
        *reinterpret_cast<const be_t<s32> *>(optval.data());
    if (level == SYS_NET_SOL_SOCKET && optname == SYS_NET_SO_NBIO) {
      so_nbio = native_int;
    }
  }

  const u64 key = (static_cast<u64>(level) << 32) | static_cast<u64>(optname);

  sockopt_cache cache{};
  // Clamp the copy to the cache storage to avoid overflowing the union.
  const usz copy_len =
      optval.size() < sizeof(cache.data) ? optval.size() : sizeof(cache.data);
  memcpy(&cache.data._int, optval.data(), copy_len);
  cache.len = ::size32(optval);
  sockopts[key] = std::move(cache);
  return CELL_OK;
}
// Pop one queued datagram. Returns nullopt to make the caller block; a
// non-blocking socket (or MSG_DONTWAIT) fails immediately with EWOULDBLOCK.
std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>>
lv2_socket_p2p::recvfrom(s32 flags, u32 len, bool is_lock) {
  std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);
  if (is_lock) {
    lock.lock();
  }

  if (data.empty()) {
    if (so_nbio || (flags & SYS_NET_MSG_DONTWAIT)) {
      return {{-SYS_NET_EWOULDBLOCK, {}, {}}};
    }
    return std::nullopt;
  }

  sys_net.trace("[P2P] p2p_data for vport %d contains %d elements", vport,
                data.size());

  const auto &packet = data.front();

  // Truncate to the caller's buffer size; excess payload is discarded.
  std::vector<u8> out(len);
  const s32 copied = std::min(len, static_cast<u32>(packet.second.size()));
  memcpy(out.data(), packet.second.data(), copied);

  sys_net_sockaddr sn_addr;
  memcpy(&sn_addr, &packet.first, sizeof(sn_addr));
  data.pop();

  return {{copied, out, sn_addr}};
}
// Send one datagram to a peer: the payload is prefixed with the 6-byte P2P
// vport header (dst vport, src vport, flags) and written through the shared
// native socket of the bound UDP port. Returns the payload byte count on
// success, a negative sys_net error, or nullopt when the send buffer is full
// (caller should retry/block).
std::optional<s32>
lv2_socket_p2p::sendto(s32 flags, const std::vector<u8> &buf,
                       std::optional<sys_net_sockaddr> opt_sn_addr,
                       bool is_lock) {
  std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);
  if (is_lock) {
    lock.lock();
  }
  ensure(opt_sn_addr);
  // Fix: this previously was ensure(socket), which tested the address of the
  // global ::socket() function and therefore always passed. The intent is to
  // verify the socket was bound, which sets native_socket (see bind()).
  ensure(native_socket); // ensures it has been bound
  ensure(
      buf.size() <=
      static_cast<usz>(
          65535 -
          VPORT_P2P_HEADER_SIZE)); // catch games using full payload for future
                                   // fragmentation implementation if necessary
  const u16 p2p_port =
      reinterpret_cast<const sys_net_sockaddr_in *>(&*opt_sn_addr)->sin_port;
  const u16 p2p_vport =
      reinterpret_cast<const sys_net_sockaddr_in_p2p *>(&*opt_sn_addr)
          ->sin_vport;
  auto native_addr = sys_net_addr_to_native_addr(*opt_sn_addr);
  char ip_str[16];
  inet_ntop(AF_INET, &native_addr.sin_addr, ip_str, sizeof(ip_str));
  sys_net.trace("[P2P] Sending a packet to %s:%d:%d", ip_str, p2p_port,
                p2p_vport);
  // Build the outgoing frame: [dst vport][src vport][flags][payload].
  std::vector<u8> p2p_data(buf.size() + VPORT_P2P_HEADER_SIZE);
  const le_t<u16> p2p_vport_le = p2p_vport;
  const le_t<u16> src_vport_le = vport;
  const le_t<u16> p2p_flags_le = P2P_FLAG_P2P;
  memcpy(p2p_data.data(), &p2p_vport_le, sizeof(u16));
  memcpy(p2p_data.data() + sizeof(u16), &src_vport_le, sizeof(u16));
  memcpy(p2p_data.data() + sizeof(u16) + sizeof(u16), &p2p_flags_le,
         sizeof(u16));
  memcpy(p2p_data.data() + VPORT_P2P_HEADER_SIZE, buf.data(), buf.size());
  int native_flags = 0;
  if (flags & SYS_NET_MSG_WAITALL) {
    native_flags |= MSG_WAITALL;
  }
  auto native_result = np::sendto_possibly_ipv6(
      native_socket, reinterpret_cast<const char *>(p2p_data.data()),
      ::size32(p2p_data), &native_addr, native_flags);
  if (native_result >= 0) {
    // Report only the payload bytes sent, not the header.
    return {std::max<s32>(native_result - VPORT_P2P_HEADER_SIZE, 0l)};
  }
  s32 result = get_last_error(!so_nbio && (flags & SYS_NET_MSG_DONTWAIT) == 0);
  if (result) {
    return {-result};
  }
  // Note that this can only happen if the send buffer is full
  return std::nullopt;
}
// Scatter-gather send is not implemented for P2P sockets yet.
std::optional<s32>
lv2_socket_p2p::sendmsg([[maybe_unused]] s32 flags,
                        [[maybe_unused]] const sys_net_msghdr &msg,
                        [[maybe_unused]] bool is_lock) {
  sys_net.todo("lv2_socket_p2p::sendmsg");
  return std::nullopt;
}
// Unregister this socket from the P2P context; the vport entry is dropped
// entirely when this was its last socket. The shared native socket itself is
// owned by nt_p2p_port and is not closed here.
void lv2_socket_p2p::close() {
  // Never bound: nothing registered anywhere.
  if (!port || !vport) {
    return;
  }

  if (!g_fxo->is_init<p2p_context>()) {
    return;
  }

  auto &nc = g_fxo->get<p2p_context>();
  std::lock_guard list_lock(nc.list_p2p_ports_mutex);
  if (!nc.list_p2p_ports.contains(port)) {
    return;
  }

  auto &p2p_port = ::at32(nc.list_p2p_ports, port);
  std::lock_guard vport_lock(p2p_port.bound_p2p_vports_mutex);
  if (!p2p_port.bound_p2p_vports.contains(vport)) {
    return;
  }

  auto &bound_sockets = ::at32(p2p_port.bound_p2p_vports, vport);
  bound_sockets.erase(lv2_id);
  if (bound_sockets.empty()) {
    p2p_port.bound_p2p_vports.erase(vport);
  }
}
// Not implemented for P2P sockets yet; report success regardless.
s32 lv2_socket_p2p::shutdown([[maybe_unused]] s32 how) {
  sys_net.todo("[P2P] shutdown");
  return CELL_OK;
}
// Answer a poll request without touching the native socket: readability is
// derived from the queued-packet list, writes never block on a datagram.
s32 lv2_socket_p2p::poll(sys_net_pollfd &sn_pfd,
                         [[maybe_unused]] pollfd &native_pfd) {
  std::lock_guard lock(mutex);
  ensure(vport); // only a bound P2P socket may be polled

  // Readable as soon as at least one datagram is queued.
  if ((sn_pfd.events & SYS_NET_POLLIN) && !data.empty()) {
    sys_net.trace("[P2P] p2p_data for vport %d contains %d elements", vport,
                  data.size());
    sn_pfd.revents |= SYS_NET_POLLIN;
  }

  // Data can always be written on a dgram socket
  if (sn_pfd.events & SYS_NET_POLLOUT) {
    sn_pfd.revents |= SYS_NET_POLLOUT;
  }

  return sn_pfd.revents ? 1 : 0;
}
// select() counterpart of poll(): read readiness comes from the packet queue,
// write readiness is unconditional, exceptional state never fires.
std::tuple<bool, bool, bool>
lv2_socket_p2p::select(bs_t<lv2_socket::poll_t> selected,
                       [[maybe_unused]] pollfd &native_pfd) {
  std::lock_guard lock(mutex);

  bool read_set = false;
  // Only a bound socket with queued data is readable.
  if ((selected & lv2_socket::poll_t::read) && vport && !data.empty()) {
    sys_net.trace("[P2P] p2p_data for vport %d contains %d elements", vport,
                  data.size());
    read_set = true;
  }

  const bool write_set = (selected & lv2_socket::poll_t::write) ? true : false;

  return {read_set, write_set, false};
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,137 @@
#include "stdafx.h"
#include "Emu/NP/vport0.h"
#include "sys_net/lv2_socket_raw.h"
LOG_CHANNEL(sys_net);
// One global flag per tag type T, used by LOG_ONCE to emit a given todo
// message only the first time its code path is hit.
template <typename T> struct socket_raw_logging {
  socket_raw_logging() = default;
  socket_raw_logging(const socket_raw_logging &) = delete;
  socket_raw_logging &operator=(const socket_raw_logging &) = delete;
  atomic_t<bool> logged = false;
};
// Log `message` once per process for the tag class `raw_var` (declared
// implicitly by the `class raw_var` template argument).
#define LOG_ONCE(raw_var, message) \
  if (!g_fxo->get<socket_raw_logging<class raw_var>>().logged.exchange( \
          true)) { \
    sys_net.todo(message); \
  }
// Raw sockets are stubbed: construction and savestate handling defer entirely
// to the lv2_socket base, with no raw-specific state to persist.
lv2_socket_raw::lv2_socket_raw(lv2_socket_family family, lv2_socket_type type,
                               lv2_ip_protocol protocol)
    : lv2_socket(family, type, protocol) {}
lv2_socket_raw::lv2_socket_raw(utils::serial &ar, lv2_socket_type type)
    : lv2_socket(make_exact(ar), type) {}
void lv2_socket_raw::save(utils::serial &ar) { lv2_socket::save(ar, true); }
// All raw socket operations below are unimplemented stubs: the two
// connection-oriented calls log fatally (they should never be dispatched
// here), the rest log a todo once and return empty defaults.
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr>
lv2_socket_raw::accept([[maybe_unused]] bool is_lock) {
  sys_net.fatal("[RAW] accept() called on a RAW socket");
  return {};
}
std::optional<s32>
lv2_socket_raw::connect([[maybe_unused]] const sys_net_sockaddr &addr) {
  sys_net.fatal("[RAW] connect() called on a RAW socket");
  return CELL_OK;
}
s32 lv2_socket_raw::connect_followup() {
  sys_net.fatal("[RAW] connect_followup() called on a RAW socket");
  return CELL_OK;
}
std::pair<s32, sys_net_sockaddr> lv2_socket_raw::getpeername() {
  LOG_ONCE(raw_getpeername, "[RAW] getpeername() called on a RAW socket");
  return {};
}
s32 lv2_socket_raw::listen([[maybe_unused]] s32 backlog) {
  LOG_ONCE(raw_listen, "[RAW] listen() called on a RAW socket");
  return {};
}
s32 lv2_socket_raw::bind([[maybe_unused]] const sys_net_sockaddr &addr) {
  LOG_ONCE(raw_bind, "lv2_socket_raw::bind");
  return {};
}
std::pair<s32, sys_net_sockaddr> lv2_socket_raw::getsockname() {
  LOG_ONCE(raw_getsockname, "lv2_socket_raw::getsockname");
  return {};
}
std::tuple<s32, lv2_socket::sockopt_data, u32>
lv2_socket_raw::getsockopt([[maybe_unused]] s32 level,
                           [[maybe_unused]] s32 optname,
                           [[maybe_unused]] u32 len) {
  LOG_ONCE(raw_getsockopt, "lv2_socket_raw::getsockopt");
  return {};
}
// setsockopt is only partially implemented: SO_NBIO is honored so callers can
// switch the socket to non-blocking, everything else is silently accepted.
s32 lv2_socket_raw::setsockopt(s32 level, s32 optname,
                               const std::vector<u8> &optval) {
  LOG_ONCE(raw_setsockopt, "lv2_socket_raw::setsockopt");
  // TODO
  // NOTE(review): reads 4 bytes from optval without checking optval.size()
  // >= 4 — confirm callers always pass at least a 32-bit value.
  int native_int = *reinterpret_cast<const be_t<s32> *>(optval.data());
  if (level == SYS_NET_SOL_SOCKET && optname == SYS_NET_SO_NBIO) {
    so_nbio = native_int;
  }
  return {};
}
// Stub: never returns data, but honors non-blocking semantics by failing
// with EWOULDBLOCK instead of blocking forever.
std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>>
lv2_socket_raw::recvfrom(s32 flags, [[maybe_unused]] u32 len,
                         [[maybe_unused]] bool is_lock) {
  LOG_ONCE(raw_recvfrom, "lv2_socket_raw::recvfrom");
  if (so_nbio || (flags & SYS_NET_MSG_DONTWAIT)) {
    return {{-SYS_NET_EWOULDBLOCK, {}, {}}};
  }
  return {};
}
// Stub: pretends the whole buffer was sent so callers make progress.
std::optional<s32> lv2_socket_raw::sendto(
    [[maybe_unused]] s32 flags, [[maybe_unused]] const std::vector<u8> &buf,
    [[maybe_unused]] std::optional<sys_net_sockaddr> opt_sn_addr,
    [[maybe_unused]] bool is_lock) {
  LOG_ONCE(raw_sendto, "lv2_socket_raw::sendto");
  return ::size32(buf);
}
std::optional<s32>
lv2_socket_raw::sendmsg([[maybe_unused]] s32 flags,
                        [[maybe_unused]] const sys_net_msghdr &msg,
                        [[maybe_unused]] bool is_lock) {
  LOG_ONCE(raw_sendmsg, "lv2_socket_raw::sendmsg");
  return {};
}
void lv2_socket_raw::close() { LOG_ONCE(raw_close, "lv2_socket_raw::close"); }
s32 lv2_socket_raw::shutdown([[maybe_unused]] s32 how) {
  LOG_ONCE(raw_shutdown, "lv2_socket_raw::shutdown");
  return {};
}
s32 lv2_socket_raw::poll([[maybe_unused]] sys_net_pollfd &sn_pfd,
                         [[maybe_unused]] pollfd &native_pfd) {
  LOG_ONCE(raw_poll, "lv2_socket_raw::poll");
  return {};
}
std::tuple<bool, bool, bool>
lv2_socket_raw::select([[maybe_unused]] bs_t<lv2_socket::poll_t> selected,
                       [[maybe_unused]] pollfd &native_pfd) {
  LOG_ONCE(raw_select, "lv2_socket_raw::select");
  return {};
}

View file

@ -0,0 +1,299 @@
#include "stdafx.h"
#include "Emu/NP/ip_address.h"
#include "cellos/sys_sync.h"
#include "rpcsx/fw/ps3/sceNp.h" // for SCE_NP_PORT
#include "sys_net/network_context.h"
#include "sys_net/sys_net_helpers.h"
LOG_CHANNEL(sys_net);
// Used by RPCN to send signaling packets to RPCN server(for UDP hole punching)
bool send_packet_from_p2p_port_ipv4(const std::vector<u8> &data,
const sockaddr_in &addr) {
auto &nc = g_fxo->get<p2p_context>();
{
std::lock_guard list_lock(nc.list_p2p_ports_mutex);
if (nc.list_p2p_ports.contains(SCE_NP_PORT)) {
auto &def_port = ::at32(nc.list_p2p_ports, SCE_NP_PORT);
if (def_port.is_ipv6) {
const auto addr6 = np::sockaddr_to_sockaddr6(addr);
if (::sendto(def_port.p2p_socket,
reinterpret_cast<const char *>(data.data()),
::size32(data), 0,
reinterpret_cast<const sockaddr *>(&addr6),
sizeof(sockaddr_in6)) == -1) {
sys_net.error(
"Failed to send IPv4 signaling packet on IPv6 socket: %s",
get_last_error(false, false));
return false;
}
} else if (::sendto(def_port.p2p_socket,
reinterpret_cast<const char *>(data.data()),
::size32(data), 0,
reinterpret_cast<const sockaddr *>(&addr),
sizeof(sockaddr_in)) == -1) {
sys_net.error("Failed to send signaling packet on IPv4 socket: %s",
get_last_error(false, false));
return false;
}
} else {
sys_net.error("send_packet_from_p2p_port_ipv4: port %d not present",
+SCE_NP_PORT);
return false;
}
}
return true;
}
// IPv6 variant of the signaling-packet sender: writes through the master P2P
// socket bound to SCE_NP_PORT, which must itself be an IPv6 socket.
bool send_packet_from_p2p_port_ipv6(const std::vector<u8> &data,
                                    const sockaddr_in6 &addr) {
  auto &nc = g_fxo->get<p2p_context>();
  std::lock_guard list_lock(nc.list_p2p_ports_mutex);

  if (!nc.list_p2p_ports.contains(SCE_NP_PORT)) {
    sys_net.error("send_packet_from_p2p_port_ipv6: port %d not present",
                  +SCE_NP_PORT);
    return false;
  }

  auto &def_port = ::at32(nc.list_p2p_ports, SCE_NP_PORT);
  ensure(def_port.is_ipv6);

  if (::sendto(def_port.p2p_socket,
               reinterpret_cast<const char *>(data.data()), ::size32(data), 0,
               reinterpret_cast<const sockaddr *>(&addr),
               sizeof(sockaddr_in6)) == -1) {
    sys_net.error("Failed to send signaling packet on IPv6 socket: %s",
                  get_last_error(false, false));
    return false;
  }

  return true;
}
// Drain and return all RPCN messages accumulated on the master P2P port;
// yields an empty list when the port does not exist.
std::vector<std::vector<u8>> get_rpcn_msgs() {
  std::vector<std::vector<u8>> msgs;
  auto &nc = g_fxo->get<p2p_context>();

  std::lock_guard list_lock(nc.list_p2p_ports_mutex);
  if (!nc.list_p2p_ports.contains(SCE_NP_PORT)) {
    sys_net.error("get_rpcn_msgs: port %d not present", +SCE_NP_PORT);
    return msgs;
  }

  auto &def_port = ::at32(nc.list_p2p_ports, SCE_NP_PORT);
  {
    std::lock_guard lock(def_port.s_rpcn_mutex);
    msgs = std::move(def_port.rpcn_msgs);
    def_port.rpcn_msgs.clear();
  }

  return msgs;
}
// Drain and return all signaling messages accumulated on the master P2P port;
// yields an empty list when the port does not exist.
std::vector<signaling_message> get_sign_msgs() {
  std::vector<signaling_message> msgs;
  auto &nc = g_fxo->get<p2p_context>();

  std::lock_guard list_lock(nc.list_p2p_ports_mutex);
  if (!nc.list_p2p_ports.contains(SCE_NP_PORT)) {
    sys_net.error("get_sign_msgs: port %d not present", +SCE_NP_PORT);
    return msgs;
  }

  auto &def_port = ::at32(nc.list_p2p_ports, SCE_NP_PORT);
  {
    std::lock_guard lock(def_port.s_sign_mutex);
    msgs = std::move(def_port.sign_msgs);
    def_port.sign_msgs.clear();
  }

  return msgs;
}
namespace np {
void init_np_handler_dependencies();
}
// Register a PPU thread to be woken on the next wake_threads() pass.
void base_network_thread::add_ppu_to_awake(ppu_thread *ppu) {
  std::lock_guard lock(mutex_ppu_to_awake);
  ppu_to_awake.push_back(ppu);
}
// Remove every pending-wake entry registered for the given PPU thread.
void base_network_thread::del_ppu_to_awake(ppu_thread *ppu) {
  std::lock_guard lock(mutex_ppu_to_awake);

  auto entry = ppu_to_awake.begin();
  while (entry != ppu_to_awake.end()) {
    if (*entry == ppu) {
      entry = ppu_to_awake.erase(entry);
    } else {
      ++entry;
    }
  }
}
// Wake every PPU thread queued via add_ppu_to_awake() and hand them back to
// the lv2 scheduler; the pending list is cleared afterwards.
void base_network_thread::wake_threads() {
  std::lock_guard lock(mutex_ppu_to_awake);
  // NOTE(review): std::unique only collapses *adjacent* duplicates and the
  // list is not sorted — a PPU queued twice non-adjacently would be processed
  // twice. Confirm duplicates are always consecutive or double-wake is benign.
  ppu_to_awake.erase(std::unique(ppu_to_awake.begin(), ppu_to_awake.end()),
                     ppu_to_awake.end());
  for (ppu_thread *ppu : ppu_to_awake) {
    // Drop the thread's poll registrations before scheduling it again.
    network_clear_queue(*ppu);
    lv2_obj::append(ppu);
  }
  if (!ppu_to_awake.empty()) {
    ppu_to_awake.clear();
    lv2_obj::awake_all();
  }
}
// P2P worker startup pulls in the NP handler dependencies it relies on.
p2p_thread::p2p_thread() { np::init_np_handler_dependencies(); }
// Ensure the default NP port (SCE_NP_PORT) exists and is bound.
void p2p_thread::bind_sce_np_port() {
  std::lock_guard list_lock(list_p2p_ports_mutex);
  create_p2p_port(SCE_NP_PORT);
}
// Main loop of the TCP/UDP network worker: polls the native sockets of all
// active lv2 sockets, dispatches their events, then rebuilds the fd list for
// the next iteration (fds are filled at the *end* of each pass and consumed
// at the start of the next one).
void network_thread::operator()() {
  std::vector<shared_ptr<lv2_socket>> socklist;
  socklist.reserve(lv2_socket::id_count);
  {
    // Start from a clean wake list (e.g. after savestate resume).
    std::lock_guard lock(mutex_ppu_to_awake);
    ppu_to_awake.clear();
  }
  std::vector<::pollfd> fds(lv2_socket::id_count);
#ifdef _WIN32
  std::vector<bool> connecting(lv2_socket::id_count);
  std::vector<bool> was_connecting(lv2_socket::id_count);
#endif
  while (thread_ctrl::state() != thread_state::aborting) {
    // Sleep until at least one socket registers an active poll.
    if (!num_polls) {
      thread_ctrl::wait_on(num_polls, 0);
      continue;
    }
    ensure(socklist.size() <= lv2_socket::id_count);
    // Wait with 1ms timeout
#ifdef _WIN32
    windows_poll(fds, ::size32(socklist), 1, connecting);
#else
    ::poll(fds.data(), socklist.size(), 1);
#endif
    std::lock_guard lock(mutex_thread_loop);
    // fds[i] corresponds to socklist[i] as built at the end of the previous
    // iteration.
    for (usz i = 0; i < socklist.size(); i++) {
#ifdef _WIN32
      socklist[i]->handle_events(fds[i], was_connecting[i] && !connecting[i]);
#else
      socklist[i]->handle_events(fds[i]);
#endif
    }
    wake_threads();
    socklist.clear();
    // Obtain all native active sockets
    idm::select<lv2_socket>([&](u32 id, lv2_socket &s) {
      if (s.get_type() == SYS_NET_SOCK_DGRAM ||
          s.get_type() == SYS_NET_SOCK_STREAM) {
        socklist.emplace_back(idm::get_unlocked<lv2_socket>(id));
      }
    });
    // Fill the pollfd array for the next pass; sockets with no requested
    // events get fd = -1 so the native poll skips them.
    for (usz i = 0; i < socklist.size(); i++) {
      auto events = socklist[i]->get_events();
      fds[i].fd = events ? socklist[i]->get_socket() : -1;
      fds[i].events = (events & lv2_socket::poll_t::read ? POLLIN : 0) |
                      (events & lv2_socket::poll_t::write ? POLLOUT : 0) | 0;
      fds[i].revents = 0;
#ifdef _WIN32
      const auto cur_connecting = socklist[i]->is_connecting();
      was_connecting[i] = cur_connecting;
      connecting[i] = cur_connecting;
#endif
    }
  }
}
// Must be used under list_p2p_ports_mutex lock!
// Create (and bind) the shared UDP socket for a P2P port if it does not exist
// yet; the first created port also wakes the p2p_thread main loop.
void p2p_thread::create_p2p_port(u16 p2p_port) {
  if (list_p2p_ports.contains(p2p_port)) {
    return;
  }

  list_p2p_ports.emplace(std::piecewise_construct,
                         std::forward_as_tuple(p2p_port),
                         std::forward_as_tuple(p2p_port));

  if (num_p2p_ports.fetch_add(1) == 0) {
    num_p2p_ports.notify_one();
  }
}
// Main loop of the P2P worker: polls every per-port master UDP socket with a
// 1ms timeout and drains all pending datagrams from any socket that became
// readable, then wakes the PPU threads that were waiting on them.
void p2p_thread::operator()() {
  std::vector<::pollfd> p2p_fd(lv2_socket::id_count);
  while (thread_ctrl::state() != thread_state::aborting) {
    // Sleep until the first P2P port is created.
    if (!num_p2p_ports) {
      thread_ctrl::wait_on(num_p2p_ports, 0);
      continue;
    }
    // Check P2P sockets for incoming packets
    auto num_p2p_sockets = 0;
    std::memset(p2p_fd.data(), 0, p2p_fd.size() * sizeof(::pollfd));
    {
      auto set_fd = [&](socket_type socket) {
        p2p_fd[num_p2p_sockets].events = POLLIN;
        p2p_fd[num_p2p_sockets].revents = 0;
        p2p_fd[num_p2p_sockets].fd = socket;
        num_p2p_sockets++;
      };
      std::lock_guard lock(list_p2p_ports_mutex);
      for (const auto &[_, p2p_port] : list_p2p_ports) {
        set_fd(p2p_port.p2p_socket);
      }
    }
#ifdef _WIN32
    const auto ret_p2p = WSAPoll(p2p_fd.data(), num_p2p_sockets, 1);
#else
    const auto ret_p2p = ::poll(p2p_fd.data(), num_p2p_sockets, 1);
#endif
    if (ret_p2p > 0) {
      // fd_index tracks map iteration order, which matches the order the fds
      // were registered above (map stays locked in between only per-block;
      // ports are never removed, so indices remain aligned).
      std::lock_guard lock(list_p2p_ports_mutex);
      auto fd_index = 0;
      auto process_fd = [&](nt_p2p_port &p2p_port) {
        if ((p2p_fd[fd_index].revents & POLLIN) == POLLIN ||
            (p2p_fd[fd_index].revents & POLLRDNORM) == POLLRDNORM) {
          // Drain the socket completely before moving on.
          while (p2p_port.recv_data())
            ;
        }
        fd_index++;
      };
      for (auto &[_, p2p_port] : list_p2p_ports) {
        process_fd(p2p_port);
      }
      wake_threads();
    } else if (ret_p2p < 0) {
      sys_net.error("[P2P] Error poll on master P2P socket: %d",
                    get_last_error(false));
    }
  }
}

View file

@ -0,0 +1,377 @@
#include "sys_net/nt_p2p_port.h"
#include "Emu/NP/ip_address.h"
#include "Emu/NP/np_handler.h"
#include "Emu/NP/signaling_handler.h"
#include "Emu/NP/vport0.h"
#include "stdafx.h"
#include "sys_net/lv2_socket_p2ps.h"
#include "sys_net/sys_net_helpers.h"
LOG_CHANNEL(sys_net);
namespace sys_net_helpers {
// Returns true only if every socket in the set has SO_REUSEADDR or
// SO_REUSEPORT enabled (read back through the sockopt cache). A socket id
// that no longer resolves also yields false via idm::check's return value.
bool all_reusable(const std::set<s32> &sock_ids) {
  for (const s32 sock_id : sock_ids) {
    const auto [_, reusable] =
        idm::check<lv2_socket>(sock_id, [&](lv2_socket &sock) -> bool {
          auto [res_reuseaddr, optval_reuseaddr, optlen_reuseaddr] =
              sock.getsockopt(SYS_NET_SOL_SOCKET, SYS_NET_SO_REUSEADDR,
                              sizeof(s32));
          auto [res_reuseport, optval_reuseport, optlen_reuseport] =
              sock.getsockopt(SYS_NET_SOL_SOCKET, SYS_NET_SO_REUSEPORT,
                              sizeof(s32));
          // An option counts only when it was cached with a 4-byte value
          // and that value is non-zero.
          const bool reuse_addr =
              optlen_reuseaddr == 4 && !!optval_reuseaddr._int;
          const bool reuse_port =
              optlen_reuseport == 4 && !!optval_reuseport._int;
          return (reuse_addr || reuse_port);
        });
    if (!reusable) {
      return false;
    }
  }
  return true;
}
} // namespace sys_net_helpers
// Create and bind the single native UDP socket shared by every P2P socket on
// this port. Prefers an IPv6 socket (with IPV6_V6ONLY disabled so IPv4 peers
// still work) when the host supports it, and registers a UPnP port mapping.
// Throws on any native failure since the port is unusable without its socket.
nt_p2p_port::nt_p2p_port(u16 port) : port(port) {
  is_ipv6 = np::is_ipv6_supported();
  // Creates and bind P2P Socket
  p2p_socket = is_ipv6 ? ::socket(AF_INET6, SOCK_DGRAM, 0)
                       : ::socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
#ifdef _WIN32
  if (p2p_socket == INVALID_SOCKET)
#else
  if (p2p_socket == -1)
#endif
    fmt::throw_exception("Failed to create DGRAM socket for P2P socket: %s!",
                         get_last_error(true));
  np::set_socket_non_blocking(p2p_socket);
  u32 optval = 131072; // value obtained from DECR for a SOCK_DGRAM_P2P
                       // socket(should maybe be bigger for actual socket?)
  if (setsockopt(p2p_socket, SOL_SOCKET, SO_RCVBUF,
                 reinterpret_cast<const char *>(&optval), sizeof(optval)) != 0)
    fmt::throw_exception("Error setsockopt SO_RCVBUF on P2P socket: %s",
                         get_last_error(true));
  int ret_bind = 0;
  // Bind in network byte order.
  const u16 be_port = std::bit_cast<u16, be_t<u16>>(port);
  if (is_ipv6) {
    // Some OS(Windows, maybe more) will only support IPv6 adressing by default
    // and we need IPv4 over IPv6
    optval = 0;
    if (setsockopt(p2p_socket, IPPROTO_IPV6, IPV6_V6ONLY,
                   reinterpret_cast<const char *>(&optval),
                   sizeof(optval)) != 0)
      fmt::throw_exception("Error setsockopt IPV6_V6ONLY on P2P socket: %s",
                           get_last_error(true));
    ::sockaddr_in6 p2p_ipv6_addr{.sin6_family = AF_INET6, .sin6_port = be_port};
    ret_bind = ::bind(p2p_socket, reinterpret_cast<sockaddr *>(&p2p_ipv6_addr),
                      sizeof(p2p_ipv6_addr));
  } else {
    ::sockaddr_in p2p_ipv4_addr{.sin_family = AF_INET, .sin_port = be_port};
    ret_bind = ::bind(p2p_socket, reinterpret_cast<sockaddr *>(&p2p_ipv4_addr),
                      sizeof(p2p_ipv4_addr));
  }
  if (ret_bind == -1)
    fmt::throw_exception("Failed to bind DGRAM socket to %d for P2P: %s!", port,
                         get_last_error(true));
  auto &nph = g_fxo->get<named_thread<np::np_handler>>();
  nph.upnp_add_port_mapping(port, "UDP");
  sys_net.notice("P2P port %d was bound!", port);
}
// Closes the shared native socket for this port.
nt_p2p_port::~nt_p2p_port() { np::close_socket(p2p_socket); }
// Trace-log the header fields of an encapsulated P2PS "TCP" packet.
void nt_p2p_port::dump_packet(p2ps_encapsulated_tcp *tcph) {
  sys_net.trace("PACKET DUMP:\nsrc_port: %d\ndst_port: %d\nflags: %d\nseq: "
                "%d\nack: %d\nlen: %d",
                tcph->src_port, tcph->dst_port, tcph->flags, tcph->seq,
                tcph->ack, tcph->length);
}
// Must be used under bound_p2p_vports_mutex lock
// Hands out ephemeral vports sequentially, starting at 30000.
u16 nt_p2p_port::get_port() {
  if (binding_port == 0) {
    binding_port = 30000;
  }
  return binding_port++;
}
// Dispatch a packet belonging to an established P2PS stream to its socket.
// Returns false if the socket no longer exists or rejected the packet.
bool nt_p2p_port::handle_connected(s32 sock_id,
                                   p2ps_encapsulated_tcp *tcp_header, u8 *data,
                                   ::sockaddr_storage *op_addr) {
  const auto sock =
      idm::check<lv2_socket>(sock_id, [&](lv2_socket &sock) -> bool {
        // Only STREAM_P2P sockets may be registered in bound_p2p_streams.
        ensure(sock.get_type() == SYS_NET_SOCK_STREAM_P2P);
        auto &sock_p2ps = reinterpret_cast<lv2_socket_p2ps &>(sock);
        return sock_p2ps.handle_connected(tcp_header, data, op_addr, this);
      });
  if (!sock) {
    sys_net.error("[P2PS] Couldn't find the socket!");
    return false;
  }
  if (!sock.ret) {
    sys_net.error("[P2PS] handle_connected() failed!");
    return false;
  }
  return true;
}
// Forward a connection-setup packet to a listening P2PS socket, if that
// socket still exists.
bool nt_p2p_port::handle_listening(s32 sock_id,
                                   p2ps_encapsulated_tcp *tcp_header, u8 *data,
                                   ::sockaddr_storage *op_addr) {
  auto sock = idm::get_unlocked<lv2_socket>(sock_id);
  if (!sock) {
    return false;
  }

  auto &listener = reinterpret_cast<lv2_socket_p2ps &>(*sock);
  return listener.handle_listening(tcp_header, data, op_addr);
}
// Receive and dispatch one datagram from the shared native socket.
// Returns false when nothing was read (so the caller stops draining) and true
// whenever a datagram was consumed — even if it was malformed or unroutable.
// Routing is based on the little-endian vport header:
//   vport 0          -> RPCN / signaling control traffic
//   P2P_FLAG_P2P     -> datagram P2P sockets bound to that vport
//   P2P_FLAG_P2PS    -> encapsulated "TCP" stream sockets (connected/listening)
bool nt_p2p_port::recv_data() {
  ::sockaddr_storage native_addr{};
  ::socklen_t native_addrlen = sizeof(native_addr);
  const auto recv_res = ::recvfrom(
      p2p_socket, reinterpret_cast<char *>(p2p_recv_data.data()),
      ::size32(p2p_recv_data), 0,
      reinterpret_cast<struct sockaddr *>(&native_addr), &native_addrlen);
  if (recv_res == -1) {
    auto lerr = get_last_error(false);
    // EWOULDBLOCK/EINPROGRESS just mean the socket is drained.
    if (lerr != SYS_NET_EINPROGRESS && lerr != SYS_NET_EWOULDBLOCK)
      sys_net.error("Error recvfrom on %s P2P socket: %d",
                    is_ipv6 ? "IPv6" : "IPv4", lerr);
    return false;
  }
  if (recv_res < static_cast<s32>(sizeof(u16))) {
    sys_net.error("Received badly formed packet on P2P port(no vport)!");
    return true;
  }
  u16 dst_vport = reinterpret_cast<le_t<u16> &>(p2p_recv_data[0]);
  if (is_ipv6) {
    // Normalize the source address to IPv4 (IPv4-mapped IPv6 senders).
    const auto *addr_ipv6 = reinterpret_cast<sockaddr_in6 *>(&native_addr);
    const auto addr_ipv4 = np::sockaddr6_to_sockaddr(*addr_ipv6);
    native_addr = {};
    std::memcpy(&native_addr, &addr_ipv4, sizeof(addr_ipv4));
  }
  if (dst_vport == 0) {
    // vport 0 carries emulator control traffic, tagged by a subset byte.
    if (recv_res < VPORT_0_HEADER_SIZE) {
      sys_net.error("Bad vport 0 packet(no subset)!");
      return true;
    }
    const u8 subset = p2p_recv_data[2];
    const auto data_size = recv_res - VPORT_0_HEADER_SIZE;
    std::vector<u8> vport_0_data(p2p_recv_data.data() + VPORT_0_HEADER_SIZE,
                                 p2p_recv_data.data() + VPORT_0_HEADER_SIZE +
                                     data_size);
    switch (subset) {
    case SUBSET_RPCN: {
      std::lock_guard lock(s_rpcn_mutex);
      rpcn_msgs.push_back(std::move(vport_0_data));
      return true;
    }
    case SUBSET_SIGNALING: {
      signaling_message msg;
      msg.src_addr =
          reinterpret_cast<struct sockaddr_in *>(&native_addr)->sin_addr.s_addr;
      msg.src_port = std::bit_cast<u16, be_t<u16>>(
          reinterpret_cast<struct sockaddr_in *>(&native_addr)->sin_port);
      msg.data = std::move(vport_0_data);
      {
        std::lock_guard lock(s_sign_mutex);
        sign_msgs.push_back(std::move(msg));
      }
      // Notify the signaling handler that a message is pending.
      auto &sigh = g_fxo->get<named_thread<signaling_handler>>();
      sigh.wake_up();
      return true;
    }
    default: {
      sys_net.error("Invalid vport 0 subset!");
      return true;
    }
    }
  }
  if (recv_res < VPORT_P2P_HEADER_SIZE) {
    return true;
  }
  const u16 src_vport =
      *reinterpret_cast<le_t<u16> *>(p2p_recv_data.data() + sizeof(u16));
  const u16 vport_flags = *reinterpret_cast<le_t<u16> *>(
      p2p_recv_data.data() + sizeof(u16) + sizeof(u16));
  std::vector<u8> p2p_data(recv_res - VPORT_P2P_HEADER_SIZE);
  memcpy(p2p_data.data(), p2p_recv_data.data() + VPORT_P2P_HEADER_SIZE,
         p2p_data.size());
  if (vport_flags & P2P_FLAG_P2P) {
    // Datagram P2P: deliver to every socket bound on the destination vport.
    std::lock_guard lock(bound_p2p_vports_mutex);
    if (bound_p2p_vports.contains(dst_vport)) {
      sys_net_sockaddr_in_p2p p2p_addr{};
      p2p_addr.sin_len = sizeof(sys_net_sockaddr_in);
      p2p_addr.sin_family = SYS_NET_AF_INET;
      p2p_addr.sin_addr = std::bit_cast<be_t<u32>, u32>(
          reinterpret_cast<struct sockaddr_in *>(&native_addr)
              ->sin_addr.s_addr);
      p2p_addr.sin_vport = src_vport;
      p2p_addr.sin_port = std::bit_cast<be_t<u16>, u16>(
          reinterpret_cast<struct sockaddr_in *>(&native_addr)->sin_port);
      auto &bound_sockets = ::at32(bound_p2p_vports, dst_vport);
      for (const auto sock_id : bound_sockets) {
        const auto sock =
            idm::check<lv2_socket>(sock_id, [&](lv2_socket &sock) {
              ensure(sock.get_type() == SYS_NET_SOCK_DGRAM_P2P);
              auto &sock_p2p = reinterpret_cast<lv2_socket_p2p &>(sock);
              sock_p2p.handle_new_data(p2p_addr, p2p_data);
            });
        if (!sock) {
          // Stale registration: clean it up as we go.
          sys_net.error("Socket %d found in bound_p2p_vports didn't exist!",
                        sock_id);
          bound_sockets.erase(sock_id);
          if (bound_sockets.empty()) {
            bound_p2p_vports.erase(dst_vport);
          }
        }
      }
      return true;
    }
  } else if (vport_flags & P2P_FLAG_P2PS) {
    if (p2p_data.size() < sizeof(p2ps_encapsulated_tcp)) {
      sys_net.notice("Received P2P packet targeted at unbound vport(likely) or "
                     "invalid(vport=%d)",
                     dst_vport);
      return true;
    }
    auto *tcp_header =
        reinterpret_cast<p2ps_encapsulated_tcp *>(p2p_data.data());
    // Validate signature & length
    if (tcp_header->signature != P2PS_U2S_SIG) {
      sys_net.notice("Received P2P packet targeted at unbound vport(vport=%d)",
                     dst_vport);
      return true;
    }
    if (tcp_header->length !=
        (p2p_data.size() - sizeof(p2ps_encapsulated_tcp))) {
      sys_net.error(
          "Received STREAM-P2P packet tcp length didn't match packet length");
      return true;
    }
    // Sanity check
    if (tcp_header->dst_port != dst_vport) {
      sys_net.error("Received STREAM-P2P packet with dst_port != vport");
      return true;
    }
    // Validate checksum
    // The checksum field must be zeroed while recomputing it.
    u16 given_checksum = tcp_header->checksum;
    tcp_header->checksum = 0;
    if (given_checksum !=
        u2s_tcp_checksum(reinterpret_cast<const le_t<u16> *>(p2p_data.data()),
                         p2p_data.size())) {
      sys_net.error("Checksum is invalid, dropping packet!");
      return true;
    }
    // The packet is valid
    dump_packet(tcp_header);
    // Check if it's bound
    // Connected streams are keyed by (src ip, src vport, dst vport).
    const u64 key_connected =
        (reinterpret_cast<struct sockaddr_in *>(&native_addr)
             ->sin_addr.s_addr) |
        (static_cast<u64>(tcp_header->src_port) << 48) |
        (static_cast<u64>(tcp_header->dst_port) << 32);
    {
      std::lock_guard lock(bound_p2p_vports_mutex);
      if (bound_p2p_streams.contains(key_connected)) {
        const auto sock_id = ::at32(bound_p2p_streams, key_connected);
        sys_net.trace("Received packet for connected STREAM-P2P socket(s=%d)",
                      sock_id);
        handle_connected(sock_id, tcp_header,
                         p2p_data.data() + sizeof(p2ps_encapsulated_tcp),
                         &native_addr);
        return true;
      }
      if (bound_p2ps_vports.contains(tcp_header->dst_port)) {
        const auto &bound_sockets =
            ::at32(bound_p2ps_vports, tcp_header->dst_port);
        for (const auto sock_id : bound_sockets) {
          sys_net.trace("Received packet for listening STREAM-P2P socket(s=%d)",
                        sock_id);
          handle_listening(sock_id, tcp_header,
                           p2p_data.data() + sizeof(p2ps_encapsulated_tcp),
                           &native_addr);
        }
        return true;
      }
      if (tcp_header->flags == p2ps_tcp_flags::RST) {
        sys_net.trace("[P2PS] Received RST on unbound P2PS");
        return true;
      }
      // The P2PS packet was sent to an unbound vport, send a RST packet
      p2ps_encapsulated_tcp send_hdr;
      send_hdr.src_port = tcp_header->dst_port;
      send_hdr.dst_port = tcp_header->src_port;
      send_hdr.flags = p2ps_tcp_flags::RST;
      auto packet = generate_u2s_packet(send_hdr, nullptr, 0);
      if (np::sendto_possibly_ipv6(
              p2p_socket, reinterpret_cast<char *>(packet.data()),
              ::size32(packet),
              reinterpret_cast<const sockaddr_in *>(&native_addr), 0) == -1) {
        sys_net.error("[P2PS] Error sending RST to sender to unbound P2PS: %s",
                      get_last_error(false));
        return true;
      }
      sys_net.trace("[P2PS] Sent RST to sender to unbound P2PS");
      return true;
    }
  }
  sys_net.notice("Received a P2P packet with no bound target(dst_vport = %d)",
                 dst_vport);
  return true;
}

View file

@ -0,0 +1,243 @@
#include "stdafx.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/IdManager.h"
#include "sys_net/lv2_socket.h"
#include "sys_net/network_context.h"
#include "sys_net/sys_net_helpers.h"
LOG_CHANNEL(sys_net);
// Returns the platform-specific error code of the last failed socket call
// (WSAGetLastError on Windows, errno elsewhere).
int get_native_error() {
#ifdef _WIN32
  return WSAGetLastError();
#else
  return errno;
#endif
}
// Converts a native socket error (errno / WSAGetLastError value) into the
// guest-visible sys_net_error.
// is_blocking: for blocking sockets, EWOULDBLOCK/EINPROGRESS are swallowed
//              (returns 0 / "no error").
// is_connecting: Windows-only quirk handling for sockets that are still
//                connecting (see below).
// Throws via fmt::throw_exception on error codes missing from the table.
sys_net_error convert_error(bool is_blocking, int native_error,
                            [[maybe_unused]] bool is_connecting) {
  // Convert the error code for socket functions to a one for sys_net
  sys_net_error result{};
  const char *name{};
  // Maps one native constant to its SYS_NET_* counterpart and records its
  // name for logging; Windows constants carry a WSA prefix
#ifdef _WIN32
#define ERROR_CASE(error)                                                      \
  case WSA##error:                                                             \
    result = SYS_NET_##error;                                                  \
    name = #error;                                                             \
    break;
#else
#define ERROR_CASE(error)                                                      \
  case error:                                                                  \
    result = SYS_NET_##error;                                                  \
    name = #error;                                                             \
    break;
#endif
  switch (native_error) {
#ifndef _WIN32
    ERROR_CASE(ENOENT);
    ERROR_CASE(ENOMEM);
    ERROR_CASE(EBUSY);
    ERROR_CASE(ENOSPC);
    ERROR_CASE(EPIPE);
#endif
    // TODO: We don't currently support EFAULT or EINTR
    // ERROR_CASE(EFAULT);
    // ERROR_CASE(EINTR);
    ERROR_CASE(EBADF);
    ERROR_CASE(EACCES);
    ERROR_CASE(EINVAL);
    ERROR_CASE(EMFILE);
    ERROR_CASE(EWOULDBLOCK);
    ERROR_CASE(EINPROGRESS);
    ERROR_CASE(EALREADY);
    ERROR_CASE(EDESTADDRREQ);
    ERROR_CASE(EMSGSIZE);
    ERROR_CASE(EPROTOTYPE);
    ERROR_CASE(ENOPROTOOPT);
    ERROR_CASE(EPROTONOSUPPORT);
    ERROR_CASE(EOPNOTSUPP);
    ERROR_CASE(EPFNOSUPPORT);
    ERROR_CASE(EAFNOSUPPORT);
    ERROR_CASE(EADDRINUSE);
    ERROR_CASE(EADDRNOTAVAIL);
    ERROR_CASE(ENETDOWN);
    ERROR_CASE(ENETUNREACH);
    ERROR_CASE(ECONNABORTED);
    ERROR_CASE(ECONNRESET);
    ERROR_CASE(ENOBUFS);
    ERROR_CASE(EISCONN);
    ERROR_CASE(ENOTCONN);
    ERROR_CASE(ESHUTDOWN);
    ERROR_CASE(ETOOMANYREFS);
    ERROR_CASE(ETIMEDOUT);
    ERROR_CASE(ECONNREFUSED);
    ERROR_CASE(EHOSTDOWN);
    ERROR_CASE(EHOSTUNREACH);
#ifdef _WIN32
  // Windows likes to be special with unique errors
  case WSAENETRESET:
    result = SYS_NET_ECONNRESET;
    name = "WSAENETRESET";
    break;
#endif
  default:
    fmt::throw_exception("sys_net get_last_error(is_blocking=%d, "
                         "native_error=%d): Unknown/illegal socket error",
                         is_blocking, native_error);
  }
#ifdef _WIN32
  if (is_connecting) {
    // Windows will return SYS_NET_ENOTCONN when recvfrom/sendto is called on a
    // socket that is connecting but not yet connected
    if (result == SYS_NET_ENOTCONN)
      return SYS_NET_EAGAIN;
  }
#endif
  // Only log "real" errors; would-block conditions are routine
  if (name && result != SYS_NET_EWOULDBLOCK && result != SYS_NET_EINPROGRESS) {
    sys_net.error("Socket error %s", name);
  }
  // Blocking sockets treat would-block conditions as "no error"
  if (is_blocking && result == SYS_NET_EWOULDBLOCK) {
    return {};
  }
  if (is_blocking && result == SYS_NET_EINPROGRESS) {
    return {};
  }
  return result;
#undef ERROR_CASE
}
// Fetches the last native socket error and converts it to a sys_net_error.
sys_net_error get_last_error(bool is_blocking, bool is_connecting) {
  const int native = get_native_error();
  return convert_error(is_blocking, native, is_connecting);
}
// Converts a native sockaddr_storage (AF_INET or AF_UNSPEC only) into the
// guest-visible sys_net_sockaddr representation (big-endian fields).
sys_net_sockaddr
native_addr_to_sys_net_addr(const ::sockaddr_storage &native_addr) {
  ensure(native_addr.ss_family == AF_INET ||
         native_addr.ss_family == AF_UNSPEC);
  const sockaddr_in *in4 = reinterpret_cast<const sockaddr_in *>(&native_addr);
  sys_net_sockaddr sn_addr;
  sys_net_sockaddr_in &out = *reinterpret_cast<sys_net_sockaddr_in *>(&sn_addr);
  out = {};
  out.sin_len = sizeof(sys_net_sockaddr_in);
  out.sin_family = SYS_NET_AF_INET;
  // Port and address are already network (big) endian in the native struct
  out.sin_port = std::bit_cast<be_t<u16>, u16>(in4->sin_port);
  out.sin_addr = std::bit_cast<be_t<u32>, u32>(in4->sin_addr.s_addr);
  return sn_addr;
}
// Converts a guest sys_net_sockaddr (must be SYS_NET_AF_INET) into a native
// IPv4 sockaddr_in.
::sockaddr_in sys_net_addr_to_native_addr(const sys_net_sockaddr &sn_addr) {
  ensure(sn_addr.sa_family == SYS_NET_AF_INET);
  const auto *in_guest =
      reinterpret_cast<const sys_net_sockaddr_in *>(&sn_addr);
  ::sockaddr_in out{};
  out.sin_family = AF_INET;
  out.sin_port = std::bit_cast<u16>(in_guest->sin_port);
  out.sin_addr.s_addr = std::bit_cast<u32>(in_guest->sin_addr);
#ifdef _WIN32
  // Windows doesn't support sending packets to 0.0.0.0 but it works on unixes,
  // send to 127.0.0.1 instead
  if (out.sin_addr.s_addr == 0x00000000) {
    sys_net.warning("[Native] Redirected 0.0.0.0 to 127.0.0.1");
    out.sin_addr.s_addr = std::bit_cast<u32, be_t<u32>>(0x7F000001);
  }
#endif
  return out;
}
// Returns true unless the IPv4 address is loopback (127/8) or in one of the
// RFC 1918 private ranges (10/8, 172.16/12, 192.168/16).
bool is_ip_public_address(const ::sockaddr_in &addr) {
  const unsigned char *ip =
      reinterpret_cast<const unsigned char *>(&addr.sin_addr.s_addr);
  const bool is_private = ip[0] == 10 || ip[0] == 127 ||
                          (ip[0] == 172 && ip[1] >= 16 && ip[1] <= 31) ||
                          (ip[0] == 192 && ip[1] == 168);
  return !is_private;
}
// Removes the given PPU thread from every socket's wakeup queue; returns the
// total number of queue entries cleared.
u32 network_clear_queue(ppu_thread &ppu) {
  u32 total = 0;
  idm::select<lv2_socket>([&total, &ppu](u32 /*id*/, lv2_socket &sock) {
    total += sock.clear_queue(&ppu);
  });
  return total;
}
// Drops the PPU thread from both network contexts' pending-wakeup lists.
void clear_ppu_to_awake(ppu_thread &ppu) {
  auto &net_ctx = g_fxo->get<network_context>();
  auto &p2p_ctx = g_fxo->get<p2p_context>();
  net_ctx.del_ppu_to_awake(&ppu);
  p2p_ctx.del_ppu_to_awake(&ppu);
}
#ifdef _WIN32
// Workaround function for WSAPoll not reporting failed connections
// Note that this was fixed in Windows 10 version 2004 (after more than 10 years
// lol)
// fds/connecting must each hold at least nfds entries. Entries flagged as
// 'connecting' that report no events are probed with getsockopt(SO_ERROR);
// a silently failed connect synthesizes POLLERR|POLLHUP revents.
void windows_poll(std::vector<pollfd> &fds, unsigned long nfds, int timeout,
                  std::vector<bool> &connecting) {
  ensure(fds.size() >= nfds);
  ensure(connecting.size() >= nfds);
  // Don't call WSAPoll with zero nfds (errors 10022 or 10038)
  if (std::none_of(fds.begin(), fds.begin() + nfds,
                   [](pollfd &pfd) { return pfd.fd != INVALID_SOCKET; })) {
    // Emulate the timeout behavior poll would have had
    if (timeout > 0) {
      Sleep(timeout);
    }
    return;
  }
  int r = ::WSAPoll(fds.data(), nfds, timeout);
  if (r == SOCKET_ERROR) {
    sys_net.error(
        "WSAPoll failed: %s",
        fmt::win_error{static_cast<unsigned long>(WSAGetLastError()), nullptr});
    return;
  }
  for (unsigned long i = 0; i < nfds; i++) {
    if (connecting[i]) {
      if (!fds[i].revents) {
        int error = 0;
        socklen_t intlen = sizeof(error);
        if (getsockopt(fds[i].fd, SOL_SOCKET, SO_ERROR,
                       reinterpret_cast<char *>(&error), &intlen) == -1 ||
            error != 0) {
          // Connection silently failed
          connecting[i] = false;
          fds[i].revents =
              POLLERR | POLLHUP | (fds[i].events & (POLLIN | POLLOUT));
        }
      } else {
        // Real events arrived: the connect attempt has resolved
        connecting[i] = false;
      }
    }
  }
}
#endif

View file

@ -0,0 +1,210 @@
#include "stdafx.h"
#include "Crypto/unedat.h"
#include "Crypto/unself.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/IdManager.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/VFS.h"
#include "Emu/system_config.h"
#include "Loader/ELF.h"
#include "sys_fs.h"
#include "sys_overlay.h"
#include "sys_process.h"
extern std::pair<shared_ptr<lv2_overlay>, CellError>
ppu_load_overlay(const ppu_exec_object &, bool virtual_load,
const std::string &path, s64 file_offset,
utils::serial *ar = nullptr);
extern bool ppu_initialize(const ppu_module<lv2_obj> &, bool check_only = false,
u64 file_size = 0);
extern void ppu_finalize(const ppu_module<lv2_obj> &info,
bool force_mem_release = false);
LOG_CHANNEL(sys_overlay);
// Shared implementation of the sys_overlay load syscalls: opens (or reuses)
// the source file, decrypts the SELF, parses the PPU ELF, maps the overlay
// and reports its id and entry point.
// ovlmid: out - new overlay module id; entry: out - module entry address.
// src: optional already-opened file (by-fd path); file_offset: offset of the
// module within src.
static error_code overlay_load_module(vm::ptr<u32> ovlmid,
                                      const std::string &vpath, u64 /*flags*/,
                                      vm::ptr<u32> entry, fs::file src = {},
                                      s64 file_offset = 0) {
  if (!src) {
    // Resolve and open through the VFS when no file was provided
    auto [fs_error, ppath, path, lv2_file, type] = lv2_file::open(vpath, 0, 0);
    if (fs_error) {
      return {fs_error, vpath};
    }
    src = std::move(lv2_file);
  }
  // NPDRM content is decrypted with the most recently loaded klicensee
  u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
  src = decrypt_self(std::move(src), reinterpret_cast<u8 *>(&klic));
  if (!src) {
    return {CELL_ENOEXEC, +"Failed to decrypt file"};
  }
  ppu_exec_object obj = std::move(src);
  src.close();
  if (obj != elf_error::ok) {
    return {CELL_ENOEXEC, obj.operator elf_error()};
  }
  const auto [ovlm, error] =
      ppu_load_overlay(obj, false, vfs::get(vpath), file_offset);
  obj.clear();
  if (error) {
    if (error == CELL_CANCEL + 0u) {
      // Emulation stopped
      return {};
    }
    return error;
  }
  // Prepare the overlay's code for execution (JIT/interpreter)
  ppu_initialize(*ovlm);
  sys_overlay.success("Loaded overlay: \"%s\" (id=0x%x)", vpath,
                      idm::last_id());
  *ovlmid = idm::last_id();
  *entry = ovlm->entry;
  return CELL_OK;
}
fs::file make_file_view(fs::file &&file, u64 offset, u64 size);
// Savestate deserializer: re-opens the overlay's source file, replays
// ppu_load_overlay with the serialized state and returns a callback that
// publishes the object into its IDM storage slot.
std::function<void(void *)> lv2_overlay::load(utils::serial &ar) {
  const std::string vpath = ar.pop<std::string>();
  const std::string path = vfs::get(vpath);
  const s64 offset = ar.pop<s64>();
  sys_overlay.success("lv2_overlay::load(): vpath='%s', path='%s', offset=0x%x",
                      vpath, path, offset);
  shared_ptr<lv2_overlay> ovlm;
  // Strip the synthetic "_x<offset>" suffix appended for by-fd loads
  fs::file file{path.substr(
      0, path.size() - (offset ? fmt::format("_x%x", offset).size() : 0))};
  if (file) {
    u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
    file = make_file_view(std::move(file), offset, umax);
    ovlm =
        ppu_load_overlay(ppu_exec_object{decrypt_self(
                             std::move(file), reinterpret_cast<u8 *>(&klic))},
                         false, path, 0, &ar)
            .first;
    if (!ovlm) {
      fmt::throw_exception("lv2_overlay::load(): ppu_load_overlay() failed. "
                           "(vpath='%s', offset=0x%x)",
                           vpath, offset);
    }
  } else if (!g_cfg.savestate.state_inspection_mode.get()) {
    // A missing file is fatal unless we are only inspecting the savestate
    fmt::throw_exception(
        "lv2_overlay::load(): Failed to find file. (vpath='%s', offset=0x%x)",
        vpath, offset);
  } else {
    sys_overlay.error(
        "lv2_overlay::load(): Failed to find file. (vpath='%s', offset=0x%x)",
        vpath, offset);
  }
  return [ovlm](void *storage) {
    *static_cast<atomic_ptr<lv2_obj> *>(storage) = ovlm;
  };
}
// Savestate serializer: records the overlay's virtual path and file offset.
void lv2_overlay::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_prx_overlay);
  const std::string vpath = vfs::retrieve(path);
  // An unresolvable vpath is logged as an error (restore will likely fail)
  if (vpath.empty()) {
    sys_overlay.error("lv2_overlay::save(): vpath='%s', offset=0x%x", vpath,
                      offset);
  } else {
    sys_overlay.success("lv2_overlay::save(): vpath='%s', offset=0x%x", vpath,
                        offset);
  }
  ar(vpath, offset);
}
error_code sys_overlay_load_module(vm::ptr<u32> ovlmid, vm::cptr<char> path,
u64 flags, vm::ptr<u32> entry) {
sys_overlay.warning(
"sys_overlay_load_module(ovlmid=*0x%x, path=%s, flags=0x%x, entry=*0x%x)",
ovlmid, path, flags, entry);
if (!g_ps3_process_info.ppc_seg) {
// Process not permitted
return CELL_ENOSYS;
}
if (!path) {
return CELL_EFAULT;
}
return overlay_load_module(ovlmid, path.get_ptr(), flags, entry);
}
// Loads an overlay module from an already-open file descriptor at 'offset'.
error_code sys_overlay_load_module_by_fd(vm::ptr<u32> ovlmid, u32 fd,
                                         u64 offset, u64 flags,
                                         vm::ptr<u32> entry) {
  sys_overlay.warning("sys_overlay_load_module_by_fd(ovlmid=*0x%x, fd=%d, "
                      "offset=0x%llx, flags=0x%x, entry=*0x%x)",
                      ovlmid, fd, offset, flags, entry);
  // Requires the ppc_seg process capability
  if (!g_ps3_process_info.ppc_seg) {
    return CELL_ENOSYS;
  }
  if (static_cast<s64>(offset) < 0) {
    return CELL_EINVAL;
  }
  const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
  if (!file) {
    return CELL_EBADF;
  }
  std::lock_guard lock(file->mp->mutex);
  if (!file->file) {
    return CELL_EBADF;
  }
  // Non-zero offsets get a synthetic "_x<offset>" suffix so each view has a
  // distinct module path
  std::string vpath = file->name.data();
  if (offset) {
    vpath = fmt::format("%s_x%x", vpath, offset);
  }
  return overlay_load_module(ovlmid, vpath, flags, entry,
                             lv2_file::make_view(file, offset), offset);
}
// Unloads an overlay module: withdraws it from IDM, frees its memory
// segments and finalizes its compiled code.
error_code sys_overlay_unload_module(u32 ovlmid) {
  sys_overlay.warning("sys_overlay_unload_module(ovlmid=0x%x)", ovlmid);
  // Requires the ppc_seg process capability
  if (!g_ps3_process_info.ppc_seg) {
    return CELL_ENOSYS;
  }
  const auto ovlm = idm::withdraw<lv2_obj, lv2_overlay>(ovlmid);
  if (!ovlm) {
    return CELL_ESRCH;
  }
  // Release every memory segment the overlay occupied
  for (const auto &seg : ovlm->segs) {
    vm::dealloc(seg.addr);
  }
  ppu_finalize(*ovlm);
  return CELL_OK;
}

View file

@ -0,0 +1,645 @@
#include "stdafx.h"
#include "sys_ppu_thread.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUCallback.h"
#include "Emu/Cell/PPUOpcodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Memory/vm_locking.h"
#include "sys_event.h"
#include "sys_memory.h"
#include "sys_mmapper.h"
#include "sys_process.h"
#include "util/asm.hpp"
#include <thread>
LOG_CHANNEL(sys_ppu_thread);
// Simple structure to cleanup previous thread, because can't remove its own
// thread
struct ppu_thread_cleaner {
  // Previously exited thread pending an explicit join
  shared_ptr<named_thread<ppu_thread>> old;
  // Stashes 'ptr' for later cleanup and returns whatever was pending before,
  // so the caller can join/destroy it outside of IDM lock scope.
  shared_ptr<named_thread<ppu_thread>>
  clean(shared_ptr<named_thread<ppu_thread>> ptr) {
    return std::exchange(old, std::move(ptr));
  }
  ppu_thread_cleaner() = default;
  // Non-copyable (used as a g_fxo singleton)
  ppu_thread_cleaner(const ppu_thread_cleaner &) = delete;
  ppu_thread_cleaner &operator=(const ppu_thread_cleaner &) = delete;
  // thread_state assignment joins the pending thread, if any
  ppu_thread_cleaner &operator=(thread_state state) noexcept {
    reader_lock lock(id_manager::g_mutex);
    if (old) {
      // It is detached from IDM now so join must be done explicitly now
      *static_cast<named_thread<ppu_thread> *>(old.get()) = state;
    }
    return *this;
  }
};
// Final cleanup when a PPU thread exits: releases its stack allocation,
// returns the stack quota to the default memory container and flushes the
// thread's debug histories.
void ppu_thread_exit(ppu_thread &ppu, ppu_opcode_t, be_t<u32> *,
                     struct ppu_intrp_func *) {
  ppu.state += cpu_flag::exit + cpu_flag::wait;
  // Deallocate Stack Area
  ensure(vm::dealloc(ppu.stack_addr, vm::stack) == ppu.stack_size);
  // Return the stack's "physical memory" to the default container
  if (auto dct = g_fxo->try_get<lv2_memory_container>()) {
    dct->free(ppu.stack_size);
  }
  if (ppu.call_history.index) {
    ppu_log.notice("Calling history: %s", ppu.call_history);
    ppu.call_history.index = 0;
  }
  if (ppu.syscall_history.index) {
    ppu_log.notice("HLE/LV2 history: %s", ppu.syscall_history);
    ppu.syscall_history.index = 0;
  }
}
// PPU thread name buffer size: 27 visible characters plus a null terminator
constexpr u32 c_max_ppu_name_size = 28;
// Terminates the calling PPU thread. A joinable thread becomes a zombie until
// a joiner collects it; otherwise it is withdrawn from IDM here and joined by
// the ppu_thread_cleaner later.
void _sys_ppu_thread_exit(ppu_thread &ppu, u64 errorcode) {
  ppu.state += cpu_flag::wait;
  u64 writer_mask = 0;
  sys_ppu_thread.trace("_sys_ppu_thread_exit(errorcode=0x%llx)", errorcode);
  ppu_join_status old_status;
  // Avoid cases where cleaning causes the destructor to be called inside IDM
  // lock scope (for performance)
  shared_ptr<named_thread<ppu_thread>> old_ppu;
  {
    lv2_obj::notify_all_t notify;
    lv2_obj::prepare_for_sleep(ppu);
    std::lock_guard lock(id_manager::g_mutex);
    // Get joiner ID
    old_status = ppu.joiner.fetch_op([](ppu_join_status &status) {
      if (status == ppu_join_status::joinable) {
        // Joinable, not joined
        status = ppu_join_status::zombie;
        return;
      }
      // Set deleted thread status
      status = ppu_join_status::exited;
    });
    // Statuses >= max encode a waiting joiner's thread id: wake that thread
    if (old_status >= ppu_join_status::max) {
      lv2_obj::append(idm::check_unlocked<named_thread<ppu_thread>>(
          static_cast<u32>(old_status)));
    }
    if (old_status != ppu_join_status::joinable) {
      // Remove self ID from IDM, move owning ptr
      old_ppu = g_fxo->get<ppu_thread_cleaner>().clean(
          idm::withdraw<named_thread<ppu_thread>>(ppu.id, 0,
                                                  std::false_type{}));
    }
    // Get writers mask (wait for all current writers to quit)
    writer_mask = vm::g_range_lock_bits[1];
    // Unqueue
    lv2_obj::sleep(ppu);
    notify.cleanup();
    // Remove suspend state (TODO)
    ppu.state -= cpu_flag::suspend;
  }
  // A zombie waits here until a joiner collects its status
  while (ppu.joiner == ppu_join_status::zombie) {
    if (ppu.is_stopped() &&
        ppu.joiner.compare_and_swap_test(ppu_join_status::zombie,
                                         ppu_join_status::joinable)) {
      // Abort
      ppu.state += cpu_flag::again;
      return;
    }
    // Wait for termination
    thread_ctrl::wait_on(ppu.joiner, ppu_join_status::zombie);
  }
  ppu_thread_exit(ppu, {}, nullptr, nullptr);
  if (old_ppu) {
    // It is detached from IDM now so join must be done explicitly now
    *old_ppu = thread_state::finished;
  }
  // Need to wait until the current writers finish
  if (ppu.state & cpu_flag::memory) {
    for (; writer_mask; writer_mask &= vm::g_range_lock_bits[1]) {
      busy_wait(200);
    }
  }
}
// Yields the calling PPU thread's timeslice.
// Returns 0 (CELL_OK) when a context switch happened, 1 (CELL_CANCEL)
// otherwise.
s32 sys_ppu_thread_yield(ppu_thread &ppu) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.trace("sys_ppu_thread_yield()");
  if (lv2_obj::yield(ppu)) {
    return CELL_OK;
  }
  // No switch happened; do other work in the meantime
  lv2_obj::notify_all();
  return CELL_CANCEL;
}
// Waits for the target thread to exit and retrieves its exit status (gpr[3])
// into *vptr. Errors: EDEADLK on self-join, ESRCH if unknown/already exited,
// EINVAL if the target is detached; a null vptr yields EFAULT only after the
// join itself completed.
error_code sys_ppu_thread_join(ppu_thread &ppu, u32 thread_id,
                               vm::ptr<u64> vptr) {
  lv2_obj::prepare_for_sleep(ppu);
  sys_ppu_thread.trace("sys_ppu_thread_join(thread_id=0x%x, vptr=*0x%x)",
                       thread_id, vptr);
  if (thread_id == ppu.id) {
    return CELL_EDEADLK;
  }
  auto thread = idm::get<named_thread<ppu_thread>>(
      thread_id,
      [&, notify = lv2_obj::notify_all_t()](ppu_thread &thread) -> CellError {
        // Claim the join or detect that the target already ended
        CellError result =
            thread.joiner.atomic_op([&](ppu_join_status &value) -> CellError {
              switch (value) {
              case ppu_join_status::joinable:
                // Register ourselves as the joiner (status stores our id)
                value = ppu_join_status{ppu.id};
                return {};
              case ppu_join_status::zombie:
                // Already terminated: collect without sleeping
                value = ppu_join_status::exited;
                return CELL_EAGAIN;
              case ppu_join_status::exited:
                return CELL_ESRCH;
              case ppu_join_status::detached:
              default:
                return CELL_EINVAL;
              }
            });
        if (!result) {
          lv2_obj::prepare_for_sleep(ppu);
          lv2_obj::sleep(ppu);
        }
        notify.cleanup();
        return result;
      });
  if (!thread) {
    return CELL_ESRCH;
  }
  if (thread.ret && thread.ret != CELL_EAGAIN) {
    return thread.ret;
  }
  if (thread.ret == CELL_EAGAIN) {
    // Notify thread if waiting for a joiner
    thread->joiner.notify_one();
  }
  // Wait for cleanup
  (*thread.ptr)();
  if (thread->joiner != ppu_join_status::exited) {
    // Thread aborted, log it later
    ppu.state += cpu_flag::again;
    return {};
  }
  static_cast<void>(ppu.test_stopped());
  // Get the exit status from the register
  const u64 vret = thread->gpr[3];
  if (thread.ret == CELL_EAGAIN) {
    // Cleanup
    ensure(idm::remove_verify<named_thread<ppu_thread>>(thread_id,
                                                        std::move(thread.ptr)));
  }
  if (!vptr) {
    return not_an_error(CELL_EFAULT);
  }
  *vptr = vret;
  return CELL_OK;
}
// Detaches a thread so its resources are reclaimed without a join. If the
// target is already a zombie it is withdrawn from IDM and joined right here
// (the CELL_EAGAIN path).
error_code sys_ppu_thread_detach(ppu_thread &ppu, u32 thread_id) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.trace("sys_ppu_thread_detach(thread_id=0x%x)", thread_id);
  CellError result = CELL_ESRCH;
  auto [ptr, _] = idm::withdraw<named_thread<ppu_thread>>(
      thread_id, [&](ppu_thread &thread) {
        result =
            thread.joiner.atomic_op([](ppu_join_status &value) -> CellError {
              switch (value) {
              case ppu_join_status::joinable:
                value = ppu_join_status::detached;
                return {};
              case ppu_join_status::detached:
                return CELL_EINVAL;
              case ppu_join_status::zombie:
                value = ppu_join_status::exited;
                return CELL_EAGAIN;
              case ppu_join_status::exited:
                return CELL_ESRCH;
              default:
                // A joiner is already registered (status holds its id)
                return CELL_EBUSY;
              }
            });
        // Remove ID on EAGAIN
        return result != CELL_EAGAIN;
      });
  if (result) {
    if (result == CELL_EAGAIN) {
      // Join and notify thread (it is detached from IDM now so it must be done
      // explicitly now)
      *ptr = thread_state::finished;
    }
    return result;
  }
  return CELL_OK;
}
// Reports whether the calling thread is joinable (i.e. not detached).
error_code sys_ppu_thread_get_join_state(ppu_thread &ppu,
                                         vm::ptr<s32> isjoinable) {
  sys_ppu_thread.trace("sys_ppu_thread_get_join_state(isjoinable=*0x%x)",
                       isjoinable);
  if (!isjoinable) {
    return CELL_EFAULT;
  }
  const bool joinable = ppu.joiner != ppu_join_status::detached;
  *isjoinable = joinable;
  return CELL_OK;
}
// Sets a PPU thread's scheduling priority. Valid range is 0..3071, extended
// to -512..3071 for processes with debug/root permissions.
error_code sys_ppu_thread_set_priority(ppu_thread &ppu, u32 thread_id,
                                       s32 prio) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.trace("sys_ppu_thread_set_priority(thread_id=0x%x, prio=%d)",
                       thread_id, prio);
  const s32 min_prio = g_ps3_process_info.debug_or_root() ? -512 : 0;
  if (prio < min_prio || prio > 3071) {
    return CELL_EINVAL;
  }
  if (thread_id == ppu.id) {
    // Fast path for self: skip the IDM lookup
    if (ppu.prio.load().prio != prio) {
      lv2_obj::set_priority(ppu, prio);
    }
    return CELL_OK;
  }
  const auto thread = idm::check<named_thread<ppu_thread>>(
      thread_id, [&, notify = lv2_obj::notify_all_t()](ppu_thread &thread) {
        lv2_obj::set_priority(thread, prio);
      });
  if (!thread) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}
// Reads a thread's priority. Retries with lv2_obj::g_mutex held in reader
// mode until the value can be read while the observing thread is not
// suspended, avoiding a torn read during a scheduler update.
error_code sys_ppu_thread_get_priority(ppu_thread &ppu, u32 thread_id,
                                       vm::ptr<s32> priop) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.trace(
      "sys_ppu_thread_get_priority(thread_id=0x%x, priop=*0x%x)", thread_id,
      priop);
  u32 prio{};
  if (thread_id == ppu.id) {
    // Fast path for self
    for (; !ppu.is_stopped(); std::this_thread::yield()) {
      if (reader_lock lock(lv2_obj::g_mutex); cpu_flag::suspend - ppu.state) {
        prio = ppu.prio.load().prio;
        break;
      }
      // Suspended: process pending state and retry
      ppu.check_state();
      ppu.state += cpu_flag::wait;
    }
    ppu.check_state();
    *priop = prio;
    return CELL_OK;
  }
  for (; !ppu.is_stopped(); std::this_thread::yield()) {
    bool check_state = false;
    const auto thread = idm::check<named_thread<ppu_thread>>(
        thread_id, [&](ppu_thread &thread) {
          if (reader_lock lock(lv2_obj::g_mutex);
              cpu_flag::suspend - ppu.state) {
            prio = thread.prio.load().prio;
          } else {
            check_state = true;
          }
        });
    if (check_state) {
      ppu.check_state();
      ppu.state += cpu_flag::wait;
      continue;
    }
    if (!thread) {
      return CELL_ESRCH;
    }
    ppu.check_state();
    *priop = prio;
    break;
  }
  return CELL_OK;
}
// Reports the calling thread's stack base address and size.
error_code
sys_ppu_thread_get_stack_information(ppu_thread &ppu,
                                     vm::ptr<sys_ppu_thread_stack_t> sp) {
  sys_ppu_thread.trace("sys_ppu_thread_get_stack_information(sp=*0x%x)", sp);
  const u32 addr = ppu.stack_addr;
  const u32 size = ppu.stack_size;
  sp->pst_addr = addr;
  sp->pst_size = size;
  return CELL_OK;
}
// Stub: validates permissions and the target id; actual stopping is TODO.
error_code sys_ppu_thread_stop(ppu_thread &ppu, u32 thread_id) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.todo("sys_ppu_thread_stop(thread_id=0x%x)", thread_id);
  // Root permission is required
  if (!g_ps3_process_info.has_root_perm()) {
    return CELL_ENOSYS;
  }
  // Only verify that the thread exists
  const auto found = idm::check<named_thread<ppu_thread>>(
      thread_id, [](named_thread<ppu_thread> &) {});
  if (!found) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}
// Stub: validates permissions only; restarting is TODO.
error_code sys_ppu_thread_restart(ppu_thread &ppu) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.todo("sys_ppu_thread_restart()");
  // Root permission is required
  if (!g_ps3_process_info.has_root_perm()) {
    return CELL_ENOSYS;
  }
  return CELL_OK;
}
// Creates (but does not start) a new PPU thread: validates parameters,
// allocates the stack from the default memory container and registers the
// thread in IDM. *thread_id receives the new id.
// Fix: the vm::read_string failure path previously released only the
// container quota and leaked the freshly allocated VM stack; it now frees
// both, matching the idm::import failure path below.
error_code _sys_ppu_thread_create(ppu_thread &ppu, vm::ptr<u64> thread_id,
                                  vm::ptr<ppu_thread_param_t> param, u64 arg,
                                  u64 unk, s32 prio, u32 _stacksz, u64 flags,
                                  vm::cptr<char> threadname) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.warning(
      "_sys_ppu_thread_create(thread_id=*0x%x, param=*0x%x, arg=0x%llx, "
      "unk=0x%llx, prio=%d, stacksize=0x%x, flags=0x%llx, threadname=*0x%x)",
      thread_id, param, arg, unk, prio, _stacksz, flags, threadname);
  // thread_id is checked for null in stub -> CELL_ENOMEM
  // unk is set to 0 in sys_ppu_thread_create stub
  if (!param || !param->entry) {
    return CELL_EFAULT;
  }
  if (prio < (g_ps3_process_info.debug_or_root() ? -512 : 0) || prio > 3071) {
    return CELL_EINVAL;
  }
  if ((flags & 3) == 3) // Check two flags: joinable + interrupt not allowed
  {
    return CELL_EPERM;
  }
  const ppu_func_opd_t entry = param->entry.opd();
  const u32 tls = param->tls;
  // Compute actual stack size and allocate
  const u32 stack_size = utils::align<u32>(std::max<u32>(_stacksz, 4096), 4096);
  auto &dct = g_fxo->get<lv2_memory_container>();
  // Try to obtain "physical memory" from the default container
  if (!dct.take(stack_size)) {
    return {CELL_ENOMEM, dct.size - dct.used};
  }
  const vm::addr_t stack_base{vm::alloc(stack_size, vm::stack, 4096)};
  if (!stack_base) {
    dct.free(stack_size);
    return CELL_ENOMEM;
  }
  std::string ppu_name;
  if (threadname) {
    constexpr u32 max_size =
        c_max_ppu_name_size - 1; // max size excluding null terminator
    if (!vm::read_string(threadname.addr(), max_size, ppu_name, true)) {
      // Undo both the VM allocation and the container quota (bug fix: the
      // stack itself was previously leaked here)
      vm::dealloc(stack_base);
      dct.free(stack_size);
      return CELL_EFAULT;
    }
  }
  // Register the new thread object; flags&3 encodes joinable(1)/interrupt(2)
  const u32 tid = idm::import <named_thread<ppu_thread>>([&]() {
    ppu_thread_params p;
    p.stack_addr = stack_base;
    p.stack_size = stack_size;
    p.tls_addr = tls;
    p.entry = entry;
    p.arg0 = arg;
    p.arg1 = unk;
    return stx::make_shared<named_thread<ppu_thread>>(
        p, ppu_name, prio, 1 - static_cast<int>(flags & 3));
  });
  if (!tid) {
    vm::dealloc(stack_base);
    dct.free(stack_size);
    return CELL_EAGAIN;
  }
  sys_ppu_thread.warning("_sys_ppu_thread_create(): Thread \"%s\" created "
                         "(id=0x%x, func=*0x%x, rtoc=0x%x, user-tls=0x%x)",
                         ppu_name, tid, entry.addr, entry.rtoc, tls);
  ppu.check_state();
  *thread_id = tid;
  return CELL_OK;
}
// Starts a previously created PPU thread: clears its stop flag, queues the
// entry-point call and wakes the worker. Returns EBUSY if already started.
error_code sys_ppu_thread_start(ppu_thread &ppu, u32 thread_id) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.trace("sys_ppu_thread_start(thread_id=0x%x)", thread_id);
  const auto thread = idm::get<named_thread<ppu_thread>>(
      thread_id,
      [&, notify = lv2_obj::notify_all_t()](ppu_thread &thread) -> CellError {
        if (!thread.state.test_and_reset(cpu_flag::stop)) {
          // Already started
          return CELL_EBUSY;
        }
        // Schedule the thread and queue its entry-point invocation
        ensure(lv2_obj::awake(&thread));
        thread.cmd_list({
            {ppu_cmd::entry_call, 0},
        });
        return {};
      });
  if (!thread) {
    return CELL_ESRCH;
  }
  if (thread.ret) {
    return thread.ret;
  } else {
    // Kick the worker loop so it notices the queued command
    thread->cmd_notify.store(1);
    thread->cmd_notify.notify_one();
  }
  return CELL_OK;
}
// Renames a PPU thread (guest-visible name only).
error_code sys_ppu_thread_rename(ppu_thread &ppu, u32 thread_id,
                                 vm::cptr<char> name) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.warning("sys_ppu_thread_rename(thread_id=0x%x, name=*0x%x)",
                         thread_id, name);
  const auto thread = idm::get_unlocked<named_thread<ppu_thread>>(thread_id);
  if (!thread) {
    return CELL_ESRCH;
  }
  if (!name) {
    return CELL_EFAULT;
  }
  // Maximum length excluding the null terminator
  constexpr u32 max_size = c_max_ppu_name_size - 1;
  // Read and validate the new name from guest memory
  std::string raw_name;
  if (!vm::read_string(name.addr(), max_size, raw_name, true)) {
    return CELL_EFAULT;
  }
  auto new_name = make_single<std::string>(std::move(raw_name));
  sys_ppu_thread.warning("sys_ppu_thread_rename(): Thread renamed to \"%s\"",
                         *new_name);
  thread->ppu_tname.store(std::move(new_name));
  // TODO: thread_ctrl name is not changed; debugger thread name is currently
  // only set for the local thread
  thread_ctrl::set_name(*thread, thread->thread_name);
  return CELL_OK;
}
// Resumes a thread that was suspended on a page fault.
error_code sys_ppu_thread_recover_page_fault(ppu_thread &ppu, u32 thread_id) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.warning("sys_ppu_thread_recover_page_fault(thread_id=0x%x)",
                         thread_id);
  const auto target = idm::get_unlocked<named_thread<ppu_thread>>(thread_id);
  if (!target) {
    return CELL_ESRCH;
  }
  return mmapper_thread_recover_page_fault(target.get());
}
// Stub: returns whether a page-fault context exists for the thread; filling
// the actual context is TODO.
error_code
sys_ppu_thread_get_page_fault_context(ppu_thread &ppu, u32 thread_id,
                                      vm::ptr<sys_ppu_thread_icontext_t> ctxt) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.todo(
      "sys_ppu_thread_get_page_fault_context(thread_id=0x%x, ctxt=*0x%x)",
      thread_id, ctxt);
  const auto target = idm::get_unlocked<named_thread<ppu_thread>>(thread_id);
  if (!target) {
    return CELL_ESRCH;
  }
  // A context is only available while the thread is suspended for a page
  // fault
  auto &pf_events = g_fxo->get<page_fault_event_entries>();
  reader_lock lock(pf_events.pf_mutex);
  if (!pf_events.events.contains(target.get())) {
    return CELL_EINVAL;
  }
  // TODO: Fill ctxt with proper information.
  return CELL_OK;
}

View file

@ -0,0 +1,590 @@
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/System.h"
#include "Emu/VFS.h"
#include "sys_process.h"
#include "Crypto/unedat.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_cond.h"
#include "sys_event.h"
#include "sys_event_flag.h"
#include "sys_fs.h"
#include "sys_interrupt.h"
#include "sys_lwcond.h"
#include "sys_lwmutex.h"
#include "sys_memory.h"
#include "sys_mmapper.h"
#include "sys_mutex.h"
#include "sys_overlay.h"
#include "sys_prx.h"
#include "sys_rwlock.h"
#include "sys_semaphore.h"
#include "sys_spu.h"
#include "sys_timer.h"
#include "sys_vm.h"
// Check all flags known to be related to extended permissions (TODO)
// It's possible anything which has root flags implicitly has debug perm as well
// But I haven't confirmed it.
bool ps3_process_info_t::debug_or_root() const {
  // Any of the 0xE0000000 control bits grants debug or root capability
  return (ctrl_flags1 & 0xe0000000u) != 0;
}
bool ps3_process_info_t::has_root_perm() const {
  // Either of the 0xC0000000 control bits grants root permission
  return (ctrl_flags1 & 0xc0000000u) != 0;
}
bool ps3_process_info_t::has_debug_perm() const {
  // Either of the 0xA0000000 control bits grants debug permission
  return (ctrl_flags1 & 0xa0000000u) != 0;
}
// If a SELF file is of CellOS return its filename, otherwise return an empty
// string
std::string_view ps3_process_info_t::get_cellos_appname() const {
  // Only firmware (root-permission) SELFs without a game title ID qualify
  if (!has_root_perm() || !Emu.GetTitleID().empty()) {
    return {};
  }
  // Filename component after the last '/' of the boot path.
  // NOTE(review): the returned view references Emu.GetBoot()'s storage —
  // assumes GetBoot() returns a stable reference that outlives the caller's
  // use; confirm.
  return std::string_view(Emu.GetBoot())
      .substr(Emu.GetBoot().find_last_of('/') + 1);
}
LOG_CHANNEL(sys_process);
ps3_process_info_t g_ps3_process_info;
// TODO: get current process id
// A single emulated process exists, so it is always reported as pid 1.
s32 process_getpid() {
  constexpr s32 emulated_pid = 1;
  return emulated_pid;
}
// Syscall wrapper around process_getpid() with trace logging.
s32 sys_process_getpid() {
  sys_process.trace("sys_process_getpid() -> 1");
  const s32 pid = process_getpid();
  return pid;
}
// Parent process id is not emulated; always reports 0.
s32 sys_process_getppid() {
  sys_process.todo("sys_process_getppid() -> 0");
  return 0;
}
// Counts the live IDM objects of the given type by visiting each of them
// with a no-op callback (select returns the visit count).
template <typename T, typename Get> u32 idm_get_count() {
  return idm::select<T, Get>([](u32 /*id*/, Get & /*obj*/) {});
}
// Writes into *nump how many kernel objects of the requested class currently
// exist. TRACE and SPUPORT objects are unsupported and report 0; unknown
// classes yield EINVAL.
error_code sys_process_get_number_of_object(u32 object, vm::ptr<u32> nump) {
  sys_process.error("sys_process_get_number_of_object(object=0x%x, nump=*0x%x)",
                    object, nump);
  switch (object) {
  case SYS_MEM_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_memory>();
    break;
  case SYS_MUTEX_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_mutex>();
    break;
  case SYS_COND_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_cond>();
    break;
  case SYS_RWLOCK_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_rwlock>();
    break;
  case SYS_INTR_TAG_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_int_tag>();
    break;
  case SYS_INTR_SERVICE_HANDLE_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_int_serv>();
    break;
  case SYS_EVENT_QUEUE_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_event_queue>();
    break;
  case SYS_EVENT_PORT_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_event_port>();
    break;
  case SYS_TRACE_OBJECT:
    // Not implemented: report zero objects
    sys_process.error(
        "sys_process_get_number_of_object: object = SYS_TRACE_OBJECT");
    *nump = 0;
    break;
  case SYS_SPUIMAGE_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_spu_image>();
    break;
  case SYS_PRX_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_prx>();
    break;
  case SYS_SPUPORT_OBJECT:
    // Not implemented: report zero objects
    sys_process.error(
        "sys_process_get_number_of_object: object = SYS_SPUPORT_OBJECT");
    *nump = 0;
    break;
  case SYS_OVERLAY_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_overlay>();
    break;
  case SYS_LWMUTEX_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_lwmutex>();
    break;
  case SYS_TIMER_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_timer>();
    break;
  case SYS_SEMAPHORE_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_sema>();
    break;
  case SYS_FS_FD_OBJECT:
    *nump = idm_get_count<lv2_fs_object, lv2_fs_object>();
    break;
  case SYS_LWCOND_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_lwcond>();
    break;
  case SYS_EVENT_FLAG_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_event_flag>();
    break;
  default: {
    return CELL_EINVAL;
  }
  }
  return CELL_OK;
}
#include <set>
// Collects the ids of all live IDM objects of the given type into 'out'.
template <typename T, typename Get> void idm_get_set(std::set<u32> &out) {
  idm::select<T, Get>([&out](u32 id, Get & /*obj*/) { out.emplace(id); });
}
// Shared implementation of sys_process_get_id/get_id2: writes up to
// max(size, 1) ids of the requested object class into 'buffer' (ascending
// order via std::set) and reports the written count in *set_size.
// TRACE and SPUPORT requests are fatal here (callers must filter SPUPORT).
static error_code process_get_id(u32 object, vm::ptr<u32> buffer, u32 size,
                                 vm::ptr<u32> set_size) {
  std::set<u32> objects;
  switch (object) {
  case SYS_MEM_OBJECT:
    idm_get_set<lv2_obj, lv2_memory>(objects);
    break;
  case SYS_MUTEX_OBJECT:
    idm_get_set<lv2_obj, lv2_mutex>(objects);
    break;
  case SYS_COND_OBJECT:
    idm_get_set<lv2_obj, lv2_cond>(objects);
    break;
  case SYS_RWLOCK_OBJECT:
    idm_get_set<lv2_obj, lv2_rwlock>(objects);
    break;
  case SYS_INTR_TAG_OBJECT:
    idm_get_set<lv2_obj, lv2_int_tag>(objects);
    break;
  case SYS_INTR_SERVICE_HANDLE_OBJECT:
    idm_get_set<lv2_obj, lv2_int_serv>(objects);
    break;
  case SYS_EVENT_QUEUE_OBJECT:
    idm_get_set<lv2_obj, lv2_event_queue>(objects);
    break;
  case SYS_EVENT_PORT_OBJECT:
    idm_get_set<lv2_obj, lv2_event_port>(objects);
    break;
  case SYS_TRACE_OBJECT:
    fmt::throw_exception("SYS_TRACE_OBJECT");
  case SYS_SPUIMAGE_OBJECT:
    idm_get_set<lv2_obj, lv2_spu_image>(objects);
    break;
  case SYS_PRX_OBJECT:
    idm_get_set<lv2_obj, lv2_prx>(objects);
    break;
  case SYS_OVERLAY_OBJECT:
    idm_get_set<lv2_obj, lv2_overlay>(objects);
    break;
  case SYS_LWMUTEX_OBJECT:
    idm_get_set<lv2_obj, lv2_lwmutex>(objects);
    break;
  case SYS_TIMER_OBJECT:
    idm_get_set<lv2_obj, lv2_timer>(objects);
    break;
  case SYS_SEMAPHORE_OBJECT:
    idm_get_set<lv2_obj, lv2_sema>(objects);
    break;
  case SYS_FS_FD_OBJECT:
    idm_get_set<lv2_fs_object, lv2_fs_object>(objects);
    break;
  case SYS_LWCOND_OBJECT:
    idm_get_set<lv2_obj, lv2_lwcond>(objects);
    break;
  case SYS_EVENT_FLAG_OBJECT:
    idm_get_set<lv2_obj, lv2_event_flag>(objects);
    break;
  case SYS_SPUPORT_OBJECT:
    fmt::throw_exception("SYS_SPUPORT_OBJECT");
  default: {
    return CELL_EINVAL;
  }
  }
  u32 i = 0;
  // NOTE: Treats negative and 0 values as 1 due to signed checks and "do-while"
  // behavior of fw
  for (auto id = objects.begin();
       i < std::max<s32>(size, 1) + 0u && id != objects.end(); id++, i++) {
    buffer[i] = *id;
  }
  *set_size = i;
  return CELL_OK;
}
// Enumerates the ids of one kernel-object class into 'buffer'.
error_code sys_process_get_id(u32 object, vm::ptr<u32> buffer, u32 size,
                              vm::ptr<u32> set_size) {
  sys_process.error(
      "sys_process_get_id(object=0x%x, buffer=*0x%x, size=%d, set_size=*0x%x)",
      object, buffer, size, set_size);
  // SPU ports cannot be enumerated through this syscall
  if (object == SYS_SPUPORT_OBJECT) {
    return CELL_EINVAL;
  }
  return process_get_id(object, buffer, size, set_size);
}
// Root-only variant of sys_process_get_id.
error_code sys_process_get_id2(u32 object, vm::ptr<u32> buffer, u32 size,
                               vm::ptr<u32> set_size) {
  sys_process.error(
      "sys_process_get_id2(object=0x%x, buffer=*0x%x, size=%d, set_size=*0x%x)",
      object, buffer, size, set_size);
  // This syscall is more capable than sys_process_get_id but also needs a
  // root perm check
  if (!g_ps3_process_info.has_root_perm()) {
    return CELL_ENOSYS;
  }
  return process_get_id(object, buffer, size, set_size);
}
// Validate that 'addr' is usable as an SPU lock-line reservation address for
// the access rights requested in 'flags' (SYS_MEMORY_ACCESS_RIGHT_SPU_THR
// and/or SYS_MEMORY_ACCESS_RIGHT_RAW_SPU).
// Returns an empty CellError on success, CELL_EINVAL/CELL_EPERM otherwise.
CellError process_is_spu_lock_line_reservation_address(u32 addr, u64 flags) {
  // At least one SPU access-right flag must be set, and no other bits
  if (!flags || flags & ~(SYS_MEMORY_ACCESS_RIGHT_SPU_THR |
                          SYS_MEMORY_ACCESS_RIGHT_RAW_SPU)) {
    return CELL_EINVAL;
  }
  // TODO: respect sys_mmapper region's access rights
  // Classify the address by its 256MB region
  switch (addr >> 28) {
  case 0x0: // Main memory
  case 0x1: // Main memory
  case 0x2: // User 64k (sys_memory)
  case 0xc: // RSX Local memory
  case 0xe: // RawSPU MMIO
    break;
  case 0xf: // Private SPU MMIO
  {
    if (flags & SYS_MEMORY_ACCESS_RIGHT_RAW_SPU) {
      // Cannot be accessed by RawSPU
      return CELL_EPERM;
    }
    break;
  }
  case 0xd: // PPU Stack area
    return CELL_EPERM;
  default: {
    // Remaining regions: check for a sys_vm allocation covering this 256MB area
    if (auto vm0 = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr))) {
      // sys_vm area was not covering the address specified but made a
      // reservation on the entire 256mb region
      if (vm0->addr + vm0->size - 1 < addr) {
        return CELL_EINVAL;
      }
      // sys_vm memory is not allowed
      return CELL_EPERM;
    }
    // Address must at least belong to some mapped 256MB block
    if (!vm::get(vm::any, addr & -0x1000'0000)) {
      return CELL_EINVAL;
    }
    break;
  }
  }
  return {};
}
// Syscall wrapper: validates an SPU lock-line reservation address and maps
// the internal CellError result onto the syscall return value.
error_code sys_process_is_spu_lock_line_reservation_address(u32 addr,
                                                            u64 flags) {
  sys_process.warning("sys_process_is_spu_lock_line_reservation_address(addr="
                      "0x%x, flags=0x%llx)",
                      addr, flags);
  const CellError err =
      process_is_spu_lock_line_reservation_address(addr, flags);
  if (!err) {
    return CELL_OK;
  }
  return err;
}
// Fill the caller's 0x40-byte buffer with the current title ID.
// The ID is written starting at offset 1 (offset 0 remains zero) and is
// truncated to at most 9 characters; the rest of the buffer is zeroed.
error_code _sys_process_get_paramsfo(vm::ptr<char> buffer) {
  sys_process.warning("_sys_process_get_paramsfo(buffer=0x%x)", buffer);
  const auto &title_id = Emu.GetTitleID();
  if (title_id.empty()) {
    return CELL_ENOENT;
  }
  std::memset(buffer.get_ptr(), 0, 0x40);
  std::memcpy(buffer.get_ptr() + 1, title_id.c_str(),
              std::min<usz>(title_id.length(), 9));
  return CELL_OK;
}
// Write the SDK version of the process into 'ver'. The pid is currently
// ignored: only the emulated process exists, so g_ps3_process_info is used.
// Always returns CELL_OK.
s32 process_get_sdk_version(u32 /*pid*/, s32 &ver) {
  // get correct SDK version for selected pid
  ver = g_ps3_process_info.sdk_ver;
  return CELL_OK;
}
// Syscall wrapper: query the SDK version for 'pid' and store it at *version.
// Propagates the internal error code on failure.
error_code sys_process_get_sdk_version(u32 pid, vm::ptr<s32> version) {
  sys_process.warning("sys_process_get_sdk_version(pid=0x%x, version=*0x%x)",
                      pid, version);
  s32 sdk_ver{};
  if (const s32 rc = process_get_sdk_version(pid, sdk_ver); rc != CELL_OK) {
    return CellError{rc + 0u}; // error code
  }
  *version = sdk_ver;
  return CELL_OK;
}
// Stub: terminating another process is not implemented; reports success.
error_code sys_process_kill(u32 pid) {
  sys_process.todo("sys_process_kill(pid=0x%x)", pid);
  return CELL_OK;
}
// Stub: waiting for a child process is not implemented; reports success.
// Fix: the log format string was missing its closing parenthesis.
error_code sys_process_wait_for_child(u32 pid, vm::ptr<u32> status, u64 unk) {
  sys_process.todo(
      "sys_process_wait_for_child(pid=0x%x, status=*0x%x, unk=0x%llx)", pid,
      status, unk);
  return CELL_OK;
}
// Stub: semantics unknown (all parameters unidentified); reports success.
error_code sys_process_wait_for_child2(u64 unk1, u64 unk2, u64 unk3, u64 unk4,
                                       u64 unk5, u64 unk6) {
  sys_process.todo("sys_process_wait_for_child2(unk1=0x%llx, unk2=0x%llx, "
                   "unk3=0x%llx, unk4=0x%llx, unk5=0x%llx, unk6=0x%llx)",
                   unk1, unk2, unk3, unk4, unk5, unk6);
  return CELL_OK;
}
// Stub: process status query is not implemented; reports success.
error_code sys_process_get_status(u64 unk) {
  sys_process.todo("sys_process_get_status(unk=0x%llx)", unk);
  // vm::write32(CPU.gpr[4], GetPPUThreadStatus(CPU));
  return CELL_OK;
}
// Stub: detaching a child process is not implemented; reports success.
error_code sys_process_detach_child(u64 unk) {
  sys_process.todo("sys_process_detach_child(unk=0x%llx)", unk);
  return CELL_OK;
}
extern void signal_system_cache_can_stay();
// Process exit syscall: schedules emulator shutdown on the main (GUI) thread,
// then parks the calling PPU thread until the emulator stops it.
// arg2/arg3 are logged but otherwise unused here.
void _sys_process_exit(ppu_thread &ppu, s32 status, u32 arg2, u32 arg3) {
  ppu.state += cpu_flag::wait;
  sys_process.warning("_sys_process_exit(status=%d, arg2=0x%x, arg3=0x%x)",
                      status, arg2, arg3);
  // Emu.Kill() must run on the main thread
  Emu.CallFromMainThread([]() {
    sys_process.success("Process finished");
    signal_system_cache_can_stay();
    Emu.Kill();
  });
  // Wait for GUI thread
  while (auto state = +ppu.state) {
    if (is_stopped(state)) {
      break;
    }
    ppu.state.wait(state);
  }
}
// Process exit-and-respawn syscall. Reads the argv list, then the envp list
// (each null-terminated) from the guest parameter block, optionally captures
// the trailing 0x1000 bytes of the argument area, and either performs a plain
// exit (no argv) or hands off to lv2_exitspawn to boot the next executable.
void _sys_process_exit2(ppu_thread &ppu, s32 status,
                        vm::ptr<sys_exit2_param> arg, u32 arg_size, u32 arg4) {
  ppu.state += cpu_flag::wait;
  sys_process.warning(
      "_sys_process_exit2(status=%d, arg=*0x%x, arg_size=0x%x, arg4=0x%x)",
      status, arg, arg_size, arg4);
  auto pstr = +arg->args;
  std::vector<std::string> argv;
  std::vector<std::string> envp;
  // argv entries run until the first null pointer
  while (auto ptr = *pstr++) {
    argv.emplace_back(ptr.get_ptr());
    sys_process.notice(" *** arg: %s", ptr);
  }
  // envp entries follow the argv terminator, again until a null pointer
  while (auto ptr = *pstr++) {
    envp.emplace_back(ptr.get_ptr());
    sys_process.notice(" *** env: %s", ptr);
  }
  std::vector<u8> data;
  if (arg_size > 0x1030) {
    // Preserve the last 0x1000 bytes of the argument block for the next boot
    data.resize(0x1000);
    std::memcpy(data.data(), vm::base(arg.addr() + arg_size - 0x1000), 0x1000);
  }
  if (argv.empty()) {
    // Nothing to spawn: behave like a regular process exit
    return _sys_process_exit(ppu, status, 0, 0);
  }
  // TODO: set prio, flags
  lv2_exitspawn(ppu, argv, envp, data);
}
// Implements exitspawn: kills the current emulated process and boots the
// executable named by argv[0], carrying over argv/envp/data, the disc path,
// /dev_hdd1, NPDRM key and (unless this is a real reboot) the saved LV2
// memory containers. Runs on the main thread; the calling PPU thread is
// parked until the emulator stops it.
void lv2_exitspawn(ppu_thread &ppu, std::vector<std::string> &argv,
                   std::vector<std::string> &envp, std::vector<u8> &data) {
  ppu.state += cpu_flag::wait;
  // sys_sm_shutdown
  const bool is_real_reboot = (ppu.gpr[11] == 379);
  Emu.CallFromMainThread([is_real_reboot, argv = std::move(argv),
                          envp = std::move(envp),
                          data = std::move(data)]() mutable {
    sys_process.success("Process finished -> %s", argv[0]);
    // Resolve host paths while the old VFS mounts are still valid
    std::string disc;
    if (Emu.GetCat() == "DG" || Emu.GetCat() == "GD")
      disc = vfs::get("/dev_bdvd/");
    if (disc.empty() && !Emu.GetTitleID().empty())
      disc = vfs::get(Emu.GetDir());
    std::string path = vfs::get(argv[0]);
    std::string hdd1 = vfs::get("/dev_hdd1/");
    const u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
    using namespace id_manager;
    // Snapshot the LV2 memory container ID map so the next process can
    // restore it (skipped on a real reboot)
    shared_ptr<utils::serial> idm_capture = make_shared<utils::serial>();
    if (!is_real_reboot) {
      reader_lock rlock{id_manager::g_mutex};
      g_fxo->get<id_map<lv2_memory_container>>().save(*idm_capture);
      stx::serial_breathe_and_tag(*idm_capture, "id_map<lv2_memory_container>",
                                  false);
    }
    idm_capture->set_reading_state();
    // Deferred: re-initialize memory containers in the new process
    auto func = [is_real_reboot,
                 old_size = g_fxo->get<lv2_memory_container>().size,
                 idm_capture](u32 sdk_suggested_mem) mutable {
      if (is_real_reboot) {
        // Do not save containers on actual reboot
        ensure(g_fxo->init<id_map<lv2_memory_container>>());
      } else {
        // Save LV2 memory containers
        ensure(g_fxo->init<id_map<lv2_memory_container>>(*idm_capture));
      }
      // Empty the containers, accumulate their total size
      u32 total_size = 0;
      idm::select<lv2_memory_container>([&](u32, lv2_memory_container &ctr) {
        ctr.used = 0;
        total_size += ctr.size;
      });
      // The default memory container capacity can only decrease after exitspawn
      // 1. If newer SDK version suggests higher memory capacity - it is ignored
      // 2. If newer SDK version suggests lower memory capacity - it is lowered
      // And if 2. happens while user memory containers exist, the left space
      // can be spent on user memory containers
      ensure(g_fxo->init<lv2_memory_container>(
          std::min(old_size - total_size, sdk_suggested_mem) + total_size));
    };
    // Runs after Emu.Kill() has torn down the old process: boot the new one
    Emu.after_kill_callback = [func = std::move(func), argv = std::move(argv),
                               envp = std::move(envp), data = std::move(data),
                               disc = std::move(disc), path = std::move(path),
                               hdd1 = std::move(hdd1),
                               old_config = Emu.GetUsedConfig(),
                               klic]() mutable {
      Emu.argv = std::move(argv);
      Emu.envp = std::move(envp);
      Emu.data = std::move(data);
      Emu.disc = std::move(disc);
      Emu.hdd1 = std::move(hdd1);
      Emu.init_mem_containers = std::move(func);
      if (klic) {
        Emu.klic.emplace_back(klic);
      }
      Emu.SetForceBoot(true);
      auto res = Emu.BootGame(path, "", true, cfg_mode::continuous, old_config);
      if (res != game_boot_result::no_errors) {
        sys_process.fatal(
            "Failed to boot from exitspawn! (path=\"%s\", error=%s)", path,
            res);
      }
    };
    signal_system_cache_can_stay();
    // Make sure we keep the game window opened
    Emu.SetContinuousMode(true);
    Emu.Kill(false);
  });
  // Wait for GUI thread
  while (auto state = +ppu.state) {
    if (is_stopped(state)) {
      break;
    }
    ppu.state.wait(state);
  }
}
// Process exit syscall variant: forwards to _sys_process_exit with no extra
// arguments. Fix: the log message previously misnamed this function as
// "_sys_process_exit3".
void sys_process_exit3(ppu_thread &ppu, s32 status) {
  ppu.state += cpu_flag::wait;
  sys_process.warning("sys_process_exit3(status=%d)", status);
  return _sys_process_exit(ppu, status, 0, 0);
}
// Stub: spawning a SELF as a new process is not implemented; reports success.
// Fix: the log format string was missing its closing parenthesis.
error_code sys_process_spawns_a_self2(vm::ptr<u32> pid, u32 primary_prio,
                                      u64 flags, vm::ptr<void> stack,
                                      u32 stack_size, u32 mem_id,
                                      vm::ptr<void> param_sfo,
                                      vm::ptr<void> dbg_data) {
  sys_process.todo("sys_process_spawns_a_self2(pid=*0x%x, primary_prio=0x%x, "
                   "flags=0x%llx, stack=*0x%x, stack_size=0x%x, mem_id=0x%x, "
                   "param_sfo=*0x%x, dbg_data=*0x%x)",
                   pid, primary_prio, flags, stack, stack_size, mem_id,
                   param_sfo, dbg_data);
  return CELL_OK;
}
// ---------------------------------------------------------------------------
// NOTE(review): git web-diff viewer residue removed here ("File diff
// suppressed because it is too large / Load diff", "View file",
// "@@ -0,0 +1,989 @@"). The content below is the beginning of the former
// rpcs3/Emu/Cell/lv2/sys_rsx.cpp.
// ---------------------------------------------------------------------------
#include "stdafx.h"
#include "sys_rsx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/Memory/vm_locking.h"
#include "Emu/RSX/Core/RSXEngLock.hpp"
#include "Emu/RSX/Core/RSXReservationLock.hpp"
#include "Emu/RSX/RSXThread.h"
#include "Emu/System.h"
#include "sys_event.h"
#include "sys_vm.h"
#include "util/asm.hpp"
// Log channel for the RSX-related lv2 syscalls in this file
LOG_CHANNEL(sys_rsx);
// Unknown error code returned by sys_rsx_context_attribute
enum sys_rsx_error : s32 { SYS_RSX_CONTEXT_ATTRIBUTE_ERROR = -17 };
// Formatter specialization so sys_rsx_error values print by name in logs
template <>
void fmt_class_string<sys_rsx_error>::format(std::string &out, u64 arg) {
  format_enum(out, arg, [](auto error) {
    switch (error) { STR_CASE(SYS_RSX_CONTEXT_ATTRIBUTE_ERROR); }
    return unknown;
  });
}
// Current timestamp (timebase-derived) used for RSX driver info and reports
static u64 rsx_timeStamp() { return get_timebased_time(); }
// Publish a combined put/get value (put in the high 32 bits, get in the low)
// to the RSX thread via the new_get_put mailbox and wait until the RSX thread
// consumes it. If the calling CPU thread is exiting, the operation is marked
// for retry (cpu_flag::again) instead of blocking.
static void set_rsx_dmactl(rsx::thread *render, u64 get_put) {
  {
    rsx::eng_lock rlock(render);
    render->fifo_ctrl->abort();
    // Unconditional set
    while (!render->new_get_put.compare_and_swap_test(u64{umax}, get_put)) {
      // Wait for the first store to complete (or be aborted)
      if (auto cpu = cpu_thread::get_current()) {
        if (cpu->state & cpu_flag::exit) {
          // Retry
          cpu->state += cpu_flag::again;
          return;
        }
      }
      utils::pause();
    }
    // Schedule FIFO interrupt to deal with this immediately
    render->m_eng_interrupt_mask |= rsx::dma_control_interrupt;
  }
  if (auto cpu = cpu_thread::get_current()) {
    // Wait for the first store to complete (or be aborted)
    while (render->new_get_put != usz{umax}) {
      if (cpu->state & cpu_flag::exit) {
        // Try to withdraw our own pending value before retrying
        if (render->new_get_put.compare_and_swap_test(get_put, umax)) {
          // Retry
          cpu->state += cpu_flag::again;
          return;
        }
      }
      thread_ctrl::wait_for(1000);
    }
  }
}
// Deliver a GCM event to the guest via the RSX event port.
// Returns true on success (or when nothing was sent), false when the send was
// aborted (the flags are then parked in unsent_gcm_events for later delivery).
bool rsx::thread::send_event(u64 data1, u64 event_flags, u64 data3) {
  // Filter event bits, send them only if they are masked by gcm
  // Except the upper 32-bits, they are reserved for unmapped io events and
  // execute unconditionally
  event_flags &= vm::_ref<RsxDriverInfo>(driver_info).handlers | 0xffff'ffffull
                                                                     << 32;
  if (!event_flags) {
    // Nothing to do
    return true;
  }
  auto error = sys_event_port_send(rsx_event_port, data1, event_flags, data3);
  // Retry while the event queue is full
  while (error + 0u == CELL_EBUSY) {
    auto cpu = get_current_cpu_thread();
    if (cpu && cpu->get_class() == thread_class::ppu) {
      // Deschedule
      lv2_obj::sleep(*cpu, 100);
    }
    // Wait a bit before resending event
    thread_ctrl::wait_for(100);
    if (cpu && cpu->get_class() == thread_class::rsx)
      cpu->cpu_wait({});
    if (Emu.IsStopped() || (cpu && cpu->check_state())) {
      error = 0;
      break;
    }
    error = sys_event_port_send(rsx_event_port, data1, event_flags, data3);
  }
  if (error + 0u == CELL_EAGAIN) {
    // Thread has aborted when sending event (VBLANK duplicates are allowed)
    ensure((unsent_gcm_events.fetch_or(event_flags) & event_flags &
            ~(SYS_RSX_EVENT_VBLANK | SYS_RSX_EVENT_SECOND_VBLANK_BASE |
              SYS_RSX_EVENT_SECOND_VBLANK_BASE * 2)) == 0);
    return false;
  }
  // CELL_ENOTCONN (port not connected) is tolerated; anything else is fatal
  if (error && error + 0u != CELL_ENOTCONN) {
    fmt::throw_exception(
        "rsx::thread::send_event() Failed to send event! (error=%x)", +error);
  }
  return true;
}
// Stub: opening the RSX device has no emulator-side effect; reports success.
error_code sys_rsx_device_open(cpu_thread &cpu) {
  cpu.state += cpu_flag::wait;
  sys_rsx.todo("sys_rsx_device_open()");
  return CELL_OK;
}
// Stub: closing the RSX device has no emulator-side effect; reports success.
error_code sys_rsx_device_close(cpu_thread &cpu) {
  cpu.state += cpu_flag::wait;
  sys_rsx.todo("sys_rsx_device_close()");
  return CELL_OK;
}
/**
* lv2 SysCall 668 (0x29C): sys_rsx_memory_allocate
* @param mem_handle (OUT): Context / ID, which is used by sys_rsx_memory_free
* to free allocated memory.
* @param mem_addr (OUT): Returns the local memory base address, usually
* 0xC0000000.
* @param size (IN): Local memory size. E.g. 0x0F900000 (249 MB). (changes with
* sdk version)
* @param flags (IN): E.g. Immediate value passed in cellGcmSys is 8.
* @param a5 (IN): E.g. Immediate value passed in cellGcmSys is 0x00300000 (3
* MB?).
* @param a6 (IN): E.g. Immediate value passed in cellGcmSys is 16.
* @param a7 (IN): E.g. Immediate value passed in cellGcmSys is 8.
*/
// Allocate RSX local memory (see the doxygen comment above). Backs the local
// memory region with emulated video pages and publishes the size to the
// renderer (and to the driver info block, if a context already exists).
error_code sys_rsx_memory_allocate(cpu_thread &cpu, vm::ptr<u32> mem_handle,
                                   vm::ptr<u64> mem_addr, u32 size, u64 flags,
                                   u64 a5, u64 a6, u64 a7) {
  cpu.state += cpu_flag::wait;
  sys_rsx.warning("sys_rsx_memory_allocate(mem_handle=*0x%x, mem_addr=*0x%x, "
                  "size=0x%x, flags=0x%llx, a5=0x%llx, a6=0x%llx, a7=0x%llx)",
                  mem_handle, mem_addr, size, flags, a5, a6, a7);
  if (!vm::falloc(rsx::constants::local_mem_base, size, vm::video)) {
    return CELL_ENOMEM;
  }
  const auto render = rsx::get_current_renderer();
  render->local_mem_size = size;
  // Keep an already-published driver info block in sync with the new size
  if (const u32 addr = render->driver_info) {
    vm::_ref<RsxDriverInfo>(addr).memory_size = size;
  }
  *mem_addr = rsx::constants::local_mem_base;
  *mem_handle = 0x5a5a5a5b;
  return CELL_OK;
}
/**
* lv2 SysCall 669 (0x29D): sys_rsx_memory_free
* @param mem_handle (OUT): Context / ID, for allocated local memory generated
* by sys_rsx_memory_allocate
*/
// Free the RSX local memory region previously created by
// sys_rsx_memory_allocate. Fails with CELL_ENOMEM if no local memory is
// mapped; aborts if an RSX context is still using it.
error_code sys_rsx_memory_free(cpu_thread &cpu, u32 mem_handle) {
  cpu.state += cpu_flag::wait;
  sys_rsx.warning("sys_rsx_memory_free(mem_handle=0x%x)", mem_handle);
  if (!vm::check_addr(rsx::constants::local_mem_base)) {
    return CELL_ENOMEM;
  }
  // A live context (dma_address set) must be freed before its memory
  if (rsx::get_current_renderer()->dma_address) {
    fmt::throw_exception("Attempting to dealloc rsx memory when the context is "
                         "still being used");
  }
  if (!vm::dealloc(rsx::constants::local_mem_base)) {
    return CELL_ENOMEM;
  }
  return CELL_OK;
}
/**
* lv2 SysCall 670 (0x29E): sys_rsx_context_allocate
* @param context_id (OUT): RSX context, E.g. 0x55555555 (in vsh.self)
* @param lpar_dma_control (OUT): Control register area. E.g. 0x60100000 (in
* vsh.self)
* @param lpar_driver_info (OUT): RSX data like frequencies, sizes, version...
* E.g. 0x60200000 (in vsh.self)
* @param lpar_reports (OUT): Report data area. E.g. 0x60300000 (in vsh.self)
* @param mem_ctx (IN): mem_ctx given by sys_rsx_memory_allocate
* @param system_mode (IN):
*/
// Create the (single) RSX context: maps the 3MB control/driver-info/reports
// area, initializes the report and driver-info structures, creates the GCM
// event queue/port pair, and starts the renderer. See the doxygen comment
// above for parameter meanings.
error_code sys_rsx_context_allocate(cpu_thread &cpu, vm::ptr<u32> context_id,
                                    vm::ptr<u64> lpar_dma_control,
                                    vm::ptr<u64> lpar_driver_info,
                                    vm::ptr<u64> lpar_reports, u64 mem_ctx,
                                    u64 system_mode) {
  cpu.state += cpu_flag::wait;
  sys_rsx.warning("sys_rsx_context_allocate(context_id=*0x%x, "
                  "lpar_dma_control=*0x%x, lpar_driver_info=*0x%x, "
                  "lpar_reports=*0x%x, mem_ctx=0x%llx, system_mode=0x%llx)",
                  context_id, lpar_dma_control, lpar_driver_info, lpar_reports,
                  mem_ctx, system_mode);
  // Local memory must have been allocated first
  if (!vm::check_addr(rsx::constants::local_mem_base)) {
    return CELL_EINVAL;
  }
  const auto render = rsx::get_current_renderer();
  std::lock_guard lock(render->sys_rsx_mtx);
  if (render->dma_address) {
    // We currently do not support multiple contexts
    fmt::throw_exception("sys_rsx_context_allocate was called twice");
  }
  // Reserve a 0x300000 window: +0x0 DMA control, +0x100000 driver info,
  // +0x200000 reports
  const auto area = vm::reserve_map(vm::rsx_context, 0, 0x10000000, 0x403);
  const u32 dma_address = area ? area->alloc(0x300000) : 0;
  if (!dma_address) {
    return CELL_ENOMEM;
  }
  sys_rsx.warning("sys_rsx_context_allocate(): Mapped address 0x%x",
                  dma_address);
  *lpar_dma_control = dma_address;
  *lpar_driver_info = dma_address + 0x100000;
  *lpar_reports = dma_address + 0x200000;
  auto &reports = vm::_ref<RsxReports>(vm::cast(*lpar_reports));
  std::memset(&reports, 0, sizeof(RsxReports));
  for (usz i = 0; i < std::size(reports.notify); ++i)
    reports.notify[i].timestamp = -1;
  // Semaphores are initialized in groups of four with these magic values
  for (usz i = 0; i < std::size(reports.semaphore); i += 4) {
    reports.semaphore[i + 0].val.raw() = 0x1337C0D3;
    reports.semaphore[i + 1].val.raw() = 0x1337BABE;
    reports.semaphore[i + 2].val.raw() = 0x1337BEEF;
    reports.semaphore[i + 3].val.raw() = 0x1337F001;
  }
  for (usz i = 0; i < std::size(reports.report); ++i) {
    reports.report[i].val = 0;
    reports.report[i].timestamp = -1;
    reports.report[i].pad = -1;
  }
  auto &driverInfo = vm::_ref<RsxDriverInfo>(vm::cast(*lpar_driver_info));
  std::memset(&driverInfo, 0, sizeof(RsxDriverInfo));
  driverInfo.version_driver = 0x211;
  driverInfo.version_gpu = 0x5c;
  driverInfo.memory_size = render->local_mem_size;
  driverInfo.nvcore_frequency = 500000000; // 0x1DCD6500
  driverInfo.memory_frequency = 650000000; // 0x26BE3680
  driverInfo.reportsNotifyOffset = 0x1000;
  driverInfo.reportsOffset = 0;
  driverInfo.reportsReportOffset = 0x1400;
  driverInfo.systemModeFlags = static_cast<u32>(system_mode);
  driverInfo.hardware_channel = 1; // * i think* this 1 for games, 0 for vsh
  render->driver_info = vm::cast(*lpar_driver_info);
  auto &dmaControl = vm::_ref<RsxDmaControl>(vm::cast(*lpar_dma_control));
  dmaControl.get = 0;
  dmaControl.put = 0;
  dmaControl.ref = 0; // Set later to -1 by cellGcmSys
  if ((false /*system_mode & something*/ || g_cfg.video.decr_memory_layout) &&
      g_cfg.core.debug_console_mode)
    rsx::get_current_renderer()->main_mem_size = 0x20000000; // 512MB
  else
    rsx::get_current_renderer()->main_mem_size = 0x10000000; // 256MB
  vm::var<sys_event_queue_attribute_t, vm::page_allocator<>> attr;
  attr->protocol = SYS_SYNC_PRIORITY;
  attr->type = SYS_PPU_QUEUE;
  attr->name_u64 = 0;
  // NOTE(review): handler_queue is used as scratch storage here — the port id
  // is written into it first, copied to rsx_event_port, then overwritten by
  // the queue id before connecting the two. Confirm against the headers.
  sys_event_port_create(cpu, vm::get_addr(&driverInfo.handler_queue),
                        SYS_EVENT_PORT_LOCAL, 0);
  render->rsx_event_port = driverInfo.handler_queue;
  sys_event_queue_create(cpu, vm::get_addr(&driverInfo.handler_queue), attr, 0,
                         0x20);
  sys_event_port_connect_local(cpu, render->rsx_event_port,
                               driverInfo.handler_queue);
  render->display_buffers_count = 0;
  render->current_display_buffer = 0;
  render->label_addr = vm::cast(*lpar_reports);
  render->init(dma_address);
  *context_id = 0x55555555;
  return CELL_OK;
}
/**
* lv2 SysCall 671 (0x29F): sys_rsx_context_free
* @param context_id (IN): RSX context generated by sys_rsx_context_allocate to
* free the context.
*/
// Destroy the RSX context: stops the vblank and RSX threads, tears down the
// GCM event port/queue, clears renderer state and releases the context's
// 3MB control window.
error_code sys_rsx_context_free(ppu_thread &ppu, u32 context_id) {
  ppu.state += cpu_flag::wait;
  sys_rsx.todo("sys_rsx_context_free(context_id=0x%x)", context_id);
  const auto render = rsx::get_current_renderer();
  rsx::eng_lock fifo_lock(render);
  std::scoped_lock lock(render->sys_rsx_mtx);
  // Claim the context; dma_address == 0 means no context exists
  const u32 dma_address = render->dma_address;
  render->dma_address = 0;
  if (context_id != 0x55555555 || !dma_address ||
      render->state & cpu_flag::ret) {
    return CELL_EINVAL;
  }
  g_fxo->get<rsx::vblank_thread>() = thread_state::finished;
  // Read the queue id before stopping the thread that owns driver_info
  const u32 queue_id =
      vm::_ptr<RsxDriverInfo>(render->driver_info)->handler_queue;
  // Ask the RSX thread to return, then wait for it to acknowledge
  render->state += cpu_flag::ret;
  while (render->state & cpu_flag::ret) {
    thread_ctrl::wait_for(1000);
  }
  sys_event_port_disconnect(ppu, render->rsx_event_port);
  sys_event_port_destroy(ppu, render->rsx_event_port);
  sys_event_queue_destroy(ppu, queue_id, SYS_EVENT_QUEUE_DESTROY_FORCE);
  render->label_addr = 0;
  render->driver_info = 0;
  render->main_mem_size = 0;
  render->rsx_event_port = 0;
  render->display_buffers_count = 0;
  render->current_display_buffer = 0;
  render->ctrl = nullptr;
  render->rsx_thread_running = false;
  render->serialized = false;
  ensure(vm::get(vm::rsx_context)->dealloc(dma_address));
  return CELL_OK;
}
/**
* lv2 SysCall 672 (0x2A0): sys_rsx_context_iomap
* @param context_id (IN): RSX context, E.g. 0x55555555 (in vsh.self)
* @param io (IN): IO offset mapping area. E.g. 0x00600000
* @param ea (IN): Start address of mapping area. E.g. 0x20400000
* @param size (IN): Size of mapping area in bytes. E.g. 0x00200000
* @param flags (IN):
*/
// Map a range of main memory into the RSX IO address space (1MB granularity).
// Validates alignment, bounds and page state, then updates the bidirectional
// ea<->io tables. See the doxygen comment above for parameter meanings.
error_code sys_rsx_context_iomap(cpu_thread &cpu, u32 context_id, u32 io,
                                 u32 ea, u32 size, u64 flags) {
  cpu.state += cpu_flag::wait;
  sys_rsx.warning("sys_rsx_context_iomap(context_id=0x%x, io=0x%x, ea=0x%x, "
                  "size=0x%x, flags=0x%llx)",
                  context_id, io, ea, size, flags);
  const auto render = rsx::get_current_renderer();
  // io/ea/size must be 1MB-aligned and within main memory / io space limits
  if (!size || io & 0xFFFFF ||
      ea + u64{size} > rsx::constants::local_mem_base || ea & 0xFFFFF ||
      size & 0xFFFFF || context_id != 0x55555555 ||
      render->main_mem_size < io + u64{size}) {
    return CELL_EINVAL;
  }
  if (!render->is_fifo_idle()) {
    sys_rsx.warning(
        "sys_rsx_context_iomap(): RSX is not idle while mapping io");
  }
  // Wait until we have no active RSX locks and reserve iomap for use. Must do
  // so before acquiring vm lock to avoid deadlocks
  rsx::reservation_lock<true> rsx_lock(ea, size);
  vm::writer_lock rlock;
  for (u32 addr = ea, end = ea + size; addr < end; addr += 0x100000) {
    // Pages must be readable; above 0x20000000 they must also use 1MB pages
    if (!vm::check_addr(addr, vm::page_readable |
                                  (addr < 0x20000000 ? 0 : vm::page_1m_size))) {
      return CELL_EINVAL;
    }
    if ((addr == ea || !(addr % 0x1000'0000)) &&
        idm::check_unlocked<sys_vm_t>(sys_vm_t::find_id(addr))) {
      // Virtual memory is disallowed
      return CELL_EINVAL;
    }
  }
  // Work in 1MB units from here on
  io >>= 20, ea >>= 20, size >>= 20;
  rsx::eng_lock fifo_lock(render);
  std::scoped_lock lock(render->sys_rsx_mtx);
  for (u32 i = 0; i < size; i++) {
    auto &table = render->iomap_table;
    // TODO: Investigate relaxed memory ordering
    const u32 prev_ea = table.ea[io + i];
    table.ea[io + i].release((ea + i) << 20);
    if (prev_ea + 1)
      table.io[prev_ea >> 20].release(-1); // Clear previous mapping if exists
    table.io[ea + i].release((io + i) << 20);
  }
  return CELL_OK;
}
/**
* lv2 SysCall 673 (0x2A1): sys_rsx_context_iounmap
* @param context_id (IN): RSX context, E.g. 0x55555555 (in vsh.self)
* @param io (IN): IO address. E.g. 0x00600000 (Start page 6)
* @param size (IN): Size to unmap in byte. E.g. 0x00200000
*/
// Unmap a range of the RSX IO address space (1MB granularity), clearing both
// directions of the ea<->io tables. See the doxygen comment above.
error_code sys_rsx_context_iounmap(cpu_thread &cpu, u32 context_id, u32 io,
                                   u32 size) {
  cpu.state += cpu_flag::wait;
  sys_rsx.warning(
      "sys_rsx_context_iounmap(context_id=0x%x, io=0x%x, size=0x%x)",
      context_id, io, size);
  const auto render = rsx::get_current_renderer();
  // io/size must be 1MB-aligned and inside the io space
  if (!size || size & 0xFFFFF || io & 0xFFFFF || context_id != 0x55555555 ||
      render->main_mem_size < io + u64{size}) {
    return CELL_EINVAL;
  }
  if (!render->is_fifo_idle()) {
    sys_rsx.warning(
        "sys_rsx_context_iounmap(): RSX is not idle while unmapping io");
  }
  vm::writer_lock rlock;
  std::scoped_lock lock(render->sys_rsx_mtx);
  // Walk 1MB entries; also clear the reverse (io) entry when one was mapped
  for (const u32 end = (io >>= 20) + (size >>= 20); io < end;) {
    auto &table = render->iomap_table;
    const u32 ea_entry = table.ea[io];
    table.ea[io++].release(-1);
    if (ea_entry + 1)
      table.io[ea_entry >> 20].release(-1);
  }
  return CELL_OK;
}
/**
* lv2 SysCall 674 (0x2A2): sys_rsx_context_attribute
* @param context_id (IN): RSX context, e.g. 0x55555555
* @param package_id (IN):
* @param a3 (IN):
* @param a4 (IN):
* @param a5 (IN):
* @param a6 (IN):
*/
// Implementation of lv2 SysCall 674 (see doxygen comment above): dispatches
// on package_id to configure the single RSX context — FIFO registers, display
// sync/flip/buffers, tiles, zcull — plus emulator-internal "hack" packages
// (0xFEC flip event, 0xFED vblank, 0xFEF user command) that deliver events
// which real hardware would raise through lv1 interrupts.
error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3,
                                     u64 a4, u64 a5, u64 a6) {
  if (auto cpu = get_current_cpu_thread()) {
    cpu->state += cpu_flag::wait;
  }
  // Flip/queue/reset flip/flip event/user command/vblank as trace to help with
  // log spam
  const bool trace_log =
      (package_id == 0x102 || package_id == 0x103 || package_id == 0x10a ||
       package_id == 0xFEC || package_id == 0xFED || package_id == 0xFEF);
  (trace_log ? sys_rsx.trace : sys_rsx.warning)(
      "sys_rsx_context_attribute(context_id=0x%x, package_id=0x%x, a3=0x%llx, "
      "a4=0x%llx, a5=0x%llx, a6=0x%llx)",
      context_id, package_id, a3, a4, a5, a6);
  // todo: these event ports probly 'shouldnt' be here as i think its supposed
  // to be interrupts that are sent from rsx somewhere in lv1
  const auto render = rsx::get_current_renderer();
  if (!render->dma_address) {
    return {CELL_EINVAL, "dma_address is 0"};
  }
  if (context_id != 0x55555555) {
    return {CELL_EINVAL, "context_id is 0x%x", context_id};
  }
  auto &driverInfo = vm::_ref<RsxDriverInfo>(render->driver_info);
  switch (package_id) {
  case 0x001: // FIFO
  {
    // Pack get/put into a single 64-bit value for the RSX thread mailbox
    const u64 get = static_cast<u32>(a3);
    const u64 put = static_cast<u32>(a4);
    const u64 get_put = put << 32 | get;
    std::lock_guard lock(render->sys_rsx_mtx);
    set_rsx_dmactl(render, get_put);
    break;
  }
  case 0x100: // Display mode set
    break;
  case 0x101: // Display sync set, cellGcmSetFlipMode
    // a4 == 2 is vsync, a4 == 1 is hsync
    render->requested_vsync.store(a4 == 2);
    break;
  case 0x102: // Display flip
  {
    u32 flip_idx = ~0u;
    // high bit signifys grabbing a queued buffer
    // otherwise it contains a display buffer offset
    if ((a4 & 0x80000000) != 0) {
      // NOTE: There currently seem to only be 2 active heads on PS3
      ensure(a3 < 2);
      // last half byte gives buffer, 0xf seems to trigger just last queued
      u8 idx_check = a4 & 0xf;
      if (idx_check > 7)
        flip_idx = driverInfo.head[a3].lastQueuedBufferId;
      else
        flip_idx = idx_check;
      // fyi -- u32 hardware_channel = (a4 >> 8) & 0xFF;
      // sanity check, the head should have a 'queued' buffer on it, and it
      // should have been previously 'queued'
      const u32 sanity_check = 0x40000000 & (1 << flip_idx);
      if ((driverInfo.head[a3].flipFlags & sanity_check) != sanity_check)
        rsx_log.error(
            "Display Flip Queued: Flipping non previously queued buffer 0x%llx",
            a4);
    } else {
      // Resolve the display buffer index from the given offset
      for (u32 i = 0; i < render->display_buffers_count; ++i) {
        if (render->display_buffers[i].offset == a4) {
          flip_idx = i;
          break;
        }
      }
      if (flip_idx == ~0u) {
        rsx_log.error("Display Flip: Couldn't find display buffer offset, "
                      "flipping 0. Offset: 0x%x",
                      a4);
        flip_idx = 0;
      }
    }
    if (!render->request_emu_flip(flip_idx)) {
      // Flip request rejected: mark the calling thread for retry
      if (auto cpu = get_current_cpu_thread()) {
        cpu->state += cpu_flag::exit;
        cpu->state += cpu_flag::again;
      }
      return {};
    }
    break;
  }
  case 0x103: // Display Queue
  {
    // NOTE: There currently seem to only be 2 active heads on PS3
    ensure(a3 < 2);
    driverInfo.head[a3].lastQueuedBufferId = static_cast<u32>(a4);
    driverInfo.head[a3].flipFlags |= 0x40000000 | (1 << a4);
    render->on_frame_end(static_cast<u32>(a4));
    if (!render->send_event(0, SYS_RSX_EVENT_QUEUE_BASE << a3, 0)) {
      break;
    }
    if (g_cfg.video.frame_limit == frame_limit_type::infinite) {
      render->post_vblank_event(get_system_time());
    }
    break;
  }
  case 0x104: // Display buffer
  {
    const u8 id = a3 & 0xFF;
    if (id > 7) {
      return SYS_RSX_CONTEXT_ATTRIBUTE_ERROR;
    }
    std::lock_guard lock(render->sys_rsx_mtx);
    // Note: no error checking is being done
    const u32 width = (a4 >> 32) & 0xFFFFFFFF;
    const u32 height = a4 & 0xFFFFFFFF;
    const u32 pitch = (a5 >> 32) & 0xFFFFFFFF;
    const u32 offset = a5 & 0xFFFFFFFF;
    render->display_buffers[id].width = width;
    render->display_buffers[id].height = height;
    render->display_buffers[id].pitch = pitch;
    render->display_buffers[id].offset = offset;
    render->display_buffers_count =
        std::max<u32>(id + 1, render->display_buffers_count);
    break;
  }
  case 0x105: // destroy buffer?
    break;
  case 0x106: // ? (Used by cellGcmInitPerfMon)
    break;
  case 0x108: // cellGcmSetVBlankFrequency, cellGcmSetSecondVFrequency
    // a4 == 3, CELL_GCM_DISPLAY_FREQUENCY_59_94HZ
    // a4 == 2, CELL_GCM_DISPLAY_FREQUENCY_SCANOUT
    // a4 == 4, CELL_GCM_DISPLAY_FREQUENCY_DISABLE
    if (a5 == 1u) {
      // This function resets vsync state to enabled
      render->requested_vsync = true;
      // TODO: Set vblank frequency
    } else if (ensure(a5 == 2u)) {
      // TODO: Implement its frequency as well
      render->enable_second_vhandler.store(a4 != 4);
    }
    break;
  case 0x10a: // ? Involved in managing flip status through
              // cellGcmResetFlipStatus
  {
    if (a3 > 7) {
      return SYS_RSX_CONTEXT_ATTRIBUTE_ERROR;
    }
    // NOTE: There currently seem to only be 2 active heads on PS3
    ensure(a3 < 2);
    // flipFlags = (flipFlags & mask a4) | set-bits a5, atomically
    driverInfo.head[a3].flipFlags.atomic_op([&](be_t<u32> &flipStatus) {
      flipStatus = (flipStatus & static_cast<u32>(a4)) | static_cast<u32>(a5);
    });
    break;
  }
  case 0x10D: // Called by cellGcmInitCursor
    break;
  case 0x300: // Tiles
  {
    // a4 high bits = ret.tile = (location + 1) | (bank << 4) | ((offset /
    // 0x10000) << 16) | (location << 31); a4 low bits = ret.limit = ((offset +
    // size - 1) / 0x10000) << 16 | (location << 31); a5 high bits = ret.pitch =
    // (pitch / 0x100) << 8; a5 low bits = ret.format = base | ((base + ((size -
    // 1) / 0x10000)) << 13) | (comp << 26) | (1 << 30);
    ensure(a3 < std::size(render->tiles));
    if (!render->is_fifo_idle()) {
      sys_rsx.warning(
          "sys_rsx_context_attribute(): RSX is not idle while setting tile");
    }
    auto &tile = render->tiles[a3];
    // Unpack the encoded tile description (see layout comment above)
    const u32 location = ((a4 >> 32) & 0x3) - 1;
    const u32 offset = ((((a4 >> 32) & 0x7FFFFFFF) >> 16) * 0x10000);
    const u32 size = ((((a4 & 0x7FFFFFFF) >> 16) + 1) * 0x10000) - offset;
    const u32 pitch = (((a5 >> 32) & 0xFFFFFFFF) >> 8) * 0x100;
    const u32 comp = ((a5 & 0xFFFFFFFF) >> 26) & 0xF;
    const u32 base = (a5 & 0xFFFFFFFF) & 0x7FF;
    // const u32 bank = (((a4 >> 32) & 0xFFFFFFFF) >> 4) & 0xF;
    const bool bound = ((a4 >> 32) & 0x3) != 0;
    const auto range = utils::address_range::start_length(offset, size);
    if (bound) {
      if (!size || !pitch) {
        return {CELL_EINVAL, "size or pitch are 0 (size=%d, pitch=%d)", size,
                pitch};
      }
      u32 limit = -1;
      switch (location) {
      case CELL_GCM_LOCATION_MAIN:
        limit = render->main_mem_size;
        break;
      case CELL_GCM_LOCATION_LOCAL:
        limit = render->local_mem_size;
        break;
      default:
        fmt::throw_exception("sys_rsx_context_attribute(): Unexpected location "
                             "value (location=0x%x)",
                             location);
      }
      if (!range.valid() || range.end >= limit) {
        return {CELL_EINVAL, "range invalid (valid=%d, end=%d, limit=%d)",
                range.valid(), range.end, limit};
      }
      // Hardcoded value in gcm
      ensure(a5 & (1 << 30));
    }
    std::lock_guard lock(render->sys_rsx_mtx);
    // When tile is going to be unbound, we can use it as a hint that the
    // address will no longer be used as a surface and can be
    // removed/invalidated Todo: There may be more checks such as
    // format/size/width can could be done
    if (tile.bound && !bound)
      render->notify_tile_unbound(static_cast<u32>(a3));
    if (location == CELL_GCM_LOCATION_MAIN && bound) {
      // Main-memory tiles require the whole range to be io-mapped
      vm::writer_lock rlock;
      for (u32 io = (offset >> 20), end = (range.end >> 20); io <= end; io++) {
        if (render->iomap_table.ea[io] == umax) {
          return {CELL_EINVAL, "iomap_table ea is umax"};
        }
      }
    }
    tile.location = location;
    tile.offset = offset;
    tile.size = size;
    tile.pitch = pitch;
    tile.comp = comp;
    tile.base = base;
    tile.bank = base;
    tile.bound = bound;
    break;
  }
  case 0x301: // Depth-buffer (Z-cull)
  {
    // a4 high = region = (1 << 0) | (zFormat << 4) | (aaFormat << 8);
    // a4 low = size = ((width >> 6) << 22) | ((height >> 6) << 6);
    // a5 high = start = cullStart&(~0xFFF);
    // a5 low = offset = offset;
    // a6 high = status0 = (zcullDir << 1) | (zcullFormat << 2) | ((sFunc & 0xF)
    // << 12) | (sRef << 16) | (sMask << 24); a6 low = status1 = (0x2000 << 0) |
    // (0x20 << 16);
    if (a3 >= std::size(render->zculls)) {
      return SYS_RSX_CONTEXT_ATTRIBUTE_ERROR;
    }
    if (!render->is_fifo_idle()) {
      sys_rsx.warning(
          "sys_rsx_context_attribute(): RSX is not idle while setting zcull");
    }
    const u32 width = ((a4 & 0xFFFFFFFF) >> 22) << 6;
    const u32 height = ((a4 & 0x0000FFFF) >> 6) << 6;
    const u32 cullStart = (a5 >> 32) & ~0xFFF;
    const u32 offset = (a5 & 0x0FFFFFFF);
    const bool bound = (a6 & 0xFFFFFFFF) != 0;
    if (bound) {
      const auto cull_range =
          utils::address_range::start_length(cullStart, width * height);
      // cullStart is an offset inside ZCULL RAM which is 3MB long, check bounds
      // width and height are not allowed to be zero (checked by range.valid())
      if (!cull_range.valid() || cull_range.end >= 3u << 20 ||
          offset >= render->local_mem_size) {
        return {CELL_EINVAL,
                "cull_range invalid (valid=%d, end=%d, offset=%d, "
                "local_mem_size=%d)",
                cull_range.valid(),
                cull_range.end,
                offset,
                render->local_mem_size};
      }
      if (a5 & 0xF0000000) {
        sys_rsx.warning("sys_rsx_context_attribute(): ZCULL offset greater "
                        "than 256MB (offset=0x%x)",
                        offset);
      }
      // Hardcoded values in gcm
      ensure(a4 & (1ull << 32));
      ensure((a6 & 0xFFFFFFFF) == 0u + ((0x2000 << 0) | (0x20 << 16)));
    }
    std::lock_guard lock(render->sys_rsx_mtx);
    auto &zcull = render->zculls[a3];
    zcull.zFormat = ((a4 >> 32) >> 4) & 0xF;
    zcull.aaFormat = ((a4 >> 32) >> 8) & 0xF;
    zcull.width = width;
    zcull.height = height;
    zcull.cullStart = cullStart;
    zcull.offset = offset;
    zcull.zcullDir = ((a6 >> 32) >> 1) & 0x1;
    zcull.zcullFormat = ((a6 >> 32) >> 2) & 0x3FF;
    zcull.sFunc = ((a6 >> 32) >> 12) & 0xF;
    zcull.sRef = ((a6 >> 32) >> 16) & 0xFF;
    zcull.sMask = ((a6 >> 32) >> 24) & 0xFF;
    zcull.bound = bound;
    break;
  }
  case 0x302: // something with zcull
    break;
  case 0x600: // Framebuffer setup
    break;
  case 0x601: // Framebuffer blit
    break;
  case 0x602: // Framebuffer blit sync
    break;
  case 0x603: // Framebuffer close
    break;
  case 0xFEC: // hack: flip event notification
  {
    // we only ever use head 1 for now
    driverInfo.head[1].flipFlags |= 0x80000000;
    driverInfo.head[1].lastFlipTime =
        rsx_timeStamp(); // should rsxthread set this?
    driverInfo.head[1].flipBufferId = static_cast<u32>(a3);
    // seems gcmSysWaitLabel uses this offset, so lets set it to 0 every flip
    // NOTE: Realhw resets 16 bytes of this semaphore for some reason
    vm::_ref<atomic_t<u128>>(render->label_addr + 0x10).store(u128{});
    render->send_event(0, SYS_RSX_EVENT_FLIP_BASE << 1, 0);
    break;
  }
  case 0xFED: // hack: vblank command
  {
    if (cpu_thread::get_current<ppu_thread>()) {
      // VBLANK/RSX thread only
      return {CELL_EINVAL, "wrong thread"};
    }
    // NOTE: There currently seem to only be 2 active heads on PS3
    ensure(a3 < 2);
    // todo: this is wrong and should be 'second' vblank handler and freq, but
    // since currently everything is reported as being 59.94, this should be
    // fine
    driverInfo.head[a3].lastSecondVTime.atomic_op([&](be_t<u64> &time) {
      a4 = std::max<u64>(a4, time + 1);
      time = a4;
    });
    // Time point is supplied in argument 4 (todo: convert it to MFTB rate and
    // use it)
    const u64 current_time = rsx_timeStamp();
    // Note: not atomic
    driverInfo.head[a3].lastVTimeLow = static_cast<u32>(current_time);
    driverInfo.head[a3].lastVTimeHigh = static_cast<u32>(current_time >> 32);
    driverInfo.head[a3].vBlankCount++;
    u64 event_flags = SYS_RSX_EVENT_VBLANK;
    if (render->enable_second_vhandler)
      event_flags |= SYS_RSX_EVENT_SECOND_VBLANK_BASE << a3; // second vhandler
    render->send_event(0, event_flags, 0);
    break;
  }
  case 0xFEF: // hack: user command
  {
    // 'custom' invalid package id for now
    // as i think we need custom lv1 interrupts to handle this accurately
    // this also should probly be set by rsxthread
    driverInfo.userCmdParam = static_cast<u32>(a4);
    render->send_event(0, SYS_RSX_EVENT_USER_CMD, 0);
    break;
  }
  default:
    return {CELL_EINVAL, "unsupported package id %d", package_id};
  }
  return CELL_OK;
}
/**
* lv2 SysCall 675 (0x2A3): sys_rsx_device_map
* @param a1 (OUT): rsx device map address : 0x40000000, 0x50000000.. 0xB0000000
* @param a2 (OUT): Unused
* @param dev_id (IN): An immediate value and always 8. (cellGcmInitPerfMon uses
* 11, 10, 9, 7, 12 successively).
*/
error_code sys_rsx_device_map(cpu_thread &cpu, vm::ptr<u64> dev_addr,
                              vm::ptr<u64> a2, u32 dev_id) {
  cpu.state += cpu_flag::wait;

  sys_rsx.warning("sys_rsx_device_map(dev_addr=*0x%x, a2=*0x%x, dev_id=0x%x)",
                  dev_addr, a2, dev_id);

  // Only device id 8 is supported here; other ids (used by
  // cellGcmInitPerfMon) would need lv1 emulation.
  if (dev_id != 8) {
    // TODO: lv1 related
    fmt::throw_exception("sys_rsx_device_map: Invalid dev_id %d", dev_id);
  }

  const auto render = rsx::get_current_renderer();

  std::scoped_lock lock(render->sys_rsx_mtx);

  // First call: reserve the RSX context area and allocate the device block
  if (!render->device_addr) {
    const auto area = vm::reserve_map(vm::rsx_context, 0, 0x10000000, 0x403);
    const u32 addr = area ? area->alloc(0x100000) : 0;

    if (!addr) {
      return CELL_ENOMEM;
    }

    sys_rsx.warning("sys_rsx_device_map(): Mapped address 0x%x", addr);

    *dev_addr = addr;
    render->device_addr = addr;
    return CELL_OK;
  }

  // Subsequent calls return the address mapped previously
  *dev_addr = render->device_addr;
  return CELL_OK;
}
/**
* lv2 SysCall 676 (0x2A4): sys_rsx_device_unmap
* @param dev_id (IN): An immediate value and always 8.
*/
error_code sys_rsx_device_unmap(cpu_thread &cpu, u32 dev_id) {
  cpu.state += cpu_flag::wait;

  // Stubbed: the block allocated by sys_rsx_device_map is never released
  sys_rsx.todo("sys_rsx_device_unmap(dev_id=0x%x)", dev_id);

  return CELL_OK;
}
/**
* lv2 SysCall 677 (0x2A5): sys_rsx_attribute
*/
error_code sys_rsx_attribute(cpu_thread &cpu, u32 packageId, u32 a2, u32 a3,
                             u32 a4, u32 a5) {
  cpu.state += cpu_flag::wait;

  // Stubbed: attribute packets are logged and otherwise ignored
  sys_rsx.warning(
      "sys_rsx_attribute(packageId=0x%x, a2=0x%x, a3=0x%x, a4=0x%x, a5=0x%x)",
      packageId, a2, a3, a4, a5);

  return CELL_OK;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,496 @@
#include "stdafx.h"
#include "sys_rwlock.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_rwlock);
// Deserialize a rwlock from a savestate: protocol, IPC key and name are
// consumed by member initializers, then the owner word is restored.
lv2_rwlock::lv2_rwlock(utils::serial &ar) : protocol(ar), key(ar), name(ar) {
  ar(owner);
}
// Savestate loader entry: construct the rwlock from the archive and hand it
// to the deferred-registration functor.
std::function<void(void *)> lv2_rwlock::load(utils::serial &ar) {
  return load_func(make_shared<lv2_rwlock>(exact_t<utils::serial &>(ar)));
}
// Serialize rwlock state in the same field order the constructor reads it.
void lv2_rwlock::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_sync);
  ar(protocol, key, name, owner);
}
// Create a reader/writer lock object and return its id to the guest.
error_code sys_rwlock_create(ppu_thread &ppu, vm::ptr<u32> rw_lock_id,
                             vm::ptr<sys_rwlock_attribute_t> attr) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.warning("sys_rwlock_create(rw_lock_id=*0x%x, attr=*0x%x)",
                     rw_lock_id, attr);

  // Both the output pointer and the attribute block must be valid
  if (!rw_lock_id || !attr) {
    return CELL_EFAULT;
  }

  // Snapshot the attributes from guest memory
  const auto attr_data = *attr;
  const u32 proto = attr_data.protocol;

  // Only FIFO and priority-ordered wait queues are supported
  if (proto != SYS_SYNC_FIFO && proto != SYS_SYNC_PRIORITY) {
    sys_rwlock.error("sys_rwlock_create(): unknown protocol (0x%x)", proto);
    return CELL_EINVAL;
  }

  const u64 key = lv2_obj::get_key(attr_data);

  const auto error =
      lv2_obj::create<lv2_rwlock>(attr_data.pshared, key, attr_data.flags, [&] {
        return make_shared<lv2_rwlock>(proto, key, attr_data.name_u64);
      });

  if (error) {
    return error;
  }

  ppu.check_state();
  *rw_lock_id = idm::last_id();
  return CELL_OK;
}
// Destroy a rwlock; fails with EBUSY while any reader or writer owns it.
error_code sys_rwlock_destroy(ppu_thread &ppu, u32 rw_lock_id) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.warning("sys_rwlock_destroy(rw_lock_id=0x%x)", rw_lock_id);

  // Withdraw the id only when the lock word is zero (unowned)
  const auto removed = idm::withdraw<lv2_obj, lv2_rwlock>(
      rw_lock_id, [](lv2_rwlock &rwlock) -> CellError {
        if (rwlock.owner) {
          return CELL_EBUSY;
        }

        lv2_obj::on_id_destroy(rwlock, rwlock.key);
        return {};
      });

  if (!removed) {
    return CELL_ESRCH;
  }

  if (removed.ret) {
    return removed.ret;
  }

  return CELL_OK;
}
// Acquire a read (shared) lock, sleeping up to 'timeout' microseconds.
// Lock word encoding (from the operations below and sys_rwlock_wlock):
// 0 = free, >0 = writer id << 1, each reader subtracts 2; bit 0 marks
// "waiters present".
error_code sys_rwlock_rlock(ppu_thread &ppu, u32 rw_lock_id, u64 timeout) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.trace("sys_rwlock_rlock(rw_lock_id=0x%x, timeout=0x%llx)",
                   rw_lock_id, timeout);

  const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(
      rw_lock_id, [&, notify = lv2_obj::notify_all_t()](lv2_rwlock &rwlock) {
        // Fast path: no writer and no wait bit, add a reader lock-free
        const s64 val = rwlock.owner;

        if (val <= 0 && !(val & 1)) {
          if (rwlock.owner.compare_and_swap_test(val, val - 2)) {
            return true;
          }
        }

        lv2_obj::prepare_for_sleep(ppu);

        // Slow path: retry under the mutex, setting the wait bit on failure
        std::lock_guard lock(rwlock.mutex);

        const s64 _old = rwlock.owner.fetch_op([&](s64 &val) {
          if (val <= 0 && !(val & 1)) {
            val -= 2;
          } else {
            val |= 1;
          }
        });

        if (_old > 0 || _old & 1) {
          // A writer owns the lock (or waiters exist): enqueue and sleep
          rwlock.sleep(ppu, timeout);
          lv2_obj::emplace(rwlock.rq, &ppu);
          return false;
        }

        return true;
      });

  if (!rwlock) {
    return CELL_ESRCH;
  }

  if (rwlock.ret) {
    // Acquired without waiting
    return CELL_OK;
  }

  ppu.gpr[3] = CELL_OK;

  // Wait loop: exits on signal, emulator stop, or timeout
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }

    if (is_stopped(state)) {
      // Savestate in progress: flag 'again' only if still queued
      std::lock_guard lock(rwlock->mutex);

      for (auto cpu = +rwlock->rq; cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }

      break;
    }

    // Short spin before sleeping in case the signal is imminent
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }

    if (ppu.state & cpu_flag::signal) {
      continue;
    }

    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }

        ppu.state += cpu_flag::wait;

        if (!atomic_storage<ppu_thread *>::load(rwlock->rq)) {
          // Waiters queue is empty, so the thread must have been signaled
          rwlock->mutex.lock_unlock();
          break;
        }

        std::lock_guard lock(rwlock->mutex);

        if (!rwlock->unqueue(rwlock->rq, &ppu)) {
          // Already dequeued by a signaller: treat as success
          break;
        }

        ppu.gpr[3] = CELL_ETIMEDOUT;
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }

  return not_an_error(ppu.gpr[3]);
}
// Try to acquire a read lock without blocking.
error_code sys_rwlock_tryrlock(ppu_thread &ppu, u32 rw_lock_id) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.trace("sys_rwlock_tryrlock(rw_lock_id=0x%x)", rw_lock_id);

  // Add a reader (owner -= 2) unless a writer holds the lock (owner > 0)
  // or the wait bit (bit 0) is set.
  const auto rwlock =
      idm::check<lv2_obj, lv2_rwlock>(rw_lock_id, [](lv2_rwlock &rwlock) {
        const auto [old_value, success] = rwlock.owner.fetch_op([](s64 &val) {
          if (val <= 0 && !(val & 1)) {
            val -= 2;
            return true;
          }

          return false;
        });

        static_cast<void>(old_value);
        return success;
      });

  if (!rwlock) {
    return CELL_ESRCH;
  }

  if (!rwlock.ret) {
    return not_an_error(CELL_EBUSY);
  }

  return CELL_OK;
}
// Release one read lock; wakes a pending writer when the last reader leaves.
error_code sys_rwlock_runlock(ppu_thread &ppu, u32 rw_lock_id) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.trace("sys_rwlock_runlock(rw_lock_id=0x%x)", rw_lock_id);

  const auto rwlock =
      idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [](lv2_rwlock &rwlock) {
        // Fast path: readers present and no wait bit, remove one reader
        const s64 val = rwlock.owner;

        if (val < 0 && !(val & 1)) {
          if (rwlock.owner.compare_and_swap_test(val, val + 2)) {
            return true;
          }
        }

        return false;
      });

  if (!rwlock) {
    return CELL_ESRCH;
  }

  lv2_obj::notify_all_t notify;

  if (rwlock.ret) {
    return CELL_OK;
  } else {
    // Slow path: waiters may exist, operate under the mutex
    std::lock_guard lock(rwlock->mutex);

    // Remove one reader
    const s64 _old = rwlock->owner.fetch_op([](s64 &val) {
      if (val < -1) {
        val += 2;
      }
    });

    if (_old >= 0) {
      // Caller does not hold a read lock
      return CELL_EPERM;
    }

    if (_old == -1) {
      // Last reader left (only the wait bit remains): pass to a writer
      if (const auto cpu =
              rwlock->schedule<ppu_thread>(rwlock->wq, rwlock->protocol)) {
        if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
          // Savestate in progress: retry the syscall later
          ppu.state += cpu_flag::again;
          return {};
        }

        rwlock->owner = cpu->id << 1 | !!rwlock->wq | !!rwlock->rq;

        rwlock->awake(cpu);
      } else {
        rwlock->owner = 0;

        ensure(!rwlock->rq);
      }
    }
  }

  return CELL_OK;
}
// Acquire the write (exclusive) lock, sleeping up to 'timeout' microseconds.
error_code sys_rwlock_wlock(ppu_thread &ppu, u32 rw_lock_id, u64 timeout) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.trace("sys_rwlock_wlock(rw_lock_id=0x%x, timeout=0x%llx)",
                   rw_lock_id, timeout);

  const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(
      rw_lock_id,
      [&, notify = lv2_obj::notify_all_t()](lv2_rwlock &rwlock) -> s64 {
        // Fast path: lock completely free, claim it lock-free
        const s64 val = rwlock.owner;

        if (val == 0) {
          if (rwlock.owner.compare_and_swap_test(0, ppu.id << 1)) {
            return 0;
          }
        } else if (val >> 1 == ppu.id) {
          // Recursive write-lock attempt by the current owner
          return val;
        }

        lv2_obj::prepare_for_sleep(ppu);

        std::lock_guard lock(rwlock.mutex);

        // Retry under the mutex; on failure set the wait bit and enqueue
        const s64 _old = rwlock.owner.fetch_op([&](s64 &val) {
          if (val == 0) {
            val = ppu.id << 1;
          } else {
            val |= 1;
          }
        });

        if (_old != 0) {
          rwlock.sleep(ppu, timeout);
          lv2_obj::emplace(rwlock.wq, &ppu);
        }

        return _old;
      });

  if (!rwlock) {
    return CELL_ESRCH;
  }

  if (rwlock.ret == 0) {
    // Acquired without waiting
    return CELL_OK;
  }

  if (rwlock.ret >> 1 == ppu.id) {
    return CELL_EDEADLK;
  }

  ppu.gpr[3] = CELL_OK;

  // Wait loop: exits on signal, emulator stop, or timeout
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }

    if (is_stopped(state)) {
      // Savestate in progress: flag 'again' only if still queued
      std::lock_guard lock(rwlock->mutex);

      for (auto cpu = +rwlock->wq; cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }

      break;
    }

    // Short spin before sleeping in case the signal is imminent
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }

    if (ppu.state & cpu_flag::signal) {
      continue;
    }

    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }

        std::lock_guard lock(rwlock->mutex);

        if (!rwlock->unqueue(rwlock->wq, &ppu)) {
          // Already dequeued by a signaller: treat as acquired
          break;
        }

        // If the last waiter quit the writer sleep queue, wake blocked readers
        if (rwlock->rq && !rwlock->wq && rwlock->owner < 0) {
          s64 size = 0;

          // Protocol doesn't matter here since they are all enqueued anyways
          while (auto cpu =
                     rwlock->schedule<ppu_thread>(rwlock->rq, SYS_SYNC_FIFO)) {
            size++;
            rwlock->append(cpu);
          }

          rwlock->owner.atomic_op([&](s64 &owner) {
            owner -= 2 * size; // Add readers to value
            owner &= -2;       // Clear wait bit
          });

          lv2_obj::awake_all();
        } else if (!rwlock->rq && !rwlock->wq) {
          // No waiters remain at all: clear the wait bit
          rwlock->owner &= -2;
        }

        ppu.gpr[3] = CELL_ETIMEDOUT;
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }

  return not_an_error(ppu.gpr[3]);
}
// Try to acquire the write lock without blocking.
error_code sys_rwlock_trywlock(ppu_thread &ppu, u32 rw_lock_id) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.trace("sys_rwlock_trywlock(rw_lock_id=0x%x)", rw_lock_id);

  // Claim write ownership only when the lock word is completely free
  const auto rwlock =
      idm::check<lv2_obj, lv2_rwlock>(rw_lock_id, [&](lv2_rwlock &rwlock) {
        const s64 observed = rwlock.owner;

        if (observed) {
          // Already owned: report the observed value
          return observed;
        }

        // CAS returns the old value: 0 on success, the current owner otherwise
        return rwlock.owner.compare_and_swap(0, ppu.id << 1);
      });

  if (!rwlock) {
    return CELL_ESRCH;
  }

  if (rwlock.ret != 0) {
    // Recursive write locking is reported as deadlock
    if (rwlock.ret >> 1 == ppu.id) {
      return CELL_EDEADLK;
    }

    return not_an_error(CELL_EBUSY);
  }

  return CELL_OK;
}
// Release the write lock; passes ownership to a queued writer or wakes all
// queued readers.
error_code sys_rwlock_wunlock(ppu_thread &ppu, u32 rw_lock_id) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.trace("sys_rwlock_wunlock(rw_lock_id=0x%x)", rw_lock_id);

  const auto rwlock =
      idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [&](lv2_rwlock &rwlock) {
        const s64 val = rwlock.owner;

        // Return previous value
        return val != ppu.id << 1 ? val : rwlock.owner.compare_and_swap(val, 0);
      });

  if (!rwlock) {
    return CELL_ESRCH;
  }

  if (rwlock.ret >> 1 != ppu.id) {
    // Caller is not the write owner
    return CELL_EPERM;
  }

  // Wait bit set: waiters must be handed off under the mutex
  if (lv2_obj::notify_all_t notify; rwlock.ret & 1) {
    std::lock_guard lock(rwlock->mutex);

    if (auto cpu = rwlock->schedule<ppu_thread>(rwlock->wq, rwlock->protocol)) {
      if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
        // Savestate in progress: retry the syscall later
        ppu.state += cpu_flag::again;
        return {};
      }

      // Transfer ownership to the next writer
      rwlock->owner = cpu->id << 1 | !!rwlock->wq | !!rwlock->rq;

      rwlock->awake(cpu);
    } else if (rwlock->rq) {
      for (auto cpu = +rwlock->rq; cpu; cpu = cpu->next_cpu) {
        if (cpu->state & cpu_flag::again) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }

      s64 size = 0;

      // Protocol doesn't matter here since they are all enqueued anyways
      while (auto cpu =
                 rwlock->schedule<ppu_thread>(rwlock->rq, SYS_SYNC_FIFO)) {
        size++;
        rwlock->append(cpu);
      }

      // Store a negative reader count for all woken readers
      rwlock->owner.release(-2 * static_cast<s64>(size));
      lv2_obj::awake_all();
    } else {
      rwlock->owner = 0;
    }
  }

  return CELL_OK;
}

View file

@ -0,0 +1,318 @@
#include "stdafx.h"
#include "sys_semaphore.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_semaphore);
// Deserialize a semaphore from a savestate: protocol, IPC key, name and
// maximum are consumed by member initializers, then the current count.
lv2_sema::lv2_sema(utils::serial &ar)
    : protocol(ar), key(ar), name(ar), max(ar) {
  ar(val);
}
// Savestate loader entry: construct the semaphore from the archive and hand
// it to the deferred-registration functor.
std::function<void(void *)> lv2_sema::load(utils::serial &ar) {
  return load_func(make_shared<lv2_sema>(exact_t<utils::serial &>(ar)));
}
// Serialize semaphore state; a negative in-memory count (which encodes
// sleeping waiters) is clamped to zero on save.
void lv2_sema::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_sync);
  ar(protocol, key, name, max, std::max<s32>(+val, 0));
}
// Create a counting semaphore and return its id to the guest.
error_code sys_semaphore_create(ppu_thread &ppu, vm::ptr<u32> sem_id,
                                vm::ptr<sys_semaphore_attribute_t> attr,
                                s32 initial_val, s32 max_val) {
  ppu.state += cpu_flag::wait;

  sys_semaphore.trace("sys_semaphore_create(sem_id=*0x%x, attr=*0x%x, "
                      "initial_val=%d, max_val=%d)",
                      sem_id, attr, initial_val, max_val);

  // Both the output pointer and the attribute block must be valid
  if (!sem_id || !attr) {
    return CELL_EFAULT;
  }

  // The initial count must lie in [0, max_val] and max_val must be positive
  if (max_val <= 0 || initial_val > max_val || initial_val < 0) {
    sys_semaphore.error("sys_semaphore_create(): invalid parameters "
                        "(initial_val=%d, max_val=%d)",
                        initial_val, max_val);
    return CELL_EINVAL;
  }

  // Snapshot the attributes from guest memory
  const auto attr_data = *attr;
  const u32 proto = attr_data.protocol;

  // Only FIFO and priority-ordered wait queues are supported
  if (proto != SYS_SYNC_FIFO && proto != SYS_SYNC_PRIORITY) {
    sys_semaphore.error("sys_semaphore_create(): unknown protocol (0x%x)",
                        proto);
    return CELL_EINVAL;
  }

  const u64 key = lv2_obj::get_key(attr_data);

  if (key) {
    sys_semaphore.warning("sys_semaphore_create(sem_id=*0x%x, attr=*0x%x, "
                          "initial_val=%d, max_val=%d): IPC=0x%016x",
                          sem_id, attr, initial_val, max_val, key);
  }

  const auto error =
      lv2_obj::create<lv2_sema>(attr_data.pshared, key, attr_data.flags, [&] {
        return make_shared<lv2_sema>(proto, key, attr_data.name_u64, max_val,
                                     initial_val);
      });

  if (error) {
    return error;
  }

  static_cast<void>(ppu.test_stopped());

  *sem_id = idm::last_id();
  return CELL_OK;
}
// Destroy a semaphore; fails with EBUSY while threads are waiting on it.
error_code sys_semaphore_destroy(ppu_thread &ppu, u32 sem_id) {
  ppu.state += cpu_flag::wait;

  sys_semaphore.trace("sys_semaphore_destroy(sem_id=0x%x)", sem_id);

  // A negative count means sleeping waiters: refuse destruction
  const auto removed = idm::withdraw<lv2_obj, lv2_sema>(
      sem_id, [](lv2_sema &sema) -> CellError {
        if (sema.val < 0) {
          return CELL_EBUSY;
        }

        lv2_obj::on_id_destroy(sema, sema.key);
        return {};
      });

  if (!removed) {
    return CELL_ESRCH;
  }

  if (removed->key) {
    sys_semaphore.warning("sys_semaphore_destroy(sem_id=0x%x): IPC=0x%016x",
                          sem_id, removed->key);
  }

  if (removed.ret) {
    return removed.ret;
  }

  return CELL_OK;
}
// Take one semaphore token, sleeping up to 'timeout' microseconds.
// A negative count encodes the number of sleeping waiters.
error_code sys_semaphore_wait(ppu_thread &ppu, u32 sem_id, u64 timeout) {
  ppu.state += cpu_flag::wait;

  sys_semaphore.trace("sys_semaphore_wait(sem_id=0x%x, timeout=0x%llx)", sem_id,
                      timeout);

  const auto sem = idm::get<lv2_obj, lv2_sema>(
      sem_id, [&, notify = lv2_obj::notify_all_t()](lv2_sema &sema) {
        // Fast path: tokens available, take one lock-free
        const s32 val = sema.val;

        if (val > 0) {
          if (sema.val.compare_and_swap_test(val, val - 1)) {
            return true;
          }
        }

        lv2_obj::prepare_for_sleep(ppu);

        std::lock_guard lock(sema.mutex);

        // Decrement unconditionally; a non-positive previous value means
        // this thread must sleep
        if (sema.val-- <= 0) {
          sema.sleep(ppu, timeout);
          lv2_obj::emplace(sema.sq, &ppu);
          return false;
        }

        return true;
      });

  if (!sem) {
    return CELL_ESRCH;
  }

  if (sem.ret) {
    // Acquired without waiting
    return CELL_OK;
  }

  ppu.gpr[3] = CELL_OK;

  // Wait loop: exits on signal, emulator stop, or timeout
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }

    if (is_stopped(state)) {
      // Savestate in progress: flag 'again' only if still queued
      std::lock_guard lock(sem->mutex);

      for (auto cpu = +sem->sq; cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }

      break;
    }

    // Short spin before sleeping in case the signal is imminent
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }

    if (ppu.state & cpu_flag::signal) {
      continue;
    }

    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }

        ppu.state += cpu_flag::wait;

        std::lock_guard lock(sem->mutex);

        if (!sem->unqueue(sem->sq, &ppu)) {
          // Already dequeued by a post: treat as success
          break;
        }

        // Undo this waiter's decrement (value must still be negative)
        ensure(0 > sem->val.fetch_op([](s32 &val) {
          if (val < 0) {
            val++;
          }
        }));

        ppu.gpr[3] = CELL_ETIMEDOUT;
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }

  return not_an_error(ppu.gpr[3]);
}
// Try to take one semaphore token without blocking.
error_code sys_semaphore_trywait(ppu_thread &ppu, u32 sem_id) {
  ppu.state += cpu_flag::wait;

  sys_semaphore.trace("sys_semaphore_trywait(sem_id=0x%x)", sem_id);

  // try_dec(0) atomically decrements the count only while it is above zero
  const auto sem = idm::check<lv2_obj, lv2_sema>(
      sem_id, [&](lv2_sema &sema) { return sema.val.try_dec(0); });

  if (!sem) {
    return CELL_ESRCH;
  }

  if (!sem.ret) {
    // No tokens available
    return not_an_error(CELL_EBUSY);
  }

  return CELL_OK;
}
// Add 'count' tokens to a semaphore and wake up to that many waiters.
error_code sys_semaphore_post(ppu_thread &ppu, u32 sem_id, s32 count) {
  ppu.state += cpu_flag::wait;

  sys_semaphore.trace("sys_semaphore_post(sem_id=0x%x, count=%d)", sem_id,
                      count);

  const auto sem = idm::get<lv2_obj, lv2_sema>(sem_id, [&](lv2_sema &sema) {
    // Fast path: no waiters and the new count fits below the maximum
    const s32 val = sema.val;

    if (val >= 0 && count > 0 && count <= sema.max - val) {
      if (sema.val.compare_and_swap_test(val, val + count)) {
        return true;
      }
    }

    return false;
  });

  if (!sem) {
    return CELL_ESRCH;
  }

  if (count <= 0) {
    return CELL_EINVAL;
  }

  lv2_obj::notify_all_t notify;

  if (sem.ret) {
    return CELL_OK;
  } else {
    std::lock_guard lock(sem->mutex);

    // Savestate in progress: retry the syscall if any waiter is paused
    for (auto cpu = +sem->sq; cpu; cpu = cpu->next_cpu) {
      if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
        ppu.state += cpu_flag::again;
        return {};
      }
    }

    // Add tokens, respecting the maximum (unsigned math avoids overflow)
    const auto [val, ok] = sem->val.fetch_op([&](s32 &val) {
      if (count + 0u <= sem->max + 0u - val) {
        val += count;
        return true;
      }

      return false;
    });

    if (!ok) {
      return not_an_error(CELL_EBUSY);
    }

    // Wake threads
    const s32 to_awake = std::min<s32>(-std::min<s32>(val, 0), count);

    for (s32 i = 0; i < to_awake; i++) {
      sem->append((ensure(sem->schedule<ppu_thread>(sem->sq, sem->protocol))));
    }

    if (to_awake > 0) {
      lv2_obj::awake_all();
    }
  }

  return CELL_OK;
}
// Report the current token count of a semaphore to the guest.
error_code sys_semaphore_get_value(ppu_thread &ppu, u32 sem_id,
                                   vm::ptr<s32> count) {
  ppu.state += cpu_flag::wait;

  sys_semaphore.trace("sys_semaphore_get_value(sem_id=0x%x, count=*0x%x)",
                      sem_id, count);

  // Negative internal values encode waiters; guests only observe >= 0
  const auto sema = idm::check<lv2_obj, lv2_sema>(
      sem_id, [](lv2_sema &sema) { return std::max<s32>(0, sema.val); });

  if (!sema) {
    return CELL_ESRCH;
  }

  // Note: the id is validated before the output pointer
  if (!count) {
    return CELL_EFAULT;
  }

  static_cast<void>(ppu.test_stopped());

  *count = sema.ret;
  return CELL_OK;
}

View file

@ -0,0 +1,123 @@
#include "stdafx.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "cellos/sys_process.h"
#include "sys_sm.h"
LOG_CHANNEL(sys_sm);
// Report fixed system-manager parameters to the guest.
error_code sys_sm_get_params(vm::ptr<u8> a, vm::ptr<u8> b, vm::ptr<u32> c,
                             vm::ptr<u64> d) {
  sys_sm.todo("sys_sm_get_params(a=*0x%x, b=*0x%x, c=*0x%x, d=*0x%x)", a, b, c,
              d);

  // Each output is validated and written in turn; a null pointer aborts
  // with EFAULT (earlier outputs may already have been written).
  if (!a) {
    return CELL_EFAULT;
  }
  *a = 0;

  if (!b) {
    return CELL_EFAULT;
  }
  *b = 0;

  if (!c) {
    return CELL_EFAULT;
  }
  *c = 0x200;

  if (!d) {
    return CELL_EFAULT;
  }
  *d = 7;

  return CELL_OK;
}
// Poll for an external system-manager event; outputs are zeroed and EAGAIN
// is returned since no events are emulated.
error_code sys_sm_get_ext_event2(vm::ptr<u64> a1, vm::ptr<u64> a2,
                                 vm::ptr<u64> a3, u64 a4) {
  // Fixed log format: a4 was printed twice (once with the pointer-style
  // "*0x%x"), the last specifier was malformed ("0x%xll") and the closing
  // parenthesis was missing, so specifiers and arguments did not match.
  sys_sm.todo("sys_sm_get_ext_event2(a1=*0x%x, a2=*0x%x, a3=*0x%x, a4=0x%llx)",
              a1, a2, a3, a4);

  if (a4 != 0 && a4 != 1) {
    return CELL_EINVAL;
  }

  // a1 == 7 - 'console too hot, restart'
  // a2 looks to be used if a1 is either 5 or 3?
  // a3 looks to be ignored in vsh
  if (!a1) {
    return CELL_EFAULT;
  }
  *a1 = 0;

  if (!a2) {
    return CELL_EFAULT;
  }
  *a2 = 0;

  if (!a3) {
    return CELL_EFAULT;
  }
  *a3 = 0;

  // eagain for no event
  return not_an_error(CELL_EAGAIN);
}
// Handle a guest shutdown/reboot request; requires root permissions.
error_code sys_sm_shutdown(ppu_thread &ppu, u16 op, vm::ptr<void> param,
                           u64 size) {
  ppu.state += cpu_flag::wait;

  sys_sm.success("sys_sm_shutdown(op=0x%x, param=*0x%x, size=0x%x)", op, param,
                 size);

  if (!g_ps3_process_info.has_root_perm()) {
    return CELL_ENOSYS;
  }

  switch (op) {
  // Shutdown: terminate the current process
  case 0x100:
  case 0x1100: {
    sys_sm.success("Received shutdown request from application");
    _sys_process_exit(ppu, 0, 0, 0);
    break;
  }
  // Reboot: respawn with the original argv/envp/data
  case 0x200:
  case 0x1200: {
    sys_sm.success("Received reboot request from application");
    lv2_exitspawn(ppu, Emu.argv, Emu.envp, Emu.data);
    break;
  }
  // LPAR operations are not emulated
  case 0x8201:
  case 0x8202:
  case 0x8204: {
    sys_sm.warning("Unsupported LPAR operation: 0x%x", op);
    return CELL_ENOTSUP;
  }
  default:
    return CELL_EINVAL;
  }

  return CELL_OK;
}
// Stubbed: shop/kiosk mode is not emulated.
error_code sys_sm_set_shop_mode(s32 mode) {
  sys_sm.todo("sys_sm_set_shop_mode(mode=0x%x)", mode);
  return CELL_OK;
}
// Stubbed: the console LEDs have no emulator equivalent.
error_code sys_sm_control_led(u8 led, u8 action) {
  sys_sm.todo("sys_sm_control_led(led=0x%x, action=0x%x)", led, action);
  return CELL_OK;
}
// Stubbed: the system buzzer is not emulated.
error_code sys_sm_ring_buzzer(u64 packet, u64 a1, u64 a2) {
  sys_sm.todo("sys_sm_ring_buzzer(packet=0x%x, a1=0x%x, a2=0x%x)", packet, a1,
              a2);
  return CELL_OK;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,531 @@
#include "stdafx.h"
#include "sys_ss.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/IdManager.h"
#include "Emu/system_config.h"
#include "sys_process.h"
#include "util/sysinfo.hpp"
#include <charconv>
#include <shared_mutex>
#include <unordered_set>
#ifdef _WIN32
#include <bcrypt.h>
#include <windows.h>
#endif
// Emulated backing state for the sys_ss update manager: caches the packed
// firmware version, a fake EEPROM map, and guest buffers allocated on
// behalf of update packets.
struct lv2_update_manager {
  lv2_update_manager() {
    std::string version_str = utils::get_firmware_version();

    // For example, 4.90 should be converted to 0x4900000000000
    std::erase(version_str, '.');

    // Fixed: std::from_chars succeeds when .ec == std::errc{}. The
    // comparison was inverted, which zeroed the version on a successful
    // parse and shifted an unparsed value on failure.
    if (std::from_chars(version_str.data(),
                        version_str.data() + version_str.size(),
                        system_sw_version, 16)
            .ec == std::errc{})
      system_sw_version <<= 40;
    else
      system_sw_version = 0;
  }

  lv2_update_manager(const lv2_update_manager &) = delete;
  lv2_update_manager &operator=(const lv2_update_manager &) = delete;
  ~lv2_update_manager() = default;

  // Firmware version packed into the high bits (e.g. 4.90 -> 0x4900000000000)
  u64 system_sw_version;

  std::unordered_map<u32, u8> eeprom_map // offset, value
      {
          // system language
          // *i think* this gives english
          {0x48C18, 0x00},
          {0x48C19, 0x00},
          {0x48C1A, 0x00},
          {0x48C1B, 0x01},
          // system language end
          // vsh target (seems it can be 0xFFFFFFFE, 0xFFFFFFFF, 0x00000001
          // default: 0x00000000 / vsh sets it to 0x00000000 on boot if it isn't
          // 0x00000000)
          {0x48C1C, 0x00},
          {0x48C1D, 0x00},
          {0x48C1E, 0x00},
          {0x48C1F, 0x00} // vsh target end
      };
  // Guards eeprom_map (shared for reads, exclusive for writes)
  mutable std::shared_mutex eeprom_mutex;

  // Addresses handed out by allocate(), so deallocate() only frees our own
  std::unordered_set<u32> malloc_set;
  mutable std::shared_mutex malloc_mutex;

  // return address
  u32 allocate(u32 size) {
    std::unique_lock unique_lock(malloc_mutex);

    if (const auto addr = vm::alloc(size, vm::main); addr) {
      malloc_set.emplace(addr);
      return addr;
    }

    return 0;
  }

  // return size
  u32 deallocate(u32 addr) {
    std::unique_lock unique_lock(malloc_mutex);

    if (malloc_set.count(addr)) {
      return vm::dealloc(addr, vm::main);
    }

    return 0;
  }
};
// Pretty-printer for sys_ss_rng_error values used by the logging framework.
template <>
void fmt_class_string<sys_ss_rng_error>::format(std::string &out, u64 arg) {
  format_enum(out, arg, [](auto error) {
    switch (error) {
      STR_CASE(SYS_SS_RNG_ERROR_INVALID_PKG);
      STR_CASE(SYS_SS_RNG_ERROR_ENOMEM);
      STR_CASE(SYS_SS_RNG_ERROR_EAGAIN);
      STR_CASE(SYS_SS_RNG_ERROR_EFAULT);
      STR_CASE(SYS_SS_RTC_ERROR_UNK);
    }

    return unknown;
  });
}
LOG_CHANNEL(sys_ss);
// Fill a guest buffer with cryptographically random bytes using the host
// OS RNG (BCryptGenRandom on Windows, /dev/urandom elsewhere).
error_code sys_ss_random_number_generator(u64 pkg_id, vm::ptr<void> buf,
                                          u64 size) {
  sys_ss.warning(
      "sys_ss_random_number_generator(pkg_id=%u, buf=*0x%x, size=0x%x)", pkg_id,
      buf, size);

  if (pkg_id != 2) {
    if (pkg_id == 1) {
      // Package 1 needs root permissions and returns a fixed 0x18-byte blob
      if (!g_ps3_process_info.has_root_perm()) {
        return CELL_ENOSYS;
      }

      sys_ss.todo("sys_ss_random_number_generator(): pkg_id=1");
      std::memset(buf.get_ptr(), 0, 0x18);
      return CELL_OK;
    }

    return SYS_SS_RNG_ERROR_INVALID_PKG;
  }

  // TODO
  if (size > 0x10000000) {
    return SYS_SS_RNG_ERROR_ENOMEM;
  }

  // Generate into a host-side scratch buffer first, then copy to guest memory.
  // (make_unique replaces the raw new[] used previously; the buffer is fully
  // overwritten before use either way.)
  const auto temp = std::make_unique<u8[]>(size);

#ifdef _WIN32
  if (auto ret = BCryptGenRandom(nullptr, temp.get(), static_cast<ULONG>(size),
                                 BCRYPT_USE_SYSTEM_PREFERRED_RNG)) {
    fmt::throw_exception(
        "sys_ss_random_number_generator(): BCryptGenRandom failed (0x%08x)",
        ret);
  }
#else
  fs::file rnd{"/dev/urandom"};

  if (!rnd || rnd.read(temp.get(), size) != size) {
    fmt::throw_exception("sys_ss_random_number_generator(): Failed to generate "
                         "pseudo-random numbers");
  }
#endif

  std::memcpy(buf.get_ptr(), temp.get(), size);
  return CELL_OK;
}
// Reports the program authority id from the running SELF to the guest.
error_code sys_ss_access_control_engine(u64 pkg_id, u64 a2, u64 a3) {
  sys_ss.success(
      "sys_ss_access_control_engine(pkg_id=0x%llx, a2=0x%llx, a3=0x%llx)",
      pkg_id, a2, a3);

  // Authority id is only known when a valid SELF header was parsed
  const u64 authid =
      g_ps3_process_info.self_info.valid
          ? g_ps3_process_info.self_info.prog_id_hdr.program_authority_id
          : 0;

  switch (pkg_id) {
  case 0x1: {
    // Query by pid: requires debug/root permissions
    if (!g_ps3_process_info.debug_or_root()) {
      return not_an_error(CELL_ENOSYS);
    }

    if (!a2) {
      return CELL_ESRCH;
    }

    // Only the current process is supported
    ensure(a2 == static_cast<u64>(process_getpid()));
    vm::write64(vm::cast(a3), authid);
    break;
  }
  case 0x2: {
    vm::write64(vm::cast(a2), authid);
    break;
  }
  case 0x3: {
    if (!g_ps3_process_info.debug_or_root()) {
      return CELL_ENOSYS;
    }

    break;
  }
  default:
    return 0x8001051du;
  }

  return CELL_OK;
}
// The console id is the same blob returned by appliance info code 0x19003
// (AIM_get_device_id), so delegate to that handler.
error_code sys_ss_get_console_id(vm::ptr<u8> buf) {
  sys_ss.notice("sys_ss_get_console_id(buf=*0x%x)", buf);

  return sys_ss_appliance_info_manager(0x19003, buf);
}
// Report the configured open PSID to the guest.
error_code sys_ss_get_open_psid(vm::ptr<CellSsOpenPSID> psid) {
  sys_ss.notice("sys_ss_get_open_psid(psid=*0x%x)", psid);

  // Guard against a null guest pointer, consistent with the other sys_ss
  // getters in this file (previously written through unconditionally).
  if (!psid) {
    return CELL_EFAULT;
  }

  // Values come from the emulator configuration
  psid->high = g_cfg.sys.console_psid_high;
  psid->low = g_cfg.sys.console_psid_low;
  return CELL_OK;
}
// Return identity blobs (product code, IDPS, PS code, open PSID) for the
// requested appliance info code. Requires root permissions.
error_code sys_ss_appliance_info_manager(u32 code, vm::ptr<u8> buffer) {
  sys_ss.notice("sys_ss_appliance_info_manager(code=0x%x, buffer=*0x%x)", code,
                buffer);

  if (!g_ps3_process_info.has_root_perm())
    return CELL_ENOSYS;

  if (!buffer)
    return CELL_EFAULT;

  switch (code) {
  case 0x19002: {
    // AIM_get_device_type
    constexpr u8 product_code[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                                   0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                                   0x00, 0x00, 0x00, 0x89};
    std::memcpy(buffer.get_ptr(), product_code, 16);

    if (g_cfg.core.debug_console_mode)
      buffer[15] = 0x81; // DECR

    break;
  }
  case 0x19003: {
    // AIM_get_device_id
    constexpr u8 idps[] = {0x00, 0x00, 0x00, 0x01, 0x00, 0x89, 0x00, 0x0B,
                           0x14, 0x00, 0xEF, 0xDD, 0xCA, 0x25, 0x52, 0x66};
    std::memcpy(buffer.get_ptr(), idps, 16);

    if (g_cfg.core.debug_console_mode) {
      buffer[5] = 0x81; // DECR
      buffer[7] = 0x09; // DECR-1400
    }

    break;
  }
  case 0x19004: {
    // AIM_get_ps_code
    constexpr u8 pscode[] = {0x00, 0x01, 0x00, 0x85, 0x00, 0x07, 0x00, 0x04};
    std::memcpy(buffer.get_ptr(), pscode, 8);
    break;
  }
  case 0x19005: {
    // AIM_get_open_ps_id
    be_t<u64> psid[2] = {+g_cfg.sys.console_psid_high,
                         +g_cfg.sys.console_psid_low};
    std::memcpy(buffer.get_ptr(), psid, 16);
    break;
  }
  case 0x19006: {
    // qa values (dex only) ??
    [[fallthrough]];
  }
  default:
    sys_ss.todo("sys_ss_appliance_info_manager(code=0x%x, buffer=*0x%x)", code,
                buffer);
  }

  return CELL_OK;
}
// Report the cached product-mode flag; see inline notes for why 0xFF.
error_code sys_ss_get_cache_of_product_mode(vm::ptr<u8> ptr) {
  sys_ss.todo("sys_ss_get_cache_of_product_mode(ptr=*0x%x)", ptr);

  if (!ptr) {
    return CELL_EINVAL;
  }

  // 0xff Happens when hypervisor call returns an error
  // 0 - disabled
  // 1 - enabled
  // except something segfaults when using 0, so error it is!
  *ptr = 0xFF;

  return CELL_OK;
}
error_code sys_ss_secure_rtc(u64 cmd, u64 a2, u64 a3, u64 a4) {
sys_ss.todo("sys_ss_secure_rtc(cmd=0x%llx, a2=0x%x, a3=0x%llx, a4=0x%llx)",
cmd, a2, a3, a4);
if (cmd == 0x3001) {
if (a3 != 0x20)
return 0x80010500; // bad packet id
return CELL_OK;
} else if (cmd == 0x3002) {
// Get time
if (a2 > 1)
return 0x80010500; // bad packet id
// a3 is actual output, not 100% sure, but best guess is its tb val
vm::write64(::narrow<u32>(a3), get_timebased_time());
// a4 is a pointer to status, non 0 on error
vm::write64(::narrow<u32>(a4), 0);
return CELL_OK;
} else if (cmd == 0x3003) {
return CELL_OK;
}
return 0x80010500; // bad packet id
}
// Report the cached flash-extension flag to the guest.
error_code sys_ss_get_cache_of_flash_ext_flag(vm::ptr<u64> flag) {
  sys_ss.todo("sys_ss_get_cache_of_flash_ext_flag(flag=*0x%x)", flag);

  if (!flag) {
    return CELL_EFAULT;
  }

  *flag = 0xFE; // nand vs nor from lsb
  return CELL_OK;
}
// Report the boot device identifier to the guest.
error_code sys_ss_get_boot_device(vm::ptr<u64> dev) {
  sys_ss.todo("sys_ss_get_boot_device(dev=*0x%x)", dev);

  if (!dev) {
    return CELL_EINVAL;
  }

  // Hardcoded device id 0x190 — NOTE(review): meaning unconfirmed
  *dev = 0x190;
  return CELL_OK;
}
// Dispatcher for update-manager packets: version queries, a fake EEPROM,
// and guest buffer management; most packets are accepted as no-ops.
// Requires root permissions.
error_code sys_ss_update_manager(u64 pkg_id, u64 a1, u64 a2, u64 a3, u64 a4,
                                 u64 a5, u64 a6) {
  sys_ss.notice("sys_ss_update_manager(pkg=0x%x, a1=0x%x, a2=0x%x, a3=0x%x, "
                "a4=0x%x, a5=0x%x, a6=0x%x)",
                pkg_id, a1, a2, a3, a4, a5, a6);

  if (!g_ps3_process_info.has_root_perm())
    return CELL_ENOSYS;

  auto &update_manager = g_fxo->get<lv2_update_manager>();

  switch (pkg_id) {
  case 0x6001: {
    // update package async
    break;
  }
  case 0x6002: {
    // inspect package async
    break;
  }
  case 0x6003: {
    // get installed package info
    [[maybe_unused]] const auto type = ::narrow<u32>(a1);
    const auto info_ptr = ::narrow<u32>(a2);

    if (!info_ptr)
      return CELL_EFAULT;

    vm::write64(info_ptr, update_manager.system_sw_version);
    break;
  }
  case 0x6004: {
    // get fix instruction
    break;
  }
  case 0x6005: {
    // extract package async
    break;
  }
  case 0x6006: {
    // get extract package
    break;
  }
  case 0x6007: {
    // get flash initialized
    break;
  }
  case 0x6008: {
    // set flash initialized
    break;
  }
  case 0x6009: {
    // get seed token
    break;
  }
  case 0x600A: {
    // set seed token
    break;
  }
  case 0x600B: {
    // read eeprom
    const auto offset = ::narrow<u32>(a1);
    const auto value_ptr = ::narrow<u32>(a2);

    if (!value_ptr)
      return CELL_EFAULT;

    // Shared lock: concurrent reads are fine
    std::shared_lock shared_lock(update_manager.eeprom_mutex);

    if (const auto iterator = update_manager.eeprom_map.find(offset);
        iterator != update_manager.eeprom_map.end())
      vm::write8(value_ptr, iterator->second);
    else
      vm::write8(value_ptr, 0xFF); // 0xFF if not set

    break;
  }
  case 0x600C: {
    // write eeprom
    const auto offset = ::narrow<u32>(a1);
    const auto value = ::narrow<u8>(a2);

    std::unique_lock unique_lock(update_manager.eeprom_mutex);

    if (value != 0xFF)
      update_manager.eeprom_map[offset] = value;
    else
      update_manager.eeprom_map.erase(offset); // 0xFF: unset

    break;
  }
  case 0x600D: {
    // get async status
    break;
  }
  case 0x600E: {
    // allocate buffer
    const auto size = ::narrow<u32>(a1);
    const auto addr_ptr = ::narrow<u32>(a2);

    if (!addr_ptr)
      return CELL_EFAULT;

    const auto addr = update_manager.allocate(size);

    if (!addr)
      return CELL_ENOMEM;

    vm::write32(addr_ptr, addr);
    break;
  }
  case 0x600F: {
    // release buffer
    const auto addr = ::narrow<u32>(a1);

    if (!update_manager.deallocate(addr))
      return CELL_ENOMEM;

    break;
  }
  case 0x6010: {
    // check integrity
    break;
  }
  case 0x6011: {
    // get applicable version
    const auto addr_ptr = ::narrow<u32>(a2);

    if (!addr_ptr)
      return CELL_EFAULT;

    vm::write64(addr_ptr, 0x30040ULL << 32); // 3.40
    break;
  }
  case 0x6012: {
    // allocate buffer from memory container
    [[maybe_unused]] const auto mem_ct = ::narrow<u32>(a1);
    const auto size = ::narrow<u32>(a2);
    const auto addr_ptr = ::narrow<u32>(a3);

    if (!addr_ptr)
      return CELL_EFAULT;

    const auto addr = update_manager.allocate(size);

    if (!addr)
      return CELL_ENOMEM;

    vm::write32(addr_ptr, addr);
    break;
  }
  case 0x6013: {
    // unknown
    break;
  }
  default: {
    sys_ss.error("sys_ss_update_manager(): invalid packet id 0x%x ", pkg_id);
    return CELL_EINVAL;
  }
  }

  return CELL_OK;
}
// Stubbed: virtual TRM requests are logged and reported as successful.
error_code sys_ss_virtual_trm_manager(u64 pkg_id, u64 a1, u64 a2, u64 a3,
                                      u64 a4) {
  sys_ss.todo("sys_ss_virtual_trm_manager(pkg=0x%llx, a1=0x%llx, a2=0x%llx, "
              "a3=0x%llx, a4=0x%llx)",
              pkg_id, a1, a2, a3, a4);
  return CELL_OK;
}
// Minimal EID (individual info) packet handling: size query and a stubbed
// read; all other packets are silently accepted.
error_code sys_ss_individual_info_manager(u64 pkg_id, u64 a2,
                                          vm::ptr<u64> out_size, u64 a4, u64 a5,
                                          u64 a6) {
  sys_ss.todo("sys_ss_individual_info_manager(pkg=0x%llx, a2=0x%llx, "
              "out_size=*0x%llx, a4=0x%llx, a5=0x%llx, a6=0x%llx)",
              pkg_id, a2, out_size, a4, a5, a6);

  switch (pkg_id) {
  // Get EID size
  case 0x17001: {
    *out_size = 0x100;
    break;
  }
  // Read EID
  case 0x17002: {
    // TODO
    vm::_ref<u64>(a5) = a4; // Write back size of buffer
    break;
  }
  default:
    break;
  }

  return CELL_OK;
}

View file

@ -0,0 +1,456 @@
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_event.h"
#include "sys_fs.h"
#include "util/shared_ptr.hpp"
#include "sys_storage.h"
LOG_CHANNEL(sys_storage);
namespace {
// File-local state for the storage syscalls.
struct storage_manager {
  // This is probably wrong and should be assigned per fd or something
  atomic_ptr<lv2_event_queue> asyncequeue;
};
} // namespace
// Opens a storage device and returns a new descriptor in *fd.
// No backing file is attached yet - the descriptor is purely virtual.
error_code sys_storage_open(u64 device, u64 mode, vm::ptr<u32> fd, u64 flags) {
  sys_storage.todo(
      "sys_storage_open(device=0x%x, mode=0x%x, fd=*0x%x, flags=0x%x)", device,
      mode, fd, flags);

  if (!device) {
    return CELL_ENOENT;
  }

  if (!fd) {
    return CELL_EFAULT;
  }

  // Device id with the per-device index byte (bits 32-39) cleared
  [[maybe_unused]] const u64 storage_id = device & 0xFFFFF00FFFFFFFF;

  const u32 id = idm::make<lv2_storage>(device, fs::file{}, mode, flags);

  if (!id) {
    return CELL_EAGAIN;
  }

  *fd = id;
  return CELL_OK;
}
// Closes a storage descriptor obtained from sys_storage_open.
error_code sys_storage_close(u32 fd) {
  sys_storage.todo("sys_storage_close(fd=0x%x)", fd);
  // NOTE(review): the result of idm::remove is ignored, so closing an invalid
  // fd still yields CELL_OK - confirm against real LV2 behavior.
  idm::remove<lv2_storage>(fd);
  return CELL_OK;
}
// Reads num_sectors * 0x200 bytes into bounce_buf from the backing file (if
// one is attached). Without a backing file the buffer is left zeroed and the
// full sector count is reported as read.
error_code sys_storage_read(u32 fd, u32 mode, u32 start_sector, u32 num_sectors,
                            vm::ptr<void> bounce_buf, vm::ptr<u32> sectors_read,
                            u64 flags) {
  sys_storage.todo(
      "sys_storage_read(fd=0x%x, mode=0x%x, start_sector=0x%x, "
      "num_sectors=0x%x, bounce_buf=*0x%x, sectors_read=*0x%x, flags=0x%x)",
      fd, mode, start_sector, num_sectors, bounce_buf, sectors_read, flags);
  if (!bounce_buf || !sectors_read) {
    return CELL_EFAULT;
  }
  // Zeroed before the descriptor lookup, so the guest buffer is cleared even
  // when the fd turns out to be invalid
  std::memset(bounce_buf.get_ptr(), 0, num_sectors * 0x200ull);
  const auto handle = idm::get_unlocked<lv2_storage>(fd);
  if (!handle) {
    return CELL_ESRCH;
  }
  if (handle->file) {
    // A fixed 0x200-byte sector size is assumed here
    handle->file.seek(start_sector * 0x200ull);
    const u64 size = num_sectors * 0x200ull;
    const u64 result = lv2_file::op_read(handle->file, bounce_buf, size);
    num_sectors = ::narrow<u32>(result / 0x200ull);
  }
  *sectors_read = num_sectors;
  return CELL_OK;
}
// Stub write: validates the arguments and descriptor, then claims the whole
// request was written without touching any backing storage.
error_code sys_storage_write(u32 fd, u32 mode, u32 start_sector,
                             u32 num_sectors, vm::ptr<void> data,
                             vm::ptr<u32> sectors_wrote, u64 flags) {
  sys_storage.todo(
      "sys_storage_write(fd=0x%x, mode=0x%x, start_sector=0x%x, "
      "num_sectors=0x%x, data=*=0x%x, sectors_wrote=*0x%x, flags=0x%llx)",
      fd, mode, start_sector, num_sectors, data, sectors_wrote, flags);

  if (!sectors_wrote) {
    return CELL_EFAULT;
  }

  // Only validate the descriptor; nothing is actually written back
  if (!idm::get_unlocked<lv2_storage>(fd)) {
    return CELL_ESRCH;
  }

  *sectors_wrote = num_sectors;
  return CELL_OK;
}
// Stub: forwards a raw device command to the storage backend (only logged).
// Fixed: the 'in' placeholder was "0x%," (missing conversion specifier),
// which desynchronized every following format argument in the log line.
error_code sys_storage_send_device_command(u32 dev_handle, u64 cmd,
                                           vm::ptr<void> in, u64 inlen,
                                           vm::ptr<void> out, u64 outlen) {
  sys_storage.todo("sys_storage_send_device_command(dev_handle=0x%x, "
                   "cmd=0x%llx, in=*0x%x, inlen=0x%x, out=*0x%x, outlen=0x%x)",
                   dev_handle, cmd, in, inlen, out, outlen);
  return CELL_OK;
}
// Registers the event queue used to signal async storage completions.
// Returns ESRCH when the queue id does not resolve.
error_code sys_storage_async_configure(u32 fd, u32 io_buf, u32 equeue_id,
                                       u32 unk) {
  sys_storage.todo("sys_storage_async_configure(fd=0x%x, io_buf=0x%x, "
                   "equeue_id=0x%x, unk=*0x%x)",
                   fd, io_buf, equeue_id, unk);

  auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_id);

  if (!queue) {
    return CELL_ESRCH;
  }

  // Remember the queue globally for later async notifications
  g_fxo->get<storage_manager>().asyncequeue.store(std::move(queue));
  return CELL_OK;
}
// Stub: immediately posts a completion event (source 0, 'unk' repeated as
// event data) to the queue registered by sys_storage_async_configure instead
// of executing the command asynchronously.
error_code sys_storage_async_send_device_command(u32 dev_handle, u64 cmd,
                                                 vm::ptr<void> in, u64 inlen,
                                                 vm::ptr<void> out, u64 outlen,
                                                 u64 unk) {
  sys_storage.todo(
      "sys_storage_async_send_device_command(dev_handle=0x%x, cmd=0x%llx, "
      "in=*0x%x, inlen=0x%x, out=*0x%x, outlen=0x%x, unk=0x%x)",
      dev_handle, cmd, in, inlen, out, outlen, unk);
  auto &manager = g_fxo->get<storage_manager>();
  if (auto q = manager.asyncequeue.load()) {
    q->send(0, unk, unk, unk);
  }
  return CELL_OK;
}
// Unimplemented async I/O stubs - always report success.
error_code sys_storage_async_read() {
  sys_storage.todo("sys_storage_async_read()");
  return CELL_OK;
}
error_code sys_storage_async_write() {
  sys_storage.todo("sys_storage_async_write()");
  return CELL_OK;
}
error_code sys_storage_async_cancel() {
  sys_storage.todo("sys_storage_async_cancel()");
  return CELL_OK;
}
// Reports geometry and status flags for a storage device id.
// Values are hardcoded from a kernel dump; unknown device ids only log an
// error and return CELL_OK with a zeroed buffer.
// Refactored: the per-branch copy-pasted field setup (name, sector_size,
// flags) is factored into one local helper; the reported values are unchanged.
error_code sys_storage_get_device_info(u64 device,
                                       vm::ptr<StorageDeviceInfo> buffer) {
  sys_storage.todo("sys_storage_get_device_info(device=0x%x, buffer=*0x%x)",
                   device, buffer);
  if (!buffer) {
    return CELL_EFAULT;
  }
  std::memset(buffer.get_ptr(), 0, sizeof(StorageDeviceInfo));

  // Storage type (per-device index byte, bits 32-39, cleared) and the index
  const u64 storage = device & 0xFFFFF00FFFFFFFF;
  const u32 dev_num = (device >> 32) & 0xFF;

  // Fill the fields common to all known devices. The name buffer was zeroed
  // above, so copying the 7 characters leaves it null-terminated.
  const auto fill_common = [&](u32 sector_size, u8 flag1) {
    static constexpr char label[] = "unnamed";
    std::memcpy(buffer->name, label, sizeof(label) - 1);
    buffer->sector_size = sector_size;
    buffer->one = 1;
    buffer->flags[1] = flag1; // NOTE(review): flag semantics unknown - confirm
    buffer->flags[2] = 1;
    buffer->flags[7] = 1;
  };

  if (storage == ATA_HDD) // dev_hdd?
  {
    if (dev_num > 2) {
      return not_an_error(-5);
    }
    fill_common(0x200, 1);
    // set partition size based on dev_num
    // stole these sizes from kernel dump, unknown if they are 100% correct
    // vsh reports only 2 partitions even though there is 3 sizes
    switch (dev_num) {
    case 0:
      buffer->sector_count = 0x2542EAB0; // possibly total size
      break;
    case 1:
      buffer->sector_count = 0x24FAEA98; // which makes this hdd0
      break;
    case 2:
      buffer->sector_count = 0x3FFFF8; // and this one hdd1
      break;
    }
  } else if (storage == BDVD_DRIVE) // dev_bdvd?
  {
    if (dev_num > 0) {
      return not_an_error(-5);
    }
    fill_common(0x800, 0);
    buffer->sector_count = 0x4D955;
  } else if (storage == USB_MASS_STORAGE_1(0)) {
    if (dev_num > 0) {
      return not_an_error(-5);
    }
    fill_common(0x200, 0);
    /*buffer->sector_count = 0x4D955;*/
  } else if (storage == NAND_FLASH) {
    if (dev_num > 6) {
      return not_an_error(-5);
    }
    fill_common(0x200, 1);
    // see ata_hdd for explanation
    switch (dev_num) {
    case 0:
      buffer->sector_count = 0x80000;
      break;
    case 1:
      buffer->sector_count = 0x75F8;
      break;
    case 2:
      buffer->sector_count = 0x63E00;
      break;
    case 3:
      buffer->sector_count = 0x8000;
      break;
    case 4:
      buffer->sector_count = 0x400;
      break;
    case 5:
      buffer->sector_count = 0x2000;
      break;
    case 6:
      buffer->sector_count = 0x200;
      break;
    }
  } else if (storage == NOR_FLASH) {
    if (dev_num > 3) {
      return not_an_error(-5);
    }
    fill_common(0x200, 0);
    // see ata_hdd for explanation
    switch (dev_num) {
    case 0:
      buffer->sector_count = 0x8000;
      break;
    case 1:
      buffer->sector_count = 0x77F8;
      break;
    case 2:
      buffer->sector_count = 0x100; // offset, 0x20000
      break;
    case 3:
      buffer->sector_count = 0x400;
      break;
    }
  } else if (storage == NAND_UNK) {
    if (dev_num > 1) {
      return not_an_error(-5);
    }
    fill_common(0x800, 0);
    // see ata_hdd for explanation
    if (dev_num == 0) {
      buffer->sector_count = 0x7FFFFFFF;
    }
  } else {
    sys_storage.error("sys_storage_get_device_info(device=0x%x, buffer=*0x%x)",
                      device, buffer);
  }
  return CELL_OK;
}
// Reports how many storage types and device ids sys_storage_report_devices
// can enumerate. Both counts are hardcoded.
error_code sys_storage_get_device_config(vm::ptr<u32> storages,
                                         vm::ptr<u32> devices) {
  sys_storage.todo(
      "sys_storage_get_device_config(storages=*0x%x, devices=*0x%x)", storages,
      devices);

  if (!storages) {
    return CELL_EFAULT;
  }

  *storages = 6;

  // Note: checked only after *storages was written, matching the original
  // control flow
  if (!devices) {
    return CELL_EFAULT;
  }

  *devices = 17;
  return CELL_OK;
}
// Copies 'devices' entries of the fixed device-id table, starting at 'start',
// into the guest array. Ids encode the storage type in the high bits and the
// device index in bits 32-39.
error_code sys_storage_report_devices(u32 storages, u32 start, u32 devices,
                                      vm::ptr<u64> device_ids) {
  sys_storage.todo("sys_storage_report_devices(storages=0x%x, start=0x%x, "
                   "devices=0x%x, device_ids=0x%x)",
                   storages, start, devices, device_ids);

  if (!device_ids) {
    return CELL_EFAULT;
  }

  // Fixed table of all 17 known device ids
  static constexpr std::array<u64, 0x11> all_devs = [] {
    std::array<u64, 0x11> ids{};
    ids[0] = 0x10300000000000A;
    for (u32 i = 0; i < 7; i++) {
      ids[i + 1] = 0x100000000000001 | (u64{i} << 32);
    }
    for (u32 i = 0; i < 3; i++) {
      ids[i + 8] = 0x101000000000007 | (u64{i} << 32);
    }
    ids[11] = 0x101000000000006;
    for (u32 i = 0; i < 4; i++) {
      ids[i + 12] = 0x100000000000004 | (u64{i} << 32);
    }
    ids[16] = 0x100000000000003;
    return ids;
  }();

  // Reject empty requests and any window that falls outside the table
  if (!devices || start >= all_devs.size() ||
      devices > all_devs.size() - start) {
    return CELL_EINVAL;
  }

  std::copy_n(all_devs.begin() + start, devices, device_ids.get_ptr());
  return CELL_OK;
}
// Stub: would attach an event queue for media insertion/removal notifications.
error_code sys_storage_configure_medium_event(u32 fd, u32 equeue_id, u32 c) {
  sys_storage.todo(
      "sys_storage_configure_medium_event(fd=0x%x, equeue_id=0x%x, c=0x%x)", fd,
      equeue_id, c);
  return CELL_OK;
}
error_code sys_storage_set_medium_polling_interval() {
  sys_storage.todo("sys_storage_set_medium_polling_interval()");
  return CELL_OK;
}
error_code sys_storage_create_region() {
  sys_storage.todo("sys_storage_create_region()");
  return CELL_OK;
}
error_code sys_storage_delete_region() {
  sys_storage.todo("sys_storage_delete_region()");
  return CELL_OK;
}
// Stub: raw device command pass-through; nothing is written to databuf or
// driver_status yet.
error_code sys_storage_execute_device_command(
    u32 fd, u64 cmd, vm::ptr<char> cmdbuf, u64 cmdbuf_size,
    vm::ptr<char> databuf, u64 databuf_size, vm::ptr<u32> driver_status) {
  sys_storage.todo("sys_storage_execute_device_command(fd=0x%x, cmd=0x%llx, "
                   "cmdbuf=*0x%x, cmdbuf_size=0x%llx, databuf=*0x%x, "
                   "databuf_size=0x%llx, driver_status=*0x%x)",
                   fd, cmd, cmdbuf, cmdbuf_size, databuf, databuf_size,
                   driver_status);
  // cmd == 2 is get device info,
  // databuf, first byte 0 == status ok?
  // byte 1, if < 0 , not ata device
  return CELL_OK;
}
error_code sys_storage_check_region_acl() {
  sys_storage.todo("sys_storage_check_region_acl()");
  return CELL_OK;
}
error_code sys_storage_set_region_acl() {
  sys_storage.todo("sys_storage_set_region_acl()");
  return CELL_OK;
}
error_code sys_storage_get_region_offset() {
  sys_storage.todo("sys_storage_get_region_offset()");
  return CELL_OK;
}
error_code sys_storage_set_emulated_speed() {
  sys_storage.todo("sys_storage_set_emulated_speed()");
  // todo: only debug kernel has this
  return CELL_ENOSYS;
}

View file

@ -0,0 +1,446 @@
#include "stdafx.h"
#include "sys_time.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/system_config.h"
#include "sys_process.h"
#include "util/tsc.hpp"
#include "util/sysinfo.hpp"
u64 g_timebase_offs{};
static u64 systemtime_offset;
#ifndef __linux__
#include "util/asm.hpp"
#endif
#ifdef _WIN32
#include <windows.h>
// Performance-counter calibration data, captured once at startup (Windows).
struct time_aux_info_t {
  u64 perf_freq;   // QueryPerformanceFrequency ticks per second
  u64 start_time;  // QueryPerformanceCounter value at startup
  u64 start_ftime; // time in 100ns units since Epoch
};
// Initialize time-related values
const auto s_time_aux_info = []() -> time_aux_info_t {
  LARGE_INTEGER freq;
  if (!QueryPerformanceFrequency(&freq)) {
    MessageBox(
        nullptr,
        L"Your hardware doesn't support a high-resolution performance counter",
        L"Error", MB_OK | MB_ICONERROR);
    return {};
  }
  LARGE_INTEGER start;
  QueryPerformanceCounter(&start); // get time in 1/perf_freq units from RDTSC
  FILETIME ftime;
  GetSystemTimeAsFileTime(
      &ftime); // get time in 100ns units since January 1, 1601 (UTC)
  time_aux_info_t result;
  result.perf_freq = freq.QuadPart;
  result.start_time = start.QuadPart;
  // Rebase from 1601 to the Unix epoch (offset in 100ns units)
  result.start_ftime =
      (ftime.dwLowDateTime | static_cast<u64>(ftime.dwHighDateTime) << 32) -
      116444736000000000;
  return result;
}();
#elif __APPLE__
// XXX only supports a single timer
#if !defined(HAVE_CLOCK_GETTIME)
#define TIMER_ABSTIME -1
// The opengroup spec isn't clear on the mapping from REALTIME to CALENDAR being
// appropriate or not.
// http://pubs.opengroup.org/onlinepubs/009695299/basedefs/time.h.html
#define CLOCK_REALTIME 1 // #define CALENDAR_CLOCK 1 from mach/clock_types.h
#define CLOCK_MONOTONIC 0 // #define SYSTEM_CLOCK 0
// the mach kernel uses struct mach_timespec, so struct timespec is loaded from
// <sys/_types/_timespec.h> for compatability struct timespec { time_t tv_sec;
// long tv_nsec; };
#include <mach/clock.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <sys/_types/_timespec.h>
#include <sys/types.h>
#undef CPU_STATE_MAX
#define MT_NANO (+1.0E-9)
#define MT_GIGA UINT64_C(1000000000)
// TODO create a list of timers,
static double mt_timebase = 0.0;
static u64 mt_timestart = 0;
// clock_gettime() shim implemented on top of Mach clock services (used when
// the platform lacks HAVE_CLOCK_GETTIME). TIMER_ABSTIME reads
// mach_absolute_time relative to the first call; other clock ids map to the
// corresponding Mach clock service.
static int clock_gettime(int clk_id, struct timespec *tp) {
  kern_return_t retval = KERN_SUCCESS;
  if (clk_id == TIMER_ABSTIME) {
    if (!mt_timestart) {
      // only one timer, initialized on the first call to the TIMER
      mach_timebase_info_data_t tb = {0};
      mach_timebase_info(&tb);
      mt_timebase = tb.numer;
      mt_timebase /= tb.denom;
      mt_timestart = mach_absolute_time();
    }
    double diff = (mach_absolute_time() - mt_timestart) * mt_timebase;
    tp->tv_sec = diff * MT_NANO;
    tp->tv_nsec = diff - (tp->tv_sec * MT_GIGA);
  } else // other clk_ids are mapped to the corresponding mach clock_service
  {
    clock_serv_t cclock;
    mach_timespec_t mts;
    host_get_clock_service(mach_host_self(), clk_id, &cclock);
    retval = clock_get_time(cclock, &mts);
    mach_port_deallocate(mach_task_self(), cclock);
    tp->tv_sec = mts.tv_sec;
    tp->tv_nsec = mts.tv_nsec;
  }
  return retval;
}
#endif
#endif
#ifndef _WIN32
#include <sys/time.h>
// Process start wall-clock time, captured once; also initializes timezone
// data via tzset() for the later localtime_r calls in sys_time_get_timezone.
static struct timespec start_time = []() {
  struct timespec ts;
  if (::clock_gettime(CLOCK_REALTIME, &ts) != 0) {
    // Fatal error
    std::terminate();
  }
  tzset();
  return ts;
}();
#endif
LOG_CHANNEL(sys_time);
static constexpr u64 g_timebase_freq = /*79800000*/ 80000000ull; // 80 Mhz
// Convert time in microseconds to timebased time (g_timebase_freq units),
// applying the configured clocks scale and the savestate base offset.
u64 convert_to_timebased_time(u64 time) {
  const u64 result =
      time * (g_timebase_freq / 1000000ull) * g_cfg.core.clocks_scale / 100u;
  ensure(result >= g_timebase_offs);
  return result - g_timebase_offs;
}
// Returns the current timebase tick count (g_timebase_freq units), scaled by
// the clocks-scale setting and rebased by g_timebase_offs.
u64 get_timebased_time() {
  // Fast path: derive from the TSC when its frequency is known
  if (u64 freq = utils::get_tsc_freq()) {
    const u64 tsc = utils::get_tsc();
#if _MSC_VER
    const u64 result =
        static_cast<u64>(u128_from_mul(tsc, g_timebase_freq) / freq) *
        g_cfg.core.clocks_scale / 100u;
#else
    const u64 result =
        (tsc / freq * g_timebase_freq + tsc % freq * g_timebase_freq / freq) *
        g_cfg.core.clocks_scale / 100u;
#endif
    return result - g_timebase_offs;
  }
  // Fallback: OS monotonic clock / performance counter (loop retries on 0)
  while (true) {
#ifdef _WIN32
    LARGE_INTEGER count;
    ensure(QueryPerformanceCounter(&count));
    const u64 time = count.QuadPart;
    const u64 freq = s_time_aux_info.perf_freq;
#if _MSC_VER
    const u64 result = static_cast<u64>(
        u128_from_mul(time * g_cfg.core.clocks_scale, g_timebase_freq) / freq /
        100u);
#else
    const u64 result =
        (time / freq * g_timebase_freq + time % freq * g_timebase_freq / freq) *
        g_cfg.core.clocks_scale / 100u;
#endif
#else
    struct timespec ts;
    ensure(::clock_gettime(CLOCK_MONOTONIC, &ts) == 0);
    const u64 result =
        (static_cast<u64>(ts.tv_sec) * g_timebase_freq +
         static_cast<u64>(ts.tv_nsec) * g_timebase_freq / 1000000000ull) *
        g_cfg.core.clocks_scale / 100u;
#endif
    if (result)
      return result - g_timebase_offs;
  }
}
// Add an offset to get_timebased_time to avoid leaking the PC's uptime into
// the game, as if the PS3 timebase starts at value 0 (base time) on boot.
// If a non-zero arg is specified it becomes the base time (for savestates).
void initialize_timebased_time(u64 timebased_init, bool reset) {
  g_timebase_offs = 0;

  if (reset) {
    // We simply want to zero-out these values
    systemtime_offset = 0;
    return;
  }

  // Make the current timebase read back as 'timebased_init'
  const u64 new_offs = get_timebased_time() - timebased_init;
  g_timebase_offs = new_offs;
  systemtime_offset = new_offs / (g_timebase_freq / 1000000);
}
// Returns some relative time in microseconds, don't change this fact
u64 get_system_time() {
  // Fast path: derive from the TSC when its frequency is known
  if (u64 freq = utils::get_tsc_freq()) {
    const u64 tsc = utils::get_tsc();
#if _MSC_VER
    const u64 result = static_cast<u64>(u128_from_mul(tsc, 1000000ull) / freq);
#else
    const u64 result =
        (tsc / freq * 1000000ull + tsc % freq * 1000000ull / freq);
#endif
    return result;
  }
  // Fallback: OS monotonic clock / performance counter (loop retries on 0)
  while (true) {
#ifdef _WIN32
    LARGE_INTEGER count;
    ensure(QueryPerformanceCounter(&count));
    const u64 time = count.QuadPart;
    const u64 freq = s_time_aux_info.perf_freq;
#if _MSC_VER
    const u64 result = static_cast<u64>(u128_from_mul(time, 1000000ull) / freq);
#else
    const u64 result =
        time / freq * 1000000ull + (time % freq) * 1000000ull / freq;
#endif
#else
    struct timespec ts;
    ensure(::clock_gettime(CLOCK_MONOTONIC, &ts) == 0);
    const u64 result = static_cast<u64>(ts.tv_sec) * 1000000ull +
                       static_cast<u64>(ts.tv_nsec) / 1000u;
#endif
    if (result)
      return result;
  }
}
// As get_system_time but obeys Clocks scaling setting; also applies the
// savestate system-time offset. Passing umax samples the current time.
u64 get_guest_system_time(u64 time) {
  const u64 base = time != umax ? time : get_system_time();
  return base * g_cfg.core.clocks_scale / 100 - systemtime_offset;
}
// Functions
// Sets the system timezone (privileged). The values are accepted but not
// stored by the emulator.
error_code sys_time_set_timezone(s32 timezone, s32 summertime) {
  sys_time.trace("sys_time_set_timezone(timezone=0x%x, summertime=0x%x)",
                 timezone, summertime);
  if (!g_ps3_process_info.has_root_perm()) {
    return CELL_ENOSYS;
  }
  return CELL_OK;
}
// Reports the host timezone offset and DST bias (both in minutes) to the
// guest, using platform-specific facilities.
error_code sys_time_get_timezone(vm::ptr<s32> timezone,
                                 vm::ptr<s32> summertime) {
  sys_time.trace("sys_time_get_timezone(timezone=*0x%x, summertime=*0x%x)",
                 timezone, summertime);
#ifdef _WIN32
  TIME_ZONE_INFORMATION tz{};
  switch (GetTimeZoneInformation(&tz)) {
  case TIME_ZONE_ID_UNKNOWN: {
    *timezone = -tz.Bias;
    *summertime = 0;
    break;
  }
  case TIME_ZONE_ID_STANDARD: {
    *timezone = -tz.Bias;
    *summertime = -tz.StandardBias;
    if (tz.StandardBias) {
      sys_time.error("Unexpected timezone bias (base=%d, std=%d, daylight=%d)",
                     tz.Bias, tz.StandardBias, tz.DaylightBias);
    }
    break;
  }
  case TIME_ZONE_ID_DAYLIGHT: {
    *timezone = -tz.Bias;
    *summertime = -tz.DaylightBias;
    break;
  }
  default: {
    ensure(0);
  }
  }
#elif __linux__
  *timezone = ::narrow<s16>(-::timezone / 60);
  *summertime = !::daylight ? 0 : []() -> s32 {
    struct tm test{};
    ensure(&test == localtime_r(&start_time.tv_sec, &test));
    // Check bounds [0,1]
    if (test.tm_isdst & -2) {
      sys_time.error(
          "No information for timezone DST bias (timezone=%.2fh, tm_gmtoff=%d)",
          -::timezone / 3600.0, test.tm_gmtoff);
      return 0;
    } else {
      return test.tm_isdst ? ::narrow<s16>((test.tm_gmtoff + ::timezone) / 60)
                           : 0;
    }
  }();
#else
  // gettimeofday doesn't return timezone on linux anymore, but this should work
  // on other OSes?
  struct timezone tz{};
  ensure(gettimeofday(nullptr, &tz) == 0);
  *timezone = ::narrow<s16>(-tz.tz_minuteswest);
  *summertime = !tz.tz_dsttime ? 0 : [&]() -> s32 {
    struct tm test{};
    ensure(&test == localtime_r(&start_time.tv_sec, &test));
    return test.tm_isdst
               ? ::narrow<s16>(test.tm_gmtoff / 60 + tz.tz_minuteswest)
               : 0;
  }();
#endif
  return CELL_OK;
}
// Returns the current wall-clock time as seconds + nanoseconds since the
// Epoch, applying the clocks-scale setting and the configured console time
// offset.
error_code sys_time_get_current_time(vm::ptr<s64> sec, vm::ptr<s64> nsec) {
  sys_time.trace("sys_time_get_current_time(sec=*0x%x, nsec=*0x%x)", sec, nsec);
  if (!sec) {
    return CELL_EFAULT;
  }
#ifdef _WIN32
  LARGE_INTEGER count;
  ensure(QueryPerformanceCounter(&count));
  const u64 diff_base = count.QuadPart - s_time_aux_info.start_time;
  // Get time difference in nanoseconds (using 128 bit accumulator)
  const u64 diff_sl = diff_base * 1000000000ull;
  const u64 diff_sh = utils::umulh64(diff_base, 1000000000ull);
  const u64 diff = utils::udiv128(diff_sh, diff_sl, s_time_aux_info.perf_freq);
  // get time since Epoch in nanoseconds
  const u64 time = s_time_aux_info.start_ftime * 100u +
                   (diff * g_cfg.core.clocks_scale / 100u);
  // scale to seconds, and add the console time offset (which might be negative)
  *sec = (time / 1000000000ull) + g_cfg.sys.console_time_offset;
  if (!nsec) {
    return CELL_EFAULT;
  }
  *nsec = time % 1000000000ull;
#else
  struct timespec ts;
  ensure(::clock_gettime(CLOCK_REALTIME, &ts) == 0);
  if (g_cfg.core.clocks_scale == 100) {
    // get the seconds from the system clock, and add the console time offset
    // (which might be negative)
    *sec = ts.tv_sec + g_cfg.sys.console_time_offset;
    if (!nsec) {
      return CELL_EFAULT;
    }
    *nsec = ts.tv_nsec;
    return CELL_OK;
  }
  u64 tv_sec = ts.tv_sec, stv_sec = start_time.tv_sec;
  u64 tv_nsec = ts.tv_nsec, stv_nsec = start_time.tv_nsec;
  // Subtract time since Epoch and since start time
  tv_sec -= stv_sec;
  if (tv_nsec < stv_nsec) {
    // Correct value if borrow encountered
    tv_sec -= 1;
    tv_nsec = 1'000'000'000ull - (stv_nsec - tv_nsec);
  } else {
    tv_nsec -= stv_nsec;
  }
  // Scale nanoseconds
  tv_nsec = stv_nsec + (tv_nsec * g_cfg.core.clocks_scale / 100);
  // Scale seconds and add from nanoseconds / 1'000'000'000, and add the console
  // time offset (which might be negative)
  *sec = stv_sec + (tv_sec * g_cfg.core.clocks_scale / 100u) +
         (tv_nsec / 1000000000ull) + g_cfg.sys.console_time_offset;
  if (!nsec) {
    return CELL_EFAULT;
  }
  // Set nanoseconds
  *nsec = tv_nsec % 1000000000ull;
#endif
  return CELL_OK;
}
// Sets the system time (privileged). The values are accepted but not stored
// by the emulator.
error_code sys_time_set_current_time(s64 sec, s64 nsec) {
  sys_time.trace("sys_time_set_current_time(sec=0x%x, nsec=0x%x)", sec, nsec);
  if (!g_ps3_process_info.has_root_perm()) {
    return CELL_ENOSYS;
  }
  return CELL_OK;
}
// Returns the emulated timebase frequency (fixed at 80 MHz, see
// g_timebase_freq).
u64 sys_time_get_timebase_frequency() {
  sys_time.trace("sys_time_get_timebase_frequency()");
  return g_timebase_freq;
}
// Stub: the RTC value is never written back to *rtc.
error_code sys_time_get_rtc(vm::ptr<u64> rtc) {
  sys_time.todo("sys_time_get_rtc(rtc=*0x%x)", rtc);
  return CELL_OK;
}

View file

@ -0,0 +1,450 @@
#include "stdafx.h"
#include "sys_timer.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "sys_event.h"
#include "sys_process.h"
#include "util/asm.hpp"
#include <deque>
#include <thread>
LOG_CHANNEL(sys_timer);
// Background thread that fires expired lv2 timers and posts their events.
struct lv2_timer_thread {
  shared_mutex mutex;
  // All live timers; registered by sys_timer_create / savestate load
  std::deque<shared_ptr<lv2_timer>> timers;
  lv2_timer_thread();
  void operator()();
  // SAVESTATE_INIT_POS(46); // FREE SAVESTATE_INIT_POS number
  static constexpr auto thread_name = "Timer Thread"sv;
};
// Savestate deserialization; field order must match save() below.
lv2_timer::lv2_timer(utils::serial &ar)
    : lv2_obj(1), state(ar), port(lv2_event_queue::load_ptr(ar, port, "timer")),
      source(ar), data1(ar), data2(ar), expire(ar), period(ar) {}
// Savestate serialization; field order must match the constructor above.
void lv2_timer::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_sync);
  ar(state), lv2_event_queue::save_ptr(ar, port.get()),
      ar(source, data1, data2, expire, period);
}
// Checks the timer against '_now'; returns the remaining time until expiry,
// or umax when the timer is not running. Takes the timer mutex only when an
// expiry is actually due.
u64 lv2_timer::check(u64 _now) noexcept {
  while (true) {
    const u32 _state = +state;
    if (_state == SYS_TIMER_STATE_RUN) {
      u64 next = expire;
      // If aborting, perform the last accurate check for event
      if (_now >= next) {
        lv2_obj::notify_all_t notify;
        std::lock_guard lock(mutex);
        return check_unlocked(_now);
      }
      return (next - _now);
    }
    break;
  }
  return umax;
}
// Same as check() but requires the caller to hold 'mutex'. Sends the event on
// expiry and either re-arms (periodic) or stops (one-shot) the timer.
u64 lv2_timer::check_unlocked(u64 _now) noexcept {
  const u64 next = expire;
  if (_now < next || state != SYS_TIMER_STATE_RUN) {
    return umax;
  }
  if (port) {
    port->send(source, data1, data2, next);
  }
  if (period) {
    // Set next expiration time and check again
    const u64 expire0 = utils::add_saturate<u64>(next, period);
    expire.release(expire0);
    return utils::sub_saturate<u64>(expire0, _now);
  }
  // Stop after oneshot
  state.release(SYS_TIMER_STATE_STOP);
  return umax;
}
// On construction, re-collect existing timer objects from the id manager
// (savestate load path).
lv2_timer_thread::lv2_timer_thread() {
  Emu.PostponeInitCode([this]() {
    idm::select<lv2_obj, lv2_timer>([&](u32 id, lv2_timer &) {
      timers.emplace_back(idm::get_unlocked<lv2_obj, lv2_timer>(id));
    });
  });
}
// Main loop: sleep until the nearest reported expiry, then poll every timer.
void lv2_timer_thread::operator()() {
  u64 sleep_time = 0;
  while (true) {
    if (sleep_time != umax) {
      // Scale time
      sleep_time =
          std::min(sleep_time, u64{umax} / 100) * 100 / g_cfg.core.clocks_scale;
    }
    thread_ctrl::wait_for(sleep_time);
    if (thread_ctrl::state() == thread_state::aborting) {
      break;
    }
    sleep_time = umax;
    if (Emu.IsPausedOrReady()) {
      // Poll lazily while the emulator is paused
      sleep_time = 10000;
      continue;
    }
    const u64 _now = get_guest_system_time();
    reader_lock lock(mutex);
    for (const auto &timer : timers) {
      while (lv2_obj::check(timer)) {
        if (thread_ctrl::state() == thread_state::aborting) {
          break;
        }
        // Track the smallest advised sleep time across all timers
        if (const u64 advised_sleep_time = timer->check(_now)) {
          if (sleep_time > advised_sleep_time) {
            sleep_time = advised_sleep_time;
          }
          break;
        }
      }
    }
  }
}
// Creates a timer object and registers it with the timer thread.
error_code sys_timer_create(ppu_thread &ppu, vm::ptr<u32> timer_id) {
  ppu.state += cpu_flag::wait;
  sys_timer.warning("sys_timer_create(timer_id=*0x%x)", timer_id);
  if (auto ptr = idm::make_ptr<lv2_obj, lv2_timer>()) {
    auto &thread = g_fxo->get<named_thread<lv2_timer_thread>>();
    {
      std::lock_guard lock(thread.mutex);
      // Theoretically could have been destroyed by sys_timer_destroy by now
      if (auto it = std::find(thread.timers.begin(), thread.timers.end(), ptr);
          it == thread.timers.end()) {
        thread.timers.emplace_back(std::move(ptr));
      }
    }
    ppu.check_state();
    *timer_id = idm::last_id();
    return CELL_OK;
  }
  return CELL_EAGAIN;
}
// Destroys a timer; fails with EISCONN while an event queue is connected.
error_code sys_timer_destroy(ppu_thread &ppu, u32 timer_id) {
  ppu.state += cpu_flag::wait;
  sys_timer.warning("sys_timer_destroy(timer_id=0x%x)", timer_id);
  auto timer = idm::withdraw<lv2_obj, lv2_timer>(
      timer_id, [&](lv2_timer &timer) -> CellError {
        if (reader_lock lock(timer.mutex); lv2_obj::check(timer.port)) {
          return CELL_EISCONN;
        }
        timer.exists--;
        return {};
      });
  if (!timer) {
    return CELL_ESRCH;
  }
  if (timer.ret) {
    return timer.ret;
  }
  // Also remove it from the timer thread's list
  auto &thread = g_fxo->get<named_thread<lv2_timer_thread>>();
  std::lock_guard lock(thread.mutex);
  if (auto it =
          std::find(thread.timers.begin(), thread.timers.end(), timer.ptr);
      it != thread.timers.end()) {
    thread.timers.erase(it);
  }
  return CELL_OK;
}
// Copies the timer's current information to the guest structure.
error_code sys_timer_get_information(ppu_thread &ppu, u32 timer_id,
                                     vm::ptr<sys_timer_information_t> info) {
  ppu.state += cpu_flag::wait;
  sys_timer.trace("sys_timer_get_information(timer_id=0x%x, info=*0x%x)",
                  timer_id, info);
  sys_timer_information_t _info{};
  const u64 now = get_guest_system_time();
  const auto timer =
      idm::check<lv2_obj, lv2_timer>(timer_id, [&](lv2_timer &timer) {
        std::lock_guard lock(timer.mutex);
        // Deliver a pending expiry first so the reported state is current
        timer.check_unlocked(now);
        timer.get_information(_info);
      });
  if (!timer) {
    return CELL_ESRCH;
  }
  ppu.check_state();
  std::memcpy(info.get_ptr(), &_info, info.size());
  return CELL_OK;
}
// Starts a timer: one-shot (period == 0, fires at base_time) or periodic
// (fires every 'period' us, first expiry derived from base_time/start_time).
// Returns ENOTCONN without a connected queue, EBUSY if already running, and
// ETIMEDOUT (not an error) for an already-expired one-shot.
error_code _sys_timer_start(ppu_thread &ppu, u32 timer_id, u64 base_time,
                            u64 period) {
  ppu.state += cpu_flag::wait;
  (period ? sys_timer.warning : sys_timer.trace)(
      "_sys_timer_start(timer_id=0x%x, base_time=0x%llx, period=0x%llx)",
      timer_id, base_time, period);
  const u64 start_time = get_guest_system_time();
  if (period && period < 100) {
    // Invalid periodic timer
    return CELL_EINVAL;
  }
  const auto timer = idm::check<lv2_obj, lv2_timer>(
      timer_id, [&](lv2_timer &timer) -> CellError {
        std::lock_guard lock(timer.mutex);
        // LV2 Disassembly: Simple nullptr check (assignment test, do not use
        // lv2_obj::check here)
        if (!timer.port) {
          return CELL_ENOTCONN;
        }
        // Deliver any already-pending expiry before changing state
        timer.check_unlocked(start_time);
        if (timer.state != SYS_TIMER_STATE_STOP) {
          return CELL_EBUSY;
        }
        if (!period && start_time >= base_time) {
          // Invalid oneshot
          return CELL_ETIMEDOUT;
        }
        const u64 expire =
            period == 0 ? base_time : // oneshot
                base_time == 0
                ? utils::add_saturate(start_time, period)
                :
                // periodic timer with no base (using start time as base)
                start_time < utils::add_saturate(base_time, period)
                ? utils::add_saturate(base_time, period)
                :
                // periodic with base time over start time
                [&]() -> u64 // periodic timer base before start time (align to
                             // be at least a period over start time)
            {
              // Optimized from a loop in LV2:
              // do
              // {
              //   base_time += period;
              // }
              // while (base_time < start_time);
              const u64 start_time_with_base_time_reminder = utils::add_saturate(
                  start_time - start_time % period, base_time % period);
              return utils::add_saturate(
                  start_time_with_base_time_reminder,
                  start_time_with_base_time_reminder < start_time ? period : 0);
            }();
        timer.expire = expire;
        timer.period = period;
        timer.state = SYS_TIMER_STATE_RUN;
        return {};
      });
  if (!timer) {
    return CELL_ESRCH;
  }
  if (timer.ret) {
    if (timer.ret == CELL_ETIMEDOUT) {
      return not_an_error(timer.ret);
    }
    return timer.ret;
  }
  // Wake the timer thread so it picks up the new expiry
  g_fxo->get<named_thread<lv2_timer_thread>>()([] {});
  return CELL_OK;
}
// Stops a running timer after delivering any pending expiry.
error_code sys_timer_stop(ppu_thread &ppu, u32 timer_id) {
  ppu.state += cpu_flag::wait;
  sys_timer.trace("sys_timer_stop()");
  const auto timer = idm::check<lv2_obj, lv2_timer>(
      timer_id, [now = get_guest_system_time(),
                 notify = lv2_obj::notify_all_t()](lv2_timer &timer) {
        std::lock_guard lock(timer.mutex);
        timer.check_unlocked(now);
        timer.state = SYS_TIMER_STATE_STOP;
      });
  if (!timer) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}
// Connects a timer to an event queue; expiry events are posted there.
// 'name' (or pid<<32 | timer_id when name is 0) becomes the event source.
error_code sys_timer_connect_event_queue(ppu_thread &ppu, u32 timer_id,
                                         u32 queue_id, u64 name, u64 data1,
                                         u64 data2) {
  ppu.state += cpu_flag::wait;
  sys_timer.warning("sys_timer_connect_event_queue(timer_id=0x%x, "
                    "queue_id=0x%x, name=0x%llx, data1=0x%llx, data2=0x%llx)",
                    timer_id, queue_id, name, data1, data2);
  const auto timer = idm::check<lv2_obj, lv2_timer>(
      timer_id, [&](lv2_timer &timer) -> CellError {
        auto found = idm::get_unlocked<lv2_obj, lv2_event_queue>(queue_id);
        if (!found) {
          return CELL_ESRCH;
        }
        std::lock_guard lock(timer.mutex);
        if (lv2_obj::check(timer.port)) {
          return CELL_EISCONN;
        }
        // Connect event queue
        timer.port = found;
        timer.source =
            name ? name : (u64{process_getpid() + 0u} << 32) | u64{timer_id};
        timer.data1 = data1;
        timer.data2 = data2;
        return {};
      });
  if (!timer) {
    return CELL_ESRCH;
  }
  if (timer.ret) {
    return timer.ret;
  }
  return CELL_OK;
}
// Disconnects the timer's event queue, stopping the timer first.
error_code sys_timer_disconnect_event_queue(ppu_thread &ppu, u32 timer_id) {
  ppu.state += cpu_flag::wait;
  sys_timer.warning("sys_timer_disconnect_event_queue(timer_id=0x%x)",
                    timer_id);
  const auto timer = idm::check<lv2_obj, lv2_timer>(
      timer_id,
      [now = get_guest_system_time(),
       notify = lv2_obj::notify_all_t()](lv2_timer &timer) -> CellError {
        std::lock_guard lock(timer.mutex);
        timer.check_unlocked(now);
        timer.state = SYS_TIMER_STATE_STOP;
        if (!lv2_obj::check(timer.port)) {
          return CELL_ENOTCONN;
        }
        timer.port.reset();
        return {};
      });
  if (!timer) {
    return CELL_ESRCH;
  }
  if (timer.ret) {
    return timer.ret;
  }
  return CELL_OK;
}
// Sleeps for 'sleep_time' seconds (converted to microseconds).
error_code sys_timer_sleep(ppu_thread &ppu, u32 sleep_time) {
  ppu.state += cpu_flag::wait;
  sys_timer.trace("sys_timer_sleep(sleep_time=%d)", sleep_time);
  return sys_timer_usleep(ppu, sleep_time * u64{1000000});
}
// Sleeps for 'sleep_time' microseconds (adjusted by the usleep_addend
// setting); a zero duration just yields the host thread.
error_code sys_timer_usleep(ppu_thread &ppu, u64 sleep_time) {
  ppu.state += cpu_flag::wait;
  sys_timer.trace("sys_timer_usleep(sleep_time=0x%llx)", sleep_time);
  if (sleep_time) {
    const s64 add_time = g_cfg.core.usleep_addend;
    // Over/underflow checks
    if (add_time >= 0) {
      sleep_time = utils::add_saturate<u64>(sleep_time, add_time);
    } else {
      sleep_time =
          std::max<u64>(1, utils::sub_saturate<u64>(sleep_time, -add_time));
    }
    lv2_obj::sleep(ppu, g_cfg.core.sleep_timers_accuracy <
                            sleep_timers_accuracy_level::_usleep
                        ? sleep_time
                        : 0);
    if (!lv2_obj::wait_timeout(sleep_time, &ppu, true, true)) {
      // Wait was interrupted - request the syscall to be restarted
      ppu.state += cpu_flag::again;
    }
  } else {
    std::this_thread::yield();
  }
  return CELL_OK;
}

View file

@ -0,0 +1,59 @@
#include "stdafx.h"
#include "sys_trace.h"
#include "Emu/Cell/ErrorCodes.h"
LOG_CHANNEL(sys_trace);
// TODO: DEX/DECR mode support?
// All sys_trace syscalls below are unimplemented stubs returning CELL_ENOSYS.
s32 sys_trace_create() {
  sys_trace.todo("sys_trace_create()");
  return CELL_ENOSYS;
}
s32 sys_trace_start() {
  sys_trace.todo("sys_trace_start()");
  return CELL_ENOSYS;
}
s32 sys_trace_stop() {
  sys_trace.todo("sys_trace_stop()");
  return CELL_ENOSYS;
}
s32 sys_trace_update_top_index() {
  sys_trace.todo("sys_trace_update_top_index()");
  return CELL_ENOSYS;
}
s32 sys_trace_destroy() {
  sys_trace.todo("sys_trace_destroy()");
  return CELL_ENOSYS;
}
s32 sys_trace_drain() {
  sys_trace.todo("sys_trace_drain()");
  return CELL_ENOSYS;
}
s32 sys_trace_attach_process() {
  sys_trace.todo("sys_trace_attach_process()");
  return CELL_ENOSYS;
}
s32 sys_trace_allocate_buffer() {
  sys_trace.todo("sys_trace_allocate_buffer()");
  return CELL_ENOSYS;
}
s32 sys_trace_free_buffer() {
  sys_trace.todo("sys_trace_free_buffer()");
  return CELL_ENOSYS;
}
s32 sys_trace_create2() {
  sys_trace.todo("sys_trace_create2()");
  return CELL_ENOSYS;
}

View file

@ -0,0 +1,188 @@
#include "stdafx.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/system_config.h"
#include "sys_tty.h"
#include <deque>
#include <mutex>
LOG_CHANNEL(sys_tty);
extern fs::file g_tty;
extern atomic_t<s64> g_tty_size;
extern std::array<std::deque<std::string>, 16> g_tty_input;
extern std::mutex g_tty_mutex;
// Reads buffered input from TTY channel 'ch' into the guest buffer, stopping
// at a newline, at 'len' characters, or at the end of the queued string.
error_code sys_tty_read(s32 ch, vm::ptr<char> buf, u32 len,
                        vm::ptr<u32> preadlen) {
  sys_tty.trace("sys_tty_read(ch=%d, buf=*0x%x, len=%d, preadlen=*0x%x)", ch,
                buf, len, preadlen);
  // TTY input is only available in debug console mode
  if (!g_cfg.core.debug_console_mode) {
    return CELL_EIO;
  }
  if (ch > 15 || ch < 0 || !buf) {
    return CELL_EINVAL;
  }
  if (ch < SYS_TTYP_USER1) {
    sys_tty.warning("sys_tty_read called with system channel %d", ch);
  }
  usz chars_to_read =
      0; // number of chars that will be read from the input string
  std::string tty_read; // string for storage of read chars
  if (len > 0) {
    std::lock_guard lock(g_tty_mutex);
    if (!g_tty_input[ch].empty()) {
      // reference to our first queue element
      std::string &input = g_tty_input[ch].front();
      // we have to stop reading at either a new line, the param len, or our
      // input string size
      usz new_line_pos = input.find_first_of('\n');
      if (new_line_pos != input.npos) {
        chars_to_read = std::min(new_line_pos, static_cast<usz>(len));
      } else {
        chars_to_read = std::min(input.size(), static_cast<usz>(len));
      }
      // read the previously calculated number of chars from the beginning of
      // the input string
      tty_read = input.substr(0, chars_to_read);
      // remove the just read text from the input string
      input = input.substr(chars_to_read, input.size() - 1);
      if (input.empty()) {
        // pop the first queue element if it was completely consumed
        g_tty_input[ch].pop_front();
      }
    }
  }
  // NOTE(review): preadlen is validated only after the input was consumed
  // above, so a null preadlen silently loses the dequeued text - confirm this
  // matches intended behavior.
  if (!preadlen) {
    return CELL_EFAULT;
  }
  *preadlen = static_cast<u32>(chars_to_read);
  if (chars_to_read > 0) {
    std::memcpy(buf.get_ptr(), tty_read.c_str(), chars_to_read);
    sys_tty.success("sys_tty_read(ch=%d, len=%d) read %s with length %d", ch,
                    len, tty_read, *preadlen);
  }
  return CELL_OK;
}
std::string dump_useful_thread_info();
// Write `len` bytes from guest buffer `buf` to TTY channel `ch` and report
// the written length via `pwritelen`. Also scans the message for failure
// keywords to raise log severity, and periodically dumps thread info.
// Fix: the single-trailing-newline log message used a mismatched typographic
// quote (u8"“%s“") — now u8"“%s”", consistent with the two sibling messages.
error_code sys_tty_write([[maybe_unused]] ppu_thread &ppu, s32 ch,
                         vm::cptr<char> buf, u32 len, vm::ptr<u32> pwritelen) {
  ppu.state += cpu_flag::wait;

  sys_tty.notice("sys_tty_write(ch=%d, buf=*0x%x, len=%d, pwritelen=*0x%x)", ch,
                 buf, len, pwritelen);

  std::string msg;

  // Copy the guest message if the whole range is readable; on partial failure
  // the message is treated as empty
  if (static_cast<s32>(len) > 0 &&
      vm::check_addr(buf.addr(), vm::page_readable, len)) {
    msg.resize(len);

    if (!vm::try_access(buf.addr(), msg.data(), len, false)) {
      msg.clear();
    }
  }

  // True if `word` occurs in `msg` with its first letter in either case
  // (matches the tail of the word, then case-folds the preceding character)
  auto find_word = [](std::string_view msg, std::string_view word) -> bool {
    // Match uppercase and lowercase starting words
    const usz index = msg.find(word.substr(1));

    if (index != umax && index >= 1u) {
      return std::tolower(static_cast<u8>(msg[index - 1])) == word[0];
    }

    return false;
  };

  // Only scan the first 1KB for failure keywords
  std::string_view sample = std::string_view(msg).substr(0, 1024);

  const bool warning =
      find_word(sample, "failed"sv) || find_word(sample, "abort"sv) ||
      find_word(sample, "crash"sv) || find_word(sample, "error"sv) ||
      find_word(sample, "unexpected"sv) || find_word(sample, "0x8001"sv);

  sample = {}; // Remove reference to string

  // Rate-limited diagnostic dump (per-thread timer; shorter interval when a
  // failure keyword was seen)
  if (msg.size() >= 2u && [&]() {
        static thread_local u64 last_write = 0;

        // Dump thread about every period which TTY was not being touched for
        // about half a second
        const u64 current = get_system_time();
        return current - std::exchange(last_write, current) >=
               (warning ? 500'000 : 3'000'000);
      }()) {
    ppu_log.notice("\n%s", dump_useful_thread_info());
  }

  // Hack: write to tty even on CEX mode, but disable all error checks
  if (ch < 0 || ch > 15) {
    if (g_cfg.core.debug_console_mode) {
      return CELL_EINVAL;
    } else {
      msg.clear();
    }
  }

  if (g_cfg.core.debug_console_mode) {
    // Don't modify it in CEX mode
    len = static_cast<s32>(len) > 0 ? len : 0;
  }

  if (static_cast<s32>(len) > 0) {
    if (!msg.empty()) {
      if (msg.ends_with("\n")) {
        // Avoid logging trailing newlines, log them verbosely instead
        const std::string_view msg_clear =
            std::string_view(msg).substr(0, msg.find_last_not_of('\n') + 1);

        if (msg.size() - 1 == msg_clear.size()) {
          (warning ? sys_tty.warning : sys_tty.notice)(
              u8"sys_tty_write(): “%s” << endl", msg_clear);
        } else {
          (warning ? sys_tty.warning
                   : sys_tty.notice)(u8"sys_tty_write(): “%s” << endl(%u)",
                                     msg_clear, msg.size() - msg_clear.size());
        }
      } else {
        (warning ? sys_tty.warning : sys_tty.notice)(u8"sys_tty_write(): “%s”",
                                                     msg);
      }

      if (g_tty) {
        // Lock size by making it negative
        g_tty_size -= (1ll << 48);
        g_tty.write(msg);
        g_tty_size += (1ll << 48) + len;
      }
    } else if (g_cfg.core.debug_console_mode) {
      return {CELL_EFAULT, buf.addr()};
    }
  }

  if (!pwritelen.try_write(len)) {
    return {CELL_EFAULT, pwritelen};
  }

  return CELL_OK;
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,401 @@
#include "stdafx.h"
#include "sys_process.h"
#include "sys_vm.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/IdManager.h"
// Construct a VM block descriptor (base address, virtual size, owning
// container, committed physical size) and publish the freshly created IDM id
// in the lookup table slot selected by the 256MB-aligned base (addr >> 28).
sys_vm_t::sys_vm_t(u32 _addr, u32 vsize, lv2_memory_container *ct, u32 psize)
    : ct(ct), addr(_addr), size(vsize), psize(psize) {
  // Write ID
  g_ids[addr >> 28].release(idm::last_id());
}
// Serialize this VM block for savestates: owning container id plus layout.
void sys_vm_t::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_vm);
  ar(ct->id, addr, size, psize);
}
// Invalidate this block's slot in the id lookup table on destruction.
sys_vm_t::~sys_vm_t() {
  // Free ID
  g_ids[addr >> 28].release(id_manager::id_traits<sys_vm_t>::invalid);
}
LOG_CHANNEL(sys_vm);
// Process-wide bookkeeping: total virtual size currently reserved through
// sys_vm_memory_map across all VM blocks.
struct sys_vm_global_t {
  atomic_t<u32> total_vsize = 0;
};
// Deserialize a VM block from a savestate: restore the container and layout,
// re-publish the IDM id, and re-account the virtual size in the global total.
sys_vm_t::sys_vm_t(utils::serial &ar)
    : ct(lv2_memory_container::search(ar)), addr(ar), size(ar), psize(ar) {
  g_ids[addr >> 28].release(idm::last_id());
  g_fxo->need<sys_vm_global_t>();
  g_fxo->get<sys_vm_global_t>().total_vsize += size;
}
// Map a new sys_vm area: reserve `vsize` of address space backed by `psize`
// of physical memory taken from container `cid`, and write the area's base
// address to *addr on success.
error_code sys_vm_memory_map(ppu_thread &ppu, u64 vsize, u64 psize, u32 cid,
                             u64 flag, u64 policy, vm::ptr<u32> addr) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_memory_map(vsize=0x%x, psize=0x%x, cid=0x%x, "
                 "flags=0x%x, policy=0x%x, addr=*0x%x)",
                 vsize, psize, cid, flag, policy, addr);

  // vsize: non-zero multiple of 32MB, at most 256MB; psize: non-zero multiple
  // of 64KB; only the auto-recommended policy is accepted
  if (!vsize || !psize || vsize % 0x200'0000 || vsize > 0x1000'0000 ||
      psize % 0x1'0000 || policy != SYS_VM_POLICY_AUTO_RECOMMENDED) {
    return CELL_EINVAL;
  }

  // gpr[11] presumably holds the syscall number; 300 additionally enforces a
  // 1MB minimum physical size — TODO confirm against firmware behavior
  if (ppu.gpr[11] == 300 && psize < 0x10'0000) {
    return CELL_EINVAL;
  }

  const auto idm_ct = idm::get_unlocked<lv2_memory_container>(cid);

  // SYS_MEMORY_CONTAINER_ID_INVALID selects the default (global) container
  const auto ct = cid == SYS_MEMORY_CONTAINER_ID_INVALID
                      ? &g_fxo->get<lv2_memory_container>()
                      : idm_ct.get();

  if (!ct) {
    return CELL_ESRCH;
  }

  // Atomically reserve vsize against the process-wide virtual size cap
  if (!g_fxo->get<sys_vm_global_t>()
           .total_vsize
           .fetch_op([vsize, has_root = g_ps3_process_info.has_root_perm()](
                         u32 &size) {
             // A single process can hold up to 256MB of virtual memory, even on
             // DECR
             // VSH can hold more
             if ((has_root ? 0x1E000000 : 0x10000000) - size < vsize) {
               return false;
             }

             size += static_cast<u32>(vsize);
             return true;
           })
           .second) {
    return CELL_EBUSY;
  }

  if (!ct->take(psize)) {
    // Roll back the virtual-size reservation made above
    g_fxo->get<sys_vm_global_t>().total_vsize -= static_cast<u32>(vsize);
    return CELL_ENOMEM;
  }

  // Look for unmapped space
  if (const auto area = vm::find_map(0x10000000, 0x10000000,
                                     2 | (flag & SYS_MEMORY_PAGE_SIZE_MASK))) {
    sys_vm.warning("sys_vm_memory_map(): Found VM 0x%x area (vsize=0x%x)", addr,
                   vsize);

    // Alloc all memory (shall not fail)
    ensure(area->alloc(static_cast<u32>(vsize)));
    vm::lock_sudo(area->addr, static_cast<u32>(vsize));

    idm::make<sys_vm_t>(area->addr, static_cast<u32>(vsize), ct,
                        static_cast<u32>(psize));

    // Write a pointer for the allocated memory
    ppu.check_state();
    *addr = area->addr;
    return CELL_OK;
  }

  // No address space available: undo both reservations
  ct->free(psize);
  g_fxo->get<sys_vm_global_t>().total_vsize -= static_cast<u32>(vsize);
  return CELL_ENOMEM;
}
// Variant of sys_vm_memory_map; currently forwarded unchanged.
error_code sys_vm_memory_map_different(ppu_thread &ppu, u64 vsize, u64 psize,
                                       u32 cid, u64 flag, u64 policy,
                                       vm::ptr<u32> addr) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_memory_map_different(vsize=0x%x, psize=0x%x, "
                 "cid=0x%x, flags=0x%llx, policy=0x%llx, addr=*0x%x)",
                 vsize, psize, cid, flag, policy, addr);
  // TODO: if needed implement different way to map memory, unconfirmed.

  return sys_vm_memory_map(ppu, vsize, psize, cid, flag, policy, addr);
}
// Unmap a sys_vm area: releases the address space, returns the committed
// physical memory to its container, and drops the global virtual-size count.
error_code sys_vm_unmap(ppu_thread &ppu, u32 addr) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_unmap(addr=0x%x)", addr);

  // Only 256MB-aligned base addresses can name a VM area
  if (addr % 0x10000000) {
    return CELL_EINVAL;
  }

  // Runs under idm withdrawal: tear down the block before it disappears
  auto on_withdraw = [&](sys_vm_t &vmo) {
    // Free block
    ensure(vm::unmap(addr).second);

    // Return memory
    vmo.ct->free(vmo.psize);
    g_fxo->get<sys_vm_global_t>().total_vsize -= vmo.size;
  };

  if (!idm::withdraw<sys_vm_t>(sys_vm_t::find_id(addr), on_withdraw)) {
    return CELL_EINVAL;
  }

  return CELL_OK;
}
// Grow the physical memory commitment of an existing VM block by `size`.
error_code sys_vm_append_memory(ppu_thread &ppu, u32 addr, u64 size) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_append_memory(addr=0x%x, size=0x%x)", addr, size);

  // Size must be a non-zero multiple of 1MB
  if (!size || size % 0x100000) {
    return CELL_EINVAL;
  }

  // Executed while the idm entry is held; returns the error to propagate
  auto grow = [&](sys_vm_t &vmo) -> CellError {
    if (vmo.addr != addr) {
      return CELL_EINVAL;
    }

    // Claim the pages from the container before growing psize
    if (!vmo.ct->take(size)) {
      return CELL_ENOMEM;
    }

    vmo.psize += static_cast<u32>(size);
    return {};
  };

  const auto checked = idm::check<sys_vm_t>(sys_vm_t::find_id(addr), grow);

  if (!checked) {
    return CELL_EINVAL;
  }

  if (checked.ret) {
    return checked.ret;
  }

  return CELL_OK;
}
// Shrink the physical memory commitment of an existing VM block by `size`.
error_code sys_vm_return_memory(ppu_thread &ppu, u32 addr, u64 size) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_return_memory(addr=0x%x, size=0x%x)", addr, size);

  // Size must be a non-zero multiple of 1MB
  if (!size || size % 0x100000) {
    return CELL_EINVAL;
  }

  const auto block = idm::check<sys_vm_t>(
      sys_vm_t::find_id(addr), [&](sys_vm_t &vmo) -> CellError {
        // addr must be the exact base address of the block
        if (vmo.addr != addr) {
          return CELL_EINVAL;
        }

        // Atomically deduct from psize; refuse if less than 1MB would remain.
        // NOTE(review): `value <= size` also rejects returning exactly the
        // current commitment — presumably intentional, confirm.
        auto [_, ok] = vmo.psize.fetch_op([&](u32 &value) {
          if (value <= size || value - size < 0x100000ull) {
            return false;
          }

          value -= static_cast<u32>(size);
          return true;
        });

        if (!ok) {
          return CELL_EBUSY;
        }

        // Give the pages back to the owning container
        vmo.ct->free(size);
        return {};
      });

  if (!block) {
    return CELL_EINVAL;
  }

  if (block.ret) {
    return block.ret;
  }

  return CELL_OK;
}
// Validate the range against an existing VM block; no further action is
// performed by the emulator.
error_code sys_vm_lock(ppu_thread &ppu, u32 addr, u32 size) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_lock(addr=0x%x, size=0x%x)", addr, size);

  if (size == 0) {
    return CELL_EINVAL;
  }

  const auto vmo = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));

  // [addr, addr + size) must lie entirely inside the block
  const bool valid_range =
      vmo && u64{addr} + size <= u64{vmo->addr} + vmo->size;

  if (!valid_range) {
    return CELL_EINVAL;
  }

  return CELL_OK;
}
// Validate the range against an existing VM block; no further action is
// performed by the emulator.
error_code sys_vm_unlock(ppu_thread &ppu, u32 addr, u32 size) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_unlock(addr=0x%x, size=0x%x)", addr, size);

  if (size == 0) {
    return CELL_EINVAL;
  }

  const auto vmo = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));

  // [addr, addr + size) must lie entirely inside the block
  const bool valid_range =
      vmo && u64{addr} + size <= u64{vmo->addr} + vmo->size;

  if (!valid_range) {
    return CELL_EINVAL;
  }

  return CELL_OK;
}
// Validate the range against an existing VM block; no further action is
// performed by the emulator.
error_code sys_vm_touch(ppu_thread &ppu, u32 addr, u32 size) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_touch(addr=0x%x, size=0x%x)", addr, size);

  if (size == 0) {
    return CELL_EINVAL;
  }

  const auto vmo = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));

  // [addr, addr + size) must lie entirely inside the block
  const bool valid_range =
      vmo && u64{addr} + size <= u64{vmo->addr} + vmo->size;

  if (!valid_range) {
    return CELL_EINVAL;
  }

  return CELL_OK;
}
// Validate the range against an existing VM block; no further action is
// performed by the emulator.
error_code sys_vm_flush(ppu_thread &ppu, u32 addr, u32 size) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_flush(addr=0x%x, size=0x%x)", addr, size);

  if (size == 0) {
    return CELL_EINVAL;
  }

  const auto vmo = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));

  // [addr, addr + size) must lie entirely inside the block
  const bool valid_range =
      vmo && u64{addr} + size <= u64{vmo->addr} + vmo->size;

  if (!valid_range) {
    return CELL_EINVAL;
  }

  return CELL_OK;
}
// Validate the range against an existing VM block; no further action is
// performed by the emulator.
error_code sys_vm_invalidate(ppu_thread &ppu, u32 addr, u32 size) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_invalidate(addr=0x%x, size=0x%x)", addr, size);

  if (size == 0) {
    return CELL_EINVAL;
  }

  const auto vmo = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));

  // [addr, addr + size) must lie entirely inside the block
  const bool valid_range =
      vmo && u64{addr} + size <= u64{vmo->addr} + vmo->size;

  if (!valid_range) {
    return CELL_EINVAL;
  }

  return CELL_OK;
}
// Validate the range against an existing VM block; no further action is
// performed by the emulator.
error_code sys_vm_store(ppu_thread &ppu, u32 addr, u32 size) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_store(addr=0x%x, size=0x%x)", addr, size);

  if (size == 0) {
    return CELL_EINVAL;
  }

  const auto vmo = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));

  // [addr, addr + size) must lie entirely inside the block
  const bool valid_range =
      vmo && u64{addr} + size <= u64{vmo->addr} + vmo->size;

  if (!valid_range) {
    return CELL_EINVAL;
  }

  return CELL_OK;
}
// Validate the range against an existing VM block; no further action is
// performed by the emulator.
error_code sys_vm_sync(ppu_thread &ppu, u32 addr, u32 size) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_sync(addr=0x%x, size=0x%x)", addr, size);

  if (size == 0) {
    return CELL_EINVAL;
  }

  const auto vmo = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));

  // [addr, addr + size) must lie entirely inside the block
  const bool valid_range =
      vmo && u64{addr} + size <= u64{vmo->addr} + vmo->size;

  if (!valid_range) {
    return CELL_EINVAL;
  }

  return CELL_OK;
}
// Query the paging state of a VM range; the emulator always reports the
// range as resident (SYS_VM_STATE_ON_MEMORY).
error_code sys_vm_test(ppu_thread &ppu, u32 addr, u32 size,
                       vm::ptr<u64> result) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_test(addr=0x%x, size=0x%x, result=*0x%x)", addr, size,
                 result);

  const auto vmo = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));

  // [addr, addr + size) must lie entirely inside the block
  const bool valid_range =
      vmo && u64{addr} + size <= u64{vmo->addr} + vmo->size;

  if (!valid_range) {
    return CELL_EINVAL;
  }

  ppu.check_state();
  *result = SYS_VM_STATE_ON_MEMORY;
  return CELL_OK;
}
// Fill in paging statistics for the VM block whose base address is `addr`.
// All fault/paging counters are hard-coded to zero.
error_code sys_vm_get_statistics(ppu_thread &ppu, u32 addr,
                                 vm::ptr<sys_vm_statistics_t> stat) {
  ppu.state += cpu_flag::wait;

  sys_vm.warning("sys_vm_get_statistics(addr=0x%x, stat=*0x%x)", addr, stat);

  const auto vmo = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));

  // addr must be the exact base address of the block
  if (!vmo || vmo->addr != addr) {
    return CELL_EINVAL;
  }

  ppu.check_state();

  stat->page_fault_ppu = 0;
  stat->page_fault_spu = 0;
  stat->page_in = 0;
  stat->page_out = 0;
  stat->pmem_used = 0;
  stat->pmem_total = vmo->psize;
  stat->timestamp = get_timebased_time();
  return CELL_OK;
}
// Storage for the static per-256MB-slot id lookup table (written by the
// sys_vm_t constructors/destructor above).
DECLARE(sys_vm_t::g_ids){};