Move rpcs3/Emu/Cell/lv2 to kernel/cellos

This commit is contained in:
DH 2025-10-04 16:46:36 +03:00
parent fce4127c2e
commit dbfa5002e5
282 changed files with 40062 additions and 41342 deletions

View file

@ -185,12 +185,13 @@ if(WITH_LLVM)
message(FATAL_ERROR "Can't find LLVM libraries from the CMAKE_PREFIX_PATH path or LLVM_DIR. \
Enable BUILD_LLVM option to build LLVM from included as a git submodule.")
endif()
if (LLVM_VERSION VERSION_LESS 18)
message(FATAL_ERROR "Found LLVM version ${LLVM_VERSION}. Required version 18 or above.")
endif()
if (NOT MLIR_FOUND)
message(FATAL_ERROR "Can't find MLIR libraries from the CMAKE_PREFIX_PATH path or MLIR_DIR")
message(FATAL_ERROR "Can't find MLIR libraries from the CMAKE_PREFIX_PATH path or MLIR_DIR: ${MLIR_DIR}")
endif()

View file

@ -279,6 +279,7 @@ add_subdirectory(rpcsx)
if (WITH_PS3)
include(ConfigureCompiler)
add_subdirectory(kernel/cellos)
add_subdirectory(rpcs3)
add_subdirectory(ps3fw)
endif()

View file

@ -4,7 +4,7 @@
#include "Emu/Audio/Null/NullAudioBackend.h"
#include "Emu/Cell/PPUAnalyser.h"
#include "Emu/Cell/SPURecompiler.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "cellos/sys_sync.h"
#include "Emu/IdManager.h"
#include "Emu/Io/KeyboardHandler.h"
#include "Emu/Io/Null/NullKeyboardHandler.h"

View file

@ -0,0 +1,70 @@
# LV2 ("CellOS") kernel syscall implementations, moved out of
# rpcs3/Emu/Cell/lv2 and built as a standalone static library.
add_library(cellos-kernel STATIC
src/lv2.cpp
src/sys_bdemu.cpp
src/sys_btsetting.cpp
src/sys_cond.cpp
src/sys_console.cpp
src/sys_crypto_engine.cpp
src/sys_config.cpp
src/sys_dbg.cpp
src/sys_event.cpp
src/sys_event_flag.cpp
src/sys_fs.cpp
src/sys_game.cpp
src/sys_gamepad.cpp
src/sys_gpio.cpp
src/sys_hid.cpp
src/sys_interrupt.cpp
src/sys_io.cpp
src/sys_lwcond.cpp
src/sys_lwmutex.cpp
src/sys_memory.cpp
src/sys_mmapper.cpp
src/sys_mutex.cpp
src/sys_net.cpp
src/sys_net/lv2_socket.cpp
src/sys_net/lv2_socket_native.cpp
src/sys_net/lv2_socket_raw.cpp
src/sys_net/lv2_socket_p2p.cpp
src/sys_net/lv2_socket_p2ps.cpp
src/sys_net/network_context.cpp
src/sys_net/nt_p2p_port.cpp
src/sys_net/sys_net_helpers.cpp
src/sys_overlay.cpp
src/sys_ppu_thread.cpp
src/sys_process.cpp
src/sys_prx.cpp
src/sys_rsx.cpp
src/sys_rsxaudio.cpp
src/sys_rwlock.cpp
src/sys_semaphore.cpp
src/sys_spu.cpp
src/sys_sm.cpp
src/sys_ss.cpp
src/sys_storage.cpp
src/sys_time.cpp
src/sys_timer.cpp
src/sys_trace.cpp
src/sys_tty.cpp
src/sys_uart.cpp
src/sys_usbd.cpp
src/sys_vm.cpp
)
# Consumers include the public headers with a "cellos/" prefix (PUBLIC
# 'include'); the library's own sources may include sibling headers without
# the prefix (PRIVATE 'include/cellos').
target_include_directories(cellos-kernel
PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/include/cellos
)
# NOTE(review): the 3rdparty links below appear to be carried over from the
# move; the existing FIXME markers already request their removal once the
# kernel code is decoupled from these dependencies.
target_link_libraries(cellos-kernel PUBLIC
rpcs3_core # FIXME: remove
3rdparty::soundtouch # FIXME: remove
3rdparty::flatbuffers # FIXME: remove
3rdparty::wolfssl # FIXME: remove
3rdparty::miniupnpc # FIXME: remove
3rdparty::libusb # FIXME: remove
3rdparty::rtmidi # FIXME: remove
)

View file

@ -1,8 +1,9 @@
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// SysCalls
error_code sys_bdemu_send_command(u64 cmd, u64 a2, u64 a3, vm::ptr<void> buf, u64 buf_len);
error_code sys_bdemu_send_command(u64 cmd, u64 a2, u64 a3, vm::ptr<void> buf,
u64 buf_len);

View file

@ -1,7 +1,7 @@
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// SysCalls

View file

@ -0,0 +1,49 @@
#pragma once
#include "sys_mutex.h"
#include "sys_sync.h"
struct lv2_mutex;
// Attribute block passed by the guest to sys_cond_create (big-endian layout)
struct sys_cond_attribute_t {
be_t<u32> pshared;
be_t<s32> flags;
be_t<u64> ipc_key;
union {
// 8-byte name, accessible either as a raw integer or as chars
nse_t<u64, 1> name_u64;
char name[sizeof(u64)];
};
};
// LV2 condition variable object (IDM-managed)
struct lv2_cond final : lv2_obj {
static const u32 id_base = 0x86000000;
const u64 key;
const u64 name;
// Id of the mutex this condition variable was created against
const u32 mtx_id;
lv2_mutex *mutex; // Associated Mutex
// Keeps the associated mutex object alive while this cond exists
shared_ptr<lv2_obj> _mutex;
// Head of the waiting PPU thread list (intrusive queue)
ppu_thread *sq{};
lv2_cond(u64 key, u64 name, u32 mtx_id, shared_ptr<lv2_obj> mutex0) noexcept;
// Savestate deserialization
lv2_cond(utils::serial &ar) noexcept;
static std::function<void(void *)> load(utils::serial &ar);
void save(utils::serial &ar);
CellError on_id_create();
};
class ppu_thread;
// Syscalls
error_code sys_cond_create(ppu_thread &ppu, vm::ptr<u32> cond_id, u32 mutex_id,
vm::ptr<sys_cond_attribute_t> attr);
error_code sys_cond_destroy(ppu_thread &ppu, u32 cond_id);
error_code sys_cond_wait(ppu_thread &ppu, u32 cond_id, u64 timeout);
error_code sys_cond_signal(ppu_thread &ppu, u32 cond_id);
error_code sys_cond_signal_all(ppu_thread &ppu, u32 cond_id);
error_code sys_cond_signal_to(ppu_thread &ppu, u32 cond_id, u32 thread_id);

View file

@ -0,0 +1,444 @@
#pragma once
#include "Emu/Cell/timers.hpp"
#include "sys_event.h"
#include "util/atomic.hpp"
#include "util/mutex.h"
#include "util/shared_ptr.hpp"
#include <unordered_map>
#include <vector>
/*
* sys_config is a "subscription-based data storage API"
*
* It has the concept of services and listeners. Services provide data,
* listeners subscribe to registration/unregistration events from specific
* services.
*
* Services are divided into two classes: LV2 services (positive service IDs)
and User services (negative service IDs). LV2 services seem to be implicitly
* "available", probably constructed on-demand with internal LV2 code generating
* the data. An example is PadManager (service ID 0x11). User services may be
* registered through a syscall, and have negative IDs. An example is libPad
* (service ID 0x8000'0000'0000'0001). Note that user-mode *cannot* register
* positive service IDs.
*
* To start with, you have to get a sys_config handle by calling sys_config_open
* and providing an event queue. This event queue will be used for sys_config
* notifications if a subscribed config event is registered.
*
* With a sys_config handle, listeners can be added to specific services using
* sys_config_add_service_listener. This syscall returns a service listener
* handle, which can be used to close the listener and stop further
* notifications. Once subscribed, any matching past service registrations will
* be automatically sent to the supplied queue (thus the "data storage").
*
* Services exist "implicitly", and data may be registered *onto* a service by
* calling sys_config_register_service. You can remove config events by calling
* sys_config_unregister_service and providing the handle returned when
* registering a service.
*
* If a service is registered (or unregistered) and matches any active listener,
* that listener will get an event sent to the event queue provided in the call
* to sys_config_open.
*
* This event will contain the type of config event ("service event" or "IO
* event", in event.source), the corresponding sys_config handle (event.data1),
* the config event ID (event.data2 & 0xffff'ffff), whether the service was
* registered or unregistered ('data2 >> 32'), and what buffer size will be
* needed to read the corresponding service event (event.data3).
*
* NOTE: if multiple listeners exist, each gets a separate event ID even though
* all events are the same!
*
* After receiving such an event from the event queue, the user should allocate
* enough buffer and call sys_config_get_service_event (or sys_config_io_event)
* with the given event ID, in order to obtain a sys_config_service_event_t (or
* sys_config_io_event_t) structure with the contents of the service that was
* (un)registered.
*/
class lv2_config_handle;
class lv2_config_service;
class lv2_config_service_listener;
class lv2_config_service_event;
// Known sys_config service IDs
enum sys_config_service_id : s64 {
SYS_CONFIG_SERVICE_PADMANAGER = 0x11,
SYS_CONFIG_SERVICE_PADMANAGER2 =
0x12, // lv2 seems to send padmanager events to both 0x11 and 0x12
SYS_CONFIG_SERVICE_0x20 = 0x20,
SYS_CONFIG_SERVICE_0x30 = 0x30,
SYS_CONFIG_SERVICE_USER_BASE =
static_cast<s64>(UINT64_C(0x8000'0000'0000'0000)),
SYS_CONFIG_SERVICE_USER_LIBPAD = SYS_CONFIG_SERVICE_USER_BASE + 1,
SYS_CONFIG_SERVICE_USER_LIBKB = SYS_CONFIG_SERVICE_USER_BASE + 2,
SYS_CONFIG_SERVICE_USER_LIBMOUSE = SYS_CONFIG_SERVICE_USER_BASE + 3,
SYS_CONFIG_SERVICE_USER_0x1000 = SYS_CONFIG_SERVICE_USER_BASE + 0x1000,
SYS_CONFIG_SERVICE_USER_0x1010 = SYS_CONFIG_SERVICE_USER_BASE + 0x1010,
SYS_CONFIG_SERVICE_USER_0x1011 = SYS_CONFIG_SERVICE_USER_BASE + 0x1011,
SYS_CONFIG_SERVICE_USER_0x1013 = SYS_CONFIG_SERVICE_USER_BASE + 0x1013,
SYS_CONFIG_SERVICE_USER_0x1020 = SYS_CONFIG_SERVICE_USER_BASE + 0x1020,
SYS_CONFIG_SERVICE_USER_0x1030 = SYS_CONFIG_SERVICE_USER_BASE + 0x1030,
};
enum sys_config_service_listener_type : u32 {
SYS_CONFIG_SERVICE_LISTENER_ONCE = 0,
SYS_CONFIG_SERVICE_LISTENER_REPEATING = 1
};
enum sys_config_event_source : u64 {
SYS_CONFIG_EVENT_SOURCE_SERVICE = 1,
SYS_CONFIG_EVENT_SOURCE_IO = 2
};
/*
* Dynamic-sized struct to describe a sys_config_service_event
* We never allocate it - the guest does it for us and provides a pointer
*/
struct sys_config_service_event_t {
// Handle to the service listener for whom this event is destined
be_t<u32> service_listener_handle;
// 1 if this service is currently registered or unregistered
be_t<u32> registered;
// Service ID that triggered this event
be_t<u64> service_id;
// Custom ID provided by the user, used to uniquely identify service events
// (provided to sys_config_register_event) When a service is unregistered,
// this is the only value available to distinguish which service event was
// unregistered.
be_t<u64> user_id;
/* if added==0, the structure ends here */
// Verbosity of this service event (provided to sys_config_register_event)
be_t<u64> verbosity;
// Size of 'data'
be_t<u32> data_size;
// Ignored, seems to be simply 32-bits of padding
be_t<u32> padding;
// Buffer containing event data (copy of the buffer supplied to
// sys_config_register_service) NOTE: This buffer size is dynamic, according
// to 'data_size', and can be 0. Here it is set to 1 since zero-sized buffers
// are not standards-compliant
u8 data[1];
};
/*
* Event data structure for SYS_CONFIG_SERVICE_PADMANAGER
* This is a guess
*/
struct sys_config_padmanager_data_t {
be_t<u16> unk[5]; // hid device type ?
be_t<u16> vid;
be_t<u16> pid;
be_t<u16> unk2[6]; // bluetooth address?
};
static_assert(sizeof(sys_config_padmanager_data_t) == 26);
/*
* Global sys_config state
*/
class lv2_config {
  atomic_t<u32> m_state = 0;

  // LV2 Config mutex, guards the service-event map below
  shared_mutex m_mutex;

  // Registered LV2 service events, keyed by event id
  std::unordered_map<u32, shared_ptr<lv2_config_service_event>> events;

public:
  void initialize();

  // Service Events
  void add_service_event(shared_ptr<lv2_config_service_event> event);
  void remove_service_event(u32 id);

  // Look up a service event by id. Returns an empty pointer when the id is
  // unknown or the stored entry is null.
  shared_ptr<lv2_config_service_event> find_event(u32 id) {
    reader_lock lock(m_mutex);

    if (const auto found = events.find(id);
        found != events.cend() && found->second) {
      return found->second;
    }

    return null_ptr;
  }

  ~lv2_config() noexcept;
};
/*
* LV2 Config Handle object, managed by IDM
*/
class lv2_config_handle {
public:
  static const u32 id_base = 0x41000000;
  static const u32 id_step = 0x100;
  static const u32 id_count = 2048;
  SAVESTATE_INIT_POS(37);

private:
  // IDM id of this handle, filled in by the factory below
  u32 idm_id;

  // queue for service/io event notifications
  const shared_ptr<lv2_event_queue> queue;

  // Push an event into the notification queue; returns true on success.
  // 'queue' is a const member, so it is tested and used directly instead of
  // taking a refcounting copy (the old `auto sptr = queue` performed a
  // needless shared_ptr copy on every notification).
  bool send_queue_event(u64 source, u64 d1, u64 d2, u64 d3) const {
    return queue && queue->send(source, d1, d2, d3) == 0;
  }

public:
  // Constructors (should not be used directly)
  lv2_config_handle(shared_ptr<lv2_event_queue> _queue) noexcept
      : queue(std::move(_queue)) {}

  // Factory
  template <typename... Args>
  static shared_ptr<lv2_config_handle> create(Args &&...args) {
    if (auto cfg =
            idm::make_ptr<lv2_config_handle>(std::forward<Args>(args)...)) {
      cfg->idm_id = idm::last_id();
      return cfg;
    }
    return null_ptr;
  }

  // Notify event queue for this handle
  bool notify(u64 source, u64 data2, u64 data3) const {
    return send_queue_event(source, idm_id, data2, data3);
  }
};
/*
* LV2 Service object, managed by IDM
*/
class lv2_config_service {
public:
static const u32 id_base = 0x43000000;
static const u32 id_step = 0x100;
static const u32 id_count = 2048;
SAVESTATE_INIT_POS(38);
private:
// IDM data
u32 idm_id;
// Whether this service is currently registered or not
bool registered = true;
public:
// Time of registration (get_system_time() at construction)
const u64 timestamp;
const sys_config_service_id id;
// Caller-supplied id used to identify this registration on unregister
const u64 user_id;
const u64 verbosity;
const u32 padding; // not used, but stored here just in case
// Copy of the event payload supplied at registration
const std::vector<u8> data;
// Constructors (should not be used directly)
lv2_config_service(sys_config_service_id _id, u64 _user_id, u64 _verbosity,
u32 _padding, const u8 *_data, usz size) noexcept
: timestamp(get_system_time()), id(_id), user_id(_user_id),
verbosity(_verbosity), padding(_padding),
data(&_data[0], &_data[size]) {}
// Factory
template <typename... Args>
static shared_ptr<lv2_config_service> create(Args &&...args) {
if (auto service =
idm::make_ptr<lv2_config_service>(std::forward<Args>(args)...)) {
service->idm_id = idm::last_id();
return service;
}
return null_ptr;
}
// Registration
bool is_registered() const { return registered; }
void unregister();
// Notify listeners
void notify() const;
// Utilities
// Buffer size needed to hold this service as a sys_config_service_event_t
// (the -1 accounts for the placeholder byte of the flexible 'data' member)
usz get_size() const {
return sizeof(sys_config_service_event_t) - 1 + data.size();
}
shared_ptr<lv2_config_service> get_shared_ptr() const {
return stx::make_shared_from_this<lv2_config_service>(this);
}
u32 get_id() const { return idm_id; }
};
/*
* LV2 Service Event Listener object, managed by IDM
*/
class lv2_config_service_listener {
public:
static const u32 id_base = 0x42000000;
static const u32 id_step = 0x100;
static const u32 id_count = 2048;
SAVESTATE_INIT_POS(39);
private:
// IDM data
u32 idm_id;
// The service listener owns the service events - service events will not be
// freed as long as their corresponding listener exists This has been
// confirmed to be the case in realhw
std::vector<shared_ptr<lv2_config_service_event>> service_events;
// sys_config handle whose event queue receives our notifications
shared_ptr<lv2_config_handle> handle;
// Record the event and forward it to the handle's queue
bool notify(const shared_ptr<lv2_config_service_event> &event);
public:
// Service this listener subscribes to
const sys_config_service_id service_id;
// Only service events with at least this verbosity are delivered
const u64 min_verbosity;
const sys_config_service_listener_type type;
// Caller-supplied filter/match data (see sys_config_add_service_listener)
const std::vector<u8> data;
// Constructors (should not be used directly)
lv2_config_service_listener(shared_ptr<lv2_config_handle> _handle,
sys_config_service_id _service_id,
u64 _min_verbosity,
sys_config_service_listener_type _type,
const u8 *_data, usz size) noexcept
: handle(std::move(_handle)), service_id(_service_id),
min_verbosity(_min_verbosity), type(_type),
data(&_data[0], &_data[size]) {}
// Factory
template <typename... Args>
static shared_ptr<lv2_config_service_listener> create(Args &&...args) {
if (auto listener = idm::make_ptr<lv2_config_service_listener>(
std::forward<Args>(args)...)) {
listener->idm_id = idm::last_id();
return listener;
}
return null_ptr;
}
// Check whether service matches
bool check_service(const lv2_config_service &service) const;
// Register new event, and notify queue
bool notify(const shared_ptr<lv2_config_service> &service);
// (Re-)notify about all still-registered past events
void notify_all();
// Utilities
u32 get_id() const { return idm_id; }
shared_ptr<lv2_config_service_listener> get_shared_ptr() const {
return stx::make_shared_from_this<lv2_config_service_listener>(this);
}
};
/*
* LV2 Service Event object (*not* managed by IDM)
*/
class lv2_config_service_event {
// Next globally-unique event id (monotonic counter stored in fxo)
static u32 get_next_id() {
struct service_event_id {
atomic_t<u32> next_id = 0;
};
return g_fxo->get<service_event_id>().next_id++;
}
atomic_t<bool> m_destroyed = false;
friend class lv2_config;
public:
const u32 id;
// Note: Events hold a shared_ptr to their corresponding service - services
// only get freed once there are no more pending service events This has been
// confirmed to be the case in realhw
const shared_ptr<lv2_config_handle> handle;
const shared_ptr<lv2_config_service> service;
const lv2_config_service_listener &listener;
// Constructors (should not be used directly)
lv2_config_service_event(
shared_ptr<lv2_config_handle> _handle,
shared_ptr<lv2_config_service> _service,
const lv2_config_service_listener &_listener) noexcept
: id(get_next_id()), handle(std::move(_handle)),
service(std::move(_service)), listener(_listener) {}
// Factory: constructs the event and registers it with the global lv2_config
template <typename... Args>
static shared_ptr<lv2_config_service_event> create(Args &&...args) {
auto ev =
make_shared<lv2_config_service_event>(std::forward<Args>(args)...);
g_fxo->get<lv2_config>().add_service_event(ev);
return ev;
}
// Destructor
lv2_config_service_event &operator=(thread_state s) noexcept;
~lv2_config_service_event() noexcept;
// Notify queue that this event exists
bool notify() const;
// Write event to buffer
void write(sys_config_service_event_t *dst) const;
// Check if the buffer can fit the current event, return false otherwise
bool check_buffer_size(usz size) const { return service->get_size() <= size; }
};
/*
* Syscalls
*/
/*516*/ error_code sys_config_open(u32 equeue_hdl, vm::ptr<u32> out_config_hdl);
/*517*/ error_code sys_config_close(u32 config_hdl);
/*518*/ error_code
sys_config_get_service_event(u32 config_hdl, u32 event_id,
vm::ptr<sys_config_service_event_t> dst, u64 size);
/*519*/ error_code sys_config_add_service_listener(
u32 config_hdl, sys_config_service_id service_id, u64 min_verbosity,
vm::ptr<void> in, u64 size, sys_config_service_listener_type type,
vm::ptr<u32> out_listener_hdl);
/*520*/ error_code sys_config_remove_service_listener(u32 config_hdl,
u32 listener_hdl);
/*521*/ error_code sys_config_register_service(u32 config_hdl,
sys_config_service_id service_id,
u64 user_id, u64 verbosity,
vm::ptr<u8> data_buf, u64 size,
vm::ptr<u32> out_service_hdl);
/*522*/ error_code sys_config_unregister_service(u32 config_hdl,
u32 service_hdl);
// Following syscalls have not been REd yet
/*523*/ error_code sys_config_get_io_event(u32 config_hdl, u32 event_id /*?*/,
vm::ptr<void> out_buf /*?*/,
u64 size /*?*/);
/*524*/ error_code sys_config_register_io_error_listener(u32 config_hdl);
/*525*/ error_code sys_config_unregister_io_error_listener(u32 config_hdl);

View file

@ -1,5 +1,6 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// SysCalls

View file

@ -1,10 +1,11 @@
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// SysCalls
error_code sys_crypto_engine_create(vm::ptr<u32> id);
error_code sys_crypto_engine_destroy(u32 id);
error_code sys_crypto_engine_random_generate(vm::ptr<void> buffer, u64 buffer_size);
error_code sys_crypto_engine_random_generate(vm::ptr<void> buffer,
u64 buffer_size);

View file

@ -1,9 +1,11 @@
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// Syscalls
error_code sys_dbg_read_process_memory(s32 pid, u32 address, u32 size, vm::ptr<void> data);
error_code sys_dbg_write_process_memory(s32 pid, u32 address, u32 size, vm::cptr<void> data);
error_code sys_dbg_read_process_memory(s32 pid, u32 address, u32 size,
vm::ptr<void> data);
error_code sys_dbg_write_process_memory(s32 pid, u32 address, u32 size,
vm::cptr<void> data);

View file

@ -0,0 +1,153 @@
#pragma once

#include "sys_sync.h"

#include "Emu/Memory/vm_ptr.h"

// IWYU: this header itself uses std::deque, std::function, std::string_view
// and std::tuple below — include them directly instead of transitively.
#include <deque>
#include <functional>
#include <string_view>
#include <tuple>

class cpu_thread;
class spu_thread; // fixed: was misspelled 'spu_thrread', which forward-declared
                  // an unrelated, never-used name

// Event Queue Type
enum : u32 {
  SYS_PPU_QUEUE = 1,
  SYS_SPU_QUEUE = 2,
};

// Event Queue Destroy Mode
enum : s32 {
  SYS_EVENT_QUEUE_DESTROY_FORCE = 1,
};

// Event Queue Ipc Key
enum : u64 {
  SYS_EVENT_QUEUE_LOCAL = 0,
};

// Event Port Type
enum : s32 {
  SYS_EVENT_PORT_LOCAL = 1,
  SYS_EVENT_PORT_IPC = 3, // Unofficial name
};

// Event Port Name
enum : u64 {
  SYS_EVENT_PORT_NO_NAME = 0,
};

// Event Source Type
enum : u32 {
  SYS_SPU_THREAD_EVENT_USER = 1,
  SYS_SPU_THREAD_EVENT_DMA = 2, // not supported
};

// Event Source Key
enum : u64 {
  SYS_SPU_THREAD_EVENT_USER_KEY = 0xFFFFFFFF53505501ull,
  SYS_SPU_THREAD_EVENT_DMA_KEY = 0xFFFFFFFF53505502ull,
  SYS_SPU_THREAD_EVENT_EXCEPTION_KEY = 0xFFFFFFFF53505503ull,
};

// Attribute block passed by the guest to sys_event_queue_create (big-endian)
struct sys_event_queue_attribute_t {
  be_t<u32> protocol; // SYS_SYNC_PRIORITY or SYS_SYNC_FIFO
  be_t<s32> type;     // SYS_PPU_QUEUE or SYS_SPU_QUEUE
  union {
    nse_t<u64, 1> name_u64;
    char name[sizeof(u64)];
  };
};

// Guest-visible event record filled by sys_event_queue_receive
struct sys_event_t {
  be_t<u64> source;
  be_t<u64> data1;
  be_t<u64> data2;
  be_t<u64> data3;
};

// Source, data1, data2, data3
using lv2_event = std::tuple<u64, u64, u64, u64>;

struct lv2_event_port;
// LV2 event queue object (IDM-managed); see sys_event.cpp for behavior
struct lv2_event_queue final : public lv2_obj {
static const u32 id_base = 0x8d000000;
const u32 id;
const lv2_protocol protocol;
// SYS_PPU_QUEUE or SYS_SPU_QUEUE
const u8 type;
// Queue capacity (from sys_event_queue_create's 'size' argument)
const u8 size;
const u64 name;
// IPC key (SYS_EVENT_QUEUE_LOCAL for local queues)
const u64 key;
shared_mutex mutex;
// Pending events (source, data1, data2, data3)
std::deque<lv2_event> events;
// Presumably the SPU and PPU waiter lists — confirm in sys_event.cpp
spu_thread *sq{};
ppu_thread *pq{};
lv2_event_queue(u32 protocol, s32 type, s32 size, u64 name,
u64 ipc_key) noexcept;
// Savestate deserialization
lv2_event_queue(utils::serial &ar) noexcept;
static std::function<void(void *)> load(utils::serial &ar);
void save(utils::serial &ar);
// Serialize/deserialize a reference to a queue (for objects holding one)
static void save_ptr(utils::serial &, lv2_event_queue *);
static shared_ptr<lv2_event_queue>
load_ptr(utils::serial &ar, shared_ptr<lv2_event_queue> &queue,
std::string_view msg = {});
CellError send(lv2_event event, bool *notified_thread = nullptr,
lv2_event_port *port = nullptr);
// Convenience overload forwarding the four event words
CellError send(u64 source, u64 d1, u64 d2, u64 d3,
bool *notified_thread = nullptr,
lv2_event_port *port = nullptr) {
return send(std::make_tuple(source, d1, d2, d3), notified_thread, port);
}
// Get event queue by its global key
static shared_ptr<lv2_event_queue> find(u64 ipc_key);
};
// LV2 event port object (IDM-managed); sends events into a connected queue
struct lv2_event_port final : lv2_obj {
static const u32 id_base = 0x0e000000;
const s32 type; // Port type, either IPC or local
const u64 name; // Event source (generated from id and process id if not set)
atomic_t<usz> is_busy = 0; // Counts threads waiting on event sending
shared_ptr<lv2_event_queue> queue; // Event queue this port is connected to
lv2_event_port(s32 type, u64 name) : type(type), name(name) {}
// Savestate deserialization
lv2_event_port(utils::serial &ar);
void save(utils::serial &ar);
};
class ppu_thread;
// Syscalls
error_code sys_event_queue_create(cpu_thread &cpu, vm::ptr<u32> equeue_id,
vm::ptr<sys_event_queue_attribute_t> attr,
u64 event_queue_key, s32 size);
error_code sys_event_queue_destroy(ppu_thread &ppu, u32 equeue_id, s32 mode);
error_code sys_event_queue_receive(ppu_thread &ppu, u32 equeue_id,
vm::ptr<sys_event_t> dummy_event,
u64 timeout);
error_code sys_event_queue_tryreceive(ppu_thread &ppu, u32 equeue_id,
vm::ptr<sys_event_t> event_array,
s32 size, vm::ptr<u32> number);
error_code sys_event_queue_drain(ppu_thread &ppu, u32 event_queue_id);
error_code sys_event_port_create(cpu_thread &cpu, vm::ptr<u32> eport_id,
s32 port_type, u64 name);
error_code sys_event_port_destroy(ppu_thread &ppu, u32 eport_id);
error_code sys_event_port_connect_local(cpu_thread &cpu, u32 event_port_id,
u32 event_queue_id);
error_code sys_event_port_connect_ipc(ppu_thread &ppu, u32 eport_id,
u64 ipc_key);
error_code sys_event_port_disconnect(ppu_thread &ppu, u32 eport_id);
error_code sys_event_port_send(u32 event_port_id, u64 data1, u64 data2,
u64 data3);

View file

@ -0,0 +1,118 @@
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
// Waiter-count attribute bits and wait-mode bits for sys_event_flag_*
enum {
SYS_SYNC_WAITER_SINGLE = 0x10000,
SYS_SYNC_WAITER_MULTIPLE = 0x20000,
SYS_EVENT_FLAG_WAIT_AND = 0x01,
SYS_EVENT_FLAG_WAIT_OR = 0x02,
SYS_EVENT_FLAG_WAIT_CLEAR = 0x10,
SYS_EVENT_FLAG_WAIT_CLEAR_ALL = 0x20,
};
// Attribute block passed by the guest to sys_event_flag_create (big-endian)
struct sys_event_flag_attribute_t {
be_t<u32> protocol;
be_t<u32> pshared;
be_t<u64> ipc_key;
be_t<s32> flags;
be_t<s32> type;
union {
// 8-byte name, accessible either as a raw integer or as chars
nse_t<u64, 1> name_u64;
char name[sizeof(u64)];
};
};
struct lv2_event_flag final : lv2_obj {
static const u32 id_base = 0x98000000;
const lv2_protocol protocol;
const u64 key;
const s32 type;
const u64 name;
shared_mutex mutex;
atomic_t<u64> pattern;
ppu_thread *sq{};
lv2_event_flag(u32 protocol, u64 key, s32 type, u64 name,
u64 pattern) noexcept
: protocol{static_cast<u8>(protocol)}, key(key), type(type), name(name),
pattern(pattern) {}
lv2_event_flag(utils::serial &ar);
static std::function<void(void *)> load(utils::serial &ar);
void save(utils::serial &ar);
// Check mode arg
static bool check_mode(u32 mode) {
switch (mode & 0xf) {
case SYS_EVENT_FLAG_WAIT_AND:
break;
case SYS_EVENT_FLAG_WAIT_OR:
break;
default:
return false;
}
switch (mode & ~0xf) {
case 0:
break;
case SYS_EVENT_FLAG_WAIT_CLEAR:
break;
case SYS_EVENT_FLAG_WAIT_CLEAR_ALL:
break;
default:
return false;
}
return true;
}
// Check and clear pattern (must be atomic op)
static bool check_pattern(u64 &pattern, u64 bitptn, u64 mode, u64 *result) {
// Write pattern
if (result) {
*result = pattern;
}
// Check pattern
if (((mode & 0xf) == SYS_EVENT_FLAG_WAIT_AND &&
(pattern & bitptn) != bitptn) ||
((mode & 0xf) == SYS_EVENT_FLAG_WAIT_OR && (pattern & bitptn) == 0)) {
return false;
}
// Clear pattern if necessary
if ((mode & ~0xf) == SYS_EVENT_FLAG_WAIT_CLEAR) {
pattern &= ~bitptn;
} else if ((mode & ~0xf) == SYS_EVENT_FLAG_WAIT_CLEAR_ALL) {
pattern = 0;
}
return true;
}
};
// Aux
class ppu_thread;
// Syscalls
error_code sys_event_flag_create(ppu_thread &ppu, vm::ptr<u32> id,
vm::ptr<sys_event_flag_attribute_t> attr,
u64 init);
error_code sys_event_flag_destroy(ppu_thread &ppu, u32 id);
error_code sys_event_flag_wait(ppu_thread &ppu, u32 id, u64 bitptn, u32 mode,
vm::ptr<u64> result, u64 timeout);
error_code sys_event_flag_trywait(ppu_thread &ppu, u32 id, u64 bitptn, u32 mode,
vm::ptr<u64> result);
error_code sys_event_flag_set(cpu_thread &cpu, u32 id, u64 bitptn);
error_code sys_event_flag_clear(ppu_thread &ppu, u32 id, u64 bitptn);
error_code sys_event_flag_cancel(ppu_thread &ppu, u32 id, vm::ptr<u32> num);
error_code sys_event_flag_get(ppu_thread &ppu, u32 id, vm::ptr<u64> flags);

View file

@ -0,0 +1,665 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
#include "util/File.h"
#include "util/StrUtil.h"
#include "util/mutex.h"
#include <string>
// Open Flags
enum : s32 {
CELL_FS_O_RDONLY = 000000,
CELL_FS_O_WRONLY = 000001,
CELL_FS_O_RDWR = 000002,
CELL_FS_O_ACCMODE = 000003,
CELL_FS_O_CREAT = 000100,
CELL_FS_O_EXCL = 000200,
CELL_FS_O_TRUNC = 001000,
CELL_FS_O_APPEND = 002000,
CELL_FS_O_MSELF = 010000,
CELL_FS_O_UNK =
01000000, // Tests have shown this is independent of other flags. Only
// known to be called in Rockband games.
};
// Seek Mode
enum : s32 {
CELL_FS_SEEK_SET,
CELL_FS_SEEK_CUR,
CELL_FS_SEEK_END,
};
enum : s32 {
CELL_FS_MAX_FS_PATH_LENGTH = 1024,
CELL_FS_MAX_FS_FILE_NAME_LENGTH = 255,
CELL_FS_MAX_MP_LENGTH = 31,
};
enum : s32 {
CELL_FS_S_IFMT = 0170000,
CELL_FS_S_IFDIR = 0040000, // directory
CELL_FS_S_IFREG = 0100000, // regular
CELL_FS_S_IFLNK = 0120000, // symbolic link
CELL_FS_S_IFWHT = 0160000, // unknown
CELL_FS_S_IRUSR = 0000400, // R for owner
CELL_FS_S_IWUSR = 0000200, // W for owner
CELL_FS_S_IXUSR = 0000100, // X for owner
CELL_FS_S_IRGRP = 0000040, // R for group
CELL_FS_S_IWGRP = 0000020, // W for group
CELL_FS_S_IXGRP = 0000010, // X for group
CELL_FS_S_IROTH = 0000004, // R for other
CELL_FS_S_IWOTH = 0000002, // W for other
CELL_FS_S_IXOTH = 0000001, // X for other
};
// CellFsDirent.d_type
enum : u8 {
CELL_FS_TYPE_UNKNOWN = 0,
CELL_FS_TYPE_DIRECTORY = 1,
CELL_FS_TYPE_REGULAR = 2,
CELL_FS_TYPE_SYMLINK = 3,
};
enum : u32 {
CELL_FS_IO_BUFFER_PAGE_SIZE_64KB = 0x0002,
CELL_FS_IO_BUFFER_PAGE_SIZE_1MB = 0x0004,
};
struct CellFsDirent {
u8 d_type;
u8 d_namlen;
char d_name[256];
};
struct CellFsStat {
be_t<s32> mode;
be_t<s32> uid;
be_t<s32> gid;
be_t<s64, 4> atime;
be_t<s64, 4> mtime;
be_t<s64, 4> ctime;
be_t<u64, 4> size;
be_t<u64, 4> blksize;
};
CHECK_SIZE_ALIGN(CellFsStat, 52, 4);
struct CellFsDirectoryEntry {
CellFsStat attribute;
CellFsDirent entry_name;
};
struct CellFsUtimbuf {
be_t<s64, 4> actime;
be_t<s64, 4> modtime;
};
CHECK_SIZE_ALIGN(CellFsUtimbuf, 16, 4);
// MSelf file structs
struct FsMselfHeader {
be_t<u32> m_magic;
be_t<u32> m_format_version;
be_t<u64> m_file_size;
be_t<u32> m_entry_num;
be_t<u32> m_entry_size;
u8 m_reserve[40];
};
struct FsMselfEntry {
char m_name[32];
be_t<u64> m_offset;
be_t<u64> m_size;
u8 m_reserve[16];
};
enum class lv2_mp_flag {
read_only,
no_uid_gid,
strict_get_block_size,
cache,
__bitset_enum_max
};
enum class lv2_file_type {
regular = 0,
sdata,
edata,
};
struct lv2_fs_mount_point {
const std::string_view root;
const std::string_view file_system;
const std::string_view device;
const u32 sector_size = 512;
const u64 sector_count = 256;
const u32 block_size = 4096;
const bs_t<lv2_mp_flag> flags{};
lv2_fs_mount_point *const next = nullptr;
mutable shared_mutex mutex;
};
extern lv2_fs_mount_point g_mp_sys_dev_hdd0;
extern lv2_fs_mount_point g_mp_sys_no_device;
// Resolved information for a single mount; defaults to the "no device"
// mount point when none is supplied.
struct lv2_fs_mount_info {
lv2_fs_mount_point *const mp;
const std::string device;
const std::string file_system;
const bool read_only;
// NOTE: member initialization order matters: 'mp' is declared first, so the
// subsequent members may safely read defaults through this->mp.
lv2_fs_mount_info(lv2_fs_mount_point *mp = nullptr,
std::string_view device = {},
std::string_view file_system = {}, bool read_only = false)
: mp(mp ? mp : &g_mp_sys_no_device),
device(device.empty() ? this->mp->device : device),
file_system(file_system.empty() ? this->mp->file_system : file_system),
read_only(
(this->mp->flags & lv2_mp_flag::read_only) ||
read_only) // respect the original flags of the mount point as well
{}
// Identity comparison: infos are equal only if they are the same object
constexpr bool operator==(const lv2_fs_mount_info &rhs) const noexcept {
return this == &rhs;
}
// Compare against a raw mount point pointer
constexpr bool
operator==(const lv2_fs_mount_point *const &rhs) const noexcept {
return mp == rhs;
}
// Convenience access to the underlying mount point
constexpr lv2_fs_mount_point *operator->() const noexcept { return mp; }
};
extern lv2_fs_mount_info g_mi_sys_not_found;
struct CellFsMountInfo; // Forward Declaration
// Registry of mounted devices keyed by mount path (savestate-aware)
struct lv2_fs_mount_info_map {
public:
SAVESTATE_INIT_POS(40);
lv2_fs_mount_info_map();
lv2_fs_mount_info_map(const lv2_fs_mount_info_map &) = delete;
lv2_fs_mount_info_map &operator=(const lv2_fs_mount_info_map &) = delete;
~lv2_fs_mount_info_map();
// Forwarding arguments to map.try_emplace(): refer to the constructor of
// lv2_fs_mount_info
template <typename... Args> bool add(Args &&...args) {
return map.try_emplace(std::forward<Args>(args)...).second;
}
bool remove(std::string_view path);
// Find the mount info responsible for 'path'; 'mount_path' (if given)
// presumably receives the matching mount root — confirm in sys_fs.cpp
const lv2_fs_mount_info &lookup(std::string_view path,
bool no_cell_fs_path = false,
std::string *mount_path = nullptr) const;
// Enumerate mounts into 'info' (up to 'len' entries); return value
// semantics are defined in sys_fs.cpp
u64 get_all(CellFsMountInfo *info = nullptr, u64 len = 0) const;
bool is_device_mounted(std::string_view device_name) const;
static bool vfs_unmount(std::string_view vpath, bool remove_from_map = true);
private:
// Transparent comparator/hash allow lookups by string_view without
// constructing a temporary std::string key
std::unordered_map<std::string, lv2_fs_mount_info, fmt::string_hash,
std::equal_to<>>
map;
};
// Common base for LV2 filesystem objects (files and directories)
struct lv2_fs_object {
static constexpr u32 id_base = 3;
static constexpr u32 id_step = 1;
static constexpr u32 id_count = 255 - id_base;
static constexpr bool id_lowest = true;
SAVESTATE_INIT_POS(49);
// File Name (max 1055)
const std::array<char, 0x420> name;
// Mount Info
const lv2_fs_mount_info &mp;
protected:
lv2_fs_object(std::string_view filename);
lv2_fs_object(utils::serial &ar, bool dummy);
public:
lv2_fs_object(const lv2_fs_object &) = delete;
lv2_fs_object &operator=(const lv2_fs_object &) = delete;
// Normalize a virtual path
static std::string get_normalized_path(std::string_view path);
// Get the device's root path (e.g. "/dev_hdd0") from a given path
static std::string get_device_root(std::string_view filename);
// Filename can be either a path starting with '/' or a CELL_FS device name
// This should be used only when handling devices that are not mounted
// Otherwise, use g_fxo->get<lv2_fs_mount_info_map>().lookup() to look up
// mounted devices accurately
static lv2_fs_mount_point *get_mp(std::string_view filename,
std::string *vfs_path = nullptr);
// Copy 'filename' into a fixed 0x420-byte buffer, truncating to 0x41F
// characters so the terminating NUL always fits
static std::array<char, 0x420> get_name(std::string_view filename) {
std::array<char, 0x420> name;
if (filename.size() >= 0x420) {
filename = filename.substr(0, 0x420 - 1);
}
filename.copy(name.data(), filename.size());
name[filename.size()] = 0;
return name;
}
// Base class has no state to serialize beyond what the ctor restores
void save(utils::serial &) {}
};
// LV2 open-file object (IDM-managed), wrapping a host fs::file
struct lv2_file final : lv2_fs_object {
static constexpr u32 id_type = 1;
fs::file file;
// CELL_FS open mode and flags as supplied by the guest
const s32 mode;
const s32 flags;
std::string real_path;
const lv2_file_type type;
// IO Container
u32 ct_id{}, ct_used{};
// Stream lock
atomic_t<u32> lock{0};
// Some variables for convenience of data restoration
struct save_restore_t {
u64 seek_pos;
u64 atime;
u64 mtime;
} restore_data{};
lv2_file(std::string_view filename, fs::file &&file, s32 mode, s32 flags,
const std::string &real_path, lv2_file_type type = {})
: lv2_fs_object(filename), file(std::move(file)), mode(mode),
flags(flags), real_path(real_path), type(type) {}
// Construct from an existing lv2_file, reusing its stored name
lv2_file(const lv2_file &host, fs::file &&file, s32 mode, s32 flags,
const std::string &real_path, lv2_file_type type = {})
: lv2_fs_object(host.name.data()), file(std::move(file)), mode(mode),
flags(flags), real_path(real_path), type(type) {}
// Savestate deserialization
lv2_file(utils::serial &ar);
void save(utils::serial &ar);
// Result of open_raw: error code plus the opened host file
struct open_raw_result_t {
CellError error;
fs::file file;
};
// Result of open: error code, resolved paths, host file and detected type
struct open_result_t {
CellError error;
std::string ppath;
std::string real_path;
fs::file file;
lv2_file_type type;
};
// Open a file with wrapped logic of sys_fs_open
static open_raw_result_t
open_raw(const std::string &path, s32 flags, s32 mode,
lv2_file_type type = lv2_file_type::regular,
const lv2_fs_mount_info &mp = g_mi_sys_not_found);
static open_result_t open(std::string_view vpath, s32 flags, s32 mode,
const void *arg = {}, u64 size = 0);
// File reading with intermediate buffer
static u64 op_read(const fs::file &file, vm::ptr<void> buf, u64 size,
u64 opt_pos = umax);
u64 op_read(vm::ptr<void> buf, u64 size, u64 opt_pos = umax) const {
return op_read(file, buf, size, opt_pos);
}
// File writing with intermediate buffer
static u64 op_write(const fs::file &file, vm::cptr<void> buf, u64 size);
u64 op_write(vm::cptr<void> buf, u64 size) const {
return op_write(file, buf, size);
}
// For MSELF support
struct file_view;
// Make file view from lv2_file object (for MSELF support)
static fs::file make_view(const shared_ptr<lv2_file> &_file, u64 offset);
};
// An open directory descriptor object (idm id_type 2). The entry list is
// snapshotted at construction time (const vector) and only the read cursor
// mutates afterwards.
struct lv2_dir final : lv2_fs_object {
  static constexpr u32 id_type = 2;
  const std::vector<fs::dir_entry> entries;
  // Current reading position
  atomic_t<u64> pos{0};
  lv2_dir(std::string_view filename, std::vector<fs::dir_entry> &&entries)
      : lv2_fs_object(filename), entries(std::move(entries)) {}
  lv2_dir(utils::serial &ar);
  void save(utils::serial &ar);
  // Read next entry; returns nullptr once the end has been reached.
  const fs::dir_entry *dir_read() {
    const u64 old_pos = pos;
    // Only increment the cursor while the snapshot says it is in range,
    // then re-check the index actually fetched: a concurrent reader may
    // have advanced pos past the end between the load and the increment.
    if (const u64 cur = (old_pos < entries.size() ? pos++ : old_pos);
        cur < entries.size()) {
      return &entries[cur];
    }
    return nullptr;
  }
};
// sys_fs_fcntl arg base class (left empty for PODness)
struct lv2_file_op {};
namespace vtable {
// Guest-side virtual table layout shared by fcntl argument objects
// (big-endian guest pointers to guest functions).
struct lv2_file_op {
  // Speculation
  vm::bptrb<vm::ptrb<void>(vm::ptrb<lv2_file_op>)> get_data;
  vm::bptrb<u32(vm::ptrb<lv2_file_op>)> get_size;
  vm::bptrb<void(vm::ptrb<lv2_file_op>)> _dtor1;
  vm::bptrb<void(vm::ptrb<lv2_file_op>)> _dtor2;
};
} // namespace vtable
// sys_fs_fcntl: read with offset, write with offset
// Guest-memory ABI layout; total size is asserted by CHECK_SIZE below.
struct lv2_file_op_rw : lv2_file_op {
  vm::bptrb<vtable::lv2_file_op> _vtable;
  be_t<u32> op;
  be_t<u32> _x8; // ???
  be_t<u32> _xc; // ???
  be_t<u32> fd; // File descriptor (3..255)
  vm::bptrb<void> buf; // Buffer for data
  be_t<u64> offset; // File offset
  be_t<u64> size; // Access size
  be_t<s32> out_code; // Op result
  be_t<u64> out_size; // Size processed
};
CHECK_SIZE(lv2_file_op_rw, 0x38);
// sys_fs_fcntl: cellFsSdataOpenByFd
// Guest-memory ABI layout; total size is asserted by CHECK_SIZE below.
struct lv2_file_op_09 : lv2_file_op {
  vm::bptrb<vtable::lv2_file_op> _vtable;
  be_t<u32> op;
  be_t<u32> _x8;
  be_t<u32> _xc;
  be_t<u32> fd; // Source file descriptor
  be_t<u64> offset;
  be_t<u32> _vtabl2;
  be_t<u32> arg1; // 0x180
  be_t<u32> arg2; // 0x10
  be_t<u32> arg_size; // 6th arg
  be_t<u32> arg_ptr; // 5th arg
  be_t<u32> _x34;
  be_t<s32> out_code; // Op result
  be_t<u32> out_fd; // Newly opened descriptor on success
};
CHECK_SIZE(lv2_file_op_09, 0x40);
// sys_fs_fcntl 0xe0000025 argument; guest ABI layout (size checked below).
// Field meanings are partially reverse-engineered — see inline notes.
struct lv2_file_e0000025 : lv2_file_op {
  be_t<u32> size; // 0x30
  be_t<u32> _x4; // 0x10
  be_t<u32> _x8; // 0x28 - offset of out_code
  be_t<u32> name_size;
  vm::bcptr<char> name;
  be_t<u32> _x14;
  be_t<u32> _x18; // 0
  be_t<u32> _x1c; // 0
  be_t<u32> _x20; // 16
  be_t<u32> _x24; // unk, seems to be memory location
  be_t<u32> out_code; // out_code
  be_t<u32> fd; // 0xffffffff - likely fd out
};
CHECK_SIZE(lv2_file_e0000025, 0x30);
// sys_fs_fnctl: cellFsGetDirectoryEntries
// Guest-memory ABI layout; sizes asserted by CHECK_SIZE below.
struct lv2_file_op_dir : lv2_file_op {
  // Inner argument record: destination buffer and result counters
  struct dir_info : lv2_file_op {
    be_t<s32> _code; // Op result
    be_t<u32> _size; // Number of entries written
    vm::bptrb<CellFsDirectoryEntry> ptr;
    be_t<u32> max; // Capacity of ptr in entries
  };
  CHECK_SIZE(dir_info, 0x10);
  vm::bptrb<vtable::lv2_file_op> _vtable;
  be_t<u32> op;
  be_t<u32> _x8;
  dir_info arg;
};
CHECK_SIZE(lv2_file_op_dir, 0x1c);
// sys_fs_fcntl: cellFsGetFreeSize (for dev_hdd0)
// Guest-memory ABI layout; total size is asserted by CHECK_SIZE below.
struct lv2_file_c0000002 : lv2_file_op {
  vm::bptrb<vtable::lv2_file_op> _vtable;
  be_t<u32> op;
  be_t<u32> _x8;
  vm::bcptr<char> path;
  be_t<u32> _x10; // 0
  be_t<u32> _x14;
  be_t<u32> out_code; // CELL_ENOSYS
  be_t<u32> out_block_size;
  be_t<u64> out_block_count;
};
CHECK_SIZE(lv2_file_c0000002, 0x28);
// sys_fs_fcntl: unknown (called before cellFsOpen, for example)
// Guest-memory ABI layout; total size is asserted by CHECK_SIZE below.
struct lv2_file_c0000006 : lv2_file_op {
  be_t<u32> size; // 0x20
  be_t<u32> _x4; // 0x10
  be_t<u32> _x8; // 0x18 - offset of out_code
  be_t<u32> name_size;
  vm::bcptr<char> name;
  be_t<u32> _x14; // 0
  be_t<u32> out_code; // 0x80010003
  be_t<u32> out_id; // set to 0, may return 0x1b5
};
CHECK_SIZE(lv2_file_c0000006, 0x20);
// sys_fs_fcntl: cellFsArcadeHddSerialNumber
// Guest-memory ABI layout; total size is asserted by CHECK_SIZE below.
struct lv2_file_c0000007 : lv2_file_op {
  be_t<u32> out_code; // set to 0
  vm::bcptr<char> device; // CELL_FS_IOS:ATA_HDD
  be_t<u32> device_size; // 0x14
  vm::bptr<char> model; // Output buffer for the model string
  be_t<u32> model_size; // 0x29
  vm::bptr<char> serial; // Output buffer for the serial string
  be_t<u32> serial_size; // 0x15
};
CHECK_SIZE(lv2_file_c0000007, 0x1c);
// sys_fs_fcntl 0xc0000008: set default IO buffer container
// Guest-memory ABI layout; total size is asserted by CHECK_SIZE below.
struct lv2_file_c0000008 : lv2_file_op {
  u8 _x0[4];
  be_t<u32> op; // 0xC0000008
  u8 _x8[8];
  be_t<u64> container_id;
  be_t<u32> size;
  be_t<u32> page_type; // 0x4000 for cellFsSetDefaultContainer
  // 0x4000 | page_type given by user, valid values seem to
  // be: CELL_FS_IO_BUFFER_PAGE_SIZE_64KB 0x0002
  // CELL_FS_IO_BUFFER_PAGE_SIZE_1MB 0x0004
  be_t<u32> out_code;
  u8 _x24[4];
};
CHECK_SIZE(lv2_file_c0000008, 0x28);
// sys_fs_fcntl 0xc0000015 argument (USB device by vendor/product id).
// Guest-memory ABI layout; total size is asserted by CHECK_SIZE below.
struct lv2_file_c0000015 : lv2_file_op {
  be_t<u32> size; // 0x20
  be_t<u32> _x4; // 0x10
  be_t<u32> _x8; // 0x18 - offset of out_code
  be_t<u32> path_size;
  vm::bcptr<char> path;
  be_t<u32> _x14; //
  be_t<u16> vendorID;
  be_t<u16> productID;
  be_t<u32> out_code; // set to 0
};
CHECK_SIZE(lv2_file_c0000015, 0x20);
// sys_fs_fcntl 0xc000001a argument (disc read retry configuration).
// Guest-memory ABI layout; total size is asserted by CHECK_SIZE below.
struct lv2_file_c000001a : lv2_file_op {
  be_t<u32>
      disc_retry_type; // CELL_FS_DISC_READ_RETRY_NONE results in a 0 here
  // CELL_FS_DISC_READ_RETRY_DEFAULT results in a 0x63 here
  be_t<u32> _x4; // 0
  be_t<u32> _x8; // 0x000186A0
  be_t<u32> _xC; // 0
  be_t<u32> _x10; // 0
  be_t<u32> _x14; // 0
};
CHECK_SIZE(lv2_file_c000001a, 0x18);
// sys_fs_fcntl 0xc000001c argument (USB device with serial output).
// Guest-memory ABI layout; total size is asserted by CHECK_SIZE below.
struct lv2_file_c000001c : lv2_file_op {
  be_t<u32> size; // 0x60
  be_t<u32> _x4; // 0x10
  be_t<u32> _x8; // 0x18 - offset of out_code
  be_t<u32> path_size;
  vm::bcptr<char> path;
  be_t<u32> unk1;
  be_t<u16> vendorID;
  be_t<u16> productID;
  be_t<u32> out_code; // set to 0
  be_t<u16> serial[32]; // Output: device serial (UTF-16 code units)
};
CHECK_SIZE(lv2_file_c000001c, 0x60);
// sys_fs_fcntl: cellFsAllocateFileAreaWithoutZeroFill
// Guest-memory ABI layout; total size is asserted by CHECK_SIZE below.
struct lv2_file_e0000017 : lv2_file_op {
  be_t<u32> size; // 0x28
  be_t<u32> _x4; // 0x10, offset
  be_t<u32> _x8; // 0x20, offset
  be_t<u32> _xc; // -
  vm::bcptr<char> file_path;
  be_t<u64> file_size; // Requested allocation size
  be_t<u32> out_code;
};
CHECK_SIZE(lv2_file_e0000017, 0x28);
// Output record filled by sys_fs_get_mount_info (fixed guest ABI layout,
// size asserted by CHECK_SIZE below).
struct CellFsMountInfo {
  char mount_path[0x20]; // 0x0
  char filesystem[0x20]; // 0x20
  char dev_name[0x40]; // 0x40
  be_t<u32> unk[5]; // 0x80, probably attributes
};
CHECK_SIZE(CellFsMountInfo, 0x94);
// Default IO container
// Process-wide default sys_fs IO buffer container state; all fields are
// guarded by 'mutex'.
struct default_sys_fs_container {
  shared_mutex mutex;
  u32 id = 0;   // Container id
  u32 cap = 0;  // Capacity
  u32 used = 0; // Amount currently in use
};
// Syscalls
error_code sys_fs_test(ppu_thread &ppu, u32 arg1, u32 arg2, vm::ptr<u32> arg3,
u32 arg4, vm::ptr<char> buf, u32 buf_size);
error_code sys_fs_open(ppu_thread &ppu, vm::cptr<char> path, s32 flags,
vm::ptr<u32> fd, s32 mode, vm::cptr<void> arg, u64 size);
error_code sys_fs_read(ppu_thread &ppu, u32 fd, vm::ptr<void> buf, u64 nbytes,
vm::ptr<u64> nread);
error_code sys_fs_write(ppu_thread &ppu, u32 fd, vm::cptr<void> buf, u64 nbytes,
vm::ptr<u64> nwrite);
error_code sys_fs_close(ppu_thread &ppu, u32 fd);
error_code sys_fs_opendir(ppu_thread &ppu, vm::cptr<char> path,
vm::ptr<u32> fd);
error_code sys_fs_readdir(ppu_thread &ppu, u32 fd, vm::ptr<CellFsDirent> dir,
vm::ptr<u64> nread);
error_code sys_fs_closedir(ppu_thread &ppu, u32 fd);
error_code sys_fs_stat(ppu_thread &ppu, vm::cptr<char> path,
vm::ptr<CellFsStat> sb);
error_code sys_fs_fstat(ppu_thread &ppu, u32 fd, vm::ptr<CellFsStat> sb);
error_code sys_fs_link(ppu_thread &ppu, vm::cptr<char> from, vm::cptr<char> to);
error_code sys_fs_mkdir(ppu_thread &ppu, vm::cptr<char> path, s32 mode);
error_code sys_fs_rename(ppu_thread &ppu, vm::cptr<char> from,
vm::cptr<char> to);
error_code sys_fs_rmdir(ppu_thread &ppu, vm::cptr<char> path);
error_code sys_fs_unlink(ppu_thread &ppu, vm::cptr<char> path);
error_code sys_fs_access(ppu_thread &ppu, vm::cptr<char> path, s32 mode);
error_code sys_fs_fcntl(ppu_thread &ppu, u32 fd, u32 op, vm::ptr<void> arg,
u32 size);
error_code sys_fs_lseek(ppu_thread &ppu, u32 fd, s64 offset, s32 whence,
vm::ptr<u64> pos);
error_code sys_fs_fdatasync(ppu_thread &ppu, u32 fd);
error_code sys_fs_fsync(ppu_thread &ppu, u32 fd);
error_code sys_fs_fget_block_size(ppu_thread &ppu, u32 fd,
vm::ptr<u64> sector_size,
vm::ptr<u64> block_size, vm::ptr<u64> arg4,
vm::ptr<s32> out_flags);
error_code sys_fs_get_block_size(ppu_thread &ppu, vm::cptr<char> path,
vm::ptr<u64> sector_size,
vm::ptr<u64> block_size, vm::ptr<u64> arg4);
error_code sys_fs_truncate(ppu_thread &ppu, vm::cptr<char> path, u64 size);
error_code sys_fs_ftruncate(ppu_thread &ppu, u32 fd, u64 size);
error_code sys_fs_symbolic_link(ppu_thread &ppu, vm::cptr<char> target,
vm::cptr<char> linkpath);
error_code sys_fs_chmod(ppu_thread &ppu, vm::cptr<char> path, s32 mode);
error_code sys_fs_chown(ppu_thread &ppu, vm::cptr<char> path, s32 uid, s32 gid);
error_code sys_fs_disk_free(ppu_thread &ppu, vm::cptr<char> path,
vm::ptr<u64> total_free, vm::ptr<u64> avail_free);
error_code sys_fs_utime(ppu_thread &ppu, vm::cptr<char> path,
vm::cptr<CellFsUtimbuf> timep);
error_code sys_fs_acl_read(ppu_thread &ppu, vm::cptr<char> path, vm::ptr<void>);
error_code sys_fs_acl_write(ppu_thread &ppu, vm::cptr<char> path,
vm::ptr<void>);
error_code sys_fs_lsn_get_cda_size(ppu_thread &ppu, u32 fd, vm::ptr<u64> ptr);
error_code sys_fs_lsn_get_cda(ppu_thread &ppu, u32 fd, vm::ptr<void>, u64,
vm::ptr<u64>);
error_code sys_fs_lsn_lock(ppu_thread &ppu, u32 fd);
error_code sys_fs_lsn_unlock(ppu_thread &ppu, u32 fd);
error_code sys_fs_lsn_read(ppu_thread &ppu, u32 fd, vm::cptr<void>, u64);
error_code sys_fs_lsn_write(ppu_thread &ppu, u32 fd, vm::cptr<void>, u64);
error_code sys_fs_mapped_allocate(ppu_thread &ppu, u32 fd, u64,
vm::pptr<void> out_ptr);
error_code sys_fs_mapped_free(ppu_thread &ppu, u32 fd, vm::ptr<void> ptr);
error_code sys_fs_truncate2(ppu_thread &ppu, u32 fd, u64 size);
error_code sys_fs_newfs(ppu_thread &ppu, vm::cptr<char> dev_name,
vm::cptr<char> file_system, s32 unk1,
vm::cptr<char> str1);
error_code sys_fs_mount(ppu_thread &ppu, vm::cptr<char> dev_name,
vm::cptr<char> file_system, vm::cptr<char> path,
s32 unk1, s32 prot, s32 unk2, vm::cptr<char> str1,
u32 str_len);
error_code sys_fs_unmount(ppu_thread &ppu, vm::cptr<char> path, s32 unk1,
s32 force);
error_code sys_fs_get_mount_info_size(ppu_thread &ppu, vm::ptr<u64> len);
error_code sys_fs_get_mount_info(ppu_thread &ppu, vm::ptr<CellFsMountInfo> info,
u64 len, vm::ptr<u64> out_len);

View file

@ -1,5 +1,9 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
#include "rx/types.hpp"
void abort_lv2_watchdog();
error_code _sys_game_watchdog_start(u32 timeout);
@ -8,5 +12,6 @@ error_code _sys_game_watchdog_clear();
error_code _sys_game_set_system_sw_version(u64 version);
u64 _sys_game_get_system_sw_version();
error_code _sys_game_board_storage_read(vm::ptr<u8> buffer, vm::ptr<u8> status);
error_code _sys_game_board_storage_write(vm::ptr<u8> buffer, vm::ptr<u8> status);
error_code _sys_game_board_storage_write(vm::ptr<u8> buffer,
vm::ptr<u8> status);
error_code _sys_game_get_rtc_status(vm::ptr<s32> status);

View file

@ -1,10 +1,9 @@
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
enum : u64
{
enum : u64 {
SYS_GPIO_UNKNOWN_DEVICE_ID = 0x0,
SYS_GPIO_LED_DEVICE_ID = 0x1,
SYS_GPIO_DIP_SWITCH_DEVICE_ID = 0x2,

View file

@ -0,0 +1,44 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// set sensor mode? also getinfo?
// NOTE(review): little-endian fields, unlike sys_hid_info_2 — presumably
// read by SPU/host-side code; layout is only partially known.
struct sys_hid_info_5 {
  le_t<u16> vid; // USB vendor id
  le_t<u16> pid; // USB product id
  u8 status;
  // todo: more in this, not sure what tho
};
// HID device info (big-endian variant); trailing bytes are unidentified.
struct sys_hid_info_2 {
  be_t<u16> vid; // USB vendor id
  be_t<u16> pid; // USB product id
  u8 unk[17];
};
// Argument of sys_hid_manager_ioctl pkg 0x68; both bytes unidentified.
struct sys_hid_ioctl_68 {
  u8 unk;
  u8 unk2;
};
// unk
// Argument of sys_hid_manager_514 pkg 0xd; fields unidentified.
struct sys_hid_manager_514_pkg_d {
  be_t<u32> unk1;
  u8 unk2;
};
// SysCalls
error_code sys_hid_manager_open(ppu_thread &ppu, u64 device_type, u64 port_no,
vm::ptr<u32> handle);
error_code sys_hid_manager_ioctl(u32 hid_handle, u32 pkg_id, vm::ptr<void> buf,
u64 buf_size);
error_code sys_hid_manager_add_hot_key_observer(u32 event_queue,
vm::ptr<u32> unk);
error_code sys_hid_manager_check_focus();
error_code sys_hid_manager_is_process_permission_root(u32 pid);
error_code sys_hid_manager_513(u64 a1, u64 a2, vm::ptr<void> buf, u64 buf_size);
error_code sys_hid_manager_514(u32 pkg_id, vm::ptr<void> buf, u64 buf_size);
error_code sys_hid_manager_read(u32 handle, u32 pkg_id, vm::ptr<void> buf,
u64 buf_size);

View file

@ -0,0 +1,46 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
class ppu_thread;
// Interrupt tag object: associates an interrupt source with its service
// routine (handler), if one has been established.
struct lv2_int_tag final : public lv2_obj {
  static const u32 id_base = 0x0a000000;
  const u32 id;
  // Established interrupt service routine, if any
  shared_ptr<struct lv2_int_serv> handler;
  lv2_int_tag() noexcept;
  lv2_int_tag(utils::serial &ar) noexcept; // Savestate load
  void save(utils::serial &ar);            // Savestate store
};
// Interrupt service object: a dedicated PPU thread plus the two arguments
// it was established with.
struct lv2_int_serv final : public lv2_obj {
  static const u32 id_base = 0x0b000000;
  const u32 id;
  // PPU thread servicing this interrupt
  const shared_ptr<named_thread<ppu_thread>> thread;
  const u64 arg1;
  const u64 arg2;
  lv2_int_serv(shared_ptr<named_thread<ppu_thread>> thread, u64 arg1,
               u64 arg2) noexcept;
  lv2_int_serv(utils::serial &ar) noexcept; // Savestate load
  void save(utils::serial &ar);             // Savestate store
  // Dispatch one execution of the service routine
  void exec() const;
  // Wait for the service thread to finish
  void join() const;
};
// Syscalls
error_code sys_interrupt_tag_destroy(ppu_thread &ppu, u32 intrtag);
error_code _sys_interrupt_thread_establish(ppu_thread &ppu, vm::ptr<u32> ih,
u32 intrtag, u32 intrthread,
u64 arg1, u64 arg2);
error_code _sys_interrupt_thread_disestablish(ppu_thread &ppu, u32 ih,
vm::ptr<u64> r13);
void sys_interrupt_thread_eoi(ppu_thread &ppu);

View file

@ -0,0 +1,28 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// IO buffer object created by sys_io_buffer_create; immutable geometry of
// a pool of fixed-size blocks.
struct lv2_io_buf {
  static const u32 id_base = 0x44000000;
  static const u32 id_step = 1;
  static const u32 id_count = 2048;
  SAVESTATE_INIT_POS(41);
  const u32 block_count; // Number of blocks in the pool
  const u32 block_size;  // Size of each block in bytes
  const u32 blocks;      // Meaning unclear — mirrors the syscall argument
  const u32 unk1;        // Unknown syscall argument, stored verbatim
  lv2_io_buf(u32 block_count, u32 block_size, u32 blocks, u32 unk1)
      : block_count(block_count), block_size(block_size), blocks(blocks),
        unk1(unk1) {}
};
// SysCalls
error_code sys_io_buffer_create(u32 block_count, u32 block_size, u32 blocks,
u32 unk1, vm::ptr<u32> handle);
error_code sys_io_buffer_destroy(u32 handle);
error_code sys_io_buffer_allocate(u32 handle, vm::ptr<u32> block);
error_code sys_io_buffer_free(u32 handle, u32 block);

View file

@ -0,0 +1,57 @@
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
struct sys_lwmutex_t;
// Guest-side lwcond attributes: an 8-character name, accessible either as
// a raw u64 or as a char array (union of both views).
struct sys_lwcond_attribute_t {
  union {
    nse_t<u64, 1> name_u64;
    char name[sizeof(u64)];
  };
};
// Guest-memory lwcond control block (lives in PPU address space).
struct sys_lwcond_t {
  vm::bptr<sys_lwmutex_t> lwmutex; // Associated lightweight mutex
  be_t<u32> lwcond_queue; // lwcond pseudo-id
};
// Kernel-side lwcond object backing the guest sys_lwcond_t control block.
struct lv2_lwcond final : lv2_obj {
  static const u32 id_base = 0x97000000;
  const be_t<u64> name;      // 8-character name from the attributes
  const u32 lwid;            // Id of the associated lwmutex
  const lv2_protocol protocol;
  vm::ptr<sys_lwcond_t> control; // Guest-memory control block
  shared_mutex mutex;
  // Intrusive sleep queue head (ppu_thread::next_cpu links)
  ppu_thread *sq{};
  // Number of waiters coordinated with the associated lwmutex
  atomic_t<s32> lwmutex_waiters = 0;
  lv2_lwcond(u64 name, u32 lwid, u32 protocol,
             vm::ptr<sys_lwcond_t> control) noexcept
      : name(std::bit_cast<be_t<u64>>(name)), lwid(lwid),
        protocol{static_cast<u8>(protocol)}, control(control) {}
  lv2_lwcond(utils::serial &ar); // Savestate load
  void save(utils::serial &ar);  // Savestate store
};
// Aux
class ppu_thread;
// Syscalls
error_code _sys_lwcond_create(ppu_thread &ppu, vm::ptr<u32> lwcond_id,
u32 lwmutex_id, vm::ptr<sys_lwcond_t> control,
u64 name);
error_code _sys_lwcond_destroy(ppu_thread &ppu, u32 lwcond_id);
error_code _sys_lwcond_signal(ppu_thread &ppu, u32 lwcond_id, u32 lwmutex_id,
u64 ppu_thread_id, u32 mode);
error_code _sys_lwcond_signal_all(ppu_thread &ppu, u32 lwcond_id,
u32 lwmutex_id, u32 mode);
error_code _sys_lwcond_queue_wait(ppu_thread &ppu, u32 lwcond_id,
u32 lwmutex_id, u64 timeout);

View file

@ -0,0 +1,180 @@
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
// Guest-side lwmutex attributes: scheduling protocol, recursion flag and
// an 8-character name (u64/char-array union views).
struct sys_lwmutex_attribute_t {
  be_t<u32> protocol;
  be_t<u32> recursive;
  union {
    nse_t<u64, 1> name_u64;
    char name[sizeof(u64)];
  };
};
// Special owner-field values of sys_lwmutex_t (anything else is a thread id)
enum : u32 {
  lwmutex_free = 0xffffffffu,
  lwmutex_dead = 0xfffffffeu,
  lwmutex_reserved = 0xfffffffdu,
};
// Guest-memory lwmutex control block. The owner/waiter pair can be accessed
// as one 64-bit atomic (all_info), as a combined struct (lock_var), or as
// two separate 32-bit atomics (vars) — all three union members alias the
// same storage.
struct sys_lwmutex_t {
  struct alignas(8) sync_var_t {
    be_t<u32> owner;  // Owning thread id, or a lwmutex_* constant
    be_t<u32> waiter; // Waiter bookkeeping
  };
  union {
    atomic_t<sync_var_t> lock_var;
    struct {
      atomic_be_t<u32> owner;
      atomic_be_t<u32> waiter;
    } vars;
    atomic_be_t<u64> all_info;
  };
  be_t<u32> attribute;
  be_t<u32> recursive_count;
  be_t<u32> sleep_queue; // lwmutex pseudo-id
  be_t<u32> pad;
};
// Kernel-side lwmutex object. Ownership is tracked lock-free through
// lv2_control: either a pending signal count or an intrusive queue of
// sleeping PPU threads (linked through ppu_thread::next_cpu).
struct lv2_lwmutex final : lv2_obj {
  static const u32 id_base = 0x95000000;
  const lv2_protocol protocol;
  const vm::ptr<sys_lwmutex_t> control; // Guest-memory control block
  const be_t<u64> name;
  shared_mutex mutex;
  // Count of lwcond waiters bound to this lwmutex; the sign bit doubles as
  // a "destroyer is waiting" notification flag (see try_own below)
  atomic_t<s32> lwcond_waiters{0};
  // 16-byte atomic: signal state + sleep queue head, updated as one unit
  struct alignas(16) control_data_t {
    s32 signaled{0};
    u32 reserved{};
    ppu_thread *sq{};
  };
  atomic_t<control_data_t> lv2_control{};
  lv2_lwmutex(u32 protocol, vm::ptr<sys_lwmutex_t> control, u64 name) noexcept
      : protocol{static_cast<u8>(protocol)}, control(control),
        name(std::bit_cast<be_t<u64>>(name)) {}
  lv2_lwmutex(utils::serial &ar); // Savestate load
  void save(utils::serial &ar);   // Savestate store
  // Snapshot the sleep queue head without modifying it
  ppu_thread *load_sq() const {
    return atomic_storage<ppu_thread *>::load(lv2_control.raw().sq);
  }
  // Try to take ownership: consume a pending signal, or enqueue 'cpu' on
  // the sleep queue. Returns the consumed signal value (0 = now waiting).
  template <typename T> s32 try_own(T *cpu, bool wait_only = false) {
    const s32 signal =
        lv2_control
            .fetch_op([&](control_data_t &data) {
              if (!data.signaled) {
                // No signal pending: stamp the FIFO ordering tag and
                // push 'cpu' at the head of the intrusive queue
                cpu->prio.atomic_op(
                    [tag = ++g_priority_order_tag](
                        std::common_type_t<decltype(T::prio)> &prio) {
                      prio.order = tag;
                    });
                cpu->next_cpu = data.sq;
                data.sq = cpu;
              } else {
                // Signal pending: consume it (callers passing wait_only
                // must never reach this branch)
                ensure(!wait_only);
                data.signaled = 0;
              }
            })
            .signaled;
    if (signal) {
      cpu->next_cpu = nullptr;
    } else {
      const bool notify = lwcond_waiters
                              .fetch_op([](s32 &val) {
                                if (val + 0u <= 1u << 31) {
                                  // Value was either positive or INT32_MIN
                                  return false;
                                }
                                // lwmutex was set to be destroyed, but there
                                // are lwcond waiters
                                // Turn off the "lwcond_waiters notification"
                                // bit as we are adding an lwmutex waiter
                                val &= 0x7fff'ffff;
                                return true;
                              })
                              .second;
      if (notify) {
        // Notify lwmutex destroyer (may cause EBUSY to be returned for it)
        lwcond_waiters.notify_all();
      }
    }
    return signal;
  }
  // Fast unlock path: only succeeds when no thread is queued; posts a
  // signal instead (sign bit for the unlock2 variant).
  bool try_unlock(bool unlock2) {
    if (!load_sq()) {
      control_data_t old{};
      old.signaled = atomic_storage<s32>::load(lv2_control.raw().signaled);
      control_data_t store = old;
      store.signaled |= (unlock2 ? s32{smin} : 1);
      if (lv2_control.compare_exchange(old, store)) {
        return true;
      }
    }
    return false;
  }
  // Slow unlock path: pop the next waiter per the scheduling protocol and
  // return it, or post a signal when the queue is empty.
  template <typename T> T *reown(bool unlock2 = false) {
    T *res = nullptr;
    lv2_control.fetch_op([&](control_data_t &data) {
      res = nullptr;
      if (auto sq = static_cast<T *>(data.sq)) {
        res = schedule<T>(data.sq, protocol, false);
        if (sq == data.sq) {
          // Head unchanged: nothing to commit for this attempt
          return false;
        }
        return true;
      } else {
        data.signaled |= (unlock2 ? s32{smin} : 1);
        return true;
      }
    });
    if (res && cpu_flag::again - res->state) {
      // Detach manually (fetch_op can fail, so avoid side-effects on the first
      // node in this case)
      res->next_cpu = nullptr;
    }
    return res;
  }
};
// Aux
class ppu_thread;
// Syscalls
error_code _sys_lwmutex_create(ppu_thread &ppu, vm::ptr<u32> lwmutex_id,
u32 protocol, vm::ptr<sys_lwmutex_t> control,
s32 has_name, u64 name);
error_code _sys_lwmutex_destroy(ppu_thread &ppu, u32 lwmutex_id);
error_code _sys_lwmutex_lock(ppu_thread &ppu, u32 lwmutex_id, u64 timeout);
error_code _sys_lwmutex_trylock(ppu_thread &ppu, u32 lwmutex_id);
error_code _sys_lwmutex_unlock(ppu_thread &ppu, u32 lwmutex_id);
error_code _sys_lwmutex_unlock2(ppu_thread &ppu, u32 lwmutex_id);

View file

@ -0,0 +1,135 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
class cpu_thread;
// Sentinel for "no memory container" (see lv2_memory_container::id)
enum lv2_mem_container_id : u32 {
  SYS_MEMORY_CONTAINER_ID_INVALID = 0xFFFFFFFF,
};
// Page attribute bits: access rights and read/write protection
enum : u64 {
  SYS_MEMORY_ACCESS_RIGHT_NONE = 0x00000000000000F0ULL,
  SYS_MEMORY_ACCESS_RIGHT_ANY = 0x000000000000000FULL,
  SYS_MEMORY_ACCESS_RIGHT_PPU_THR = 0x0000000000000008ULL,
  SYS_MEMORY_ACCESS_RIGHT_HANDLER = 0x0000000000000004ULL,
  SYS_MEMORY_ACCESS_RIGHT_SPU_THR = 0x0000000000000002ULL,
  SYS_MEMORY_ACCESS_RIGHT_RAW_SPU = 0x0000000000000001ULL,
  SYS_MEMORY_ATTR_READ_ONLY = 0x0000000000080000ULL,
  SYS_MEMORY_ATTR_READ_WRITE = 0x0000000000040000ULL,
};
// Page size flags (sys_memory_allocate and friends)
enum : u64 {
  SYS_MEMORY_PAGE_SIZE_4K = 0x100ull,
  SYS_MEMORY_PAGE_SIZE_64K = 0x200ull,
  SYS_MEMORY_PAGE_SIZE_1M = 0x400ull,
  SYS_MEMORY_PAGE_SIZE_MASK = 0xf00ull,
};
// Granularity flags (same bit positions as the page size flags above)
enum : u64 {
  SYS_MEMORY_GRANULARITY_64K = 0x0000000000000200,
  SYS_MEMORY_GRANULARITY_1M = 0x0000000000000400,
  SYS_MEMORY_GRANULARITY_MASK = 0x0000000000000f00,
};
// Protection flags (same bit positions as the SYS_MEMORY_ATTR_* bits)
enum : u64 {
  SYS_MEMORY_PROT_READ_WRITE = 0x0000000000040000,
  SYS_MEMORY_PROT_READ_ONLY = 0x0000000000080000,
  SYS_MEMORY_PROT_MASK = 0x00000000000f0000,
};
// Output record of sys_memory_get_user_memory_size / container queries
struct sys_memory_info_t {
  be_t<u32> total_user_memory;
  be_t<u32> available_user_memory;
};
// Output record of sys_memory_get_page_attribute
struct sys_page_attr_t {
  be_t<u64> attribute;
  be_t<u64> access_right;
  be_t<u32> page_size;
  be_t<u32> pad;
};
// A budget of "physical" memory from which allocations draw. The default
// (process) container is not registered in the idm and reports an invalid id.
struct lv2_memory_container {
  static const u32 id_base = 0x3F000000;
  static const u32 id_step = 0x1;
  static const u32 id_count = 16;
  const u32 size; // Amount of "physical" memory in this container
  const lv2_mem_container_id id; // ID of the container in if placed at IDM,
  // otherwise SYS_MEMORY_CONTAINER_ID_INVALID
  atomic_t<u32> used{}; // Amount of "physical" memory currently used
  SAVESTATE_INIT_POS(1);
  lv2_memory_container(u32 size, bool from_idm = false) noexcept;
  lv2_memory_container(utils::serial &ar, bool from_idm = false) noexcept;
  static std::function<void(void *)> load(utils::serial &ar);
  void save(utils::serial &ar);
  // Find a container by id (see id validity note above)
  static lv2_memory_container *search(u32 id);
  // Try to get specified amount of "physical" memory
  // Values greater than UINT32_MAX will fail
  // Returns 'amount' on success, 0 when the container lacks capacity.
  u32 take(u64 amount) {
    auto [_, result] = used.fetch_op([&](u32 &value) -> u32 {
      // u32 remaining capacity promotes to u64 for the comparison, so
      // oversized requests (> UINT32_MAX) always fail here
      if (size - value >= amount) {
        value += static_cast<u32>(amount);
        return static_cast<u32>(amount);
      }
      return 0;
    });
    return result;
  }
  // Return previously taken memory; 'amount' must not exceed 'used'.
  u32 free(u64 amount) {
    auto [_, result] = used.fetch_op([&](u32 &value) -> u32 {
      if (value >= amount) {
        value -= static_cast<u32>(amount);
        return static_cast<u32>(amount);
      }
      return 0;
    });
    // Sanity check
    ensure(result == amount);
    return result;
  }
};
// Output record of sys_memory_get_user_memory_stat; field meanings are
// unidentified (only offsets are known).
struct sys_memory_user_memory_stat_t {
  be_t<u32> a; // 0x0
  be_t<u32> b; // 0x4
  be_t<u32> c; // 0x8
  be_t<u32> d; // 0xc
  be_t<u32> e; // 0x10
  be_t<u32> f; // 0x14
  be_t<u32> g; // 0x18
};
// SysCalls
error_code sys_memory_allocate(cpu_thread &cpu, u64 size, u64 flags,
vm::ptr<u32> alloc_addr);
error_code sys_memory_allocate_from_container(cpu_thread &cpu, u64 size,
u32 cid, u64 flags,
vm::ptr<u32> alloc_addr);
error_code sys_memory_free(cpu_thread &cpu, u32 start_addr);
error_code sys_memory_get_page_attribute(cpu_thread &cpu, u32 addr,
vm::ptr<sys_page_attr_t> attr);
error_code sys_memory_get_user_memory_size(cpu_thread &cpu,
vm::ptr<sys_memory_info_t> mem_info);
error_code sys_memory_get_user_memory_stat(
cpu_thread &cpu, vm::ptr<sys_memory_user_memory_stat_t> mem_stat);
error_code sys_memory_container_create(cpu_thread &cpu, vm::ptr<u32> cid,
u64 size);
error_code sys_memory_container_destroy(cpu_thread &cpu, u32 cid);
error_code sys_memory_container_get_size(cpu_thread &cpu,
vm::ptr<sys_memory_info_t> mem_info,
u32 cid);
error_code sys_memory_container_destroy_parent_with_childs(
cpu_thread &cpu, u32 cid, u32 must_0, vm::ptr<u32> mc_child);

View file

@ -0,0 +1,125 @@
#pragma once
#include "sys_sync.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
#include <vector>
struct lv2_memory_container;
namespace utils {
class shm;
}
// Shared memory object created by sys_mmapper_allocate_shared_memory*;
// backed by a host shared-memory handle (utils::shm).
struct lv2_memory : lv2_obj {
  static const u32 id_base = 0x08000000;
  const u32 size; // Memory size
  const u32 align; // Alignment required
  const u64 flags;
  const u64 key; // IPC key
  const bool pshared; // Process shared flag
  lv2_memory_container *const ct; // Associated memory container
  // Host backing storage shared between mappings
  const std::shared_ptr<utils::shm> shm;
  // Number of active mappings of this object
  atomic_t<u32> counter{0};
  lv2_memory(u32 size, u32 align, u64 flags, u64 key, bool pshared,
             lv2_memory_container *ct);
  lv2_memory(utils::serial &ar); // Savestate load
  static std::function<void(void *)> load(utils::serial &ar);
  void save(utils::serial &ar);  // Savestate store
  // Validation hook invoked when the object is registered in the idm
  CellError on_id_create();
};
// Event queue IPC key used for page fault notifications
enum : u64 {
  SYS_MEMORY_PAGE_FAULT_EVENT_KEY = 0xfffe000000000000ULL,
};
enum : u64 {
  SYS_MMAPPER_NO_SHM_KEY = 0xffff000000000000ull, // Unofficial name
};
// Page fault event payload values (cause and faulting thread type)
enum : u64 {
  SYS_MEMORY_PAGE_FAULT_CAUSE_NON_MAPPED = 0x2ULL,
  SYS_MEMORY_PAGE_FAULT_CAUSE_READ_ONLY = 0x1ULL,
  SYS_MEMORY_PAGE_FAULT_TYPE_PPU_THREAD = 0x0ULL,
  SYS_MEMORY_PAGE_FAULT_TYPE_SPU_THREAD = 0x1ULL,
  SYS_MEMORY_PAGE_FAULT_TYPE_RAW_SPU = 0x2ULL,
};
// One page-fault notification registration (queue + port for one region)
struct page_fault_notification_entry {
  ENABLE_BITWISE_SERIALIZATION;
  u32 start_addr; // Starting address of region to monitor.
  u32 event_queue_id; // Queue to be notified.
  u32 port_id; // Port used to notify the queue.
};
// Used to hold list of queues to be notified on page fault event.
struct page_fault_notification_entries {
  std::vector<page_fault_notification_entry> entries; // Guarded by 'mutex'
  shared_mutex mutex;
  SAVESTATE_INIT_POS(44);
  page_fault_notification_entries() = default;
  page_fault_notification_entries(utils::serial &ar); // Savestate load
  void save(utils::serial &ar);                       // Savestate store
};
// Threads currently suspended on a page fault, awaiting recovery
struct page_fault_event_entries {
  // First = thread, second = addr
  std::unordered_map<class cpu_thread *, u32> events;
  shared_mutex pf_mutex;
};
// Entry layout for sys_mmapper_allocate_shared_memory_ext; field meanings
// are unidentified (only offsets are known).
struct mmapper_unk_entry_struct0 {
  be_t<u32> a; // 0x0
  be_t<u32> b; // 0x4
  be_t<u32> c; // 0x8
  be_t<u32> d; // 0xc
  be_t<u64> type; // 0x10
};
// Aux
class ppu_thread;
error_code mmapper_thread_recover_page_fault(cpu_thread *cpu);
// SysCalls
error_code sys_mmapper_allocate_address(ppu_thread &, u64 size, u64 flags,
u64 alignment, vm::ptr<u32> alloc_addr);
error_code sys_mmapper_allocate_fixed_address(ppu_thread &);
error_code sys_mmapper_allocate_shared_memory(ppu_thread &, u64 ipc_key,
u64 size, u64 flags,
vm::ptr<u32> mem_id);
error_code
sys_mmapper_allocate_shared_memory_from_container(ppu_thread &, u64 ipc_key,
u64 size, u32 cid, u64 flags,
vm::ptr<u32> mem_id);
error_code sys_mmapper_allocate_shared_memory_ext(
ppu_thread &, u64 ipc_key, u64 size, u32 flags,
vm::ptr<mmapper_unk_entry_struct0> entries, s32 entry_count,
vm::ptr<u32> mem_id);
error_code sys_mmapper_allocate_shared_memory_from_container_ext(
ppu_thread &, u64 ipc_key, u64 size, u64 flags, u32 cid,
vm::ptr<mmapper_unk_entry_struct0> entries, s32 entry_count,
vm::ptr<u32> mem_id);
error_code sys_mmapper_change_address_access_right(ppu_thread &, u32 addr,
u64 flags);
error_code sys_mmapper_free_address(ppu_thread &, u32 addr);
error_code sys_mmapper_free_shared_memory(ppu_thread &, u32 mem_id);
error_code sys_mmapper_map_shared_memory(ppu_thread &, u32 addr, u32 mem_id,
u64 flags);
error_code sys_mmapper_search_and_map(ppu_thread &, u32 start_addr, u32 mem_id,
u64 flags, vm::ptr<u32> alloc_addr);
error_code sys_mmapper_unmap_shared_memory(ppu_thread &, u32 addr,
vm::ptr<u32> mem_id);
error_code sys_mmapper_enable_page_fault_notification(ppu_thread &,
u32 start_addr,
u32 event_queue_id);

View file

@ -0,0 +1,174 @@
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/PPUThread.h"
// Guest-side mutex attributes passed to sys_mutex_create.
struct sys_mutex_attribute_t {
  be_t<u32>
      protocol; // SYS_SYNC_FIFO, SYS_SYNC_PRIORITY or SYS_SYNC_PRIORITY_INHERIT
  be_t<u32> recursive; // SYS_SYNC_RECURSIVE or SYS_SYNC_NOT_RECURSIVE
  be_t<u32> pshared;
  be_t<u32> adaptive;
  be_t<u64> ipc_key;
  be_t<s32> flags;
  be_t<u32> pad;
  // 8-character name, accessible as a raw u64 or as a char array
  union {
    nse_t<u64, 1> name_u64;
    char name[sizeof(u64)];
  };
};
class ppu_thread;
// Kernel-side mutex object. Ownership is tracked lock-free through the
// 16-byte 'control' word: current owner id plus an intrusive queue of
// sleeping PPU threads (linked through ppu_thread::next_cpu).
struct lv2_mutex final : lv2_obj {
  static const u32 id_base = 0x85000000;
  const lv2_protocol protocol;
  const u32 recursive; // SYS_SYNC_RECURSIVE or SYS_SYNC_NOT_RECURSIVE
  const u32 adaptive;
  const u64 key; // IPC key
  const u64 name;
  u32 cond_count = 0; // Condition Variables
  shared_mutex mutex;
  atomic_t<u32> lock_count{0}; // Recursive Locks
  struct alignas(16) control_data_t {
    u32 owner{}; // Owning thread id (0 = unowned)
    u32 reserved{};
    ppu_thread *sq{}; // Sleep queue head
  };
  atomic_t<control_data_t> control{};
  lv2_mutex(u32 protocol, u32 recursive, u32 adaptive, u64 key,
            u64 name) noexcept
      : protocol{static_cast<u8>(protocol)}, recursive(recursive),
        adaptive(adaptive), key(key), name(name) {}
  lv2_mutex(utils::serial &ar); // Savestate load
  static std::function<void(void *)> load(utils::serial &ar);
  void save(utils::serial &ar); // Savestate store
  // Single CAS lock attempt; returns {} on success, CELL_EBUSY when
  // contended (or on CAS failure), CELL_EDEADLK/CELL_EKRESOURCE on
  // recursion errors.
  template <typename T> CellError try_lock(T &cpu) {
    auto it = control.load();
    if (!it.owner) {
      auto store = it;
      store.owner = cpu.id;
      if (!control.compare_and_swap_test(it, store)) {
        // Lost the race; caller may retry or sleep
        return CELL_EBUSY;
      }
      return {};
    }
    if (it.owner == cpu.id) {
      // Recursive locking
      if (recursive == SYS_SYNC_RECURSIVE) {
        if (lock_count == 0xffffffffu) {
          return CELL_EKRESOURCE;
        }
        lock_count++;
        return {};
      }
      return CELL_EDEADLK;
    }
    return CELL_EBUSY;
  }
  // Take ownership or enqueue 'cpu' on the sleep queue; returns true when
  // ownership was acquired immediately.
  template <typename T> bool try_own(T &cpu) {
    if (control.atomic_op([&](control_data_t &data) {
          if (data.owner) {
            // Contended: stamp the FIFO ordering tag and push onto the
            // intrusive sleep queue
            cpu.prio.atomic_op(
                [tag = ++g_priority_order_tag](
                    std::common_type_t<decltype(T::prio)> &prio) {
                  prio.order = tag;
                });
            cpu.next_cpu = data.sq;
            data.sq = &cpu;
            return false;
          } else {
            data.owner = cpu.id;
            return true;
          }
        })) {
      cpu.next_cpu = nullptr;
      return true;
    }
    return false;
  }
  // Fast unlock path: only succeeds when no thread is queued (otherwise
  // returns CELL_EBUSY and the caller must go through reown()).
  template <typename T> CellError try_unlock(T &cpu) {
    auto it = control.load();
    if (it.owner != cpu.id) {
      return CELL_EPERM;
    }
    if (lock_count) {
      // Recursive unlock: just decrement the nesting depth
      lock_count--;
      return {};
    }
    if (!it.sq) {
      auto store = it;
      store.owner = 0;
      if (control.compare_and_swap_test(it, store)) {
        return {};
      }
    }
    return CELL_EBUSY;
  }
  // Slow unlock path: pop the next waiter per the scheduling protocol,
  // transfer ownership to it, and return it (nullptr if queue was empty).
  template <typename T> T *reown() {
    T *res{};
    control.fetch_op([&](control_data_t &data) {
      res = nullptr;
      if (auto sq = static_cast<T *>(data.sq)) {
        res = schedule<T>(data.sq, protocol, false);
        if (sq == data.sq) {
          // Queue head unchanged: publish the new owner without a full CAS
          atomic_storage<u32>::release(control.raw().owner, res->id);
          return false;
        }
        data.owner = res->id;
        return true;
      } else {
        data.owner = 0;
        return true;
      }
    });
    if (res && cpu_flag::again - res->state) {
      // Detach manually (fetch_op can fail, so avoid side-effects on the first
      // node in this case)
      res->next_cpu = nullptr;
    }
    return res;
  }
};
// Syscalls
error_code sys_mutex_create(ppu_thread &ppu, vm::ptr<u32> mutex_id,
vm::ptr<sys_mutex_attribute_t> attr);
error_code sys_mutex_destroy(ppu_thread &ppu, u32 mutex_id);
error_code sys_mutex_lock(ppu_thread &ppu, u32 mutex_id, u64 timeout);
error_code sys_mutex_trylock(ppu_thread &ppu, u32 mutex_id);
error_code sys_mutex_unlock(ppu_thread &ppu, u32 mutex_id);

View file

@ -0,0 +1,365 @@
#pragma once
#include "util/bit_set.h"
#include "util/mutex.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
#include <functional>
#include <queue>
#include <utility>
#include <vector>
// Error codes
// Guest network error codes; numbering follows BSD errno conventions.
// Syscalls return the negated form — see operator- defined below.
enum sys_net_error : s32 {
  SYS_NET_ENOENT = 2,
  SYS_NET_EINTR = 4,
  SYS_NET_EBADF = 9,
  SYS_NET_ENOMEM = 12,
  SYS_NET_EACCES = 13,
  SYS_NET_EFAULT = 14,
  SYS_NET_EBUSY = 16,
  SYS_NET_EINVAL = 22,
  SYS_NET_EMFILE = 24,
  SYS_NET_ENOSPC = 28,
  SYS_NET_EPIPE = 32,
  SYS_NET_EAGAIN = 35,
  SYS_NET_EWOULDBLOCK = SYS_NET_EAGAIN,
  SYS_NET_EINPROGRESS = 36,
  SYS_NET_EALREADY = 37,
  SYS_NET_EDESTADDRREQ = 39,
  SYS_NET_EMSGSIZE = 40,
  SYS_NET_EPROTOTYPE = 41,
  SYS_NET_ENOPROTOOPT = 42,
  SYS_NET_EPROTONOSUPPORT = 43,
  SYS_NET_EOPNOTSUPP = 45,
  SYS_NET_EPFNOSUPPORT = 46,
  SYS_NET_EAFNOSUPPORT = 47,
  SYS_NET_EADDRINUSE = 48,
  SYS_NET_EADDRNOTAVAIL = 49,
  SYS_NET_ENETDOWN = 50,
  SYS_NET_ENETUNREACH = 51,
  SYS_NET_ECONNABORTED = 53,
  SYS_NET_ECONNRESET = 54,
  SYS_NET_ENOBUFS = 55,
  SYS_NET_EISCONN = 56,
  SYS_NET_ENOTCONN = 57,
  SYS_NET_ESHUTDOWN = 58,
  SYS_NET_ETOOMANYREFS = 59,
  SYS_NET_ETIMEDOUT = 60,
  SYS_NET_ECONNREFUSED = 61,
  SYS_NET_EHOSTDOWN = 64,
  SYS_NET_EHOSTUNREACH = 65,
};
// Map a network error code to its negated form (the representation used on
// the syscall return path).
static constexpr sys_net_error operator-(sys_net_error v) {
  // Unary plus promotes the enum to its underlying integer type; negate
  // that and convert back to the enum explicitly.
  const auto raw = +v;
  return static_cast<sys_net_error>(-raw);
}
// Socket types (prefixed with SYS_NET_)
// Includes the PS3-specific P2P pseudo-types alongside the BSD ones.
enum lv2_socket_type : s32 {
  SYS_NET_SOCK_STREAM = 1,
  SYS_NET_SOCK_DGRAM = 2,
  SYS_NET_SOCK_RAW = 3,
  SYS_NET_SOCK_DGRAM_P2P = 6,
  SYS_NET_SOCK_STREAM_P2P = 10,
};
// Socket options (prefixed with SYS_NET_)
enum lv2_socket_option : s32 {
SYS_NET_SO_SNDBUF = 0x1001,
SYS_NET_SO_RCVBUF = 0x1002,
SYS_NET_SO_SNDLOWAT = 0x1003,
SYS_NET_SO_RCVLOWAT = 0x1004,
SYS_NET_SO_SNDTIMEO = 0x1005,
SYS_NET_SO_RCVTIMEO = 0x1006,
SYS_NET_SO_ERROR = 0x1007,
SYS_NET_SO_TYPE = 0x1008,
SYS_NET_SO_NBIO = 0x1100, // Non-blocking IO
SYS_NET_SO_TPPOLICY = 0x1101,
SYS_NET_SO_REUSEADDR = 0x0004,
SYS_NET_SO_KEEPALIVE = 0x0008,
SYS_NET_SO_BROADCAST = 0x0020,
SYS_NET_SO_LINGER = 0x0080,
SYS_NET_SO_OOBINLINE = 0x0100,
SYS_NET_SO_REUSEPORT = 0x0200,
SYS_NET_SO_ONESBCAST = 0x0800,
SYS_NET_SO_USECRYPTO = 0x1000,
SYS_NET_SO_USESIGNATURE = 0x2000,
SYS_NET_SOL_SOCKET = 0xffff,
};
// IP options (prefixed with SYS_NET_)
enum lv2_ip_option : s32 {
SYS_NET_IP_HDRINCL = 2,
SYS_NET_IP_TOS = 3,
SYS_NET_IP_TTL = 4,
SYS_NET_IP_MULTICAST_IF = 9,
SYS_NET_IP_MULTICAST_TTL = 10,
SYS_NET_IP_MULTICAST_LOOP = 11,
SYS_NET_IP_ADD_MEMBERSHIP = 12,
SYS_NET_IP_DROP_MEMBERSHIP = 13,
SYS_NET_IP_TTLCHK = 23,
SYS_NET_IP_MAXTTL = 24,
SYS_NET_IP_DONTFRAG = 26
};
// Family (prefixed with SYS_NET_)
enum lv2_socket_family : s32 {
SYS_NET_AF_UNSPEC = 0,
SYS_NET_AF_LOCAL = 1,
SYS_NET_AF_UNIX = SYS_NET_AF_LOCAL,
SYS_NET_AF_INET = 2,
SYS_NET_AF_INET6 = 24,
};
// Flags (prefixed with SYS_NET_)
enum {
SYS_NET_MSG_OOB = 0x1,
SYS_NET_MSG_PEEK = 0x2,
SYS_NET_MSG_DONTROUTE = 0x4,
SYS_NET_MSG_EOR = 0x8,
SYS_NET_MSG_TRUNC = 0x10,
SYS_NET_MSG_CTRUNC = 0x20,
SYS_NET_MSG_WAITALL = 0x40,
SYS_NET_MSG_DONTWAIT = 0x80,
SYS_NET_MSG_BCAST = 0x100,
SYS_NET_MSG_MCAST = 0x200,
SYS_NET_MSG_USECRYPTO = 0x400,
SYS_NET_MSG_USESIGNATURE = 0x800,
};
// Shutdown types (prefixed with SYS_NET_)
enum {
SYS_NET_SHUT_RD = 0,
SYS_NET_SHUT_WR = 1,
SYS_NET_SHUT_RDWR = 2,
};
// TCP options (prefixed with SYS_NET_)
enum lv2_tcp_option : s32 {
SYS_NET_TCP_NODELAY = 1,
SYS_NET_TCP_MAXSEG = 2,
SYS_NET_TCP_MSS_TO_ADVERTISE = 3,
};
// IP protocols (prefixed with SYS_NET_)
enum lv2_ip_protocol : s32 {
SYS_NET_IPPROTO_IP = 0,
SYS_NET_IPPROTO_ICMP = 1,
SYS_NET_IPPROTO_IGMP = 2,
SYS_NET_IPPROTO_TCP = 6,
SYS_NET_IPPROTO_UDP = 17,
SYS_NET_IPPROTO_ICMPV6 = 58,
};
// Poll events (prefixed with SYS_NET_)
enum {
SYS_NET_POLLIN = 0x0001,
SYS_NET_POLLPRI = 0x0002,
SYS_NET_POLLOUT = 0x0004,
SYS_NET_POLLERR = 0x0008, /* revent only */
SYS_NET_POLLHUP = 0x0010, /* revent only */
SYS_NET_POLLNVAL = 0x0020, /* revent only */
SYS_NET_POLLRDNORM = 0x0040,
SYS_NET_POLLWRNORM = SYS_NET_POLLOUT,
SYS_NET_POLLRDBAND = 0x0080,
SYS_NET_POLLWRBAND = 0x0100,
};
enum lv2_socket_abort_flags : s32 {
SYS_NET_ABORT_STRICT_CHECK = 1,
};
// in_addr_t type prefixed with sys_net_
using sys_net_in_addr_t = u32;
// in_port_t type prefixed with sys_net_
using sys_net_in_port_t = u16;
// sa_family_t type prefixed with sys_net_
using sys_net_sa_family_t = u8;
// socklen_t type prefixed with sys_net_
using sys_net_socklen_t = u32;
// fd_set prefixed with sys_net_
struct sys_net_fd_set {
be_t<u32> fds_bits[32];
u32 bit(s32 s) const { return (fds_bits[(s >> 5) & 31] >> (s & 31)) & 1u; }
void set(s32 s) { fds_bits[(s >> 5) & 31] |= (1u << (s & 31)); }
};
// Guest ABI structures for sys_net. Field order and be_t widths define the
// layout the PS3 guest sees — do not reorder or repack.
// hostent prefixed with sys_net_ (result of name resolution)
struct sys_net_hostent {
  vm::bptr<char> h_name;
  vm::bpptr<char> h_aliases;
  be_t<s32> h_addrtype;
  be_t<s32> h_length;
  vm::bpptr<char> h_addr_list;
};
// in_addr prefixed with sys_net_ (IPv4 address, network byte order)
struct sys_net_in_addr {
  be_t<u32> _s_addr;
};
// iovec prefixed with sys_net_
// zero1/zero2 appear to pad the 32-bit guest pointer/length up to 64-bit
// slots — presumably ABI padding; confirm against firmware headers.
struct sys_net_iovec {
  be_t<s32> zero1;
  vm::bptr<void> iov_base;
  be_t<s32> zero2;
  be_t<u32> iov_len;
};
// ip_mreq prefixed with sys_net_ (multicast group membership request)
struct sys_net_ip_mreq {
  be_t<u32> imr_multiaddr;
  be_t<u32> imr_interface;
};
// msghdr prefixed with sys_net_ (for sendmsg/recvmsg); zeroN/padN fields pad
// 32-bit guest pointers into 64-bit slots, as in sys_net_iovec.
struct sys_net_msghdr {
  be_t<s32> zero1;
  vm::bptr<void> msg_name;
  be_t<u32> msg_namelen;
  be_t<s32> pad1;
  be_t<s32> zero2;
  vm::bptr<sys_net_iovec> msg_iov;
  be_t<s32> msg_iovlen;
  be_t<s32> pad2;
  be_t<s32> zero3;
  vm::bptr<void> msg_control;
  be_t<u32> msg_controllen;
  be_t<s32> msg_flags;
};
// pollfd prefixed with sys_net_ (for sys_net_bnet_poll)
struct sys_net_pollfd {
  be_t<s32> fd;
  be_t<s16> events;
  be_t<s16> revents;
};
// sockaddr prefixed with sys_net_ (generic BSD-style sockaddr with sa_len)
struct sys_net_sockaddr {
  ENABLE_BITWISE_SERIALIZATION;
  u8 sa_len;
  u8 sa_family;
  char sa_data[14];
};
// sockaddr_dl prefixed with sys_net_ (link-layer address)
struct sys_net_sockaddr_dl {
  ENABLE_BITWISE_SERIALIZATION;
  u8 sdl_len;
  u8 sdl_family;
  be_t<u16> sdl_index;
  u8 sdl_type;
  u8 sdl_nlen;
  u8 sdl_alen;
  u8 sdl_slen;
  char sdl_data[12];
};
// sockaddr_in prefixed with sys_net_ (IPv4 endpoint)
struct sys_net_sockaddr_in {
  ENABLE_BITWISE_SERIALIZATION;
  u8 sin_len;
  u8 sin_family;
  be_t<u16> sin_port;
  be_t<u32> sin_addr;
  be_t<u64> sin_zero;
};
// sockaddr_in_p2p prefixed with sys_net_: IPv4 endpoint plus the PS3 P2P
// virtual port (sin_vport) carved out of sin_zero.
struct sys_net_sockaddr_in_p2p {
  ENABLE_BITWISE_SERIALIZATION;
  u8 sin_len;
  u8 sin_family;
  be_t<u16> sin_port;
  be_t<u32> sin_addr;
  be_t<u16> sin_vport;
  char sin_zero[6];
};
// timeval prefixed with sys_net_ (64-bit seconds/microseconds fields)
struct sys_net_timeval {
  be_t<s64> tv_sec;
  be_t<s64> tv_usec;
};
// linger prefixed with sys_net_ (SO_LINGER option value)
struct sys_net_linger {
  be_t<s32> l_onoff;
  be_t<s32> l_linger;
};
class ppu_thread;
// Syscalls — lv2 network (sys_net_bnet_*) entry points. `s` is the guest
// socket descriptor; address/length pointers are guest memory. Each returns
// an error_code (sys_net_error values, see above).
error_code sys_net_bnet_accept(ppu_thread &, s32 s,
                               vm::ptr<sys_net_sockaddr> addr,
                               vm::ptr<u32> paddrlen);
error_code sys_net_bnet_bind(ppu_thread &, s32 s,
                             vm::cptr<sys_net_sockaddr> addr, u32 addrlen);
error_code sys_net_bnet_connect(ppu_thread &, s32 s,
                                vm::ptr<sys_net_sockaddr> addr, u32 addrlen);
error_code sys_net_bnet_getpeername(ppu_thread &, s32 s,
                                    vm::ptr<sys_net_sockaddr> addr,
                                    vm::ptr<u32> paddrlen);
error_code sys_net_bnet_getsockname(ppu_thread &, s32 s,
                                    vm::ptr<sys_net_sockaddr> addr,
                                    vm::ptr<u32> paddrlen);
error_code sys_net_bnet_getsockopt(ppu_thread &, s32 s, s32 level, s32 optname,
                                   vm::ptr<void> optval, vm::ptr<u32> optlen);
error_code sys_net_bnet_listen(ppu_thread &, s32 s, s32 backlog);
error_code sys_net_bnet_recvfrom(ppu_thread &, s32 s, vm::ptr<void> buf,
                                 u32 len, s32 flags,
                                 vm::ptr<sys_net_sockaddr> addr,
                                 vm::ptr<u32> paddrlen);
error_code sys_net_bnet_recvmsg(ppu_thread &, s32 s,
                                vm::ptr<sys_net_msghdr> msg, s32 flags);
error_code sys_net_bnet_sendmsg(ppu_thread &, s32 s,
                                vm::cptr<sys_net_msghdr> msg, s32 flags);
error_code sys_net_bnet_sendto(ppu_thread &, s32 s, vm::cptr<void> buf, u32 len,
                               s32 flags, vm::cptr<sys_net_sockaddr> addr,
                               u32 addrlen);
error_code sys_net_bnet_setsockopt(ppu_thread &, s32 s, s32 level, s32 optname,
                                   vm::cptr<void> optval, u32 optlen);
error_code sys_net_bnet_shutdown(ppu_thread &, s32 s, s32 how);
error_code sys_net_bnet_socket(ppu_thread &, lv2_socket_family family,
                               lv2_socket_type type, lv2_ip_protocol protocol);
error_code sys_net_bnet_close(ppu_thread &, s32 s);
error_code sys_net_bnet_poll(ppu_thread &, vm::ptr<sys_net_pollfd> fds,
                             s32 nfds, s32 ms);
error_code sys_net_bnet_select(ppu_thread &, s32 nfds,
                               vm::ptr<sys_net_fd_set> readfds,
                               vm::ptr<sys_net_fd_set> writefds,
                               vm::ptr<sys_net_fd_set> exceptfds,
                               vm::ptr<sys_net_timeval> timeout);
// Packet dump syscalls — semantics not evident from the declarations;
// see implementation for the meaning of flags/unknown parameters.
error_code _sys_net_open_dump(ppu_thread &, s32 len, s32 flags);
error_code _sys_net_read_dump(ppu_thread &, s32 id, vm::ptr<void> buf, s32 len,
                              vm::ptr<s32> pflags);
error_code _sys_net_close_dump(ppu_thread &, s32 id, vm::ptr<s32> pflags);
error_code _sys_net_write_dump(ppu_thread &, s32 id, vm::cptr<void> buf,
                               s32 len, u32 unknown);
error_code sys_net_abort(ppu_thread &, s32 type, u64 arg, s32 flags);
error_code sys_net_infoctl(ppu_thread &, s32 cmd, vm::ptr<void> arg);
error_code sys_net_control(ppu_thread &, u32 arg1, s32 arg2, vm::ptr<void> arg3,
                           s32 arg4);
error_code sys_net_bnet_ioctl(ppu_thread &, s32 arg1, u32 arg2, u32 arg3);
error_code sys_net_bnet_sysctl(ppu_thread &, u32 arg1, u32 arg2, u32 arg3,
                               vm::ptr<void> arg4, u32 arg5, u32 arg6);
error_code sys_net_eurus_post_command(ppu_thread &, s32 arg1, u32 arg2,
                                      u32 arg3);

View file

@ -0,0 +1,164 @@
#pragma once
#include <functional>
#include <optional>
#include "Emu/IdManager.h"
#include "Emu/NP/ip_address.h"
#include "cellos/sys_net.h"
#include "util/mutex.h"
#ifdef _WIN32
#include <WS2tcpip.h>
#include <winsock2.h>
#else
#ifdef __clang__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include <poll.h>
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
#endif
enum class thread_state : u32;
// Abstract base class for all lv2 socket kinds (native, raw, P2P dgram/stream
// — see the lv2_socket_* subclasses). Owns the per-socket lock, the selected
// poll events and the queue of PPU threads waiting on this socket.
class lv2_socket {
public:
  // Poll events
  enum class poll_t {
    read,
    write,
    error,
    __bitset_enum_max
  };

  // Raw storage for a socket option value (large enough for any option).
  union sockopt_data {
    char ch[128];
    be_t<s32> _int = 0;
    sys_net_timeval timeo;
    sys_net_linger linger;
  };

  // Cached option value plus its length, as set by the guest.
  struct sockopt_cache {
    sockopt_data data{};
    s32 len = 0;
  };

public:
  SAVESTATE_INIT_POS(7); // Dependency on RPCN

  lv2_socket(lv2_socket_family family, lv2_socket_type type,
             lv2_ip_protocol protocol);
  // Savestate deserialization constructors.
  lv2_socket(utils::serial &) {}
  lv2_socket(utils::serial &, lv2_socket_type type);
  static std::function<void(void *)> load(utils::serial &ar);
  void save(utils::serial &, bool save_only_this_class = false);
  virtual ~lv2_socket() noexcept;
  lv2_socket &operator=(thread_state s) noexcept;

  std::unique_lock<shared_mutex> lock();
  void set_lv2_id(u32 id);
  bs_t<poll_t> get_events() const;
  void set_poll_event(bs_t<poll_t> event);
  // Enqueues a PPU thread waiting for `event`; poll_cb is invoked when events
  // fire and returns whether the wait is satisfied.
  void poll_queue(shared_ptr<ppu_thread> ppu, bs_t<poll_t> event,
                  std::function<bool(bs_t<poll_t>)> poll_cb);
  u32 clear_queue(ppu_thread *);
  void handle_events(const pollfd &native_fd, bool unset_connecting = false);
  void queue_wake(ppu_thread *ppu);
  lv2_socket_family get_family() const;
  lv2_socket_type get_type() const;
  lv2_ip_protocol get_protocol() const;
  std::size_t get_queue_size() const;
  socket_type get_socket() const;
#ifdef _WIN32
  bool is_connecting() const;
  void set_connecting(bool is_connecting);
#endif

public:
  // Per-kind socket operations, implemented by the subclasses.
  virtual std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr>
  accept(bool is_lock = true) = 0;
  virtual s32 bind(const sys_net_sockaddr &addr) = 0;
  virtual std::optional<s32> connect(const sys_net_sockaddr &addr) = 0;
  virtual s32 connect_followup() = 0;
  virtual std::pair<s32, sys_net_sockaddr> getpeername() = 0;
  virtual std::pair<s32, sys_net_sockaddr> getsockname() = 0;
  virtual std::tuple<s32, sockopt_data, u32> getsockopt(s32 level, s32 optname,
                                                        u32 len) = 0;
  virtual s32 setsockopt(s32 level, s32 optname,
                         const std::vector<u8> &optval) = 0;
  virtual s32 listen(s32 backlog) = 0;
  virtual std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>>
  recvfrom(s32 flags, u32 len, bool is_lock = true) = 0;
  virtual std::optional<s32> sendto(s32 flags, const std::vector<u8> &buf,
                                    std::optional<sys_net_sockaddr> opt_sn_addr,
                                    bool is_lock = true) = 0;
  virtual std::optional<s32> sendmsg(s32 flags, const sys_net_msghdr &msg,
                                     bool is_lock = true) = 0;
  virtual void close() = 0;
  virtual s32 shutdown(s32 how) = 0;
  virtual s32 poll(sys_net_pollfd &sn_pfd, pollfd &native_pfd) = 0;
  virtual std::tuple<bool, bool, bool> select(bs_t<poll_t> selected,
                                              pollfd &native_pfd) = 0;
  error_code abort_socket(s32 flags);

public:
  // IDM data
  static const u32 id_base = 24;
  static const u32 id_step = 1;
  static const u32 id_count = 1000;

protected:
  lv2_socket(utils::serial &, bool);

  shared_mutex mutex;
  s32 lv2_id = 0;
  // Host OS socket handle backing this lv2 socket.
  socket_type native_socket = 0;
  lv2_socket_family family{};
  lv2_socket_type type{};
  lv2_ip_protocol protocol{};
  // Events selected for polling
  atomic_bs_t<poll_t> events{};
  // Event processing workload (pair of thread id and the processing function)
  std::vector<
      std::pair<shared_ptr<ppu_thread>, std::function<bool(bs_t<poll_t>)>>>
      queue;
  // Socket options value keepers
  // Non-blocking IO option
  s32 so_nbio = 0;
  // Error, only used for connection result for non blocking stream sockets
  s32 so_error = 0;
  // Unsupported option
  s32 so_tcp_maxseg = 1500;
#ifdef _WIN32
  s32 so_reuseaddr = 0;
  s32 so_reuseport = 0;
  // Tracks connect for WSAPoll workaround
  bool connecting = false;
#endif
  sys_net_sockaddr last_bound_addr{};

public:
  // Send/receive timeouts; units not evident here — confirm against
  // setsockopt handling in the implementation.
  u64 so_rcvtimeo = 0;
  u64 so_sendtimeo = 0;
};

View file

@ -0,0 +1,84 @@
#pragma once
#ifdef _WIN32
#include <WS2tcpip.h>
#include <winsock2.h>
#else
#ifdef __clang__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <poll.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
#endif
#include "lv2_socket.h"
// lv2 socket backed directly by a host OS socket (TCP/UDP). Implements the
// lv2_socket interface by forwarding to the native BSD/Winsock API.
class lv2_socket_native final : public lv2_socket {
public:
  static constexpr u32 id_type = 1;
  lv2_socket_native(lv2_socket_family family, lv2_socket_type type,
                    lv2_ip_protocol protocol);
  lv2_socket_native(utils::serial &ar, lv2_socket_type type);
  ~lv2_socket_native() noexcept override;
  void save(utils::serial &ar);
  // Allocates the underlying host socket; returns an error code on failure.
  s32 create_socket();

  std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr>
  accept(bool is_lock = true) override;
  s32 bind(const sys_net_sockaddr &addr) override;
  std::optional<s32> connect(const sys_net_sockaddr &addr) override;
  s32 connect_followup() override;
  std::pair<s32, sys_net_sockaddr> getpeername() override;
  std::pair<s32, sys_net_sockaddr> getsockname() override;
  std::tuple<s32, sockopt_data, u32> getsockopt(s32 level, s32 optname,
                                                u32 len) override;
  s32 setsockopt(s32 level, s32 optname,
                 const std::vector<u8> &optval) override;
  std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>>
  recvfrom(s32 flags, u32 len, bool is_lock = true) override;
  std::optional<s32> sendto(s32 flags, const std::vector<u8> &buf,
                            std::optional<sys_net_sockaddr> opt_sn_addr,
                            bool is_lock = true) override;
  std::optional<s32> sendmsg(s32 flags, const sys_net_msghdr &msg,
                             bool is_lock = true) override;
  s32 poll(sys_net_pollfd &sn_pfd, pollfd &native_pfd) override;
  std::tuple<bool, bool, bool> select(bs_t<poll_t> selected,
                                      pollfd &native_pfd) override;
  bool is_socket_connected();
  s32 listen(s32 backlog) override;
  void close() override;
  s32 shutdown(s32 how) override;

private:
  // Adopts an existing host socket (used e.g. for accepted connections).
  void set_socket(socket_type native_socket, lv2_socket_family family,
                  lv2_socket_type type, lv2_ip_protocol protocol);
  void set_default_buffers();
  void set_non_blocking();

private:
  // Value keepers
#ifdef _WIN32
  s32 so_reuseaddr = 0;
  s32 so_reuseport = 0;
#endif
  u16 bound_port = 0;
  bool feign_tcp_conn_failure = false; // Savestate load related
};

View file

@ -0,0 +1,57 @@
#pragma once
#include "lv2_socket.h"
// SYS_NET_SOCK_DGRAM_P2P socket: UDP-based P2P with a virtual port prepended
// to each packet. Incoming data is delivered by the network thread via
// handle_new_data() and queued until the guest reads it.
class lv2_socket_p2p : public lv2_socket {
public:
  lv2_socket_p2p(lv2_socket_family family, lv2_socket_type type,
                 lv2_ip_protocol protocol);
  lv2_socket_p2p(utils::serial &ar, lv2_socket_type type);
  void save(utils::serial &ar);

  std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr>
  accept(bool is_lock = true) override;
  s32 bind(const sys_net_sockaddr &addr) override;
  std::optional<s32> connect(const sys_net_sockaddr &addr) override;
  s32 connect_followup() override;
  std::pair<s32, sys_net_sockaddr> getpeername() override;
  std::pair<s32, sys_net_sockaddr> getsockname() override;
  std::tuple<s32, sockopt_data, u32> getsockopt(s32 level, s32 optname,
                                                u32 len) override;
  s32 setsockopt(s32 level, s32 optname,
                 const std::vector<u8> &optval) override;
  s32 listen(s32 backlog) override;
  std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>>
  recvfrom(s32 flags, u32 len, bool is_lock = true) override;
  std::optional<s32> sendto(s32 flags, const std::vector<u8> &buf,
                            std::optional<sys_net_sockaddr> opt_sn_addr,
                            bool is_lock = true) override;
  std::optional<s32> sendmsg(s32 flags, const sys_net_msghdr &msg,
                             bool is_lock = true) override;
  void close() override;
  s32 shutdown(s32 how) override;
  s32 poll(sys_net_pollfd &sn_pfd, pollfd &native_pfd) override;
  std::tuple<bool, bool, bool> select(bs_t<poll_t> selected,
                                      pollfd &native_pfd) override;
  // Called by the network thread when a packet for this vport arrives.
  void handle_new_data(sys_net_sockaddr_in_p2p p2p_addr,
                       std::vector<u8> p2p_data);

protected:
  // Port(actual bound port) and Virtual Port(indicated by u16 at the start of
  // the packet)
  u16 port = 3658, vport = 0;
  u32 bound_addr = 0;
  // Queue containing received packets from network_thread for
  // SYS_NET_SOCK_DGRAM_P2P sockets
  std::queue<std::pair<sys_net_sockaddr_in_p2p, std::vector<u8>>> data{};
  // List of sock options
  std::map<u64, sockopt_cache> sockopts;
};

View file

@ -0,0 +1,128 @@
#pragma once
#ifdef _WIN32
#include <WS2tcpip.h>
#include <winsock2.h>
#else
#ifdef __clang__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include <netinet/in.h>
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
#endif
#include "lv2_socket_p2p.h"
struct nt_p2p_port;
// 'U2S0' magic ("UDP to Stream") marking a packet as P2P stream data.
constexpr be_t<u32> P2PS_U2S_SIG =
    (static_cast<u32>('U') << 24 | static_cast<u32>('2') << 16 |
     static_cast<u32>('S') << 8 | static_cast<u32>('0'));
// TCP-like header emulated over UDP for SYS_NET_SOCK_STREAM_P2P.
struct p2ps_encapsulated_tcp {
  be_t<u32> signature =
      P2PS_U2S_SIG; // Signature to verify it's P2P Stream data
  be_t<u32> length = 0; // Length of data
  be_t<u64> seq = 0; // This should be u32 but changed to u64 for simplicity
  be_t<u64> ack = 0;
  be_t<u16> src_port = 0; // fake source tcp port
  be_t<u16> dst_port = 0; // fake dest tcp port(should be == vport)
  be_t<u16> checksum = 0;
  u8 flags = 0; // combination of p2ps_tcp_flags
};
// Lifecycle of an emulated P2P stream.
enum p2ps_stream_status {
  stream_closed, // Default when port is not listening nor connected
  stream_listening, // Stream is listening, accepting SYN packets
  stream_handshaking, // Currently handshaking
  stream_connected, // This is an established connection(after tcp handshake)
};
// Standard TCP flag bits, used in p2ps_encapsulated_tcp::flags.
enum p2ps_tcp_flags : u8 {
  FIN = (1 << 0),
  SYN = (1 << 1),
  RST = (1 << 2),
  PSH = (1 << 3),
  ACK = (1 << 4),
  URG = (1 << 5),
  ECE = (1 << 6),
  CWR = (1 << 7),
};
// Computes the checksum over the emulated TCP header/payload.
u16 u2s_tcp_checksum(const le_t<u16> *buffer, usz size);
// Builds a raw U2S packet from a header plus payload bytes.
std::vector<u8> generate_u2s_packet(const p2ps_encapsulated_tcp &header,
                                    const u8 *data, const u32 datasize);
// SYS_NET_SOCK_STREAM_P2P socket: TCP-like reliable stream emulated over the
// P2P UDP transport, including handshake, sequencing and reassembly.
class lv2_socket_p2ps final : public lv2_socket_p2p {
public:
  static constexpr u32 id_type = 2;
  lv2_socket_p2ps(lv2_socket_family family, lv2_socket_type type,
                  lv2_ip_protocol protocol);
  // Constructs an already-established stream (e.g. from an accept).
  lv2_socket_p2ps(socket_type socket, u16 port, u16 vport, u32 op_addr,
                  u16 op_port, u16 op_vport, u64 cur_seq, u64 data_beg_seq,
                  s32 so_nbio);
  lv2_socket_p2ps(utils::serial &ar, lv2_socket_type type);
  void save(utils::serial &ar);
  p2ps_stream_status get_status() const;
  void set_status(p2ps_stream_status new_status);
  // Packet handlers invoked by the P2P port; return value semantics are not
  // evident here — confirm against nt_p2p_port::recv_data.
  bool handle_connected(p2ps_encapsulated_tcp *tcp_header, u8 *data,
                        ::sockaddr_storage *op_addr, nt_p2p_port *p2p_port);
  bool handle_listening(p2ps_encapsulated_tcp *tcp_header, u8 *data,
                        ::sockaddr_storage *op_addr);
  void send_u2s_packet(std::vector<u8> data, const ::sockaddr_in *dst, u64 seq,
                       bool require_ack);
  void close_stream();

  std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr>
  accept(bool is_lock = true) override;
  s32 bind(const sys_net_sockaddr &addr) override;
  std::optional<s32> connect(const sys_net_sockaddr &addr) override;
  std::pair<s32, sys_net_sockaddr> getpeername() override;
  std::pair<s32, sys_net_sockaddr> getsockname() override;
  s32 listen(s32 backlog) override;
  std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>>
  recvfrom(s32 flags, u32 len, bool is_lock = true) override;
  std::optional<s32> sendto(s32 flags, const std::vector<u8> &buf,
                            std::optional<sys_net_sockaddr> opt_sn_addr,
                            bool is_lock = true) override;
  std::optional<s32> sendmsg(s32 flags, const sys_net_msghdr &msg,
                             bool is_lock = true) override;
  void close() override;
  s32 shutdown(s32 how) override;
  s32 poll(sys_net_pollfd &sn_pfd, pollfd &native_pfd) override;
  std::tuple<bool, bool, bool> select(bs_t<poll_t> selected,
                                      pollfd &native_pfd) override;

private:
  // Close variant assuming the port lock is already held ("no lock").
  void close_stream_nl(nt_p2p_port *p2p_port);

private:
  static constexpr usz MAX_RECEIVED_BUFFER = (1024 * 1024 * 10);

  p2ps_stream_status status = p2ps_stream_status::stream_closed;
  usz max_backlog = 0; // set on listen
  // Pending connections (socket ids) awaiting accept().
  std::deque<s32> backlog;
  // Opposite endpoint: real port/addr and virtual port.
  u16 op_port = 0, op_vport = 0;
  u32 op_addr = 0;
  u64 data_beg_seq = 0; // Seq of first byte of received_data
  u64 data_available =
      0; // Amount of continuous data available(calculated on ACK send)
  std::map<u64, std::vector<u8>>
      received_data; // holds seq/data of data received
  u64 cur_seq = 0; // SEQ of next packet to be sent
};

View file

@ -0,0 +1,45 @@
#pragma once
#include "lv2_socket.h"
// SYS_NET_SOCK_RAW socket implementation.
// NOTE(review): id_type = 1 is the same value as lv2_socket_native::id_type —
// confirm this overlap is intentional in the idm registration.
class lv2_socket_raw final : public lv2_socket {
public:
  static constexpr u32 id_type = 1;
  lv2_socket_raw(lv2_socket_family family, lv2_socket_type type,
                 lv2_ip_protocol protocol);
  lv2_socket_raw(utils::serial &ar, lv2_socket_type type);
  void save(utils::serial &ar);

  std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr>
  accept(bool is_lock = true) override;
  s32 bind(const sys_net_sockaddr &addr) override;
  std::optional<s32> connect(const sys_net_sockaddr &addr) override;
  s32 connect_followup() override;
  std::pair<s32, sys_net_sockaddr> getpeername() override;
  std::pair<s32, sys_net_sockaddr> getsockname() override;
  std::tuple<s32, sockopt_data, u32> getsockopt(s32 level, s32 optname,
                                                u32 len) override;
  s32 setsockopt(s32 level, s32 optname,
                 const std::vector<u8> &optval) override;
  s32 listen(s32 backlog) override;
  std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>>
  recvfrom(s32 flags, u32 len, bool is_lock = true) override;
  std::optional<s32> sendto(s32 flags, const std::vector<u8> &buf,
                            std::optional<sys_net_sockaddr> opt_sn_addr,
                            bool is_lock = true) override;
  std::optional<s32> sendmsg(s32 flags, const sys_net_msghdr &msg,
                             bool is_lock = true) override;
  void close() override;
  s32 shutdown(s32 how) override;
  s32 poll(sys_net_pollfd &sn_pfd, pollfd &native_pfd) override;
  std::tuple<bool, bool, bool> select(bs_t<poll_t> selected,
                                      pollfd &native_pfd) override;
};

View file

@ -0,0 +1,45 @@
#pragma once
#include "Emu/Cell/PPUThread.h"
#include "util/mutex.h"
#include <map>
#include <vector>
#include "nt_p2p_port.h"
// Shared state for the background network threads: tracks PPU threads that
// must be woken when socket events fire.
struct base_network_thread {
  void add_ppu_to_awake(ppu_thread *ppu);
  void del_ppu_to_awake(ppu_thread *ppu);

  shared_mutex mutex_ppu_to_awake;
  std::vector<ppu_thread *> ppu_to_awake;

  // Wakes every queued PPU thread.
  void wake_threads();
};
// Thread body polling regular (non-P2P) sockets.
struct network_thread : base_network_thread {
  shared_mutex mutex_thread_loop;
  atomic_t<u32> num_polls = 0;

  static constexpr auto thread_name = "Network Thread";

  void operator()();
};
// Thread body servicing P2P ports (one nt_p2p_port per real UDP port).
struct p2p_thread : base_network_thread {
  shared_mutex list_p2p_ports_mutex;
  std::map<u16, nt_p2p_port> list_p2p_ports;
  atomic_t<u32> num_p2p_ports = 0;

  static constexpr auto thread_name = "Network P2P Thread";

  p2p_thread();
  void create_p2p_port(u16 p2p_port);
  void bind_sce_np_port();
  void operator()();
};
// Named-thread wrappers used as idm/global context types.
using network_context = named_thread<network_thread>;
using p2p_context = named_thread<p2p_thread>;

View file

@ -0,0 +1,82 @@
#pragma once
#include <set>
#include "lv2_socket_p2ps.h"
#ifdef _WIN32
#include <WS2tcpip.h>
#include <winsock2.h>
#else
#ifdef __clang__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
#endif
// P2P packet prefix layout: dst_vport, src_vport, flags — three u16 fields.
constexpr s32 VPORT_P2P_HEADER_SIZE = sizeof(u16) + sizeof(u16) + sizeof(u16);
// Values for the `flags` field of the vport header.
enum VPORT_P2P_FLAGS {
  P2P_FLAG_P2P = 1,
  P2P_FLAG_P2PS = 1 << 1,
};
// Signaling payload received from a peer (address is the real source).
struct signaling_message {
  u32 src_addr = 0;
  u16 src_port = 0;
  std::vector<u8> data;
};
namespace sys_net_helpers {
  // True if every socket id in the set has address reuse enabled.
  bool all_reusable(const std::set<s32> &sock_ids);
}
// One real UDP socket bound to a host port, multiplexing all P2P virtual
// ports (DGRAM_P2P and STREAM_P2P sockets) that share that port.
struct nt_p2p_port {
  // Real socket where P2P packets are received/sent
  socket_type p2p_socket = 0;
  u16 port = 0;
  bool is_ipv6 = false;

  shared_mutex bound_p2p_vports_mutex;
  // For DGRAM_P2P sockets (vport, sock_ids)
  std::map<u16, std::set<s32>> bound_p2p_vports{};
  // For STREAM_P2P sockets (vport, sock_ids)
  std::map<u16, std::set<s32>> bound_p2ps_vports{};
  // List of active(either from a connect or an accept) P2PS sockets (key,
  // sock_id) key is ( (src_vport) << 48 | (dst_vport) << 32 | addr ) with
  // src_vport and addr being 0 for listening sockets
  std::map<u64, s32> bound_p2p_streams{};
  // Current free port index
  u16 binding_port = 30000;

  // Queued messages from RPCN
  shared_mutex s_rpcn_mutex;
  std::vector<std::vector<u8>> rpcn_msgs{};
  // Queued signaling messages
  shared_mutex s_sign_mutex;
  std::vector<signaling_message> sign_msgs{};

  // Scratch buffer for one received datagram (max UDP payload).
  std::array<u8, 65535> p2p_recv_data{};

  nt_p2p_port(u16 port);
  ~nt_p2p_port();

  static void dump_packet(p2ps_encapsulated_tcp *tcph);
  u16 get_port();
  bool handle_connected(s32 sock_id, p2ps_encapsulated_tcp *tcp_header,
                        u8 *data, ::sockaddr_storage *op_addr);
  bool handle_listening(s32 sock_id, p2ps_encapsulated_tcp *tcp_header,
                        u8 *data, ::sockaddr_storage *op_addr);
  // Reads and dispatches one pending datagram; return semantics not evident
  // here — confirm against the implementation.
  bool recv_data();
};

View file

@ -0,0 +1,34 @@
#pragma once
#ifdef _WIN32
#include <WS2tcpip.h>
#include <winsock2.h>
#else
#ifdef __clang__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include <netinet/in.h>
#include <sys/socket.h>
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
#endif
#include "cellos/sys_net.h"
// Returns the host OS error for the last socket call (errno / WSAGetLastError).
int get_native_error();
// Maps a host error to a sys_net_error; `is_blocking`/`is_connecting` adjust
// the mapping for non-blocking and in-progress-connect cases.
sys_net_error convert_error(bool is_blocking, int native_error,
                            bool is_connecting = false);
sys_net_error get_last_error(bool is_blocking, bool is_connecting = false);
// Conversions between host sockaddr types and the guest representation.
sys_net_sockaddr
native_addr_to_sys_net_addr(const ::sockaddr_storage &native_addr);
::sockaddr_in sys_net_addr_to_native_addr(const sys_net_sockaddr &sn_addr);
bool is_ip_public_address(const ::sockaddr_in &addr);
u32 network_clear_queue(ppu_thread &ppu);
void clear_ppu_to_awake(ppu_thread &ppu);
#ifdef _WIN32
// WSAPoll wrapper working around its broken connect-failure reporting;
// `connecting` marks fds with an in-progress connect.
void windows_poll(std::vector<pollfd> &fds, unsigned long nfds, int timeout,
                  std::vector<bool> &connecting);
#endif

View file

@ -4,8 +4,7 @@
#include "Emu/Memory/vm_ptr.h"
#include "sys_sync.h"
struct lv2_overlay final : ppu_module<lv2_obj>
{
struct lv2_overlay final : ppu_module<lv2_obj> {
static const u32 id_base = 0x25000000;
u32 entry{};
@ -17,9 +16,14 @@ struct lv2_overlay final : ppu_module<lv2_obj>
void save(utils::serial &ar);
};
error_code sys_overlay_load_module(vm::ptr<u32> ovlmid, vm::cptr<char> path, u64 flags, vm::ptr<u32> entry);
error_code sys_overlay_load_module_by_fd(vm::ptr<u32> ovlmid, u32 fd, u64 offset, u64 flags, vm::ptr<u32> entry);
error_code sys_overlay_load_module(vm::ptr<u32> ovlmid, vm::cptr<char> path,
u64 flags, vm::ptr<u32> entry);
error_code sys_overlay_load_module_by_fd(vm::ptr<u32> ovlmid, u32 fd,
u64 offset, u64 flags,
vm::ptr<u32> entry);
error_code sys_overlay_unload_module(u32 ovlmid);
// clang-format off
// error_code sys_overlay_get_module_list(sys_pid_t pid, usz ovlmids_num, sys_overlay_t * ovlmids, usz * num_of_modules);
// error_code sys_overlay_get_module_info(sys_pid_t pid, sys_overlay_t ovlmid, sys_overlay_module_info_t * info);
// error_code sys_overlay_get_module_info2(sys_pid_t pid, sys_overlay_t ovlmid, sys_overlay_module_info2_t * info);//
@ -27,3 +31,4 @@ error_code sys_overlay_unload_module(u32 ovlmid);
// error_code sys_overlay_get_module_dbg_info(); //3 params?
// error_code _sys_prx_load_module(vm::ps3::cptr<char> path, u64 flags, vm::ps3::ptr<sys_prx_load_module_option_t> pOpt);
// clang-format on

View file

@ -0,0 +1,69 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
class ppu_thread;
// States for sys_ppu_thread_once-style initialization.
enum : s32 {
  SYS_PPU_THREAD_ONCE_INIT = 0,
  SYS_PPU_THREAD_DONE_INIT = 1,
};
// PPU Thread Flags
enum : u64 {
  SYS_PPU_THREAD_CREATE_JOINABLE = 0x1,
  SYS_PPU_THREAD_CREATE_INTERRUPT = 0x2,
};
// Stack address/size pair returned by sys_ppu_thread_get_stack_information.
struct sys_ppu_thread_stack_t {
  be_t<u32> pst_addr;
  be_t<u32> pst_size;
};
// Creation parameters for _sys_ppu_thread_create.
struct ppu_thread_param_t {
  vm::bptr<void(u64)> entry;
  be_t<u32> tls; // vm::bptr<void>
};
// PPU register context captured on a page fault
// (sys_ppu_thread_get_page_fault_context).
struct sys_ppu_thread_icontext_t {
  be_t<u64> gpr[32];
  be_t<u32> cr;
  be_t<u32> rsv1;
  be_t<u64> xer;
  be_t<u64> lr;
  be_t<u64> ctr;
  be_t<u64> pc;
};
// Syscalls — lv2 PPU thread management entry points.
void _sys_ppu_thread_exit(ppu_thread &ppu, u64 errorcode);
s32 sys_ppu_thread_yield(
    ppu_thread &ppu); // Return value is ignored by the library
error_code sys_ppu_thread_join(ppu_thread &ppu, u32 thread_id,
                               vm::ptr<u64> vptr);
error_code sys_ppu_thread_detach(ppu_thread &ppu, u32 thread_id);
error_code sys_ppu_thread_get_join_state(
    ppu_thread &ppu,
    vm::ptr<s32> isjoinable); // Error code is ignored by the library
error_code sys_ppu_thread_set_priority(ppu_thread &ppu, u32 thread_id,
                                       s32 prio);
error_code sys_ppu_thread_get_priority(ppu_thread &ppu, u32 thread_id,
                                       vm::ptr<s32> priop);
error_code
sys_ppu_thread_get_stack_information(ppu_thread &ppu,
                                     vm::ptr<sys_ppu_thread_stack_t> sp);
error_code sys_ppu_thread_stop(ppu_thread &ppu, u32 thread_id);
error_code sys_ppu_thread_restart(ppu_thread &ppu);
// Creates the thread in a stopped state; start it with sys_ppu_thread_start.
error_code _sys_ppu_thread_create(ppu_thread &ppu, vm::ptr<u64> thread_id,
                                  vm::ptr<ppu_thread_param_t> param, u64 arg,
                                  u64 arg4, s32 prio, u32 stacksize, u64 flags,
                                  vm::cptr<char> threadname);
error_code sys_ppu_thread_start(ppu_thread &ppu, u32 thread_id);
error_code sys_ppu_thread_rename(ppu_thread &ppu, u32 thread_id,
                                 vm::cptr<char> name);
error_code sys_ppu_thread_recover_page_fault(ppu_thread &ppu, u32 thread_id);
error_code
sys_ppu_thread_get_page_fault_context(ppu_thread &ppu, u32 thread_id,
                                      vm::ptr<sys_ppu_thread_icontext_t> ctxt);

View file

@ -0,0 +1,130 @@
#pragma once
#include "Crypto/unself.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// Process Local Object Type — id-type tags used by
// sys_process_get_number_of_object / sys_process_get_id to enumerate kernel
// objects belonging to the process.
enum : u32 {
  SYS_MEM_OBJECT = 0x08,
  SYS_MUTEX_OBJECT = 0x85,
  SYS_COND_OBJECT = 0x86,
  SYS_RWLOCK_OBJECT = 0x88,
  SYS_INTR_TAG_OBJECT = 0x0A,
  SYS_INTR_SERVICE_HANDLE_OBJECT = 0x0B,
  SYS_EVENT_QUEUE_OBJECT = 0x8D,
  SYS_EVENT_PORT_OBJECT = 0x0E,
  SYS_TRACE_OBJECT = 0x21,
  SYS_SPUIMAGE_OBJECT = 0x22,
  SYS_PRX_OBJECT = 0x23,
  SYS_SPUPORT_OBJECT = 0x24,
  SYS_OVERLAY_OBJECT = 0x25,
  SYS_LWMUTEX_OBJECT = 0x95,
  SYS_TIMER_OBJECT = 0x11,
  SYS_SEMAPHORE_OBJECT = 0x96,
  SYS_FS_FD_OBJECT = 0x73,
  SYS_LWCOND_OBJECT = 0x97,
  SYS_EVENT_FLAG_OBJECT = 0x98,
  SYS_RSXAUDIO_OBJECT = 0x60,
};
// Primary-stack size selectors encoded in process flags.
enum : u64 {
  SYS_PROCESS_PRIMARY_STACK_SIZE_32K = 0x0000000000000010,
  SYS_PROCESS_PRIMARY_STACK_SIZE_64K = 0x0000000000000020,
  SYS_PROCESS_PRIMARY_STACK_SIZE_96K = 0x0000000000000030,
  SYS_PROCESS_PRIMARY_STACK_SIZE_128K = 0x0000000000000040,
  SYS_PROCESS_PRIMARY_STACK_SIZE_256K = 0x0000000000000050,
  SYS_PROCESS_PRIMARY_STACK_SIZE_512K = 0x0000000000000060,
  SYS_PROCESS_PRIMARY_STACK_SIZE_1M = 0x0000000000000070,
};
// ELF section holding the embedded sys_process_param of an executable.
constexpr auto SYS_PROCESS_PARAM_SECTION_NAME = ".sys_proc_param";
enum {
  SYS_PROCESS_PARAM_INVALID_PRIO = -32768,
};
// Constants for the .sys_proc_param structure fields.
enum : u32 {
  SYS_PROCESS_PARAM_INVALID_STACK_SIZE = 0xffffffff,
  SYS_PROCESS_PARAM_STACK_SIZE_MIN = 0x1000, // 4KB
  SYS_PROCESS_PARAM_STACK_SIZE_MAX = 0x100000, // 1MB
  SYS_PROCESS_PARAM_VERSION_INVALID = 0xffffffff,
  SYS_PROCESS_PARAM_VERSION_1 = 0x00000001, // for SDK 08X
  SYS_PROCESS_PARAM_VERSION_084_0 = 0x00008400,
  SYS_PROCESS_PARAM_VERSION_090_0 = 0x00009000,
  SYS_PROCESS_PARAM_VERSION_330_0 = 0x00330000,
  SYS_PROCESS_PARAM_MAGIC = 0x13bcc5f6,
  SYS_PROCESS_PARAM_MALLOC_PAGE_SIZE_NONE = 0x00000000,
  SYS_PROCESS_PARAM_MALLOC_PAGE_SIZE_64K = 0x00010000,
  SYS_PROCESS_PARAM_MALLOC_PAGE_SIZE_1M = 0x00100000,
  SYS_PROCESS_PARAM_PPC_SEG_DEFAULT = 0x00000000,
  SYS_PROCESS_PARAM_PPC_SEG_OVLM = 0x00000001,
  SYS_PROCESS_PARAM_PPC_SEG_FIXEDADDR_PRX = 0x00000002,
  SYS_PROCESS_PARAM_SDK_VERSION_UNKNOWN = 0xffffffff,
};
// Guest argument block passed to _sys_process_exit2 (exitspawn); field
// meanings beyond the noted constants are inferred from the ABI — confirm
// against the implementation.
struct sys_exit2_param {
  be_t<u64> x0; // 0x85
  be_t<u64> this_size; // 0x30
  be_t<u64> next_size;
  be_t<s64> prio;
  be_t<u64> flags;
  vm::bpptr<char, u64, u64> args;
};
// Host-side description of the running PS3 process (SDK version, ppc_seg
// mode, SELF metadata, control flags).
struct ps3_process_info_t {
  u32 sdk_ver;
  u32 ppc_seg;
  SelfAdditionalInfo self_info;
  u32 ctrl_flags1 = 0;
  bool has_root_perm() const;
  bool has_debug_perm() const;
  bool debug_or_root() const;
  std::string_view get_cellos_appname() const;
};
// Global info for the single emulated PS3 process.
extern ps3_process_info_t g_ps3_process_info;
// Auxiliary functions
s32 process_getpid();
s32 process_get_sdk_version(u32 pid, s32 &ver);
// Implements the exitspawn transition to a new executable.
void lv2_exitspawn(ppu_thread &ppu, std::vector<std::string> &argv,
                   std::vector<std::string> &envp, std::vector<u8> &data);
enum CellError : u32;
CellError process_is_spu_lock_line_reservation_address(u32 addr, u64 flags);
// SysCalls
s32 sys_process_getpid();
s32 sys_process_getppid();
error_code sys_process_get_number_of_object(u32 object, vm::ptr<u32> nump);
error_code sys_process_get_id(u32 object, vm::ptr<u32> buffer, u32 size,
                              vm::ptr<u32> set_size);
error_code sys_process_get_id2(u32 object, vm::ptr<u32> buffer, u32 size,
                               vm::ptr<u32> set_size);
error_code _sys_process_get_paramsfo(vm::ptr<char> buffer);
error_code sys_process_get_sdk_version(u32 pid, vm::ptr<s32> version);
error_code sys_process_get_status(u64 unk);
error_code sys_process_is_spu_lock_line_reservation_address(u32 addr,
                                                            u64 flags);
error_code sys_process_kill(u32 pid);
error_code sys_process_wait_for_child(u32 pid, vm::ptr<u32> status, u64 unk);
error_code sys_process_wait_for_child2(u64 unk1, u64 unk2, u64 unk3, u64 unk4,
                                       u64 unk5, u64 unk6);
error_code sys_process_detach_child(u64 unk);
// Process termination variants (exit / exit2-with-args / exit3).
void _sys_process_exit(ppu_thread &ppu, s32 status, u32 arg2, u32 arg3);
void _sys_process_exit2(ppu_thread &ppu, s32 status,
                        vm::ptr<sys_exit2_param> arg, u32 arg_size, u32 arg4);
void sys_process_exit3(ppu_thread &ppu, s32 status);
error_code sys_process_spawns_a_self2(vm::ptr<u32> pid, u32 primary_prio,
                                      u64 flags, vm::ptr<void> stack,
                                      u32 stack_size, u32 mem_id,
                                      vm::ptr<void> param_sfo,
                                      vm::ptr<void> dbg_data);

View file

@ -0,0 +1,293 @@
#pragma once
#include "sys_sync.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUAnalyser.h"
#include "Emu/Memory/vm_ptr.h"
// Return codes
// Return codes
// lv2 PRX (relocatable module) manager error codes, shared with the
// user-mode cellPrx/sys_prx wrappers.
enum CellPrxError : u32 {
  CELL_PRX_ERROR_ERROR = 0x80011001, // Error state
  CELL_PRX_ERROR_ILLEGAL_PERM = 0x800110d1, // No permission to execute API
  CELL_PRX_ERROR_UNKNOWN_MODULE =
      0x8001112e, // Specified PRX could not be found
  CELL_PRX_ERROR_ALREADY_STARTED =
      0x80011133, // Specified PRX is already started
  CELL_PRX_ERROR_NOT_STARTED = 0x80011134, // Specified PRX is not started
  CELL_PRX_ERROR_ALREADY_STOPPED =
      0x80011135, // Specified PRX is already stopped
  CELL_PRX_ERROR_CAN_NOT_STOP = 0x80011136, // Specified PRX must not be stopped
  CELL_PRX_ERROR_NOT_REMOVABLE =
      0x80011138, // Specified PRX must not be deleted
  CELL_PRX_ERROR_LIBRARY_NOT_YET_LINKED =
      0x8001113a, // Called unlinked function
  CELL_PRX_ERROR_LIBRARY_FOUND =
      0x8001113b, // Specified library is already registered
  CELL_PRX_ERROR_LIBRARY_NOTFOUND =
      0x8001113c, // Specified library is not registered
  CELL_PRX_ERROR_ILLEGAL_LIBRARY = 0x8001113d, // Library structure is invalid
  CELL_PRX_ERROR_LIBRARY_INUSE =
      0x8001113e, // Library cannot be deleted because it is linked
  CELL_PRX_ERROR_ALREADY_STOPPING =
      0x8001113f, // Specified PRX is in the process of stopping
  CELL_PRX_ERROR_UNSUPPORTED_PRX_TYPE =
      0x80011148, // Specified PRX format is invalid and cannot be loaded
  CELL_PRX_ERROR_INVAL = 0x80011324, // Argument value is invalid
  CELL_PRX_ERROR_ILLEGAL_PROCESS =
      0x80011801, // Specified process does not exist
  CELL_PRX_ERROR_NO_LIBLV2 = 0x80011881, // liblv2.sprx does not exist
  CELL_PRX_ERROR_UNSUPPORTED_ELF_TYPE =
      0x80011901, // ELF type of specified file is not supported
  CELL_PRX_ERROR_UNSUPPORTED_ELF_CLASS =
      0x80011902, // ELF class of specified file is not supported
  CELL_PRX_ERROR_UNDEFINED_SYMBOL = 0x80011904, // References undefined symbols
  CELL_PRX_ERROR_UNSUPPORTED_RELOCATION_TYPE =
      0x80011905, // Uses unsupported relocation type
  CELL_PRX_ERROR_ELF_IS_REGISTERED =
      0x80011910, // Fixed ELF is already registered
  CELL_PRX_ERROR_NO_EXIT_ENTRY = 0x80011911,
};
enum { SYS_PRX_MODULE_FILENAME_SIZE = 512 };
// Option block for _sys_prx_get_module_id_by_name. `size` is the size of the
// structure itself (a common lv2 versioning convention used by all option
// structs below).
struct sys_prx_get_module_id_by_name_option_t {
  be_t<u64> size;
  vm::ptr<void> base;
};
// Option block for the load-module syscall family.
struct sys_prx_load_module_option_t {
  be_t<u64> size;
  vm::bptr<void> base_addr;
};
// Description of one loaded PRX segment, as reported to the guest.
struct sys_prx_segment_info_t {
  be_t<u64> base;
  be_t<u64> filesz;
  be_t<u64> memsz;
  be_t<u64> index;
  be_t<u64> type;
};
// Module information returned by _sys_prx_get_module_info. Trailing comments
// give guest-memory offsets.
struct sys_prx_module_info_t {
  be_t<u64> size; // 0
  char name[30]; // 8
  char version[2]; // 0x26
  be_t<u32> modattribute; // 0x28
  be_t<u32> start_entry; // 0x2c
  be_t<u32> stop_entry; // 0x30
  be_t<u32> all_segments_num; // 0x34
  vm::bptr<char> filename; // 0x38
  be_t<u32> filename_size; // 0x3c
  vm::bptr<sys_prx_segment_info_t> segments; // 0x40
  be_t<u32> segments_num; // 0x44
};
// Extended (v2) module information with export/import table ranges.
struct sys_prx_module_info_v2_t : sys_prx_module_info_t {
  be_t<u32> exports_addr; // 0x48
  be_t<u32> exports_size; // 0x4C
  be_t<u32> imports_addr; // 0x50
  be_t<u32> imports_size; // 0x54
};
// Option wrapper selecting between the v1 and v2 module-info layouts; the
// `size` field tells the kernel which union member the guest provided.
struct sys_prx_module_info_option_t {
  be_t<u64> size; // 0x10
  union {
    vm::bptr<sys_prx_module_info_t> info;
    vm::bptr<sys_prx_module_info_v2_t> info_v2;
  };
};
struct sys_prx_start_module_option_t {
  be_t<u64> size;
};
struct sys_prx_stop_module_option_t {
  be_t<u64> size;
};
// Option block for _sys_prx_start_module/_sys_prx_stop_module: the kernel
// returns the module entry point(s) here so liblv2 can invoke them.
struct sys_prx_start_stop_module_option_t {
  be_t<u64> size;
  be_t<u64> cmd;
  vm::bptr<s32(u32 argc, vm::ptr<void> argv), u64> entry;
  be_t<u64> res;
  // Wrapper entry that receives the real entry point as its first argument.
  vm::bptr<s32(vm::ptr<s32(u32, vm::ptr<void>), u64>, u32 argc,
               vm::ptr<void> argv),
           u64>
      entry2;
};
struct sys_prx_unload_module_option_t {
  be_t<u64> size;
};
// In/out block for module enumeration: `max` is the idlist capacity,
// `count` receives the number of ids written.
struct sys_prx_get_module_list_t {
  be_t<u64> size;
  be_t<u32> max;
  be_t<u32> count;
  vm::bptr<u32> idlist;
};
struct sys_prx_get_module_list_option_t {
  be_t<u64> size; // 0x20
  be_t<u32> pad;
  be_t<u32> max;
  be_t<u32> count;
  vm::bptr<u32> idlist;
  be_t<u32> unk; // 0
};
// Argument blocks for _sys_prx_register_module; the two layouts are
// distinguished by their `size` field (0x20 vs 0x30). Trailing comments give
// guest-memory offsets.
struct sys_prx_register_module_0x20_t {
  be_t<u64> size; // 0x0
  be_t<u32> toc; // 0x8
  be_t<u32> toc_size; // 0xC
  vm::bptr<void> stubs_ea; // 0x10
  be_t<u32> stubs_size; // 0x14
  vm::bptr<void> error_handler; // 0x18
  char pad[4]; // 0x1C
};
struct sys_prx_register_module_0x30_type_1_t {
  be_t<u64> size; // 0x0
  be_t<u64> type; // 0x8
  be_t<u32> unk3; // 0x10
  be_t<u32> unk4; // 0x14
  vm::bptr<void> lib_entries_ea; // 0x18
  be_t<u32> lib_entries_size; // 0x1C
  vm::bptr<void> lib_stub_ea; // 0x20
  be_t<u32> lib_stub_size; // 0x24
  vm::bptr<void> error_handler; // 0x28
  char pad[4]; // 0x2C
};
// Return values of PRX module start/stop entry points.
enum : u32 {
  SYS_PRX_RESIDENT = 0,
  SYS_PRX_NO_RESIDENT = 1,
  SYS_PRX_START_OK = 0,
  SYS_PRX_STOP_SUCCESS = 0,
  SYS_PRX_STOP_OK = 0,
  SYS_PRX_STOP_FAILED = 1
};
// Unofficial names for PRX state
enum : u32 {
  PRX_STATE_INITIALIZED,
  PRX_STATE_STARTING, // In-between state between initialized and started
                      // (internal)
  PRX_STATE_STARTED,
  PRX_STATE_STOPPING, // In-between state between started and stopped (internal)
  PRX_STATE_STOPPED, // Last state, the module cannot be restarted
  PRX_STATE_DESTROYED, // Last state, the module cannot be restarted
};
// Kernel object representing a loaded PRX module.
struct lv2_prx final : ppu_module<lv2_obj> {
  static const u32 id_base = 0x23000000; // base of the PRX id namespace
  // Current lifecycle state (one of PRX_STATE_*).
  atomic_t<u32> state = PRX_STATE_INITIALIZED;
  shared_mutex mutex;
  std::unordered_map<u32, u32> specials;
  // Guest entry points resolved at load time (vm::null if absent).
  vm::ptr<s32(u32 argc, vm::ptr<void> argv)> start = vm::null;
  vm::ptr<s32(u32 argc, vm::ptr<void> argv)> stop = vm::null;
  vm::ptr<s32(u64 callback, u64 argc, vm::ptr<void, u64> argv)> prologue =
      vm::null;
  vm::ptr<s32(u64 callback, u64 argc, vm::ptr<void, u64> argv)> epilogue =
      vm::null;
  vm::ptr<s32()> exit = vm::null;
  // Module identity copied from the module info section.
  char module_info_name[28]{};
  u8 module_info_version[2]{};
  be_t<u16> module_info_attributes{};
  // Guest address ranges of the import/export tables (umax start = none).
  u32 imports_start = umax;
  u32 imports_end = 0;
  u32 exports_start = umax;
  u32 exports_end = 0;
  std::basic_string<char> m_loaded_flags;
  std::basic_string<char> m_external_loaded_flags;
  void load_exports(); // (Re)load exports
  void restore_exports(); // For savestates
  void unload_exports();
  lv2_prx() noexcept = default;
  lv2_prx(utils::serial &) {}
  // Savestate support.
  static std::function<void(void *)> load(utils::serial &);
  void save(utils::serial &ar);
};
// Flags accepted by the sys_prx load-module syscalls.
enum : u64 {
  SYS_PRX_LOAD_MODULE_FLAGS_FIXEDADDR = 0x1ull,
  SYS_PRX_LOAD_MODULE_FLAGS_INVALIDMASK = ~SYS_PRX_LOAD_MODULE_FLAGS_FIXEDADDR,
};
// PPC
// Relocation types supported by the PRX variable-link tables.
enum {
  SYS_PRX_R_PPC_ADDR32 = 1,
  SYS_PRX_R_PPC_ADDR16_LO = 4,
  SYS_PRX_R_PPC_ADDR16_HI = 5,
  SYS_PRX_R_PPC_ADDR16_HA = 6,
  SYS_PRX_R_PPC64_ADDR32 = SYS_PRX_R_PPC_ADDR32,
  SYS_PRX_R_PPC64_ADDR16_LO = SYS_PRX_R_PPC_ADDR16_LO,
  SYS_PRX_R_PPC64_ADDR16_HI = SYS_PRX_R_PPC_ADDR16_HI,
  SYS_PRX_R_PPC64_ADDR16_HA = SYS_PRX_R_PPC_ADDR16_HA,
  SYS_PRX_R_PPC64_ADDR64 = 38,
  SYS_PRX_VARLINK_TERMINATE32 = 0x00000000
};
// SysCalls
error_code sys_prx_get_ppu_guid(ppu_thread &ppu);
// Module loading (by open file descriptor, by path, or by path list; the
// *_on_memcontainer variants charge the allocation to the given container).
error_code
_sys_prx_load_module_by_fd(ppu_thread &ppu, s32 fd, u64 offset, u64 flags,
                           vm::ptr<sys_prx_load_module_option_t> pOpt);
error_code _sys_prx_load_module_on_memcontainer_by_fd(
    ppu_thread &ppu, s32 fd, u64 offset, u32 mem_ct, u64 flags,
    vm::ptr<sys_prx_load_module_option_t> pOpt);
error_code _sys_prx_load_module_list(ppu_thread &ppu, s32 count,
                                     vm::cpptr<char, u32, u64> path_list,
                                     u64 flags,
                                     vm::ptr<sys_prx_load_module_option_t> pOpt,
                                     vm::ptr<u32> id_list);
error_code _sys_prx_load_module_list_on_memcontainer(
    ppu_thread &ppu, s32 count, vm::cpptr<char, u32, u64> path_list, u32 mem_ct,
    u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt,
    vm::ptr<u32> id_list);
error_code _sys_prx_load_module_on_memcontainer(
    ppu_thread &ppu, vm::cptr<char> path, u32 mem_ct, u64 flags,
    vm::ptr<sys_prx_load_module_option_t> pOpt);
error_code _sys_prx_load_module(ppu_thread &ppu, vm::cptr<char> path, u64 flags,
                                vm::ptr<sys_prx_load_module_option_t> pOpt);
// Module lifecycle control.
error_code
_sys_prx_start_module(ppu_thread &ppu, u32 id, u64 flags,
                      vm::ptr<sys_prx_start_stop_module_option_t> pOpt);
error_code
_sys_prx_stop_module(ppu_thread &ppu, u32 id, u64 flags,
                     vm::ptr<sys_prx_start_stop_module_option_t> pOpt);
error_code _sys_prx_unload_module(ppu_thread &ppu, u32 id, u64 flags,
                                  vm::ptr<sys_prx_unload_module_option_t> pOpt);
// Module/library registry management.
error_code _sys_prx_register_module(ppu_thread &ppu, vm::cptr<char> name,
                                    vm::ptr<void> opt);
error_code _sys_prx_query_module(ppu_thread &ppu);
error_code _sys_prx_register_library(ppu_thread &ppu, vm::ptr<void> library);
error_code _sys_prx_unregister_library(ppu_thread &ppu, vm::ptr<void> library);
error_code _sys_prx_link_library(ppu_thread &ppu);
error_code _sys_prx_unlink_library(ppu_thread &ppu);
error_code _sys_prx_query_library(ppu_thread &ppu);
// Module introspection.
error_code
_sys_prx_get_module_list(ppu_thread &ppu, u64 flags,
                         vm::ptr<sys_prx_get_module_list_option_t> pInfo);
error_code _sys_prx_get_module_info(ppu_thread &ppu, u32 id, u64 flags,
                                    vm::ptr<sys_prx_module_info_option_t> pOpt);
error_code _sys_prx_get_module_id_by_name(
    ppu_thread &ppu, vm::cptr<char> name, u64 flags,
    vm::ptr<sys_prx_get_module_id_by_name_option_t> pOpt);
error_code _sys_prx_get_module_id_by_address(ppu_thread &ppu, u32 addr);
error_code _sys_prx_start(ppu_thread &ppu);
error_code _sys_prx_stop(ppu_thread &ppu);

View file

@ -0,0 +1,144 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
class cpu_thread;
// Guest-visible RSX driver information block; the layout mirrors the real
// lv2 structure (trailing comments give guest-memory offsets) and is pinned
// by the static_asserts below. Field knowledge comes from reverse
// engineering, hence the unk* names.
struct RsxDriverInfo {
  be_t<u32> version_driver; // 0x0
  be_t<u32> version_gpu; // 0x4
  be_t<u32> memory_size; // 0x8
  be_t<u32> hardware_channel; // 0xC
  be_t<u32> nvcore_frequency; // 0x10
  be_t<u32> memory_frequency; // 0x14
  be_t<u32> unk1[4]; // 0x18 - 0x24
  be_t<u32> unk2; // 0x28 -- pgraph stuff
  be_t<u32> reportsNotifyOffset; // 0x2C offset to notify memory
  be_t<u32> reportsOffset; // 0x30 offset to reports memory
  be_t<u32> reportsReportOffset; // 0x34 offset to reports in reports memory
  be_t<u32> unk3[6]; // 0x38-0x54
  be_t<u32> systemModeFlags; // 0x54
  u8 unk4[0x1064]; // 0x10B8
  // Per-display-head state (8 heads).
  struct Head {
    be_t<u64> lastFlipTime; // 0x0 last flip time
    atomic_be_t<u32> flipFlags; // 0x8 flags to handle flip/queue
    be_t<u32> offset; // 0xC
    be_t<u32> flipBufferId; // 0x10
    be_t<u32> lastQueuedBufferId; // 0x14 todo: this is definitely not this
                                  // variable but it is 'unused', so it is
                                  // repurposed to carry the queueId passed to
                                  // the flip handler
    be_t<u32> unk3; // 0x18
    be_t<u32>
        lastVTimeLow; // 0x1C last time for first vhandler freq (low 32-bits)
    atomic_be_t<u64> lastSecondVTime; // 0x20 last time for second vhandler freq
    be_t<u64> unk4; // 0x28
    atomic_be_t<u64> vBlankCount; // 0x30
    be_t<u32> unk; // 0x38 possible u32, 'flip field', top/bottom for interlaced
    be_t<u32>
        lastVTimeHigh; // 0x3C last time for first vhandler freq (high 32-bits)
  } head[8]; // size = 0x40, 0x200
  be_t<u32> unk7; // 0x12B8
  be_t<u32> unk8; // 0x12BC
  atomic_be_t<u32> handlers; // 0x12C0 -- flags showing which handlers are set
  be_t<u32> unk9; // 0x12C4
  be_t<u32> unk10; // 0x12C8
  be_t<u32> userCmdParam; // 0x12CC
  be_t<u32> handler_queue; // 0x12D0
  be_t<u32> unk11; // 0x12D4
  be_t<u32> unk12; // 0x12D8
  be_t<u32> unk13; // 0x12DC
  be_t<u32> unk14; // 0x12E0
  be_t<u32> unk15; // 0x12E4
  be_t<u32> unk16; // 0x12E8
  // NOTE(review): counting members from 0x12B8 places unk17 at 0x12EC and
  // lastError at 0x12F0; the offsets in the next two comments look off by 4 —
  // confirm against the original annotations before relying on them.
  be_t<u32> unk17; // 0x12F0
  be_t<u32> lastError; // 0x12F4 error param for cellGcmSetGraphicsHandler
  // todo: theres more to this
};
static_assert(sizeof(RsxDriverInfo) == 0x12F8, "rsxSizeTest");
static_assert(sizeof(RsxDriverInfo::Head) == 0x40, "rsxHeadSizeTest");
enum : u64 {
  // Unused
  SYS_RSX_IO_MAP_IS_STRICT = 1ull << 60
};
// Unofficial event names
// Bits folded into the event value sent to the RSX event queue.
enum : u64 {
  // SYS_RSX_EVENT_GRAPHICS_ERROR = 1 << 0,
  SYS_RSX_EVENT_VBLANK = 1 << 1,
  SYS_RSX_EVENT_FLIP_BASE = 1 << 3,
  SYS_RSX_EVENT_QUEUE_BASE = 1 << 5,
  SYS_RSX_EVENT_USER_CMD = 1 << 7,
  SYS_RSX_EVENT_SECOND_VBLANK_BASE = 1 << 10,
  SYS_RSX_EVENT_UNMAPPED_BASE = 1ull << 32,
};
// FIFO control registers shared with the guest (put/get/ref pointers).
struct RsxDmaControl {
  u8 resv[0x40];
  atomic_be_t<u32> put;
  atomic_be_t<u32> get;
  atomic_be_t<u32> ref;
  be_t<u32> unk[2];
  be_t<u32> unk1;
};
// A single GPU semaphore cell in the reports area.
struct RsxSemaphore {
  atomic_be_t<u32> val;
};
struct alignas(16) RsxNotify {
  be_t<u64> timestamp;
  be_t<u64> zero;
};
struct alignas(16) RsxReport {
  be_t<u64> timestamp;
  be_t<u32> val;
  be_t<u32> pad;
};
// Layout of the guest-visible reports memory region.
struct RsxReports {
  RsxSemaphore semaphore[1024];
  RsxNotify notify[64];
  RsxReport report[2048];
};
// Geometry of one display buffer as tracked by the RSX driver emulation.
// Fields are big-endian (guest byte order); a zero-initialized entry is
// considered unset.
struct RsxDisplayInfo {
  be_t<u32> offset{0};
  be_t<u32> pitch{0};
  be_t<u32> width{0};
  be_t<u32> height{0};
  ENABLE_BITWISE_SERIALIZATION;

  // An entry is usable only once both of its dimensions are non-zero.
  bool valid() const { return width != 0u && height != 0u; }
};
// SysCalls
// Device open/close and memory/context management for the lv2 RSX driver.
error_code sys_rsx_device_open(cpu_thread &cpu);
error_code sys_rsx_device_close(cpu_thread &cpu);
error_code sys_rsx_memory_allocate(cpu_thread &cpu, vm::ptr<u32> mem_handle,
                                   vm::ptr<u64> mem_addr, u32 size, u64 flags,
                                   u64 a5, u64 a6, u64 a7);
error_code sys_rsx_memory_free(cpu_thread &cpu, u32 mem_handle);
// Allocates an RSX context and returns the lpar addresses of its DMA
// control, driver info and reports regions.
error_code sys_rsx_context_allocate(cpu_thread &cpu, vm::ptr<u32> context_id,
                                    vm::ptr<u64> lpar_dma_control,
                                    vm::ptr<u64> lpar_driver_info,
                                    vm::ptr<u64> lpar_reports, u64 mem_ctx,
                                    u64 system_mode);
error_code sys_rsx_context_free(ppu_thread &ppu, u32 context_id);
// IO address space mapping between main memory (ea) and RSX io offsets.
error_code sys_rsx_context_iomap(cpu_thread &cpu, u32 context_id, u32 io,
                                 u32 ea, u32 size, u64 flags);
error_code sys_rsx_context_iounmap(cpu_thread &cpu, u32 context_id, u32 io,
                                   u32 size);
error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3,
                                     u64 a4, u64 a5, u64 a6);
error_code sys_rsx_device_map(cpu_thread &cpu, vm::ptr<u64> dev_addr,
                              vm::ptr<u64> a2, u32 dev_id);
error_code sys_rsx_device_unmap(cpu_thread &cpu, u32 dev_id);
error_code sys_rsx_attribute(cpu_thread &cpu, u32 packageId, u32 a2, u32 a3,
                             u32 a4, u32 a5);

View file

@ -0,0 +1,629 @@
#pragma once
#include "Emu/Audio/AudioBackend.h"
#include "Emu/Audio/AudioDumper.h"
#include "Emu/Audio/audio_resampler.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/system_config_types.h"
#include "sys_event.h"
#include "sys_sync.h"
#include "util/cond.h"
#include "util/simple_ringbuf.h"
#include "util/transactional_storage.h"
#if defined(unix) || defined(__unix) || defined(__unix__)
// For BSD detection
#include <sys/param.h>
#endif
#ifdef _WIN32
#include <windows.h>
#elif defined(BSD) || defined(__APPLE__)
#include <sys/event.h>
#endif
// Fixed hardware parameters of the RSXAUDIO block: stream/block geometry,
// per-port channel counts, ring buffer sizing and base sample clocks.
enum : u32 {
  SYS_RSXAUDIO_SERIAL_STREAM_CNT = 4,
  SYS_RSXAUDIO_STREAM_DATA_BLK_CNT = 4,
  SYS_RSXAUDIO_DATA_BLK_SIZE = 256,
  SYS_RSXAUDIO_STREAM_SIZE =
      SYS_RSXAUDIO_DATA_BLK_SIZE * SYS_RSXAUDIO_STREAM_DATA_BLK_CNT,
  SYS_RSXAUDIO_CH_PER_STREAM = 2,
  SYS_RSXAUDIO_SERIAL_MAX_CH = 8,
  SYS_RSXAUDIO_SPDIF_MAX_CH = 2,
  SYS_RSXAUDIO_STREAM_SAMPLE_CNT =
      SYS_RSXAUDIO_STREAM_SIZE / SYS_RSXAUDIO_CH_PER_STREAM / sizeof(f32),
  SYS_RSXAUDIO_RINGBUF_BLK_SZ_SERIAL =
      SYS_RSXAUDIO_STREAM_SIZE * SYS_RSXAUDIO_SERIAL_STREAM_CNT,
  SYS_RSXAUDIO_RINGBUF_BLK_SZ_SPDIF = SYS_RSXAUDIO_STREAM_SIZE,
  SYS_RSXAUDIO_RINGBUF_SZ = 16,
  SYS_RSXAUDIO_AVPORT_CNT = 5,
  // Base clocks from which the per-port frequency dividers derive rates.
  SYS_RSXAUDIO_FREQ_BASE_384K = 384000,
  SYS_RSXAUDIO_FREQ_BASE_352K = 352800,
  SYS_RSXAUDIO_PORT_CNT = 3,
  SYS_RSXAUDIO_SPDIF_CNT = 2,
};
// Physical AV output ports addressable by the RSXAUDIO block.
enum class RsxaudioAvportIdx : u8 {
  HDMI_0 = 0,
  HDMI_1 = 1,
  AVMULTI = 2,
  SPDIF_0 = 3,
  SPDIF_1 = 4,
};
// Internal audio transport ports (data sources feeding the AV ports).
enum class RsxaudioPort : u8 {
  SERIAL = 0,
  SPDIF_0 = 1,
  SPDIF_1 = 2,
  INVALID = 0xFF,
};
// Sample width in bytes.
enum class RsxaudioSampleSize : u8 {
  _16BIT = 2,
  _32BIT = 4,
};
// Layout of the shared-memory page exported to the guest via
// sys_rsxaudio_import_shared_memory. Sizes are pinned by the static_asserts
// below; do not reorder or resize members.
struct rsxaudio_shmem {
  // Per-port DMA ring buffer descriptor.
  struct ringbuf_t {
    struct entry_t {
      be_t<u32> valid{};
      be_t<u32> unk1{};
      be_t<u64> audio_blk_idx{};
      be_t<u64> timestamp{};
      be_t<u32> buf_addr{};
      be_t<u32> dma_addr{};
    };
    be_t<u32> active{};
    be_t<u32> unk2{};
    be_t<s32> read_idx{};
    be_t<u32> write_idx{};
    be_t<s32> rw_max_idx{};
    be_t<s32> queue_notify_idx{};
    be_t<s32> queue_notify_step{};
    be_t<u32> unk6{};
    be_t<u32> dma_silence_addr{};
    be_t<u32> unk7{};
    be_t<u64> next_blk_idx{};
    entry_t entries[16]{};
  };
  // Underflow event bookkeeping for one channel.
  struct uf_event_t {
    be_t<u64> unk1{};
    be_t<u32> uf_event_cnt{};
    u8 unk2[244]{};
  };
  // Control page: ring buffers, event queue ids and SPDIF channel-status data.
  struct ctrl_t {
    ringbuf_t ringbuf[SYS_RSXAUDIO_PORT_CNT]{};
    be_t<u32> unk1{};
    be_t<u32> event_queue_1_id{};
    u8 unk2[16]{};
    be_t<u32> event_queue_2_id{};
    be_t<u32> spdif_ch0_channel_data_lo{};
    be_t<u32> spdif_ch0_channel_data_hi{};
    be_t<u32> spdif_ch0_channel_data_tx_cycles{};
    be_t<u32> unk3{};
    be_t<u32> event_queue_3_id{};
    be_t<u32> spdif_ch1_channel_data_lo{};
    be_t<u32> spdif_ch1_channel_data_hi{};
    be_t<u32> spdif_ch1_channel_data_tx_cycles{};
    be_t<u32> unk4{};
    be_t<u32> intr_thread_prio{};
    be_t<u32> unk5{};
    u8 unk6[248]{};
    uf_event_t channel_uf[SYS_RSXAUDIO_PORT_CNT]{};
    u8 pad[0x3530]{};
  };
  // DMA staging regions, one per transport port, plus a silence source.
  u8 dma_serial_region[0x10000]{};
  u8 dma_spdif_0_region[0x4000]{};
  u8 dma_spdif_1_region[0x4000]{};
  u8 dma_silence_region[0x4000]{};
  ctrl_t ctrl{};
};
static_assert(sizeof(rsxaudio_shmem::ringbuf_t) == 0x230U,
              "rsxAudioRingBufSizeTest");
static_assert(sizeof(rsxaudio_shmem::uf_event_t) == 0x100U,
              "rsxAudioUfEventTest");
static_assert(sizeof(rsxaudio_shmem::ctrl_t) == 0x4000U,
              "rsxAudioCtrlSizeTest");
static_assert(sizeof(rsxaudio_shmem) == 0x20000U, "rsxAudioShmemSizeTest");
// Selector for sys_rsxaudio_get_dma_param.
enum rsxaudio_dma_flag : u32 { IO_BASE = 0, IO_ID = 1 };
// Kernel object backing an RSXAUDIO handle; owns the shared-memory page and
// the per-port event queues.
struct lv2_rsxaudio final : lv2_obj {
  static constexpr u32 id_base = 0x60000000;
  static constexpr u64 dma_io_id = 1;
  static constexpr u32 dma_io_base = 0x30000000;
  shared_mutex mutex{};
  bool init = false;
  // Guest address of the rsxaudio_shmem page (0 until allocated).
  vm::addr_t shmem{};
  std::array<shared_ptr<lv2_event_queue>, SYS_RSXAUDIO_PORT_CNT> event_queue{};
  // lv2 uses port memory addresses for their names
  static constexpr std::array<u64, SYS_RSXAUDIO_PORT_CNT> event_port_name{
      0x8000000000400100, 0x8000000000400200, 0x8000000000400300};
  lv2_rsxaudio() noexcept = default;
  lv2_rsxaudio(utils::serial &ar) noexcept;
  void save(utils::serial &ar);
  // Revoke all guest access to the shared page (protection cleared).
  void page_lock() {
    ensure(shmem && vm::page_protect(shmem, sizeof(rsxaudio_shmem), 0, 0,
                                     vm::page_readable | vm::page_writable |
                                         vm::page_executable));
  }
  // Restore guest read/write access to the shared page.
  void page_unlock() {
    ensure(shmem && vm::page_protect(shmem, sizeof(rsxaudio_shmem), 0,
                                     vm::page_readable | vm::page_writable));
  }
  // Host-side pointer to the page via the sudo (always-writable) mirror,
  // usable even while the guest mapping is locked.
  rsxaudio_shmem *get_rw_shared_page() const {
    return reinterpret_cast<rsxaudio_shmem *>(vm::g_sudo_addr + u32{shmem});
  }
};
// OS periodic timer multiplexing up to VTIMER_MAX virtual timers; the wait
// machinery is implemented per platform (Windows waitable timer, Linux
// timerfd+epoll, BSD/macOS kqueue).
class rsxaudio_periodic_tmr {
public:
  enum class wait_result {
    SUCCESS,
    INVALID_PARAM,
    TIMEOUT,
    TIMER_ERROR,
    TIMER_CANCELED,
  };
  rsxaudio_periodic_tmr();
  ~rsxaudio_periodic_tmr();
  rsxaudio_periodic_tmr(const rsxaudio_periodic_tmr &) = delete;
  rsxaudio_periodic_tmr &operator=(const rsxaudio_periodic_tmr &) = delete;
  // Wait until timer fires and calls callback.
  wait_result wait(const std::function<void()> &callback);
  // Cancel wait() call
  void cancel_wait();
  // VTimer functions
  // Run `func` under the lock, then reschedule the underlying OS timer to
  // reflect any vtimer changes the callback made.
  void vtimer_access_sec(std::invocable<> auto func) {
    std::lock_guard lock(mutex);
    std::invoke(func);
    // Adjust timer expiration
    cancel_timer_unlocked();
    sched_timer();
  }
  void enable_vtimer(u32 vtimer_id, u32 rate, u64 crnt_time);
  void disable_vtimer(u32 vtimer_id);
  bool is_vtimer_behind(u32 vtimer_id, u64 crnt_time) const;
  void vtimer_skip_periods(u32 vtimer_id, u64 crnt_time);
  void vtimer_incr(u32 vtimer_id, u64 crnt_time);
  bool is_vtimer_active(u32 vtimer_id) const;
  u64 vtimer_get_sched_time(u32 vtimer_id) const;
private:
  static constexpr u64 MAX_BURST_PERIODS = SYS_RSXAUDIO_RINGBUF_SZ;
  static constexpr u32 VTIMER_MAX = 4;
  // One virtual timer: block counter plus per-block period in microseconds.
  struct vtimer {
    u64 blk_cnt = 0;
    f64 blk_time = 0.0;
    bool active = false;
  };
  std::array<vtimer, VTIMER_MAX> vtmr_pool{};
  shared_mutex mutex{};
  bool in_wait = false;
  bool zero_period = false;
#if defined(_WIN32)
  HANDLE cancel_event{};
  HANDLE timer_handle{};
#elif defined(__linux__)
  int cancel_event{};
  int timer_handle{};
  int epoll_fd{};
#elif defined(BSD) || defined(__APPLE__)
  static constexpr u64 TIMER_ID = 0;
  static constexpr u64 CANCEL_ID = 1;
  int kq{};
  struct kevent handle[2]{};
#else
#error "Implement"
#endif
  void sched_timer();
  void cancel_timer_unlocked();
  void reset_cancel_flag();
  bool is_vtimer_behind(const vtimer &vtimer, u64 crnt_time) const;
  u64 get_crnt_blk(u64 crnt_time, f64 blk_time) const;
  f64 get_blk_time(u32 data_rate) const;
  u64 get_rel_next_time();
};
// Snapshot of the emulated RSXAUDIO hardware configuration (serial port,
// both SPDIF ports, both HDMI encoders, and the AV-port routing table).
struct rsxaudio_hw_param_t {
  struct serial_param_t {
    bool dma_en = false;
    bool buf_empty_en = false;
    bool muted = true;
    bool en = false;
    u8 freq_div = 8; // divider applied to serial_freq_base
    RsxaudioSampleSize depth = RsxaudioSampleSize::_16BIT;
  };
  struct spdif_param_t {
    bool dma_en = false;
    bool buf_empty_en = false;
    bool muted = true;
    bool en = false;
    bool use_serial_buf = true;
    u8 freq_div = 8; // divider applied to spdif_freq_base
    RsxaudioSampleSize depth = RsxaudioSampleSize::_16BIT;
    std::array<u8, 6> cs_data = {
        0x00, 0x90, 0x00,
        0x40, 0x80, 0x00}; // HW supports only 6 bytes (uart pkt has 8)
  };
  struct hdmi_param_t {
    struct hdmi_ch_cfg_t {
      std::array<u8, SYS_RSXAUDIO_SERIAL_MAX_CH> map{};
      AudioChannelCnt total_ch_cnt = AudioChannelCnt::STEREO;
    };
    // Channel-map value that mutes the corresponding output channel.
    static constexpr u8 MAP_SILENT_CH = umax;
    bool init = false;
    hdmi_ch_cfg_t ch_cfg{};
    std::array<u8, 5> info_frame{}; // TODO: check chstat and info_frame for
                                    // info on audio layout, add default values
    std::array<u8, 5> chstat{};
    bool muted = true;
    bool force_mute = true;
    bool use_spdif_1 = false; // TODO: unused for now
  };
  u32 serial_freq_base = SYS_RSXAUDIO_FREQ_BASE_384K;
  u32 spdif_freq_base = SYS_RSXAUDIO_FREQ_BASE_352K;
  bool avmulti_av_muted = true;
  serial_param_t serial{};
  spdif_param_t spdif[2]{};
  hdmi_param_t hdmi[2]{};
  // Which transport port feeds each AV port (INVALID = unrouted).
  std::array<RsxaudioPort, SYS_RSXAUDIO_AVPORT_CNT> avport_src = {
      RsxaudioPort::INVALID, RsxaudioPort::INVALID, RsxaudioPort::INVALID,
      RsxaudioPort::INVALID, RsxaudioPort::INVALID};
};
// 16-bit PCM converted into float, so buffer must be twice as big
using ra_stream_blk_t = std::array<f32, SYS_RSXAUDIO_STREAM_SAMPLE_CNT * 2>;
// One block of decoded audio plus the routing logic that renders it into an
// interleaved PCM stream for a chosen AV port. Non-copyable/non-movable:
// it only borrows the hw-param and buffer references passed at construction.
class rsxaudio_data_container {
public:
  // Decoded channel data for every transport port.
  struct buf_t {
    std::array<ra_stream_blk_t, SYS_RSXAUDIO_SERIAL_MAX_CH> serial{};
    std::array<ra_stream_blk_t, SYS_RSXAUDIO_SPDIF_MAX_CH>
        spdif[SYS_RSXAUDIO_SPDIF_CNT]{};
  };
  // Interleaved output large enough for the worst case (8ch 16-bit blocks).
  using data_blk_t = std::array<f32, SYS_RSXAUDIO_STREAM_SAMPLE_CNT *
                                         SYS_RSXAUDIO_SERIAL_MAX_CH * 2>;
  rsxaudio_data_container(const rsxaudio_hw_param_t &hw_param, const buf_t &buf,
                          bool serial_rdy, bool spdif_0_rdy, bool spdif_1_rdy);
  u32 get_data_size(RsxaudioAvportIdx avport);
  void get_data(RsxaudioAvportIdx avport, data_blk_t &data_out);
  // True once get_data() has consumed the payload at least once.
  bool data_was_used();
private:
  const rsxaudio_hw_param_t &hwp;
  const buf_t &out_buf;
  std::array<bool, 5> avport_data_avail{};
  u8 hdmi_stream_cnt[2]{};
  bool data_was_written = false;
  rsxaudio_data_container(const rsxaudio_data_container &) = delete;
  rsxaudio_data_container &operator=(const rsxaudio_data_container &) = delete;
  rsxaudio_data_container(rsxaudio_data_container &&) = delete;
  rsxaudio_data_container &operator=(rsxaudio_data_container &&) = delete;
  // Mix individual channels into final PCM stream. Channels in channel map that
  // are > input_ch_cnt treated as silent.
  template <usz output_ch_cnt, usz input_ch_cnt>
    requires(output_ch_cnt > 0 && output_ch_cnt <= 8 && input_ch_cnt > 0)
  constexpr void
  mix(const std::array<u8, 8> &ch_map, RsxaudioSampleSize sample_size,
      const std::array<ra_stream_blk_t, input_ch_cnt> &input_channels,
      data_blk_t &data_out) {
    const ra_stream_blk_t silent_channel{};
    // Build final map
    std::array<const ra_stream_blk_t *, output_ch_cnt> real_input_ch = {};
    for (u64 ch_idx = 0; ch_idx < output_ch_cnt; ch_idx++) {
      if (ch_map[ch_idx] >= input_ch_cnt) {
        real_input_ch[ch_idx] = &silent_channel;
      } else {
        real_input_ch[ch_idx] = &input_channels[ch_map[ch_idx]];
      }
    }
    // 16-bit sources were widened to floats, so they carry twice the samples.
    const u32 samples_in_buf = sample_size == RsxaudioSampleSize::_16BIT
                                   ? SYS_RSXAUDIO_STREAM_SAMPLE_CNT * 2
                                   : SYS_RSXAUDIO_STREAM_SAMPLE_CNT;
    // Interleave: unrolled per-channel stores keep the loop branch-free for
    // each fixed output_ch_cnt instantiation.
    for (u32 sample_idx = 0; sample_idx < samples_in_buf * output_ch_cnt;
         sample_idx += output_ch_cnt) {
      const u32 src_sample_idx = sample_idx / output_ch_cnt;
      if constexpr (output_ch_cnt >= 1)
        data_out[sample_idx + 0] = (*real_input_ch[0])[src_sample_idx];
      if constexpr (output_ch_cnt >= 2)
        data_out[sample_idx + 1] = (*real_input_ch[1])[src_sample_idx];
      if constexpr (output_ch_cnt >= 3)
        data_out[sample_idx + 2] = (*real_input_ch[2])[src_sample_idx];
      if constexpr (output_ch_cnt >= 4)
        data_out[sample_idx + 3] = (*real_input_ch[3])[src_sample_idx];
      if constexpr (output_ch_cnt >= 5)
        data_out[sample_idx + 4] = (*real_input_ch[4])[src_sample_idx];
      if constexpr (output_ch_cnt >= 6)
        data_out[sample_idx + 5] = (*real_input_ch[5])[src_sample_idx];
      if constexpr (output_ch_cnt >= 7)
        data_out[sample_idx + 6] = (*real_input_ch[6])[src_sample_idx];
      if constexpr (output_ch_cnt >= 8)
        data_out[sample_idx + 7] = (*real_input_ch[7])[src_sample_idx];
    }
  }
};
namespace audio {
// Re-reads emulator audio settings and applies them to the RSXAUDIO backend.
void configure_rsxaudio();
}
// Host output thread: pulls mixed RSXAUDIO data, resamples/stretches it and
// feeds the selected host audio backend.
class rsxaudio_backend_thread {
public:
  // Stream format of one AV port.
  struct port_config {
    AudioFreq freq = AudioFreq::FREQ_48K;
    AudioChannelCnt ch_cnt = AudioChannelCnt::STEREO;
    auto operator<=>(const port_config &) const = default;
  };
  // One flag per AV port, in RsxaudioAvportIdx order.
  struct avport_bit {
    bool hdmi_0 : 1;
    bool hdmi_1 : 1;
    bool avmulti : 1;
    bool spdif_0 : 1;
    bool spdif_1 : 1;
  };
  rsxaudio_backend_thread();
  ~rsxaudio_backend_thread();
  void operator()();
  rsxaudio_backend_thread &operator=(thread_state state);
  void set_new_stream_param(
      const std::array<port_config, SYS_RSXAUDIO_AVPORT_CNT> &cfg,
      avport_bit muted_avports);
  void set_mute_state(avport_bit muted_avports);
  void add_data(rsxaudio_data_container &cont);
  void update_emu_cfg();
  u32 get_sample_rate() const;
  u8 get_channel_count() const;
  static constexpr auto thread_name = "RsxAudio Backend Thread"sv;
  SAVESTATE_INIT_POS(8.91); // Depends on audio_out_configuration
private:
  // Emulator-side audio settings snapshot (compared to detect changes).
  struct emu_audio_cfg {
    std::string audio_device{};
    s64 desired_buffer_duration = 0;
    f64 time_stretching_threshold = 0;
    bool buffering_enabled = false;
    bool convert_to_s16 = false;
    bool enable_time_stretching = false;
    bool dump_to_file = false;
    AudioChannelCnt channels = AudioChannelCnt::STEREO;
    audio_channel_layout channel_layout = audio_channel_layout::automatic;
    audio_renderer renderer = audio_renderer::null;
    audio_provider provider = audio_provider::none;
    RsxaudioAvportIdx avport = RsxaudioAvportIdx::HDMI_0;
    auto operator<=>(const emu_audio_cfg &) const = default;
  };
  struct rsxaudio_state {
    std::array<port_config, SYS_RSXAUDIO_AVPORT_CNT> port{};
  };
  // State shared with the backend write callback; packed into 16 bytes so it
  // can be swapped atomically.
  struct alignas(16) callback_config {
    static constexpr u16 VOL_NOMINAL = 10000;
    static constexpr f32 VOL_NOMINAL_INV = 1.0f / VOL_NOMINAL;
    u32 freq : 20 = 48000;
    u16 target_volume = 10000;
    u16 initial_volume = 10000;
    u16 current_volume = 10000;
    RsxaudioAvportIdx avport_idx = RsxaudioAvportIdx::HDMI_0;
    u8 mute_state : SYS_RSXAUDIO_AVPORT_CNT = 0b11111;
    u8 input_ch_cnt : 4 = 2;
    u8 output_channel_layout : 4 =
        static_cast<u8>(audio_channel_layout::stereo);
    bool ready : 1 = false;
    bool convert_to_s16 : 1 = false;
    bool cfg_changed : 1 = false;
    bool callback_active : 1 = false;
  };
  static_assert(sizeof(callback_config) <= 16);
  struct backend_config {
    port_config cfg{};
    RsxaudioAvportIdx avport = RsxaudioAvportIdx::HDMI_0;
  };
  // Service cadence (microseconds) for the housekeeping loop.
  static constexpr u64 ERROR_SERVICE_PERIOD = 500'000;
  static constexpr u64 SERVICE_PERIOD = 10'000;
  static constexpr f64 SERVICE_PERIOD_SEC = SERVICE_PERIOD / 1'000'000.0;
  static constexpr u64 SERVICE_THRESHOLD = 1'500;
  // NOTE(review): float literal 0.1f initializing an f64 constant narrows the
  // value slightly (~0.100000001) — confirm whether `0.1` was intended.
  static constexpr f64 TIME_STRETCHING_STEP = 0.1f;
  u64 start_time = get_system_time();
  u64 time_period_idx = 1;
  // Pending configuration handed over from other threads under
  // state_update_m; *_changed flags tell the worker to pick them up.
  emu_audio_cfg new_emu_cfg{};
  bool emu_cfg_changed = true;
  rsxaudio_state new_ra_state{};
  bool ra_state_changed = true;
  shared_mutex state_update_m{};
  cond_variable state_update_c{};
  simple_ringbuf ringbuf{};
  simple_ringbuf aux_ringbuf{};
  std::vector<u8> thread_tmp_buf{};
  std::vector<f32> callback_tmp_buf{};
  bool use_aux_ringbuf = false;
  shared_mutex ringbuf_mutex{};
  std::shared_ptr<AudioBackend> backend{};
  backend_config backend_current_cfg{{}, new_emu_cfg.avport};
  atomic_t<callback_config> callback_cfg{};
  bool backend_error_occured = false;
  bool backend_device_changed = false;
  AudioDumper dumper{};
  audio_resampler resampler{};
  // Backend
  void backend_init(const rsxaudio_state &ra_state,
                    const emu_audio_cfg &emu_cfg, bool reset_backend = true);
  void backend_start();
  void backend_stop();
  bool backend_playing();
  u32 write_data_callback(u32 bytes, void *buf);
  void state_changed_callback(AudioStateEvent event);
  // Time management
  u64 get_time_until_service();
  void update_service_time();
  void reset_service_time();
  // Helpers
  static emu_audio_cfg get_emu_cfg();
  static u8 gen_mute_state(avport_bit avports);
  static RsxaudioAvportIdx convert_avport(audio_avport avport);
};
// Guest-side data pump: drains the shared-memory ring buffers on a periodic
// timer and forwards decoded audio to the backend thread.
class rsxaudio_data_thread {
public:
  // Prevent creation of multiple rsxaudio contexts
  atomic_t<bool> rsxaudio_ctx_allocated = false;
  shared_mutex rsxaudio_obj_upd_m{};
  shared_ptr<lv2_rsxaudio> rsxaudio_obj_ptr{};
  void operator()();
  rsxaudio_data_thread &operator=(thread_state state);
  rsxaudio_data_thread();
  // Apply a mutation to the hardware-parameter snapshot (thread-safe).
  void
  update_hw_param(std::function<void(rsxaudio_hw_param_t &)> update_callback);
  void update_mute_state(RsxaudioPort port, bool muted);
  void update_av_mute_state(RsxaudioAvportIdx avport, bool muted,
                            bool force_mute, bool set = true);
  void reset_hw();
  static constexpr auto thread_name = "RsxAudioData Thread"sv;
private:
  rsxaudio_data_container::buf_t output_buf{};
  transactional_storage<rsxaudio_hw_param_t> hw_param_ts{
      std::make_shared<universal_pool>(),
      std::make_shared<rsxaudio_hw_param_t>()};
  rsxaudio_periodic_tmr timer{};
  void advance_all_timers();
  void extract_audio_data();
  static std::pair<bool /*data_present*/, void * /*addr*/>
  get_ringbuf_addr(RsxaudioPort dst, const lv2_rsxaudio &rsxaudio_obj);
  // PCM sample widening helpers (s16/s32 -> f32).
  static f32 pcm_to_float(s32 sample);
  static f32 pcm_to_float(s16 sample);
  static void pcm_serial_process_channel(RsxaudioSampleSize word_bits,
                                         ra_stream_blk_t &buf_out_l,
                                         ra_stream_blk_t &buf_out_r,
                                         const void *buf_in, u8 src_stream);
  static void pcm_spdif_process_channel(RsxaudioSampleSize word_bits,
                                        ra_stream_blk_t &buf_out_l,
                                        ra_stream_blk_t &buf_out_r,
                                        const void *buf_in);
  bool enqueue_data(RsxaudioPort dst, bool silence, const void *src_addr,
                    const rsxaudio_hw_param_t &hwp);
  static rsxaudio_backend_thread::avport_bit
  calc_avport_mute_state(const rsxaudio_hw_param_t &hwp);
  static bool calc_port_active_state(RsxaudioPort port,
                                     const rsxaudio_hw_param_t &hwp);
};
using rsx_audio_backend = named_thread<rsxaudio_backend_thread>;
using rsx_audio_data = named_thread<rsxaudio_data_thread>;
// SysCalls
error_code sys_rsxaudio_initialize(vm::ptr<u32> handle);
error_code sys_rsxaudio_finalize(u32 handle);
error_code sys_rsxaudio_import_shared_memory(u32 handle, vm::ptr<u64> addr);
error_code sys_rsxaudio_unimport_shared_memory(u32 handle, vm::ptr<u64> addr);
error_code sys_rsxaudio_create_connection(u32 handle);
error_code sys_rsxaudio_close_connection(u32 handle);
error_code sys_rsxaudio_prepare_process(u32 handle);
error_code sys_rsxaudio_start_process(u32 handle);
error_code sys_rsxaudio_stop_process(u32 handle);
error_code sys_rsxaudio_get_dma_param(u32 handle, u32 flag, vm::ptr<u64> out);

View file

@ -0,0 +1,55 @@
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
// Guest attribute block for sys_rwlock_create (big-endian layout).
struct sys_rwlock_attribute_t {
  be_t<u32> protocol; // scheduling protocol (SYS_SYNC_*)
  be_t<u32> pshared; // process-shared flag
  be_t<u64> ipc_key; // IPC key when process-shared
  be_t<s32> flags;
  be_t<u32> pad;
  union {
    nse_t<u64, 1> name_u64; // name viewed as a raw (unaligned) u64
    char name[sizeof(u64)]; // 8-character name, not null-terminated
  };
};
// Kernel reader/writer lock object.
struct lv2_rwlock final : lv2_obj {
  static const u32 id_base = 0x88000000; // base of the rwlock id namespace
  const lv2_protocol protocol;
  const u64 key; // IPC key (0 when not process-shared)
  const u64 name;
  shared_mutex mutex;
  // Owner word: 0 = free; see implementation for reader/writer encoding.
  atomic_t<s64> owner{0};
  // Intrusive wait queues: readers (rq) and writers (wq).
  ppu_thread *rq{};
  ppu_thread *wq{};
  lv2_rwlock(u32 protocol, u64 key, u64 name) noexcept
      : protocol{static_cast<u8>(protocol)}, key(key), name(name) {}
  // Savestate support.
  lv2_rwlock(utils::serial &ar);
  static std::function<void(void *)> load(utils::serial &ar);
  void save(utils::serial &ar);
};
// Aux
class ppu_thread;
// Syscalls
error_code sys_rwlock_create(ppu_thread &ppu, vm::ptr<u32> rw_lock_id,
vm::ptr<sys_rwlock_attribute_t> attr);
error_code sys_rwlock_destroy(ppu_thread &ppu, u32 rw_lock_id);
error_code sys_rwlock_rlock(ppu_thread &ppu, u32 rw_lock_id, u64 timeout);
error_code sys_rwlock_tryrlock(ppu_thread &ppu, u32 rw_lock_id);
error_code sys_rwlock_runlock(ppu_thread &ppu, u32 rw_lock_id);
error_code sys_rwlock_wlock(ppu_thread &ppu, u32 rw_lock_id, u64 timeout);
error_code sys_rwlock_trywlock(ppu_thread &ppu, u32 rw_lock_id);
error_code sys_rwlock_wunlock(ppu_thread &ppu, u32 rw_lock_id);
constexpr auto _sys_rwlock_trywlock = sys_rwlock_trywlock;

View file

@ -0,0 +1,54 @@
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
// User-supplied attribute block for sys_semaphore_create() (guest ABI layout,
// all fields big-endian).
struct sys_semaphore_attribute_t {
  be_t<u32> protocol; // waiter scheduling policy (SYS_SYNC_FIFO / PRIORITY / ...)
  be_t<u32> pshared;  // process-sharing policy (SYS_SYNC_PROCESS_SHARED or not)
  be_t<u64> ipc_key;  // IPC key, used only when the object is process-shared
  be_t<s32> flags;    // creation policy (SYS_SYNC_NEWLY_CREATED / NOT_CREATE / NOT_CARE)
  be_t<u32> pad;
  union {
    nse_t<u64, 1> name_u64; // the 8-byte name as an unaligned big-endian integer
    char name[sizeof(u64)]; // the same 8 bytes viewed as characters
  };
};
// Kernel (lv2) counting semaphore object.
struct lv2_sema final : lv2_obj {
  static const u32 id_base = 0x96000000; // IDM base for semaphore IDs
  const lv2_protocol protocol; // waiter scheduling policy
  const u64 key;               // IPC key (0 when not process-shared)
  const u64 name;              // raw 8-byte object name
  const s32 max;               // maximum semaphore value
  shared_mutex mutex;
  atomic_t<s32> val; // current value, initialized from the creation argument
  ppu_thread *sq{};  // presumably the sleep queue head (intrusive list) — see sys_semaphore.cpp
  lv2_sema(u32 protocol, u64 key, u64 name, s32 max, s32 value) noexcept
      : protocol{static_cast<u8>(protocol)}, key(key), name(name), max(max),
        val(value) {}
  // Savestate support: deserializing constructor, loader factory, serializer
  lv2_sema(utils::serial &ar);
  static std::function<void(void *)> load(utils::serial &ar);
  void save(utils::serial &ar);
};
// Aux
class ppu_thread;
// Syscalls
error_code sys_semaphore_create(ppu_thread &ppu, vm::ptr<u32> sem_id,
vm::ptr<sys_semaphore_attribute_t> attr,
s32 initial_val, s32 max_val);
error_code sys_semaphore_destroy(ppu_thread &ppu, u32 sem_id);
error_code sys_semaphore_wait(ppu_thread &ppu, u32 sem_id, u64 timeout);
error_code sys_semaphore_trywait(ppu_thread &ppu, u32 sem_id);
error_code sys_semaphore_post(ppu_thread &ppu, u32 sem_id, s32 count);
error_code sys_semaphore_get_value(ppu_thread &ppu, u32 sem_id,
vm::ptr<s32> count);

View file

@ -1,13 +1,16 @@
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// SysCalls
error_code sys_sm_get_ext_event2(vm::ptr<u64> a1, vm::ptr<u64> a2, vm::ptr<u64> a3, u64 a4);
error_code sys_sm_shutdown(ppu_thread& ppu, u16 op, vm::ptr<void> param, u64 size);
error_code sys_sm_get_params(vm::ptr<u8> a, vm::ptr<u8> b, vm::ptr<u32> c, vm::ptr<u64> d);
error_code sys_sm_get_ext_event2(vm::ptr<u64> a1, vm::ptr<u64> a2,
vm::ptr<u64> a3, u64 a4);
error_code sys_sm_shutdown(ppu_thread &ppu, u16 op, vm::ptr<void> param,
u64 size);
error_code sys_sm_get_params(vm::ptr<u8> a, vm::ptr<u8> b, vm::ptr<u32> c,
vm::ptr<u64> d);
error_code sys_sm_set_shop_mode(s32 mode);
error_code sys_sm_control_led(u8 led, u8 action);
error_code sys_sm_ring_buzzer(u64 packet, u64 a1, u64 a2);

View file

@ -0,0 +1,426 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/SPUThread.h"
#include "sys_event.h"
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
#include "util/File.h"
#include <span>
struct lv2_memory_container;
// SPU thread group type flags (sys_spu_thread_group_create attribute 'type')
enum : s32 {
  SYS_SPU_THREAD_GROUP_TYPE_NORMAL = 0x00,
  // SYS_SPU_THREAD_GROUP_TYPE_SEQUENTIAL = 0x01, doesn't exist
  SYS_SPU_THREAD_GROUP_TYPE_SYSTEM = 0x02,
  SYS_SPU_THREAD_GROUP_TYPE_MEMORY_FROM_CONTAINER = 0x04,
  SYS_SPU_THREAD_GROUP_TYPE_NON_CONTEXT = 0x08,
  SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT = 0x18,
  SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM = 0x20,
};
// Exit causes reported by sys_spu_thread_group_join()
enum {
  SYS_SPU_THREAD_GROUP_JOIN_GROUP_EXIT = 0x0001,
  SYS_SPU_THREAD_GROUP_JOIN_ALL_THREADS_EXIT = 0x0002,
  SYS_SPU_THREAD_GROUP_JOIN_TERMINATED = 0x0004
};
// Event types for sys_spu_thread_group_connect_event()
enum {
  SYS_SPU_THREAD_GROUP_EVENT_RUN = 1,
  SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION = 2,
  SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE = 4,
};
// Event queue keys used when sending the group events above
// (high word 0xFFFFFFFF, low word ASCII "SPU" + event id)
enum : u64 {
  SYS_SPU_THREAD_GROUP_EVENT_RUN_KEY = 0xFFFFFFFF53505500ull,
  SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION_KEY = 0xFFFFFFFF53505503ull,
  SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE_KEY = 0xFFFFFFFF53505504ull,
};
// Commands for sys_spu_thread_group_log()
enum {
  SYS_SPU_THREAD_GROUP_LOG_ON = 0x0,
  SYS_SPU_THREAD_GROUP_LOG_OFF = 0x1,
  SYS_SPU_THREAD_GROUP_LOG_GET_STATUS = 0x2,
};
// SPU thread group run-state machine values
enum spu_group_status : u32 {
  SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED,
  SPU_THREAD_GROUP_STATUS_INITIALIZED,
  SPU_THREAD_GROUP_STATUS_READY,
  SPU_THREAD_GROUP_STATUS_WAITING,
  SPU_THREAD_GROUP_STATUS_SUSPENDED,
  SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED,
  SPU_THREAD_GROUP_STATUS_RUNNING,
  SPU_THREAD_GROUP_STATUS_STOPPED,
  SPU_THREAD_GROUP_STATUS_DESTROYED, // Internal state
  SPU_THREAD_GROUP_STATUS_UNKNOWN,
};
// SPU image segment types (sys_spu_segment::type)
enum : s32 {
  SYS_SPU_SEGMENT_TYPE_COPY = 1,
  SYS_SPU_SEGMENT_TYPE_FILL = 2,
  SYS_SPU_SEGMENT_TYPE_INFO = 4,
};
// Well-known SPU stop-and-signal codes used for syscalls from SPU side
enum spu_stop_syscall : u32 {
  SYS_SPU_THREAD_STOP_YIELD = 0x0100,
  SYS_SPU_THREAD_STOP_GROUP_EXIT = 0x0101,
  SYS_SPU_THREAD_STOP_THREAD_EXIT = 0x0102,
  SYS_SPU_THREAD_STOP_RECEIVE_EVENT = 0x0110,
  SYS_SPU_THREAD_STOP_TRY_RECEIVE_EVENT = 0x0111,
  SYS_SPU_THREAD_STOP_SWITCH_SYSTEM_MODULE = 0x0120,
};
// Guest-ABI attribute block for sys_spu_thread_group_create()
struct sys_spu_thread_group_attribute {
  be_t<u32> nsize; // name length including NULL terminator
  vm::bcptr<char> name; // guest pointer to the group name
  be_t<s32> type;  // SYS_SPU_THREAD_GROUP_TYPE_* flags
  be_t<u32> ct;    // memory container id
};
// Options for sys_spu_thread_attribute::option
enum : u32 {
  SYS_SPU_THREAD_OPTION_NONE = 0,
  SYS_SPU_THREAD_OPTION_ASYNC_INTR_ENABLE = 1,
  SYS_SPU_THREAD_OPTION_DEC_SYNC_TB_ENABLE = 2,
};
// Guest-ABI attribute block for sys_spu_thread_initialize()
struct sys_spu_thread_attribute {
  vm::bcptr<char> name; // guest pointer to the thread name
  be_t<u32> name_len;   // name length
  be_t<u32> option;     // SYS_SPU_THREAD_OPTION_* flags
};
// Four 64-bit arguments passed to an SPU thread at start
struct sys_spu_thread_argument {
  be_t<u64> arg1;
  be_t<u64> arg2;
  be_t<u64> arg3;
  be_t<u64> arg4;
};
// One segment of an SPU image (guest ABI; see SYS_SPU_SEGMENT_TYPE_*)
struct sys_spu_segment {
  ENABLE_BITWISE_SERIALIZATION;
  be_t<s32> type; // copy, fill, info
  be_t<u32> ls; // local storage address
  be_t<u32> size;
  union {
    be_t<u32> addr; // address or fill value
    u64 pad; // pads the union to 8 bytes so the struct is 0x18 bytes total
  };
};
CHECK_SIZE(sys_spu_segment, 0x18);
// Guest-visible SPU image descriptor plus helpers to build its segment table
// from ELF program headers.
struct sys_spu_image {
  be_t<u32> type; // user, kernel
  be_t<u32> entry_point; // Note: in kernel mode it's used to store id
  vm::bptr<sys_spu_segment> segs; // guest pointer to the segment array
  be_t<s32> nsegs;                // number of segments
  // Count how many sys_spu_segment entries fill() would emit for the given
  // program headers. Only p_type 1 (PT_LOAD) and 4 (PT_NOTE) are accepted;
  // any other type yields -1. A partially-backed PT_LOAD (p_filesz != p_memsz
  // with a nonzero file part) needs two segments: a COPY plus a FILL.
  // CountInfo controls whether PT_NOTE headers are counted (as INFO segments).
  template <bool CountInfo = true, typename Phdrs>
  static s32 get_nsegs(const Phdrs &phdrs) {
    s32 num_segs = 0;
    for (const auto &phdr : phdrs) {
      if (phdr.p_type != 1u && phdr.p_type != 4u) {
        return -1;
      }
      if (phdr.p_type == 1u && phdr.p_filesz != phdr.p_memsz && phdr.p_filesz) {
        num_segs += 2;
      } else if (phdr.p_type == 1u || CountInfo) {
        num_segs += 1;
      }
    }
    return num_segs;
  }
  // Populate up to nsegs entries of 'segs' from the program headers.
  // 'src' is the base address the file was loaded at, so COPY/INFO segment
  // addresses are file offsets rebased onto it.
  // Returns the number of segments written, -2 if nsegs is too small,
  // or -1 on an unsupported header type. Must agree with get_nsegs().
  template <bool WriteInfo = true, typename Phdrs>
  static s32 fill(vm::ptr<sys_spu_segment> segs, s32 nsegs, const Phdrs &phdrs,
                  u32 src) {
    s32 num_segs = 0;
    for (const auto &phdr : phdrs) {
      if (phdr.p_type == 1u) {
        // PT_LOAD: file-backed part becomes a COPY segment
        if (phdr.p_filesz) {
          if (num_segs >= nsegs) {
            return -2;
          }
          auto *seg = &segs[num_segs++];
          seg->type = SYS_SPU_SEGMENT_TYPE_COPY;
          seg->ls = static_cast<u32>(phdr.p_vaddr);
          seg->size = static_cast<u32>(phdr.p_filesz);
          seg->addr = static_cast<u32>(phdr.p_offset + src);
        }
        // Zero-initialized tail (BSS-like) becomes a FILL segment
        if (phdr.p_memsz > phdr.p_filesz) {
          if (num_segs >= nsegs) {
            return -2;
          }
          auto *seg = &segs[num_segs++];
          seg->type = SYS_SPU_SEGMENT_TYPE_FILL;
          seg->ls = static_cast<u32>(phdr.p_vaddr + phdr.p_filesz);
          seg->size = static_cast<u32>(phdr.p_memsz - phdr.p_filesz);
          seg->addr = 0;
        }
      } else if (WriteInfo && phdr.p_type == 4u) {
        // PT_NOTE: recorded as a fixed-size INFO segment; the +0x14 offset
        // presumably skips the note header to its payload — confirm in loader
        if (num_segs >= nsegs) {
          return -2;
        }
        auto *seg = &segs[num_segs++];
        seg->type = SYS_SPU_SEGMENT_TYPE_INFO;
        seg->size = 0x20;
        seg->addr = static_cast<u32>(phdr.p_offset + 0x14 + src);
      } else if (phdr.p_type != 4u) {
        return -1;
      }
    }
    return num_segs;
  }
  // Parse an SPU ELF from 'stream' and initialize this descriptor
  void load(const fs::file &stream);
  // Release resources owned by this image
  void free() const;
  // Apply the segment list to an SPU local-storage buffer at 'loc'
  static void deploy(u8 *loc, std::span<const sys_spu_segment> segs,
                     bool is_verbose = true);
};
// SPU image access modes
enum : u32 {
  SYS_SPU_IMAGE_PROTECT = 0,
  SYS_SPU_IMAGE_DIRECT = 1,
};
// Kernel-side handle for a loaded (protected) SPU image
struct lv2_spu_image : lv2_obj {
  static const u32 id_base = 0x22000000; // IDM base for SPU image IDs
  const u32 e_entry;                     // image entry point
  const vm::ptr<sys_spu_segment> segs;   // guest segment array
  const s32 nsegs;                       // number of segments
  lv2_spu_image(u32 entry, vm::ptr<sys_spu_segment> segs, s32 nsegs)
      : e_entry(entry), segs(segs), nsegs(nsegs) {}
  // Savestate support
  lv2_spu_image(utils::serial &ar);
  void save(utils::serial &ar);
};
// Output structure of sys_spu_thread_group_syscall_253 (SPURS statistics)
struct sys_spu_thread_group_syscall_253_info {
  be_t<u32> deadlineMeetCounter; // From cellSpursGetInfo
  be_t<u32> deadlineMissCounter; // Same
  be_t<u64> timestamp;
  be_t<u64> _x10[6];
};
// Kernel (lv2) SPU thread group: owns up to 8 SPU threads, their images,
// arguments and the event-queue ports connected to the group.
struct lv2_spu_group {
  static const u32 id_base = 0x04000100;
  static const u32 id_step = 0x100;
  static const u32 id_count = 255;
  static constexpr std::pair<u32, u32> id_invl_range = {0, 8};
  static_assert(spu_thread::id_count == id_count * 6 + 5);
  const std::string name;
  const u32 id;
  const u32 max_num; // maximum number of SPU threads in the group
  const u32 mem_size;
  const s32 type; // SPU Thread Group Type
  lv2_memory_container *const ct; // Memory Container
  const bool has_scheduler_context;
  u32 max_run;
  shared_mutex mutex;
  atomic_t<u32> init; // Initialization Counter
  atomic_t<typename spu_thread::spu_prio_t> prio{}; // SPU Thread Group Priority
  atomic_t<spu_group_status> run_state; // SPU Thread Group State
  atomic_t<s32> exit_status; // SPU Thread Group Exit Status
  atomic_t<u32> join_state; // flags used to detect exit cause and signal
  atomic_t<u32> running = 0; // Number of running threads
  atomic_t<u32> spurs_running = 0;
  atomic_t<u32> stop_count = 0;
  atomic_t<u32> wait_term_count = 0;
  u32 waiter_spu_index = -1; // Index of SPU executing a waiting syscall
  class ppu_thread *waiter = nullptr; // PPU thread joined/waiting on the group
  bool set_terminate = false;
  std::array<shared_ptr<named_thread<spu_thread>>, 8> threads; // SPU Threads
  std::array<s8, 256> threads_map; // SPU Threads map based number
  std::array<std::pair<u32, std::vector<sys_spu_segment>>, 8>
      imgs; // Entry points, SPU image segments
  std::array<std::array<u64, 4>, 8> args; // SPU Thread Arguments
  shared_ptr<lv2_event_queue>
      ep_run; // port for SYS_SPU_THREAD_GROUP_EVENT_RUN events
  shared_ptr<lv2_event_queue>
      ep_exception; // TODO: SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION
  shared_ptr<lv2_event_queue>
      ep_sysmodule; // TODO: SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE
  lv2_spu_group(std::string name, u32 num, s32 _prio, s32 type,
                lv2_memory_container *ct, bool uses_scheduler,
                u32 mem_size) noexcept
      : name(std::move(name)), id(idm::last_id()), max_num(num),
        mem_size(mem_size), type(type), ct(ct),
        has_scheduler_context(uses_scheduler), max_run(num), init(0),
        run_state(SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED), exit_status(0),
        join_state(0), args({}) {
    threads_map.fill(-1); // -1 marks unused slots
    prio.raw().prio = _prio;
  }
  SAVESTATE_INIT_POS(8); // Dependency on SPUs
  // Savestate support
  lv2_spu_group(utils::serial &ar) noexcept;
  void save(utils::serial &ar);
  // Send an event on the RUN port; CELL_ENOTCONN if no queue is connected
  CellError send_run_event(u64 data1, u64 data2, u64 data3) const {
    return ep_run ? ep_run->send(SYS_SPU_THREAD_GROUP_EVENT_RUN_KEY, data1,
                                 data2, data3)
                  : CELL_ENOTCONN;
  }
  // Send an event on the EXCEPTION port; CELL_ENOTCONN if not connected
  CellError send_exception_event(u64 data1, u64 data2, u64 data3) const {
    return ep_exception
               ? ep_exception->send(SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION_KEY,
                                    data1, data2, data3)
               : CELL_ENOTCONN;
  }
  // Send an event on the SYSTEM_MODULE port; CELL_ENOTCONN if not connected
  CellError send_sysmodule_event(u64 data1, u64 data2, u64 data3) const {
    return ep_sysmodule ? ep_sysmodule->send(
                              SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE_KEY,
                              data1, data2, data3)
                        : CELL_ENOTCONN;
  }
  // Resolve an SPU thread id to the thread and its owning group
  static std::pair<named_thread<spu_thread> *, shared_ptr<lv2_spu_group>>
  get_thread(u32 id);
};
class ppu_thread;
// Syscalls
error_code sys_spu_initialize(ppu_thread &, u32 max_usable_spu,
u32 max_raw_spu);
error_code _sys_spu_image_get_information(ppu_thread &,
vm::ptr<sys_spu_image> img,
vm::ptr<u32> entry_point,
vm::ptr<s32> nsegs);
error_code sys_spu_image_open(ppu_thread &, vm::ptr<sys_spu_image> img,
vm::cptr<char> path);
error_code _sys_spu_image_import(ppu_thread &, vm::ptr<sys_spu_image> img,
u32 src, u32 size, u32 arg4);
error_code _sys_spu_image_close(ppu_thread &, vm::ptr<sys_spu_image> img);
error_code _sys_spu_image_get_segments(ppu_thread &, vm::ptr<sys_spu_image> img,
vm::ptr<sys_spu_segment> segments,
s32 nseg);
error_code sys_spu_thread_initialize(ppu_thread &, vm::ptr<u32> thread,
u32 group, u32 spu_num,
vm::ptr<sys_spu_image>,
vm::ptr<sys_spu_thread_attribute>,
vm::ptr<sys_spu_thread_argument>);
error_code sys_spu_thread_set_argument(ppu_thread &, u32 id,
vm::ptr<sys_spu_thread_argument> arg);
error_code
sys_spu_thread_group_create(ppu_thread &, vm::ptr<u32> id, u32 num, s32 prio,
vm::ptr<sys_spu_thread_group_attribute> attr);
error_code sys_spu_thread_group_destroy(ppu_thread &, u32 id);
error_code sys_spu_thread_group_start(ppu_thread &, u32 id);
error_code sys_spu_thread_group_suspend(ppu_thread &, u32 id);
error_code sys_spu_thread_group_resume(ppu_thread &, u32 id);
error_code sys_spu_thread_group_yield(ppu_thread &, u32 id);
error_code sys_spu_thread_group_terminate(ppu_thread &, u32 id, s32 value);
error_code sys_spu_thread_group_join(ppu_thread &, u32 id, vm::ptr<u32> cause,
vm::ptr<u32> status);
error_code sys_spu_thread_group_set_priority(ppu_thread &, u32 id,
s32 priority);
error_code sys_spu_thread_group_get_priority(ppu_thread &, u32 id,
vm::ptr<s32> priority);
error_code sys_spu_thread_group_connect_event(ppu_thread &, u32 id, u32 eq,
u32 et);
error_code sys_spu_thread_group_disconnect_event(ppu_thread &, u32 id, u32 et);
error_code sys_spu_thread_group_connect_event_all_threads(ppu_thread &, u32 id,
u32 eq_id, u64 req,
vm::ptr<u8> spup);
error_code sys_spu_thread_group_disconnect_event_all_threads(ppu_thread &,
u32 id, u32 spup);
error_code sys_spu_thread_group_set_cooperative_victims(ppu_thread &, u32 id,
u32 threads_mask);
error_code sys_spu_thread_group_syscall_253(
ppu_thread &ppu, u32 id,
vm::ptr<sys_spu_thread_group_syscall_253_info> info);
error_code sys_spu_thread_group_log(ppu_thread &, s32 command,
vm::ptr<s32> stat);
error_code sys_spu_thread_write_ls(ppu_thread &, u32 id, u32 lsa, u64 value,
u32 type);
error_code sys_spu_thread_read_ls(ppu_thread &, u32 id, u32 lsa,
vm::ptr<u64> value, u32 type);
error_code sys_spu_thread_write_spu_mb(ppu_thread &, u32 id, u32 value);
error_code sys_spu_thread_set_spu_cfg(ppu_thread &, u32 id, u64 value);
error_code sys_spu_thread_get_spu_cfg(ppu_thread &, u32 id, vm::ptr<u64> value);
error_code sys_spu_thread_write_snr(ppu_thread &, u32 id, u32 number,
u32 value);
error_code sys_spu_thread_connect_event(ppu_thread &, u32 id, u32 eq, u32 et,
u32 spup);
error_code sys_spu_thread_disconnect_event(ppu_thread &, u32 id, u32 et,
u32 spup);
error_code sys_spu_thread_bind_queue(ppu_thread &, u32 id, u32 spuq,
u32 spuq_num);
error_code sys_spu_thread_unbind_queue(ppu_thread &, u32 id, u32 spuq_num);
error_code sys_spu_thread_get_exit_status(ppu_thread &, u32 id,
vm::ptr<s32> status);
error_code sys_spu_thread_recover_page_fault(ppu_thread &, u32 id);
error_code sys_raw_spu_create(ppu_thread &, vm::ptr<u32> id,
vm::ptr<void> attr);
error_code sys_raw_spu_destroy(ppu_thread &ppu, u32 id);
error_code sys_raw_spu_create_interrupt_tag(ppu_thread &, u32 id, u32 class_id,
u32 hwthread, vm::ptr<u32> intrtag);
error_code sys_raw_spu_set_int_mask(ppu_thread &, u32 id, u32 class_id,
u64 mask);
error_code sys_raw_spu_get_int_mask(ppu_thread &, u32 id, u32 class_id,
vm::ptr<u64> mask);
error_code sys_raw_spu_set_int_stat(ppu_thread &, u32 id, u32 class_id,
u64 stat);
error_code sys_raw_spu_get_int_stat(ppu_thread &, u32 id, u32 class_id,
vm::ptr<u64> stat);
error_code sys_raw_spu_read_puint_mb(ppu_thread &, u32 id, vm::ptr<u32> value);
error_code sys_raw_spu_set_spu_cfg(ppu_thread &, u32 id, u32 value);
error_code sys_raw_spu_get_spu_cfg(ppu_thread &, u32 id, vm::ptr<u32> value);
error_code sys_raw_spu_recover_page_fault(ppu_thread &, u32 id);
error_code sys_isolated_spu_create(ppu_thread &, vm::ptr<u32> id,
vm::ptr<void> image, u64 arg1, u64 arg2,
u64 arg3, u64 arg4);
error_code sys_isolated_spu_start(ppu_thread &, u32 id);
error_code sys_isolated_spu_destroy(ppu_thread &ppu, u32 id);
error_code sys_isolated_spu_create_interrupt_tag(ppu_thread &, u32 id,
u32 class_id, u32 hwthread,
vm::ptr<u32> intrtag);
error_code sys_isolated_spu_set_int_mask(ppu_thread &, u32 id, u32 class_id,
u64 mask);
error_code sys_isolated_spu_get_int_mask(ppu_thread &, u32 id, u32 class_id,
vm::ptr<u64> mask);
error_code sys_isolated_spu_set_int_stat(ppu_thread &, u32 id, u32 class_id,
u64 stat);
error_code sys_isolated_spu_get_int_stat(ppu_thread &, u32 id, u32 class_id,
vm::ptr<u64> stat);
error_code sys_isolated_spu_read_puint_mb(ppu_thread &, u32 id,
vm::ptr<u32> value);
error_code sys_isolated_spu_set_spu_cfg(ppu_thread &, u32 id, u32 value);
error_code sys_isolated_spu_get_spu_cfg(ppu_thread &, u32 id,
vm::ptr<u32> value);

View file

@ -1,11 +1,10 @@
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// Unofficial error code names
enum sys_ss_rng_error : u32
{
enum sys_ss_rng_error : u32 {
SYS_SS_RNG_ERROR_INVALID_PKG = 0x80010500,
SYS_SS_RNG_ERROR_ENOMEM = 0x80010501,
SYS_SS_RNG_ERROR_EAGAIN = 0x80010503,
@ -13,13 +12,13 @@ enum sys_ss_rng_error : u32
SYS_SS_RTC_ERROR_UNK = 0x8001050f,
};
struct CellSsOpenPSID
{
struct CellSsOpenPSID {
be_t<u64> high;
be_t<u64> low;
};
error_code sys_ss_random_number_generator(u64 pkg_id, vm::ptr<void> buf, u64 size);
error_code sys_ss_random_number_generator(u64 pkg_id, vm::ptr<void> buf,
u64 size);
error_code sys_ss_access_control_engine(u64 pkg_id, u64 a2, u64 a3);
error_code sys_ss_get_console_id(vm::ptr<u8> buf);
error_code sys_ss_get_open_psid(vm::ptr<CellSsOpenPSID> psid);
@ -28,6 +27,10 @@ error_code sys_ss_get_cache_of_product_mode(vm::ptr<u8> ptr);
error_code sys_ss_secure_rtc(u64 cmd, u64 a2, u64 a3, u64 a4);
error_code sys_ss_get_cache_of_flash_ext_flag(vm::ptr<u64> flag);
error_code sys_ss_get_boot_device(vm::ptr<u64> dev);
error_code sys_ss_update_manager(u64 pkg_id, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6);
error_code sys_ss_virtual_trm_manager(u64 pkg_id, u64 a1, u64 a2, u64 a3, u64 a4);
error_code sys_ss_individual_info_manager(u64 pkg_id, u64 a2, vm::ptr<u64> out_size, u64 a4, u64 a5, u64 a6);
error_code sys_ss_update_manager(u64 pkg_id, u64 a1, u64 a2, u64 a3, u64 a4,
u64 a5, u64 a6);
error_code sys_ss_virtual_trm_manager(u64 pkg_id, u64 a1, u64 a2, u64 a3,
u64 a4);
error_code sys_ss_individual_info_manager(u64 pkg_id, u64 a2,
vm::ptr<u64> out_size, u64 a4, u64 a5,
u64 a6);

View file

@ -0,0 +1,91 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
#include "util/File.h"
// Known lv2 storage device ids (sys_storage_open 'device' argument)
enum Devices : u64 {
  ATA_HDD = 0x101000000000007,
  BDVD_DRIVE = 0x101000000000006,
  PATA0_HDD_DRIVE = 0x101000000000008,
  PATA0_BDVD_DRIVE = BDVD_DRIVE,
  PATA1_HDD_DRIVE = ATA_HDD,
  BUILTIN_FLASH = 0x100000000000001,
  NAND_FLASH = BUILTIN_FLASH,
  NAND_UNK = 0x100000000000003,
  NOR_FLASH = 0x100000000000004,
  MEMORY_STICK = 0x103000000000010,
  SD_CARD = 0x103000100000010,
  COMPACT_FLASH = 0x103000200000010,
  USB_MASS_STORAGE_1_BASE = 0x10300000000000A, // see USB_MASS_STORAGE_1(n)
  USB_MASS_STORAGE_2_BASE = 0x10300000000001F, // see USB_MASS_STORAGE_2(n)
};
// Kernel handle for an open storage device (returned by sys_storage_open)
struct lv2_storage {
  static const u32 id_base = 0x45000000; // IDM base for storage handles
  static const u32 id_step = 1;
  static const u32 id_count = 2048;
  SAVESTATE_INIT_POS(45);
  const u64 device_id; // one of the Devices values
  const fs::file file; // backing host file
  const u64 mode;      // mode passed to sys_storage_open
  const u64 flags;     // flags passed to sys_storage_open
  lv2_storage(u64 device_id, fs::file &&file, u64 mode, u64 flags)
      : device_id(device_id), file(std::move(file)), mode(mode), flags(flags) {}
};
// Guest-ABI device descriptor filled by sys_storage_get_device_info
// (offsets noted per field)
struct StorageDeviceInfo {
  u8 name[0x20];          // 0x0
  be_t<u32> zero;         // 0x20
  be_t<u32> zero2;        // 0x24
  be_t<u64> sector_count; // 0x28
  be_t<u32> sector_size;  // 0x30
  be_t<u32> one;          // 0x34
  u8 flags[8];            // 0x38
};
// Map a USB mass-storage index to its lv2 device id.
// The parameter is parenthesized so expression arguments (e.g. `a + b` or a
// ternary) expand correctly inside the arithmetic.
#define USB_MASS_STORAGE_1(n) (USB_MASS_STORAGE_1_BASE + (n)) /* For 0-5 */
#define USB_MASS_STORAGE_2(n) \
  (USB_MASS_STORAGE_2_BASE + ((n) - 6)) /* For 6-127 */
// SysCalls
error_code sys_storage_open(u64 device, u64 mode, vm::ptr<u32> fd, u64 flags);
error_code sys_storage_close(u32 fd);
error_code sys_storage_read(u32 fd, u32 mode, u32 start_sector, u32 num_sectors,
vm::ptr<void> bounce_buf, vm::ptr<u32> sectors_read,
u64 flags);
error_code sys_storage_write(u32 fd, u32 mode, u32 start_sector,
u32 num_sectors, vm::ptr<void> data,
vm::ptr<u32> sectors_wrote, u64 flags);
error_code sys_storage_send_device_command(u32 dev_handle, u64 cmd,
vm::ptr<void> in, u64 inlen,
vm::ptr<void> out, u64 outlen);
error_code sys_storage_async_configure(u32 fd, u32 io_buf, u32 equeue_id,
u32 unk);
error_code sys_storage_async_read();
error_code sys_storage_async_write();
error_code sys_storage_async_cancel();
error_code sys_storage_get_device_info(u64 device,
vm::ptr<StorageDeviceInfo> buffer);
error_code sys_storage_get_device_config(vm::ptr<u32> storages,
vm::ptr<u32> devices);
error_code sys_storage_report_devices(u32 storages, u32 start, u32 devices,
vm::ptr<u64> device_ids);
error_code sys_storage_configure_medium_event(u32 fd, u32 equeue_id, u32 c);
error_code sys_storage_set_medium_polling_interval();
error_code sys_storage_create_region();
error_code sys_storage_delete_region();
error_code sys_storage_execute_device_command(
u32 fd, u64 cmd, vm::ptr<char> cmdbuf, u64 cmdbuf_size,
vm::ptr<char> databuf, u64 databuf_size, vm::ptr<u32> driver_status);
error_code sys_storage_check_region_acl();
error_code sys_storage_set_region_acl();
error_code sys_storage_async_send_device_command(u32 dev_handle, u64 cmd,
vm::ptr<void> in, u64 inlen,
vm::ptr<void> out, u64 outlen,
u64 unk);
error_code sys_storage_get_region_offset();
error_code sys_storage_set_emulated_speed();

View file

@ -0,0 +1,469 @@
#pragma once
#include "util/mutex.h"
#include "Emu/CPU/CPUThread.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/IPC.h"
#include "Emu/IdManager.h"
#include "util/shared_ptr.hpp"
// attr_protocol (waiting scheduling policy)
enum lv2_protocol : u8 {
  SYS_SYNC_FIFO = 0x1,             // First In, First Out Order
  SYS_SYNC_PRIORITY = 0x2,         // Priority Order
  SYS_SYNC_PRIORITY_INHERIT = 0x3, // Basic Priority Inheritance Protocol
  SYS_SYNC_RETRY = 0x4,            // Not selected while unlocking
};
enum : u32 {
  SYS_SYNC_ATTR_PROTOCOL_MASK = 0xf, // low nibble of the attribute word
};
// attr_recursive (recursive locks policy)
enum {
  SYS_SYNC_RECURSIVE = 0x10,
  SYS_SYNC_NOT_RECURSIVE = 0x20,
  SYS_SYNC_ATTR_RECURSIVE_MASK = 0xf0,
};
// attr_pshared (sharing among processes policy)
enum {
  SYS_SYNC_PROCESS_SHARED = 0x100,
  SYS_SYNC_NOT_PROCESS_SHARED = 0x200,
  SYS_SYNC_ATTR_PSHARED_MASK = 0xf00,
};
// attr_flags (creation policy)
enum {
  SYS_SYNC_NEWLY_CREATED =
      0x1, // Create new object, fails if specified IPC key exists
  SYS_SYNC_NOT_CREATE =
      0x2, // Reference existing object, fails if IPC key not found
  SYS_SYNC_NOT_CARE =
      0x3, // Reference existing object, create new one if IPC key not found
  SYS_SYNC_ATTR_FLAGS_MASK = 0xf,
};
// attr_adaptive
enum {
  SYS_SYNC_ADAPTIVE = 0x1000,
  SYS_SYNC_NOT_ADAPTIVE = 0x2000,
  SYS_SYNC_ATTR_ADAPTIVE_MASK = 0xf000,
};
enum ppu_thread_status : u32;
// Snapshot returned by lv2_obj::count_non_sleeping_threads()
// (must be taken under the IDM lock)
struct ppu_non_sleeping_count_t {
  bool has_running; // no actual count for optimization sake
  u32 onproc_count; // number of threads currently on a processor
};
// Base class for some kernel objects (shared set of 8192 objects).
// Also hosts the static lv2 scheduler state and the intrusive wait-queue
// helpers shared by all lv2 synchronization primitives.
struct lv2_obj {
  static const u32 id_step = 0x100;
  static const u32 id_count = 8192;
  static constexpr std::pair<u32, u32> id_invl_range = {0, 8};

private:
  // Special pseudo-priority values passed to awake()/awake_unlocked()
  enum thread_cmd : s32 {
    yield_cmd = smin,
    enqueue_cmd,
  };
  // Function executed under IDM mutex, error will make the object creation fail
  // and the error will be returned
  CellError on_id_create() {
    exists++;
    return {};
  }

public:
  SAVESTATE_INIT_POS(4); // Dependency on PPUs
  lv2_obj() noexcept = default;
  lv2_obj(u32 i) noexcept : exists{i} {}
  lv2_obj(lv2_obj &&rhs) noexcept : exists{+rhs.exists} {}
  lv2_obj(utils::serial &) noexcept {}
  lv2_obj &operator=(lv2_obj &&rhs) noexcept {
    exists = +rhs.exists;
    return *this;
  }
  void save(utils::serial &) {}
  // Existence validation (workaround for shared-ptr ref-counting)
  atomic_t<u32> exists = 0;
  // True if the pointer is non-null and the object has not been destroyed
  template <typename Ptr> static bool check(Ptr &&ptr) {
    return ptr && ptr->exists;
  }
  // wrapper for name64 string formatting
  struct name_64 {
    u64 data;
  };
  static std::string name64(u64 name_u64);
  // Find and remove the object from the linked list.
  // Returns the removed node or null if not found; when ModifyNode is set the
  // removed node's own link is also cleared.
  template <bool ModifyNode = true, typename T>
  static T *unqueue(T *&first, T *object, T *T::*mem_ptr = &T::next_cpu) {
    auto it = +first;
    if (it == object) {
      // Object is the head: relink head to its successor
      atomic_storage<T *>::release(first, it->*mem_ptr);
      if constexpr (ModifyNode) {
        atomic_storage<T *>::release(it->*mem_ptr, nullptr);
      }
      return it;
    }
    for (; it;) {
      const auto next = it->*mem_ptr + 0;
      if (next == object) {
        // Unlink 'next' by pointing its predecessor past it
        atomic_storage<T *>::release(it->*mem_ptr, next->*mem_ptr);
        if constexpr (ModifyNode) {
          atomic_storage<T *>::release(next->*mem_ptr, nullptr);
        }
        return next;
      }
      it = next;
    }
    return {};
  }
  // Remove an object from the linked set according to the protocol.
  // SYS_SYNC_FIFO pops the tail (oldest entry); any other protocol picks the
  // best-priority entry (ties broken by insertion order tag).
  // A node whose state has cpu_flag::again set is returned but left queued.
  template <typename E, typename T>
  static E *schedule(T &first, u32 protocol, bool modify_node = true) {
    auto it = static_cast<E *>(first);
    if (!it) {
      return it;
    }
    auto parent_found = &first;
    if (protocol == SYS_SYNC_FIFO) {
      while (true) {
        const auto next = +it->next_cpu;
        if (next) {
          parent_found = &it->next_cpu;
          it = next;
          continue;
        }
        if (cpu_flag::again - it->state) {
          atomic_storage<T>::release(*parent_found, nullptr);
        }
        return it;
      }
    }
    auto prio = it->prio.load();
    auto found = it;
    while (true) {
      auto &node = it->next_cpu;
      const auto next = static_cast<E *>(node);
      if (!next) {
        break;
      }
      const auto _prio = static_cast<E *>(next)->prio.load();
      // This condition tests for equality as well so the earliest element to be
      // pushed is popped
      if (_prio.prio < prio.prio ||
          (_prio.prio == prio.prio && _prio.order < prio.order)) {
        found = next;
        parent_found = &node;
        prio = _prio;
      }
      it = next;
    }
    if (cpu_flag::again - found->state) {
      atomic_storage<T>::release(*parent_found, found->next_cpu);
      if (modify_node) {
        atomic_storage<T>::release(found->next_cpu, nullptr);
      }
    }
    return found;
  }
  // Push an object at the head of the intrusive list and stamp it with a
  // fresh global insertion-order tag (used for FIFO tie-breaking above)
  template <typename T> static void emplace(T &first, T object) {
    atomic_storage<T>::release(object->next_cpu, first);
    atomic_storage<T>::release(first, object);
    object->prio.atomic_op(
        [order = ++g_priority_order_tag](
            std::common_type_t<decltype(std::declval<T>()->prio.load())>
                &prio) {
          if constexpr (requires {
                          +std::declval<decltype(prio)>().preserve_bit;
                        }) {
            if (prio.preserve_bit) {
              // Restoring state on load
              prio.preserve_bit = 0;
              return;
            }
          }
          prio.order = order;
        });
  }

private:
  // Remove the current thread from the scheduling queue, register timeout
  static bool sleep_unlocked(cpu_thread &, u64 timeout, u64 current_time);
  // Schedule the thread
  static bool awake_unlocked(cpu_thread *, s32 prio = enqueue_cmd);

public:
  static constexpr u64 max_timeout = u64{umax} / 1000;
  static bool sleep(cpu_thread &cpu, const u64 timeout = 0);
  static bool awake(cpu_thread *thread, s32 prio = enqueue_cmd);
  // Returns true on successful context switch, false otherwise
  static bool yield(cpu_thread &thread);
  static void set_priority(cpu_thread &thread, s32 prio) {
    ensure(prio + 512u < 3712);
    awake(&thread, prio);
  }
  // Awake every thread queued via append() and clear the pending list
  static inline void awake_all() {
    awake({});
    g_to_awake.clear();
  }
  static void make_scheduler_ready();
  static std::pair<ppu_thread_status, u32>
  ppu_state(ppu_thread *ppu, bool lock_idm = true, bool lock_lv2 = true);
  // Queue a thread to be awoken by the next awake_all()
  static inline void append(cpu_thread *const thread) {
    g_to_awake.emplace_back(thread);
  }
  // Serialization related
  static void set_future_sleep(cpu_thread *cpu);
  static bool is_scheduler_ready();
  // Must be called under IDM lock
  static ppu_non_sleeping_count_t count_non_sleeping_threads();
  static inline bool has_ppus_in_running_state() noexcept {
    return count_non_sleeping_threads().has_running != 0;
  }
  static void set_yield_frequency(u64 freq, u64 max_allowed_tsx);
  static void cleanup();
  // Extract the effective IPC key from an attribute struct: nonzero only for
  // process-shared objects
  template <typename T> static inline u64 get_key(const T &attr) {
    return (attr.pshared == SYS_SYNC_PROCESS_SHARED ? +attr.ipc_key : 0);
  }
  // Generic lv2 object creation: validates pshared/flags, registers the object
  // with IDM and (when process-shared) with the IPC key container.
  // 'make' constructs the object; returns CELL_OK or the mapped CellError.
  template <typename T, typename F>
  static error_code create(u32 pshared, u64 ipc_key, s32 flags, F &&make,
                           bool key_not_zero = true) {
    switch (pshared) {
    case SYS_SYNC_PROCESS_SHARED: {
      if (key_not_zero && ipc_key == 0) {
        return CELL_EINVAL;
      }
      switch (flags) {
      case SYS_SYNC_NEWLY_CREATED:
      case SYS_SYNC_NOT_CARE:
      case SYS_SYNC_NOT_CREATE: {
        break;
      }
      default:
        return CELL_EINVAL;
      }
      break;
    }
    case SYS_SYNC_NOT_PROCESS_SHARED: {
      break;
    }
    default:
      return CELL_EINVAL;
    }
    // EAGAIN for IDM IDs shortage
    CellError error = CELL_EAGAIN;
    if (!idm::import <lv2_obj, T>([&]() -> shared_ptr<T> {
          shared_ptr<T> result = make();
          auto finalize_construct = [&]() -> shared_ptr<T> {
            if ((error = result->on_id_create())) {
              result.reset();
            }
            return std::move(result);
          };
          if (pshared != SYS_SYNC_PROCESS_SHARED) {
            // Creation of unique (non-shared) object handle
            return finalize_construct();
          }
          auto &ipc_container = g_fxo->get<ipc_manager<T, u64>>();
          if (flags == SYS_SYNC_NOT_CREATE) {
            result = ipc_container.get(ipc_key);
            if (!result) {
              error = CELL_ESRCH;
              return result;
            }
            // Run on_id_create() on existing object
            return finalize_construct();
          }
          bool added = false;
          std::tie(added, result) = ipc_container.add(
              ipc_key, finalize_construct, flags != SYS_SYNC_NEWLY_CREATED);
          if (!added) {
            if (flags == SYS_SYNC_NEWLY_CREATED) {
              // Object already exists but flags does not allow it
              error = CELL_EEXIST;
              // We specified we do not want to peek pointer's value, result
              // must be empty
              AUDIT(!result);
              return result;
            }
            // Run on_id_create() on existing object
            return finalize_construct();
          }
          return result;
        })) {
      return error;
    }
    return CELL_OK;
  }
  // Counterpart of create(): drops one existence reference and, for the last
  // reference of a process-shared object, removes it from the IPC container
  template <typename T>
  static void on_id_destroy(T &obj, u64 ipc_key, u64 pshared = umax) {
    if (pshared == umax) {
      // Default is to check key
      pshared = ipc_key != 0;
    }
    if (obj.exists-- == 1u && pshared) {
      g_fxo->get<ipc_manager<T, u64>>().remove(ipc_key);
    }
  }
  // Savestate load helper: re-registers a deserialized object with the IPC
  // container when process-shared, then revalidates it
  template <typename T>
  static shared_ptr<T> load(u64 ipc_key, shared_ptr<T> make,
                            u64 pshared = umax) {
    if (pshared == umax ? ipc_key != 0 : pshared != 0) {
      g_fxo->need<ipc_manager<T, u64>>();
      g_fxo->get<ipc_manager<T, u64>>().add(ipc_key, [&]() { return make; });
    }
    // Ensure no error
    ensure(!make->on_id_create());
    return make;
  }
  // Wraps load() in a deferred setter storing the object into IDM storage
  template <typename T, typename Storage = lv2_obj>
  static std::function<void(void *)> load_func(shared_ptr<T> make,
                                               u64 pshared = umax) {
    const u64 key = make->key;
    return [ptr = load<T>(key, make, pshared)](void *storage) {
      *static_cast<atomic_ptr<Storage> *>(storage) = ptr;
    };
  }
  static bool wait_timeout(u64 usec, ppu_thread *cpu = {}, bool scale = true,
                           bool is_usleep = false);
  static void notify_all() noexcept;
  // Can be called before the actual sleep call in order to move it out of mutex
  // scope
  static void prepare_for_sleep(cpu_thread &cpu);
  // RAII guard: postpones thread notifications while alive and flushes them
  // via notify_all() on destruction
  struct notify_all_t {
    notify_all_t() noexcept { g_postpone_notify_barrier = true; }
    notify_all_t(const notify_all_t &) = delete;
    static void cleanup() {
      for (auto &cpu : g_to_notify) {
        if (!cpu) {
          return;
        }
        // While IDM mutex is still locked (this function assumes so) check if
        // the notification is still needed Pending flag is meant for forced
        // notification (if the CPU really has pending work it can restore the
        // flag in theory) Disabled to allow reservation notifications from here
        if (false && cpu != &g_to_notify &&
            static_cast<const decltype(cpu_thread::state) *>(cpu)->none_of(
                cpu_flag::signal + cpu_flag::pending)) {
          // Omit it (this is a void pointer, it can hold anything)
          cpu = &g_to_notify;
        }
      }
    }
    ~notify_all_t() noexcept { lv2_obj::notify_all(); }
  };
  // Scheduler mutex
  static shared_mutex g_mutex;
  // Priority tags
  static atomic_t<u64> g_priority_order_tag;

private:
  // Pending list of threads to run
  static thread_local std::vector<class cpu_thread *> g_to_awake;
  // Scheduler queue for active PPU threads
  static class ppu_thread *g_ppu;
  // Count of threads the scheduler is still waiting for a response from
  // (exact semantics in lv2.cpp)
  static u32 g_pending;
  // Pending list of threads to notify (cpu_thread::state ptr)
  static thread_local std::add_pointer_t<const void> g_to_notify[4];
  // If a notify_all_t object exists locally, postpone notifications to the
  // destructor of it (not recursive, notifies on the first destructor for
  // safety)
  static thread_local bool g_postpone_notify_barrier;
  static void schedule_all(u64 current_time = 0);
};

View file

@ -1,12 +1,13 @@
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// SysCalls
error_code sys_time_set_timezone(s32 timezone, s32 summertime);
error_code sys_time_get_timezone(vm::ptr<s32> timezone, vm::ptr<s32> summertime);
error_code sys_time_get_timezone(vm::ptr<s32> timezone,
vm::ptr<s32> summertime);
error_code sys_time_get_current_time(vm::ptr<s64> sec, vm::ptr<s64> nsec);
error_code sys_time_set_current_time(s64 sec, s64 nsec);
u64 sys_time_get_timebase_frequency();

View file

@ -0,0 +1,33 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// TTY channels
enum {
SYS_TTYP_PPU_STDIN = 0,
SYS_TTYP_PPU_STDOUT = 0,
SYS_TTYP_PPU_STDERR = 1,
SYS_TTYP_SPU_STDOUT = 2,
SYS_TTYP_USER1 = 3,
SYS_TTYP_USER2 = 4,
SYS_TTYP_USER3 = 5,
SYS_TTYP_USER4 = 6,
SYS_TTYP_USER5 = 7,
SYS_TTYP_USER6 = 8,
SYS_TTYP_USER7 = 9,
SYS_TTYP_USER8 = 10,
SYS_TTYP_USER9 = 11,
SYS_TTYP_USER10 = 12,
SYS_TTYP_USER11 = 13,
SYS_TTYP_USER12 = 14,
SYS_TTYP_USER13 = 15,
};
class ppu_thread;
// SysCalls
error_code sys_tty_read(s32 ch, vm::ptr<char> buf, u32 len,
vm::ptr<u32> preadlen);
error_code sys_tty_write(ppu_thread &ppu, s32 ch, vm::cptr<char> buf, u32 len,
vm::ptr<u32> pwritelen);

View file

@ -0,0 +1,713 @@
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "sys_rsxaudio.h"
#include "util/Thread.h"
#include "util/cond.h"
#include "util/mutex.h"
#include "util/simple_ringbuf.h"
enum : u32 {
PS3AV_RX_BUF_SIZE = 0x800,
PS3AV_TX_BUF_SIZE = 0x800,
PS3AV_VERSION = 0x205,
PS3AV_CID_AV_INIT = 0x00000001,
PS3AV_CID_AV_FIN = 0x00000002,
PS3AV_CID_AV_GET_HW_CONF = 0x00000003,
PS3AV_CID_AV_GET_MONITOR_INFO = 0x00000004,
PS3AV_CID_AV_GET_BKSV_LIST = 0x00000005,
PS3AV_CID_AV_ENABLE_EVENT = 0x00000006,
PS3AV_CID_AV_DISABLE_EVENT = 0x00000007,
PS3AV_CID_AV_GET_PORT_STATE = 0x00000009,
PS3AV_CID_AV_TV_MUTE = 0x0000000A,
PS3AV_CID_AV_NULL_CMD = 0x0000000B,
PS3AV_CID_AV_GET_AKSV = 0x0000000C,
PS3AV_CID_AV_UNK4 = 0x0000000D,
PS3AV_CID_AV_UNK5 = 0x0000000E,
PS3AV_CID_AV_VIDEO_MUTE = 0x00010002,
PS3AV_CID_AV_VIDEO_DISABLE_SIG = 0x00010003,
PS3AV_CID_AV_VIDEO_YTRAPCONTROL = 0x00010004,
PS3AV_CID_AV_VIDEO_UNK5 = 0x00010005,
PS3AV_CID_AV_VIDEO_UNK6 = 0x00010006,
PS3AV_CID_AV_AUDIO_MUTE = 0x00020002,
PS3AV_CID_AV_ACP_CTRL = 0x00020003,
PS3AV_CID_AV_SET_ACP_PACKET = 0x00020004,
PS3AV_CID_AV_ADD_SIGNAL_CTL = 0x00030001,
PS3AV_CID_AV_SET_CC_CODE = 0x00030002,
PS3AV_CID_AV_SET_CGMS_WSS = 0x00030003,
PS3AV_CID_AV_SET_MACROVISION = 0x00030004,
PS3AV_CID_AV_UNK7 = 0x00030005,
PS3AV_CID_AV_UNK8 = 0x00030006,
PS3AV_CID_AV_UNK9 = 0x00030007,
PS3AV_CID_AV_HDMI_MODE = 0x00040001,
PS3AV_CID_AV_UNK15 = 0x00050001,
PS3AV_CID_AV_CEC_MESSAGE = 0x000A0001,
PS3AV_CID_AV_GET_CEC_CONFIG = 0x000A0002,
PS3AV_CID_AV_UNK11 = 0x000A0003,
PS3AV_CID_AV_UNK12 = 0x000A0004,
PS3AV_CID_AV_UNK13 = 0x000A0005,
PS3AV_CID_AV_UNK14 = 0x000A0006,
PS3AV_CID_VIDEO_INIT = 0x01000001,
PS3AV_CID_VIDEO_MODE = 0x01000002,
PS3AV_CID_VIDEO_ROUTE = 0x01000003,
PS3AV_CID_VIDEO_FORMAT = 0x01000004,
PS3AV_CID_VIDEO_PITCH = 0x01000005,
PS3AV_CID_VIDEO_GET_HW_CONF = 0x01000006,
PS3AV_CID_VIDEO_GET_REG = 0x01000008,
PS3AV_CID_VIDEO_UNK = 0x01000009,
PS3AV_CID_VIDEO_UNK1 = 0x0100000A,
PS3AV_CID_VIDEO_UNK2 = 0x0100000B,
PS3AV_CID_VIDEO_UNK3 = 0x0100000C,
PS3AV_CID_AUDIO_INIT = 0x02000001,
PS3AV_CID_AUDIO_MODE = 0x02000002,
PS3AV_CID_AUDIO_MUTE = 0x02000003,
PS3AV_CID_AUDIO_ACTIVE = 0x02000004,
PS3AV_CID_AUDIO_INACTIVE = 0x02000005,
PS3AV_CID_AUDIO_SPDIF_BIT = 0x02000006,
PS3AV_CID_AUDIO_CTRL = 0x02000007,
PS3AV_CID_AVB_PARAM = 0x04000001,
PS3AV_CID_EVENT_UNPLUGGED = 0x10000001,
PS3AV_CID_EVENT_PLUGGED = 0x10000002,
PS3AV_CID_EVENT_HDCP_DONE = 0x10000003,
PS3AV_CID_EVENT_HDCP_FAIL = 0x10000004,
PS3AV_CID_EVENT_HDCP_REAUTH = 0x10000005,
PS3AV_CID_EVENT_HDCP_ERROR = 0x10000006,
PS3AV_REPLY_BIT = 0x80000000,
PS3AV_RESBIT_720x480P = 0x0003, /* 0x0001 | 0x0002 */
PS3AV_RESBIT_720x576P = 0x0003, /* 0x0001 | 0x0002 */
PS3AV_RESBIT_1280x720P = 0x0004,
PS3AV_RESBIT_1920x1080I = 0x0008,
PS3AV_RESBIT_1920x1080P = 0x4000,
PS3AV_MONITOR_TYPE_NONE = 0,
PS3AV_MONITOR_TYPE_HDMI = 1,
PS3AV_MONITOR_TYPE_DVI = 2,
PS3AV_MONITOR_TYPE_AVMULTI = 3,
PS3AV_COLORIMETRY_xvYCC_601 = 1,
PS3AV_COLORIMETRY_xvYCC_709 = 2,
PS3AV_COLORIMETRY_MD0 = 1 << 4,
PS3AV_COLORIMETRY_MD1 = 1 << 5,
PS3AV_COLORIMETRY_MD2 = 1 << 6,
PS3AV_CS_SUPPORTED = 1,
PS3AV_RGB_SELECTABLE_QAUNTIZATION_RANGE = 8,
PS3AV_12BIT_COLOR = 16,
PS3AV_MON_INFO_AUDIO_BLK_MAX = 16,
PS3AV_MON_INFO_AUDIO_TYPE_LPCM = 1,
PS3AV_MON_INFO_AUDIO_TYPE_AC3 = 2,
PS3AV_MON_INFO_AUDIO_TYPE_AAC = 6,
PS3AV_MON_INFO_AUDIO_TYPE_DTS = 7,
PS3AV_MON_INFO_AUDIO_TYPE_DDP = 10,
PS3AV_MON_INFO_AUDIO_TYPE_DTS_HD = 11,
PS3AV_MON_INFO_AUDIO_TYPE_DOLBY_THD = 12,
PS3AV_HDMI_BEHAVIOR_HDCP_OFF = 0x01,
PS3AV_HDMI_BEHAVIOR_DVI = 0x40,
PS3AV_HDMI_BEHAVIOR_EDID_PASS = 0x80,
PS3AV_HDMI_BEHAVIOR_NORMAL = 0xFF,
PS3AV_EVENT_BIT_UNPLUGGED = 0x01,
PS3AV_EVENT_BIT_PLUGGED = 0x02,
PS3AV_EVENT_BIT_HDCP_DONE = 0x04,
PS3AV_EVENT_BIT_HDCP_FAIL = 0x08,
PS3AV_EVENT_BIT_HDCP_REAUTH = 0x10,
PS3AV_EVENT_BIT_HDCP_TOPOLOGY = 0x20,
PS3AV_EVENT_BIT_UNK = 0x80000000,
PS3AV_HEAD_A_HDMI = 0,
PS3AV_HEAD_B_ANALOG = 1,
PS3AV_AUDIO_PORT_HDMI_0 = 1 << 0,
PS3AV_AUDIO_PORT_HDMI_1 = 1 << 1,
PS3AV_AUDIO_PORT_AVMULTI = 1 << 10,
PS3AV_AUDIO_PORT_SPDIF_0 = 1 << 20,
PS3AV_AUDIO_PORT_SPDIF_1 = 1 << 21,
PS3AV_STATUS_SUCCESS = 0x00,
PS3AV_STATUS_RECEIVE_VUART_ERROR = 0x01,
PS3AV_STATUS_SYSCON_COMMUNICATE_FAIL = 0x02,
PS3AV_STATUS_INVALID_COMMAND = 0x03,
PS3AV_STATUS_INVALID_PORT = 0x04,
PS3AV_STATUS_INVALID_VID = 0x05,
PS3AV_STATUS_INVALID_COLOR_SPACE = 0x06,
PS3AV_STATUS_INVALID_FS = 0x07,
PS3AV_STATUS_INVALID_AUDIO_CH = 0x08,
PS3AV_STATUS_UNSUPPORTED_VERSION = 0x09,
PS3AV_STATUS_INVALID_SAMPLE_SIZE = 0x0A,
PS3AV_STATUS_FAILURE = 0x0B,
PS3AV_STATUS_UNSUPPORTED_COMMAND = 0x0C,
PS3AV_STATUS_BUFFER_OVERFLOW = 0x0D,
PS3AV_STATUS_INVALID_VIDEO_PARAM = 0x0E,
PS3AV_STATUS_NO_SEL = 0x0F,
PS3AV_STATUS_INVALID_AV_PARAM = 0x10,
PS3AV_STATUS_INVALID_AUDIO_PARAM = 0x11,
PS3AV_STATUS_UNSUPPORTED_HDMI_MODE = 0x12,
PS3AV_STATUS_NO_SYNC_HEAD = 0x13,
PS3AV_STATUS_UNK_0x14 = 0x14,
};
const u8 PS3AV_AKSV_VALUE[5] = {0x00, 0x00, 0x0F, 0xFF, 0xFF};
const u8 PS3AV_BKSV_VALUE[5] = {0xFF, 0xFF, 0xF0, 0x00, 0x00};
enum PS3_AV_OP_MODE : u32 {
// BIG operation modes could send more then 4096 bytes
NOT_BLOCKING_BIG_OP = 0,
BLOCKING_BIG_OP = 1,
NOT_BLOCKING_OP = 2,
};
enum class UartHdmiEvent : u8 {
NONE = 0,
UNPLUGGED = 1,
PLUGGED = 2,
HDCP_DONE = 3,
};
enum class UartAudioCtrlID : u32 {
DAC_RESET = 0,
DAC_DE_EMPHASIS = 1,
AVCLK = 2,
};
enum class UartAudioAvport : u8 {
HDMI_0 = 0x0,
HDMI_1 = 0x1,
AVMULTI_0 = 0x10,
AVMULTI_1 = 0x11,
SPDIF_0 = 0x20,
SPDIF_1 = 0x21,
};
enum class UartAudioSource : u32 {
SERIAL = 0,
SPDIF = 1,
};
enum class UartAudioFreq : u32 {
_32K = 1,
_44K = 2,
_48K = 3,
_88K = 4,
_96K = 5,
_176K = 6,
_192K = 7,
};
enum class UartAudioFormat : u32 {
PCM = 1,
BITSTREAM = 0xFF,
};
enum class UartAudioSampleSize : u32 {
_16BIT = 1,
_20BIT = 2,
_24BIT = 3,
};
// Produces timed HDMI state-transition events (unplug/plug/HDCP-done) for the
// virtual AV UART. A target transition is requested via set_target_state();
// the handler then advances through intermediate states on a schedule.
class vuart_hdmi_event_handler {
public:
// time_offset staggers event timing between ports (see construction site)
vuart_hdmi_event_handler(u64 time_offset = 0);
// Request a transition sequence running from start_state to end_state
void set_target_state(UartHdmiEvent start_state, UartHdmiEvent end_state);
// True if a scheduled event is ready to be consumed
bool events_available();
// Microseconds (presumably; units not shown here — TODO confirm) until the
// next scheduled event
u64 time_until_next();
// Consume and return the event that has occurred
UartHdmiEvent get_occured_event();
private:
// Spacing between consecutive generated events
static constexpr u64 EVENT_TIME_DURATION = 20000;
static constexpr u64 EVENT_TIME_THRESHOLD = 1000;
u64 time_of_next_event = 0;
const u64 time_offset = 0;
// Assume that syscon initialized hdmi to plugged state
UartHdmiEvent current_state = UartHdmiEvent::PLUGGED;
UartHdmiEvent current_to_state = UartHdmiEvent::PLUGGED;
UartHdmiEvent base_state = UartHdmiEvent::NONE;
UartHdmiEvent target_state = UartHdmiEvent::NONE;
// Compute time_of_next_event for the upcoming transition
void schedule_next();
// Step current_state toward the target state
void advance_state();
};
class vuart_av_thread;
// Interface implemented by each PS3AV command handler; dispatched by the
// vuart AV thread for every packet found in the TX buffer.
struct ps3av_cmd {
// Return the size in bytes of the command packet located at pkt_buf
virtual u16 get_size(vuart_av_thread &vuart, const void *pkt_buf) = 0;
// Process the packet (typically writing a reply through vuart)
virtual void execute(vuart_av_thread &vuart, const void *pkt_buf) = 0;
virtual ~ps3av_cmd() {};
};
// Worker thread emulating the AV manager behind the virtual UART: parses
// PS3AV command packets from the TX ring buffer, executes them, and queues
// replies/events into the RX ring buffer.
class vuart_av_thread {
public:
atomic_t<bool> initialized{};
// Guards rx_buf / tx_buf respectively
shared_mutex rx_mutex{};
shared_mutex tx_mutex{};
// Wake/ready condition pairs used to synchronize with syscall side
shared_mutex tx_wake_m{};
cond_variable tx_wake_c{};
shared_mutex tx_rdy_m{};
cond_variable tx_rdy_c{};
shared_mutex rx_wake_m{};
cond_variable rx_wake_c{};
bool head_b_initialized = false;
// One of PS3AV_HDMI_BEHAVIOR_* (defaults to normal HDCP-enabled mode)
u8 hdmi_behavior_mode = PS3AV_HDMI_BEHAVIOR_NORMAL;
// Protocol version negotiated via PS3AV_CID_AV_INIT
u16 av_cmd_ver = 0;
// Mask of PS3AV_EVENT_BIT_* the guest enabled
u32 hdmi_events_bitmask = 0;
// Per-head flag: whether a video resolution has been set
bool hdmi_res_set[2]{false, false};
// Thread entry point
void operator()();
void parse_tx_buffer(u32 buf_size);
vuart_av_thread &operator=(thread_state);
// Returns number of bytes actually enqueued
u32 enque_tx_data(const void *data, u32 data_sz);
u32 get_tx_bytes();
u32 read_rx_data(void *data, u32 data_sz);
u32 get_reply_buf_free_size();
// Build and queue a reply packet; UseScBuffer selects the syscon staging
// buffer instead of the regular one
template <bool UseScBuffer = false>
void write_resp(u32 cid, u32 status, const void *data = nullptr,
u16 data_size = 0);
void add_hdmi_events(UartHdmiEvent first_event, UartHdmiEvent last_event,
bool hdmi_0, bool hdmi_1);
void add_hdmi_events(UartHdmiEvent last_event, bool hdmi_0, bool hdmi_1);
static RsxaudioAvportIdx avport_to_idx(UartAudioAvport avport);
static constexpr auto thread_name = "VUART AV Thread"sv;
private:
// Staging buffer with a fill cursor
struct temp_buf {
u32 crnt_size = 0;
u8 buf[PS3AV_RX_BUF_SIZE]{};
};
simple_ringbuf tx_buf{PS3AV_TX_BUF_SIZE};
simple_ringbuf rx_buf{PS3AV_RX_BUF_SIZE};
// uart_mngr could sometimes read past the tx_buffer due to weird size checks
// in FW, but no further than size of largest packet
u8 temp_tx_buf[PS3AV_TX_BUF_SIZE * 2]{};
temp_buf temp_rx_buf{};
temp_buf temp_rx_sc_buf{};
// One handler per HDMI head; second head staggered by 5000 (time offset)
vuart_hdmi_event_handler hdmi_event_handler[2]{0, 5000};
// First HDCP authentication per head gets special treatment
bool hdcp_first_auth[2]{true, true};
u32 read_tx_data(void *data, u32 data_sz);
// Look up the handler object for a command id
std::shared_ptr<ps3av_cmd> get_cmd(u32 cid);
void commit_rx_buf(bool syscon_buf);
void add_unplug_event(bool hdmi_0, bool hdmi_1);
void add_plug_event(bool hdmi_0, bool hdmi_1);
void add_hdcp_done_event(bool hdmi_0, bool hdmi_1);
void commit_event_data(const void *data, u16 data_size);
void dispatch_hdmi_event(UartHdmiEvent event, UartAudioAvport hdmi);
};
using vuart_av = named_thread<vuart_av_thread>;
struct vuart_params {
be_t<u64, 1> rx_buf_size;
be_t<u64, 1> tx_buf_size;
};
static_assert(sizeof(vuart_params) == 16);
struct ps3av_pkt_reply_hdr {
be_t<u16, 1> version;
be_t<u16, 1> length;
be_t<u32, 1> cid;
be_t<u32, 1> status;
};
static_assert(sizeof(ps3av_pkt_reply_hdr) == 12);
struct ps3av_header {
be_t<u16, 1> version;
be_t<u16, 1> length;
be_t<u32, 1> cid;
};
static_assert(sizeof(ps3av_header) == 8);
struct ps3av_info_resolution {
be_t<u32, 1> res_bits;
be_t<u32, 1> native;
};
struct ps3av_info_cs {
u8 rgb;
u8 yuv444;
u8 yuv422;
u8 colorimetry_data;
};
struct ps3av_info_color {
be_t<u16, 1> red_x;
be_t<u16, 1> red_y;
be_t<u16, 1> green_x;
be_t<u16, 1> green_y;
be_t<u16, 1> blue_x;
be_t<u16, 1> blue_y;
be_t<u16, 1> white_x;
be_t<u16, 1> white_y;
be_t<u32, 1> gamma;
};
struct ps3av_info_audio {
u8 type;
u8 max_num_of_ch;
u8 fs;
u8 sbit;
};
struct ps3av_get_monitor_info_reply {
u8 avport;
u8 monitor_id[10];
u8 monitor_type;
u8 monitor_name[16];
ps3av_info_resolution res_60;
ps3av_info_resolution res_50;
ps3av_info_resolution res_other;
ps3av_info_resolution res_vesa;
ps3av_info_cs cs;
ps3av_info_color color;
u8 supported_ai;
u8 speaker_info;
be_t<u16, 1> num_of_audio_block;
ps3av_info_audio audio_info[PS3AV_MON_INFO_AUDIO_BLK_MAX];
be_t<u16, 1> hor_screen_size;
be_t<u16, 1> ver_screen_size;
u8 supported_content_types;
u8 reserved_1[3];
ps3av_info_resolution res_60_packed_3D;
ps3av_info_resolution res_50_packed_3D;
ps3av_info_resolution res_other_3D;
ps3av_info_resolution res_60_sbs_3D;
ps3av_info_resolution res_50_sbs_3D;
u8 vendor_specific_flags;
u8 reserved_2[7];
};
static_assert(sizeof(ps3av_get_monitor_info_reply) == 208);
struct ps3av_get_monitor_info {
ps3av_header hdr;
be_t<u16, 1> avport;
be_t<u16, 1> reserved;
};
static_assert(sizeof(ps3av_get_monitor_info) == 12);
struct ps3av_get_hw_info_reply {
be_t<u16, 1> num_of_hdmi;
be_t<u16, 1> num_of_avmulti;
be_t<u16, 1> num_of_spdif;
be_t<u16, 1> extra_bistream_support;
};
static_assert(sizeof(ps3av_get_hw_info_reply) == 8);
struct ps3av_pkt_set_hdmi_mode {
ps3av_header hdr;
u8 mode;
u8 resv[3];
};
static_assert(sizeof(ps3av_pkt_set_hdmi_mode) == 12);
struct ps3av_pkt_audio_mode {
ps3av_header hdr;
UartAudioAvport avport;
u8 reserved0[3];
be_t<u32, 1> mask;
be_t<u32, 1> audio_num_of_ch;
be_t<UartAudioFreq, 1> audio_fs;
be_t<UartAudioSampleSize, 1> audio_word_bits;
be_t<UartAudioFormat, 1> audio_format;
be_t<UartAudioSource, 1> audio_source;
u8 audio_enable[4];
u8 audio_swap[4];
u8 audio_map[4];
be_t<u32, 1> audio_layout;
be_t<u32, 1> audio_downmix;
be_t<u32, 1> audio_downmix_level;
u8 audio_cs_info[8];
};
static_assert(sizeof(ps3av_pkt_audio_mode) == 68);
struct ps3av_pkt_audio_mute {
ps3av_header hdr;
UartAudioAvport avport;
u8 reserved0[3];
u8 mute;
};
static_assert(sizeof(ps3av_pkt_audio_mute) == 13);
struct ps3av_pkt_audio_set_active {
ps3av_header hdr;
be_t<u32, 1> audio_port;
};
static_assert(sizeof(ps3av_pkt_audio_set_active) == 12);
struct ps3av_pkt_audio_spdif_bit {
ps3av_header hdr;
UartAudioAvport avport;
u8 reserved0[3];
be_t<u32, 1> audio_port;
be_t<u32, 1> spdif_bit_data[12];
};
static_assert(sizeof(ps3av_pkt_audio_spdif_bit) == 64);
struct ps3av_pkt_audio_ctrl {
ps3av_header hdr;
be_t<UartAudioCtrlID, 1> audio_ctrl_id;
be_t<u32, 1> audio_ctrl_data[4];
};
static_assert(sizeof(ps3av_pkt_audio_ctrl) == 28);
struct ps3av_pkt_hdmi_plugged_event {
ps3av_header hdr;
ps3av_get_monitor_info_reply minfo;
};
static_assert(sizeof(ps3av_pkt_hdmi_plugged_event) == 216);
struct ps3av_pkt_hdmi_hdcp_done_event {
ps3av_header hdr;
be_t<u32, 1> ksv_cnt;
u8 ksv_arr[20][5];
};
static_assert(sizeof(ps3av_pkt_hdmi_hdcp_done_event) == 112);
struct ps3av_pkt_av_init {
ps3av_header hdr;
be_t<u32, 1> event_bit;
};
static_assert(sizeof(ps3av_pkt_av_init) == 12);
struct ps3av_pkt_av_init_reply {
be_t<u32, 1> unk;
};
static_assert(sizeof(ps3av_pkt_av_init_reply) == 4);
struct ps3av_pkt_enable_event {
ps3av_header hdr;
be_t<u32, 1> event_bit;
};
static_assert(sizeof(ps3av_pkt_enable_event) == 12);
struct ps3av_pkt_get_bksv {
ps3av_header hdr;
be_t<u16, 1> avport;
u8 resv[2];
};
static_assert(sizeof(ps3av_pkt_get_bksv) == 12);
struct ps3av_pkt_get_bksv_reply {
be_t<u16, 1> avport;
u8 resv[2];
be_t<u32, 1> ksv_cnt;
u8 ksv_arr[20][5];
};
static_assert(sizeof(ps3av_pkt_get_bksv_reply) == 108);
struct ps3av_pkt_video_get_hw_cfg_reply {
be_t<u32, 1> gx_available;
};
static_assert(sizeof(ps3av_pkt_video_get_hw_cfg_reply) == 4);
struct ps3av_pkt_video_set_pitch {
ps3av_header hdr;
be_t<u32, 1> video_head;
be_t<u32, 1> pitch;
};
static_assert(sizeof(ps3av_pkt_video_set_pitch) == 16);
struct ps3av_pkt_get_aksv_reply {
be_t<u32, 1> ksv_size;
u8 ksv_arr[2][5];
u8 resv[2];
};
static_assert(sizeof(ps3av_pkt_get_aksv_reply) == 16);
struct ps3av_pkt_inc_avset {
ps3av_header hdr;
be_t<u16, 1> num_of_video_pkt;
be_t<u16, 1> num_of_audio_pkt;
be_t<u16, 1> num_of_av_video_pkt;
be_t<u16, 1> num_of_av_audio_pkt;
};
static_assert(sizeof(ps3av_pkt_inc_avset) == 16);
struct ps3av_pkt_av_audio_param {
ps3av_header hdr;
be_t<u16, 1> avport;
be_t<u16, 1> resv;
u8 mclk;
u8 ns[3];
u8 enable;
u8 swaplr;
u8 fifomap;
u8 inputctrl;
u8 inputlen;
u8 layout;
u8 info[5];
u8 chstat[5];
};
static_assert(sizeof(ps3av_pkt_av_audio_param) == 32);
struct ps3av_pkt_av_video_cs {
ps3av_header hdr;
be_t<u16, 1> avport;
be_t<u16, 1> av_vid;
be_t<u16, 1> av_cs_out;
be_t<u16, 1> av_cs_in;
u8 dither;
u8 bitlen_out;
u8 super_white;
u8 aspect;
u8 unk1;
u8 unk2;
u8 resv[2];
};
static_assert(sizeof(ps3av_pkt_av_video_cs) == 24);
struct ps3av_pkt_video_mode {
ps3av_header hdr;
be_t<u32, 1> video_head;
be_t<u16, 1> unk1;
be_t<u16, 1> unk2;
be_t<u32, 1> video_vid;
be_t<u32, 1> width;
be_t<u32, 1> height;
be_t<u32, 1> pitch;
be_t<u32, 1> video_out_format;
be_t<u32, 1> video_format;
be_t<u16, 1> unk3;
be_t<u16, 1> video_order;
be_t<u32, 1> unk4;
};
static_assert(sizeof(ps3av_pkt_video_mode) == 48);
struct ps3av_pkt_av_video_ytrapcontrol {
ps3av_header hdr;
be_t<u16, 1> unk1;
be_t<u16, 1> unk2;
};
static_assert(sizeof(ps3av_pkt_av_video_ytrapcontrol) == 12);
struct ps3av_pkt_av_get_cec_config_reply {
be_t<u32, 1> cec_present;
};
struct ps3av_pkt_video_format {
ps3av_header hdr;
be_t<u32, 1> video_head;
be_t<u32, 1> video_format;
be_t<u16, 1> unk;
be_t<u16, 1> video_order;
};
static_assert(sizeof(ps3av_pkt_video_format) == 20);
struct ps3av_pkt_av_set_cgms_wss {
ps3av_header hdr;
be_t<u16, 1> avport;
u8 resv[2];
be_t<u32, 1> cgms_wss;
};
static_assert(sizeof(ps3av_pkt_av_set_cgms_wss) == 16);
struct ps3av_pkt_set_acp_packet {
ps3av_header hdr;
u8 avport;
u8 pkt_type;
u8 resv[2];
u8 pkt_data[32];
};
static_assert(sizeof(ps3av_pkt_set_acp_packet) == 44);
struct ps3av_pkt_acp_ctrl {
ps3av_header hdr;
u8 avport;
u8 packetctl;
u8 resv[2];
};
static_assert(sizeof(ps3av_pkt_acp_ctrl) == 12);
struct ps3av_pkt_add_signal_ctl {
ps3av_header hdr;
be_t<u16, 1> avport;
be_t<u16, 1> signal_ctl;
};
static_assert(sizeof(ps3av_pkt_add_signal_ctl) == 12);
struct ps3av_pkt_av_audio_mute {
ps3av_header hdr;
be_t<u16, 1> avport;
be_t<u16, 1> mute;
};
static_assert(sizeof(ps3av_pkt_av_audio_mute) == 12);
struct ps3av_pkt_video_disable_sig {
ps3av_header hdr;
be_t<u16, 1> avport;
be_t<u16, 1> resv;
};
static_assert(sizeof(ps3av_pkt_video_disable_sig) == 12);
// SysCalls
error_code sys_uart_initialize(ppu_thread &ppu);
error_code sys_uart_receive(ppu_thread &ppu, vm::ptr<void> buffer, u64 size,
u32 mode);
error_code sys_uart_send(ppu_thread &ppu, vm::cptr<void> buffer, u64 size,
u32 mode);
error_code sys_uart_get_params(vm::ptr<vuart_params> buffer);

View file

@ -0,0 +1,118 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
#include "Input/product_info.h"
class ppu_thread;
#define MAX_SYS_USBD_TRANSFERS 0x44
// PS3 internal codes
enum PS3StandardUsbErrors : u32 {
HC_CC_NOERR = 0x00,
EHCI_CC_MISSMF = 0x10,
EHCI_CC_XACT = 0x20,
EHCI_CC_BABBLE = 0x30,
EHCI_CC_DATABUF = 0x40,
EHCI_CC_HALTED = 0x50,
};
enum PS3IsochronousUsbErrors : u8 {
USBD_HC_CC_NOERR = 0x00,
USBD_HC_CC_MISSMF = 0x01,
USBD_HC_CC_XACT = 0x02,
USBD_HC_CC_BABBLE = 0x04,
USBD_HC_CC_DATABUF = 0x08,
};
enum SysUsbdEvents : u32 {
SYS_USBD_ATTACH = 0x01,
SYS_USBD_DETACH = 0x02,
SYS_USBD_TRANSFER_COMPLETE = 0x03,
SYS_USBD_TERMINATE = 0x04,
};
// PS3 internal structures
struct UsbInternalDevice {
u8 device_high; // System flag maybe (used in generating actual device number)
u8 device_low; // Just a number identifying the device (used in generating
// actual device number)
u8 unk3; // ? Seems to always be 2?
u8 unk4; // ?
};
struct UsbDeviceRequest {
u8 bmRequestType;
u8 bRequest;
be_t<u16> wValue;
be_t<u16> wIndex;
be_t<u16> wLength;
};
struct UsbDeviceIsoRequest {
vm::ptr<void> buf;
be_t<u32> start_frame;
be_t<u32> num_packets;
be_t<u16> packets[8];
};
error_code sys_usbd_initialize(ppu_thread &ppu, vm::ptr<u32> handle);
error_code sys_usbd_finalize(ppu_thread &ppu, u32 handle);
error_code sys_usbd_get_device_list(ppu_thread &ppu, u32 handle,
vm::ptr<UsbInternalDevice> device_list,
u32 max_devices);
error_code sys_usbd_get_descriptor_size(ppu_thread &ppu, u32 handle,
u32 device_handle);
error_code sys_usbd_get_descriptor(ppu_thread &ppu, u32 handle,
u32 device_handle, vm::ptr<void> descriptor,
u32 desc_size);
error_code sys_usbd_register_ldd(ppu_thread &ppu, u32 handle,
vm::cptr<char> s_product, u16 slen_product);
error_code sys_usbd_unregister_ldd(ppu_thread &ppu, u32 handle,
vm::cptr<char> s_product, u16 slen_product);
error_code sys_usbd_open_pipe(ppu_thread &ppu, u32 handle, u32 device_handle,
u32 unk1, u64 unk2, u64 unk3, u32 endpoint,
u64 unk4);
error_code sys_usbd_open_default_pipe(ppu_thread &ppu, u32 handle,
u32 device_handle);
error_code sys_usbd_close_pipe(ppu_thread &ppu, u32 handle, u32 pipe_handle);
error_code sys_usbd_receive_event(ppu_thread &ppu, u32 handle,
vm::ptr<u64> arg1, vm::ptr<u64> arg2,
vm::ptr<u64> arg3);
error_code sys_usbd_detect_event(ppu_thread &ppu);
error_code sys_usbd_attach(ppu_thread &ppu, u32 handle, u32 unk1, u32 unk2,
u32 device_handle);
error_code sys_usbd_transfer_data(ppu_thread &ppu, u32 handle, u32 id_pipe,
vm::ptr<u8> buf, u32 buf_size,
vm::ptr<UsbDeviceRequest> request,
u32 type_transfer);
error_code
sys_usbd_isochronous_transfer_data(ppu_thread &ppu, u32 handle, u32 id_pipe,
vm::ptr<UsbDeviceIsoRequest> iso_request);
error_code sys_usbd_get_transfer_status(ppu_thread &ppu, u32 handle,
u32 id_transfer, u32 unk1,
vm::ptr<u32> result,
vm::ptr<u32> count);
error_code sys_usbd_get_isochronous_transfer_status(
ppu_thread &ppu, u32 handle, u32 id_transfer, u32 unk1,
vm::ptr<UsbDeviceIsoRequest> request, vm::ptr<u32> result);
error_code sys_usbd_get_device_location(ppu_thread &ppu, u32 handle,
u32 device_handle,
vm::ptr<u8> location);
error_code sys_usbd_send_event(ppu_thread &ppu);
error_code sys_usbd_event_port_send(ppu_thread &ppu, u32 handle, u64 arg1,
u64 arg2, u64 arg3);
error_code sys_usbd_allocate_memory(ppu_thread &ppu);
error_code sys_usbd_free_memory(ppu_thread &ppu);
error_code sys_usbd_get_device_speed(ppu_thread &ppu);
error_code sys_usbd_register_extra_ldd(ppu_thread &ppu, u32 handle,
vm::cptr<char> s_product,
u16 slen_product, u16 id_vendor,
u16 id_product_min, u16 id_product_max);
error_code sys_usbd_unregister_extra_ldd(ppu_thread &ppu, u32 handle,
vm::cptr<char> s_product,
u16 slen_product);
void connect_usb_controller(u8 index, input::product_type);
void handle_hotplug_event(bool connected);

View file

@ -0,0 +1,76 @@
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "sys_memory.h"
#include <array>
enum : u64 {
SYS_VM_STATE_INVALID = 0ull,
SYS_VM_STATE_UNUSED = 1ull,
SYS_VM_STATE_ON_MEMORY = 2ull,
SYS_VM_STATE_STORED = 4ull,
SYS_VM_POLICY_AUTO_RECOMMENDED = 1ull,
};
struct sys_vm_statistics_t {
be_t<u64> page_fault_ppu; // Number of bad virtual memory accesses from a PPU
// thread.
be_t<u64> page_fault_spu; // Number of bad virtual memory accesses from a SPU
// thread.
be_t<u64> page_in; // Number of virtual memory backup reading operations.
be_t<u64> page_out; // Number of virtual memory backup writing operations.
be_t<u32> pmem_total; // Total physical memory allocated for the virtual
// memory area.
be_t<u32> pmem_used; // Physical memory in use by the virtual memory area.
be_t<u64> timestamp;
};
// Block info
// LV2 virtual-memory block: one reserved virtual region backed by physical
// pages drawn from a memory container. Managed through IDM (id_base/step/count
// describe the ID space).
struct sys_vm_t {
static const u32 id_base = 0x1;
static const u32 id_step = 0x1;
static const u32 id_count = 16;
// Container physical pages are charged to (non-owning)
lv2_memory_container *const ct;
// Base virtual address and virtual size of the region
const u32 addr;
const u32 size;
// Currently committed physical size (can grow/shrink via append/return)
atomic_t<u32> psize;
sys_vm_t(u32 addr, u32 vsize, lv2_memory_container *ct, u32 psize);
~sys_vm_t();
SAVESTATE_INIT_POS(10);
// Savestate (de)serialization
sys_vm_t(utils::serial &ar);
void save(utils::serial &ar);
// Maps the top 4 bits of an address (256MB slot) to the owning block's ID
static std::array<atomic_t<u32>, id_count> g_ids;
static u32 find_id(u32 addr) { return g_ids[addr >> 28].load(); }
};
// Aux
class ppu_thread;
// SysCalls
error_code sys_vm_memory_map(ppu_thread &ppu, u64 vsize, u64 psize, u32 cid,
u64 flag, u64 policy, vm::ptr<u32> addr);
error_code sys_vm_memory_map_different(ppu_thread &ppu, u64 vsize, u64 psize,
u32 cid, u64 flag, u64 policy,
vm::ptr<u32> addr);
error_code sys_vm_unmap(ppu_thread &ppu, u32 addr);
error_code sys_vm_append_memory(ppu_thread &ppu, u32 addr, u64 size);
error_code sys_vm_return_memory(ppu_thread &ppu, u32 addr, u64 size);
error_code sys_vm_lock(ppu_thread &ppu, u32 addr, u32 size);
error_code sys_vm_unlock(ppu_thread &ppu, u32 addr, u32 size);
error_code sys_vm_touch(ppu_thread &ppu, u32 addr, u32 size);
error_code sys_vm_flush(ppu_thread &ppu, u32 addr, u32 size);
error_code sys_vm_invalidate(ppu_thread &ppu, u32 addr, u32 size);
error_code sys_vm_store(ppu_thread &ppu, u32 addr, u32 size);
error_code sys_vm_sync(ppu_thread &ppu, u32 addr, u32 size);
error_code sys_vm_test(ppu_thread &ppu, u32 addr, u32 size,
vm::ptr<u64> result);
error_code sys_vm_get_statistics(ppu_thread &ppu, u32 addr,
vm::ptr<sys_vm_statistics_t> stat);

2523
kernel/cellos/src/lv2.cpp Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,16 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_bdemu.h"
LOG_CHANNEL(sys_bdemu);
// Stubbed BD emulator command interface: logs the request parameters and
// reports success without emulating the command itself.
// cmd/a2/a3: raw command and arguments; buf/buf_len: guest payload buffer.
// Always returns CELL_OK.
error_code sys_bdemu_send_command(u64 cmd, u64 a2, u64 a3, vm::ptr<void> buf,
                                  u64 buf_len) {
  // Fixed log format: "cmd=0%llx" was missing the 'x' of the "0x" hex prefix
  sys_bdemu.todo("sys_bdemu_send_command(cmd=0x%llx, a2=0x%x, a3=0x%x, "
                 "buf=0x%x, buf_len=0x%x)",
                 cmd, a2, a3, buf, buf_len);
  return CELL_OK;
}

View file

@ -0,0 +1,12 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_btsetting.h"
LOG_CHANNEL(sys_btsetting);
// Stubbed Bluetooth-settings interface: logs the request and reports success;
// the actual BT configuration protocol is not emulated.
// cmd: operation code; msg: guest pointer to the operation's message payload.
error_code sys_btsetting_if(u64 cmd, vm::ptr<void> msg) {
sys_btsetting.todo("sys_btsetting_if(cmd=0x%llx, msg=*0x%x)", cmd, msg);
return CELL_OK;
}

View file

@ -0,0 +1,509 @@
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "util/serialization.hpp"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_cond.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_cond);
// Savestate constructor: restores key/name/mutex-id from the archive and
// re-resolves the owning mutex through IDM. The mutex may not have been
// recreated yet at this point — _mutex can be null and is fixed up later in
// on_id_create() via a postponed init callback.
lv2_cond::lv2_cond(utils::serial &ar) noexcept
: key(ar), name(ar), mtx_id(ar),
mutex(idm::check_unlocked<lv2_obj, lv2_mutex>(mtx_id)),
_mutex(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id)) // May be nullptr
{}
// Normal constructor: binds the condition variable to an already-resolved
// mutex (mutex0), keeping both a typed raw pointer and the owning shared_ptr.
lv2_cond::lv2_cond(u64 key, u64 name, u32 mtx_id,
shared_ptr<lv2_obj> mutex0) noexcept
: key(key), name(name), mtx_id(mtx_id),
mutex(static_cast<lv2_mutex *>(mutex0.get())), _mutex(mutex0) {}
// IDM creation hook: registers this cond with its mutex (bumping the mutex's
// cond_count). Returns CELL_ESRCH if the mutex no longer exists. During
// savestate deserialization the mutex may not be restored yet, so the
// registration is postponed until init code runs.
CellError lv2_cond::on_id_create() {
exists++;
// Shared helper so the same registration runs either immediately or deferred
static auto do_it = [](lv2_cond *_this) -> CellError {
if (lv2_obj::check(_this->mutex)) {
_this->mutex->cond_count++;
return {};
}
// Mutex has been destroyed, cannot create conditional variable
return CELL_ESRCH;
};
if (mutex) {
return do_it(this);
}
// mutex unresolved: only legal while restoring a savestate
ensure(!!Emu.DeserialManager());
Emu.PostponeInitCode([this]() {
if (!mutex) {
// Re-resolve the mutex now that all objects have been recreated
_mutex = static_cast<shared_ptr<lv2_obj>>(
ensure(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id)));
}
// Defer function
ensure(CellError{} == do_it(this));
});
return {};
}
// Savestate loader: constructs an lv2_cond from the archive and hands it to
// the generic lv2_obj load machinery.
std::function<void(void *)> lv2_cond::load(utils::serial &ar) {
return load_func(make_shared<lv2_cond>(exact_t<utils::serial &>(ar)));
}
// Savestate writer: persists the fields the serial constructor reads back
void lv2_cond::save(utils::serial &ar) { ar(key, name, mtx_id); }
// sys_cond_create syscall: creates a condition variable bound to an existing
// mutex. Writes the new object's ID to *cond_id on success.
// Errors: CELL_ESRCH if mutex_id is invalid; otherwise whatever
// lv2_obj::create reports (e.g. for IPC key conflicts).
error_code sys_cond_create(ppu_thread &ppu, vm::ptr<u32> cond_id, u32 mutex_id,
vm::ptr<sys_cond_attribute_t> attr) {
ppu.state += cpu_flag::wait;
sys_cond.trace("sys_cond_create(cond_id=*0x%x, mutex_id=0x%x, attr=*0x%x)",
cond_id, mutex_id, attr);
auto mutex = idm::get_unlocked<lv2_obj, lv2_mutex>(mutex_id);
if (!mutex) {
return CELL_ESRCH;
}
// Copy guest attributes before use
const auto _attr = *attr;
// Non-zero for process-shared (IPC) condition variables
const u64 ipc_key = lv2_obj::get_key(_attr);
if (ipc_key) {
sys_cond.warning("sys_cond_create(cond_id=*0x%x, attr=*0x%x): IPC=0x%016x",
cond_id, attr, ipc_key);
}
if (const auto error =
lv2_obj::create<lv2_cond>(_attr.pshared, ipc_key, _attr.flags, [&] {
return make_single<lv2_cond>(ipc_key, _attr.name_u64, mutex_id,
std::move(mutex));
})) {
return error;
}
// Flush pending state before touching guest memory
ppu.check_state();
*cond_id = idm::last_id();
return CELL_OK;
}
// sys_cond_destroy syscall: withdraws the condition variable from IDM.
// Errors: CELL_ESRCH if cond_id is invalid; CELL_EBUSY if any thread is
// still queued on the cond.
error_code sys_cond_destroy(ppu_thread &ppu, u32 cond_id) {
ppu.state += cpu_flag::wait;
sys_cond.trace("sys_cond_destroy(cond_id=0x%x)", cond_id);
const auto cond = idm::withdraw<lv2_obj, lv2_cond>(
cond_id, [&](lv2_cond &cond) -> CellError {
// Serialize against signal/wait operations on the bound mutex
std::lock_guard lock(cond.mutex->mutex);
if (atomic_storage<ppu_thread *>::load(cond.sq)) {
// Sleep queue not empty: refuse destruction
return CELL_EBUSY;
}
cond.mutex->cond_count--;
lv2_obj::on_id_destroy(cond, cond.key);
return {};
});
if (!cond) {
return CELL_ESRCH;
}
if (cond->key) {
sys_cond.warning("sys_cond_destroy(cond_id=0x%x): IPC=0x%016x", cond_id,
cond->key);
}
if (cond.ret) {
// CELL_EBUSY from the callback above
return cond.ret;
}
return CELL_OK;
}
// sys_cond_signal syscall: wakes at most one waiter of the condition
// variable, transferring it to the bound mutex's owner/wait chain. Retries
// the whole operation if this PPU thread got suspended mid-signal.
// Errors: CELL_ESRCH if cond_id is invalid.
error_code sys_cond_signal(ppu_thread &ppu, u32 cond_id) {
ppu.state += cpu_flag::wait;
sys_cond.trace("sys_cond_signal(cond_id=0x%x)", cond_id);
while (true) {
if (ppu.test_stopped()) {
// Emulation pausing/stopping: mark for re-execution of this syscall
ppu.state += cpu_flag::again;
return {};
}
bool finished = true;
ppu.state += cpu_flag::wait;
const auto cond = idm::check<lv2_obj, lv2_cond>(
cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond &cond) {
if (atomic_storage<ppu_thread *>::load(cond.sq)) {
std::lock_guard lock(cond.mutex->mutex);
if (ppu.state & cpu_flag::suspend) {
// Test if another signal caused the current thread to be
// suspended, in which case it needs to wait until the thread
// wakes up (otherwise the signal may cause unexpected results)
finished = false;
return;
}
// Pick one waiter according to the mutex's wakeup protocol
if (const auto cpu =
cond.schedule<ppu_thread>(cond.sq, cond.mutex->protocol)) {
if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
// Chosen waiter must re-run its syscall first; retry later
ppu.state += cpu_flag::again;
return;
}
// TODO: Is EBUSY returned after requeueing, on sys_cond_destroy?
if (cond.mutex->try_own(*cpu)) {
// Mutex was free: waiter owns it now, wake it up
cond.awake(cpu);
}
}
} else {
// No waiters: still synchronize on the mutex to order with waiters
cond.mutex->mutex.lock_unlock();
if (ppu.state & cpu_flag::suspend) {
finished = false;
}
}
});
if (!finished) {
continue;
}
if (!cond) {
return CELL_ESRCH;
}
return CELL_OK;
}
}
// sys_cond_signal_all syscall: moves every waiter off the cond's sleep queue.
// One waiter acquires (or is queued first on) the mutex and is awoken; the
// rest are appended to the mutex's wait chain. Retries if this PPU thread got
// suspended mid-signal. Errors: CELL_ESRCH if cond_id is invalid.
error_code sys_cond_signal_all(ppu_thread &ppu, u32 cond_id) {
ppu.state += cpu_flag::wait;
sys_cond.trace("sys_cond_signal_all(cond_id=0x%x)", cond_id);
while (true) {
if (ppu.test_stopped()) {
// Emulation pausing/stopping: mark for re-execution of this syscall
ppu.state += cpu_flag::again;
return {};
}
bool finished = true;
ppu.state += cpu_flag::wait;
const auto cond = idm::check<lv2_obj, lv2_cond>(
cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond &cond) {
if (atomic_storage<ppu_thread *>::load(cond.sq)) {
std::lock_guard lock(cond.mutex->mutex);
if (ppu.state & cpu_flag::suspend) {
// Test if another signal caused the current thread to be
// suspended, in which case it needs to wait until the thread
// wakes up (otherwise the signal may cause unexpected results)
finished = false;
return;
}
// If any waiter still has to re-run its syscall, retry everything
for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu) {
if (cpu->state & cpu_flag::again) {
ppu.state += cpu_flag::again;
return;
}
}
cpu_thread *result = nullptr;
// Detach the whole sleep queue atomically, then drain it
auto sq = cond.sq;
atomic_storage<ppu_thread *>::release(cond.sq, nullptr);
while (const auto cpu =
cond.schedule<ppu_thread>(sq, SYS_SYNC_PRIORITY)) {
if (cond.mutex->try_own(*cpu)) {
// Only the first drained waiter can own the free mutex
ensure(!std::exchange(result, cpu));
}
}
if (result) {
// Wake the new mutex owner; the rest wait on the mutex
cond.awake(result);
}
} else {
// No waiters: still synchronize on the mutex to order with waiters
cond.mutex->mutex.lock_unlock();
if (ppu.state & cpu_flag::suspend) {
finished = false;
}
}
});
if (!finished) {
continue;
}
if (!cond) {
return CELL_ESRCH;
}
return CELL_OK;
}
}
// sys_cond_signal_to syscall: wakes one specific thread (thread_id) waiting
// on the condition variable. Retries if this PPU thread got suspended
// mid-signal. Errors: CELL_ESRCH if cond_id or thread_id is invalid;
// CELL_EPERM (not treated as failure) if the thread was not waiting.
error_code sys_cond_signal_to(ppu_thread &ppu, u32 cond_id, u32 thread_id) {
ppu.state += cpu_flag::wait;
sys_cond.trace("sys_cond_signal_to(cond_id=0x%x, thread_id=0x%x)", cond_id,
thread_id);
while (true) {
if (ppu.test_stopped()) {
// Emulation pausing/stopping: mark for re-execution of this syscall
ppu.state += cpu_flag::again;
return {};
}
bool finished = true;
ppu.state += cpu_flag::wait;
// Callback result: -1 = bad thread_id, 1 = thread found and signaled,
// 0 = thread not waiting (or retry)
const auto cond = idm::check<lv2_obj, lv2_cond>(
cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond &cond) {
if (!idm::check_unlocked<named_thread<ppu_thread>>(thread_id)) {
return -1;
}
if (atomic_storage<ppu_thread *>::load(cond.sq)) {
std::lock_guard lock(cond.mutex->mutex);
if (ppu.state & cpu_flag::suspend) {
// Test if another signal caused the current thread to be
// suspended, in which case it needs to wait until the thread
// wakes up (otherwise the signal may cause unexpected results)
finished = false;
return 0;
}
// Linear scan of the sleep queue for the requested thread
for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu) {
if (cpu->id == thread_id) {
if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
// Target must re-run its syscall first; retry later
ppu.state += cpu_flag::again;
return 0;
}
ensure(cond.unqueue(cond.sq, cpu));
if (cond.mutex->try_own(*cpu)) {
// Mutex was free: target owns it now, wake it up
cond.awake(cpu);
}
return 1;
}
}
} else {
// No waiters: still synchronize on the mutex to order with waiters
cond.mutex->mutex.lock_unlock();
if (ppu.state & cpu_flag::suspend) {
finished = false;
return 0;
}
}
return 0;
});
if (!finished) {
continue;
}
if (!cond || cond.ret == -1) {
return CELL_ESRCH;
}
if (!cond.ret) {
// Thread exists but was not waiting on this cond
return not_an_error(CELL_EPERM);
}
return CELL_OK;
}
}
// Block the calling PPU thread on a condition variable until signalled or
// until `timeout` (microseconds, 0 = infinite) expires. The associated mutex
// must be owned by the caller; it is released while waiting and re-acquired
// (with its recursion count restored) before returning. Returns CELL_ESRCH on
// a bad id, CELL_EPERM if the mutex is not owned, and reports CELL_ETIMEDOUT
// through gpr[3] on timeout.
error_code sys_cond_wait(ppu_thread &ppu, u32 cond_id, u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_cond.trace("sys_cond_wait(cond_id=0x%x, timeout=%lld)", cond_id, timeout);
  // Further function result
  ppu.gpr[3] = CELL_OK;
  // Savestate scratch area: holds the wait position and recursion count when
  // this syscall is interrupted by a savestate capture
  auto &sstate = *ppu.optional_savestate_state;
  const auto cond = idm::get<lv2_obj, lv2_cond>(
      cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond &cond) -> s64 {
        if (!ppu.loaded_from_savestate &&
            atomic_storage<u32>::load(cond.mutex->control.raw().owner) !=
                ppu.id) {
          // Caller must own the mutex (unless resuming from a savestate)
          return -1;
        }
        lv2_obj::prepare_for_sleep(ppu);
        std::lock_guard lock(cond.mutex->mutex);
        const u64 syscall_state = sstate.try_read<u64>().second;
        sstate.clear();
        if (ppu.loaded_from_savestate) {
          // Re-enter the exact wait the savestate captured: bit 0 says whether
          // we were sleeping on the mutex or still on the condition queue
          if (syscall_state & 1) {
            // Mutex sleep
            ensure(!cond.mutex->try_own(ppu));
          } else {
            lv2_obj::emplace(cond.sq, &ppu);
          }
          cond.sleep(ppu, timeout);
          // High half of the saved state holds the mutex recursion count
          return static_cast<u32>(syscall_state >> 32);
        }
        // Register waiter
        lv2_obj::emplace(cond.sq, &ppu);
        // Unlock the mutex
        const u32 count = cond.mutex->lock_count.exchange(0);
        if (const auto cpu = cond.mutex->reown<ppu_thread>()) {
          if (cpu->state & cpu_flag::again) {
            ensure(cond.unqueue(cond.sq, &ppu));
            ppu.state += cpu_flag::again;
            return 0;
          }
          cond.mutex->append(cpu);
        }
        // Sleep current thread and schedule mutex waiter
        cond.sleep(ppu, timeout);
        // Save the recursive value
        return count;
      });
  if (!cond) {
    return CELL_ESRCH;
  }
  if (ppu.state & cpu_flag::again) {
    // Savestate capture in progress: the syscall will re-run after reload
    return {};
  }
  if (cond.ret < 0) {
    return CELL_EPERM;
  }
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      std::lock_guard lock(cond->mutex->mutex);
      bool mutex_sleep = false;
      bool cond_sleep = false;
      // Determine which wait queue (if any) still holds this thread so the
      // exact wait position can be serialized into the savestate
      for (auto cpu = atomic_storage<ppu_thread *>::load(cond->sq); cpu;
           cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          cond_sleep = true;
          break;
        }
      }
      for (auto cpu = atomic_storage<ppu_thread *>::load(
               cond->mutex->control.raw().sq);
           cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          mutex_sleep = true;
          break;
        }
      }
      if (!cond_sleep && !mutex_sleep) {
        break;
      }
      const u64 optional_syscall_state =
          u32{mutex_sleep} | (u64{static_cast<u32>(cond.ret)} << 32);
      sstate(optional_syscall_state);
      ppu.state += cpu_flag::again;
      return {};
    }
    // Brief busy-wait before sleeping, in case the signal is imminent
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        const u64 start_time = ppu.start_time;
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        std::lock_guard lock(cond->mutex->mutex);
        // Try to cancel the waiting
        if (cond->unqueue(cond->sq, &ppu)) {
          // TODO: Is EBUSY returned after reqeueing, on sys_cond_destroy?
          ppu.gpr[3] = CELL_ETIMEDOUT;
          // Own or requeue
          if (cond->mutex->try_own(ppu)) {
            break;
          }
        } else if (atomic_storage<u32>::load(
                       cond->mutex->control.raw().owner) == ppu.id) {
          break;
        }
        cond->mutex->sleep(ppu);
        ppu.start_time =
            start_time; // Restore start time because awake has been called
        timeout = 0;
        continue;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  // Verify ownership
  ensure(atomic_storage<u32>::load(cond->mutex->control.raw().owner) == ppu.id);
  // Restore the recursive value
  cond->mutex->lock_count.release(static_cast<u32>(cond.ret));
  return not_an_error(ppu.gpr[3]);
}

View file

@ -0,0 +1,464 @@
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "cellos/sys_event.h"
#include "sys_config.h"
LOG_CHANNEL(sys_config);
// Enums
// Formatter for sys_config service IDs: known services are printed by name;
// anything else falls back to a generic hex form (user services have the top
// bit set and are printed without it).
template <>
void fmt_class_string<sys_config_service_id>::format(std::string &out, u64 id) {
  const s64 signed_id = static_cast<s64>(id);
  const char *known = nullptr;
  switch (signed_id) {
  case SYS_CONFIG_SERVICE_PADMANAGER:
    known = "SYS_CONFIG_SERVICE_PADMANAGER";
    break;
  case SYS_CONFIG_SERVICE_PADMANAGER2:
    known = "SYS_CONFIG_SERVICE_PADMANAGER2";
    break;
  case SYS_CONFIG_SERVICE_USER_LIBPAD:
    known = "SYS_CONFIG_SERVICE_USER_LIBPAD";
    break;
  case SYS_CONFIG_SERVICE_USER_LIBKB:
    known = "SYS_CONFIG_SERVICE_USER_LIBKB";
    break;
  case SYS_CONFIG_SERVICE_USER_LIBMOUSE:
    known = "SYS_CONFIG_SERVICE_USER_LIBMOUSE";
    break;
  }
  if (known) {
    out += known;
    return;
  }
  if (signed_id < 0) {
    // Negative (top bit set) means a user-registered service id
    fmt::append(out, "SYS_CONFIG_SERVICE_USER_%llx", id & ~(1ull << 63));
  } else {
    fmt::append(out, "SYS_CONFIG_SERVICE_%llx", id);
  }
}
// Formatter for the service listener type enum (one-shot vs repeating).
template <>
void fmt_class_string<sys_config_service_listener_type>::format(
    std::string &out, u64 arg) {
  format_enum(out, arg, [](auto value) {
    switch (value) {
      STR_CASE(SYS_CONFIG_SERVICE_LISTENER_ONCE);
      STR_CASE(SYS_CONFIG_SERVICE_LISTENER_REPEATING);
    }
    return unknown;
  });
}
// Utilities
// Append a hex dump of `buffer` to `out` in the form "0x<two digits per
// byte>", or the literal "EMPTY" when the buffer has no data.
void dump_buffer(std::string &out, const std::vector<u8> &buffer) {
  if (!buffer.empty()) {
    // "0x" prefix is 2 characters, plus two hex digits per byte
    // (the previous reservation was one byte short, risking a realloc)
    out.reserve(out.size() + buffer.size() * 2 + 2);
    fmt::append(out, "0x");
    for (u8 x : buffer) {
      fmt::append(out, "%02x", x);
    }
  } else {
    fmt::append(out, "EMPTY");
  }
}
// LV2 Config
// One-time global initialization, guarded by an atomic CAS on m_state.
// Registers the two padmanager services so vsh believes a controller is
// connected at boot.
void lv2_config::initialize() {
  // Fast-path check first; CAS guarantees only one caller proceeds
  if (m_state || !m_state.compare_and_swap_test(0, 1)) {
    return;
  }
  // Register padmanager service, notifying vsh that a controller is connected
  static const u8 hid_info[0x1a] = {
      0x01, 0x01, // 2 unk
      0x02, 0x02, // 4
      0x00, 0x00, // 6
      0x00, 0x00, // 8
      0x00, 0x00, // 10
      0x05, 0x4c, // 12 vid
      0x02, 0x68, // 14 pid
      0x00, 0x10, // 16 unk2
      0x91, 0x88, // 18
      0x04, 0x00, // 20
      0x00, 0x07, // 22
      0x00, 0x00, // 24
      0x00, 0x00 // 26
  };
  // user_id for the padmanager seems to signify the controller port number, and
  // the buffer contains some sort of HID descriptor
  lv2_config_service::create(SYS_CONFIG_SERVICE_PADMANAGER, 0, 1, 0, hid_info,
                             0x1a)
      ->notify();
  lv2_config_service::create(SYS_CONFIG_SERVICE_PADMANAGER2, 0, 1, 0, hid_info,
                             0x1a)
      ->notify();
}
// Track a service event by its ID so sys_config_get_service_event can
// retrieve it later.
void lv2_config::add_service_event(shared_ptr<lv2_config_service_event> event) {
  const u32 event_id = event->id;
  std::lock_guard lock(m_mutex);
  events.emplace(event_id, std::move(event));
}
// Remove a tracked service event by ID. The extracted shared pointer is
// declared before the lock guard on purpose: destruction order guarantees the
// event's destructor (if this was the last reference) runs only after m_mutex
// has been released.
void lv2_config::remove_service_event(u32 id) {
  shared_ptr<lv2_config_service_event> ptr;
  std::lock_guard lock(m_mutex);
  if (auto it = events.find(id); it != events.end()) {
    ptr = std::move(it->second);
    events.erase(it);
  }
}
// Thread-state hook: on context destruction, unregister this event from the
// global lv2_config exactly once (m_destroyed guards against double removal).
lv2_config_service_event &
lv2_config_service_event::operator=(thread_state s) noexcept {
  const bool first_destroy =
      s == thread_state::destroying_context && !m_destroyed.exchange(true);
  if (first_destroy) {
    auto global = g_fxo->try_get<lv2_config>();
    if (global) {
      global->remove_service_event(id);
    }
  }
  return *this;
}
// Destructor delegates to the thread-state assignment so the global registry
// cleanup happens exactly once.
lv2_config_service_event::~lv2_config_service_event() noexcept {
  *this = thread_state::destroying_context;
}
// Mark all remaining events destroyed so their own destructors do not call
// back into this (already dying) object via remove_service_event.
lv2_config::~lv2_config() noexcept {
  for (auto &entry : events) {
    if (auto &event = entry.second) {
      // Avoid collision with lv2_config_service_event destructor
      event->m_destroyed = true;
    }
  }
}
// LV2 Config Service Listener
// Decide whether `service` matches this listener's filters (type exhaustion,
// service identity, verbosity threshold, and the padmanager data quirk).
bool lv2_config_service_listener::check_service(
    const lv2_config_service &service) const {
  // A one-shot listener that has already received an event matches nothing
  if (type == SYS_CONFIG_SERVICE_LISTENER_ONCE && !service_events.empty()) {
    return false;
  }
  // Must be the requested service
  if (service_id != service.id) {
    return false;
  }
  // Must be verbose enough
  if (min_verbosity > service.verbosity) {
    return false;
  }
  // realhw only seems to send the pad connected events to the listeners that
  // provided 0x01 as the first byte of their data buffer
  // TODO: Figure out how this filter works more properly
  if (service_id == SYS_CONFIG_SERVICE_PADMANAGER &&
      (data.empty() || data[0] != 0x01)) {
    return false;
  }
  // Event applies to this listener!
  return true;
}
// Record the event on this listener, then deliver it to the attached queue.
bool lv2_config_service_listener::notify(
    const shared_ptr<lv2_config_service_event> &event) {
  service_events.push_back(event);
  return event->notify();
}
// Create and deliver a service event for `service`, provided it passes this
// listener's filters.
bool lv2_config_service_listener::notify(
    const shared_ptr<lv2_config_service> &service) {
  if (!check_service(*service)) {
    return false;
  }
  // Create service event and notify queue!
  return notify(lv2_config_service_event::create(handle, service, *this));
}
void lv2_config_service_listener::notify_all() {
std::vector<shared_ptr<lv2_config_service>> services;
// Grab all events
idm::select<lv2_config_service>([&](u32 /*id*/, lv2_config_service &service) {
if (check_service(service)) {
services.push_back(service.get_shared_ptr());
}
});
// Sort services by timestamp
sort(services.begin(), services.end(),
[](const shared_ptr<lv2_config_service> &s1,
const shared_ptr<lv2_config_service> &s2) {
return s1->timestamp < s2->timestamp;
});
// Notify listener (now with services in sorted order)
for (auto &service : services) {
this->notify(service);
}
}
// LV2 Config Service
// Mark the service as unregistered, broadcast the change to listeners, and
// then release the IDM reference so the object can eventually be destroyed.
void lv2_config_service::unregister() {
  registered = false;
  // Notify listeners
  notify();
  // Allow this object to be destroyed by withdrawing it from the IDM
  // Note that it won't be destroyed while there are service events that hold a
  // reference to it
  idm::remove<lv2_config_service>(idm_id);
}
void lv2_config_service::notify() const {
std::vector<shared_ptr<lv2_config_service_listener>> listeners;
const shared_ptr<lv2_config_service> sptr = get_shared_ptr();
idm::select<lv2_config_service_listener>(
[&](u32 /*id*/, lv2_config_service_listener &listener) {
if (listener.check_service(*sptr))
listeners.push_back(listener.get_shared_ptr());
});
for (auto &listener : listeners) {
listener->notify(sptr);
}
}
bool lv2_config_service_event::notify() const {
const auto _handle = handle;
if (!_handle) {
return false;
}
// Send event
return _handle->notify(SYS_CONFIG_EVENT_SOURCE_SERVICE,
(static_cast<u64>(service->is_registered()) << 32) |
id,
service->get_size());
}
// LV2 Config Service Event
// Serialize this event into the guest-visible sys_config_service_event_t.
// The payload (verbosity/padding/data) is only written for live services.
void lv2_config_service_event::write(sys_config_service_event_t *dst) const {
  const bool is_registered = service->is_registered();
  dst->service_listener_handle = listener.get_id();
  dst->registered = is_registered;
  dst->service_id = service->id;
  dst->user_id = service->user_id;
  if (is_registered) {
    dst->verbosity = service->verbosity;
    dst->padding = service->padding;
    const usz data_size = service->data.size();
    dst->data_size = static_cast<u32>(data_size);
    std::memcpy(dst->data, service->data.data(), data_size);
  }
}
/*
* Syscalls
*/
// Open a sys_config handle bound to an existing event queue. Events for
// services/listeners created through this handle are delivered to that queue.
// Returns CELL_ESRCH if the queue does not exist, CELL_EAGAIN on allocation
// failure.
error_code sys_config_open(u32 equeue_hdl, vm::ptr<u32> out_config_hdl) {
  sys_config.trace("sys_config_open(equeue_hdl=0x%x, out_config_hdl=*0x%x)",
                   equeue_hdl, out_config_hdl);
  // Find queue with the given ID (non-const so the move below is a real move)
  auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_hdl);
  if (!queue) {
    return CELL_ESRCH;
  }
  // Initialize lv2_config global state (idempotent)
  g_fxo->get<lv2_config>().initialize();
  // Create a lv2_config_handle object
  const auto config = lv2_config_handle::create(std::move(queue));
  if (config) {
    *out_config_hdl = idm::last_id();
    return CELL_OK;
  }
  // Failed to allocate sys_config object
  return CELL_EAGAIN;
}
// Close a sys_config handle previously returned by sys_config_open.
error_code sys_config_close(u32 config_hdl) {
  sys_config.trace("sys_config_close(config_hdl=0x%x)", config_hdl);
  if (idm::remove<lv2_config_handle>(config_hdl)) {
    return CELL_OK;
  }
  return CELL_ESRCH;
}
// Copy the service event identified by `event_id` into the guest buffer `dst`
// of `size` bytes. The handle must be valid even though the event lookup
// itself is global.
error_code sys_config_get_service_event(u32 config_hdl, u32 event_id,
                                        vm::ptr<sys_config_service_event_t> dst,
                                        u64 size) {
  sys_config.trace("sys_config_get_service_event(config_hdl=0x%x, "
                   "event_id=0x%llx, dst=*0x%llx, size=0x%llx)",
                   config_hdl, event_id, dst, size);
  // Find sys_config handle object with the given ID
  if (!idm::get_unlocked<lv2_config_handle>(config_hdl)) {
    return CELL_ESRCH;
  }
  // Find service_event object
  const auto event = g_fxo->get<lv2_config>().find_event(event_id);
  if (!event) {
    return CELL_ESRCH;
  }
  // Check buffer fits
  if (!event->check_buffer_size(size)) {
    return CELL_EAGAIN;
  }
  // Write event to buffer
  event->write(dst.get_ptr());
  return CELL_OK;
}
// Register a listener for `service_id` events on the given config handle and
// immediately replay any matching past events to it. `in`/`size` provide an
// optional filter buffer whose semantics are not fully understood yet.
error_code sys_config_add_service_listener(
    u32 config_hdl, sys_config_service_id service_id, u64 min_verbosity,
    vm::ptr<void> in, u64 size, sys_config_service_listener_type type,
    vm::ptr<u32> out_listener_hdl) {
  sys_config.trace("sys_config_add_service_listener(config_hdl=0x%x, "
                   "service_id=0x%llx, min_verbosity=0x%llx, in=*0x%x, "
                   "size=%lld, type=0x%llx, out_listener_hdl=*0x%x)",
                   config_hdl, service_id, min_verbosity, in, size, type,
                   out_listener_hdl);
  // Find sys_config handle object with the given ID
  auto cfg = idm::get_unlocked<lv2_config_handle>(config_hdl);
  if (!cfg) {
    return CELL_ESRCH;
  }
  // Create service listener
  const auto listener = lv2_config_service_listener::create(
      cfg, service_id, min_verbosity, type, static_cast<u8 *>(in.get_ptr()),
      size);
  if (!listener) {
    return CELL_EAGAIN;
  }
  if (size > 0) {
    // Non-empty filter buffers are not fully understood yet; log for research
    std::string buf_str;
    dump_buffer(buf_str, listener->data);
    sys_config.todo(
        "Registered service listener for service %llx with non-zero buffer: %s",
        service_id, buf_str.c_str());
  }
  // Notify listener with all past events
  listener->notify_all();
  // Done!
  *out_listener_hdl = listener->get_id();
  return CELL_OK;
}
// Remove a previously registered service listener. Note: config_hdl is not
// validated by this implementation.
error_code sys_config_remove_service_listener(u32 config_hdl,
                                              u32 listener_hdl) {
  sys_config.trace(
      "sys_config_remove_service_listener(config_hdl=0x%x, listener_hdl=0x%x)",
      config_hdl, listener_hdl);
  // Remove listener from IDM
  const bool removed = idm::remove<lv2_config_service_listener>(listener_hdl);
  if (!removed) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}
// Register a service with the given id/user/verbosity and payload, then
// notify all matching listeners. Returns the new service handle through
// out_service_hdl.
error_code sys_config_register_service(u32 config_hdl,
                                       sys_config_service_id service_id,
                                       u64 user_id, u64 verbosity,
                                       vm::ptr<u8> data_buf, u64 size,
                                       vm::ptr<u32> out_service_hdl) {
  // Fixed log typo: parameter name is data_buf, not "data_but"
  sys_config.trace("sys_config_register_service(config_hdl=0x%x, "
                   "service_id=0x%llx, user_id=0x%llx, verbosity=0x%llx, "
                   "data_buf=*0x%llx, size=%lld, out_service_hdl=*0x%llx)",
                   config_hdl, service_id, user_id, verbosity, data_buf, size,
                   out_service_hdl);
  // Find sys_config handle object with the given ID
  const auto cfg = idm::get_unlocked<lv2_config_handle>(config_hdl);
  if (!cfg) {
    return CELL_ESRCH;
  }
  // Create service
  const auto service = lv2_config_service::create(
      service_id, user_id, verbosity, 0, data_buf.get_ptr(), size);
  if (!service) {
    return CELL_EAGAIN;
  }
  // Notify all listeners
  service->notify();
  // Done!
  *out_service_hdl = service->get_id();
  return CELL_OK;
}
// Withdraw a registered service and broadcast its removal to listeners.
// Note: config_hdl is not validated by this implementation.
error_code sys_config_unregister_service(u32 config_hdl, u32 service_hdl) {
  sys_config.trace(
      "sys_config_unregister_service(config_hdl=0x%x, service_hdl=0x%x)",
      config_hdl, service_hdl);
  // Withdraw the service from the IDM
  auto service = idm::withdraw<lv2_config_service>(service_hdl);
  if (!service) {
    return CELL_ESRCH;
  }
  // Unregister service
  service->unregister();
  // Done!
  return CELL_OK;
}
/*
* IO Events - TODO
*/
// Stub: IO events are not implemented yet; parameter meanings are unconfirmed
// (marked /*?*/). Always reports success.
error_code sys_config_get_io_event(u32 config_hdl, u32 event_id /*?*/,
                                   vm::ptr<void> out_buf /*?*/,
                                   u64 size /*?*/) {
  sys_config.todo("sys_config_get_io_event(config_hdl=0x%x, event_id=0x%x, "
                  "out_buf=*0x%x, size=%lld)",
                  config_hdl, event_id, out_buf, size);
  return CELL_OK;
}
// Stub: IO error listeners are not implemented; always reports success.
error_code sys_config_register_io_error_listener(u32 config_hdl) {
  sys_config.todo("sys_config_register_io_error_listener(config_hdl=0x%x)",
                  config_hdl);
  return CELL_OK;
}
// Stub: IO error listeners are not implemented; always reports success.
error_code sys_config_unregister_io_error_listener(u32 config_hdl) {
  sys_config.todo("sys_config_unregister_io_error_listener(config_hdl=0x%x)",
                  config_hdl);
  return CELL_OK;
}

View file

@ -0,0 +1,13 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_console.h"
LOG_CHANNEL(sys_console);
// Stub: should write `len` bytes from guest buffer `buf` to the system
// console/TTY; currently only logs the call and reports success.
error_code sys_console_write(vm::cptr<char> buf, u32 len) {
  sys_console.todo("sys_console_write(buf=*0x%x, len=0x%x)", buf, len);
  return CELL_OK;
}

View file

@ -0,0 +1,28 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_crypto_engine.h"
LOG_CHANNEL(sys_crypto_engine);
// Stub: should allocate a crypto engine instance and return its id.
error_code sys_crypto_engine_create(vm::ptr<u32> id) {
  sys_crypto_engine.todo("sys_crypto_engine_create(id=*0x%x)", id);
  return CELL_OK;
}
// Stub: should destroy a crypto engine instance created by
// sys_crypto_engine_create.
error_code sys_crypto_engine_destroy(u32 id) {
  sys_crypto_engine.todo("sys_crypto_engine_destroy(id=0x%x)", id);
  return CELL_OK;
}
// Stub: should fill `buffer` with `buffer_size` bytes of randomness.
// Currently only logs the call and reports success.
error_code sys_crypto_engine_random_generate(vm::ptr<void> buffer,
                                             u64 buffer_size) {
  // Fixed log format: the closing parenthesis was missing
  sys_crypto_engine.todo(
      "sys_crypto_engine_random_generate(buffer=*0x%x, buffer_size=0x%x)",
      buffer, buffer_size);
  return CELL_OK;
}

View file

@ -0,0 +1,128 @@
#include "stdafx.h"
#include "sys_dbg.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUInterpreter.h"
#include "Emu/Memory/vm_locking.h"
#include "rpcsx/fw/ps3/sys_lv2dbg.h"
#include "util/asm.hpp"
void ppu_register_function_at(u32 addr, u32 size,
ppu_intrp_func_t ptr = nullptr);
LOG_CHANNEL(sys_dbg);
// Read `size` bytes of guest memory at `address` (process `pid`) into the
// guest buffer `data`. Only one process (pid 1) exists in this emulator.
error_code sys_dbg_read_process_memory(s32 pid, u32 address, u32 size,
                                       vm::ptr<void> data) {
  sys_dbg.warning("sys_dbg_read_process_memory(pid=0x%x, address=0x%llx, "
                  "size=0x%x, data=*0x%x)",
                  pid, address, size, data);
  // Todo(TGEnigma): Process lookup (only 1 process exists right now)
  if (pid != 1 || !size || !data) {
    return CELL_LV2DBG_ERROR_DEINVALIDARGUMENTS;
  }
  vm::writer_lock lock;
  // Destination must be writable and the source readable
  if (!vm::check_addr(data.addr(), vm::page_writable, size) ||
      !vm::check_addr(address, vm::page_readable, size)) {
    return CELL_EFAULT;
  }
  std::memmove(data.get_ptr(), vm::base(address), size);
  return CELL_OK;
}
// Write `size` bytes from guest buffer `data` into guest memory at `address`
// (process `pid`). Writes that touch executable pages additionally refresh
// the PPU function mapping for the affected range. Only pid 1 exists.
error_code sys_dbg_write_process_memory(s32 pid, u32 address, u32 size,
                                        vm::cptr<void> data) {
  sys_dbg.warning("sys_dbg_write_process_memory(pid=0x%x, address=0x%llx, "
                  "size=0x%x, data=*0x%x)",
                  pid, address, size, data);
  // Todo(TGEnigma): Process lookup (only 1 process exists right now)
  if (pid != 1) {
    return CELL_LV2DBG_ERROR_DEINVALIDARGUMENTS;
  }
  if (!size || !data) {
    return CELL_LV2DBG_ERROR_DEINVALIDARGUMENTS;
  }
  // Check if data source is readable
  if (!vm::check_addr(data.addr(), vm::page_readable, size)) {
    return CELL_EFAULT;
  }
  // Check destination (can be read-only actually)
  if (!vm::check_addr(address, vm::page_readable, size)) {
    return CELL_EFAULT;
  }
  vm::writer_lock lock;
  // Again (the earlier checks raced with acquiring the lock)
  if (!vm::check_addr(data.addr(), vm::page_readable, size) ||
      !vm::check_addr(address, vm::page_readable, size)) {
    return CELL_EFAULT;
  }
  const u8 *data_ptr = static_cast<const u8 *>(data.get_ptr());
  if ((address >> 28) == 0xDu) {
    // Stack pages (4k pages is the exception here)
    std::memmove(vm::base(address), data_ptr, size);
    return CELL_OK;
  }
  const u32 end = address + size;
  // Walk the range in 64k steps, batching runs of executable pages into a
  // single copy followed by one recompiler re-registration
  for (u32 i = address, exec_update_size = 0; i < end;) {
    const u32 op_size =
        std::min<u32>(utils::align<u32>(i + 1, 0x10000), end) - i;
    const bool is_exec =
        vm::check_addr(i, vm::page_executable | vm::page_readable);
    if (is_exec) {
      exec_update_size += op_size;
      i += op_size;
    }
    if (!is_exec || i >= end) {
      // Commit executable data update
      // The read memory is also super ptr so memmove can work correctly on all
      // implementations
      const u32 before_addr = i - exec_update_size;
      std::memmove(vm::get_super_ptr(before_addr),
                   vm::get_super_ptr(data.addr() + (before_addr - address)),
                   exec_update_size);
      ppu_register_function_at(before_addr, exec_update_size);
      exec_update_size = 0;
      if (i >= end) {
        break;
      }
    }
    if (!is_exec) {
      std::memmove(vm::base(i), data_ptr + (i - address), op_size);
      i += op_size;
    }
  }
  return CELL_OK;
}

View file

@ -0,0 +1,732 @@
#include "stdafx.h"
#include "sys_event.h"
#include "Emu/IPC.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/SPUThread.h"
#include "sys_process.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_event);
// Construct a fresh event queue; the IDM id was just allocated by the caller.
// protocol/type/size are narrowed to u8 as on the real LV2 kernel.
lv2_event_queue::lv2_event_queue(u32 protocol, s32 type, s32 size, u64 name,
                                 u64 ipc_key) noexcept
    : id(idm::last_id()), protocol{static_cast<u8>(protocol)},
      type(static_cast<u8>(type)), size(static_cast<u8>(size)), name(name),
      key(ipc_key) {}
// Deserialize a queue from a savestate: fixed fields are popped in member
// initializer order (must match save()), then the pending event list.
lv2_event_queue::lv2_event_queue(utils::serial &ar) noexcept
    : id(idm::last_id()), protocol(ar), type(ar), size(ar), name(ar), key(ar) {
  ar(events);
}
// Savestate loader: construct the queue, register it under its IPC key via
// lv2_obj::load, and return a callback that stores the result into the IDM
// storage slot.
std::function<void(void *)> lv2_event_queue::load(utils::serial &ar) {
  auto queue = make_shared<lv2_event_queue>(exact_t<utils::serial &>(ar));
  return [ptr = lv2_obj::load(queue->key, queue)](void *storage) {
    *static_cast<atomic_ptr<lv2_obj> *>(storage) = ptr;
  };
}
// Serialize the queue; the field order must match the deserializing
// constructor above.
void lv2_event_queue::save(utils::serial &ar) {
  ar(protocol, type, size, name, key, events);
}
// Serialize a (possibly null or dead) queue reference as its IDM id; a zero
// id stands for "no queue".
void lv2_event_queue::save_ptr(utils::serial &ar, lv2_event_queue *q) {
  if (lv2_obj::check(q)) {
    ar(q->id);
  } else {
    ar(u32{0});
  }
}
// Deserialize a queue reference previously written by save_ptr. Returns the
// queue if it was already constructed; for a forward reference inside the
// savestate, schedules a deferred fixup writing into `queue` and returns null
// for now. Throws on an id that cannot belong to an event queue.
shared_ptr<lv2_event_queue>
lv2_event_queue::load_ptr(utils::serial &ar, shared_ptr<lv2_event_queue> &queue,
                          std::string_view msg) {
  const u32 id = ar.pop<u32>();
  if (!id) {
    return {};
  }
  if (auto q = idm::get_unlocked<lv2_obj, lv2_event_queue>(id)) {
    // Already initialized
    return q;
  }
  if (id >> 24 != id_base >> 24) {
    fmt::throw_exception("Failed in event queue pointer deserialization "
                         "(invalid ID): location: %s, id=0x%x",
                         msg, id);
  }
  Emu.PostponeInitCode([id, &queue, msg_str = std::string{msg}]() {
    // Defer resolving
    queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(id);
    if (!queue) {
      fmt::throw_exception("Failed in event queue pointer deserialization (not "
                           "found): location: %s, id=0x%x",
                           msg_str, id);
    }
  });
  // Null until resolved
  return {};
}
// Deserialize an event port; the queue connection is resolved (possibly
// deferred) from the serialized queue id.
lv2_event_port::lv2_event_port(utils::serial &ar)
    : type(ar), name(ar),
      queue(lv2_event_queue::load_ptr(ar, queue, "eventport")) {}
// Serialize the port and its (possibly absent) queue connection; order must
// match the deserializing constructor.
void lv2_event_port::save(utils::serial &ar) {
  ar(type, name);
  lv2_event_queue::save_ptr(ar, queue.get());
}
// Look up a process-shared event queue by IPC key. SYS_EVENT_QUEUE_LOCAL is
// not a shareable key, so it never matches.
shared_ptr<lv2_event_queue> lv2_event_queue::find(u64 ipc_key) {
  if (ipc_key == SYS_EVENT_QUEUE_LOCAL) {
    // Invalid IPC key
    return {};
  }
  auto &ipc = g_fxo->get<ipc_manager<lv2_event_queue, u64>>();
  return ipc.get(ipc_key);
}
extern void resume_spu_thread_group_from_waiting(spu_thread &spu);
// Deliver an event to this queue. With no thread waiting, the event is
// buffered (up to `size` entries; CELL_EBUSY when full). Otherwise the
// scheduled waiter is woken directly: PPU waiters get the event in gpr[4..7],
// SPU waiters in their inbound mailbox. CELL_EAGAIN is a fake error used to
// abort when a savestate capture interrupts the wakeup.
CellError lv2_event_queue::send(lv2_event event, bool *notified_thread,
                                lv2_event_port *port) {
  if (notified_thread) {
    *notified_thread = false;
  }
  std::lock_guard lock(mutex);
  if (!exists) {
    return CELL_ENOTCONN;
  }
  if (!pq && !sq) {
    if (events.size() < this->size + 0u) {
      // Save event
      events.emplace_back(event);
      return {};
    }
    return CELL_EBUSY;
  }
  if (type == SYS_PPU_QUEUE) {
    // Store event in registers
    auto &ppu = static_cast<ppu_thread &>(*schedule<ppu_thread>(pq, protocol));
    if (ppu.state & cpu_flag::again) {
      if (auto cpu = get_current_cpu_thread()) {
        cpu->state += cpu_flag::again;
        cpu->state += cpu_flag::exit;
      }
      sys_event.warning("Ignored event!");
      // Fake error for abort
      return CELL_EAGAIN;
    }
    std::tie(ppu.gpr[4], ppu.gpr[5], ppu.gpr[6], ppu.gpr[7]) = event;
    awake(&ppu);
    if (port &&
        ppu.prio.load().prio <
            ensure(cpu_thread::get_current<ppu_thread>())->prio.load().prio) {
      // Block event port disconnection for the time being of sending events
      // PPU -> lower prio PPU is the only case that can cause thread blocking
      port->is_busy++;
      ensure(notified_thread);
      *notified_thread = true;
    }
  } else {
    // Store event in In_MBox
    auto &spu = static_cast<spu_thread &>(*schedule<spu_thread>(sq, protocol));
    if (spu.state & cpu_flag::again) {
      if (auto cpu = get_current_cpu_thread()) {
        cpu->state += cpu_flag::exit + cpu_flag::again;
      }
      sys_event.warning("Ignored event!");
      // Fake error for abort
      return CELL_EAGAIN;
    }
    const u32 data1 = static_cast<u32>(std::get<1>(event));
    const u32 data2 = static_cast<u32>(std::get<2>(event));
    const u32 data3 = static_cast<u32>(std::get<3>(event));
    spu.ch_in_mbox.set_values(4, CELL_OK, data1, data2, data3);
    resume_spu_thread_group_from_waiting(spu);
  }
  return {};
}
// Create an event queue described by `attr` with capacity `size` (1..127).
// A non-local ipc_key makes the queue process-shared.
error_code sys_event_queue_create(cpu_thread &cpu, vm::ptr<u32> equeue_id,
                                  vm::ptr<sys_event_queue_attribute_t> attr,
                                  u64 ipc_key, s32 size) {
  cpu.state += cpu_flag::wait;
  sys_event.warning("sys_event_queue_create(equeue_id=*0x%x, attr=*0x%x, "
                    "ipc_key=0x%llx, size=%d)",
                    equeue_id, attr, ipc_key, size);
  // Queue capacity is limited to 127 entries
  if (size <= 0 || size > 127) {
    return CELL_EINVAL;
  }
  const u32 protocol = attr->protocol;
  if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_PRIORITY) {
    sys_event.error("sys_event_queue_create(): unknown protocol (0x%x)",
                    protocol);
    return CELL_EINVAL;
  }
  const u32 type = attr->type;
  if (type != SYS_PPU_QUEUE && type != SYS_SPU_QUEUE) {
    sys_event.error("sys_event_queue_create(): unknown type (0x%x)", type);
    return CELL_EINVAL;
  }
  const bool is_local = ipc_key == SYS_EVENT_QUEUE_LOCAL;
  const u32 pshared =
      is_local ? SYS_SYNC_NOT_PROCESS_SHARED : SYS_SYNC_PROCESS_SHARED;
  constexpr u32 flags = SYS_SYNC_NEWLY_CREATED;
  const u64 name = attr->name_u64;
  const auto error =
      lv2_obj::create<lv2_event_queue>(pshared, ipc_key, flags, [&]() {
        return make_shared<lv2_event_queue>(protocol, type, size, name,
                                            ipc_key);
      });
  if (error) {
    return error;
  }
  cpu.check_state();
  *equeue_id = idm::last_id();
  return CELL_OK;
}
// Destroy an event queue. Without SYS_EVENT_QUEUE_DESTROY_FORCE the call
// fails with EBUSY while threads are still waiting; with force, all waiters
// are woken with CELL_ECANCELED. Pending events and forcefully awoken waiters
// are logged before being discarded.
error_code sys_event_queue_destroy(ppu_thread &ppu, u32 equeue_id, s32 mode) {
  ppu.state += cpu_flag::wait;
  sys_event.warning("sys_event_queue_destroy(equeue_id=0x%x, mode=%d)",
                    equeue_id, mode);
  if (mode && mode != SYS_EVENT_QUEUE_DESTROY_FORCE) {
    return CELL_EINVAL;
  }
  std::vector<lv2_event> events;
  std::unique_lock<shared_mutex> qlock;
  cpu_thread *head{};
  const auto queue = idm::withdraw<lv2_obj, lv2_event_queue>(
      equeue_id, [&](lv2_event_queue &queue) -> CellError {
        qlock = std::unique_lock{queue.mutex};
        // Head of the waiter list: PPU or SPU queue depending on queue type
        head = queue.type == SYS_PPU_QUEUE
                   ? static_cast<cpu_thread *>(+queue.pq)
                   : +queue.sq;
        if (!mode && head) {
          return CELL_EBUSY;
        }
        if (!queue.events.empty()) {
          // Copy events for logging, does not empty
          events.insert(events.begin(), queue.events.begin(),
                        queue.events.end());
        }
        lv2_obj::on_id_destroy(queue, queue.key);
        if (!head) {
          qlock.unlock();
        } else {
          // A waiter being serialized for a savestate aborts the destruction
          for (auto cpu = head; cpu; cpu = cpu->get_next_cpu()) {
            if (cpu->state & cpu_flag::again) {
              ppu.state += cpu_flag::again;
              return CELL_EAGAIN;
            }
          }
        }
        return {};
      });
  if (!queue) {
    return CELL_ESRCH;
  }
  if (ppu.state & cpu_flag::again) {
    return {};
  }
  if (queue.ret) {
    return queue.ret;
  }
  std::string lost_data;
  // qlock is still held only when there were waiters to cancel (force mode)
  if (qlock.owns_lock()) {
    if (sys_event.warning) {
      u32 size = 0;
      for (auto cpu = head; cpu; cpu = cpu->get_next_cpu()) {
        size++;
      }
      fmt::append(lost_data, "Forcefully awaken waiters (%u):\n", size);
      for (auto cpu = head; cpu; cpu = cpu->get_next_cpu()) {
        lost_data += cpu->get_name();
        lost_data += '\n';
      }
    }
    if (queue->type == SYS_PPU_QUEUE) {
      // PPU waiters return CELL_ECANCELED from their receive call
      for (auto cpu = +queue->pq; cpu; cpu = cpu->next_cpu) {
        cpu->gpr[3] = CELL_ECANCELED;
        queue->append(cpu);
      }
      atomic_storage<ppu_thread *>::release(queue->pq, nullptr);
      lv2_obj::awake_all();
    } else {
      // SPU waiters receive CELL_ECANCELED through the inbound mailbox
      for (auto cpu = +queue->sq; cpu; cpu = cpu->next_cpu) {
        cpu->ch_in_mbox.set_values(1, CELL_ECANCELED);
        resume_spu_thread_group_from_waiting(*cpu);
      }
      atomic_storage<spu_thread *>::release(queue->sq, nullptr);
    }
    qlock.unlock();
  }
  if (sys_event.warning) {
    if (!events.empty()) {
      fmt::append(lost_data, "Unread queue events (%u):\n", events.size());
    }
    for (const lv2_event &evt : events) {
      fmt::append(lost_data, "data0=0x%x, data1=0x%x, data2=0x%x, data3=0x%x\n",
                  std::get<0>(evt), std::get<1>(evt), std::get<2>(evt),
                  std::get<3>(evt));
    }
    if (!lost_data.empty()) {
      sys_event.warning("sys_event_queue_destroy(): %s", lost_data);
    }
  }
  return CELL_OK;
}
// Non-blocking receive: pop up to `size` buffered events into event_array and
// report the count through `number`. PPU queues only.
error_code sys_event_queue_tryreceive(ppu_thread &ppu, u32 equeue_id,
                                      vm::ptr<sys_event_t> event_array,
                                      s32 size, vm::ptr<u32> number) {
  ppu.state += cpu_flag::wait;
  sys_event.trace("sys_event_queue_tryreceive(equeue_id=0x%x, "
                  "event_array=*0x%x, size=%d, number=*0x%x)",
                  equeue_id, event_array, size, number);
  const auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_id);
  if (!queue) {
    return CELL_ESRCH;
  }
  if (queue->type != SYS_PPU_QUEUE) {
    return CELL_EINVAL;
  }
  // 127 is the maximum queue capacity (enforced at creation), so this stack
  // buffer always suffices
  std::array<sys_event_t, 127> events;
  std::unique_lock lock(queue->mutex);
  if (!queue->exists) {
    return CELL_ESRCH;
  }
  s32 count = 0;
  while (count < size && !queue->events.empty()) {
    auto &dest = events[count++];
    std::tie(dest.source, dest.data1, dest.data2, dest.data3) =
        queue->events.front();
    queue->events.pop_front();
  }
  lock.unlock();
  // Write guest memory only after check_state (which may yield)
  ppu.check_state();
  std::copy_n(events.begin(), count, event_array.get_ptr());
  *number = count;
  return CELL_OK;
}
// Blocking receive: return a buffered event immediately, or sleep on the
// queue until an event arrives, the timeout (microseconds, 0 = infinite)
// expires, or the wait is cancelled. The event is delivered in gpr[4..7];
// gpr[3] carries the final status (CELL_OK or CELL_ETIMEDOUT).
error_code sys_event_queue_receive(ppu_thread &ppu, u32 equeue_id,
                                   vm::ptr<sys_event_t> dummy_event,
                                   u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_event.trace(
      "sys_event_queue_receive(equeue_id=0x%x, *0x%x, timeout=0x%llx)",
      equeue_id, dummy_event, timeout);
  ppu.gpr[3] = CELL_OK;
  const auto queue = idm::get<lv2_obj, lv2_event_queue>(
      equeue_id,
      [&,
       notify = lv2_obj::notify_all_t()](lv2_event_queue &queue) -> CellError {
        if (queue.type != SYS_PPU_QUEUE) {
          return CELL_EINVAL;
        }
        lv2_obj::prepare_for_sleep(ppu);
        std::lock_guard lock(queue.mutex);
        // "/dev_flash/vsh/module/msmw2.sprx" seems to rely on some cryptic
        // shared memory behaviour that we don't emulate correctly
        // This is a hack to avoid waiting for 1m40s every time we boot vsh
        if (queue.key == 0x8005911000000012 && Emu.IsVsh()) {
          sys_event.todo("sys_event_queue_receive(equeue_id=0x%x, *0x%x, "
                         "timeout=0x%llx) Bypassing timeout for msmw2.sprx",
                         equeue_id, dummy_event, timeout);
          timeout = 1;
        }
        if (queue.events.empty()) {
          // Nothing buffered: enqueue as a waiter; EBUSY marks "went to sleep"
          queue.sleep(ppu, timeout);
          lv2_obj::emplace(queue.pq, &ppu);
          return CELL_EBUSY;
        }
        std::tie(ppu.gpr[4], ppu.gpr[5], ppu.gpr[6], ppu.gpr[7]) =
            queue.events.front();
        queue.events.pop_front();
        return {};
      });
  if (!queue) {
    return CELL_ESRCH;
  }
  if (queue.ret) {
    if (queue.ret != CELL_EBUSY) {
      return queue.ret;
    }
  } else {
    return CELL_OK;
  }
  // If cancelled, gpr[3] will be non-zero. Other registers must contain event
  // data.
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      // Savestate capture: if still queued, mark the syscall for re-execution
      std::lock_guard lock_rsx(queue->mutex);
      for (auto cpu = +queue->pq; cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }
      break;
    }
    // Brief busy-wait before sleeping, in case the signal is imminent
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        if (!atomic_storage<ppu_thread *>::load(queue->pq)) {
          // Waiters queue is empty, so the thread must have been signaled
          queue->mutex.lock_unlock();
          break;
        }
        std::lock_guard lock(queue->mutex);
        if (!queue->unqueue(queue->pq, &ppu)) {
          // Already dequeued by a sender: the event is (or will be) delivered
          break;
        }
        ppu.gpr[3] = CELL_ETIMEDOUT;
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  return not_an_error(ppu.gpr[3]);
}
// Discard all pending (buffered) events of a queue without waking waiters.
error_code sys_event_queue_drain(ppu_thread &ppu, u32 equeue_id) {
  ppu.state += cpu_flag::wait;
  sys_event.trace("sys_event_queue_drain(equeue_id=0x%x)", equeue_id);
  const auto queue =
      idm::check<lv2_obj, lv2_event_queue>(equeue_id, [](lv2_event_queue &q) {
        std::lock_guard lock(q.mutex);
        q.events.clear();
      });
  if (!queue) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}
// Create an event port of the given type. Local ports connect to queues by
// handle, IPC ports by key. A non-zero `name` becomes the event source value;
// otherwise pid|port-id is used at send time.
error_code sys_event_port_create(cpu_thread &cpu, vm::ptr<u32> eport_id,
                                 s32 port_type, u64 name) {
  cpu.state += cpu_flag::wait;
  sys_event.warning(
      "sys_event_port_create(eport_id=*0x%x, port_type=%d, name=0x%llx)",
      eport_id, port_type, name);
  // Replaced magic constant 3 with SYS_EVENT_PORT_IPC (the only other type
  // accepted by sys_event_port_connect_ipc)
  if (port_type != SYS_EVENT_PORT_LOCAL && port_type != SYS_EVENT_PORT_IPC) {
    sys_event.error("sys_event_port_create(): unknown port type (%d)",
                    port_type);
    return CELL_EINVAL;
  }
  if (const u32 id = idm::make<lv2_obj, lv2_event_port>(port_type, name)) {
    cpu.check_state();
    *eport_id = id;
    return CELL_OK;
  }
  return CELL_EAGAIN;
}
// Destroy an event port; fails with EISCONN while it is still connected to a
// queue.
error_code sys_event_port_destroy(ppu_thread &ppu, u32 eport_id) {
  ppu.state += cpu_flag::wait;
  sys_event.warning("sys_event_port_destroy(eport_id=0x%x)", eport_id);
  const auto port = idm::withdraw<lv2_obj, lv2_event_port>(
      eport_id, [](lv2_event_port &port) -> CellError {
        // Refuse to destroy a port that is still connected
        return lv2_obj::check(port.queue) ? CELL_EISCONN : CellError{};
      });
  if (!port) {
    return CELL_ESRCH;
  }
  if (port.ret) {
    return port.ret;
  }
  return CELL_OK;
}
// Connect a local-type port to an event queue, both referenced by handle.
error_code sys_event_port_connect_local(cpu_thread &cpu, u32 eport_id,
                                        u32 equeue_id) {
  cpu.state += cpu_flag::wait;
  sys_event.warning(
      "sys_event_port_connect_local(eport_id=0x%x, equeue_id=0x%x)", eport_id,
      equeue_id);
  // Lookups and the connection must be atomic wrt. other IDM operations
  std::lock_guard lock(id_manager::g_mutex);
  const auto port = idm::check_unlocked<lv2_obj, lv2_event_port>(eport_id);
  if (!port) {
    return CELL_ESRCH;
  }
  if (!idm::check_unlocked<lv2_obj, lv2_event_queue>(equeue_id)) {
    return CELL_ESRCH;
  }
  if (port->type != SYS_EVENT_PORT_LOCAL) {
    return CELL_EINVAL;
  }
  if (lv2_obj::check(port->queue)) {
    return CELL_EISCONN;
  }
  port->queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_id);
  return CELL_OK;
}
// Connect an IPC-type port to a process-shared queue looked up by IPC key.
error_code sys_event_port_connect_ipc(ppu_thread &ppu, u32 eport_id,
                                      u64 ipc_key) {
  ppu.state += cpu_flag::wait;
  sys_event.warning("sys_event_port_connect_ipc(eport_id=0x%x, ipc_key=0x%x)",
                    eport_id, ipc_key);
  if (!ipc_key) {
    return CELL_EINVAL;
  }
  auto queue = lv2_event_queue::find(ipc_key);
  std::lock_guard lock(id_manager::g_mutex);
  const auto port = idm::check_unlocked<lv2_obj, lv2_event_port>(eport_id);
  if (!queue || !port) {
    return CELL_ESRCH;
  }
  if (port->type != SYS_EVENT_PORT_IPC) {
    return CELL_EINVAL;
  }
  if (lv2_obj::check(port->queue)) {
    return CELL_EISCONN;
  }
  port->queue = std::move(queue);
  return CELL_OK;
}
// Disconnect a port from its queue. Fails with EBUSY while a send through
// this port is still in flight (is_busy), or ENOTCONN when nothing is
// connected.
error_code sys_event_port_disconnect(ppu_thread &ppu, u32 eport_id) {
  ppu.state += cpu_flag::wait;
  sys_event.warning("sys_event_port_disconnect(eport_id=0x%x)", eport_id);
  std::lock_guard lock(id_manager::g_mutex);
  const auto port = idm::check_unlocked<lv2_obj, lv2_event_port>(eport_id);
  if (!port) {
    return CELL_ESRCH;
  }
  if (!lv2_obj::check(port->queue)) {
    return CELL_ENOTCONN;
  }
  if (port->is_busy) {
    return CELL_EBUSY;
  }
  port->queue.reset();
  return CELL_OK;
}
// Send an event (source, data1..3) through a connected port. The source is
// the port name, or pid|port-id when the name is zero. May briefly mark the
// port busy while a lower-priority PPU waiter is being woken, to block port
// disconnection during delivery.
error_code sys_event_port_send(u32 eport_id, u64 data1, u64 data2, u64 data3) {
  const auto cpu = cpu_thread::get_current();
  const auto ppu = cpu ? cpu->try_get<ppu_thread>() : nullptr;
  if (cpu) {
    cpu->state += cpu_flag::wait;
  }
  sys_event.trace("sys_event_port_send(eport_id=0x%x, data1=0x%llx, "
                  "data2=0x%llx, data3=0x%llx)",
                  eport_id, data1, data2, data3);
  bool notified_thread = false;
  const auto port = idm::check<lv2_obj, lv2_event_port>(
      eport_id,
      [&, notify = lv2_obj::notify_all_t()](lv2_event_port &port) -> CellError {
        if (ppu && ppu->loaded_from_savestate) {
          // Restore the mid-send busy state captured by the savestate
          port.is_busy++;
          notified_thread = true;
          return {};
        }
        if (lv2_obj::check(port.queue)) {
          const u64 source =
              port.name ? port.name
                        : (u64{process_getpid() + 0u} << 32) | u64{eport_id};
          return port.queue->send(
              source, data1, data2, data3, &notified_thread,
              ppu && port.queue->type == SYS_PPU_QUEUE ? &port : nullptr);
        }
        return CELL_ENOTCONN;
      });
  if (!port) {
    return CELL_ESRCH;
  }
  if (ppu && notified_thread) {
    // Wait to be requeued
    if (ppu->test_stopped()) {
      // Wait again on savestate load
      ppu->state += cpu_flag::again;
    }
    port->is_busy--;
    return CELL_OK;
  }
  if (port.ret) {
    if (port.ret == CELL_EAGAIN) {
      // Not really an error code exposed to games (thread has raised
      // cpu_flag::again)
      return not_an_error(CELL_EAGAIN);
    }
    if (port.ret == CELL_EBUSY) {
      return not_an_error(CELL_EBUSY);
    }
    return port.ret;
  }
  return CELL_OK;
}

View file

@ -0,0 +1,514 @@
#include "stdafx.h"
#include "sys_event_flag.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_event_flag);
// Savestate constructor: fixed fields are consumed by the member
// initializers in archive order, then the live bit pattern is read.
lv2_event_flag::lv2_event_flag(utils::serial &ar)
    : protocol(ar), key(ar), type(ar), name(ar) {
  ar(pattern);
}
// Savestate load hook used by the LV2 object registry.
std::function<void(void *)> lv2_event_flag::load(utils::serial &ar) {
  return load_func(make_shared<lv2_event_flag>(exact_t<utils::serial &>(ar)));
}
// Serialize in exactly the field order the constructor above reads.
void lv2_event_flag::save(utils::serial &ar) {
  ar(protocol, key, type, name, pattern);
}
// Create an event flag with initial pattern 'init' and return its handle
// through 'id'. Validates protocol (FIFO/PRIORITY) and waiter type
// (SINGLE/MULTIPLE) before construction.
error_code sys_event_flag_create(ppu_thread &ppu, vm::ptr<u32> id,
                                 vm::ptr<sys_event_flag_attribute_t> attr,
                                 u64 init) {
  ppu.state += cpu_flag::wait;
  sys_event_flag.warning(
      "sys_event_flag_create(id=*0x%x, attr=*0x%x, init=0x%llx)", id, attr,
      init);
  if (!id || !attr) {
    return CELL_EFAULT;
  }
  const auto attr_copy = *attr;
  switch (const u32 protocol = attr_copy.protocol) {
  case SYS_SYNC_FIFO:
  case SYS_SYNC_PRIORITY:
    break;
  default:
    sys_event_flag.error("sys_event_flag_create(): unknown protocol (0x%x)",
                         protocol);
    return CELL_EINVAL;
  }
  switch (const u32 type = attr_copy.type) {
  case SYS_SYNC_WAITER_SINGLE:
  case SYS_SYNC_WAITER_MULTIPLE:
    break;
  default:
    sys_event_flag.error("sys_event_flag_create(): unknown type (0x%x)", type);
    return CELL_EINVAL;
  }
  const u64 ipc_key = lv2_obj::get_key(attr_copy);
  if (const auto error = lv2_obj::create<lv2_event_flag>(
          attr_copy.pshared, ipc_key, attr_copy.flags, [&] {
            return make_shared<lv2_event_flag>(attr_copy.protocol, ipc_key,
                                               attr_copy.type,
                                               attr_copy.name_u64, init);
          })) {
    return error;
  }
  ppu.check_state();
  *id = idm::last_id();
  return CELL_OK;
}
// Destroy an event flag; refuses with EBUSY while any thread is queued on it.
error_code sys_event_flag_destroy(ppu_thread &ppu, u32 id) {
  ppu.state += cpu_flag::wait;
  sys_event_flag.warning("sys_event_flag_destroy(id=0x%x)", id);
  const auto removed = idm::withdraw<lv2_obj, lv2_event_flag>(
      id, [&](lv2_event_flag &f) -> CellError {
        // A non-empty sleep queue blocks destruction
        if (f.sq) {
          return CELL_EBUSY;
        }
        lv2_obj::on_id_destroy(f, f.key);
        return {};
      });
  if (!removed) {
    return CELL_ESRCH;
  }
  if (removed.ret) {
    return removed.ret;
  }
  return CELL_OK;
}
// Block the calling PPU thread until the flag satisfies (bitptn, mode), with
// an optional timeout in microseconds (0 = infinite). The observed pattern
// is written to *result on every path (0 when the wait never started).
error_code sys_event_flag_wait(ppu_thread &ppu, u32 id, u64 bitptn, u32 mode,
                               vm::ptr<u64> result, u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_event_flag.trace("sys_event_flag_wait(id=0x%x, bitptn=0x%llx, mode=0x%x, "
                       "result=*0x%x, timeout=0x%llx)",
                       id, bitptn, mode, result, timeout);
  // Fix function arguments for external access
  // (gpr[3..6] double as the wait record that sys_event_flag_set/cancel read
  // and write while this thread sleeps: [3]=status, [4]=bitptn, [5]=mode,
  // [6]=resulting pattern)
  ppu.gpr[3] = -1;
  ppu.gpr[4] = bitptn;
  ppu.gpr[5] = mode;
  ppu.gpr[6] = 0;
  // Always set result
  struct store_result {
    vm::ptr<u64> ptr;
    u64 val = 0;
    ~store_result() noexcept {
      if (ptr) {
        cpu_thread::get_current()->check_state();
        *ptr = val;
      }
    }
  } store{result};
  if (!lv2_event_flag::check_mode(mode)) {
    sys_event_flag.error("sys_event_flag_wait(): unknown mode (0x%x)", mode);
    return CELL_EINVAL;
  }
  const auto flag = idm::get<lv2_obj, lv2_event_flag>(
      id,
      [&, notify = lv2_obj::notify_all_t()](lv2_event_flag &flag) -> CellError {
        // Fast path: try to satisfy the wait without taking the mutex
        if (flag.pattern
                .fetch_op([&](u64 &pat) {
                  return lv2_event_flag::check_pattern(pat, bitptn, mode,
                                                       &ppu.gpr[6]);
                })
                .second) {
          // TODO: is it possible to return EPERM in this case?
          return {};
        }
        lv2_obj::prepare_for_sleep(ppu);
        std::lock_guard lock(flag.mutex);
        // Re-check under the lock: the pattern may have changed meanwhile
        if (flag.pattern
                .fetch_op([&](u64 &pat) {
                  return lv2_event_flag::check_pattern(pat, bitptn, mode,
                                                       &ppu.gpr[6]);
                })
                .second) {
          return {};
        }
        // SINGLE-waiter flags admit only one sleeping thread at a time
        if (flag.type == SYS_SYNC_WAITER_SINGLE && flag.sq) {
          return CELL_EPERM;
        }
        flag.sleep(ppu, timeout);
        lv2_obj::emplace(flag.sq, &ppu);
        // EBUSY here is internal: it selects the wait loop below
        return CELL_EBUSY;
      });
  if (!flag) {
    return CELL_ESRCH;
  }
  if (flag.ret) {
    if (flag.ret != CELL_EBUSY) {
      return flag.ret;
    }
  } else {
    store.val = ppu.gpr[6];
    return CELL_OK;
  }
  // Queued: spin/sleep until signaled, canceled, timed out or stopped
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      // Emulator stop/savestate: if still queued, mark the wait for replay
      std::lock_guard lock(flag->mutex);
      for (auto cpu = +flag->sq; cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }
      break;
    }
    // Brief spin in case the signal is imminent
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        if (!atomic_storage<ppu_thread *>::load(flag->sq)) {
          // Waiters queue is empty, so the thread must have been signaled
          flag->mutex.lock_unlock();
          break;
        }
        std::lock_guard lock(flag->mutex);
        if (!flag->unqueue(flag->sq, &ppu)) {
          // A signaler removed us concurrently; treat as signaled
          break;
        }
        ppu.gpr[3] = CELL_ETIMEDOUT;
        ppu.gpr[6] = flag->pattern;
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  // gpr[3]/gpr[6] were filled either above or by the signaling thread
  store.val = ppu.gpr[6];
  return not_an_error(ppu.gpr[3]);
}
// Non-blocking variant of sys_event_flag_wait: test the pattern once and
// either consume it (CELL_OK) or report CELL_EBUSY.
error_code sys_event_flag_trywait(ppu_thread &ppu, u32 id, u64 bitptn, u32 mode,
                                  vm::ptr<u64> result) {
  ppu.state += cpu_flag::wait;
  sys_event_flag.trace(
      "sys_event_flag_trywait(id=0x%x, bitptn=0x%llx, mode=0x%x, result=*0x%x)",
      id, bitptn, mode, result);
  // The result pointer is written unconditionally on scope exit
  struct result_writer {
    vm::ptr<u64> ptr;
    u64 val = 0;
    ~result_writer() noexcept {
      if (ptr) {
        cpu_thread::get_current()->check_state();
        *ptr = val;
      }
    }
  } writer{result};
  if (!lv2_event_flag::check_mode(mode)) {
    sys_event_flag.error("sys_event_flag_trywait(): unknown mode (0x%x)", mode);
    return CELL_EINVAL;
  }
  u64 captured{};
  const auto flag =
      idm::check<lv2_obj, lv2_event_flag>(id, [&](lv2_event_flag &flag) {
        // Single atomic test-and-update of the pattern
        return flag.pattern
            .fetch_op([&](u64 &pat) {
              return lv2_event_flag::check_pattern(pat, bitptn, mode,
                                                   &captured);
            })
            .second;
      });
  if (!flag) {
    return CELL_ESRCH;
  }
  if (!flag.ret) {
    // Condition not currently satisfied
    return not_an_error(CELL_EBUSY);
  }
  writer.val = captured;
  return CELL_OK;
}
// Set bits in the flag and wake every waiter whose (bitptn, mode) request is
// now satisfied. All waiters are evaluated and the pattern is committed in a
// single atomic operation (retried if it races with other mutators).
error_code sys_event_flag_set(cpu_thread &cpu, u32 id, u64 bitptn) {
  cpu.state += cpu_flag::wait;
  // Warning: may be called from SPU thread.
  sys_event_flag.trace("sys_event_flag_set(id=0x%x, bitptn=0x%llx)", id,
                       bitptn);
  const auto flag = idm::get_unlocked<lv2_obj, lv2_event_flag>(id);
  if (!flag) {
    return CELL_ESRCH;
  }
  // Shortcut: all requested bits are already set, nothing can change
  if ((flag->pattern & bitptn) == bitptn) {
    return CELL_OK;
  }
  if (lv2_obj::notify_all_t notify; true) {
    std::lock_guard lock(flag->mutex);
    // Savestate in progress: abort and replay this syscall later
    for (auto ppu = +flag->sq; ppu; ppu = ppu->next_cpu) {
      if (ppu->state & cpu_flag::again) {
        cpu.state += cpu_flag::again;
        // Fake error for abort
        return not_an_error(CELL_EAGAIN);
      }
    }
    u32 count = 0;
    // Process all waiters in single atomic op
    for (u64 pattern = flag->pattern, to_write = pattern, dependant_mask = 0;;
         to_write = pattern, dependant_mask = 0) {
      count = 0;
      to_write |= bitptn;
      dependant_mask = 0;
      // gpr[7] marks waiters already visited in this pass
      for (auto ppu = +flag->sq; ppu; ppu = ppu->next_cpu) {
        ppu->gpr[7] = 0;
      }
      auto first = +flag->sq;
      // Pick the next unvisited waiter, honoring priority order when the
      // flag protocol is SYS_SYNC_PRIORITY (otherwise queue order)
      auto get_next = [&]() -> ppu_thread * {
        s32 prio = smax;
        ppu_thread *it{};
        for (auto ppu = first; ppu; ppu = ppu->next_cpu) {
          if (!ppu->gpr[7] && (flag->protocol != SYS_SYNC_PRIORITY ||
                               ppu->prio.load().prio <= prio)) {
            it = ppu;
            prio = ppu->prio.load().prio;
          }
        }
        if (it) {
          // Mark it so it won't reappear
          it->gpr[7] = 1;
        }
        return it;
      };
      while (auto it = get_next()) {
        auto &ppu = *it;
        // The waiter stashed its request in gpr[4]/gpr[5] (see
        // sys_event_flag_wait)
        const u64 pattern = ppu.gpr[4];
        const u64 mode = ppu.gpr[5];
        // If it's OR mode, set bits must have waken up the thread therefore no
        // dependency on old value
        const u64 dependant_mask_or =
            ((mode & 0xf) == SYS_EVENT_FLAG_WAIT_OR ||
                     (bitptn & pattern & to_write) == pattern
                 ? 0
                 : pattern);
        if (lv2_event_flag::check_pattern(to_write, pattern, mode,
                                          &ppu.gpr[6])) {
          dependant_mask |= dependant_mask_or;
          ppu.gpr[3] = CELL_OK;
          count++;
          if (!to_write) {
            break;
          }
        } else {
          ppu.gpr[3] = -1;
        }
      }
      dependant_mask &= ~bitptn;
      // Commit: succeeds only if no bit the woken set depended on changed
      // since the pass started; otherwise retry with the fresh pattern
      auto [new_val, ok] = flag->pattern.fetch_op([&](u64 &x) {
        if ((x ^ pattern) & dependant_mask) {
          return false;
        }
        x |= bitptn;
        // Clear the bit-wise difference
        x &= ~((pattern | bitptn) & ~to_write);
        return true;
      });
      if (ok) {
        break;
      }
      pattern = new_val;
    }
    if (!count) {
      return CELL_OK;
    }
    // Remove waiters
    for (auto next_cpu = &flag->sq; *next_cpu;) {
      auto &ppu = **next_cpu;
      if (ppu.gpr[3] == CELL_OK) {
        atomic_storage<ppu_thread *>::release(*next_cpu, ppu.next_cpu);
        ppu.next_cpu = nullptr;
        flag->append(&ppu);
        continue;
      }
      next_cpu = &ppu.next_cpu;
    };
    lv2_obj::awake_all();
  }
  return CELL_OK;
}
// Atomically AND the pattern with 'bitptn' (clears the bits that are zero in
// bitptn). Waiters are not re-evaluated by this operation.
error_code sys_event_flag_clear(ppu_thread &ppu, u32 id, u64 bitptn) {
  ppu.state += cpu_flag::wait;
  sys_event_flag.trace("sys_event_flag_clear(id=0x%x, bitptn=0x%llx)", id,
                       bitptn);
  const auto found = idm::check<lv2_obj, lv2_event_flag>(
      id, [&](lv2_event_flag &f) { f.pattern &= bitptn; });
  if (!found) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}
// Wake every waiter with CELL_ECANCELED (queue order, protocol ignored) and
// optionally report the number of canceled threads through 'num'.
error_code sys_event_flag_cancel(ppu_thread &ppu, u32 id, vm::ptr<u32> num) {
  ppu.state += cpu_flag::wait;
  sys_event_flag.trace("sys_event_flag_cancel(id=0x%x, num=*0x%x)", id, num);
  if (num)
    *num = 0;
  const auto flag = idm::get_unlocked<lv2_obj, lv2_event_flag>(id);
  if (!flag) {
    return CELL_ESRCH;
  }
  u32 value = 0;
  {
    lv2_obj::notify_all_t notify;
    std::lock_guard lock(flag->mutex);
    // Savestate in progress: abort and replay this syscall later
    for (auto cpu = +flag->sq; cpu; cpu = cpu->next_cpu) {
      if (cpu->state & cpu_flag::again) {
        ppu.state += cpu_flag::again;
        return {};
      }
    }
    // Get current pattern
    const u64 pattern = flag->pattern;
    // Signal all threads to return CELL_ECANCELED (protocol does not matter)
    while (auto ppu = flag->schedule<ppu_thread>(flag->sq, SYS_SYNC_FIFO)) {
      ppu->gpr[3] = CELL_ECANCELED;
      ppu->gpr[6] = pattern;
      value++;
      flag->append(ppu);
    }
    if (value) {
      lv2_obj::awake_all();
    }
  }
  static_cast<void>(ppu.test_stopped());
  if (num)
    *num = value;
  return CELL_OK;
}
// Read the current bit pattern without modifying it. On ESRCH the output is
// still zeroed when the pointer is valid; EFAULT is only checked afterwards.
error_code sys_event_flag_get(ppu_thread &ppu, u32 id, vm::ptr<u64> flags) {
  ppu.state += cpu_flag::wait;
  sys_event_flag.trace("sys_event_flag_get(id=0x%x, flags=*0x%x)", id, flags);
  const auto found = idm::check<lv2_obj, lv2_event_flag>(
      id, [](lv2_event_flag &flag) { return +flag.pattern; });
  ppu.check_state();
  if (!found) {
    // Zero the output even on lookup failure
    if (flags) {
      *flags = 0;
    }
    return CELL_ESRCH;
  }
  if (!flags) {
    return CELL_EFAULT;
  }
  *flags = found.ret;
  return CELL_OK;
}

3248
kernel/cellos/src/sys_fs.cpp Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,267 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/IdManager.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/System.h"
#include "Emu/system_utils.hpp"
#include "cellos/sys_process.h"
#include "util/StrUtil.h"
#include "util/Thread.h"
#include "util/sysinfo.hpp"
#include "util/v128.hpp"
#include "Emu/Cell/timers.hpp"
#include "sys_game.h"
LOG_CHANNEL(sys_game);
struct system_sw_version {
system_sw_version() {
f64 version_f = 0;
if (!try_to_float(&version_f, utils::get_firmware_version(), 0.0f,
99.9999f))
sys_game.error("Error parsing firmware version");
version = static_cast<usz>(version_f * 10000);
}
system_sw_version(const system_sw_version &) = delete;
system_sw_version &operator=(const system_sw_version &) = delete;
~system_sw_version() = default;
atomic_t<u64> version;
};
// 16-byte persistent "board storage" blob backed by a file under dev_hdd1.
// The blob lives in an atomic big-endian v128, so reads/writes from
// concurrent syscalls need no lock; the backing file is rewritten only on
// destruction and only if write() was ever called.
struct board_storage {
public:
  // Copy the current blob into 'buffer' (must hold at least 16 bytes).
  bool read(u8 *buffer) {
    if (!buffer)
      return false;
    const auto data = storage.load();
    memcpy(buffer, &data, size);
    return true;
  }
  // Replace the blob with 16 bytes from 'buffer' and mark it dirty.
  bool write(u8 *buffer) {
    if (!buffer)
      return false;
    storage.store(read_from_ptr<be_t<v128>>(buffer));
    written = true;
    return true;
  }
  // Initialize to all-ones, then overlay whatever the backing file holds.
  board_storage() {
    memset(&storage.raw(), -1, size);
    if (fs::file file; file.open(file_path, fs::read))
      file.read(&storage.raw(), std::min(file.size(), size));
  }
  board_storage(const board_storage &) = delete;
  board_storage &operator=(const board_storage &) = delete;
  // Flush the blob back to disk if it was modified during this session.
  ~board_storage() {
    if (written) {
      if (fs::file file;
          file.open(file_path, fs::create + fs::write + fs::lock)) {
        file.write(&storage.raw(), size);
        file.trunc(size);
      }
    }
  }
private:
  atomic_be_t<v128> storage;
  bool written = false;
  const std::string file_path =
      rpcs3::utils::get_hdd1_dir() + "/caches/board_storage.bin";
  static constexpr u64 size = sizeof(v128);
};
// LV2 game watchdog: restarts the game when the armed timeout elapses
// without the guest calling _sys_game_watchdog_clear. Runs as a named
// background thread polling every 50ms.
struct watchdog_t {
  // Packed state mutated atomically from syscall context.
  struct alignas(8) control_t {
    bool needs_restart = false; // guest reset the countdown (start/clear)
    bool active = false;        // watchdog armed
    char pad[sizeof(u32) - sizeof(bool) * 2]{};
    u32 timeout = 0; // in microseconds (see _sys_game_watchdog_start)
  };
  atomic_t<control_t> control;
  void operator()() {
    u64 start_time = get_system_time();
    u64 old_time = start_time;
    u64 current_time = old_time;
    constexpr u64 sleep_time = 50'000;
    while (thread_ctrl::state() != thread_state::aborting) {
      if (Emu.GetStatus(false) == system_state::paused) {
        // Shift the deadline forward so paused time doesn't count
        start_time += current_time - old_time;
        old_time = current_time;
        thread_ctrl::wait_for(sleep_time);
        current_time = get_system_time();
        continue;
      }
      old_time = std::exchange(current_time, get_system_time());
      // Consume a pending needs_restart flag, keeping the previous state
      const auto old = control
                           .fetch_op([&](control_t &data) {
                             if (data.needs_restart) {
                               data.needs_restart = false;
                               return true;
                             }
                             return false;
                           })
                           .first;
      if (old.active && old.needs_restart) {
        // Countdown restarts from now
        start_time = current_time;
        old_time = current_time;
        continue;
      }
      if (old.active && current_time - start_time >= old.timeout) {
        sys_game.success("Watchdog timeout! Restarting the game...");
        Emu.CallFromMainThread([]() { Emu.Restart(false); });
        return;
      }
      thread_ctrl::wait_for(sleep_time);
    }
  }
  static constexpr auto thread_name = "LV2 Watchdog Thread"sv;
};
// Request termination of the watchdog thread, if it was ever instantiated.
void abort_lv2_watchdog() {
  const auto thr = g_fxo->try_get<named_thread<watchdog_t>>();
  if (!thr) {
    return;
  }
  sys_game.notice("Aborting %s...", thr->thread_name);
  *thr = thread_state::aborting;
}
// Arm the watchdog. 'timeout' arrives in seconds; it is converted to
// microseconds and rounded down to a multiple of 64, mirroring firmware.
// Returns CELL_EABORT if the watchdog is already armed.
error_code _sys_game_watchdog_start(u32 timeout) {
  sys_game.trace("sys_game_watchdog_start(timeout=%d)", timeout);
  // According to disassembly
  timeout *= 1'000'000;
  timeout &= -64;
  const bool armed = g_fxo->get<named_thread<watchdog_t>>()
                         .control
                         .fetch_op([&](watchdog_t::control_t &data) {
                           if (data.active) {
                             return false;
                           }
                           data.needs_restart = true;
                           data.active = true;
                           data.timeout = timeout;
                           return true;
                         })
                         .second;
  if (!armed) {
    return CELL_EABORT;
  }
  return CELL_OK;
}
// Disarm the watchdog if it is currently armed; always reports success.
error_code _sys_game_watchdog_stop() {
  sys_game.trace("sys_game_watchdog_stop()");
  auto &wdog = g_fxo->get<named_thread<watchdog_t>>();
  wdog.control.fetch_op([](watchdog_t::control_t &data) {
    if (!data.active) {
      return false;
    }
    data.active = false;
    return true;
  });
  return CELL_OK;
}
// "Pet" the watchdog: request a countdown restart unless one is already
// pending or the watchdog is not armed. Always reports success.
error_code _sys_game_watchdog_clear() {
  sys_game.trace("sys_game_watchdog_clear()");
  auto &wdog = g_fxo->get<named_thread<watchdog_t>>();
  wdog.control.fetch_op([](watchdog_t::control_t &data) {
    if (!data.active || data.needs_restart) {
      return false;
    }
    data.needs_restart = true;
    return true;
  });
  return CELL_OK;
}
// Override the reported system software version. Root permission required.
error_code _sys_game_set_system_sw_version(u64 version) {
  sys_game.trace("sys_game_set_system_sw_version(version=%d)", version);
  if (!g_ps3_process_info.has_root_perm()) {
    return CELL_ENOSYS;
  }
  g_fxo->get<system_sw_version>().version = version;
  return CELL_OK;
}
// Return the version parsed from the firmware string (scaled by 10000), or
// the override set via _sys_game_set_system_sw_version.
u64 _sys_game_get_system_sw_version() {
  sys_game.trace("sys_game_get_system_sw_version()");
  return g_fxo->get<system_sw_version>().version;
}
// Read the 16-byte board storage blob into 'buffer'; '*status' is written
// with 0x00 on success and 0xFF on failure.
error_code _sys_game_board_storage_read(vm::ptr<u8> buffer,
                                        vm::ptr<u8> status) {
  sys_game.trace("sys_game_board_storage_read(buffer=*0x%x, status=*0x%x)",
                 buffer, status);
  if (!buffer || !status) {
    return CELL_EFAULT;
  }
  const bool ok = g_fxo->get<board_storage>().read(buffer.get_ptr());
  *status = ok ? 0x00 : 0xFF;
  return CELL_OK;
}
// Write 16 bytes from 'buffer' into board storage; '*status' is written
// with 0x00 on success and 0xFF on failure.
error_code _sys_game_board_storage_write(vm::ptr<u8> buffer,
                                         vm::ptr<u8> status) {
  sys_game.trace("sys_game_board_storage_write(buffer=*0x%x, status=*0x%x)",
                 buffer, status);
  if (!buffer || !status) {
    return CELL_EFAULT;
  }
  const bool ok = g_fxo->get<board_storage>().write(buffer.get_ptr());
  *status = ok ? 0x00 : 0xFF;
  return CELL_OK;
}
// Report the real-time clock status; always 0 (healthy) in emulation.
error_code _sys_game_get_rtc_status(vm::ptr<s32> status) {
  sys_game.trace("sys_game_get_rtc_status(status=*0x%x)", status);
  if (!status) {
    return CELL_EFAULT;
  }
  *status = 0;
  return CELL_OK;
}

View file

@ -0,0 +1,102 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_gamepad.h"
LOG_CHANNEL(sys_gamepad);
// Stub handlers for the YCON gamepad interface, dispatched by packet id via
// sys_gamepad_ycon_if (syscall 621). Each one only logs its arguments and
// reports success; the underlying device protocol is not emulated.
u32 sys_gamepad_ycon_initalize(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_initalize(in=%d, out=%d) -> CELL_OK", in,
                   out);
  return CELL_OK;
}
u32 sys_gamepad_ycon_finalize(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_finalize(in=%d, out=%d) -> CELL_OK", in,
                   out);
  return CELL_OK;
}
u32 sys_gamepad_ycon_has_input_ownership(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo(
      "sys_gamepad_ycon_has_input_ownership(in=%d, out=%d) -> CELL_OK", in,
      out);
  return CELL_OK;
}
u32 sys_gamepad_ycon_enumerate_device(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo(
      "sys_gamepad_ycon_enumerate_device(in=%d, out=%d) -> CELL_OK", in, out);
  return CELL_OK;
}
u32 sys_gamepad_ycon_get_device_info(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_get_device_info(in=%d, out=%d) -> CELL_OK",
                   in, out);
  return CELL_OK;
}
u32 sys_gamepad_ycon_read_raw_report(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_read_raw_report(in=%d, out=%d) -> CELL_OK",
                   in, out);
  return CELL_OK;
}
u32 sys_gamepad_ycon_write_raw_report(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo(
      "sys_gamepad_ycon_write_raw_report(in=%d, out=%d) -> CELL_OK", in, out);
  return CELL_OK;
}
u32 sys_gamepad_ycon_get_feature(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_get_feature(in=%d, out=%d) -> CELL_OK", in,
                   out);
  return CELL_OK;
}
u32 sys_gamepad_ycon_set_feature(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_set_feature(in=%d, out=%d) -> CELL_OK", in,
                   out);
  return CELL_OK;
}
u32 sys_gamepad_ycon_is_gem(vm::ptr<u8> in, vm::ptr<u8> out) {
  sys_gamepad.todo("sys_gamepad_ycon_is_gem(in=%d, out=%d) -> CELL_OK", in,
                   out);
  return CELL_OK;
}
// syscall(621,packet_id,u8 *in,u8 *out)
// Talk:LV2_Functions_and_Syscalls#Syscall_621_.280x26D.29 gamepad_if usage
u32 sys_gamepad_ycon_if(u8 packet_id, vm::ptr<u8> in, vm::ptr<u8> out) {
  // Packet ids 0..9 map one-to-one onto the YCON handlers above
  using ycon_handler = u32 (*)(vm::ptr<u8>, vm::ptr<u8>);
  static constexpr ycon_handler dispatch[]{
      &sys_gamepad_ycon_initalize,
      &sys_gamepad_ycon_finalize,
      &sys_gamepad_ycon_has_input_ownership,
      &sys_gamepad_ycon_enumerate_device,
      &sys_gamepad_ycon_get_device_info,
      &sys_gamepad_ycon_read_raw_report,
      &sys_gamepad_ycon_write_raw_report,
      &sys_gamepad_ycon_get_feature,
      &sys_gamepad_ycon_set_feature,
      &sys_gamepad_ycon_is_gem,
  };
  if (packet_id < 10) {
    return dispatch[packet_id](in, out);
  }
  sys_gamepad.error(
      "sys_gamepad_ycon_if(packet_id=*%d, in=%d, out=%d), unknown packet id",
      packet_id, in, out);
  return CELL_OK;
}

View file

@ -0,0 +1,41 @@
#include "stdafx.h"
#include "sys_gpio.h"
#include "Emu/Cell/ErrorCodes.h"
LOG_CHANNEL(sys_gpio);
// Read a GPIO device value. Retail consoles expose neither LEDs nor DIP
// switches, so a valid device always reads back as 0.
error_code sys_gpio_get(u64 device_id, vm::ptr<u64> value) {
  sys_gpio.trace("sys_gpio_get(device_id=0x%llx, value=*0x%x)", device_id,
                 value);
  switch (device_id) {
  case SYS_GPIO_LED_DEVICE_ID:
  case SYS_GPIO_DIP_SWITCH_DEVICE_ID:
    break;
  default:
    return CELL_ESRCH;
  }
  if (!value.try_write(0)) {
    return CELL_EFAULT;
  }
  return CELL_OK;
}
// Write a GPIO device value. Retail consoles dont have LEDs or DIPs
// switches, hence the syscall can't modify devices's value: LED writes are
// silently accepted, DIP switches are read-only.
error_code sys_gpio_set(u64 device_id, u64 mask, u64 value) {
  sys_gpio.trace("sys_gpio_set(device_id=0x%llx, mask=0x%llx, value=0x%llx)",
                 device_id, mask, value);
  if (device_id == SYS_GPIO_LED_DEVICE_ID) {
    return CELL_OK;
  }
  if (device_id == SYS_GPIO_DIP_SWITCH_DEVICE_ID) {
    return CELL_EINVAL;
  }
  return CELL_ESRCH;
}

View file

@ -0,0 +1,193 @@
#include "stdafx.h"
#include "sys_hid.h"
#include "Emu/Memory/vm_var.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "rpcsx/fw/ps3/cellPad.h"
#include "sys_process.h"
LOG_CHANNEL(sys_hid);
// Open a HID manager handle for a device class (1 = pad, 2 = kb, 3 = mouse).
// Pad opens also lazily bring up the cellPad subsystem.
error_code sys_hid_manager_open(ppu_thread &ppu, u64 device_type, u64 port_no,
                                vm::ptr<u32> handle) {
  sys_hid.todo("sys_hid_manager_open(device_type=0x%llx, port_no=0x%llx, "
               "handle=*0x%llx)",
               device_type, port_no, handle);
  // device type == 1 = pad, 2 = kb, 3 = mouse
  if (device_type > 3) {
    return CELL_EINVAL;
  }
  if (!handle) {
    return CELL_EFAULT;
  }
  // 'handle' starts at 0x100 in realhw, and increments every time
  // sys_hid_manager_open is called however, sometimes the handle is reused
  // when opening sys_hid_manager again (even when the previous one hasn't
  // been closed yet) - maybe when processes/threads get killed/finish they
  // also release their handles?
  static u32 next_handle = 0x100;
  *handle = next_handle++;
  if (device_type == 1) {
    cellPadInit(ppu, 7);
    cellPadSetPortSetting(::narrow<u32>(port_no) /* 0 */,
                          CELL_PAD_SETTING_LDD | CELL_PAD_SETTING_PRESS_ON |
                              CELL_PAD_SETTING_SENSOR_ON);
  }
  return CELL_OK;
}
// HID manager ioctl: replays the responses observed on real hardware for the
// known pkg_ids (2/5 report a DS3-like vid/pid, 0x68 is echoed back).
error_code sys_hid_manager_ioctl(u32 hid_handle, u32 pkg_id, vm::ptr<void> buf,
                                 u64 buf_size) {
  sys_hid.todo("sys_hid_manager_ioctl(hid_handle=0x%x, pkg_id=0x%llx, "
               "buf=*0x%x, buf_size=0x%llx)",
               hid_handle, pkg_id, buf, buf_size);
  // clang-format off
  // From realhw syscall dump when vsh boots
  // SC count | handle | pkg_id | *buf (in)                                                                 | *buf (out)                                                                | size -> ret
  // ---------|--------|--------|---------------------------------------------------------------------------|---------------------------------------------------------------------------|------------
  // 28893    | 0x101  | 0x2    | 000000000000000000000000000000000000000000                                | 054c02680102020000000000000008035000001c1f                                | 21 -> 0
  // 28894    | 0x101  | 0x3    | 00000000                                                                  | 00000000                                                                  | 4 -> 0
  // 28895    | 0x101  | 0x5    | 00000000                                                                  | 00000000                                                                  | 4 -> 0
  // 28896    | 0x101  | 0x68   | 01000000d0031cb020169e502006b7f80000000000606098000000000000000000000000d | 01000000d0031cb020169e502006b7f80000000000606098000000000000000000000000d | 64 -> 0
  //          |        |        | 0031c90000000002006bac400000000d0031cb0000000002006b4d0                   | 0031c90000000002006bac400000000d0031cb0000000002006b4d0                   |
  // 28898    | 0x102  | 0x2    | 000000000000000000000000000000000000000000                                | 054c02680102020000000000000008035000001c1f                                | 21 -> 0
  // 28901    | 0x100  | 0x64   | 00000001                                                                  | 00000001                                                                  | 4 -> 0xffffffff80010002 # x3::hidportassign
  // 2890     | 0x100  | 0x65   | 6b49d200                                                                  | 6b49d200                                                                  | 4 -> 0xffffffff80010002 # x3::hidportassign
  // 28903    | 0x100  | 0x66   | 00000001                                                                  | 00000001                                                                  | 4 -> 0 # x3::hidportassign
  // 28904    | 0x100  | 0x0    | 00000001000000ff000000ff000000ff000000ff000000010000000100000001000000010 | 00000001000000ff000000ff000000ff000000ff000000010000000100000001000000010 | 68 -> 0 # x3::hidportassign
  //          |        |        | 000000000000000000000000000000000000001000000010000000100000001           | 000000000000000000000000000000000000001000000010000000100000001           |
  // 28907    | 0x101  | 0x3    | 00000001                                                                  | 00000001                                                                  | 4 -> 0
  // 28908    | 0x101  | 0x5    | 00000001                                                                  | 00000001                                                                  | 4 -> 0
  // 29404    | 0x100  | 0x4    | 00                                                                        | ee                                                                        | 1 -> 0
  // *** repeats 30600, 31838, 33034, 34233, 35075 (35075 is x3::hidportassign) ***
  // 35076    | 0x100  | 0x0    | 00000001000000ff000000ff000000ff000000ff000000320000003200000032000000320 | 00000001000000ff000000ff000000ff000000ff000000320000003200000032000000320 | 68 -> 0
  //          |        |        | 000003200000032000000320000003200002710000027100000271000002710           | 000003200000032000000320000003200002710000027100000271000002710           |
  // *** more 0x4 that have buf(in)=00 and buf(out)=ee ***
  // clang-format on
  if (pkg_id == 2) {
    // Return what realhw seems to return
    // TODO: Figure out what this corresponds to
    auto info = vm::static_ptr_cast<sys_hid_info_2>(buf);
    info->vid = 0x054C; // Sony
    info->pid = 0x0268; // DualShock 3
    // Tail of the 21-byte realhw response (see dump above)
    static constexpr u8 realhw[17] = {0x01, 0x02, 0x02, 0x00, 0x00, 0x00,
                                      0x00, 0x00, 0x00, 0x00, 0x08, 0x03,
                                      0x50, 0x00, 0x00, 0x1c, 0x1f};
    memcpy(info->unk, realhw, sizeof(realhw));
  } else if (pkg_id == 5) {
    auto info = vm::static_ptr_cast<sys_hid_info_5>(buf);
    info->vid = 0x054C;
    info->pid = 0x0268;
  }
  // pkg_id == 6 == setpressmode?
  else if (pkg_id == 0x68) {
    [[maybe_unused]] auto info = vm::static_ptr_cast<sys_hid_ioctl_68>(buf);
    // info->unk2 = 0;
  }
  return CELL_OK;
}
// Stubbed: always report that the calling process has input focus (1).
error_code sys_hid_manager_check_focus() {
  // spammy sys_hid.todo("sys_hid_manager_check_focus()");
  return not_an_error(1);
}
// Unknown HID manager syscall (513): arguments are logged and ignored.
error_code sys_hid_manager_513(u64 a1, u64 a2, vm::ptr<void> buf,
                               u64 buf_size) {
  sys_hid.todo("sys_hid_manager_513(%llx, %llx, buf=%llx, buf_size=%llx)", a1,
               a2, buf, buf_size);
  return CELL_OK;
}
// Partially understood HID manager syscall (514); pkg 0xE appears to be a
// focus-style poll and pkg 0xD carries a small info struct.
error_code sys_hid_manager_514(u32 pkg_id, vm::ptr<void> buf, u64 buf_size) {
  // 0xE is polled constantly, so keep it at trace level to avoid log spam
  (pkg_id == 0xE ? sys_hid.trace : sys_hid.todo)(
      "sys_hid_manager_514(pkg_id=0x%x, buf=*0x%x, buf_size=0x%llx)", pkg_id,
      buf, buf_size);
  if (pkg_id == 0xE) {
    // buf holds device_type
    // auto device_type = vm::static_ptr_cast<u8>(buf);
    // spammy sys_hid.todo("device_type: 0x%x", device_type[0]);
    // return 1 or 0? look like almost like another check_focus type check,
    // returning 0 looks to keep system focus
  } else if (pkg_id == 0xD) {
    auto inf = vm::static_ptr_cast<sys_hid_manager_514_pkg_d>(buf);
    // unk1 = (pad# << 24) | pad# | 0x100
    // return value doesn't seem to be used again
    sys_hid.todo("unk1: 0x%x, unk2:0x%x", inf->unk1, inf->unk2);
  }
  return CELL_OK;
}
// Returns 1 if the current process has root permission, 0 otherwise.
// NOTE(review): 'pid' is ignored — the check always uses the calling
// process; confirm against real firmware behavior.
error_code sys_hid_manager_is_process_permission_root(u32 pid) {
  sys_hid.todo("sys_hid_manager_is_process_permission_root(pid=0x%x)", pid);
  return not_an_error(g_ps3_process_info.has_root_perm());
}
// Stub: hot-key observer registration is not implemented; reports success.
error_code sys_hid_manager_add_hot_key_observer(u32 event_queue,
                                                vm::ptr<u32> unk) {
  sys_hid.todo(
      "sys_hid_manager_add_hot_key_observer(event_queue=0x%x, unk=*0x%x)",
      event_queue, unk);
  return CELL_OK;
}
// Read HID input data. pkg 2 returns the raw pad button array (byte count),
// pkg 0x81 returns the same data with the count halved (u16 count?).
error_code sys_hid_manager_read(u32 handle, u32 pkg_id, vm::ptr<void> buf,
                                u64 buf_size) {
  if (!buf) {
    return CELL_EFAULT;
  }
  (pkg_id == 2 || pkg_id == 0x81 ? sys_hid.trace : sys_hid.todo)(
      "sys_hid_manager_read(handle=0x%x, pkg_id=0x%x, buf=*0x%x, "
      "buf_size=0x%llx)",
      handle, pkg_id, buf, buf_size);
  // Copy the current pad button data (just the button array from
  // 'CellPadData') into buf; returns the number of bytes copied, 0 if no
  // data was available.
  // todo: use handle and dont call cellpad here
  const auto copy_pad_buttons = [&]() -> u64 {
    vm::var<CellPadData> tmpData;
    if ((cellPadGetData(0, +tmpData) == CELL_OK) && tmpData->len > 0) {
      const u64 cpySize = std::min(static_cast<u64>(tmpData->len) * sizeof(u16),
                                   buf_size * sizeof(u16));
      memcpy(buf.get_ptr(), &tmpData->button, cpySize);
      return cpySize;
    }
    return 0;
  };
  if (pkg_id == 2) {
    // cellPadGetData
    if (const u64 copied = copy_pad_buttons()) {
      return not_an_error(copied);
    }
  } else if (pkg_id == 0x81) {
    // cellPadGetDataExtra?
    if (const u64 copied = copy_pad_buttons()) {
      return not_an_error(copied / 2);
    }
  }
  return CELL_OK;
}

View file

@ -0,0 +1,262 @@
#include "stdafx.h"
#include "sys_interrupt.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUOpcodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/SPUThread.h"
LOG_CHANNEL(sys_interrupt);
// Interrupt tag: the anchor object an interrupt service (lv2_int_serv)
// attaches to.
lv2_int_tag::lv2_int_tag() noexcept : lv2_obj(1), id(idm::last_id()) {}
// Savestate constructor: resolve the saved handler id immediately if that
// object is already loaded; otherwise postpone resolution until all objects
// have been deserialized.
lv2_int_tag::lv2_int_tag(utils::serial &ar) noexcept
    : lv2_obj(1), id(idm::last_id()), handler([&]() {
        const u32 id = ar;
        auto ptr = idm::get_unlocked<lv2_obj, lv2_int_serv>(id);
        if (!ptr && id) {
          Emu.PostponeInitCode([id, &handler = this->handler]() {
            handler = ensure(idm::get_unlocked<lv2_obj, lv2_int_serv>(id));
          });
        }
        return ptr;
      }()) {}
// Serialize the attached handler's id, or 0 when no handler is attached.
void lv2_int_tag::save(utils::serial &ar) {
  ar(lv2_obj::check(handler) ? handler->id : 0);
}
// Interrupt service: binds a PPU handler thread and its two arguments.
// The shared_ptr parameter is taken by value and moved into the member to
// avoid a redundant reference-count increment.
lv2_int_serv::lv2_int_serv(shared_ptr<named_thread<ppu_thread>> thread,
                           u64 arg1, u64 arg2) noexcept
    : lv2_obj(1), id(idm::last_id()), thread(std::move(thread)), arg1(arg1),
      arg2(arg2) {}
// Savestate constructor: fields are consumed in the order save() writes
// them (thread id, arg1, arg2).
lv2_int_serv::lv2_int_serv(utils::serial &ar) noexcept
    : lv2_obj(1), id(idm::last_id()),
      thread(idm::get_unlocked<named_thread<ppu_thread>>(ar)), arg1(ar),
      arg2(ar) {}
void lv2_int_serv::save(utils::serial &ar) {
  // Store the bound thread id only while the thread still exists in IDM
  ar(thread && idm::check_unlocked<named_thread<ppu_thread>>(thread->id)
         ? thread->id
         : 0,
     arg1, arg2);
}
void ppu_interrupt_thread_entry(ppu_thread &, ppu_opcode_t, be_t<u32> *,
struct ppu_intrp_func *);
// Queue one interrupt invocation on the bound handler thread: reset its
// stack, pass (arg1, arg2) to the handler entry, then sleep and re-enter
// the dispatch loop (ppu_interrupt_thread_entry).
void lv2_int_serv::exec() const {
  thread->cmd_list({{ppu_cmd::reset_stack, 0},
                    {ppu_cmd::set_args, 2},
                    arg1,
                    arg2,
                    {ppu_cmd::entry_call, 0},
                    {ppu_cmd::sleep, 0},
                    {ppu_cmd::ptr_call, 0},
                    std::bit_cast<u64>(&ppu_interrupt_thread_entry)});
}
void ppu_thread_exit(ppu_thread &, ppu_opcode_t, be_t<u32> *,
struct ppu_intrp_func *);
// Tear down the handler thread: queue a thread-exit command, wake it,
// invoke the thread object ((*thread)() — presumably drives it to
// completion; see named_thread), then remove it from the ID manager.
void lv2_int_serv::join() const {
  thread->cmd_list(
      {{ppu_cmd::ptr_call, 0}, std::bit_cast<u64>(&ppu_thread_exit)});
  thread->cmd_notify.store(1);
  thread->cmd_notify.notify_one();
  (*thread)();
  idm::remove_verify<named_thread<ppu_thread>>(thread->id, thread);
}
// Destroy an interrupt tag; refuses with EBUSY while a handler is attached.
error_code sys_interrupt_tag_destroy(ppu_thread &ppu, u32 intrtag) {
  ppu.state += cpu_flag::wait;
  sys_interrupt.warning("sys_interrupt_tag_destroy(intrtag=0x%x)", intrtag);
  const auto removed = idm::withdraw<lv2_obj, lv2_int_tag>(
      intrtag, [](lv2_int_tag &tag) -> CellError {
        // A still-attached handler blocks destruction
        if (lv2_obj::check(tag.handler)) {
          return CELL_EBUSY;
        }
        tag.exists.release(0);
        return {};
      });
  if (!removed) {
    return CELL_ESRCH;
  }
  if (removed.ret) {
    return removed.ret;
  }
  return CELL_OK;
}
// Attach an interrupt service (thread + args) to an interrupt tag and start
// the handler thread. Errors: ESRCH (bad tag/thread id), EAGAIN (thread
// already running, i.e. established elsewhere), ESTAT (tag already has a
// handler).
error_code _sys_interrupt_thread_establish(ppu_thread &ppu, vm::ptr<u32> ih,
                                           u32 intrtag, u32 intrthread,
                                           u64 arg1, u64 arg2) {
  ppu.state += cpu_flag::wait;
  sys_interrupt.warning(
      "_sys_interrupt_thread_establish(ih=*0x%x, intrtag=0x%x, "
      "intrthread=0x%x, arg1=0x%llx, arg2=0x%llx)",
      ih, intrtag, intrthread, arg1, arg2);
  CellError error = CELL_EAGAIN;
  const u32 id = idm::import <lv2_obj, lv2_int_serv>([&]() {
    shared_ptr<lv2_int_serv> result;
    // Get interrupt tag
    const auto tag = idm::check_unlocked<lv2_obj, lv2_int_tag>(intrtag);
    if (!tag) {
      error = CELL_ESRCH;
      return result;
    }
    // Get interrupt thread
    const auto it = idm::get_unlocked<named_thread<ppu_thread>>(intrthread);
    if (!it) {
      error = CELL_ESRCH;
      return result;
    }
    // If interrupt thread is running, it's already established on another
    // interrupt tag
    if (cpu_flag::stop - it->state) {
      error = CELL_EAGAIN;
      return result;
    }
    // It's unclear if multiple handlers can be established on single interrupt
    // tag
    if (lv2_obj::check(tag->handler)) {
      error = CELL_ESTAT;
      return result;
    }
    result = make_shared<lv2_int_serv>(it, arg1, arg2);
    tag->handler = result;
    // Enter the dispatch loop on the handler thread and release it from stop
    it->cmd_list({{ppu_cmd::ptr_call, 0},
                  std::bit_cast<u64>(&ppu_interrupt_thread_entry)});
    it->state -= cpu_flag::stop;
    it->state.notify_one();
    return result;
  });
  if (id) {
    ppu.check_state();
    *ih = id;
    return CELL_OK;
  }
  return error;
}
// Detach and destroy an interrupt handler, returning the handler thread's
// TLS base (r13) through the out parameter.
error_code _sys_interrupt_thread_disestablish(ppu_thread &ppu, u32 ih,
                                              vm::ptr<u64> r13) {
  ppu.state += cpu_flag::wait;
  sys_interrupt.warning(
      "_sys_interrupt_thread_disestablish(ih=0x%x, r13=*0x%x)", ih, r13);
  const auto handler = idm::withdraw<lv2_obj, lv2_int_serv>(
      ih, [](lv2_obj &obj) { obj.exists.release(0); });
  if (!handler) {
    // NOTE(review): fallback treats 'ih' as a raw PPU thread id — confirm
    // which caller depends on this path
    if (const auto thread = idm::withdraw<named_thread<ppu_thread>>(ih)) {
      *r13 = thread->gpr[13];
      // It is detached from IDM now so join must be done explicitly now
      *thread = thread_state::finished;
      return CELL_OK;
    }
    return CELL_ESRCH;
  }
  lv2_obj::sleep(ppu);
  // Wait for sys_interrupt_thread_eoi() and destroy interrupt thread
  handler->join();
  // Save TLS base
  *r13 = handler->thread->gpr[13];
  return CELL_OK;
}
// End-of-interrupt: marks the handler invocation finished so the interrupt
// thread returns to its dispatch loop.
void sys_interrupt_thread_eoi(ppu_thread &ppu) {
  ppu.state += cpu_flag::wait;
  sys_interrupt.trace("sys_interrupt_thread_eoi()");
  ppu.state += cpu_flag::ret;
  lv2_obj::sleep(ppu);
  ppu.interrupt_thread_executing = false;
}
// Dispatch loop run by an interrupt handler thread: scan raw (non-threaded)
// SPU threads for a pending class-2 interrupt whose handler is bound to
// this PPU and invoke the service if one is found; otherwise block on
// cmd_notify until woken.
void ppu_interrupt_thread_entry(ppu_thread &ppu, ppu_opcode_t, be_t<u32> *,
                                struct ppu_intrp_func *) {
  while (true) {
    shared_ptr<lv2_int_serv> serv = null_ptr;
    // Loop endlessly trying to invoke an interrupt if required
    idm::select<named_thread<spu_thread>>([&](u32, spu_thread &spu) {
      if (spu.get_type() != spu_type::threaded) {
        auto &ctrl = spu.int_ctrl[2];
        if (lv2_obj::check(ctrl.tag)) {
          auto &handler = ctrl.tag->handler;
          if (lv2_obj::check(handler)) {
            if (handler->thread.get() == &ppu) {
              // Latch the mailbox interrupt status bit when unmasked
              if (spu.ch_out_intr_mbox.get_count() &&
                  ctrl.mask & SPU_INT2_STAT_MAILBOX_INT) {
                ctrl.stat |= SPU_INT2_STAT_MAILBOX_INT;
              }
              if (ctrl.mask & ctrl.stat) {
                ensure(!serv);
                serv = handler;
              }
            }
          }
        }
      }
    });
    if (serv) {
      // Queue interrupt, after the interrupt has finished the PPU returns to
      // this loop
      serv->exec();
      return;
    }
    const auto state = +ppu.state;
    if (::is_stopped(state) || ppu.cmd_notify.exchange(0)) {
      return;
    }
    thread_ctrl::wait_on(ppu.cmd_notify, 0);
  }
}

View file

@ -0,0 +1,70 @@
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/IdManager.h"
#include "Emu/Memory/vm.h"
#include "sys_io.h"
LOG_CHANNEL(sys_io);
// Syscall (stub): create an I/O buffer object and return its IDM handle.
// Returns CELL_EFAULT for a null output pointer, CELL_ESRCH if the object
// could not be registered.
error_code sys_io_buffer_create(u32 block_count, u32 block_size, u32 blocks,
                                u32 unk1, vm::ptr<u32> handle) {
  sys_io.todo("sys_io_buffer_create(block_count=0x%x, block_size=0x%x, "
              "blocks=0x%x, unk1=0x%x, handle=*0x%x)",
              block_count, block_size, blocks, unk1, handle);

  // The guest must supply somewhere to store the new handle
  if (!handle) {
    return CELL_EFAULT;
  }

  // Register the buffer object with the ID manager
  const u32 id = idm::make<lv2_io_buf>(block_count, block_size, blocks, unk1);
  if (!id) {
    return CELL_ESRCH;
  }

  *handle = id;
  return CELL_OK;
}
// Syscall (stub): destroy an I/O buffer object.
// NOTE(review): the result of idm::remove is ignored, so an unknown handle
// still yields CELL_OK — presumably acceptable for this TODO-level stub.
error_code sys_io_buffer_destroy(u32 handle) {
  sys_io.todo("sys_io_buffer_destroy(handle=0x%x)", handle);
  idm::remove<lv2_io_buf>(handle);
  return CELL_OK;
}
// Syscall (stub): allocate guest memory for an I/O buffer and return its
// address through *block.
error_code sys_io_buffer_allocate(u32 handle, vm::ptr<u32> block) {
  sys_io.todo("sys_io_buffer_allocate(handle=0x%x, block=*0x%x)", handle,
              block);

  if (!block) {
    return CELL_EFAULT;
  }

  const auto io = idm::get_unlocked<lv2_io_buf>(handle);
  if (!io) {
    return CELL_ESRCH;
  }

  // no idea what we actually need to allocate
  const u32 addr = vm::alloc(io->block_count * io->block_size, vm::main);
  if (!addr) {
    return CELL_ENOMEM;
  }

  *block = addr;
  return CELL_OK;
}
// Syscall (stub): release guest memory previously handed out by
// sys_io_buffer_allocate. The handle is validated but otherwise unused.
error_code sys_io_buffer_free(u32 handle, u32 block) {
  sys_io.todo("sys_io_buffer_free(handle=0x%x, block=0x%x)", handle, block);

  // Verify the buffer object still exists before touching the allocation
  if (!idm::get_unlocked<lv2_io_buf>(handle)) {
    return CELL_ESRCH;
  }

  vm::dealloc(block);
  return CELL_OK;
}

View file

@ -0,0 +1,565 @@
#include "stdafx.h"
#include "sys_lwcond.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_lwmutex.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_lwcond);
// Savestate deserialization: field order must match save() below exactly.
lv2_lwcond::lv2_lwcond(utils::serial &ar)
    : name(ar.pop<be_t<u64>>()), lwid(ar), protocol(ar),
      control(ar.pop<decltype(control)>()) {}

// Savestate serialization counterpart of the constructor above.
void lv2_lwcond::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_sync);
  ar(name, lwid, protocol, control);
}
// Syscall: create a lightweight condition variable bound to an existing
// lwmutex, inheriting its scheduling protocol.
error_code _sys_lwcond_create(ppu_thread &ppu, vm::ptr<u32> lwcond_id,
                              u32 lwmutex_id, vm::ptr<sys_lwcond_t> control,
                              u64 name) {
  ppu.state += cpu_flag::wait;
  sys_lwcond.trace(u8"_sys_lwcond_create(lwcond_id=*0x%x, lwmutex_id=0x%x, "
                   u8"control=*0x%x, name=0x%llx (“%s”))",
                   lwcond_id, lwmutex_id, control, name,
                   lv2_obj::name_64{std::bit_cast<be_t<u64>>(name)});

  // Read the scheduling protocol from the associated lwmutex
  u32 proto{};
  const bool mutex_found = idm::check<lv2_obj, lv2_lwmutex>(
      lwmutex_id, [&proto](lv2_lwmutex &mutex) { proto = mutex.protocol; });

  if (!mutex_found) {
    return CELL_ESRCH;
  }

  // Lwcond can't have SYS_SYNC_RETRY protocol; fall back to priority order
  if (proto == SYS_SYNC_RETRY) {
    proto = SYS_SYNC_PRIORITY;
  }

  const u32 id =
      idm::make<lv2_obj, lv2_lwcond>(name, lwmutex_id, proto, control);
  if (!id) {
    return CELL_EAGAIN;
  }

  ppu.check_state();
  *lwcond_id = id;
  return CELL_OK;
}
// Syscall: destroy a lightweight condition variable. Fails with CELL_EBUSY
// if threads are still queued on it; if lwmutex-side waiters are mid-flight
// inside _sys_lwcond_queue_wait, the destroyer sleeps until they drain and
// then re-verifies the object.
error_code _sys_lwcond_destroy(ppu_thread &ppu, u32 lwcond_id) {
  ppu.state += cpu_flag::wait;
  sys_lwcond.trace("_sys_lwcond_destroy(lwcond_id=0x%x)", lwcond_id);
  shared_ptr<lv2_lwcond> _cond;
  while (true) {
    s32 old_val = 0;
    auto [ptr, ret] = idm::withdraw<lv2_obj, lv2_lwcond>(
        lwcond_id, [&](lv2_lwcond &cond) -> CellError {
          // Ignore check on first iteration
          if (_cond && std::addressof(cond) != _cond.get()) {
            // Other thread has destroyed the lwcond earlier
            return CELL_ESRCH;
          }
          std::lock_guard lock(cond.mutex);
          if (atomic_storage<ppu_thread *>::load(cond.sq)) {
            // Threads are still sleeping on the condition variable
            return CELL_EBUSY;
          }
          // Setting the sign bit blocks new waiters from entering;
          // old_val != smin means some are still inside queue_wait
          old_val = cond.lwmutex_waiters.or_fetch(smin);
          if (old_val != smin) {
            // De-schedule if waiters were found
            lv2_obj::sleep(ppu);
            // Repeat loop: there are lwmutex waiters inside
            // _sys_lwcond_queue_wait
            return CELL_EAGAIN;
          }
          return {};
        });
    if (!ptr) {
      return CELL_ESRCH;
    }
    if (ret) {
      if (ret != CELL_EAGAIN) {
        return ret;
      }
    } else {
      break;
    }
    _cond = std::move(ptr);
    // Wait for all lwcond waiters to quit (unsigned compare treats the
    // sign-marked counter as "still above the bare mark bit")
    while (old_val + 0u > 1u << 31) {
      thread_ctrl::wait_on(_cond->lwmutex_waiters, old_val);
      if (ppu.is_stopped()) {
        // Emulator is pausing/stopping: defer and retry this syscall later
        ppu.state += cpu_flag::again;
        return {};
      }
      old_val = _cond->lwmutex_waiters;
    }
    // Wake up from sleep
    ppu.check_state();
  }
  return CELL_OK;
}
// Syscall: wake one waiter of an lwcond (a specific thread when
// ppu_thread_id != umax, otherwise by the cond's scheduling protocol).
// The mode controls how ownership of the paired lwmutex is handed over.
error_code _sys_lwcond_signal(ppu_thread &ppu, u32 lwcond_id, u32 lwmutex_id,
                              u64 ppu_thread_id, u32 mode) {
  ppu.state += cpu_flag::wait;
  sys_lwcond.trace("_sys_lwcond_signal(lwcond_id=0x%x, lwmutex_id=0x%x, "
                   "ppu_thread_id=0x%llx, mode=%d)",
                   lwcond_id, lwmutex_id, ppu_thread_id, mode);
  // Mode 1: lwmutex was initially owned by the calling thread
  // Mode 2: lwmutex was not owned by the calling thread and waiter hasn't
  //         been increased
  // Mode 3: lwmutex was forcefully owned by the calling thread
  if (mode < 1 || mode > 3) {
    fmt::throw_exception("Unknown mode (%d)", mode);
  }
  // Retry loop: repeats while this thread is suspended mid-signal
  while (true) {
    if (ppu.test_stopped()) {
      ppu.state += cpu_flag::again;
      return {};
    }
    bool finished = true;
    ppu.state += cpu_flag::wait;
    // Lambda returns: -1 = target thread/mutex not found, 1 = a waiter was
    // signaled, 0 = nothing woken (or retry/savestate cases)
    const auto cond = idm::check<lv2_obj, lv2_lwcond>(
        lwcond_id,
        [&, notify = lv2_obj::notify_all_t()](lv2_lwcond &cond) -> int {
          ppu_thread *cpu = nullptr;
          if (ppu_thread_id != u32{umax}) {
            cpu = idm::check_unlocked<named_thread<ppu_thread>>(
                static_cast<u32>(ppu_thread_id));
            if (!cpu) {
              return -1;
            }
          }
          lv2_lwmutex *mutex = nullptr;
          if (mode != 2) {
            mutex = idm::check_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);
            if (!mutex) {
              return -1;
            }
          }
          if (atomic_storage<ppu_thread *>::load(cond.sq)) {
            std::lock_guard lock(cond.mutex);
            if (ppu.state & cpu_flag::suspend) {
              // Test if another signal caused the current thread to be
              // suspended, in which case it needs to wait until the thread
              // wakes up (otherwise the signal may cause unexpected results)
              finished = false;
              return 0;
            }
            if (cpu) {
              // Target thread is being saved/restored: retry the syscall
              if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
                ppu.state += cpu_flag::again;
                return 0;
              }
            }
            // Pick the explicit target, or the next waiter per protocol
            auto result =
                cpu ? cond.unqueue(cond.sq, cpu)
                    : cond.schedule<ppu_thread>(cond.sq, cond.protocol);
            if (result) {
              if (static_cast<ppu_thread *>(result)->state & cpu_flag::again) {
                ppu.state += cpu_flag::again;
                return 0;
              }
              if (mode == 2) {
                // Caller didn't own the mutex: waiter resumes with EBUSY
                static_cast<ppu_thread *>(result)->gpr[3] = CELL_EBUSY;
              } else if (mode == 3 && mutex->load_sq()) [[unlikely]] {
                std::lock_guard lock(mutex->mutex);
                // Respect ordering of the sleep queue
                mutex->try_own(result, true);
                auto result2 = mutex->reown<ppu_thread>();
                if (result2->state & cpu_flag::again) {
                  ppu.state += cpu_flag::again;
                  return 0;
                }
                if (result2 != result) {
                  // A different thread got the mutex: wake that one instead
                  cond.awake(result2);
                  result = nullptr;
                }
              } else if (mode == 1) {
                // Hand the waiter over to the mutex sleep queue; it wakes
                // later when the mutex is released
                mutex->try_own(result, true);
                result = nullptr;
              }
              if (result) {
                cond.awake(result);
              }
              return 1;
            }
          } else {
            // Empty queue: still synchronize with concurrent signalers
            cond.mutex.lock_unlock();
            if (ppu.state & cpu_flag::suspend) {
              finished = false;
              return 0;
            }
          }
          return 0;
        });
    if (!finished) {
      continue;
    }
    if (!cond || cond.ret == -1) {
      return CELL_ESRCH;
    }
    if (!cond.ret) {
      // Nothing was woken: result depends on mode and explicit-target use
      if (ppu_thread_id == u32{umax}) {
        if (mode == 3) {
          return not_an_error(CELL_ENOENT);
        } else if (mode == 2) {
          return CELL_OK;
        }
      }
      return not_an_error(CELL_EPERM);
    }
    return CELL_OK;
  }
}
// Syscall: wake every waiter of an lwcond. In mode 1 the waiters are moved
// onto the paired lwmutex's sleep queue; in mode 2 they are resumed directly
// with CELL_EBUSY in gpr[3].
error_code _sys_lwcond_signal_all(ppu_thread &ppu, u32 lwcond_id,
                                  u32 lwmutex_id, u32 mode) {
  ppu.state += cpu_flag::wait;
  sys_lwcond.trace(
      "_sys_lwcond_signal_all(lwcond_id=0x%x, lwmutex_id=0x%x, mode=%d)",
      lwcond_id, lwmutex_id, mode);
  // Mode 1: lwmutex was initially owned by the calling thread
  // Mode 2: lwmutex was not owned by the calling thread and waiter hasn't
  //         been increased
  if (mode < 1 || mode > 2) {
    fmt::throw_exception("Unknown mode (%d)", mode);
  }
  // Retry loop: repeats while this thread is suspended mid-signal
  while (true) {
    if (ppu.test_stopped()) {
      ppu.state += cpu_flag::again;
      return {};
    }
    bool finished = true;
    ppu.state += cpu_flag::wait;
    // Lambda returns: -1 = lwmutex not found, otherwise the number of
    // threads woken/transferred
    const auto cond = idm::check<lv2_obj, lv2_lwcond>(
        lwcond_id,
        [&, notify = lv2_obj::notify_all_t()](lv2_lwcond &cond) -> int {
          lv2_lwmutex *mutex{};
          if (mode != 2) {
            mutex = idm::check_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);
            if (!mutex) {
              return -1;
            }
          }
          if (atomic_storage<ppu_thread *>::load(cond.sq)) {
            std::lock_guard lock(cond.mutex);
            if (ppu.state & cpu_flag::suspend) {
              // Test if another signal caused the current thread to be
              // suspended, in which case it needs to wait until the thread
              // wakes up (otherwise the signal may cause unexpected results)
              finished = false;
              return 0;
            }
            u32 result = 0;
            // Abort if any queued thread is being saved/restored
            for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu) {
              if (cpu->state & cpu_flag::again) {
                ppu.state += cpu_flag::again;
                return 0;
              }
            }
            // Detach the whole sleep queue, then drain it per protocol
            auto sq = cond.sq;
            atomic_storage<ppu_thread *>::release(cond.sq, nullptr);
            while (const auto cpu =
                       cond.schedule<ppu_thread>(sq, cond.protocol)) {
              if (mode == 2) {
                static_cast<ppu_thread *>(cpu)->gpr[3] = CELL_EBUSY;
              }
              if (mode == 1) {
                // Queue each waiter on the lwmutex instead of waking it
                mutex->try_own(cpu, true);
              } else {
                lv2_obj::append(cpu);
              }
              result++;
            }
            if (result && mode == 2) {
              lv2_obj::awake_all();
            }
            return result;
          } else {
            // Empty queue: still synchronize with concurrent signalers
            cond.mutex.lock_unlock();
            if (ppu.state & cpu_flag::suspend) {
              finished = false;
              return 0;
            }
          }
          return 0;
        });
    if (!finished) {
      continue;
    }
    if (!cond || cond.ret == -1) {
      return CELL_ESRCH;
    }
    if (mode == 1) {
      // Mode 1: return the amount of threads (TODO)
      return not_an_error(cond.ret);
    }
    return CELL_OK;
  }
}
// Syscall: atomically release the paired lwmutex and wait on the lwcond,
// with an optional timeout. The wake-up result is passed through gpr[3]
// (CELL_OK, CELL_EBUSY from mode-2 signals, or CELL_ETIMEDOUT).
error_code _sys_lwcond_queue_wait(ppu_thread &ppu, u32 lwcond_id,
                                  u32 lwmutex_id, u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_lwcond.trace(
      "_sys_lwcond_queue_wait(lwcond_id=0x%x, lwmutex_id=0x%x, timeout=0x%llx)",
      lwcond_id, lwmutex_id, timeout);
  ppu.gpr[3] = CELL_OK;
  shared_ptr<lv2_lwmutex> mutex;
  // Per-thread scratch used to persist "was sleeping on the mutex queue"
  // across savestates
  auto &sstate = *ppu.optional_savestate_state;
  const auto cond = idm::get<lv2_obj, lv2_lwcond>(
      lwcond_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwcond &cond) {
        mutex = idm::get_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);
        if (!mutex) {
          return;
        }
        // Increment lwmutex's lwcond's waiters count
        mutex->lwcond_waiters++;
        lv2_obj::prepare_for_sleep(ppu);
        std::lock_guard lock(cond.mutex);
        cond.lwmutex_waiters++;
        const bool mutex_sleep = sstate.try_read<bool>().second;
        sstate.clear();
        if (mutex_sleep) {
          // Special: loading state from the point of waiting on lwmutex sleep
          // queue
          mutex->try_own(&ppu, true);
        } else {
          // Add a waiter
          lv2_obj::emplace(cond.sq, &ppu);
        }
        // Release the lwmutex (fast path first, then queued handover)
        if (!ppu.loaded_from_savestate && !mutex->try_unlock(false)) {
          std::lock_guard lock2(mutex->mutex);
          // Process lwmutex sleep queue
          if (const auto cpu = mutex->reown<ppu_thread>()) {
            if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
              // New owner is being saved/restored: back out and retry
              ensure(cond.unqueue(cond.sq, &ppu));
              ppu.state += cpu_flag::again;
              return;
            }
            // Put the current thread to sleep and schedule lwmutex waiter
            // atomically
            cond.append(cpu);
            cond.sleep(ppu, timeout);
            return;
          }
        }
        cond.sleep(ppu, timeout);
      });
  if (!cond || !mutex) {
    return CELL_ESRCH;
  }
  if (ppu.state & cpu_flag::again) {
    return CELL_OK;
  }
  // Sleep loop: spin briefly for a signal, otherwise block (with timeout)
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      // Emulator stop: record which queue (if any) this thread sits on so a
      // savestate can restore the wait correctly
      std::scoped_lock lock(cond->mutex, mutex->mutex);
      bool mutex_sleep = false;
      bool cond_sleep = false;
      for (auto cpu = mutex->load_sq(); cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          mutex_sleep = true;
          break;
        }
      }
      for (auto cpu = atomic_storage<ppu_thread *>::load(cond->sq); cpu;
           cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          cond_sleep = true;
          break;
        }
      }
      if (!cond_sleep && !mutex_sleep) {
        // Already dequeued by a signaler: nothing to save
        break;
      }
      sstate(mutex_sleep);
      ppu.state += cpu_flag::again;
      break;
    }
    // Short busy-wait before committing to a blocking wait
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        std::lock_guard lock(cond->mutex);
        if (cond->unqueue(cond->sq, &ppu)) {
          // Timed out while still on the lwcond queue
          ppu.gpr[3] = CELL_ETIMEDOUT;
          break;
        }
        // Not on the lwcond queue: we may have been moved to the lwmutex
        // queue by a mode-1 signal; try to remove ourselves from there
        std::lock_guard lock2(mutex->mutex);
        bool success = false;
        mutex->lv2_control.fetch_op([&](lv2_lwmutex::control_data_t &data) {
          success = false;
          ppu_thread *sq = static_cast<ppu_thread *>(data.sq);
          const bool retval = &ppu == sq;
          if (!mutex->unqueue<false>(sq, &ppu)) {
            return false;
          }
          success = true;
          // Only commit the new head if we were the head
          if (!retval) {
            return false;
          }
          data.sq = sq;
          return true;
        });
        if (success) {
          ppu.next_cpu = nullptr;
          ppu.gpr[3] = CELL_ETIMEDOUT;
        }
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  // Drop the waiter marks; hitting smin means a destroyer set the sign bit
  // and is blocked waiting for the last waiter to leave
  if (--mutex->lwcond_waiters == smin) {
    // Notify the thread destroying lwmutex on last waiter
    mutex->lwcond_waiters.notify_all();
  }
  if (--cond->lwmutex_waiters == smin) {
    // Notify the thread destroying lwcond on last waiter
    cond->lwmutex_waiters.notify_all();
  }
  // Return cause
  return not_an_error(ppu.gpr[3]);
}

View file

@ -0,0 +1,353 @@
#include "stdafx.h"
#include "sys_lwmutex.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_lwmutex);
// Savestate deserialization: field order must match save() below exactly.
lv2_lwmutex::lv2_lwmutex(utils::serial &ar)
    : protocol(ar), control(ar.pop<decltype(control)>()),
      name(ar.pop<be_t<u64>>()) {
  // Only the "signaled" word of the control state is persisted
  ar(lv2_control.raw().signaled);
}

// Savestate serialization counterpart of the constructor above.
void lv2_lwmutex::save(utils::serial &ar) {
  ar(protocol, control, name, lv2_control.raw().signaled);
}
// Syscall: create a lightweight mutex kernel object mirroring the
// user-space sys_lwmutex_t at `control`.
error_code _sys_lwmutex_create(ppu_thread &ppu, vm::ptr<u32> lwmutex_id,
                               u32 protocol, vm::ptr<sys_lwmutex_t> control,
                               s32 has_name, u64 name) {
  ppu.state += cpu_flag::wait;
  sys_lwmutex.trace(u8"_sys_lwmutex_create(lwmutex_id=*0x%x, protocol=0x%x, "
                    u8"control=*0x%x, has_name=0x%x, name=0x%llx (“%s”))",
                    lwmutex_id, protocol, control, has_name, name,
                    lv2_obj::name_64{std::bit_cast<be_t<u64>>(name)});

  // Only FIFO, RETRY and PRIORITY ordering are valid for lwmutex
  switch (protocol) {
  case SYS_SYNC_FIFO:
  case SYS_SYNC_RETRY:
  case SYS_SYNC_PRIORITY:
    break;
  default:
    sys_lwmutex.error("_sys_lwmutex_create(): unknown protocol (0x%x)",
                      protocol);
    return CELL_EINVAL;
  }

  // The name is only kept when has_name is negative (sign bit set)
  if (has_name >= 0) {
    name = 0;
  }

  const u32 id = idm::make<lv2_obj, lv2_lwmutex>(protocol, control, name);
  if (!id) {
    return CELL_EAGAIN;
  }

  ppu.check_state();
  *lwmutex_id = id;
  return CELL_OK;
}
// Syscall: destroy a lightweight mutex. Fails with CELL_EBUSY if threads
// are queued on it; if lwcond waiters are still mid-flight the destroyer
// sleeps until they drain and then re-verifies the object.
error_code _sys_lwmutex_destroy(ppu_thread &ppu, u32 lwmutex_id) {
  ppu.state += cpu_flag::wait;
  sys_lwmutex.trace("_sys_lwmutex_destroy(lwmutex_id=0x%x)", lwmutex_id);
  shared_ptr<lv2_lwmutex> _mutex;
  while (true) {
    s32 old_val = 0;
    auto [ptr, ret] = idm::withdraw<lv2_obj, lv2_lwmutex>(
        lwmutex_id, [&](lv2_lwmutex &mutex) -> CellError {
          // Ignore check on first iteration
          if (_mutex && std::addressof(mutex) != _mutex.get()) {
            // Other thread has destroyed the lwmutex earlier
            return CELL_ESRCH;
          }
          std::lock_guard lock(mutex.mutex);
          if (mutex.load_sq()) {
            // Threads are still sleeping on the mutex
            return CELL_EBUSY;
          }
          // Setting the sign bit blocks new lwcond waiters from entering;
          // old_val != smin means some are still active
          old_val = mutex.lwcond_waiters.or_fetch(smin);
          if (old_val != smin) {
            // Deschedule if waiters were found
            lv2_obj::sleep(ppu);
            // Repeat loop: there are lwcond waiters
            return CELL_EAGAIN;
          }
          return {};
        });
    if (!ptr) {
      return CELL_ESRCH;
    }
    if (ret) {
      if (ret != CELL_EAGAIN) {
        return ret;
      }
    } else {
      break;
    }
    _mutex = std::move(ptr);
    // Wait for all lwcond waiters to quit (unsigned compare treats the
    // sign-marked counter as "still above the bare mark bit")
    while (old_val + 0u > 1u << 31) {
      thread_ctrl::wait_on(_mutex->lwcond_waiters, old_val);
      if (ppu.is_stopped()) {
        // Emulator is pausing/stopping: defer and retry this syscall later
        ppu.state += cpu_flag::again;
        return {};
      }
      old_val = _mutex->lwcond_waiters;
    }
    // Wake up from sleep
    ppu.check_state();
  }
  return CELL_OK;
}
// Syscall: lock a lightweight mutex, sleeping with an optional timeout.
// The result is reported through gpr[3] (CELL_OK, CELL_EBUSY from forced
// unlock2 handover, or CELL_ETIMEDOUT).
error_code _sys_lwmutex_lock(ppu_thread &ppu, u32 lwmutex_id, u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_lwmutex.trace("_sys_lwmutex_lock(lwmutex_id=0x%x, timeout=0x%llx)",
                    lwmutex_id, timeout);
  ppu.gpr[3] = CELL_OK;
  // Lambda returns true when the lock was acquired (or the sleep was
  // cancelled) without needing to wait
  const auto mutex = idm::get<lv2_obj, lv2_lwmutex>(
      lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex &mutex) {
        // Fast path: consume a pending "signaled" token if present
        if (s32 signal = mutex.lv2_control
                             .fetch_op([](lv2_lwmutex::control_data_t &data) {
                               if (data.signaled) {
                                 data.signaled = 0;
                                 return true;
                               }
                               return false;
                             })
                             .first.signaled) {
          // An even token comes from _sys_lwmutex_unlock2: report EBUSY
          if (~signal & 1) {
            ppu.gpr[3] = CELL_EBUSY;
          }
          return true;
        }
        lv2_obj::prepare_for_sleep(ppu);
        ppu.cancel_sleep = 1;
        // Second chance: try_own may still succeed against a racing unlock
        if (s32 signal = mutex.try_own(&ppu)) {
          if (~signal & 1) {
            ppu.gpr[3] = CELL_EBUSY;
          }
          ppu.cancel_sleep = 0;
          return true;
        }
        const bool finished = !mutex.sleep(ppu, timeout);
        notify.cleanup();
        return finished;
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  if (mutex.ret) {
    return not_an_error(ppu.gpr[3]);
  }
  // Sleep loop: spin briefly for a signal, otherwise block (with timeout)
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      // Emulator stop: if still queued, defer and retry this syscall later
      std::lock_guard lock(mutex->mutex);
      for (auto cpu = mutex->load_sq(); cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }
      break;
    }
    // Short busy-wait before committing to a blocking wait
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        if (!mutex->load_sq()) {
          // Sleep queue is empty, so the thread must have been signaled
          mutex->mutex.lock_unlock();
          break;
        }
        std::lock_guard lock(mutex->mutex);
        // Remove ourselves from the sleep queue to report the timeout;
        // failure means an unlock signaled us concurrently
        bool success = false;
        mutex->lv2_control.fetch_op([&](lv2_lwmutex::control_data_t &data) {
          success = false;
          ppu_thread *sq = static_cast<ppu_thread *>(data.sq);
          const bool retval = &ppu == sq;
          if (!mutex->unqueue<false>(sq, &ppu)) {
            return false;
          }
          success = true;
          // Only commit the new head if we were the head
          if (!retval) {
            return false;
          }
          data.sq = sq;
          return true;
        });
        if (success) {
          ppu.next_cpu = nullptr;
          ppu.gpr[3] = CELL_ETIMEDOUT;
        }
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  return not_an_error(ppu.gpr[3]);
}
// Syscall: try to lock a lightweight mutex without sleeping.
// Returns CELL_EBUSY (as not_an_error) when the lock is unavailable.
error_code _sys_lwmutex_trylock(ppu_thread &ppu, u32 lwmutex_id) {
  ppu.state += cpu_flag::wait;
  sys_lwmutex.trace("_sys_lwmutex_trylock(lwmutex_id=0x%x)", lwmutex_id);

  // Attempt to atomically consume an odd "signaled" token
  const auto mutex =
      idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex &mutex) {
        const auto [prev, acquired] =
            mutex.lv2_control.fetch_op([](lv2_lwmutex::control_data_t &data) {
              if (data.signaled & 1) {
                data.signaled = 0;
                return true;
              }
              return false;
            });
        return acquired;
      });

  if (!mutex) {
    return CELL_ESRCH;
  }

  if (!mutex.ret) {
    // Currently held (or only a forced-unlock token is pending)
    return not_an_error(CELL_EBUSY);
  }

  return CELL_OK;
}
// Syscall: unlock a lightweight mutex, handing ownership to the next
// queued waiter if the fast unlock path fails.
error_code _sys_lwmutex_unlock(ppu_thread &ppu, u32 lwmutex_id) {
  ppu.state += cpu_flag::wait;
  sys_lwmutex.trace("_sys_lwmutex_unlock(lwmutex_id=0x%x)", lwmutex_id);
  const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(
      lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex &mutex) {
        // Fast path: no waiters, just publish the signaled token
        if (mutex.try_unlock(false)) {
          return;
        }
        std::lock_guard lock(mutex.mutex);
        // Transfer ownership to the next waiter per protocol
        if (const auto cpu = mutex.reown<ppu_thread>()) {
          if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
            // New owner is being saved/restored: retry the syscall
            ppu.state += cpu_flag::again;
            return;
          }
          mutex.awake(cpu);
          notify.cleanup(); // lv2_lwmutex::mutex is not really active 99% of
                            // the time, can be ignored
        }
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}
// Syscall: forced unlock variant — the resumed waiter receives CELL_EBUSY
// in gpr[3] (the "even token" path of _sys_lwmutex_lock).
error_code _sys_lwmutex_unlock2(ppu_thread &ppu, u32 lwmutex_id) {
  ppu.state += cpu_flag::wait;
  sys_lwmutex.warning("_sys_lwmutex_unlock2(lwmutex_id=0x%x)", lwmutex_id);
  const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(
      lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex &mutex) {
        // Fast path: no waiters, publish the forced (even) token
        if (mutex.try_unlock(true)) {
          return;
        }
        std::lock_guard lock(mutex.mutex);
        if (const auto cpu = mutex.reown<ppu_thread>(true)) {
          if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
            // New owner is being saved/restored: retry the syscall
            ppu.state += cpu_flag::again;
            return;
          }
          // The woken thread observes EBUSY instead of plain success
          static_cast<ppu_thread *>(cpu)->gpr[3] = CELL_EBUSY;
          mutex.awake(cpu);
          notify.cleanup(); // lv2_lwmutex::mutex is not really active 99% of
                            // the time, can be ignored
        }
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}

View file

@ -0,0 +1,408 @@
#include "stdafx.h"
#include "sys_memory.h"
#include "Emu/CPU/CPUThread.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/SPUThread.h"
#include "Emu/IdManager.h"
#include "Emu/Memory/vm_locking.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_memory);
//
static shared_mutex s_memstats_mtx;
// Construct a memory container of `size` bytes. When created through IDM
// the fresh IDM id is recorded, otherwise the invalid id marks the global
// (default) container.
lv2_memory_container::lv2_memory_container(u32 size, bool from_idm) noexcept
    : size(size),
      id{from_idm ? idm::last_id() : SYS_MEMORY_CONTAINER_ID_INVALID} {}

// Savestate deserialization: restores size and the used-bytes counter.
lv2_memory_container::lv2_memory_container(utils::serial &ar,
                                           bool from_idm) noexcept
    : size(ar), id{from_idm ? idm::last_id() : SYS_MEMORY_CONTAINER_ID_INVALID},
      used(ar) {}

// Savestate loader: builds the container and returns a callback that stores
// it into the IDM slot.
std::function<void(void *)> lv2_memory_container::load(utils::serial &ar) {
  // Use idm::last_id() only for the instances at IDM
  return [ptr = make_shared<lv2_memory_container>(exact_t<utils::serial &>(ar),
                                                  true)](void *storage) {
    *static_cast<atomic_ptr<lv2_memory_container> *>(storage) = ptr;
  };
}

// Savestate serialization counterpart of the serial constructor above.
void lv2_memory_container::save(utils::serial &ar) { ar(size, used); }

// Resolve a container id: the invalid id aliases the global default
// container owned by g_fxo, anything else is looked up through IDM.
lv2_memory_container *lv2_memory_container::search(u32 id) {
  if (id != SYS_MEMORY_CONTAINER_ID_INVALID) {
    return idm::check_unlocked<lv2_memory_container>(id);
  }
  return &g_fxo->get<lv2_memory_container>();
}
// Maps each 64 KiB address slot (addr >> 16) to the memory container that
// backs it, so sys_memory_free can return the bytes to the right container.
struct sys_memory_address_table {
  atomic_t<lv2_memory_container *> addrs[65536]{};
  sys_memory_address_table() = default;
  SAVESTATE_INIT_POS(
      id_manager::id_map<lv2_memory_container>::savestate_init_pos + 0.1);
  // Savestate deserialization: rebuilds the pointer table from the saved
  // slot -> container-id map.
  sys_memory_address_table(utils::serial &ar) {
    // First: address, second: container ID (SYS_MEMORY_CONTAINER_ID_INVALID
    // for global FXO memory container)
    std::unordered_map<u16, u32> mm;
    ar(mm);
    for (const auto &[addr, id] : mm) {
      addrs[addr] = ensure(lv2_memory_container::search(id));
    }
  }
  // Savestate serialization: stores only the occupied slots as
  // slot -> container-id pairs.
  void save(utils::serial &ar) {
    std::unordered_map<u16, u32> mm;
    for (auto &ctr : addrs) {
      if (const auto ptr = +ctr) {
        mm[static_cast<u16>(&ctr - addrs)] = ptr->id;
      }
    }
    ar(mm);
  }
};
// Reserve a virtual-memory block for a user allocation. 64K-aligned
// requests go to the 64K-page user area with a fixed 512 MiB reservation;
// everything else goes to the 1M-page area sized up from alloc_size.
std::shared_ptr<vm::block_t> reserve_map(u32 alloc_size, u32 align) {
  const bool use_64k = align == 0x10000;
  const auto location = use_64k ? vm::user64k : vm::user1m;
  const u32 area_size =
      use_64k ? 0x20000000 : utils::align(alloc_size, 0x10000000);
  const u64 area_flags = use_64k ? (vm::page_size_64k | vm::bf0_0x1)
                                 : (vm::page_size_1m | vm::bf0_0x1);
  return vm::reserve_map(location, 0, area_size, area_flags);
}
// Todo: fix order of error checks
// Syscall: allocate user memory from the default (global) container.
// `flags` selects the page size (1M default, 64K optional); the allocated
// address is written to *alloc_addr.
// Todo: fix order of error checks
error_code sys_memory_allocate(cpu_thread &cpu, u64 size, u64 flags,
                               vm::ptr<u32> alloc_addr) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning(
      "sys_memory_allocate(size=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size,
      flags, alloc_addr);
  if (!size) {
    return {CELL_EALIGN, size};
  }
  // Check allocation size (also derives page granularity from flags)
  const u32 align = flags == SYS_MEMORY_PAGE_SIZE_1M   ? 0x100000
                    : flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000
                    : flags == 0                        ? 0x100000
                                                        : 0;
  if (!align) {
    return {CELL_EINVAL, flags};
  }
  if (size % align) {
    return {CELL_EALIGN, size};
  }
  // Get "default" memory container
  auto &dct = g_fxo->get<lv2_memory_container>();
  // Try to get "physical memory"
  if (!dct.take(size)) {
    return {CELL_ENOMEM, dct.size - dct.used};
  }
  if (const auto area = reserve_map(static_cast<u32>(size), align)) {
    if (const u32 addr = area->alloc(static_cast<u32>(size), nullptr, align)) {
      // Record the owning container for this address slot; the slot must
      // have been empty
      ensure(!g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(
          &dct));
      if (alloc_addr) {
        sys_memory.notice(
            "sys_memory_allocate(): Allocated 0x%x address (size=0x%x)", addr,
            size);
        vm::lock_sudo(addr, static_cast<u32>(size));
        cpu.check_state();
        *alloc_addr = addr;
        return CELL_OK;
      }
      // Dealloc using the syscall
      sys_memory_free(cpu, addr);
      return CELL_EFAULT;
    }
  }
  // Reservation failed: return the "physical memory" to the container
  dct.free(size);
  return CELL_ENOMEM;
}
// Syscall: like sys_memory_allocate, but draws the "physical memory" from
// an explicit memory container instead of the global one.
error_code sys_memory_allocate_from_container(cpu_thread &cpu, u64 size,
                                              u32 cid, u64 flags,
                                              vm::ptr<u32> alloc_addr) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning("sys_memory_allocate_from_container(size=0x%x, cid=0x%x, "
                     "flags=0x%llx, alloc_addr=*0x%x)",
                     size, cid, flags, alloc_addr);
  if (!size) {
    return {CELL_EALIGN, size};
  }
  // Check allocation size (also derives page granularity from flags)
  const u32 align = flags == SYS_MEMORY_PAGE_SIZE_1M   ? 0x100000
                    : flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000
                    : flags == 0                        ? 0x100000
                                                        : 0;
  if (!align) {
    return {CELL_EINVAL, flags};
  }
  if (size % align) {
    return {CELL_EALIGN, size};
  }
  // Look up the container and reserve the bytes under the IDM lock
  const auto ct = idm::get<lv2_memory_container>(
      cid, [&](lv2_memory_container &ct) -> CellError {
        // Try to get "physical memory"
        if (!ct.take(size)) {
          return CELL_ENOMEM;
        }
        return {};
      });
  if (!ct) {
    return CELL_ESRCH;
  }
  if (ct.ret) {
    return {ct.ret, ct->size - ct->used};
  }
  if (const auto area = reserve_map(static_cast<u32>(size), align)) {
    if (const u32 addr = area->alloc(static_cast<u32>(size))) {
      // Record the owning container for this address slot; the slot must
      // have been empty
      ensure(!g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(
          ct.ptr.get()));
      if (alloc_addr) {
        vm::lock_sudo(addr, static_cast<u32>(size));
        cpu.check_state();
        *alloc_addr = addr;
        return CELL_OK;
      }
      // Dealloc using the syscall
      sys_memory_free(cpu, addr);
      return CELL_EFAULT;
    }
  }
  // Reservation failed: return the "physical memory" to the container
  ct->free(size);
  return CELL_ENOMEM;
}
// Syscall: free a block previously returned by sys_memory_allocate*,
// returning its bytes to the container recorded in the address table.
error_code sys_memory_free(cpu_thread &cpu, u32 addr) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning("sys_memory_free(addr=0x%x)", addr);
  // Unaligned addresses are rejected outright; otherwise atomically claim
  // (clear) the address slot so double-free fails with EINVAL
  const auto ct =
      addr % 0x10000
          ? nullptr
          : g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(
                nullptr);
  if (!ct) {
    return {CELL_EINVAL, addr};
  }
  const auto size = (ensure(vm::dealloc(addr)));
  // Hold the IDM mutex (comma-operator temporary) while crediting the
  // container, so it cannot be withdrawn concurrently during free()
  reader_lock{id_manager::g_mutex}, ct->free(size);
  return CELL_OK;
}
// Syscall: report page attributes (protection, access rights, page size)
// for the page containing `addr`.
error_code sys_memory_get_page_attribute(cpu_thread &cpu, u32 addr,
                                         vm::ptr<sys_page_attr_t> attr) {
  cpu.state += cpu_flag::wait;
  sys_memory.trace("sys_memory_get_page_attribute(addr=0x%x, attr=*0x%x)", addr,
                   attr);
  // Exclusive lock keeps the page tables stable while both addresses are
  // validated and the result is written
  vm::writer_lock rlock;
  if (!vm::check_addr(addr) || addr >= SPU_FAKE_BASE_ADDR) {
    return CELL_EINVAL;
  }
  if (!vm::check_addr(attr.addr(), vm::page_readable, attr.size())) {
    return CELL_EFAULT;
  }
  attr->attribute = 0x40000ull; // SYS_MEMORY_PROT_READ_WRITE (TODO)
  // Stack area (0xD...) is restricted to PPU threads
  attr->access_right = addr >> 28 == 0xdu
                           ? SYS_MEMORY_ACCESS_RIGHT_PPU_THR
                           : SYS_MEMORY_ACCESS_RIGHT_ANY; // (TODO)
  if (vm::check_addr(addr, vm::page_1m_size)) {
    attr->page_size = 0x100000;
  } else if (vm::check_addr(addr, vm::page_64k_size)) {
    attr->page_size = 0x10000;
  } else {
    attr->page_size = 4096;
  }
  attr->pad = 0; // Always write 0
  return CELL_OK;
}
// Syscall: report total and available user memory of the default container,
// with the sizes of all other containers subtracted from the total.
error_code
sys_memory_get_user_memory_size(cpu_thread &cpu,
                                vm::ptr<sys_memory_info_t> mem_info) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning("sys_memory_get_user_memory_size(mem_info=*0x%x)",
                     mem_info);
  // Get "default" memory container
  auto &dct = g_fxo->get<lv2_memory_container>();
  sys_memory_info_t out{};
  {
    // Snapshot under the stats lock so container create/destroy cannot skew
    // the numbers mid-scan
    ::reader_lock lock(s_memstats_mtx);
    out.total_user_memory = dct.size;
    out.available_user_memory = dct.size - dct.used;
    // Scan other memory containers
    idm::select<lv2_memory_container>([&](u32, lv2_memory_container &ct) {
      out.total_user_memory -= ct.size;
    });
  }
  cpu.check_state();
  *mem_info = out;
  return CELL_OK;
}
// Syscall (stub): not implemented — the output structure is left untouched.
error_code sys_memory_get_user_memory_stat(
    cpu_thread &cpu, vm::ptr<sys_memory_user_memory_stat_t> mem_stat) {
  cpu.state += cpu_flag::wait;
  sys_memory.todo("sys_memory_get_user_memory_stat(mem_stat=*0x%x)", mem_stat);
  return CELL_OK;
}
// Syscall: carve a new memory container of `size` bytes out of the default
// container's budget and return its id through *cid.
error_code sys_memory_container_create(cpu_thread &cpu, vm::ptr<u32> cid,
                                       u64 size) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning("sys_memory_container_create(cid=*0x%x, size=0x%x)", cid,
                     size);
  // Round down to 1 MB granularity
  size &= ~0xfffff;
  if (!size) {
    return CELL_ENOMEM;
  }
  auto &dct = g_fxo->get<lv2_memory_container>();
  // Serializes with sys_memory_get_user_memory_size and container_destroy
  std::lock_guard lock(s_memstats_mtx);
  // Try to obtain "physical memory" from the default container
  if (!dct.take(size)) {
    return CELL_ENOMEM;
  }
  // Create the memory container
  if (const u32 id =
          idm::make<lv2_memory_container>(static_cast<u32>(size), true)) {
    cpu.check_state();
    *cid = id;
    return CELL_OK;
  }
  // IDM slot exhaustion: give the bytes back
  dct.free(size);
  return CELL_EAGAIN;
}
// Syscall: destroy a memory container and credit its bytes back to the
// default container. Fails with CELL_EBUSY while any of its memory is
// still allocated.
error_code sys_memory_container_destroy(cpu_thread &cpu, u32 cid) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning("sys_memory_container_destroy(cid=0x%x)", cid);
  std::lock_guard lock(s_memstats_mtx);
  const auto ct = idm::withdraw<lv2_memory_container>(
      cid, [](lv2_memory_container &ct) -> CellError {
        // Check if some memory is not deallocated (the container cannot be
        // destroyed in this case). The CAS to full `size` also blocks any
        // further take() on this container.
        if (!ct.used.compare_and_swap_test(0, ct.size)) {
          return CELL_EBUSY;
        }
        return {};
      });
  if (!ct) {
    return CELL_ESRCH;
  }
  if (ct.ret) {
    return ct.ret;
  }
  // Return "physical memory" to the default container
  g_fxo->get<lv2_memory_container>().free(ct->size);
  return CELL_OK;
}
// Syscall: report the total and currently available bytes of a specific
// memory container through *mem_info.
error_code sys_memory_container_get_size(cpu_thread &cpu,
                                         vm::ptr<sys_memory_info_t> mem_info,
                                         u32 cid) {
  cpu.state += cpu_flag::wait;
  sys_memory.warning("sys_memory_container_get_size(mem_info=*0x%x, cid=0x%x)",
                     mem_info, cid);

  const auto ct = idm::get_unlocked<lv2_memory_container>(cid);
  if (!ct) {
    return CELL_ESRCH;
  }

  cpu.check_state();
  mem_info->total_user_memory = ct->size;     // Total container memory
  mem_info->available_user_memory =
      ct->size - ct->used;                    // Available container memory
  return CELL_OK;
}
// Syscall: destroy a container together with its children. Since
// multi-process is not supported yet, child containers mean nothing at the
// moment and this simply destroys the parent.
error_code sys_memory_container_destroy_parent_with_childs(
    cpu_thread &cpu, u32 cid, u32 must_0, vm::ptr<u32> mc_child) {
  sys_memory.warning("sys_memory_container_destroy_parent_with_childs(cid=0x%x,"
                     " must_0=%d, mc_child=*0x%x)",
                     cid, must_0, mc_child);

  // The second argument is reserved and must be zero
  if (must_0 != 0) {
    return CELL_EINVAL;
  }

  return sys_memory_container_destroy(cpu, cid);
}

View file

@ -0,0 +1,805 @@
#include "stdafx.h"
#include "sys_mmapper.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Memory/vm_var.h"
#include "cellos/sys_event.h"
#include "sys_memory.h"
#include "sys_process.h"
#include "sys_sync.h"
#include <span>
#include "util/vm.hpp"
LOG_CHANNEL(sys_mmapper);
// Pretty-printer for memory container ids: the invalid id denotes the
// global default container and is shown as "Global".
template <>
void fmt_class_string<lv2_mem_container_id>::format(std::string &out, u64 arg) {
  format_enum(out, arg, [](auto value) {
    switch (value) {
    case SYS_MEMORY_CONTAINER_ID_INVALID:
      return "Global";
    }
    // Resort to hex formatting for other values
    return unknown;
  });
}
// Construct a shared-memory object backed by a fresh shareable shm of
// `size` bytes, charged against container `ct`.
lv2_memory::lv2_memory(u32 size, u32 align, u64 flags, u64 key, bool pshared,
                       lv2_memory_container *ct)
    : size(size), align(align), flags(flags), key(key), pshared(pshared),
      ct(ct), shm(std::make_shared<utils::shm>(size, 1 /* shareable flag */)) {
#ifndef _WIN32
  // Optimization that's useless on Windows :puke:
  utils::memory_lock(shm->map_self(), size);
#endif
}

// Savestate deserialization. If the shm was mapped at save time its address
// was stored and the existing mapping is reused; otherwise the raw contents
// were serialized and are restored into a fresh shm.
lv2_memory::lv2_memory(utils::serial &ar)
    : size(ar), align(ar), flags(ar), key(ar), pshared(ar),
      ct(lv2_memory_container::search(ar.pop<u32>())), shm([&](u32 addr) {
        if (addr) {
          // Recover the shm from the still-mapped VM block
          return ensure(vm::get(vm::any, addr)->peek(addr).second);
        }
        const auto _shm = std::make_shared<utils::shm>(size, 1);
        ar(std::span(_shm->map_self(), size));
        return _shm;
      }(ar.pop<u32>())),
      counter(ar) {
#ifndef _WIN32
  // Optimization that's useless on Windows :puke:
  utils::memory_lock(shm->map_self(), size);
#endif
}

// IDM creation hook: charge the container on first creation; `exists` also
// counts as the "already charged" marker for savestate loads.
CellError lv2_memory::on_id_create() {
  if (!exists && !ct->take(size)) {
    sys_mmapper.error("lv2_memory::on_id_create(): Cannot allocate 0x%x bytes "
                      "(0x%x available)",
                      size, ct->size - ct->used);
    return CELL_ENOMEM;
  }
  exists++;
  return {};
}

// Savestate loader: temporarily bumps `exists` so on_id_create() does not
// double-charge the container while the object is re-registered.
std::function<void(void *)> lv2_memory::load(utils::serial &ar) {
  auto mem = make_shared<lv2_memory>(exact_t<utils::serial &>(ar));
  mem->exists++; // Disable on_id_create()
  auto func = load_func(mem, +mem->pshared);
  mem->exists--;
  return func;
}

// Savestate serialization counterpart of the serial constructor above:
// stores the mapped address when mapped, otherwise the raw shm contents.
void lv2_memory::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_memory);
  ar(size, align, flags, key, pshared, ct->id);
  ar(counter ? vm::get_shm_addr(shm) : 0);
  if (!counter) {
    ar(std::span(shm->map_self(), size));
  }
  ar(counter);
}
// Savestate deserialization of registered page-fault notification entries.
page_fault_notification_entries::page_fault_notification_entries(
    utils::serial &ar) {
  ar(entries);
}

// Savestate serialization counterpart of the constructor above.
void page_fault_notification_entries::save(utils::serial &ar) { ar(entries); }
// Helper: create (or attach to, for process-shared keys) an lv2_memory
// object through the generic lv2_obj IPC creation path. `exclusive`
// requires that the ipc_key does not already exist.
template <bool exclusive = false>
error_code create_lv2_shm(bool pshared, u64 ipc_key, u64 size, u32 align,
                          u64 flags, lv2_memory_container *ct) {
  const u32 _pshared =
      pshared ? SYS_SYNC_PROCESS_SHARED : SYS_SYNC_NOT_PROCESS_SHARED;
  // Non-shared objects ignore the key entirely
  if (!pshared) {
    ipc_key = 0;
  }
  if (auto error = lv2_obj::create<lv2_memory>(
          _pshared, ipc_key,
          exclusive ? SYS_SYNC_NEWLY_CREATED : SYS_SYNC_NOT_CARE,
          [&]() {
            return make_shared<lv2_memory>(static_cast<u32>(size), align, flags,
                                           ipc_key, pshared, ct);
          },
          false)) {
    return error;
  }
  return CELL_OK;
}
// Syscall: reserve an address-space region (no physical memory yet) for
// later sys_mmapper mappings; the base address is written to *alloc_addr.
error_code sys_mmapper_allocate_address(ppu_thread &ppu, u64 size, u64 flags,
                                        u64 alignment,
                                        vm::ptr<u32> alloc_addr) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_allocate_address(size=0x%x, flags=0x%x, "
                      "alignment=0x%x, alloc_addr=*0x%x)",
                      size, flags, alignment, alloc_addr);
  // Size must be a multiple of 256 MiB
  if (size % 0x10000000) {
    return CELL_EALIGN;
  }
  if (size > u32{umax}) {
    return CELL_ENOMEM;
  }
  // This is a workaround for psl1ght, which gives us an alignment of 0, which
  // is technically invalid, but apparently is allowed on actual ps3
  // https://github.com/ps3dev/PSL1GHT/blob/534e58950732c54dc6a553910b653c99ba6e9edc/ppu/librt/sbrk.c#L71
  if (!alignment) {
    alignment = 0x10000000;
  }
  switch (alignment) {
  case 0x10000000:
  case 0x20000000:
  case 0x40000000:
  case 0x80000000: {
    if (const auto area =
            vm::find_map(static_cast<u32>(size), static_cast<u32>(alignment),
                         flags & SYS_MEMORY_PAGE_SIZE_MASK)) {
      sys_mmapper.warning(
          "sys_mmapper_allocate_address(): Found VM 0x%x area (vsize=0x%x)",
          area->addr, size);
      ppu.check_state();
      *alloc_addr = area->addr;
      return CELL_OK;
    }
    return CELL_ENOMEM;
  }
  }
  // Any other alignment value is invalid
  return CELL_EALIGN;
}
// Maps the fixed mmapper region at 0xB0000000 (256MB, 1MB pages).
// Returns CELL_EEXIST when that region is already mapped.
error_code sys_mmapper_allocate_fixed_address(ppu_thread &ppu) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_allocate_fixed_address()");
  if (vm::map(0xB0000000, 0x10000000, SYS_MEMORY_PAGE_SIZE_1M)) {
    return CELL_OK;
  }
  // The area already exists
  return CELL_EEXIST;
}
// Allocates a shared-memory object charged against the default ("DCT")
// memory container. Size granularity is validated against the requested
// page size (1MB default, 64KB optional).
error_code sys_mmapper_allocate_shared_memory(ppu_thread &ppu, u64 ipc_key,
                                              u64 size, u64 flags,
                                              vm::ptr<u32> mem_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_allocate_shared_memory(ipc_key=0x%x, "
                      "size=0x%x, flags=0x%x, mem_id=*0x%x)",
                      ipc_key, size, flags, mem_id);
  if (size == 0) {
    return CELL_EALIGN;
  }
  // Check page granularity
  switch (flags & SYS_MEMORY_GRANULARITY_MASK) {
  case 0:
  case SYS_MEMORY_GRANULARITY_1M: {
    if (size % 0x100000) {
      return CELL_EALIGN;
    }
    break;
  }
  case SYS_MEMORY_GRANULARITY_64K: {
    if (size % 0x10000) {
      return CELL_EALIGN;
    }
    break;
  }
  default: {
    return CELL_EINVAL;
  }
  }
  // Get "default" memory container
  auto &dct = g_fxo->get<lv2_memory_container>();
  // SYS_MMAPPER_NO_SHM_KEY means the object is process-local
  if (auto error = create_lv2_shm(
          ipc_key != SYS_MMAPPER_NO_SHM_KEY, ipc_key, size,
          flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000, flags, &dct)) {
    return error;
  }
  ppu.check_state();
  *mem_id = idm::last_id();
  return CELL_OK;
}
// Same as sys_mmapper_allocate_shared_memory, but the physical memory is
// charged against an explicit container `cid` (CELL_ESRCH if it does not
// exist).
error_code
sys_mmapper_allocate_shared_memory_from_container(ppu_thread &ppu, u64 ipc_key,
                                                  u64 size, u32 cid, u64 flags,
                                                  vm::ptr<u32> mem_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning(
      "sys_mmapper_allocate_shared_memory_from_container(ipc_key=0x%x, "
      "size=0x%x, cid=0x%x, flags=0x%x, mem_id=*0x%x)",
      ipc_key, size, cid, flags, mem_id);
  if (size == 0) {
    return CELL_EALIGN;
  }
  // Check page granularity.
  switch (flags & SYS_MEMORY_GRANULARITY_MASK) {
  case 0:
  case SYS_MEMORY_GRANULARITY_1M: {
    if (size % 0x100000) {
      return CELL_EALIGN;
    }
    break;
  }
  case SYS_MEMORY_GRANULARITY_64K: {
    if (size % 0x10000) {
      return CELL_EALIGN;
    }
    break;
  }
  default: {
    return CELL_EINVAL;
  }
  }
  const auto ct = idm::get_unlocked<lv2_memory_container>(cid);
  if (!ct) {
    return CELL_ESRCH;
  }
  if (auto error =
          create_lv2_shm(ipc_key != SYS_MMAPPER_NO_SHM_KEY, ipc_key, size,
                         flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000,
                         flags, ct.get())) {
    return error;
  }
  ppu.check_state();
  *mem_id = idm::last_id();
  return CELL_OK;
}
// Extended variant taking a list of (mostly unknown) descriptor entries.
// Entry type 5 requires 64KB pages and a debug/root process. The object is
// always created process-shared and exclusively (fails if the key exists).
error_code sys_mmapper_allocate_shared_memory_ext(
    ppu_thread &ppu, u64 ipc_key, u64 size, u32 flags,
    vm::ptr<mmapper_unk_entry_struct0> entries, s32 entry_count,
    vm::ptr<u32> mem_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.todo(
      "sys_mmapper_allocate_shared_memory_ext(ipc_key=0x%x, size=0x%x, "
      "flags=0x%x, entries=*0x%x, entry_count=0x%x, mem_id=*0x%x)",
      ipc_key, size, flags, entries, entry_count, mem_id);
  if (size == 0) {
    return CELL_EALIGN;
  }
  switch (flags & SYS_MEMORY_GRANULARITY_MASK) {
  case SYS_MEMORY_GRANULARITY_1M:
  case 0: {
    if (size % 0x100000) {
      return CELL_EALIGN;
    }
    break;
  }
  case SYS_MEMORY_GRANULARITY_64K: {
    if (size % 0x10000) {
      return CELL_EALIGN;
    }
    break;
  }
  default: {
    return CELL_EINVAL;
  }
  }
  // No flags other than page size are accepted
  if (flags & ~SYS_MEMORY_PAGE_SIZE_MASK) {
    return CELL_EINVAL;
  }
  if (entry_count <= 0 || entry_count > 0x10) {
    return CELL_EINVAL;
  }
  // The condition is always true; the `if constexpr` only serves to scope
  // the runtime flag `to_perm_check` to this validation section
  if constexpr (bool to_perm_check = false; true) {
    for (s32 i = 0; i < entry_count; i++) {
      const u64 type = entries[i].type;
      // The whole structure contents are unknown
      sys_mmapper.todo(
          "sys_mmapper_allocate_shared_memory_ext(): entry type = 0x%x", type);
      switch (type) {
      case 0:
      case 1:
      case 3: {
        break;
      }
      case 5: {
        // Privileged entry: defer the permission check until all entries
        // have been validated
        to_perm_check = true;
        break;
      }
      default: {
        return CELL_EPERM;
      }
      }
    }
    if (to_perm_check) {
      if (flags != SYS_MEMORY_PAGE_SIZE_64K ||
          !g_ps3_process_info.debug_or_root()) {
        return CELL_EPERM;
      }
    }
  }
  // Get "default" memory container
  auto &dct = g_fxo->get<lv2_memory_container>();
  if (auto error = create_lv2_shm<true>(
          true, ipc_key, size,
          flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000, flags, &dct)) {
    return error;
  }
  ppu.check_state();
  *mem_id = idm::last_id();
  return CELL_OK;
}
// Container-charged version of sys_mmapper_allocate_shared_memory_ext.
// NOTE(review): the granularity switch here masks with
// SYS_MEMORY_PAGE_SIZE_MASK while the sibling _ext syscall uses
// SYS_MEMORY_GRANULARITY_MASK — confirm this asymmetry is intended.
error_code sys_mmapper_allocate_shared_memory_from_container_ext(
    ppu_thread &ppu, u64 ipc_key, u64 size, u64 flags, u32 cid,
    vm::ptr<mmapper_unk_entry_struct0> entries, s32 entry_count,
    vm::ptr<u32> mem_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.todo("sys_mmapper_allocate_shared_memory_from_container_ext(ipc_"
                   "key=0x%x, size=0x%x, flags=0x%x, cid=0x%x, entries=*0x%x, "
                   "entry_count=0x%x, mem_id=*0x%x)",
                   ipc_key, size, flags, cid, entries, entry_count, mem_id);
  switch (flags & SYS_MEMORY_PAGE_SIZE_MASK) {
  case SYS_MEMORY_PAGE_SIZE_1M:
  case 0: {
    if (size % 0x100000) {
      return CELL_EALIGN;
    }
    break;
  }
  case SYS_MEMORY_PAGE_SIZE_64K: {
    if (size % 0x10000) {
      return CELL_EALIGN;
    }
    break;
  }
  default: {
    return CELL_EINVAL;
  }
  }
  if (flags & ~SYS_MEMORY_PAGE_SIZE_MASK) {
    return CELL_EINVAL;
  }
  if (entry_count <= 0 || entry_count > 0x10) {
    return CELL_EINVAL;
  }
  // Always-true branch; the `if constexpr` only scopes `to_perm_check`
  if constexpr (bool to_perm_check = false; true) {
    for (s32 i = 0; i < entry_count; i++) {
      const u64 type = entries[i].type;
      sys_mmapper.todo("sys_mmapper_allocate_shared_memory_from_container_ext()"
                       ": entry type = 0x%x",
                       type);
      switch (type) {
      case 0:
      case 1:
      case 3: {
        break;
      }
      case 5: {
        // Privileged entry: requires 64KB pages and debug/root process
        to_perm_check = true;
        break;
      }
      default: {
        return CELL_EPERM;
      }
      }
    }
    if (to_perm_check) {
      if (flags != SYS_MEMORY_PAGE_SIZE_64K ||
          !g_ps3_process_info.debug_or_root()) {
        return CELL_EPERM;
      }
    }
  }
  const auto ct = idm::get_unlocked<lv2_memory_container>(cid);
  if (!ct) {
    return CELL_ESRCH;
  }
  if (auto error = create_lv2_shm<true>(
          true, ipc_key, size,
          flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000, flags,
          ct.get())) {
    return error;
  }
  ppu.check_state();
  *mem_id = idm::last_id();
  return CELL_OK;
}
// Stub: access-right changes are not emulated; the call is logged and
// reported as success.
error_code sys_mmapper_change_address_access_right(ppu_thread &ppu, u32 addr,
                                                   u64 flags) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.todo(
      "sys_mmapper_change_address_access_right(addr=0x%x, flags=0x%x)", addr,
      flags);
  return CELL_OK;
}
// Frees a VM area previously reserved with sys_mmapper_allocate_address.
// Fails with CELL_EBUSY if any thread is currently suspended on a page
// fault inside the area; pf_mutex is held across the unmap to keep the
// fault table consistent.
error_code sys_mmapper_free_address(ppu_thread &ppu, u32 addr) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_free_address(addr=0x%x)", addr);
  // mmapper areas live in [0x20000000, 0xC0000000)
  if (addr < 0x20000000 || addr >= 0xC0000000) {
    return {CELL_EINVAL, addr};
  }
  // If page fault notify exists and an address in this area is faulted, we
  // can't free the memory.
  auto &pf_events = g_fxo->get<page_fault_event_entries>();
  std::lock_guard pf_lock(pf_events.pf_mutex);
  const auto mem = vm::get(vm::any, addr);
  // The address must be the start of an area
  if (!mem || mem->addr != addr) {
    return {CELL_EINVAL, addr};
  }
  for (const auto &ev : pf_events.events) {
    if (addr <= ev.second && ev.second <= addr + mem->size - 1) {
      return CELL_EBUSY;
    }
  }
  // Try to unmap area
  const auto [area, success] = vm::unmap(addr, true, &mem);
  if (!area) {
    return {CELL_EINVAL, addr};
  }
  if (!success) {
    return CELL_EBUSY;
  }
  // If a memory block is freed, remove it from page notification table.
  auto &pf_entries = g_fxo->get<page_fault_notification_entries>();
  std::lock_guard lock(pf_entries.mutex);
  auto ind_to_remove = pf_entries.entries.begin();
  for (; ind_to_remove != pf_entries.entries.end(); ++ind_to_remove) {
    if (addr == ind_to_remove->start_addr) {
      break;
    }
  }
  if (ind_to_remove != pf_entries.entries.end()) {
    pf_entries.entries.erase(ind_to_remove);
  }
  return CELL_OK;
}
// Destroys a shared-memory object. Fails with CELL_EBUSY while the object
// is still mapped anywhere (counter != 0); the physical memory is returned
// to the container only when the last ID reference disappears.
error_code sys_mmapper_free_shared_memory(ppu_thread &ppu, u32 mem_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_free_shared_memory(mem_id=0x%x)", mem_id);
  // Conditionally remove memory ID
  const auto mem = idm::withdraw<lv2_obj, lv2_memory>(
      mem_id, [&](lv2_memory &mem) -> CellError {
        if (mem.counter) {
          return CELL_EBUSY;
        }
        lv2_obj::on_id_destroy(mem, mem.key, +mem.pshared);
        if (!mem.exists) {
          // Return "physical memory" to the memory container
          mem.ct->free(mem.size);
        }
        return {};
      });
  if (!mem) {
    return CELL_ESRCH;
  }
  if (mem.ret) {
    return mem.ret;
  }
  return CELL_OK;
}
// Maps a shared-memory object at a fixed address inside a previously
// reserved area. The object's page alignment must be at least as strict as
// the area's, and `addr` must be aligned to it.
error_code sys_mmapper_map_shared_memory(ppu_thread &ppu, u32 addr, u32 mem_id,
                                         u64 flags) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning(
      "sys_mmapper_map_shared_memory(addr=0x%x, mem_id=0x%x, flags=0x%x)", addr,
      mem_id, flags);
  const auto area = vm::get(vm::any, addr);
  if (!area || addr < 0x20000000 || addr >= 0xC0000000) {
    return CELL_EINVAL;
  }
  const auto mem =
      idm::get<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory &mem) -> CellError {
        const u32 page_alignment =
            area->flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000;
        if (mem.align < page_alignment) {
          return CELL_EINVAL;
        }
        if (addr % page_alignment) {
          return CELL_EALIGN;
        }
        // Mark as mapped before releasing the ID lock
        mem.counter++;
        return {};
      });
  if (!mem) {
    return CELL_ESRCH;
  }
  if (mem.ret) {
    return mem.ret;
  }
  if (!area->falloc(addr, mem->size, &mem->shm,
                    mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K
                                          : SYS_MEMORY_PAGE_SIZE_1M)) {
    // Mapping failed: undo the counter increment taken above
    mem->counter--;
    if (!area->is_valid()) {
      return {CELL_EINVAL, addr};
    }
    return CELL_EBUSY;
  }
  vm::lock_sudo(addr, mem->size);
  return CELL_OK;
}
// Maps a shared-memory object at any free location inside the area that
// starts at `start_addr`, writing the chosen address to `alloc_addr`.
error_code sys_mmapper_search_and_map(ppu_thread &ppu, u32 start_addr,
                                      u32 mem_id, u64 flags,
                                      vm::ptr<u32> alloc_addr) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_search_and_map(start_addr=0x%x, "
                      "mem_id=0x%x, flags=0x%x, alloc_addr=*0x%x)",
                      start_addr, mem_id, flags, alloc_addr);
  const auto area = vm::get(vm::any, start_addr);
  // start_addr must be the exact base of an mmapper area
  if (!area || start_addr != area->addr || start_addr < 0x20000000 ||
      start_addr >= 0xC0000000) {
    return {CELL_EINVAL, start_addr};
  }
  const auto mem =
      idm::get<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory &mem) -> CellError {
        const u32 page_alignment =
            area->flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000;
        if (mem.align < page_alignment) {
          return CELL_EALIGN;
        }
        // Mark as mapped before releasing the ID lock
        mem.counter++;
        return {};
      });
  if (!mem) {
    return CELL_ESRCH;
  }
  if (mem.ret) {
    return mem.ret;
  }
  const u32 addr = area->alloc(mem->size, &mem->shm, mem->align,
                               mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K
                                                     : SYS_MEMORY_PAGE_SIZE_1M);
  if (!addr) {
    // No space found: undo the counter increment taken above
    mem->counter--;
    if (!area->is_valid()) {
      return {CELL_EINVAL, start_addr};
    }
    return CELL_ENOMEM;
  }
  sys_mmapper.notice("sys_mmapper_search_and_map(): Found 0x%x address", addr);
  vm::lock_sudo(addr, mem->size);
  ppu.check_state();
  *alloc_addr = addr;
  return CELL_OK;
}
// Unmaps the shared-memory object mapped at `addr` and writes the object's
// ID back to the caller. The lv2_memory object is located by matching the
// underlying shm block of the mapping.
error_code sys_mmapper_unmap_shared_memory(ppu_thread &ppu, u32 addr,
                                           vm::ptr<u32> mem_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning(
      "sys_mmapper_unmap_shared_memory(addr=0x%x, mem_id=*0x%x)", addr, mem_id);
  const auto area = vm::get(vm::any, addr);
  if (!area || addr < 0x20000000 || addr >= 0xC0000000) {
    return {CELL_EINVAL, addr};
  }
  // Look up the shm block mapped at this exact address
  const auto shm = area->peek(addr);
  if (!shm.second) {
    return {CELL_EINVAL, addr};
  }
  // Find the lv2_memory object owning this shm block
  const auto mem =
      idm::select<lv2_obj, lv2_memory>([&](u32 id, lv2_memory &mem) -> u32 {
        if (mem.shm.get() == shm.second.get()) {
          return id;
        }
        return 0;
      });
  if (!mem) {
    return {CELL_EINVAL, addr};
  }
  if (!area->dealloc(addr, &shm.second)) {
    return {CELL_EINVAL, addr};
  }
  // Write out the ID
  ppu.check_state();
  *mem_id = mem.ret;
  // Acknowledge
  mem->counter--;
  return CELL_OK;
}
// Registers an event queue to receive page-fault notifications for the VM
// area starting at `start_addr`. Creates a local event port and connects
// it to the queue; the port is torn down again if the area already has a
// notification registered.
error_code sys_mmapper_enable_page_fault_notification(ppu_thread &ppu,
                                                      u32 start_addr,
                                                      u32 event_queue_id) {
  ppu.state += cpu_flag::wait;
  sys_mmapper.warning("sys_mmapper_enable_page_fault_notification(start_addr="
                      "0x%x, event_queue_id=0x%x)",
                      start_addr, event_queue_id);
  auto mem = vm::get(vm::any, start_addr);
  if (!mem || start_addr != mem->addr || start_addr < 0x20000000 ||
      start_addr >= 0xC0000000) {
    return {CELL_EINVAL, start_addr};
  }
  // TODO: Check memory region's flags to make sure the memory can be used for
  // page faults.
  auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(event_queue_id);
  if (!queue) { // Can't connect the queue if it doesn't exist.
    return CELL_ESRCH;
  }
  vm::var<u32> port_id(0);
  error_code res = sys_event_port_create(ppu, port_id, SYS_EVENT_PORT_LOCAL,
                                         SYS_MEMORY_PAGE_FAULT_EVENT_KEY);
  // NOTE(review): the connect result is ignored and `res` is only checked
  // for EAGAIN after connecting — confirm this ordering is intended
  sys_event_port_connect_local(ppu, *port_id, event_queue_id);
  if (res + 0u == CELL_EAGAIN) {
    // Not enough system resources.
    return CELL_EAGAIN;
  }
  auto &pf_entries = g_fxo->get<page_fault_notification_entries>();
  std::unique_lock lock(pf_entries.mutex);
  // Return error code if page fault notifications are already enabled
  for (const auto &entry : pf_entries.entries) {
    if (entry.start_addr == start_addr) {
      // Undo the port created above before failing
      lock.unlock();
      sys_event_port_disconnect(ppu, *port_id);
      sys_event_port_destroy(ppu, *port_id);
      return CELL_EBUSY;
    }
  }
  page_fault_notification_entry entry{start_addr, event_queue_id,
                                      port_id->value()};
  pf_entries.entries.emplace_back(entry);
  return CELL_OK;
}
// Resumes a thread that was suspended by the page-fault handler: removes
// it from the pending-fault table and wakes it (scheduler-aware for PPU
// threads, plain signal for others).
error_code mmapper_thread_recover_page_fault(cpu_thread *cpu) {
  // We can only wake a thread if it is being suspended for a page fault.
  auto &pf_events = g_fxo->get<page_fault_event_entries>();
  {
    std::lock_guard pf_lock(pf_events.pf_mutex);
    const auto pf_event_ind = pf_events.events.find(cpu);
    if (pf_event_ind == pf_events.events.end()) {
      // if not found...
      return CELL_EINVAL;
    }
    pf_events.events.erase(pf_event_ind);
    if (cpu->get_class() == thread_class::ppu) {
      lv2_obj::awake(cpu);
    } else {
      cpu->state += cpu_flag::signal;
    }
  }
  // Notify outside the lock to avoid waking into a held mutex
  if (cpu->state & cpu_flag::signal) {
    cpu->state.notify_one();
  }
  return CELL_OK;
}

View file

@ -0,0 +1,338 @@
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
#include "sys_mutex.h"
LOG_CHANNEL(sys_mutex);
// Savestate deserializing constructor; field order must match save().
lv2_mutex::lv2_mutex(utils::serial &ar)
    : protocol(ar), recursive(ar), adaptive(ar), key(ar), name(ar) {
  ar(lock_count, control.raw().owner);
  // For backwards compatibility
  control.raw().owner >>= 1;
}
// Savestate deserialization entry point.
std::function<void(void *)> lv2_mutex::load(utils::serial &ar) {
  return load_func(make_shared<lv2_mutex>(exact_t<utils::serial &>(ar)));
}
// Savestate serialization; the owner is shifted left to match the legacy
// on-disk format expected by the deserializing constructor.
void lv2_mutex::save(utils::serial &ar) {
  ar(protocol, recursive, adaptive, key, name, lock_count,
     control.raw().owner << 1);
}
// Creates an lv2 mutex after validating the attribute structure
// (protocol, recursion mode, adaptive flag) and writes the new ID out.
error_code sys_mutex_create(ppu_thread &ppu, vm::ptr<u32> mutex_id,
                            vm::ptr<sys_mutex_attribute_t> attr) {
  ppu.state += cpu_flag::wait;
  sys_mutex.trace("sys_mutex_create(mutex_id=*0x%x, attr=*0x%x)", mutex_id,
                  attr);
  if (!mutex_id || !attr) {
    return CELL_EFAULT;
  }
  // Copy the attributes from guest memory once
  const auto _attr = *attr;
  const u64 ipc_key = lv2_obj::get_key(_attr);
  if (ipc_key) {
    sys_mutex.warning(
        "sys_mutex_create(mutex_id=*0x%x, attr=*0x%x): IPC=0x%016x", mutex_id,
        attr, ipc_key);
  }
  switch (_attr.protocol) {
  case SYS_SYNC_FIFO:
    break;
  case SYS_SYNC_PRIORITY:
    break;
  case SYS_SYNC_PRIORITY_INHERIT:
    // Priority inheritance is accepted but logged (not fully emulated)
    sys_mutex.warning("sys_mutex_create(): SYS_SYNC_PRIORITY_INHERIT");
    break;
  default: {
    sys_mutex.error("sys_mutex_create(): unknown protocol (0x%x)",
                    _attr.protocol);
    return CELL_EINVAL;
  }
  }
  switch (_attr.recursive) {
  case SYS_SYNC_RECURSIVE:
    break;
  case SYS_SYNC_NOT_RECURSIVE:
    break;
  default: {
    sys_mutex.error("sys_mutex_create(): unknown recursive (0x%x)",
                    _attr.recursive);
    return CELL_EINVAL;
  }
  }
  if (_attr.adaptive != SYS_SYNC_NOT_ADAPTIVE) {
    sys_mutex.todo("sys_mutex_create(): unexpected adaptive (0x%x)",
                   _attr.adaptive);
  }
  if (auto error = lv2_obj::create<lv2_mutex>(
          _attr.pshared, _attr.ipc_key, _attr.flags, [&]() {
            return make_shared<lv2_mutex>(_attr.protocol, _attr.recursive,
                                          _attr.adaptive, ipc_key,
                                          _attr.name_u64);
          })) {
    return error;
  }
  ppu.check_state();
  *mutex_id = idm::last_id();
  return CELL_OK;
}
// Destroys a mutex. Fails with CELL_EBUSY while owned and CELL_EPERM while
// condition variables are still attached to it.
error_code sys_mutex_destroy(ppu_thread &ppu, u32 mutex_id) {
  ppu.state += cpu_flag::wait;
  sys_mutex.trace("sys_mutex_destroy(mutex_id=0x%x)", mutex_id);
  const auto mutex = idm::withdraw<lv2_obj, lv2_mutex>(
      mutex_id, [](lv2_mutex &mutex) -> CellError {
        std::lock_guard lock(mutex.mutex);
        if (atomic_storage<u32>::load(mutex.control.raw().owner)) {
          return CELL_EBUSY;
        }
        if (mutex.cond_count) {
          return CELL_EPERM;
        }
        lv2_obj::on_id_destroy(mutex, mutex.key);
        return {};
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  if (mutex->key) {
    sys_mutex.warning("sys_mutex_destroy(mutex_id=0x%x): IPC=0x%016x", mutex_id,
                      mutex->key);
  }
  if (mutex.ret) {
    return mutex.ret;
  }
  return CELL_OK;
}
// Locks a mutex, sleeping with an optional timeout (0 = infinite).
// Fast path: try_lock, then a short busy-wait when no other waiter is
// queued; slow path: enqueue on the mutex and block until signaled,
// stopped (savestate) or timed out. The statement ordering around
// cancel_sleep / notify.cleanup() implements the sleep-cancellation
// protocol and must not be reordered.
error_code sys_mutex_lock(ppu_thread &ppu, u32 mutex_id, u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_mutex.trace("sys_mutex_lock(mutex_id=0x%x, timeout=0x%llx)", mutex_id,
                  timeout);
  const auto mutex = idm::get<lv2_obj, lv2_mutex>(
      mutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_mutex &mutex) {
        CellError result = mutex.try_lock(ppu);
        if (result == CELL_EBUSY &&
            !atomic_storage<ppu_thread *>::load(mutex.control.raw().sq)) {
          // Try busy waiting a bit if advantageous
          for (u32 i = 0, end = lv2_obj::has_ppus_in_running_state() ? 3 : 10;
               id_manager::g_mutex.is_lockable() && i < end; i++) {
            busy_wait(300);
            result = mutex.try_lock(ppu);
            if (!result ||
                atomic_storage<ppu_thread *>::load(mutex.control.raw().sq)) {
              break;
            }
          }
        }
        if (result == CELL_EBUSY) {
          // Still contended: queue this thread as a sleeper
          lv2_obj::prepare_for_sleep(ppu);
          ppu.cancel_sleep = 1;
          if (mutex.try_own(ppu) || !mutex.sleep(ppu, timeout)) {
            // Acquired at the last moment, or sleep was cancelled
            result = {};
          }
          if (ppu.cancel_sleep != 1) {
            notify.cleanup();
          }
          ppu.cancel_sleep = 0;
        }
        return result;
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  if (mutex.ret) {
    if (mutex.ret != CELL_EBUSY) {
      return mutex.ret;
    }
  } else {
    return CELL_OK;
  }
  // EBUSY: we are queued on the mutex; wait to be signaled or time out.
  // gpr[3] carries the final result (may be set to ETIMEDOUT below)
  ppu.gpr[3] = CELL_OK;
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      // Emulation is stopping: if still queued, defer for savestate replay
      std::lock_guard lock(mutex->mutex);
      for (auto cpu =
               atomic_storage<ppu_thread *>::load(mutex->control.raw().sq);
           cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }
      break;
    }
    // Short spin before committing to a full wait
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 40; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        if (!atomic_storage<ppu_thread *>::load(mutex->control.raw().sq)) {
          // Waiters queue is empty, so the thread must have been signaled
          mutex->mutex.lock_unlock();
          break;
        }
        // Timed out: remove ourselves from the waiters queue; if we are no
        // longer queued, a signal raced with the timeout and wins
        std::lock_guard lock(mutex->mutex);
        bool success = false;
        mutex->control.fetch_op([&](lv2_mutex::control_data_t &data) {
          success = false;
          ppu_thread *sq = static_cast<ppu_thread *>(data.sq);
          const bool retval = &ppu == sq;
          if (!mutex->unqueue<false>(sq, &ppu)) {
            return false;
          }
          success = true;
          if (!retval) {
            return false;
          }
          data.sq = sq;
          return true;
        });
        if (success) {
          ppu.next_cpu = nullptr;
          ppu.gpr[3] = CELL_ETIMEDOUT;
        }
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  return not_an_error(ppu.gpr[3]);
}
// Attempts to take the mutex without blocking.
// Returns CELL_ESRCH for an unknown ID, CELL_EBUSY (wrapped in
// not_an_error, since it is an expected outcome) when the mutex is held,
// or any other error produced by try_lock().
error_code sys_mutex_trylock(ppu_thread &ppu, u32 mutex_id) {
  ppu.state += cpu_flag::wait;
  sys_mutex.trace("sys_mutex_trylock(mutex_id=0x%x)", mutex_id);
  const auto mutex = idm::check<lv2_obj, lv2_mutex>(
      mutex_id, [&](lv2_mutex &mtx) { return mtx.try_lock(ppu); });
  if (!mutex) {
    return CELL_ESRCH;
  }
  if (mutex.ret == CELL_EBUSY) {
    // Busy is not a failure for a trylock
    return not_an_error(CELL_EBUSY);
  }
  if (mutex.ret) {
    return mutex.ret;
  }
  return CELL_OK;
}
// Unlocks a mutex. If waiters are queued (try_unlock reports EBUSY) the
// ownership is handed directly to the next waiter, which is then awoken.
error_code sys_mutex_unlock(ppu_thread &ppu, u32 mutex_id) {
  ppu.state += cpu_flag::wait;
  sys_mutex.trace("sys_mutex_unlock(mutex_id=0x%x)", mutex_id);
  const auto mutex = idm::check<lv2_obj, lv2_mutex>(
      mutex_id,
      [&, notify = lv2_obj::notify_all_t()](lv2_mutex &mutex) -> CellError {
        auto result = mutex.try_unlock(ppu);
        if (result == CELL_EBUSY) {
          // There are waiters: pass ownership to the next one
          std::lock_guard lock(mutex.mutex);
          if (auto cpu = mutex.reown<ppu_thread>()) {
            if (cpu->state & cpu_flag::again) {
              // Savestate in progress: replay this syscall later
              ppu.state += cpu_flag::again;
              return {};
            }
            mutex.awake(cpu);
          }
          result = {};
        }
        notify.cleanup();
        return result;
      });
  if (!mutex) {
    return CELL_ESRCH;
  }
  if (mutex.ret) {
    return mutex.ret;
  }
  return CELL_OK;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,157 @@
#include "stdafx.h"
#include "sys_net/lv2_socket.h"
#include "sys_net/network_context.h"
LOG_CHANNEL(sys_net);
// Constructs the lv2-level socket shell; the native OS socket (if any) is
// created and assigned by the concrete subclass.
lv2_socket::lv2_socket(lv2_socket_family family, lv2_socket_type type,
                       lv2_ip_protocol protocol)
    : family(family), type(type), protocol(protocol) {}
// Acquires exclusive ownership of the socket mutex; the caller holds it
// for the lifetime of the returned guard.
std::unique_lock<shared_mutex> lv2_socket::lock() {
  std::unique_lock<shared_mutex> guard(mutex);
  return guard;
}
// Trivial accessors for the socket's immutable identity and state.
lv2_socket_family lv2_socket::get_family() const { return family; }
lv2_socket_type lv2_socket::get_type() const { return type; }
lv2_ip_protocol lv2_socket::get_protocol() const { return protocol; }
// Number of pending poll callbacks queued on this socket
std::size_t lv2_socket::get_queue_size() const { return queue.size(); }
// Underlying OS socket handle
socket_type lv2_socket::get_socket() const { return native_socket; }
#ifdef _WIN32
// Windows-only: non-blocking connect() completion must be tracked manually
// because WSAPoll does not report it the same way as POSIX poll
bool lv2_socket::is_connecting() const { return connecting; }
void lv2_socket::set_connecting(bool connecting) {
  this->connecting = connecting;
}
#endif
// Records the ID-manager handle assigned to this socket.
void lv2_socket::set_lv2_id(u32 id) { lv2_id = id; }
// Currently subscribed poll events (read/write/error bitset).
bs_t<lv2_socket::poll_t> lv2_socket::get_events() const {
  return events.load();
}
// Adds events to the subscription set.
void lv2_socket::set_poll_event(bs_t<lv2_socket::poll_t> event) {
  events += event;
}
// Queues a PPU thread with a poll callback to be invoked when `event`
// fires, and wakes the network_context poller for native socket types.
void lv2_socket::poll_queue(
    shared_ptr<ppu_thread> ppu, bs_t<lv2_socket::poll_t> event,
    std::function<bool(bs_t<lv2_socket::poll_t>)> poll_cb) {
  set_poll_event(event);
  queue.emplace_back(std::move(ppu), poll_cb);
  // Makes sure network_context thread is awaken
  if (type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM) {
    auto &nc = g_fxo->get<network_context>();
    const u32 prev_value = nc.num_polls.fetch_add(1);
    if (!prev_value) {
      // 0 -> 1 transition: the poller thread may be sleeping
      nc.num_polls.notify_one();
    }
  }
}
// Removes all queued poll callbacks belonging to `ppu` and returns how
// many were removed; the network_context poll counter is decremented
// accordingly so its thread can go back to sleep.
u32 lv2_socket::clear_queue(ppu_thread *ppu) {
  std::lock_guard lock(mutex);
  u32 cleared = 0;
  for (auto it = queue.begin(); it != queue.end();) {
    if (it->first.get() == ppu) {
      it = queue.erase(it);
      cleared++;
      continue;
    }
    it++;
  }
  if (queue.empty()) {
    // No waiters left: stop subscribing to events
    events.store({});
  }
  if (cleared && (type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM)) {
    // Makes sure network_context thread can go back to sleep if there is no
    // active polling
    const u32 prev_value =
        g_fxo->get<network_context>().num_polls.fetch_sub(cleared);
    ensure(prev_value >= cleared);
  }
  return cleared;
}
// Translates native poll() revents into lv2 poll events and dispatches the
// queued callbacks; callbacks returning true are considered handled and
// removed. Also runs with no events when timeouts are configured so the
// callbacks can observe expiry.
void lv2_socket::handle_events(const pollfd &native_pfd,
                               [[maybe_unused]] bool unset_connecting) {
  bs_t<lv2_socket::poll_t> events_happening{};
  // POLLHUP also signals readability (EOF is readable)
  if (native_pfd.revents & (POLLIN | POLLHUP) &&
      events.test_and_reset(lv2_socket::poll_t::read))
    events_happening += lv2_socket::poll_t::read;
  if (native_pfd.revents & POLLOUT &&
      events.test_and_reset(lv2_socket::poll_t::write))
    events_happening += lv2_socket::poll_t::write;
  if (native_pfd.revents & POLLERR &&
      events.test_and_reset(lv2_socket::poll_t::error))
    events_happening += lv2_socket::poll_t::error;
  if (events_happening || (!queue.empty() && (so_rcvtimeo || so_sendtimeo))) {
    std::lock_guard lock(mutex);
#ifdef _WIN32
    if (unset_connecting)
      set_connecting(false);
#endif
    u32 handled = 0;
    for (auto it = queue.begin(); it != queue.end();) {
      if (it->second(events_happening)) {
        it = queue.erase(it);
        handled++;
        continue;
      }
      it++;
    }
    if (handled &&
        (type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM)) {
      // Keep the network_context poll counter in sync with the queue
      const u32 prev_value =
          g_fxo->get<network_context>().num_polls.fetch_sub(handled);
      ensure(prev_value >= handled);
    }
    if (queue.empty()) {
      events.store({});
    }
  }
}
// Schedules `ppu` for awakening on the worker context that owns this
// socket type (native network context vs. virtual P2P context). Unknown
// types are ignored.
void lv2_socket::queue_wake(ppu_thread *ppu) {
  if (type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM) {
    g_fxo->get<network_context>().add_ppu_to_awake(ppu);
  } else if (type == SYS_NET_SOCK_DGRAM_P2P ||
             type == SYS_NET_SOCK_STREAM_P2P) {
    g_fxo->get<p2p_context>().add_ppu_to_awake(ppu);
  }
}
// Thread-state transition hook: tearing down the owning context closes the
// socket; every other transition is a no-op.
lv2_socket &lv2_socket::operator=(thread_state s) noexcept {
  if (s != thread_state::destroying_context) {
    return *this;
  }
  close();
  return *this;
}
lv2_socket::~lv2_socket() noexcept {}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,400 @@
#include "stdafx.h"
#include "Emu/NP/np_helpers.h"
#include "sys_net/lv2_socket_p2p.h"
#include "sys_net/network_context.h"
#include "sys_net/sys_net_helpers.h"
LOG_CHANNEL(sys_net);
// Pre-seeds the sockopt cache so getsockopt(SOL_SOCKET, SO_TYPE) reports
// the P2P datagram type without special-casing.
lv2_socket_p2p::lv2_socket_p2p(lv2_socket_family family, lv2_socket_type type,
                               lv2_ip_protocol protocol)
    : lv2_socket(family, type, protocol) {
  sockopt_cache cache_type;
  cache_type.data._int = SYS_NET_SOCK_DGRAM_P2P;
  cache_type.len = 4;
  // Cache key is (level << 32) | optname, matching getsockopt/setsockopt
  sockopts[(static_cast<u64>(SYS_NET_SOL_SOCKET) << 32ull) | SYS_NET_SO_TYPE] =
      cache_type;
}
// Savestate deserializing constructor; restores the bound port/vport and
// re-queues any pending received datagrams (saved as a deque).
lv2_socket_p2p::lv2_socket_p2p(utils::serial &ar, lv2_socket_type type)
    : lv2_socket(make_exact(ar), type) {
  ar(port, vport, bound_addr);
  auto data_dequeue =
      ar.pop<std::deque<std::pair<sys_net_sockaddr_in_p2p, std::vector<u8>>>>();
  for (; !data_dequeue.empty(); data_dequeue.pop_front()) {
    data.push(std::move(data_dequeue.front()));
  }
}
// Savestate serialization; the receive queue is copied into a deque since
// std::queue itself is not serializable.
void lv2_socket_p2p::save(utils::serial &ar) {
  lv2_socket::save(ar, true);
  ar(port, vport, bound_addr);
  std::deque<std::pair<sys_net_sockaddr_in_p2p, std::vector<u8>>> data_dequeue;
  // Drain a copy of the queue so the live socket state is untouched
  for (auto save_data = ::as_rvalue(data); !save_data.empty();
       save_data.pop()) {
    data_dequeue.push_back(std::move(save_data.front()));
  }
  ar(data_dequeue);
}
// Called by the P2P context thread when a datagram for this socket's vport
// arrives: stores it and fires any pending read-poll callbacks.
void lv2_socket_p2p::handle_new_data(sys_net_sockaddr_in_p2p p2p_addr,
                                     std::vector<u8> p2p_data) {
  std::lock_guard lock(mutex);
  sys_net.trace("Received a P2P packet for vport %d and saved it",
                p2p_addr.sin_vport);
  data.push(std::make_pair(std::move(p2p_addr), std::move(p2p_data)));
  // Check if poll is happening
  if (events.test_and_reset(lv2_socket::poll_t::read)) {
    bs_t<lv2_socket::poll_t> read_event = lv2_socket::poll_t::read;
    for (auto it = queue.begin(); it != queue.end();) {
      if (it->second(read_event)) {
        it = queue.erase(it);
        continue;
      }
      it++;
    }
    if (queue.empty()) {
      events.store({});
    }
  }
}
// The following operations are connection-oriented and not valid on a P2P
// datagram socket; reaching them indicates a caller-side logic error, so
// they log at fatal level and return empty results.
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr>
lv2_socket_p2p::accept([[maybe_unused]] bool is_lock) {
  sys_net.fatal("[P2P] accept() called on a P2P socket");
  return {};
}
std::optional<s32>
lv2_socket_p2p::connect([[maybe_unused]] const sys_net_sockaddr &addr) {
  sys_net.fatal("[P2P] connect() called on a P2P socket");
  return {};
}
s32 lv2_socket_p2p::connect_followup() {
  sys_net.fatal("[P2P] connect_followup() called on a P2P socket");
  return {};
}
std::pair<s32, sys_net_sockaddr> lv2_socket_p2p::getpeername() {
  sys_net.fatal("[P2P] getpeername() called on a P2P socket");
  return {};
}
s32 lv2_socket_p2p::listen([[maybe_unused]] s32 backlog) {
  sys_net.fatal("[P2P] listen() called on a P2P socket");
  return {};
}
// Binds the socket to a (port, vport) pair on the shared per-port native
// UDP socket. vport 0 auto-assigns the first free vport >= 30000; binding
// an occupied vport requires every existing binder to be SO_REUSEADDR /
// SO_REUSEPORT.
s32 lv2_socket_p2p::bind(const sys_net_sockaddr &addr) {
  const auto *psa_in_p2p =
      reinterpret_cast<const sys_net_sockaddr_in_p2p *>(&addr);
  u16 p2p_port = psa_in_p2p->sin_port;
  u16 p2p_vport = psa_in_p2p->sin_vport;
  sys_net.notice("[P2P] Trying to bind %s:%d:%d",
                 np::ip_to_string(std::bit_cast<u32>(psa_in_p2p->sin_addr)),
                 p2p_port, p2p_vport);
  if (p2p_port != SCE_NP_PORT) {
    if (p2p_port == 0) {
      return -SYS_NET_EINVAL;
    }
    sys_net.warning("[P2P] Attempting to bind a socket to a port != %d",
                    +SCE_NP_PORT);
  }
  socket_type real_socket{};
  auto &nc = g_fxo->get<p2p_context>();
  {
    std::lock_guard list_lock(nc.list_p2p_ports_mutex);
    // Lazily creates the shared native socket for this real port
    nc.create_p2p_port(p2p_port);
    auto &pport = ::at32(nc.list_p2p_ports, p2p_port);
    real_socket = pport.p2p_socket;
    {
      std::lock_guard lock(pport.bound_p2p_vports_mutex);
      if (p2p_vport == 0) {
        // Find a free vport starting at 30000
        p2p_vport = 30000;
        while (pport.bound_p2p_vports.contains(p2p_vport)) {
          p2p_vport++;
        }
      }
      if (pport.bound_p2p_vports.contains(p2p_vport)) {
        // Check that all other sockets are SO_REUSEADDR or SO_REUSEPORT
        auto &bound_sockets = ::at32(pport.bound_p2p_vports, p2p_vport);
        if (!sys_net_helpers::all_reusable(bound_sockets)) {
          return -SYS_NET_EADDRINUSE;
        }
        bound_sockets.insert(lv2_id);
      } else {
        std::set<s32> bound_ports{lv2_id};
        pport.bound_p2p_vports.insert(
            std::make_pair(p2p_vport, std::move(bound_ports)));
      }
    }
  }
  {
    // Publish the binding on the socket itself
    std::lock_guard lock(mutex);
    port = p2p_port;
    vport = p2p_vport;
    native_socket = real_socket;
    bound_addr = psa_in_p2p->sin_addr;
  }
  return CELL_OK;
}
// Returns the bound (port, vport, address) triple; an unbound socket
// (no native socket assigned yet) reports a zeroed address.
std::pair<s32, sys_net_sockaddr> lv2_socket_p2p::getsockname() {
  std::lock_guard lock(mutex);
  // Unbound socket
  if (!native_socket) {
    return {CELL_OK, {}};
  }
  sys_net_sockaddr sn_addr{};
  sys_net_sockaddr_in_p2p *paddr =
      reinterpret_cast<sys_net_sockaddr_in_p2p *>(&sn_addr);
  paddr->sin_len = sizeof(sys_net_sockaddr_in);
  paddr->sin_family = SYS_NET_AF_INET;
  paddr->sin_port = port;
  paddr->sin_vport = vport;
  paddr->sin_addr = bound_addr;
  return {CELL_OK, sn_addr};
}
// Serves options from the local cache populated by setsockopt (P2P sockets
// have no native per-socket options). Unknown options log an error and
// return a zero-initialized tuple.
std::tuple<s32, lv2_socket::sockopt_data, u32>
lv2_socket_p2p::getsockopt(s32 level, s32 optname, u32 len) {
  std::lock_guard lock(mutex);
  // Cache key layout matches the constructor and setsockopt
  const u64 key = (static_cast<u64>(level) << 32) | static_cast<u64>(optname);
  if (!sockopts.contains(key)) {
    sys_net.error("Unhandled getsockopt(level=%d, optname=%d, len=%d)", level,
                  optname, len);
    return {};
  }
  const auto &cache = ::at32(sockopts, key);
  return {CELL_OK, cache.data, cache.len};
}
// Caches the option value for later getsockopt calls; SO_NBIO additionally
// updates the live non-blocking flag.
// NOTE(review): memcpy writes optval.size() bytes into cache.data._int —
// presumably sockopt_data is a union large enough for every option; verify
// against the header.
s32 lv2_socket_p2p::setsockopt(s32 level, s32 optname,
                               const std::vector<u8> &optval) {
  std::lock_guard lock(mutex);
  int native_int = *reinterpret_cast<const be_t<s32> *>(optval.data());
  if (level == SYS_NET_SOL_SOCKET && optname == SYS_NET_SO_NBIO) {
    so_nbio = native_int;
  }
  const u64 key = (static_cast<u64>(level) << 32) | static_cast<u64>(optname);
  sockopt_cache cache{};
  memcpy(&cache.data._int, optval.data(), optval.size());
  cache.len = ::size32(optval);
  sockopts[key] = std::move(cache);
  return CELL_OK;
}
// Pops one queued datagram, truncated to `len` bytes, together with the
// sender's P2P address. Returns EWOULDBLOCK for non-blocking sockets and
// nullopt (caller should sleep) for blocking ones when the queue is empty.
std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>>
lv2_socket_p2p::recvfrom(s32 flags, u32 len, bool is_lock) {
  std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);
  if (is_lock) {
    lock.lock();
  }
  if (data.empty()) {
    if (so_nbio || (flags & SYS_NET_MSG_DONTWAIT))
      return {{-SYS_NET_EWOULDBLOCK, {}, {}}};
    return std::nullopt;
  }
  sys_net.trace("[P2P] p2p_data for vport %d contains %d elements", vport,
                data.size());
  std::vector<u8> res_buf(len);
  const auto &p2p_data = data.front();
  // Datagram semantics: excess bytes beyond `len` are discarded
  s32 native_result = std::min(len, static_cast<u32>(p2p_data.second.size()));
  memcpy(res_buf.data(), p2p_data.second.data(), native_result);
  sys_net_sockaddr sn_addr;
  memcpy(&sn_addr, &p2p_data.first, sizeof(sn_addr));
  data.pop();
  return {{native_result, res_buf, sn_addr}};
}
// Sends a datagram over the virtual P2P transport: the payload is prefixed
// with the 6-byte vport header (dst vport, src vport, P2P flags) and sent
// through the shared native UDP socket for this real port. Returns the
// number of payload bytes sent, a negative sys_net error, or nullopt when
// the native send buffer is full (caller should retry/sleep).
std::optional<s32>
lv2_socket_p2p::sendto(s32 flags, const std::vector<u8> &buf,
                       std::optional<sys_net_sockaddr> opt_sn_addr,
                       bool is_lock) {
  std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);
  if (is_lock) {
    lock.lock();
  }
  // A destination address is mandatory for P2P datagrams
  ensure(opt_sn_addr);
  // The socket must have been bound: bind() assigns the shared native
  // socket. (This previously tested `socket`, which resolved to the global
  // ::socket() function and was therefore always true, making the check a
  // no-op.)
  ensure(native_socket); // ensures it has been bound
  ensure(
      buf.size() <=
      static_cast<usz>(
          65535 -
          VPORT_P2P_HEADER_SIZE)); // catch games using full payload for future
                                   // fragmentation implementation if necessary
  const u16 p2p_port =
      reinterpret_cast<const sys_net_sockaddr_in *>(&*opt_sn_addr)->sin_port;
  const u16 p2p_vport =
      reinterpret_cast<const sys_net_sockaddr_in_p2p *>(&*opt_sn_addr)
          ->sin_vport;
  auto native_addr = sys_net_addr_to_native_addr(*opt_sn_addr);
  char ip_str[16];
  inet_ntop(AF_INET, &native_addr.sin_addr, ip_str, sizeof(ip_str));
  sys_net.trace("[P2P] Sending a packet to %s:%d:%d", ip_str, p2p_port,
                p2p_vport);
  // Prepend the vport header: [dst vport][src vport][flags], little-endian
  std::vector<u8> p2p_data(buf.size() + VPORT_P2P_HEADER_SIZE);
  const le_t<u16> p2p_vport_le = p2p_vport;
  const le_t<u16> src_vport_le = vport;
  const le_t<u16> p2p_flags_le = P2P_FLAG_P2P;
  memcpy(p2p_data.data(), &p2p_vport_le, sizeof(u16));
  memcpy(p2p_data.data() + sizeof(u16), &src_vport_le, sizeof(u16));
  memcpy(p2p_data.data() + sizeof(u16) + sizeof(u16), &p2p_flags_le,
         sizeof(u16));
  memcpy(p2p_data.data() + VPORT_P2P_HEADER_SIZE, buf.data(), buf.size());
  int native_flags = 0;
  if (flags & SYS_NET_MSG_WAITALL) {
    native_flags |= MSG_WAITALL;
  }
  auto native_result = np::sendto_possibly_ipv6(
      native_socket, reinterpret_cast<const char *>(p2p_data.data()),
      ::size32(p2p_data), &native_addr, native_flags);
  if (native_result >= 0) {
    // Report payload bytes only, excluding the header we added
    return {std::max<s32>(native_result - VPORT_P2P_HEADER_SIZE, 0l)};
  }
  s32 result = get_last_error(!so_nbio && (flags & SYS_NET_MSG_DONTWAIT) == 0);
  if (result) {
    return {-result};
  }
  // Note that this can only happen if the send buffer is full
  return std::nullopt;
}
// Not implemented for P2P sockets yet; logged as todo, returns empty.
std::optional<s32>
lv2_socket_p2p::sendmsg([[maybe_unused]] s32 flags,
                        [[maybe_unused]] const sys_net_msghdr &msg,
                        [[maybe_unused]] bool is_lock) {
  sys_net.todo("lv2_socket_p2p::sendmsg");
  return {};
}
// Unregisters this socket from the port/vport binding tables kept by the
// p2p_context. Safe to call on a socket that was never bound.
void lv2_socket_p2p::close() {
  // Never bound: nothing to unregister.
  if (!port || !vport) {
    return;
  }
  if (!g_fxo->is_init<p2p_context>()) {
    return;
  }
  auto &ctx = g_fxo->get<p2p_context>();
  std::lock_guard ports_lock(ctx.list_p2p_ports_mutex);
  const auto port_it = ctx.list_p2p_ports.find(port);
  if (port_it == ctx.list_p2p_ports.end()) {
    return;
  }
  auto &p2p_port = port_it->second;
  std::lock_guard vports_lock(p2p_port.bound_p2p_vports_mutex);
  const auto vport_it = p2p_port.bound_p2p_vports.find(vport);
  if (vport_it == p2p_port.bound_p2p_vports.end()) {
    return;
  }
  // Drop this socket id from the vport; remove the vport once empty.
  auto &bound_sockets = vport_it->second;
  bound_sockets.erase(lv2_id);
  if (bound_sockets.empty()) {
    p2p_port.bound_p2p_vports.erase(vport_it);
  }
}
// Stub: shutdown() semantics are not implemented for DGRAM-P2P sockets;
// reports success so callers proceed.
s32 lv2_socket_p2p::shutdown([[maybe_unused]] s32 how) {
  sys_net.todo("[P2P] shutdown");
  return CELL_OK;
}
// Poll implementation for DGRAM-P2P: readable when queued datagrams exist,
// always writable. Returns 1 if any requested event fired, 0 otherwise.
s32 lv2_socket_p2p::poll(sys_net_pollfd &sn_pfd,
                         [[maybe_unused]] pollfd &native_pfd) {
  std::lock_guard lock(mutex);
  ensure(vport);
  // Incoming datagrams are queued into 'data' by the P2P thread.
  const bool wants_read = (sn_pfd.events & SYS_NET_POLLIN) != 0;
  if (wants_read && !data.empty()) {
    sys_net.trace("[P2P] p2p_data for vport %d contains %d elements", vport,
                  data.size());
    sn_pfd.revents |= SYS_NET_POLLIN;
  }
  // A DGRAM socket can always accept outgoing data.
  if ((sn_pfd.events & SYS_NET_POLLOUT) != 0) {
    sn_pfd.revents |= SYS_NET_POLLOUT;
  }
  return sn_pfd.revents != 0 ? 1 : 0;
}
// select() readiness for DGRAM-P2P: {readable, writable, exceptional}.
// Readable requires a bound vport with queued data; always writable;
// exceptional state never triggers.
std::tuple<bool, bool, bool>
lv2_socket_p2p::select(bs_t<lv2_socket::poll_t> selected,
                       [[maybe_unused]] pollfd &native_pfd) {
  std::lock_guard lock(mutex);
  bool readable = false;
  bool writable = false;
  if ((selected & lv2_socket::poll_t::read) && vport && !data.empty()) {
    sys_net.trace("[P2P] p2p_data for vport %d contains %d elements", vport,
                  data.size());
    readable = true;
  }
  if (selected & lv2_socket::poll_t::write) {
    writable = true;
  }
  return {readable, writable, false};
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,137 @@
#include "stdafx.h"
#include "Emu/NP/vport0.h"
#include "sys_net/lv2_socket_raw.h"
LOG_CHANNEL(sys_net);
// One-shot logging flag: each distinct tag type T gets its own fxo-managed
// instance, so a message keyed by that tag is emitted at most once.
template <typename T> struct socket_raw_logging {
  socket_raw_logging() = default;
  socket_raw_logging(const socket_raw_logging &) = delete;
  socket_raw_logging &operator=(const socket_raw_logging &) = delete;
  // Set to true by the first LOG_ONCE expansion that fires for this tag.
  atomic_t<bool> logged = false;
};
// Logs 'message' as a TODO exactly once per emulator session, keyed by the
// unique tag class 'raw_var' declared on the fly in the template argument.
#define LOG_ONCE(raw_var, message) \
  if (!g_fxo->get<socket_raw_logging<class raw_var>>().logged.exchange( \
          true)) { \
    sys_net.todo(message); \
  }
// Constructs a RAW socket; all state handling is delegated to the base.
lv2_socket_raw::lv2_socket_raw(lv2_socket_family family, lv2_socket_type type,
                               lv2_ip_protocol protocol)
    : lv2_socket(family, type, protocol) {}
// Savestate constructor: restores the base socket state from the serializer.
lv2_socket_raw::lv2_socket_raw(utils::serial &ar, lv2_socket_type type)
    : lv2_socket(make_exact(ar), type) {}
// Serializes this socket into a savestate (base class writes all fields).
void lv2_socket_raw::save(utils::serial &ar) { lv2_socket::save(ar, true); }
// accept() has no meaning for RAW sockets; log loudly, return empty result.
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr>
lv2_socket_raw::accept([[maybe_unused]] bool is_lock) {
  sys_net.fatal("[RAW] accept() called on a RAW socket");
  return {};
}
// connect() has no meaning for RAW sockets; log loudly but report success
// so callers do not stall.
std::optional<s32>
lv2_socket_raw::connect([[maybe_unused]] const sys_net_sockaddr &addr) {
  sys_net.fatal("[RAW] connect() called on a RAW socket");
  return CELL_OK;
}
// Not applicable to RAW sockets (no connection state); reports success.
s32 lv2_socket_raw::connect_followup() {
  sys_net.fatal("[RAW] connect_followup() called on a RAW socket");
  return CELL_OK;
}
// Unsupported on RAW sockets; logged once, returns a zeroed result.
std::pair<s32, sys_net_sockaddr> lv2_socket_raw::getpeername() {
  LOG_ONCE(raw_getpeername, "[RAW] getpeername() called on a RAW socket");
  return {};
}
// Unsupported on RAW sockets; logged once, returns 0.
s32 lv2_socket_raw::listen([[maybe_unused]] s32 backlog) {
  LOG_ONCE(raw_listen, "[RAW] listen() called on a RAW socket");
  return {};
}
// Stub: binding a RAW socket is not implemented yet (logged once).
s32 lv2_socket_raw::bind([[maybe_unused]] const sys_net_sockaddr &addr) {
  LOG_ONCE(raw_bind, "lv2_socket_raw::bind");
  return {};
}
// Stub: not implemented for RAW sockets (logged once); zeroed result.
std::pair<s32, sys_net_sockaddr> lv2_socket_raw::getsockname() {
  LOG_ONCE(raw_getsockname, "lv2_socket_raw::getsockname");
  return {};
}
// Stub: option retrieval is not implemented for RAW sockets (logged once);
// returns zeroed status/option data/length.
std::tuple<s32, lv2_socket::sockopt_data, u32>
lv2_socket_raw::getsockopt([[maybe_unused]] s32 level,
                           [[maybe_unused]] s32 optname,
                           [[maybe_unused]] u32 len) {
  LOG_ONCE(raw_getsockopt, "lv2_socket_raw::getsockopt");
  return {};
}
// Partially implemented: only SYS_NET_SO_NBIO (non-blocking flag) is
// honoured; every other option is accepted and ignored. Always returns 0.
s32 lv2_socket_raw::setsockopt(s32 level, s32 optname,
                               const std::vector<u8> &optval) {
  LOG_ONCE(raw_setsockopt, "lv2_socket_raw::setsockopt");
  // TODO
  // Robustness: the original read 4 bytes unconditionally; don't read past
  // the end of an undersized option buffer.
  if (optval.size() < sizeof(s32)) {
    return {};
  }
  const int native_int = *reinterpret_cast<const be_t<s32> *>(optval.data());
  if (level == SYS_NET_SOL_SOCKET && optname == SYS_NET_SO_NBIO) {
    so_nbio = native_int;
  }
  return {};
}
// Stub receive for RAW sockets: non-blocking callers get EWOULDBLOCK
// immediately; blocking callers are left pending (empty optional).
std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>>
lv2_socket_raw::recvfrom(s32 flags, [[maybe_unused]] u32 len,
                         [[maybe_unused]] bool is_lock) {
  LOG_ONCE(raw_recvfrom, "lv2_socket_raw::recvfrom");
  const bool non_blocking = so_nbio || (flags & SYS_NET_MSG_DONTWAIT) != 0;
  if (non_blocking) {
    return {{-SYS_NET_EWOULDBLOCK, {}, {}}};
  }
  return std::nullopt;
}
// Stub send for RAW sockets: nothing actually goes on the wire; the whole
// payload is reported as sent so callers make progress.
std::optional<s32> lv2_socket_raw::sendto(
    [[maybe_unused]] s32 flags, [[maybe_unused]] const std::vector<u8> &buf,
    [[maybe_unused]] std::optional<sys_net_sockaddr> opt_sn_addr,
    [[maybe_unused]] bool is_lock) {
  LOG_ONCE(raw_sendto, "lv2_socket_raw::sendto");
  const s32 bytes_sent = ::size32(buf);
  return bytes_sent;
}
// Stub: scatter/gather send is not implemented for RAW sockets (logged once).
std::optional<s32>
lv2_socket_raw::sendmsg([[maybe_unused]] s32 flags,
                        [[maybe_unused]] const sys_net_msghdr &msg,
                        [[maybe_unused]] bool is_lock) {
  LOG_ONCE(raw_sendmsg, "lv2_socket_raw::sendmsg");
  return {};
}
// Stub: no native resources to release for a RAW socket yet (logged once).
void lv2_socket_raw::close() { LOG_ONCE(raw_close, "lv2_socket_raw::close"); }
// Stub: not implemented for RAW sockets (logged once); returns 0.
s32 lv2_socket_raw::shutdown([[maybe_unused]] s32 how) {
  LOG_ONCE(raw_shutdown, "lv2_socket_raw::shutdown");
  return {};
}
// Stub: RAW sockets never report readiness (logged once); returns 0.
s32 lv2_socket_raw::poll([[maybe_unused]] sys_net_pollfd &sn_pfd,
                         [[maybe_unused]] pollfd &native_pfd) {
  LOG_ONCE(raw_poll, "lv2_socket_raw::poll");
  return {};
}
// Stub: RAW sockets never report readiness in select (logged once);
// returns {false, false, false}.
std::tuple<bool, bool, bool>
lv2_socket_raw::select([[maybe_unused]] bs_t<lv2_socket::poll_t> selected,
                       [[maybe_unused]] pollfd &native_pfd) {
  LOG_ONCE(raw_select, "lv2_socket_raw::select");
  return {};
}

View file

@ -0,0 +1,299 @@
#include "stdafx.h"
#include "Emu/NP/ip_address.h"
#include "cellos/sys_sync.h"
#include "rpcsx/fw/ps3/sceNp.h" // for SCE_NP_PORT
#include "sys_net/network_context.h"
#include "sys_net/sys_net_helpers.h"
LOG_CHANNEL(sys_net);
// Used by RPCN to send signaling packets to RPCN server(for UDP hole punching)
// Used by RPCN to send signaling packets through the default (SCE_NP_PORT)
// P2P socket so they originate from the port peers expect (UDP hole
// punching). Returns false if the port isn't bound or sendto() fails.
bool send_packet_from_p2p_port_ipv4(const std::vector<u8> &data,
                                    const sockaddr_in &addr) {
  auto &nc = g_fxo->get<p2p_context>();
  std::lock_guard list_lock(nc.list_p2p_ports_mutex);
  const auto it = nc.list_p2p_ports.find(SCE_NP_PORT);
  if (it == nc.list_p2p_ports.end()) {
    sys_net.error("send_packet_from_p2p_port_ipv4: port %d not present",
                  +SCE_NP_PORT);
    return false;
  }
  auto &def_port = it->second;
  const char *payload = reinterpret_cast<const char *>(data.data());
  if (def_port.is_ipv6) {
    // The bound socket is IPv6 dual-stack: map the IPv4 destination first.
    const auto addr6 = np::sockaddr_to_sockaddr6(addr);
    if (::sendto(def_port.p2p_socket, payload, ::size32(data), 0,
                 reinterpret_cast<const sockaddr *>(&addr6),
                 sizeof(sockaddr_in6)) == -1) {
      sys_net.error(
          "Failed to send IPv4 signaling packet on IPv6 socket: %s",
          get_last_error(false, false));
      return false;
    }
    return true;
  }
  if (::sendto(def_port.p2p_socket, payload, ::size32(data), 0,
               reinterpret_cast<const sockaddr *>(&addr),
               sizeof(sockaddr_in)) == -1) {
    sys_net.error("Failed to send signaling packet on IPv4 socket: %s",
                  get_last_error(false, false));
    return false;
  }
  return true;
}
// IPv6 variant of send_packet_from_p2p_port_ipv4; only valid when the
// default P2P port socket was created as IPv6.
bool send_packet_from_p2p_port_ipv6(const std::vector<u8> &data,
                                    const sockaddr_in6 &addr) {
  auto &nc = g_fxo->get<p2p_context>();
  std::lock_guard list_lock(nc.list_p2p_ports_mutex);
  const auto it = nc.list_p2p_ports.find(SCE_NP_PORT);
  if (it == nc.list_p2p_ports.end()) {
    sys_net.error("send_packet_from_p2p_port_ipv6: port %d not present",
                  +SCE_NP_PORT);
    return false;
  }
  auto &def_port = it->second;
  ensure(def_port.is_ipv6);
  if (::sendto(def_port.p2p_socket,
               reinterpret_cast<const char *>(data.data()), ::size32(data),
               0, reinterpret_cast<const sockaddr *>(&addr),
               sizeof(sockaddr_in6)) == -1) {
    sys_net.error("Failed to send signaling packet on IPv6 socket: %s",
                  get_last_error(false, false));
    return false;
  }
  return true;
}
// Drains and returns every pending RPCN message queued on the default
// (SCE_NP_PORT) P2P port; empty if the port isn't bound.
std::vector<std::vector<u8>> get_rpcn_msgs() {
  std::vector<std::vector<u8>> msgs;
  auto &nc = g_fxo->get<p2p_context>();
  std::lock_guard list_lock(nc.list_p2p_ports_mutex);
  const auto it = nc.list_p2p_ports.find(SCE_NP_PORT);
  if (it == nc.list_p2p_ports.end()) {
    sys_net.error("get_rpcn_msgs: port %d not present", +SCE_NP_PORT);
    return msgs;
  }
  auto &def_port = it->second;
  std::lock_guard lock(def_port.s_rpcn_mutex);
  msgs = std::move(def_port.rpcn_msgs);
  // Leave the source queue in a known-empty state after the move.
  def_port.rpcn_msgs.clear();
  return msgs;
}
// Drains and returns every pending signaling message queued on the default
// (SCE_NP_PORT) P2P port; empty if the port isn't bound.
std::vector<signaling_message> get_sign_msgs() {
  std::vector<signaling_message> msgs;
  auto &nc = g_fxo->get<p2p_context>();
  std::lock_guard list_lock(nc.list_p2p_ports_mutex);
  const auto it = nc.list_p2p_ports.find(SCE_NP_PORT);
  if (it == nc.list_p2p_ports.end()) {
    sys_net.error("get_sign_msgs: port %d not present", +SCE_NP_PORT);
    return msgs;
  }
  auto &def_port = it->second;
  std::lock_guard lock(def_port.s_sign_mutex);
  msgs = std::move(def_port.sign_msgs);
  // Leave the source queue in a known-empty state after the move.
  def_port.sign_msgs.clear();
  return msgs;
}
namespace np {
void init_np_handler_dependencies();
}
// Queues a PPU thread to be woken on the next wake_threads() pass.
void base_network_thread::add_ppu_to_awake(ppu_thread *ppu) {
  std::lock_guard lock(mutex_ppu_to_awake);
  ppu_to_awake.push_back(ppu);
}
// Removes every queued wake-up entry for the given PPU thread (e.g. when it
// stops waiting on a socket).
void base_network_thread::del_ppu_to_awake(ppu_thread *ppu) {
  std::lock_guard lock(mutex_ppu_to_awake);
  // Erase-remove idiom instead of the manual erase loop; removes all
  // occurrences in a single pass.
  ppu_to_awake.erase(
      std::remove(ppu_to_awake.begin(), ppu_to_awake.end(), ppu),
      ppu_to_awake.end());
}
// Wakes every queued PPU thread: clears their socket wait queues, appends
// them to the scheduler, then notifies the scheduler once.
void base_network_thread::wake_threads() {
  std::lock_guard lock(mutex_ppu_to_awake);
  // NOTE(review): std::unique only collapses *adjacent* duplicates and the
  // list is not sorted, so non-adjacent duplicate entries would survive —
  // confirm duplicates can only ever be queued back-to-back.
  ppu_to_awake.erase(std::unique(ppu_to_awake.begin(), ppu_to_awake.end()),
                     ppu_to_awake.end());
  for (ppu_thread *ppu : ppu_to_awake) {
    network_clear_queue(*ppu);
    lv2_obj::append(ppu);
  }
  if (!ppu_to_awake.empty()) {
    ppu_to_awake.clear();
    lv2_obj::awake_all();
  }
}
// The P2P thread carries RPCN/NP traffic; make sure the NP handler
// dependencies are initialized before the thread body runs.
p2p_thread::p2p_thread() { np::init_np_handler_dependencies(); }
// Pre-binds the default NP port (SCE_NP_PORT) used for RPCN signaling.
void p2p_thread::bind_sce_np_port() {
  std::lock_guard list_lock(list_p2p_ports_mutex);
  create_p2p_port(SCE_NP_PORT);
}
// Main loop of the generic network thread: polls every active native
// DGRAM/STREAM socket with a 1ms timeout, dispatches readiness events to
// the owning lv2_socket objects and wakes waiting PPU threads.
void network_thread::operator()() {
  std::vector<shared_ptr<lv2_socket>> socklist;
  socklist.reserve(lv2_socket::id_count);
  {
    std::lock_guard lock(mutex_ppu_to_awake);
    ppu_to_awake.clear();
  }
  std::vector<::pollfd> fds(lv2_socket::id_count);
#ifdef _WIN32
  // State for the WSAPoll failed-connection workaround (see windows_poll):
  // tracks which sockets are mid-connect between loop iterations.
  std::vector<bool> connecting(lv2_socket::id_count);
  std::vector<bool> was_connecting(lv2_socket::id_count);
#endif
  while (thread_ctrl::state() != thread_state::aborting) {
    // Idle until at least one socket registers polling interest.
    if (!num_polls) {
      thread_ctrl::wait_on(num_polls, 0);
      continue;
    }
    ensure(socklist.size() <= lv2_socket::id_count);
    // Wait with 1ms timeout
#ifdef _WIN32
    windows_poll(fds, ::size32(socklist), 1, connecting);
#else
    ::poll(fds.data(), socklist.size(), 1);
#endif
    std::lock_guard lock(mutex_thread_loop);
    // Deliver the poll results gathered for the previous socket list.
    for (usz i = 0; i < socklist.size(); i++) {
#ifdef _WIN32
      socklist[i]->handle_events(fds[i], was_connecting[i] && !connecting[i]);
#else
      socklist[i]->handle_events(fds[i]);
#endif
    }
    wake_threads();
    socklist.clear();
    // Obtain all native active sockets
    idm::select<lv2_socket>([&](u32 id, lv2_socket &s) {
      if (s.get_type() == SYS_NET_SOCK_DGRAM ||
          s.get_type() == SYS_NET_SOCK_STREAM) {
        socklist.emplace_back(idm::get_unlocked<lv2_socket>(id));
      }
    });
    // Rebuild the pollfd array for the next iteration; sockets with no
    // pending interest get fd = -1 so poll() skips them.
    for (usz i = 0; i < socklist.size(); i++) {
      auto events = socklist[i]->get_events();
      fds[i].fd = events ? socklist[i]->get_socket() : -1;
      fds[i].events = (events & lv2_socket::poll_t::read ? POLLIN : 0) |
                      (events & lv2_socket::poll_t::write ? POLLOUT : 0) | 0;
      fds[i].revents = 0;
#ifdef _WIN32
      const auto cur_connecting = socklist[i]->is_connecting();
      was_connecting[i] = cur_connecting;
      connecting[i] = cur_connecting;
#endif
    }
  }
}
// Must be used under list_p2p_ports_mutex lock!
// Creates (if absent) the nt_p2p_port backing the given UDP port and wakes
// the P2P thread when the first port appears.
void p2p_thread::create_p2p_port(u16 p2p_port) {
  if (list_p2p_ports.contains(p2p_port)) {
    return;
  }
  list_p2p_ports.emplace(std::piecewise_construct,
                         std::forward_as_tuple(p2p_port),
                         std::forward_as_tuple(p2p_port));
  // First port created: wake the thread waiting on num_p2p_ports.
  if (num_p2p_ports.fetch_add(1) == 0) {
    num_p2p_ports.notify_one();
  }
}
// Main loop of the P2P thread: polls the master UDP socket of every bound
// P2P port (1ms timeout) and drains incoming datagrams via recv_data().
void p2p_thread::operator()() {
  std::vector<::pollfd> p2p_fd(lv2_socket::id_count);
  while (thread_ctrl::state() != thread_state::aborting) {
    // Idle until at least one P2P port exists.
    if (!num_p2p_ports) {
      thread_ctrl::wait_on(num_p2p_ports, 0);
      continue;
    }
    // Check P2P sockets for incoming packets
    auto num_p2p_sockets = 0;
    std::memset(p2p_fd.data(), 0, p2p_fd.size() * sizeof(::pollfd));
    {
      auto set_fd = [&](socket_type socket) {
        p2p_fd[num_p2p_sockets].events = POLLIN;
        p2p_fd[num_p2p_sockets].revents = 0;
        p2p_fd[num_p2p_sockets].fd = socket;
        num_p2p_sockets++;
      };
      std::lock_guard lock(list_p2p_ports_mutex);
      for (const auto &[_, p2p_port] : list_p2p_ports) {
        set_fd(p2p_port.p2p_socket);
      }
    }
#ifdef _WIN32
    const auto ret_p2p = WSAPoll(p2p_fd.data(), num_p2p_sockets, 1);
#else
    const auto ret_p2p = ::poll(p2p_fd.data(), num_p2p_sockets, 1);
#endif
    if (ret_p2p > 0) {
      std::lock_guard lock(list_p2p_ports_mutex);
      auto fd_index = 0;
      // NOTE(review): the mutex is released between fd setup and this
      // processing pass; a port added in between would shift fd indices —
      // confirm ports cannot be created concurrently with this loop.
      auto process_fd = [&](nt_p2p_port &p2p_port) {
        if ((p2p_fd[fd_index].revents & POLLIN) == POLLIN ||
            (p2p_fd[fd_index].revents & POLLRDNORM) == POLLRDNORM) {
          while (p2p_port.recv_data())
            ;
        }
        fd_index++;
      };
      for (auto &[_, p2p_port] : list_p2p_ports) {
        process_fd(p2p_port);
      }
      wake_threads();
    } else if (ret_p2p < 0) {
      sys_net.error("[P2P] Error poll on master P2P socket: %d",
                    get_last_error(false));
    }
  }
}

View file

@ -0,0 +1,377 @@
#include "stdafx.h"
#include "Emu/NP/ip_address.h"
#include "Emu/NP/np_handler.h"
#include "Emu/NP/signaling_handler.h"
#include "Emu/NP/vport0.h"
#include "sys_net/nt_p2p_port.h"
#include "sys_net/lv2_socket_p2ps.h"
#include "sys_net/sys_net_helpers.h"
LOG_CHANNEL(sys_net);
namespace sys_net_helpers {
bool all_reusable(const std::set<s32> &sock_ids) {
for (const s32 sock_id : sock_ids) {
const auto [_, reusable] =
idm::check<lv2_socket>(sock_id, [&](lv2_socket &sock) -> bool {
auto [res_reuseaddr, optval_reuseaddr, optlen_reuseaddr] =
sock.getsockopt(SYS_NET_SOL_SOCKET, SYS_NET_SO_REUSEADDR,
sizeof(s32));
auto [res_reuseport, optval_reuseport, optlen_reuseport] =
sock.getsockopt(SYS_NET_SOL_SOCKET, SYS_NET_SO_REUSEPORT,
sizeof(s32));
const bool reuse_addr =
optlen_reuseaddr == 4 && !!optval_reuseaddr._int;
const bool reuse_port =
optlen_reuseport == 4 && !!optval_reuseport._int;
return (reuse_addr || reuse_port);
});
if (!reusable) {
return false;
}
}
return true;
}
} // namespace sys_net_helpers
// Creates the native UDP socket backing all P2P traffic on 'port': binds it
// (IPv6 dual-stack when supported), makes it non-blocking, enlarges the
// receive buffer and requests a UPnP port mapping. Throws on any failure.
nt_p2p_port::nt_p2p_port(u16 port) : port(port) {
  is_ipv6 = np::is_ipv6_supported();
  // Creates and bind P2P Socket
  p2p_socket = is_ipv6 ? ::socket(AF_INET6, SOCK_DGRAM, 0)
                       : ::socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
#ifdef _WIN32
  if (p2p_socket == INVALID_SOCKET)
#else
  if (p2p_socket == -1)
#endif
    fmt::throw_exception("Failed to create DGRAM socket for P2P socket: %s!",
                         get_last_error(true));
  np::set_socket_non_blocking(p2p_socket);
  u32 optval = 131072; // value obtained from DECR for a SOCK_DGRAM_P2P
                       // socket(should maybe be bigger for actual socket?)
  if (setsockopt(p2p_socket, SOL_SOCKET, SO_RCVBUF,
                 reinterpret_cast<const char *>(&optval), sizeof(optval)) != 0)
    fmt::throw_exception("Error setsockopt SO_RCVBUF on P2P socket: %s",
                         get_last_error(true));
  int ret_bind = 0;
  // The port is kept in network (big-endian) byte order for bind().
  const u16 be_port = std::bit_cast<u16, be_t<u16>>(port);
  if (is_ipv6) {
    // Some OS(Windows, maybe more) will only support IPv6 adressing by default
    // and we need IPv4 over IPv6
    optval = 0;
    if (setsockopt(p2p_socket, IPPROTO_IPV6, IPV6_V6ONLY,
                   reinterpret_cast<const char *>(&optval),
                   sizeof(optval)) != 0)
      fmt::throw_exception("Error setsockopt IPV6_V6ONLY on P2P socket: %s",
                           get_last_error(true));
    ::sockaddr_in6 p2p_ipv6_addr{.sin6_family = AF_INET6, .sin6_port = be_port};
    ret_bind = ::bind(p2p_socket, reinterpret_cast<sockaddr *>(&p2p_ipv6_addr),
                      sizeof(p2p_ipv6_addr));
  } else {
    ::sockaddr_in p2p_ipv4_addr{.sin_family = AF_INET, .sin_port = be_port};
    ret_bind = ::bind(p2p_socket, reinterpret_cast<sockaddr *>(&p2p_ipv4_addr),
                      sizeof(p2p_ipv4_addr));
  }
  if (ret_bind == -1)
    fmt::throw_exception("Failed to bind DGRAM socket to %d for P2P: %s!", port,
                         get_last_error(true));
  auto &nph = g_fxo->get<named_thread<np::np_handler>>();
  nph.upnp_add_port_mapping(port, "UDP");
  sys_net.notice("P2P port %d was bound!", port);
}
// Releases the native UDP socket backing this P2P port.
nt_p2p_port::~nt_p2p_port() { np::close_socket(p2p_socket); }
// Traces the fields of an encapsulated "U2S" TCP header for debugging.
void nt_p2p_port::dump_packet(p2ps_encapsulated_tcp *tcph) {
  sys_net.trace("PACKET DUMP:\nsrc_port: %d\ndst_port: %d\nflags: %d\nseq: "
                "%d\nack: %d\nlen: %d",
                tcph->src_port, tcph->dst_port, tcph->flags, tcph->seq,
                tcph->ack, tcph->length);
}
// Must be used under bound_p2p_vports_mutex lock
// Hands out ephemeral vport numbers sequentially, starting at 30000 on
// first use.
u16 nt_p2p_port::get_port() {
  constexpr u16 first_ephemeral_vport = 30000;
  if (binding_port == 0) {
    binding_port = first_ephemeral_vport;
  }
  return binding_port++;
}
// Routes an incoming packet to an already-connected STREAM-P2P socket.
// Returns false if the socket id is stale or the socket rejected the packet.
bool nt_p2p_port::handle_connected(s32 sock_id,
                                   p2ps_encapsulated_tcp *tcp_header, u8 *data,
                                   ::sockaddr_storage *op_addr) {
  const auto sock =
      idm::check<lv2_socket>(sock_id, [&](lv2_socket &sock) -> bool {
        ensure(sock.get_type() == SYS_NET_SOCK_STREAM_P2P);
        auto &sock_p2ps = reinterpret_cast<lv2_socket_p2ps &>(sock);
        return sock_p2ps.handle_connected(tcp_header, data, op_addr, this);
      });
  // Falsy result: no lv2_socket with this id exists anymore.
  if (!sock) {
    sys_net.error("[P2PS] Couldn't find the socket!");
    return false;
  }
  // sock.ret carries the lambda's verdict.
  if (!sock.ret) {
    sys_net.error("[P2PS] handle_connected() failed!");
    return false;
  }
  return true;
}
// Hands an incoming SYN/packet to a listening STREAM-P2P socket, if that
// socket still exists. Returns the socket's verdict, or false if it's gone.
bool nt_p2p_port::handle_listening(s32 sock_id,
                                   p2ps_encapsulated_tcp *tcp_header, u8 *data,
                                   ::sockaddr_storage *op_addr) {
  auto sock = idm::get_unlocked<lv2_socket>(sock_id);
  if (!sock) {
    return false;
  }
  return reinterpret_cast<lv2_socket_p2ps &>(*sock).handle_listening(
      tcp_header, data, op_addr);
}
// Reads one datagram from the port's native UDP socket and dispatches it:
// vport 0 feeds the RPCN/signaling queues, P2P-flagged packets go to DGRAM
// sockets bound on the destination vport, and P2PS-flagged packets go to
// connected/listening STREAM sockets. Returns false when the socket is
// drained (callers loop while this returns true).
bool nt_p2p_port::recv_data() {
  ::sockaddr_storage native_addr{};
  ::socklen_t native_addrlen = sizeof(native_addr);
  const auto recv_res = ::recvfrom(
      p2p_socket, reinterpret_cast<char *>(p2p_recv_data.data()),
      ::size32(p2p_recv_data), 0,
      reinterpret_cast<struct sockaddr *>(&native_addr), &native_addrlen);
  if (recv_res == -1) {
    auto lerr = get_last_error(false);
    // EINPROGRESS/EWOULDBLOCK simply mean there is no more data pending.
    if (lerr != SYS_NET_EINPROGRESS && lerr != SYS_NET_EWOULDBLOCK)
      sys_net.error("Error recvfrom on %s P2P socket: %d",
                    is_ipv6 ? "IPv6" : "IPv4", lerr);
    return false;
  }
  // Every P2P datagram starts with a little-endian u16 destination vport.
  if (recv_res < static_cast<s32>(sizeof(u16))) {
    sys_net.error("Received badly formed packet on P2P port(no vport)!");
    return true;
  }
  u16 dst_vport = reinterpret_cast<le_t<u16> &>(p2p_recv_data[0]);
  if (is_ipv6) {
    // Dual-stack socket: fold the IPv4-mapped IPv6 sender address back to a
    // plain sockaddr_in for the rest of the pipeline.
    const auto *addr_ipv6 = reinterpret_cast<sockaddr_in6 *>(&native_addr);
    const auto addr_ipv4 = np::sockaddr6_to_sockaddr(*addr_ipv6);
    native_addr = {};
    std::memcpy(&native_addr, &addr_ipv4, sizeof(addr_ipv4));
  }
  if (dst_vport == 0) {
    // vport 0 is reserved for emulator control traffic (RPCN / signaling).
    if (recv_res < VPORT_0_HEADER_SIZE) {
      sys_net.error("Bad vport 0 packet(no subset)!");
      return true;
    }
    const u8 subset = p2p_recv_data[2];
    const auto data_size = recv_res - VPORT_0_HEADER_SIZE;
    std::vector<u8> vport_0_data(p2p_recv_data.data() + VPORT_0_HEADER_SIZE,
                                 p2p_recv_data.data() + VPORT_0_HEADER_SIZE +
                                     data_size);
    switch (subset) {
    case SUBSET_RPCN: {
      std::lock_guard lock(s_rpcn_mutex);
      rpcn_msgs.push_back(std::move(vport_0_data));
      return true;
    }
    case SUBSET_SIGNALING: {
      signaling_message msg;
      msg.src_addr =
          reinterpret_cast<struct sockaddr_in *>(&native_addr)->sin_addr.s_addr;
      msg.src_port = std::bit_cast<u16, be_t<u16>>(
          reinterpret_cast<struct sockaddr_in *>(&native_addr)->sin_port);
      msg.data = std::move(vport_0_data);
      {
        std::lock_guard lock(s_sign_mutex);
        sign_msgs.push_back(std::move(msg));
      }
      auto &sigh = g_fxo->get<named_thread<signaling_handler>>();
      sigh.wake_up();
      return true;
    }
    default: {
      sys_net.error("Invalid vport 0 subset!");
      return true;
    }
    }
  }
  if (recv_res < VPORT_P2P_HEADER_SIZE) {
    return true;
  }
  // Full P2P header: dst_vport, src_vport, flags (three little-endian u16).
  const u16 src_vport =
      *reinterpret_cast<le_t<u16> *>(p2p_recv_data.data() + sizeof(u16));
  const u16 vport_flags = *reinterpret_cast<le_t<u16> *>(
      p2p_recv_data.data() + sizeof(u16) + sizeof(u16));
  std::vector<u8> p2p_data(recv_res - VPORT_P2P_HEADER_SIZE);
  memcpy(p2p_data.data(), p2p_recv_data.data() + VPORT_P2P_HEADER_SIZE,
         p2p_data.size());
  if (vport_flags & P2P_FLAG_P2P) {
    // Plain DGRAM-P2P payload: feed every socket bound to the dst vport.
    std::lock_guard lock(bound_p2p_vports_mutex);
    if (bound_p2p_vports.contains(dst_vport)) {
      sys_net_sockaddr_in_p2p p2p_addr{};
      p2p_addr.sin_len = sizeof(sys_net_sockaddr_in);
      p2p_addr.sin_family = SYS_NET_AF_INET;
      p2p_addr.sin_addr = std::bit_cast<be_t<u32>, u32>(
          reinterpret_cast<struct sockaddr_in *>(&native_addr)
              ->sin_addr.s_addr);
      p2p_addr.sin_vport = src_vport;
      p2p_addr.sin_port = std::bit_cast<be_t<u16>, u16>(
          reinterpret_cast<struct sockaddr_in *>(&native_addr)->sin_port);
      auto &bound_sockets = ::at32(bound_p2p_vports, dst_vport);
      // NOTE(review): erasing from 'bound_sockets' while range-iterating it
      // below invalidates the loop iterator if a stale id is found — confirm
      // this path cannot be hit with multiple bound sockets.
      for (const auto sock_id : bound_sockets) {
        const auto sock =
            idm::check<lv2_socket>(sock_id, [&](lv2_socket &sock) {
              ensure(sock.get_type() == SYS_NET_SOCK_DGRAM_P2P);
              auto &sock_p2p = reinterpret_cast<lv2_socket_p2p &>(sock);
              sock_p2p.handle_new_data(p2p_addr, p2p_data);
            });
        if (!sock) {
          sys_net.error("Socket %d found in bound_p2p_vports didn't exist!",
                        sock_id);
          bound_sockets.erase(sock_id);
          if (bound_sockets.empty()) {
            bound_p2p_vports.erase(dst_vport);
          }
        }
      }
      return true;
    }
  } else if (vport_flags & P2P_FLAG_P2PS) {
    // STREAM-P2P (emulated TCP-over-UDP) payload.
    if (p2p_data.size() < sizeof(p2ps_encapsulated_tcp)) {
      sys_net.notice("Received P2P packet targeted at unbound vport(likely) or "
                     "invalid(vport=%d)",
                     dst_vport);
      return true;
    }
    auto *tcp_header =
        reinterpret_cast<p2ps_encapsulated_tcp *>(p2p_data.data());
    // Validate signature & length
    if (tcp_header->signature != P2PS_U2S_SIG) {
      sys_net.notice("Received P2P packet targeted at unbound vport(vport=%d)",
                     dst_vport);
      return true;
    }
    if (tcp_header->length !=
        (p2p_data.size() - sizeof(p2ps_encapsulated_tcp))) {
      sys_net.error(
          "Received STREAM-P2P packet tcp length didn't match packet length");
      return true;
    }
    // Sanity check
    if (tcp_header->dst_port != dst_vport) {
      sys_net.error("Received STREAM-P2P packet with dst_port != vport");
      return true;
    }
    // Validate checksum
    u16 given_checksum = tcp_header->checksum;
    tcp_header->checksum = 0;
    if (given_checksum !=
        u2s_tcp_checksum(reinterpret_cast<const le_t<u16> *>(p2p_data.data()),
                         p2p_data.size())) {
      sys_net.error("Checksum is invalid, dropping packet!");
      return true;
    }
    // The packet is valid
    dump_packet(tcp_header);
    // Check if it's bound
    // Connection key: sender IPv4 address combined with both stream ports.
    const u64 key_connected =
        (reinterpret_cast<struct sockaddr_in *>(&native_addr)
             ->sin_addr.s_addr) |
        (static_cast<u64>(tcp_header->src_port) << 48) |
        (static_cast<u64>(tcp_header->dst_port) << 32);
    {
      std::lock_guard lock(bound_p2p_vports_mutex);
      if (bound_p2p_streams.contains(key_connected)) {
        const auto sock_id = ::at32(bound_p2p_streams, key_connected);
        sys_net.trace("Received packet for connected STREAM-P2P socket(s=%d)",
                      sock_id);
        handle_connected(sock_id, tcp_header,
                         p2p_data.data() + sizeof(p2ps_encapsulated_tcp),
                         &native_addr);
        return true;
      }
      if (bound_p2ps_vports.contains(tcp_header->dst_port)) {
        const auto &bound_sockets =
            ::at32(bound_p2ps_vports, tcp_header->dst_port);
        for (const auto sock_id : bound_sockets) {
          sys_net.trace("Received packet for listening STREAM-P2P socket(s=%d)",
                        sock_id);
          handle_listening(sock_id, tcp_header,
                           p2p_data.data() + sizeof(p2ps_encapsulated_tcp),
                           &native_addr);
        }
        return true;
      }
      if (tcp_header->flags == p2ps_tcp_flags::RST) {
        sys_net.trace("[P2PS] Received RST on unbound P2PS");
        return true;
      }
      // The P2PS packet was sent to an unbound vport, send a RST packet
      p2ps_encapsulated_tcp send_hdr;
      send_hdr.src_port = tcp_header->dst_port;
      send_hdr.dst_port = tcp_header->src_port;
      send_hdr.flags = p2ps_tcp_flags::RST;
      auto packet = generate_u2s_packet(send_hdr, nullptr, 0);
      if (np::sendto_possibly_ipv6(
              p2p_socket, reinterpret_cast<char *>(packet.data()),
              ::size32(packet),
              reinterpret_cast<const sockaddr_in *>(&native_addr), 0) == -1) {
        sys_net.error("[P2PS] Error sending RST to sender to unbound P2PS: %s",
                      get_last_error(false));
        return true;
      }
      sys_net.trace("[P2PS] Sent RST to sender to unbound P2PS");
      return true;
    }
  }
  sys_net.notice("Received a P2P packet with no bound target(dst_vport = %d)",
                 dst_vport);
  return true;
}

View file

@ -0,0 +1,243 @@
#include "stdafx.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/IdManager.h"
#include "sys_net/lv2_socket.h"
#include "sys_net/network_context.h"
#include "sys_net/sys_net_helpers.h"
LOG_CHANNEL(sys_net);
// Fetches the thread-local error code left by the last native socket call
// (WSAGetLastError on Windows, errno elsewhere).
int get_native_error() {
#ifdef _WIN32
  return WSAGetLastError();
#else
  return errno;
#endif
}
// Maps a native socket error (errno / WSA*) to the matching SYS_NET_* code.
// For blocking sockets, EWOULDBLOCK/EINPROGRESS are suppressed (returned as
// 0) since the operation will be retried; unknown errors are fatal.
sys_net_error convert_error(bool is_blocking, int native_error,
                            [[maybe_unused]] bool is_connecting) {
  // Convert the error code for socket functions to a one for sys_net
  sys_net_error result{};
  const char *name{};
#ifdef _WIN32
#define ERROR_CASE(error) \
  case WSA##error: \
    result = SYS_NET_##error; \
    name = #error; \
    break;
#else
#define ERROR_CASE(error) \
  case error: \
    result = SYS_NET_##error; \
    name = #error; \
    break;
#endif
  switch (native_error) {
#ifndef _WIN32
    ERROR_CASE(ENOENT);
    ERROR_CASE(ENOMEM);
    ERROR_CASE(EBUSY);
    ERROR_CASE(ENOSPC);
    ERROR_CASE(EPIPE);
#endif
    // TODO: We don't currently support EFAULT or EINTR
    // ERROR_CASE(EFAULT);
    // ERROR_CASE(EINTR);
    ERROR_CASE(EBADF);
    ERROR_CASE(EACCES);
    ERROR_CASE(EINVAL);
    ERROR_CASE(EMFILE);
    ERROR_CASE(EWOULDBLOCK);
    ERROR_CASE(EINPROGRESS);
    ERROR_CASE(EALREADY);
    ERROR_CASE(EDESTADDRREQ);
    ERROR_CASE(EMSGSIZE);
    ERROR_CASE(EPROTOTYPE);
    ERROR_CASE(ENOPROTOOPT);
    ERROR_CASE(EPROTONOSUPPORT);
    ERROR_CASE(EOPNOTSUPP);
    ERROR_CASE(EPFNOSUPPORT);
    ERROR_CASE(EAFNOSUPPORT);
    ERROR_CASE(EADDRINUSE);
    ERROR_CASE(EADDRNOTAVAIL);
    ERROR_CASE(ENETDOWN);
    ERROR_CASE(ENETUNREACH);
    ERROR_CASE(ECONNABORTED);
    ERROR_CASE(ECONNRESET);
    ERROR_CASE(ENOBUFS);
    ERROR_CASE(EISCONN);
    ERROR_CASE(ENOTCONN);
    ERROR_CASE(ESHUTDOWN);
    ERROR_CASE(ETOOMANYREFS);
    ERROR_CASE(ETIMEDOUT);
    ERROR_CASE(ECONNREFUSED);
    ERROR_CASE(EHOSTDOWN);
    ERROR_CASE(EHOSTUNREACH);
#ifdef _WIN32
  // Windows likes to be special with unique errors
  case WSAENETRESET:
    result = SYS_NET_ECONNRESET;
    name = "WSAENETRESET";
    break;
#endif
  default:
    fmt::throw_exception("sys_net get_last_error(is_blocking=%d, "
                         "native_error=%d): Unknown/illegal socket error",
                         is_blocking, native_error);
  }
#ifdef _WIN32
  if (is_connecting) {
    // Windows will return SYS_NET_ENOTCONN when recvfrom/sendto is called on a
    // socket that is connecting but not yet connected
    if (result == SYS_NET_ENOTCONN)
      return SYS_NET_EAGAIN;
  }
#endif
  // Log real failures; skip the expected would-block/in-progress cases.
  if (name && result != SYS_NET_EWOULDBLOCK && result != SYS_NET_EINPROGRESS) {
    sys_net.error("Socket error %s", name);
  }
  if (is_blocking && result == SYS_NET_EWOULDBLOCK) {
    return {};
  }
  if (is_blocking && result == SYS_NET_EINPROGRESS) {
    return {};
  }
  return result;
#undef ERROR_CASE
}
// Translates the current native socket error into a sys_net error code.
sys_net_error get_last_error(bool is_blocking, bool is_connecting) {
  const int native = get_native_error();
  return convert_error(is_blocking, native, is_connecting);
}
// Converts a native IPv4 sockaddr_storage into the guest sys_net_sockaddr
// representation, preserving the network byte order of port and address.
sys_net_sockaddr
native_addr_to_sys_net_addr(const ::sockaddr_storage &native_addr) {
  // Only IPv4 (or unspecified) native addresses can be represented.
  ensure(native_addr.ss_family == AF_INET ||
         native_addr.ss_family == AF_UNSPEC);
  const auto *native_in =
      reinterpret_cast<const sockaddr_in *>(&native_addr);
  sys_net_sockaddr sn_addr;
  auto *paddr = reinterpret_cast<sys_net_sockaddr_in *>(&sn_addr);
  *paddr = {};
  paddr->sin_len = sizeof(sys_net_sockaddr_in);
  paddr->sin_family = SYS_NET_AF_INET;
  paddr->sin_port = std::bit_cast<be_t<u16>, u16>(native_in->sin_port);
  paddr->sin_addr = std::bit_cast<be_t<u32>, u32>(native_in->sin_addr.s_addr);
  return sn_addr;
}
// Converts a guest sys_net_sockaddr (must be AF_INET) into a native
// sockaddr_in; port/address bytes are already big-endian and copied as-is.
::sockaddr_in sys_net_addr_to_native_addr(const sys_net_sockaddr &sn_addr) {
  ensure(sn_addr.sa_family == SYS_NET_AF_INET);
  const auto *guest_in =
      reinterpret_cast<const sys_net_sockaddr_in *>(&sn_addr);
  ::sockaddr_in native_addr{};
  native_addr.sin_family = AF_INET;
  native_addr.sin_port = std::bit_cast<u16>(guest_in->sin_port);
  native_addr.sin_addr.s_addr = std::bit_cast<u32>(guest_in->sin_addr);
#ifdef _WIN32
  // Windows doesn't support sending packets to 0.0.0.0 but it works on unixes,
  // send to 127.0.0.1 instead
  if (native_addr.sin_addr.s_addr == 0x00000000) {
    sys_net.warning("[Native] Redirected 0.0.0.0 to 127.0.0.1");
    native_addr.sin_addr.s_addr = std::bit_cast<u32, be_t<u32>>(0x7F000001);
  }
#endif
  return native_addr;
}
// Returns true unless the IPv4 address is loopback (127/8) or one of the
// RFC 1918 private ranges (10/8, 172.16/12, 192.168/16). Octets are read
// directly from the network-byte-order address.
bool is_ip_public_address(const ::sockaddr_in &addr) {
  const u8 *ip = reinterpret_cast<const u8 *>(&addr.sin_addr.s_addr);
  const bool rfc1918_10 = ip[0] == 10;
  const bool loopback = ip[0] == 127;
  const bool rfc1918_172 = ip[0] == 172 && ip[1] >= 16 && ip[1] <= 31;
  const bool rfc1918_192 = ip[0] == 192 && ip[1] == 168;
  return !(rfc1918_10 || loopback || rfc1918_172 || rfc1918_192);
}
// Removes the given PPU thread from every socket's wait queue; returns the
// number of queue entries that were cleared.
u32 network_clear_queue(ppu_thread &ppu) {
  u32 cleared = 0;
  idm::select<lv2_socket>([&](u32 /*id*/, lv2_socket &sock) {
    cleared += sock.clear_queue(&ppu);
  });
  return cleared;
}
// Removes a PPU thread from the wake-up queues of both network contexts
// (regular sockets and P2P), e.g. when the thread stops waiting.
void clear_ppu_to_awake(ppu_thread &ppu) {
  g_fxo->get<network_context>().del_ppu_to_awake(&ppu);
  g_fxo->get<p2p_context>().del_ppu_to_awake(&ppu);
}
#ifdef _WIN32
// Workaround function for WSAPoll not reporting failed connections
// Note that this was fixed in Windows 10 version 2004 (after more than 10 years
// lol)
// For each fd flagged as 'connecting', a silent connect failure (no revents
// but SO_ERROR set) is converted into POLLERR|POLLHUP so callers see it.
void windows_poll(std::vector<pollfd> &fds, unsigned long nfds, int timeout,
                  std::vector<bool> &connecting) {
  ensure(fds.size() >= nfds);
  ensure(connecting.size() >= nfds);
  // Don't call WSAPoll with zero nfds (errors 10022 or 10038)
  if (std::none_of(fds.begin(), fds.begin() + nfds,
                   [](pollfd &pfd) { return pfd.fd != INVALID_SOCKET; })) {
    // Emulate the timeout behaviour poll() would have had.
    if (timeout > 0) {
      Sleep(timeout);
    }
    return;
  }
  int r = ::WSAPoll(fds.data(), nfds, timeout);
  if (r == SOCKET_ERROR) {
    sys_net.error(
        "WSAPoll failed: %s",
        fmt::win_error{static_cast<unsigned long>(WSAGetLastError()), nullptr});
    return;
  }
  for (unsigned long i = 0; i < nfds; i++) {
    if (connecting[i]) {
      if (!fds[i].revents) {
        int error = 0;
        socklen_t intlen = sizeof(error);
        if (getsockopt(fds[i].fd, SOL_SOCKET, SO_ERROR,
                       reinterpret_cast<char *>(&error), &intlen) == -1 ||
            error != 0) {
          // Connection silently failed
          connecting[i] = false;
          fds[i].revents =
              POLLERR | POLLHUP | (fds[i].events & (POLLIN | POLLOUT));
        }
      } else {
        // Any reported event means the connect attempt has resolved.
        connecting[i] = false;
      }
    }
  }
}
#endif

View file

@ -0,0 +1,210 @@
#include "stdafx.h"
#include "Crypto/unedat.h"
#include "Crypto/unself.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/IdManager.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/VFS.h"
#include "Emu/system_config.h"
#include "Loader/ELF.h"
#include "sys_fs.h"
#include "sys_overlay.h"
#include "sys_process.h"
extern std::pair<shared_ptr<lv2_overlay>, CellError>
ppu_load_overlay(const ppu_exec_object &, bool virtual_load,
const std::string &path, s64 file_offset,
utils::serial *ar = nullptr);
extern bool ppu_initialize(const ppu_module<lv2_obj> &, bool check_only = false,
u64 file_size = 0);
extern void ppu_finalize(const ppu_module<lv2_obj> &info,
bool force_mem_release = false);
LOG_CHANNEL(sys_overlay);
// Decrypts and loads an overlay (OVL) module from 'vpath' (or an already
// opened 'src' file at 'file_offset'), compiles its PPU code and writes the
// new module id/entry point back to the guest. Returns a CELL error code on
// failure.
static error_code overlay_load_module(vm::ptr<u32> ovlmid,
                                      const std::string &vpath, u64 /*flags*/,
                                      vm::ptr<u32> entry, fs::file src = {},
                                      s64 file_offset = 0) {
  if (!src) {
    // No file supplied: resolve and open the module through the VFS.
    auto [fs_error, ppath, path, lv2_file, type] = lv2_file::open(vpath, 0, 0);
    if (fs_error) {
      return {fs_error, vpath};
    }
    src = std::move(lv2_file);
  }
  // NPDRM content: decrypt the SELF with the last loaded klicensee.
  u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
  src = decrypt_self(std::move(src), reinterpret_cast<u8 *>(&klic));
  if (!src) {
    return {CELL_ENOEXEC, +"Failed to decrypt file"};
  }
  ppu_exec_object obj = std::move(src);
  src.close();
  if (obj != elf_error::ok) {
    return {CELL_ENOEXEC, obj.operator elf_error()};
  }
  const auto [ovlm, error] =
      ppu_load_overlay(obj, false, vfs::get(vpath), file_offset);
  obj.clear();
  if (error) {
    if (error == CELL_CANCEL + 0u) {
      // Emulation stopped
      return {};
    }
    return error;
  }
  ppu_initialize(*ovlm);
  sys_overlay.success("Loaded overlay: \"%s\" (id=0x%x)", vpath,
                      idm::last_id());
  *ovlmid = idm::last_id();
  *entry = ovlm->entry;
  return CELL_OK;
}
fs::file make_file_view(fs::file &&file, u64 offset, u64 size);
// Savestate restore: re-opens the overlay file referenced by the serialized
// vpath/offset, re-decrypts and reloads the module, and returns a callback
// that publishes the object into idm storage.
std::function<void(void *)> lv2_overlay::load(utils::serial &ar) {
  const std::string vpath = ar.pop<std::string>();
  const std::string path = vfs::get(vpath);
  const s64 offset = ar.pop<s64>();
  sys_overlay.success("lv2_overlay::load(): vpath='%s', path='%s', offset=0x%x",
                      vpath, path, offset);
  shared_ptr<lv2_overlay> ovlm;
  // Non-zero offsets were encoded into the stored path as a "_x<offset>"
  // suffix; strip it to recover the real file path.
  fs::file file{path.substr(
      0, path.size() - (offset ? fmt::format("_x%x", offset).size() : 0))};
  if (file) {
    u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
    file = make_file_view(std::move(file), offset, umax);
    ovlm =
        ppu_load_overlay(ppu_exec_object{decrypt_self(
                             std::move(file), reinterpret_cast<u8 *>(&klic))},
                         false, path, 0, &ar)
            .first;
    if (!ovlm) {
      fmt::throw_exception("lv2_overlay::load(): ppu_load_overlay() failed. "
                           "(vpath='%s', offset=0x%x)",
                           vpath, offset);
    }
  } else if (!g_cfg.savestate.state_inspection_mode.get()) {
    fmt::throw_exception(
        "lv2_overlay::load(): Failed to find file. (vpath='%s', offset=0x%x)",
        vpath, offset);
  } else {
    // Inspection mode: tolerate the missing file, just log it.
    sys_overlay.error(
        "lv2_overlay::load(): Failed to find file. (vpath='%s', offset=0x%x)",
        vpath, offset);
  }
  return [ovlm](void *storage) {
    *static_cast<atomic_ptr<lv2_obj> *>(storage) = ovlm;
  };
}
// Serialize this overlay module for a savestate: store its virtual path and
// file offset so lv2_overlay::load() can re-open the same executable.
void lv2_overlay::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_prx_overlay);

  // Map the host path back to a VFS path; an empty result means the file is
  // no longer reachable through the VFS, so flag it loudly in the log.
  const std::string vpath = vfs::retrieve(path);

  if (vpath.empty()) {
    sys_overlay.error("lv2_overlay::save(): vpath='%s', offset=0x%x", vpath,
                      offset);
  } else {
    sys_overlay.success("lv2_overlay::save(): vpath='%s', offset=0x%x", vpath,
                        offset);
  }

  ar(vpath, offset);
}
// Syscall: load an overlay module from a guest VFS path.
// On success writes the new overlay id to *ovlmid and its entry point to
// *entry (both filled by overlay_load_module).
error_code sys_overlay_load_module(vm::ptr<u32> ovlmid, vm::cptr<char> path,
                                   u64 flags, vm::ptr<u32> entry) {
  sys_overlay.warning(
      "sys_overlay_load_module(ovlmid=*0x%x, path=%s, flags=0x%x, entry=*0x%x)",
      ovlmid, path, flags, entry);
  // Overlay loading is gated on the process's ppc_seg capability flag
  if (!g_ps3_process_info.ppc_seg) {
    // Process not permitted
    return CELL_ENOSYS;
  }
  if (!path) {
    return CELL_EFAULT;
  }
  return overlay_load_module(ovlmid, path.get_ptr(), flags, entry);
}
// Syscall: load an overlay module from an already-open lv2 file descriptor,
// optionally starting at a byte offset within that file.
error_code sys_overlay_load_module_by_fd(vm::ptr<u32> ovlmid, u32 fd,
                                         u64 offset, u64 flags,
                                         vm::ptr<u32> entry) {
  sys_overlay.warning("sys_overlay_load_module_by_fd(ovlmid=*0x%x, fd=%d, "
                      "offset=0x%llx, flags=0x%x, entry=*0x%x)",
                      ovlmid, fd, offset, flags, entry);
  if (!g_ps3_process_info.ppc_seg) {
    // Process not permitted
    return CELL_ENOSYS;
  }
  // Negative offsets (when reinterpreted as signed) are rejected
  if (static_cast<s64>(offset) < 0) {
    return CELL_EINVAL;
  }
  const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
  if (!file) {
    return CELL_EBADF;
  }
  // Lock the mount point while taking a view of the underlying file
  std::lock_guard lock(file->mp->mutex);
  if (!file->file) {
    return CELL_EBADF;
  }
  // Non-zero offsets get a synthetic "_x%x" name suffix so the module path
  // stays unique per offset (lv2_overlay::load strips it again)
  return overlay_load_module(
      ovlmid,
      offset ? fmt::format("%s_x%x", file->name.data(), offset)
             : file->name.data(),
      flags, entry, lv2_file::make_view(file, offset), offset);
}
// Syscall: unload a previously loaded overlay module and release the guest
// memory segments that were mapped for it.
error_code sys_overlay_unload_module(u32 ovlmid) {
  sys_overlay.warning("sys_overlay_unload_module(ovlmid=0x%x)", ovlmid);

  if (!g_ps3_process_info.ppc_seg) {
    // Process not permitted
    return CELL_ENOSYS;
  }

  // Withdraw the overlay from the ID manager, taking ownership of it
  const auto ovlm = idm::withdraw<lv2_obj, lv2_overlay>(ovlmid);

  if (!ovlm) {
    return CELL_ESRCH;
  }

  // Free every memory segment the loader mapped for this module
  for (const auto &seg : ovlm->segs) {
    vm::dealloc(seg.addr);
  }

  ppu_finalize(*ovlm);
  return CELL_OK;
}

View file

@ -0,0 +1,645 @@
#include "stdafx.h"
#include "sys_ppu_thread.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUCallback.h"
#include "Emu/Cell/PPUOpcodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Memory/vm_locking.h"
#include "sys_event.h"
#include "sys_memory.h"
#include "sys_mmapper.h"
#include "sys_process.h"
#include "util/asm.hpp"
#include <thread>
LOG_CHANNEL(sys_ppu_thread);
// Simple structure to cleanup previous thread, because can't remove its own
// thread
//
// A dying PPU thread parks its owning pointer here via clean(); the NEXT
// thread to exit (or the operator= below) then joins the previous one, so no
// thread ever has to join itself.
struct ppu_thread_cleaner {
  // Owning pointer to the previously-exited thread (already removed from IDM)
  shared_ptr<named_thread<ppu_thread>> old;
  // Swap in a new zombie and return the previous one for the caller to join
  shared_ptr<named_thread<ppu_thread>>
  clean(shared_ptr<named_thread<ppu_thread>> ptr) {
    return std::exchange(old, std::move(ptr));
  }
  ppu_thread_cleaner() = default;
  // Non-copyable: exactly one owner of the parked thread pointer
  ppu_thread_cleaner(const ppu_thread_cleaner &) = delete;
  ppu_thread_cleaner &operator=(const ppu_thread_cleaner &) = delete;
  // Propagate a thread_state (e.g. finished) to the parked thread, joining it
  ppu_thread_cleaner &operator=(thread_state state) noexcept {
    reader_lock lock(id_manager::g_mutex);
    if (old) {
      // It is detached from IDM now so join must be done explicitly now
      *static_cast<named_thread<ppu_thread> *>(old.get()) = state;
    }
    return *this;
  }
};
// Final teardown for a PPU thread: marks it for exit, returns its stack to
// the memory container, and flushes any recorded call/syscall history logs.
// The unused opcode/pointer parameters keep the ppu_intrp_func signature.
void ppu_thread_exit(ppu_thread &ppu, ppu_opcode_t, be_t<u32> *,
                     struct ppu_intrp_func *) {
  ppu.state += cpu_flag::exit + cpu_flag::wait;
  // Deallocate Stack Area
  ensure(vm::dealloc(ppu.stack_addr, vm::stack) == ppu.stack_size);
  // Return the stack's "physical memory" to the default container, if any
  if (auto dct = g_fxo->try_get<lv2_memory_container>()) {
    dct->free(ppu.stack_size);
  }
  // Dump and reset debug histories so they don't leak into a reused slot
  if (ppu.call_history.index) {
    ppu_log.notice("Calling history: %s", ppu.call_history);
    ppu.call_history.index = 0;
  }
  if (ppu.syscall_history.index) {
    ppu_log.notice("HLE/LV2 history: %s", ppu.syscall_history);
    ppu.syscall_history.index = 0;
  }
}
// Maximum PPU thread name buffer size in bytes, including the null terminator
// (readers use c_max_ppu_name_size - 1 as the character limit)
constexpr u32 c_max_ppu_name_size = 28;
// Syscall: terminate the calling PPU thread.
// Handles the joinable/zombie/detached state machine: a joinable thread
// becomes a zombie and waits for its joiner; otherwise it removes itself from
// IDM and parks its owning pointer in ppu_thread_cleaner for deferred join.
void _sys_ppu_thread_exit(ppu_thread &ppu, u64 errorcode) {
  ppu.state += cpu_flag::wait;
  u64 writer_mask = 0;
  sys_ppu_thread.trace("_sys_ppu_thread_exit(errorcode=0x%llx)", errorcode);
  ppu_join_status old_status;
  // Avoid cases where cleaning causes the destructor to be called inside IDM
  // lock scope (for performance)
  shared_ptr<named_thread<ppu_thread>> old_ppu;
  {
    lv2_obj::notify_all_t notify;
    lv2_obj::prepare_for_sleep(ppu);
    std::lock_guard lock(id_manager::g_mutex);
    // Get joiner ID
    old_status = ppu.joiner.fetch_op([](ppu_join_status &status) {
      if (status == ppu_join_status::joinable) {
        // Joinable, not joined
        status = ppu_join_status::zombie;
        return;
      }
      // Set deleted thread status
      status = ppu_join_status::exited;
    });
    // Values >= max encode the thread id of a waiting joiner: wake it up
    if (old_status >= ppu_join_status::max) {
      lv2_obj::append(idm::check_unlocked<named_thread<ppu_thread>>(
          static_cast<u32>(old_status)));
    }
    if (old_status != ppu_join_status::joinable) {
      // Remove self ID from IDM, move owning ptr
      old_ppu = g_fxo->get<ppu_thread_cleaner>().clean(
          idm::withdraw<named_thread<ppu_thread>>(ppu.id, 0,
                                                  std::false_type{}));
    }
    // Get writers mask (wait for all current writers to quit)
    writer_mask = vm::g_range_lock_bits[1];
    // Unqueue
    lv2_obj::sleep(ppu);
    notify.cleanup();
    // Remove suspend state (TODO)
    ppu.state -= cpu_flag::suspend;
  }
  // Zombie: block here until a joiner consumes us (or emulation stops)
  while (ppu.joiner == ppu_join_status::zombie) {
    if (ppu.is_stopped() &&
        ppu.joiner.compare_and_swap_test(ppu_join_status::zombie,
                                         ppu_join_status::joinable)) {
      // Abort
      ppu.state += cpu_flag::again;
      return;
    }
    // Wait for termination
    thread_ctrl::wait_on(ppu.joiner, ppu_join_status::zombie);
  }
  ppu_thread_exit(ppu, {}, nullptr, nullptr);
  if (old_ppu) {
    // It is detached from IDM now so join must be done explicitly now
    *old_ppu = thread_state::finished;
  }
  // Need to wait until the current writers finish
  if (ppu.state & cpu_flag::memory) {
    for (; writer_mask; writer_mask &= vm::g_range_lock_bits[1]) {
      busy_wait(200);
    }
  }
}
// Syscall: voluntarily yield the CPU to another PPU thread.
// Returns 0 (CELL_OK) when a context switch actually happened, 1
// (CELL_CANCEL) when there was nothing to switch to.
s32 sys_ppu_thread_yield(ppu_thread &ppu) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.trace("sys_ppu_thread_yield()");

  if (lv2_obj::yield(ppu)) {
    // Successful context switch
    return CELL_OK;
  }

  // No switch happened — do other work in the meantime
  lv2_obj::notify_all();
  return CELL_CANCEL;
}
// Syscall: join another PPU thread, waiting until it exits.
// On success the target's exit value (its GPR3) is stored in *vptr.
// Self-join returns EDEADLK; a detached thread returns EINVAL; a zombie is
// consumed immediately (EAGAIN internally) without sleeping.
error_code sys_ppu_thread_join(ppu_thread &ppu, u32 thread_id,
                               vm::ptr<u64> vptr) {
  lv2_obj::prepare_for_sleep(ppu);
  sys_ppu_thread.trace("sys_ppu_thread_join(thread_id=0x%x, vptr=*0x%x)",
                       thread_id, vptr);
  if (thread_id == ppu.id) {
    return CELL_EDEADLK;
  }
  // Atomically install ourselves as the joiner (or detect zombie/exited)
  auto thread = idm::get<named_thread<ppu_thread>>(
      thread_id,
      [&, notify = lv2_obj::notify_all_t()](ppu_thread &thread) -> CellError {
        CellError result =
            thread.joiner.atomic_op([&](ppu_join_status &value) -> CellError {
              switch (value) {
              case ppu_join_status::joinable:
                // Store our own id as the joiner value
                value = ppu_join_status{ppu.id};
                return {};
              case ppu_join_status::zombie:
                // Already finished: consume it without sleeping
                value = ppu_join_status::exited;
                return CELL_EAGAIN;
              case ppu_join_status::exited:
                return CELL_ESRCH;
              case ppu_join_status::detached:
              default:
                return CELL_EINVAL;
              }
            });
        if (!result) {
          lv2_obj::prepare_for_sleep(ppu);
          lv2_obj::sleep(ppu);
        }
        notify.cleanup();
        return result;
      });
  if (!thread) {
    return CELL_ESRCH;
  }
  if (thread.ret && thread.ret != CELL_EAGAIN) {
    return thread.ret;
  }
  if (thread.ret == CELL_EAGAIN) {
    // Notify thread if waiting for a joiner
    thread->joiner.notify_one();
  }
  // Wait for cleanup
  (*thread.ptr)();
  if (thread->joiner != ppu_join_status::exited) {
    // Thread aborted, log it later
    ppu.state += cpu_flag::again;
    return {};
  }
  static_cast<void>(ppu.test_stopped());
  // Get the exit status from the register
  const u64 vret = thread->gpr[3];
  if (thread.ret == CELL_EAGAIN) {
    // Cleanup
    ensure(idm::remove_verify<named_thread<ppu_thread>>(thread_id,
                                                        std::move(thread.ptr)));
  }
  // EFAULT here is "not an error": the join itself succeeded
  if (!vptr) {
    return not_an_error(CELL_EFAULT);
  }
  *vptr = vret;
  return CELL_OK;
}
// Syscall: detach a PPU thread so it cleans itself up on exit.
// A zombie target (already exited, waiting for a joiner) is withdrawn from
// IDM here and joined explicitly; an already-detached target yields EINVAL.
error_code sys_ppu_thread_detach(ppu_thread &ppu, u32 thread_id) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.trace("sys_ppu_thread_detach(thread_id=0x%x)", thread_id);
  CellError result = CELL_ESRCH;
  auto [ptr, _] = idm::withdraw<named_thread<ppu_thread>>(
      thread_id, [&](ppu_thread &thread) {
        result =
            thread.joiner.atomic_op([](ppu_join_status &value) -> CellError {
              switch (value) {
              case ppu_join_status::joinable:
                value = ppu_join_status::detached;
                return {};
              case ppu_join_status::detached:
                return CELL_EINVAL;
              case ppu_join_status::zombie:
                // Consume the zombie; caller must join it explicitly below
                value = ppu_join_status::exited;
                return CELL_EAGAIN;
              case ppu_join_status::exited:
                return CELL_ESRCH;
              default:
                // A joiner id is stored: someone is already joining it
                return CELL_EBUSY;
              }
            });
        // Remove ID on EAGAIN
        return result != CELL_EAGAIN;
      });
  if (result) {
    if (result == CELL_EAGAIN) {
      // Join and notify thread (it is detached from IDM now so it must be done
      // explicitly now)
      *ptr = thread_state::finished;
    }
    return result;
  }
  return CELL_OK;
}
// Syscall: report whether the calling thread is joinable (i.e. not detached).
error_code sys_ppu_thread_get_join_state(ppu_thread &ppu,
                                         vm::ptr<s32> isjoinable) {
  sys_ppu_thread.trace("sys_ppu_thread_get_join_state(isjoinable=*0x%x)",
                       isjoinable);

  if (!isjoinable) {
    return CELL_EFAULT;
  }

  // Any joiner state other than "detached" counts as joinable
  const bool joinable = ppu.joiner != ppu_join_status::detached;
  *isjoinable = joinable;
  return CELL_OK;
}
// Syscall: set a PPU thread's scheduling priority.
// Valid range is 0..3071 for normal processes; debug/root processes may also
// use negative priorities down to -512.
error_code sys_ppu_thread_set_priority(ppu_thread &ppu, u32 thread_id,
                                       s32 prio) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.trace("sys_ppu_thread_set_priority(thread_id=0x%x, prio=%d)",
                       thread_id, prio);
  if (prio < (g_ps3_process_info.debug_or_root() ? -512 : 0) || prio > 3071) {
    return CELL_EINVAL;
  }
  if (thread_id == ppu.id) {
    // Fast path for self
    if (ppu.prio.load().prio != prio) {
      lv2_obj::set_priority(ppu, prio);
    }
    return CELL_OK;
  }
  // Otherwise look the thread up and change it while notifying the scheduler
  const auto thread = idm::check<named_thread<ppu_thread>>(
      thread_id, [&, notify = lv2_obj::notify_all_t()](ppu_thread &thread) {
        lv2_obj::set_priority(thread, prio);
      });
  if (!thread) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}
// Syscall: read a PPU thread's current scheduling priority into *priop.
// The retry loops re-read under lv2_obj::g_mutex while the target is not
// suspended, yielding and re-checking state until a consistent value is seen.
error_code sys_ppu_thread_get_priority(ppu_thread &ppu, u32 thread_id,
                                       vm::ptr<s32> priop) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.trace(
      "sys_ppu_thread_get_priority(thread_id=0x%x, priop=*0x%x)", thread_id,
      priop);
  u32 prio{};
  if (thread_id == ppu.id) {
    // Fast path for self
    for (; !ppu.is_stopped(); std::this_thread::yield()) {
      // Only sample while not suspended, under the scheduler lock
      if (reader_lock lock(lv2_obj::g_mutex); cpu_flag::suspend - ppu.state) {
        prio = ppu.prio.load().prio;
        break;
      }
      ppu.check_state();
      ppu.state += cpu_flag::wait;
    }
    ppu.check_state();
    *priop = prio;
    return CELL_OK;
  }
  for (; !ppu.is_stopped(); std::this_thread::yield()) {
    bool check_state = false;
    const auto thread = idm::check<named_thread<ppu_thread>>(
        thread_id, [&](ppu_thread &thread) {
          if (reader_lock lock(lv2_obj::g_mutex);
              cpu_flag::suspend - ppu.state) {
            prio = thread.prio.load().prio;
          } else {
            // Could not sample safely this round; retry after check_state
            check_state = true;
          }
        });
    if (check_state) {
      ppu.check_state();
      ppu.state += cpu_flag::wait;
      continue;
    }
    if (!thread) {
      return CELL_ESRCH;
    }
    ppu.check_state();
    *priop = prio;
    break;
  }
  return CELL_OK;
}
// Syscall: report the calling thread's stack base address and size.
error_code
sys_ppu_thread_get_stack_information(ppu_thread &ppu,
                                     vm::ptr<sys_ppu_thread_stack_t> sp) {
  sys_ppu_thread.trace("sys_ppu_thread_get_stack_information(sp=*0x%x)", sp);

  // Fill the guest structure from this thread's stack allocation
  auto &out = *sp;
  out.pst_addr = ppu.stack_addr;
  out.pst_size = ppu.stack_size;
  return CELL_OK;
}
// Syscall stub: would forcibly stop the given PPU thread.
// Requires root permission; currently only validates that the thread exists.
error_code sys_ppu_thread_stop(ppu_thread &ppu, u32 thread_id) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.todo("sys_ppu_thread_stop(thread_id=0x%x)", thread_id);

  if (!g_ps3_process_info.has_root_perm()) {
    return CELL_ENOSYS;
  }

  // Existence check only — stopping is not implemented (empty visitor)
  if (!idm::check<named_thread<ppu_thread>>(thread_id,
                                            [](named_thread<ppu_thread> &) {})) {
    return CELL_ESRCH;
  }

  return CELL_OK;
}
// Syscall stub: would restart the calling PPU thread.
// Requires root permission; otherwise ENOSYS. No-op on success.
error_code sys_ppu_thread_restart(ppu_thread &ppu) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.todo("sys_ppu_thread_restart()");
  if (!g_ps3_process_info.has_root_perm()) {
    return CELL_ENOSYS;
  }
  return CELL_OK;
}
// Syscall: create a new (stopped) PPU thread.
// Validates parameters, charges the stack to the default memory container,
// allocates the guest stack, optionally reads the thread name, and registers
// the thread with IDM. On success writes the new thread id to *thread_id.
// Every failure path releases whatever was acquired before it.
error_code _sys_ppu_thread_create(ppu_thread &ppu, vm::ptr<u64> thread_id,
                                  vm::ptr<ppu_thread_param_t> param, u64 arg,
                                  u64 unk, s32 prio, u32 _stacksz, u64 flags,
                                  vm::cptr<char> threadname) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.warning(
      "_sys_ppu_thread_create(thread_id=*0x%x, param=*0x%x, arg=0x%llx, "
      "unk=0x%llx, prio=%d, stacksize=0x%x, flags=0x%llx, threadname=*0x%x)",
      thread_id, param, arg, unk, prio, _stacksz, flags, threadname);
  // thread_id is checked for null in stub -> CELL_ENOMEM
  // unk is set to 0 in sys_ppu_thread_create stub
  if (!param || !param->entry) {
    return CELL_EFAULT;
  }
  // Debug/root processes may use negative priorities down to -512
  if (prio < (g_ps3_process_info.debug_or_root() ? -512 : 0) || prio > 3071) {
    return CELL_EINVAL;
  }
  if ((flags & 3) == 3) // Check two flags: joinable + interrupt not allowed
  {
    return CELL_EPERM;
  }
  const ppu_func_opd_t entry = param->entry.opd();
  const u32 tls = param->tls;
  // Compute actual stack size and allocate (page-aligned, at least one page)
  const u32 stack_size = utils::align<u32>(std::max<u32>(_stacksz, 4096), 4096);
  auto &dct = g_fxo->get<lv2_memory_container>();
  // Try to obtain "physical memory" from the default container
  if (!dct.take(stack_size)) {
    return {CELL_ENOMEM, dct.size - dct.used};
  }
  const vm::addr_t stack_base{vm::alloc(stack_size, vm::stack, 4096)};
  if (!stack_base) {
    dct.free(stack_size);
    return CELL_ENOMEM;
  }
  std::string ppu_name;
  if (threadname) {
    constexpr u32 max_size =
        c_max_ppu_name_size - 1; // max size excluding null terminator
    if (!vm::read_string(threadname.addr(), max_size, ppu_name, true)) {
      // Fixed: this path previously leaked the freshly allocated guest stack
      // (only the container accounting was released, unlike the EAGAIN path)
      vm::dealloc(stack_base);
      dct.free(stack_size);
      return CELL_EFAULT;
    }
  }
  const u32 tid = idm::import <named_thread<ppu_thread>>([&]() {
    ppu_thread_params p;
    p.stack_addr = stack_base;
    p.stack_size = stack_size;
    p.tls_addr = tls;
    p.entry = entry;
    p.arg0 = arg;
    p.arg1 = unk;
    // joinable flag (bit 0) inverts into the "detached" ctor argument
    return stx::make_shared<named_thread<ppu_thread>>(
        p, ppu_name, prio, 1 - static_cast<int>(flags & 3));
  });
  if (!tid) {
    // IDM exhausted: roll back stack allocation and container accounting
    vm::dealloc(stack_base);
    dct.free(stack_size);
    return CELL_EAGAIN;
  }
  sys_ppu_thread.warning("_sys_ppu_thread_create(): Thread \"%s\" created "
                         "(id=0x%x, func=*0x%x, rtoc=0x%x, user-tls=0x%x)",
                         ppu_name, tid, entry.addr, entry.rtoc, tls);
  ppu.check_state();
  *thread_id = tid;
  return CELL_OK;
}
// Syscall: start a previously created (stopped) PPU thread.
// Clears the stop flag, wakes the thread in the scheduler and queues its
// entry-point command; EBUSY if it was already started.
error_code sys_ppu_thread_start(ppu_thread &ppu, u32 thread_id) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.trace("sys_ppu_thread_start(thread_id=0x%x)", thread_id);
  const auto thread = idm::get<named_thread<ppu_thread>>(
      thread_id,
      [&, notify = lv2_obj::notify_all_t()](ppu_thread &thread) -> CellError {
        if (!thread.state.test_and_reset(cpu_flag::stop)) {
          // Already started
          return CELL_EBUSY;
        }
        ensure(lv2_obj::awake(&thread));
        // Queue the call into the thread's entry point
        thread.cmd_list({
            {ppu_cmd::entry_call, 0},
        });
        return {};
      });
  if (!thread) {
    return CELL_ESRCH;
  }
  if (thread.ret) {
    return thread.ret;
  } else {
    // Kick the worker so it notices the queued command
    thread->cmd_notify.store(1);
    thread->cmd_notify.notify_one();
  }
  return CELL_OK;
}
// Syscall: rename a PPU thread (name truncated to the PPU name limit).
// Note: the lookup happens before the name pointer check, so a bad id yields
// ESRCH even when name is null.
error_code sys_ppu_thread_rename(ppu_thread &ppu, u32 thread_id,
                                 vm::cptr<char> name) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.warning("sys_ppu_thread_rename(thread_id=0x%x, name=*0x%x)",
                         thread_id, name);
  const auto thread = idm::get_unlocked<named_thread<ppu_thread>>(thread_id);
  if (!thread) {
    return CELL_ESRCH;
  }
  if (!name) {
    return CELL_EFAULT;
  }
  constexpr u32 max_size =
      c_max_ppu_name_size - 1; // max size excluding null terminator
  // Make valid name
  std::string out_str;
  if (!vm::read_string(name.addr(), max_size, out_str, true)) {
    return CELL_EFAULT;
  }
  auto _name = make_single<std::string>(std::move(out_str));
  // thread_ctrl name is not changed (TODO)
  sys_ppu_thread.warning("sys_ppu_thread_rename(): Thread renamed to \"%s\"",
                         *_name);
  // Publish the new name atomically for log/debugger consumers
  thread->ppu_tname.store(std::move(_name));
  thread_ctrl::set_name(
      *thread, thread->thread_name); // TODO: Currently sets debugger thread
                                     // name only for local thread
  return CELL_OK;
}
// Syscall: resume a PPU thread that was suspended by a page-fault event.
error_code sys_ppu_thread_recover_page_fault(ppu_thread &ppu, u32 thread_id) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.warning("sys_ppu_thread_recover_page_fault(thread_id=0x%x)",
                         thread_id);

  // Look up the target thread; delegate actual recovery to the mmapper
  // page-fault machinery.
  if (const auto thread =
          idm::get_unlocked<named_thread<ppu_thread>>(thread_id)) {
    return mmapper_thread_recover_page_fault(thread.get());
  }

  return CELL_ESRCH;
}
// Syscall stub: would fill a page-fault interrupt context for a thread that
// is currently suspended by a page fault. The context itself is not yet
// populated (TODO).
error_code
sys_ppu_thread_get_page_fault_context(ppu_thread &ppu, u32 thread_id,
                                      vm::ptr<sys_ppu_thread_icontext_t> ctxt) {
  ppu.state += cpu_flag::wait;
  sys_ppu_thread.todo(
      "sys_ppu_thread_get_page_fault_context(thread_id=0x%x, ctxt=*0x%x)",
      thread_id, ctxt);

  const auto thread = idm::get_unlocked<named_thread<ppu_thread>>(thread_id);

  if (!thread) {
    return CELL_ESRCH;
  }

  // A context is only available while the thread is suspended for a page
  // fault, i.e. it is registered in the pending page-fault event table.
  auto &pf_events = g_fxo->get<page_fault_event_entries>();
  reader_lock lock(pf_events.pf_mutex);

  if (pf_events.events.count(thread.get()) == 0) {
    return CELL_EINVAL;
  }

  // TODO: Fill ctxt with proper information.
  return CELL_OK;
}

View file

@ -0,0 +1,590 @@
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/System.h"
#include "Emu/VFS.h"
#include "sys_process.h"
#include "Crypto/unedat.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_cond.h"
#include "sys_event.h"
#include "sys_event_flag.h"
#include "sys_fs.h"
#include "sys_interrupt.h"
#include "sys_lwcond.h"
#include "sys_lwmutex.h"
#include "sys_memory.h"
#include "sys_mmapper.h"
#include "sys_mutex.h"
#include "sys_overlay.h"
#include "sys_prx.h"
#include "sys_rwlock.h"
#include "sys_semaphore.h"
#include "sys_spu.h"
#include "sys_timer.h"
#include "sys_vm.h"
// Check all flags known to be related to extended permissions (TODO)
// It's possible that anything which has root flags implicitly has debug perm
// as well, but I haven't confirmed it.
// True if any of the three extended-permission bits (0xe in the top nibble of
// ctrl_flags1) is set, i.e. the process has root and/or debug capabilities.
bool ps3_process_info_t::debug_or_root() const {
  return (ctrl_flags1 & (0xe << 28)) != 0;
}
// True if either of the root-capability bits (0xc in the top nibble of
// ctrl_flags1) is set.
bool ps3_process_info_t::has_root_perm() const {
  return (ctrl_flags1 & (0xc << 28)) != 0;
}
// True if either of the debug-capability bits (0xa in the top nibble of
// ctrl_flags1) is set.
bool ps3_process_info_t::has_debug_perm() const {
  return (ctrl_flags1 & (0xa << 28)) != 0;
}
// If a SELF file is of CellOS return its filename, otherwise return an empty
// string
// Returns the boot executable's basename when running a CellOS (root-perm,
// title-less) SELF, else an empty view.
// NOTE(review): the returned string_view aliases Emu.GetBoot()'s storage —
// presumably that string outlives the caller's use; verify against callers.
std::string_view ps3_process_info_t::get_cellos_appname() const {
  if (!has_root_perm() || !Emu.GetTitleID().empty()) {
    return {};
  }
  // Basename: everything after the last '/' of the boot path
  return std::string_view(Emu.GetBoot())
      .substr(Emu.GetBoot().find_last_of('/') + 1);
}
LOG_CHANNEL(sys_process);
// Global descriptor of the single emulated PS3 process (permissions, SDK
// version, control flags); read throughout the lv2 syscall layer.
ps3_process_info_t g_ps3_process_info;
// TODO: get current process id
s32 process_getpid() {
  // Only a single guest process exists, so the PID is a fixed value.
  constexpr s32 current_pid = 1;
  return current_pid;
}
// Syscall: return the current process id (always 1, see process_getpid).
s32 sys_process_getpid() {
  sys_process.trace("sys_process_getpid() -> 1");
  return process_getpid();
}
// Syscall stub: return the parent process id (always 0, TODO).
s32 sys_process_getppid() {
  sys_process.todo("sys_process_getppid() -> 0");
  return 0;
}
// Count the live objects of the given IDM type by running an empty visitor;
// idm::select() returns the number of objects visited.
template <typename T, typename Get> u32 idm_get_count() {
  return idm::select<T, Get>([](u32, Get &) {});
}
// Syscall: count the live kernel objects of a given class and store the
// count in *nump. SYS_TRACE_OBJECT and SYS_SPUPORT_OBJECT are not tracked
// and always report 0; unknown classes yield EINVAL.
error_code sys_process_get_number_of_object(u32 object, vm::ptr<u32> nump) {
  sys_process.error("sys_process_get_number_of_object(object=0x%x, nump=*0x%x)",
                    object, nump);
  // Dispatch on the object-class constant to the matching IDM type pair
  switch (object) {
  case SYS_MEM_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_memory>();
    break;
  case SYS_MUTEX_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_mutex>();
    break;
  case SYS_COND_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_cond>();
    break;
  case SYS_RWLOCK_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_rwlock>();
    break;
  case SYS_INTR_TAG_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_int_tag>();
    break;
  case SYS_INTR_SERVICE_HANDLE_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_int_serv>();
    break;
  case SYS_EVENT_QUEUE_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_event_queue>();
    break;
  case SYS_EVENT_PORT_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_event_port>();
    break;
  case SYS_TRACE_OBJECT:
    sys_process.error(
        "sys_process_get_number_of_object: object = SYS_TRACE_OBJECT");
    *nump = 0;
    break;
  case SYS_SPUIMAGE_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_spu_image>();
    break;
  case SYS_PRX_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_prx>();
    break;
  case SYS_SPUPORT_OBJECT:
    sys_process.error(
        "sys_process_get_number_of_object: object = SYS_SPUPORT_OBJECT");
    *nump = 0;
    break;
  case SYS_OVERLAY_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_overlay>();
    break;
  case SYS_LWMUTEX_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_lwmutex>();
    break;
  case SYS_TIMER_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_timer>();
    break;
  case SYS_SEMAPHORE_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_sema>();
    break;
  case SYS_FS_FD_OBJECT:
    *nump = idm_get_count<lv2_fs_object, lv2_fs_object>();
    break;
  case SYS_LWCOND_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_lwcond>();
    break;
  case SYS_EVENT_FLAG_OBJECT:
    *nump = idm_get_count<lv2_obj, lv2_event_flag>();
    break;
  default: {
    return CELL_EINVAL;
  }
  }
  return CELL_OK;
}
#include <set>
// Collect the IDs of all live objects of the given IDM type into a sorted set.
template <typename T, typename Get> void idm_get_set(std::set<u32> &out) {
  idm::select<T, Get>([&out](u32 id, Get &) { out.emplace(id); });
}
// Shared implementation for sys_process_get_id/get_id2: gather the IDs of
// all objects of a class (in ascending order via std::set) and copy up to
// `size` of them into the guest buffer, writing the copied count to
// *set_size. Throws on the two classes that are never tracked here.
static error_code process_get_id(u32 object, vm::ptr<u32> buffer, u32 size,
                                 vm::ptr<u32> set_size) {
  std::set<u32> objects;
  switch (object) {
  case SYS_MEM_OBJECT:
    idm_get_set<lv2_obj, lv2_memory>(objects);
    break;
  case SYS_MUTEX_OBJECT:
    idm_get_set<lv2_obj, lv2_mutex>(objects);
    break;
  case SYS_COND_OBJECT:
    idm_get_set<lv2_obj, lv2_cond>(objects);
    break;
  case SYS_RWLOCK_OBJECT:
    idm_get_set<lv2_obj, lv2_rwlock>(objects);
    break;
  case SYS_INTR_TAG_OBJECT:
    idm_get_set<lv2_obj, lv2_int_tag>(objects);
    break;
  case SYS_INTR_SERVICE_HANDLE_OBJECT:
    idm_get_set<lv2_obj, lv2_int_serv>(objects);
    break;
  case SYS_EVENT_QUEUE_OBJECT:
    idm_get_set<lv2_obj, lv2_event_queue>(objects);
    break;
  case SYS_EVENT_PORT_OBJECT:
    idm_get_set<lv2_obj, lv2_event_port>(objects);
    break;
  case SYS_TRACE_OBJECT:
    fmt::throw_exception("SYS_TRACE_OBJECT");
  case SYS_SPUIMAGE_OBJECT:
    idm_get_set<lv2_obj, lv2_spu_image>(objects);
    break;
  case SYS_PRX_OBJECT:
    idm_get_set<lv2_obj, lv2_prx>(objects);
    break;
  case SYS_OVERLAY_OBJECT:
    idm_get_set<lv2_obj, lv2_overlay>(objects);
    break;
  case SYS_LWMUTEX_OBJECT:
    idm_get_set<lv2_obj, lv2_lwmutex>(objects);
    break;
  case SYS_TIMER_OBJECT:
    idm_get_set<lv2_obj, lv2_timer>(objects);
    break;
  case SYS_SEMAPHORE_OBJECT:
    idm_get_set<lv2_obj, lv2_sema>(objects);
    break;
  case SYS_FS_FD_OBJECT:
    idm_get_set<lv2_fs_object, lv2_fs_object>(objects);
    break;
  case SYS_LWCOND_OBJECT:
    idm_get_set<lv2_obj, lv2_lwcond>(objects);
    break;
  case SYS_EVENT_FLAG_OBJECT:
    idm_get_set<lv2_obj, lv2_event_flag>(objects);
    break;
  case SYS_SPUPORT_OBJECT:
    fmt::throw_exception("SYS_SPUPORT_OBJECT");
  default: {
    return CELL_EINVAL;
  }
  }
  u32 i = 0;
  // NOTE: Treats negative and 0 values as 1 due to signed checks and "do-while"
  // behavior of fw
  for (auto id = objects.begin();
       i < std::max<s32>(size, 1) + 0u && id != objects.end(); id++, i++) {
    buffer[i] = *id;
  }
  *set_size = i;
  return CELL_OK;
}
// Syscall: list IDs of kernel objects of a class (see process_get_id).
// SPU ports are explicitly rejected for this variant.
error_code sys_process_get_id(u32 object, vm::ptr<u32> buffer, u32 size,
                              vm::ptr<u32> set_size) {
  sys_process.error(
      "sys_process_get_id(object=0x%x, buffer=*0x%x, size=%d, set_size=*0x%x)",
      object, buffer, size, set_size);
  if (object == SYS_SPUPORT_OBJECT) {
    // Unallowed for this syscall
    return CELL_EINVAL;
  }
  return process_get_id(object, buffer, size, set_size);
}
// Syscall: privileged variant of sys_process_get_id (allows all classes,
// including SPU ports) — requires root permission.
error_code sys_process_get_id2(u32 object, vm::ptr<u32> buffer, u32 size,
                               vm::ptr<u32> set_size) {
  sys_process.error(
      "sys_process_get_id2(object=0x%x, buffer=*0x%x, size=%d, set_size=*0x%x)",
      object, buffer, size, set_size);
  if (!g_ps3_process_info.has_root_perm()) {
    // This syscall is more capable than sys_process_get_id but also needs a
    // root perm check
    return CELL_ENOSYS;
  }
  return process_get_id(object, buffer, size, set_size);
}
// Decide whether an address may be used for an SPU lock-line reservation by
// the requested accessor (SPU thread and/or RawSPU), based on the 256MB
// region the address falls into. Returns {} (no error) when allowed.
CellError process_is_spu_lock_line_reservation_address(u32 addr, u64 flags) {
  // flags must name at least one accessor and nothing else
  if (!flags || flags & ~(SYS_MEMORY_ACCESS_RIGHT_SPU_THR |
                          SYS_MEMORY_ACCESS_RIGHT_RAW_SPU)) {
    return CELL_EINVAL;
  }
  // TODO: respect sys_mmapper region's access rights
  // Classify by the top nibble (the 256MB region) of the address
  switch (addr >> 28) {
  case 0x0: // Main memory
  case 0x1: // Main memory
  case 0x2: // User 64k (sys_memory)
  case 0xc: // RSX Local memory
  case 0xe: // RawSPU MMIO
    break;
  case 0xf: // Private SPU MMIO
  {
    if (flags & SYS_MEMORY_ACCESS_RIGHT_RAW_SPU) {
      // Cannot be accessed by RawSPU
      return CELL_EPERM;
    }
    break;
  }
  case 0xd: // PPU Stack area
    return CELL_EPERM;
  default: {
    if (auto vm0 = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr))) {
      // sys_vm area was not covering the address specified but made a
      // reservation on the entire 256mb region
      if (vm0->addr + vm0->size - 1 < addr) {
        return CELL_EINVAL;
      }
      // sys_vm memory is not allowed
      return CELL_EPERM;
    }
    // Otherwise the 256MB region must at least be mapped
    if (!vm::get(vm::any, addr & -0x1000'0000)) {
      return CELL_EINVAL;
    }
    break;
  }
  }
  return {};
}
// Syscall wrapper: validate an SPU lock-line reservation address, mapping
// the internal CellError (if any) onto the syscall result.
error_code sys_process_is_spu_lock_line_reservation_address(u32 addr,
                                                            u64 flags) {
  sys_process.warning("sys_process_is_spu_lock_line_reservation_address(addr="
                      "0x%x, flags=0x%llx)",
                      addr, flags);

  const auto err = process_is_spu_lock_line_reservation_address(addr, flags);
  if (err) {
    return err;
  }
  return CELL_OK;
}
// Syscall: copy the process's title ID into a 0x40-byte guest buffer.
// Layout (as written here): byte 0 stays zero, bytes 1..9 hold up to 9
// characters of the title ID, remainder zero-filled. ENOENT with no title.
error_code _sys_process_get_paramsfo(vm::ptr<char> buffer) {
  sys_process.warning("_sys_process_get_paramsfo(buffer=0x%x)", buffer);
  if (Emu.GetTitleID().empty()) {
    return CELL_ENOENT;
  }
  memset(buffer.get_ptr(), 0, 0x40);
  memcpy(buffer.get_ptr() + 1, Emu.GetTitleID().c_str(),
         std::min<usz>(Emu.GetTitleID().length(), 9));
  return CELL_OK;
}
// Report the SDK version of the (single) process into ver; pid is ignored.
s32 process_get_sdk_version(u32 /*pid*/, s32 &ver) {
  // get correct SDK version for selected pid
  ver = g_ps3_process_info.sdk_ver;
  return CELL_OK;
}
// Syscall: store the target process's SDK version in *version.
error_code sys_process_get_sdk_version(u32 pid, vm::ptr<s32> version) {
  sys_process.warning("sys_process_get_sdk_version(pid=0x%x, version=*0x%x)",
                      pid, version);

  s32 sdk_ver{};
  if (const s32 ret = process_get_sdk_version(pid, sdk_ver); ret != CELL_OK) {
    return CellError{ret + 0u}; // error code
  }

  *version = sdk_ver;
  return CELL_OK;
}
// Syscall stub: would kill the given process; currently a no-op.
error_code sys_process_kill(u32 pid) {
  sys_process.todo("sys_process_kill(pid=0x%x)", pid);
  return CELL_OK;
}
// Syscall stub: would block until the given child process changes state;
// currently a no-op that only logs its arguments.
error_code sys_process_wait_for_child(u32 pid, vm::ptr<u32> status, u64 unk) {
  // Fixed: the log format string was missing its closing parenthesis
  sys_process.todo(
      "sys_process_wait_for_child(pid=0x%x, status=*0x%x, unk=0x%llx)", pid,
      status, unk);
  return CELL_OK;
}
// Syscall stub: extended wait-for-child variant; parameters unknown, no-op.
error_code sys_process_wait_for_child2(u64 unk1, u64 unk2, u64 unk3, u64 unk4,
                                       u64 unk5, u64 unk6) {
  sys_process.todo("sys_process_wait_for_child2(unk1=0x%llx, unk2=0x%llx, "
                   "unk3=0x%llx, unk4=0x%llx, unk5=0x%llx, unk6=0x%llx)",
                   unk1, unk2, unk3, unk4, unk5, unk6);
  return CELL_OK;
}
// Syscall stub: would report process status; currently a no-op.
error_code sys_process_get_status(u64 unk) {
  sys_process.todo("sys_process_get_status(unk=0x%llx)", unk);
  // vm::write32(CPU.gpr[4], GetPPUThreadStatus(CPU));
  return CELL_OK;
}
// Syscall stub: would detach a child process; currently a no-op.
error_code sys_process_detach_child(u64 unk) {
  sys_process.todo("sys_process_detach_child(unk=0x%llx)", unk);
  return CELL_OK;
}
extern void signal_system_cache_can_stay();
// Syscall: terminate the emulated process.
// Schedules emulator shutdown on the main/GUI thread, then parks the calling
// PPU thread until the emulator actually stops it.
void _sys_process_exit(ppu_thread &ppu, s32 status, u32 arg2, u32 arg3) {
  ppu.state += cpu_flag::wait;
  sys_process.warning("_sys_process_exit(status=%d, arg2=0x%x, arg3=0x%x)",
                      status, arg2, arg3);
  Emu.CallFromMainThread([]() {
    sys_process.success("Process finished");
    signal_system_cache_can_stay();
    Emu.Kill();
  });
  // Wait for GUI thread
  while (auto state = +ppu.state) {
    if (is_stopped(state)) {
      break;
    }
    ppu.state.wait(state);
  }
}
// Syscall: exit-and-spawn. Parses the argv/envp string arrays from the guest
// parameter block, captures trailing payload data, and either performs an
// exitspawn into the named executable or a plain exit when argv is empty.
void _sys_process_exit2(ppu_thread &ppu, s32 status,
                        vm::ptr<sys_exit2_param> arg, u32 arg_size, u32 arg4) {
  ppu.state += cpu_flag::wait;
  sys_process.warning(
      "_sys_process_exit2(status=%d, arg=*0x%x, arg_size=0x%x, arg4=0x%x)",
      status, arg, arg_size, arg4);
  auto pstr = +arg->args;
  std::vector<std::string> argv;
  std::vector<std::string> envp;
  // args holds two consecutive null-terminated arrays: argv then envp
  while (auto ptr = *pstr++) {
    argv.emplace_back(ptr.get_ptr());
    sys_process.notice(" *** arg: %s", ptr);
  }
  while (auto ptr = *pstr++) {
    envp.emplace_back(ptr.get_ptr());
    sys_process.notice(" *** env: %s", ptr);
  }
  std::vector<u8> data;
  // Oversized parameter blocks carry an extra 0x1000-byte payload at the end
  // (threshold 0x1030 — assumed from firmware behavior; TODO confirm)
  if (arg_size > 0x1030) {
    data.resize(0x1000);
    std::memcpy(data.data(), vm::base(arg.addr() + arg_size - 0x1000), 0x1000);
  }
  if (argv.empty()) {
    return _sys_process_exit(ppu, status, 0, 0);
  }
  // TODO: set prio, flags
  lv2_exitspawn(ppu, argv, envp, data);
}
// Restart the emulator into a new guest executable (exitspawn semantics):
// captures argv/envp/payload and key emulator state, snapshots the LV2 memory
// containers (unless this is a real reboot), kills the current emulation and
// re-boots the target executable from the after-kill callback.
void lv2_exitspawn(ppu_thread &ppu, std::vector<std::string> &argv,
                   std::vector<std::string> &envp, std::vector<u8> &data) {
  ppu.state += cpu_flag::wait;
  // sys_sm_shutdown
  const bool is_real_reboot = (ppu.gpr[11] == 379);
  Emu.CallFromMainThread([is_real_reboot, argv = std::move(argv),
                          envp = std::move(envp),
                          data = std::move(data)]() mutable {
    sys_process.success("Process finished -> %s", argv[0]);
    // Resolve paths that must survive the kill: disc root, boot target, hdd1
    std::string disc;
    if (Emu.GetCat() == "DG" || Emu.GetCat() == "GD")
      disc = vfs::get("/dev_bdvd/");
    if (disc.empty() && !Emu.GetTitleID().empty())
      disc = vfs::get(Emu.GetDir());
    std::string path = vfs::get(argv[0]);
    std::string hdd1 = vfs::get("/dev_hdd1/");
    const u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
    using namespace id_manager;
    // Snapshot the LV2 memory-container ID map so it survives the restart
    shared_ptr<utils::serial> idm_capture = make_shared<utils::serial>();
    if (!is_real_reboot) {
      reader_lock rlock{id_manager::g_mutex};
      g_fxo->get<id_map<lv2_memory_container>>().save(*idm_capture);
      stx::serial_breathe_and_tag(*idm_capture, "id_map<lv2_memory_container>",
                                  false);
    }
    idm_capture->set_reading_state();
    // Deferred container re-initialization, run after the new boot starts
    auto func = [is_real_reboot,
                 old_size = g_fxo->get<lv2_memory_container>().size,
                 idm_capture](u32 sdk_suggested_mem) mutable {
      if (is_real_reboot) {
        // Do not save containers on actual reboot
        ensure(g_fxo->init<id_map<lv2_memory_container>>());
      } else {
        // Save LV2 memory containers
        ensure(g_fxo->init<id_map<lv2_memory_container>>(*idm_capture));
      }
      // Empty the containers, accumulate their total size
      u32 total_size = 0;
      idm::select<lv2_memory_container>([&](u32, lv2_memory_container &ctr) {
        ctr.used = 0;
        total_size += ctr.size;
      });
      // The default memory container capacity can only decrease after exitspawn
      // 1. If newer SDK version suggests higher memory capacity - it is ignored
      // 2. If newer SDK version suggests lower memory capacity - it is lowered
      // And if 2. happens while user memory containers exist, the left space
      // can be spent on user memory containers
      ensure(g_fxo->init<lv2_memory_container>(
          std::min(old_size - total_size, sdk_suggested_mem) + total_size));
    };
    // Runs once the current emulation is fully killed: install the captured
    // state into Emu and boot the new executable
    Emu.after_kill_callback = [func = std::move(func), argv = std::move(argv),
                               envp = std::move(envp), data = std::move(data),
                               disc = std::move(disc), path = std::move(path),
                               hdd1 = std::move(hdd1),
                               old_config = Emu.GetUsedConfig(),
                               klic]() mutable {
      Emu.argv = std::move(argv);
      Emu.envp = std::move(envp);
      Emu.data = std::move(data);
      Emu.disc = std::move(disc);
      Emu.hdd1 = std::move(hdd1);
      Emu.init_mem_containers = std::move(func);
      if (klic) {
        Emu.klic.emplace_back(klic);
      }
      Emu.SetForceBoot(true);
      auto res = Emu.BootGame(path, "", true, cfg_mode::continuous, old_config);
      if (res != game_boot_result::no_errors) {
        sys_process.fatal(
            "Failed to boot from exitspawn! (path=\"%s\", error=%s)", path,
            res);
      }
    };
    signal_system_cache_can_stay();
    // Make sure we keep the game window opened
    Emu.SetContinuousMode(true);
    Emu.Kill(false);
  });
  // Wait for GUI thread
  while (auto state = +ppu.state) {
    if (is_stopped(state)) {
      break;
    }
    ppu.state.wait(state);
  }
}
// Syscall: terminate the process (variant 3); forwards to the common exit
// path with zeroed extra arguments.
void sys_process_exit3(ppu_thread &ppu, s32 status) {
  ppu.state += cpu_flag::wait;
  // Fixed: the log message previously said "_sys_process_exit3", which does
  // not match this syscall's actual name.
  sys_process.warning("sys_process_exit3(status=%d)", status);
  return _sys_process_exit(ppu, status, 0, 0);
}
// Syscall stub: would spawn a new process from a SELF; currently a no-op
// that only logs its arguments.
error_code sys_process_spawns_a_self2(vm::ptr<u32> pid, u32 primary_prio,
                                      u64 flags, vm::ptr<void> stack,
                                      u32 stack_size, u32 mem_id,
                                      vm::ptr<void> param_sfo,
                                      vm::ptr<void> dbg_data) {
  // Fixed: the log format string was missing its closing parenthesis
  sys_process.todo("sys_process_spawns_a_self2(pid=*0x%x, primary_prio=0x%x, "
                   "flags=0x%llx, stack=*0x%x, stack_size=0x%x, mem_id=0x%x, "
                   "param_sfo=*0x%x, dbg_data=*0x%x)",
                   pid, primary_prio, flags, stack, stack_size, mem_id,
                   param_sfo, dbg_data);
  return CELL_OK;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,989 @@
#include "stdafx.h"
#include "sys_rsx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/Memory/vm_locking.h"
#include "Emu/RSX/Core/RSXEngLock.hpp"
#include "Emu/RSX/Core/RSXReservationLock.hpp"
#include "Emu/RSX/RSXThread.h"
#include "Emu/System.h"
#include "sys_event.h"
#include "sys_vm.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_rsx);
// Unknown error code returned by sys_rsx_context_attribute
// (negative value observed from firmware; exact meaning undocumented)
enum sys_rsx_error : s32 { SYS_RSX_CONTEXT_ATTRIBUTE_ERROR = -17 };
// Formatter specialization so sys_rsx_error values print by name in log
// output (unknown values fall back to the generic representation).
template <>
void fmt_class_string<sys_rsx_error>::format(std::string &out, u64 arg) {
  format_enum(out, arg, [](auto error) {
    switch (error) { STR_CASE(SYS_RSX_CONTEXT_ATTRIBUTE_ERROR); }
    return unknown;
  });
}
// Current RSX timestamp, expressed in the emulated timebase.
static u64 rsx_timeStamp() {
  return get_timebased_time();
}
// Update the RSX DMA control (GET/PUT) register pair.
// Publishes the new value through render->new_get_put (umax acts as the
// "empty" sentinel), aborts the FIFO, and waits for the RSX thread to consume
// the store — with cpu_flag::again retry handling if emulation is exiting.
static void set_rsx_dmactl(rsx::thread *render, u64 get_put) {
  {
    rsx::eng_lock rlock(render);
    render->fifo_ctrl->abort();
    // Unconditional set
    while (!render->new_get_put.compare_and_swap_test(u64{umax}, get_put)) {
      // Wait for the first store to complete (or be aborted)
      if (auto cpu = cpu_thread::get_current()) {
        if (cpu->state & cpu_flag::exit) {
          // Retry
          cpu->state += cpu_flag::again;
          return;
        }
      }
      utils::pause();
    }
    // Schedule FIFO interrupt to deal with this immediately
    render->m_eng_interrupt_mask |= rsx::dma_control_interrupt;
  }
  if (auto cpu = cpu_thread::get_current()) {
    // Wait for the first store to complete (or be aborted)
    while (render->new_get_put != usz{umax}) {
      if (cpu->state & cpu_flag::exit) {
        // If our own value is still pending, take it back and retry later
        if (render->new_get_put.compare_and_swap_test(get_put, umax)) {
          // Retry
          cpu->state += cpu_flag::again;
          return;
        }
      }
      thread_ctrl::wait_for(1000);
    }
  }
}
// Deliver an RSX event to the gcm handler queue. Low 32 bits of event_flags
// are filtered by the handler mask published in RsxDriverInfo; the upper 32
// bits (reserved for unmapped-io events) are always delivered. Returns false
// only when the send aborted (CELL_EAGAIN) and the flags were deferred into
// unsent_gcm_events for later delivery.
bool rsx::thread::send_event(u64 data1, u64 event_flags, u64 data3) {
  // Filter event bits, send them only if they are masked by gcm
  // Except the upper 32-bits, they are reserved for unmapped io events and
  // execute unconditionally
  event_flags &=
      vm::_ref<RsxDriverInfo>(driver_info).handlers | 0xffff'ffffull << 32;

  if (!event_flags) {
    // Nothing to do
    return true;
  }

  auto error = sys_event_port_send(rsx_event_port, data1, event_flags, data3);

  // Queue is full: keep retrying until the consumer drains it or emulation
  // stops
  while (error + 0u == CELL_EBUSY) {
    auto cpu = get_current_cpu_thread();

    if (cpu && cpu->get_class() == thread_class::ppu) {
      // Deschedule
      lv2_obj::sleep(*cpu, 100);
    }

    // Wait a bit before resending event
    thread_ctrl::wait_for(100);

    if (cpu && cpu->get_class() == thread_class::rsx)
      cpu->cpu_wait({});

    if (Emu.IsStopped() || (cpu && cpu->check_state())) {
      error = 0;
      break;
    }

    error = sys_event_port_send(rsx_event_port, data1, event_flags, data3);
  }

  if (error + 0u == CELL_EAGAIN) {
    // Thread has aborted when sending event (VBLANK duplicates are allowed)
    ensure((unsent_gcm_events.fetch_or(event_flags) & event_flags &
            ~(SYS_RSX_EVENT_VBLANK | SYS_RSX_EVENT_SECOND_VBLANK_BASE |
              SYS_RSX_EVENT_SECOND_VBLANK_BASE * 2)) == 0);
    return false;
  }

  if (error && error + 0u != CELL_ENOTCONN) {
    fmt::throw_exception(
        "rsx::thread::send_event() Failed to send event! (error=%x)", +error);
  }

  return true;
}
// lv2 SysCall: sys_rsx_device_open (stub — only logs the call).
error_code sys_rsx_device_open(cpu_thread &cpu) {
  cpu.state += cpu_flag::wait;

  sys_rsx.todo("sys_rsx_device_open()");

  return CELL_OK;
}
// lv2 SysCall: sys_rsx_device_close (stub — only logs the call).
error_code sys_rsx_device_close(cpu_thread &cpu) {
  cpu.state += cpu_flag::wait;

  sys_rsx.todo("sys_rsx_device_close()");

  return CELL_OK;
}
/**
* lv2 SysCall 668 (0x29C): sys_rsx_memory_allocate
* @param mem_handle (OUT): Context / ID, which is used by sys_rsx_memory_free
* to free allocated memory.
* @param mem_addr (OUT): Returns the local memory base address, usually
* 0xC0000000.
* @param size (IN): Local memory size. E.g. 0x0F900000 (249 MB). (changes with
* sdk version)
* @param flags (IN): E.g. Immediate value passed in cellGcmSys is 8.
* @param a5 (IN): E.g. Immediate value passed in cellGcmSys is 0x00300000 (3
* MB?).
* @param a6 (IN): E.g. Immediate value passed in cellGcmSys is 16.
* @param a7 (IN): E.g. Immediate value passed in cellGcmSys is 8.
*/
error_code sys_rsx_memory_allocate(cpu_thread &cpu, vm::ptr<u32> mem_handle,
                                   vm::ptr<u64> mem_addr, u32 size, u64 flags,
                                   u64 a5, u64 a6, u64 a7) {
  cpu.state += cpu_flag::wait;

  sys_rsx.warning("sys_rsx_memory_allocate(mem_handle=*0x%x, mem_addr=*0x%x, "
                  "size=0x%x, flags=0x%llx, a5=0x%llx, a6=0x%llx, a7=0x%llx)",
                  mem_handle, mem_addr, size, flags, a5, a6, a7);

  // Back the guest local memory region with emulated video memory
  if (vm::falloc(rsx::constants::local_mem_base, size, vm::video)) {
    rsx::get_current_renderer()->local_mem_size = size;

    // If a context already published driver info, keep its memory size in sync
    if (u32 addr = rsx::get_current_renderer()->driver_info) {
      vm::_ref<RsxDriverInfo>(addr).memory_size = size;
    }

    *mem_addr = rsx::constants::local_mem_base;
    *mem_handle = 0x5a5a5a5b; // Arbitrary non-zero handle value
    return CELL_OK;
  }

  return CELL_ENOMEM;
}
/**
* lv2 SysCall 669 (0x29D): sys_rsx_memory_free
* @param mem_handle (OUT): Context / ID, for allocated local memory generated
* by sys_rsx_memory_allocate
*/
error_code sys_rsx_memory_free(cpu_thread &cpu, u32 mem_handle) {
  cpu.state += cpu_flag::wait;

  sys_rsx.warning("sys_rsx_memory_free(mem_handle=0x%x)", mem_handle);

  const u32 local_base = rsx::constants::local_mem_base;

  // Nothing is currently mapped at the local memory base
  if (!vm::check_addr(local_base)) {
    return CELL_ENOMEM;
  }

  // Releasing local memory while an RSX context is alive is a fatal misuse
  if (rsx::get_current_renderer()->dma_address) {
    fmt::throw_exception("Attempting to dealloc rsx memory when the context is "
                         "still being used");
  }

  // Unmap the local memory block reserved by sys_rsx_memory_allocate
  if (!vm::dealloc(local_base)) {
    return CELL_ENOMEM;
  }

  return CELL_OK;
}
/**
* lv2 SysCall 670 (0x29E): sys_rsx_context_allocate
* @param context_id (OUT): RSX context, E.g. 0x55555555 (in vsh.self)
* @param lpar_dma_control (OUT): Control register area. E.g. 0x60100000 (in
* vsh.self)
* @param lpar_driver_info (OUT): RSX data like frequencies, sizes, version...
* E.g. 0x60200000 (in vsh.self)
* @param lpar_reports (OUT): Report data area. E.g. 0x60300000 (in vsh.self)
* @param mem_ctx (IN): mem_ctx given by sys_rsx_memory_allocate
* @param system_mode (IN):
*/
error_code sys_rsx_context_allocate(cpu_thread &cpu, vm::ptr<u32> context_id,
                                    vm::ptr<u64> lpar_dma_control,
                                    vm::ptr<u64> lpar_driver_info,
                                    vm::ptr<u64> lpar_reports, u64 mem_ctx,
                                    u64 system_mode) {
  cpu.state += cpu_flag::wait;

  sys_rsx.warning("sys_rsx_context_allocate(context_id=*0x%x, "
                  "lpar_dma_control=*0x%x, lpar_driver_info=*0x%x, "
                  "lpar_reports=*0x%x, mem_ctx=0x%llx, system_mode=0x%llx)",
                  context_id, lpar_dma_control, lpar_driver_info, lpar_reports,
                  mem_ctx, system_mode);

  // Local memory must have been reserved first (sys_rsx_memory_allocate)
  if (!vm::check_addr(rsx::constants::local_mem_base)) {
    return CELL_EINVAL;
  }

  const auto render = rsx::get_current_renderer();

  std::lock_guard lock(render->sys_rsx_mtx);

  if (render->dma_address) {
    // We currently do not support multiple contexts
    fmt::throw_exception("sys_rsx_context_allocate was called twice");
  }

  // Reserve 3MB: DMA control (+0), driver info (+0x100000), reports (+0x200000)
  const auto area = vm::reserve_map(vm::rsx_context, 0, 0x10000000, 0x403);
  const u32 dma_address = area ? area->alloc(0x300000) : 0;

  if (!dma_address) {
    return CELL_ENOMEM;
  }

  sys_rsx.warning("sys_rsx_context_allocate(): Mapped address 0x%x",
                  dma_address);

  *lpar_dma_control = dma_address;
  *lpar_driver_info = dma_address + 0x100000;
  *lpar_reports = dma_address + 0x200000;

  auto &reports = vm::_ref<RsxReports>(vm::cast(*lpar_reports));
  std::memset(&reports, 0, sizeof(RsxReports));

  for (usz i = 0; i < std::size(reports.notify); ++i)
    reports.notify[i].timestamp = -1;

  // Semaphores are seeded in groups of four with these magic initial values
  for (usz i = 0; i < std::size(reports.semaphore); i += 4) {
    reports.semaphore[i + 0].val.raw() = 0x1337C0D3;
    reports.semaphore[i + 1].val.raw() = 0x1337BABE;
    reports.semaphore[i + 2].val.raw() = 0x1337BEEF;
    reports.semaphore[i + 3].val.raw() = 0x1337F001;
  }

  for (usz i = 0; i < std::size(reports.report); ++i) {
    reports.report[i].val = 0;
    reports.report[i].timestamp = -1;
    reports.report[i].pad = -1;
  }

  auto &driverInfo = vm::_ref<RsxDriverInfo>(vm::cast(*lpar_driver_info));
  std::memset(&driverInfo, 0, sizeof(RsxDriverInfo));

  driverInfo.version_driver = 0x211;
  driverInfo.version_gpu = 0x5c;
  driverInfo.memory_size = render->local_mem_size;
  driverInfo.nvcore_frequency = 500000000; // 0x1DCD6500
  driverInfo.memory_frequency = 650000000; // 0x26BE3680
  driverInfo.reportsNotifyOffset = 0x1000;
  driverInfo.reportsOffset = 0;
  driverInfo.reportsReportOffset = 0x1400;
  driverInfo.systemModeFlags = static_cast<u32>(system_mode);
  driverInfo.hardware_channel = 1; // * i think* this 1 for games, 0 for vsh

  render->driver_info = vm::cast(*lpar_driver_info);

  auto &dmaControl = vm::_ref<RsxDmaControl>(vm::cast(*lpar_dma_control));
  dmaControl.get = 0;
  dmaControl.put = 0;
  dmaControl.ref = 0; // Set later to -1 by cellGcmSys

  // Debug-console (DECR) layout exposes 512MB of main memory to RSX
  if ((false /*system_mode & something*/ || g_cfg.video.decr_memory_layout) &&
      g_cfg.core.debug_console_mode)
    rsx::get_current_renderer()->main_mem_size = 0x20000000; // 512MB
  else
    rsx::get_current_renderer()->main_mem_size = 0x10000000; // 256MB

  vm::var<sys_event_queue_attribute_t, vm::page_allocator<>> attr;
  attr->protocol = SYS_SYNC_PRIORITY;
  attr->type = SYS_PPU_QUEUE;
  attr->name_u64 = 0;

  // Create and connect the event port/queue pair used to deliver RSX events
  // (flip, vblank, user command) to the guest handler
  sys_event_port_create(cpu, vm::get_addr(&driverInfo.handler_queue),
                        SYS_EVENT_PORT_LOCAL, 0);
  render->rsx_event_port = driverInfo.handler_queue;
  sys_event_queue_create(cpu, vm::get_addr(&driverInfo.handler_queue), attr, 0,
                         0x20);
  sys_event_port_connect_local(cpu, render->rsx_event_port,
                               driverInfo.handler_queue);

  render->display_buffers_count = 0;
  render->current_display_buffer = 0;
  render->label_addr = vm::cast(*lpar_reports);
  render->init(dma_address);

  *context_id = 0x55555555; // Fixed context id (matches the value in vsh.self)
  return CELL_OK;
}
/**
* lv2 SysCall 671 (0x29F): sys_rsx_context_free
* @param context_id (IN): RSX context generated by sys_rsx_context_allocate to
* free the context.
*/
error_code sys_rsx_context_free(ppu_thread &ppu, u32 context_id) {
  ppu.state += cpu_flag::wait;

  sys_rsx.todo("sys_rsx_context_free(context_id=0x%x)", context_id);

  const auto render = rsx::get_current_renderer();

  rsx::eng_lock fifo_lock(render);
  std::scoped_lock lock(render->sys_rsx_mtx);

  const u32 dma_address = render->dma_address;
  render->dma_address = 0;

  // Reject unknown ids, double-free, or a context that is already shutting
  // down
  if (context_id != 0x55555555 || !dma_address ||
      render->state & cpu_flag::ret) {
    return CELL_EINVAL;
  }

  g_fxo->get<rsx::vblank_thread>() = thread_state::finished;

  const u32 queue_id =
      vm::_ptr<RsxDriverInfo>(render->driver_info)->handler_queue;

  // Ask the RSX thread to return and wait for it to acknowledge
  render->state += cpu_flag::ret;
  while (render->state & cpu_flag::ret) {
    thread_ctrl::wait_for(1000);
  }

  // Tear down the event port/queue pair created by sys_rsx_context_allocate
  sys_event_port_disconnect(ppu, render->rsx_event_port);
  sys_event_port_destroy(ppu, render->rsx_event_port);
  sys_event_queue_destroy(ppu, queue_id, SYS_EVENT_QUEUE_DESTROY_FORCE);

  // Reset all per-context renderer state
  render->label_addr = 0;
  render->driver_info = 0;
  render->main_mem_size = 0;
  render->rsx_event_port = 0;
  render->display_buffers_count = 0;
  render->current_display_buffer = 0;
  render->ctrl = nullptr;
  render->rsx_thread_running = false;
  render->serialized = false;

  ensure(vm::get(vm::rsx_context)->dealloc(dma_address));
  return CELL_OK;
}
/**
* lv2 SysCall 672 (0x2A0): sys_rsx_context_iomap
* @param context_id (IN): RSX context, E.g. 0x55555555 (in vsh.self)
* @param io (IN): IO offset mapping area. E.g. 0x00600000
* @param ea (IN): Start address of mapping area. E.g. 0x20400000
* @param size (IN): Size of mapping area in bytes. E.g. 0x00200000
* @param flags (IN):
*/
error_code sys_rsx_context_iomap(cpu_thread &cpu, u32 context_id, u32 io,
                                 u32 ea, u32 size, u64 flags) {
  cpu.state += cpu_flag::wait;

  sys_rsx.warning("sys_rsx_context_iomap(context_id=0x%x, io=0x%x, ea=0x%x, "
                  "size=0x%x, flags=0x%llx)",
                  context_id, io, ea, size, flags);

  const auto render = rsx::get_current_renderer();

  // io/ea/size must be 1MB-aligned; the ea range must lie below local memory
  // base and the io range must fit within RSX-visible main memory
  if (!size || io & 0xFFFFF ||
      ea + u64{size} > rsx::constants::local_mem_base || ea & 0xFFFFF ||
      size & 0xFFFFF || context_id != 0x55555555 ||
      render->main_mem_size < io + u64{size}) {
    return CELL_EINVAL;
  }

  if (!render->is_fifo_idle()) {
    sys_rsx.warning(
        "sys_rsx_context_iomap(): RSX is not idle while mapping io");
  }

  // Wait until we have no active RSX locks and reserve iomap for use. Must do
  // so before acquiring vm lock to avoid deadlocks
  rsx::reservation_lock<true> rsx_lock(ea, size);
  vm::writer_lock rlock;

  // Validate every 1MB page of the effective-address range
  for (u32 addr = ea, end = ea + size; addr < end; addr += 0x100000) {
    if (!vm::check_addr(addr, vm::page_readable |
                                  (addr < 0x20000000 ? 0 : vm::page_1m_size))) {
      return CELL_EINVAL;
    }

    if ((addr == ea || !(addr % 0x1000'0000)) &&
        idm::check_unlocked<sys_vm_t>(sys_vm_t::find_id(addr))) {
      // Virtual memory is disallowed
      return CELL_EINVAL;
    }
  }

  // Work in 1MB page units from here on
  io >>= 20, ea >>= 20, size >>= 20;

  rsx::eng_lock fifo_lock(render);
  std::scoped_lock lock(render->sys_rsx_mtx);

  // Update both directions of the io<->ea translation table
  for (u32 i = 0; i < size; i++) {
    auto &table = render->iomap_table;

    // TODO: Investigate relaxed memory ordering
    const u32 prev_ea = table.ea[io + i];
    table.ea[io + i].release((ea + i) << 20);
    if (prev_ea + 1)
      table.io[prev_ea >> 20].release(-1); // Clear previous mapping if exists
    table.io[ea + i].release((io + i) << 20);
  }

  return CELL_OK;
}
/**
* lv2 SysCall 673 (0x2A1): sys_rsx_context_iounmap
* @param context_id (IN): RSX context, E.g. 0x55555555 (in vsh.self)
* @param io (IN): IO address. E.g. 0x00600000 (Start page 6)
* @param size (IN): Size to unmap in byte. E.g. 0x00200000
*/
error_code sys_rsx_context_iounmap(cpu_thread &cpu, u32 context_id, u32 io,
                                   u32 size) {
  cpu.state += cpu_flag::wait;

  sys_rsx.warning(
      "sys_rsx_context_iounmap(context_id=0x%x, io=0x%x, size=0x%x)",
      context_id, io, size);

  const auto render = rsx::get_current_renderer();

  // io and size must be 1MB-aligned and the range must fit in RSX-visible
  // main memory
  if (!size || size & 0xFFFFF || io & 0xFFFFF || context_id != 0x55555555 ||
      render->main_mem_size < io + u64{size}) {
    return CELL_EINVAL;
  }

  if (!render->is_fifo_idle()) {
    sys_rsx.warning(
        "sys_rsx_context_iounmap(): RSX is not idle while unmapping io");
  }

  vm::writer_lock rlock;
  std::scoped_lock lock(render->sys_rsx_mtx);

  // Clear both directions of the io<->ea translation table (1MB page units)
  for (const u32 end = (io >>= 20) + (size >>= 20); io < end;) {
    auto &table = render->iomap_table;

    const u32 ea_entry = table.ea[io];
    table.ea[io++].release(-1);
    if (ea_entry + 1)
      table.io[ea_entry >> 20].release(-1);
  }

  return CELL_OK;
}
/**
* lv2 SysCall 674 (0x2A2): sys_rsx_context_attribute
* @param context_id (IN): RSX context, e.g. 0x55555555
* @param package_id (IN):
* @param a3 (IN):
* @param a4 (IN):
* @param a5 (IN):
* @param a6 (IN):
*/
error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3,
                                     u64 a4, u64 a5, u64 a6) {
  if (auto cpu = get_current_cpu_thread()) {
    cpu->state += cpu_flag::wait;
  }

  // Flip/queue/reset flip/flip event/user command/vblank as trace to help with
  // log spam
  const bool trace_log =
      (package_id == 0x102 || package_id == 0x103 || package_id == 0x10a ||
       package_id == 0xFEC || package_id == 0xFED || package_id == 0xFEF);

  (trace_log ? sys_rsx.trace : sys_rsx.warning)(
      "sys_rsx_context_attribute(context_id=0x%x, package_id=0x%x, a3=0x%llx, "
      "a4=0x%llx, a5=0x%llx, a6=0x%llx)",
      context_id, package_id, a3, a4, a5, a6);

  // todo: these event ports probly 'shouldnt' be here as i think its supposed
  // to be interrupts that are sent from rsx somewhere in lv1
  const auto render = rsx::get_current_renderer();

  if (!render->dma_address) {
    return {CELL_EINVAL, "dma_address is 0"};
  }

  if (context_id != 0x55555555) {
    return {CELL_EINVAL, "context_id is 0x%x", context_id};
  }

  auto &driverInfo = vm::_ref<RsxDriverInfo>(render->driver_info);

  // Dispatch on the attribute packet id
  switch (package_id) {
  case 0x001: // FIFO
  {
    // Pack the new GET/PUT into one 64-bit word and hand it to the RSX thread
    const u64 get = static_cast<u32>(a3);
    const u64 put = static_cast<u32>(a4);
    const u64 get_put = put << 32 | get;

    std::lock_guard lock(render->sys_rsx_mtx);
    set_rsx_dmactl(render, get_put);
    break;
  }

  case 0x100: // Display mode set
    break;

  case 0x101: // Display sync set, cellGcmSetFlipMode
    // a4 == 2 is vsync, a4 == 1 is hsync
    render->requested_vsync.store(a4 == 2);
    break;

  case 0x102: // Display flip
  {
    u32 flip_idx = ~0u;

    // high bit signifys grabbing a queued buffer
    // otherwise it contains a display buffer offset
    if ((a4 & 0x80000000) != 0) {
      // NOTE: There currently seem to only be 2 active heads on PS3
      ensure(a3 < 2);

      // last half byte gives buffer, 0xf seems to trigger just last queued
      u8 idx_check = a4 & 0xf;
      if (idx_check > 7)
        flip_idx = driverInfo.head[a3].lastQueuedBufferId;
      else
        flip_idx = idx_check;

      // fyi -- u32 hardware_channel = (a4 >> 8) & 0xFF;

      // sanity check, the head should have a 'queued' buffer on it, and it
      // should have been previously 'queued'
      const u32 sanity_check = 0x40000000 & (1 << flip_idx);
      if ((driverInfo.head[a3].flipFlags & sanity_check) != sanity_check)
        rsx_log.error(
            "Display Flip Queued: Flipping non previously queued buffer 0x%llx",
            a4);
    } else {
      // a4 holds a display buffer offset; find the matching buffer index
      for (u32 i = 0; i < render->display_buffers_count; ++i) {
        if (render->display_buffers[i].offset == a4) {
          flip_idx = i;
          break;
        }
      }
      if (flip_idx == ~0u) {
        rsx_log.error("Display Flip: Couldn't find display buffer offset, "
                      "flipping 0. Offset: 0x%x",
                      a4);
        flip_idx = 0;
      }
    }

    if (!render->request_emu_flip(flip_idx)) {
      // Flip request failed: ask the caller to retry the whole syscall
      if (auto cpu = get_current_cpu_thread()) {
        cpu->state += cpu_flag::exit;
        cpu->state += cpu_flag::again;
      }

      return {};
    }
    break;
  }

  case 0x103: // Display Queue
  {
    // NOTE: There currently seem to only be 2 active heads on PS3
    ensure(a3 < 2);
    driverInfo.head[a3].lastQueuedBufferId = static_cast<u32>(a4);
    driverInfo.head[a3].flipFlags |= 0x40000000 | (1 << a4);

    render->on_frame_end(static_cast<u32>(a4));

    if (!render->send_event(0, SYS_RSX_EVENT_QUEUE_BASE << a3, 0)) {
      break;
    }

    if (g_cfg.video.frame_limit == frame_limit_type::infinite) {
      render->post_vblank_event(get_system_time());
    }
    break;
  }

  case 0x104: // Display buffer
  {
    const u8 id = a3 & 0xFF;
    if (id > 7) {
      return SYS_RSX_CONTEXT_ATTRIBUTE_ERROR;
    }

    std::lock_guard lock(render->sys_rsx_mtx);

    // Note: no error checking is being done
    const u32 width = (a4 >> 32) & 0xFFFFFFFF;
    const u32 height = a4 & 0xFFFFFFFF;
    const u32 pitch = (a5 >> 32) & 0xFFFFFFFF;
    const u32 offset = a5 & 0xFFFFFFFF;

    render->display_buffers[id].width = width;
    render->display_buffers[id].height = height;
    render->display_buffers[id].pitch = pitch;
    render->display_buffers[id].offset = offset;

    render->display_buffers_count =
        std::max<u32>(id + 1, render->display_buffers_count);
    break;
  }

  case 0x105: // destroy buffer?
    break;

  case 0x106: // ? (Used by cellGcmInitPerfMon)
    break;

  case 0x108: // cellGcmSetVBlankFrequency, cellGcmSetSecondVFrequency
    // a4 == 3, CELL_GCM_DISPLAY_FREQUENCY_59_94HZ
    // a4 == 2, CELL_GCM_DISPLAY_FREQUENCY_SCANOUT
    // a4 == 4, CELL_GCM_DISPLAY_FREQUENCY_DISABLE
    if (a5 == 1u) {
      // This function resets vsync state to enabled
      render->requested_vsync = true;

      // TODO: Set vblank frequency
    } else if (ensure(a5 == 2u)) {
      // TODO: Implement its frequency as well
      render->enable_second_vhandler.store(a4 != 4);
    }
    break;

  case 0x10a: // ? Involved in managing flip status through
              // cellGcmResetFlipStatus
  {
    if (a3 > 7) {
      return SYS_RSX_CONTEXT_ATTRIBUTE_ERROR;
    }

    // NOTE: There currently seem to only be 2 active heads on PS3
    ensure(a3 < 2);

    // Atomically apply mask a4 then set bits a5 on the head's flip status
    driverInfo.head[a3].flipFlags.atomic_op([&](be_t<u32> &flipStatus) {
      flipStatus = (flipStatus & static_cast<u32>(a4)) | static_cast<u32>(a5);
    });
    break;
  }

  case 0x10D: // Called by cellGcmInitCursor
    break;

  case 0x300: // Tiles
  {
    // a4 high bits = ret.tile = (location + 1) | (bank << 4) | ((offset /
    // 0x10000) << 16) | (location << 31); a4 low bits = ret.limit = ((offset +
    // size - 1) / 0x10000) << 16 | (location << 31); a5 high bits = ret.pitch =
    // (pitch / 0x100) << 8; a5 low bits = ret.format = base | ((base + ((size -
    // 1) / 0x10000)) << 13) | (comp << 26) | (1 << 30);

    ensure(a3 < std::size(render->tiles));

    if (!render->is_fifo_idle()) {
      sys_rsx.warning(
          "sys_rsx_context_attribute(): RSX is not idle while setting tile");
    }

    auto &tile = render->tiles[a3];

    // Decode the packed tile descriptor fields (see layout comment above)
    const u32 location = ((a4 >> 32) & 0x3) - 1;
    const u32 offset = ((((a4 >> 32) & 0x7FFFFFFF) >> 16) * 0x10000);
    const u32 size = ((((a4 & 0x7FFFFFFF) >> 16) + 1) * 0x10000) - offset;
    const u32 pitch = (((a5 >> 32) & 0xFFFFFFFF) >> 8) * 0x100;
    const u32 comp = ((a5 & 0xFFFFFFFF) >> 26) & 0xF;
    const u32 base = (a5 & 0xFFFFFFFF) & 0x7FF;
    // const u32 bank = (((a4 >> 32) & 0xFFFFFFFF) >> 4) & 0xF;
    const bool bound = ((a4 >> 32) & 0x3) != 0;

    const auto range = utils::address_range::start_length(offset, size);

    if (bound) {
      if (!size || !pitch) {
        return {CELL_EINVAL, "size or pitch are 0 (size=%d, pitch=%d)", size,
                pitch};
      }

      // The tile must fit inside the memory pool it points into
      u32 limit = -1;

      switch (location) {
      case CELL_GCM_LOCATION_MAIN:
        limit = render->main_mem_size;
        break;
      case CELL_GCM_LOCATION_LOCAL:
        limit = render->local_mem_size;
        break;
      default:
        fmt::throw_exception("sys_rsx_context_attribute(): Unexpected location "
                             "value (location=0x%x)",
                             location);
      }

      if (!range.valid() || range.end >= limit) {
        return {CELL_EINVAL, "range invalid (valid=%d, end=%d, limit=%d)",
                range.valid(), range.end, limit};
      }

      // Hardcoded value in gcm
      ensure(a5 & (1 << 30));
    }

    std::lock_guard lock(render->sys_rsx_mtx);

    // When tile is going to be unbound, we can use it as a hint that the
    // address will no longer be used as a surface and can be
    // removed/invalidated Todo: There may be more checks such as
    // format/size/width can could be done
    if (tile.bound && !bound)
      render->notify_tile_unbound(static_cast<u32>(a3));

    if (location == CELL_GCM_LOCATION_MAIN && bound) {
      vm::writer_lock rlock;

      // Every 1MB page of a main-memory tile must have an io mapping
      for (u32 io = (offset >> 20), end = (range.end >> 20); io <= end; io++) {
        if (render->iomap_table.ea[io] == umax) {
          return {CELL_EINVAL, "iomap_table ea is umax"};
        }
      }
    }

    tile.location = location;
    tile.offset = offset;
    tile.size = size;
    tile.pitch = pitch;
    tile.comp = comp;
    tile.base = base;
    tile.bank = base;
    tile.bound = bound;
    break;
  }

  case 0x301: // Depth-buffer (Z-cull)
  {
    // a4 high = region = (1 << 0) | (zFormat << 4) | (aaFormat << 8);
    // a4 low = size = ((width >> 6) << 22) | ((height >> 6) << 6);
    // a5 high = start = cullStart&(~0xFFF);
    // a5 low = offset = offset;
    // a6 high = status0 = (zcullDir << 1) | (zcullFormat << 2) | ((sFunc & 0xF)
    // << 12) | (sRef << 16) | (sMask << 24); a6 low = status1 = (0x2000 << 0) |
    // (0x20 << 16);
    if (a3 >= std::size(render->zculls)) {
      return SYS_RSX_CONTEXT_ATTRIBUTE_ERROR;
    }

    if (!render->is_fifo_idle()) {
      sys_rsx.warning(
          "sys_rsx_context_attribute(): RSX is not idle while setting zcull");
    }

    // Decode the packed zcull descriptor fields (see layout comment above)
    const u32 width = ((a4 & 0xFFFFFFFF) >> 22) << 6;
    const u32 height = ((a4 & 0x0000FFFF) >> 6) << 6;
    const u32 cullStart = (a5 >> 32) & ~0xFFF;
    const u32 offset = (a5 & 0x0FFFFFFF);
    const bool bound = (a6 & 0xFFFFFFFF) != 0;

    if (bound) {
      const auto cull_range =
          utils::address_range::start_length(cullStart, width * height);

      // cullStart is an offset inside ZCULL RAM which is 3MB long, check bounds
      // width and height are not allowed to be zero (checked by range.valid())
      if (!cull_range.valid() || cull_range.end >= 3u << 20 ||
          offset >= render->local_mem_size) {
        return {CELL_EINVAL,
                "cull_range invalid (valid=%d, end=%d, offset=%d, "
                "local_mem_size=%d)",
                cull_range.valid(),
                cull_range.end,
                offset,
                render->local_mem_size};
      }

      if (a5 & 0xF0000000) {
        sys_rsx.warning("sys_rsx_context_attribute(): ZCULL offset greater "
                        "than 256MB (offset=0x%x)",
                        offset);
      }

      // Hardcoded values in gcm
      ensure(a4 & (1ull << 32));
      ensure((a6 & 0xFFFFFFFF) == 0u + ((0x2000 << 0) | (0x20 << 16)));
    }

    std::lock_guard lock(render->sys_rsx_mtx);

    auto &zcull = render->zculls[a3];

    zcull.zFormat = ((a4 >> 32) >> 4) & 0xF;
    zcull.aaFormat = ((a4 >> 32) >> 8) & 0xF;
    zcull.width = width;
    zcull.height = height;
    zcull.cullStart = cullStart;
    zcull.offset = offset;
    zcull.zcullDir = ((a6 >> 32) >> 1) & 0x1;
    zcull.zcullFormat = ((a6 >> 32) >> 2) & 0x3FF;
    zcull.sFunc = ((a6 >> 32) >> 12) & 0xF;
    zcull.sRef = ((a6 >> 32) >> 16) & 0xFF;
    zcull.sMask = ((a6 >> 32) >> 24) & 0xFF;
    zcull.bound = bound;
    break;
  }

  case 0x302: // something with zcull
    break;

  case 0x600: // Framebuffer setup
    break;

  case 0x601: // Framebuffer blit
    break;

  case 0x602: // Framebuffer blit sync
    break;

  case 0x603: // Framebuffer close
    break;

  case 0xFEC: // hack: flip event notification
  {
    // we only ever use head 1 for now
    driverInfo.head[1].flipFlags |= 0x80000000;
    driverInfo.head[1].lastFlipTime =
        rsx_timeStamp(); // should rsxthread set this?
    driverInfo.head[1].flipBufferId = static_cast<u32>(a3);

    // seems gcmSysWaitLabel uses this offset, so lets set it to 0 every flip
    // NOTE: Realhw resets 16 bytes of this semaphore for some reason
    vm::_ref<atomic_t<u128>>(render->label_addr + 0x10).store(u128{});

    render->send_event(0, SYS_RSX_EVENT_FLIP_BASE << 1, 0);
    break;
  }

  case 0xFED: // hack: vblank command
  {
    if (cpu_thread::get_current<ppu_thread>()) {
      // VBLANK/RSX thread only
      return {CELL_EINVAL, "wrong thread"};
    }

    // NOTE: There currently seem to only be 2 active heads on PS3
    ensure(a3 < 2);

    // todo: this is wrong and should be 'second' vblank handler and freq, but
    // since currently everything is reported as being 59.94, this should be
    // fine
    driverInfo.head[a3].lastSecondVTime.atomic_op([&](be_t<u64> &time) {
      a4 = std::max<u64>(a4, time + 1);
      time = a4;
    });

    // Time point is supplied in argument 4 (todo: convert it to MFTB rate and
    // use it)
    const u64 current_time = rsx_timeStamp();

    // Note: not atomic
    driverInfo.head[a3].lastVTimeLow = static_cast<u32>(current_time);
    driverInfo.head[a3].lastVTimeHigh = static_cast<u32>(current_time >> 32);

    driverInfo.head[a3].vBlankCount++;

    u64 event_flags = SYS_RSX_EVENT_VBLANK;

    if (render->enable_second_vhandler)
      event_flags |= SYS_RSX_EVENT_SECOND_VBLANK_BASE << a3; // second vhandler

    render->send_event(0, event_flags, 0);
    break;
  }

  case 0xFEF: // hack: user command
  {
    // 'custom' invalid package id for now
    // as i think we need custom lv1 interrupts to handle this accurately
    // this also should probly be set by rsxthread
    driverInfo.userCmdParam = static_cast<u32>(a4);
    render->send_event(0, SYS_RSX_EVENT_USER_CMD, 0);
    break;
  }

  default:
    return {CELL_EINVAL, "unsupported package id %d", package_id};
  }

  return CELL_OK;
}
/**
* lv2 SysCall 675 (0x2A3): sys_rsx_device_map
* @param a1 (OUT): rsx device map address : 0x40000000, 0x50000000.. 0xB0000000
* @param a2 (OUT): Unused
* @param dev_id (IN): An immediate value and always 8. (cellGcmInitPerfMon uses
* 11, 10, 9, 7, 12 successively).
*/
error_code sys_rsx_device_map(cpu_thread &cpu, vm::ptr<u64> dev_addr,
                              vm::ptr<u64> a2, u32 dev_id) {
  cpu.state += cpu_flag::wait;

  sys_rsx.warning("sys_rsx_device_map(dev_addr=*0x%x, a2=*0x%x, dev_id=0x%x)",
                  dev_addr, a2, dev_id);

  if (dev_id != 8) {
    // TODO: lv1 related
    fmt::throw_exception("sys_rsx_device_map: Invalid dev_id %d", dev_id);
  }

  const auto render = rsx::get_current_renderer();

  std::scoped_lock lock(render->sys_rsx_mtx);

  // The first call allocates the device area; later calls return the cached
  // address
  if (!render->device_addr) {
    const auto area = vm::reserve_map(vm::rsx_context, 0, 0x10000000, 0x403);
    const u32 addr = area ? area->alloc(0x100000) : 0;

    if (!addr) {
      return CELL_ENOMEM;
    }

    sys_rsx.warning("sys_rsx_device_map(): Mapped address 0x%x", addr);

    *dev_addr = addr;
    render->device_addr = addr;
    return CELL_OK;
  }

  *dev_addr = render->device_addr;
  return CELL_OK;
}
/**
* lv2 SysCall 676 (0x2A4): sys_rsx_device_unmap
* @param dev_id (IN): An immediate value and always 8.
*/
error_code sys_rsx_device_unmap(cpu_thread &cpu, u32 dev_id) {
  cpu.state += cpu_flag::wait;

  // Stub: the device area allocated by sys_rsx_device_map is not released here
  sys_rsx.todo("sys_rsx_device_unmap(dev_id=0x%x)", dev_id);

  return CELL_OK;
}
/**
* lv2 SysCall 677 (0x2A5): sys_rsx_attribute
*/
error_code sys_rsx_attribute(cpu_thread &cpu, u32 packageId, u32 a2, u32 a3,
                             u32 a4, u32 a5) {
  cpu.state += cpu_flag::wait;

  // Stub: arguments are logged but no attribute is applied
  sys_rsx.warning(
      "sys_rsx_attribute(packageId=0x%x, a2=0x%x, a3=0x%x, a4=0x%x, a5=0x%x)",
      packageId, a2, a3, a4, a5);

  return CELL_OK;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,496 @@
#include "stdafx.h"
#include "sys_rwlock.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_rwlock);
// Deserialization constructor: restores protocol/key/name from the stream,
// then the owner state (must mirror the field order written by save()).
lv2_rwlock::lv2_rwlock(utils::serial &ar) : protocol(ar), key(ar), name(ar) {
  ar(owner);
}
// Savestate hook: reconstruct a saved rwlock from the serialization stream.
std::function<void(void *)> lv2_rwlock::load(utils::serial &ar) {
  return load_func(make_shared<lv2_rwlock>(exact_t<utils::serial &>(ar)));
}
// Savestate hook: serialize this rwlock (field order must match the
// deserialization constructor above).
void lv2_rwlock::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_sync);
  ar(protocol, key, name, owner);
}
// lv2 SysCall: create a reader/writer lock and return its id in *rw_lock_id.
error_code sys_rwlock_create(ppu_thread &ppu, vm::ptr<u32> rw_lock_id,
                             vm::ptr<sys_rwlock_attribute_t> attr) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.warning("sys_rwlock_create(rw_lock_id=*0x%x, attr=*0x%x)",
                     rw_lock_id, attr);

  if (!rw_lock_id || !attr) {
    return CELL_EFAULT;
  }

  const auto _attr = *attr;

  // Only FIFO and priority wakeup orders are valid for rwlocks
  const u32 protocol = _attr.protocol;

  if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_PRIORITY) {
    sys_rwlock.error("sys_rwlock_create(): unknown protocol (0x%x)", protocol);
    return CELL_EINVAL;
  }

  const u64 ipc_key = lv2_obj::get_key(_attr);

  if (auto error =
          lv2_obj::create<lv2_rwlock>(_attr.pshared, ipc_key, _attr.flags, [&] {
            return make_shared<lv2_rwlock>(protocol, ipc_key, _attr.name_u64);
          })) {
    return error;
  }

  ppu.check_state();
  *rw_lock_id = idm::last_id();
  return CELL_OK;
}
// lv2 SysCall: destroy a rwlock; fails with EBUSY while it is still held.
error_code sys_rwlock_destroy(ppu_thread &ppu, u32 rw_lock_id) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.warning("sys_rwlock_destroy(rw_lock_id=0x%x)", rw_lock_id);

  const auto rwlock = idm::withdraw<lv2_obj, lv2_rwlock>(
      rw_lock_id, [](lv2_rwlock &rw) -> CellError {
        // Refuse to destroy a lock still owned by readers or a writer
        if (rw.owner) {
          return CELL_EBUSY;
        }

        lv2_obj::on_id_destroy(rw, rw.key);
        return {};
      });

  if (!rwlock) {
    return CELL_ESRCH;
  }

  if (rwlock.ret) {
    return rwlock.ret;
  }

  return CELL_OK;
}
// lv2 SysCall: acquire a read lock, sleeping up to `timeout` (0 = infinite).
// owner encoding: 0 = free, positive = writer (id << 1), negative = reader
// count in steps of -2; bit 0 set means writers are waiting.
error_code sys_rwlock_rlock(ppu_thread &ppu, u32 rw_lock_id, u64 timeout) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.trace("sys_rwlock_rlock(rw_lock_id=0x%x, timeout=0x%llx)",
                   rw_lock_id, timeout);

  const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(
      rw_lock_id, [&, notify = lv2_obj::notify_all_t()](lv2_rwlock &rwlock) {
        // Fast path: no writer holds the lock and no writer is waiting
        const s64 val = rwlock.owner;

        if (val <= 0 && !(val & 1)) {
          if (rwlock.owner.compare_and_swap_test(val, val - 2)) {
            return true;
          }
        }

        lv2_obj::prepare_for_sleep(ppu);

        std::lock_guard lock(rwlock.mutex);

        // Either add a reader, or set the writer-contention bit
        const s64 _old = rwlock.owner.fetch_op([&](s64 &val) {
          if (val <= 0 && !(val & 1)) {
            val -= 2;
          } else {
            val |= 1;
          }
        });

        if (_old > 0 || _old & 1) {
          // Contended: enqueue on the reader wait queue and go to sleep
          rwlock.sleep(ppu, timeout);
          lv2_obj::emplace(rwlock.rq, &ppu);
          return false;
        }

        return true;
      });

  if (!rwlock) {
    return CELL_ESRCH;
  }

  if (rwlock.ret) {
    return CELL_OK;
  }

  ppu.gpr[3] = CELL_OK;

  // Wait loop: exits on signal (lock granted), emulator stop, or timeout
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }

    if (is_stopped(state)) {
      std::lock_guard lock(rwlock->mutex);

      // Still queued: request the syscall to be restarted after resume
      for (auto cpu = +rwlock->rq; cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }

      break;
    }

    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }

    if (ppu.state & cpu_flag::signal) {
      continue;
    }

    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }

        ppu.state += cpu_flag::wait;

        if (!atomic_storage<ppu_thread *>::load(rwlock->rq)) {
          // Waiters queue is empty, so the thread must have been signaled
          rwlock->mutex.lock_unlock();
          break;
        }

        std::lock_guard lock(rwlock->mutex);

        if (!rwlock->unqueue(rwlock->rq, &ppu)) {
          // Already dequeued by a waker: treat as signaled, not timed out
          break;
        }

        ppu.gpr[3] = CELL_ETIMEDOUT;
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }

  return not_an_error(ppu.gpr[3]);
}
// lv2 SysCall: non-blocking read-lock attempt.
error_code sys_rwlock_tryrlock(ppu_thread &ppu, u32 rw_lock_id) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.trace("sys_rwlock_tryrlock(rw_lock_id=0x%x)", rw_lock_id);

  // Single atomic acquisition attempt: readers are tracked as negative owner
  // values in steps of 2; bit 0 set means a writer is waiting and new readers
  // must not enter.
  const auto rwlock =
      idm::check<lv2_obj, lv2_rwlock>(rw_lock_id, [](lv2_rwlock &rw) {
        const auto [old_owner, acquired] = rw.owner.fetch_op([](s64 &owner) {
          if (owner > 0 || (owner & 1)) {
            // Write-owned or writer pending: leave the value untouched
            return false;
          }

          owner -= 2;
          return true;
        });

        static_cast<void>(old_owner);
        return acquired;
      });

  if (!rwlock) {
    return CELL_ESRCH;
  }

  if (!rwlock.ret) {
    // Could not take a read lock right now
    return not_an_error(CELL_EBUSY);
  }

  return CELL_OK;
}
// lv2 SysCall: release one read lock; when the last reader leaves, hand the
// lock to a waiting writer (or clear it entirely).
error_code sys_rwlock_runlock(ppu_thread &ppu, u32 rw_lock_id) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.trace("sys_rwlock_runlock(rw_lock_id=0x%x)", rw_lock_id);

  const auto rwlock =
      idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [](lv2_rwlock &rwlock) {
        // Fast path: readers present and no writer waiting
        const s64 val = rwlock.owner;

        if (val < 0 && !(val & 1)) {
          if (rwlock.owner.compare_and_swap_test(val, val + 2)) {
            return true;
          }
        }

        return false;
      });

  if (!rwlock) {
    return CELL_ESRCH;
  }

  lv2_obj::notify_all_t notify;

  if (rwlock.ret) {
    return CELL_OK;
  } else {
    std::lock_guard lock(rwlock->mutex);

    // Remove one reader
    const s64 _old = rwlock->owner.fetch_op([](s64 &val) {
      if (val < -1) {
        val += 2;
      }
    });

    if (_old >= 0) {
      // No readers held the lock: the caller does not own a read lock
      return CELL_EPERM;
    }

    if (_old == -1) {
      // Last reader left with writers waiting: wake the next writer
      if (const auto cpu =
              rwlock->schedule<ppu_thread>(rwlock->wq, rwlock->protocol)) {
        if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
          ppu.state += cpu_flag::again;
          return {};
        }

        rwlock->owner = cpu->id << 1 | !!rwlock->wq | !!rwlock->rq;

        rwlock->awake(cpu);
      } else {
        rwlock->owner = 0;

        ensure(!rwlock->rq);
      }
    }
  }

  return CELL_OK;
}
// lv2 SysCall: acquire the write lock, sleeping up to `timeout`
// (0 = infinite). Returns CELL_EDEADLK if the caller already owns it.
error_code sys_rwlock_wlock(ppu_thread &ppu, u32 rw_lock_id, u64 timeout) {
  ppu.state += cpu_flag::wait;

  sys_rwlock.trace("sys_rwlock_wlock(rw_lock_id=0x%x, timeout=0x%llx)",
                   rw_lock_id, timeout);

  const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(
      rw_lock_id,
      [&, notify = lv2_obj::notify_all_t()](lv2_rwlock &rwlock) -> s64 {
        // Fast path: lock is completely free
        const s64 val = rwlock.owner;

        if (val == 0) {
          if (rwlock.owner.compare_and_swap_test(0, ppu.id << 1)) {
            return 0;
          }
        } else if (val >> 1 == ppu.id) {
          // Recursive write attempt: reported as EDEADLK below
          return val;
        }

        lv2_obj::prepare_for_sleep(ppu);

        std::lock_guard lock(rwlock.mutex);

        // Take the lock if free, otherwise set the contention bit
        const s64 _old = rwlock.owner.fetch_op([&](s64 &val) {
          if (val == 0) {
            val = ppu.id << 1;
          } else {
            val |= 1;
          }
        });

        if (_old != 0) {
          // Contended: enqueue on the writer wait queue and go to sleep
          rwlock.sleep(ppu, timeout);
          lv2_obj::emplace(rwlock.wq, &ppu);
        }

        return _old;
      });

  if (!rwlock) {
    return CELL_ESRCH;
  }

  if (rwlock.ret == 0) {
    return CELL_OK;
  }

  if (rwlock.ret >> 1 == ppu.id) {
    return CELL_EDEADLK;
  }

  ppu.gpr[3] = CELL_OK;

  // Wait loop: exits on signal (lock granted), emulator stop, or timeout
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }

    if (is_stopped(state)) {
      std::lock_guard lock(rwlock->mutex);

      // Still queued: request the syscall to be restarted after resume
      for (auto cpu = +rwlock->wq; cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }

      break;
    }

    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }

    if (ppu.state & cpu_flag::signal) {
      continue;
    }

    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }

        std::lock_guard lock(rwlock->mutex);

        if (!rwlock->unqueue(rwlock->wq, &ppu)) {
          // Already dequeued by a waker: treat as signaled, not timed out
          break;
        }

        // If the last waiter quit the writer sleep queue, wake blocked readers
        if (rwlock->rq && !rwlock->wq && rwlock->owner < 0) {
          s64 size = 0;

          // Protocol doesn't matter here since they are all enqueued anyways
          while (auto cpu =
                     rwlock->schedule<ppu_thread>(rwlock->rq, SYS_SYNC_FIFO)) {
            size++;
            rwlock->append(cpu);
          }

          rwlock->owner.atomic_op([&](s64 &owner) {
            owner -= 2 * size; // Add readers to value
            owner &= -2;       // Clear wait bit
          });

          lv2_obj::awake_all();
        } else if (!rwlock->rq && !rwlock->wq) {
          rwlock->owner &= -2;
        }

        ppu.gpr[3] = CELL_ETIMEDOUT;
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }

  return not_an_error(ppu.gpr[3]);
}
// sys_rwlock_trywlock: attempt to take the rwlock for writing without
// blocking. Returns CELL_OK on success, CELL_EDEADLK if the caller already
// holds the write lock, CELL_EBUSY otherwise.
error_code sys_rwlock_trywlock(ppu_thread &ppu, u32 rw_lock_id) {
  ppu.state += cpu_flag::wait;
  sys_rwlock.trace("sys_rwlock_trywlock(rw_lock_id=0x%x)", rw_lock_id);
  const auto result =
      idm::check<lv2_obj, lv2_rwlock>(rw_lock_id, [&](lv2_rwlock &rw) -> s64 {
        const s64 prev = rw.owner;
        if (prev) {
          // Already owned (or has waiters): report the value unchanged
          return prev;
        }
        // Free: try to claim it for this thread (yields the old value)
        return rw.owner.compare_and_swap(0, ppu.id << 1);
      });
  if (!result) {
    return CELL_ESRCH;
  }
  if (result.ret == 0) {
    return CELL_OK;
  }
  // Self-deadlock: the calling thread is already the writer
  if (result.ret >> 1 == ppu.id) {
    return CELL_EDEADLK;
  }
  return not_an_error(CELL_EBUSY);
}
// sys_rwlock_wunlock: release a write lock held by the calling thread.
// If other threads are queued, ownership is handed to the next writer, or
// all pending readers are woken when no writer is waiting.
error_code sys_rwlock_wunlock(ppu_thread &ppu, u32 rw_lock_id) {
  ppu.state += cpu_flag::wait;
  sys_rwlock.trace("sys_rwlock_wunlock(rw_lock_id=0x%x)", rw_lock_id);
  const auto rwlock =
      idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [&](lv2_rwlock &rwlock) {
        const s64 val = rwlock.owner;
        // Return previous value
        return val != ppu.id << 1 ? val : rwlock.owner.compare_and_swap(val, 0);
      });
  if (!rwlock) {
    return CELL_ESRCH;
  }
  if (rwlock.ret >> 1 != ppu.id) {
    // Caller does not hold the write lock
    return CELL_EPERM;
  }
  // Wait bit set: threads are queued, hand the lock over under the mutex
  if (lv2_obj::notify_all_t notify; rwlock.ret & 1) {
    std::lock_guard lock(rwlock->mutex);
    if (auto cpu = rwlock->schedule<ppu_thread>(rwlock->wq, rwlock->protocol)) {
      if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
        // Target thread must re-run (savestate): retry this syscall later
        ppu.state += cpu_flag::again;
        return {};
      }
      // Transfer ownership to the next writer; keep wait bit if queues remain
      rwlock->owner = cpu->id << 1 | !!rwlock->wq | !!rwlock->rq;
      rwlock->awake(cpu);
    } else if (rwlock->rq) {
      for (auto cpu = +rwlock->rq; cpu; cpu = cpu->next_cpu) {
        if (cpu->state & cpu_flag::again) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }
      s64 size = 0;
      // Protocol doesn't matter here since they are all enqueued anyways
      while (auto cpu =
                 rwlock->schedule<ppu_thread>(rwlock->rq, SYS_SYNC_FIFO)) {
        size++;
        rwlock->append(cpu);
      }
      // Negative owner counts the readers now holding the lock (-2 each)
      rwlock->owner.release(-2 * static_cast<s64>(size));
      lv2_obj::awake_all();
    } else {
      rwlock->owner = 0;
    }
  }
  return CELL_OK;
}

View file

@ -0,0 +1,318 @@
#include "stdafx.h"
#include "sys_semaphore.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_semaphore);
// Restore a semaphore from a savestate; field order must match save().
lv2_sema::lv2_sema(utils::serial &ar)
    : protocol(ar), key(ar), name(ar), max(ar) {
  ar(val);
}
// Factory used by the savestate loader to reconstruct a semaphore object.
std::function<void(void *)> lv2_sema::load(utils::serial &ar) {
  return load_func(make_shared<lv2_sema>(exact_t<utils::serial &>(ar)));
}
// Serialize the semaphore for a savestate; a negative counter (sleeping
// waiters) is persisted as 0 because wait queues are not saved.
void lv2_sema::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_sync);
  ar(protocol, key, name, max, std::max<s32>(+val, 0));
}
// sys_semaphore_create: create a counting semaphore.
// Validates 0 <= initial_val <= max_val, max_val > 0 and the sync protocol
// (FIFO or PRIORITY only), then registers the object (optionally IPC-shared).
error_code sys_semaphore_create(ppu_thread &ppu, vm::ptr<u32> sem_id,
                                vm::ptr<sys_semaphore_attribute_t> attr,
                                s32 initial_val, s32 max_val) {
  ppu.state += cpu_flag::wait;
  sys_semaphore.trace("sys_semaphore_create(sem_id=*0x%x, attr=*0x%x, "
                      "initial_val=%d, max_val=%d)",
                      sem_id, attr, initial_val, max_val);
  if (!sem_id || !attr) {
    return CELL_EFAULT;
  }
  if (max_val <= 0 || initial_val > max_val || initial_val < 0) {
    sys_semaphore.error("sys_semaphore_create(): invalid parameters "
                        "(initial_val=%d, max_val=%d)",
                        initial_val, max_val);
    return CELL_EINVAL;
  }
  // Copy the attribute struct out of guest memory before validating it
  const auto _attr = *attr;
  const u32 protocol = _attr.protocol;
  if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_PRIORITY) {
    sys_semaphore.error("sys_semaphore_create(): unknown protocol (0x%x)",
                        protocol);
    return CELL_EINVAL;
  }
  // Non-zero key means the semaphore is shared between processes (IPC)
  const u64 ipc_key = lv2_obj::get_key(_attr);
  if (ipc_key) {
    sys_semaphore.warning("sys_semaphore_create(sem_id=*0x%x, attr=*0x%x, "
                          "initial_val=%d, max_val=%d): IPC=0x%016x",
                          sem_id, attr, initial_val, max_val, ipc_key);
  }
  if (auto error =
          lv2_obj::create<lv2_sema>(_attr.pshared, ipc_key, _attr.flags, [&] {
            return make_shared<lv2_sema>(protocol, ipc_key, _attr.name_u64,
                                         max_val, initial_val);
          })) {
    return error;
  }
  static_cast<void>(ppu.test_stopped());
  *sem_id = idm::last_id();
  return CELL_OK;
}
// sys_semaphore_destroy: remove a semaphore. Fails with EBUSY while any
// thread is sleeping on it (internal counter is negative in that case).
error_code sys_semaphore_destroy(ppu_thread &ppu, u32 sem_id) {
  ppu.state += cpu_flag::wait;
  sys_semaphore.trace("sys_semaphore_destroy(sem_id=0x%x)", sem_id);
  const auto sem =
      idm::withdraw<lv2_obj, lv2_sema>(sem_id, [](lv2_sema &sema) -> CellError {
        if (sema.val < 0) {
          // Waiters present: refuse destruction
          return CELL_EBUSY;
        }
        lv2_obj::on_id_destroy(sema, sema.key);
        return {};
      });
  if (!sem) {
    return CELL_ESRCH;
  }
  if (sem->key) {
    sys_semaphore.warning("sys_semaphore_destroy(sem_id=0x%x): IPC=0x%016x",
                          sem_id, sem->key);
  }
  if (sem.ret) {
    return sem.ret;
  }
  return CELL_OK;
}
// sys_semaphore_wait: acquire (decrement) the semaphore, sleeping up to
// `timeout` microseconds (0 = wait forever). A negative internal counter
// encodes the number of sleeping waiters.
error_code sys_semaphore_wait(ppu_thread &ppu, u32 sem_id, u64 timeout) {
  ppu.state += cpu_flag::wait;
  sys_semaphore.trace("sys_semaphore_wait(sem_id=0x%x, timeout=0x%llx)", sem_id,
                      timeout);
  const auto sem = idm::get<lv2_obj, lv2_sema>(
      sem_id, [&, notify = lv2_obj::notify_all_t()](lv2_sema &sema) {
        const s32 val = sema.val;
        if (val > 0) {
          // Fast path: counter positive, try lock-free decrement
          if (sema.val.compare_and_swap_test(val, val - 1)) {
            return true;
          }
        }
        lv2_obj::prepare_for_sleep(ppu);
        std::lock_guard lock(sema.mutex);
        if (sema.val-- <= 0) {
          // Counter exhausted: enqueue and go to sleep
          sema.sleep(ppu, timeout);
          lv2_obj::emplace(sema.sq, &ppu);
          return false;
        }
        return true;
      });
  if (!sem) {
    return CELL_ESRCH;
  }
  if (sem.ret) {
    // Acquired without sleeping
    return CELL_OK;
  }
  ppu.gpr[3] = CELL_OK;
  // Sleep loop: wait until signaled (posted), stopped, or timed out
  while (auto state = +ppu.state) {
    if (state & cpu_flag::signal &&
        ppu.state.test_and_reset(cpu_flag::signal)) {
      break;
    }
    if (is_stopped(state)) {
      // Emulation stopping/saving: abort the syscall only if still queued
      std::lock_guard lock(sem->mutex);
      for (auto cpu = +sem->sq; cpu; cpu = cpu->next_cpu) {
        if (cpu == &ppu) {
          ppu.state += cpu_flag::again;
          return {};
        }
      }
      break;
    }
    // Brief spin before falling back to a blocking wait
    for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++) {
      busy_wait(500);
    }
    if (ppu.state & cpu_flag::signal) {
      continue;
    }
    if (timeout) {
      if (lv2_obj::wait_timeout(timeout, &ppu)) {
        // Wait for rescheduling
        if (ppu.check_state()) {
          continue;
        }
        ppu.state += cpu_flag::wait;
        std::lock_guard lock(sem->mutex);
        if (!sem->unqueue(sem->sq, &ppu)) {
          // Already dequeued by a poster: the wait succeeded after all
          break;
        }
        // Undo this waiter's decrement; counter must have been negative
        ensure(0 > sem->val.fetch_op([](s32 &val) {
          if (val < 0) {
            val++;
          }
        }));
        ppu.gpr[3] = CELL_ETIMEDOUT;
        break;
      }
    } else {
      ppu.state.wait(state);
    }
  }
  return not_an_error(ppu.gpr[3]);
}
// sys_semaphore_trywait: non-blocking acquire — decrement the counter only
// if it is currently above zero.
error_code sys_semaphore_trywait(ppu_thread &ppu, u32 sem_id) {
  ppu.state += cpu_flag::wait;
  sys_semaphore.trace("sys_semaphore_trywait(sem_id=0x%x)", sem_id);
  const auto result = idm::check<lv2_obj, lv2_sema>(
      sem_id, [](lv2_sema &sema) { return sema.val.try_dec(0); });
  if (!result) {
    return CELL_ESRCH;
  }
  if (result.ret) {
    return CELL_OK;
  }
  // Counter was zero (or below): nothing to take
  return not_an_error(CELL_EBUSY);
}
// sys_semaphore_post: release (increment) the semaphore by `count`, waking
// up to `count` sleeping waiters. Fails with EINVAL for count <= 0 and
// EBUSY when the post would exceed the semaphore's maximum value.
error_code sys_semaphore_post(ppu_thread &ppu, u32 sem_id, s32 count) {
  ppu.state += cpu_flag::wait;
  sys_semaphore.trace("sys_semaphore_post(sem_id=0x%x, count=%d)", sem_id,
                      count);
  const auto sem = idm::get<lv2_obj, lv2_sema>(sem_id, [&](lv2_sema &sema) {
    const s32 val = sema.val;
    // Fast path: no waiters (val >= 0) and room below max
    if (val >= 0 && count > 0 && count <= sema.max - val) {
      if (sema.val.compare_and_swap_test(val, val + count)) {
        return true;
      }
    }
    return false;
  });
  if (!sem) {
    return CELL_ESRCH;
  }
  if (count <= 0) {
    return CELL_EINVAL;
  }
  lv2_obj::notify_all_t notify;
  if (sem.ret) {
    return CELL_OK;
  } else {
    std::lock_guard lock(sem->mutex);
    // Savestate in progress: retry the syscall if any waiter must re-run
    for (auto cpu = +sem->sq; cpu; cpu = cpu->next_cpu) {
      if (static_cast<ppu_thread *>(cpu)->state & cpu_flag::again) {
        ppu.state += cpu_flag::again;
        return {};
      }
    }
    // Unsigned comparison also rejects overflow past max
    const auto [val, ok] = sem->val.fetch_op([&](s32 &val) {
      if (count + 0u <= sem->max + 0u - val) {
        val += count;
        return true;
      }
      return false;
    });
    if (!ok) {
      return not_an_error(CELL_EBUSY);
    }
    // Wake threads
    const s32 to_awake = std::min<s32>(-std::min<s32>(val, 0), count);
    for (s32 i = 0; i < to_awake; i++) {
      sem->append((ensure(sem->schedule<ppu_thread>(sem->sq, sem->protocol))));
    }
    if (to_awake > 0) {
      lv2_obj::awake_all();
    }
  }
  return CELL_OK;
}
// sys_semaphore_get_value: read the current counter. The internal value is
// negative while waiters sleep; guests always observe it clamped at zero.
error_code sys_semaphore_get_value(ppu_thread &ppu, u32 sem_id,
                                   vm::ptr<s32> count) {
  ppu.state += cpu_flag::wait;
  sys_semaphore.trace("sys_semaphore_get_value(sem_id=0x%x, count=*0x%x)",
                      sem_id, count);
  // Sample the counter under idm; clamp negatives (waiters) to zero
  const auto result = idm::check<lv2_obj, lv2_sema>(
      sem_id, [](lv2_sema &sema) { return std::max<s32>(0, sema.val); });
  if (!result) {
    return CELL_ESRCH;
  }
  // Pointer check intentionally comes after the id check (ESRCH wins)
  if (!count) {
    return CELL_EFAULT;
  }
  static_cast<void>(ppu.test_stopped());
  *count = result.ret;
  return CELL_OK;
}

View file

@ -0,0 +1,123 @@
#include "stdafx.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "cellos/sys_process.h"
#include "sys_sm.h"
LOG_CHANNEL(sys_sm);
// sys_sm_get_params: report stub system-manager parameters. Each output
// pointer is validated immediately before it is written, preserving the
// original partial-write behavior when a later pointer is null.
error_code sys_sm_get_params(vm::ptr<u8> a, vm::ptr<u8> b, vm::ptr<u32> c,
                             vm::ptr<u64> d) {
  sys_sm.todo("sys_sm_get_params(a=*0x%x, b=*0x%x, c=*0x%x, d=*0x%x)", a, b, c,
              d);
  if (!a) {
    return CELL_EFAULT;
  }
  *a = 0;
  if (!b) {
    return CELL_EFAULT;
  }
  *b = 0;
  if (!c) {
    return CELL_EFAULT;
  }
  *c = 0x200;
  if (!d) {
    return CELL_EFAULT;
  }
  *d = 7;
  return CELL_OK;
}
// sys_sm_get_ext_event2: poll for an external system-manager event.
// a1 == 7 - 'console too hot, restart'
// a2 looks to be used if a1 is either 5 or 3?
// a3 looks to be ignored in vsh
// No events are ever generated by the emulator, so this always reports
// CELL_EAGAIN after zeroing the outputs.
error_code sys_sm_get_ext_event2(vm::ptr<u64> a1, vm::ptr<u64> a2,
                                 vm::ptr<u64> a3, u64 a4) {
  // Fixed format string: the original had five conversion specifiers for
  // four arguments, a malformed trailing "0x%xll" and a missing ')'.
  sys_sm.todo("sys_sm_get_ext_event2(a1=*0x%x, a2=*0x%x, a3=*0x%x, a4=0x%llx)",
              a1, a2, a3, a4);
  if (a4 != 0 && a4 != 1) {
    return CELL_EINVAL;
  }
  if (a1)
    *a1 = 0;
  else
    return CELL_EFAULT;
  if (a2)
    *a2 = 0;
  else
    return CELL_EFAULT;
  if (a3)
    *a3 = 0;
  else
    return CELL_EFAULT;
  // eagain for no event
  return not_an_error(CELL_EAGAIN);
}
// sys_sm_shutdown: handle guest shutdown/reboot requests. Requires root
// permissions. Shutdown ops terminate the emulated process; reboot ops
// restart it via exitspawn with the original argv/envp.
error_code sys_sm_shutdown(ppu_thread &ppu, u16 op, vm::ptr<void> param,
                           u64 size) {
  ppu.state += cpu_flag::wait;
  sys_sm.success("sys_sm_shutdown(op=0x%x, param=*0x%x, size=0x%x)", op, param,
                 size);
  if (!g_ps3_process_info.has_root_perm()) {
    return CELL_ENOSYS;
  }
  switch (op) {
  case 0x100:
  case 0x1100: {
    sys_sm.success("Received shutdown request from application");
    _sys_process_exit(ppu, 0, 0, 0);
    break;
  }
  case 0x200:
  case 0x1200: {
    sys_sm.success("Received reboot request from application");
    lv2_exitspawn(ppu, Emu.argv, Emu.envp, Emu.data);
    break;
  }
  case 0x8201:
  case 0x8202:
  case 0x8204: {
    // LPAR (logical partition) control is not emulated
    sys_sm.warning("Unsupported LPAR operation: 0x%x", op);
    return CELL_ENOTSUP;
  }
  default:
    return CELL_EINVAL;
  }
  return CELL_OK;
}
// Stub: pretend the shop (kiosk) mode switch succeeded.
error_code sys_sm_set_shop_mode(s32 mode) {
  sys_sm.todo("sys_sm_set_shop_mode(mode=0x%x)", mode);
  return CELL_OK;
}
// Stub: front-panel LED control, no hardware to drive.
error_code sys_sm_control_led(u8 led, u8 action) {
  sys_sm.todo("sys_sm_control_led(led=0x%x, action=0x%x)", led, action);
  return CELL_OK;
}
// Stub: system buzzer, no hardware to drive.
error_code sys_sm_ring_buzzer(u64 packet, u64 a1, u64 a2) {
  sys_sm.todo("sys_sm_ring_buzzer(packet=0x%x, a1=0x%x, a2=0x%x)", packet, a1,
              a2);
  return CELL_OK;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,531 @@
#include "stdafx.h"
#include "sys_ss.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/IdManager.h"
#include "Emu/system_config.h"
#include "sys_process.h"
#include "util/sysinfo.hpp"
#include <charconv>
#include <shared_mutex>
#include <unordered_set>
#ifdef _WIN32
#include <bcrypt.h>
#include <windows.h>
#endif
// Emulated update-manager state: cached firmware version, fake EEPROM
// contents, and a registry of guest buffers allocated via pkgs 0x600E/0x6012.
struct lv2_update_manager {
  lv2_update_manager() {
    std::string version_str = utils::get_firmware_version();
    // For example, 4.90 should be converted to 0x4900000000000
    std::erase(version_str, '.');
    // Fixed inverted condition: std::from_chars reports success with
    // ec == std::errc{}; the original shifted on FAILURE and zeroed the
    // version on success, always yielding 0 for a valid version string.
    if (std::from_chars(version_str.data(),
                        version_str.data() + version_str.size(),
                        system_sw_version, 16)
            .ec == std::errc{})
      system_sw_version <<= 40;
    else
      system_sw_version = 0;
  }
  lv2_update_manager(const lv2_update_manager &) = delete;
  lv2_update_manager &operator=(const lv2_update_manager &) = delete;
  ~lv2_update_manager() = default;
  // Firmware version encoded as (hex digits of "X.YZ") << 40
  u64 system_sw_version;
  std::unordered_map<u32, u8> eeprom_map // offset, value
      {
          // system language
          // *i think* this gives english
          {0x48C18, 0x00},
          {0x48C19, 0x00},
          {0x48C1A, 0x00},
          {0x48C1B, 0x01},
          // system language end
          // vsh target (seems it can be 0xFFFFFFFE, 0xFFFFFFFF, 0x00000001
          // default: 0x00000000 / vsh sets it to 0x00000000 on boot if it isn't
          // 0x00000000)
          {0x48C1C, 0x00},
          {0x48C1D, 0x00},
          {0x48C1E, 0x00},
          {0x48C1F, 0x00} // vsh target end
      };
  mutable std::shared_mutex eeprom_mutex;
  // Guest addresses handed out by allocate(); guarded by malloc_mutex
  std::unordered_set<u32> malloc_set;
  mutable std::shared_mutex malloc_mutex;
  // return address
  u32 allocate(u32 size) {
    std::unique_lock unique_lock(malloc_mutex);
    if (const auto addr = vm::alloc(size, vm::main); addr) {
      malloc_set.emplace(addr);
      return addr;
    }
    return 0;
  }
  // return size
  u32 deallocate(u32 addr) {
    std::unique_lock unique_lock(malloc_mutex);
    if (malloc_set.count(addr)) {
      // Fixed: forget the address so it cannot be deallocated twice and the
      // tracking set does not grow without bound.
      malloc_set.erase(addr);
      return vm::dealloc(addr, vm::main);
    }
    return 0;
  }
};
// Pretty-printer for sys_ss_rng_error values, used by the logging framework.
template <>
void fmt_class_string<sys_ss_rng_error>::format(std::string &out, u64 arg) {
  format_enum(out, arg, [](auto error) {
    switch (error) {
      STR_CASE(SYS_SS_RNG_ERROR_INVALID_PKG);
      STR_CASE(SYS_SS_RNG_ERROR_ENOMEM);
      STR_CASE(SYS_SS_RNG_ERROR_EAGAIN);
      STR_CASE(SYS_SS_RNG_ERROR_EFAULT);
      STR_CASE(SYS_SS_RTC_ERROR_UNK);
    }
    return unknown;
  });
}
LOG_CHANNEL(sys_ss);
// sys_ss_random_number_generator: fill a guest buffer with random bytes
// using the host RNG (BCryptGenRandom on Windows, /dev/urandom elsewhere).
// pkg_id 2 is the normal request; pkg_id 1 (root only) is unimplemented and
// zero-fills a fixed 0x18-byte area.
error_code sys_ss_random_number_generator(u64 pkg_id, vm::ptr<void> buf,
                                          u64 size) {
  sys_ss.warning(
      "sys_ss_random_number_generator(pkg_id=%u, buf=*0x%x, size=0x%x)", pkg_id,
      buf, size);
  if (pkg_id != 2) {
    if (pkg_id == 1) {
      if (!g_ps3_process_info.has_root_perm()) {
        return CELL_ENOSYS;
      }
      sys_ss.todo("sys_ss_random_number_generator(): pkg_id=1");
      std::memset(buf.get_ptr(), 0, 0x18);
      return CELL_OK;
    }
    return SYS_SS_RNG_ERROR_INVALID_PKG;
  }
  // TODO
  // Arbitrary 256 MiB cap on the request size
  if (size > 0x10000000) {
    return SYS_SS_RNG_ERROR_ENOMEM;
  }
  // Generate into a host-side scratch buffer first, then copy to the guest
  std::unique_ptr<u8[]> temp(new u8[size]);
#ifdef _WIN32
  if (auto ret = BCryptGenRandom(nullptr, temp.get(), static_cast<ULONG>(size),
                                 BCRYPT_USE_SYSTEM_PREFERRED_RNG)) {
    fmt::throw_exception(
        "sys_ss_random_number_generator(): BCryptGenRandom failed (0x%08x)",
        ret);
  }
#else
  fs::file rnd{"/dev/urandom"};
  if (!rnd || rnd.read(temp.get(), size) != size) {
    fmt::throw_exception("sys_ss_random_number_generator(): Failed to generate "
                         "pseudo-random numbers");
  }
#endif
  std::memcpy(buf.get_ptr(), temp.get(), size);
  return CELL_OK;
}
// sys_ss_access_control_engine: expose the program authority id of the
// running SELF. pkg 0x1 (debug/root only) writes it for a given pid;
// pkg 0x2 writes it for the caller; pkg 0x3 is a permission check only.
error_code sys_ss_access_control_engine(u64 pkg_id, u64 a2, u64 a3) {
  sys_ss.success(
      "sys_ss_access_control_engine(pkg_id=0x%llx, a2=0x%llx, a3=0x%llx)",
      pkg_id, a2, a3);
  // Authority id comes from the decrypted SELF header (0 if not available)
  const u64 authid =
      g_ps3_process_info.self_info.valid
          ? g_ps3_process_info.self_info.prog_id_hdr.program_authority_id
          : 0;
  switch (pkg_id) {
  case 0x1: {
    if (!g_ps3_process_info.debug_or_root()) {
      return not_an_error(CELL_ENOSYS);
    }
    if (!a2) {
      return CELL_ESRCH;
    }
    // Only the current process id is supported
    ensure(a2 == static_cast<u64>(process_getpid()));
    vm::write64(vm::cast(a3), authid);
    break;
  }
  case 0x2: {
    vm::write64(vm::cast(a2), authid);
    break;
  }
  case 0x3: {
    if (!g_ps3_process_info.debug_or_root()) {
      return CELL_ENOSYS;
    }
    break;
  }
  default:
    return 0x8001051du;
  }
  return CELL_OK;
}
// sys_ss_get_console_id: the console id (IDPS) is served by the appliance
// info manager under code 0x19003.
error_code sys_ss_get_console_id(vm::ptr<u8> buf) {
  sys_ss.notice("sys_ss_get_console_id(buf=*0x%x)", buf);
  return sys_ss_appliance_info_manager(0x19003, buf);
}
// sys_ss_get_open_psid: return the configured open PSID (console identity
// exposed to games) from the emulator settings.
error_code sys_ss_get_open_psid(vm::ptr<CellSsOpenPSID> psid) {
  sys_ss.notice("sys_ss_get_open_psid(psid=*0x%x)", psid);
  // Guard against a null guest pointer, consistent with the EFAULT checks
  // performed by the sibling sys_ss_* entry points.
  if (!psid) {
    return CELL_EFAULT;
  }
  psid->high = g_cfg.sys.console_psid_high;
  psid->low = g_cfg.sys.console_psid_low;
  return CELL_OK;
}
// sys_ss_appliance_info_manager: serve fixed hardware identity blobs
// (device type, IDPS, PS code, open PSID) to root-privileged callers.
// Debug-console mode patches the relevant bytes to the DECR variants.
error_code sys_ss_appliance_info_manager(u32 code, vm::ptr<u8> buffer) {
  sys_ss.notice("sys_ss_appliance_info_manager(code=0x%x, buffer=*0x%x)", code,
                buffer);
  if (!g_ps3_process_info.has_root_perm())
    return CELL_ENOSYS;
  if (!buffer)
    return CELL_EFAULT;
  switch (code) {
  case 0x19002: {
    // AIM_get_device_type
    constexpr u8 product_code[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                                   0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                                   0x00, 0x00, 0x00, 0x89};
    std::memcpy(buffer.get_ptr(), product_code, 16);
    if (g_cfg.core.debug_console_mode)
      buffer[15] = 0x81; // DECR
    break;
  }
  case 0x19003: {
    // AIM_get_device_id
    constexpr u8 idps[] = {0x00, 0x00, 0x00, 0x01, 0x00, 0x89, 0x00, 0x0B,
                           0x14, 0x00, 0xEF, 0xDD, 0xCA, 0x25, 0x52, 0x66};
    std::memcpy(buffer.get_ptr(), idps, 16);
    if (g_cfg.core.debug_console_mode) {
      buffer[5] = 0x81; // DECR
      buffer[7] = 0x09; // DECR-1400
    }
    break;
  }
  case 0x19004: {
    // AIM_get_ps_code
    constexpr u8 pscode[] = {0x00, 0x01, 0x00, 0x85, 0x00, 0x07, 0x00, 0x04};
    std::memcpy(buffer.get_ptr(), pscode, 8);
    break;
  }
  case 0x19005: {
    // AIM_get_open_ps_id
    be_t<u64> psid[2] = {+g_cfg.sys.console_psid_high,
                         +g_cfg.sys.console_psid_low};
    std::memcpy(buffer.get_ptr(), psid, 16);
    break;
  }
  case 0x19006: {
    // qa values (dex only) ??
    [[fallthrough]];
  }
  default:
    sys_ss.todo("sys_ss_appliance_info_manager(code=0x%x, buffer=*0x%x)", code,
                buffer);
  }
  return CELL_OK;
}
// sys_ss_get_cache_of_product_mode: report the cached "product mode" flag.
error_code sys_ss_get_cache_of_product_mode(vm::ptr<u8> ptr) {
  sys_ss.todo("sys_ss_get_cache_of_product_mode(ptr=*0x%x)", ptr);
  if (!ptr) {
    return CELL_EINVAL;
  }
  // 0xff Happens when hypervisor call returns an error
  // 0 - disabled
  // 1 - enabled
  // except something segfaults when using 0, so error it is!
  *ptr = 0xFF;
  return CELL_OK;
}
// sys_ss_secure_rtc: minimal secure-RTC service. Only packet 0x3002
// ("get time") produces output: it stores the timebase counter at a3 and a
// zero status word at a4; 0x3001 and 0x3003 are accepted no-ops.
error_code sys_ss_secure_rtc(u64 cmd, u64 a2, u64 a3, u64 a4) {
  sys_ss.todo("sys_ss_secure_rtc(cmd=0x%llx, a2=0x%x, a3=0x%llx, a4=0x%llx)",
              cmd, a2, a3, a4);
  switch (cmd) {
  case 0x3001: {
    if (a3 != 0x20) {
      return 0x80010500; // bad packet id
    }
    return CELL_OK;
  }
  case 0x3002: {
    // Get time
    if (a2 > 1) {
      return 0x80010500; // bad packet id
    }
    // a3 is actual output, not 100% sure, but best guess is its tb val
    vm::write64(::narrow<u32>(a3), get_timebased_time());
    // a4 is a pointer to status, non 0 on error
    vm::write64(::narrow<u32>(a4), 0);
    return CELL_OK;
  }
  case 0x3003: {
    return CELL_OK;
  }
  default: {
    return 0x80010500; // bad packet id
  }
  }
}
// sys_ss_get_cache_of_flash_ext_flag: report the cached flash layout flag.
error_code sys_ss_get_cache_of_flash_ext_flag(vm::ptr<u64> flag) {
  sys_ss.todo("sys_ss_get_cache_of_flash_ext_flag(flag=*0x%x)", flag);
  if (!flag) {
    return CELL_EFAULT;
  }
  *flag = 0xFE; // nand vs nor from lsb
  return CELL_OK;
}
// sys_ss_get_boot_device: report the boot device id (fixed stub value).
error_code sys_ss_get_boot_device(vm::ptr<u64> dev) {
  sys_ss.todo("sys_ss_get_boot_device(dev=*0x%x)", dev);
  if (!dev) {
    return CELL_EINVAL;
  }
  *dev = 0x190;
  return CELL_OK;
}
// sys_ss_update_manager: dispatcher for firmware update-manager packets
// (root only). Most packets are accepted as no-ops; the meaningful ones
// serve the cached firmware version, emulate a small EEPROM, and manage
// guest scratch buffers via lv2_update_manager.
error_code sys_ss_update_manager(u64 pkg_id, u64 a1, u64 a2, u64 a3, u64 a4,
                                 u64 a5, u64 a6) {
  sys_ss.notice("sys_ss_update_manager(pkg=0x%x, a1=0x%x, a2=0x%x, a3=0x%x, "
                "a4=0x%x, a5=0x%x, a6=0x%x)",
                pkg_id, a1, a2, a3, a4, a5, a6);
  if (!g_ps3_process_info.has_root_perm())
    return CELL_ENOSYS;
  auto &update_manager = g_fxo->get<lv2_update_manager>();
  switch (pkg_id) {
  case 0x6001: {
    // update package async
    break;
  }
  case 0x6002: {
    // inspect package async
    break;
  }
  case 0x6003: {
    // get installed package info
    [[maybe_unused]] const auto type = ::narrow<u32>(a1);
    const auto info_ptr = ::narrow<u32>(a2);
    if (!info_ptr)
      return CELL_EFAULT;
    // Report the cached firmware version (0xVVVV << 40 encoding)
    vm::write64(info_ptr, update_manager.system_sw_version);
    break;
  }
  case 0x6004: {
    // get fix instruction
    break;
  }
  case 0x6005: {
    // extract package async
    break;
  }
  case 0x6006: {
    // get extract package
    break;
  }
  case 0x6007: {
    // get flash initialized
    break;
  }
  case 0x6008: {
    // set flash initialized
    break;
  }
  case 0x6009: {
    // get seed token
    break;
  }
  case 0x600A: {
    // set seed token
    break;
  }
  case 0x600B: {
    // read eeprom
    const auto offset = ::narrow<u32>(a1);
    const auto value_ptr = ::narrow<u32>(a2);
    if (!value_ptr)
      return CELL_EFAULT;
    std::shared_lock shared_lock(update_manager.eeprom_mutex);
    if (const auto iterator = update_manager.eeprom_map.find(offset);
        iterator != update_manager.eeprom_map.end())
      vm::write8(value_ptr, iterator->second);
    else
      vm::write8(value_ptr, 0xFF); // 0xFF if not set
    break;
  }
  case 0x600C: {
    // write eeprom
    const auto offset = ::narrow<u32>(a1);
    const auto value = ::narrow<u8>(a2);
    std::unique_lock unique_lock(update_manager.eeprom_mutex);
    if (value != 0xFF)
      update_manager.eeprom_map[offset] = value;
    else
      update_manager.eeprom_map.erase(offset); // 0xFF: unset
    break;
  }
  case 0x600D: {
    // get async status
    break;
  }
  case 0x600E: {
    // allocate buffer
    const auto size = ::narrow<u32>(a1);
    const auto addr_ptr = ::narrow<u32>(a2);
    if (!addr_ptr)
      return CELL_EFAULT;
    const auto addr = update_manager.allocate(size);
    if (!addr)
      return CELL_ENOMEM;
    vm::write32(addr_ptr, addr);
    break;
  }
  case 0x600F: {
    // release buffer
    const auto addr = ::narrow<u32>(a1);
    if (!update_manager.deallocate(addr))
      return CELL_ENOMEM;
    break;
  }
  case 0x6010: {
    // check integrity
    break;
  }
  case 0x6011: {
    // get applicable version
    const auto addr_ptr = ::narrow<u32>(a2);
    if (!addr_ptr)
      return CELL_EFAULT;
    vm::write64(addr_ptr, 0x30040ULL << 32); // 3.40
    break;
  }
  case 0x6012: {
    // allocate buffer from memory container
    [[maybe_unused]] const auto mem_ct = ::narrow<u32>(a1);
    const auto size = ::narrow<u32>(a2);
    const auto addr_ptr = ::narrow<u32>(a3);
    if (!addr_ptr)
      return CELL_EFAULT;
    // Container is ignored; allocation goes to main memory regardless
    const auto addr = update_manager.allocate(size);
    if (!addr)
      return CELL_ENOMEM;
    vm::write32(addr_ptr, addr);
    break;
  }
  case 0x6013: {
    // unknown
    break;
  }
  default: {
    sys_ss.error("sys_ss_update_manager(): invalid packet id 0x%x ", pkg_id);
    return CELL_EINVAL;
  }
  }
  return CELL_OK;
}
// Stub: virtual TRM (trusted resource manager) service, always succeeds.
error_code sys_ss_virtual_trm_manager(u64 pkg_id, u64 a1, u64 a2, u64 a3,
                                      u64 a4) {
  sys_ss.todo("sys_ss_virtual_trm_manager(pkg=0x%llx, a1=0x%llx, a2=0x%llx, "
              "a3=0x%llx, a4=0x%llx)",
              pkg_id, a1, a2, a3, a4);
  return CELL_OK;
}
// sys_ss_individual_info_manager: minimal EID (per-console individual data)
// service. 0x17001 reports a fixed EID size; 0x17002 pretends to read it.
error_code sys_ss_individual_info_manager(u64 pkg_id, u64 a2,
                                          vm::ptr<u64> out_size, u64 a4, u64 a5,
                                          u64 a6) {
  sys_ss.todo("sys_ss_individual_info_manager(pkg=0x%llx, a2=0x%llx, "
              "out_size=*0x%llx, a4=0x%llx, a5=0x%llx, a6=0x%llx)",
              pkg_id, a2, out_size, a4, a5, a6);
  switch (pkg_id) {
  // Read EID
  case 0x17002: {
    // TODO
    // NOTE(review): assumes a5 is a guest pointer and a4 the buffer size —
    // confirm against LLE usage
    vm::_ref<u64>(a5) = a4; // Write back size of buffer
    break;
  }
  // Get EID size
  case 0x17001:
    *out_size = 0x100;
    break;
  default:
    break;
  }
  return CELL_OK;
}

View file

@ -0,0 +1,456 @@
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_event.h"
#include "sys_fs.h"
#include "util/shared_ptr.hpp"
#include "sys_storage.h"
LOG_CHANNEL(sys_storage);
namespace {
// File-local state for the sys_storage async API: holds the event queue
// registered by sys_storage_async_configure.
struct storage_manager {
  // This is probably wrong and should be assigned per fd or something
  atomic_ptr<lv2_event_queue> asyncequeue;
};
} // namespace
// sys_storage_open: open a raw storage device by id and hand back an fd.
// No real backing file is attached yet; the descriptor is created empty.
error_code sys_storage_open(u64 device, u64 mode, vm::ptr<u32> fd, u64 flags) {
  sys_storage.todo(
      "sys_storage_open(device=0x%x, mode=0x%x, fd=*0x%x, flags=0x%x)", device,
      mode, fd, flags);
  if (device == 0) {
    return CELL_ENOENT;
  }
  if (!fd) {
    return CELL_EFAULT;
  }
  // Mask off the per-device index bits (byte 5) to get the device class
  [[maybe_unused]] u64 storage_id = device & 0xFFFFF00FFFFFFFF;
  fs::file file;
  if (const u32 id =
          idm::make<lv2_storage>(device, std::move(file), mode, flags)) {
    *fd = id;
    return CELL_OK;
  }
  return CELL_EAGAIN;
}
// sys_storage_close: release a storage descriptor (missing ids are ignored).
error_code sys_storage_close(u32 fd) {
  sys_storage.todo("sys_storage_close(fd=0x%x)", fd);
  idm::remove<lv2_storage>(fd);
  return CELL_OK;
}
// sys_storage_read: read `num_sectors` 0x200-byte sectors into the bounce
// buffer. The buffer is zero-filled first; if the descriptor has a backing
// file, real data overwrites it and the sector count reflects what was read.
error_code sys_storage_read(u32 fd, u32 mode, u32 start_sector, u32 num_sectors,
                            vm::ptr<void> bounce_buf, vm::ptr<u32> sectors_read,
                            u64 flags) {
  sys_storage.todo(
      "sys_storage_read(fd=0x%x, mode=0x%x, start_sector=0x%x, "
      "num_sectors=0x%x, bounce_buf=*0x%x, sectors_read=*0x%x, flags=0x%x)",
      fd, mode, start_sector, num_sectors, bounce_buf, sectors_read, flags);
  if (!bounce_buf || !sectors_read) {
    return CELL_EFAULT;
  }
  // NOTE(review): the buffer is zeroed even when fd turns out invalid below
  std::memset(bounce_buf.get_ptr(), 0, num_sectors * 0x200ull);
  const auto handle = idm::get_unlocked<lv2_storage>(fd);
  if (!handle) {
    return CELL_ESRCH;
  }
  if (handle->file) {
    handle->file.seek(start_sector * 0x200ull);
    const u64 size = num_sectors * 0x200ull;
    const u64 result = lv2_file::op_read(handle->file, bounce_buf, size);
    // Report only fully-read sectors
    num_sectors = ::narrow<u32>(result / 0x200ull);
  }
  *sectors_read = num_sectors;
  return CELL_OK;
}
// sys_storage_write: stubbed sector write. The data is discarded; the call
// only validates the output pointer and the descriptor, then reports that
// every requested sector was written.
error_code sys_storage_write(u32 fd, u32 mode, u32 start_sector,
                             u32 num_sectors, vm::ptr<void> data,
                             vm::ptr<u32> sectors_wrote, u64 flags) {
  sys_storage.todo(
      "sys_storage_write(fd=0x%x, mode=0x%x, start_sector=0x%x, "
      "num_sectors=0x%x, data=*=0x%x, sectors_wrote=*0x%x, flags=0x%llx)",
      fd, mode, start_sector, num_sectors, data, sectors_wrote, flags);
  if (!sectors_wrote) {
    return CELL_EFAULT;
  }
  const auto storage = idm::get_unlocked<lv2_storage>(fd);
  if (!storage) {
    return CELL_ESRCH;
  }
  // Pretend the whole request succeeded
  *sectors_wrote = num_sectors;
  return CELL_OK;
}
// sys_storage_send_device_command: stubbed passthrough of a raw device
// command; inputs are ignored and success is always reported.
error_code sys_storage_send_device_command(u32 dev_handle, u64 cmd,
                                           vm::ptr<void> in, u64 inlen,
                                           vm::ptr<void> out, u64 outlen) {
  // Fixed format string: "in=*0x%" was missing its conversion character.
  sys_storage.todo("sys_storage_send_device_command(dev_handle=0x%x, "
                   "cmd=0x%llx, in=*0x%x, inlen=0x%x, out=*0x%x, outlen=0x%x)",
                   dev_handle, cmd, in, inlen, out, outlen);
  return CELL_OK;
}
// sys_storage_async_configure: register the event queue that async storage
// completions will be delivered to (stored globally, not per fd).
error_code sys_storage_async_configure(u32 fd, u32 io_buf, u32 equeue_id,
                                       u32 unk) {
  sys_storage.todo("sys_storage_async_configure(fd=0x%x, io_buf=0x%x, "
                   "equeue_id=0x%x, unk=*0x%x)",
                   fd, io_buf, equeue_id, unk);
  auto &manager = g_fxo->get<storage_manager>();
  auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_id);
  if (!queue) {
    return CELL_ESRCH;
  }
  manager.asyncequeue.store(queue);
  return CELL_OK;
}
// sys_storage_async_send_device_command: stubbed async device command;
// immediately posts a completion event to the configured queue, if any.
error_code sys_storage_async_send_device_command(u32 dev_handle, u64 cmd,
                                                 vm::ptr<void> in, u64 inlen,
                                                 vm::ptr<void> out, u64 outlen,
                                                 u64 unk) {
  sys_storage.todo(
      "sys_storage_async_send_device_command(dev_handle=0x%x, cmd=0x%llx, "
      "in=*0x%x, inlen=0x%x, out=*0x%x, outlen=0x%x, unk=0x%x)",
      dev_handle, cmd, in, inlen, out, outlen, unk);
  auto &manager = g_fxo->get<storage_manager>();
  if (auto q = manager.asyncequeue.load()) {
    q->send(0, unk, unk, unk);
  }
  return CELL_OK;
}
// Stub: asynchronous sector read, not implemented.
error_code sys_storage_async_read() {
  sys_storage.todo("sys_storage_async_read()");
  return CELL_OK;
}
// Stub: asynchronous sector write, not implemented.
error_code sys_storage_async_write() {
  sys_storage.todo("sys_storage_async_write()");
  return CELL_OK;
}
// Stub: cancel a pending async storage operation, not implemented.
error_code sys_storage_async_cancel() {
  sys_storage.todo("sys_storage_async_cancel()");
  return CELL_OK;
}
// sys_storage_get_device_info: fill a StorageDeviceInfo with hard-coded
// geometry/flags for the known device classes (HDD, BD drive, USB, NAND,
// NOR, and an unknown NAND-like device). Sector counts mirror a kernel
// dump; -5 is returned for an out-of-range per-class device index.
error_code sys_storage_get_device_info(u64 device,
                                       vm::ptr<StorageDeviceInfo> buffer) {
  sys_storage.todo("sys_storage_get_device_info(device=0x%x, buffer=*0x%x)",
                   device, buffer);
  if (!buffer) {
    return CELL_EFAULT;
  }
  memset(buffer.get_ptr(), 0, sizeof(StorageDeviceInfo));
  // Split the id: device class vs per-class index (byte 5)
  u64 storage = device & 0xFFFFF00FFFFFFFF;
  u32 dev_num = (device >> 32) & 0xFF;
  if (storage == ATA_HDD) // dev_hdd?
  {
    if (dev_num > 2) {
      return not_an_error(-5);
    }
    std::string u = "unnamed";
    memcpy(buffer->name, u.c_str(), u.size());
    buffer->sector_size = 0x200;
    buffer->one = 1;
    buffer->flags[1] = 1;
    buffer->flags[2] = 1;
    buffer->flags[7] = 1;
    // set partition size based on dev_num
    // stole these sizes from kernel dump, unknown if they are 100% correct
    // vsh reports only 2 partitions even though there is 3 sizes
    switch (dev_num) {
    case 0:
      buffer->sector_count = 0x2542EAB0; // possibly total size
      break;
    case 1:
      buffer->sector_count = 0x24FAEA98; // which makes this hdd0
      break;
    case 2:
      buffer->sector_count = 0x3FFFF8; // and this one hdd1
      break;
    }
  } else if (storage == BDVD_DRIVE) // dev_bdvd?
  {
    if (dev_num > 0) {
      return not_an_error(-5);
    }
    std::string u = "unnamed";
    memcpy(buffer->name, u.c_str(), u.size());
    buffer->sector_count = 0x4D955;
    buffer->sector_size = 0x800;
    buffer->one = 1;
    buffer->flags[1] = 0;
    buffer->flags[2] = 1;
    buffer->flags[7] = 1;
  } else if (storage == USB_MASS_STORAGE_1(0)) {
    if (dev_num > 0) {
      return not_an_error(-5);
    }
    std::string u = "unnamed";
    memcpy(buffer->name, u.c_str(), u.size());
    /*buffer->sector_count = 0x4D955;*/
    buffer->sector_size = 0x200;
    buffer->one = 1;
    buffer->flags[1] = 0;
    buffer->flags[2] = 1;
    buffer->flags[7] = 1;
  } else if (storage == NAND_FLASH) {
    if (dev_num > 6) {
      return not_an_error(-5);
    }
    std::string u = "unnamed";
    memcpy(buffer->name, u.c_str(), u.size());
    buffer->sector_size = 0x200;
    buffer->one = 1;
    buffer->flags[1] = 1;
    buffer->flags[2] = 1;
    buffer->flags[7] = 1;
    // see ata_hdd for explanation
    switch (dev_num) {
    case 0:
      buffer->sector_count = 0x80000;
      break;
    case 1:
      buffer->sector_count = 0x75F8;
      break;
    case 2:
      buffer->sector_count = 0x63E00;
      break;
    case 3:
      buffer->sector_count = 0x8000;
      break;
    case 4:
      buffer->sector_count = 0x400;
      break;
    case 5:
      buffer->sector_count = 0x2000;
      break;
    case 6:
      buffer->sector_count = 0x200;
      break;
    }
  } else if (storage == NOR_FLASH) {
    if (dev_num > 3) {
      return not_an_error(-5);
    }
    std::string u = "unnamed";
    memcpy(buffer->name, u.c_str(), u.size());
    buffer->sector_size = 0x200;
    buffer->one = 1;
    buffer->flags[1] = 0;
    buffer->flags[2] = 1;
    buffer->flags[7] = 1;
    // see ata_hdd for explanation
    switch (dev_num) {
    case 0:
      buffer->sector_count = 0x8000;
      break;
    case 1:
      buffer->sector_count = 0x77F8;
      break;
    case 2:
      buffer->sector_count = 0x100; // offset, 0x20000
      break;
    case 3:
      buffer->sector_count = 0x400;
      break;
    }
  } else if (storage == NAND_UNK) {
    if (dev_num > 1) {
      return not_an_error(-5);
    }
    std::string u = "unnamed";
    memcpy(buffer->name, u.c_str(), u.size());
    buffer->sector_size = 0x800;
    buffer->one = 1;
    buffer->flags[1] = 0;
    buffer->flags[2] = 1;
    buffer->flags[7] = 1;
    // see ata_hdd for explanation
    switch (dev_num) {
    case 0:
      buffer->sector_count = 0x7FFFFFFF;
      break;
    }
  } else {
    // Unknown device class: leave the zeroed struct and log it
    sys_storage.error("sys_storage_get_device_info(device=0x%x, buffer=*0x%x)",
                      device, buffer);
  }
  return CELL_OK;
}
// sys_storage_get_device_config: report the fixed storage-class and device
// counts. `storages` is written before `devices` is validated, preserving
// the original partial-write behavior.
error_code sys_storage_get_device_config(vm::ptr<u32> storages,
                                         vm::ptr<u32> devices) {
  sys_storage.todo(
      "sys_storage_get_device_config(storages=*0x%x, devices=*0x%x)", storages,
      devices);
  if (!storages) {
    return CELL_EFAULT;
  }
  *storages = 6;
  if (!devices) {
    return CELL_EFAULT;
  }
  *devices = 17;
  return CELL_OK;
}
// sys_storage_report_devices: copy a window of the fixed 17-entry device-id
// table into the guest buffer, starting at `start` for `devices` entries.
error_code sys_storage_report_devices(u32 storages, u32 start, u32 devices,
                                      vm::ptr<u64> device_ids) {
  sys_storage.todo("sys_storage_report_devices(storages=0x%x, start=0x%x, "
                   "devices=0x%x, device_ids=0x%x)",
                   storages, start, devices, device_ids);
  if (!device_ids) {
    return CELL_EFAULT;
  }
  // Compile-time table of all reported device ids (class | index << 32)
  static constexpr std::array<u64, 0x11> all_devs = [] {
    std::array<u64, 0x11> all_devs{};
    all_devs[0] = 0x10300000000000A;
    for (int i = 0; i < 7; ++i) {
      all_devs[i + 1] = 0x100000000000001 | (static_cast<u64>(i) << 32);
    }
    for (int i = 0; i < 3; ++i) {
      all_devs[i + 8] = 0x101000000000007 | (static_cast<u64>(i) << 32);
    }
    all_devs[11] = 0x101000000000006;
    for (int i = 0; i < 4; ++i) {
      all_devs[i + 12] = 0x100000000000004 | (static_cast<u64>(i) << 32);
    }
    all_devs[16] = 0x100000000000003;
    return all_devs;
  }();
  // Reject empty or out-of-bounds windows
  if (!devices || start >= all_devs.size() ||
      devices > all_devs.size() - start) {
    return CELL_EINVAL;
  }
  std::copy_n(all_devs.begin() + start, devices, device_ids.get_ptr());
  return CELL_OK;
}
// Stub: register for medium (disc) insertion/removal events, not implemented.
error_code sys_storage_configure_medium_event(u32 fd, u32 equeue_id, u32 c) {
  sys_storage.todo(
      "sys_storage_configure_medium_event(fd=0x%x, equeue_id=0x%x, c=0x%x)", fd,
      equeue_id, c);
  return CELL_OK;
}
// Stub: medium polling interval, not implemented.
error_code sys_storage_set_medium_polling_interval() {
  sys_storage.todo("sys_storage_set_medium_polling_interval()");
  return CELL_OK;
}
// Stub: create a storage region, not implemented.
error_code sys_storage_create_region() {
  sys_storage.todo("sys_storage_create_region()");
  return CELL_OK;
}
// Stub: delete a storage region, not implemented.
error_code sys_storage_delete_region() {
  sys_storage.todo("sys_storage_delete_region()");
  return CELL_OK;
}
// Stub: execute a raw device command; command/data buffers are ignored and
// success is always reported.
error_code sys_storage_execute_device_command(
    u32 fd, u64 cmd, vm::ptr<char> cmdbuf, u64 cmdbuf_size,
    vm::ptr<char> databuf, u64 databuf_size, vm::ptr<u32> driver_status) {
  sys_storage.todo("sys_storage_execute_device_command(fd=0x%x, cmd=0x%llx, "
                   "cmdbuf=*0x%x, cmdbuf_size=0x%llx, databuf=*0x%x, "
                   "databuf_size=0x%llx, driver_status=*0x%x)",
                   fd, cmd, cmdbuf, cmdbuf_size, databuf, databuf_size,
                   driver_status);
  // cmd == 2 is get device info,
  // databuf, first byte 0 == status ok?
  // byte 1, if < 0 , not ata device
  return CELL_OK;
}
// Stub: check region access control list, not implemented.
error_code sys_storage_check_region_acl() {
  sys_storage.todo("sys_storage_check_region_acl()");
  return CELL_OK;
}
// Stub: set region access control list, not implemented.
error_code sys_storage_set_region_acl() {
  sys_storage.todo("sys_storage_set_region_acl()");
  return CELL_OK;
}
// Stub: query a region's offset, not implemented.
error_code sys_storage_get_region_offset() {
  sys_storage.todo("sys_storage_get_region_offset()");
  return CELL_OK;
}
// Stub: set emulated drive speed (debug-kernel-only syscall).
error_code sys_storage_set_emulated_speed() {
  sys_storage.todo("sys_storage_set_emulated_speed()");
  // todo: only debug kernel has this
  return CELL_ENOSYS;
}

View file

@ -0,0 +1,446 @@
#include "stdafx.h"
#include "sys_time.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/system_config.h"
#include "sys_process.h"
#include "util/tsc.hpp"
#include "util/sysinfo.hpp"
// Offset subtracted from the raw timebase reading so the guest timebase
// starts at (or resumes from) a known base value; see
// initialize_timebased_time().
u64 g_timebase_offs{};
// Microsecond-domain counterpart of g_timebase_offs, subtracted in
// get_guest_system_time().
static u64 systemtime_offset;
#ifndef __linux__
#include "util/asm.hpp"
#endif
#ifdef _WIN32
#include <windows.h>
// QPC/FILETIME calibration data, captured once at process startup.
struct time_aux_info_t {
  u64 perf_freq;   // QueryPerformanceFrequency (counts per second)
  u64 start_time;  // QueryPerformanceCounter value at startup
  u64 start_ftime; // time in 100ns units since Epoch
};
// Initialize time-related values
const auto s_time_aux_info = []() -> time_aux_info_t {
  LARGE_INTEGER freq;
  if (!QueryPerformanceFrequency(&freq)) {
    MessageBox(
        nullptr,
        L"Your hardware doesn't support a high-resolution performance counter",
        L"Error", MB_OK | MB_ICONERROR);
    return {};
  }
  LARGE_INTEGER start;
  QueryPerformanceCounter(&start); // get time in 1/perf_freq units from RDTSC
  FILETIME ftime;
  GetSystemTimeAsFileTime(
      &ftime); // get time in 100ns units since January 1, 1601 (UTC)
  time_aux_info_t result;
  result.perf_freq = freq.QuadPart;
  result.start_time = start.QuadPart;
  // Rebase from the Windows epoch (1601) to the Unix epoch (1970);
  // 116444736000000000 is the difference in 100ns units.
  result.start_ftime =
      (ftime.dwLowDateTime | static_cast<u64>(ftime.dwHighDateTime) << 32) -
      116444736000000000;
  return result;
}();
#elif __APPLE__
// XXX only supports a single timer
#if !defined(HAVE_CLOCK_GETTIME)
#define TIMER_ABSTIME -1
// The opengroup spec isn't clear on the mapping from REALTIME to CALENDAR being
// appropriate or not.
// http://pubs.opengroup.org/onlinepubs/009695299/basedefs/time.h.html
#define CLOCK_REALTIME 1 // #define CALENDAR_CLOCK 1 from mach/clock_types.h
#define CLOCK_MONOTONIC 0 // #define SYSTEM_CLOCK 0
// the mach kernel uses struct mach_timespec, so struct timespec is loaded from
// <sys/_types/_timespec.h> for compatibility: struct timespec { time_t tv_sec;
// long tv_nsec; };
#include <mach/clock.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <sys/_types/_timespec.h>
#include <sys/types.h>
#undef CPU_STATE_MAX
#define MT_NANO (+1.0E-9)
#define MT_GIGA UINT64_C(1000000000)
// TODO create a list of timers,
static double mt_timebase = 0.0;
static u64 mt_timestart = 0;
// Fallback clock_gettime() shim for macOS builds without HAVE_CLOCK_GETTIME,
// implemented on top of the Mach clock services.
static int clock_gettime(int clk_id, struct timespec *tp) {
  kern_return_t retval = KERN_SUCCESS;
  if (clk_id == TIMER_ABSTIME) {
    if (!mt_timestart) {
      // only one timer, initialized on the first call to the TIMER
      mach_timebase_info_data_t tb = {0};
      mach_timebase_info(&tb);
      mt_timebase = tb.numer;
      mt_timebase /= tb.denom;
      mt_timestart = mach_absolute_time();
    }
    double diff = (mach_absolute_time() - mt_timestart) * mt_timebase;
    tp->tv_sec = diff * MT_NANO;
    tp->tv_nsec = diff - (tp->tv_sec * MT_GIGA);
  } else // other clk_ids are mapped to the corresponding mach clock_service
  {
    clock_serv_t cclock;
    mach_timespec_t mts;
    host_get_clock_service(mach_host_self(), clk_id, &cclock);
    retval = clock_get_time(cclock, &mts);
    mach_port_deallocate(mach_task_self(), cclock);
    tp->tv_sec = mts.tv_sec;
    tp->tv_nsec = mts.tv_nsec;
  }
  return retval;
}
#endif
#endif
#ifndef _WIN32
#include <sys/time.h>
// Wall-clock time captured at startup; the initializer also primes the host
// timezone database (tzset) for sys_time_get_timezone().
static struct timespec start_time = []() {
  struct timespec ts;
  if (::clock_gettime(CLOCK_REALTIME, &ts) != 0) {
    // Fatal error
    std::terminate();
  }
  tzset();
  return ts;
}();
#endif
LOG_CHANNEL(sys_time);
static constexpr u64 g_timebase_freq = /*79800000*/ 80000000ull; // 80 Mhz
// Convert a time value in microseconds into timebase ticks, honoring the
// configured clock scaling (a percentage) and the timebase offset.
u64 convert_to_timebased_time(u64 time) {
  // 80 MHz timebase -> 80 ticks per microsecond.
  const u64 ticks_per_us = g_timebase_freq / 1000000ull;
  const u64 scaled = time * ticks_per_us * g_cfg.core.clocks_scale / 100u;
  ensure(scaled >= g_timebase_offs);
  return scaled - g_timebase_offs;
}
// Read the emulated 80 MHz timebase counter. Derived from the host TSC when
// its frequency is known; otherwise from QPC (Windows) or CLOCK_MONOTONIC.
// The result honors the "Clocks scale" setting and g_timebase_offs.
u64 get_timebased_time() {
  if (u64 freq = utils::get_tsc_freq()) {
    const u64 tsc = utils::get_tsc();
#if _MSC_VER
    // 128-bit multiply avoids overflow of tsc * g_timebase_freq.
    const u64 result =
        static_cast<u64>(u128_from_mul(tsc, g_timebase_freq) / freq) *
        g_cfg.core.clocks_scale / 100u;
#else
    // Split into quotient and remainder to avoid 64-bit overflow.
    const u64 result =
        (tsc / freq * g_timebase_freq + tsc % freq * g_timebase_freq / freq) *
        g_cfg.core.clocks_scale / 100u;
#endif
    return result - g_timebase_offs;
  }
  while (true) {
#ifdef _WIN32
    LARGE_INTEGER count;
    ensure(QueryPerformanceCounter(&count));
    const u64 time = count.QuadPart;
    const u64 freq = s_time_aux_info.perf_freq;
#if _MSC_VER
    const u64 result = static_cast<u64>(
        u128_from_mul(time * g_cfg.core.clocks_scale, g_timebase_freq) / freq /
        100u);
#else
    const u64 result =
        (time / freq * g_timebase_freq + time % freq * g_timebase_freq / freq) *
        g_cfg.core.clocks_scale / 100u;
#endif
#else
    struct timespec ts;
    ensure(::clock_gettime(CLOCK_MONOTONIC, &ts) == 0);
    const u64 result =
        (static_cast<u64>(ts.tv_sec) * g_timebase_freq +
         static_cast<u64>(ts.tv_nsec) * g_timebase_freq / 1000000000ull) *
        g_cfg.core.clocks_scale / 100u;
#endif
    // Retry until a non-zero reading is obtained (0 is used as "invalid").
    if (result)
      return result - g_timebase_offs;
  }
}
// Add an offset to get_timebased_time to avoid leaking the host PC's uptime
// into the game: the PS3 appears to start at value 0 (base time) when the
// game boots. If a non-zero arg is specified it becomes the base time
// (used when restoring savestates).
void initialize_timebased_time(u64 timebased_init, bool reset) {
  // Clear the offset first so the get_timebased_time() call below returns
  // the raw (unbiased) counter value.
  g_timebase_offs = 0;
  if (reset) {
    // We simply want to zero-out these values
    systemtime_offset = 0;
    return;
  }
  const u64 current = get_timebased_time();
  timebased_init = current - timebased_init;
  g_timebase_offs = timebased_init;
  // Keep the microsecond-domain offset in sync (timebase ticks -> us).
  systemtime_offset = timebased_init / (g_timebase_freq / 1000000);
}
// Returns some relative time in microseconds, don't change this fact.
// Backed by the host TSC when its frequency is known, otherwise QPC
// (Windows) or CLOCK_MONOTONIC. Not affected by clock scaling.
u64 get_system_time() {
  if (u64 freq = utils::get_tsc_freq()) {
    const u64 tsc = utils::get_tsc();
#if _MSC_VER
    // 128-bit multiply avoids overflow of tsc * 1000000.
    const u64 result = static_cast<u64>(u128_from_mul(tsc, 1000000ull) / freq);
#else
    // Split into quotient and remainder to avoid 64-bit overflow.
    const u64 result =
        (tsc / freq * 1000000ull + tsc % freq * 1000000ull / freq);
#endif
    return result;
  }
  while (true) {
#ifdef _WIN32
    LARGE_INTEGER count;
    ensure(QueryPerformanceCounter(&count));
    const u64 time = count.QuadPart;
    const u64 freq = s_time_aux_info.perf_freq;
#if _MSC_VER
    const u64 result = static_cast<u64>(u128_from_mul(time, 1000000ull) / freq);
#else
    const u64 result =
        time / freq * 1000000ull + (time % freq) * 1000000ull / freq;
#endif
#else
    struct timespec ts;
    ensure(::clock_gettime(CLOCK_MONOTONIC, &ts) == 0);
    const u64 result = static_cast<u64>(ts.tv_sec) * 1000000ull +
                       static_cast<u64>(ts.tv_nsec) / 1000u;
#endif
    // Retry until a non-zero reading is obtained (0 is used as "invalid").
    if (result)
      return result;
  }
}
// As get_system_time, but obeys the "Clocks scale" setting and subtracts the
// guest's system-time offset. When `time` is umax, the current host time is
// sampled; otherwise the caller-provided value is scaled.
u64 get_guest_system_time(u64 time) {
  u64 base = time;
  if (base == umax) {
    base = get_system_time();
  }
  const u64 scaled = base * g_cfg.core.clocks_scale / 100;
  return scaled - systemtime_offset;
}
// Syscalls
// Set the system timezone. Requires root permission; the change itself is
// not emulated (always succeeds for privileged callers).
error_code sys_time_set_timezone(s32 timezone, s32 summertime) {
  sys_time.trace("sys_time_set_timezone(timezone=0x%x, summertime=0x%x)",
                 timezone, summertime);
  if (g_ps3_process_info.has_root_perm()) {
    return CELL_OK;
  }
  return CELL_ENOSYS;
}
// Report the host timezone offset (minutes east of UTC) and the DST bias
// (minutes) to the guest, using platform-specific sources.
error_code sys_time_get_timezone(vm::ptr<s32> timezone,
                                 vm::ptr<s32> summertime) {
  sys_time.trace("sys_time_get_timezone(timezone=*0x%x, summertime=*0x%x)",
                 timezone, summertime);
#ifdef _WIN32
  TIME_ZONE_INFORMATION tz{};
  // Windows reports Bias in minutes *west* of UTC, hence the negations.
  switch (GetTimeZoneInformation(&tz)) {
  case TIME_ZONE_ID_UNKNOWN: {
    *timezone = -tz.Bias;
    *summertime = 0;
    break;
  }
  case TIME_ZONE_ID_STANDARD: {
    *timezone = -tz.Bias;
    *summertime = -tz.StandardBias;
    if (tz.StandardBias) {
      sys_time.error("Unexpected timezone bias (base=%d, std=%d, daylight=%d)",
                     tz.Bias, tz.StandardBias, tz.DaylightBias);
    }
    break;
  }
  case TIME_ZONE_ID_DAYLIGHT: {
    *timezone = -tz.Bias;
    *summertime = -tz.DaylightBias;
    break;
  }
  default: {
    ensure(0);
  }
  }
#elif __linux__
  // glibc: ::timezone is seconds west of UTC, ::daylight flags DST support.
  *timezone = ::narrow<s16>(-::timezone / 60);
  *summertime = !::daylight ? 0 : []() -> s32 {
    struct tm test{};
    ensure(&test == localtime_r(&start_time.tv_sec, &test));
    // Check bounds [0,1]
    if (test.tm_isdst & -2) {
      sys_time.error(
          "No information for timezone DST bias (timezone=%.2fh, tm_gmtoff=%d)",
          -::timezone / 3600.0, test.tm_gmtoff);
      return 0;
    } else {
      return test.tm_isdst ? ::narrow<s16>((test.tm_gmtoff + ::timezone) / 60)
                           : 0;
    }
  }();
#else
  // gettimeofday doesn't return timezone on linux anymore, but this should work
  // on other OSes?
  struct timezone tz{};
  ensure(gettimeofday(nullptr, &tz) == 0);
  *timezone = ::narrow<s16>(-tz.tz_minuteswest);
  *summertime = !tz.tz_dsttime ? 0 : [&]() -> s32 {
    struct tm test{};
    ensure(&test == localtime_r(&start_time.tv_sec, &test));
    return test.tm_isdst
               ? ::narrow<s16>(test.tm_gmtoff / 60 + tz.tz_minuteswest)
               : 0;
  }();
#endif
  return CELL_OK;
}
// Read the current real-time clock as (seconds, nanoseconds) since the Unix
// epoch, adjusted by the configured console time offset and clock scaling.
// NOTE: *sec is intentionally written before nsec is validated — a null nsec
// still returns CELL_EFAULT, but only after sec has been stored.
error_code sys_time_get_current_time(vm::ptr<s64> sec, vm::ptr<s64> nsec) {
  sys_time.trace("sys_time_get_current_time(sec=*0x%x, nsec=*0x%x)", sec, nsec);
  if (!sec) {
    return CELL_EFAULT;
  }
#ifdef _WIN32
  LARGE_INTEGER count;
  ensure(QueryPerformanceCounter(&count));
  const u64 diff_base = count.QuadPart - s_time_aux_info.start_time;
  // Get time difference in nanoseconds (using 128 bit accumulator)
  const u64 diff_sl = diff_base * 1000000000ull;
  const u64 diff_sh = utils::umulh64(diff_base, 1000000000ull);
  const u64 diff = utils::udiv128(diff_sh, diff_sl, s_time_aux_info.perf_freq);
  // get time since Epoch in nanoseconds
  const u64 time = s_time_aux_info.start_ftime * 100u +
                   (diff * g_cfg.core.clocks_scale / 100u);
  // scale to seconds, and add the console time offset (which might be negative)
  *sec = (time / 1000000000ull) + g_cfg.sys.console_time_offset;
  if (!nsec) {
    return CELL_EFAULT;
  }
  *nsec = time % 1000000000ull;
#else
  struct timespec ts;
  ensure(::clock_gettime(CLOCK_REALTIME, &ts) == 0);
  if (g_cfg.core.clocks_scale == 100) {
    // Fast path: no scaling configured.
    // get the seconds from the system clock, and add the console time offset
    // (which might be negative)
    *sec = ts.tv_sec + g_cfg.sys.console_time_offset;
    if (!nsec) {
      return CELL_EFAULT;
    }
    *nsec = ts.tv_nsec;
    return CELL_OK;
  }
  // Scaled path: stretch only the time elapsed since emulator start.
  u64 tv_sec = ts.tv_sec, stv_sec = start_time.tv_sec;
  u64 tv_nsec = ts.tv_nsec, stv_nsec = start_time.tv_nsec;
  // Subtract the start time from the current time since Epoch
  tv_sec -= stv_sec;
  if (tv_nsec < stv_nsec) {
    // Correct value if borrow encountered
    tv_sec -= 1;
    tv_nsec = 1'000'000'000ull - (stv_nsec - tv_nsec);
  } else {
    tv_nsec -= stv_nsec;
  }
  // Scale nanoseconds
  tv_nsec = stv_nsec + (tv_nsec * g_cfg.core.clocks_scale / 100);
  // Scale seconds and add from nanoseconds / 1'000'000'000, and add the console
  // time offset (which might be negative)
  *sec = stv_sec + (tv_sec * g_cfg.core.clocks_scale / 100u) +
         (tv_nsec / 1000000000ull) + g_cfg.sys.console_time_offset;
  if (!nsec) {
    return CELL_EFAULT;
  }
  // Set nanoseconds
  *nsec = tv_nsec % 1000000000ull;
#endif
  return CELL_OK;
}
// Set the system clock. Requires root permission; actually changing the
// clock is not emulated (always succeeds for privileged callers).
error_code sys_time_set_current_time(s64 sec, s64 nsec) {
  sys_time.trace("sys_time_set_current_time(sec=0x%x, nsec=0x%x)", sec, nsec);
  if (g_ps3_process_info.has_root_perm()) {
    return CELL_OK;
  }
  return CELL_ENOSYS;
}
// Return the emulated timebase frequency (fixed 80 MHz, g_timebase_freq).
u64 sys_time_get_timebase_frequency() {
  sys_time.trace("sys_time_get_timebase_frequency()");
  return g_timebase_freq;
}
// Stub: RTC readout is not implemented; *rtc is left untouched.
error_code sys_time_get_rtc(vm::ptr<u64> rtc) {
  sys_time.todo("sys_time_get_rtc(rtc=*0x%x)", rtc);
  return CELL_OK;
}

View file

@ -0,0 +1,450 @@
#include "stdafx.h"
#include "sys_timer.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "sys_event.h"
#include "sys_process.h"
#include "util/asm.hpp"
#include <deque>
#include <thread>
LOG_CHANNEL(sys_timer);
// Background thread that watches all live lv2 timers and fires due events.
struct lv2_timer_thread {
  shared_mutex mutex; // guards `timers`
  std::deque<shared_ptr<lv2_timer>> timers; // all registered timers
  lv2_timer_thread();
  void operator()();
  // SAVESTATE_INIT_POS(46); // FREE SAVESTATE_INIT_POS number
  static constexpr auto thread_name = "Timer Thread"sv;
};
// Deserialize a timer from a savestate.
lv2_timer::lv2_timer(utils::serial &ar)
    : lv2_obj(1), state(ar), port(lv2_event_queue::load_ptr(ar, port, "timer")),
      source(ar), data1(ar), data2(ar), expire(ar), period(ar) {}
// Serialize the timer into a savestate.
void lv2_timer::save(utils::serial &ar) {
  USING_SERIALIZATION_VERSION(lv2_sync);
  ar(state), lv2_event_queue::save_ptr(ar, port.get()),
      ar(source, data1, data2, expire, period);
}
// Lock-free pre-check: returns the remaining time until expiration, or umax
// if the timer is not running. When the timer is due, the mutex is taken and
// the accurate check (which may fire the event) runs under the lock.
u64 lv2_timer::check(u64 _now) noexcept {
  // Snapshot the state once; only running timers need attention.
  if (+state != SYS_TIMER_STATE_RUN) {
    return umax;
  }
  const u64 next = expire;
  if (_now < next) {
    // Not due yet: advise the caller how long to wait.
    return next - _now;
  }
  // If aborting, perform the last accurate check for event
  lv2_obj::notify_all_t notify;
  std::lock_guard lock(mutex);
  return check_unlocked(_now);
}
// Accurate expiration check; caller must hold `mutex`. Sends the event to
// the connected queue (if any), rearms periodic timers and stops oneshots.
// Returns the time until the next expiration, or umax if now idle.
u64 lv2_timer::check_unlocked(u64 _now) noexcept {
  const u64 next = expire;
  if (_now < next || state != SYS_TIMER_STATE_RUN) {
    return umax;
  }
  if (port) {
    port->send(source, data1, data2, next);
  }
  if (period) {
    // Set next expiration time and check again
    const u64 expire0 = utils::add_saturate<u64>(next, period);
    expire.release(expire0);
    return utils::sub_saturate<u64>(expire0, _now);
  }
  // Stop after oneshot
  state.release(SYS_TIMER_STATE_STOP);
  return umax;
}
// Re-register already-existing timers (e.g. after a savestate load) once the
// emulator finishes initialization.
lv2_timer_thread::lv2_timer_thread() {
  Emu.PostponeInitCode([this]() {
    idm::select<lv2_obj, lv2_timer>([&](u32 id, lv2_timer &) {
      timers.emplace_back(idm::get_unlocked<lv2_obj, lv2_timer>(id));
    });
  });
}
// Thread main loop: sleeps until the soonest known expiration, then checks
// every registered timer and fires due events.
void lv2_timer_thread::operator()() {
  u64 sleep_time = 0;
  while (true) {
    if (sleep_time != umax) {
      // Scale time (clamped to avoid overflow of the multiplication)
      sleep_time =
          std::min(sleep_time, u64{umax} / 100) * 100 / g_cfg.core.clocks_scale;
    }
    thread_ctrl::wait_for(sleep_time);
    if (thread_ctrl::state() == thread_state::aborting) {
      break;
    }
    sleep_time = umax;
    if (Emu.IsPausedOrReady()) {
      // Emulation paused: poll again in 10ms instead of processing timers.
      sleep_time = 10000;
      continue;
    }
    const u64 _now = get_guest_system_time();
    reader_lock lock(mutex);
    for (const auto &timer : timers) {
      while (lv2_obj::check(timer)) {
        if (thread_ctrl::state() == thread_state::aborting) {
          break;
        }
        if (const u64 advised_sleep_time = timer->check(_now)) {
          // Track the soonest expiration across all timers.
          if (sleep_time > advised_sleep_time) {
            sleep_time = advised_sleep_time;
          }
          break;
        }
      }
    }
  }
}
// Create a timer object and register it with the timer thread.
// Writes the new id to *timer_id on success; CELL_EAGAIN when out of ids.
error_code sys_timer_create(ppu_thread &ppu, vm::ptr<u32> timer_id) {
  ppu.state += cpu_flag::wait;
  sys_timer.warning("sys_timer_create(timer_id=*0x%x)", timer_id);
  if (auto ptr = idm::make_ptr<lv2_obj, lv2_timer>()) {
    auto &thread = g_fxo->get<named_thread<lv2_timer_thread>>();
    {
      std::lock_guard lock(thread.mutex);
      // Theoretically could have been destroyed by sys_timer_destroy by now
      if (auto it = std::find(thread.timers.begin(), thread.timers.end(), ptr);
          it == thread.timers.end()) {
        thread.timers.emplace_back(std::move(ptr));
      }
    }
    ppu.check_state();
    *timer_id = idm::last_id();
    return CELL_OK;
  }
  return CELL_EAGAIN;
}
// Destroy a timer. Fails with EISCONN while an event queue is still
// connected; also removes the timer from the timer thread's list.
error_code sys_timer_destroy(ppu_thread &ppu, u32 timer_id) {
  ppu.state += cpu_flag::wait;
  sys_timer.warning("sys_timer_destroy(timer_id=0x%x)", timer_id);
  auto timer = idm::withdraw<lv2_obj, lv2_timer>(
      timer_id, [&](lv2_timer &timer) -> CellError {
        if (reader_lock lock(timer.mutex); lv2_obj::check(timer.port)) {
          return CELL_EISCONN;
        }
        timer.exists--;
        return {};
      });
  if (!timer) {
    return CELL_ESRCH;
  }
  if (timer.ret) {
    return timer.ret;
  }
  auto &thread = g_fxo->get<named_thread<lv2_timer_thread>>();
  std::lock_guard lock(thread.mutex);
  if (auto it =
          std::find(thread.timers.begin(), thread.timers.end(), timer.ptr);
      it != thread.timers.end()) {
    thread.timers.erase(it);
  }
  return CELL_OK;
}
// Copy the timer's current state/expiration info into guest memory.
error_code sys_timer_get_information(ppu_thread &ppu, u32 timer_id,
                                     vm::ptr<sys_timer_information_t> info) {
  ppu.state += cpu_flag::wait;
  sys_timer.trace("sys_timer_get_information(timer_id=0x%x, info=*0x%x)",
                  timer_id, info);
  sys_timer_information_t _info{};
  const u64 now = get_guest_system_time();
  const auto timer =
      idm::check<lv2_obj, lv2_timer>(timer_id, [&](lv2_timer &timer) {
        std::lock_guard lock(timer.mutex);
        // Deliver a pending expiration first so the reported state is fresh.
        timer.check_unlocked(now);
        timer.get_information(_info);
      });
  if (!timer) {
    return CELL_ESRCH;
  }
  // Guest memory is only written after check_state().
  ppu.check_state();
  std::memcpy(info.get_ptr(), &_info, info.size());
  return CELL_OK;
}
// Arm a timer. base_time is an absolute guest time in microseconds;
// period != 0 makes the timer periodic (minimum 100us). A oneshot whose
// base_time is already in the past reports ETIMEDOUT (not treated as error).
error_code _sys_timer_start(ppu_thread &ppu, u32 timer_id, u64 base_time,
                            u64 period) {
  ppu.state += cpu_flag::wait;
  (period ? sys_timer.warning : sys_timer.trace)(
      "_sys_timer_start(timer_id=0x%x, base_time=0x%llx, period=0x%llx)",
      timer_id, base_time, period);
  const u64 start_time = get_guest_system_time();
  if (period && period < 100) {
    // Invalid periodic timer
    return CELL_EINVAL;
  }
  const auto timer = idm::check<lv2_obj, lv2_timer>(
      timer_id, [&](lv2_timer &timer) -> CellError {
        std::lock_guard lock(timer.mutex);
        // LV2 Disassembly: Simple nullptr check (assignment test, do not use
        // lv2_obj::check here)
        if (!timer.port) {
          return CELL_ENOTCONN;
        }
        // Deliver any pending expiration before rearming.
        timer.check_unlocked(start_time);
        if (timer.state != SYS_TIMER_STATE_STOP) {
          return CELL_EBUSY;
        }
        if (!period && start_time >= base_time) {
          // Invalid oneshot
          return CELL_ETIMEDOUT;
        }
        // First expiration time, by case:
        const u64 expire =
            period == 0 ? base_time : // oneshot
                base_time == 0
                    ? utils::add_saturate(start_time, period)
                    :
                    // periodic timer with no base (using start time as base)
                    start_time < utils::add_saturate(base_time, period)
                    ? utils::add_saturate(base_time, period)
                    :
                    // periodic with base time over start time
                    [&]() -> u64 // periodic timer base before start time (align
                                 // to be at least a period over start time)
                {
                  // Optimized from a loop in LV2:
                  // do
                  // {
                  //   base_time += period;
                  // }
                  // while (base_time < start_time);
                  const u64 start_time_with_base_time_reminder =
                      utils::add_saturate(start_time - start_time % period,
                                          base_time % period);
                  return utils::add_saturate(
                      start_time_with_base_time_reminder,
                      start_time_with_base_time_reminder < start_time ? period
                                                                      : 0);
                }();
        timer.expire = expire;
        timer.period = period;
        timer.state = SYS_TIMER_STATE_RUN;
        return {};
      });
  if (!timer) {
    return CELL_ESRCH;
  }
  if (timer.ret) {
    if (timer.ret == CELL_ETIMEDOUT) {
      // Expected outcome for a late oneshot: don't log it as an error.
      return not_an_error(timer.ret);
    }
    return timer.ret;
  }
  // Wake the timer thread so it picks up the new expiration time.
  g_fxo->get<named_thread<lv2_timer_thread>>()([] {});
  return CELL_OK;
}
// Stop a running timer. A final accurate expiration check runs under the
// timer lock (possibly delivering a due event) before the state is forced
// to STOP. Returns CELL_ESRCH if timer_id does not name a timer.
error_code sys_timer_stop(ppu_thread &ppu, u32 timer_id) {
  ppu.state += cpu_flag::wait;
  // Log the argument, consistent with the sibling timer syscalls.
  sys_timer.trace("sys_timer_stop(timer_id=0x%x)", timer_id);
  const auto timer = idm::check<lv2_obj, lv2_timer>(
      timer_id, [now = get_guest_system_time(),
                 notify = lv2_obj::notify_all_t()](lv2_timer &timer) {
        std::lock_guard lock(timer.mutex);
        // Deliver a pending expiration (if due) before stopping.
        timer.check_unlocked(now);
        timer.state = SYS_TIMER_STATE_STOP;
      });
  if (!timer) {
    return CELL_ESRCH;
  }
  return CELL_OK;
}
// Attach an event queue to the timer; expirations post events to it.
// `name` becomes the event source; when 0, (pid << 32 | timer_id) is used.
// Fails with EISCONN if a queue is already connected.
error_code sys_timer_connect_event_queue(ppu_thread &ppu, u32 timer_id,
                                         u32 queue_id, u64 name, u64 data1,
                                         u64 data2) {
  ppu.state += cpu_flag::wait;
  sys_timer.warning("sys_timer_connect_event_queue(timer_id=0x%x, "
                    "queue_id=0x%x, name=0x%llx, data1=0x%llx, data2=0x%llx)",
                    timer_id, queue_id, name, data1, data2);
  const auto timer = idm::check<lv2_obj, lv2_timer>(
      timer_id, [&](lv2_timer &timer) -> CellError {
        auto found = idm::get_unlocked<lv2_obj, lv2_event_queue>(queue_id);
        if (!found) {
          return CELL_ESRCH;
        }
        std::lock_guard lock(timer.mutex);
        if (lv2_obj::check(timer.port)) {
          return CELL_EISCONN;
        }
        // Connect event queue
        timer.port = found;
        timer.source =
            name ? name : (u64{process_getpid() + 0u} << 32) | u64{timer_id};
        timer.data1 = data1;
        timer.data2 = data2;
        return {};
      });
  if (!timer) {
    return CELL_ESRCH;
  }
  if (timer.ret) {
    return timer.ret;
  }
  return CELL_OK;
}
// Detach the event queue from the timer, force-stopping it first.
// Returns CELL_ENOTCONN when no queue was connected.
error_code sys_timer_disconnect_event_queue(ppu_thread &ppu, u32 timer_id) {
  ppu.state += cpu_flag::wait;
  sys_timer.warning("sys_timer_disconnect_event_queue(timer_id=0x%x)",
                    timer_id);
  const auto timer = idm::check<lv2_obj, lv2_timer>(
      timer_id,
      [now = get_guest_system_time(),
       notify = lv2_obj::notify_all_t()](lv2_timer &timer) -> CellError {
        std::lock_guard lock(timer.mutex);
        // Deliver a pending expiration (if due), then stop the timer.
        timer.check_unlocked(now);
        timer.state = SYS_TIMER_STATE_STOP;
        if (!lv2_obj::check(timer.port)) {
          return CELL_ENOTCONN;
        }
        timer.port.reset();
        return {};
      });
  if (!timer) {
    return CELL_ESRCH;
  }
  if (timer.ret) {
    return timer.ret;
  }
  return CELL_OK;
}
// Sleep the calling PPU thread for `sleep_time` seconds; thin wrapper
// delegating to sys_timer_usleep.
error_code sys_timer_sleep(ppu_thread &ppu, u32 sleep_time) {
  ppu.state += cpu_flag::wait;
  sys_timer.trace("sys_timer_sleep(sleep_time=%d)", sleep_time);
  // Seconds -> microseconds (widened to u64 before multiplying).
  const u64 usec = sleep_time * u64{1000000};
  return sys_timer_usleep(ppu, usec);
}
// Sleep the calling PPU thread for (at least) sleep_time microseconds.
// A zero sleep only yields the host thread.
error_code sys_timer_usleep(ppu_thread &ppu, u64 sleep_time) {
  ppu.state += cpu_flag::wait;
  sys_timer.trace("sys_timer_usleep(sleep_time=0x%llx)", sleep_time);
  if (sleep_time) {
    // Apply the user-configurable usleep addend (may be negative).
    const s64 add_time = g_cfg.core.usleep_addend;
    // Over/underflow checks
    if (add_time >= 0) {
      sleep_time = utils::add_saturate<u64>(sleep_time, add_time);
    } else {
      sleep_time =
          std::max<u64>(1, utils::sub_saturate<u64>(sleep_time, -add_time));
    }
    lv2_obj::sleep(ppu, g_cfg.core.sleep_timers_accuracy <
                            sleep_timers_accuracy_level::_usleep
                        ? sleep_time
                        : 0);
    if (!lv2_obj::wait_timeout(sleep_time, &ppu, true, true)) {
      // Wait did not complete normally; flag the thread state
      // (presumably so the syscall is re-entered — see cpu_flag::again).
      ppu.state += cpu_flag::again;
    }
  } else {
    std::this_thread::yield();
  }
  return CELL_OK;
}

View file

@ -0,0 +1,59 @@
#include "stdafx.h"
#include "sys_trace.h"
#include "Emu/Cell/ErrorCodes.h"
LOG_CHANNEL(sys_trace);
// TODO: DEX/DECR mode support?
// All sys_trace syscalls below are unimplemented stubs: each logs the call
// and reports ENOSYS.
s32 sys_trace_create() {
  sys_trace.todo("sys_trace_create()");
  return CELL_ENOSYS;
}
s32 sys_trace_start() {
  sys_trace.todo("sys_trace_start()");
  return CELL_ENOSYS;
}
s32 sys_trace_stop() {
  sys_trace.todo("sys_trace_stop()");
  return CELL_ENOSYS;
}
s32 sys_trace_update_top_index() {
  sys_trace.todo("sys_trace_update_top_index()");
  return CELL_ENOSYS;
}
s32 sys_trace_destroy() {
  sys_trace.todo("sys_trace_destroy()");
  return CELL_ENOSYS;
}
s32 sys_trace_drain() {
  sys_trace.todo("sys_trace_drain()");
  return CELL_ENOSYS;
}
s32 sys_trace_attach_process() {
  sys_trace.todo("sys_trace_attach_process()");
  return CELL_ENOSYS;
}
s32 sys_trace_allocate_buffer() {
  sys_trace.todo("sys_trace_allocate_buffer()");
  return CELL_ENOSYS;
}
s32 sys_trace_free_buffer() {
  sys_trace.todo("sys_trace_free_buffer()");
  return CELL_ENOSYS;
}
s32 sys_trace_create2() {
  sys_trace.todo("sys_trace_create2()");
  return CELL_ENOSYS;
}

Some files were not shown because too many files have changed in this diff Show more