diff --git a/rpcs3/Emu/Cell/Modules/cellDmux.cpp b/rpcs3/Emu/Cell/Modules/cellDmux.cpp
index d7f6f84f3f..fb1f32837d 100644
--- a/rpcs3/Emu/Cell/Modules/cellDmux.cpp
+++ b/rpcs3/Emu/Cell/Modules/cellDmux.cpp
@@ -169,18 +169,18 @@ public:
 	static const u32 id_count = 1023;
 	SAVESTATE_INIT_POS(34);
 
-	ElementaryStream(Demuxer* dmux, u32 addr, u32 size, u32 fidMajor, u32 fidMinor, u32 sup1, u32 sup2, vm::ptr<CellDmuxCbEsMsg> cbFunc, u32 cbArg, u32 spec);
+	ElementaryStream(Demuxer* dmux, vm::ptr<void> addr, u32 size, u32 fidMajor, u32 fidMinor, u32 sup1, u32 sup2, vm::ptr<CellDmuxCbEsMsg> cbFunc, vm::ptr<void> cbArg, u32 spec);
 
 	Demuxer* dmux;
 	const u32 id = idm::last_id();
-	const u32 memAddr;
+	const vm::ptr<void> memAddr;
 	const u32 memSize;
 	const u32 fidMajor;
 	const u32 fidMinor;
 	const u32 sup1;
 	const u32 sup2;
 	const vm::ptr<CellDmuxCbEsMsg> cbFunc;
-	const u32 cbArg;
+	const vm::ptr<void> cbArg;
 	const u32 spec; //addr
 
 	std::vector<u8> raw_data; // demultiplexed data stream (managed by demuxer thread)
@@ -208,13 +208,13 @@ public:
 	const u32 memAddr;
 	const u32 memSize;
 	const vm::ptr<CellDmuxCbMsg> cbFunc;
-	const u32 cbArg;
+	const vm::ptr<void> cbArg;
 	volatile bool is_finished = false;
 	volatile bool is_closed = false;
 
 	atomic_t<bool> is_running = false;
 	atomic_t<bool> is_working = false;
 
-	Demuxer(u32 addr, u32 size, vm::ptr<CellDmuxCbMsg> func, u32 arg)
+	Demuxer(u32 addr, u32 size, vm::ptr<CellDmuxCbMsg> func, vm::ptr<void> arg)
 		: ppu_thread({}, "", 0)
 		, memAddr(addr)
 		, memSize(size)
@@ -755,11 +755,11 @@ PesHeader::PesHeader(DemuxerStream& stream)
 	is_ok = true;
 }
 
-ElementaryStream::ElementaryStream(Demuxer* dmux, u32 addr, u32 size, u32 fidMajor, u32 fidMinor, u32 sup1, u32 sup2, vm::ptr<CellDmuxCbEsMsg> cbFunc, u32 cbArg, u32 spec)
-	: put(utils::align(addr, 128))
+ElementaryStream::ElementaryStream(Demuxer* dmux, vm::ptr<void> addr, u32 size, u32 fidMajor, u32 fidMinor, u32 sup1, u32 sup2, vm::ptr<CellDmuxCbEsMsg> cbFunc, vm::ptr<void> cbArg, u32 spec)
+	: put(utils::align(addr.addr(), 128))
 	, dmux(dmux)
-	, memAddr(utils::align(addr, 128))
-	, memSize(size - (addr - memAddr))
+	, memAddr(vm::ptr<void>::make(utils::align(addr.addr(), 128)))
+	, memSize(size - (addr.addr() - memAddr.addr()))
 	, fidMajor(fidMajor)
 	, fidMinor(fidMinor)
 	, sup1(sup1)
@@ -788,9 +788,9 @@ bool ElementaryStream::is_full(u32 space)
 	{
 		return first - put < space + 128;
 	}
-	else if (put + space + 128 > memAddr + memSize)
+	else if (put + space + 128 > memAddr.addr() + memSize)
 	{
-		return first - memAddr < space + 128;
+		return first - memAddr.addr() < space + 128;
 	}
 	else
 	{
@@ -816,35 +816,35 @@ void ElementaryStream::push_au(u32 size, u64 dts, u64 pts, u64 userdata, bool ra
 		std::lock_guard lock(m_mutex);
 
 		ensure(!is_full(size));
 
-		if (put + size + 128 > memAddr + memSize)
+		if (put + size + 128 > memAddr.addr() + memSize)
 		{
-			put = memAddr;
+			put = memAddr.addr();
 		}
 
 		std::memcpy(vm::base(put + 128), raw_data.data(), size);
 		raw_data.erase(raw_data.begin(), raw_data.begin() + size);
 
 		auto info = vm::ptr<CellDmuxAuInfoEx>::make(put);
-		info->auAddr = put + 128;
+		info->auAddr.set(put + 128);
 		info->auSize = size;
 		info->dts.lower = static_cast<u32>(dts);
 		info->dts.upper = static_cast<u32>(dts >> 32);
 		info->pts.lower = static_cast<u32>(pts);
 		info->pts.upper = static_cast<u32>(pts >> 32);
 		info->isRap = rap;
-		info->reserved = 0;
+		info->auMaxSize = 0;
 		info->userData = userdata;
 
 		auto spec = vm::ptr<u32>::make(put + u32{sizeof(CellDmuxAuInfoEx)});
 		*spec = specific;
 
 		auto inf = vm::ptr<CellDmuxAuInfo>::make(put + 64);
-		inf->auAddr = put + 128;
+		inf->auAddr.set(put + 128);
 		inf->auSize = size;
-		inf->dtsLower = static_cast<u32>(dts);
-		inf->dtsUpper = static_cast<u32>(dts >> 32);
-		inf->ptsLower = static_cast<u32>(pts);
-		inf->ptsUpper = static_cast<u32>(pts >> 32);
+		inf->dts.lower = static_cast<u32>(dts);
+		inf->dts.upper = static_cast<u32>(dts >> 32);
+		inf->pts.lower = static_cast<u32>(pts);
+		inf->pts.upper = static_cast<u32>(pts >> 32);
 		inf->auMaxSize = 0; // ?????
 		inf->userData = userdata;
@@ -927,7 +927,7 @@ bool ElementaryStream::peek(u32& out_data, bool no_ex, u32& out_spec, bool updat
 void ElementaryStream::reset()
 {
 	std::lock_guard lock(m_mutex);
-	put = memAddr;
+	put = memAddr.addr();
 	entries.clear();
 	put_count = 0;
 	got_count = 0;
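Reviewer aside, not part of the patch: the `is_full`/`push_au` hunks above all encode the same ring-buffer invariant — each queued access unit occupies a 128-byte header region (`CellDmuxAuInfoEx` at `put`, `CellDmuxAuInfo` at `put + 64`) followed by the payload at `put + 128`, and an entry never straddles the end of the buffer. A minimal sketch of that check, with guest pointers replaced by plain `u32` addresses and the initial `first >= put` condition assumed from the surrounding code:

```cpp
using u32 = unsigned int;

// Illustrative mirror of ElementaryStream::is_full(): 'base'/'size' stand in
// for memAddr/memSize, 'first' is the oldest queued entry, 'put' the write cursor.
bool is_full(u32 base, u32 size, u32 first, u32 put, u32 space)
{
	if (first >= put)
		return first - put < space + 128;  // free gap lies between put and first
	if (put + space + 128 > base + size)   // entry would cross the end: writer wraps to base
		return first - base < space + 128; // so only the gap at the start of the buffer counts
	return false;
}
```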
diff --git a/rpcs3/Emu/Cell/Modules/cellDmux.h b/rpcs3/Emu/Cell/Modules/cellDmux.h
index 7c31bbf105..dc17cb3314 100644
--- a/rpcs3/Emu/Cell/Modules/cellDmux.h
+++ b/rpcs3/Emu/Cell/Modules/cellDmux.h
@@ -33,118 +33,6 @@ enum CellDmuxEsMsgType : s32
 	CELL_DMUX_ES_MSG_TYPE_FLUSH_DONE = 1,
 };
 
-enum CellDmuxPamfM2vLevel : s32
-{
-	CELL_DMUX_PAMF_M2V_MP_LL = 0,
-	CELL_DMUX_PAMF_M2V_MP_ML,
-	CELL_DMUX_PAMF_M2V_MP_H14,
-	CELL_DMUX_PAMF_M2V_MP_HL,
-};
-
-enum CellDmuxPamfAvcLevel : s32
-{
-	CELL_DMUX_PAMF_AVC_LEVEL_2P1 = 21,
-	CELL_DMUX_PAMF_AVC_LEVEL_3P0 = 30,
-	CELL_DMUX_PAMF_AVC_LEVEL_3P1 = 31,
-	CELL_DMUX_PAMF_AVC_LEVEL_3P2 = 32,
-	CELL_DMUX_PAMF_AVC_LEVEL_4P1 = 41,
-	CELL_DMUX_PAMF_AVC_LEVEL_4P2 = 42,
-};
-
-struct CellDmuxPamfAuSpecificInfoM2v
-{
-	be_t<u32> reserved1;
-};
-
-struct CellDmuxPamfAuSpecificInfoAvc
-{
-	be_t<u32> reserved1;
-};
-
-struct CellDmuxPamfAuSpecificInfoLpcm
-{
-	u8 channelAssignmentInfo;
-	u8 samplingFreqInfo;
-	u8 bitsPerSample;
-};
-
-struct CellDmuxPamfAuSpecificInfoAc3
-{
-	be_t<u32> reserved1;
-};
-
-struct CellDmuxPamfAuSpecificInfoAtrac3plus
-{
-	be_t<u32> reserved1;
-};
-
-struct CellDmuxPamfAuSpecificInfoUserData
-{
-	be_t<u32> reserved1;
-};
-
-struct CellDmuxPamfEsSpecificInfoM2v
-{
-	be_t<u32> profileLevel;
-};
-
-struct CellDmuxPamfEsSpecificInfoAvc
-{
-	be_t<u32> level;
-};
-
-struct CellDmuxPamfEsSpecificInfoLpcm
-{
-	be_t<u32> samplingFreq;
-	be_t<u32> numOfChannels;
-	be_t<u32> bitsPerSample;
-};
-
-struct CellDmuxPamfEsSpecificInfoAc3
-{
-	be_t<u32> reserved1;
-};
-
-struct CellDmuxPamfEsSpecificInfoAtrac3plus
-{
-	be_t<u32> reserved1;
-};
-
-struct CellDmuxPamfEsSpecificInfoUserData
-{
-	be_t<u32> reserved1;
-};
-
-enum CellDmuxPamfSamplingFrequency : s32
-{
-	CELL_DMUX_PAMF_FS_48K = 48000,
-};
-
-enum CellDmuxPamfBitsPerSample : s32
-{
-	CELL_DMUX_PAMF_BITS_PER_SAMPLE_16 = 16,
-	CELL_DMUX_PAMF_BITS_PER_SAMPLE_24 = 24,
-};
-
-enum CellDmuxPamfLpcmChannelAssignmentInfo : s32
-{
-	CELL_DMUX_PAMF_LPCM_CH_M1 = 1,
-	CELL_DMUX_PAMF_LPCM_CH_LR = 3,
-	CELL_DMUX_PAMF_LPCM_CH_LRCLSRSLFE = 9,
-	CELL_DMUX_PAMF_LPCM_CH_LRCLSCS1CS2RSLFE = 11,
-};
-
-enum CellDmuxPamfLpcmFs : s32
-{
-	CELL_DMUX_PAMF_LPCM_FS_48K = 1,
-};
-
-enum CellDmuxPamfLpcmBitsPerSamples : s32
-{
-	CELL_DMUX_PAMF_LPCM_BITS_PER_SAMPLE_16 = 1,
-	CELL_DMUX_PAMF_LPCM_BITS_PER_SAMPLE_24 = 3,
-};
-
 struct CellDmuxMsg
 {
 	be_t<u32> msgType; // CellDmuxMsgType
@@ -163,12 +51,6 @@ struct CellDmuxType
 	be_t<u32> reserved[2];
 };
 
-struct CellDmuxPamfSpecificInfo
-{
-	be_t<u32> thisSize;
-	b8 programEndCodeCb;
-};
-
 struct CellDmuxType2
 {
 	be_t<s32> streamType; // CellDmuxStreamType
@@ -177,7 +59,7 @@ struct CellDmuxType2
 
 struct CellDmuxResource
 {
-	be_t<u32> memAddr;
+	vm::bptr<void> memAddr;
 	be_t<u32> memSize;
 	be_t<u32> ppuThreadPriority;
 	be_t<u32> ppuThreadStackSize;
@@ -187,7 +69,7 @@ struct CellDmuxResource
 
 struct CellDmuxResourceEx
 {
-	be_t<u32> memAddr;
+	vm::bptr<void> memAddr;
 	be_t<u32> memSize;
 	be_t<u32> ppuThreadPriority;
 	be_t<u32> ppuThreadStackSize;
@@ -227,16 +109,16 @@ struct CellDmuxResource2
 	be_t<u32> shit[4];
 };
 
-using CellDmuxCbMsg = u32(u32 demuxerHandle, vm::ptr<CellDmuxMsg> demuxerMsg, u32 cbArg);
+using CellDmuxCbMsg = u32(u32 demuxerHandle, vm::cptr<CellDmuxMsg> demuxerMsg, vm::ptr<void> cbArg);
 
-using CellDmuxCbEsMsg = u32(u32 demuxerHandle, u32 esHandle, vm::ptr<CellDmuxEsMsg> esMsg, u32 cbArg);
+using CellDmuxCbEsMsg = u32(u32 demuxerHandle, u32 esHandle, vm::cptr<CellDmuxEsMsg> esMsg, vm::ptr<void> cbArg);
 
 // Used for internal callbacks as well
 template <typename F>
 struct DmuxCb
 {
 	vm::bptr<F> cbFunc;
-	be_t<u32> cbArg;
+	vm::bptr<void> cbArg;
 };
 
 using CellDmuxCb = DmuxCb<CellDmuxCbMsg>;
@@ -250,42 +132,50 @@ struct CellDmuxAttr
 	be_t<u32> demuxerVerLower;
 };
 
+struct CellDmuxPamfAttr
+{
+	be_t<u32> maxEnabledEsNum;
+	be_t<u32> version;
+	be_t<u32> memSize;
+};
+
 struct CellDmuxEsAttr
 {
 	be_t<u32> memSize;
};
 
+struct CellDmuxPamfEsAttr
+{
+	be_t<u32> auQueueMaxSize;
+	be_t<u32> memSize;
+	be_t<u32> specificInfoSize;
+};
+
 struct CellDmuxEsResource
 {
-	be_t<u32> memAddr;
+	vm::bptr<void> memAddr;
 	be_t<u32> memSize;
 };
 
 struct CellDmuxAuInfo
 {
-	be_t<u32> auAddr;
+	vm::bptr<void> auAddr;
 	be_t<u32> auSize;
 	be_t<u32> auMaxSize;
-	be_t<u64> userData;
-	be_t<u32> ptsUpper;
-	be_t<u32> ptsLower;
-	be_t<u32> dtsUpper;
-	be_t<u32> dtsLower;
-};
-
-struct CellDmuxAuInfoEx
-{
-	be_t<u32> auAddr;
-	be_t<u32> auSize;
-	be_t<u32> reserved;
 	b8 isRap;
 	be_t<u64> userData;
 	CellCodecTimeStamp pts;
 	CellCodecTimeStamp dts;
 };
 
-struct CellDmuxPamfAttr;
-struct CellDmuxPamfEsAttr;
+using CellDmuxAuInfoEx = CellDmuxAuInfo;
+
+struct DmuxAuInfo
+{
+	CellDmuxAuInfo info;
+	vm::bptr<void> specific_info;
+	be_t<u32> specific_info_size;
+};
 
 using DmuxNotifyDemuxDone = error_code(vm::ptr<void>, u32, vm::ptr<void>);
 using DmuxNotifyFatalErr = error_code(vm::ptr<void>, u32, vm::ptr<void>);
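Reviewer aside, not part of the patch: the header change above turns the opaque `u32 cbArg` into a typed guest pointer that is always appended as the last argument of the guest callback. A hedged sketch of how a `DmuxCb<F>`-style pair is typically invoked (names here are mine, not from the patch):

```cpp
// 'F' is the guest function type (e.g. CellDmuxCbMsg); vm::bptr is a
// big-endian guest pointer, so the pair can live inside guest memory.
template <typename F>
struct guest_cb
{
	vm::bptr<F> func;     // guest function pointer
	vm::bptr<void> arg;   // opaque user argument, now typed instead of a raw u32

	template <typename... Args>
	auto operator()(ppu_thread& ppu, Args&&... args) const
	{
		// The user argument rides along last, matching the
		// CellDmuxCbMsg / CellDmuxCbEsMsg signatures above.
		return func(ppu, std::forward<Args>(args)..., arg);
	}
};
```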
diff --git a/rpcs3/Emu/Cell/Modules/cellDmuxPamf.cpp b/rpcs3/Emu/Cell/Modules/cellDmuxPamf.cpp
index 7c0fd8ec39..cc2fd6af42 100644
--- a/rpcs3/Emu/Cell/Modules/cellDmuxPamf.cpp
+++ b/rpcs3/Emu/Cell/Modules/cellDmuxPamf.cpp
@@ -1,6 +1,11 @@
 #include "stdafx.h"
 #include "Emu/Cell/PPUModule.h"
+#include "Emu/Cell/lv2/sys_cond.h"
+#include "Emu/Cell/lv2/sys_mutex.h"
+#include "Emu/Cell/lv2/sys_ppu_thread.h"
 #include "Emu/Cell/lv2/sys_sync.h"
+#include "sysPrxForUser.h"
+#include "util/asm.hpp"
 
 #include "cellDmuxPamf.h"
 
 #include <ranges>
@@ -10,6 +15,24 @@ vm::gvar<CellDmuxCoreOps> g_cell_dmux_core_ops_raw_es;
 
 LOG_CHANNEL(cellDmuxPamf)
 
+template <>
+void fmt_class_string<CellDmuxPamfError>::format(std::string& out, u64 arg)
+{
+	format_enum(out, arg, [](CellDmuxPamfError value)
+	{
+		switch (value)
+		{
+		STR_CASE(CELL_DMUX_PAMF_ERROR_BUSY);
+		STR_CASE(CELL_DMUX_PAMF_ERROR_ARG);
+		STR_CASE(CELL_DMUX_PAMF_ERROR_UNKNOWN_STREAM);
+		STR_CASE(CELL_DMUX_PAMF_ERROR_NO_MEMORY);
+		STR_CASE(CELL_DMUX_PAMF_ERROR_FATAL);
+		}
+
+		return unknown;
+	});
+}
+
 inline std::pair<u32, u32> dmuxPamfStreamIdToTypeChannel(u16 stream_id, u16 private_stream_id)
 {
 	if ((stream_id & 0xf0) == 0xe0)
 	{
@@ -1121,109 +1144,1659 @@ void dmux_pamf_spu_context::save(utils::serial& ar)
 
 // PPU thread
 
+template <DmuxPamfCommandType type>
+void DmuxPamfContext::send_spu_command_and_wait(ppu_thread& ppu, bool waiting_for_spu_state, auto&&... cmd_params)
+{
+	if (!waiting_for_spu_state)
+	{
+		// The caller is supposed to own the mutex until the SPU thread has consumed the command, so the queue should always be empty here
+		ensure(cmd_queue.emplace(type, std::forward<decltype(cmd_params)>(cmd_params)...), "The command queue wasn't empty");
+	}
+
+	lv2_obj::sleep(ppu);
+
+	// Block until the SPU thread has consumed the command
+	cmd_result_queue.wait();
+
+	if (ppu.check_state())
+	{
+		ppu.state += cpu_flag::again;
+		return;
+	}
+
+	be_t<u32> result{};
+	ensure(cmd_result_queue.pop(result), "The result queue was empty");
+	ensure(result == static_cast<u32>(type) + 1, "The HLE SPU thread sent an invalid result");
+}
+
+DmuxPamfElementaryStream* DmuxPamfContext::find_es(u16 stream_id, u16 private_stream_id)
+{
+	const auto it = dmuxPamfStreamIdToTypeChannel(stream_id, private_stream_id).first == DMUX_PAMF_STREAM_TYPE_INDEX_VIDEO
+		? std::ranges::find_if(elementary_streams | std::views::reverse, [&](const auto& es){ return es && es->stream_id == stream_id; })
+		: std::ranges::find_if(elementary_streams | std::views::reverse, [&](const auto& es){ return es && es->stream_id == stream_id && es->private_stream_id == private_stream_id; });
+
+	return it != std::ranges::rend(elementary_streams) ? it->get_ptr() : nullptr;
+}
+
+error_code DmuxPamfContext::wait_au_released_or_stream_reset(ppu_thread& ppu, u64 au_queue_full_bitset, b8& stream_reset_started, dmux_pamf_state& savestate)
+{
+	if (savestate == dmux_pamf_state::waiting_for_au_released)
+	{
+		goto label1_waiting_for_au_released_state;
+	}
+
+	if (sys_mutex_lock(ppu, mutex, 0) != CELL_OK)
+	{
+		return CELL_DMUX_PAMF_ERROR_FATAL;
+	}
+
+	if (ppu.state & cpu_flag::again)
+	{
+		return {};
+	}
+
+	if (au_queue_full_bitset)
+	{
+		cellDmuxPamf.trace("Access unit queue of elementary stream no. %d is full. Waiting for access unit to be released...", std::countr_zero(au_queue_full_bitset));
+
+		while (!(au_queue_full_bitset & au_released_bitset) && !stream_reset_requested)
+		{
+			savestate = dmux_pamf_state::waiting_for_au_released;
+			label1_waiting_for_au_released_state:
+
+			if (sys_cond_wait(ppu, cond, 0) != CELL_OK)
+			{
+				sys_mutex_unlock(ppu, mutex);
+				return CELL_DMUX_PAMF_ERROR_FATAL;
+			}
+
+			if (ppu.state & cpu_flag::again)
+			{
+				return {};
+			}
+		}
+
+		cellDmuxPamf.trace("Access unit released");
+	}
+
+	stream_reset_started = stream_reset_requested;
+	stream_reset_requested = false;
+
+	au_released_bitset = 0;
+
+	return sys_mutex_unlock(ppu, mutex) != CELL_OK ? static_cast<error_code>(CELL_DMUX_PAMF_ERROR_FATAL) : CELL_OK;
+}
+
+template <bool reset>
+error_code DmuxPamfContext::set_au_reset(ppu_thread& ppu)
+{
+	if (sys_mutex_lock(ppu, mutex, 0) != CELL_OK)
+	{
+		return CELL_DMUX_PAMF_ERROR_FATAL;
+	}
+
+	if (ppu.state & cpu_flag::again)
+	{
+		return {};
+	}
+
+	std::ranges::for_each(elementary_streams | std::views::filter([](auto es){ return !!es; }), [](auto& reset_next_au) { reset_next_au = reset; }, &DmuxPamfElementaryStream::reset_next_au);
+
+	return sys_mutex_unlock(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
+}
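Reviewer aside, not part of the patch: `wait_au_released_or_stream_reset` above keys the whole flow-control scheme off two 64-bit bitsets — one bit per elementary-stream slot for "queue full" and "AU released". A standard-library analogue of that handshake (illustrative names, `std::condition_variable` standing in for the LV2 cond):

```cpp
#include <condition_variable>
#include <cstdint>
#include <mutex>

struct au_release_gate
{
	std::mutex m;
	std::condition_variable cv;
	std::uint64_t released = 0;  // set by the AU-release path, bit index = ES slot
	bool reset_requested = false;

	// Returns true if woken by a matching release, false if by a stream reset.
	bool wait(std::uint64_t full_bitset)
	{
		std::unique_lock lock(m);
		cv.wait(lock, [&]{ return (full_bitset & released) || reset_requested; });
		const bool released_hit = full_bitset & released;
		released = 0; // consumed, same as au_released_bitset = 0 above
		return released_hit;
	}
};
```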
+
+template <typename F>
+error_code DmuxPamfContext::callback(ppu_thread& ppu, DmuxCb<F> cb, auto&&... args)
+{
+	std::unique_lock savestate_lock{ g_fxo->get<hle_locks_t>(), std::try_to_lock };
+
+	if (!savestate_lock.owns_lock())
+	{
+		ppu.state += cpu_flag::again;
+		return {};
+	}
+
+	return cb.cbFunc(ppu, std::forward<decltype(args)>(args)..., cb.cbArg);
+}
+
+void DmuxPamfContext::run_spu_thread()
+{
+	hle_spu_thread_id = idm::make<dmux_pamf_spu_thread>(cmd_queue_addr, cmd_result_queue_addr, stream_info_queue_addr, event_queue_addr);
+}
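Reviewer aside, not part of the patch: `exec()` below is written as one big resumable coroutine-by-hand — a serialized state enum plus a dispatch `switch` that `goto`s back into the interrupted step after a savestate load. A minimal sketch of that idiom (illustrative types, single iteration shown):

```cpp
// The state enum is serialized with the savestate; on resume, the switch
// jumps back to the label of the step that was interrupted.
enum class step : unsigned char { initial, waiting_a, waiting_b };

void run(step& savestate)
{
	switch (savestate)
	{
	case step::initial: break;
	case step::waiting_a: goto resume_a;
	case step::waiting_b: goto resume_b;
	}

	for (;;)
	{
		savestate = step::waiting_a;
	resume_a:
		// ...blocking wait A; if interrupted for a savestate, return with
		// 'savestate' already set so the next call re-enters at resume_a

		savestate = step::waiting_b;
	resume_b:
		// ...blocking wait B
		return;
	}
}
```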
+
+void DmuxPamfContext::exec(ppu_thread& ppu)
+{
+	// These are repeated a lot in this function, in my opinion using defines here makes it more readable
+#define SEND_FATAL_ERR_AND_CONTINUE()\
+	savestate = dmux_pamf_state::sending_fatal_err;\
+	callback(ppu, notify_fatal_err, _this, CELL_OK); /* LLE uses CELL_OK as error code */\
+	if (ppu.state & cpu_flag::again)\
+	{\
+		return;\
+	}\
+	continue
+
+#define RETURN_ON_CPU_FLAG_AGAIN()\
+	if (ppu.state & cpu_flag::again)\
+		return
+
+	switch (savestate)
+	{
+	case dmux_pamf_state::initial: break;
+	case dmux_pamf_state::waiting_for_au_released: goto label1_waiting_for_au_released_state;
+	case dmux_pamf_state::waiting_for_au_released_error: goto label2_waiting_for_au_released_error_state;
+	case dmux_pamf_state::waiting_for_event: goto label3_waiting_for_event_state;
+	case dmux_pamf_state::starting_demux_done: goto label4_starting_demux_done_state;
+	case dmux_pamf_state::starting_demux_done_mutex_lock_error: goto label5_starting_demux_done_mutex_lock_error_state;
+	case dmux_pamf_state::starting_demux_done_mutex_unlock_error: goto label6_starting_demux_done_mutex_unlock_error_state;
+	case dmux_pamf_state::starting_demux_done_checking_stream_reset: goto label7_starting_demux_done_check_stream_reset_state;
+	case dmux_pamf_state::starting_demux_done_checking_stream_reset_error: goto label8_start_demux_done_check_stream_reset_error_state;
+	case dmux_pamf_state::setting_au_reset: goto label9_setting_au_reset_state;
+	case dmux_pamf_state::setting_au_reset_error: goto label10_setting_au_reset_error_state;
+	case dmux_pamf_state::processing_event: goto label11_processing_event_state;
+	case dmux_pamf_state::au_found_waiting_for_spu: goto label12_au_found_waiting_for_spu_state;
+	case dmux_pamf_state::unsetting_au_cancel: goto label13_unsetting_au_cancel_state;
+	case dmux_pamf_state::demux_done_notifying: goto label14_demux_done_notifying_state;
+	case dmux_pamf_state::demux_done_mutex_lock: goto label15_demux_done_mutex_lock_state;
+	case dmux_pamf_state::demux_done_cond_signal: goto label16_demux_done_cond_signal_state;
+	case dmux_pamf_state::resuming_demux_mutex_lock: goto label17_resuming_demux_mutex_lock_state;
+	case dmux_pamf_state::resuming_demux_waiting_for_spu: goto label18_resuming_demux_waiting_for_spu_state;
+	case dmux_pamf_state::sending_fatal_err:
+		callback(ppu, notify_fatal_err, _this, CELL_OK);
+		RETURN_ON_CPU_FLAG_AGAIN();
+	}
+
+	for (;;)
+	{
+		savestate = dmux_pamf_state::initial;
+
+		stream_reset_started = false;
+
+		// If the access unit queue of an enabled elementary stream is full, wait until the user releases an access unit or requests a stream reset before processing the next event
+		label1_waiting_for_au_released_state:
+
+		if (wait_au_released_or_stream_reset(ppu, au_queue_full_bitset, stream_reset_started, savestate) != CELL_OK)
+		{
+			savestate = dmux_pamf_state::waiting_for_au_released_error;
+			label2_waiting_for_au_released_error_state:
+
+			callback(ppu, notify_fatal_err, _this, CELL_OK);
+		}
+
+		RETURN_ON_CPU_FLAG_AGAIN();
+
+		// Wait for the next event
+		if (!event_queue.peek(event))
+		{
+			savestate = dmux_pamf_state::waiting_for_event;
+			label3_waiting_for_event_state:
+
+			cellDmuxPamf.trace("Waiting for the next event...");
+
+			lv2_obj::sleep(ppu);
+			event_queue.wait();
+
+			if (ppu.check_state())
+			{
+				ppu.state += cpu_flag::again;
+				return;
+			}
+
+			ensure(event_queue.peek(event));
+		}
+
+		cellDmuxPamf.trace("Event type: %d", static_cast<u32>(event.type.get()));
+
+		// If the event is a demux done event, set the sequence state to resetting and check for a potential stream reset request again
+		if (event.type == DmuxPamfEventType::demux_done)
+		{
+			savestate = dmux_pamf_state::starting_demux_done;
+			label4_starting_demux_done_state:
+
+			if (sys_mutex_lock(ppu, mutex, 0) != CELL_OK)
+			{
+				savestate = dmux_pamf_state::starting_demux_done_mutex_lock_error;
+				label5_starting_demux_done_mutex_lock_error_state:
+
+				callback(ppu, notify_fatal_err, _this, CELL_OK);
+			}
+
+			RETURN_ON_CPU_FLAG_AGAIN();
+
+			sequence_state = DmuxPamfSequenceState::resetting;
+
+			if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
+			{
+				savestate = dmux_pamf_state::starting_demux_done_mutex_unlock_error;
+				label6_starting_demux_done_mutex_unlock_error_state:
+
+				callback(ppu, notify_fatal_err, _this, CELL_OK);
+
+				RETURN_ON_CPU_FLAG_AGAIN();
+			}
+
+			if (!stream_reset_started)
+			{
+				savestate = dmux_pamf_state::starting_demux_done_checking_stream_reset;
+				label7_starting_demux_done_check_stream_reset_state:
+
+				if (wait_au_released_or_stream_reset(ppu, 0, stream_reset_started, savestate) != CELL_OK)
+				{
+					savestate = dmux_pamf_state::starting_demux_done_checking_stream_reset_error;
+					label8_start_demux_done_check_stream_reset_error_state:
+
+					callback(ppu, notify_fatal_err, _this, CELL_OK);
+				}
+
+				RETURN_ON_CPU_FLAG_AGAIN();
+			}
+		}
+
+		// If the user requested a stream reset, set the reset flag for every enabled elementary stream
+		if (stream_reset_started)
+		{
+			stream_reset_in_progress = true;
+
+			savestate = dmux_pamf_state::setting_au_reset;
+			label9_setting_au_reset_state:
+
+			if (set_au_reset<true>(ppu) != CELL_OK)
+			{
+				savestate = dmux_pamf_state::setting_au_reset_error;
+				label10_setting_au_reset_error_state:
+
+				callback(ppu, notify_fatal_err, _this, CELL_OK);
+			}
+
+			RETURN_ON_CPU_FLAG_AGAIN();
+		}
+
+		savestate = dmux_pamf_state::processing_event;
+		label11_processing_event_state:
+
+		switch (event.type)
+		{
+		case DmuxPamfEventType::au_found:
+		{
+			if (sys_mutex_lock(ppu, mutex, 0) != CELL_OK)
+			{
+				SEND_FATAL_ERR_AND_CONTINUE();
+			}
+
+			RETURN_ON_CPU_FLAG_AGAIN();
+
+			label12_au_found_waiting_for_spu_state:
+
+			DmuxPamfElementaryStream* const es = find_es(event.au_found.stream_id, event.au_found.private_stream_id);
+
+			// If the elementary stream of the found access unit is not enabled, don't do anything
+			if (!es || es->_this.get_ptr() != es || es->es_id != event.au_found.user_data)
+			{
+				if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
+				{
+					SEND_FATAL_ERR_AND_CONTINUE();
+				}
+
+				break;
+			}
+
+			// If a stream reset was requested, don't notify the user of any found access units that are still in the event queue
+			// We need to send the SPU thread the address of the first found access unit for each elementary stream still in the event queue,
+			// so that it can remove the access units from the queue.
+			if (stream_reset_in_progress)
+			{
+				if (es->reset_next_au)
+				{
+					send_spu_command_and_wait(ppu, savestate == dmux_pamf_state::au_found_waiting_for_spu,
+						event.au_found.stream_id, event.au_found.private_stream_id, event.au_found.au_addr);
+
+					if (ppu.state & cpu_flag::again)
+					{
+						savestate = dmux_pamf_state::au_found_waiting_for_spu;
+						return;
+					}
+
+					es->reset_next_au = false;
+				}
+
+				if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
+				{
+					SEND_FATAL_ERR_AND_CONTINUE();
+				}
+
+				break;
+			}
+
+			const vm::var<DmuxPamfAuInfo> au_info;
+			au_info->addr = std::bit_cast<vm::bptr<void>>(event.au_found.au_addr);
+			au_info->size = event.au_found.au_size;
+			au_info->pts = event.au_found.pts;
+			au_info->dts = event.au_found.dts;
+			au_info->user_data = user_data;
+			au_info->specific_info = es->_this.ptr(&DmuxPamfElementaryStream::au_specific_info);
+			au_info->specific_info_size = es->au_specific_info_size;
+			au_info->is_rap = static_cast<bool>(event.au_found.is_rap);
+
+			if (!is_raw_es)
+			{
+				if (dmuxPamfStreamIdToTypeChannel(event.au_found.stream_id, event.au_found.private_stream_id).first == DMUX_PAMF_STREAM_TYPE_INDEX_LPCM)
+				{
+					es->au_specific_info[0] = read_from_ptr<u8>(event.au_found.stream_header_buf) >> 4;
+					es->au_specific_info[1] = read_from_ptr<u8>(event.au_found.stream_header_buf) & 0xf;
+					es->au_specific_info[2] = read_from_ptr<u8>(&event.au_found.stream_header_buf[1]) >> 6;
+				}
+			}
+
+			if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
+			{
+				SEND_FATAL_ERR_AND_CONTINUE();
+			}
+
+			if (callback(ppu, es->notify_au_found, es->_this, au_info) != CELL_OK)
+			{
+				// If the callback returns an error, the access unit queue for this elementary stream is full
+				au_queue_full_bitset |= 1ull << es->this_index;
+				continue;
+			}
+
+			RETURN_ON_CPU_FLAG_AGAIN();
+
+			break;
+		}
+		case DmuxPamfEventType::demux_done:
+		{
+			if (stream_reset_in_progress)
+			{
+				stream_reset_in_progress = false;
+
+				savestate = dmux_pamf_state::unsetting_au_cancel;
+				label13_unsetting_au_cancel_state:
+
+				if (set_au_reset<false>(ppu) != CELL_OK)
+				{
+					SEND_FATAL_ERR_AND_CONTINUE();
+				}
+
+				RETURN_ON_CPU_FLAG_AGAIN();
+			}
+
+			savestate = dmux_pamf_state::demux_done_notifying;
+			label14_demux_done_notifying_state:
+
+			callback(ppu, notify_demux_done, _this, CELL_OK);
+
+			RETURN_ON_CPU_FLAG_AGAIN();
+
+			savestate = dmux_pamf_state::demux_done_mutex_lock;
+			label15_demux_done_mutex_lock_state:
+
+			if (sys_mutex_lock(ppu, mutex, 0) != CELL_OK)
+			{
+				SEND_FATAL_ERR_AND_CONTINUE();
+			}
+
+			RETURN_ON_CPU_FLAG_AGAIN();
+
+			if (sequence_state == DmuxPamfSequenceState::resetting)
+			{
+				sequence_state = DmuxPamfSequenceState::dormant;
+
+				savestate = dmux_pamf_state::demux_done_cond_signal;
+				label16_demux_done_cond_signal_state:
+
+				if (sys_cond_signal_all(ppu, cond) != CELL_OK)
+				{
+					sys_mutex_unlock(ppu, mutex);
+					SEND_FATAL_ERR_AND_CONTINUE();
+				}
+
+				RETURN_ON_CPU_FLAG_AGAIN();
+			}
+
+			if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
+			{
+				SEND_FATAL_ERR_AND_CONTINUE();
+			}
+
+			break;
+		}
+		case DmuxPamfEventType::close:
+		{
+			while (event_queue.pop()){} // Clear the event queue
+			return;
+		}
+		case DmuxPamfEventType::flush_done:
+		{
+			if (sys_mutex_lock(ppu, mutex, 0) != CELL_OK)
+			{
+				SEND_FATAL_ERR_AND_CONTINUE();
+			}
+
+			RETURN_ON_CPU_FLAG_AGAIN();
+
+			DmuxPamfElementaryStream* const es = find_es(event.flush_done.stream_id, event.flush_done.private_stream_id);
+			const bool valid = es && es->_this.get_ptr() == es && es->es_id == event.flush_done.user_data;
+
+			if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
+			{
+				SEND_FATAL_ERR_AND_CONTINUE();
+			}
+
+			if (valid)
+			{
+				callback(ppu, es->notify_flush_done, es->_this);
+
+				RETURN_ON_CPU_FLAG_AGAIN();
+			}
+
+			break;
+		}
+		case DmuxPamfEventType::prog_end_code:
+		{
+			callback(ppu, notify_prog_end_code, _this);
+
+			RETURN_ON_CPU_FLAG_AGAIN();
+
+			break;
+		}
+		case DmuxPamfEventType::fatal_error:
+		{
+			ensure(event_queue.pop());
+
+			SEND_FATAL_ERR_AND_CONTINUE();
+		}
+		default:
+			fmt::throw_exception("Invalid event");
+		}
+
+		ensure(event_queue.pop());
+
+		// If there are too many events enqueued, the SPU thread will stop demuxing until it receives a new command.
+		// Once the event queue size is reduced to two, send a resume command
+		if (enabled_es_num >= 0 && event_queue.size() == 2)
+		{
+			savestate = dmux_pamf_state::resuming_demux_mutex_lock;
+			label17_resuming_demux_mutex_lock_state:
+
+			if (sys_mutex_lock(ppu, mutex, 0) != CELL_OK)
+			{
+				SEND_FATAL_ERR_AND_CONTINUE();
+			}
+
+			RETURN_ON_CPU_FLAG_AGAIN();
+
+			if (enabled_es_num >= 0)
+			{
+				ensure(cmd_queue.emplace(DmuxPamfCommandType::resume));
+
+				savestate = dmux_pamf_state::resuming_demux_waiting_for_spu;
+				label18_resuming_demux_waiting_for_spu_state:
+
+				lv2_obj::sleep(ppu);
+				cmd_result_queue.wait();
+
+				if (ppu.check_state())
+				{
+					ppu.state += cpu_flag::again;
+					return;
+				}
+
+				ensure(cmd_result_queue.pop());
+			}
+
+			if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
+			{
+				SEND_FATAL_ERR_AND_CONTINUE();
+			}
+		}
+
+		au_queue_full_bitset = 0;
+	}
+}
+
+void dmuxPamfEntry(ppu_thread& ppu, vm::ptr<DmuxPamfContext> dmux)
+{
+	dmux->exec(ppu);
+
+	if (ppu.state & cpu_flag::again)
+	{
+		ppu.syscall_args[0] = dmux.addr();
+		return;
+	}
+
+	ppu_execute<&sys_ppu_thread_exit>(ppu, CELL_OK);
+}
+
+error_code dmuxPamfVerifyEsSpecificInfo(u16 stream_id, u16 private_stream_id, bool is_avc, vm::cptr<void> es_specific_info)
+{
+	// The meaning of error code value 5 in here is inconsistent with how it's used elsewhere for some reason
+
+	if (!es_specific_info)
+	{
+		return CELL_OK;
+	}
+
+	switch (dmuxPamfStreamIdToTypeChannel(stream_id, private_stream_id).first)
+	{
+	case DMUX_PAMF_STREAM_TYPE_INDEX_VIDEO:
+		if (is_avc)
+		{
+			if (const u32 level = vm::static_ptr_cast<const CellDmuxPamfEsSpecificInfoAvc>(es_specific_info)->level;
+				level != CELL_DMUX_PAMF_AVC_LEVEL_2P1 && level != CELL_DMUX_PAMF_AVC_LEVEL_3P0 && level != CELL_DMUX_PAMF_AVC_LEVEL_3P1
+				&& level != CELL_DMUX_PAMF_AVC_LEVEL_3P2 && level != CELL_DMUX_PAMF_AVC_LEVEL_4P1 && level != CELL_DMUX_PAMF_AVC_LEVEL_4P2)
+			{
+				return 5;
+			}
+		}
+		else if (vm::static_ptr_cast<const CellDmuxPamfEsSpecificInfoM2v>(es_specific_info)->profileLevel > CELL_DMUX_PAMF_M2V_MP_HL)
+		{
+			return 5;
+		}
+
+		return CELL_OK;
+
+	case DMUX_PAMF_STREAM_TYPE_INDEX_LPCM:
+		if (const auto [sampling_freq, nch, bps] = *vm::static_ptr_cast<const CellDmuxPamfEsSpecificInfoLpcm>(es_specific_info);
+			sampling_freq != CELL_DMUX_PAMF_FS_48K || (nch != 1u && nch != 2u && nch != 6u && nch != 8u) || (bps != CELL_DMUX_PAMF_BITS_PER_SAMPLE_16 && bps != CELL_DMUX_PAMF_BITS_PER_SAMPLE_24))
+		{
+			return 5;
+		}
+
+		return CELL_OK;
+
+	case DMUX_PAMF_STREAM_TYPE_INDEX_AC3:
+	case DMUX_PAMF_STREAM_TYPE_INDEX_ATRACX:
+	case DMUX_PAMF_STREAM_TYPE_INDEX_USER_DATA:
+		return CELL_OK;
+
+	default:
+		return 5;
+	}
+}
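Reviewer aside, not part of the patch: the resume path at the end of `exec()` is a low-watermark backpressure scheme — the SPU side pauses near a full event queue, and the PPU side sends exactly one resume command once the queue drains to two entries. A simplified single-threaded sketch of that rule (std containers, lock-free details omitted):

```cpp
#include <cstddef>
#include <queue>

template <typename T>
struct throttled_queue
{
	static constexpr std::size_t low_watermark = 2; // the threshold exec() checks
	std::queue<T> q;
	bool producer_paused = false;

	bool pop_and_check_resume(T& out)
	{
		if (q.empty())
			return false;
		out = q.front();
		q.pop();
		// Only wake the producer exactly at the watermark, so at most one
		// resume command is sent per pause (mirrors the size() == 2 check).
		if (producer_paused && q.size() == low_watermark)
			producer_paused = false; // i.e. enqueue a resume command
		return true;
	}
};
```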
+
+template <bool raw_es>
+u32 dmuxPamfGetAuSpecificInfoSize(u16 stream_id, u16 private_stream_id, bool is_avc)
+{
+	if constexpr (raw_es)
+	{
+		return 0;
+	}
+
+	switch (dmuxPamfStreamIdToTypeChannel(stream_id, private_stream_id).first)
+	{
+	case DMUX_PAMF_STREAM_TYPE_INDEX_VIDEO:
+		if (is_avc)
+		{
+			return 4; // LLE returns four, even though CellDmuxPamfAuSpecificInfoAvc only has a reserved field like the others
+		}
+
+		return 0;
+
+	case DMUX_PAMF_STREAM_TYPE_INDEX_LPCM:
+	case DMUX_PAMF_STREAM_TYPE_INDEX_AC3: // LLE returns three, even though CellDmuxPamfAuSpecificInfoAc3 only has a reserved field like the others
+		return 3;
+
+	case DMUX_PAMF_STREAM_TYPE_INDEX_ATRACX:
+	case DMUX_PAMF_STREAM_TYPE_INDEX_USER_DATA:
+	default:
+		return 0;
+	}
+}
+
+u32 dmuxPamfGetAuQueueMaxSize(u16 stream_id, u16 private_stream_id)
+{
+	switch (dmuxPamfStreamIdToTypeChannel(stream_id, private_stream_id).first)
+	{
+	case DMUX_PAMF_STREAM_TYPE_INDEX_LPCM:
+		return 0x100;
+
+	case DMUX_PAMF_STREAM_TYPE_INDEX_VIDEO:
+	case DMUX_PAMF_STREAM_TYPE_INDEX_AC3:
+	case DMUX_PAMF_STREAM_TYPE_INDEX_ATRACX:
+	case DMUX_PAMF_STREAM_TYPE_INDEX_USER_DATA:
+		return 0x40;
+
+	default:
+		return 0;
+	}
+}
+
+u32 dmuxPamfGetLpcmAuSize(vm::cptr<CellDmuxPamfEsSpecificInfoLpcm> lpcm_info)
+{
+	return lpcm_info->samplingFreq * lpcm_info->bitsPerSample * (lpcm_info->numOfChannels + (lpcm_info->numOfChannels & 1)) / 1600; // Streams with an odd number of channels contain an empty dummy channel
+}
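Reviewer aside, not part of the patch: the divisor 1600 in `dmuxPamfGetLpcmAuSize` works out to 8 bits-per-byte × 200 access units per second, i.e. one LPCM access unit covers 5 ms of audio, with odd channel counts rounded up for the dummy channel. Two worked examples:

```cpp
// 48 kHz, 16-bit, stereo:   48000 * 16 * 2       / 1600 = 960 bytes per AU
// (sanity check: 960 / (48000*16*2/8) bytes-per-second = 5 ms)
static_assert(48000u * 16 * 2 / 1600 == 960);
// 48 kHz, 24-bit, mono: a dummy channel is added, (1 + 1) channels
static_assert(48000u * 24 * (1 + 1) / 1600 == 1440);
```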
+
+u32 dmuxPamfGetAuQueueBufferSize(u16 stream_id, u16 private_stream_id, bool is_avc, vm::cptr<void> es_specific_info)
+{
+	switch (dmuxPamfStreamIdToTypeChannel(stream_id, private_stream_id).first)
+	{
+	case DMUX_PAMF_STREAM_TYPE_INDEX_VIDEO:
+		if (is_avc)
+		{
+			if (!es_specific_info)
+			{
+				return 0x46a870;
+			}
+
+			switch (vm::static_ptr_cast<const CellDmuxPamfEsSpecificInfoAvc>(es_specific_info)->level)
+			{
+			case CELL_DMUX_PAMF_AVC_LEVEL_2P1: return 0xb00c0;
+			case CELL_DMUX_PAMF_AVC_LEVEL_3P0: return 0x19f2e0;
+			case CELL_DMUX_PAMF_AVC_LEVEL_3P1: return 0x260120;
+			case CELL_DMUX_PAMF_AVC_LEVEL_3P2: return 0x35f6c0;
+			case CELL_DMUX_PAMF_AVC_LEVEL_4P1: return 0x45e870;
+			case CELL_DMUX_PAMF_AVC_LEVEL_4P2: // Same as below
+			default: return 0x46a870;
+			}
+		}
+
+		if (es_specific_info && vm::static_ptr_cast<const CellDmuxPamfEsSpecificInfoM2v>(es_specific_info)->profileLevel > CELL_DMUX_PAMF_M2V_MP_ML)
+		{
+			return 0x255000;
+		}
+
+		return 0x70000;
+
+	case DMUX_PAMF_STREAM_TYPE_INDEX_LPCM:
+	{
+		if (!es_specific_info)
+		{
+			return 0x104380;
+		}
+
+		const u32 nch = vm::static_ptr_cast<const CellDmuxPamfEsSpecificInfoLpcm>(es_specific_info)->numOfChannels;
+		const u32 lpcm_au_size = dmuxPamfGetLpcmAuSize(vm::static_ptr_cast<const CellDmuxPamfEsSpecificInfoLpcm>(es_specific_info));
+
+		if (vm::static_ptr_cast<const CellDmuxPamfEsSpecificInfoLpcm>(es_specific_info)->samplingFreq <= 96000)
+		{
+			if (nch > 0 && nch <= 2)
+			{
+				return 0x20000 + lpcm_au_size;
+			}
+
+			if (nch <= 6)
+			{
+				return 0x60000 + lpcm_au_size;
+			}
+
+			if (nch <= 8)
+			{
+				return 0x80000 + lpcm_au_size;
+			}
+
+			return lpcm_au_size;
+		}
+
+		if (nch > 0 && nch <= 2)
+		{
+			return 0x60000 + lpcm_au_size;
+		}
+
+		if (nch <= 6)
+		{
+			return 0x100000 + lpcm_au_size;
+		}
+
+		return lpcm_au_size;
+	}
+	case DMUX_PAMF_STREAM_TYPE_INDEX_AC3:
+		return 0xa000;
+
+	case DMUX_PAMF_STREAM_TYPE_INDEX_ATRACX:
+		return 0x6400;
+
+	case DMUX_PAMF_STREAM_TYPE_INDEX_USER_DATA:
+		return 0x160000;
+
+	default:
+		return 0;
+	}
+}
+
+template <bool raw_es>
+u32 dmuxPamfGetEsMemSize(u16 stream_id, u16 private_stream_id, bool is_avc, vm::cptr<void> es_specific_info)
+{
+	return dmuxPamfGetAuSpecificInfoSize<raw_es>(stream_id, private_stream_id, is_avc) * dmuxPamfGetAuQueueMaxSize(stream_id, private_stream_id)
+		+ dmuxPamfGetAuQueueBufferSize(stream_id, private_stream_id, is_avc, es_specific_info) + 0x7f + static_cast<u32>(sizeof(DmuxPamfElementaryStream)) + 0xf;
+}
+
+error_code dmuxPamfNotifyDemuxDone(ppu_thread& ppu, [[maybe_unused]] vm::ptr<void> core_handle, error_code error, vm::ptr<CellDmuxPamfHandle> handle)
+{
+	handle->notify_demux_done.cbFunc(ppu, handle, error, handle->notify_demux_done.cbArg);
+	return CELL_OK;
+}
+
+error_code dmuxPamfNotifyProgEndCode(ppu_thread& ppu, [[maybe_unused]] vm::ptr<void> core_handle, vm::ptr<CellDmuxPamfHandle> handle)
+{
+	if (handle->notify_prog_end_code.cbFunc)
+	{
+		handle->notify_prog_end_code.cbFunc(ppu, handle, handle->notify_prog_end_code.cbArg);
+	}
+
+	return CELL_OK;
+}
+
+error_code dmuxPamfNotifyFatalErr(ppu_thread& ppu, [[maybe_unused]] vm::ptr<void> core_handle, error_code error, vm::ptr<CellDmuxPamfHandle> handle)
+{
+	handle->notify_fatal_err.cbFunc(ppu, handle, error, handle->notify_fatal_err.cbArg);
+	return CELL_OK;
+}
+
+error_code dmuxPamfEsNotifyAuFound(ppu_thread& ppu, [[maybe_unused]] vm::ptr<void> core_handle, vm::cptr<DmuxPamfAuInfo> au_info, vm::ptr<CellDmuxPamfEsHandle> handle)
+{
+	const vm::var<DmuxAuInfo> _au_info;
+	_au_info->info.auAddr = au_info->addr;
+	_au_info->info.auSize = au_info->size;
+	_au_info->info.isRap = au_info->is_rap;
+	_au_info->info.userData = au_info->user_data;
+	_au_info->info.pts = au_info->pts;
+	_au_info->info.dts = au_info->dts;
+	_au_info->specific_info = au_info->specific_info;
+	_au_info->specific_info_size = au_info->specific_info_size;
+	// _au_info->info.auMaxSize is left uninitialized
+
+	return handle->notify_au_found.cbFunc(ppu, handle, _au_info, handle->notify_au_found.cbArg);
+}
+
+error_code dmuxPamfEsNotifyFlushDone(ppu_thread& ppu, [[maybe_unused]] vm::ptr<void> core_handle, vm::ptr<CellDmuxPamfEsHandle> handle)
+{
+	return handle->notify_flush_done.cbFunc(ppu, handle, handle->notify_flush_done.cbArg);
+}
+
 error_code _CellDmuxCoreOpQueryAttr(vm::cptr<CellDmuxPamfSpecificInfo> pamfSpecificInfo, vm::ptr<CellDmuxPamfAttr> pamfAttr)
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpQueryAttr(pamfSpecificInfo=*0x%x, pamfAttr=*0x%x)", pamfSpecificInfo, pamfAttr);
+	cellDmuxPamf.notice("_CellDmuxCoreOpQueryAttr(pamfSpecificInfo=*0x%x, pamfAttr=*0x%x)", pamfSpecificInfo, pamfAttr);
+
+	if (!pamfAttr || (pamfSpecificInfo && pamfSpecificInfo->thisSize != sizeof(CellDmuxPamfSpecificInfo)))
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	pamfAttr->maxEnabledEsNum = DMUX_PAMF_MAX_ENABLED_ES_NUM;
+	pamfAttr->version = DMUX_PAMF_VERSION;
+	pamfAttr->memSize = sizeof(CellDmuxPamfHandle) + sizeof(DmuxPamfContext) + 0xe7b;
 
 	return CELL_OK;
 }
 
-error_code _CellDmuxCoreOpOpen(vm::cptr<CellDmuxPamfSpecificInfo> pamfSpecificInfo, vm::cptr<CellDmuxResource> demuxerResource, vm::cptr<CellDmuxResourceSpurs> demuxerResourceSpurs, vm::cptr<DmuxCb<DmuxNotifyDemuxDone>> notifyDemuxDone,
-	vm::cptr<DmuxCb<DmuxNotifyProgEndCode>> notifyProgEndCode, vm::cptr<DmuxCb<DmuxNotifyFatalErr>> notifyFatalErr, vm::pptr<void> handle)
+error_code DmuxPamfContext::open(ppu_thread& ppu, const CellDmuxPamfResource& res, const DmuxCb<DmuxNotifyDemuxDone>& notify_dmux_done, const DmuxCb<DmuxNotifyProgEndCode>& notify_prog_end_code,
+	const DmuxCb<DmuxNotifyFatalErr>& notify_fatal_err, vm::bptr<DmuxPamfContext>& handle)
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpOpen(pamfSpecificInfo=*0x%x, demuxerResource=*0x%x, demuxerResourceSpurs=*0x%x, notifyDemuxDone=*0x%x, notifyProgEndCode=*0x%x, notifyFatalErr=*0x%x, handle=**0x%x)",
+	if (res.ppuThreadPriority >= 0xc00u || res.ppuThreadStackSize < 0x1000u || res.spuThreadPriority >= 0x100u || res.numOfSpus != 1u || !res.memAddr || res.memSize < sizeof(DmuxPamfContext) + 0xe7b)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	const auto _this = vm::ptr<DmuxPamfContext>::make(utils::align(+res.memAddr.addr(), 0x80));
+
+	_this->_this = _this;
+	_this->this_size = res.memSize;
+	_this->version = DMUX_PAMF_VERSION;
+	_this->notify_demux_done = notify_dmux_done;
+	_this->notify_prog_end_code = notify_prog_end_code;
+	_this->notify_fatal_err = notify_fatal_err;
+	_this->resource = res;
+	_this->unk = 0;
+	_this->ppu_thread_stack_size = res.ppuThreadStackSize;
+	_this->au_released_bitset = 0;
+	_this->stream_reset_requested = false;
+	_this->sequence_state = DmuxPamfSequenceState::dormant;
+	_this->max_enabled_es_num = DMUX_PAMF_MAX_ENABLED_ES_NUM;
+	_this->enabled_es_num = 0;
+	std::ranges::fill(_this->elementary_streams, vm::null);
+	_this->next_es_id = 0;
+
+	const vm::var<sys_mutex_attribute_t> mutex_attr = {{ SYS_SYNC_PRIORITY, SYS_SYNC_NOT_RECURSIVE, SYS_SYNC_NOT_PROCESS_SHARED, SYS_SYNC_NOT_ADAPTIVE, 0, 0, 0, { "_dxpmtx"_u64 } }};
+	const vm::var<sys_cond_attribute_t> cond_attr = {{ SYS_SYNC_NOT_PROCESS_SHARED, 0, 0, { "_dxpcnd"_u64 } }};
+
+	if (sys_mutex_create(ppu, _this.ptr(&DmuxPamfContext::mutex), mutex_attr) != CELL_OK
+		|| sys_cond_create(ppu, _this.ptr(&DmuxPamfContext::cond), _this->mutex, cond_attr) != CELL_OK)
+	{
+		return CELL_DMUX_PAMF_ERROR_FATAL;
+	}
+
+	_this->spurs_context_addr = _this.ptr(&DmuxPamfContext::spurs_context);
+	_this->cmd_queue_addr_ = _this.ptr(&DmuxPamfContext::cmd_queue);
+	_this->cmd_queue_buffer_addr_ = _this.ptr(&DmuxPamfContext::cmd_queue_buffer);
+	_this->cmd_queue_addr = _this.ptr(&DmuxPamfContext::cmd_queue);
+	_this->cmd_result_queue_addr = _this.ptr(&DmuxPamfContext::cmd_result_queue);
+	_this->stream_info_queue_addr = _this.ptr(&DmuxPamfContext::stream_info_queue);
+	_this->event_queue_addr = _this.ptr(&DmuxPamfContext::event_queue);
+	_this->cmd_queue_buffer_addr = _this.ptr(&DmuxPamfContext::cmd_queue_buffer);
+	_this->cmd_result_queue_buffer_addr = _this.ptr(&DmuxPamfContext::cmd_result_queue_buffer);
+	_this->event_queue_buffer_addr = _this.ptr(&DmuxPamfContext::event_queue_buffer);
+	_this->stream_info_queue_buffer_addr = _this.ptr(&DmuxPamfContext::stream_info_queue_buffer);
+	_this->cmd_queue_addr__ = _this.ptr(&DmuxPamfContext::cmd_queue);
+
+	ensure(std::snprintf(_this->spurs_taskset_name, sizeof(_this->spurs_taskset_name), "_libdmux_pamf_%08x", _this.addr()) == 22);
+
+	_this->cmd_queue.init(_this->cmd_queue_buffer);
+	_this->cmd_result_queue.init(_this->cmd_result_queue_buffer);
+	_this->stream_info_queue.init(_this->stream_info_queue_buffer);
+	_this->event_queue.init(_this->event_queue_buffer);
+
+	// HLE exclusive
+	_this->savestate = {};
+	_this->au_queue_full_bitset = 0;
+	_this->stream_reset_started = false;
+	_this->stream_reset_in_progress = false;
+
+	_this->run_spu_thread();
+
+	handle = _this;
+	return _this->create_thread(ppu);
+}
+
+error_code _CellDmuxCoreOpOpen(ppu_thread& ppu, vm::cptr<CellDmuxPamfSpecificInfo> pamfSpecificInfo, vm::cptr<CellDmuxResource> demuxerResource, vm::cptr<CellDmuxResourceSpurs> demuxerResourceSpurs, vm::cptr<DmuxCb<DmuxNotifyDemuxDone>> notifyDemuxDone,
+	vm::cptr<DmuxCb<DmuxNotifyProgEndCode>> notifyProgEndCode, vm::cptr<DmuxCb<DmuxNotifyFatalErr>> notifyFatalErr, vm::pptr<void> handle)
+{
+	// Block savestates during ppu_execute
+	std::unique_lock savestate_lock{ g_fxo->get<hle_locks_t>(), std::try_to_lock };
+
+	if (!savestate_lock.owns_lock())
+	{
+		ppu.state += cpu_flag::again;
+		return {};
+	}
+
+	cellDmuxPamf.notice("_CellDmuxCoreOpOpen(pamfSpecificInfo=*0x%x, demuxerResource=*0x%x, demuxerResourceSpurs=*0x%x, notifyDemuxDone=*0x%x, notifyProgEndCode=*0x%x, notifyFatalErr=*0x%x, handle=**0x%x)",
 		pamfSpecificInfo, demuxerResource, demuxerResourceSpurs, notifyDemuxDone, notifyProgEndCode, notifyFatalErr, handle);
 
-	return CELL_OK;
+	if ((pamfSpecificInfo && pamfSpecificInfo->thisSize != sizeof(CellDmuxPamfSpecificInfo))
+		|| !demuxerResource
+		|| (demuxerResourceSpurs && !demuxerResourceSpurs->spurs)
+		|| !notifyDemuxDone || !notifyDemuxDone->cbFunc || !notifyDemuxDone->cbArg
+		|| !notifyProgEndCode
+		|| !notifyFatalErr || !notifyFatalErr->cbFunc || !notifyFatalErr->cbArg
+		|| !handle)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	ensure(demuxerResource->memAddr.aligned(0x10)); // Not checked on LLE
+
+	const auto _handle = vm::static_ptr_cast<CellDmuxPamfHandle>(demuxerResource->memAddr);
+
+	_handle->notify_demux_done = *notifyDemuxDone;
+	_handle->notify_fatal_err = *notifyFatalErr;
+	_handle->notify_prog_end_code = *notifyProgEndCode;
+
+	if (!pamfSpecificInfo || !pamfSpecificInfo->programEndCodeCb)
+	{
+		_handle->notify_prog_end_code.cbFunc = vm::null;
+	}
+
+	const CellDmuxPamfResource res{ demuxerResource->ppuThreadPriority, demuxerResource->ppuThreadStackSize, demuxerResource->numOfSpus, demuxerResource->spuThreadPriority,
+		vm::bptr<void>::make(demuxerResource->memAddr.addr() + sizeof(CellDmuxPamfHandle)), demuxerResource->memSize - sizeof(CellDmuxPamfHandle) };
+
+	const auto demux_done_func = vm::bptr<DmuxNotifyDemuxDone>::make(g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(dmuxPamfNotifyDemuxDone)));
+	const auto prog_end_code_func = vm::bptr<DmuxNotifyProgEndCode>::make(g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(dmuxPamfNotifyProgEndCode)));
+	const auto fatal_err_func = vm::bptr<DmuxNotifyFatalErr>::make(g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(dmuxPamfNotifyFatalErr)));
+
+	const error_code ret = DmuxPamfContext::open(ppu, res, { demux_done_func, _handle }, { prog_end_code_func, _handle }, { fatal_err_func, _handle }, _handle->demuxer);
+
+	*handle = _handle;
+
+	return ret;
 }
 
-error_code _CellDmuxCoreOpClose(vm::ptr<void> handle)
+error_code DmuxPamfContext::close(ppu_thread& ppu)
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpClose(handle=*0x%x)", handle);
+	if (join_thread(ppu) != CELL_OK)
+	{
+		return CELL_DMUX_PAMF_ERROR_FATAL;
+	}
+
+	ensure(idm::remove<dmux_pamf_spu_thread>(hle_spu_thread_id));
 
 	return CELL_OK;
 }
 
-error_code _CellDmuxCoreOpResetStream(vm::ptr<void> handle)
+error_code _CellDmuxCoreOpClose(ppu_thread& ppu, vm::ptr<CellDmuxPamfHandle> handle)
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpResetStream(handle=*0x%x)", handle);
+	// The PPU thread is going to use ppu_execute
+	std::unique_lock savestate_lock{ g_fxo->get<hle_locks_t>(), std::try_to_lock };
+
+	if (!savestate_lock.owns_lock())
+	{
+		ppu.state += cpu_flag::again;
+		return {};
+	}
+
+	cellDmuxPamf.notice("_CellDmuxCoreOpClose(handle=*0x%x)", handle);
+
+	if (!handle)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	return handle->demuxer->close(ppu);
+}
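Reviewer aside, not part of the patch: `reset_stream` below (and several other methods) persist a tiny step counter through `ppu_thread::optional_savestate_state` and resume via `switch` fallthrough. The shape of that load step, assuming `utils::serial::try_read` returns a `{success, value}` pair as used in the patch:

```cpp
// Hypothetical helper mirroring the idiom below; 'value' is 0 on a fresh
// (non-resumed) call, so case 0 is always the entry point.
u8 load_step(utils::serial& ar)
{
	const u8 step = ar.try_read<u8>().second;
	ar.clear(); // consumed either way, so a stale step is never re-read
	return step;
}
```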
+
+error_code DmuxPamfContext::reset_stream(ppu_thread& ppu)
+{
+	auto& ar = *ppu.optional_savestate_state;
+	const u8 savestate = ar.try_read<u8>().second;
+	ar.clear();
+
+	switch (savestate)
+	{
+	case 0:
+		if (sys_mutex_lock(ppu, mutex, 0) != CELL_OK)
+		{
+			return CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(0);
+			return {};
+		}
+
+		if (sequence_state != DmuxPamfSequenceState::running)
+		{
+			return sys_mutex_unlock(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		[[fallthrough]];
+
+	case 1:
+		send_spu_command_and_wait(ppu, savestate);
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(1);
+			return {};
+		}
+
+		stream_reset_requested = true;
+		[[fallthrough]];
+
+	case 2:
+		if (const error_code ret = sys_cond_signal_to(ppu, cond, static_cast<u32>(thread_id)); ret != CELL_OK && ret != static_cast<error_code>(CELL_EPERM))
+		{
+			sys_mutex_unlock(ppu, mutex);
+			return CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(2);
+			return {};
+		}
+
+		return sys_mutex_unlock(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
+
+	default:
+		fmt::throw_exception("Unexpected savestate value: 0x%x", savestate);
+	}
+}
+
+error_code _CellDmuxCoreOpResetStream(ppu_thread& ppu, vm::ptr<CellDmuxPamfHandle> handle)
+{
+	cellDmuxPamf.notice("_CellDmuxCoreOpResetStream(handle=*0x%x)", handle);
+
+	if (!handle)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	return handle->demuxer->reset_stream(ppu);
+}
+
+error_code DmuxPamfContext::create_thread(ppu_thread& ppu)
+{
+	const vm::var<char[]> name = vm::make_str("HLE PAMF demuxer");
+	const auto entry = g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(dmuxPamfEntry));
+
+	if (ppu_execute<&sys_ppu_thread_create>(ppu, _this.ptr(&DmuxPamfContext::thread_id), entry, +_this.addr(), +resource.ppuThreadPriority, +resource.ppuThreadStackSize, SYS_PPU_THREAD_CREATE_JOINABLE, +name) != CELL_OK)
+	{
+		return CELL_DMUX_PAMF_ERROR_FATAL;
+	}
 
 	return CELL_OK;
 }
 
-error_code _CellDmuxCoreOpCreateThread(vm::ptr<void> handle)
+error_code _CellDmuxCoreOpCreateThread(ppu_thread& ppu, vm::ptr<CellDmuxPamfHandle> handle)
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpCreateThread(handle=*0x%x)", handle);
+	// Block savestates during ppu_execute
+	std::unique_lock savestate_lock{ g_fxo->get<hle_locks_t>(), std::try_to_lock };
 
-	return CELL_OK;
+	if (!savestate_lock.owns_lock())
+	{
+		ppu.state += cpu_flag::again;
+		return {};
+	}
+
+	cellDmuxPamf.notice("_CellDmuxCoreOpCreateThread(handle=*0x%x)", handle);
+
+	if (!handle)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	return handle->demuxer->create_thread(ppu);
 }
 
-error_code _CellDmuxCoreOpJoinThread(vm::ptr<void> handle)
+error_code DmuxPamfContext::join_thread(ppu_thread& ppu)
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpJoinThread(handle=*0x%x)", handle);
+	if (sys_mutex_lock(ppu, mutex, 0) != CELL_OK)
+	{
+		return CELL_DMUX_PAMF_ERROR_FATAL;
+	}
+
+	std::ranges::fill_n(elementary_streams, enabled_es_num, vm::null);
+
+	enabled_es_num = -1;
+
+	send_spu_command_and_wait(ppu, false);
+
+	if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
+	{
+		return CELL_DMUX_PAMF_ERROR_FATAL;
+	}
+
+	return sys_ppu_thread_join(ppu, static_cast<u32>(thread_id), +vm::var<u64>{}) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
+}
+
+error_code _CellDmuxCoreOpJoinThread(ppu_thread& ppu, vm::ptr<CellDmuxPamfHandle> handle)
+{
+	// The PPU thread is going to use ppu_execute
+	std::unique_lock savestate_lock{ g_fxo->get<hle_locks_t>(), std::try_to_lock };
+
+	if (!savestate_lock.owns_lock())
+	{
+		ppu.state += cpu_flag::again;
+		return {};
+	}
+
+	cellDmuxPamf.notice("_CellDmuxCoreOpJoinThread(handle=*0x%x)", handle);
+
+	if (!handle)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	return handle->demuxer->join_thread(ppu);
+}
+
+template <bool raw_es>
+error_code DmuxPamfContext::set_stream(ppu_thread& ppu, vm::cptr<void> stream_address, u32 stream_size, b8 discontinuity, u32 user_data)
+{
+	auto& ar = *ppu.optional_savestate_state;
+	const bool waiting_for_spu_state = ar.try_read<bool>().second;
+	ar.clear();
+
+	if (!waiting_for_spu_state)
+	{
+		if (sys_mutex_lock(ppu, mutex, 0) != CELL_OK)
+		{
+			return CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(false);
+			return {};
+		}
+
+		this->user_data = user_data;
+
+		if (!stream_info_queue.emplace(stream_address, stream_size, user_data, !discontinuity, raw_es))
+		{
+			return sys_mutex_unlock(ppu, mutex) == CELL_OK ? CELL_DMUX_PAMF_ERROR_BUSY : CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+	}
+
+	send_spu_command_and_wait(ppu, waiting_for_spu_state);
+
+	if (ppu.state & cpu_flag::again)
+	{
+		ar(true);
+		return {};
+	}
+
+	sequence_state = DmuxPamfSequenceState::running;
+
+	return sys_mutex_unlock(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
+}
+
+template <bool raw_es>
+error_code _CellDmuxCoreOpSetStream(ppu_thread& ppu, vm::ptr<CellDmuxPamfHandle> handle, vm::cptr<void> streamAddress, u32 streamSize, b8 discontinuity, u64 userData)
+{
+	cellDmuxPamf.trace("_CellDmuxCoreOpSetStream<raw_es=%d>(handle=*0x%x, streamAddress=*0x%x, streamSize=0x%x, discontinuity=%d, userData=0x%llx)", raw_es, handle, streamAddress, streamSize, +discontinuity, userData);
+
+	if (!streamAddress || streamSize == 0)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	ensure(!!handle); // Not checked on LLE
+
+	return handle->demuxer->set_stream<raw_es>(ppu, streamAddress, streamSize, discontinuity, static_cast<u32>(userData));
+}
+
+error_code DmuxPamfElementaryStream::release_au(ppu_thread& ppu, vm::ptr<void> au_addr, u32 au_size) const
+{
+	auto& ar = *ppu.optional_savestate_state;
+	const u8 savestate = ar.try_read<u8>().second;
+	ar.clear();
+
+	switch (savestate)
+	{
+	case 0:
+		if (sys_mutex_lock(ppu, demuxer->mutex, 0) != CELL_OK)
+		{
+			return CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(0);
+			return {};
+		}
+
+		[[fallthrough]];
+
+	case 1:
+		demuxer->send_spu_command_and_wait(ppu, savestate, au_addr, au_size, static_cast<be_t<u16>>(stream_id), static_cast<be_t<u16>>(private_stream_id));
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(1);
+			return {};
+		}
+
+		demuxer->au_released_bitset |= 1ull << this_index;
+		[[fallthrough]];
+
+	case 2:
+		if (const error_code ret = sys_cond_signal_to(ppu, demuxer->cond, static_cast<u32>(demuxer->thread_id)); ret != CELL_OK && ret != static_cast<error_code>(CELL_EPERM))
+		{
+			sys_mutex_unlock(ppu, demuxer->mutex);
+			return CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(2);
+			return {};
+		}
+
+		return sys_mutex_unlock(ppu, demuxer->mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
+
+	default:
+		fmt::throw_exception("Unexpected savestate value: 0x%x", savestate);
+	}
+}
+
+error_code _CellDmuxCoreOpReleaseAu(ppu_thread& ppu, vm::ptr<CellDmuxPamfEsHandle> esHandle, vm::ptr<void> auAddr, u32 auSize)
+{
+	cellDmuxPamf.trace("_CellDmuxCoreOpReleaseAu(esHandle=*0x%x, auAddr=*0x%x, auSize=0x%x)", esHandle, auAddr, auSize);
+
+	if (!auAddr || auSize == 0)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	ensure(!!esHandle); // Not checked on LLE
+
+	return esHandle->es->release_au(ppu, auAddr, auSize);
+}
+
+template <bool raw_es>
+error_code dmuxPamfGetEsAttr(u16 stream_id, u16 private_stream_id, bool is_avc, vm::cptr<void> es_specific_info, CellDmuxPamfEsAttr& attr)
+{
+	if (dmuxPamfStreamIdToTypeChannel(stream_id, private_stream_id).first == DMUX_PAMF_STREAM_TYPE_INDEX_INVALID)
+	{
+		return CELL_DMUX_PAMF_ERROR_UNKNOWN_STREAM;
+	}
+
+	if (dmuxPamfVerifyEsSpecificInfo(stream_id, private_stream_id, is_avc, es_specific_info) != CELL_OK)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	attr.auQueueMaxSize = dmuxPamfGetAuQueueMaxSize(stream_id, private_stream_id);
+	attr.memSize = dmuxPamfGetEsMemSize<raw_es>(stream_id, private_stream_id, is_avc, es_specific_info);
+	attr.specificInfoSize = dmuxPamfGetAuSpecificInfoSize<raw_es>(stream_id, private_stream_id, is_avc);
 
 	return CELL_OK;
 }
 
 template <bool raw_es>
-error_code _CellDmuxCoreOpSetStream(vm::ptr<void> handle, vm::cptr<void> streamAddress, u32 streamSize, b8 discontinuity, u64 userData)
+static inline std::tuple<u16, u16, bool> get_stream_ids(vm::cptr<void> esFilterId)
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpSetStream<raw_es=%d>(handle=*0x%x, streamAddress=*0x%x, streamSize=0x%x, discontinuity=%d, userData=0x%llx)", raw_es, handle, streamAddress, streamSize, +discontinuity, userData);
+	if constexpr (raw_es)
+	{
+		const auto filter_id = vm::static_ptr_cast<const u8>(esFilterId);
+		return { filter_id[2], filter_id[3], filter_id[8] >> 7 };
+	}
 
-	return CELL_OK;
-}
-
-error_code _CellDmuxCoreOpReleaseAu(vm::ptr<void> esHandle, vm::ptr<void> memAddr, u32 memSize)
-{
-	cellDmuxPamf.todo("_CellDmuxCoreOpReleaseAu(esHandle=*0x%x, memAddr=*0x%x, memSize=0x%x)", esHandle, memAddr, memSize);
-
-	return CELL_OK;
+	const auto filter_id = vm::static_ptr_cast<const CellCodecEsFilterId>(esFilterId);
+	return { filter_id->filterIdMajor, filter_id->filterIdMinor, filter_id->supplementalInfo1 };
 }
 
 template <bool raw_es>
 error_code _CellDmuxCoreOpQueryEsAttr(vm::cptr<void> esFilterId, vm::cptr<void> esSpecificInfo, vm::ptr<CellDmuxPamfEsAttr> attr)
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpQueryEsAttr<raw_es=%d>(esFilterId=*0x%x, esSpecificInfo=*0x%x, attr=*0x%x)", raw_es, esFilterId, esSpecificInfo, attr);
+	cellDmuxPamf.notice("_CellDmuxCoreOpQueryEsAttr<raw_es=%d>(esFilterId=*0x%x, esSpecificInfo=*0x%x, attr=*0x%x)", raw_es, esFilterId, esSpecificInfo, attr);
 
-	return CELL_OK;
+	if (!esFilterId || !attr)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	const auto [stream_id, private_stream_id, is_avc] = get_stream_ids<raw_es>(esFilterId);
+
+	CellDmuxPamfEsAttr es_attr;
+
+	const error_code ret = dmuxPamfGetEsAttr<raw_es>(stream_id, private_stream_id, is_avc, esSpecificInfo, es_attr);
+
+	*attr = es_attr;
+	attr->memSize += static_cast<u32>(sizeof(CellDmuxPamfEsHandle));
+
+	return ret;
 }
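Reviewer aside, not part of the patch: in the raw-ES path of `get_stream_ids` above, the filter id is reinterpreted as a byte array — stream id at byte 2, private stream id at byte 3, and the AVC flag in the top bit of byte 8. A self-contained illustration of that decoding (buffer contents are an example only):

```cpp
#include <cstdint>
#include <tuple>

std::tuple<std::uint16_t, std::uint16_t, bool> parse_raw_filter_id(const std::uint8_t (&id)[12])
{
	// byte 2 = stream id, byte 3 = private stream id, bit 7 of byte 8 = "is AVC"
	return { id[2], id[3], static_cast<bool>(id[8] >> 7) };
}

// e.g. {0, 0, 0xe0, 0x00, 0, 0, 0, 0, 0x80, 0, 0, 0} -> { 0xe0, 0x00, true }:
// video stream 0, AVC codec
```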
+
+template <bool raw_es>
+error_code DmuxPamfContext::enable_es(ppu_thread& ppu, u16 stream_id, u16 private_stream_id, bool is_avc, vm::cptr<void> es_specific_info, vm::ptr<void> mem_addr, u32 mem_size, const DmuxCb<DmuxEsNotifyAuFound>& notify_au_found,
+	const DmuxCb<DmuxEsNotifyFlushDone>& notify_flush_done, vm::bptr<DmuxPamfElementaryStream>& es)
+{
+	auto& ar = *ppu.optional_savestate_state;
+	const bool waiting_for_spu_state = ar.try_read<bool>().second;
+	ar.clear();
+
+	if (mem_size < dmuxPamfGetEsMemSize<raw_es>(stream_id, private_stream_id, is_avc, es_specific_info))
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	const auto stream_type = dmuxPamfStreamIdToTypeChannel(stream_id, private_stream_id).first;
+
+	if (!waiting_for_spu_state)
+	{
+		if (stream_type == DMUX_PAMF_STREAM_TYPE_INDEX_INVALID)
+		{
+			return CELL_DMUX_PAMF_ERROR_UNKNOWN_STREAM;
+		}
+
+		if (dmuxPamfVerifyEsSpecificInfo(stream_id, private_stream_id, is_avc, es_specific_info) != CELL_OK)
+		{
+			return CELL_DMUX_PAMF_ERROR_ARG;
+		}
+
+		if (const error_code ret = sys_mutex_lock(ppu, mutex, 0); ret != CELL_OK)
+		{
+			return CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(false);
+			return {};
+		}
+
+		this->is_raw_es = raw_es;
+
+		if (enabled_es_num == max_enabled_es_num)
+		{
+			return sys_mutex_unlock(ppu, mutex) == CELL_OK ? CELL_DMUX_PAMF_ERROR_NO_MEMORY : CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		if (find_es(stream_id, private_stream_id))
+		{
+			// Elementary stream is already enabled
+			return sys_mutex_unlock(ppu, mutex) == CELL_OK ? CELL_DMUX_PAMF_ERROR_ARG : CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+	}
+
+	const be_t<u32> au_max_size = [&]() -> be_t<u32>
+	{
+		switch (stream_type)
+		{
+		case DMUX_PAMF_STREAM_TYPE_INDEX_VIDEO:
+			if (is_avc)
+			{
+				if (!es_specific_info || vm::static_ptr_cast<const CellDmuxPamfEsSpecificInfoAvc>(es_specific_info)->level == CELL_DMUX_PAMF_AVC_LEVEL_4P2)
+				{
+					return 0xcc000u;
+				}
+
+				switch (vm::static_ptr_cast<const CellDmuxPamfEsSpecificInfoAvc>(es_specific_info)->level)
+				{
+				case CELL_DMUX_PAMF_AVC_LEVEL_2P1: return 0x12900u;
+				case CELL_DMUX_PAMF_AVC_LEVEL_3P0: return 0x25f80u;
+				case CELL_DMUX_PAMF_AVC_LEVEL_3P1: return 0x54600u;
+				case CELL_DMUX_PAMF_AVC_LEVEL_3P2: return 0x78000u;
+				case CELL_DMUX_PAMF_AVC_LEVEL_4P1: return 0xc0000u;
+				default: fmt::throw_exception("Unreachable"); // es_specific_info was already checked for invalid values in dmuxPamfVerifyEsSpecificInfo()
+				}
+			}
+
+			if (!es_specific_info || vm::static_ptr_cast<const CellDmuxPamfEsSpecificInfoM2v>(es_specific_info)->profileLevel > CELL_DMUX_PAMF_M2V_MP_ML)
+			{
+				return 0x12a800u;
+			}
+
+			return 0x38000u;
+
+		case DMUX_PAMF_STREAM_TYPE_INDEX_LPCM: return dmuxPamfGetLpcmAuSize(vm::static_ptr_cast<const CellDmuxPamfEsSpecificInfoLpcm>(es_specific_info));
+		case DMUX_PAMF_STREAM_TYPE_INDEX_AC3: return 0xf00u;
+		case DMUX_PAMF_STREAM_TYPE_INDEX_ATRACX: return 0x1008u;
+		case DMUX_PAMF_STREAM_TYPE_INDEX_USER_DATA: return 0xa0000u;
+		default: fmt::throw_exception("Unreachable"); // stream_type was already checked
+		}
+	}();
+
+	const auto _es = vm::bptr<DmuxPamfElementaryStream>::make(utils::align(mem_addr.addr(), 0x10));
+
+	const auto au_queue_buffer = vm::bptr<void>::make(utils::align(_es.addr() + static_cast<u32>(sizeof(DmuxPamfElementaryStream)), 0x80));
+	const be_t<u32> au_specific_info_size = dmuxPamfGetAuSpecificInfoSize<raw_es>(stream_id, private_stream_id, is_avc);
+
+	send_spu_command_and_wait(ppu, waiting_for_spu_state, stream_id, private_stream_id, is_avc, au_queue_buffer,
+		dmuxPamfGetAuQueueBufferSize(stream_id, private_stream_id, is_avc, es_specific_info), au_max_size, au_specific_info_size, raw_es, next_es_id);
+
+	if (ppu.state & cpu_flag::again)
+	{
+		ar(true);
+		return {};
+	}
+
+	u32 es_idx = umax;
+	while (elementary_streams[++es_idx]){} // There is guaranteed to be an empty slot, this was already checked above
+
+	_es->_this = _es;
+	_es->this_size = mem_size;
+	_es->this_index = es_idx;
+	_es->demuxer = _this;
+	_es->notify_au_found = notify_au_found;
+	_es->notify_flush_done = notify_flush_done;
+	_es->stream_id = stream_id;
+	_es->private_stream_id = private_stream_id;
+	_es->is_avc = is_avc;
+	_es->au_queue_buffer = au_queue_buffer;
+	_es->au_max_size = au_max_size;
+	_es->au_specific_info_size = au_specific_info_size;
+	_es->reset_next_au = false;
+	_es->es_id = next_es_id++;
+
+	elementary_streams[es_idx] = _es;
+
+	enabled_es_num++;
+
+	if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
+	{
+		return CELL_DMUX_PAMF_ERROR_FATAL;
+	}
+
+	es = _es;
 
 	return CELL_OK;
 }
 
 template <bool raw_es>
-error_code _CellDmuxCoreOpEnableEs(vm::ptr<void> handle, vm::cptr<void> esFilterId, vm::cptr<CellDmuxEsResource> esResource, vm::cptr<DmuxCb<DmuxEsNotifyAuFound>> notifyAuFound,
-	vm::cptr<DmuxCb<DmuxEsNotifyFlushDone>> notifyFlushDone, vm::cptr<void> esSpecificInfo, vm::pptr<void> esHandle)
+error_code _CellDmuxCoreOpEnableEs(ppu_thread& ppu, vm::ptr<CellDmuxPamfHandle> handle, vm::cptr<void> esFilterId, vm::cptr<CellDmuxEsResource> esResource, vm::cptr<DmuxCb<DmuxEsNotifyAuFound>> notifyAuFound,
+	vm::cptr<DmuxCb<DmuxEsNotifyFlushDone>> notifyFlushDone, vm::cptr<void> esSpecificInfo, vm::pptr<void> esHandle)
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpEnableEs<raw_es=%d>(handle=*0x%x, esFilterId=*0x%x, esResource=*0x%x, notifyAuFound=*0x%x, notifyFlushDone=*0x%x, esSpecificInfo=*0x%x, esHandle)",
+	cellDmuxPamf.notice("_CellDmuxCoreOpEnableEs<raw_es=%d>(handle=*0x%x, esFilterId=*0x%x, esResource=*0x%x, notifyAuFound=*0x%x, notifyFlushDone=*0x%x, esSpecificInfo=*0x%x, esHandle)",
 		raw_es, handle, esFilterId, esResource, notifyAuFound, notifyFlushDone, esSpecificInfo, esHandle);
 
-	return CELL_OK;
+	if (!handle || !esFilterId || !esResource || !esResource->memAddr || esResource->memSize == 0u || !notifyAuFound || !notifyAuFound->cbFunc || !notifyAuFound->cbArg || !notifyFlushDone || !notifyFlushDone->cbFunc || !notifyFlushDone->cbArg)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	ensure(!!esHandle && esResource->memAddr.aligned(0x10)); // Not checked on LLE
+
+	const auto es_handle = vm::static_ptr_cast<CellDmuxPamfEsHandle>(esResource->memAddr);
+
+	es_handle->notify_au_found = *notifyAuFound;
+	es_handle->notify_flush_done = *notifyFlushDone;
+
+	const auto au_found_func = vm::bptr<DmuxEsNotifyAuFound>::make(g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(dmuxPamfEsNotifyAuFound)));
+	const auto flush_done_func = vm::bptr<DmuxEsNotifyFlushDone>::make(g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(dmuxPamfEsNotifyFlushDone)));
+
+	const auto [stream_id, private_stream_id, is_avc] = get_stream_ids<raw_es>(esFilterId);
+
+	const error_code ret = handle->demuxer->enable_es<raw_es>(ppu, stream_id, private_stream_id, is_avc, esSpecificInfo, vm::ptr<void>::make(esResource->memAddr.addr() + sizeof(CellDmuxPamfEsHandle)),
+		esResource->memSize - sizeof(CellDmuxPamfEsHandle), { au_found_func, es_handle }, { flush_done_func, es_handle }, es_handle->es);
+
+	*esHandle = es_handle;
+
+	return ret;
 }
 
-error_code _CellDmuxCoreOpDisableEs(vm::ptr<void> esHandle)
+error_code DmuxPamfElementaryStream::disable_es(ppu_thread& ppu)
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpDisableEs(esHandle=*0x%x)", esHandle);
+	const auto dmux = demuxer.get_ptr();
 
-	return CELL_OK;
+	auto& ar = *ppu.optional_savestate_state;
+	const u8 savestate = ar.try_read<u8>().second;
+	ar.clear();
+
+	switch (savestate)
+	{
+	case 0:
+		if (sys_mutex_lock(ppu, dmux->mutex, 0) != CELL_OK)
+		{
+			return CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(0);
+			return {};
+		}
+
+		if (!dmux->find_es(stream_id, private_stream_id))
+		{
+			// Elementary stream is already disabled
+			return sys_mutex_unlock(ppu, dmux->mutex) == CELL_OK ? CELL_DMUX_PAMF_ERROR_ARG : CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		[[fallthrough]];
+
+	case 1:
+		dmux->send_spu_command_and_wait(ppu, savestate, static_cast<be_t<u16>>(stream_id), static_cast<be_t<u16>>(private_stream_id));
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(1);
+			return {};
+		}
+
+		_this = vm::null;
+		this_size = 0;
+		demuxer = vm::null;
+		notify_au_found = {};
+		au_queue_buffer = vm::null;
+		unk = 0;
+		au_max_size = 0;
+
+		dmux->elementary_streams[this_index] = vm::null;
+		dmux->enabled_es_num--;
+
+		dmux->au_released_bitset |= 1ull << this_index;
+
+		this_index = 0;
+		[[fallthrough]];
+
+	case 2:
+		if (const error_code ret = sys_cond_signal_to(ppu, dmux->cond, static_cast<u32>(dmux->thread_id)); ret != CELL_OK && ret != static_cast<error_code>(CELL_EPERM))
+		{
+			sys_mutex_unlock(ppu, dmux->mutex);
+			return CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(2);
+			return {};
+		}
+
+		return sys_mutex_unlock(ppu, dmux->mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
+
+	default:
+		fmt::throw_exception("Unexpected savestate value: 0x%x", savestate);
+	}
 }
 
-error_code _CellDmuxCoreOpFlushEs(vm::ptr<void> esHandle)
+error_code _CellDmuxCoreOpDisableEs(ppu_thread& ppu, vm::ptr<CellDmuxPamfEsHandle> esHandle)
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpFlushEs(esHandle=*0x%x)", esHandle);
+	cellDmuxPamf.notice("_CellDmuxCoreOpDisableEs(esHandle=*0x%x)", esHandle);
 
-	return CELL_OK;
+	if (!esHandle)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	return esHandle->es->disable_es(ppu);
 }
 
-error_code _CellDmuxCoreOpResetEs(vm::ptr<void> esHandle)
+error_code DmuxPamfElementaryStream::flush_es(ppu_thread& ppu) const
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpResetEs(esHandle=*0x%x)", esHandle);
+	auto& ar = *ppu.optional_savestate_state;
+	const bool waiting_for_spu_state = ar.try_read<bool>().second;
+	ar.clear();
 
-	return CELL_OK;
+	if (!waiting_for_spu_state)
+	{
+		if (sys_mutex_lock(ppu, demuxer->mutex, 0) != CELL_OK)
+		{
+			return CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(false);
+			return {};
+		}
+	}
+
+	demuxer->send_spu_command_and_wait(ppu, waiting_for_spu_state, static_cast<be_t<u16>>(stream_id), static_cast<be_t<u16>>(private_stream_id));
+
+	if (ppu.state & cpu_flag::again)
+	{
+		ar(true);
+		return {};
+	}
+
+	return sys_mutex_unlock(ppu, demuxer->mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
 }
 
-error_code _CellDmuxCoreOpResetStreamAndWaitDone(vm::ptr<void> handle)
+error_code _CellDmuxCoreOpFlushEs(ppu_thread& ppu, vm::ptr<CellDmuxPamfEsHandle> esHandle)
 {
-	cellDmuxPamf.todo("_CellDmuxCoreOpResetStreamAndWaitDone(handle=*0x%x)", handle);
+	cellDmuxPamf.notice("_CellDmuxCoreOpFlushEs(esHandle=*0x%x)", esHandle);
 
-	return CELL_OK;
+	if (!esHandle)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	return esHandle->es->flush_es(ppu);
+}
+
+error_code DmuxPamfElementaryStream::reset_es(ppu_thread& ppu) const
+{
+	auto& ar = *ppu.optional_savestate_state;
+	const bool waiting_for_spu_state = ar.try_read<bool>().second;
+	ar.clear();
+
+	if (!waiting_for_spu_state)
+	{
+		if (sys_mutex_lock(ppu, demuxer->mutex, 0) != CELL_OK)
+		{
+			return CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		if (ppu.state & cpu_flag::again)
+		{
+			ar(false);
+			return {};
+		}
+	}
+
+	demuxer->send_spu_command_and_wait(ppu, waiting_for_spu_state, static_cast<be_t<u16>>(stream_id), static_cast<be_t<u16>>(private_stream_id), vm::null);
+
+	if (ppu.state & cpu_flag::again)
+	{
+		ar(true);
+		return {};
+	}
+
+	return sys_mutex_unlock(ppu, demuxer->mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
+}
+
+error_code _CellDmuxCoreOpResetEs(ppu_thread& ppu, vm::ptr<CellDmuxPamfEsHandle> esHandle)
+{
+	cellDmuxPamf.notice("_CellDmuxCoreOpResetEs(esHandle=*0x%x)", esHandle);
+
+	if (!esHandle)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	return esHandle->es->reset_es(ppu);
+}
+
+error_code DmuxPamfContext::reset_stream_and_wait_done(ppu_thread& ppu)
+{
+	// Both sys_cond_wait() and DmuxPamfContext::reset_stream() are already using ppu_thread::optional_savestate_state, so we can't save this function currently
+	std::unique_lock savestate_lock{ g_fxo->get<hle_locks_t>(), std::try_to_lock };
+
+	if (!savestate_lock.owns_lock())
+	{
+		ppu.state += cpu_flag::again;
+		return {};
+	}
+
+	if (reset_stream(ppu) != CELL_OK)
+	{
+		return CELL_DMUX_PAMF_ERROR_FATAL;
+	}
+
+	if (ppu.state & cpu_flag::again)
+	{
+		return {};
+	}
+
+	if (sys_mutex_lock(ppu, mutex, 0) != CELL_OK)
+	{
+		return CELL_DMUX_PAMF_ERROR_FATAL;
+	}
+
+	if (ppu.state & cpu_flag::again)
+	{
+		return {};
+	}
+
+	while (sequence_state != DmuxPamfSequenceState::dormant)
+	{
+		if (sys_cond_wait(ppu, cond, 0) != CELL_OK)
+		{
+			sys_mutex_unlock(ppu, mutex);
+			return CELL_DMUX_PAMF_ERROR_FATAL;
+		}
+
+		if (ppu.state & cpu_flag::again)
+		{
+			return {};
+		}
+	}
+
+	return sys_mutex_unlock(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
+}
+
+error_code _CellDmuxCoreOpResetStreamAndWaitDone(ppu_thread& ppu, vm::ptr<CellDmuxPamfHandle> handle)
+{
+	cellDmuxPamf.notice("_CellDmuxCoreOpResetStreamAndWaitDone(handle=*0x%x)", handle);
+
+	if (!handle)
+	{
+		return CELL_DMUX_PAMF_ERROR_ARG;
+	}
+
+	return handle->demuxer->reset_stream_and_wait_done(ppu);
 }
 
 template
@@ -1267,4 +2840,12 @@ DECLARE(ppu_module_manager::cellDmuxPamf)("cellDmuxPamf", []
 	REG_HIDDEN_FUNC(_CellDmuxCoreOpFlushEs);
 	REG_HIDDEN_FUNC(_CellDmuxCoreOpResetEs);
 	REG_HIDDEN_FUNC(_CellDmuxCoreOpResetStreamAndWaitDone);
+
+	REG_HIDDEN_FUNC(dmuxPamfNotifyDemuxDone);
+	REG_HIDDEN_FUNC(dmuxPamfNotifyProgEndCode);
+	REG_HIDDEN_FUNC(dmuxPamfNotifyFatalErr);
+	REG_HIDDEN_FUNC(dmuxPamfEsNotifyAuFound);
+	REG_HIDDEN_FUNC(dmuxPamfEsNotifyFlushDone);
+
+	REG_HIDDEN_FUNC(dmuxPamfEntry);
 });
CellDmuxPamfAuSpecificInfoAc3 +{ + be_t reserved1; +}; + +struct CellDmuxPamfAuSpecificInfoAtrac3plus +{ + be_t reserved1; +}; + +struct CellDmuxPamfAuSpecificInfoUserData +{ + be_t reserved1; +}; + +struct CellDmuxPamfEsSpecificInfoM2v +{ + be_t profileLevel; +}; + +struct CellDmuxPamfEsSpecificInfoAvc +{ + be_t level; +}; + +struct CellDmuxPamfEsSpecificInfoLpcm +{ + be_t samplingFreq; + be_t numOfChannels; + be_t bitsPerSample; +}; + +struct CellDmuxPamfEsSpecificInfoAc3 +{ + be_t reserved1; +}; + +struct CellDmuxPamfEsSpecificInfoAtrac3plus +{ + be_t reserved1; +}; + +struct CellDmuxPamfEsSpecificInfoUserData +{ + be_t reserved1; +}; + +enum CellDmuxPamfSamplingFrequency +{ + CELL_DMUX_PAMF_FS_48K = 48000, +}; + +enum CellDmuxPamfBitsPerSample +{ + CELL_DMUX_PAMF_BITS_PER_SAMPLE_16 = 16, + CELL_DMUX_PAMF_BITS_PER_SAMPLE_24 = 24, +}; + +enum CellDmuxPamfLpcmChannelAssignmentInfo +{ + CELL_DMUX_PAMF_LPCM_CH_M1 = 1, + CELL_DMUX_PAMF_LPCM_CH_LR = 3, + CELL_DMUX_PAMF_LPCM_CH_LRCLSRSLFE = 9, + CELL_DMUX_PAMF_LPCM_CH_LRCLSCS1CS2RSLFE = 11, +}; + +enum CellDmuxPamfLpcmFs +{ + CELL_DMUX_PAMF_LPCM_FS_48K = 1, +}; + +enum CellDmuxPamfLpcmBitsPerSamples +{ + CELL_DMUX_PAMF_LPCM_BITS_PER_SAMPLE_16 = 1, + CELL_DMUX_PAMF_LPCM_BITS_PER_SAMPLE_24 = 3, +}; + +struct CellDmuxPamfSpecificInfo +{ + be_t thisSize; + b8 programEndCodeCb; +}; + +struct CellDmuxPamfResource +{ + be_t ppuThreadPriority; + be_t ppuThreadStackSize; + be_t numOfSpus; + be_t spuThreadPriority; + vm::bptr memAddr; be_t memSize; }; -struct CellDmuxPamfEsAttr +struct DmuxPamfAuInfo { - be_t auQueueMaxSize; - be_t memSize; - be_t specificInfoSize; + vm::bptr addr; + be_t size; + CellCodecTimeStamp pts; + CellCodecTimeStamp dts; + be_t user_data; + vm::bptr specific_info; + be_t specific_info_size; + b8 is_rap; }; + +CHECK_SIZE(DmuxPamfAuInfo, 0x30); + +constexpr u32 DMUX_PAMF_VERSION = 0x280000; +constexpr s32 DMUX_PAMF_MAX_ENABLED_ES_NUM = 0x40; + +// HLE exclusive, for savestates +enum class dmux_pamf_state : u8 +{ + initial, + waiting_for_au_released, + waiting_for_au_released_error, + waiting_for_event, + starting_demux_done, + starting_demux_done_mutex_lock_error, + starting_demux_done_mutex_unlock_error, + starting_demux_done_checking_stream_reset, + starting_demux_done_checking_stream_reset_error, + setting_au_reset, + setting_au_reset_error, + processing_event, + au_found_waiting_for_spu, + unsetting_au_cancel, + demux_done_notifying, + demux_done_mutex_lock, + demux_done_cond_signal, + resuming_demux_mutex_lock, + resuming_demux_waiting_for_spu, + sending_fatal_err +}; + +enum class DmuxPamfSequenceState : u32 +{ + dormant, + resetting, + running +}; + +struct DmuxPamfElementaryStream; + +class DmuxPamfContext +{ + // HLE exclusive + // These are local variables in the PPU thread function; they're here for savestates + DmuxPamfEvent event; + u64 au_queue_full_bitset; + b8 stream_reset_started; + b8 stream_reset_in_progress; + + u32 hle_spu_thread_id; + dmux_pamf_state savestate; + + [[maybe_unused]] u8 spurs[0xf6b]; // CellSpurs, 0x1000 bytes on LLE + [[maybe_unused]] vm::bptr spurs_addr; // CellSpurs* + [[maybe_unused]] b8 use_existing_spurs; + + [[maybe_unused]] alignas(0x80) u8 spurs_taskset[0x1900]; // CellSpursTaskset + [[maybe_unused]] be_t spurs_task_id; // CellSpursTaskId + vm::bptr spurs_context_addr; + + [[maybe_unused]] u8 reserved1[0x10]; + + vm::bptr _this; + be_t this_size; + be_t version; + + DmuxCb notify_demux_done; + DmuxCb notify_prog_end_code; + DmuxCb notify_fatal_err; + + CellDmuxPamfResource resource; + 
+ be_t thread_id; // sys_ppu_thread_t + + be_t unk; // Unused + + be_t ppu_thread_stack_size; + + be_t au_released_bitset; // Each bit corresponds to an elementary stream; if a bit is set, then cellDmuxReleaseAu() was called for that elementary stream + + b8 stream_reset_requested; + + be_t sequence_state; + + be_t max_enabled_es_num; + be_t enabled_es_num; + vm::bptr elementary_streams[DMUX_PAMF_MAX_ENABLED_ES_NUM]; + + be_t mutex; // sys_mutex_t + be_t cond; // sys_cond_t + + vm::bptr> cmd_queue_addr_; // Same as cmd_queue_addr, unused + vm::bptr cmd_queue_buffer_addr_; // Same as cmd_queue_buffer_addr, unused + + vm::bptr> cmd_queue_addr; // CellSpursQueue* + vm::bptr, 1>> cmd_result_queue_addr; // CellSpursQueue* + vm::bptr> stream_info_queue_addr; // CellSpursQueue* + vm::bptr> event_queue_addr; // CellSpursQueue* + + vm::bptr cmd_queue_buffer_addr; + vm::bptr[1]> cmd_result_queue_buffer_addr; + vm::bptr event_queue_buffer_addr; + vm::bptr stream_info_queue_buffer_addr; + + vm::bptr> cmd_queue_addr__; // Same as cmd_queue_addr, unused + + be_t user_data; + + b8 is_raw_es; + + be_t next_es_id; + + char spurs_taskset_name[24]; + + [[maybe_unused]] u8 reserved2[928]; // Unused + + dmux_pamf_hle_spurs_queue cmd_queue; // CellSpursQueue + dmux_pamf_hle_spurs_queue, 1> cmd_result_queue; // CellSpursQueue + dmux_pamf_hle_spurs_queue stream_info_queue; // CellSpursQueue + dmux_pamf_hle_spurs_queue event_queue; // CellSpursQueue + + DmuxPamfCommand cmd_queue_buffer[1]; + alignas(0x80) be_t cmd_result_queue_buffer[1]; + DmuxPamfStreamInfo stream_info_queue_buffer[1]; + DmuxPamfEvent event_queue_buffer[4 + 2 * DMUX_PAMF_MAX_ENABLED_ES_NUM]; + + alignas(0x80) u8 spurs_context[0x36400]; + + + template + void send_spu_command_and_wait(ppu_thread& ppu, bool waiting_for_spu_state, auto&&... cmd_params); + + error_code wait_au_released_or_stream_reset(ppu_thread& ppu, u64 au_queue_full_bitset, b8& stream_reset_started, dmux_pamf_state& savestate); + + template + error_code set_au_reset(ppu_thread& ppu); + + template + static error_code callback(ppu_thread& ppu, DmuxCb cb, auto&&... 
args); + + friend struct DmuxPamfElementaryStream; + +public: + void run_spu_thread(); + + DmuxPamfElementaryStream* find_es(u16 stream_id, u16 private_stream_id); + + void exec(ppu_thread& ppu); + + static error_code open(ppu_thread& ppu, const CellDmuxPamfResource& res, const DmuxCb& notify_dmux_done, const DmuxCb& notify_prog_end_code, const DmuxCb& notify_fatal_err, vm::bptr& handle); + error_code create_thread(ppu_thread& ppu); + error_code close(ppu_thread& ppu); + error_code reset_stream(ppu_thread& ppu); + error_code join_thread(ppu_thread& ppu); + + template + error_code set_stream(ppu_thread& ppu, vm::cptr stream_address, u32 stream_size, b8 discontinuity, u32 user_data); + + template + error_code enable_es(ppu_thread& ppu, u16 stream_id, u16 private_stream_id, bool is_avc, vm::cptr es_specific_info, vm::ptr mem_addr, u32 mem_size, const DmuxCb& notify_au_found, + const DmuxCb& notify_flush_done, vm::bptr& es); + + error_code reset_stream_and_wait_done(ppu_thread& ppu); +}; + +static_assert(std::is_standard_layout_v && std::is_trivial_v); +CHECK_SIZE_ALIGN(DmuxPamfContext, 0x3d880, 0x80); + +struct CellDmuxPamfHandle +{ + vm::bptr demuxer; + + DmuxCb notify_demux_done; + DmuxCb notify_prog_end_code; + DmuxCb notify_fatal_err; +}; + +CHECK_SIZE(CellDmuxPamfHandle, 0x1c); + +struct DmuxPamfElementaryStream +{ + vm::bptr _this; + be_t this_size; + u8 this_index; + + vm::bptr demuxer; + + DmuxCb notify_au_found; + DmuxCb notify_flush_done; + + be_t stream_id; + be_t private_stream_id; + b8 is_avc; + + vm::bptr au_queue_buffer; + be_t unk; // Likely au_queue_buffer_size, unused + be_t au_max_size; + u8 au_specific_info[0x10]; + be_t au_specific_info_size; + + b8 reset_next_au; + + be_t es_id; + + u8 reserved[72]; + + error_code release_au(ppu_thread& ppu, vm::ptr au_addr, u32 au_size) const; + error_code disable_es(ppu_thread& ppu); + error_code flush_es(ppu_thread& ppu) const; + error_code reset_es(ppu_thread& ppu) const; +}; + +static_assert(std::is_standard_layout_v && std::is_trivial_v); +CHECK_SIZE_ALIGN(DmuxPamfElementaryStream, 0x98, 4); + +struct CellDmuxPamfEsHandle +{ + vm::bptr es; + + DmuxCb notify_au_found; + DmuxCb notify_flush_done; +}; + +CHECK_SIZE(CellDmuxPamfEsHandle, 0x14); diff --git a/rpcs3/Emu/Cell/lv2/sys_prx.cpp b/rpcs3/Emu/Cell/lv2/sys_prx.cpp index 046d19c48e..19e8640cc5 100644 --- a/rpcs3/Emu/Cell/lv2/sys_prx.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_prx.cpp @@ -64,7 +64,7 @@ extern const std::map g_prx_list { "libddpdec.sprx", 0 }, { "libdivxdec.sprx", 0 }, { "libdmux.sprx", 0 }, - { "libdmuxpamf.sprx", 0 }, + { "libdmuxpamf.sprx", 1 }, { "libdtslbrdec.sprx", 0 }, { "libfiber.sprx", 0 }, { "libfont.sprx", 0 },
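A note on the control-flow idiom shared by the PPU-side functions in this diff: disable_es(), flush_es() and reset_es() each read a resume point from ppu_thread::optional_savestate_state, switch on it with fallthrough between stages, and, whenever cpu_flag::again is raised, serialize the current stage and return early so the whole HLE call can be replayed later. reset_stream_and_wait_done() cannot do this, because sys_cond_wait() and reset_stream() already occupy the single savestate slot, so it takes a try_to_lock guard and defers the entire call instead. Below is a minimal standalone sketch of the resume idiom; fake_ppu and disable_es_like are hypothetical names, and a plain std::optional<int> stands in for the serializer reads/writes, so this illustrates the pattern rather than reproducing RPCS3 code.

#include <cstdio>
#include <optional>

// Hypothetical stand-ins: 'savestate' plays the role of
// ppu_thread::optional_savestate_state, 'interrupt' the role of
// (ppu.state & cpu_flag::again).
struct fake_ppu
{
    std::optional<int> savestate;
    bool interrupt = false;
};

// Shaped like DmuxPamfElementaryStream::disable_es(): one case per
// interruptible stage, fallthrough on success, early return with the
// stage recorded when asked to suspend.
int disable_es_like(fake_ppu& ppu)
{
    const int stage = ppu.savestate.value_or(0); // like ar.try_read<u8>()
    ppu.savestate.reset();                       // like ar.clear()

    switch (stage)
    {
    case 0:
        std::puts("stage 0: lock mutex, look up the elementary stream");
        if (ppu.interrupt) { ppu.savestate = 0; return -1; } // like ar(0); return {};
        [[fallthrough]];
    case 1:
        std::puts("stage 1: send SPU command and wait for the result");
        if (ppu.interrupt) { ppu.savestate = 1; return -1; }
        [[fallthrough]];
    case 2:
        std::puts("stage 2: signal cond, unlock mutex");
        return 0; // CELL_OK
    default:
        return 1; // like fmt::throw_exception("Unexpected savestate value")
    }
}

int main()
{
    fake_ppu ppu;

    ppu.interrupt = true;
    disable_es_like(ppu); // suspends: records stage 0 and bails out

    ppu.interrupt = false;
    disable_es_like(ppu); // replayed: resumes at stage 0, runs to completion
}

On resume, a stage is re-executed from the syscall that was interrupted; this relies on an interrupted syscall reporting cpu_flag::again before taking effect, so re-running it is safe.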