2020-12-05 13:08:24 +01:00
|
|
|
#include "stdafx.h"
|
2018-09-29 00:12:00 +02:00
|
|
|
#include "sys_memory.h"
|
|
|
|
|
|
2018-09-25 22:34:45 +02:00
|
|
|
#include "Emu/Memory/vm_locking.h"
|
2020-06-05 11:36:28 +02:00
|
|
|
#include "Emu/CPU/CPUThread.h"
|
2020-10-30 21:26:22 +01:00
|
|
|
#include "Emu/Cell/ErrorCodes.h"
|
2020-11-17 12:50:00 +01:00
|
|
|
#include "Emu/Cell/SPUThread.h"
|
2018-05-07 20:57:06 +02:00
|
|
|
#include "Emu/IdManager.h"
|
2014-06-25 00:38:34 +02:00
|
|
|
|
2020-11-07 23:56:35 +01:00
|
|
|
#include "util/vm.hpp"
|
2020-12-18 15:43:34 +01:00
|
|
|
#include "util/asm.hpp"
|
2020-11-07 23:56:35 +01:00
|
|
|
|
2018-08-25 14:39:00 +02:00
|
|
|
LOG_CHANNEL(sys_memory);
|
2015-07-10 16:45:16 +02:00
|
|
|
|
2022-07-04 15:02:17 +02:00
|
|
|
// Guards aggregated memory statistics: taken (shared) in sys_memory_get_user_memory_size
// and (exclusive) around container creation/destruction
static shared_mutex s_memstats_mtx;
|
|
|
|
|
|
2021-05-22 19:42:28 +02:00
|
|
|
// Construct a memory container of 'size' bytes.
// When created through IDM (from_idm == true) the freshly assigned IDM ID is recorded;
// otherwise the container is tagged with SYS_MEMORY_CONTAINER_ID_INVALID (the global/default container).
lv2_memory_container::lv2_memory_container(u32 size, bool from_idm) noexcept
	: size(size)
	, id{from_idm ? idm::last_id() : SYS_MEMORY_CONTAINER_ID_INVALID}
{
}
|
|
|
|
|
|
2022-07-04 15:02:17 +02:00
|
|
|
// Deserialize a memory container from a savestate archive.
// Member-initializer order follows declaration order: 'size' is read first, then 'used'
// (mirrors the write order in lv2_memory_container::save).
lv2_memory_container::lv2_memory_container(utils::serial& ar, bool from_idm) noexcept
	: size(ar)
	, id{from_idm ? idm::last_id() : SYS_MEMORY_CONTAINER_ID_INVALID}
	, used(ar)
{
}
|
|
|
|
|
|
|
|
|
|
// Savestate factory: reconstructs a container owned by IDM.
std::shared_ptr<void> lv2_memory_container::load(utils::serial& ar)
{
	// Use idm::last_id() only for the instances at IDM (hence from_idm = true)
	auto ctr = std::make_shared<lv2_memory_container>(stx::exact_t<utils::serial&>(ar), true);
	return ctr;
}
|
|
|
|
|
|
|
|
|
|
// Serialize container state: fixed capacity first, then current usage
// (must match the read order of the utils::serial constructor).
void lv2_memory_container::save(utils::serial& ar)
{
	ar(size, used);
}
|
|
|
|
|
|
|
|
|
|
// Resolve a container ID to its object.
// The reserved invalid ID maps to the global default container (fxo singleton);
// any other ID is looked up in IDM and may yield nullptr if stale.
lv2_memory_container* lv2_memory_container::search(u32 id)
{
	if (id == SYS_MEMORY_CONTAINER_ID_INVALID)
	{
		return &g_fxo->get<lv2_memory_container>();
	}

	return idm::check<lv2_memory_container>(id);
}
|
2019-07-04 20:27:06 +02:00
|
|
|
|
2020-06-11 13:35:36 +02:00
|
|
|
// Maps each 64 KB page slot (addr >> 16) to the memory container that paid for the
// allocation starting there. Entries are installed by sys_memory_allocate* and
// atomically removed (exchanged to null) by sys_memory_free.
struct sys_memory_address_table
{
	atomic_t<lv2_memory_container*> addrs[65536]{};

	sys_memory_address_table() = default;

	// Deserialized after the container ID map so lv2_memory_container::search() can resolve IDs
	SAVESTATE_INIT_POS(id_manager::id_map<lv2_memory_container>::savestate_init_pos + 0.1);

	sys_memory_address_table(utils::serial& ar)
	{
		// First: address, second: container ID (SYS_MEMORY_CONTAINER_ID_INVALID for global FXO memory container)
		std::unordered_map<u16, u32> mm;
		ar(mm);

		for (const auto& [addr, id] : mm)
		{
			// A savestate entry must always resolve to a live container
			addrs[addr] = ensure(lv2_memory_container::search(id));
		}
	}

	void save(utils::serial& ar)
	{
		// Serialize only occupied slots, as (page index -> container ID) pairs
		std::unordered_map<u16, u32> mm;

		for (auto& ctr : addrs)
		{
			if (const auto ptr = +ctr)
			{
				// Slot index recovered from the element's position in the array
				mm[static_cast<u16>(&ctr - addrs)] = ptr->id;
			}
		}

		ar(mm);
	}
};
|
2018-05-07 20:57:06 +02:00
|
|
|
|
2023-08-21 11:43:53 +02:00
|
|
|
// Reserve a virtual memory block appropriate for the requested page granularity.
// 64K-page requests go to the user64k area with a fixed 512 MB reservation;
// anything else goes to the user1m area, sized up to a 256 MB multiple of the request.
std::shared_ptr<vm::block_t> reserve_map(u32 alloc_size, u32 align)
{
	const bool use_64k_pages = (align == 0x10000);

	if (use_64k_pages)
	{
		return vm::reserve_map(vm::user64k, 0, 0x20000000, vm::page_size_64k | vm::bf0_0x1);
	}

	return vm::reserve_map(vm::user1m, 0, utils::align(alloc_size, 0x10000000), vm::page_size_1m | vm::bf0_0x1);
}
|
|
|
|
|
|
2018-11-14 00:47:58 +01:00
|
|
|
// Todo: fix order of error checks
|
|
|
|
|
|
2024-10-12 09:40:13 +02:00
|
|
|
// Allocate user memory from the default (global) container.
// On success writes the allocated address to *alloc_addr and returns CELL_OK.
// Errors: CELL_EALIGN (size zero or misaligned), CELL_EINVAL (bad flags),
//         CELL_ENOMEM (container exhausted or VM reservation failed),
//         CELL_EFAULT (alloc_addr is null).
error_code sys_memory_allocate(cpu_thread& cpu, u64 size, u64 flags, vm::ptr<u32> alloc_addr)
{
	cpu.state += cpu_flag::wait;

	sys_memory.warning("sys_memory_allocate(size=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size, flags, alloc_addr);

	if (!size)
	{
		return {CELL_EALIGN, size};
	}

	// Check allocation size: flags select the page size (default is 1 MB pages)
	const u32 align =
		flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 :
		flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 :
		flags == 0 ? 0x100000 : 0;

	if (!align)
	{
		return {CELL_EINVAL, flags};
	}

	if (size % align)
	{
		return {CELL_EALIGN, size};
	}

	// Get "default" memory container
	auto& dct = g_fxo->get<lv2_memory_container>();

	// Try to get "physical memory" (reserves 'size' bytes in the container accounting)
	if (!dct.take(size))
	{
		return {CELL_ENOMEM, dct.size - dct.used};
	}

	// 'size' fits in u32 once the container accepted it (container capacity is u32);
	// cast explicitly for the u32-based VM interfaces, consistent with
	// sys_memory_allocate_from_container
	if (const auto area = reserve_map(static_cast<u32>(size), align))
	{
		if (const u32 addr = area->alloc(static_cast<u32>(size), nullptr, align))
		{
			// Record the paying container for this 64K page slot (slot must have been empty)
			ensure(!g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(&dct));

			if (alloc_addr)
			{
				sys_memory.notice("sys_memory_allocate(): Allocated 0x%x address (size=0x%x)", addr, size);

				vm::lock_sudo(addr, static_cast<u32>(size));
				cpu.check_state();
				*alloc_addr = addr;
				return CELL_OK;
			}

			// Null output pointer: dealloc using the syscall (also returns memory to the container)
			sys_memory_free(cpu, addr);
			return CELL_EFAULT;
		}
	}

	// Roll back the container reservation on failure
	dct.free(size);
	return CELL_ENOMEM;
}
|
|
|
|
|
|
2024-10-12 09:40:13 +02:00
|
|
|
// Allocate user memory charged against a specific memory container (by ID).
// On success writes the allocated address to *alloc_addr and returns CELL_OK.
// Errors: CELL_EALIGN (size zero or misaligned), CELL_EINVAL (bad flags),
//         CELL_ESRCH (no such container), CELL_ENOMEM (container exhausted or
//         VM reservation failed), CELL_EFAULT (alloc_addr is null).
error_code sys_memory_allocate_from_container(cpu_thread& cpu, u64 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr)
{
	cpu.state += cpu_flag::wait;

	sys_memory.warning("sys_memory_allocate_from_container(size=0x%x, cid=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size, cid, flags, alloc_addr);

	if (!size)
	{
		return {CELL_EALIGN, size};
	}

	// Check allocation size: flags select the page size (default is 1 MB pages)
	const u32 align =
		flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 :
		flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 :
		flags == 0 ? 0x100000 : 0;

	if (!align)
	{
		return {CELL_EINVAL, flags};
	}

	if (size % align)
	{
		return {CELL_EALIGN, size};
	}

	// Look up the container and reserve the memory inside the IDM callback
	// (runs under IDM protection, so take() cannot race with container destruction)
	const auto ct = idm::get<lv2_memory_container>(cid, [&](lv2_memory_container& ct) -> CellError
	{
		// Try to get "physical memory"
		if (!ct.take(size))
		{
			return CELL_ENOMEM;
		}

		return {};
	});

	if (!ct)
	{
		return CELL_ESRCH;
	}

	if (ct.ret)
	{
		// take() failed: report remaining capacity alongside the error
		return {ct.ret, ct->size - ct->used};
	}

	if (const auto area = reserve_map(static_cast<u32>(size), align))
	{
		// NOTE(review): unlike sys_memory_allocate, no explicit alignment is passed to
		// alloc() here, so the area's default alignment is used — confirm this is intended
		if (const u32 addr = area->alloc(static_cast<u32>(size)))
		{
			// Record the paying container for this 64K page slot (slot must have been empty)
			ensure(!g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(ct.ptr.get()));

			if (alloc_addr)
			{
				vm::lock_sudo(addr, static_cast<u32>(size));
				cpu.check_state();
				*alloc_addr = addr;
				return CELL_OK;
			}

			// Null output pointer: dealloc using the syscall (also returns memory to the container)
			sys_memory_free(cpu, addr);
			return CELL_EFAULT;
		}
	}

	// Roll back the container reservation on failure
	ct->free(size);
	return CELL_ENOMEM;
}
|
|
|
|
|
|
2020-10-30 16:09:30 +01:00
|
|
|
// Free memory previously allocated by sys_memory_allocate[_from_container].
// Returns CELL_EINVAL if 'addr' was not produced by those syscalls.
error_code sys_memory_free(cpu_thread& cpu, u32 addr)
{
	cpu.state += cpu_flag::wait;

	sys_memory.warning("sys_memory_free(addr=0x%x)", addr);

	// Only 64K-aligned addresses can appear in the address table;
	// atomically detach the owning container from the slot
	lv2_memory_container* ct = nullptr;

	if (addr % 0x10000 == 0)
	{
		ct = g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(nullptr);
	}

	if (!ct)
	{
		return {CELL_EINVAL, addr};
	}

	// Deallocation must succeed: owning the table entry proves the allocation exists
	const auto size = (ensure(vm::dealloc(addr)));

	{
		// Return "physical memory" to the container while holding the IDM reader lock,
		// so the container cannot be withdrawn concurrently
		reader_lock lock(id_manager::g_mutex);
		ct->free(size);
	}

	return CELL_OK;
}
|
|
|
|
|
|
2020-10-30 16:09:30 +01:00
|
|
|
// Query page attributes (protection, access rights, page size) for 'addr'.
// Errors: CELL_EINVAL (unmapped address or SPU-mirror range), CELL_EFAULT (unreadable attr pointer).
error_code sys_memory_get_page_attribute(cpu_thread& cpu, u32 addr, vm::ptr<sys_page_attr_t> attr)
{
	cpu.state += cpu_flag::wait;

	sys_memory.trace("sys_memory_get_page_attribute(addr=0x%x, attr=*0x%x)", addr, attr);

	// Exclusive VM lock: keeps the mapping stable across the multiple check_addr queries below
	vm::writer_lock rlock;

	if (!vm::check_addr(addr) || addr >= SPU_FAKE_BASE_ADDR)
	{
		return CELL_EINVAL;
	}

	// The output structure itself must be readable guest memory
	if (!vm::check_addr(attr.addr(), vm::page_readable, attr.size()))
	{
		return CELL_EFAULT;
	}

	attr->attribute = 0x40000ull; // SYS_MEMORY_PROT_READ_WRITE (TODO)
	// 0xDxxxxxxx is the PPU stack area — reported as PPU-thread-only access
	attr->access_right = addr >> 28 == 0xdu ? SYS_MEMORY_ACCESS_RIGHT_PPU_THR : SYS_MEMORY_ACCESS_RIGHT_ANY;// (TODO)

	// Report the page size of the mapping backing 'addr' (1 MB, 64 KB, or default 4 KB)
	if (vm::check_addr(addr, vm::page_1m_size))
	{
		attr->page_size = 0x100000;
	}
	else if (vm::check_addr(addr, vm::page_64k_size))
	{
		attr->page_size = 0x10000;
	}
	else
	{
		attr->page_size = 4096;
	}

	attr->pad = 0; // Always write 0
	return CELL_OK;
}
|
|
|
|
|
|
2020-10-30 16:09:30 +01:00
|
|
|
// Report total and available user memory of the default container.
// Memory reserved by user-created containers is excluded from the total.
error_code sys_memory_get_user_memory_size(cpu_thread& cpu, vm::ptr<sys_memory_info_t> mem_info)
{
	cpu.state += cpu_flag::wait;

	sys_memory.warning("sys_memory_get_user_memory_size(mem_info=*0x%x)", mem_info);

	// Get "default" memory container
	auto& dct = g_fxo->get<lv2_memory_container>();

	sys_memory_info_t info{};

	{
		// Snapshot statistics under the shared stats lock
		::reader_lock lock(s_memstats_mtx);

		info.total_user_memory = dct.size;
		info.available_user_memory = dct.size - dct.used;

		// Scan other memory containers and subtract their reserved capacity
		idm::select<lv2_memory_container>([&](u32, lv2_memory_container& ct)
		{
			info.total_user_memory -= ct.size;
		});
	}

	cpu.check_state();
	*mem_info = info;
	return CELL_OK;
}
|
|
|
|
|
|
2020-10-30 16:09:30 +01:00
|
|
|
// TODO: unimplemented stub — returns success without writing anything to *mem_stat
error_code sys_memory_get_user_memory_stat(cpu_thread& cpu, vm::ptr<sys_memory_user_memory_stat_t> mem_stat)
{
	cpu.state += cpu_flag::wait;

	sys_memory.todo("sys_memory_get_user_memory_stat(mem_stat=*0x%x)", mem_stat);

	return CELL_OK;
}
|
|
|
|
|
|
2024-10-12 09:40:13 +02:00
|
|
|
// Create a memory container of (rounded-down) 'size' bytes, carved out of the default container.
// On success writes the new container ID to *cid.
// Errors: CELL_ENOMEM (zero size after rounding, or default container exhausted),
//         CELL_EAGAIN (IDM could not create the object).
error_code sys_memory_container_create(cpu_thread& cpu, vm::ptr<u32> cid, u64 size)
{
	cpu.state += cpu_flag::wait;

	sys_memory.warning("sys_memory_container_create(cid=*0x%x, size=0x%x)", cid, size);

	// Round down to 1 MB granularity
	size &= ~0xfffff;

	if (!size)
	{
		return CELL_ENOMEM;
	}

	auto& dct = g_fxo->get<lv2_memory_container>();

	// Exclusive stats lock: creation must not interleave with the stats snapshot
	std::lock_guard lock(s_memstats_mtx);

	// Try to obtain "physical memory" from the default container
	if (!dct.take(size))
	{
		return CELL_ENOMEM;
	}

	// Create the memory container (from_idm = true records the IDM-assigned ID)
	if (const u32 id = idm::make<lv2_memory_container>(static_cast<u32>(size), true))
	{
		cpu.check_state();
		*cid = id;
		return CELL_OK;
	}

	// IDM creation failed: return the reserved memory to the default container
	dct.free(size);
	return CELL_EAGAIN;
}
|
|
|
|
|
|
2020-10-30 16:09:30 +01:00
|
|
|
// Destroy a memory container and return its capacity to the default container.
// Errors: CELL_ESRCH (no such container), CELL_EBUSY (container still has allocations).
error_code sys_memory_container_destroy(cpu_thread& cpu, u32 cid)
{
	cpu.state += cpu_flag::wait;

	sys_memory.warning("sys_memory_container_destroy(cid=0x%x)", cid);

	// Exclusive stats lock: destruction must not interleave with the stats snapshot
	std::lock_guard lock(s_memstats_mtx);

	const auto ct = idm::withdraw<lv2_memory_container>(cid, [](lv2_memory_container& ct) -> CellError
	{
		// Check if some memory is not deallocated (the container cannot be destroyed in this case).
		// CAS used==0 -> used==size: marks the container fully consumed so no further take() succeeds
		if (!ct.used.compare_and_swap_test(0, ct.size))
		{
			return CELL_EBUSY;
		}

		return {};
	});

	if (!ct)
	{
		return CELL_ESRCH;
	}

	if (ct.ret)
	{
		return ct.ret;
	}

	// Return "physical memory" to the default container
	g_fxo->get<lv2_memory_container>().free(ct->size);

	return CELL_OK;
}
|
|
|
|
|
|
2020-10-30 16:09:30 +01:00
|
|
|
// Report total and available memory of the container identified by 'cid'.
// Returns CELL_ESRCH if the container does not exist.
error_code sys_memory_container_get_size(cpu_thread& cpu, vm::ptr<sys_memory_info_t> mem_info, u32 cid)
{
	cpu.state += cpu_flag::wait;

	sys_memory.warning("sys_memory_container_get_size(mem_info=*0x%x, cid=0x%x)", mem_info, cid);

	const auto container = idm::get<lv2_memory_container>(cid);

	if (!container)
	{
		return CELL_ESRCH;
	}

	cpu.check_state();

	const u32 total = container->size;
	mem_info->total_user_memory = total;                            // Total container memory
	mem_info->available_user_memory = total - container->used;      // Available container memory

	return CELL_OK;
}
|
2023-01-09 18:03:01 +01:00
|
|
|
|
|
|
|
|
// Destroy a container together with its child containers.
// Since multi-process is not supported yet, child containers mean nothing at the
// moment — this simply delegates to sys_memory_container_destroy for the parent.
error_code sys_memory_container_destroy_parent_with_childs(cpu_thread& cpu, u32 cid, u32 must_0, vm::ptr<u32> mc_child)
{
	sys_memory.warning("sys_memory_container_destroy_parent_with_childs(cid=0x%x, must_0=%d, mc_child=*0x%x)", cid, must_0, mc_child);

	// The third argument is reserved and must be zero
	if (must_0 == 0)
	{
		return sys_memory_container_destroy(cpu, cid);
	}

	return CELL_EINVAL;
}
|