#include "stdafx.h"
#include "vm_locking.h"
#include "vm_ptr.h"
#include "vm_ref.h"
#include "vm_reservation.h"
#include "vm_var.h"

#include "Utilities/mutex.h"
#include "Utilities/cond.h"
#include "Utilities/Thread.h"
#include "Utilities/VirtualMemory.h"
#include "Utilities/address_range.h"
#include "Emu/CPU/CPUThread.h"
#include "Emu/Cell/lv2/sys_memory.h"
#include "Emu/RSX/GSRender.h"
#include "Emu/Cell/SPURecompiler.h"
#include "Emu/perf_meter.hpp"

#include <thread>
#include <deque>

LOG_CHANNEL(vm_log, "VM");

namespace vm
{
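	// Reserves a region of address space (4 GiB by default), probing successive
	// 4 GiB-aligned hint addresses above _addr until the OS accepts a placement.
	// The fallback after the loop (OS-chosen address) is presently unreachable.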
	static u8* memory_reserve_4GiB(void* _addr, u64 size = 0x100000000)
	{
		for (u64 addr = reinterpret_cast<u64>(_addr) + 0x100000000;; addr += 0x100000000)
		{
			if (auto ptr = utils::memory_reserve(size, reinterpret_cast<void*>(addr)))
			{
				return static_cast<u8*>(ptr);
			}
		}

		// TODO: a condition to break loop
		return static_cast<u8*>(utils::memory_reserve(size));
	}

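	// The mirrors below are reserved back-to-back via memory_reserve_4GiB, each
	// covering the 32-bit guest address space (g_exec_addr is twice as large).
	// g_base_addr carries the page protections seen by emulated code, while
	// g_sudo_addr maps the same pages unprotected for internal access.
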
	// Emulated virtual memory
	u8* const g_base_addr = memory_reserve_4GiB(reinterpret_cast<void*>(0x2'0000'0000));

	// Unprotected virtual memory mirror
	u8* const g_sudo_addr = memory_reserve_4GiB(g_base_addr);

	// Auxiliary virtual memory for executable areas
	u8* const g_exec_addr = memory_reserve_4GiB(g_sudo_addr, 0x200000000);

	// Stats for debugging
	u8* const g_stat_addr = memory_reserve_4GiB(g_exec_addr);

	// Reservation stats
	alignas(4096) u8 g_reservations[65536 / 128 * 64]{0};

	// Shareable memory bits
	alignas(4096) atomic_t<u8> g_shareable[65536]{0};

	// Memory locations
	alignas(64) std::vector<std::shared_ptr<block_t>> g_locations;

	// Memory mutex core
	shared_mutex g_mutex;

	// Memory mutex acknowledgement
	thread_local atomic_t<cpu_thread*>* g_tls_locked = nullptr;

	// Currently locked cache line
	atomic_t<u64> g_addr_lock = 0;

	// Memory mutex: passive locks
	std::array<atomic_t<cpu_thread*>, g_cfg.core.ppu_threads.max> g_locks{};

	// Range lock slot allocation bits
	atomic_t<u64> g_range_lock_bits{};

	// Memory range lock slots (sparse atomics)
	atomic_t<u64, 64> g_range_lock_set[64]{};

	// Page information
	struct memory_page
	{
		// Memory flags
		atomic_t<u8> flags;
	};

	// Memory pages
	std::array<memory_page, 0x100000000 / 4096> g_pages{};

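	// Reservation values appear to pack a version counter and lock bits into one
	// u64: the low 7 bits act as lock state (see vm::rsrv_unique_lock) and the
	// version advances in steps of 128, which is why a successful update adds 128
	// and version comparisons mask with -128.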
	std::pair<bool, u64> try_reservation_update(u32 addr)
	{
		// Update reservation info with new timestamp
		auto& res = reservation_acquire(addr, 1);
		const u64 rtime = res;

		return {!(rtime & vm::rsrv_unique_lock) && res.compare_and_swap_test(rtime, rtime + 128), rtime};
	}

	void reservation_update(u32 addr)
	{
		u64 old = UINT64_MAX;
		const auto cpu = get_current_cpu_thread();

		while (true)
		{
			const auto [ok, rtime] = try_reservation_update(addr);

			if (ok || (old & -128) < (rtime & -128))
			{
				return;
			}

			old = rtime;

			if (cpu && cpu->test_stopped())
			{
				return;
			}
		}
	}

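	// Claims a free slot in g_locks for the calling thread, spinning over the
	// slot array (sized by the ppu_threads setting) until a CAS succeeds.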
	static void _register_lock(cpu_thread* _cpu)
	{
		for (u32 i = 0, max = g_cfg.core.ppu_threads;;)
		{
			if (!g_locks[i] && g_locks[i].compare_and_swap_test(nullptr, _cpu))
			{
				g_tls_locked = g_locks.data() + i;
				break;
			}

			if (++i == max) i = 0;
		}
	}

	atomic_t<u64, 64>* alloc_range_lock()
	{
		const auto [bits, ok] = g_range_lock_bits.fetch_op([](u64& bits)
		{
			if (~bits) [[likely]]
			{
				bits |= bits + 1;
				return true;
			}

			return false;
		});

		if (!ok) [[unlikely]]
		{
			fmt::throw_exception("Out of range lock bits");
		}

		g_mutex.lock_unlock();

		return &g_range_lock_set[std::countr_one(bits)];
	}

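	// Slow path of the range lock: under a shared g_mutex, verify that every
	// page in [begin, begin + size) is readable; if one is not, poke it to raise
	// an access fault and retry, otherwise publish begin/size in the slot.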
	void range_lock_internal(atomic_t<u64, 64>* range_lock, u32 begin, u32 size)
	{
		perf_meter<"RHW_LOCK"_u64> perf0;

		while (true)
		{
			std::shared_lock lock(g_mutex);

			u32 test = 0;

			for (u32 i = begin / 4096, max = (begin + size - 1) / 4096; i <= max; i++)
			{
				if (!(g_pages[i].flags & (vm::page_readable)))
				{
					test = i * 4096;
					break;
				}
			}

			if (test)
			{
				lock.unlock();

				// Try triggering a page fault (write)
				// TODO: Read memory if needed
				vm::_ref<atomic_t<u8>>(test) += 0;
				continue;
			}

			range_lock->release(begin | u64{size} << 32);
			return;
		}
	}

	void free_range_lock(atomic_t<u64, 64>* range_lock) noexcept
	{
		if (range_lock < g_range_lock_set || range_lock >= std::end(g_range_lock_set))
		{
			fmt::throw_exception("Invalid range lock" HERE);
		}

		range_lock->release(0);

		std::shared_lock lock(g_mutex);

		// Use ptr difference to determine location
		const auto diff = range_lock - g_range_lock_set;
		g_range_lock_bits &= ~(1ull << diff);
	}

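	// Visits every active range lock slot (one set bit in g_range_lock_bits per
	// slot) and accumulates func(addr, size) over the slots currently holding a
	// nonzero range.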
	template <typename F>
	FORCE_INLINE static u64 for_all_range_locks(F func)
	{
		u64 result = 0;

		for (u64 bits = g_range_lock_bits.load(); bits; bits &= bits - 1)
		{
			const u32 id = std::countr_zero(bits);

			const u64 lock_val = g_range_lock_set[id].load();

			if (const u32 size = static_cast<u32>(lock_val >> 32)) [[unlikely]]
			{
				const u32 addr = static_cast<u32>(lock_val);

				result += func(addr, size);
			}
		}

		return result;
	}

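	// Publishes addr/size in g_addr_lock so new range locks stall, then (when
	// value is nonzero) spins until no existing range lock overlaps the range.
	// Callers are expected to clear g_addr_lock afterwards (release(0)).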
	static void _lock_shareable_cache(u8 value, u32 addr, u32 size)
	{
		// Block new range locks
		g_addr_lock = addr | u64{size} << 32;

		ASSUME(size);

		const auto range = utils::address_range::start_length(addr, size);

		// Wait for range locks to clear
		while (value)
		{
			const u64 bads = for_all_range_locks([&](u32 addr2, u32 size2)
			{
				ASSUME(size2);

				if (range.overlaps(utils::address_range::start_length(addr2, size2))) [[unlikely]]
				{
					return 1;
				}

				return 0;
			});

			if (!bads)
			{
				return;
			}

			_mm_pause();
		}
	}

	void passive_lock(cpu_thread& cpu)
	{
		if (!g_tls_locked || *g_tls_locked != &cpu) [[unlikely]]
		{
			_register_lock(&cpu);

			if (cpu.state) [[likely]]
			{
				cpu.state -= cpu_flag::wait + cpu_flag::memory;
			}

			if (g_mutex.is_lockable())
			{
				return;
			}

			cpu.state += cpu_flag::wait;
		}

		if (cpu.state & cpu_flag::wait)
		{
			while (true)
			{
				g_mutex.lock_unlock();
				cpu.state -= cpu_flag::wait + cpu_flag::memory;

				if (g_mutex.is_lockable()) [[likely]]
				{
					return;
				}

				cpu.state += cpu_flag::wait;
			}
		}
	}

	void passive_unlock(cpu_thread& cpu)
	{
		if (auto& ptr = g_tls_locked)
		{
			ptr->release(nullptr);
			ptr = nullptr;

			if (cpu.state & cpu_flag::memory)
			{
				cpu.state -= cpu_flag::memory;
			}
		}
	}

	void cleanup_unlock(cpu_thread& cpu) noexcept
	{
		for (u32 i = 0, max = g_cfg.core.ppu_threads; i < max; i++)
		{
			if (g_locks[i] == &cpu)
			{
				g_locks[i].compare_and_swap_test(&cpu, nullptr);
				return;
			}
		}
	}

	void temporary_unlock(cpu_thread& cpu) noexcept
	{
		if (!(cpu.state & cpu_flag::wait)) cpu.state += cpu_flag::wait;

		if (g_tls_locked && g_tls_locked->compare_and_swap_test(&cpu, nullptr))
		{
			cpu.cpu_unmem();
		}
	}

	void temporary_unlock() noexcept
	{
		if (auto cpu = get_current_cpu_thread())
		{
			temporary_unlock(*cpu);
		}
	}

	reader_lock::reader_lock()
	{
		auto cpu = get_current_cpu_thread();

		if (cpu)
		{
			if (!g_tls_locked || *g_tls_locked != cpu)
			{
				cpu = nullptr;
			}
			else
			{
				cpu->state += cpu_flag::wait;
			}
		}

		g_mutex.lock_shared();

		if (cpu)
		{
			cpu->state -= cpu_flag::memory + cpu_flag::wait;
		}
	}

	reader_lock::~reader_lock()
	{
		if (m_upgraded)
		{
			g_mutex.unlock();
		}
		else
		{
			g_mutex.unlock_shared();
		}
	}

	void reader_lock::upgrade()
	{
		if (m_upgraded)
		{
			return;
		}

		g_mutex.lock_upgrade();
		m_upgraded = true;
	}

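	// Exclusive vm lock. A nonzero addr (>= 0x10000) additionally suspends
	// passively locked threads and waits until no range lock overlaps the
	// 128-byte reservation line at addr before the constructor returns.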
	writer_lock::writer_lock(u32 addr /*mutable*/)
	{
		auto cpu = get_current_cpu_thread();

		if (cpu)
		{
			if (!g_tls_locked || *g_tls_locked != cpu)
			{
				cpu = nullptr;
			}
			else
			{
				cpu->state += cpu_flag::wait;
			}
		}

		g_mutex.lock();

		if (addr >= 0x10000)
		{
			perf_meter<"SUSPEND"_u64> perf0;

			for (auto lock = g_locks.cbegin(), end = lock + g_cfg.core.ppu_threads; lock != end; lock++)
			{
				if (auto ptr = +*lock; ptr && !(ptr->state & cpu_flag::memory))
				{
					ptr->state.test_and_set(cpu_flag::memory);
				}
			}

			g_addr_lock = addr | (u64{128} << 32);

			if (g_shareable[addr >> 16])
			{
				// Reservation address in shareable memory range
				addr = addr & 0xffff;
			}

			const auto range = utils::address_range::start_length(addr, 128);

			while (true)
			{
				const u64 bads = for_all_range_locks([&](u32 addr2, u32 size2)
				{
					// TODO (currently not possible): handle 2 64K pages (inverse range), or more pages
					if (g_shareable[addr2 >> 16])
					{
						addr2 &= 0xffff;
					}

					ASSUME(size2);

					if (range.overlaps(utils::address_range::start_length(addr2, size2))) [[unlikely]]
					{
						return 1;
					}

					return 0;
				});

				if (!bads) [[likely]]
				{
					break;
				}

				_mm_pause();
			}

			for (auto lock = g_locks.cbegin(), end = lock + g_cfg.core.ppu_threads; lock != end; lock++)
			{
				if (auto ptr = +*lock)
				{
					while (!(ptr->state & cpu_flag::wait))
						_mm_pause();
				}
			}
		}

		if (cpu)
		{
			cpu->state -= cpu_flag::memory + cpu_flag::wait;
		}
	}

	writer_lock::~writer_lock()
	{
		g_addr_lock.release(0);
		g_mutex.unlock();
	}

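	// Acquires the unique reservation lock: succeeds only when all 7 low lock
	// bits are clear, otherwise backs off (check_state / busy_wait / yield) and
	// retries; returns the previous value, or -1 for unwritable pages.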
	u64 reservation_lock_internal(u32 addr, atomic_t<u64>& res)
	{
		for (u64 i = 0;; i++)
		{
			if (u64 rtime = res; !(rtime & 127) && reservation_try_lock(res, rtime)) [[likely]]
			{
				return rtime;
			}

			if (auto cpu = get_current_cpu_thread(); cpu && cpu->state)
			{
				cpu->check_state();
			}
			else if (i < 15)
			{
				busy_wait(500);
			}
			else
			{
				// TODO: Accurate locking in this case
				if (!(g_pages[addr / 4096].flags & page_writable))
				{
					return -1;
				}

				std::this_thread::yield();
			}
		}
	}

	void reservation_shared_lock_internal(atomic_t<u64>& res)
	{
		for (u64 i = 0;; i++)
		{
			auto [_oldd, _ok] = res.fetch_op([&](u64& r)
			{
				if (r & rsrv_unique_lock)
				{
					return false;
				}

				r += 1;
				return true;
			});

			if (_ok) [[likely]]
			{
				return;
			}

			if (auto cpu = get_current_cpu_thread(); cpu && cpu->state)
			{
				cpu->check_state();
			}
			else if (i < 15)
			{
				busy_wait(500);
			}
			else
			{
				std::this_thread::yield();
			}
		}
	}

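	// Runs func with all CPU threads suspended. The reservation is presumably
	// entered in a locked (+1) state: +127 on success completes a full 128
	// version step, while -1 on failure merely undoes the lock increment.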
	void reservation_op_internal(u32 addr, std::function<bool()> func)
	{
		cpu_thread::suspend_all(get_current_cpu_thread(), [&]
		{
			if (func())
			{
				// Success, release all locks if necessary
				vm::reservation_acquire(addr, 128) += 127;
			}
			else
			{
				vm::reservation_acquire(addr, 128) -= 1;
			}
		});
	}

	void reservation_escape_internal()
	{
		const auto _cpu = get_current_cpu_thread();

		if (_cpu && _cpu->id_type() == 1)
		{
			thread_ctrl::emergency_exit("vm::reservation_escape");
		}

		if (_cpu && _cpu->id_type() == 2)
		{
			spu_runtime::g_escape(static_cast<spu_thread*>(_cpu));
		}
	}

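	// Maps size bytes of guest memory at addr. When shm mirrors an already
	// mapped object (tracked via its spare info counter), the affected 64K
	// blocks are flagged in g_shareable so reservation addresses can be masked
	// to the 64K page offset (see writer_lock above).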
	static void _page_map(u32 addr, u8 flags, u32 size, utils::shm* shm, std::pair<const u32, std::pair<u32, std::shared_ptr<utils::shm>>>* (*search_shm)(vm::block_t* block, utils::shm* shm))
	{
		if (!size || (size | addr) % 4096 || flags & page_allocated)
		{
			fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
		}

		for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
		{
			if (g_pages[i].flags)
			{
				fmt::throw_exception("Memory already mapped (addr=0x%x, size=0x%x, flags=0x%x, current_addr=0x%x)" HERE, addr, size, flags, i * 4096);
			}
		}

		if (shm && shm->flags() != 0 && shm->info++)
		{
			// Memory mirror found, map its range as shareable
			_lock_shareable_cache(1, addr, size);

			for (u32 i = addr / 65536; i < addr / 65536 + size / 65536; i++)
			{
				g_shareable[i].release(1);
			}

			// Check ref counter (using unused member info for it)
			if (shm->info == 2)
			{
				// Find another mirror and map it as shareable too
				for (auto& ploc : g_locations)
				{
					if (auto loc = ploc.get())
					{
						if (auto pp = search_shm(loc, shm))
						{
							auto& [size2, ptr] = pp->second;

							// Relock cache
							_lock_shareable_cache(1, pp->first, size2);

							for (u32 i = pp->first / 65536; i < pp->first / 65536 + size2 / 65536; i++)
							{
								g_shareable[i].release(1);
							}
						}
					}
				}
			}

			// Unlock
			g_addr_lock.release(0);
		}

		// Notify rsx that range has become valid
		// Note: This must be done *before* memory gets mapped while holding the vm lock, otherwise
		// the RSX might try to invalidate memory that got unmapped and remapped
		if (const auto rsxthr = g_fxo->get<rsx::thread>())
		{
			rsxthr->on_notify_memory_mapped(addr, size);
		}

		if (!shm)
		{
			utils::memory_protect(g_base_addr + addr, size, utils::protection::rw);
		}
		else if (shm->map_critical(g_base_addr + addr) != g_base_addr + addr || shm->map_critical(g_sudo_addr + addr) != g_sudo_addr + addr)
		{
			fmt::throw_exception("Memory mapping failed - blame Windows (addr=0x%x, size=0x%x, flags=0x%x)", addr, size, flags);
		}

		if (flags & page_executable)
		{
			// TODO
			utils::memory_commit(g_exec_addr + addr * 2, size * 2);
		}

		if (g_cfg.core.ppu_debug)
		{
			utils::memory_commit(g_stat_addr + addr, size);
		}

		for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
		{
			if (g_pages[i].flags.exchange(flags | page_allocated))
			{
				fmt::throw_exception("Concurrent access (addr=0x%x, size=0x%x, flags=0x%x, current_addr=0x%x)" HERE, addr, size, flags, i * 4096);
			}
		}
	}

	bool page_protect(u32 addr, u32 size, u8 flags_test, u8 flags_set, u8 flags_clear)
	{
		vm::writer_lock lock(0);

		if (!size || (size | addr) % 4096)
		{
			fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
		}

		const u8 flags_both = flags_set & flags_clear;

		flags_test |= page_allocated;
		flags_set &= ~flags_both;
		flags_clear &= ~flags_both;

		for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
		{
			if ((g_pages[i].flags & flags_test) != (flags_test | page_allocated))
			{
				return false;
			}
		}

		if (!flags_set && !flags_clear)
		{
			return true;
		}

		u8 start_value = 0xff;

		for (u32 start = addr / 4096, end = start + size / 4096, i = start; i < end + 1; i++)
		{
			u8 new_val = 0xff;

			if (i < end)
			{
				new_val = g_pages[i].flags;
				new_val |= flags_set;
				new_val &= ~flags_clear;

				g_pages[i].flags.release(new_val);
				new_val &= (page_readable | page_writable);
			}

			if (new_val != start_value)
			{
				if (u32 page_size = (i - start) * 4096)
				{
					const auto protection = start_value & page_writable ? utils::protection::rw : (start_value & page_readable ? utils::protection::ro : utils::protection::no);
					utils::memory_protect(g_base_addr + start * 4096, page_size, protection);
				}

				start_value = new_val;
				start = i;
			}
		}

		return true;
	}

	static u32 _page_unmap(u32 addr, u32 max_size, utils::shm* shm)
	{
		if (!max_size || (max_size | addr) % 4096)
		{
			fmt::throw_exception("Invalid arguments (addr=0x%x, max_size=0x%x)" HERE, addr, max_size);
		}

		// Determine deallocation size
		u32 size = 0;
		bool is_exec = false;

		for (u32 i = addr / 4096; i < addr / 4096 + max_size / 4096; i++)
		{
			if ((g_pages[i].flags & page_allocated) == 0)
			{
				break;
			}

			if (size == 0)
			{
				is_exec = !!(g_pages[i].flags & page_executable);
			}
			else
			{
				// Must be consistent
				verify(HERE), is_exec == !!(g_pages[i].flags & page_executable);
			}

			size += 4096;
		}

		for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
		{
			if (!(g_pages[i].flags.exchange(0) & page_allocated))
			{
				fmt::throw_exception("Concurrent access (addr=0x%x, size=0x%x, current_addr=0x%x)" HERE, addr, size, i * 4096);
			}
		}

		if (shm && shm->flags() != 0 && (--shm->info || g_shareable[addr >> 16]))
		{
			// Remove mirror from shareable cache
			_lock_shareable_cache(0, addr, size);

			for (u32 i = addr / 65536; i < addr / 65536 + size / 65536; i++)
			{
				g_shareable[i].release(0);
			}

			// Unlock
			g_addr_lock.release(0);
		}

		// Notify rsx to invalidate range
		// Note: This must be done *before* memory gets unmapped while holding the vm lock, otherwise
		// the RSX might try to call VirtualProtect on memory that is already unmapped
		if (const auto rsxthr = g_fxo->get<rsx::thread>())
		{
			rsxthr->on_notify_memory_unmapped(addr, size);
		}

		// Actually unmap memory
		if (!shm)
		{
			utils::memory_protect(g_base_addr + addr, size, utils::protection::no);
			std::memset(g_sudo_addr + addr, 0, size);
		}
		else
		{
			shm->unmap_critical(g_base_addr + addr);
			shm->unmap_critical(g_sudo_addr + addr);
		}

		if (is_exec)
		{
			utils::memory_decommit(g_exec_addr + addr * 2, size * 2);
		}

		if (g_cfg.core.ppu_debug)
		{
			utils::memory_decommit(g_stat_addr + addr, size);
		}

		return size;
	}

	bool check_addr(u32 addr, u32 size, u8 flags)
	{
		// Overflow checking
		if (addr + size < addr && (addr + size) != 0)
		{
			return false;
		}

		// Always check this flag
		flags |= page_allocated;

		for (u32 i = addr / 4096, max = (addr + size - 1) / 4096; i <= max; i++)
		{
			if ((g_pages[i].flags & flags) != flags) [[unlikely]]
			{
				return false;
			}
		}

		return true;
	}

	u32 alloc(u32 size, memory_location_t location, u32 align)
	{
		const auto block = get(location);

		if (!block)
		{
			fmt::throw_exception("Invalid memory location (%u)" HERE, +location);
		}

		return block->alloc(size, align);
	}

	u32 falloc(u32 addr, u32 size, memory_location_t location)
	{
		const auto block = get(location, addr);

		if (!block)
		{
			fmt::throw_exception("Invalid memory location (%u, addr=0x%x)" HERE, +location, addr);
		}

		return block->falloc(addr, size);
	}

	u32 dealloc(u32 addr, memory_location_t location)
	{
		const auto block = get(location, addr);

		if (!block)
		{
			fmt::throw_exception("Invalid memory location (%u, addr=0x%x)" HERE, +location, addr);
		}

		return block->dealloc(addr);
	}

	void dealloc_verbose_nothrow(u32 addr, memory_location_t location) noexcept
	{
		const auto block = get(location, addr);

		if (!block)
		{
			vm_log.error("vm::dealloc(): invalid memory location (%u, addr=0x%x)", +location, addr);
			return;
		}

		if (!block->dealloc(addr))
		{
			vm_log.error("vm::dealloc(): deallocation failed (addr=0x%x)", addr);
			return;
		}
	}

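	// Attempts to place an allocation at a fixed address inside this block.
	// With block flag 0x10 the first and last 4K pages of the request act as
	// overflow/underflow guards: marked allocated but never actually mapped.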
	bool block_t::try_alloc(u32 addr, u8 flags, u32 size, std::shared_ptr<utils::shm>&& shm)
	{
		// Check if memory area is already mapped
		for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
		{
			if (g_pages[i].flags)
			{
				return false;
			}
		}

		const u32 page_addr = addr + (this->flags & 0x10 ? 0x1000 : 0);
		const u32 page_size = size - (this->flags & 0x10 ? 0x2000 : 0);

		if (this->flags & 0x10)
		{
			// Mark overflow/underflow guard pages as allocated
			verify(HERE), !g_pages[addr / 4096].flags.exchange(page_allocated);
			verify(HERE), !g_pages[addr / 4096 + size / 4096 - 1].flags.exchange(page_allocated);
		}

		// Map "real" memory pages; provide a function to search for mirrors with private member access
		_page_map(page_addr, flags, page_size, shm.get(), [](vm::block_t* _this, utils::shm* shm)
		{
			decltype(m_map)::value_type* result = nullptr;

			// Check eligibility
			if (!_this || !(SYS_MEMORY_PAGE_SIZE_MASK & _this->flags) || _this->addr < 0x20000000 || _this->addr >= 0xC0000000)
			{
				return result;
			}

			for (auto& pp : _this->m_map)
			{
				if (pp.second.second.get() == shm)
				{
					// Found match
					return &pp;
				}
			}

			return result;
		});

		// Add entry
		m_map[addr] = std::make_pair(size, std::move(shm));

		return true;
	}

	block_t::block_t(u32 addr, u32 size, u64 flags)
		: addr(addr)
		, size(size)
		, flags(flags)
	{
		if (flags & 0x100)
		{
			// Special path for 4k-aligned pages
			m_common = std::make_shared<utils::shm>(size);
			verify(HERE), m_common->map_critical(vm::base(addr), utils::protection::no) == vm::base(addr);
			verify(HERE), m_common->map_critical(vm::get_super_ptr(addr)) == vm::get_super_ptr(addr);
		}
	}

	block_t::~block_t()
	{
		{
			vm::writer_lock lock(0);

			// Deallocate all memory
			for (auto it = m_map.begin(), end = m_map.end(); it != end;)
			{
				const auto next = std::next(it);
				const auto size = it->second.first;
				_page_unmap(it->first, size, it->second.second.get());
				it = next;
			}

			// Special path for 4k-aligned pages
			if (m_common)
			{
				m_common->unmap_critical(vm::base(addr));
				m_common->unmap_critical(vm::get_super_ptr(addr));
			}
		}
	}

	u32 block_t::alloc(const u32 orig_size, u32 align, const std::shared_ptr<utils::shm>* src, u64 flags)
	{
		if (!src)
		{
			// Use the block's flags
			flags = this->flags;
		}

		vm::writer_lock lock(0);

		// Determine minimal alignment
		const u32 min_page_size = flags & 0x100 ? 0x1000 : 0x10000;

		// Align to minimal page size
		const u32 size = ::align(orig_size, min_page_size) + (flags & 0x10 ? 0x2000 : 0);

		// Check alignment (it's page allocation, so passing small values there is just silly)
		if (align < min_page_size || align != (0x80000000u >> std::countl_zero(align)))
		{
			fmt::throw_exception("Invalid alignment (size=0x%x, align=0x%x)" HERE, size, align);
		}

		// Return if size is invalid
		if (!orig_size || !size || orig_size > size || size > this->size)
		{
			return 0;
		}

		u8 pflags = page_readable | page_writable;

		if ((flags & SYS_MEMORY_PAGE_SIZE_64K) == SYS_MEMORY_PAGE_SIZE_64K)
		{
			pflags |= page_64k_size;
		}
		else if (!(flags & (SYS_MEMORY_PAGE_SIZE_MASK & ~SYS_MEMORY_PAGE_SIZE_1M)))
		{
			pflags |= page_1m_size;
		}

		// Create or import shared memory object
		std::shared_ptr<utils::shm> shm;

		if (m_common)
			verify(HERE), !src;
		else if (src)
			shm = *src;
		else
			shm = std::make_shared<utils::shm>(size);

		// Search for an appropriate place (unoptimized)
		for (u32 addr = ::align(this->addr, align); u64{addr} + size <= u64{this->addr} + this->size; addr += align)
		{
			if (try_alloc(addr, pflags, size, std::move(shm)))
			{
				return addr + (flags & 0x10 ? 0x1000 : 0);
			}
		}

		return 0;
	}

	u32 block_t::falloc(u32 addr, const u32 orig_size, const std::shared_ptr<utils::shm>* src, u64 flags)
	{
		if (!src)
		{
			// Use the block's flags
			flags = this->flags;
		}

		vm::writer_lock lock(0);

		// Determine minimal alignment
		const u32 min_page_size = flags & 0x100 ? 0x1000 : 0x10000;

		// Align to minimal page size
		const u32 size = ::align(orig_size, min_page_size);

		// return if addr or size is invalid
		if (!size || addr < this->addr || orig_size > size || addr + u64{size} > this->addr + u64{this->size} || flags & 0x10)
		{
			return 0;
		}

		u8 pflags = page_readable | page_writable;

		if ((flags & SYS_MEMORY_PAGE_SIZE_64K) == SYS_MEMORY_PAGE_SIZE_64K)
		{
			pflags |= page_64k_size;
		}
		else if (!(flags & (SYS_MEMORY_PAGE_SIZE_MASK & ~SYS_MEMORY_PAGE_SIZE_1M)))
		{
			pflags |= page_1m_size;
		}

		// Create or import shared memory object
		std::shared_ptr<utils::shm> shm;

		if (m_common)
			verify(HERE), !src;
		else if (src)
			shm = *src;
		else
			shm = std::make_shared<utils::shm>(size);

		if (!try_alloc(addr, pflags, size, std::move(shm)))
		{
			return 0;
		}

		return addr;
	}

	u32 block_t::dealloc(u32 addr, const std::shared_ptr<utils::shm>* src)
	{
		{
			vm::writer_lock lock(0);

			const auto found = m_map.find(addr - (flags & 0x10 ? 0x1000 : 0));

			if (found == m_map.end())
			{
				return 0;
			}

			if (src && found->second.second.get() != src->get())
			{
				return 0;
			}

			// Get allocation size
			const auto size = found->second.first - (flags & 0x10 ? 0x2000 : 0);

			if (flags & 0x10)
			{
				// Clear guard pages
				verify(HERE), g_pages[addr / 4096 - 1].flags.exchange(0) == page_allocated;
				verify(HERE), g_pages[addr / 4096 + size / 4096].flags.exchange(0) == page_allocated;
			}

			// Unmap "real" memory pages
			verify(HERE), size == _page_unmap(addr, size, found->second.second.get());

			// Remove entry
			m_map.erase(found);

			return size;
		}
	}

	std::pair<u32, std::shared_ptr<utils::shm>> block_t::get(u32 addr, u32 size)
	{
		if (addr < this->addr || addr + u64{size} > this->addr + u64{this->size})
		{
			return {addr, nullptr};
		}

		vm::reader_lock lock;

		const auto upper = m_map.upper_bound(addr);

		if (upper == m_map.begin())
		{
			return {addr, nullptr};
		}

		const auto found = std::prev(upper);

		// Exact address condition (size == 0)
		if (size == 0 && found->first != addr)
		{
			return {addr, nullptr};
		}

		// Special path
		if (m_common)
		{
			return {this->addr, m_common};
		}

		// Range check
		if (addr + u64{size} > found->first + u64{found->second.second->size()})
		{
			return {addr, nullptr};
		}

		return {found->first, found->second.second};
	}

	u32 block_t::imp_used(const vm::writer_lock&)
	{
		u32 result = 0;

		for (auto& entry : m_map)
		{
			result += entry.second.first - (flags & 0x10 ? 0x2000 : 0);
		}

		return result;
	}

	u32 block_t::used()
	{
		vm::writer_lock lock(0);

		return imp_used(lock);
	}

	static bool _test_map(u32 addr, u32 size)
	{
		const auto range = utils::address_range::start_length(addr, size);

		if (!range.valid())
		{
			return false;
		}

		for (auto& block : g_locations)
		{
			if (!block)
			{
				continue;
			}

			if (range.overlaps(utils::address_range::start_length(block->addr, block->size)))
			{
				return false;
			}
		}

		return true;
	}

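	// Searches user space (0x20000000..0xC0000000) for a free aligned region and
	// wraps it in a new block_t; returns nullptr when nothing fits.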
	static std::shared_ptr<block_t> _find_map(u32 size, u32 align, u64 flags)
	{
		for (u32 addr = ::align<u32>(0x20000000, align); addr - 1 < 0xC0000000 - 1; addr += align)
		{
			if (_test_map(addr, size))
			{
				return std::make_shared<block_t>(addr, size, flags);
			}
		}

		return nullptr;
	}

	static std::shared_ptr<block_t> _map(u32 addr, u32 size, u64 flags)
	{
		if (!size || (size | addr) % 4096)
		{
			fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
		}

		if (!_test_map(addr, size))
		{
			return nullptr;
		}

		for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
		{
			if (g_pages[i].flags)
			{
				fmt::throw_exception("Unexpected pages allocated (current_addr=0x%x)" HERE, i * 4096);
			}
		}

		auto block = std::make_shared<block_t>(addr, size, flags);

		g_locations.emplace_back(block);

		return block;
	}

	static std::shared_ptr<block_t> _get_map(memory_location_t location, u32 addr)
	{
		if (location != any)
		{
			// return selected location
			if (location < g_locations.size())
			{
				return g_locations[location];
			}

			return nullptr;
		}

		// search location by address
		for (auto& block : g_locations)
		{
			if (block && addr >= block->addr && addr <= block->addr + block->size - 1)
			{
				return block;
			}
		}

		return nullptr;
	}

	std::shared_ptr<block_t> map(u32 addr, u32 size, u64 flags)
	{
		vm::writer_lock lock(0);

		return _map(addr, size, flags);
	}

	std::shared_ptr<block_t> find_map(u32 orig_size, u32 align, u64 flags)
	{
		vm::writer_lock lock(0);

		// Align to minimal page size
		const u32 size = ::align(orig_size, 0x10000);

		// Check alignment
		if (align < 0x10000 || align != (0x80000000u >> std::countl_zero(align)))
		{
			fmt::throw_exception("Invalid alignment (size=0x%x, align=0x%x)" HERE, size, align);
		}

		// Return if size is invalid
		if (!size)
		{
			return nullptr;
		}

		auto block = _find_map(size, align, flags);

		if (block) g_locations.emplace_back(block);

		return block;
	}

	std::shared_ptr<block_t> unmap(u32 addr, bool must_be_empty)
	{
		vm::writer_lock lock(0);

		for (auto it = g_locations.begin() + memory_location_max; it != g_locations.end(); it++)
		{
			if (*it && (*it)->addr == addr)
			{
				if (must_be_empty && (*it)->flags & 0x3)
				{
					continue;
				}

				if (!must_be_empty && ((*it)->flags & 0x3) != 2)
				{
					continue;
				}

				if (must_be_empty && (it->use_count() != 1 || (*it)->imp_used(lock)))
				{
					return *it;
				}

				auto block = std::move(*it);
				g_locations.erase(it);
				return block;
			}
		}

		return nullptr;
	}

|
|
|
|
|
|
|
2019-07-03 19:17:04 +02:00
|
|
|
|
std::shared_ptr<block_t> get(memory_location_t location, u32 addr)
|
2015-07-11 22:44:53 +02:00
|
|
|
|
{
|
2018-04-03 21:42:47 +02:00
|
|
|
|
vm::reader_lock lock;
|
2015-07-11 22:44:53 +02:00
|
|
|
|
|
2019-07-03 19:17:04 +02:00
|
|
|
|
return _get_map(location, addr);
|
|
|
|
|
|
}

	std::shared_ptr<block_t> reserve_map(memory_location_t location, u32 addr, u32 area_size, u64 flags)
	{
		vm::reader_lock lock;

		auto area = _get_map(location, addr);

		if (area)
		{
			return area;
		}

		lock.upgrade();

		// Allocation on an arbitrary address (a named location, created on demand)
		if (location != any && location < g_locations.size())
		{
			// Return selected location
			auto& loc = g_locations[location];

			if (!loc)
			{
				// Deferred allocation
				loc = _find_map(area_size, 0x10000000, flags);
			}

			return loc;
		}

		// Fixed address allocation; look up again, since another thread may have
		// created the block while the lock was being upgraded
		area = _get_map(location, addr);

		if (area)
		{
			return area;
		}

		return _map(addr, area_size, flags);
	}
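
	// Usage sketch (illustrative; the size and flag values are made up, and
	// user1m is the assumed enumerator name for the "user 1m pages" slot that
	// init() below leaves null):
	//
	//   if (auto area = vm::reserve_map(vm::user1m, 0, 0x10000000, 0))
	//   {
	//       // area is now cached in g_locations[user1m] for later lookups
	//   }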

	bool try_access(u32 addr, void* ptr, u32 size, bool is_write)
	{
		vm::reader_lock lock;

		if (size == 0)
		{
			return true;
		}

		if (vm::check_addr(addr, size, is_write ? page_writable : page_readable))
		{
			// Access the pages through the unprotected mirror
			void* src = vm::g_sudo_addr + addr;
			void* dst = ptr;

			if (is_write)
			{
				std::swap(src, dst);
			}

			// Perform small, naturally aligned power-of-2 writes atomically
			if (size <= 16 && (size & (size - 1)) == 0 && (addr & (size - 1)) == 0)
			{
				if (is_write)
				{
					switch (size)
					{
					case 1: atomic_storage<u8>::release(*static_cast<u8*>(dst), *static_cast<u8*>(src)); break;
					case 2: atomic_storage<u16>::release(*static_cast<u16*>(dst), *static_cast<u16*>(src)); break;
					case 4: atomic_storage<u32>::release(*static_cast<u32*>(dst), *static_cast<u32*>(src)); break;
					case 8: atomic_storage<u64>::release(*static_cast<u64*>(dst), *static_cast<u64*>(src)); break;
					case 16: _mm_store_si128(static_cast<__m128i*>(dst), _mm_loadu_si128(static_cast<__m128i*>(src))); break;
					}

					return true;
				}
			}

			std::memcpy(dst, src, size);
			return true;
		}

		return false;
	}
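
	// Usage sketch (illustrative): safely reading guest memory into a host buffer;
	// a false result means the range was not fully readable, and nothing is copied.
	//
	//   u8 buf[16];
	//   if (vm::try_access(addr, buf, sizeof(buf), false))
	//   {
	//       // buf holds 16 bytes copied via g_sudo_addr
	//   }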

	inline namespace ps3_
	{
		void init()
		{
			vm_log.notice("Guest memory base address ranges:\n"
				"vm::g_base_addr = %p - %p\n"
				"vm::g_sudo_addr = %p - %p\n"
				"vm::g_exec_addr = %p - %p\n"
				"vm::g_stat_addr = %p - %p\n"
				"vm::g_reservations = %p - %p\n",
				g_base_addr, g_base_addr + UINT32_MAX,
				g_sudo_addr, g_sudo_addr + UINT32_MAX,
				g_exec_addr, g_exec_addr + 0x200000000 - 1,
				g_stat_addr, g_stat_addr + UINT32_MAX,
				g_reservations, g_reservations + sizeof(g_reservations) - 1);

			g_locations =
			{
				std::make_shared<block_t>(0x00010000, 0x1FFF0000, 0x200), // main
				std::make_shared<block_t>(0x20000000, 0x10000000, 0x201), // user 64k pages
				nullptr,                                                  // user 1m pages
				nullptr,                                                  // rsx context
				std::make_shared<block_t>(0xC0000000, 0x10000000),        // video
				std::make_shared<block_t>(0xD0000000, 0x10000000, 0x111), // stack
				std::make_shared<block_t>(0xE0000000, 0x20000000),        // SPU reserved
			};

			std::memset(g_reservations, 0, sizeof(g_reservations));
			std::memset(g_shareable, 0, sizeof(g_shareable));
			std::memset(g_range_lock_set, 0, sizeof(g_range_lock_set));
			g_range_lock_bits = 0;
		}
	}
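
	// Usage sketch (illustrative): a typical boot-time flow built on the layout
	// above; block_t::alloc is assumed to be the allocation entry point declared
	// elsewhere in this module.
	//
	//   vm::init();
	//   if (auto main_block = vm::get(vm::main, 0))
	//   {
	//       const u32 addr = main_block->alloc(0x10000); // one 64K page of "main"
	//   }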

	void close()
	{
		g_locations.clear();

		utils::memory_decommit(g_base_addr, 0x100000000);
		utils::memory_decommit(g_exec_addr, 0x200000000); // match the full 8 GiB reservation
		utils::memory_decommit(g_stat_addr, 0x100000000);
	}
}

template <>
void fmt_class_string<vm::_ptr_base<const void, u32>>::format(std::string& out, u64 arg)
{
	fmt_class_string<u32>::format(out, arg);
}

template <>
void fmt_class_string<vm::_ptr_base<const char, u32>>::format(std::string& out, u64 arg)
{
	// Special case (may be allowed for some arguments)
	if (arg == 0)
	{
		out += reinterpret_cast<const char*>(u8"«NULL»");
		return;
	}

	// Filter certainly invalid addresses (TODO)
	if (arg < 0x10000 || arg >= 0xf0000000)
	{
		out += reinterpret_cast<const char*>(u8"«INVALID_ADDRESS:");
		fmt_class_string<u32>::format(out, arg);
		out += reinterpret_cast<const char*>(u8"»");
		return;
	}

	const auto start = out.size();

	out += reinterpret_cast<const char*>(u8"“");

	for (vm::_ptr_base<const volatile char, u32> ptr = vm::cast(arg);; ptr++)
	{
		if (!vm::check_addr(ptr.addr()))
		{
			// TODO: optimize checks
			out.resize(start);
			out += reinterpret_cast<const char*>(u8"«INVALID_ADDRESS:");
			fmt_class_string<u32>::format(out, arg);
			out += reinterpret_cast<const char*>(u8"»");
			return;
		}

		if (const char ch = *ptr)
		{
			out += ch;
		}
		else
		{
			break;
		}
	}

	out += reinterpret_cast<const char*>(u8"”");
}
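
// Usage sketch (illustrative): with the specialization above, "%s" on a guest
// char pointer prints the pointed-to string in quotation marks, or an
// «INVALID_ADDRESS:...» marker when the address cannot be dereferenced
// (some_guest_addr is a placeholder).
//
//   vm::ptr<const char> name = vm::cast(some_guest_addr);
//   std::string s = fmt::format("name=%s", name);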