rx/mem: cross platform implementation

fixed compilation errors
avoid memfd_create usage on Android (use an unlinked shm_open object instead)
DH 2025-10-16 21:17:37 +03:00
parent 0bc167ea87
commit 3ffece2d77
10 changed files with 293 additions and 163 deletions
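
The Android-specific part of this commit swaps memfd_create for a named POSIX shared-memory object that is unlinked right after creation, so the mapping stays anonymous from the caller's point of view. A minimal standalone sketch of that pattern (the helper name and the object-name format are illustrative, not taken from the rpcsx sources; it assumes shm_open is available on the targeted Android API level, as the diff below does):

#include <cstddef>
#include <cstdio>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

// Create a file descriptor backed by anonymous shared memory of `size` bytes.
static int createAnonymousMemoryFd(std::size_t size) {
#ifdef __ANDROID__
  // shm_open + immediate shm_unlink: the object lives on through the fd,
  // but never stays visible under a persistent name.
  char name[64];
  std::snprintf(name, sizeof(name), "/anon-%d-%zx", ::getpid(), size);
  int fd = ::shm_open(name, O_CREAT | O_TRUNC | O_RDWR, 0666);
  if (fd < 0)
    return -1;
  ::shm_unlink(name);
#else
  int fd = ::memfd_create("", 0); // anonymous, needs no filesystem name
  if (fd < 0)
    return -1;
#endif
  if (::ftruncate(fd, static_cast<off_t>(size)) < 0) { // size the backing object
    ::close(fd);
    return -1;
  }
  return fd;
}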

View file

@@ -32,8 +32,8 @@ static orbis::ErrorCode pipe_read(orbis::File *file, orbis::Uio *uio,
   uio->offset += size;
   std::memcpy(vec.base, pipe->data.data(), size);
-  ORBIS_LOG_ERROR(__FUNCTION__, thread->name.c_str(), thread->tid, file,
-                  size, pipe->data.size(), uio->offset, file->nextOff);
+  ORBIS_LOG_ERROR(__FUNCTION__, thread->tid, file, size,
+                  pipe->data.size(), uio->offset, file->nextOff);
   if (pipe->data.size() == size) {
     pipe->data.clear();
@@ -55,7 +55,7 @@ static orbis::ErrorCode pipe_read(orbis::File *file, orbis::Uio *uio,
 static orbis::ErrorCode pipe_write(orbis::File *file, orbis::Uio *uio,
                                    orbis::Thread *thread) {
   auto pipe = static_cast<orbis::Pipe *>(file)->other;
-  ORBIS_LOG_ERROR(__FUNCTION__, thread->name.c_str(), thread->tid, file);
+  ORBIS_LOG_ERROR(__FUNCTION__, thread->tid, file);
   std::size_t cnt = 0;
   for (auto vec : std::span(uio->iov, uio->iovcnt)) {
@@ -70,7 +70,7 @@ static orbis::ErrorCode pipe_write(orbis::File *file, orbis::Uio *uio,
   uio->resid -= cnt;
   uio->offset += cnt;
-  ORBIS_LOG_ERROR(__FUNCTION__, thread->name.c_str(), thread->tid, file,
-                  uio->resid, uio->offset, file->nextOff, cnt);
+  ORBIS_LOG_ERROR(__FUNCTION__, thread->tid, file,
+                  uio->resid, uio->offset, file->nextOff, cnt);
   thread->where();
   return {};

View file

@@ -60,8 +60,8 @@ void AudioOut::channelEntry(AudioOutChannelInfo info) {
   }
   auto controlPtr = reinterpret_cast<std::uint8_t *>(
-      rx::mem::map(nullptr, controlStat.st_size, PROT_READ | PROT_WRITE,
-                   MAP_SHARED, controlFd));
+      ::mmap(nullptr, controlStat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+             controlFd, 0));
   if (controlPtr == MAP_FAILED) {
     perror("mmap");
     std::abort();

View file

@@ -66,11 +66,11 @@ static vk::Context createVkContext(Device *device) {
   bool enableValidation = rx::g_config.validateGpu;
   for (std::size_t process = 0; process < 6; ++process) {
-    if (!rx::mem::reserve(
-            reinterpret_cast<void *>(orbis::kMinAddress +
-                                     orbis::kMaxAddress * process),
-            orbis::kMaxAddress - orbis::kMinAddress)) {
-      rx::die("failed to reserve userspace memory");
+    if (auto errc = rx::mem::reserve(rx::AddressRange::fromBeginSize(
+            orbis::kMinAddress + orbis::kMaxAddress * process,
+            orbis::kMaxAddress - orbis::kMinAddress));
+        errc != std::errc{}) {
+      rx::die("failed to reserve userspace memory: {}", (int)errc);
     }
   }
@@ -623,7 +623,11 @@ void Device::unmapProcess(std::uint32_t pid) {
   startAddress += orbis::kMinAddress;
   size -= orbis::kMinAddress;
-  rx::mem::reserve(reinterpret_cast<void *>(startAddress), size);
+  if (auto errc = rx::mem::release(
+          rx::AddressRange::fromBeginSize(startAddress, size), 1 << 14);
+      errc != std::errc{}) {
+    rx::die("failed to release userspace memory: {}", (int)errc);
+  }
   ::close(process.vmFd);
   process.vmFd = -1;
@@ -651,7 +655,10 @@ void Device::protectMemory(std::uint32_t pid, std::uint64_t address,
   if (process.vmId >= 0) {
     auto memory = amdgpu::RemoteMemory{process.vmId};
-    rx::mem::protect(memory.getPointer(address), size, prot >> 4);
+    rx::mem::protect(
+        rx::AddressRange::fromBeginSize(memory.getVirtualAddress(address),
+                                        size),
+        rx::EnumBitSet<rx::mem::Protection>::fromUnderlying(prot >> 4));
     // std::println(stderr, "protect process {} memory, address {}-{}, prot
     // {:x}",
@@ -1030,7 +1037,6 @@ void Device::mapMemory(std::uint32_t pid, std::uint64_t address,
   if (mmapResult == MAP_FAILED) {
     perror("::mmap");
-    rx::mem::printStats();
     rx::die("failed to map process {} memory, address {}-{}, type {:x}, offset "
             "{:x}, prot {:x}",
             pid, memory.getPointer(address), memory.getPointer(address + size),

View file

@@ -56,10 +56,12 @@ struct ProcessInfo {
 struct RemoteMemory {
   int vmId;
+  std::uint64_t getVirtualAddress(std::uint64_t address) const {
+    return address ? static_cast<std::uint64_t>(vmId) << 40 | address : 0;
+  }
   template <typename T = void> T *getPointer(std::uint64_t address) const {
-    return address ? reinterpret_cast<T *>(
-                         static_cast<std::uint64_t>(vmId) << 40 | address)
-                   : nullptr;
+    return reinterpret_cast<T *>(getVirtualAddress(address));
   }
 };
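
As a quick sanity check on the new address packing (values chosen for illustration only): with vmId = 2 and address = 0x1000, getVirtualAddress yields 0x200'0000'1000, i.e. the vm id occupies bits 40 and up, and address 0 still decays to a null pointer:

#include <cstdint>

// Mirrors RemoteMemory::getVirtualAddress for the example's sake.
constexpr std::uint64_t packAddress(int vmId, std::uint64_t address) {
  return address ? static_cast<std::uint64_t>(vmId) << 40 | address : 0;
}

static_assert(packAddress(2, 0x1000) == 0x200'0000'1000ULL);
static_assert(packAddress(2, 0) == 0); // null stays null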

View file

@@ -668,8 +668,8 @@ void ipmi::createShellCoreObjects(orbis::Process *process) {
   }
   auto shmAddress = reinterpret_cast<std::uint8_t *>(
-      rx::mem::map(nullptr, controlStat.st_size,
-                   PROT_READ | PROT_WRITE, MAP_SHARED, shmFd));
+      ::mmap(nullptr, controlStat.st_size, PROT_READ | PROT_WRITE,
+             MAP_SHARED, shmFd, 0));
   if (shmAddress == MAP_FAILED) {
     perror("mmap");
     std::abort();
@@ -689,7 +689,7 @@ void ipmi::createShellCoreObjects(orbis::Process *process) {
       std::get<1>(orbis::g_context->dialogs.back());
   ORBIS_LOG_TODO("Unmap shm after unlinking", currentDialogAddr,
                  currentDialogSize);
-  rx::mem::unmap(currentDialogAddr, currentDialogSize);
+  ::munmap(currentDialogAddr, currentDialogSize);
   orbis::g_context->dialogs.pop_back();
   }
   return 0;

View file

@@ -701,16 +701,17 @@ void vm::fork(std::uint64_t pid) {
     }
     if (prot & kMapProtCpuAll) {
-      auto mapping = rx::mem::map(nullptr, kPageSize, PROT_WRITE, MAP_SHARED,
-                                  gMemoryShm, address - kMinAddress);
+      auto mapping = ::mmap(nullptr, kPageSize, PROT_WRITE, MAP_SHARED,
+                            gMemoryShm, address - kMinAddress);
       assert(mapping != MAP_FAILED);
-      rx::mem::protect(reinterpret_cast<void *>(address), kPageSize, PROT_READ);
+      rx::mem::protect(rx::AddressRange::fromBeginSize(address, kPageSize),
+                       rx::mem::Protection::R);
       std::memcpy(mapping, reinterpret_cast<void *>(address), kPageSize);
-      rx::mem::unmap(mapping, kPageSize);
-      rx::mem::unmap(reinterpret_cast<void *>(address), kPageSize);
-      mapping = rx::mem::map(reinterpret_cast<void *>(address), kPageSize,
-                             prot & kMapProtCpuAll, MAP_FIXED | MAP_SHARED,
-                             gMemoryShm, address - kMinAddress);
+      ::munmap(mapping, kPageSize);
+      ::munmap(reinterpret_cast<void *>(address), kPageSize);
+      mapping = ::mmap(reinterpret_cast<void *>(address), kPageSize,
+                       prot & kMapProtCpuAll, MAP_FIXED | MAP_SHARED,
+                       gMemoryShm, address - kMinAddress);
       assert(mapping != MAP_FAILED);
@@ -723,8 +724,7 @@ void vm::fork(std::uint64_t pid) {
 void vm::reset() {
   std::memset(gBlocks, 0, sizeof(gBlocks));
-  rx::mem::unmap(reinterpret_cast<void *>(kMinAddress),
-                 kMaxAddress - kMinAddress);
+  ::munmap(reinterpret_cast<void *>(kMinAddress), kMaxAddress - kMinAddress);
   if (::ftruncate64(gMemoryShm, 0) < 0) {
     std::abort();
   }
@@ -733,8 +733,8 @@ void vm::reset() {
   }
   reserve(0, kMinAddress);
-  rx::mem::reserve(reinterpret_cast<void *>(kMinAddress),
-                   kMaxAddress - kMinAddress);
+  rx::mem::reserve(
+      rx::AddressRange::fromBeginSize(kMinAddress, kMaxAddress - kMinAddress));
 }

 void vm::initialize(std::uint64_t pid) {
@@ -756,8 +756,7 @@ void vm::initialize(std::uint64_t pid) {
   reserve(0, kMinAddress); // unmapped area
-  rx::mem::reserve(reinterpret_cast<void *>(kMinAddress),
-                   kMaxAddress - kMinAddress);
+  rx::mem::reserve(rx::AddressRange::fromBeginEnd(kMinAddress, kMaxAddress));
 }

 void vm::deinitialize() {
@@ -981,9 +980,9 @@ void *vm::map(void *addr, std::uint64_t len, std::int32_t prot,
     return reinterpret_cast<void *>(address);
   }
-  auto result = rx::mem::map(reinterpret_cast<void *>(address), len,
-                             prot & kMapProtCpuAll, realFlags, gMemoryShm,
-                             address - kMinAddress);
+  auto result =
+      ::mmap(reinterpret_cast<void *>(address), len, prot & kMapProtCpuAll,
+             realFlags, gMemoryShm, address - kMinAddress);
   if (result != MAP_FAILED && isAnon) {
     bool needReprotect = (prot & PROT_WRITE) == 0;
@@ -1035,7 +1034,7 @@ bool vm::unmap(void *addr, std::uint64_t size) {
              address + size);
   }
   gMapInfo.unmap(rx::AddressRange::fromBeginSize(address, size));
-  return rx::mem::unmap(addr, size);
+  return ::munmap(addr, size);
 }
 bool vm::protect(void *addr, std::uint64_t size, std::int32_t prot) {

View file

@@ -2,21 +2,11 @@
 #include "AddressRange.hpp"
 #include "EnumBitSet.hpp"
+#include "mem.hpp"
 #include <system_error>
 #include <utility>

 namespace rx {
-enum class Protection {
-  R,
-  W,
-  X,
-  bitset_last = X
-};
-
-std::errc reserveVirtualSpace(rx::AddressRange range);
-std::errc releaseVirtualSpace(rx::AddressRange range, std::size_t alignment);
-
 class Mappable {
 #ifdef _WIN32
   using NativeHandle = void *;
@@ -46,7 +36,8 @@ public:
   static std::pair<Mappable, std::errc> CreateMemory(std::size_t size);
   static std::pair<Mappable, std::errc> CreateSwap(std::size_t size);
   std::errc map(rx::AddressRange virtualRange, std::size_t offset,
-                rx::EnumBitSet<Protection> protection, std::size_t alignment);
+                rx::EnumBitSet<mem::Protection> protection,
+                std::size_t alignment);
   [[nodiscard]] NativeHandle release() {
     return std::exchange(m_handle, kInvalidHandle);

View file

@@ -1,14 +1,31 @@
 #pragma once
+#include "AddressRange.hpp"
+#include "EnumBitSet.hpp"
 #include <cstddef>
+#include <system_error>
+#include <utility>

 namespace rx::mem {
+enum class Protection {
+  R,
+  W,
+  X,
+  bitset_last = X
+};
+
+struct VirtualQueryEntry : rx::AddressRange {
+  rx::EnumBitSet<Protection> flags{};
+
+  VirtualQueryEntry() = default;
+  VirtualQueryEntry(rx::AddressRange range, rx::EnumBitSet<Protection> prot)
+      : AddressRange(range), flags(prot) {}
+};
+
 extern const std::size_t pageSize;
-void *map(void *address, std::size_t size, int prot, int flags, int fd = -1,
-          std::ptrdiff_t offset = 0);
-void *reserve(std::size_t size);
-bool reserve(void *address, std::size_t size);
-bool protect(void *address, std::size_t size, int prot);
-bool unmap(void *address, std::size_t size);
-void printStats();
+std::errc reserve(rx::AddressRange range);
+std::errc release(rx::AddressRange range, std::size_t alignment);
+std::errc protect(rx::AddressRange range, rx::EnumBitSet<Protection> prot);
+std::vector<VirtualQueryEntry> query(rx::AddressRange range);
 } // namespace rx::mem
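
A minimal sketch of how the reworked interface is meant to be called, mirroring the call sites elsewhere in this commit; the addresses, sizes and the 1 << 14 alignment are placeholders, and the include path is illustrative:

#include "rx/mem.hpp" // illustrative path

void example() {
  auto range = rx::AddressRange::fromBeginSize(0x40000000, 0x10000);

  // Reservation and release now report std::errc instead of bool/void *.
  if (auto errc = rx::mem::reserve(range); errc != std::errc{}) {
    return; // handle the error (the call sites use rx::die)
  }

  // Protection flags are an EnumBitSet instead of raw PROT_* bits.
  rx::mem::protect(range, rx::mem::Protection::R | rx::mem::Protection::W);

  // query() enumerates what is currently mapped inside the range.
  for (const auto &entry : rx::mem::query(range)) {
    (void)entry.flags; // an AddressRange plus an EnumBitSet<Protection>
  }

  rx::mem::release(range, 1 << 14);
}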

View file

@@ -1,4 +1,5 @@
 #include "Mappable.hpp"
+#include "mem.hpp"
 #include <system_error>

 #ifndef _WIN32
@@ -13,59 +14,10 @@
 #include <windows.h>
 #endif

-std::errc rx::reserveVirtualSpace(rx::AddressRange range) {
-  auto pointer = std::bit_cast<void *>(range.beginAddress());
-#ifdef _WIN32
-  auto reservation = VirtualAlloc2(nullptr, pointer, range.size(),
-                                   MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
-                                   PAGE_NOACCESS, nullptr, 0);
-  if (reservation == nullptr) {
-    return std::errc::invalid_argument;
-  }
-#else
-#ifdef MAP_FIXED_NOREPLACE
-  static constexpr auto kMapFixedNoReplace = MAP_FIXED_NOREPLACE;
-#else
-  static constexpr auto kMapFixedNoReplace = MAP_FIXED;
-#endif
-  auto reservation = ::mmap(pointer, range.size(), PROT_NONE,
-                            MAP_ANON | kMapFixedNoReplace | MAP_PRIVATE, -1, 0);
-  if (reservation == MAP_FAILED) {
-    return std::errc{errno};
-  }
-#endif
-  return {};
-}
-
-std::errc rx::releaseVirtualSpace(rx::AddressRange range,
-                                  [[maybe_unused]] std::size_t alignment) {
-#ifdef _WIN32
-  // simple and stupid implementation
-  for (std::uintptr_t address = range.beginAddress();
-       address < range.endAddress(); address += alignment) {
-    auto pointer = std::bit_cast<void *>(address);
-    if (!UnmapViewOfFileEx(pointer, MEM_PRESERVE_PLACEHOLDER)) {
-      VirtualFree(pointer, alignment, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
-    }
-  }
-#else
-  auto pointer = std::bit_cast<void *>(range.beginAddress());
-  auto reservation = ::mmap(pointer, range.size(), PROT_NONE,
-                            MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
-  if (!reservation || reservation != pointer) {
-    return std::errc{errno};
-  }
-#endif
-  return {};
-}
+#ifdef ANDROID
+#include "format-base.hpp"
+#endif

 std::pair<rx::Mappable, std::errc>
 rx::Mappable::CreateMemory(std::size_t size) {
   rx::Mappable result;
@@ -80,12 +32,22 @@ rx::Mappable::CreateMemory(std::size_t size) {
   }
   result.m_handle = handle;
 #else
+#ifdef ANDROID
+  auto name = rx::format("/{}-{:x}", (void *)&result, size);
+  auto fd = ::shm_open(name.c_str(), O_CREAT | O_TRUNC, 0666);
+#else
   auto fd = ::memfd_create("", 0);
+#endif
   if (fd < 0) {
     return {{}, std::errc{errno}};
   }
+#ifdef ANDROID
+  ::shm_unlink(name.c_str());
+#endif
   result.m_handle = fd;
   if (::ftruncate(fd, size) < 0) {
@@ -119,7 +81,7 @@ std::pair<rx::Mappable, std::errc> rx::Mappable::CreateSwap(std::size_t size) {
 }
 std::errc rx::Mappable::map(rx::AddressRange virtualRange, std::size_t offset,
-                            rx::EnumBitSet<Protection> protection,
+                            rx::EnumBitSet<mem::Protection> protection,
                             [[maybe_unused]] std::size_t alignment) {
 #ifdef _WIN32
   static const DWORD protTable[] = {
@@ -133,11 +95,11 @@ std::errc rx::Mappable::map(rx::AddressRange virtualRange, std::size_t offset,
       PAGE_EXECUTE_READWRITE, // XRW
   };
-  auto prot =
-      protTable[(protection & (Protection::R | Protection::W | Protection::X))
-                    .toUnderlying()];
+  auto prot = protTable[(protection & (mem::Protection::R | mem::Protection::W |
+                                       mem::Protection::X))
+                            .toUnderlying()];
-  releaseVirtualSpace(virtualRange, alignment);
+  mem::release(virtualRange, alignment);
   for (std::uintptr_t address = virtualRange.beginAddress();
        address < virtualRange.endAddress();
@@ -152,18 +114,16 @@ std::errc rx::Mappable::map(rx::AddressRange virtualRange, std::size_t offset,
       return std::errc::invalid_argument;
     }
   }
-  return {};
 #else
   int prot = 0;
-  if (protection & Protection::R) {
+  if (protection & mem::Protection::R) {
     prot |= PROT_READ;
   }
-  if (protection & Protection::W) {
+  if (protection & mem::Protection::W) {
     prot |= PROT_READ | PROT_WRITE;
   }
-  if (protection & Protection::X) {
+  if (protection & mem::Protection::X) {
     prot |= PROT_EXEC;
   }
@@ -175,9 +135,9 @@ std::errc rx::Mappable::map(rx::AddressRange virtualRange, std::size_t offset,
   if (result == MAP_FAILED) {
     return std::errc{errno};
   }
-  return {};
 #endif
+  return {};
 }

 void rx::Mappable::destroy() {
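
For context, the Mappable flow with the relocated Protection enum looks roughly like this; the size, address and alignment values are placeholders and the include path is illustrative:

#include "rx/Mappable.hpp" // illustrative path
#include <system_error>

std::errc example() {
  // CreateMemory returns the object together with an error code.
  auto [mappable, errc] = rx::Mappable::CreateMemory(1 << 20);
  if (errc != std::errc{}) {
    return errc;
  }

  // Map the first 64 KiB read/write into a previously reserved range;
  // Protection now lives in rx::mem rather than rx.
  auto range = rx::AddressRange::fromBeginSize(0x50000000, 1 << 16);
  return mappable.map(range, /*offset=*/0,
                      rx::mem::Protection::R | rx::mem::Protection::W,
                      /*alignment=*/rx::mem::pageSize);
}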

View file

@@ -1,50 +1,205 @@
 #include "mem.hpp"
-#include "print.hpp"
+#include "die.hpp"

-#ifdef __linux__
+#ifdef _WIN32
+#define NTDDI_VERSION NTDDI_WIN10_NI
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#else
 #include <cstdio>
 #include <sys/mman.h>
 #include <unistd.h>
+#endif

-extern const std::size_t rx::mem::pageSize = sysconf(_SC_PAGE_SIZE);
-
-void *rx::mem::map(void *address, std::size_t size, int prot, int flags, int fd,
-                   std::ptrdiff_t offset) {
-  return ::mmap(address, size, prot, flags, fd, offset);
-}
-
-void *rx::mem::reserve(std::size_t size) {
-  return map(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS);
-}
-
-bool rx::mem::reserve(void *address, std::size_t size) {
-  return map(address, size, PROT_NONE,
-             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED) != MAP_FAILED;
-}
-
-bool rx::mem::protect(void *address, std::size_t size, int prot) {
-  return ::mprotect(address, size, prot) == 0;
-}
-
-bool rx::mem::unmap(void *address, std::size_t size) {
-  return ::munmap(address, size) == 0;
-}
-
-void rx::mem::printStats() {
-  FILE *maps = fopen("/proc/self/maps", "r");
-  if (!maps) {
-    return;
-  }
-  char *line = nullptr;
-  std::size_t size = 0;
-  while (getline(&line, &size, maps) > 0) {
-    rx::print("{}", line);
-  }
-  free(line);
-  fclose(maps);
-}
-#endif
+#ifdef _WIN32
+const std::size_t rx::mem::pageSize = [] {
+  SYSTEM_INFO info;
+  ::GetSystemInfo(&info);
+  return info.dwPageSize;
+}();
+#else
+const std::size_t rx::mem::pageSize = sysconf(_SC_PAGE_SIZE);
+#endif
+
+std::errc rx::mem::reserve(rx::AddressRange range) {
+  auto pointer = std::bit_cast<void *>(range.beginAddress());
+#ifdef _WIN32
+  auto reservation = VirtualAlloc2(nullptr, pointer, range.size(),
+                                   MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
+                                   PAGE_NOACCESS, nullptr, 0);
+  if (reservation == nullptr) {
+    return std::errc::invalid_argument;
+  }
+#else
+#ifdef MAP_FIXED_NOREPLACE
+  static constexpr auto kMapFixedNoReplace = MAP_FIXED_NOREPLACE;
+#else
+  static constexpr auto kMapFixedNoReplace = MAP_FIXED;
+#endif
+  auto reservation = ::mmap(pointer, range.size(), PROT_NONE,
+                            MAP_ANON | kMapFixedNoReplace | MAP_PRIVATE, -1, 0);
+  if (reservation == MAP_FAILED) {
+    return std::errc{errno};
+  }
+#endif
+  return {};
+}
+
+std::errc rx::mem::release(rx::AddressRange range,
+                           [[maybe_unused]] std::size_t alignment) {
+#ifdef _WIN32
+  // simple and stupid implementation
+  for (std::uintptr_t address = range.beginAddress();
+       address < range.endAddress(); address += alignment) {
+    auto pointer = std::bit_cast<void *>(address);
+    if (!UnmapViewOfFileEx(pointer, MEM_PRESERVE_PLACEHOLDER)) {
+      VirtualFree(pointer, alignment, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
+    }
+  }
+#else
+  auto pointer = std::bit_cast<void *>(range.beginAddress());
+  auto reservation = ::mmap(pointer, range.size(), PROT_NONE,
+                            MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+  if (!reservation || reservation != pointer) {
+    return std::errc{errno};
+  }
+#endif
+  return {};
+}
+
+std::errc rx::mem::protect(rx::AddressRange range,
+                           rx::EnumBitSet<Protection> prot) {
+  auto pointer = std::bit_cast<void *>(range.beginAddress());
+#ifdef _WIN32
+  static const DWORD protTable[] = {
+      PAGE_NOACCESS,          // 0
+      PAGE_READONLY,          // R
+      PAGE_EXECUTE_READWRITE, // W
+      PAGE_EXECUTE_READWRITE, // RW
+      PAGE_EXECUTE,           // X
+      PAGE_EXECUTE_READWRITE, // XR
+      PAGE_EXECUTE_READWRITE, // XW
+      PAGE_EXECUTE_READWRITE, // XRW
+  };
+
+  auto rawProt =
+      (prot & (mem::Protection::R | mem::Protection::W | mem::Protection::X))
+          .toUnderlying();
+  auto wProt = protTable[rawProt];
+  if (!VirtualProtect(pointer, range.size(), wProt, nullptr)) {
+    return std::errc::invalid_argument;
+  }
+#else
+  if (::mprotect(pointer, range.size(), prot.toUnderlying())) {
+    return std::errc{errno};
+  }
+#endif
+  return {};
+}
+
+std::vector<rx::mem::VirtualQueryEntry> rx::mem::query(rx::AddressRange range) {
+  std::vector<VirtualQueryEntry> result;
+#ifdef _WIN32
+  std::uintptr_t address = range.beginAddress();
+  while (address < range.endAddress()) {
+    MEMORY_BASIC_INFORMATION info;
+    if (!VirtualQuery((void *)address, &info, sizeof(info))) {
+      rx::die("VirtualQuery: failed, address = {:x}", address);
+    }
+
+    auto region = rx::AddressRange::fromBeginSize(
+                      (std::uintptr_t)info.BaseAddress, info.RegionSize)
+                      .intersection(range);
+    address = region.endAddress();
+
+    if (info.State == MEM_FREE || !region.isValid() ||
+        !range.contains(region)) {
+      continue;
+    }
+
+    rx::EnumBitSet<Protection> flags = {};
+    switch (info.AllocationProtect & 0xff) {
+    case PAGE_NOACCESS:
+      break;
+    case PAGE_READONLY:
+      flags = Protection::R;
+      break;
+    case PAGE_READWRITE:
+    case PAGE_WRITECOPY:
+      flags = Protection::R | Protection::W;
+      break;
+    case PAGE_EXECUTE:
+      flags = Protection::X;
+      break;
+    case PAGE_EXECUTE_READ:
+      flags = Protection::X | Protection::R;
+      break;
+    case PAGE_EXECUTE_READWRITE:
+    case PAGE_EXECUTE_WRITECOPY:
+      flags = Protection::X | Protection::W | Protection::R;
+      break;
+    }
+
+    result.emplace_back(region, flags);
+  }
+#elif defined(__linux)
+  char buf[1024];
+  auto maps = std::fopen("/proc/self/maps", "r");
+  while (std::fgets(buf, sizeof(buf), maps)) {
+    std::uint64_t beginAddress;
+    std::uint64_t endAddress;
+    char flagChars[5];
+    std::sscanf(buf, "%lx-%lx %4s", &beginAddress, &endAddress, flagChars);
+
+    auto region = rx::AddressRange::fromBeginEnd(beginAddress, endAddress)
+                      .intersection(range);
+    if (!region.isValid()) {
+      continue;
+    }
+    if (region.beginAddress() >= range.beginAddress()) {
+      break;
+    }
+
+    rx::EnumBitSet<Protection> flags = {};
+    if (flagChars[0] == 'r') {
+      flags |= Protection::R;
+    }
+    if (flagChars[1] == 'w') {
+      flags |= Protection::W;
+    }
+    if (flagChars[2] == 'x') {
+      flags |= Protection::X;
+    }
+
+    result.emplace_back(region, flags);
+  }
+  std::fclose(maps);
+#elif defined(__APPLE__)
+  // FIXME: use mach_vm_region_info?
+  // workaround: assume all pages are not used
+#else
+#error "Not implemented"
+#endif
+
+  return result;
+}
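
For reference, the Linux query() path parses /proc/self/maps lines of the following shape (example line, not real output); the "%lx-%lx %4s" format picks out the begin address, the end address and the rwxp permission string:

7f2a4c000000-7f2a4c021000 rw-p 00000000 00:00 0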