share align utility

DH 2024-09-03 13:25:06 +03:00
parent 55ac4dcc1b
commit e259f904a4
8 changed files with 47 additions and 59 deletions
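The change is mechanical at every call site: the per-project "align.hpp" include and the utils:: qualifier are swapped for the shared <rx/align.hpp> header and the rx:: namespace, as the hunks below show. A minimal sketch of the pattern; the helper body is copied from the relocated header (last file in this diff), while roundLenToPages and the 0x1000 alignment are illustrative names and values, not taken from the repository:

#include <cstdint>

// Mirrors the helper now shared through <rx/align.hpp>.
namespace rx {
inline constexpr std::uint64_t alignUp(std::uint64_t value,
                                       std::uint64_t alignment) {
  return (value + (alignment - 1)) & ~(alignment - 1);
}
} // namespace rx

// Call sites change from utils::alignUp(...) to rx::alignUp(...):
std::uint64_t roundLenToPages(std::uint64_t len) {
  constexpr std::uint64_t kExamplePageSize = 0x1000; // illustrative alignment
  return rx::alignUp(len, kExamplePageSize); // was: utils::alignUp(len, kExamplePageSize)
}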

View file

@@ -1,5 +1,4 @@
#include "io-device.hpp"
-#include "align.hpp"
#include "orbis/KernelAllocator.hpp"
#include "orbis/SocketAddress.hpp"
#include "orbis/file.hpp"
@@ -8,7 +7,6 @@
#include "orbis/thread/Thread.hpp"
#include "orbis/uio.hpp"
#include "orbis/utils/Logs.hpp"
-#include "rx/mem.hpp"
#include "vfs.hpp"
#include "vm.hpp"
#include <cerrno>
@@ -17,6 +15,8 @@
#include <filesystem>
#include <netinet/in.h>
#include <optional>
+#include <rx/align.hpp>
+#include <rx/mem.hpp>
#include <span>
#include <string>
#include <sys/mman.h>
@@ -347,7 +347,7 @@ static orbis::ErrorCode host_mmap(orbis::File *file, void **address,
return orbis::ErrorCode::NOMEM;
}
-size = utils::alignUp(size, rx::vm::kPageSize);
+size = rx::alignUp(size, rx::vm::kPageSize);
result = ::mmap(
result, size, prot & rx::vm::kMapProtCpuAll,
@@ -371,8 +371,7 @@ static orbis::ErrorCode host_mmap(orbis::File *file, void **address,
std::min(offset + size - stat.st_size, rx::vm::kPageSize);
if (rest > rx::mem::pageSize) {
-auto fillSize =
-utils::alignUp(rest, rx::mem::pageSize) - rx::mem::pageSize;
+auto fillSize = rx::alignUp(rest, rx::mem::pageSize) - rx::mem::pageSize;
std::printf("adding dummy mapping %p-%p, file ends at %p\n",
(char *)result + size - fillSize, (char *)result + size,
@@ -440,7 +439,7 @@ static orbis::ErrorCode host_truncate(orbis::File *file, std::uint64_t len,
}
if (hostFile->alignTruncate) {
-len = utils::alignUp(len, rx::vm::kPageSize);
+len = rx::alignUp(len, rx::vm::kPageSize);
}
if (::ftruncate(hostFile->hostFd, len)) {

View file

@@ -1,5 +1,4 @@
#include "linker.hpp"
-#include "align.hpp"
#include "io-device.hpp"
#include "orbis/KernelAllocator.hpp"
#include "orbis/module/Module.hpp"
@@ -13,6 +12,7 @@
#include <filesystem>
#include <map>
#include <orbis/thread/Process.hpp>
+#include <rx/align.hpp>
#include <sys/mman.h>
#include <unordered_map>
@@ -400,10 +400,9 @@ Ref<orbis::Module> rx::linker::loadModule(std::span<std::byte> image,
break;
case kElfProgramTypeLoad:
baseAddress =
-std::min(baseAddress, utils::alignDown(phdr.p_vaddr, phdr.p_align));
-endAddress =
-std::max(endAddress,
-utils::alignUp(phdr.p_vaddr + phdr.p_memsz, vm::kPageSize));
+std::min(baseAddress, rx::alignDown(phdr.p_vaddr, phdr.p_align));
+endAddress = std::max(
+endAddress, rx::alignUp(phdr.p_vaddr + phdr.p_memsz, vm::kPageSize));
break;
case kElfProgramTypeDynamic:
dynamicPhdrIndex = index;
@@ -435,10 +434,9 @@ Ref<orbis::Module> rx::linker::loadModule(std::span<std::byte> image,
case kElfProgramTypeSceRelRo:
sceRelRoPhdrIndex = index;
baseAddress =
-std::min(baseAddress, utils::alignDown(phdr.p_vaddr, phdr.p_align));
-endAddress =
-std::max(endAddress,
-utils::alignUp(phdr.p_vaddr + phdr.p_memsz, vm::kPageSize));
+std::min(baseAddress, rx::alignDown(phdr.p_vaddr, phdr.p_align));
+endAddress = std::max(
+endAddress, rx::alignUp(phdr.p_vaddr + phdr.p_memsz, vm::kPageSize));
break;
case kElfProgramTypeGnuEhFrame:
gnuEhFramePhdrIndex = index;
@@ -459,7 +457,7 @@ Ref<orbis::Module> rx::linker::loadModule(std::span<std::byte> image,
auto imageBase = reinterpret_cast<std::byte *>(
rx::vm::map(reinterpret_cast<void *>(baseAddress),
-utils::alignUp(imageSize, rx::vm::kPageSize), 0,
+rx::alignUp(imageSize, rx::vm::kPageSize), 0,
rx::vm::kMapFlagPrivate | rx::vm::kMapFlagAnonymous |
(baseAddress ? rx::vm::kMapFlagFixed : 0)));
@@ -852,10 +850,9 @@ Ref<orbis::Module> rx::linker::loadModule(std::span<std::byte> image,
for (auto phdr : phdrs) {
if (phdr.p_type == kElfProgramTypeLoad ||
phdr.p_type == kElfProgramTypeSceRelRo) {
-auto segmentEnd =
-utils::alignUp(phdr.p_vaddr + phdr.p_memsz, vm::kPageSize);
+auto segmentEnd = rx::alignUp(phdr.p_vaddr + phdr.p_memsz, vm::kPageSize);
auto segmentBegin =
-utils::alignDown(phdr.p_vaddr - baseAddress, phdr.p_align);
+rx::alignDown(phdr.p_vaddr - baseAddress, phdr.p_align);
auto segmentSize = segmentEnd - segmentBegin;
::mprotect(imageBase + segmentBegin, segmentSize, PROT_WRITE);
std::memcpy(imageBase + phdr.p_vaddr - baseAddress,
@@ -865,7 +862,8 @@ Ref<orbis::Module> rx::linker::loadModule(std::span<std::byte> image,
phdr.p_flags |= vm::kMapProtCpuWrite; // TODO: reprotect on relocations
}
-vm::protect(imageBase + segmentBegin, segmentSize, phdr.p_flags & (vm::kMapProtCpuAll | vm::kMapProtGpuAll));
+vm::protect(imageBase + segmentBegin, segmentSize,
+phdr.p_flags & (vm::kMapProtCpuAll | vm::kMapProtGpuAll));
if (phdr.p_type == kElfProgramTypeLoad) {
if (result->segmentCount >= std::size(result->segments)) {

View file

@@ -1,4 +1,3 @@
-#include "align.hpp"
#include "amdgpu/bridge/bridge.hpp"
#include "backtrace.hpp"
#include "bridge.hpp"
@@ -8,12 +7,13 @@
#include "iodev/mbus_av.hpp"
#include "linker.hpp"
#include "ops.hpp"
-#include "rx/hexdump.hpp"
#include "thread.hpp"
#include "vfs.hpp"
#include "vm.hpp"
#include "xbyak/xbyak.h"
#include <rx/Version.hpp>
+#include <rx/align.hpp>
+#include <rx/hexdump.hpp>
#include <elf.h>
#include <linux/prctl.h>
@@ -621,8 +621,8 @@ ExecEnv ps4CreateExecEnv(orbis::Thread *mainThread,
for (auto sym : libkernel->symbols) {
if (sym.id == 0xd2f4e7e480cc53d0) {
auto address = (uint64_t)libkernel->base + sym.address;
-::mprotect((void *)utils::alignDown(address, 0x1000),
-utils::alignUp(sym.size + sym.address, 0x1000), PROT_WRITE);
+::mprotect((void *)rx::alignDown(address, 0x1000),
+rx::alignUp(sym.size + sym.address, 0x1000), PROT_WRITE);
std::printf("patching sceKernelGetMainSocId\n");
struct GetMainSocId : Xbyak::CodeGenerator {
GetMainSocId(std::uint64_t address, std::uint64_t size)
@@ -632,8 +632,8 @@ ExecEnv ps4CreateExecEnv(orbis::Thread *mainThread,
}
} gen{address, sym.size};
-::mprotect((void *)utils::alignDown(address, 0x1000),
-utils::alignUp(sym.size + sym.address, 0x1000),
+::mprotect((void *)rx::alignDown(address, 0x1000),
+rx::alignUp(sym.size + sym.address, 0x1000),
PROT_READ | PROT_EXEC);
break;
}

View file

@@ -1,6 +1,4 @@
#include "ops.hpp"
-#include "align.hpp"
#include "amdgpu/bridge/bridge.hpp"
#include "backtrace.hpp"
#include "io-device.hpp"
#include "io-devices.hpp"
@@ -17,8 +15,6 @@
#include "orbis/umtx.hpp"
#include "orbis/utils/Logs.hpp"
#include "orbis/utils/Rc.hpp"
#include "orbis/utils/SharedCV.hpp"
#include "orbis/utils/SharedMutex.hpp"
#include "orbis/vm.hpp"
#include "thread.hpp"
#include "vfs.hpp"

View file

@@ -1,14 +1,15 @@
#include "thread.hpp"
-#include "align.hpp"
#include "orbis/sys/sysentry.hpp"
#include "orbis/thread/Process.hpp"
#include "orbis/thread/Thread.hpp"
#include "orbis/utils/Logs.hpp"
#include "rx/mem.hpp"
#include <asm/prctl.h>
#include <csignal>
#include <immintrin.h>
#include <link.h>
#include <linux/prctl.h>
+#include <rx/align.hpp>
#include <sys/prctl.h>
#include <ucontext.h>
#include <unistd.h>
@@ -16,7 +17,7 @@
static std::size_t getSigStackSize() {
static auto sigStackSize = std::max<std::size_t>(
-SIGSTKSZ, ::utils::alignUp(64 * 1024 * 1024, sysconf(_SC_PAGE_SIZE)));
+SIGSTKSZ, ::rx::alignUp(64 * 1024 * 1024, rx::mem::pageSize));
return sigStackSize;
}
@@ -204,7 +205,7 @@ handleSigUser(int sig, siginfo_t *info, void *ucontext) {
}
void rx::thread::initialize() {
-struct sigaction act {};
+struct sigaction act{};
act.sa_sigaction = handleSigSys;
act.sa_flags = SA_SIGINFO | SA_ONSTACK;

View file

@@ -1,5 +1,4 @@
#include "vm.hpp"
-#include "align.hpp"
#include "bridge.hpp"
#include "io-device.hpp"
#include "iodev/dmem.hpp"
@@ -7,20 +6,19 @@
#include "orbis/thread/Thread.hpp"
#include "orbis/utils/Logs.hpp"
#include "orbis/utils/Rc.hpp"
-#include "rx/mem.hpp"
#include <bit>
#include <cassert>
#include <cinttypes>
#include <cstdint>
#include <cstring>
#include <fcntl.h>
#include <map>
#include <mutex>
+#include <rx/MemoryTable.hpp>
+#include <rx/align.hpp>
+#include <rx/mem.hpp>
#include <sys/mman.h>
#include <unistd.h>
-#include <rx/MemoryTable.hpp>
static std::mutex g_mtx;
std::string rx::vm::mapFlagsToString(std::int32_t flags) {
@@ -564,7 +562,7 @@ struct Block {
while (auto usedCount = std::countr_one(tmpAllocatedBits)) {
auto nextProcessedPages =
-utils::alignUp(processedPages + usedCount, groupAlignment);
+rx::alignUp(processedPages + usedCount, groupAlignment);
if (nextProcessedPages - processedPages >= 64) {
tmpAllocatedBits = 0;
} else {
@@ -578,7 +576,7 @@ struct Block {
// searching on next iterations
auto freeCount = std::countl_zero(allocatedBits);
auto alignedPageIndex =
-utils::alignUp(kGroupSize - freeCount, groupAlignment);
+rx::alignUp(kGroupSize - freeCount, groupAlignment);
freeCount =
kGroupSize - alignedPageIndex; // calc aligned free pages
@@ -594,7 +592,7 @@ struct Block {
for (std::uint64_t groupIndex = 0;
groupIndex < kGroupsInBlock && foundCount < count; ++groupIndex) {
if (foundCount == 0) {
-groupIndex = utils::alignUp(groupIndex, blockAlignment);
+groupIndex = rx::alignUp(groupIndex, blockAlignment);
if (groupIndex >= kGroupsInBlock) {
break;
@@ -773,7 +771,7 @@ void *rx::vm::map(void *addr, std::uint64_t len, std::int32_t prot,
addr, len, mapProtToString(prot).c_str(),
mapFlagsToString(flags).c_str());
-len = utils::alignUp(len, kPageSize);
+len = rx::alignUp(len, kPageSize);
auto pagesCount = (len + (kPageSize - 1)) >> kPageShift;
auto hitAddress = reinterpret_cast<std::uint64_t>(addr);
@@ -803,11 +801,11 @@ void *rx::vm::map(void *addr, std::uint64_t len, std::int32_t prot,
if (hitAddress & (alignment - 1)) {
if (flags & kMapFlagStack) {
-hitAddress = utils::alignDown(hitAddress - 1, alignment);
+hitAddress = rx::alignDown(hitAddress - 1, alignment);
flags |= kMapFlagFixed;
flags &= ~kMapFlagStack;
} else {
-hitAddress = utils::alignUp(hitAddress, alignment);
+hitAddress = rx::alignUp(hitAddress, alignment);
}
}
@@ -939,13 +937,12 @@ void *rx::vm::map(void *addr, std::uint64_t len, std::int32_t prot,
}
// if (device == nullptr) {
-if (auto thr = orbis::g_currentThread) {
-rx::bridge.sendMapMemory(thr->tproc->pid, -1, -1, address, len, prot,
-address - kMinAddress);
-} else {
-std::fprintf(stderr, "ignoring mapping %lx-%lx\n", address,
-address + len);
-}
+if (auto thr = orbis::g_currentThread) {
+rx::bridge.sendMapMemory(thr->tproc->pid, -1, -1, address, len, prot,
+address - kMinAddress);
+} else {
+std::fprintf(stderr, "ignoring mapping %lx-%lx\n", address, address + len);
+}
// }
if (internalFlags & kMapInternalReserveOnly) {
@@ -971,7 +968,7 @@ void *rx::vm::map(void *addr, std::uint64_t len, std::int32_t prot,
}
bool rx::vm::unmap(void *addr, std::uint64_t size) {
-size = utils::alignUp(size, kPageSize);
+size = rx::alignUp(size, kPageSize);
auto pages = (size + (kPageSize - 1)) >> kPageShift;
auto address = reinterpret_cast<std::uint64_t>(addr);
@@ -1010,7 +1007,7 @@ bool rx::vm::protect(void *addr, std::uint64_t size, std::int32_t prot) {
std::printf("rx::vm::protect(addr = %p, len = %" PRIu64 ", prot = %s)\n",
addr, size, mapProtToString(prot).c_str());
-size = utils::alignUp(size, kPageSize);
+size = rx::alignUp(size, kPageSize);
auto pages = (size + (kPageSize - 1)) >> kPageShift;
auto address = reinterpret_cast<std::uint64_t>(addr);
if (address < kMinAddress || address >= kMaxAddress || size > kMaxAddress ||
@@ -1124,7 +1121,6 @@ bool rx::vm::virtualQuery(const void *addr, std::int32_t flags,
return false;
}
if ((flags & 1) == 0) {
if (it.endAddress() <= address) {
return false;
@@ -1138,8 +1134,7 @@ bool rx::vm::virtualQuery(const void *addr, std::int32_t flags,
std::int32_t memoryType = 0;
std::uint32_t blockFlags = 0;
if (it->device != nullptr) {
-if (auto dmem =
-dynamic_cast<DmemDevice *>(it->device.get())) {
+if (auto dmem = dynamic_cast<DmemDevice *>(it->device.get())) {
auto dmemIt = dmem->allocations.queryArea(it->offset);
if (dmemIt == dmem->allocations.end()) {
return false;

View file

@@ -2,7 +2,7 @@
#include <cstdint>
-namespace utils {
+namespace rx {
inline constexpr std::uint64_t alignUp(std::uint64_t value,
std::uint64_t alignment) {
return (value + (alignment - 1)) & ~(alignment - 1);
@@ -11,4 +11,4 @@ inline constexpr std::uint64_t alignDown(std::uint64_t value,
std::uint64_t alignment) {
return value & ~(alignment - 1);
}
-} // namespace utils
+} // namespace rx
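
The helper bodies are untouched apart from the namespace rename: with a power-of-two alignment, alignUp adds alignment - 1 and clears the low bits, while alignDown only clears them. A small self-contained check of that arithmetic, with example values chosen here purely for illustration:

#include <cstdint>

namespace rx { // same bodies as the hunk above
inline constexpr std::uint64_t alignUp(std::uint64_t value,
                                       std::uint64_t alignment) {
  return (value + (alignment - 1)) & ~(alignment - 1);
}
inline constexpr std::uint64_t alignDown(std::uint64_t value,
                                         std::uint64_t alignment) {
  return value & ~(alignment - 1);
}
} // namespace rx

// 0x1234 rounded to a 0x1000 boundary: up gives 0x2000, down gives 0x1000.
static_assert(rx::alignUp(0x1234, 0x1000) == 0x2000);
static_assert(rx::alignDown(0x1234, 0x1000) == 0x1000);
// Already-aligned values pass through unchanged in both directions.
static_assert(rx::alignUp(0x2000, 0x1000) == 0x2000);
static_assert(rx::alignDown(0x2000, 0x1000) == 0x2000);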

View file

@@ -1,4 +1,3 @@
#pragma once
#include "die.hpp"
#include <cstdarg>