mirror of
https://github.com/RPCS3/rpcs3.git
synced 2026-04-04 14:08:30 +00:00
vm: add map_self() method to utils::shm
Add complementary unmap_self() method. Move VirtualMemory to util/vm.hpp Minor associated include cleanup. Move asm.h to util/asm.hpp
This commit is contained in:
parent
b68bdafadc
commit
1c99a2e7fb
24 changed files with 105 additions and 35 deletions
|
|
@ -5,7 +5,7 @@
|
|||
#include "util/logs.hpp"
|
||||
#include "mutex.h"
|
||||
#include "sysinfo.h"
|
||||
#include "VirtualMemory.h"
|
||||
#include "util/vm.hpp"
|
||||
#include <immintrin.h>
|
||||
#include <zlib.h>
|
||||
|
||||
|
|
|
|||
|
|
@ -73,6 +73,7 @@
|
|||
#endif
|
||||
|
||||
#include "sync.h"
|
||||
#include "util/vm.hpp"
|
||||
#include "util/logs.hpp"
|
||||
#include "Emu/Memory/vm_locking.h"
|
||||
|
||||
|
|
|
|||
|
|
@ -1,328 +0,0 @@
|
|||
#include "stdafx.h"
|
||||
#include "util/logs.hpp"
|
||||
#include "VirtualMemory.h"
|
||||
#ifdef _WIN32
|
||||
#include <Windows.h>
|
||||
#else
|
||||
#include <sys/mman.h>
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/types.h>
|
||||
#endif
|
||||
|
||||
#ifdef __linux__
#include <sys/syscall.h>

// Provide the memfd_create syscall number when the toolchain headers predate it
#ifdef __NR_memfd_create
#elif __x86_64__
#define __NR_memfd_create 319
#elif __aarch64__
#define __NR_memfd_create 279
#endif

// Direct syscall wrapper: older glibc does not expose a memfd_create() function
static int memfd_create_(const char *name, uint flags)
{
	return syscall(__NR_memfd_create, name, flags);
}
#endif
|
||||
|
||||
namespace utils
|
||||
{
|
||||
	// Convert memory protection (internal): maps the cross-platform protection
	// enum to the native constant (Win32 PAGE_*, POSIX PROT_*)
	static auto operator +(protection prot)
	{
#ifdef _WIN32
		DWORD _prot = PAGE_NOACCESS;
		switch (prot)
		{
		case protection::rw: _prot = PAGE_READWRITE; break;
		case protection::ro: _prot = PAGE_READONLY; break;
		case protection::no: break;
		case protection::wx: _prot = PAGE_EXECUTE_READWRITE; break;
		case protection::rx: _prot = PAGE_EXECUTE_READ; break;
		}
#else
		int _prot = PROT_NONE;
		switch (prot)
		{
		case protection::rw: _prot = PROT_READ | PROT_WRITE; break;
		case protection::ro: _prot = PROT_READ; break;
		case protection::no: break;
		case protection::wx: _prot = PROT_READ | PROT_WRITE | PROT_EXEC; break;
		case protection::rx: _prot = PROT_READ | PROT_EXEC; break;
		}
#endif

		return _prot;
	}
|
||||
|
||||
	// Reserve (but do not commit) `size` bytes; result is 64k-aligned.
	// Returns nullptr on failure or when a misaligned/unsatisfiable hint is given.
	void* memory_reserve(std::size_t size, void* use_addr)
	{
#ifdef _WIN32
		return ::VirtualAlloc(use_addr, size, MEM_RESERVE, PAGE_NOACCESS);
#else
		// A hint address must be aligned to 64k
		if (use_addr && reinterpret_cast<uptr>(use_addr) % 0x10000)
		{
			return nullptr;
		}

		if (!use_addr)
		{
			// Hack: Ensure aligned 64k allocations (over-reserve, trimmed below)
			size += 0x10000;
		}

		auto ptr = ::mmap(use_addr, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);

		// MAP_FAILED check
		if (ptr == reinterpret_cast<void*>(-1))
		{
			return nullptr;
		}

		// The hint is advisory on POSIX: fail if the kernel picked another address
		if (use_addr && ptr != use_addr)
		{
			::munmap(ptr, size);
			return nullptr;
		}

		if (!use_addr && ptr)
		{
			// Continuation of the hack above: unmap the slack before and after the
			// aligned window so exactly the requested, 64k-aligned range remains
			const auto misalign = reinterpret_cast<uptr>(ptr) % 0x10000;
			::munmap(ptr, 0x10000 - misalign);

			if (misalign)
			{
				::munmap(static_cast<u8*>(ptr) + size - misalign, misalign);
			}

			ptr = static_cast<u8*>(ptr) + (0x10000 - misalign);
		}

		return ptr;
#endif
	}
|
||||
|
||||
	// Back a previously reserved range with accessible memory under `prot`.
	// verify(HERE) is the project's assertion mechanism — failures are fatal.
	void memory_commit(void* pointer, std::size_t size, protection prot)
	{
#ifdef _WIN32
		verify(HERE), ::VirtualAlloc(pointer, size, MEM_COMMIT, +prot);
#else
		// Round the base down to the 4k page boundary and widen size to match
		const u64 ptr64 = reinterpret_cast<u64>(pointer);
		verify(HERE), ::mprotect(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), +prot) != -1;
		verify(HERE), ::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_WILLNEED) != -1;
#endif
	}
|
||||
|
||||
	// Decommit a range, returning physical pages to the OS but keeping the
	// reservation itself intact.
	void memory_decommit(void* pointer, std::size_t size)
	{
#ifdef _WIN32
		verify(HERE), ::VirtualFree(pointer, size, MEM_DECOMMIT);
#else
		const u64 ptr64 = reinterpret_cast<u64>(pointer);

		// Replace the range with a fresh inaccessible anonymous mapping
		verify(HERE), ::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(-1);

		// Hint the kernel that the pages may be reclaimed (MADV_FREE is the
		// cheaper variant where available)
#ifdef MADV_FREE
		verify(HERE), ::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_FREE) != -1;
#else
		verify(HERE), ::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_DONTNEED) != -1;
#endif
#endif
	}
|
||||
|
||||
	// Discard the contents of a committed range and re-commit it zeroed with
	// the given protection.
	void memory_reset(void* pointer, std::size_t size, protection prot)
	{
#ifdef _WIN32
		memory_decommit(pointer, size);
		memory_commit(pointer, size, prot);
#else
		const u64 ptr64 = reinterpret_cast<u64>(pointer);

		// Drop the page contents first (MADV_FREE where available)
#ifdef MADV_FREE
		verify(HERE), ::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_FREE) != -1;
#else
		verify(HERE), ::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_DONTNEED) != -1;
#endif
		// Remap anonymously with the requested protection, then prefetch
		verify(HERE), ::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(-1);
		verify(HERE), ::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_WILLNEED) != -1;
#endif
	}
|
||||
|
||||
	// Release a range obtained from memory_reserve. `size` must be the original
	// reservation size on POSIX; Win32 requires size 0 with MEM_RELEASE.
	void memory_release(void* pointer, std::size_t size)
	{
#ifdef _WIN32
		verify(HERE), ::VirtualFree(pointer, 0, MEM_RELEASE);
#else
		verify(HERE), ::munmap(pointer, size) != -1;
#endif
	}
|
||||
|
||||
	// Change protection on an existing range.
	void memory_protect(void* pointer, std::size_t size, protection prot)
	{
#ifdef _WIN32
		// Walk the range in chunks that never cross a 64k allocation-granularity
		// boundary — presumably because the range may span multiple separately
		// placed regions that one VirtualProtect call cannot cover (TODO confirm)
		for (u64 addr = reinterpret_cast<u64>(pointer), end = addr + size; addr < end;)
		{
			const u64 boundary = (addr + 0x10000) & -0x10000;
			const u64 block_size = std::min(boundary, end) - addr;

			DWORD old;
			if (!::VirtualProtect(reinterpret_cast<LPVOID>(addr), block_size, +prot, &old))
			{
				fmt::throw_exception("VirtualProtect failed (%p, 0x%x, addr=0x%x, error=%#x)", pointer, size, addr, GetLastError());
			}

			// Next region
			addr += block_size;
		}
#else
		// Round base down to the 4k page boundary and widen size to match
		const u64 ptr64 = reinterpret_cast<u64>(pointer);
		verify(HERE), ::mprotect(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), +prot) != -1;
#endif
	}
|
||||
|
||||
shm::shm(u32 size, u32 flags)
|
||||
: m_size(::align(size, 0x10000))
|
||||
, m_flags(flags)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
m_handle = ::CreateFileMappingW(INVALID_HANDLE_VALUE, NULL, PAGE_EXECUTE_READWRITE, 0, m_size, NULL);
|
||||
verify(HERE), m_handle != INVALID_HANDLE_VALUE;
|
||||
#elif __linux__
|
||||
m_file = ::memfd_create_("", 0);
|
||||
verify(HERE), m_file >= 0;
|
||||
verify(HERE), ::ftruncate(m_file, m_size) >= 0;
|
||||
#else
|
||||
const std::string name = "/rpcs3-mem-" + std::to_string(reinterpret_cast<u64>(this));
|
||||
|
||||
while ((m_file = ::shm_open(name.c_str(), O_RDWR | O_CREAT | O_EXCL, S_IWUSR | S_IRUSR)) == -1)
|
||||
{
|
||||
if (errno == EMFILE)
|
||||
{
|
||||
fmt::throw_exception("Too many open files. Raise the limit and try again.");
|
||||
}
|
||||
|
||||
verify(HERE), errno == EEXIST;
|
||||
}
|
||||
|
||||
verify(HERE), ::shm_unlink(name.c_str()) >= 0;
|
||||
verify(HERE), ::ftruncate(m_file, m_size) >= 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
	// Release only the mapping handle / descriptor; any outstanding views must
	// be removed separately via unmap()/unmap_critical()
	shm::~shm()
	{
#ifdef _WIN32
		::CloseHandle(m_handle);
#else
		::close(m_file);
#endif
	}
|
||||
|
||||
	// Map a view of the shared memory at (64k-aligned) `ptr`, or at an
	// OS-chosen address when ptr is null. Returns the view address, or nullptr
	// on failure under Win32.
	// NOTE(review): the POSIX branch returns mmap's result directly, which is
	// MAP_FAILED (-1), not nullptr, on failure — callers must account for this.
	u8* shm::map(void* ptr, protection prot) const
	{
#ifdef _WIN32
		// Always request write access on the view; the final protection is
		// tightened with VirtualProtect below when needed
		DWORD access = FILE_MAP_WRITE;
		switch (prot)
		{
		case protection::rw:
		case protection::ro:
		case protection::no:
			break;
		case protection::wx:
		case protection::rx:
			access |= FILE_MAP_EXECUTE;
			break;
		}

		if (auto ret = static_cast<u8*>(::MapViewOfFileEx(m_handle, access, 0, 0, m_size, ptr)))
		{
			if (prot != protection::rw && prot != protection::wx)
			{
				// Lower the protection on the fresh view; undo the mapping on failure
				DWORD old;
				if (!::VirtualProtect(ret, m_size, +prot, &old))
				{
					::UnmapViewOfFile(ret);
					return nullptr;
				}
			}

			return ret;
		}

		return nullptr;
#else
		// Align the target down to 64k; MAP_FIXED only when a hint was given
		const u64 ptr64 = reinterpret_cast<u64>(ptr);
		return static_cast<u8*>(::mmap(reinterpret_cast<void*>(ptr64 & -0x10000), m_size, +prot, MAP_SHARED | (ptr ? MAP_FIXED : 0), m_file, 0));
#endif
	}
|
||||
|
||||
u8* shm::map_critical(void* ptr, protection prot)
|
||||
{
|
||||
const auto target = reinterpret_cast<u8*>(reinterpret_cast<u64>(ptr) & -0x10000);
|
||||
|
||||
#ifdef _WIN32
|
||||
::MEMORY_BASIC_INFORMATION mem;
|
||||
if (!::VirtualQuery(target, &mem, sizeof(mem)) || mem.State != MEM_RESERVE || !::VirtualFree(mem.AllocationBase, 0, MEM_RELEASE))
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const auto base = (u8*)mem.AllocationBase;
|
||||
const auto size = mem.RegionSize + (target - base);
|
||||
|
||||
if (base < target && !::VirtualAlloc(base, target - base, MEM_RESERVE, PAGE_NOACCESS))
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (target + m_size < base + size && !::VirtualAlloc(target + m_size, base + size - target - m_size, MEM_RESERVE, PAGE_NOACCESS))
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
#endif
|
||||
|
||||
return this->map(target, prot);
|
||||
}
|
||||
|
||||
	// Remove a view previously created by map(). `ptr` must be the exact view
	// address; the whole m_size range is unmapped on POSIX.
	void shm::unmap(void* ptr) const
	{
#ifdef _WIN32
		::UnmapViewOfFile(ptr);
#else
		::munmap(ptr, m_size);
#endif
	}
|
||||
|
||||
	// Undo map_critical: remove the view and, on Win32, merge the surviving
	// head/tail reservations back into one MEM_RESERVE region covering the hole.
	void shm::unmap_critical(void* ptr)
	{
		// Align the target down to the 64k allocation granularity
		const auto target = reinterpret_cast<u8*>(reinterpret_cast<u64>(ptr) & -0x10000);

		this->unmap(target);

#ifdef _WIN32
		// Inspect the regions immediately below (mem) and above (mem2) the hole
		::MEMORY_BASIC_INFORMATION mem, mem2;
		if (!::VirtualQuery(target - 1, &mem, sizeof(mem)) || !::VirtualQuery(target + m_size, &mem2, sizeof(mem2)))
		{
			return;
		}

		// Release the lower neighbor if it is a leftover reservation
		if (mem.State == MEM_RESERVE && !::VirtualFree(mem.AllocationBase, 0, MEM_RELEASE))
		{
			return;
		}

		// Release the upper neighbor if it is a leftover reservation
		if (mem2.State == MEM_RESERVE && !::VirtualFree(mem2.AllocationBase, 0, MEM_RELEASE))
		{
			return;
		}

		// Sizes of the released neighbors (zero when there was none)
		const auto size1 = mem.State == MEM_RESERVE ? target - (u8*)mem.AllocationBase : 0;
		const auto size2 = mem2.State == MEM_RESERVE ? mem2.RegionSize : 0;

		// Re-reserve hole + neighbors as one contiguous region
		if (!::VirtualAlloc(mem.State == MEM_RESERVE ? mem.AllocationBase : target, m_size + size1 + size2, MEM_RESERVE, PAGE_NOACCESS))
		{
			return;
		}
#endif
	}
|
||||
}
|
||||
|
|
@ -1,88 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "types.h"
|
||||
|
||||
namespace utils
{
	// Memory protection type
	enum class protection
	{
		rw, // Read + write (default)
		ro, // Read only
		no, // No access
		wx, // Read + write + execute
		rx, // Read + execute
	};

	/**
	 * Reserve `size` bytes of virtual memory and returns it.
	 * The memory should be committed before usage.
	 */
	void* memory_reserve(std::size_t size, void* use_addr = nullptr);

	/**
	 * Commit `size` bytes of virtual memory starting at pointer.
	 * That is, bake reserved memory with physical memory.
	 * pointer should belong to a range of reserved memory.
	 */
	void memory_commit(void* pointer, std::size_t size, protection prot = protection::rw);

	// Decommit all memory committed via memory_commit.
	void memory_decommit(void* pointer, std::size_t size);

	// Decommit all memory and commit it again.
	void memory_reset(void* pointer, std::size_t size, protection prot = protection::rw);

	// Free memory after reserved by memory_reserve, should specify original size
	void memory_release(void* pointer, std::size_t size);

	// Set memory protection
	void memory_protect(void* pointer, std::size_t size, protection prot);

	// Shared memory handle (non-copyable owner of an OS shared-memory object)
	class shm
	{
#ifdef _WIN32
		void* m_handle; // Win32 file-mapping handle
#else
		int m_file; // POSIX file descriptor
#endif
		u32 m_size;  // Mapping size; the constructor aligns it up to 64k
		u32 m_flags; // Opaque userdata (see flags())

	public:
		explicit shm(u32 size, u32 flags = 0);

		shm(const shm&) = delete;

		shm& operator=(const shm&) = delete;

		~shm();

		// Map shared memory
		u8* map(void* ptr, protection prot = protection::rw) const;

		// Map shared memory over reserved memory region, which is unsafe (non-atomic) under Win32
		u8* map_critical(void* ptr, protection prot = protection::rw);

		// Unmap shared memory
		void unmap(void* ptr) const;

		// Unmap shared memory, undoing map_critical
		void unmap_critical(void* ptr);

		u32 size() const
		{
			return m_size;
		}

		// Flags are unspecified, consider it userdata
		u32 flags() const
		{
			return m_flags;
		}

		// Another userdata
		u64 info = 0;
	};
}
|
||||
279
Utilities/asm.h
279
Utilities/asm.h
|
|
@ -1,279 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "types.h"
|
||||
|
||||
extern bool g_use_rtm;
|
||||
extern u64 g_rtm_tx_limit1;
|
||||
|
||||
namespace utils
|
||||
{
|
||||
	// Transaction helper (result = pair of success and op result, or just bool).
	// Runs `op` inside a hardware (RTM) transaction, retrying until a TSC budget
	// (g_rtm_tx_limit1) is exhausted or the abort status says not to retry.
	template <typename F, typename R = std::invoke_result_t<F>>
	inline auto tx_start(F op)
	{
		uint status = -1;

		// Retry loop, bounded by elapsed TSC cycles; skipped entirely when RTM
		// is unavailable (g_use_rtm false)
		for (auto stamp0 = __rdtsc(), stamp1 = stamp0; g_use_rtm && stamp1 - stamp0 <= g_rtm_tx_limit1; stamp1 = __rdtsc())
		{
#ifndef _MSC_VER
			// On abort, xbegin jumps to 'retry' with the abort code in eax
			__asm__ goto ("xbegin %l[retry];" ::: "memory" : retry);
#else
			status = _xbegin();

			if (status != _XBEGIN_STARTED) [[unlikely]]
			{
				goto retry;
			}
#endif

			if constexpr (std::is_void_v<R>)
			{
				// Void op: commit and report success
				std::invoke(op);
#ifndef _MSC_VER
				__asm__ volatile ("xend;" ::: "memory");
#else
				_xend();
#endif
				return true;
			}
			else
			{
				// Value-returning op: commit and pair the result with success
				auto result = std::invoke(op);
#ifndef _MSC_VER
				__asm__ volatile ("xend;" ::: "memory");
#else
				_xend();
#endif
				return std::make_pair(true, std::move(result));
			}

		retry:
#ifndef _MSC_VER
			// Read the abort status xbegin left in eax
			__asm__ volatile ("movl %%eax, %0;" : "=r" (status) :: "memory");
#endif
			if (!status) [[unlikely]]
			{
				// Zero abort status: no retry hint, give up immediately
				break;
			}
		}

		// Transaction never committed: report failure (default-constructed R)
		if constexpr (std::is_void_v<R>)
		{
			return false;
		}
		else
		{
			return std::make_pair(false, R());
		}
	};
|
||||
|
||||
|
||||
	// Rotate helpers: prefer the compiler's rotate builtins where present,
	// otherwise fall back to x86 inline asm (count in cl)
#if defined(__GNUG__)

	// Rotate x left by n bits (8-bit)
	inline u8 rol8(u8 x, u8 n)
	{
#if __has_builtin(__builtin_rotateleft8)
		return __builtin_rotateleft8(x, n);
#else
		u8 result = x;
		__asm__("rolb %[n], %[result]" : [result] "+g"(result) : [n] "c"(n));
		return result;
#endif
	}

	// Rotate x right by n bits (8-bit)
	inline u8 ror8(u8 x, u8 n)
	{
#if __has_builtin(__builtin_rotateright8)
		return __builtin_rotateright8(x, n);
#else
		u8 result = x;
		__asm__("rorb %[n], %[result]" : [result] "+g"(result) : [n] "c"(n));
		return result;
#endif
	}

	// Rotate x left by n bits (16-bit); %b[n] selects the cl byte register
	inline u16 rol16(u16 x, u16 n)
	{
#if __has_builtin(__builtin_rotateleft16)
		return __builtin_rotateleft16(x, n);
#else
		u16 result = x;
		__asm__("rolw %b[n], %[result]" : [result] "+g"(result) : [n] "c"(n));
		return result;
#endif
	}

	// Rotate x right by n bits (16-bit)
	inline u16 ror16(u16 x, u16 n)
	{
#if __has_builtin(__builtin_rotateright16)
		return __builtin_rotateright16(x, n);
#else
		u16 result = x;
		__asm__("rorw %b[n], %[result]" : [result] "+g"(result) : [n] "c"(n));
		return result;
#endif
	}

	// Rotate x left by n bits (32-bit)
	inline u32 rol32(u32 x, u32 n)
	{
#if __has_builtin(__builtin_rotateleft32)
		return __builtin_rotateleft32(x, n);
#else
		u32 result = x;
		__asm__("roll %b[n], %[result]" : [result] "+g"(result) : [n] "c"(n));
		return result;
#endif
	}

	// Rotate x right by n bits (32-bit)
	inline u32 ror32(u32 x, u32 n)
	{
#if __has_builtin(__builtin_rotateright32)
		return __builtin_rotateright32(x, n);
#else
		u32 result = x;
		__asm__("rorl %b[n], %[result]" : [result] "+g"(result) : [n] "c"(n));
		return result;
#endif
	}

	// Rotate x left by n bits (64-bit)
	inline u64 rol64(u64 x, u64 n)
	{
#if __has_builtin(__builtin_rotateleft64)
		return __builtin_rotateleft64(x, n);
#else
		u64 result = x;
		__asm__("rolq %b[n], %[result]" : [result] "+g"(result) : [n] "c"(n));
		return result;
#endif
	}

	// Rotate x right by n bits (64-bit)
	inline u64 ror64(u64 x, u64 n)
	{
#if __has_builtin(__builtin_rotateright64)
		return __builtin_rotateright64(x, n);
#else
		u64 result = x;
		__asm__("rorq %b[n], %[result]" : [result] "+g"(result) : [n] "c"(n));
		return result;
#endif
	}
|
||||
|
||||
	// High 64 bits of the unsigned 64x64 -> 128-bit product
	constexpr u64 umulh64(u64 a, u64 b)
	{
		const __uint128_t x = a;
		const __uint128_t y = b;
		return (x * y) >> 64;
	}

	// High 64 bits of the signed 64x64 -> 128-bit product
	constexpr s64 mulh64(s64 a, s64 b)
	{
		const __int128_t x = a;
		const __int128_t y = b;
		return (x * y) >> 64;
	}

	// Signed 128/64 division of (high:low) by divisor; optional remainder out.
	// NOTE(review): the quotient is narrowed to s64 on return — presumably the
	// caller guarantees it fits, as with x86 idiv (TODO confirm)
	constexpr s64 div128(s64 high, s64 low, s64 divisor, s64* remainder = nullptr)
	{
		const __int128_t x = (__uint128_t{u64(high)} << 64) | u64(low);
		const __int128_t r = x / divisor;

		if (remainder)
		{
			*remainder = x % divisor;
		}

		return r;
	}

	// Unsigned 128/64 division of (high:low) by divisor; optional remainder out
	constexpr u64 udiv128(u64 high, u64 low, u64 divisor, u64* remainder = nullptr)
	{
		const __uint128_t x = (__uint128_t{high} << 64) | low;
		const __uint128_t r = x / divisor;

		if (remainder)
		{
			*remainder = x % divisor;
		}

		return r;
	}
|
||||
|
||||
#elif defined(_MSC_VER)
	// MSVC branch: same helpers implemented with compiler intrinsics

	inline u8 rol8(u8 x, u8 n)
	{
		return _rotl8(x, n);
	}

	inline u8 ror8(u8 x, u8 n)
	{
		return _rotr8(x, n);
	}

	inline u16 rol16(u16 x, u16 n)
	{
		return _rotl16(x, (u8)n);
	}

	inline u16 ror16(u16 x, u16 n)
	{
		return _rotr16(x, (u8)n);
	}

	inline u32 rol32(u32 x, u32 n)
	{
		return _rotl(x, (int)n);
	}

	inline u32 ror32(u32 x, u32 n)
	{
		return _rotr(x, (int)n);
	}

	inline u64 rol64(u64 x, u64 n)
	{
		return _rotl64(x, (int)n);
	}

	inline u64 ror64(u64 x, u64 n)
	{
		return _rotr64(x, (int)n);
	}

	// High 64 bits of the unsigned 64x64 product (x64 intrinsic)
	inline u64 umulh64(u64 x, u64 y)
	{
		return __umulh(x, y);
	}

	// High 64 bits of the signed 64x64 product (x64 intrinsic)
	inline s64 mulh64(s64 x, s64 y)
	{
		return __mulh(x, y);
	}

	// Signed 128/64 division via _div128; remainder is optional
	inline s64 div128(s64 high, s64 low, s64 divisor, s64* remainder = nullptr)
	{
		s64 rem;
		s64 r = _div128(high, low, divisor, &rem);

		if (remainder)
		{
			*remainder = rem;
		}

		return r;
	}

	// Unsigned 128/64 division via _udiv128; remainder is optional
	inline u64 udiv128(u64 high, u64 low, u64 divisor, u64* remainder = nullptr)
	{
		u64 rem;
		u64 r = _udiv128(high, low, divisor, &rem);

		if (remainder)
		{
			*remainder = rem;
		}

		return r;
	}
#endif
|
||||
} // namespace utils
|
||||
|
|
@ -4,7 +4,6 @@
|
|||
#include "mutex.h"
|
||||
#include "util/atomic.hpp"
|
||||
#include "util/typeindices.hpp"
|
||||
#include "VirtualMemory.h"
|
||||
#include <memory>
|
||||
|
||||
namespace utils
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue