// rpcsx/orbis-kernel/src/KernelContext.cpp
#include "orbis/KernelContext.hpp"
#include "orbis/thread/Process.hpp"
#include "orbis/utils/Logs.hpp"
#include <sys/mman.h>
#include <sys/unistd.h>
namespace orbis {
// Global shared kernel context, constructed in-place inside a process-shared
// anonymous mapping at a fixed address so every attached process sees the
// same layout.
KernelContext &g_context = *[]() -> KernelContext * {
  // Allocate global shared kernel memory
  // TODO: randomize for hardening and reduce size
  auto ptr = mmap(reinterpret_cast<void *>(0x200'0000'0000), 0x1'0000'0000,
                  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);

  // mmap reports failure with MAP_FAILED ((void *)-1), not a null pointer;
  // a `!ptr` test would never fire. MAP_ANONYMOUS also portably requires
  // fd == -1 rather than 0.
  if (ptr == MAP_FAILED)
    std::abort();
  return new (ptr) KernelContext;
}();
2023-07-05 10:38:31 +02:00
KernelContext::KernelContext() {
// Initialize recursive heap mutex
pthread_mutexattr_t mtx_attr;
pthread_mutexattr_init(&mtx_attr);
pthread_mutexattr_settype(&mtx_attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutexattr_setpshared(&mtx_attr, PTHREAD_PROCESS_SHARED);
pthread_mutex_init(&m_heap_mtx, &mtx_attr);
pthread_mutexattr_destroy(&mtx_attr);
2023-07-15 09:15:32 +02:00
std::printf("orbis::KernelContext initialized, addr=%p\n", this);
std::printf("TSC frequency: %lu\n", getTscFreq());
2023-07-05 10:38:31 +02:00
}
2023-07-04 18:19:17 +02:00
KernelContext::~KernelContext() {}
// Allocates a Process wrapped in a list node, links it in as the new head of
// the process list, and returns a pointer to the embedded Process.
Process *KernelContext::createProcess(pid_t pid) {
  auto node = knew<utils::LinkedNode<Process>>();
  auto &proc = node->object;
  proc.context = this;
  proc.pid = pid;
  proc.state = ProcessState::NEW;

  {
    std::lock_guard lock(m_proc_mtx);
    // Link in front of the current head (if any), then become the head.
    if (m_processes != nullptr) {
      m_processes->insertPrev(*node);
    }
    m_processes = node;
  }

  return &proc;
}
// Unlinks `proc` from the process list and releases its node.
// `proc` must be the Process embedded in a LinkedNode<Process> returned by
// createProcess; the Process is the node's first member, which is what makes
// the pointer cast below valid.
void KernelContext::deleteProcess(Process *proc) {
  auto procNode = reinterpret_cast<utils::LinkedNode<Process> *>(proc);

  {
    std::lock_guard lock(m_proc_mtx);
    auto next = procNode->erase();
    // If we removed the list head, advance the head to the next node.
    if (procNode == m_processes) {
      m_processes = next;
    }
  }

  kdelete(procNode);
}
// Linear scan of the process list for a matching pid. Returns nullptr when no
// such process exists. The process mutex is held for the whole scan.
Process *KernelContext::findProcessById(pid_t pid) const {
  std::lock_guard lock(m_proc_mtx);

  auto node = m_processes;
  while (node != nullptr) {
    if (node->object.pid == pid) {
      return &node->object;
    }
    node = node->next;
  }

  return nullptr;
}
2023-07-15 09:15:32 +02:00
long KernelContext::getTscFreq() {
auto cal_tsc = []() -> long {
const long timer_freq = 1'000'000'000;
// Calibrate TSC
constexpr int samples = 40;
long rdtsc_data[samples];
long timer_data[samples];
long error_data[samples];
struct timespec ts0;
clock_gettime(CLOCK_MONOTONIC, &ts0);
long sec_base = ts0.tv_sec;
for (int i = 0; i < samples; i++) {
usleep(200);
error_data[i] = (__builtin_ia32_lfence(), __builtin_ia32_rdtsc());
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
rdtsc_data[i] = (__builtin_ia32_lfence(), __builtin_ia32_rdtsc());
timer_data[i] = ts.tv_nsec + (ts.tv_sec - sec_base) * 1'000'000'000;
}
// Compute average TSC
long acc = 0;
for (int i = 0; i < samples - 1; i++) {
acc += (rdtsc_data[i + 1] - rdtsc_data[i]) * timer_freq /
(timer_data[i + 1] - timer_data[i]);
}
// Rounding
acc /= (samples - 1);
constexpr long grain = 1'000'000;
return grain * (acc / grain + long{(acc % grain) > (grain / 2)});
};
long freq = m_tsc_freq.load();
if (freq)
return freq;
m_tsc_freq.compare_exchange_strong(freq, cal_tsc());
return m_tsc_freq.load();
}
2023-07-04 18:19:17 +02:00
void *KernelContext::kalloc(std::size_t size, std::size_t align) {
size = (size + (__STDCPP_DEFAULT_NEW_ALIGNMENT__ - 1)) &
~(__STDCPP_DEFAULT_NEW_ALIGNMENT__ - 1);
if (!size)
std::abort();
pthread_mutex_lock(&m_heap_mtx);
if (!m_heap_is_freeing) {
// Try to reuse previously freed block
for (auto [it, end] = m_free_heap.equal_range(size); it != end; it++) {
auto result = it->second;
if (!(reinterpret_cast<std::uintptr_t>(result) & (align - 1))) {
2023-07-10 04:19:21 +02:00
auto node = m_free_heap.extract(it);
node.key() = 0;
node.mapped() = nullptr;
m_used_node.insert(m_used_node.begin(), std::move(node));
pthread_mutex_unlock(&m_heap_mtx);
return result;
}
}
}
align = std::max<std::size_t>(align, __STDCPP_DEFAULT_NEW_ALIGNMENT__);
auto heap = reinterpret_cast<std::uintptr_t>(m_heap_next);
2023-07-04 18:19:17 +02:00
heap = (heap + (align - 1)) & ~(align - 1);
auto result = reinterpret_cast<void *>(heap);
m_heap_next = reinterpret_cast<void *>(heap + size);
// Check overflow
if (heap + size < heap)
std::abort();
if (heap + size > (uintptr_t)&g_context + 0x1'0000'0000)
std::abort();
pthread_mutex_unlock(&m_heap_mtx);
2023-07-04 18:19:17 +02:00
return result;
}
// Returns a block to the free list. The block is poisoned with 0xcc so stale
// reads after free are easy to spot. A spare map node parked by kalloc on
// m_used_node is reused when available, so inserting into m_free_heap does
// not itself have to allocate.
void KernelContext::kfree(void *ptr, std::size_t size) {
  size = (size + (__STDCPP_DEFAULT_NEW_ALIGNMENT__ - 1)) &
         ~(__STDCPP_DEFAULT_NEW_ALIGNMENT__ - 1);
  if (!size)
    std::abort();

  // Poison freed memory.
  std::memset(ptr, 0xcc, size);

  pthread_mutex_lock(&m_heap_mtx);
  // A free within a free is a bug; the flag also tells kalloc to keep away
  // from the free list while it is being rebuilt here.
  if (m_heap_is_freeing)
    std::abort();
  m_heap_is_freeing = true;

  if (m_used_node.empty()) {
    // No spare node cached: this insert may allocate through kalloc.
    m_free_heap.emplace(size, ptr);
  } else {
    // Recycle a node previously parked by kalloc.
    auto spare = m_used_node.extract(m_used_node.begin());
    spare.key() = size;
    spare.mapped() = ptr;
    m_free_heap.insert(std::move(spare));
  }

  m_heap_is_freeing = false;
  pthread_mutex_unlock(&m_heap_mtx);
}
inline namespace utils {
// Free-function forwarders onto the global kernel context heap.
void *kalloc(std::size_t size, std::size_t align) {
  return g_context.kalloc(size, align);
}

void kfree(void *ptr, std::size_t size) { return g_context.kfree(ptr, size); }
} // namespace utils
2023-07-16 17:31:12 +02:00
inline namespace logs {
template <>
void log_class_string<kstring>::format(std::string &out, const void *arg) {
out += get_object(arg);
}
} // namespace logs
2023-07-04 18:19:17 +02:00
} // namespace orbis