mirror of
https://github.com/RPCSX/rpcsx.git
synced 2026-04-09 00:14:56 +00:00
implement unix socket ops
implement cross-process dmem support; implement IPMI try-send-message; implement sys_batch_map; store saves to game directory (.rpcsx subfolder); fix get-dir-entries; add UVD & VCE devices
This commit is contained in:
parent
a6211b514f
commit
6e25f347d3
39 changed files with 1526 additions and 294 deletions
|
|
@@ -43,7 +43,7 @@ using namespace amdgpu::device;
|
|||
static const bool kUseDirectMemory = false;
|
||||
static amdgpu::bridge::BridgeHeader *g_bridge;
|
||||
|
||||
void *g_rwMemory;
|
||||
// void *g_rwMemory;
|
||||
std::size_t g_memorySize;
|
||||
std::uint64_t g_memoryBase;
|
||||
RemoteMemory g_hostMemory;
|
||||
|
|
@@ -4577,6 +4577,9 @@ static auto g_commandHandlers = [] {
|
|||
static void handleCommandBuffer(TaskChain &waitTaskSet, QueueRegisters &regs,
|
||||
std::span<std::uint32_t> &packets) {
|
||||
while (!packets.empty()) {
|
||||
// std::uint64_t address =
|
||||
// (char *)packets.data() - g_hostMemory.shmPointer + 0x40000;
|
||||
// std::fprintf(stderr, "address = %lx\n", address);
|
||||
auto cmd = packets[0];
|
||||
auto type = getBits(cmd, 31, 30);
|
||||
|
||||
|
|
@@ -4637,29 +4640,29 @@ void amdgpu::device::AmdgpuDevice::handleProtectMemory(std::uint64_t address,
|
|||
break;
|
||||
|
||||
case PROT_WRITE | PROT_READ:
|
||||
protStr = "W";
|
||||
protStr = "RW";
|
||||
break;
|
||||
|
||||
default:
|
||||
protStr = "unknown";
|
||||
break;
|
||||
}
|
||||
std::printf("Allocated area at %zx, size %lx, prot %s\n", address, size,
|
||||
protStr);
|
||||
std::fprintf(stderr, "Allocated area at %zx, size %lx, prot %s\n", address,
|
||||
size, protStr);
|
||||
} else {
|
||||
memoryAreaTable.unmap(beginPage, endPage);
|
||||
std::printf("Unmapped area at %zx, size %lx\n", address, size);
|
||||
std::fprintf(stderr, "Unmapped area at %zx, size %lx\n", address, size);
|
||||
}
|
||||
|
||||
std::size_t index = 0;
|
||||
for (auto area : memoryAreaTable) {
|
||||
// std::printf("area %lx-%lx\n", area.beginAddress * kPageSize,
|
||||
// area.endAddress * kPageSize);
|
||||
|
||||
if (index >= std::size(g_bridge->memoryAreas)) {
|
||||
util::unreachable("too many memory areas");
|
||||
}
|
||||
|
||||
// std::printf("area %lx-%lx\n", area.beginAddress * kPageSize,
|
||||
// area.endAddress * kPageSize);
|
||||
|
||||
g_bridge->memoryAreas[index++] = {
|
||||
.address = area.beginAddress * kPageSize,
|
||||
.size = (area.endAddress - area.beginAddress) * kPageSize,
|
||||
|
|
@@ -4685,11 +4688,16 @@ void amdgpu::device::AmdgpuDevice::handleCommandBuffer(std::uint64_t queueId,
|
|||
|
||||
if (inserted) {
|
||||
std::printf("creation queue %lx\n", queueId);
|
||||
it->second.sched.enqueue([=, queue = &it->second] {
|
||||
if (queueId == 0xc0023f00) {
|
||||
setThreadName("Graphics queue");
|
||||
} else {
|
||||
setThreadName(("Compute queue" + std::to_string(queueId)).c_str());
|
||||
it->second.sched.enqueue([=, queue = &it->second,
|
||||
initialized = false] mutable {
|
||||
if (!initialized) {
|
||||
initialized = true;
|
||||
|
||||
if (queueId == 0xc0023f00) {
|
||||
setThreadName("Graphics queue");
|
||||
} else {
|
||||
setThreadName(("Compute queue" + std::to_string(queueId)).c_str());
|
||||
}
|
||||
}
|
||||
|
||||
Queue::CommandBuffer *commandBuffer;
|
||||
|
|
@@ -4715,7 +4723,7 @@ void amdgpu::device::AmdgpuDevice::handleCommandBuffer(std::uint64_t queueId,
|
|||
});
|
||||
}
|
||||
|
||||
// std::printf("address = %lx, count = %lx\n", address, count);
|
||||
// std::fprintf(stderr, "address = %lx, count = %lx\n", address, count);
|
||||
|
||||
std::lock_guard lock(it->second.mtx);
|
||||
it->second.commandBuffers.push_back(
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue